diff --git a/.github/workflows/build-check_aarch64.yml b/.github/workflows/build-check_aarch64.yml new file mode 100644 index 0000000000000..e9b915067c02e --- /dev/null +++ b/.github/workflows/build-check_aarch64.yml @@ -0,0 +1,34 @@ +name: aarch64 CI +on: + pull_request: + branches: + - '**' + - '!mainline' + +jobs: + kernel-build-job: + runs-on: + labels: kernel-build-arm64 + container: + image: rockylinux:8 + env: + ROCKY_ENV: rocky8 + ports: + - 80 + options: --cpus 8 + steps: + - name: Install tools and Libraries + run: | + dnf groupinstall 'Development Tools' -y + dnf install --enablerepo=devel bc dwarves kernel-devel openssl-devel elfutils-libelf-devel -y + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: "${{ github.event.pull_request.head.sha }}" + fetch-depth: 0 + - name: Build the Kernel + run: | + git config --global --add safe.directory /__w/kernel-src-tree/kernel-src-tree + cp configs/kernel-aarch64.config .config + make olddefconfig + make -j8 diff --git a/.github/workflows/build-check_x86_64.yml b/.github/workflows/build-check_x86_64.yml new file mode 100644 index 0000000000000..033208cc7fdf1 --- /dev/null +++ b/.github/workflows/build-check_x86_64.yml @@ -0,0 +1,34 @@ +name: x86_64 CI +on: + pull_request: + branches: + - '**' + - '!mainline' + +jobs: + kernel-build-job: + runs-on: + labels: kernel-build + container: + image: rockylinux:8 + env: + ROCKY_ENV: rocky8 + ports: + - 80 + options: --cpus 8 + steps: + - name: Install tools and Libraries + run: | + dnf groupinstall 'Development Tools' -y + dnf install --enablerepo=devel bc dwarves kernel-devel openssl-devel elfutils-libelf-devel -y + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: "${{ github.event.pull_request.head.sha }}" + fetch-depth: 0 + - name: Build the Kernel + run: | + git config --global --add safe.directory /__w/kernel-src-tree/kernel-src-tree + cp configs/kernel-x86_64.config .config + make olddefconfig + make -j8 diff --git a/Makefile.rhelver b/Makefile.rhelver index 08fa0f88cd6bc..5f1c58fe9d267 100644 --- a/Makefile.rhelver +++ b/Makefile.rhelver @@ -12,7 +12,7 @@ RHEL_MINOR = 10 # # Use this spot to avoid future merge conflicts. # Do not trim this comment. 
-RHEL_RELEASE = 553.33.1 +RHEL_RELEASE = 553.36.1 # # ZSTREAM diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index dbe876ce744c4..4fc0f57bcfa83 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -391,6 +391,90 @@ int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type) } EXPORT_SYMBOL_GPL(intel_microcode_sanity_check); +#define MSR_IA32_TME_ACTIVATE 0x982 + +/* Helpers to access TME_ACTIVATE MSR */ +#define TME_ACTIVATE_LOCKED(x) (x & 0x1) +#define TME_ACTIVATE_ENABLED(x) (x & 0x2) + +#define TME_ACTIVATE_POLICY(x) ((x >> 4) & 0xf) /* Bits 7:4 */ +#define TME_ACTIVATE_POLICY_AES_XTS_128 0 + +#define TME_ACTIVATE_KEYID_BITS(x) ((x >> 32) & 0xf) /* Bits 35:32 */ + +#define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff) /* Bits 63:48 */ +#define TME_ACTIVATE_CRYPTO_AES_XTS_128 1 + +/* Values for mktme_status (SW only construct) */ +#define MKTME_ENABLED 0 +#define MKTME_DISABLED 1 +#define MKTME_UNINITIALIZED 2 +static int mktme_status = MKTME_UNINITIALIZED; + +static void detect_tme_early(struct cpuinfo_x86 *c) +{ + u64 tme_activate, tme_policy, tme_crypto_algs; + int keyid_bits = 0, nr_keyids = 0; + static u64 tme_activate_cpu0 = 0; + + rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate); + + if (mktme_status != MKTME_UNINITIALIZED) { + if (tme_activate != tme_activate_cpu0) { + /* Broken BIOS? */ + pr_err_once("x86/tme: configuration is inconsistent between CPUs\n"); + pr_err_once("x86/tme: MKTME is not usable\n"); + mktme_status = MKTME_DISABLED; + + /* Proceed. We may need to exclude bits from x86_phys_bits. */ + } + } else { + tme_activate_cpu0 = tme_activate; + } + + if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) { + pr_info_once("x86/tme: not enabled by BIOS\n"); + mktme_status = MKTME_DISABLED; + return; + } + + if (mktme_status != MKTME_UNINITIALIZED) + goto detect_keyid_bits; + + pr_info("x86/tme: enabled by BIOS\n"); + + tme_policy = TME_ACTIVATE_POLICY(tme_activate); + if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128) + pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy); + + tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate); + if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) { + pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n", + tme_crypto_algs); + mktme_status = MKTME_DISABLED; + } +detect_keyid_bits: + keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate); + nr_keyids = (1UL << keyid_bits) - 1; + if (nr_keyids) { + pr_info_once("x86/mktme: enabled by BIOS\n"); + pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids); + } else { + pr_info_once("x86/mktme: disabled by BIOS\n"); + } + + if (mktme_status == MKTME_UNINITIALIZED) { + /* MKTME is usable */ + mktme_status = MKTME_ENABLED; + } + + /* + * KeyID bits effectively lower the number of physical address + * bits. Update cpuinfo_x86::x86_phys_bits accordingly. + */ + c->x86_phys_bits -= keyid_bits; +} + static void early_init_intel(struct cpuinfo_x86 *c) { u64 misc_enable; @@ -542,6 +626,13 @@ static void early_init_intel(struct cpuinfo_x86 *c) */ if (detect_extended_topology_early(c) < 0) detect_ht_early(c); + + /* + * Adjust the number of physical bits early because it affects the + * valid bits of the MTRR mask registers. 
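+	 * For example (hypothetical numbers): a CPU reporting MAXPHYADDR=52
+	 * with 6 TME KeyID bits configured ends up with x86_phys_bits=46,
+	 * so the MTRR masks written later must be 46 bits wide, not 52.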
+ */ + if (cpu_has(c, X86_FEATURE_TME)) + detect_tme_early(c); } static void bsp_init_intel(struct cpuinfo_x86 *c) @@ -702,90 +793,6 @@ static void srat_detect_node(struct cpuinfo_x86 *c) #endif } -#define MSR_IA32_TME_ACTIVATE 0x982 - -/* Helpers to access TME_ACTIVATE MSR */ -#define TME_ACTIVATE_LOCKED(x) (x & 0x1) -#define TME_ACTIVATE_ENABLED(x) (x & 0x2) - -#define TME_ACTIVATE_POLICY(x) ((x >> 4) & 0xf) /* Bits 7:4 */ -#define TME_ACTIVATE_POLICY_AES_XTS_128 0 - -#define TME_ACTIVATE_KEYID_BITS(x) ((x >> 32) & 0xf) /* Bits 35:32 */ - -#define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff) /* Bits 63:48 */ -#define TME_ACTIVATE_CRYPTO_AES_XTS_128 1 - -/* Values for mktme_status (SW only construct) */ -#define MKTME_ENABLED 0 -#define MKTME_DISABLED 1 -#define MKTME_UNINITIALIZED 2 -static int mktme_status = MKTME_UNINITIALIZED; - -static void detect_tme(struct cpuinfo_x86 *c) -{ - u64 tme_activate, tme_policy, tme_crypto_algs; - int keyid_bits = 0, nr_keyids = 0; - static u64 tme_activate_cpu0 = 0; - - rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate); - - if (mktme_status != MKTME_UNINITIALIZED) { - if (tme_activate != tme_activate_cpu0) { - /* Broken BIOS? */ - pr_err_once("x86/tme: configuration is inconsistent between CPUs\n"); - pr_err_once("x86/tme: MKTME is not usable\n"); - mktme_status = MKTME_DISABLED; - - /* Proceed. We may need to exclude bits from x86_phys_bits. */ - } - } else { - tme_activate_cpu0 = tme_activate; - } - - if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) { - pr_info_once("x86/tme: not enabled by BIOS\n"); - mktme_status = MKTME_DISABLED; - return; - } - - if (mktme_status != MKTME_UNINITIALIZED) - goto detect_keyid_bits; - - pr_info("x86/tme: enabled by BIOS\n"); - - tme_policy = TME_ACTIVATE_POLICY(tme_activate); - if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128) - pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy); - - tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate); - if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) { - pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n", - tme_crypto_algs); - mktme_status = MKTME_DISABLED; - } -detect_keyid_bits: - keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate); - nr_keyids = (1UL << keyid_bits) - 1; - if (nr_keyids) { - pr_info_once("x86/mktme: enabled by BIOS\n"); - pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids); - } else { - pr_info_once("x86/mktme: disabled by BIOS\n"); - } - - if (mktme_status == MKTME_UNINITIALIZED) { - /* MKTME is usable */ - mktme_status = MKTME_ENABLED; - } - - /* - * KeyID bits effectively lower the number of physical address - * bits. Update cpuinfo_x86::x86_phys_bits accordingly. - */ - c->x86_phys_bits -= keyid_bits; -} - static void init_cpuid_fault(struct cpuinfo_x86 *c) { u64 msr; @@ -922,9 +929,6 @@ static void init_intel(struct cpuinfo_x86 *c) init_ia32_feat_ctl(c); - if (cpu_has(c, X86_FEATURE_TME)) - detect_tme(c); - init_intel_misc_features(c); split_lock_init(); diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c index 11abcf861f381..ff4560dfec944 100644 --- a/arch/x86/kernel/cpu/mtrr/mtrr.c +++ b/arch/x86/kernel/cpu/mtrr/mtrr.c @@ -767,6 +767,20 @@ void __init mtrr_bp_init(void) if (mtrr_enabled()) mtrr_bp_pat_init(); + /* + * RHEL-only: Intel systems with TME feature enabled reduce + * phys_addr by keyid_bits (see detect_tme_early()) but this does + * not affect mtrr_cleanup() as phys_addr is calculated independently + * here (see f6b980646b93 upstream). 
To make TME enabled systems boot + * and to minimize the change for other environments, use + * boot_cpu_data.x86_phys_bits here instead. + */ + if (boot_cpu_has(X86_FEATURE_TME)) { + phys_addr = boot_cpu_data.x86_phys_bits; + size_or_mask = SIZE_OR_MASK_BITS(phys_addr); + size_and_mask = ~size_or_mask & 0xfffff00000ULL; + } + if (mtrr_cleanup(phys_addr)) { changed_by_mtrr_cleanup = 1; mtrr_if->set_all(); diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 1add8785dc9f6..7f2a081a98ab6 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1724,7 +1724,7 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm) * Note, the source is not required to have the same number of * vCPUs as the destination when migrating a vanilla SEV VM. */ - src_vcpu = kvm_get_vcpu(dst_kvm, i); + src_vcpu = kvm_get_vcpu(src_kvm, i); src_svm = to_svm(src_vcpu); /* diff --git a/ciq/ciq_backports/kernel-4.18.0-553.34.1.el8_10/13325333.failed b/ciq/ciq_backports/kernel-4.18.0-553.34.1.el8_10/13325333.failed new file mode 100644 index 0000000000000..8bacf9a797c4e --- /dev/null +++ b/ciq/ciq_backports/kernel-4.18.0-553.34.1.el8_10/13325333.failed @@ -0,0 +1,91 @@ +xfs: fix sparse inode limits on runt AG + +jira LE-2289 +Rebuild_History Non-Buildable kernel-4.18.0-553.34.1.el8_10 +commit-author Dave Chinner +commit 13325333582d4820d39b9e8f63d6a54e745585d9 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-4.18.0-553.34.1.el8_10/13325333.failed + +The runt AG at the end of a filesystem is almost always smaller than +the mp->m_sb.sb_agblocks. Unfortunately, when setting the max_agbno +limit for the inode chunk allocation, we do not take this into +account. This means we can allocate a sparse inode chunk that +overlaps beyond the end of an AG. When we go to allocate an inode +from that sparse chunk, the irec fails validation because the +agbno of the start of the irec is beyond valid limits for the runt +AG. + +Prevent this from happening by taking into account the size of the +runt AG when allocating inode chunks. Also convert the various +checks for valid inode chunk agbnos to use xfs_ag_block_count() +so that they will also catch such issues in the future. + +Fixes: 56d1115c9bc7 ("xfs: allocate sparse inode chunks on full chunk allocation failure") + Signed-off-by: Dave Chinner + Reviewed-by: Darrick J. Wong + Signed-off-by: Carlos Maiolino + +(cherry picked from commit 13325333582d4820d39b9e8f63d6a54e745585d9) + Signed-off-by: Jonathan Maple + +# Conflicts: +# fs/xfs/libxfs/xfs_ialloc.c +diff --cc fs/xfs/libxfs/xfs_ialloc.c +index 75930a32c8e8,f3a840a425f5..000000000000 +--- a/fs/xfs/libxfs/xfs_ialloc.c ++++ b/fs/xfs/libxfs/xfs_ialloc.c +@@@ -2295,11 -2473,10 +2296,16 @@@ xfs_imap + /* + * Split up the inode number into its parts. + */ + + pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino)); + agino = XFS_INO_TO_AGINO(mp, ino); + agbno = XFS_AGINO_TO_AGBNO(mp, agino); +++<<<<<<< HEAD + + if (!pag || agbno >= mp->m_sb.sb_agblocks || + + ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) { +++======= ++ if (agbno >= xfs_ag_block_count(mp, pag_agno(pag)) || ++ ino != xfs_agino_to_ino(pag, agino)) { +++>>>>>>> 13325333582d (xfs: fix sparse inode limits on runt AG) + error = -EINVAL; + #ifdef DEBUG + /* +@@@ -2307,24 -2484,19 +2313,30 @@@ + * as they can be invalid without implying corruption. 
+ */ + if (flags & XFS_IGET_UNTRUSTED) +++<<<<<<< HEAD + + goto out_drop; + + if (!pag) { + + xfs_alert(mp, + + "%s: agno (%d) >= mp->m_sb.sb_agcount (%d)", + + __func__, XFS_INO_TO_AGNO(mp, ino), + + mp->m_sb.sb_agcount); + + } + + if (agbno >= mp->m_sb.sb_agblocks) { +++======= ++ return error; ++ if (agbno >= xfs_ag_block_count(mp, pag_agno(pag))) { +++>>>>>>> 13325333582d (xfs: fix sparse inode limits on runt AG) + xfs_alert(mp, + "%s: agbno (0x%llx) >= mp->m_sb.sb_agblocks (0x%lx)", + __func__, (unsigned long long)agbno, +- (unsigned long)mp->m_sb.sb_agblocks); ++ (unsigned long)xfs_ag_block_count(mp, ++ pag_agno(pag))); + } + - if (ino != xfs_agino_to_ino(pag, agino)) { + + if (pag && ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) { + xfs_alert(mp, + - "%s: ino (0x%llx) != xfs_agino_to_ino() (0x%llx)", + + "%s: ino (0x%llx) != XFS_AGINO_TO_INO() (0x%llx)", + __func__, ino, + - xfs_agino_to_ino(pag, agino)); + + XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)); + } + xfs_stack_trace(); + #endif /* DEBUG */ +* Unmerged path fs/xfs/libxfs/xfs_ialloc.c diff --git a/ciq/ciq_backports/kernel-4.18.0-553.34.1.el8_10/6890cb1a.failed b/ciq/ciq_backports/kernel-4.18.0-553.34.1.el8_10/6890cb1a.failed new file mode 100644 index 0000000000000..0b86f5a40b3ac --- /dev/null +++ b/ciq/ciq_backports/kernel-4.18.0-553.34.1.el8_10/6890cb1a.failed @@ -0,0 +1,307 @@ +x86/cpu/intel: Detect TME keyid bits before setting MTRR mask registers + +jira LE-2289 +Rebuild_History Non-Buildable kernel-4.18.0-553.34.1.el8_10 +commit-author Paolo Bonzini +commit 6890cb1ace350b4386c8aee1343dc3b3ddd214da +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-4.18.0-553.34.1.el8_10/6890cb1a.failed + +MKTME repurposes the high bit of physical address to key id for encryption +key and, even though MAXPHYADDR in CPUID[0x80000008] remains the same, +the valid bits in the MTRR mask register are based on the reduced number +of physical address bits. + +detect_tme() in arch/x86/kernel/cpu/intel.c detects TME and subtracts +it from the total usable physical bits, but it is called too late. +Move the call to early_init_intel() so that it is called in setup_arch(), +before MTRRs are setup. + +This fixes boot on TDX-enabled systems, which until now only worked with +"disable_mtrr_cleanup". Without the patch, the values written to the +MTRRs mask registers were 52-bit wide (e.g. 0x000fffff_80000800) and +the writes failed; with the patch, the values are 46-bit wide, which +matches the reduced MAXPHYADDR that is shown in /proc/cpuinfo. 
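+
+A minimal standalone sketch of the mask arithmetic described above (not
+kernel code; the 2 GiB region size and the 6 KeyID bits are made-up
+example values):
+
+	#include <stdio.h>
+	#include <stdint.h>
+
+	/* Build an MTRR PHYSMASK register value for a region of 'size'
+	 * bytes, given 'phys_bits' usable physical address bits; bit 11
+	 * is the mask-valid bit. */
+	static uint64_t mtrr_physmask(unsigned int phys_bits, uint64_t size)
+	{
+		uint64_t mask = ((1ULL << phys_bits) - 1) & ~(size - 1);
+
+		return mask | (1ULL << 11);
+	}
+
+	int main(void)
+	{
+		uint64_t size = 1ULL << 31;	/* 2 GiB region */
+
+		/* 52 usable bits: prints 0x000fffff80000800, the failing write */
+		printf("%#018llx\n", (unsigned long long)mtrr_physmask(52, size));
+		/* 46 usable bits, after excluding 6 TME KeyID bits */
+		printf("%#018llx\n", (unsigned long long)mtrr_physmask(46, size));
+		return 0;
+	}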
+ + Reported-by: Zixi Chen + Signed-off-by: Paolo Bonzini + Signed-off-by: Dave Hansen + Cc:stable@vger.kernel.org +Link: https://lore.kernel.org/all/20240131230902.1867092-3-pbonzini%40redhat.com +(cherry picked from commit 6890cb1ace350b4386c8aee1343dc3b3ddd214da) + Signed-off-by: Jonathan Maple + +# Conflicts: +# arch/x86/kernel/cpu/intel.c +diff --cc arch/x86/kernel/cpu/intel.c +index dbe876ce744c,40dec9b56f87..000000000000 +--- a/arch/x86/kernel/cpu/intel.c ++++ b/arch/x86/kernel/cpu/intel.c +@@@ -217,179 -184,89 +217,265 @@@ static bool bad_spectre_microcode(struc + return false; + } + +++<<<<<<< HEAD + +int intel_cpu_collect_info(struct ucode_cpu_info *uci) + +{ + + unsigned int val[2]; + + unsigned int family, model; + + struct cpu_signature csig = { 0 }; + + unsigned int eax, ebx, ecx, edx; + + + + memset(uci, 0, sizeof(*uci)); + + + + eax = 0x00000001; + + ecx = 0; + + native_cpuid(&eax, &ebx, &ecx, &edx); + + csig.sig = eax; + + + + family = x86_family(eax); + + model = x86_model(eax); + + + + if (model >= 5 || family > 6) { + + /* get processor flags from MSR 0x17 */ + + native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]); + + csig.pf = 1 << ((val[1] >> 18) & 7); + + } + + + + csig.rev = intel_get_microcode_revision(); + + + + uci->cpu_sig = csig; + + + + return 0; + +} + +EXPORT_SYMBOL_GPL(intel_cpu_collect_info); + + + +/* + + * Returns 1 if update has been found, 0 otherwise. + + */ + +int intel_find_matching_signature(void *mc, unsigned int csig, int cpf) + +{ + + struct microcode_header_intel *mc_hdr = mc; + + struct extended_sigtable *ext_hdr; + + struct extended_signature *ext_sig; + + int i; + + + + if (intel_cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf)) + + return 1; + + + + /* Look for ext. headers: */ + + if (get_totalsize(mc_hdr) <= get_datasize(mc_hdr) + MC_HEADER_SIZE) + + return 0; + + + + ext_hdr = mc + get_datasize(mc_hdr) + MC_HEADER_SIZE; + + ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE; + + + + for (i = 0; i < ext_hdr->count; i++) { + + if (intel_cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf)) + + return 1; + + ext_sig++; + + } + + return 0; + +} + +EXPORT_SYMBOL_GPL(intel_find_matching_signature); + + + +/** + + * intel_microcode_sanity_check() - Sanity check microcode file. + + * @mc: Pointer to the microcode file contents. + + * @print_err: Display failure reason if true, silent if false. + + * @hdr_type: Type of file, i.e. normal microcode file or In Field Scan file. + + * Validate if the microcode header type matches with the type + + * specified here. + + * + + * Validate certain header fields and verify if computed checksum matches + + * with the one specified in the header. + + * + + * Return: 0 if the file passes all the checks, -EINVAL if any of the checks + + * fail. + + */ + +int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type) + +{ + + unsigned long total_size, data_size, ext_table_size; + + struct microcode_header_intel *mc_header = mc; + + struct extended_sigtable *ext_header = NULL; + + u32 sum, orig_sum, ext_sigcount = 0, i; + + struct extended_signature *ext_sig; + + + + total_size = get_totalsize(mc_header); + + data_size = get_datasize(mc_header); + + + + if (data_size + MC_HEADER_SIZE > total_size) { + + if (print_err) + + pr_err("Error: bad microcode data file size.\n"); + + return -EINVAL; + + } + + + + if (mc_header->ldrver != 1 || mc_header->hdrver != hdr_type) { + + if (print_err) + + pr_err("Error: invalid/unknown microcode update format. 
Header type %d\n", + + mc_header->hdrver); + + return -EINVAL; + + } + + + + ext_table_size = total_size - (MC_HEADER_SIZE + data_size); + + if (ext_table_size) { + + u32 ext_table_sum = 0; + + u32 *ext_tablep; + + + + if (ext_table_size < EXT_HEADER_SIZE || + + ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) { + + if (print_err) + + pr_err("Error: truncated extended signature table.\n"); + + return -EINVAL; + + } + + + + ext_header = mc + MC_HEADER_SIZE + data_size; + + if (ext_table_size != exttable_size(ext_header)) { + + if (print_err) + + pr_err("Error: extended signature table size mismatch.\n"); + + return -EFAULT; + + } + + + + ext_sigcount = ext_header->count; + + + + /* + + * Check extended table checksum: the sum of all dwords that + + * comprise a valid table must be 0. + + */ + + ext_tablep = (u32 *)ext_header; + + + + i = ext_table_size / sizeof(u32); + + while (i--) + + ext_table_sum += ext_tablep[i]; + + + + if (ext_table_sum) { + + if (print_err) + + pr_warn("Bad extended signature table checksum, aborting.\n"); + + return -EINVAL; + + } + + } + + + + /* + + * Calculate the checksum of update data and header. The checksum of + + * valid update data and header including the extended signature table + + * must be 0. + + */ + + orig_sum = 0; + + i = (MC_HEADER_SIZE + data_size) / sizeof(u32); + + while (i--) + + orig_sum += ((u32 *)mc)[i]; + + + + if (orig_sum) { + + if (print_err) + + pr_err("Bad microcode data checksum, aborting.\n"); + + return -EINVAL; + + } + + + + if (!ext_table_size) + + return 0; + + + + /* + + * Check extended signature checksum: 0 => valid. + + */ + + for (i = 0; i < ext_sigcount; i++) { + + ext_sig = (void *)ext_header + EXT_HEADER_SIZE + + + EXT_SIGNATURE_SIZE * i; + + + + sum = (mc_header->sig + mc_header->pf + mc_header->cksum) - + + (ext_sig->sig + ext_sig->pf + ext_sig->cksum); + + if (sum) { + + if (print_err) + + pr_err("Bad extended signature checksum, aborting.\n"); + + return -EINVAL; + + } + + } + + return 0; + +} + +EXPORT_SYMBOL_GPL(intel_microcode_sanity_check); +++======= ++ #define MSR_IA32_TME_ACTIVATE 0x982 ++ ++ /* Helpers to access TME_ACTIVATE MSR */ ++ #define TME_ACTIVATE_LOCKED(x) (x & 0x1) ++ #define TME_ACTIVATE_ENABLED(x) (x & 0x2) ++ ++ #define TME_ACTIVATE_POLICY(x) ((x >> 4) & 0xf) /* Bits 7:4 */ ++ #define TME_ACTIVATE_POLICY_AES_XTS_128 0 ++ ++ #define TME_ACTIVATE_KEYID_BITS(x) ((x >> 32) & 0xf) /* Bits 35:32 */ ++ ++ #define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff) /* Bits 63:48 */ ++ #define TME_ACTIVATE_CRYPTO_AES_XTS_128 1 ++ ++ /* Values for mktme_status (SW only construct) */ ++ #define MKTME_ENABLED 0 ++ #define MKTME_DISABLED 1 ++ #define MKTME_UNINITIALIZED 2 ++ static int mktme_status = MKTME_UNINITIALIZED; ++ ++ static void detect_tme_early(struct cpuinfo_x86 *c) ++ { ++ u64 tme_activate, tme_policy, tme_crypto_algs; ++ int keyid_bits = 0, nr_keyids = 0; ++ static u64 tme_activate_cpu0 = 0; ++ ++ rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate); ++ ++ if (mktme_status != MKTME_UNINITIALIZED) { ++ if (tme_activate != tme_activate_cpu0) { ++ /* Broken BIOS? */ ++ pr_err_once("x86/tme: configuration is inconsistent between CPUs\n"); ++ pr_err_once("x86/tme: MKTME is not usable\n"); ++ mktme_status = MKTME_DISABLED; ++ ++ /* Proceed. We may need to exclude bits from x86_phys_bits. 
*/ ++ } ++ } else { ++ tme_activate_cpu0 = tme_activate; ++ } ++ ++ if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) { ++ pr_info_once("x86/tme: not enabled by BIOS\n"); ++ mktme_status = MKTME_DISABLED; ++ return; ++ } ++ ++ if (mktme_status != MKTME_UNINITIALIZED) ++ goto detect_keyid_bits; ++ ++ pr_info("x86/tme: enabled by BIOS\n"); ++ ++ tme_policy = TME_ACTIVATE_POLICY(tme_activate); ++ if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128) ++ pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy); ++ ++ tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate); ++ if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) { ++ pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n", ++ tme_crypto_algs); ++ mktme_status = MKTME_DISABLED; ++ } ++ detect_keyid_bits: ++ keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate); ++ nr_keyids = (1UL << keyid_bits) - 1; ++ if (nr_keyids) { ++ pr_info_once("x86/mktme: enabled by BIOS\n"); ++ pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids); ++ } else { ++ pr_info_once("x86/mktme: disabled by BIOS\n"); ++ } ++ ++ if (mktme_status == MKTME_UNINITIALIZED) { ++ /* MKTME is usable */ ++ mktme_status = MKTME_ENABLED; ++ } ++ ++ /* ++ * KeyID bits effectively lower the number of physical address ++ * bits. Update cpuinfo_x86::x86_phys_bits accordingly. ++ */ ++ c->x86_phys_bits -= keyid_bits; ++ } +++>>>>>>> 6890cb1ace35 (x86/cpu/intel: Detect TME keyid bits before setting MTRR mask registers) + + static void early_init_intel(struct cpuinfo_x86 *c) + { +* Unmerged path arch/x86/kernel/cpu/intel.c diff --git a/ciq/ciq_backports/kernel-4.18.0-553.34.1.el8_10/ce7356ae.failed b/ciq/ciq_backports/kernel-4.18.0-553.34.1.el8_10/ce7356ae.failed new file mode 100644 index 0000000000000..059ac978a19d1 --- /dev/null +++ b/ciq/ciq_backports/kernel-4.18.0-553.34.1.el8_10/ce7356ae.failed @@ -0,0 +1,53 @@ +mptcp: cope racing subflow creation in mptcp_rcv_space_adjust + +jira LE-2289 +cve CVE-2024-53122 +Rebuild_History Non-Buildable kernel-4.18.0-553.34.1.el8_10 +commit-author Paolo Abeni +commit ce7356ae35943cc6494cc692e62d51a734062b7d +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-4.18.0-553.34.1.el8_10/ce7356ae.failed + +Additional active subflows - i.e. created by the in kernel path +manager - are included into the subflow list before starting the +3whs. + +A racing recvmsg() spooling data received on an already established +subflow would unconditionally call tcp_cleanup_rbuf() on all the +current subflows, potentially hitting a divide by zero error on +the newly created ones. + +Explicitly check that the subflow is in a suitable state before +invoking tcp_cleanup_rbuf(). 
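+
+A minimal sketch of that guard (the helper name is invented; socket
+locking and the rcvbuf update around it are elided, see the conflict
+hunk below):
+
+	/* A subflow added by the in-kernel path manager may still be in
+	 * the 3whs here; tcp_cleanup_rbuf() may only run on a subflow
+	 * established enough to send an ACK. */
+	static void mptcp_adjust_one_subflow(struct sock *ssk, u32 window_clamp)
+	{
+		WRITE_ONCE(tcp_sk(ssk)->window_clamp, window_clamp);
+		if (tcp_can_send_ack(ssk))
+			tcp_cleanup_rbuf(ssk, 1);
+	}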
+ +Fixes: c76c6956566f ("mptcp: call tcp_cleanup_rbuf on subflows") + Signed-off-by: Paolo Abeni + Reviewed-by: Matthieu Baerts (NGI0) +Link: https://patch.msgid.link/02374660836e1b52afc91966b7535c8c5f7bafb0.1731060874.git.pabeni@redhat.com + Signed-off-by: Jakub Kicinski +(cherry picked from commit ce7356ae35943cc6494cc692e62d51a734062b7d) + Signed-off-by: Jonathan Maple + +# Conflicts: +# net/mptcp/protocol.c +diff --cc net/mptcp/protocol.c +index 4b1fc02736dd,48d480982b78..000000000000 +--- a/net/mptcp/protocol.c ++++ b/net/mptcp/protocol.c +@@@ -1921,8 -2081,9 +1921,14 @@@ static void mptcp_rcv_space_adjust(stru + ssk = mptcp_subflow_tcp_sock(subflow); + slow = lock_sock_fast(ssk); + WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf); +++<<<<<<< HEAD + + tcp_sk(ssk)->window_clamp = window_clamp; + + tcp_cleanup_rbuf(ssk, 1); +++======= ++ WRITE_ONCE(tcp_sk(ssk)->window_clamp, window_clamp); ++ if (tcp_can_send_ack(ssk)) ++ tcp_cleanup_rbuf(ssk, 1); +++>>>>>>> ce7356ae3594 (mptcp: cope racing subflow creation in mptcp_rcv_space_adjust) + unlock_sock_fast(ssk, slow); + } + } +* Unmerged path net/mptcp/protocol.c diff --git a/ciq/ciq_backports/kernel-4.18.0-553.34.1.el8_10/f74dacb4.failed b/ciq/ciq_backports/kernel-4.18.0-553.34.1.el8_10/f74dacb4.failed new file mode 100644 index 0000000000000..be0912297ed51 --- /dev/null +++ b/ciq/ciq_backports/kernel-4.18.0-553.34.1.el8_10/f74dacb4.failed @@ -0,0 +1,243 @@ +dlm: fix recovery of middle conversions + +jira LE-2289 +Rebuild_History Non-Buildable kernel-4.18.0-553.34.1.el8_10 +commit-author Alexander Aring +commit f74dacb4c81164d7578a11d5f8b660ad87059e6a +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-4.18.0-553.34.1.el8_10/f74dacb4.failed + +In one special case, recovery is unable to reliably rebuild +lock state by simply recreating lkb structs as sent from the +lock holders. That case is when the lkb's include conversions +between PR and CW modes. + +The recovery code has always recognized this special case, +but the implementation has always been broken, and would set +invalid modes in recovered lkb's. Unpredictable or bogus +errors could then be returned for further locking calls on +these locks. + +This bug has gone unnoticed for so long due to some +combination of: +- applications never or infrequently converting between PR/CW +- recovery not occurring during these conversions +- if the recovery bug does occur, the caller may not notice, + depending on what further locking calls are made, e.g. if + the lock is simply unlocked it may go unnoticed + +However, a core analysis from a recent gfs2 bug report points +to this broken code. + +PR = Protected Read +CW = Concurrent Write +PR and CW are incompatible +PR and PR are compatible +CW and CW are compatible + +Example 1 + +node C, resource R +granted: PR node A +granted: PR node B +granted: NL node C +granted: NL node D + +- A sends convert PR->CW to C +- C fails before A gets a reply +- recovery occurs + +At this point, A does not know if it still holds +the lock in PR, or if its conversion to CW was granted: +- If A's conversion to CW was granted, then another + node's CW lock may also have been granted. +- If A's conversion to CW was not granted, it still + holds a PR lock, and other nodes may also hold PR locks. + +So, the new master of R cannot simply recreate the lock +from A using granted mode PR and requested mode CW. 
+The new master must look at all the recovered locks to +determine the correct granted modes, and ensure that all +the recovered locks are recreated in compatible states. + +The correct lock recovery steps in this example are: +- node D becomes the new master of R +- node B sends D its lkb, granted PR +- node A sends D its lkb, convert PR->CW +- D determines the correct lock state is: + granted: PR node B + convert: PR->CW node A + +The lkb sent by each node was recreated without +any change on the new master node. + +Example 2 + +node C, resource R +granted: PR node A +granted: NL node C +granted: NL node D +waiting: CW node B + +- A sends convert PR->CW to C +- C grants the conversion to CW for A +- C grants the waiting request for CW to B +- C sends granted message to B, but fails + before it can send the granted message to A +- B receives the granted message from C + +At this point: +- A believes it is converting PR->CW +- B believes it is holding a CW lock + +The correct lock recovery steps in this example are: +- node D becomes the new master of R +- node A sends D its lkb, convert PR->CW +- node B sends D its lkb, granted CW +- D determines the correct lock state is: + granted: CW node B + granted: CW node A + +The lkb sent by B is recreated without change, +but the lkb sent by A is changed because the +granted mode was not compatible. + +Fixes to make this work correctly: + +recover_convert_waiter: should not make any changes +to a converting lkb that is still waiting for a reply +message. It was previously setting grmode to IV, which +is an invalid state, so the lkb would not be handled +correctly by other code. + +receive_rcom_lock_args: was checking the wrong lkb field +(wait_type instead of status) to determine if the lkb is +being converted, and in need of inspection for this special +recovery. It was also setting grmode to IV in the lkb, +causing it to be mishandled by other code. +Now, this function just puts the lkb, directly as sent, +onto the convert queue of the resource being recovered, +and corrects it in recover_conversion() later, if needed. + +recover_conversion: the job of this function is to detect +and correct lkb states for the special PR/CW conversions. +The new code now checks for recovered lkbs on the granted +queue with grmode PR or CW, and takes the real grmode from +that. Then it looks for lkbs on the convert queue with an +incompatible grmode (i.e. grmode PR when the real grmode is +CW, or vice versa). These converting lkbs need to be fixed. +They are fixed by temporarily setting their grmode to NL, +so that grmodes are not incompatible and won't confuse other +locking code. The converting lkb will then be granted at +the end of recovery, replacing the temporary NL grmode. + + Signed-off-by: Alexander Aring + Signed-off-by: David Teigland +(cherry picked from commit f74dacb4c81164d7578a11d5f8b660ad87059e6a) + Signed-off-by: Jonathan Maple + +# Conflicts: +# fs/dlm/lock.c +diff --cc fs/dlm/lock.c +index 9933ec3c213f,fc1d710166e9..000000000000 +--- a/fs/dlm/lock.c ++++ b/fs/dlm/lock.c +@@@ -5082,20 -4996,22 +5082,35 @@@ void dlm_receive_buffer(union dlm_packe + } + + static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb, + - struct dlm_message *ms_local) + + struct dlm_message *ms_stub) + { + if (middle_conversion(lkb)) { ++ log_rinfo(ls, "%s %x middle convert in progress", __func__, ++ lkb->lkb_id); ++ ++ /* We sent this lock to the new master. The new master will ++ * tell us when it's granted. 
We no longer need a reply, so ++ * use a fake reply to put the lkb into the right state. ++ */ + hold_lkb(lkb); +++<<<<<<< HEAD + + memset(ms_stub, 0, sizeof(struct dlm_message)); + + ms_stub->m_flags = DLM_IFL_STUB_MS; + + ms_stub->m_type = DLM_MSG_CONVERT_REPLY; + + ms_stub->m_result = -EINPROGRESS; + + ms_stub->m_header.h_nodeid = lkb->lkb_nodeid; + + _receive_convert_reply(lkb, ms_stub); + + + + /* Same special case as in receive_rcom_lock_args() */ + + lkb->lkb_grmode = DLM_LOCK_IV; + + rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT); +++======= ++ memset(ms_local, 0, sizeof(struct dlm_message)); ++ ms_local->m_type = cpu_to_le32(DLM_MSG_CONVERT_REPLY); ++ ms_local->m_result = cpu_to_le32(to_dlm_errno(-EINPROGRESS)); ++ ms_local->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid); ++ _receive_convert_reply(lkb, ms_local, true); +++>>>>>>> f74dacb4c811 (dlm: fix recovery of middle conversions) + unhold_lkb(lkb); + + } else if (lkb->lkb_rqmode >= lkb->lkb_grmode) { +* Unmerged path fs/dlm/lock.c +diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c +index 98b710cc9cf3..400cb8e9487f 100644 +--- a/fs/dlm/recover.c ++++ b/fs/dlm/recover.c +@@ -815,33 +815,42 @@ static void recover_lvb(struct dlm_rsb *r) + } + + /* All master rsb's flagged RECOVER_CONVERT need to be looked at. The locks +- converting PR->CW or CW->PR need to have their lkb_grmode set. */ ++ * converting PR->CW or CW->PR may need to have their lkb_grmode changed. ++ */ + + static void recover_conversion(struct dlm_rsb *r) + { + struct dlm_ls *ls = r->res_ls; ++ uint32_t other_lkid = 0; ++ int other_grmode = -1; + struct dlm_lkb *lkb; +- int grmode = -1; + + list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) { + if (lkb->lkb_grmode == DLM_LOCK_PR || + lkb->lkb_grmode == DLM_LOCK_CW) { +- grmode = lkb->lkb_grmode; ++ other_grmode = lkb->lkb_grmode; ++ other_lkid = lkb->lkb_id; + break; + } + } + ++ if (other_grmode == -1) ++ return; ++ + list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) { +- if (lkb->lkb_grmode != DLM_LOCK_IV) +- continue; +- if (grmode == -1) { +- log_debug(ls, "recover_conversion %x set gr to rq %d", +- lkb->lkb_id, lkb->lkb_rqmode); +- lkb->lkb_grmode = lkb->lkb_rqmode; +- } else { +- log_debug(ls, "recover_conversion %x set gr %d", +- lkb->lkb_id, grmode); +- lkb->lkb_grmode = grmode; ++ /* Lock recovery created incompatible granted modes, so ++ * change the granted mode of the converting lock to ++ * NL. The rqmode of the converting lock should be CW, ++ * which means the converting lock should be granted at ++ * the end of recovery. 
++ */ ++ if (((lkb->lkb_grmode == DLM_LOCK_PR) && (other_grmode == DLM_LOCK_CW)) || ++ ((lkb->lkb_grmode == DLM_LOCK_CW) && (other_grmode == DLM_LOCK_PR))) { ++ log_limit(ls, "%s %x gr %d rq %d, remote %d %x, other_lkid %u, other gr %d, set gr=NL", ++ __func__, lkb->lkb_id, lkb->lkb_grmode, ++ lkb->lkb_rqmode, lkb->lkb_nodeid, ++ lkb->lkb_remid, other_lkid, other_grmode); ++ lkb->lkb_grmode = DLM_LOCK_NL; + } + } + } diff --git a/ciq/ciq_backports/kernel-4.18.0-553.34.1.el8_10/rebuild.details.txt b/ciq/ciq_backports/kernel-4.18.0-553.34.1.el8_10/rebuild.details.txt new file mode 100644 index 0000000000000..7fc17316e13dc --- /dev/null +++ b/ciq/ciq_backports/kernel-4.18.0-553.34.1.el8_10/rebuild.details.txt @@ -0,0 +1,26 @@ +Rebuild_History BUILDABLE +Rebuilding Kernel from rpm changelog with Fuzz Limit: 87.50% +Number of commits in upstream range v4.18~1..master: 522243 +Number of commits in rpm: 14 +Number of commits matched with upstream: 8 (57.14%) +Number of commits in upstream but not in rpm: 522235 +Number of commits NOT found in upstream: 6 (42.86%) + +Rebuilding Kernel on Branch rocky8_10_rebuild_kernel-4.18.0-553.34.1.el8_10 for kernel-4.18.0-553.34.1.el8_10 +Clean Cherry Picks: 4 (50.00%) +Empty Cherry Picks: 4 (50.00%) +_______________________________ + +__EMPTY COMMITS__________________________ +f74dacb4c81164d7578a11d5f8b660ad87059e6a dlm: fix recovery of middle conversions +6890cb1ace350b4386c8aee1343dc3b3ddd214da x86/cpu/intel: Detect TME keyid bits before setting MTRR mask registers +13325333582d4820d39b9e8f63d6a54e745585d9 xfs: fix sparse inode limits on runt AG +ce7356ae35943cc6494cc692e62d51a734062b7d mptcp: cope racing subflow creation in mptcp_rcv_space_adjust + +__CHANGES NOT IN UPSTREAM________________ +Adding prod certs and changed cert date to 20210620 +Adding Rocky secure boot certs +Fixing vmlinuz removal +Fixing UEFI CA path +Porting to 8.10, debranding and Rocky branding +Fixing pesign_key_name values diff --git a/ciq/ciq_backports/kernel-4.18.0-553.36.1.el8_10/7e1c3f58.failed b/ciq/ciq_backports/kernel-4.18.0-553.36.1.el8_10/7e1c3f58.failed new file mode 100644 index 0000000000000..c4a94ac46b6fd --- /dev/null +++ b/ciq/ciq_backports/kernel-4.18.0-553.36.1.el8_10/7e1c3f58.failed @@ -0,0 +1,59 @@ +cpufreq: intel_pstate: Support Emerald Rapids OOB mode + +jira LE-2289 +Rebuild_History Non-Buildable kernel-4.18.0-553.36.1.el8_10 +commit-author Srinivas Pandruvada +commit 7e1c3f584ee78b0d0210fc424420d9529f3ca952 +Empty-Commit: Cherry-Pick Conflicts during history rebuild. +Will be included in final tarball splat. Ref for failed cherry-pick at: +ciq/ciq_backports/kernel-4.18.0-553.36.1.el8_10/7e1c3f58.failed + +Prevent intel_pstate from loading when OOB (Out Of Band) P-states mode is +enabled in Emerald Rapids. + +The OOB identifying bits are same as for the prior generation CPUs +like Sapphire Rapids servers, so also add Emerald Rapids to the +intel_pstate_cpu_oob_ids[] list. + + Signed-off-by: Srinivas Pandruvada + Signed-off-by: Rafael J. 
Wysocki +(cherry picked from commit 7e1c3f584ee78b0d0210fc424420d9529f3ca952) + Signed-off-by: Jonathan Maple + +# Conflicts: +# drivers/cpufreq/intel_pstate.c +diff --cc drivers/cpufreq/intel_pstate.c +index 405d00dd8746,efb29a473be2..000000000000 +--- a/drivers/cpufreq/intel_pstate.c ++++ b/drivers/cpufreq/intel_pstate.c +@@@ -2414,17 -2397,20 +2414,26 @@@ static const struct x86_cpu_id intel_ps + }; + MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); + + -#ifdef CONFIG_ACPI + static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { +++<<<<<<< HEAD + + X86_MATCH(BROADWELL_D, core_funcs), + + X86_MATCH(BROADWELL_X, core_funcs), + + X86_MATCH(SKYLAKE_X, core_funcs), + + X86_MATCH(ICELAKE_X, core_funcs), + + X86_MATCH(SAPPHIRERAPIDS_X, core_funcs), +++======= ++ X86_MATCH(INTEL_BROADWELL_D, core_funcs), ++ X86_MATCH(INTEL_BROADWELL_X, core_funcs), ++ X86_MATCH(INTEL_SKYLAKE_X, core_funcs), ++ X86_MATCH(INTEL_ICELAKE_X, core_funcs), ++ X86_MATCH(INTEL_SAPPHIRERAPIDS_X, core_funcs), ++ X86_MATCH(INTEL_EMERALDRAPIDS_X, core_funcs), +++>>>>>>> 7e1c3f584ee7 (cpufreq: intel_pstate: Support Emerald Rapids OOB mode) + {} + }; + -#endif + + static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = { + - X86_MATCH(INTEL_KABYLAKE, core_funcs), + + X86_MATCH(KABYLAKE, core_funcs), + {} + }; + +* Unmerged path drivers/cpufreq/intel_pstate.c diff --git a/ciq/ciq_backports/kernel-4.18.0-553.36.1.el8_10/rebuild.details.txt b/ciq/ciq_backports/kernel-4.18.0-553.36.1.el8_10/rebuild.details.txt new file mode 100644 index 0000000000000..6145ae56d560d --- /dev/null +++ b/ciq/ciq_backports/kernel-4.18.0-553.36.1.el8_10/rebuild.details.txt @@ -0,0 +1,23 @@ +Rebuild_History BUILDABLE +Rebuilding Kernel from rpm changelog with Fuzz Limit: 87.50% +Number of commits in upstream range v4.18~1..master: 522243 +Number of commits in rpm: 15 +Number of commits matched with upstream: 9 (60.00%) +Number of commits in upstream but not in rpm: 522234 +Number of commits NOT found in upstream: 6 (40.00%) + +Rebuilding Kernel on Branch rocky8_10_rebuild_kernel-4.18.0-553.36.1.el8_10 for kernel-4.18.0-553.36.1.el8_10 +Clean Cherry Picks: 7 (77.78%) +Empty Cherry Picks: 1 (11.11%) +_______________________________ + +__EMPTY COMMITS__________________________ +7e1c3f584ee78b0d0210fc424420d9529f3ca952 cpufreq: intel_pstate: Support Emerald Rapids OOB mode + +__CHANGES NOT IN UPSTREAM________________ +Adding prod certs and changed cert date to 20210620 +Adding Rocky secure boot certs +Fixing vmlinuz removal +Fixing UEFI CA path +Porting to 8.10, debranding and Rocky branding +Fixing pesign_key_name values diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 405d00dd87460..3bd0ecc0f409e 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -2420,6 +2420,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { X86_MATCH(SKYLAKE_X, core_funcs), X86_MATCH(ICELAKE_X, core_funcs), X86_MATCH(SAPPHIRERAPIDS_X, core_funcs), + X86_MATCH(EMERALDRAPIDS_X, core_funcs), {} }; diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 63df85ff7d112..cd6c7864dc432 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -791,6 +791,7 @@ enum i40e_filter_state { I40E_FILTER_ACTIVE, /* Added to switch by FW */ I40E_FILTER_FAILED, /* Rejected by FW */ I40E_FILTER_REMOVE, /* To be removed */ + I40E_FILTER_NEW_SYNC, /* New, not sent yet, is in i40e_sync_vsi_filters() 
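+				 * and is already counted as active by i40e_count_filters()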
*/ /* There is no 'removed' state; the filter struct is freed */ }; struct i40e_mac_filter { diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 9954493cd4489..55043a414fd36 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -105,6 +105,7 @@ static char *i40e_filter_state_string[] = { "ACTIVE", "FAILED", "REMOVE", + "NEW_SYNC", }; /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 8176bbcfe0702..12fdcf3858fb0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1244,8 +1244,12 @@ int i40e_count_filters(struct i40e_vsi *vsi) int bkt; int cnt = 0; - hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) - ++cnt; + hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { + if (f->state == I40E_FILTER_NEW || + f->state == I40E_FILTER_NEW_SYNC || + f->state == I40E_FILTER_ACTIVE) + ++cnt; + } return cnt; } @@ -1429,6 +1433,8 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi, new->f = add_head; new->state = add_head->state; + if (add_head->state == I40E_FILTER_NEW) + add_head->state = I40E_FILTER_NEW_SYNC; /* Add the new filter to the tmp list */ hlist_add_head(&new->hlist, tmp_add_list); @@ -1538,6 +1544,8 @@ static int i40e_correct_vf_mac_vlan_filters(struct i40e_vsi *vsi, return -ENOMEM; new_mac->f = add_head; new_mac->state = add_head->state; + if (add_head->state == I40E_FILTER_NEW) + add_head->state = I40E_FILTER_NEW_SYNC; /* Add the new filter to the tmp list */ hlist_add_head(&new_mac->hlist, tmp_add_list); @@ -2428,7 +2436,8 @@ static int i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name, struct i40e_mac_filter *f) { - bool enable = f->state == I40E_FILTER_NEW; + bool enable = f->state == I40E_FILTER_NEW || + f->state == I40E_FILTER_NEW_SYNC; struct i40e_hw *hw = &vsi->back->hw; int aq_ret; @@ -2602,6 +2611,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) /* Add it to the hash list */ hlist_add_head(&new->hlist, &tmp_add_list); + f->state = I40E_FILTER_NEW_SYNC; } /* Count the number of active (current and new) VLAN @@ -2753,7 +2763,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) spin_lock_bh(&vsi->mac_filter_hash_lock); hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) { /* Only update the state if we're still NEW */ - if (new->f->state == I40E_FILTER_NEW) + if (new->f->state == I40E_FILTER_NEW || + new->f->state == I40E_FILTER_NEW_SYNC) new->f->state = new->state; hlist_del(&new->hlist); netdev_hw_addr_refcnt(new->f, vsi->netdev, -1); diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index e57668bbcf527..32db554d65e0f 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -418,6 +418,8 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt) if (cmdstatp->have_sense && cmdstatp->sense_hdr.asc == 0 && cmdstatp->sense_hdr.ascq == 0x17) STp->cleaning_req = 1; /* ASC and ASCQ => cleaning requested */ + if (cmdstatp->have_sense && scode == UNIT_ATTENTION && cmdstatp->sense_hdr.asc == 0x29) + STp->pos_unknown = 1; /* ASC => power on / reset */ STp->pos_unknown |= STp->device->was_reset; @@ -990,7 +992,10 @@ static int test_ready(struct scsi_tape *STp, int do_wait) scode = cmdstatp->sense_hdr.sense_key; if (scode == UNIT_ATTENTION) { /* New media? 
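(ASC 0x28: not-ready-to-ready transition, medium may have changed)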
*/ - new_session = 1; + if (cmdstatp->sense_hdr.asc == 0x28) { /* New media */ + new_session = 1; + DEBC_printk(STp, "New tape session."); + } if (attentions < MAX_ATTENTIONS) { attentions++; continue; @@ -3504,6 +3509,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) int i, cmd_nr, cmd_type, bt; int retval = 0; unsigned int blk; + bool cmd_mtiocget; struct scsi_tape *STp = file->private_data; struct st_modedef *STm; struct st_partstat *STps; @@ -3618,6 +3624,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) */ if (mtc.mt_op != MTREW && mtc.mt_op != MTOFFL && + mtc.mt_op != MTLOAD && mtc.mt_op != MTRETEN && mtc.mt_op != MTERASE && mtc.mt_op != MTSEEK && @@ -3731,17 +3738,28 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) goto out; } + cmd_mtiocget = cmd_type == _IOC_TYPE(MTIOCGET) && cmd_nr == _IOC_NR(MTIOCGET); + if ((i = flush_buffer(STp, 0)) < 0) { - retval = i; - goto out; - } - if (STp->can_partitions && - (i = switch_partition(STp)) < 0) { - retval = i; - goto out; + if (cmd_mtiocget && STp->pos_unknown) { + /* flush fails -> modify status accordingly */ + reset_state(STp); + STp->pos_unknown = 1; + } else { /* return error */ + retval = i; + goto out; + } + } else { /* flush_buffer succeeds */ + if (STp->can_partitions) { + i = switch_partition(STp); + if (i < 0) { + retval = i; + goto out; + } + } } - if (cmd_type == _IOC_TYPE(MTIOCGET) && cmd_nr == _IOC_NR(MTIOCGET)) { + if (cmd_mtiocget) { struct mtget mt_status; if (_IOC_SIZE(cmd_in) != sizeof(struct mtget)) { @@ -3755,7 +3773,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) ((STp->density << MT_ST_DENSITY_SHIFT) & MT_ST_DENSITY_MASK); mt_status.mt_blkno = STps->drv_block; mt_status.mt_fileno = STps->drv_file; - if (STp->block_size != 0) { + if (STp->block_size != 0 && mt_status.mt_blkno >= 0) { if (STps->rw == ST_WRITING) mt_status.mt_blkno += (STp->buffer)->buffer_bytes / STp->block_size; diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index d8c49f0fc3723..721b563fdc655 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -149,6 +149,8 @@ struct hv_fc_wwn_packet { */ static int vmstor_proto_version; +static bool hv_dev_is_fc(struct hv_device *hv_dev); + #define STORVSC_LOGGING_NONE 0 #define STORVSC_LOGGING_ERROR 1 #define STORVSC_LOGGING_WARN 2 @@ -316,6 +318,9 @@ enum storvsc_request_type { #define SRB_STATUS_ABORTED 0x02 #define SRB_STATUS_ERROR 0x04 #define SRB_STATUS_INVALID_REQUEST 0x06 +#define SRB_STATUS_TIMEOUT 0x09 +#define SRB_STATUS_SELECTION_TIMEOUT 0x0A +#define SRB_STATUS_BUS_RESET 0x0E #define SRB_STATUS_DATA_OVERRUN 0x12 #define SRB_STATUS_INVALID_LUN 0x20 #define SRB_STATUS_INTERNAL_ERROR 0x30 @@ -981,6 +986,10 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb, case SRB_STATUS_ABORTED: case SRB_STATUS_INVALID_REQUEST: case SRB_STATUS_INTERNAL_ERROR: + case SRB_STATUS_TIMEOUT: + case SRB_STATUS_SELECTION_TIMEOUT: + case SRB_STATUS_BUS_RESET: + case SRB_STATUS_DATA_OVERRUN: if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID) { /* Check for capacity change */ if ((asc == 0x2a) && (ascq == 0x9)) { @@ -1130,6 +1139,7 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device, * not correctly handle: * INQUIRY command with page code parameter set to 0x80 * MODE_SENSE command with cmd[2] == 0x1c + * MAINTENANCE_IN is not supported by HyperV FC passthrough * * Setup srb and scsi status so this won't be 
fatal. * We do this so we can distinguish truly fatal failues @@ -1137,7 +1147,9 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device, */ if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) || - (stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) { + (stor_pkt->vm_srb.cdb[0] == MODE_SENSE) || + (stor_pkt->vm_srb.cdb[0] == MAINTENANCE_IN && + hv_dev_is_fc(device))) { vstor_packet->vm_srb.scsi_status = 0; vstor_packet->vm_srb.srb_status = SRB_STATUS_SUCCESS; } diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 9933ec3c213f9..0733727c255c9 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -5085,6 +5085,13 @@ static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb, struct dlm_message *ms_stub) { if (middle_conversion(lkb)) { + log_rinfo(ls, "%s %x middle convert in progress", __func__, + lkb->lkb_id); + + /* We sent this lock to the new master. The new master will + * tell us when it's granted. We no longer need a reply, so + * use a fake reply to put the lkb into the right state. + */ hold_lkb(lkb); memset(ms_stub, 0, sizeof(struct dlm_message)); ms_stub->m_flags = DLM_IFL_STUB_MS; @@ -5092,10 +5099,6 @@ static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb, ms_stub->m_result = -EINPROGRESS; ms_stub->m_header.h_nodeid = lkb->lkb_nodeid; _receive_convert_reply(lkb, ms_stub); - - /* Same special case as in receive_rcom_lock_args() */ - lkb->lkb_grmode = DLM_LOCK_IV; - rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT); unhold_lkb(lkb); } else if (lkb->lkb_rqmode >= lkb->lkb_grmode) { @@ -5619,10 +5622,11 @@ static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb, The real granted mode of these converting locks cannot be determined until all locks have been rebuilt on the rsb (recover_conversion) */ - if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) && - middle_conversion(lkb)) { - rl->rl_status = DLM_LKSTS_CONVERT; - lkb->lkb_grmode = DLM_LOCK_IV; + if (rl->rl_status == DLM_LKSTS_CONVERT && middle_conversion(lkb)) { + /* We may need to adjust grmode depending on other granted locks. */ + log_limit(ls, "%s %x middle convert gr %d rq %d remote %d %x", + __func__, lkb->lkb_id, lkb->lkb_grmode, + lkb->lkb_rqmode, lkb->lkb_nodeid, lkb->lkb_remid); rsb_set_flag(r, RSB_RECOVER_CONVERT); } diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c index 98b710cc9cf30..400cb8e9487f5 100644 --- a/fs/dlm/recover.c +++ b/fs/dlm/recover.c @@ -815,33 +815,42 @@ static void recover_lvb(struct dlm_rsb *r) } /* All master rsb's flagged RECOVER_CONVERT need to be looked at. The locks - converting PR->CW or CW->PR need to have their lkb_grmode set. */ + * converting PR->CW or CW->PR may need to have their lkb_grmode changed. 
+ */ static void recover_conversion(struct dlm_rsb *r) { struct dlm_ls *ls = r->res_ls; + uint32_t other_lkid = 0; + int other_grmode = -1; struct dlm_lkb *lkb; - int grmode = -1; list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) { if (lkb->lkb_grmode == DLM_LOCK_PR || lkb->lkb_grmode == DLM_LOCK_CW) { - grmode = lkb->lkb_grmode; + other_grmode = lkb->lkb_grmode; + other_lkid = lkb->lkb_id; break; } } + if (other_grmode == -1) + return; + list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) { - if (lkb->lkb_grmode != DLM_LOCK_IV) - continue; - if (grmode == -1) { - log_debug(ls, "recover_conversion %x set gr to rq %d", - lkb->lkb_id, lkb->lkb_rqmode); - lkb->lkb_grmode = lkb->lkb_rqmode; - } else { - log_debug(ls, "recover_conversion %x set gr %d", - lkb->lkb_id, grmode); - lkb->lkb_grmode = grmode; + /* Lock recovery created incompatible granted modes, so + * change the granted mode of the converting lock to + * NL. The rqmode of the converting lock should be CW, + * which means the converting lock should be granted at + * the end of recovery. + */ + if (((lkb->lkb_grmode == DLM_LOCK_PR) && (other_grmode == DLM_LOCK_CW)) || + ((lkb->lkb_grmode == DLM_LOCK_CW) && (other_grmode == DLM_LOCK_PR))) { + log_limit(ls, "%s %x gr %d rq %d, remote %d %x, other_lkid %u, other gr %d, set gr=NL", + __func__, lkb->lkb_id, lkb->lkb_grmode, + lkb->lkb_rqmode, lkb->lkb_nodeid, + lkb->lkb_remid, other_lkid, other_grmode); + lkb->lkb_grmode = DLM_LOCK_NL; } } } diff --git a/fs/nfs/write.c b/fs/nfs/write.c index ea249bf2c08af..320ba093a736d 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1425,8 +1425,6 @@ static void nfs_async_write_error(struct list_head *head, int error) static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr) { nfs_async_write_error(&hdr->pages, 0); - filemap_fdatawrite_range(hdr->inode->i_mapping, hdr->args.offset, - hdr->args.offset + hdr->args.count - 1); } static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = { diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c index 75930a32c8e80..294301290948c 100644 --- a/fs/xfs/libxfs/xfs_ialloc.c +++ b/fs/xfs/libxfs/xfs_ialloc.c @@ -778,7 +778,8 @@ xfs_ialloc_ag_alloc( * the end of the AG. 
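* On the runt AG at the end of the filesystem, xfs_ag_block_count() * can be well short of sb_agblocks, so max_agbno must be derived * from the real AG length.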
*/ args.min_agbno = args.mp->m_sb.sb_inoalignmt; - args.max_agbno = round_down(args.mp->m_sb.sb_agblocks, + args.max_agbno = round_down(xfs_ag_block_count(args.mp, + pag->pag_agno), args.mp->m_sb.sb_inoalignmt) - igeo->ialloc_blks; @@ -2172,9 +2173,9 @@ xfs_difree( return -EINVAL; } agbno = XFS_AGINO_TO_AGBNO(mp, agino); - if (agbno >= mp->m_sb.sb_agblocks) { - xfs_warn(mp, "%s: agbno >= mp->m_sb.sb_agblocks (%d >= %d).", - __func__, agbno, mp->m_sb.sb_agblocks); + if (agbno >= xfs_ag_block_count(mp, pag->pag_agno)) { + xfs_warn(mp, "%s: agbno >= xfs_ag_block_count (%d >= %d).", + __func__, agbno, xfs_ag_block_count(mp, pag->pag_agno)); ASSERT(0); return -EINVAL; } @@ -2298,7 +2299,7 @@ xfs_imap( pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino)); agino = XFS_INO_TO_AGINO(mp, ino); agbno = XFS_AGINO_TO_AGBNO(mp, agino); - if (!pag || agbno >= mp->m_sb.sb_agblocks || + if (!pag || agbno >= xfs_ag_block_count(mp, pag->pag_agno) || ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) { error = -EINVAL; #ifdef DEBUG @@ -2314,11 +2315,12 @@ xfs_imap( __func__, XFS_INO_TO_AGNO(mp, ino), mp->m_sb.sb_agcount); } - if (agbno >= mp->m_sb.sb_agblocks) { + if (agbno >= xfs_ag_block_count(mp, pag->pag_agno)) { xfs_alert(mp, "%s: agbno (0x%llx) >= mp->m_sb.sb_agblocks (0x%lx)", __func__, (unsigned long long)agbno, - (unsigned long)mp->m_sb.sb_agblocks); + (unsigned long)xfs_ag_block_count(mp, + pag->pag_agno)); } if (pag && ino != XFS_AGINO_TO_INO(mp, pag->pag_agno, agino)) { xfs_alert(mp, diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 4b1fc02736dde..e5996261da447 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1922,7 +1922,8 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied) slow = lock_sock_fast(ssk); WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf); tcp_sk(ssk)->window_clamp = window_clamp; - tcp_cleanup_rbuf(ssk, 1); + if (tcp_can_send_ack(ssk)) + tcp_cleanup_rbuf(ssk, 1); unlock_sock_fast(ssk, slow); } } diff --git a/samples/pktgen/README.rst b/samples/pktgen/README.rst index ff8929da61c5a..fd39215db508f 100644 --- a/samples/pktgen/README.rst +++ b/samples/pktgen/README.rst @@ -20,6 +20,7 @@ across the sample scripts. Usage example is printed on errors:: -s : ($PKT_SIZE) packet size -d : ($DEST_IP) destination IP -m : ($DST_MAC) destination MAC-addr + -p : ($DST_PORT) destination PORT range (e.g. 433-444) is also allowed -t : ($THREADS) threads to start -f : ($F_THREAD) index of first thread (zero indexed CPU number) -c : ($SKB_CLONE) SKB clones send before alloc new SKB diff --git a/samples/pktgen/functions.sh b/samples/pktgen/functions.sh index f8bb3cd0f4ce6..4af4046d71be4 100644 --- a/samples/pktgen/functions.sh +++ b/samples/pktgen/functions.sh @@ -162,3 +162,37 @@ function get_node_cpus() echo $node_cpu_list } + +# Given a single or range of port(s), return minimum and maximum port number. +function parse_ports() +{ + local port_str=$1 + local port_list + local min_port + local max_port + + IFS="-" read -ra port_list <<< $port_str + + min_port=${port_list[0]} + max_port=${port_list[1]:-$min_port} + + echo $min_port $max_port +} + +# Given a minimum and maximum port, verify port number. 
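+# Ports must lie in 1-65535 and min must not exceed max; anything else
+# aborts the script via err().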
+function validate_ports() +{ + local min_port=$1 + local max_port=$2 + + # 0 < port < 65536 + if [[ $min_port -gt 0 && $min_port -lt 65536 ]]; then + if [[ $max_port -gt 0 && $max_port -lt 65536 ]]; then + if [[ $min_port -le $max_port ]]; then + return 0 + fi + fi + fi + + err 5 "Invalid port(s): $min_port-$max_port" +} diff --git a/samples/pktgen/parameters.sh b/samples/pktgen/parameters.sh index 72fc562876e23..a06b00a0c7b6f 100644 --- a/samples/pktgen/parameters.sh +++ b/samples/pktgen/parameters.sh @@ -10,6 +10,7 @@ function usage() { echo " -s : (\$PKT_SIZE) packet size" echo " -d : (\$DEST_IP) destination IP" echo " -m : (\$DST_MAC) destination MAC-addr" + echo " -p : (\$DST_PORT) destination PORT range (e.g. 433-444) is also allowed" echo " -t : (\$THREADS) threads to start" echo " -f : (\$F_THREAD) index of first thread (zero indexed CPU number)" echo " -c : (\$SKB_CLONE) SKB clones send before alloc new SKB" @@ -23,7 +24,7 @@ function usage() { ## --- Parse command line arguments / parameters --- ## echo "Commandline options:" -while getopts "s:i:d:m:f:t:c:n:b:vxh6" option; do +while getopts "s:i:d:m:p:f:t:c:n:b:vxh6" option; do case $option in i) # interface export DEV=$OPTARG @@ -41,6 +42,10 @@ while getopts "s:i:d:m:f:t:c:n:b:vxh6" option; do export DST_MAC=$OPTARG info "Destination MAC set to: DST_MAC=$DST_MAC" ;; + p) # PORT + export DST_PORT=$OPTARG + info "Destination PORT set to: DST_PORT=$DST_PORT" + ;; f) export F_THREAD=$OPTARG info "Index of first thread (zero indexed CPU number): $F_THREAD" diff --git a/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh b/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh index 2839f7d315cf6..e14b1a9144d92 100755 --- a/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh +++ b/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh @@ -41,6 +41,10 @@ fi [ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff" [ -z "$BURST" ] && BURST=1024 [ -z "$COUNT" ] && COUNT="10000000" # Zero means indefinitely +if [ -n "$DST_PORT" ]; then + read -r DST_MIN DST_MAX <<< $(parse_ports $DST_PORT) + validate_ports $DST_MIN $DST_MAX +fi # Base Config DELAY="0" # Zero means max speed @@ -69,6 +73,13 @@ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do pg_set $dev "dst_mac $DST_MAC" pg_set $dev "dst$IP6 $DEST_IP" + if [ -n "$DST_PORT" ]; then + # Single destination port or random port range + pg_set $dev "flag UDPDST_RND" + pg_set $dev "udp_dst_min $DST_MIN" + pg_set $dev "udp_dst_max $DST_MAX" + fi + # Inject packet into RX path of stack pg_set $dev "xmit_mode netif_receive" diff --git a/samples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh b/samples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh index e1ee54465def8..82c3e504e056c 100755 --- a/samples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh +++ b/samples/pktgen/pktgen_bench_xmit_mode_queue_xmit.sh @@ -24,6 +24,10 @@ if [[ -n "$BURST" ]]; then err 1 "Bursting not supported for this mode" fi [ -z "$COUNT" ] && COUNT="10000000" # Zero means indefinitely +if [ -n "$DST_PORT" ]; then + read -r DST_MIN DST_MAX <<< $(parse_ports $DST_PORT) + validate_ports $DST_MIN $DST_MAX +fi # Base Config DELAY="0" # Zero means max speed @@ -52,6 +56,13 @@ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do pg_set $dev "dst_mac $DST_MAC" pg_set $dev "dst$IP6 $DEST_IP" + if [ -n "$DST_PORT" ]; then + # Single destination port or random port range + pg_set $dev "flag UDPDST_RND" + pg_set $dev "udp_dst_min $DST_MIN" + pg_set $dev "udp_dst_max $DST_MAX" + fi + # Inject packet into TX qdisc egress path of 
diff --git a/samples/pktgen/pktgen_sample01_simple.sh b/samples/pktgen/pktgen_sample01_simple.sh
index e9ab4edba2d72..d1702fdde8f33 100755
--- a/samples/pktgen/pktgen_sample01_simple.sh
+++ b/samples/pktgen/pktgen_sample01_simple.sh
@@ -22,6 +22,10 @@ fi
 # Example enforce param "-m" for dst_mac
 [ -z "$DST_MAC" ] && usage && err 2 "Must specify -m dst_mac"
 [ -z "$COUNT" ] && COUNT="100000" # Zero means indefinitely
+if [ -n "$DST_PORT" ]; then
+    read -r DST_MIN DST_MAX <<< $(parse_ports $DST_PORT)
+    validate_ports $DST_MIN $DST_MAX
+fi

 # Base Config
 DELAY="0" # Zero means max speed
@@ -59,6 +63,13 @@ pg_set $DEV "flag NO_TIMESTAMP"
 pg_set $DEV "dst_mac $DST_MAC"
 pg_set $DEV "dst$IP6 $DEST_IP"

+if [ -n "$DST_PORT" ]; then
+    # Single destination port or random port range
+    pg_set $DEV "flag UDPDST_RND"
+    pg_set $DEV "udp_dst_min $DST_MIN"
+    pg_set $DEV "udp_dst_max $DST_MAX"
+fi
+
 # Setup random UDP port src range
 pg_set $DEV "flag UDPSRC_RND"
 pg_set $DEV "udp_src_min $UDP_MIN"
diff --git a/samples/pktgen/pktgen_sample02_multiqueue.sh b/samples/pktgen/pktgen_sample02_multiqueue.sh
index 99f740ae98571..7f7a9a27548f7 100755
--- a/samples/pktgen/pktgen_sample02_multiqueue.sh
+++ b/samples/pktgen/pktgen_sample02_multiqueue.sh
@@ -29,6 +29,10 @@ if [ -z "$DEST_IP" ]; then
     [ -z "$IP6" ] && DEST_IP="198.18.0.42" || DEST_IP="FD00::1"
 fi
 [ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
+if [ -n "$DST_PORT" ]; then
+    read -r DST_MIN DST_MAX <<< $(parse_ports $DST_PORT)
+    validate_ports $DST_MIN $DST_MAX
+fi

 # General cleanup everything since last run
 pg_ctrl "reset"
@@ -60,6 +64,13 @@ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
     pg_set $dev "dst_mac $DST_MAC"
     pg_set $dev "dst$IP6 $DEST_IP"

+    if [ -n "$DST_PORT" ]; then
+        # Single destination port or random port range
+        pg_set $dev "flag UDPDST_RND"
+        pg_set $dev "udp_dst_min $DST_MIN"
+        pg_set $dev "udp_dst_max $DST_MAX"
+    fi
+
     # Setup random UDP port src range
     pg_set $dev "flag UDPSRC_RND"
     pg_set $dev "udp_src_min $UDP_MIN"
diff --git a/samples/pktgen/pktgen_sample03_burst_single_flow.sh b/samples/pktgen/pktgen_sample03_burst_single_flow.sh
index 8fdd36722d9ed..b520637817ced 100755
--- a/samples/pktgen/pktgen_sample03_burst_single_flow.sh
+++ b/samples/pktgen/pktgen_sample03_burst_single_flow.sh
@@ -33,6 +33,10 @@ fi
 [ -z "$BURST" ] && BURST=32
 [ -z "$CLONE_SKB" ] && CLONE_SKB="0" # No need for clones when bursting
 [ -z "$COUNT" ] && COUNT="0" # Zero means indefinitely
+if [ -n "$DST_PORT" ]; then
+    read -r DST_MIN DST_MAX <<< $(parse_ports $DST_PORT)
+    validate_ports $DST_MIN $DST_MAX
+fi

 # Base Config
 DELAY="0" # Zero means max speed
@@ -60,6 +64,13 @@ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
     pg_set $dev "dst_mac $DST_MAC"
     pg_set $dev "dst$IP6 $DEST_IP"

+    if [ -n "$DST_PORT" ]; then
+        # Single destination port or random port range
+        pg_set $dev "flag UDPDST_RND"
+        pg_set $dev "udp_dst_min $DST_MIN"
+        pg_set $dev "udp_dst_max $DST_MAX"
+    fi
+
     # Setup burst, for easy testing -b 0 disable bursting
     # (internally in pktgen default and minimum burst=1)
     if [[ ${BURST} -ne 0 ]]; then
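With parameters.sh handling -p, the new option can be exercised as below. A usage sketch only; interface, MAC, and port values are placeholders:

    # Fixed destination port (min == max):
    ./pktgen_sample01_simple.sh -i eth0 -m 90:e2:ba:ff:ff:ff -d 198.18.0.42 -p 9
    # Destination port randomized per packet within the given range:
    ./pktgen_sample03_burst_single_flow.sh -i eth0 -m 90:e2:ba:ff:ff:ff -p 1000-2000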
"$COUNT" ] && COUNT="0" # Zero means indefinitely +if [ -n "$DST_PORT" ]; then + read -r DST_MIN DST_MAX <<< $(parse_ports $DST_PORT) + validate_ports $DST_MIN $DST_MAX +fi # NOTICE: Script specific settings # ======= @@ -56,6 +60,13 @@ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do pg_set $dev "dst_mac $DST_MAC" pg_set $dev "dst $DEST_IP" + if [ -n "$DST_PORT" ]; then + # Single destination port or random port range + pg_set $dev "flag UDPDST_RND" + pg_set $dev "udp_dst_min $DST_MIN" + pg_set $dev "udp_dst_max $DST_MAX" + fi + # Randomize source IP-addresses pg_set $dev "flag IPSRC_RND" pg_set $dev "src_min 198.18.0.0" diff --git a/samples/pktgen/pktgen_sample05_flow_per_thread.sh b/samples/pktgen/pktgen_sample05_flow_per_thread.sh index 7f8b5e59f01ed..0c06e63fbe972 100755 --- a/samples/pktgen/pktgen_sample05_flow_per_thread.sh +++ b/samples/pktgen/pktgen_sample05_flow_per_thread.sh @@ -22,7 +22,10 @@ source ${basedir}/parameters.sh [ -z "$CLONE_SKB" ] && CLONE_SKB="0" [ -z "$BURST" ] && BURST=32 [ -z "$COUNT" ] && COUNT="0" # Zero means indefinitely - +if [ -n "$DST_PORT" ]; then + read -r DST_MIN DST_MAX <<< $(parse_ports $DST_PORT) + validate_ports $DST_MIN $DST_MAX +fi # Base Config DELAY="0" # Zero means max speed @@ -50,6 +53,13 @@ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do pg_set $dev "dst_mac $DST_MAC" pg_set $dev "dst $DEST_IP" + if [ -n "$DST_PORT" ]; then + # Single destination port or random port range + pg_set $dev "flag UDPDST_RND" + pg_set $dev "udp_dst_min $DST_MIN" + pg_set $dev "udp_dst_max $DST_MAX" + fi + # Setup source IP-addresses based on thread number pg_set $dev "src_min 198.18.$((thread+1)).1" pg_set $dev "src_max 198.18.$((thread+1)).1" diff --git a/samples/pktgen/pktgen_sample06_numa_awared_queue_irq_affinity.sh b/samples/pktgen/pktgen_sample06_numa_awared_queue_irq_affinity.sh index 353adc17205eb..97f0266c03568 100755 --- a/samples/pktgen/pktgen_sample06_numa_awared_queue_irq_affinity.sh +++ b/samples/pktgen/pktgen_sample06_numa_awared_queue_irq_affinity.sh @@ -35,6 +35,10 @@ if [ -z "$DEST_IP" ]; then [ -z "$IP6" ] && DEST_IP="198.18.0.42" || DEST_IP="FD00::1" fi [ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff" +if [ -n "$DST_PORT" ]; then + read -r DST_MIN DST_MAX <<< $(parse_ports $DST_PORT) + validate_ports $DST_MIN $DST_MAX +fi # General cleanup everything since last run pg_ctrl "reset" @@ -77,6 +81,13 @@ for ((i = 0; i < $THREADS; i++)); do pg_set $dev "dst_mac $DST_MAC" pg_set $dev "dst$IP6 $DEST_IP" + if [ -n "$DST_PORT" ]; then + # Single destination port or random port range + pg_set $dev "flag UDPDST_RND" + pg_set $dev "udp_dst_min $DST_MIN" + pg_set $dev "udp_dst_max $DST_MAX" + fi + # Setup random UDP port src range pg_set $dev "flag UDPSRC_RND" pg_set $dev "udp_src_min $UDP_MIN"