|
1 | 1 | /* |
2 | | - * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved. |
| 2 | + * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. |
3 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | 4 | * |
5 | 5 | * This code is free software; you can redistribute it and/or modify it |
@@ -9725,6 +9725,68 @@ void Assembler::evpmaxsq(XMMRegister dst, KRegister mask, XMMRegister nds, Addre |
9725 | 9725 | emit_operand(dst, src); |
9726 | 9726 | } |
9727 | 9727 |
|
// Emits VPTERNLOGD xmm1{k}{z}, xmm2, xmm3, imm8 (EVEX.66.0F3A.W0 25 /r ib):
// bitwise ternary logic on packed doublewords — each result bit is the imm8
// truth-table function of the corresponding bits of dst, src2 and src3,
// written under opmask `mask`. `merge` selects merge-masking (masked-off
// lanes keep dst) instead of zero-masking.
void Assembler::evpternlogd(XMMRegister dst, int imm8, KRegister mask, XMMRegister src2, XMMRegister src3, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  // 128-/256-bit vector lengths additionally require AVX-512VL.
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src3->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // Opcode 0x25, register-direct ModRM byte, then the 8-bit truth-table immediate.
  emit_int24(0x25, (unsigned char)(0xC0 | encode), imm8);
}
| 9740 | + |
| 9741 | +void Assembler::evpternlogd(XMMRegister dst, int imm8, KRegister mask, XMMRegister src2, Address src3, bool merge, int vector_len) { |
| 9742 | + assert(VM_Version::supports_evex(), "requires EVEX support"); |
| 9743 | + assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support"); |
| 9744 | + assert(dst != xnoreg, "sanity"); |
| 9745 | + InstructionMark im(this); |
| 9746 | + InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true); |
| 9747 | + attributes.set_is_evex_instruction(); |
| 9748 | + attributes.set_embedded_opmask_register_specifier(mask); |
| 9749 | + attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit); |
| 9750 | + if (merge) { |
| 9751 | + attributes.reset_is_clear_context(); |
| 9752 | + } |
| 9753 | + vex_prefix(src3, src2->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes); |
| 9754 | + emit_int8(0x25); |
| 9755 | + emit_operand(dst, src3); |
| 9756 | + emit_int8(imm8); |
| 9757 | +} |
| 9758 | + |
// Emits VPTERNLOGQ xmm1{k}{z}, xmm2, xmm3, imm8 (EVEX.66.0F3A.W1 25 /r ib):
// bitwise ternary logic on packed quadwords — each result bit is the imm8
// truth-table function of the corresponding bits of dst, src2 and src3,
// written under opmask `mask`. `merge` selects merge-masking (masked-off
// lanes keep dst) instead of zero-masking.
void Assembler::evpternlogq(XMMRegister dst, int imm8, KRegister mask, XMMRegister src2, XMMRegister src3, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  // 128-/256-bit vector lengths additionally require AVX-512VL.
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
  // vex_w=true selects the quadword (W1) form; otherwise identical to evpternlogd.
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src3->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  // Opcode 0x25, register-direct ModRM byte, then the 8-bit truth-table immediate.
  emit_int24(0x25, (unsigned char)(0xC0 | encode), imm8);
}
| 9771 | + |
// Emits VPTERNLOGQ xmm1{k}{z}, xmm2, m64/m128/m256/m512, imm8
// (EVEX.66.0F3A.W1 25 /r ib): bitwise ternary logic on packed quadwords
// with a memory third operand, written under opmask `mask`. `merge` selects
// merge-masking (masked-off lanes keep dst) instead of zero-masking.
void Assembler::evpternlogq(XMMRegister dst, int imm8, KRegister mask, XMMRegister src2, Address src3, bool merge, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  // 128-/256-bit vector lengths additionally require AVX-512VL.
  assert(vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl(), "requires VL support");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  // vex_w=true selects the quadword (W1) form.
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_embedded_opmask_register_specifier(mask);
  // Full-Vector tuple, 64-bit input size: disp8*N compression scales by
  // 64-bit granules, matching the quadword element width.
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  if (merge) {
    attributes.reset_is_clear_context();
  }
  vex_prefix(src3, src2->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x25);
  emit_operand(dst, src3);
  // 8-bit truth-table immediate follows the memory operand.
  emit_int8(imm8);
}
| 9789 | + |
9728 | 9790 | // duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL |
9729 | 9791 | void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len) { |
9730 | 9792 | assert(UseAVX >= 2, ""); |
|
0 commit comments