From e42ce422a97876e5ecd1c845b12ec65f96f8a293 Mon Sep 17 00:00:00 2001 From: Reid Kleckner Date: Mon, 23 Aug 2021 15:56:29 -0700 Subject: [PATCH 1/9] [dllexport] Instantiate default ctor default args Fixes https://bugs.llvm.org/show_bug.cgi?id=51414. Differential Revision: https://reviews.llvm.org/D108021 --- clang/lib/Sema/SemaDeclCXX.cpp | 9 +++++++++ .../dllexport-ctor-closure-nested.cpp | 20 +++++++++++++++++++ 2 files changed, 29 insertions(+) create mode 100644 clang/test/CodeGenCXX/dllexport-ctor-closure-nested.cpp diff --git a/clang/lib/Sema/SemaDeclCXX.cpp b/clang/lib/Sema/SemaDeclCXX.cpp index 4827f6b3bb345..d980869a3b067 100644 --- a/clang/lib/Sema/SemaDeclCXX.cpp +++ b/clang/lib/Sema/SemaDeclCXX.cpp @@ -6005,6 +6005,15 @@ static void ReferenceDllExportedMembers(Sema &S, CXXRecordDecl *Class) { if (TSK == TSK_ImplicitInstantiation && !ClassAttr->isInherited()) continue; + // If this is an MS ABI dllexport default constructor, instantiate any + // default arguments. + if (S.Context.getTargetInfo().getCXXABI().isMicrosoft()) { + auto *CD = dyn_cast(MD); + if (CD && CD->isDefaultConstructor() && TSK == TSK_Undeclared) { + S.InstantiateDefaultCtorDefaultArgs(CD); + } + } + S.MarkFunctionReferenced(Class->getLocation(), MD); // The function will be passed to the consumer when its definition is diff --git a/clang/test/CodeGenCXX/dllexport-ctor-closure-nested.cpp b/clang/test/CodeGenCXX/dllexport-ctor-closure-nested.cpp new file mode 100644 index 0000000000000..36dab8600e21b --- /dev/null +++ b/clang/test/CodeGenCXX/dllexport-ctor-closure-nested.cpp @@ -0,0 +1,20 @@ +// RUN: %clang_cc1 -triple i686-windows-msvc -emit-llvm -std=c++14 \ +// RUN: -fno-threadsafe-statics -fms-extensions -O1 -mconstructor-aliases \ +// RUN: -disable-llvm-passes -o - %s -w -fms-compatibility-version=19.00 | \ +// RUN: FileCheck %s + +struct HasDtor { + ~HasDtor(); + int o; +}; +struct HasImplicitDtor1 { + HasDtor o; +}; +struct __declspec(dllexport) CtorClosureOuter { + struct __declspec(dllexport) CtorClosureInner { + CtorClosureInner(const HasImplicitDtor1 &v = {}) {} + }; +}; + +// CHECK-LABEL: $"??1HasImplicitDtor1@@QAE@XZ" = comdat any +// CHECK-LABEL: define weak_odr dso_local dllexport x86_thiscallcc void @"??_FCtorClosureInner@CtorClosureOuter@@QAEXXZ"({{.*}}) {{#[0-9]+}} comdat From b546f4347b87c20502c747b366a3303949a34d69 Mon Sep 17 00:00:00 2001 From: MaheshRavishankar Date: Mon, 23 Aug 2021 16:27:15 -0700 Subject: [PATCH 2/9] [mlir]Linalg] Allow controlling fusion of linalg.generic -> linalg.tensor_expand_shape. Differential Revision: https://reviews.llvm.org/D108565 --- .../Linalg/Transforms/ElementwiseOpFusion.cpp | 32 +++++++--- .../Linalg/reshape_control_fusion.mlir | 62 +++++++++++++++++++ .../Linalg/TestLinalgElementwiseFusion.cpp | 50 +++++++++++++++ mlir/tools/mlir-opt/mlir-opt.cpp | 2 + 4 files changed, 137 insertions(+), 9 deletions(-) create mode 100644 mlir/test/Dialect/Linalg/reshape_control_fusion.mlir diff --git a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp index 43a4105e4c3f8..2b29f58428dce 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp @@ -1133,7 +1133,13 @@ struct FoldConsumerReshapeOpByLinearization /// by expanding the dimensionality of the loop in the producer op. 
struct FoldReshapeWithGenericOpByExpansion : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; + + FoldReshapeWithGenericOpByExpansion( + MLIRContext *context, ControlElementwiseOpsFusionFn foldReshapes, + PatternBenefit benefit = 1) + : OpRewritePattern(context, benefit), + controlFoldingReshapes(foldReshapes) {} + LogicalResult matchAndRewrite(TensorExpandShapeOp reshapeOp, PatternRewriter &rewriter) const override { // Fold only if all constraints of fusing with reshape by expansion are met. @@ -1141,7 +1147,8 @@ struct FoldReshapeWithGenericOpByExpansion if (!producer || producer.getNumOutputs() != 1 || !isFusableWithReshapeByDimExpansion(producer, producer.getOutputOperand(0)) || - isUnitDimExpansionOnly(reshapeOp)) + !controlFoldingReshapes(producer->getResult(0), + reshapeOp->getOpOperand(0))) return failure(); Optional> replacementValues = fuseWithReshapeByExpansion( producer, reshapeOp, producer.getOutputOperand(0), rewriter); @@ -1150,6 +1157,9 @@ struct FoldReshapeWithGenericOpByExpansion rewriter.replaceOp(reshapeOp, replacementValues.getValue()); return success(); } + +private: + ControlElementwiseOpsFusionFn controlFoldingReshapes; }; /// Pattern to fold a generic op with a splat constant. @@ -1242,12 +1252,15 @@ fuseElementwiseOps(PatternRewriter &rewriter, OpOperand *consumerOpOperand, bool mlir::linalg::skipUnitDimReshape(const OpResult &producer, OpOperand &consumer) { - auto expandShapeOp = producer.getDefiningOp(); - if (expandShapeOp) - return !isUnitDimExpansionOnly(expandShapeOp); - auto collapseShapeOp = - producer.getDefiningOp(); - return !isUnitDimExpansionOnly(collapseShapeOp); + if (auto producerCollapseOp = + dyn_cast(producer.getOwner())) { + return !isUnitDimExpansionOnly(producerCollapseOp); + } + if (auto consumerExpandOp = + dyn_cast(consumer.getOwner())) { + return !isUnitDimExpansionOnly(consumerExpandOp); + } + return true; } namespace { @@ -1389,7 +1402,8 @@ void mlir::linalg::populateFoldUnitDimsReshapeOpsByLinearizationPatterns( void mlir::linalg::populateFoldReshapeOpsByExpansionPatterns( RewritePatternSet &patterns, ControlElementwiseOpsFusionFn controlFoldingReshapes) { - patterns.add(patterns.getContext()); + patterns.add(patterns.getContext(), + controlFoldingReshapes); patterns.add(patterns.getContext(), controlFoldingReshapes); } diff --git a/mlir/test/Dialect/Linalg/reshape_control_fusion.mlir b/mlir/test/Dialect/Linalg/reshape_control_fusion.mlir new file mode 100644 index 0000000000000..f72723f36804c --- /dev/null +++ b/mlir/test/Dialect/Linalg/reshape_control_fusion.mlir @@ -0,0 +1,62 @@ +// RUN: mlir-opt -test-linalg-control-fusion-by-expansion %s -split-input-file | FileCheck %s + +func @control_producer_reshape_fusion(%arg0 : tensor, %arg1 : tensor) -> tensor { + %c0 = constant 0 : index + %c1 = constant 1 : index + %0 = linalg.tensor_collapse_shape %arg0 [[0, 1], [2]] : tensor into tensor + %d0 = tensor.dim %0, %c0 : tensor + %d1 = tensor.dim %0, %c1 : tensor + %init = linalg.init_tensor [%d0, %d1] : tensor + %1 = linalg.generic { + indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>, affine_map<(d0, d1) -> (d1)>, affine_map<(d0, d1) -> (d0, d1)>], + iterator_types = ["parallel", "parallel"]} + ins(%0, %arg1 : tensor, tensor) + outs(%init : tensor) { + ^bb0(%arg2 : f32, %arg3:f32, %arg4 : f32): + %2 = addf %arg2, %arg3 : f32 + linalg.yield %2 : f32 + } -> tensor + return %1 : tensor +} +// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0, d1) -> (d0, d1)> +// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0, d1) -> (d1)> +// CHECK: 
builtin.func @control_producer_reshape_fusion +// CHECK-SAME: %[[ARG0:[a-zA-Z0-9_]+]]: tensor +// CHECK-SAME: %[[ARG1:[a-zA-Z0-9_]+]]: tensor +// CHECK-DAG: %[[C0:.+]] = constant 0 : index +// CHECK-DAG: %[[C1:.+]] = constant 1 : index +// CHECK: %[[RESHAPE:.+]] = linalg.tensor_collapse_shape %[[ARG0]] +// CHECK-SAME: {{\[}}[0, 1], [2]{{\]}} : tensor into tensor +// CHECK: %[[RESULT:.+]] = linalg.generic +// CHECK-SAME: indexing_maps = [#[[MAP0]], #[[MAP1]], #[[MAP0]]] +// CHECK-SAME: ins(%[[RESHAPE]], %[[ARG1]] : tensor, tensor) +// CHECK: return %[[RESULT]] + +// ----- + +func @control_consumer_reshape_fusion(%arg0 : tensor<1x?x?xf32>, %arg1 : tensor<1x?x?xf32>) -> tensor<1x?x?xf32> { + %c1 = constant 1 : index + %c2 = constant 2 : index + %cst = constant 0.0 : f32 + %d0 = tensor.dim %arg0, %c1 : tensor<1x?x?xf32> + %d1 = tensor.dim %arg1, %c2 : tensor<1x?x?xf32> + %init = linalg.init_tensor [%d0, %d1] : tensor + %fill = linalg.generic { + indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>], + iterator_types = ["parallel", "parallel"]} + outs(%init : tensor) { + ^bb0(%arg2: f32): + linalg.yield %cst : f32 + } -> tensor + %0 = linalg.tensor_expand_shape %fill [[0, 1], [2]] : tensor into tensor<1x?x?xf32> + %1 = linalg.batch_matmul ins(%arg0, %arg1 : tensor<1x?x?xf32>, tensor<1x?x?xf32>) + outs(%0 : tensor<1x?x?xf32>) -> tensor<1x?x?xf32> + return %1 : tensor<1x?x?xf32> +} +// CHECK-DAG: #[[MAP:.+]] = affine_map<(d0, d1, d2) -> (d0, d1, d2) +// CHECK: builtin.func @control_consumer_reshape_fusion +// CHECK: %[[FILL:.+]] = linalg.generic +// CHECK-SAME: indexing_maps = [#[[MAP]]] +// CHECK-SAME: outs(%{{.+}} : tensor<1x?x?xf32>) +// CHECK: linalg.batch_matmul +// CHECK-SAME: outs(%[[FILL]] : tensor<1x?x?xf32>) diff --git a/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp b/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp index 487d42f8e9848..e9f72e500686a 100644 --- a/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp +++ b/mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp @@ -73,6 +73,52 @@ struct TestLinalgElementwiseFusion } }; +struct TestLinalgControlFuseByExpansion + : public PassWrapper { + void getDependentDialects(DialectRegistry ®istry) const override { + registry + .insert(); + } + StringRef getArgument() const final { + return "test-linalg-control-fusion-by-expansion"; + } + StringRef getDescription() const final { + return "Test controlling of fusion of elementwise ops with reshape by " + "expansion"; + } + + void runOnFunction() override { + MLIRContext *context = &this->getContext(); + FuncOp funcOp = this->getFunction(); + RewritePatternSet fusionPatterns(context); + + linalg::ControlElementwiseOpsFusionFn controlReshapeFusionFn = + [](const OpResult &producer, OpOperand &consumer) { + if (auto collapseOp = + producer.getDefiningOp()) { + if (!collapseOp.src().getDefiningOp()) { + return false; + } + } + if (auto expandOp = + dyn_cast(consumer.getOwner())) { + if (expandOp->hasOneUse()) { + OpOperand &use = *expandOp->getUses().begin(); + auto linalgOp = dyn_cast(use.getOwner()); + if (linalgOp && linalgOp.isOutputTensor(&use)) + return true; + } + } + return linalg::skipUnitDimReshape(producer, consumer); + }; + + linalg::populateFoldReshapeOpsByExpansionPatterns(fusionPatterns, + controlReshapeFusionFn); + (void)applyPatternsAndFoldGreedily(funcOp.getBody(), + std::move(fusionPatterns)); + } +}; + struct TestPushExpandingReshape : public PassWrapper { void getDependentDialects(DialectRegistry ®istry) const override { @@ -99,6 
+145,10 @@ void registerTestLinalgElementwiseFusion() { PassRegistration(); } +void registerTestLinalgControlFuseByExpansion() { + PassRegistration(); +} + void registerTestPushExpandingReshape() { PassRegistration(); } diff --git a/mlir/tools/mlir-opt/mlir-opt.cpp b/mlir/tools/mlir-opt/mlir-opt.cpp index 3059b1fafb96a..2fba77f76b433 100644 --- a/mlir/tools/mlir-opt/mlir-opt.cpp +++ b/mlir/tools/mlir-opt/mlir-opt.cpp @@ -78,6 +78,7 @@ void registerTestGpuParallelLoopMappingPass(); void registerTestIRVisitorsPass(); void registerTestInterfaces(); void registerTestLinalgCodegenStrategy(); +void registerTestLinalgControlFuseByExpansion(); void registerTestLinalgDistribution(); void registerTestLinalgElementwiseFusion(); void registerTestPushExpandingReshape(); @@ -165,6 +166,7 @@ void registerTestPasses() { mlir::test::registerTestIRVisitorsPass(); mlir::test::registerTestInterfaces(); mlir::test::registerTestLinalgCodegenStrategy(); + mlir::test::registerTestLinalgControlFuseByExpansion(); mlir::test::registerTestLinalgDistribution(); mlir::test::registerTestLinalgElementwiseFusion(); mlir::test::registerTestPushExpandingReshape(); From 253cb50c60991f155e49f1d76cd01f8ba66d3524 Mon Sep 17 00:00:00 2001 From: Haowei Wu Date: Mon, 9 Aug 2021 11:45:49 -0700 Subject: [PATCH 3/9] [lit] Add the option to output test result as resultdb json format This change adds the option --resultdb-output=path allow llvm-lit generating LuCI ResultDB JSON output for the test results, which can be better integrated with certain CI/CQ framework. Differential Revision: https://reviews.llvm.org/D108238 --- llvm/utils/lit/lit/cl_arguments.py | 5 +- llvm/utils/lit/lit/reports.py | 86 +++++++++++++++++++ .../lit/tests/test-output-micro-resultdb.py | 63 ++++++++++++++ llvm/utils/lit/tests/test-output-resultdb.py | 22 +++++ 4 files changed, 175 insertions(+), 1 deletion(-) create mode 100644 llvm/utils/lit/tests/test-output-micro-resultdb.py create mode 100644 llvm/utils/lit/tests/test-output-resultdb.py diff --git a/llvm/utils/lit/lit/cl_arguments.py b/llvm/utils/lit/lit/cl_arguments.py index 70e0c8d6a17ec..aa83c7ddad4e6 100644 --- a/llvm/utils/lit/lit/cl_arguments.py +++ b/llvm/utils/lit/lit/cl_arguments.py @@ -115,6 +115,9 @@ def parse_args(): execution_group.add_argument("--xunit-xml-output", type=lit.reports.XunitReport, help="Write XUnit-compatible XML test reports to the specified file") + execution_group.add_argument("--resultdb-output", + type=lit.reports.ResultDBReport, + help="Write LuCI ResuldDB compatible JSON to the specified file") execution_group.add_argument("--time-trace-output", type=lit.reports.TimeTraceReport, help="Write Chrome tracing compatible JSON to the specified file") @@ -229,7 +232,7 @@ def parse_args(): else: opts.shard = None - opts.reports = filter(None, [opts.output, opts.xunit_xml_output, opts.time_trace_output]) + opts.reports = filter(None, [opts.output, opts.xunit_xml_output, opts.resultdb_output, opts.time_trace_output]) return opts diff --git a/llvm/utils/lit/lit/reports.py b/llvm/utils/lit/lit/reports.py index c7eed2220a4d1..c8add0e5e593b 100755 --- a/llvm/utils/lit/lit/reports.py +++ b/llvm/utils/lit/lit/reports.py @@ -1,3 +1,5 @@ +import base64 +import datetime import itertools import json @@ -153,6 +155,90 @@ def _get_skip_reason(self, test): return 'Unsupported configuration' +def gen_resultdb_test_entry( + test_name, start_time, elapsed_time, test_output, result_code, is_expected +): + test_data = { + 'testId': test_name, + 'start_time': 
datetime.datetime.fromtimestamp(start_time).isoformat() + 'Z', + 'duration': '%.9fs' % elapsed_time, + 'summary_html': '
<p><text-artifact artifact-id="artifact-content-in-request"></p>
', + 'artifacts': { + 'artifact-content-in-request': { + 'contents': base64.b64encode(test_output.encode('utf-8')).decode( + 'utf-8' + ), + }, + }, + 'expected': is_expected, + } + if ( + result_code == lit.Test.PASS + or result_code == lit.Test.XPASS + or result_code == lit.Test.FLAKYPASS + ): + test_data['status'] = 'PASS' + elif result_code == lit.Test.FAIL or result_code == lit.Test.XFAIL: + test_data['status'] = 'FAIL' + elif ( + result_code == lit.Test.UNSUPPORTED + or result_code == lit.Test.SKIPPED + or result_code == lit.Test.EXCLUDED + ): + test_data['status'] = 'SKIP' + elif result_code == lit.Test.UNRESOLVED or result_code == lit.Test.TIMEOUT: + test_data['status'] = 'ABORT' + return test_data + + +class ResultDBReport(object): + def __init__(self, output_file): + self.output_file = output_file + + def write_results(self, tests, elapsed): + unexecuted_codes = {lit.Test.EXCLUDED, lit.Test.SKIPPED} + tests = [t for t in tests if t.result.code not in unexecuted_codes] + data = {} + data['__version__'] = lit.__versioninfo__ + data['elapsed'] = elapsed + # Encode the tests. + data['tests'] = tests_data = [] + for test in tests: + tests_data.append( + gen_resultdb_test_entry( + test_name=test.getFullName(), + start_time=test.result.start, + elapsed_time=test.result.elapsed, + test_output=test.result.output, + result_code=test.result.code, + is_expected=not test.result.code.isFailure, + ) + ) + if test.result.microResults: + for key, micro_test in test.result.microResults.items(): + # Expand parent test name with micro test name + parent_name = test.getFullName() + micro_full_name = parent_name + ':' + key + 'microres' + tests_data.append( + gen_resultdb_test_entry( + test_name=micro_full_name, + start_time=micro_test.start + if micro_test.start + else test.result.start, + elapsed_time=micro_test.elapsed + if micro_test.elapsed + else test.result.elapsed, + test_output=micro_test.output, + result_code=micro_test.code, + is_expected=not micro_test.code.isFailure, + ) + ) + + with open(self.output_file, 'w') as file: + json.dump(data, file, indent=2, sort_keys=True) + file.write('\n') + + class TimeTraceReport(object): def __init__(self, output_file): self.output_file = output_file diff --git a/llvm/utils/lit/tests/test-output-micro-resultdb.py b/llvm/utils/lit/tests/test-output-micro-resultdb.py new file mode 100644 index 0000000000000..a67a17916f237 --- /dev/null +++ b/llvm/utils/lit/tests/test-output-micro-resultdb.py @@ -0,0 +1,63 @@ +# RUN: %{lit} -j 1 -v %{inputs}/test-data-micro --resultdb-output %t.results.out +# RUN: FileCheck < %t.results.out %s +# RUN: rm %t.results.out + + +# CHECK: { +# CHECK: "__version__" +# CHECK: "elapsed" +# CHECK-NEXT: "tests": [ +# CHECK-NEXT: { +# CHECK-NEXT: "artifacts": { +# CHECK-NEXT: "artifact-content-in-request": { +# CHECK-NEXT: "contents": "VGVzdCBwYXNzZWQu" +# CHECK-NEXT: } +# CHECK-NEXT: }, +# CHECK-NEXT: "duration" +# CHECK-NEXT: "expected": true, +# CHECK-NEXT: "start_time" +# CHECK-NEXT: "status": "PASS", +# CHECK-NEXT: "summary_html": "
<p><text-artifact artifact-id=\"artifact-content-in-request\"></p>
", +# CHECK-NEXT: "testId": "test-data-micro :: micro-tests.ini" +# CHECK-NEXT: }, +# CHECK-NEXT: { +# CHECK-NEXT: "artifacts": { +# CHECK-NEXT: "artifact-content-in-request": { +# CHECK-NEXT: "contents": "" +# CHECK-NEXT: } +# CHECK-NEXT: }, +# CHECK-NEXT: "duration" +# CHECK-NEXT: "expected": true, +# CHECK-NEXT: "start_time" +# CHECK-NEXT: "status": "PASS", +# CHECK-NEXT: "summary_html": "
<p><text-artifact artifact-id=\"artifact-content-in-request\"></p>
", +# CHECK-NEXT: "testId": "test-data-micro :: micro-tests.ini:test0microres" +# CHECK-NEXT: }, +# CHECK-NEXT: { +# CHECK-NEXT: "artifacts": { +# CHECK-NEXT: "artifact-content-in-request": { +# CHECK-NEXT: "contents": "" +# CHECK-NEXT: } +# CHECK-NEXT: }, +# CHECK-NEXT: "duration" +# CHECK-NEXT: "expected": true, +# CHECK-NEXT: "start_time" +# CHECK-NEXT: "status": "PASS", +# CHECK-NEXT: "summary_html": "
<p><text-artifact artifact-id=\"artifact-content-in-request\"></p>
", +# CHECK-NEXT: "testId": "test-data-micro :: micro-tests.ini:test1microres" +# CHECK-NEXT: }, +# CHECK-NEXT: { +# CHECK-NEXT: "artifacts": { +# CHECK-NEXT: "artifact-content-in-request": { +# CHECK-NEXT: "contents": "" +# CHECK-NEXT: } +# CHECK-NEXT: }, +# CHECK-NEXT: "duration" +# CHECK-NEXT: "expected": true, +# CHECK-NEXT: "start_time" +# CHECK-NEXT: "status": "PASS", +# CHECK-NEXT: "summary_html": "
<p><text-artifact artifact-id=\"artifact-content-in-request\"></p>
", +# CHECK-NEXT: "testId": "test-data-micro :: micro-tests.ini:test2microres" +# CHECK-NEXT: } +# CHECK-NEXT: ] +# CHECK-NEXT: } diff --git a/llvm/utils/lit/tests/test-output-resultdb.py b/llvm/utils/lit/tests/test-output-resultdb.py new file mode 100644 index 0000000000000..04d7e626adbe1 --- /dev/null +++ b/llvm/utils/lit/tests/test-output-resultdb.py @@ -0,0 +1,22 @@ +# RUN: %{lit} -j 1 -v %{inputs}/test-data --resultdb-output %t.results.out > %t.out +# RUN: FileCheck < %t.results.out %s + +# CHECK: { +# CHECK: "__version__" +# CHECK: "elapsed" +# CHECK-NEXT: "tests": [ +# CHECK-NEXT: { +# CHECK-NEXT: "artifacts": { +# CHECK-NEXT: "artifact-content-in-request": { +# CHECK-NEXT: "contents": "VGVzdCBwYXNzZWQu" +# CHECK-NEXT: } +# CHECK-NEXT: }, +# CHECK-NEXT: "duration" +# CHECK-NEXT: "expected": true, +# CHECK-NEXT: "start_time" +# CHECK-NEXT: "status": "PASS", +# CHECK-NEXT: "summary_html": "
<p><text-artifact artifact-id=\"artifact-content-in-request\"></p>
", +# CHECK-NEXT: "testId": "test-data :: metrics.ini" +# CHECK-NEXT: } +# CHECK-NEXT: ] +# CHECK-NEXT: } From 2ec2b25fbaafa845495249de4f25bdcd1141c6ef Mon Sep 17 00:00:00 2001 From: Jessica Paquette Date: Mon, 23 Aug 2021 15:17:47 -0700 Subject: [PATCH 4/9] [AArch64][GlobalISel] Select @llvm.aarch64.neon.ld2.* This is pretty similar to the ST2 selection code in `AArch64InstructionSelector::selectIntrinsicWithSideEffects`. This is a GISel equivalent of the ld2 case in `AArch64DAGToDAGISel::Select`. There's some weirdness there that appears here too (e.g. using ld1 for scalar cases, which are 1-element vectors in SDAG.) It's a little gross that we have to create the copy and then select it right after, but I think we'd need to refactor the existing copy selection code quite a bit to do better. This was falling back while building llvm-project with GISel for AArch64. Differential Revision: https://reviews.llvm.org/D108590 --- .../GISel/AArch64InstructionSelector.cpp | 53 +++- .../CodeGen/AArch64/GlobalISel/select-ld2.mir | 232 ++++++++++++++++++ 2 files changed, 280 insertions(+), 5 deletions(-) create mode 100644 llvm/test/CodeGen/AArch64/GlobalISel/select-ld2.mir diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp index 15bdc81330f62..59ff8659148fd 100644 --- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp +++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp @@ -5057,6 +5057,11 @@ bool AArch64InstructionSelector::selectIntrinsicWithSideEffects( if (!IntrinID) return false; + const LLT S8 = LLT::scalar(8); + const LLT S16 = LLT::scalar(16); + const LLT S32 = LLT::scalar(32); + const LLT S64 = LLT::scalar(64); + const LLT P0 = LLT::pointer(0, 64); // Select the instruction. switch (IntrinID) { default: @@ -5081,16 +5086,54 @@ bool AArch64InstructionSelector::selectIntrinsicWithSideEffects( MIB.buildInstr(AArch64::BRK, {}, {}) .addImm(I.getOperand(1).getImm() | ('U' << 8)); break; + case Intrinsic::aarch64_neon_ld2: { + Register Dst1 = I.getOperand(0).getReg(); + Register Dst2 = I.getOperand(1).getReg(); + Register Ptr = I.getOperand(3).getReg(); + LLT Ty = MRI.getType(Dst1); + unsigned Opc = 0; + if (Ty == LLT::fixed_vector(8, S8)) + Opc = AArch64::LD2Twov8b; + else if (Ty == LLT::fixed_vector(16, S8)) + Opc = AArch64::LD2Twov16b; + else if (Ty == LLT::fixed_vector(4, S16)) + Opc = AArch64::LD2Twov4h; + else if (Ty == LLT::fixed_vector(8, S16)) + Opc = AArch64::LD2Twov8h; + else if (Ty == LLT::fixed_vector(2, S32)) + Opc = AArch64::LD2Twov2s; + else if (Ty == LLT::fixed_vector(4, S32)) + Opc = AArch64::LD2Twov4s; + else if (Ty == LLT::fixed_vector(2, S64) || Ty == LLT::fixed_vector(2, P0)) + Opc = AArch64::LD2Twov2d; + else if (Ty == S64 || Ty == P0) + Opc = AArch64::LD1Twov1d; + else + llvm_unreachable("Unexpected type for ld2!"); + unsigned SubReg = + Ty.getSizeInBits() == 64 ? AArch64::dsub0 : AArch64::qsub0; + // This will be selected as a load into a wide register, which is broken + // into two vectors subregister copies. + auto Load = MIB.buildInstr(Opc, {Ty}, {Ptr}); + Load.cloneMemRefs(I); + constrainSelectedInstRegOperands(*Load, TII, TRI, RBI); + Register SelectedLoadDst = Load->getOperand(0).getReg(); + // Emit the subreg copies and immediately select them. + // FIXME: We should refactor our copy code into an emitCopy helper and + // clean up uses of this pattern elsewhere in the selector. 
+ auto Vec1 = MIB.buildInstr(TargetOpcode::COPY, {Dst1}, {}) + .addReg(SelectedLoadDst, 0, SubReg); + auto Vec2 = MIB.buildInstr(AArch64::COPY, {Dst2}, {}) + .addReg(SelectedLoadDst, 0, SubReg + 1); + selectCopy(*Vec1, TII, MRI, TRI, RBI); + selectCopy(*Vec2, TII, MRI, TRI, RBI); + break; + } case Intrinsic::aarch64_neon_st2: { Register Src1 = I.getOperand(1).getReg(); Register Src2 = I.getOperand(2).getReg(); Register Ptr = I.getOperand(3).getReg(); LLT Ty = MRI.getType(Src1); - const LLT S8 = LLT::scalar(8); - const LLT S16 = LLT::scalar(16); - const LLT S32 = LLT::scalar(32); - const LLT S64 = LLT::scalar(64); - const LLT P0 = LLT::pointer(0, 64); unsigned Opc; if (Ty == LLT::fixed_vector(8, S8)) Opc = AArch64::ST2Twov8b; diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-ld2.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-ld2.mir new file mode 100644 index 0000000000000..940485ea0b427 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-ld2.mir @@ -0,0 +1,232 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=aarch64 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s +... +--- +name: LD2Twov8b +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0, $x1, $x2 + ; CHECK-LABEL: name: LD2Twov8b + ; CHECK: liveins: $x0, $x1, $x2 + ; CHECK: %ptr:gpr64sp = COPY $x0 + ; CHECK: [[LD2Twov8b:%[0-9]+]]:dd = LD2Twov8b %ptr :: (load (<8 x s64>)) + ; CHECK: %dst1:fpr64 = COPY [[LD2Twov8b]].dsub0 + ; CHECK: %dst2:fpr64 = COPY [[LD2Twov8b]].dsub1 + ; CHECK: $d0 = COPY %dst1 + ; CHECK: $d1 = COPY %dst2 + ; CHECK: RET_ReallyLR implicit $d0, implicit $d1 + %ptr:gpr(p0) = COPY $x0 + %dst1:fpr(<8 x s8>), %dst2:fpr(<8 x s8>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld2), %ptr(p0) :: (load (<8 x s64>)) + $d0 = COPY %dst1(<8 x s8>) + $d1 = COPY %dst2(<8 x s8>) + RET_ReallyLR implicit $d0, implicit $d1 +... +--- +name: LD2Twov16b +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0, $x1, $x2 + ; CHECK-LABEL: name: LD2Twov16b + ; CHECK: liveins: $x0, $x1, $x2 + ; CHECK: %ptr:gpr64sp = COPY $x0 + ; CHECK: [[LD2Twov16b:%[0-9]+]]:qq = LD2Twov16b %ptr :: (load (<16 x s64>)) + ; CHECK: %dst1:fpr128 = COPY [[LD2Twov16b]].qsub0 + ; CHECK: %dst2:fpr128 = COPY [[LD2Twov16b]].qsub1 + ; CHECK: $q0 = COPY %dst1 + ; CHECK: $q1 = COPY %dst2 + ; CHECK: RET_ReallyLR implicit $q0, implicit $q1 + %ptr:gpr(p0) = COPY $x0 + %dst1:fpr(<16 x s8>), %dst2:fpr(<16 x s8>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld2), %ptr(p0) :: (load (<16 x s64>)) + $q0 = COPY %dst1(<16 x s8>) + $q1 = COPY %dst2(<16 x s8>) + RET_ReallyLR implicit $q0, implicit $q1 +... +--- +name: LD2Twov4h +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0, $x1, $x2 + ; CHECK-LABEL: name: LD2Twov4h + ; CHECK: liveins: $x0, $x1, $x2 + ; CHECK: %ptr:gpr64sp = COPY $x0 + ; CHECK: [[LD2Twov4h:%[0-9]+]]:dd = LD2Twov4h %ptr :: (load (<4 x s64>)) + ; CHECK: %dst1:fpr64 = COPY [[LD2Twov4h]].dsub0 + ; CHECK: %dst2:fpr64 = COPY [[LD2Twov4h]].dsub1 + ; CHECK: $d0 = COPY %dst1 + ; CHECK: $d1 = COPY %dst2 + ; CHECK: RET_ReallyLR implicit $d0, implicit $d1 + %ptr:gpr(p0) = COPY $x0 + %dst1:fpr(<4 x s16>), %dst2:fpr(<4 x s16>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld2), %ptr(p0) :: (load (<4 x s64>)) + $d0 = COPY %dst1(<4 x s16>) + $d1 = COPY %dst2(<4 x s16>) + RET_ReallyLR implicit $d0, implicit $d1 +... 
+--- +name: LD2Twov8h +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0, $x1, $x2 + ; CHECK-LABEL: name: LD2Twov8h + ; CHECK: liveins: $x0, $x1, $x2 + ; CHECK: %ptr:gpr64sp = COPY $x0 + ; CHECK: [[LD2Twov8h:%[0-9]+]]:qq = LD2Twov8h %ptr :: (load (<8 x s64>)) + ; CHECK: %dst1:fpr128 = COPY [[LD2Twov8h]].qsub0 + ; CHECK: %dst2:fpr128 = COPY [[LD2Twov8h]].qsub1 + ; CHECK: $q0 = COPY %dst1 + ; CHECK: $q1 = COPY %dst2 + ; CHECK: RET_ReallyLR implicit $q0, implicit $q1 + %ptr:gpr(p0) = COPY $x0 + %dst1:fpr(<8 x s16>), %dst2:fpr(<8 x s16>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld2), %ptr(p0) :: (load (<8 x s64>)) + $q0 = COPY %dst1(<8 x s16>) + $q1 = COPY %dst2(<8 x s16>) + RET_ReallyLR implicit $q0, implicit $q1 +... +--- +name: LD2Twov2s +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0, $x1, $x2 + ; CHECK-LABEL: name: LD2Twov2s + ; CHECK: liveins: $x0, $x1, $x2 + ; CHECK: %ptr:gpr64sp = COPY $x0 + ; CHECK: [[LD2Twov2s:%[0-9]+]]:dd = LD2Twov2s %ptr :: (load (<2 x s64>)) + ; CHECK: %dst1:fpr64 = COPY [[LD2Twov2s]].dsub0 + ; CHECK: %dst2:fpr64 = COPY [[LD2Twov2s]].dsub1 + ; CHECK: $d0 = COPY %dst1 + ; CHECK: $d1 = COPY %dst2 + ; CHECK: RET_ReallyLR implicit $d0, implicit $d1 + %ptr:gpr(p0) = COPY $x0 + %dst1:fpr(<2 x s32>), %dst2:fpr(<2 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld2), %ptr(p0) :: (load (<2 x s64>)) + $d0 = COPY %dst1(<2 x s32>) + $d1 = COPY %dst2(<2 x s32>) + RET_ReallyLR implicit $d0, implicit $d1 +... +--- +name: LD2Twov4s +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0, $x1, $x2 + ; CHECK-LABEL: name: LD2Twov4s + ; CHECK: liveins: $x0, $x1, $x2 + ; CHECK: %ptr:gpr64sp = COPY $x0 + ; CHECK: [[LD2Twov4s:%[0-9]+]]:qq = LD2Twov4s %ptr :: (load (<4 x s64>)) + ; CHECK: %dst1:fpr128 = COPY [[LD2Twov4s]].qsub0 + ; CHECK: %dst2:fpr128 = COPY [[LD2Twov4s]].qsub1 + ; CHECK: $q0 = COPY %dst1 + ; CHECK: $q1 = COPY %dst2 + ; CHECK: RET_ReallyLR implicit $q0, implicit $q1 + %ptr:gpr(p0) = COPY $x0 + %dst1:fpr(<4 x s32>), %dst2:fpr(<4 x s32>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld2), %ptr(p0) :: (load (<4 x s64>)) + $q0 = COPY %dst1(<4 x s32>) + $q1 = COPY %dst2(<4 x s32>) + RET_ReallyLR implicit $q0, implicit $q1 +... +--- +name: LD2Twov2d_s64 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0, $x1, $x2 + ; CHECK-LABEL: name: LD2Twov2d_s64 + ; CHECK: liveins: $x0, $x1, $x2 + ; CHECK: %ptr:gpr64sp = COPY $x0 + ; CHECK: [[LD2Twov2d:%[0-9]+]]:qq = LD2Twov2d %ptr :: (load (<2 x s64>)) + ; CHECK: %dst1:fpr128 = COPY [[LD2Twov2d]].qsub0 + ; CHECK: %dst2:fpr128 = COPY [[LD2Twov2d]].qsub1 + ; CHECK: $q0 = COPY %dst1 + ; CHECK: $q1 = COPY %dst2 + ; CHECK: RET_ReallyLR implicit $q0, implicit $q1 + %ptr:gpr(p0) = COPY $x0 + %dst1:fpr(<2 x s64>), %dst2:fpr(<2 x s64>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld2), %ptr(p0) :: (load (<2 x s64>)) + $q0 = COPY %dst1(<2 x s64>) + $q1 = COPY %dst2(<2 x s64>) + RET_ReallyLR implicit $q0, implicit $q1 +... 
+--- +name: LD2Twov2d_p0 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0, $x1, $x2 + ; CHECK-LABEL: name: LD2Twov2d_p0 + ; CHECK: liveins: $x0, $x1, $x2 + ; CHECK: %ptr:gpr64sp = COPY $x0 + ; CHECK: [[LD2Twov2d:%[0-9]+]]:qq = LD2Twov2d %ptr :: (load (<2 x p0>)) + ; CHECK: %dst1:fpr128 = COPY [[LD2Twov2d]].qsub0 + ; CHECK: %dst2:fpr128 = COPY [[LD2Twov2d]].qsub1 + ; CHECK: $q0 = COPY %dst1 + ; CHECK: $q1 = COPY %dst2 + ; CHECK: RET_ReallyLR implicit $q0, implicit $q1 + %ptr:gpr(p0) = COPY $x0 + %dst1:fpr(<2 x p0>), %dst2:fpr(<2 x p0>) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld2), %ptr(p0) :: (load (<2 x p0>)) + $q0 = COPY %dst1(<2 x p0>) + $q1 = COPY %dst2(<2 x p0>) + RET_ReallyLR implicit $q0, implicit $q1 +... +--- +name: LD1Twov1d_s64 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0, $x1, $x2 + ; CHECK-LABEL: name: LD1Twov1d_s64 + ; CHECK: liveins: $x0, $x1, $x2 + ; CHECK: %ptr:gpr64sp = COPY $x0 + ; CHECK: [[LD1Twov1d:%[0-9]+]]:dd = LD1Twov1d %ptr :: (load (s64)) + ; CHECK: %dst1:fpr64 = COPY [[LD1Twov1d]].dsub0 + ; CHECK: %dst2:fpr64 = COPY [[LD1Twov1d]].dsub1 + ; CHECK: $d0 = COPY %dst1 + ; CHECK: $d1 = COPY %dst2 + ; CHECK: RET_ReallyLR implicit $d0, implicit $d1 + %ptr:gpr(p0) = COPY $x0 + %dst1:fpr(s64), %dst2:fpr(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld2), %ptr(p0) :: (load (s64)) + $d0 = COPY %dst1(s64) + $d1 = COPY %dst2(s64) + RET_ReallyLR implicit $d0, implicit $d1 +... +--- +name: LD1Twov1d_p0 +legalized: true +regBankSelected: true +tracksRegLiveness: true +body: | + bb.0: + liveins: $x0, $x1, $x2 + ; CHECK-LABEL: name: LD1Twov1d_p0 + ; CHECK: liveins: $x0, $x1, $x2 + ; CHECK: %ptr:gpr64sp = COPY $x0 + ; CHECK: [[LD1Twov1d:%[0-9]+]]:dd = LD1Twov1d %ptr :: (load (p0)) + ; CHECK: %dst1:fpr64 = COPY [[LD1Twov1d]].dsub0 + ; CHECK: %dst2:fpr64 = COPY [[LD1Twov1d]].dsub1 + ; CHECK: $d0 = COPY %dst1 + ; CHECK: $d1 = COPY %dst2 + ; CHECK: RET_ReallyLR implicit $d0, implicit $d1 + %ptr:gpr(p0) = COPY $x0 + %dst1:fpr(p0), %dst2:fpr(p0) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.neon.ld2), %ptr(p0) :: (load (p0)) + $d0 = COPY %dst1(p0) + $d1 = COPY %dst2(p0) + RET_ReallyLR implicit $d0, implicit $d1 From 96ef794fd04d8d2e7d15527e4ec18895de83a9cf Mon Sep 17 00:00:00 2001 From: Philip Reames Date: Mon, 23 Aug 2021 17:36:24 -0700 Subject: [PATCH 5/9] [SCEV] Add a hasFlags utility to improve readability [NFC] --- llvm/include/llvm/Analysis/ScalarEvolution.h | 4 ++++ llvm/lib/Analysis/ScalarEvolution.cpp | 8 +++----- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/llvm/include/llvm/Analysis/ScalarEvolution.h b/llvm/include/llvm/Analysis/ScalarEvolution.h index ae9c73fede961..bbe94c8ed8f6f 100644 --- a/llvm/include/llvm/Analysis/ScalarEvolution.h +++ b/llvm/include/llvm/Analysis/ScalarEvolution.h @@ -472,6 +472,10 @@ class ScalarEvolution { clearFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OffFlags) { return (SCEV::NoWrapFlags)(Flags & ~OffFlags); } + LLVM_NODISCARD static bool hasFlags(SCEV::NoWrapFlags Flags, + SCEV::NoWrapFlags TestFlags) { + return TestFlags == maskFlags(Flags, TestFlags); + }; ScalarEvolution(Function &F, TargetLibraryInfo &TLI, AssumptionCache &AC, DominatorTree &DT, LoopInfo &LI); diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp index e6fdcc1ea4f10..b43e69fb4ed25 100644 --- a/llvm/lib/Analysis/ScalarEvolution.cpp +++ b/llvm/lib/Analysis/ScalarEvolution.cpp @@ 
-2562,8 +2562,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl &Ops, APInt ConstAdd = C1 + C2; auto AddFlags = AddExpr->getNoWrapFlags(); // Adding a smaller constant is NUW if the original AddExpr was NUW. - if (ScalarEvolution::maskFlags(AddFlags, SCEV::FlagNUW) == - SCEV::FlagNUW && + if (ScalarEvolution::hasFlags(AddFlags, SCEV::FlagNUW) && ConstAdd.ule(C1)) { PreservedFlags = ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNUW); @@ -2571,8 +2570,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl &Ops, // Adding a constant with the same sign and small magnitude is NSW, if the // original AddExpr was NSW. - if (ScalarEvolution::maskFlags(AddFlags, SCEV::FlagNSW) == - SCEV::FlagNSW && + if (ScalarEvolution::hasFlags(AddFlags, SCEV::FlagNSW) && C1.isSignBitSet() == ConstAdd.isSignBitSet() && ConstAdd.abs().ule(C1.abs())) { PreservedFlags = @@ -4207,7 +4205,7 @@ const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS, auto AddFlags = SCEV::FlagAnyWrap; const bool RHSIsNotMinSigned = !getSignedRangeMin(RHS).isMinSignedValue(); - if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) { + if (hasFlags(Flags, SCEV::FlagNSW)) { // Let M be the minimum representable signed value. Then (-1)*RHS // signed-wraps if and only if RHS is M. That can happen even for // a NSW subtraction because e.g. (-1)*M signed-wraps even though From c728bd5bbaab5dad3bf0703f1a85d65cd1237e79 Mon Sep 17 00:00:00 2001 From: "Wang, Pengfei" Date: Tue, 24 Aug 2021 09:05:33 +0800 Subject: [PATCH 6/9] [X86] AVX512FP16 instructions enabling 5/6 Enable FP16 FMA instructions. Ref.: https://software.intel.com/content/www/us/en/develop/download/intel-avx512-fp16-architecture-specification.html Reviewed By: LuoYuanke Differential Revision: https://reviews.llvm.org/D105268 --- clang/include/clang/Basic/BuiltinsX86.def | 19 + clang/lib/CodeGen/CGBuiltin.cpp | 56 +- clang/lib/Headers/avx512fp16intrin.h | 486 ++++ clang/lib/Headers/avx512vlfp16intrin.h | 372 +++ clang/lib/Sema/SemaChecking.cpp | 11 + clang/test/CodeGen/X86/avx512fp16-builtins.c | 833 ++++++ .../test/CodeGen/X86/avx512vlfp16-builtins.c | 405 +++ llvm/include/llvm/IR/IntrinsicsX86.td | 23 + llvm/lib/Target/X86/X86ISelLowering.cpp | 8 +- llvm/lib/Target/X86/X86InstrAVX512.td | 118 +- llvm/lib/Target/X86/X86InstrFMA3Info.cpp | 46 +- llvm/lib/Target/X86/X86InstrFoldTables.cpp | 210 ++ llvm/lib/Target/X86/X86InstrFormats.td | 1 - llvm/lib/Target/X86/X86InstrInfo.cpp | 18 + llvm/lib/Target/X86/X86IntrinsicsInfo.h | 6 + .../CodeGen/X86/avx512fp16-fma-commute.ll | 1363 +++++++++ .../CodeGen/X86/avx512fp16-fma-intrinsics.ll | 585 ++++ .../X86/avx512fp16vl-fma-intrinsics.ll | 530 ++++ .../test/CodeGen/X86/fp-strict-scalar-fp16.ll | 19 + .../X86/stack-folding-fp-avx512fp16-fma.ll | 2526 +++++++++++++++++ .../X86/stack-folding-fp-avx512fp16vl-fma.ll | 1595 +++++++++++ llvm/test/CodeGen/X86/vec-strict-128-fp16.ll | 12 + llvm/test/CodeGen/X86/vec-strict-256-fp16.ll | 12 + llvm/test/CodeGen/X86/vec-strict-512-fp16.ll | 12 + llvm/test/MC/Disassembler/X86/avx512fp16.txt | 720 +++++ .../test/MC/Disassembler/X86/avx512fp16vl.txt | 720 +++++ llvm/test/MC/X86/avx512fp16.s | 720 +++++ llvm/test/MC/X86/avx512fp16vl.s | 720 +++++ llvm/test/MC/X86/intel-syntax-avx512fp16.s | 720 +++++ llvm/test/MC/X86/intel-syntax-avx512fp16vl.s | 720 +++++ 30 files changed, 13519 insertions(+), 67 deletions(-) create mode 100644 llvm/test/CodeGen/X86/avx512fp16-fma-commute.ll create mode 100644 llvm/test/CodeGen/X86/avx512fp16-fma-intrinsics.ll create mode 100644 
llvm/test/CodeGen/X86/avx512fp16vl-fma-intrinsics.ll create mode 100644 llvm/test/CodeGen/X86/stack-folding-fp-avx512fp16-fma.ll create mode 100644 llvm/test/CodeGen/X86/stack-folding-fp-avx512fp16vl-fma.ll diff --git a/clang/include/clang/Basic/BuiltinsX86.def b/clang/include/clang/Basic/BuiltinsX86.def index f21c17ee0ebe9..0ab1444e7120a 100644 --- a/clang/include/clang/Basic/BuiltinsX86.def +++ b/clang/include/clang/Basic/BuiltinsX86.def @@ -1995,6 +1995,25 @@ TARGET_BUILTIN(__builtin_ia32_vcvtps2phx128_mask, "V8xV4fV8xUc", "ncV:128:", "av TARGET_BUILTIN(__builtin_ia32_vcvtps2phx256_mask, "V8xV8fV8xUc", "ncV:256:", "avx512fp16,avx512vl") TARGET_BUILTIN(__builtin_ia32_vcvtps2phx512_mask, "V16xV16fV16xUsIi", "ncV:512:", "avx512fp16") +TARGET_BUILTIN(__builtin_ia32_vfmaddph, "V8xV8xV8xV8x", "ncV:128:", "avx512fp16,avx512vl") +TARGET_BUILTIN(__builtin_ia32_vfmaddph256, "V16xV16xV16xV16x", "ncV:256:", "avx512fp16,avx512vl") +TARGET_BUILTIN(__builtin_ia32_vfmaddph512_mask, "V32xV32xV32xV32xUiIi", "ncV:512:", "avx512fp16") +TARGET_BUILTIN(__builtin_ia32_vfmaddph512_mask3, "V32xV32xV32xV32xUiIi", "ncV:512:", "avx512fp16") +TARGET_BUILTIN(__builtin_ia32_vfmaddph512_maskz, "V32xV32xV32xV32xUiIi", "ncV:512:", "avx512fp16") +TARGET_BUILTIN(__builtin_ia32_vfmaddsubph, "V8xV8xV8xV8x", "ncV:128:", "avx512fp16,avx512vl") +TARGET_BUILTIN(__builtin_ia32_vfmaddsubph256, "V16xV16xV16xV16x", "ncV:256:", "avx512fp16,avx512vl") +TARGET_BUILTIN(__builtin_ia32_vfmaddsubph512_mask, "V32xV32xV32xV32xUiIi", "ncV:512:", "avx512fp16") +TARGET_BUILTIN(__builtin_ia32_vfmaddsubph512_maskz, "V32xV32xV32xV32xUiIi", "ncV:512:", "avx512fp16") +TARGET_BUILTIN(__builtin_ia32_vfmaddsubph512_mask3, "V32xV32xV32xV32xUiIi", "ncV:512:", "avx512fp16") + +TARGET_BUILTIN(__builtin_ia32_vfmsubaddph512_mask3, "V32xV32xV32xV32xUiIi", "ncV:512:", "avx512fp16") +TARGET_BUILTIN(__builtin_ia32_vfmsubph512_mask3, "V32xV32xV32xV32xUiIi", "ncV:512:", "avx512fp16") + +TARGET_BUILTIN(__builtin_ia32_vfmaddsh3_mask, "V8xV8xV8xV8xUcIi", "ncV:128:", "avx512fp16") +TARGET_BUILTIN(__builtin_ia32_vfmaddsh3_maskz, "V8xV8xV8xV8xUcIi", "ncV:128:", "avx512fp16") +TARGET_BUILTIN(__builtin_ia32_vfmaddsh3_mask3, "V8xV8xV8xV8xUcIi", "ncV:128:", "avx512fp16") +TARGET_BUILTIN(__builtin_ia32_vfmsubsh3_mask3, "V8xV8xV8xV8xUcIi", "ncV:128:", "avx512fp16") + // generic select intrinsics TARGET_BUILTIN(__builtin_ia32_selectb_128, "V16cUsV16cV16c", "ncV:128:", "avx512bw,avx512vl") TARGET_BUILTIN(__builtin_ia32_selectb_256, "V32cUiV32cV32c", "ncV:256:", "avx512bw,avx512vl") diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp index a1007da6e5472..ca6987b378a85 100644 --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -12050,6 +12050,22 @@ static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IID = Intrinsic::not_intrinsic; switch (BuiltinID) { default: break; + case clang::X86::BI__builtin_ia32_vfmsubph512_mask3: + Subtract = true; + LLVM_FALLTHROUGH; + case clang::X86::BI__builtin_ia32_vfmaddph512_mask: + case clang::X86::BI__builtin_ia32_vfmaddph512_maskz: + case clang::X86::BI__builtin_ia32_vfmaddph512_mask3: + IID = llvm::Intrinsic::x86_avx512fp16_vfmadd_ph_512; + break; + case clang::X86::BI__builtin_ia32_vfmsubaddph512_mask3: + Subtract = true; + LLVM_FALLTHROUGH; + case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask: + case clang::X86::BI__builtin_ia32_vfmaddsubph512_maskz: + case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask3: + IID = 
llvm::Intrinsic::x86_avx512fp16_vfmaddsub_ph_512; + break; case clang::X86::BI__builtin_ia32_vfmsubps512_mask3: Subtract = true; LLVM_FALLTHROUGH; @@ -12113,22 +12129,30 @@ static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E, // Handle any required masking. Value *MaskFalseVal = nullptr; switch (BuiltinID) { + case clang::X86::BI__builtin_ia32_vfmaddph512_mask: case clang::X86::BI__builtin_ia32_vfmaddps512_mask: case clang::X86::BI__builtin_ia32_vfmaddpd512_mask: + case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask: case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask: case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask: MaskFalseVal = Ops[0]; break; + case clang::X86::BI__builtin_ia32_vfmaddph512_maskz: case clang::X86::BI__builtin_ia32_vfmaddps512_maskz: case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz: + case clang::X86::BI__builtin_ia32_vfmaddsubph512_maskz: case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz: case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz: MaskFalseVal = Constant::getNullValue(Ops[0]->getType()); break; + case clang::X86::BI__builtin_ia32_vfmsubph512_mask3: + case clang::X86::BI__builtin_ia32_vfmaddph512_mask3: case clang::X86::BI__builtin_ia32_vfmsubps512_mask3: case clang::X86::BI__builtin_ia32_vfmaddps512_mask3: case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3: case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3: + case clang::X86::BI__builtin_ia32_vfmsubaddph512_mask3: + case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask3: case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3: case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3: case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3: @@ -12159,9 +12183,21 @@ static Value *EmitScalarFMAExpr(CodeGenFunction &CGF, const CallExpr *E, Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], (uint64_t)0); Value *Res; if (Rnd != 4) { - Intrinsic::ID IID = Ops[0]->getType()->getPrimitiveSizeInBits() == 32 ? 
- Intrinsic::x86_avx512_vfmadd_f32 : - Intrinsic::x86_avx512_vfmadd_f64; + Intrinsic::ID IID; + + switch (Ops[0]->getType()->getPrimitiveSizeInBits()) { + case 16: + IID = Intrinsic::x86_avx512fp16_vfmadd_f16; + break; + case 32: + IID = Intrinsic::x86_avx512_vfmadd_f32; + break; + case 64: + IID = Intrinsic::x86_avx512_vfmadd_f64; + break; + default: + llvm_unreachable("Unexpected size"); + } Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID), {Ops[0], Ops[1], Ops[2], Ops[4]}); } else if (CGF.Builder.getIsFPConstrained()) { @@ -12764,6 +12800,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, case X86::BI__builtin_ia32_vfmaddss3: case X86::BI__builtin_ia32_vfmaddsd3: + case X86::BI__builtin_ia32_vfmaddsh3_mask: case X86::BI__builtin_ia32_vfmaddss3_mask: case X86::BI__builtin_ia32_vfmaddsd3_mask: return EmitScalarFMAExpr(*this, E, Ops, Ops[0]); @@ -12771,20 +12808,28 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, case X86::BI__builtin_ia32_vfmaddsd: return EmitScalarFMAExpr(*this, E, Ops, Constant::getNullValue(Ops[0]->getType())); + case X86::BI__builtin_ia32_vfmaddsh3_maskz: case X86::BI__builtin_ia32_vfmaddss3_maskz: case X86::BI__builtin_ia32_vfmaddsd3_maskz: return EmitScalarFMAExpr(*this, E, Ops, Ops[0], /*ZeroMask*/ true); + case X86::BI__builtin_ia32_vfmaddsh3_mask3: case X86::BI__builtin_ia32_vfmaddss3_mask3: case X86::BI__builtin_ia32_vfmaddsd3_mask3: return EmitScalarFMAExpr(*this, E, Ops, Ops[2], /*ZeroMask*/ false, 2); + case X86::BI__builtin_ia32_vfmsubsh3_mask3: case X86::BI__builtin_ia32_vfmsubss3_mask3: case X86::BI__builtin_ia32_vfmsubsd3_mask3: return EmitScalarFMAExpr(*this, E, Ops, Ops[2], /*ZeroMask*/ false, 2, /*NegAcc*/ true); + case X86::BI__builtin_ia32_vfmaddph: case X86::BI__builtin_ia32_vfmaddps: case X86::BI__builtin_ia32_vfmaddpd: + case X86::BI__builtin_ia32_vfmaddph256: case X86::BI__builtin_ia32_vfmaddps256: case X86::BI__builtin_ia32_vfmaddpd256: + case X86::BI__builtin_ia32_vfmaddph512_mask: + case X86::BI__builtin_ia32_vfmaddph512_maskz: + case X86::BI__builtin_ia32_vfmaddph512_mask3: case X86::BI__builtin_ia32_vfmaddps512_mask: case X86::BI__builtin_ia32_vfmaddps512_maskz: case X86::BI__builtin_ia32_vfmaddps512_mask3: @@ -12793,7 +12838,12 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, case X86::BI__builtin_ia32_vfmaddpd512_maskz: case X86::BI__builtin_ia32_vfmaddpd512_mask3: case X86::BI__builtin_ia32_vfmsubpd512_mask3: + case X86::BI__builtin_ia32_vfmsubph512_mask3: return EmitX86FMAExpr(*this, E, Ops, BuiltinID, /*IsAddSub*/ false); + case X86::BI__builtin_ia32_vfmaddsubph512_mask: + case X86::BI__builtin_ia32_vfmaddsubph512_maskz: + case X86::BI__builtin_ia32_vfmaddsubph512_mask3: + case X86::BI__builtin_ia32_vfmsubaddph512_mask3: case X86::BI__builtin_ia32_vfmaddsubps512_mask: case X86::BI__builtin_ia32_vfmaddsubps512_maskz: case X86::BI__builtin_ia32_vfmaddsubps512_mask3: diff --git a/clang/lib/Headers/avx512fp16intrin.h b/clang/lib/Headers/avx512fp16intrin.h index 48370d0bf0ee0..6440be3799df8 100644 --- a/clang/lib/Headers/avx512fp16intrin.h +++ b/clang/lib/Headers/avx512fp16intrin.h @@ -2423,6 +2423,492 @@ _mm512_maskz_cvtxps_ph(__mmask16 __U, __m512 __A) { _MM_FROUND_CUR_DIRECTION); } +#define _mm512_fmadd_round_ph(A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_mask( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)-1, (int)(R))) + +#define _mm512_mask_fmadd_round_ph(A, U, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_mask( \ + 
(__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_mask3_fmadd_round_ph(A, B, C, U, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_mask3( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_maskz_fmadd_round_ph(U, A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_maskz( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_fmsub_round_ph(A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_mask( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C), \ + (__mmask32)-1, (int)(R))) + +#define _mm512_mask_fmsub_round_ph(A, U, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_mask( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_maskz_fmsub_round_ph(U, A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_maskz( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_fnmadd_round_ph(A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_mask( \ + (__v32hf)(__m512h)(A), -(__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)-1, (int)(R))) + +#define _mm512_mask3_fnmadd_round_ph(A, B, C, U, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_mask3( \ + -(__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_maskz_fnmadd_round_ph(U, A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_maskz( \ + -(__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_fnmsub_round_ph(A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_mask( \ + (__v32hf)(__m512h)(A), -(__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C), \ + (__mmask32)-1, (int)(R))) + +#define _mm512_maskz_fnmsub_round_ph(U, A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_maskz( \ + -(__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fmadd_ph(__m512h __A, + __m512h __B, + __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, (__v32hf)__B, + (__v32hf)__C, (__mmask32)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask_fmadd_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, (__v32hf)__B, + (__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask3_fmadd_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) { + return (__m512h)__builtin_ia32_vfmaddph512_mask3((__v32hf)__A, (__v32hf)__B, + (__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_fmadd_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddph512_maskz((__v32hf)__A, (__v32hf)__B, + (__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fmsub_ph(__m512h __A, + __m512h __B, + __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, (__v32hf)__B, + -(__v32hf)__C, (__mmask32)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask_fmsub_ph(__m512h __A, 
__mmask32 __U, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, (__v32hf)__B, + -(__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_fmsub_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddph512_maskz( + (__v32hf)__A, (__v32hf)__B, -(__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fnmadd_ph(__m512h __A, + __m512h __B, + __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, -(__v32hf)__B, + (__v32hf)__C, (__mmask32)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask3_fnmadd_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) { + return (__m512h)__builtin_ia32_vfmaddph512_mask3(-(__v32hf)__A, (__v32hf)__B, + (__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_fnmadd_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddph512_maskz(-(__v32hf)__A, (__v32hf)__B, + (__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fnmsub_ph(__m512h __A, + __m512h __B, + __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, -(__v32hf)__B, + -(__v32hf)__C, (__mmask32)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_fnmsub_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddph512_maskz( + -(__v32hf)__A, (__v32hf)__B, -(__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_fmaddsub_round_ph(A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddsubph512_mask( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)-1, (int)(R))) + +#define _mm512_mask_fmaddsub_round_ph(A, U, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddsubph512_mask( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_mask3_fmaddsub_round_ph(A, B, C, U, R) \ + ((__m512h)__builtin_ia32_vfmaddsubph512_mask3( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_maskz_fmaddsub_round_ph(U, A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddsubph512_maskz( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_fmsubadd_round_ph(A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddsubph512_mask( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C), \ + (__mmask32)-1, (int)(R))) + +#define _mm512_mask_fmsubadd_round_ph(A, U, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddsubph512_mask( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_maskz_fmsubadd_round_ph(U, A, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddsubph512_maskz( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_fmaddsub_ph(__m512h __A, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddsubph512_mask( + (__v32hf)__A, (__v32hf)__B, (__v32hf)__C, (__mmask32)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h 
__DEFAULT_FN_ATTRS512 +_mm512_mask_fmaddsub_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddsubph512_mask( + (__v32hf)__A, (__v32hf)__B, (__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask3_fmaddsub_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) { + return (__m512h)__builtin_ia32_vfmaddsubph512_mask3( + (__v32hf)__A, (__v32hf)__B, (__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_fmaddsub_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddsubph512_maskz( + (__v32hf)__A, (__v32hf)__B, (__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_fmsubadd_ph(__m512h __A, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddsubph512_mask( + (__v32hf)__A, (__v32hf)__B, -(__v32hf)__C, (__mmask32)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask_fmsubadd_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddsubph512_mask( + (__v32hf)__A, (__v32hf)__B, -(__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_maskz_fmsubadd_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddsubph512_maskz( + (__v32hf)__A, (__v32hf)__B, -(__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_mask3_fmsub_round_ph(A, B, C, U, R) \ + ((__m512h)__builtin_ia32_vfmsubph512_mask3( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask3_fmsub_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) { + return (__m512h)__builtin_ia32_vfmsubph512_mask3((__v32hf)__A, (__v32hf)__B, + (__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_mask3_fmsubadd_round_ph(A, B, C, U, R) \ + ((__m512h)__builtin_ia32_vfmsubaddph512_mask3( \ + (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask3_fmsubadd_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) { + return (__m512h)__builtin_ia32_vfmsubaddph512_mask3( + (__v32hf)__A, (__v32hf)__B, (__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_mask_fnmadd_round_ph(A, U, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_mask( \ + (__v32hf)(__m512h)(A), -(__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask_fnmadd_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, -(__v32hf)__B, + (__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm512_mask_fnmsub_round_ph(A, U, B, C, R) \ + ((__m512h)__builtin_ia32_vfmaddph512_mask( \ + (__v32hf)(__m512h)(A), -(__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +#define _mm512_mask3_fnmsub_round_ph(A, B, C, U, R) \ + ((__m512h)__builtin_ia32_vfmsubph512_mask3( \ + -(__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \ + (__mmask32)(U), (int)(R))) + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 
+_mm512_mask_fnmsub_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) { + return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, -(__v32hf)__B, + -(__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m512h __DEFAULT_FN_ATTRS512 +_mm512_mask3_fnmsub_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) { + return (__m512h)__builtin_ia32_vfmsubph512_mask3(-(__v32hf)__A, (__v32hf)__B, + (__v32hf)__C, (__mmask32)__U, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmadd_sh(__m128h __W, + __m128h __A, + __m128h __B) { + return __builtin_ia32_vfmaddsh3_mask((__v8hf)__W, (__v8hf)__A, (__v8hf)__B, + (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmadd_sh(__m128h __W, + __mmask8 __U, + __m128h __A, + __m128h __B) { + return __builtin_ia32_vfmaddsh3_mask((__v8hf)__W, (__v8hf)__A, (__v8hf)__B, + (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_fmadd_round_sh(A, B, C, R) \ + ((__m128h)__builtin_ia32_vfmaddsh3_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(C), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_fmadd_round_sh(W, U, A, B, R) \ + ((__m128h)__builtin_ia32_vfmaddsh3_mask( \ + (__v8hf)(__m128h)(W), (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_fmadd_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + return __builtin_ia32_vfmaddsh3_maskz((__v8hf)__A, (__v8hf)__B, (__v8hf)__C, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fmadd_round_sh(U, A, B, C, R) \ + ((__m128h)__builtin_ia32_vfmaddsh3_maskz( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(C), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask3_fmadd_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) { + return __builtin_ia32_vfmaddsh3_mask3((__v8hf)__W, (__v8hf)__X, (__v8hf)__Y, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fmadd_round_sh(W, X, Y, U, R) \ + ((__m128h)__builtin_ia32_vfmaddsh3_mask3( \ + (__v8hf)(__m128h)(W), (__v8hf)(__m128h)(X), (__v8hf)(__m128h)(Y), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmsub_sh(__m128h __W, + __m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_vfmaddsh3_mask((__v8hf)__W, (__v8hf)__A, + -(__v8hf)__B, (__mmask8)-1, + _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmsub_sh(__m128h __W, + __mmask8 __U, + __m128h __A, + __m128h __B) { + return (__m128h)__builtin_ia32_vfmaddsh3_mask((__v8hf)__W, (__v8hf)__A, + -(__v8hf)__B, (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_fmsub_round_sh(A, B, C, R) \ + ((__m128h)__builtin_ia32_vfmaddsh3_mask( \ + (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), -(__v8hf)(__m128h)(C), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_fmsub_round_sh(W, U, A, B, R) \ + ((__m128h)__builtin_ia32_vfmaddsh3_mask( \ + (__v8hf)(__m128h)(W), (__v8hf)(__m128h)(A), -(__v8hf)(__m128h)(B), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_fmsub_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + return (__m128h)__builtin_ia32_vfmaddsh3_maskz((__v8hf)__A, (__v8hf)__B, + -(__v8hf)__C, (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fmsub_round_sh(U, A, B, C, R) \ + ((__m128h)__builtin_ia32_vfmaddsh3_maskz( \ + (__v8hf)(__m128h)(A), 
(__v8hf)(__m128h)(B), -(__v8hf)(__m128h)(C), \ + (__mmask8)(U), (int)R)) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask3_fmsub_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) { + return __builtin_ia32_vfmsubsh3_mask3((__v8hf)__W, (__v8hf)__X, (__v8hf)__Y, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fmsub_round_sh(W, X, Y, U, R) \ + ((__m128h)__builtin_ia32_vfmsubsh3_mask3( \ + (__v8hf)(__m128h)(W), (__v8hf)(__m128h)(X), (__v8hf)(__m128h)(Y), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fnmadd_sh(__m128h __W, + __m128h __A, + __m128h __B) { + return __builtin_ia32_vfmaddsh3_mask((__v8hf)__W, -(__v8hf)__A, (__v8hf)__B, + (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask_fnmadd_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) { + return __builtin_ia32_vfmaddsh3_mask((__v8hf)__W, -(__v8hf)__A, (__v8hf)__B, + (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_fnmadd_round_sh(A, B, C, R) \ + ((__m128h)__builtin_ia32_vfmaddsh3_mask( \ + (__v8hf)(__m128h)(A), -(__v8hf)(__m128h)(B), (__v8hf)(__m128h)(C), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_fnmadd_round_sh(W, U, A, B, R) \ + ((__m128h)__builtin_ia32_vfmaddsh3_mask( \ + (__v8hf)(__m128h)(W), -(__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_fnmadd_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + return __builtin_ia32_vfmaddsh3_maskz((__v8hf)__A, -(__v8hf)__B, (__v8hf)__C, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fnmadd_round_sh(U, A, B, C, R) \ + ((__m128h)__builtin_ia32_vfmaddsh3_maskz( \ + (__v8hf)(__m128h)(A), -(__v8hf)(__m128h)(B), (__v8hf)(__m128h)(C), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask3_fnmadd_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) { + return __builtin_ia32_vfmaddsh3_mask3((__v8hf)__W, -(__v8hf)__X, (__v8hf)__Y, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fnmadd_round_sh(W, X, Y, U, R) \ + ((__m128h)__builtin_ia32_vfmaddsh3_mask3( \ + (__v8hf)(__m128h)(W), -(__v8hf)(__m128h)(X), (__v8hf)(__m128h)(Y), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fnmsub_sh(__m128h __W, + __m128h __A, + __m128h __B) { + return __builtin_ia32_vfmaddsh3_mask((__v8hf)__W, -(__v8hf)__A, -(__v8hf)__B, + (__mmask8)-1, _MM_FROUND_CUR_DIRECTION); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask_fnmsub_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) { + return __builtin_ia32_vfmaddsh3_mask((__v8hf)__W, -(__v8hf)__A, -(__v8hf)__B, + (__mmask8)__U, _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_fnmsub_round_sh(A, B, C, R) \ + ((__m128h)__builtin_ia32_vfmaddsh3_mask( \ + (__v8hf)(__m128h)(A), -(__v8hf)(__m128h)(B), -(__v8hf)(__m128h)(C), \ + (__mmask8)-1, (int)(R))) + +#define _mm_mask_fnmsub_round_sh(W, U, A, B, R) \ + ((__m128h)__builtin_ia32_vfmaddsh3_mask( \ + (__v8hf)(__m128h)(W), -(__v8hf)(__m128h)(A), -(__v8hf)(__m128h)(B), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_fnmsub_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + return __builtin_ia32_vfmaddsh3_maskz((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_maskz_fnmsub_round_sh(U, A, B, C, R) \ + ((__m128h)__builtin_ia32_vfmaddsh3_maskz( \ + 
(__v8hf)(__m128h)(A), -(__v8hf)(__m128h)(B), -(__v8hf)(__m128h)(C), \ + (__mmask8)(U), (int)(R))) + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask3_fnmsub_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) { + return __builtin_ia32_vfmsubsh3_mask3((__v8hf)__W, -(__v8hf)__X, (__v8hf)__Y, + (__mmask8)__U, + _MM_FROUND_CUR_DIRECTION); +} + +#define _mm_mask3_fnmsub_round_sh(W, X, Y, U, R) \ + ((__m128h)__builtin_ia32_vfmsubsh3_mask3( \ + (__v8hf)(__m128h)(W), -(__v8hf)(__m128h)(X), (__v8hf)(__m128h)(Y), \ + (__mmask8)(U), (int)(R))) + static __inline__ _Float16 __DEFAULT_FN_ATTRS512 _mm512_reduce_add_ph(__m512h __W) { return __builtin_ia32_reduce_fadd_ph512(-0.0f16, __W); diff --git a/clang/lib/Headers/avx512vlfp16intrin.h b/clang/lib/Headers/avx512vlfp16intrin.h index 1809211fd4066..8f48b0156cd69 100644 --- a/clang/lib/Headers/avx512vlfp16intrin.h +++ b/clang/lib/Headers/avx512vlfp16intrin.h @@ -1371,6 +1371,378 @@ _mm256_maskz_cvtxps_ph(__mmask8 __U, __m256 __A) { (__v8sf)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U); } +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmadd_ph(__m128h __A, + __m128h __B, + __m128h __C) { + return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, + (__v8hf)__C); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmadd_ph(__m128h __A, + __mmask8 __U, + __m128h __B, + __m128h __C) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C), + (__v8hf)__A); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask3_fmadd_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C), + (__v8hf)__C); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_fmadd_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C), + (__v8hf)_mm_setzero_ph()); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmsub_ph(__m128h __A, + __m128h __B, + __m128h __C) { + return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, + -(__v8hf)__C); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmsub_ph(__m128h __A, + __mmask8 __U, + __m128h __B, + __m128h __C) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, _mm_fmsub_ph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C), + (__v8hf)__A); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_fmsub_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, _mm_fmsub_ph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C), + (__v8hf)_mm_setzero_ph()); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask3_fnmadd_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddph(-(__v8hf)__A, (__v8hf)__B, (__v8hf)__C), + (__v8hf)__C); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_fnmadd_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddph(-(__v8hf)__A, (__v8hf)__B, (__v8hf)__C), + (__v8hf)_mm_setzero_ph()); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_fnmsub_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + return 
(__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddph(-(__v8hf)__A, (__v8hf)__B, -(__v8hf)__C), + (__v8hf)_mm_setzero_ph()); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fmadd_ph(__m256h __A, + __m256h __B, + __m256h __C) { + return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, + (__v16hf)__C); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_fmadd_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C), + (__v16hf)__A); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask3_fmadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C), + (__v16hf)__C); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_fmadd_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C), + (__v16hf)_mm256_setzero_ph()); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fmsub_ph(__m256h __A, + __m256h __B, + __m256h __C) { + return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, + -(__v16hf)__C); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_fmsub_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C), + (__v16hf)__A); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_fmsub_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C), + (__v16hf)_mm256_setzero_ph()); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask3_fnmadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddph256(-(__v16hf)__A, (__v16hf)__B, (__v16hf)__C), + (__v16hf)__C); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_fnmadd_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddph256(-(__v16hf)__A, (__v16hf)__B, (__v16hf)__C), + (__v16hf)_mm256_setzero_ph()); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_fnmsub_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddph256(-(__v16hf)__A, (__v16hf)__B, -(__v16hf)__C), + (__v16hf)_mm256_setzero_ph()); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmaddsub_ph(__m128h __A, + __m128h __B, + __m128h __C) { + return (__m128h)__builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, + (__v8hf)__C); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask_fmaddsub_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C), + (__v8hf)__A); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask3_fmaddsub_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) { + return 
(__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C), + (__v8hf)__C); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_fmaddsub_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C), + (__v8hf)_mm_setzero_ph()); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmsubadd_ph(__m128h __A, + __m128h __B, + __m128h __C) { + return (__m128h)__builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, + -(__v8hf)__C); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask_fmsubadd_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, -(__v8hf)__C), + (__v8hf)__A); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_maskz_fmsubadd_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, -(__v8hf)__C), + (__v8hf)_mm_setzero_ph()); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_fmaddsub_ph(__m256h __A, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, + (__v16hf)__C); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_fmaddsub_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C), + (__v16hf)__A); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask3_fmaddsub_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C), + (__v16hf)__C); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_fmaddsub_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C), + (__v16hf)_mm256_setzero_ph()); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_fmsubadd_ph(__m256h __A, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, + -(__v16hf)__C); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_fmsubadd_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C), + (__v16hf)__A); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_maskz_fmsubadd_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C), + (__v16hf)_mm256_setzero_ph()); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask3_fmsub_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, -(__v8hf)__C), + (__v8hf)__C); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask3_fmsub_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) { + return 
(__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C), + (__v16hf)__C); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask3_fmsubadd_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, -(__v8hf)__C), + (__v8hf)__C); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask3_fmsubadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C), + (__v16hf)__C); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fnmadd_ph(__m128h __A, + __m128h __B, + __m128h __C) { + return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B, + (__v8hf)__C); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask_fnmadd_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B, (__v8hf)__C), + (__v8hf)__A); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fnmadd_ph(__m256h __A, + __m256h __B, + __m256h __C) { + return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B, + (__v16hf)__C); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_fnmadd_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B, (__v16hf)__C), + (__v16hf)__A); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fnmsub_ph(__m128h __A, + __m128h __B, + __m128h __C) { + return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B, + -(__v8hf)__C); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask_fnmsub_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C), + (__v8hf)__A); +} + +static __inline__ __m128h __DEFAULT_FN_ATTRS128 +_mm_mask3_fnmsub_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) { + return (__m128h)__builtin_ia32_selectph_128( + (__mmask8)__U, + __builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C), + (__v8hf)__C); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fnmsub_ph(__m256h __A, + __m256h __B, + __m256h __C) { + return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B, + -(__v16hf)__C); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask_fnmsub_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B, -(__v16hf)__C), + (__v16hf)__A); +} + +static __inline__ __m256h __DEFAULT_FN_ATTRS256 +_mm256_mask3_fnmsub_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) { + return (__m256h)__builtin_ia32_selectph_256( + (__mmask16)__U, + __builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B, -(__v16hf)__C), + (__v16hf)__C); +} + static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_blend_ph(__mmask8 __U, __m128h __A, __m128h __W) { diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp index 4e7d5b66bca7f..ee3efc14c1ab8 100644 --- a/clang/lib/Sema/SemaChecking.cpp +++ b/clang/lib/Sema/SemaChecking.cpp @@ -4084,6 
+4084,9 @@ bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { case X86::BI__builtin_ia32_vfmaddss3_mask: case X86::BI__builtin_ia32_vfmaddss3_maskz: case X86::BI__builtin_ia32_vfmaddss3_mask3: + case X86::BI__builtin_ia32_vfmaddsh3_mask: + case X86::BI__builtin_ia32_vfmaddsh3_maskz: + case X86::BI__builtin_ia32_vfmaddsh3_mask3: case X86::BI__builtin_ia32_vfmaddpd512_mask: case X86::BI__builtin_ia32_vfmaddpd512_maskz: case X86::BI__builtin_ia32_vfmaddpd512_mask3: @@ -4092,6 +4095,10 @@ bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { case X86::BI__builtin_ia32_vfmaddps512_maskz: case X86::BI__builtin_ia32_vfmaddps512_mask3: case X86::BI__builtin_ia32_vfmsubps512_mask3: + case X86::BI__builtin_ia32_vfmaddph512_mask: + case X86::BI__builtin_ia32_vfmaddph512_maskz: + case X86::BI__builtin_ia32_vfmaddph512_mask3: + case X86::BI__builtin_ia32_vfmsubph512_mask3: case X86::BI__builtin_ia32_vfmaddsubpd512_mask: case X86::BI__builtin_ia32_vfmaddsubpd512_maskz: case X86::BI__builtin_ia32_vfmaddsubpd512_mask3: @@ -4100,6 +4107,10 @@ bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { case X86::BI__builtin_ia32_vfmaddsubps512_maskz: case X86::BI__builtin_ia32_vfmaddsubps512_mask3: case X86::BI__builtin_ia32_vfmsubaddps512_mask3: + case X86::BI__builtin_ia32_vfmaddsubph512_mask: + case X86::BI__builtin_ia32_vfmaddsubph512_maskz: + case X86::BI__builtin_ia32_vfmaddsubph512_mask3: + case X86::BI__builtin_ia32_vfmsubaddph512_mask3: ArgNum = 4; HasRC = true; break; diff --git a/clang/test/CodeGen/X86/avx512fp16-builtins.c b/clang/test/CodeGen/X86/avx512fp16-builtins.c index 42591662606eb..1a6ddeea15fca 100644 --- a/clang/test/CodeGen/X86/avx512fp16-builtins.c +++ b/clang/test/CodeGen/X86/avx512fp16-builtins.c @@ -3163,6 +3163,839 @@ __m256h test_mm512_maskz_cvtxps_ph(__mmask16 A, __m512 B) { return _mm512_maskz_cvtxps_ph(A, B); } +__m512h test_mm512_fmadd_round_ph(__m512h __A, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_fmadd_round_ph + // CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512 + return _mm512_fmadd_round_ph(__A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m512h test_mm512_mask_fmadd_round_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_mask_fmadd_round_ph + // CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512 + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}} + return _mm512_mask_fmadd_round_ph(__A, __U, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m512h test_mm512_mask3_fmadd_round_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) { + // CHECK-LABEL: @test_mm512_mask3_fmadd_round_ph + // CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512 + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}} + return _mm512_mask3_fmadd_round_ph(__A, __B, __C, __U, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m512h test_mm512_maskz_fmadd_round_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_maskz_fmadd_round_ph + // CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512 + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> zeroinitializer + return _mm512_maskz_fmadd_round_ph(__U, __A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m512h test_mm512_fmsub_round_ph(__m512h __A, __m512h __B, __m512h __C) { + // 
CHECK-LABEL: @test_mm512_fmsub_round_ph + // CHECK: fneg + // CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512 + return _mm512_fmsub_round_ph(__A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m512h test_mm512_mask_fmsub_round_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_mask_fmsub_round_ph + // CHECK: fneg + // CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512 + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}} + return _mm512_mask_fmsub_round_ph(__A, __U, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m512h test_mm512_maskz_fmsub_round_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_maskz_fmsub_round_ph + // CHECK: fneg + // CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512 + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> zeroinitializer + return _mm512_maskz_fmsub_round_ph(__U, __A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m512h test_mm512_fnmadd_round_ph(__m512h __A, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_fnmadd_round_ph + // CHECK: fneg + // CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512 + return _mm512_fnmadd_round_ph(__A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m512h test_mm512_mask3_fnmadd_round_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) { + // CHECK-LABEL: @test_mm512_mask3_fnmadd_round_ph + // CHECK: fneg + // CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512 + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}} + return _mm512_mask3_fnmadd_round_ph(__A, __B, __C, __U, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m512h test_mm512_maskz_fnmadd_round_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_maskz_fnmadd_round_ph + // CHECK: fneg + // CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512 + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> zeroinitializer + return _mm512_maskz_fnmadd_round_ph(__U, __A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m512h test_mm512_fnmsub_round_ph(__m512h __A, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_fnmsub_round_ph + // CHECK: fneg + // CHECK: fneg + // CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512 + return _mm512_fnmsub_round_ph(__A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m512h test_mm512_maskz_fnmsub_round_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_maskz_fnmsub_round_ph + // CHECK: fneg + // CHECK: fneg + // CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512 + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> zeroinitializer + return _mm512_maskz_fnmsub_round_ph(__U, __A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m512h test_mm512_fmadd_ph(__m512h __A, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_fmadd_ph + // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}) + return _mm512_fmadd_ph(__A, __B, __C); +} + +__m512h test_mm512_mask_fmadd_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_mask_fmadd_ph + // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}) + return 
_mm512_mask_fmadd_ph(__A, __U, __B, __C); +} + +__m512h test_mm512_mask3_fmadd_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) { + // CHECK-LABEL: @test_mm512_mask3_fmadd_ph + // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}) + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}} + return _mm512_mask3_fmadd_ph(__A, __B, __C, __U); +} + +__m512h test_mm512_maskz_fmadd_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_maskz_fmadd_ph + // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}) + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> zeroinitializer + return _mm512_maskz_fmadd_ph(__U, __A, __B, __C); +} + +__m512h test_mm512_fmsub_ph(__m512h __A, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_fmsub_ph + // CHECK: fneg + // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}) + return _mm512_fmsub_ph(__A, __B, __C); +} + +__m512h test_mm512_mask_fmsub_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_mask_fmsub_ph + // CHECK: fneg + // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}) + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}} + return _mm512_mask_fmsub_ph(__A, __U, __B, __C); +} + +__m512h test_mm512_maskz_fmsub_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_maskz_fmsub_ph + // CHECK: fneg + // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}) + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> zeroinitializer + return _mm512_maskz_fmsub_ph(__U, __A, __B, __C); +} + +__m512h test_mm512_fnmadd_ph(__m512h __A, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_fnmadd_ph + // CHECK: fneg + // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}) + return _mm512_fnmadd_ph(__A, __B, __C); +} + +__m512h test_mm512_mask3_fnmadd_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) { + // CHECK-LABEL: @test_mm512_mask3_fnmadd_ph + // CHECK: fneg + // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}) + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}} + return _mm512_mask3_fnmadd_ph(__A, __B, __C, __U); +} + +__m512h test_mm512_maskz_fnmadd_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_maskz_fnmadd_ph + // CHECK: fneg + // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}) + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> zeroinitializer + return _mm512_maskz_fnmadd_ph(__U, __A, __B, __C); +} + +__m512h test_mm512_fnmsub_ph(__m512h __A, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_fnmsub_ph + // CHECK: fneg + // CHECK: fneg + // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}) + return _mm512_fnmsub_ph(__A, __B, 
__C); +} + +__m512h test_mm512_maskz_fnmsub_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_maskz_fnmsub_ph + // CHECK: fneg + // CHECK: fneg + // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}) + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> zeroinitializer + return _mm512_maskz_fnmsub_ph(__U, __A, __B, __C); +} + +__m512h test_mm512_fmaddsub_round_ph(__m512h __A, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_fmaddsub_round_ph + // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.512 + return _mm512_fmaddsub_round_ph(__A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m512h test_mm512_mask_fmaddsub_round_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_mask_fmaddsub_round_ph + // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.512 + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}} + return _mm512_mask_fmaddsub_round_ph(__A, __U, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m512h test_mm512_mask3_fmaddsub_round_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) { + // CHECK-LABEL: @test_mm512_mask3_fmaddsub_round_ph + // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.512 + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}} + return _mm512_mask3_fmaddsub_round_ph(__A, __B, __C, __U, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m512h test_mm512_maskz_fmaddsub_round_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_maskz_fmaddsub_round_ph + // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.512 + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> zeroinitializer + return _mm512_maskz_fmaddsub_round_ph(__U, __A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m512h test_mm512_fmsubadd_round_ph(__m512h __A, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_fmsubadd_round_ph + // CHECK: fneg + // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.512 + return _mm512_fmsubadd_round_ph(__A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m512h test_mm512_mask_fmsubadd_round_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_mask_fmsubadd_round_ph + // CHECK: fneg + // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.512 + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}} + return _mm512_mask_fmsubadd_round_ph(__A, __U, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m512h test_mm512_maskz_fmsubadd_round_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_maskz_fmsubadd_round_ph + // CHECK: fneg + // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.512 + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> zeroinitializer + return _mm512_maskz_fmsubadd_round_ph(__U, __A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m512h test_mm512_fmaddsub_ph(__m512h __A, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_fmaddsub_ph + // CHECK-NOT: fneg + // CHECK: call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}, i32 4) + return 
_mm512_fmaddsub_ph(__A, __B, __C); +} + +__m512h test_mm512_mask_fmaddsub_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_mask_fmaddsub_ph + // CHECK-NOT: fneg + // CHECK: call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}, i32 4) + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}} + return _mm512_mask_fmaddsub_ph(__A, __U, __B, __C); +} + +__m512h test_mm512_mask3_fmaddsub_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) { + // CHECK-LABEL: @test_mm512_mask3_fmaddsub_ph + // CHECK-NOT: fneg + // CHECK: call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}, i32 4) + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}} + return _mm512_mask3_fmaddsub_ph(__A, __B, __C, __U); +} + +__m512h test_mm512_maskz_fmaddsub_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_maskz_fmaddsub_ph + // CHECK-NOT: fneg + // CHECK: call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}, i32 4) + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> zeroinitializer + return _mm512_maskz_fmaddsub_ph(__U, __A, __B, __C); +} + +__m512h test_mm512_fmsubadd_ph(__m512h __A, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_fmsubadd_ph + // CHECK: [[NEG:%.+]] = fneg + // CHECK: call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> [[NEG]], i32 4) + return _mm512_fmsubadd_ph(__A, __B, __C); +} + +__m512h test_mm512_mask_fmsubadd_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_mask_fmsubadd_ph + // CHECK: [[NEG:%.+]] = fneg + // CHECK: call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> [[NEG]], i32 4) + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}} + return _mm512_mask_fmsubadd_ph(__A, __U, __B, __C); +} + +__m512h test_mm512_maskz_fmsubadd_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_maskz_fmsubadd_ph + // CHECK: [[NEG:%.+]] = fneg + // CHECK: call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> [[NEG]], i32 4) + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> zeroinitializer + return _mm512_maskz_fmsubadd_ph(__U, __A, __B, __C); +} + +__m512h test_mm512_mask3_fmsub_round_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) { + // CHECK-LABEL: @test_mm512_mask3_fmsub_round_ph + // CHECK: fneg + // CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512 + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}} + return _mm512_mask3_fmsub_round_ph(__A, __B, __C, __U, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m512h test_mm512_mask3_fmsub_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) { + // CHECK-LABEL: @test_mm512_mask3_fmsub_ph + // CHECK: fneg + // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}) + // CHECK: bitcast i32 %{{.*}} 
to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}} + return _mm512_mask3_fmsub_ph(__A, __B, __C, __U); +} + +__m512h test_mm512_mask3_fmsubadd_round_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) { + // CHECK-LABEL: @test_mm512_mask3_fmsubadd_round_ph + // CHECK: fneg + // CHECK: @llvm.x86.avx512fp16.vfmaddsub.ph.512 + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}} + return _mm512_mask3_fmsubadd_round_ph(__A, __B, __C, __U, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m512h test_mm512_mask3_fmsubadd_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) { + // CHECK-LABEL: @test_mm512_mask3_fmsubadd_ph + // CHECK: [[NEG:%.+]] = fneg + // CHECK: call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> [[NEG]], i32 4) + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}} + return _mm512_mask3_fmsubadd_ph(__A, __B, __C, __U); +} + +__m512h test_mm512_mask_fnmadd_round_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_mask_fnmadd_round_ph + // CHECK: fneg + // CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512 + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}} + return _mm512_mask_fnmadd_round_ph(__A, __U, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m512h test_mm512_mask_fnmadd_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_mask_fnmadd_ph + // CHECK: fneg + // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}) + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}} + return _mm512_mask_fnmadd_ph(__A, __U, __B, __C); +} + +__m512h test_mm512_mask_fnmsub_round_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_mask_fnmsub_round_ph + // CHECK: fneg + // CHECK: fneg + // CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512 + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}} + return _mm512_mask_fnmsub_round_ph(__A, __U, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m512h test_mm512_mask3_fnmsub_round_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) { + // CHECK-LABEL: @test_mm512_mask3_fnmsub_round_ph + // CHECK: fneg + // CHECK: fneg + // CHECK: @llvm.x86.avx512fp16.vfmadd.ph.512 + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}} + return _mm512_mask3_fnmsub_round_ph(__A, __B, __C, __U, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m512h test_mm512_mask_fnmsub_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) { + // CHECK-LABEL: @test_mm512_mask_fnmsub_ph + // CHECK: fneg + // CHECK: fneg + // CHECK: call <32 x half> @llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}) + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}} + return _mm512_mask_fnmsub_ph(__A, __U, __B, __C); +} + +__m512h test_mm512_mask3_fnmsub_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) { + // CHECK-LABEL: @test_mm512_mask3_fnmsub_ph + // CHECK: fneg + // CHECK: fneg + // CHECK: call <32 x half> 
@llvm.fma.v32f16(<32 x half> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}}) + // CHECK: bitcast i32 %{{.*}} to <32 x i1> + // CHECK: select <32 x i1> %{{.*}}, <32 x half> %{{.*}}, <32 x half> %{{.*}} + return _mm512_mask3_fnmsub_ph(__A, __B, __C, __U); +} + +__m128h test_mm_fmadd_sh(__m128h __W, __m128h __A, __m128h __B) { + // CHECK-LABEL: @test_mm_fmadd_sh + // CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0 + // CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[FMA:%.+]] = call half @llvm.fma.f16(half [[A]], half [[B]], half [[C]]) + return _mm_fmadd_sh(__W, __A, __B); +} + +__m128h test_mm_mask_fmadd_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) { + // CHECK-LABEL: @test_mm_mask_fmadd_sh + // CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0 + // CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[FMA:%.+]] = call half @llvm.fma.f16(half [[A]], half [[B]], half [[C]]) + // CHECK-NEXT: bitcast i8 %{{.*}} to <8 x i1> + // CHECK-NEXT: extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK-NEXT: [[SEL:%.+]] = select i1 %{{.*}}, half [[FMA]], half [[A]] + // CHECK-NEXT: insertelement <8 x half> [[ORIGA]], half [[SEL]], i64 0 + return _mm_mask_fmadd_sh(__W, __U, __A, __B); +} + +__m128h test_mm_fmadd_round_sh(__m128h __A, __m128h __B, __m128h __C) { + // CHECK-LABEL: @test_mm_fmadd_round_sh + // CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0 + // CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[FMA:%.+]] = call half @llvm.x86.avx512fp16.vfmadd.f16(half [[A]], half [[B]], half [[C]], i32 11) + // CHECK-NEXT: insertelement <8 x half> [[ORIGA]], half [[FMA]], i64 0 + return _mm_fmadd_round_sh(__A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m128h test_mm_mask_fmadd_round_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) { + // CHECK-LABEL: @test_mm_mask_fmadd_round_sh + // CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0 + // CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[FMA:%.+]] = call half @llvm.x86.avx512fp16.vfmadd.f16(half [[A]], half [[B]], half [[C]], i32 11) + // CHECK-NEXT: bitcast i8 %{{.*}} to <8 x i1> + // CHECK-NEXT: extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK-NEXT: [[SEL:%.+]] = select i1 %{{.*}}, half [[FMA]], half [[A]] + // CHECK-NEXT: insertelement <8 x half> [[ORIGA]], half [[SEL]], i64 0 + return _mm_mask_fmadd_round_sh(__W, __U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m128h test_mm_maskz_fmadd_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + // CHECK-LABEL: @test_mm_maskz_fmadd_sh + // CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0 + // CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[FMA:%.+]] = call half @llvm.fma.f16(half [[A]], half [[B]], half [[C]]) + // CHECK-NEXT: bitcast i8 %{{.*}} to <8 x i1> + // CHECK-NEXT: extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK-NEXT: [[SEL:%.+]] = select i1 %{{.*}}, half [[FMA]], half 0xH0000 + // CHECK-NEXT: insertelement <8 x half> [[ORIGA]], half [[SEL]], i64 0 + 
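For reference, a minimal usage sketch of the new masked scalar FP16 FMA intrinsics added above (illustrative only; it assumes a toolchain with AVX512-FP16 enabled, e.g. -mavx512fp16, and the helper name is made up, not part of the patch):

#include <immintrin.h>

/* Hypothetical helper (sketch): lane 0 becomes w[0]*a[0]+b[0] when bit 0 of
 * m is set, otherwise it stays w[0]; lanes 1..7 are copied from w, matching
 * the select/insertelement pattern checked in the tests above. */
static __m128h scalar_fma_sketch(__m128h w, __m128h a, __m128h b, __mmask8 m) {
  return _mm_mask_fmadd_sh(w, m, a, b);
}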
return _mm_maskz_fmadd_sh(__U, __A, __B, __C); +} + +__m128h test_mm_maskz_fmadd_round_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + // CHECK-LABEL: @test_mm_maskz_fmadd_round_sh + // CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0 + // CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[FMA:%.+]] = call half @llvm.x86.avx512fp16.vfmadd.f16(half [[A]], half [[B]], half [[C]], i32 11) + // CHECK-NEXT: bitcast i8 %{{.*}} to <8 x i1> + // CHECK-NEXT: extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK-NEXT: [[SEL:%.+]] = select i1 %{{.*}}, half [[FMA]], half 0xH0000 + // CHECK-NEXT: insertelement <8 x half> [[ORIGA]], half [[SEL]], i64 0 + return _mm_maskz_fmadd_round_sh(__U, __A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m128h test_mm_mask3_fmadd_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) { + // CHECK-LABEL: @test_mm_mask3_fmadd_sh + // CHECK: [[A:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> [[ORIGC:%.+]], i64 0 + // CHECK-NEXT: [[FMA:%.+]] = call half @llvm.fma.f16(half [[A]], half [[B]], half [[C]]) + // CHECK-NEXT: bitcast i8 %{{.*}} to <8 x i1> + // CHECK-NEXT: extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK-NEXT: [[SEL:%.+]] = select i1 %{{.*}}, half [[FMA]], half [[C]] + // CHECK-NEXT: insertelement <8 x half> [[ORIGC]], half [[SEL]], i64 0 + return _mm_mask3_fmadd_sh(__W, __X, __Y, __U); +} + +__m128h test_mm_mask3_fmadd_round_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) { + // CHECK-LABEL: @test_mm_mask3_fmadd_round_sh + // CHECK: [[A:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> [[ORIGC:%.+]], i64 0 + // CHECK-NEXT: [[FMA:%.+]] = call half @llvm.x86.avx512fp16.vfmadd.f16(half [[A]], half [[B]], half [[C]], i32 11) + // CHECK-NEXT: bitcast i8 %{{.*}} to <8 x i1> + // CHECK-NEXT: extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK-NEXT: [[SEL:%.+]] = select i1 %{{.*}}, half [[FMA]], half [[C]] + // CHECK-NEXT: insertelement <8 x half> [[ORIGC]], half [[SEL]], i64 0 + return _mm_mask3_fmadd_round_sh(__W, __X, __Y, __U, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m128h test_mm_fmsub_sh(__m128h __W, __m128h __A, __m128h __B) { + // CHECK-LABEL: @test_mm_fmsub_sh + // CHECK: %{{.*}} = fneg <8 x half> %{{.*}} + // CHECK-NEXT: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: %{{.*}} = call half @llvm.fma.f16(half %{{.*}}, half %{{.*}}, half %{{.*}}) + // CHECK-NEXT: %{{.*}} = insertelement <8 x half> %{{.*}}, half %{{.*}}, i64 0 + // CHECK-NEXT: ret <8 x half> %{{.*}} + return _mm_fmsub_sh(__W, __A, __B); +} + +__m128h test_mm_mask_fmsub_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) { + // CHECK-LABEL: @test_mm_mask_fmsub_sh + // CHECK: %{{.*}} = fneg <8 x half> %{{.*}} + // CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: %{{.*}} = call half @llvm.fma.f16(half %{{.*}}, half %{{.*}}, half %{{.*}}) + // CHECK-NEXT: %{{.*}} = bitcast i8 %{{.*}} 
to <8 x i1> + // CHECK-NEXT: %{{.*}} = extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK-NEXT: %{{.*}} = select i1 %{{.*}}, half %{{.*}}, half %{{.*}} + // CHECK-NEXT: %{{.*}} = insertelement <8 x half> %{{.*}}, half %{{.*}}, i64 0 + // CHECK-NEXT: ret <8 x half> %{{.*}} + return _mm_mask_fmsub_sh(__W, __U, __A, __B); +} + +__m128h test_mm_fmsub_round_sh(__m128h __A, __m128h __B, __m128h __C) { + // CHECK-LABEL: @test_mm_fmsub_round_sh + // CHECK: %{{.*}} = fneg <8 x half> %{{.*}} + // CHECK-NEXT: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: %{{.*}} = call half @llvm.x86.avx512fp16.vfmadd.f16(half %{{.*}}, half %{{.*}}, half %{{.*}}, i32 11) + // CHECK-NEXT: %{{.*}} = insertelement <8 x half> %{{.*}}, half %{{.*}}, i64 0 + // CHECK-NEXT: ret <8 x half> %{{.*}} + return _mm_fmsub_round_sh(__A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m128h test_mm_mask_fmsub_round_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) { + // CHECK-LABEL: @test_mm_mask_fmsub_round_sh + // CHECK: %{{.*}} = fneg <8 x half> %{{.*}} + // CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: %{{.*}} = call half @llvm.x86.avx512fp16.vfmadd.f16(half %{{.*}}, half %{{.*}}, half %{{.*}}, i32 11) + // CHECK-NEXT: %{{.*}} = bitcast i8 %{{.*}} to <8 x i1> + // CHECK-NEXT: %{{.*}} = extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK-NEXT: %{{.*}} = select i1 %{{.*}}, half %{{.*}}, half %{{.*}} + // CHECK-NEXT: %{{.*}} = insertelement <8 x half> %{{.*}}, half %{{.*}}, i64 0 + // CHECK-NEXT: ret <8 x half> %{{.*}} + return _mm_mask_fmsub_round_sh(__W, __U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m128h test_mm_maskz_fmsub_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + // CHECK-LABEL: @test_mm_maskz_fmsub_sh + // CHECK: %{{.*}} = fneg <8 x half> %{{.*}} + // CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: %{{.*}} = call half @llvm.fma.f16(half %{{.*}}, half %{{.*}}, half %{{.*}}) + // CHECK-NEXT: %{{.*}} = bitcast i8 %{{.*}} to <8 x i1> + // CHECK-NEXT: %{{.*}} = extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK-NEXT: %{{.*}} = select i1 %{{.*}}, half %{{.*}}, half 0xH0000 + // CHECK-NEXT: %{{.*}} = insertelement <8 x half> %{{.*}}, half %{{.*}}, i64 0 + // CHECK-NEXT: ret <8 x half> %{{.*}} + return _mm_maskz_fmsub_sh(__U, __A, __B, __C); +} + +__m128h test_mm_maskz_fmsub_round_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + // CHECK-LABEL: @test_mm_maskz_fmsub_round_sh + // CHECK: %{{.*}} = fneg <8 x half> %{{.*}} + // CHECK: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: %{{.*}} = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: %{{.*}} = call half @llvm.x86.avx512fp16.vfmadd.f16(half %{{.*}}, half %{{.*}}, half %{{.*}}, i32 11) + // CHECK-NEXT: %{{.*}} = bitcast i8 %{{.*}} to <8 x i1> + // CHECK-NEXT: %{{.*}} = extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK-NEXT: %{{.*}} = select i1 %{{.*}}, half %{{.*}}, half 0xH0000 + // CHECK-NEXT: %{{.*}} = insertelement <8 x half> %{{.*}}, half %{{.*}}, 
i64 0 + // CHECK-NEXT: ret <8 x half> %{{.*}} + return _mm_maskz_fmsub_round_sh(__U, __A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m128h test_mm_mask3_fmsub_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) { + // CHECK-LABEL: @test_mm_mask3_fmsub_sh + // CHECK: [[NEG:%.+]] = fneg + // CHECK: [[A:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[FMA:%.+]] = call half @llvm.fma.f16(half [[A]], half [[B]], half [[C]]) + // CHECK-NEXT: [[C2:%.+]] = extractelement <8 x half> [[ORIGC:%.+]], i64 0 + // CHECK-NEXT: bitcast i8 %{{.*}} to <8 x i1> + // CHECK-NEXT: extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK-NEXT: [[SEL:%.+]] = select i1 %{{.*}}, half [[FMA]], half [[C2]] + // CHECK-NEXT: insertelement <8 x half> [[ORIGC]], half [[SEL]], i64 0 + return _mm_mask3_fmsub_sh(__W, __X, __Y, __U); +} + +__m128h test_mm_mask3_fmsub_round_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) { + // CHECK-LABEL: @test_mm_mask3_fmsub_round_sh + // CHECK: [[NEG:%.+]] = fneg + // CHECK: [[A:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[FMA:%.+]] = call half @llvm.x86.avx512fp16.vfmadd.f16(half [[A]], half [[B]], half [[C]], i32 11) + // CHECK-NEXT: [[C2:%.+]] = extractelement <8 x half> [[ORIGC:%.+]], i64 0 + // CHECK-NEXT: bitcast i8 %{{.*}} to <8 x i1> + // CHECK-NEXT: extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK-NEXT: [[SEL:%.+]] = select i1 %{{.*}}, half [[FMA]], half [[C2]] + // CHECK-NEXT: insertelement <8 x half> [[ORIGC]], half [[SEL]], i64 0 + return _mm_mask3_fmsub_round_sh(__W, __X, __Y, __U, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m128h test_mm_fnmadd_sh(__m128h __W, __m128h __A, __m128h __B) { + // CHECK-LABEL: @test_mm_fnmadd_sh + // CHECK: [[NEG:%.+]] = fneg + // CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0 + // CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[FMA:%.+]] = call half @llvm.fma.f16(half [[A]], half [[B]], half [[C]]) + return _mm_fnmadd_sh(__W, __A, __B); +} + +__m128h test_mm_mask_fnmadd_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) { + // CHECK-LABEL: @test_mm_mask_fnmadd_sh + // CHECK: [[NEG:%.+]] = fneg + // CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0 + // CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[FMA:%.+]] = call half @llvm.fma.f16(half [[A]], half [[B]], half [[C]]) + // CHECK-NEXT: bitcast i8 %{{.*}} to <8 x i1> + // CHECK-NEXT: extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK-NEXT: [[SEL:%.+]] = select i1 %{{.*}}, half [[FMA]], half [[A]] + // CHECK-NEXT: insertelement <8 x half> [[ORIGA]], half [[SEL]], i64 0 + return _mm_mask_fnmadd_sh(__W, __U, __A, __B); +} + +__m128h test_mm_fnmadd_round_sh(__m128h __A, __m128h __B, __m128h __C) { + // CHECK-LABEL: @test_mm_fnmadd_round_sh + // CHECK: [[NEG:%.+]] = fneg + // CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0 + // CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[FMA:%.+]] = call 
half @llvm.x86.avx512fp16.vfmadd.f16(half [[A]], half [[B]], half [[C]], i32 11) + // CHECK-NEXT: insertelement <8 x half> [[ORIGA]], half [[FMA]], i64 0 + return _mm_fnmadd_round_sh(__A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m128h test_mm_mask_fnmadd_round_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) { + // CHECK-LABEL: @test_mm_mask_fnmadd_round_sh + // CHECK: [[NEG:%.+]] = fneg + // CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0 + // CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[FMA:%.+]] = call half @llvm.x86.avx512fp16.vfmadd.f16(half [[A]], half [[B]], half [[C]], i32 11) + // CHECK-NEXT: bitcast i8 %{{.*}} to <8 x i1> + // CHECK-NEXT: extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK-NEXT: [[SEL:%.+]] = select i1 %{{.*}}, half [[FMA]], half [[A]] + // CHECK-NEXT: insertelement <8 x half> [[ORIGA]], half [[SEL]], i64 0 + return _mm_mask_fnmadd_round_sh(__W, __U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m128h test_mm_maskz_fnmadd_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + // CHECK-LABEL: @test_mm_maskz_fnmadd_sh + // CHECK: [[NEG:%.+]] = fneg + // CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0 + // CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[FMA:%.+]] = call half @llvm.fma.f16(half [[A]], half [[B]], half [[C]]) + // CHECK-NEXT: bitcast i8 %{{.*}} to <8 x i1> + // CHECK-NEXT: extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK-NEXT: [[SEL:%.+]] = select i1 %{{.*}}, half [[FMA]], half 0xH0000 + // CHECK-NEXT: insertelement <8 x half> [[ORIGA]], half [[SEL]], i64 0 + return _mm_maskz_fnmadd_sh(__U, __A, __B, __C); +} + +__m128h test_mm_maskz_fnmadd_round_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + // CHECK-LABEL: @test_mm_maskz_fnmadd_round_sh + // CHECK: [[NEG:%.+]] = fneg + // CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0 + // CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[FMA:%.+]] = call half @llvm.x86.avx512fp16.vfmadd.f16(half [[A]], half [[B]], half [[C]], i32 11) + // CHECK-NEXT: bitcast i8 %{{.*}} to <8 x i1> + // CHECK-NEXT: extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK-NEXT: [[SEL:%.+]] = select i1 %{{.*}}, half [[FMA]], half 0xH0000 + // CHECK-NEXT: insertelement <8 x half> [[ORIGA]], half [[SEL]], i64 0 + return _mm_maskz_fnmadd_round_sh(__U, __A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m128h test_mm_mask3_fnmadd_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) { + // CHECK-LABEL: @test_mm_mask3_fnmadd_sh + // CHECK: [[NEG:%.+]] = fneg + // CHECK: [[A:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> [[ORIGC:%.+]], i64 0 + // CHECK-NEXT: [[FMA:%.+]] = call half @llvm.fma.f16(half [[A]], half [[B]], half [[C]]) + // CHECK-NEXT: bitcast i8 %{{.*}} to <8 x i1> + // CHECK-NEXT: extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK-NEXT: [[SEL:%.+]] = select i1 %{{.*}}, half [[FMA]], half [[C]] + // CHECK-NEXT: insertelement <8 x half> [[ORIGC]], half [[SEL]], i64 0 + return _mm_mask3_fnmadd_sh(__W, __X, __Y, __U); +} + +__m128h test_mm_mask3_fnmadd_round_sh(__m128h __W, 
__m128h __X, __m128h __Y, __mmask8 __U) { + // CHECK-LABEL: @test_mm_mask3_fnmadd_round_sh + // CHECK: [[NEG:%.+]] = fneg + // CHECK: [[A:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> [[ORIGC:%.+]], i64 0 + // CHECK-NEXT: [[FMA:%.+]] = call half @llvm.x86.avx512fp16.vfmadd.f16(half [[A]], half [[B]], half [[C]], i32 11) + // CHECK-NEXT: bitcast i8 %{{.*}} to <8 x i1> + // CHECK-NEXT: extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK-NEXT: [[SEL:%.+]] = select i1 %{{.*}}, half [[FMA]], half [[C]] + // CHECK-NEXT: insertelement <8 x half> [[ORIGC]], half [[SEL]], i64 0 + return _mm_mask3_fnmadd_round_sh(__W, __X, __Y, __U, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m128h test_mm_fnmsub_sh(__m128h __W, __m128h __A, __m128h __B) { + // CHECK-LABEL: @test_mm_fnmsub_sh + // CHECK: [[NEG:%.+]] = fneg + // CHECK: [[NEG2:%.+]] = fneg + // CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0 + // CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[FMA:%.+]] = call half @llvm.fma.f16(half [[A]], half [[B]], half [[C]]) + return _mm_fnmsub_sh(__W, __A, __B); +} + +__m128h test_mm_mask_fnmsub_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) { + // CHECK-LABEL: @test_mm_mask_fnmsub_sh + // CHECK: [[NEG:%.+]] = fneg + // CHECK: [[NEG2:%.+]] = fneg + // CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0 + // CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[FMA:%.+]] = call half @llvm.fma.f16(half [[A]], half [[B]], half [[C]]) + // CHECK-NEXT: bitcast i8 %{{.*}} to <8 x i1> + // CHECK-NEXT: extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK-NEXT: [[SEL:%.+]] = select i1 %{{.*}}, half [[FMA]], half [[A]] + // CHECK-NEXT: insertelement <8 x half> [[ORIGA]], half [[SEL]], i64 0 + return _mm_mask_fnmsub_sh(__W, __U, __A, __B); +} + +__m128h test_mm_fnmsub_round_sh(__m128h __A, __m128h __B, __m128h __C) { + // CHECK-LABEL: @test_mm_fnmsub_round_sh + // CHECK: [[NEG:%.+]] = fneg + // CHECK: [[NEG2:%.+]] = fneg + // CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0 + // CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[FMA:%.+]] = call half @llvm.x86.avx512fp16.vfmadd.f16(half [[A]], half [[B]], half [[C]], i32 11) + // CHECK-NEXT: insertelement <8 x half> [[ORIGA]], half [[FMA]], i64 0 + return _mm_fnmsub_round_sh(__A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m128h test_mm_mask_fnmsub_round_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) { + // CHECK-LABEL: @test_mm_mask_fnmsub_round_sh + // CHECK: [[NEG:%.+]] = fneg + // CHECK: [[NEG2:%.+]] = fneg + // CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0 + // CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[FMA:%.+]] = call half @llvm.x86.avx512fp16.vfmadd.f16(half [[A]], half [[B]], half [[C]], i32 11) + // CHECK-NEXT: bitcast i8 %{{.*}} to <8 x i1> + // CHECK-NEXT: extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK-NEXT: [[SEL:%.+]] = select i1 %{{.*}}, half [[FMA]], half [[A]] + // CHECK-NEXT: insertelement <8 x half> [[ORIGA]], 
half [[SEL]], i64 0 + return _mm_mask_fnmsub_round_sh(__W, __U, __A, __B, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m128h test_mm_maskz_fnmsub_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + // CHECK-LABEL: @test_mm_maskz_fnmsub_sh + // CHECK: [[NEG:%.+]] = fneg + // CHECK: [[NEG2:%.+]] = fneg + // CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0 + // CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[FMA:%.+]] = call half @llvm.fma.f16(half [[A]], half [[B]], half [[C]]) + // CHECK-NEXT: bitcast i8 %{{.*}} to <8 x i1> + // CHECK-NEXT: extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK-NEXT: [[SEL:%.+]] = select i1 %{{.*}}, half [[FMA]], half 0xH0000 + // CHECK-NEXT: insertelement <8 x half> [[ORIGA]], half [[SEL]], i64 0 + return _mm_maskz_fnmsub_sh(__U, __A, __B, __C); +} + +__m128h test_mm_maskz_fnmsub_round_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + // CHECK-LABEL: @test_mm_maskz_fnmsub_round_sh + // CHECK: [[NEG:%.+]] = fneg + // CHECK: [[NEG2:%.+]] = fneg + // CHECK: [[A:%.+]] = extractelement <8 x half> [[ORIGA:%.+]], i64 0 + // CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[FMA:%.+]] = call half @llvm.x86.avx512fp16.vfmadd.f16(half [[A]], half [[B]], half [[C]], i32 11) + // CHECK-NEXT: bitcast i8 %{{.*}} to <8 x i1> + // CHECK-NEXT: extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK-NEXT: [[SEL:%.+]] = select i1 %{{.*}}, half [[FMA]], half 0xH0000 + // CHECK-NEXT: insertelement <8 x half> [[ORIGA]], half [[SEL]], i64 0 + return _mm_maskz_fnmsub_round_sh(__U, __A, __B, __C, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + +__m128h test_mm_mask3_fnmsub_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) { + // CHECK-LABEL: @test_mm_mask3_fnmsub_sh + // CHECK: [[NEG:%.+]] = fneg + // CHECK: [[NEG2:%.+]] = fneg + // CHECK: [[A:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[FMA:%.+]] = call half @llvm.fma.f16(half [[A]], half [[B]], half [[C]]) + // CHECK-NEXT: [[C2:%.+]] = extractelement <8 x half> [[ORIGC:%.+]], i64 0 + // CHECK-NEXT: bitcast i8 %{{.*}} to <8 x i1> + // CHECK-NEXT: extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK-NEXT: [[SEL:%.+]] = select i1 %{{.*}}, half [[FMA]], half [[C2]] + // CHECK-NEXT: insertelement <8 x half> [[ORIGC]], half [[SEL]], i64 0 + return _mm_mask3_fnmsub_sh(__W, __X, __Y, __U); +} + +__m128h test_mm_mask3_fnmsub_round_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) { + // CHECK-LABEL: @test_mm_mask3_fnmsub_round_sh + // CHECK: [[NEG:%.+]] = fneg + // CHECK: [[NEG2:%.+]] = fneg + // CHECK: [[A:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[B:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[C:%.+]] = extractelement <8 x half> %{{.*}}, i64 0 + // CHECK-NEXT: [[FMA:%.+]] = call half @llvm.x86.avx512fp16.vfmadd.f16(half [[A]], half [[B]], half [[C]], i32 11) + // CHECK-NEXT: [[C2:%.+]] = extractelement <8 x half> [[ORIGC:%.+]], i64 0 + // CHECK-NEXT: bitcast i8 %{{.*}} to <8 x i1> + // CHECK-NEXT: extractelement <8 x i1> %{{.*}}, i64 0 + // CHECK-NEXT: [[SEL:%.+]] = select i1 %{{.*}}, half [[FMA]], half [[C2]] + // CHECK-NEXT: insertelement <8 x half> [[ORIGC]], half [[SEL]], i64 0 + 
return _mm_mask3_fnmsub_round_sh(__W, __X, __Y, __U, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC); +} + _Float16 test_mm512_reduce_add_ph(__m512h __W) { // CHECK-LABEL: @test_mm512_reduce_add_ph // CHECK: call reassoc half @llvm.vector.reduce.fadd.v32f16(half 0xH8000, <32 x half> %{{.*}}) diff --git a/clang/test/CodeGen/X86/avx512vlfp16-builtins.c b/clang/test/CodeGen/X86/avx512vlfp16-builtins.c index cb99d655f21c6..8644309b63224 100644 --- a/clang/test/CodeGen/X86/avx512vlfp16-builtins.c +++ b/clang/test/CodeGen/X86/avx512vlfp16-builtins.c @@ -2321,6 +2321,411 @@ __m128h test_mm256_maskz_cvtxps_ph(__mmask8 A, __m256 B) { return _mm256_maskz_cvtxps_ph(A, B); } +__m128h test_mm_fmadd_ph(__m128h __A, __m128h __B, __m128h __C) { + // CHECK-LABEL: @test_mm_fmadd_ph + // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}) + return _mm_fmadd_ph(__A, __B, __C); +} + +__m128h test_mm_mask_fmadd_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) { + // CHECK-LABEL: @test_mm_mask_fmadd_ph + // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}) + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}} + return _mm_mask_fmadd_ph(__A, __U, __B, __C); +} + +__m128h test_mm_fmsub_ph(__m128h __A, __m128h __B, __m128h __C) { + // CHECK-LABEL: @test_mm_fmsub_ph + // CHECK: fneg + // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}) + return _mm_fmsub_ph(__A, __B, __C); +} + +__m128h test_mm_mask_fmsub_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) { + // CHECK-LABEL: @test_mm_mask_fmsub_ph + // CHECK: fneg + // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}) + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}} + return _mm_mask_fmsub_ph(__A, __U, __B, __C); +} + +__m128h test_mm_mask3_fmadd_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) { + // CHECK-LABEL: @test_mm_mask3_fmadd_ph + // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}) + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}} + return _mm_mask3_fmadd_ph(__A, __B, __C, __U); +} + +__m128h test_mm_mask3_fnmadd_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) { + // CHECK-LABEL: @test_mm_mask3_fnmadd_ph + // CHECK: fneg + // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}) + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}} + return _mm_mask3_fnmadd_ph(__A, __B, __C, __U); +} + +__m128h test_mm_maskz_fmadd_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + // CHECK-LABEL: @test_mm_maskz_fmadd_ph + // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}) + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}} + return _mm_maskz_fmadd_ph(__U, __A, __B, __C); +} + +__m128h test_mm_maskz_fmsub_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + // CHECK-LABEL: @test_mm_maskz_fmsub_ph + // CHECK: fneg + // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}) + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: select 
<8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}} + return _mm_maskz_fmsub_ph(__U, __A, __B, __C); +} + +__m128h test_mm_maskz_fnmadd_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + // CHECK-LABEL: @test_mm_maskz_fnmadd_ph + // CHECK: fneg + // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}) + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}} + return _mm_maskz_fnmadd_ph(__U, __A, __B, __C); +} + +__m128h test_mm_maskz_fnmsub_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + // CHECK-LABEL: @test_mm_maskz_fnmsub_ph + // CHECK: fneg + // CHECK: fneg + // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}) + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}} + return _mm_maskz_fnmsub_ph(__U, __A, __B, __C); +} + +__m256h test_mm256_fmadd_ph(__m256h __A, __m256h __B, __m256h __C) { + // CHECK-LABEL: @test_mm256_fmadd_ph + // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}) + return _mm256_fmadd_ph(__A, __B, __C); +} + +__m256h test_mm256_mask_fmadd_ph(__m256h __A, __mmask8 __U, __m256h __B, __m256h __C) { + // CHECK-LABEL: @test_mm256_mask_fmadd_ph + // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}) + // CHECK: bitcast i16 %{{.*}} to <16 x i1> + // CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}} + return _mm256_mask_fmadd_ph(__A, __U, __B, __C); +} + +__m256h test_mm256_fmsub_ph(__m256h __A, __m256h __B, __m256h __C) { + // CHECK-LABEL: @test_mm256_fmsub_ph + // CHECK: fneg + // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}) + return _mm256_fmsub_ph(__A, __B, __C); +} + +__m256h test_mm256_mask_fmsub_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) { + // CHECK-LABEL: @test_mm256_mask_fmsub_ph + // CHECK: fneg + // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}) + // CHECK: bitcast i16 %{{.*}} to <16 x i1> + // CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}} + return _mm256_mask_fmsub_ph(__A, __U, __B, __C); +} + +__m256h test_mm256_mask3_fmadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) { + // CHECK-LABEL: @test_mm256_mask3_fmadd_ph + // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}) + // CHECK: bitcast i16 %{{.*}} to <16 x i1> + // CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}} + return _mm256_mask3_fmadd_ph(__A, __B, __C, __U); +} + +__m256h test_mm256_mask3_fnmadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) { + // CHECK-LABEL: @test_mm256_mask3_fnmadd_ph + // CHECK: fneg + // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}) + // CHECK: bitcast i16 %{{.*}} to <16 x i1> + // CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}} + return _mm256_mask3_fnmadd_ph(__A, __B, __C, __U); +} + +__m256h test_mm256_maskz_fmadd_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) { + // CHECK-LABEL: @test_mm256_maskz_fmadd_ph + // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}) + // CHECK: bitcast i16 %{{.*}} to <16 x i1> + // 
CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}} + return _mm256_maskz_fmadd_ph(__U, __A, __B, __C); +} + +__m256h test_mm256_maskz_fmsub_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) { + // CHECK-LABEL: @test_mm256_maskz_fmsub_ph + // CHECK: fneg + // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}) + // CHECK: bitcast i16 %{{.*}} to <16 x i1> + // CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}} + return _mm256_maskz_fmsub_ph(__U, __A, __B, __C); +} + +__m256h test_mm256_maskz_fnmadd_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) { + // CHECK-LABEL: @test_mm256_maskz_fnmadd_ph + // CHECK: fneg + // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}) + // CHECK: bitcast i16 %{{.*}} to <16 x i1> + // CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}} + return _mm256_maskz_fnmadd_ph(__U, __A, __B, __C); +} + +__m256h test_mm256_maskz_fnmsub_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) { + // CHECK-LABEL: @test_mm256_maskz_fnmsub_ph + // CHECK: fneg + // CHECK: fneg + // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}) + // CHECK: bitcast i16 %{{.*}} to <16 x i1> + // CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}} + return _mm256_maskz_fnmsub_ph(__U, __A, __B, __C); +} + +__m128h test_mm_fmaddsub_ph(__m128h __A, __m128h __B, __m128h __C) { + // CHECK-LABEL: @test_mm_fmaddsub_ph + // CHECK-NOT: fneg + // CHECK: call <8 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.128(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}) + return _mm_fmaddsub_ph(__A, __B, __C); +} + +__m128h test_mm_mask_fmaddsub_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) { + // CHECK-LABEL: @test_mm_mask_fmaddsub_ph + // CHECK-NOT: fneg + // CHECK: call <8 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.128(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}) + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}} + return _mm_mask_fmaddsub_ph(__A, __U, __B, __C); +} + +__m128h test_mm_fmsubadd_ph(__m128h __A, __m128h __B, __m128h __C) { + // CHECK-LABEL: @test_mm_fmsubadd_ph + // CHECK: [[NEG:%.+]] = fneg + // CHECK: call <8 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.128(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> [[NEG]]) + return _mm_fmsubadd_ph(__A, __B, __C); +} + +__m128h test_mm_mask_fmsubadd_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) { + // CHECK-LABEL: @test_mm_mask_fmsubadd_ph + // CHECK: [[NEG:%.+]] = fneg + // CHECK: call <8 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.128(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> [[NEG]]) + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}} + return _mm_mask_fmsubadd_ph(__A, __U, __B, __C); +} + +__m128h test_mm_mask3_fmaddsub_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) { + // CHECK-LABEL: @test_mm_mask3_fmaddsub_ph + // CHECK-NOT: fneg + // CHECK: call <8 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.128(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}) + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}} + return _mm_mask3_fmaddsub_ph(__A, __B, __C, __U); +} + +__m128h test_mm_maskz_fmaddsub_ph(__mmask8 __U, __m128h __A, __m128h __B, 
__m128h __C) { + // CHECK-LABEL: @test_mm_maskz_fmaddsub_ph + // CHECK-NOT: fneg + // CHECK: call <8 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.128(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}) + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}} + return _mm_maskz_fmaddsub_ph(__U, __A, __B, __C); +} + +__m128h test_mm_maskz_fmsubadd_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) { + // CHECK-LABEL: @test_mm_maskz_fmsubadd_ph + // CHECK: [[NEG:%.+]] = fneg + // CHECK: call <8 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.128(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> [[NEG]]) + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}} + return _mm_maskz_fmsubadd_ph(__U, __A, __B, __C); +} + +__m256h test_mm256_fmaddsub_ph(__m256h __A, __m256h __B, __m256h __C) { + // CHECK-LABEL: @test_mm256_fmaddsub_ph + // CHECK-NOT: fneg + // CHECK: call <16 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.256(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}) + return _mm256_fmaddsub_ph(__A, __B, __C); +} + +__m256h test_mm256_mask_fmaddsub_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) { + // CHECK-LABEL: @test_mm256_mask_fmaddsub_ph + // CHECK-NOT: fneg + // CHECK: call <16 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.256(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}) + // CHECK: bitcast i16 %{{.*}} to <16 x i1> + // CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}} + return _mm256_mask_fmaddsub_ph(__A, __U, __B, __C); +} + +__m256h test_mm256_fmsubadd_ph(__m256h __A, __m256h __B, __m256h __C) { + // CHECK-LABEL: @test_mm256_fmsubadd_ph + // CHECK: [[NEG:%.+]] = fneg + // CHECK: call <16 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.256(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> [[NEG]]) + return _mm256_fmsubadd_ph(__A, __B, __C); +} + +__m256h test_mm256_mask_fmsubadd_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) { + // CHECK-LABEL: @test_mm256_mask_fmsubadd_ph + // CHECK: [[NEG:%.+]] = fneg + // CHECK: call <16 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.256(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> [[NEG]]) + // CHECK: bitcast i16 %{{.*}} to <16 x i1> + // CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}} + return _mm256_mask_fmsubadd_ph(__A, __U, __B, __C); +} + +__m256h test_mm256_mask3_fmaddsub_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) { + // CHECK-LABEL: @test_mm256_mask3_fmaddsub_ph + // CHECK-NOT: fneg + // CHECK: call <16 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.256(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}) + // CHECK: bitcast i16 %{{.*}} to <16 x i1> + // CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}} + return _mm256_mask3_fmaddsub_ph(__A, __B, __C, __U); +} + +__m256h test_mm256_maskz_fmaddsub_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) { + // CHECK-LABEL: @test_mm256_maskz_fmaddsub_ph + // CHECK-NOT: fneg + // CHECK: call <16 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.256(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}) + // CHECK: bitcast i16 %{{.*}} to <16 x i1> + // CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}} + return _mm256_maskz_fmaddsub_ph(__U, __A, __B, __C); +} + +__m256h test_mm256_maskz_fmsubadd_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) { + // CHECK-LABEL: @test_mm256_maskz_fmsubadd_ph + // 
CHECK: [[NEG:%.+]] = fneg + // CHECK: call <16 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.256(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> [[NEG]]) + // CHECK: bitcast i16 %{{.*}} to <16 x i1> + // CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}} + return _mm256_maskz_fmsubadd_ph(__U, __A, __B, __C); +} + +__m128h test_mm_mask3_fmsub_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) { + // CHECK-LABEL: @test_mm_mask3_fmsub_ph + // CHECK: fneg + // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}) + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}} + return _mm_mask3_fmsub_ph(__A, __B, __C, __U); +} + +__m256h test_mm256_mask3_fmsub_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) { + // CHECK-LABEL: @test_mm256_mask3_fmsub_ph + // CHECK: fneg + // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}) + // CHECK: bitcast i16 %{{.*}} to <16 x i1> + // CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}} + return _mm256_mask3_fmsub_ph(__A, __B, __C, __U); +} + +__m128h test_mm_mask3_fmsubadd_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) { + // CHECK-LABEL: @test_mm_mask3_fmsubadd_ph + // CHECK: [[NEG:%.+]] = fneg + // CHECK: call <8 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.128(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> [[NEG]]) + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}} + return _mm_mask3_fmsubadd_ph(__A, __B, __C, __U); +} + +__m256h test_mm256_mask3_fmsubadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) { + // CHECK-LABEL: @test_mm256_mask3_fmsubadd_ph + // CHECK: [[NEG:%.+]] = fneg + // CHECK: call <16 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.256(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> [[NEG]]) + // CHECK: bitcast i16 %{{.*}} to <16 x i1> + // CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}} + return _mm256_mask3_fmsubadd_ph(__A, __B, __C, __U); +} + +__m128h test_mm_fnmadd_ph(__m128h __A, __m128h __B, __m128h __C) { + // CHECK-LABEL: @test_mm_fnmadd_ph + // CHECK: fneg + // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}) + return _mm_fnmadd_ph(__A, __B, __C); +} + +__m128h test_mm_mask_fnmadd_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) { + // CHECK-LABEL: @test_mm_mask_fnmadd_ph + // CHECK: fneg + // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}) + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}} + return _mm_mask_fnmadd_ph(__A, __U, __B, __C); +} + +__m256h test_mm256_fnmadd_ph(__m256h __A, __m256h __B, __m256h __C) { + // CHECK-LABEL: @test_mm256_fnmadd_ph + // CHECK: fneg + // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}) + return _mm256_fnmadd_ph(__A, __B, __C); +} + +__m256h test_mm256_mask_fnmadd_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) { + // CHECK-LABEL: @test_mm256_mask_fnmadd_ph + // CHECK: fneg + // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}) + // CHECK: bitcast i16 %{{.*}} to <16 x i1> + // CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}} + return 
_mm256_mask_fnmadd_ph(__A, __U, __B, __C); +} + +__m128h test_mm_fnmsub_ph(__m128h __A, __m128h __B, __m128h __C) { + // CHECK-LABEL: @test_mm_fnmsub_ph + // CHECK: fneg + // CHECK: fneg + // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}) + return _mm_fnmsub_ph(__A, __B, __C); +} + +__m128h test_mm_mask_fnmsub_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) { + // CHECK-LABEL: @test_mm_mask_fnmsub_ph + // CHECK: fneg + // CHECK: fneg + // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}) + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}} + return _mm_mask_fnmsub_ph(__A, __U, __B, __C); +} + +__m128h test_mm_mask3_fnmsub_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) { + // CHECK-LABEL: @test_mm_mask3_fnmsub_ph + // CHECK: fneg + // CHECK: fneg + // CHECK: call <8 x half> @llvm.fma.v8f16(<8 x half> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}}) + // CHECK: bitcast i8 %{{.*}} to <8 x i1> + // CHECK: select <8 x i1> %{{.*}}, <8 x half> %{{.*}}, <8 x half> %{{.*}} + return _mm_mask3_fnmsub_ph(__A, __B, __C, __U); +} + +__m256h test_mm256_fnmsub_ph(__m256h __A, __m256h __B, __m256h __C) { + // CHECK-LABEL: @test_mm256_fnmsub_ph + // CHECK: fneg + // CHECK: fneg + // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}) + return _mm256_fnmsub_ph(__A, __B, __C); +} + +__m256h test_mm256_mask_fnmsub_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) { + // CHECK-LABEL: @test_mm256_mask_fnmsub_ph + // CHECK: fneg + // CHECK: fneg + // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}) + // CHECK: bitcast i16 %{{.*}} to <16 x i1> + // CHECK: select <16 x i1> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}} + return _mm256_mask_fnmsub_ph(__A, __U, __B, __C); +} + +__m256h test_mm256_mask3_fnmsub_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) { + // CHECK-LABEL: @test_mm256_mask3_fnmsub_ph + // CHECK: fneg + // CHECK: fneg + // CHECK: call <16 x half> @llvm.fma.v16f16(<16 x half> %{{.*}}, <16 x half> %{{.*}}, <16 x half> %{{.*}}) + return _mm256_mask3_fnmsub_ph(__A, __B, __C, __U); +} __m128h test_mm_mask_blend_ph(__mmask8 __U, __m128h __A, __m128h __W) { // CHECK-LABEL: @test_mm_mask_blend_ph // CHECK: %{{.*}} = bitcast i8 %{{.*}} to <8 x i1> diff --git a/llvm/include/llvm/IR/IntrinsicsX86.td b/llvm/include/llvm/IR/IntrinsicsX86.td index c79c6118db680..680e649290653 100644 --- a/llvm/include/llvm/IR/IntrinsicsX86.td +++ b/llvm/include/llvm/IR/IntrinsicsX86.td @@ -5709,4 +5709,27 @@ let TargetPrefix = "x86" in { [ llvm_v8f16_ty, llvm_v8f16_ty, llvm_v8f16_ty, llvm_i8_ty, llvm_i32_ty ], [ IntrNoMem, ImmArg> ]>; + + def int_x86_avx512fp16_vfmadd_ph_512 + : Intrinsic<[ llvm_v32f16_ty ], + [ llvm_v32f16_ty, llvm_v32f16_ty, llvm_v32f16_ty, llvm_i32_ty ], + [ IntrNoMem, ImmArg> ]>; + def int_x86_avx512fp16_vfmaddsub_ph_128 + : GCCBuiltin<"__builtin_ia32_vfmaddsubph">, + Intrinsic<[ llvm_v8f16_ty ], + [ llvm_v8f16_ty, llvm_v8f16_ty, llvm_v8f16_ty ], + [ IntrNoMem ]>; + def int_x86_avx512fp16_vfmaddsub_ph_256 + : GCCBuiltin<"__builtin_ia32_vfmaddsubph256">, + Intrinsic<[ llvm_v16f16_ty ], + [ llvm_v16f16_ty, llvm_v16f16_ty, llvm_v16f16_ty ], + [ IntrNoMem ]>; + def int_x86_avx512fp16_vfmaddsub_ph_512 + : Intrinsic<[ llvm_v32f16_ty ], + [ llvm_v32f16_ty, llvm_v32f16_ty, llvm_v32f16_ty, llvm_i32_ty ], + [ 
IntrNoMem, ImmArg> ]>; + def int_x86_avx512fp16_vfmadd_f16 + : Intrinsic<[ llvm_half_ty ], + [ llvm_half_ty, llvm_half_ty, llvm_half_ty, llvm_i32_ty ], + [ IntrNoMem, ImmArg> ]>; } diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index c70715033cc03..9f8e76c786f9a 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -1934,6 +1934,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM, setOperationAction(ISD::LOAD, VT, Legal); setOperationAction(ISD::STORE, VT, Legal); + setOperationAction(ISD::FMA, VT, Legal); + setOperationAction(ISD::STRICT_FMA, VT, Legal); setOperationAction(ISD::VSELECT, VT, Legal); setOperationAction(ISD::BUILD_VECTOR, VT, Custom); setOperationAction(ISD::SELECT, VT, Custom); @@ -32720,6 +32722,8 @@ bool X86TargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, return false; switch (VT.getSimpleVT().SimpleTy) { + case MVT::f16: + return Subtarget.hasFP16(); case MVT::f32: case MVT::f64: return true; @@ -49021,7 +49025,9 @@ static SDValue combineFMA(SDNode *N, SelectionDAG &DAG, } EVT ScalarVT = VT.getScalarType(); - if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || !Subtarget.hasAnyFMA()) + if (((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || + !Subtarget.hasAnyFMA()) && + !(ScalarVT == MVT::f16 && Subtarget.hasFP16())) return SDValue(); auto invertIfNegative = [&DAG, &TLI, &DCI](SDValue &V) { diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td index c92abc7e8c95d..df3e1554320ef 100644 --- a/llvm/lib/Target/X86/X86InstrAVX512.td +++ b/llvm/lib/Target/X86/X86InstrAVX512.td @@ -6760,14 +6760,14 @@ multiclass avx512_fma3p_213_rm opc, string OpcodeStr, SDPatternOperator OpcodeStr, "$src3, $src2", "$src2, $src3", (_.VT (OpNode _.RC:$src2, _.RC:$src1, _.RC:$src3)), (_.VT (MaskOpNode _.RC:$src2, _.RC:$src1, _.RC:$src3)), 1, 1>, - AVX512FMA3Base, Sched<[sched]>; + EVEX_4V, Sched<[sched]>; defm m: AVX512_maskable_fma, - AVX512FMA3Base, Sched<[sched.Folded, sched.ReadAfterFold]>; + EVEX_4V, Sched<[sched.Folded, sched.ReadAfterFold]>; defm mb: AVX512_maskable_fma opc, string OpcodeStr, SDPatternOperator _.RC:$src1,(_.VT (_.BroadcastLdFrag addr:$src3))), (MaskOpNode _.RC:$src2, _.RC:$src1,(_.VT (_.BroadcastLdFrag addr:$src3))), 1, 0>, - AVX512FMA3Base, EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold]>; + EVEX_4V, EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold]>; } } @@ -6791,21 +6791,22 @@ multiclass avx512_fma3_213_round opc, string OpcodeStr, SDNode OpNode, OpcodeStr, "$rc, $src3, $src2", "$src2, $src3, $rc", (_.VT ( OpNode _.RC:$src2, _.RC:$src1, _.RC:$src3, (i32 timm:$rc))), (_.VT ( OpNode _.RC:$src2, _.RC:$src1, _.RC:$src3, (i32 timm:$rc))), 1, 1>, - AVX512FMA3Base, EVEX_B, EVEX_RC, Sched<[sched]>; + EVEX_4V, EVEX_B, EVEX_RC, Sched<[sched]>; } multiclass avx512_fma3p_213_common opc, string OpcodeStr, SDPatternOperator OpNode, SDNode MaskOpNode, SDNode OpNodeRnd, X86SchedWriteWidths sched, - AVX512VLVectorVTInfo _, string Suff> { - let Predicates = [HasAVX512] in { + AVX512VLVectorVTInfo _, string Suff, + Predicate prd = HasAVX512> { + let Predicates = [prd] in { defm Z : avx512_fma3p_213_rm, avx512_fma3_213_round, EVEX_V512, EVEX_CD8<_.info512.EltSize, CD8VF>; } - let Predicates = [HasVLX, HasAVX512] in { + let Predicates = [HasVLX, prd] in { defm Z256 : avx512_fma3p_213_rm, EVEX_V256, EVEX_CD8<_.info256.EltSize, CD8VF>; @@ -6817,12 +6818,15 @@ multiclass avx512_fma3p_213_common opc, string OpcodeStr, 
SDPatternOpera multiclass avx512_fma3p_213_f opc, string OpcodeStr, SDPatternOperator OpNode, SDNode MaskOpNode, SDNode OpNodeRnd> { + defm PH : avx512_fma3p_213_common, T_MAP6PD; defm PS : avx512_fma3p_213_common; + avx512vl_f32_info, "PS">, T8PD; defm PD : avx512_fma3p_213_common, VEX_W; + avx512vl_f64_info, "PD">, T8PD, VEX_W; } defm VFMADD213 : avx512_fma3p_213_f<0xA8, "vfmadd213", any_fma, @@ -6849,14 +6853,14 @@ multiclass avx512_fma3p_231_rm opc, string OpcodeStr, SDPatternOperator OpcodeStr, "$src3, $src2", "$src2, $src3", (null_frag), (_.VT (MaskOpNode _.RC:$src2, _.RC:$src3, _.RC:$src1)), 1, 1>, - AVX512FMA3Base, Sched<[sched]>; + EVEX_4V, Sched<[sched]>; defm m: AVX512_maskable_fma, - AVX512FMA3Base, Sched<[sched.Folded, sched.ReadAfterFold]>; + EVEX_4V, Sched<[sched.Folded, sched.ReadAfterFold]>; defm mb: AVX512_maskable_fma opc, string OpcodeStr, SDPatternOperator _.RC:$src1)), (_.VT (MaskOpNode _.RC:$src2, (_.VT (_.BroadcastLdFrag addr:$src3)), - _.RC:$src1)), 1, 0>, AVX512FMA3Base, EVEX_B, + _.RC:$src1)), 1, 0>, EVEX_4V, EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold]>; } } @@ -6882,21 +6886,22 @@ multiclass avx512_fma3_231_round opc, string OpcodeStr, SDNode OpNode, OpcodeStr, "$rc, $src3, $src2", "$src2, $src3, $rc", (null_frag), (_.VT (OpNode _.RC:$src2, _.RC:$src3, _.RC:$src1, (i32 timm:$rc))), - 1, 1>, AVX512FMA3Base, EVEX_B, EVEX_RC, Sched<[sched]>; + 1, 1>, EVEX_4V, EVEX_B, EVEX_RC, Sched<[sched]>; } multiclass avx512_fma3p_231_common opc, string OpcodeStr, SDPatternOperator OpNode, SDNode MaskOpNode, SDNode OpNodeRnd, X86SchedWriteWidths sched, - AVX512VLVectorVTInfo _, string Suff> { - let Predicates = [HasAVX512] in { + AVX512VLVectorVTInfo _, string Suff, + Predicate prd = HasAVX512> { + let Predicates = [prd] in { defm Z : avx512_fma3p_231_rm, avx512_fma3_231_round, EVEX_V512, EVEX_CD8<_.info512.EltSize, CD8VF>; } - let Predicates = [HasVLX, HasAVX512] in { + let Predicates = [HasVLX, prd] in { defm Z256 : avx512_fma3p_231_rm, EVEX_V256, EVEX_CD8<_.info256.EltSize, CD8VF>; @@ -6908,12 +6913,15 @@ multiclass avx512_fma3p_231_common opc, string OpcodeStr, SDPatternOpera multiclass avx512_fma3p_231_f opc, string OpcodeStr, SDPatternOperator OpNode, SDNode MaskOpNode, SDNode OpNodeRnd > { + defm PH : avx512_fma3p_231_common, T_MAP6PD; defm PS : avx512_fma3p_231_common; + avx512vl_f32_info, "PS">, T8PD; defm PD : avx512_fma3p_231_common, VEX_W; + avx512vl_f64_info, "PD">, T8PD, VEX_W; } defm VFMADD231 : avx512_fma3p_231_f<0xB8, "vfmadd231", any_fma, @@ -6939,7 +6947,7 @@ multiclass avx512_fma3p_132_rm opc, string OpcodeStr, SDPatternOperator OpcodeStr, "$src3, $src2", "$src2, $src3", (null_frag), (_.VT (MaskOpNode _.RC:$src1, _.RC:$src3, _.RC:$src2)), 1, 1>, - AVX512FMA3Base, Sched<[sched]>; + EVEX_4V, Sched<[sched]>; // Pattern is 312 order so that the load is in a different place from the // 213 and 231 patterns this helps tablegen's duplicate pattern detection. @@ -6948,7 +6956,7 @@ multiclass avx512_fma3p_132_rm opc, string OpcodeStr, SDPatternOperator OpcodeStr, "$src3, $src2", "$src2, $src3", (_.VT (OpNode (_.LdFrag addr:$src3), _.RC:$src1, _.RC:$src2)), (_.VT (MaskOpNode (_.LdFrag addr:$src3), _.RC:$src1, _.RC:$src2)), 1, 0>, - AVX512FMA3Base, Sched<[sched.Folded, sched.ReadAfterFold]>; + EVEX_4V, Sched<[sched.Folded, sched.ReadAfterFold]>; // Pattern is 312 order so that the load is in a different place from the // 213 and 231 patterns this helps tablegen's duplicate pattern detection. 
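As context for the multiclasses above: the 132/213/231 suffixes name the same fused operation with different operand orderings (which source is overwritten and which one may be folded from memory), and the FP16 additions follow the usual AVX-512 masking convention already exercised by the clang tests earlier in this patch. The following is a minimal usage sketch only, assuming an AVX512-FP16 + VL toolchain (e.g. clang with -mavx512fp16 -mavx512vl) and <immintrin.h>; the helper name fnmadd_demo and the use of _mm_add_ph to combine the three results are illustrative and not part of this patch.

#include <immintrin.h>

// fnmadd computes -(a*b) + c per lane (the tests above check for an fneg
// feeding @llvm.fma.v8f16). The mask_ form keeps the passthrough operand's
// lane where the mask bit is 0; the maskz_ form writes +0.0 there.
__m128h fnmadd_demo(__m128h a, __m128h b, __m128h c, __mmask8 m) {
  __m128h full   = _mm_fnmadd_ph(a, b, c);          // every lane: -(a*b) + c
  __m128h merged = _mm_mask_fnmadd_ph(a, m, b, c);  // lane i: bit set ? -(a*b)+c : a
  __m128h zeroed = _mm_maskz_fnmadd_ph(m, a, b, c); // lane i: bit set ? -(a*b)+c : 0.0
  return _mm_add_ph(_mm_add_ph(full, merged), zeroed);
}

The _round_sh variants in the scalar tests additionally take a rounding-mode immediate such as _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC, which is why their CHECK lines expect the @llvm.x86.avx512fp16.vfmadd.f16 intrinsic with an i32 rounding argument rather than plain @llvm.fma.f16.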
@@ -6960,7 +6968,7 @@ multiclass avx512_fma3p_132_rm opc, string OpcodeStr, SDPatternOperator _.RC:$src1, _.RC:$src2)), (_.VT (MaskOpNode (_.VT (_.BroadcastLdFrag addr:$src3)), _.RC:$src1, _.RC:$src2)), 1, 0>, - AVX512FMA3Base, EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold]>; + EVEX_4V, EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold]>; } } @@ -6974,21 +6982,22 @@ multiclass avx512_fma3_132_round opc, string OpcodeStr, SDNode OpNode, OpcodeStr, "$rc, $src3, $src2", "$src2, $src3, $rc", (null_frag), (_.VT (OpNode _.RC:$src1, _.RC:$src3, _.RC:$src2, (i32 timm:$rc))), - 1, 1>, AVX512FMA3Base, EVEX_B, EVEX_RC, Sched<[sched]>; + 1, 1>, EVEX_4V, EVEX_B, EVEX_RC, Sched<[sched]>; } multiclass avx512_fma3p_132_common opc, string OpcodeStr, SDPatternOperator OpNode, SDNode MaskOpNode, SDNode OpNodeRnd, X86SchedWriteWidths sched, - AVX512VLVectorVTInfo _, string Suff> { - let Predicates = [HasAVX512] in { + AVX512VLVectorVTInfo _, string Suff, + Predicate prd = HasAVX512> { + let Predicates = [prd] in { defm Z : avx512_fma3p_132_rm, avx512_fma3_132_round, EVEX_V512, EVEX_CD8<_.info512.EltSize, CD8VF>; } - let Predicates = [HasVLX, HasAVX512] in { + let Predicates = [HasVLX, prd] in { defm Z256 : avx512_fma3p_132_rm, EVEX_V256, EVEX_CD8<_.info256.EltSize, CD8VF>; @@ -7000,12 +7009,15 @@ multiclass avx512_fma3p_132_common opc, string OpcodeStr, SDPatternOpera multiclass avx512_fma3p_132_f opc, string OpcodeStr, SDPatternOperator OpNode, SDNode MaskOpNode, SDNode OpNodeRnd > { + defm PH : avx512_fma3p_132_common, T_MAP6PD; defm PS : avx512_fma3p_132_common; + avx512vl_f32_info, "PS">, T8PD; defm PD : avx512_fma3p_132_common, VEX_W; + avx512vl_f64_info, "PD">, T8PD, VEX_W; } defm VFMADD132 : avx512_fma3p_132_f<0x98, "vfmadd132", any_fma, @@ -7028,39 +7040,39 @@ let Constraints = "$src1 = $dst", hasSideEffects = 0 in { defm r_Int: AVX512_maskable_3src_scalar, - AVX512FMA3Base, Sched<[SchedWriteFMA.Scl]>, SIMD_EXC; + EVEX_4V, Sched<[SchedWriteFMA.Scl]>, SIMD_EXC; let mayLoad = 1 in defm m_Int: AVX512_maskable_3src_scalar, - AVX512FMA3Base, Sched<[SchedWriteFMA.Scl.Folded, SchedWriteFMA.Scl.ReadAfterFold]>, SIMD_EXC; + EVEX_4V, Sched<[SchedWriteFMA.Scl.Folded, SchedWriteFMA.Scl.ReadAfterFold]>, SIMD_EXC; let Uses = [MXCSR] in defm rb_Int: AVX512_maskable_3src_scalar, - AVX512FMA3Base, EVEX_B, EVEX_RC, Sched<[SchedWriteFMA.Scl]>; + EVEX_4V, EVEX_B, EVEX_RC, Sched<[SchedWriteFMA.Scl]>; let isCodeGenOnly = 1, isCommutable = 1 in { - def r : AVX512FMA3S, Sched<[SchedWriteFMA.Scl]>, SIMD_EXC; - def m : AVX512FMA3S, Sched<[SchedWriteFMA.Scl]>, EVEX_4V, SIMD_EXC; + def m : AVX512, Sched<[SchedWriteFMA.Scl.Folded, SchedWriteFMA.Scl.ReadAfterFold]>, SIMD_EXC; + [RHS_m]>, Sched<[SchedWriteFMA.Scl.Folded, SchedWriteFMA.Scl.ReadAfterFold]>, EVEX_4V, SIMD_EXC; let Uses = [MXCSR] in - def rb : AVX512FMA3S, EVEX_B, EVEX_RC, - Sched<[SchedWriteFMA.Scl]>; + Sched<[SchedWriteFMA.Scl]>, EVEX_4V; }// isCodeGenOnly = 1 }// Constraints = "$src1 = $dst" } @@ -7104,10 +7116,15 @@ multiclass avx512_fma3s opc213, bits<8> opc231, bits<8> opc132, let Predicates = [HasAVX512] in { defm NAME : avx512_fma3s_all, - EVEX_CD8<32, CD8VT1>, VEX_LIG; + EVEX_CD8<32, CD8VT1>, VEX_LIG, T8PD; defm NAME : avx512_fma3s_all, - EVEX_CD8<64, CD8VT1>, VEX_LIG, VEX_W; + EVEX_CD8<64, CD8VT1>, VEX_LIG, VEX_W, T8PD; + } + let Predicates = [HasFP16] in { + defm NAME : avx512_fma3s_all, + EVEX_CD8<16, CD8VT1>, VEX_LIG, T_MAP6PD; } } @@ -7119,8 +7136,9 @@ defm VFNMSUB : avx512_fma3s<0xAF, 0xBF, 0x9F, "vfnmsub", X86any_Fnmsub, X86Fnmsu multiclass 
avx512_scalar_fma_patterns { - let Predicates = [HasAVX512] in { + X86VectorVTInfo _, PatLeaf ZeroFP, + Predicate prd = HasAVX512> { + let Predicates = [prd] in { def : Pat<(_.VT (Move (_.VT VR128X:$src1), (_.VT (scalar_to_vector (Op _.FRC:$src2, (_.EltVT (extractelt (_.VT VR128X:$src1), (iPTR 0))), @@ -7318,6 +7336,14 @@ multiclass avx512_scalar_fma_patterns; } } +defm : avx512_scalar_fma_patterns; +defm : avx512_scalar_fma_patterns; +defm : avx512_scalar_fma_patterns; +defm : avx512_scalar_fma_patterns; defm : avx512_scalar_fma_patterns; @@ -7350,13 +7376,13 @@ multiclass avx512_pmadd52_rm opc, string OpcodeStr, SDNode OpNode, (ins _.RC:$src2, _.RC:$src3), OpcodeStr, "$src3, $src2", "$src2, $src3", (_.VT (OpNode _.RC:$src2, _.RC:$src3, _.RC:$src1)), 1, 1>, - AVX512FMA3Base, Sched<[sched]>; + T8PD, EVEX_4V, Sched<[sched]>; defm m: AVX512_maskable_3src, - AVX512FMA3Base, Sched<[sched.Folded, sched.ReadAfterFold]>; + T8PD, EVEX_4V, Sched<[sched.Folded, sched.ReadAfterFold]>; defm mb: AVX512_maskable_3src opc, string OpcodeStr, SDNode OpNode, (OpNode _.RC:$src2, (_.VT (_.BroadcastLdFrag addr:$src3)), _.RC:$src1)>, - AVX512FMA3Base, EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold]>; + T8PD, EVEX_4V, EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold]>; } } } // Constraints = "$src1 = $dst" @@ -12355,13 +12381,13 @@ multiclass VBMI2_shift_var_rm Op, string OpStr, SDNode OpNode, (ins VTI.RC:$src2, VTI.RC:$src3), OpStr, "$src3, $src2", "$src2, $src3", (VTI.VT (OpNode VTI.RC:$src1, VTI.RC:$src2, VTI.RC:$src3))>, - AVX512FMA3Base, Sched<[sched]>; + T8PD, EVEX_4V, Sched<[sched]>; defm m: AVX512_maskable_3src, - AVX512FMA3Base, + T8PD, EVEX_4V, Sched<[sched.Folded, sched.ReadAfterFold]>; } } @@ -12377,7 +12403,7 @@ multiclass VBMI2_shift_var_rmb Op, string OpStr, SDNode OpNode, "$src2, ${src3}"#VTI.BroadcastStr, (OpNode VTI.RC:$src1, VTI.RC:$src2, (VTI.VT (VTI.BroadcastLdFrag addr:$src3)))>, - AVX512FMA3Base, EVEX_B, + T8PD, EVEX_4V, EVEX_B, Sched<[sched.Folded, sched.ReadAfterFold]>; } diff --git a/llvm/lib/Target/X86/X86InstrFMA3Info.cpp b/llvm/lib/Target/X86/X86InstrFMA3Info.cpp index 6d803e931b685..52b2a62316cde 100644 --- a/llvm/lib/Target/X86/X86InstrFMA3Info.cpp +++ b/llvm/lib/Target/X86/X86InstrFMA3Info.cpp @@ -28,35 +28,43 @@ using namespace llvm; FMA3GROUP(Name, Suf##k, Attrs | X86InstrFMA3Group::KMergeMasked) \ FMA3GROUP(Name, Suf##kz, Attrs | X86InstrFMA3Group::KZeroMasked) -#define FMA3GROUP_PACKED_WIDTHS(Name, Suf, Attrs) \ - FMA3GROUP(Name, Suf##Ym, Attrs) \ - FMA3GROUP(Name, Suf##Yr, Attrs) \ +#define FMA3GROUP_PACKED_WIDTHS_Z(Name, Suf, Attrs) \ FMA3GROUP_MASKED(Name, Suf##Z128m, Attrs) \ FMA3GROUP_MASKED(Name, Suf##Z128r, Attrs) \ FMA3GROUP_MASKED(Name, Suf##Z256m, Attrs) \ FMA3GROUP_MASKED(Name, Suf##Z256r, Attrs) \ FMA3GROUP_MASKED(Name, Suf##Zm, Attrs) \ FMA3GROUP_MASKED(Name, Suf##Zr, Attrs) \ + +#define FMA3GROUP_PACKED_WIDTHS_ALL(Name, Suf, Attrs) \ + FMA3GROUP(Name, Suf##Ym, Attrs) \ + FMA3GROUP(Name, Suf##Yr, Attrs) \ + FMA3GROUP_PACKED_WIDTHS_Z(Name, Suf, Attrs) \ FMA3GROUP(Name, Suf##m, Attrs) \ FMA3GROUP(Name, Suf##r, Attrs) #define FMA3GROUP_PACKED(Name, Attrs) \ - FMA3GROUP_PACKED_WIDTHS(Name, PD, Attrs) \ - FMA3GROUP_PACKED_WIDTHS(Name, PS, Attrs) + FMA3GROUP_PACKED_WIDTHS_ALL(Name, PD, Attrs) \ + FMA3GROUP_PACKED_WIDTHS_Z(Name, PH, Attrs) \ + FMA3GROUP_PACKED_WIDTHS_ALL(Name, PS, Attrs) -#define FMA3GROUP_SCALAR_WIDTHS(Name, Suf, Attrs) \ +#define FMA3GROUP_SCALAR_WIDTHS_Z(Name, Suf, Attrs) \ FMA3GROUP(Name, Suf##Zm, Attrs) \ FMA3GROUP_MASKED(Name, Suf##Zm_Int, Attrs 
| X86InstrFMA3Group::Intrinsic) \ FMA3GROUP(Name, Suf##Zr, Attrs) \ FMA3GROUP_MASKED(Name, Suf##Zr_Int, Attrs | X86InstrFMA3Group::Intrinsic) \ + +#define FMA3GROUP_SCALAR_WIDTHS_ALL(Name, Suf, Attrs) \ + FMA3GROUP_SCALAR_WIDTHS_Z(Name, Suf, Attrs) \ FMA3GROUP(Name, Suf##m, Attrs) \ FMA3GROUP(Name, Suf##m_Int, Attrs | X86InstrFMA3Group::Intrinsic) \ FMA3GROUP(Name, Suf##r, Attrs) \ FMA3GROUP(Name, Suf##r_Int, Attrs | X86InstrFMA3Group::Intrinsic) #define FMA3GROUP_SCALAR(Name, Attrs) \ - FMA3GROUP_SCALAR_WIDTHS(Name, SD, Attrs) \ - FMA3GROUP_SCALAR_WIDTHS(Name, SS, Attrs) + FMA3GROUP_SCALAR_WIDTHS_ALL(Name, SD, Attrs) \ + FMA3GROUP_SCALAR_WIDTHS_Z(Name, SH, Attrs) \ + FMA3GROUP_SCALAR_WIDTHS_ALL(Name, SS, Attrs) #define FMA3GROUP_FULL(Name, Attrs) \ FMA3GROUP_PACKED(Name, Attrs) \ @@ -78,15 +86,19 @@ static const X86InstrFMA3Group Groups[] = { #define FMA3GROUP_PACKED_AVX512(Name, Suf, Attrs) \ FMA3GROUP_PACKED_AVX512_WIDTHS(Name, PD, Suf, Attrs) \ + FMA3GROUP_PACKED_AVX512_WIDTHS(Name, PH, Suf, Attrs) \ FMA3GROUP_PACKED_AVX512_WIDTHS(Name, PS, Suf, Attrs) #define FMA3GROUP_PACKED_AVX512_ROUND(Name, Suf, Attrs) \ FMA3GROUP_MASKED(Name, PDZ##Suf, Attrs) \ + FMA3GROUP_MASKED(Name, PHZ##Suf, Attrs) \ FMA3GROUP_MASKED(Name, PSZ##Suf, Attrs) #define FMA3GROUP_SCALAR_AVX512_ROUND(Name, Suf, Attrs) \ FMA3GROUP(Name, SDZ##Suf, Attrs) \ FMA3GROUP_MASKED(Name, SDZ##Suf##_Int, Attrs) \ + FMA3GROUP(Name, SHZ##Suf, Attrs) \ + FMA3GROUP_MASKED(Name, SHZ##Suf##_Int, Attrs) \ FMA3GROUP(Name, SSZ##Suf, Attrs) \ FMA3GROUP_MASKED(Name, SSZ##Suf##_Int, Attrs) @@ -130,14 +142,16 @@ const X86InstrFMA3Group *llvm::getFMA3Group(unsigned Opcode, uint64_t TSFlags) { // FMA3 instructions have a well defined encoding pattern we can exploit. uint8_t BaseOpcode = X86II::getBaseOpcodeFor(TSFlags); - bool IsFMA3 = ((TSFlags & X86II::EncodingMask) == X86II::VEX || - (TSFlags & X86II::EncodingMask) == X86II::EVEX) && - (TSFlags & X86II::OpMapMask) == X86II::T8 && - (TSFlags & X86II::OpPrefixMask) == X86II::PD && - ((BaseOpcode >= 0x96 && BaseOpcode <= 0x9F) || - (BaseOpcode >= 0xA6 && BaseOpcode <= 0xAF) || - (BaseOpcode >= 0xB6 && BaseOpcode <= 0xBF)); - if (!IsFMA3) + bool IsFMA3Opcode = ((BaseOpcode >= 0x96 && BaseOpcode <= 0x9F) || + (BaseOpcode >= 0xA6 && BaseOpcode <= 0xAF) || + (BaseOpcode >= 0xB6 && BaseOpcode <= 0xBF)); + bool IsFMA3Encoding = ((TSFlags & X86II::EncodingMask) == X86II::VEX && + (TSFlags & X86II::OpMapMask) == X86II::T8) || + ((TSFlags & X86II::EncodingMask) == X86II::EVEX && + ((TSFlags & X86II::OpMapMask) == X86II::T8 || + (TSFlags & X86II::OpMapMask) == X86II::T_MAP6)); + bool IsFMA3Prefix = (TSFlags & X86II::OpPrefixMask) == X86II::PD; + if (!IsFMA3Opcode || !IsFMA3Encoding || !IsFMA3Prefix) return nullptr; verifyTables(); diff --git a/llvm/lib/Target/X86/X86InstrFoldTables.cpp b/llvm/lib/Target/X86/X86InstrFoldTables.cpp index 959c8d4a2d886..235f0d4b92613 100644 --- a/llvm/lib/Target/X86/X86InstrFoldTables.cpp +++ b/llvm/lib/Target/X86/X86InstrFoldTables.cpp @@ -3288,6 +3288,9 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { { X86::VFMADD132PDZ256r, X86::VFMADD132PDZ256m, 0 }, { X86::VFMADD132PDZr, X86::VFMADD132PDZm, 0 }, { X86::VFMADD132PDr, X86::VFMADD132PDm, 0 }, + { X86::VFMADD132PHZ128r, X86::VFMADD132PHZ128m, 0 }, + { X86::VFMADD132PHZ256r, X86::VFMADD132PHZ256m, 0 }, + { X86::VFMADD132PHZr, X86::VFMADD132PHZm, 0 }, { X86::VFMADD132PSYr, X86::VFMADD132PSYm, 0 }, { X86::VFMADD132PSZ128r, X86::VFMADD132PSZ128m, 0 }, { X86::VFMADD132PSZ256r, X86::VFMADD132PSZ256m, 0 }, @@ 
-3297,6 +3300,8 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { { X86::VFMADD132SDZr_Int, X86::VFMADD132SDZm_Int, TB_NO_REVERSE }, { X86::VFMADD132SDr, X86::VFMADD132SDm, 0 }, { X86::VFMADD132SDr_Int, X86::VFMADD132SDm_Int, TB_NO_REVERSE }, + { X86::VFMADD132SHZr, X86::VFMADD132SHZm, 0 }, + { X86::VFMADD132SHZr_Int, X86::VFMADD132SHZm_Int, TB_NO_REVERSE }, { X86::VFMADD132SSZr, X86::VFMADD132SSZm, 0 }, { X86::VFMADD132SSZr_Int, X86::VFMADD132SSZm_Int, TB_NO_REVERSE }, { X86::VFMADD132SSr, X86::VFMADD132SSm, 0 }, @@ -3306,6 +3311,9 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { { X86::VFMADD213PDZ256r, X86::VFMADD213PDZ256m, 0 }, { X86::VFMADD213PDZr, X86::VFMADD213PDZm, 0 }, { X86::VFMADD213PDr, X86::VFMADD213PDm, 0 }, + { X86::VFMADD213PHZ128r, X86::VFMADD213PHZ128m, 0 }, + { X86::VFMADD213PHZ256r, X86::VFMADD213PHZ256m, 0 }, + { X86::VFMADD213PHZr, X86::VFMADD213PHZm, 0 }, { X86::VFMADD213PSYr, X86::VFMADD213PSYm, 0 }, { X86::VFMADD213PSZ128r, X86::VFMADD213PSZ128m, 0 }, { X86::VFMADD213PSZ256r, X86::VFMADD213PSZ256m, 0 }, @@ -3315,6 +3323,8 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { { X86::VFMADD213SDZr_Int, X86::VFMADD213SDZm_Int, TB_NO_REVERSE }, { X86::VFMADD213SDr, X86::VFMADD213SDm, 0 }, { X86::VFMADD213SDr_Int, X86::VFMADD213SDm_Int, TB_NO_REVERSE }, + { X86::VFMADD213SHZr, X86::VFMADD213SHZm, 0 }, + { X86::VFMADD213SHZr_Int, X86::VFMADD213SHZm_Int, TB_NO_REVERSE }, { X86::VFMADD213SSZr, X86::VFMADD213SSZm, 0 }, { X86::VFMADD213SSZr_Int, X86::VFMADD213SSZm_Int, TB_NO_REVERSE }, { X86::VFMADD213SSr, X86::VFMADD213SSm, 0 }, @@ -3324,6 +3334,9 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { { X86::VFMADD231PDZ256r, X86::VFMADD231PDZ256m, 0 }, { X86::VFMADD231PDZr, X86::VFMADD231PDZm, 0 }, { X86::VFMADD231PDr, X86::VFMADD231PDm, 0 }, + { X86::VFMADD231PHZ128r, X86::VFMADD231PHZ128m, 0 }, + { X86::VFMADD231PHZ256r, X86::VFMADD231PHZ256m, 0 }, + { X86::VFMADD231PHZr, X86::VFMADD231PHZm, 0 }, { X86::VFMADD231PSYr, X86::VFMADD231PSYm, 0 }, { X86::VFMADD231PSZ128r, X86::VFMADD231PSZ128m, 0 }, { X86::VFMADD231PSZ256r, X86::VFMADD231PSZ256m, 0 }, @@ -3333,6 +3346,8 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { { X86::VFMADD231SDZr_Int, X86::VFMADD231SDZm_Int, TB_NO_REVERSE }, { X86::VFMADD231SDr, X86::VFMADD231SDm, 0 }, { X86::VFMADD231SDr_Int, X86::VFMADD231SDm_Int, TB_NO_REVERSE }, + { X86::VFMADD231SHZr, X86::VFMADD231SHZm, 0 }, + { X86::VFMADD231SHZr_Int, X86::VFMADD231SHZm_Int, TB_NO_REVERSE }, { X86::VFMADD231SSZr, X86::VFMADD231SSZm, 0 }, { X86::VFMADD231SSZr_Int, X86::VFMADD231SSZm_Int, TB_NO_REVERSE }, { X86::VFMADD231SSr, X86::VFMADD231SSm, 0 }, @@ -3350,6 +3365,9 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { { X86::VFMADDSUB132PDZ256r, X86::VFMADDSUB132PDZ256m, 0 }, { X86::VFMADDSUB132PDZr, X86::VFMADDSUB132PDZm, 0 }, { X86::VFMADDSUB132PDr, X86::VFMADDSUB132PDm, 0 }, + { X86::VFMADDSUB132PHZ128r, X86::VFMADDSUB132PHZ128m, 0 }, + { X86::VFMADDSUB132PHZ256r, X86::VFMADDSUB132PHZ256m, 0 }, + { X86::VFMADDSUB132PHZr, X86::VFMADDSUB132PHZm, 0 }, { X86::VFMADDSUB132PSYr, X86::VFMADDSUB132PSYm, 0 }, { X86::VFMADDSUB132PSZ128r, X86::VFMADDSUB132PSZ128m, 0 }, { X86::VFMADDSUB132PSZ256r, X86::VFMADDSUB132PSZ256m, 0 }, @@ -3360,6 +3378,9 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { { X86::VFMADDSUB213PDZ256r, X86::VFMADDSUB213PDZ256m, 0 }, { X86::VFMADDSUB213PDZr, X86::VFMADDSUB213PDZm, 0 }, { X86::VFMADDSUB213PDr, X86::VFMADDSUB213PDm, 0 }, + { X86::VFMADDSUB213PHZ128r, 
X86::VFMADDSUB213PHZ128m, 0 }, + { X86::VFMADDSUB213PHZ256r, X86::VFMADDSUB213PHZ256m, 0 }, + { X86::VFMADDSUB213PHZr, X86::VFMADDSUB213PHZm, 0 }, { X86::VFMADDSUB213PSYr, X86::VFMADDSUB213PSYm, 0 }, { X86::VFMADDSUB213PSZ128r, X86::VFMADDSUB213PSZ128m, 0 }, { X86::VFMADDSUB213PSZ256r, X86::VFMADDSUB213PSZ256m, 0 }, @@ -3370,6 +3391,9 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { { X86::VFMADDSUB231PDZ256r, X86::VFMADDSUB231PDZ256m, 0 }, { X86::VFMADDSUB231PDZr, X86::VFMADDSUB231PDZm, 0 }, { X86::VFMADDSUB231PDr, X86::VFMADDSUB231PDm, 0 }, + { X86::VFMADDSUB231PHZ128r, X86::VFMADDSUB231PHZ128m, 0 }, + { X86::VFMADDSUB231PHZ256r, X86::VFMADDSUB231PHZ256m, 0 }, + { X86::VFMADDSUB231PHZr, X86::VFMADDSUB231PHZm, 0 }, { X86::VFMADDSUB231PSYr, X86::VFMADDSUB231PSYm, 0 }, { X86::VFMADDSUB231PSZ128r, X86::VFMADDSUB231PSZ128m, 0 }, { X86::VFMADDSUB231PSZ256r, X86::VFMADDSUB231PSZ256m, 0 }, @@ -3384,6 +3408,9 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { { X86::VFMSUB132PDZ256r, X86::VFMSUB132PDZ256m, 0 }, { X86::VFMSUB132PDZr, X86::VFMSUB132PDZm, 0 }, { X86::VFMSUB132PDr, X86::VFMSUB132PDm, 0 }, + { X86::VFMSUB132PHZ128r, X86::VFMSUB132PHZ128m, 0 }, + { X86::VFMSUB132PHZ256r, X86::VFMSUB132PHZ256m, 0 }, + { X86::VFMSUB132PHZr, X86::VFMSUB132PHZm, 0 }, { X86::VFMSUB132PSYr, X86::VFMSUB132PSYm, 0 }, { X86::VFMSUB132PSZ128r, X86::VFMSUB132PSZ128m, 0 }, { X86::VFMSUB132PSZ256r, X86::VFMSUB132PSZ256m, 0 }, @@ -3393,6 +3420,8 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { { X86::VFMSUB132SDZr_Int, X86::VFMSUB132SDZm_Int, TB_NO_REVERSE }, { X86::VFMSUB132SDr, X86::VFMSUB132SDm, 0 }, { X86::VFMSUB132SDr_Int, X86::VFMSUB132SDm_Int, TB_NO_REVERSE }, + { X86::VFMSUB132SHZr, X86::VFMSUB132SHZm, 0 }, + { X86::VFMSUB132SHZr_Int, X86::VFMSUB132SHZm_Int, TB_NO_REVERSE }, { X86::VFMSUB132SSZr, X86::VFMSUB132SSZm, 0 }, { X86::VFMSUB132SSZr_Int, X86::VFMSUB132SSZm_Int, TB_NO_REVERSE }, { X86::VFMSUB132SSr, X86::VFMSUB132SSm, 0 }, @@ -3402,6 +3431,9 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { { X86::VFMSUB213PDZ256r, X86::VFMSUB213PDZ256m, 0 }, { X86::VFMSUB213PDZr, X86::VFMSUB213PDZm, 0 }, { X86::VFMSUB213PDr, X86::VFMSUB213PDm, 0 }, + { X86::VFMSUB213PHZ128r, X86::VFMSUB213PHZ128m, 0 }, + { X86::VFMSUB213PHZ256r, X86::VFMSUB213PHZ256m, 0 }, + { X86::VFMSUB213PHZr, X86::VFMSUB213PHZm, 0 }, { X86::VFMSUB213PSYr, X86::VFMSUB213PSYm, 0 }, { X86::VFMSUB213PSZ128r, X86::VFMSUB213PSZ128m, 0 }, { X86::VFMSUB213PSZ256r, X86::VFMSUB213PSZ256m, 0 }, @@ -3411,6 +3443,8 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { { X86::VFMSUB213SDZr_Int, X86::VFMSUB213SDZm_Int, TB_NO_REVERSE }, { X86::VFMSUB213SDr, X86::VFMSUB213SDm, 0 }, { X86::VFMSUB213SDr_Int, X86::VFMSUB213SDm_Int, TB_NO_REVERSE }, + { X86::VFMSUB213SHZr, X86::VFMSUB213SHZm, 0 }, + { X86::VFMSUB213SHZr_Int, X86::VFMSUB213SHZm_Int, TB_NO_REVERSE }, { X86::VFMSUB213SSZr, X86::VFMSUB213SSZm, 0 }, { X86::VFMSUB213SSZr_Int, X86::VFMSUB213SSZm_Int, TB_NO_REVERSE }, { X86::VFMSUB213SSr, X86::VFMSUB213SSm, 0 }, @@ -3420,6 +3454,9 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { { X86::VFMSUB231PDZ256r, X86::VFMSUB231PDZ256m, 0 }, { X86::VFMSUB231PDZr, X86::VFMSUB231PDZm, 0 }, { X86::VFMSUB231PDr, X86::VFMSUB231PDm, 0 }, + { X86::VFMSUB231PHZ128r, X86::VFMSUB231PHZ128m, 0 }, + { X86::VFMSUB231PHZ256r, X86::VFMSUB231PHZ256m, 0 }, + { X86::VFMSUB231PHZr, X86::VFMSUB231PHZm, 0 }, { X86::VFMSUB231PSYr, X86::VFMSUB231PSYm, 0 }, { X86::VFMSUB231PSZ128r, X86::VFMSUB231PSZ128m, 0 }, { 
X86::VFMSUB231PSZ256r, X86::VFMSUB231PSZ256m, 0 }, @@ -3429,6 +3466,8 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { { X86::VFMSUB231SDZr_Int, X86::VFMSUB231SDZm_Int, TB_NO_REVERSE }, { X86::VFMSUB231SDr, X86::VFMSUB231SDm, 0 }, { X86::VFMSUB231SDr_Int, X86::VFMSUB231SDm_Int, TB_NO_REVERSE }, + { X86::VFMSUB231SHZr, X86::VFMSUB231SHZm, 0 }, + { X86::VFMSUB231SHZr_Int, X86::VFMSUB231SHZm_Int, TB_NO_REVERSE }, { X86::VFMSUB231SSZr, X86::VFMSUB231SSZm, 0 }, { X86::VFMSUB231SSZr_Int, X86::VFMSUB231SSZm_Int, TB_NO_REVERSE }, { X86::VFMSUB231SSr, X86::VFMSUB231SSm, 0 }, @@ -3438,6 +3477,9 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { { X86::VFMSUBADD132PDZ256r, X86::VFMSUBADD132PDZ256m, 0 }, { X86::VFMSUBADD132PDZr, X86::VFMSUBADD132PDZm, 0 }, { X86::VFMSUBADD132PDr, X86::VFMSUBADD132PDm, 0 }, + { X86::VFMSUBADD132PHZ128r, X86::VFMSUBADD132PHZ128m, 0 }, + { X86::VFMSUBADD132PHZ256r, X86::VFMSUBADD132PHZ256m, 0 }, + { X86::VFMSUBADD132PHZr, X86::VFMSUBADD132PHZm, 0 }, { X86::VFMSUBADD132PSYr, X86::VFMSUBADD132PSYm, 0 }, { X86::VFMSUBADD132PSZ128r, X86::VFMSUBADD132PSZ128m, 0 }, { X86::VFMSUBADD132PSZ256r, X86::VFMSUBADD132PSZ256m, 0 }, @@ -3448,6 +3490,9 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { { X86::VFMSUBADD213PDZ256r, X86::VFMSUBADD213PDZ256m, 0 }, { X86::VFMSUBADD213PDZr, X86::VFMSUBADD213PDZm, 0 }, { X86::VFMSUBADD213PDr, X86::VFMSUBADD213PDm, 0 }, + { X86::VFMSUBADD213PHZ128r, X86::VFMSUBADD213PHZ128m, 0 }, + { X86::VFMSUBADD213PHZ256r, X86::VFMSUBADD213PHZ256m, 0 }, + { X86::VFMSUBADD213PHZr, X86::VFMSUBADD213PHZm, 0 }, { X86::VFMSUBADD213PSYr, X86::VFMSUBADD213PSYm, 0 }, { X86::VFMSUBADD213PSZ128r, X86::VFMSUBADD213PSZ128m, 0 }, { X86::VFMSUBADD213PSZ256r, X86::VFMSUBADD213PSZ256m, 0 }, @@ -3458,6 +3503,9 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { { X86::VFMSUBADD231PDZ256r, X86::VFMSUBADD231PDZ256m, 0 }, { X86::VFMSUBADD231PDZr, X86::VFMSUBADD231PDZm, 0 }, { X86::VFMSUBADD231PDr, X86::VFMSUBADD231PDm, 0 }, + { X86::VFMSUBADD231PHZ128r, X86::VFMSUBADD231PHZ128m, 0 }, + { X86::VFMSUBADD231PHZ256r, X86::VFMSUBADD231PHZ256m, 0 }, + { X86::VFMSUBADD231PHZr, X86::VFMSUBADD231PHZm, 0 }, { X86::VFMSUBADD231PSYr, X86::VFMSUBADD231PSYm, 0 }, { X86::VFMSUBADD231PSZ128r, X86::VFMSUBADD231PSZ128m, 0 }, { X86::VFMSUBADD231PSZ256r, X86::VFMSUBADD231PSZ256m, 0 }, @@ -3480,6 +3528,9 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { { X86::VFNMADD132PDZ256r, X86::VFNMADD132PDZ256m, 0 }, { X86::VFNMADD132PDZr, X86::VFNMADD132PDZm, 0 }, { X86::VFNMADD132PDr, X86::VFNMADD132PDm, 0 }, + { X86::VFNMADD132PHZ128r, X86::VFNMADD132PHZ128m, 0 }, + { X86::VFNMADD132PHZ256r, X86::VFNMADD132PHZ256m, 0 }, + { X86::VFNMADD132PHZr, X86::VFNMADD132PHZm, 0 }, { X86::VFNMADD132PSYr, X86::VFNMADD132PSYm, 0 }, { X86::VFNMADD132PSZ128r, X86::VFNMADD132PSZ128m, 0 }, { X86::VFNMADD132PSZ256r, X86::VFNMADD132PSZ256m, 0 }, @@ -3489,6 +3540,8 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { { X86::VFNMADD132SDZr_Int, X86::VFNMADD132SDZm_Int, TB_NO_REVERSE }, { X86::VFNMADD132SDr, X86::VFNMADD132SDm, 0 }, { X86::VFNMADD132SDr_Int, X86::VFNMADD132SDm_Int, TB_NO_REVERSE }, + { X86::VFNMADD132SHZr, X86::VFNMADD132SHZm, 0 }, + { X86::VFNMADD132SHZr_Int, X86::VFNMADD132SHZm_Int, TB_NO_REVERSE }, { X86::VFNMADD132SSZr, X86::VFNMADD132SSZm, 0 }, { X86::VFNMADD132SSZr_Int, X86::VFNMADD132SSZm_Int, TB_NO_REVERSE }, { X86::VFNMADD132SSr, X86::VFNMADD132SSm, 0 }, @@ -3498,6 +3551,9 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = 
{ { X86::VFNMADD213PDZ256r, X86::VFNMADD213PDZ256m, 0 }, { X86::VFNMADD213PDZr, X86::VFNMADD213PDZm, 0 }, { X86::VFNMADD213PDr, X86::VFNMADD213PDm, 0 }, + { X86::VFNMADD213PHZ128r, X86::VFNMADD213PHZ128m, 0 }, + { X86::VFNMADD213PHZ256r, X86::VFNMADD213PHZ256m, 0 }, + { X86::VFNMADD213PHZr, X86::VFNMADD213PHZm, 0 }, { X86::VFNMADD213PSYr, X86::VFNMADD213PSYm, 0 }, { X86::VFNMADD213PSZ128r, X86::VFNMADD213PSZ128m, 0 }, { X86::VFNMADD213PSZ256r, X86::VFNMADD213PSZ256m, 0 }, @@ -3507,6 +3563,8 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { { X86::VFNMADD213SDZr_Int, X86::VFNMADD213SDZm_Int, TB_NO_REVERSE }, { X86::VFNMADD213SDr, X86::VFNMADD213SDm, 0 }, { X86::VFNMADD213SDr_Int, X86::VFNMADD213SDm_Int, TB_NO_REVERSE }, + { X86::VFNMADD213SHZr, X86::VFNMADD213SHZm, 0 }, + { X86::VFNMADD213SHZr_Int, X86::VFNMADD213SHZm_Int, TB_NO_REVERSE }, { X86::VFNMADD213SSZr, X86::VFNMADD213SSZm, 0 }, { X86::VFNMADD213SSZr_Int, X86::VFNMADD213SSZm_Int, TB_NO_REVERSE }, { X86::VFNMADD213SSr, X86::VFNMADD213SSm, 0 }, @@ -3516,6 +3574,9 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { { X86::VFNMADD231PDZ256r, X86::VFNMADD231PDZ256m, 0 }, { X86::VFNMADD231PDZr, X86::VFNMADD231PDZm, 0 }, { X86::VFNMADD231PDr, X86::VFNMADD231PDm, 0 }, + { X86::VFNMADD231PHZ128r, X86::VFNMADD231PHZ128m, 0 }, + { X86::VFNMADD231PHZ256r, X86::VFNMADD231PHZ256m, 0 }, + { X86::VFNMADD231PHZr, X86::VFNMADD231PHZm, 0 }, { X86::VFNMADD231PSYr, X86::VFNMADD231PSYm, 0 }, { X86::VFNMADD231PSZ128r, X86::VFNMADD231PSZ128m, 0 }, { X86::VFNMADD231PSZ256r, X86::VFNMADD231PSZ256m, 0 }, @@ -3525,6 +3586,8 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { { X86::VFNMADD231SDZr_Int, X86::VFNMADD231SDZm_Int, TB_NO_REVERSE }, { X86::VFNMADD231SDr, X86::VFNMADD231SDm, 0 }, { X86::VFNMADD231SDr_Int, X86::VFNMADD231SDm_Int, TB_NO_REVERSE }, + { X86::VFNMADD231SHZr, X86::VFNMADD231SHZm, 0 }, + { X86::VFNMADD231SHZr_Int, X86::VFNMADD231SHZm_Int, TB_NO_REVERSE }, { X86::VFNMADD231SSZr, X86::VFNMADD231SSZm, 0 }, { X86::VFNMADD231SSZr_Int, X86::VFNMADD231SSZm_Int, TB_NO_REVERSE }, { X86::VFNMADD231SSr, X86::VFNMADD231SSm, 0 }, @@ -3542,6 +3605,9 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { { X86::VFNMSUB132PDZ256r, X86::VFNMSUB132PDZ256m, 0 }, { X86::VFNMSUB132PDZr, X86::VFNMSUB132PDZm, 0 }, { X86::VFNMSUB132PDr, X86::VFNMSUB132PDm, 0 }, + { X86::VFNMSUB132PHZ128r, X86::VFNMSUB132PHZ128m, 0 }, + { X86::VFNMSUB132PHZ256r, X86::VFNMSUB132PHZ256m, 0 }, + { X86::VFNMSUB132PHZr, X86::VFNMSUB132PHZm, 0 }, { X86::VFNMSUB132PSYr, X86::VFNMSUB132PSYm, 0 }, { X86::VFNMSUB132PSZ128r, X86::VFNMSUB132PSZ128m, 0 }, { X86::VFNMSUB132PSZ256r, X86::VFNMSUB132PSZ256m, 0 }, @@ -3551,6 +3617,8 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { { X86::VFNMSUB132SDZr_Int, X86::VFNMSUB132SDZm_Int, TB_NO_REVERSE }, { X86::VFNMSUB132SDr, X86::VFNMSUB132SDm, 0 }, { X86::VFNMSUB132SDr_Int, X86::VFNMSUB132SDm_Int, TB_NO_REVERSE }, + { X86::VFNMSUB132SHZr, X86::VFNMSUB132SHZm, 0 }, + { X86::VFNMSUB132SHZr_Int, X86::VFNMSUB132SHZm_Int, TB_NO_REVERSE }, { X86::VFNMSUB132SSZr, X86::VFNMSUB132SSZm, 0 }, { X86::VFNMSUB132SSZr_Int, X86::VFNMSUB132SSZm_Int, TB_NO_REVERSE }, { X86::VFNMSUB132SSr, X86::VFNMSUB132SSm, 0 }, @@ -3560,6 +3628,9 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { { X86::VFNMSUB213PDZ256r, X86::VFNMSUB213PDZ256m, 0 }, { X86::VFNMSUB213PDZr, X86::VFNMSUB213PDZm, 0 }, { X86::VFNMSUB213PDr, X86::VFNMSUB213PDm, 0 }, + { X86::VFNMSUB213PHZ128r, X86::VFNMSUB213PHZ128m, 0 }, + { 
X86::VFNMSUB213PHZ256r, X86::VFNMSUB213PHZ256m, 0 }, + { X86::VFNMSUB213PHZr, X86::VFNMSUB213PHZm, 0 }, { X86::VFNMSUB213PSYr, X86::VFNMSUB213PSYm, 0 }, { X86::VFNMSUB213PSZ128r, X86::VFNMSUB213PSZ128m, 0 }, { X86::VFNMSUB213PSZ256r, X86::VFNMSUB213PSZ256m, 0 }, @@ -3569,6 +3640,8 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { { X86::VFNMSUB213SDZr_Int, X86::VFNMSUB213SDZm_Int, TB_NO_REVERSE }, { X86::VFNMSUB213SDr, X86::VFNMSUB213SDm, 0 }, { X86::VFNMSUB213SDr_Int, X86::VFNMSUB213SDm_Int, TB_NO_REVERSE }, + { X86::VFNMSUB213SHZr, X86::VFNMSUB213SHZm, 0 }, + { X86::VFNMSUB213SHZr_Int, X86::VFNMSUB213SHZm_Int, TB_NO_REVERSE }, { X86::VFNMSUB213SSZr, X86::VFNMSUB213SSZm, 0 }, { X86::VFNMSUB213SSZr_Int, X86::VFNMSUB213SSZm_Int, TB_NO_REVERSE }, { X86::VFNMSUB213SSr, X86::VFNMSUB213SSm, 0 }, @@ -3578,6 +3651,9 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { { X86::VFNMSUB231PDZ256r, X86::VFNMSUB231PDZ256m, 0 }, { X86::VFNMSUB231PDZr, X86::VFNMSUB231PDZm, 0 }, { X86::VFNMSUB231PDr, X86::VFNMSUB231PDm, 0 }, + { X86::VFNMSUB231PHZ128r, X86::VFNMSUB231PHZ128m, 0 }, + { X86::VFNMSUB231PHZ256r, X86::VFNMSUB231PHZ256m, 0 }, + { X86::VFNMSUB231PHZr, X86::VFNMSUB231PHZm, 0 }, { X86::VFNMSUB231PSYr, X86::VFNMSUB231PSYm, 0 }, { X86::VFNMSUB231PSZ128r, X86::VFNMSUB231PSZ128m, 0 }, { X86::VFNMSUB231PSZ256r, X86::VFNMSUB231PSZ256m, 0 }, @@ -3587,6 +3663,8 @@ static const X86MemoryFoldTableEntry MemoryFoldTable3[] = { { X86::VFNMSUB231SDZr_Int, X86::VFNMSUB231SDZm_Int, TB_NO_REVERSE }, { X86::VFNMSUB231SDr, X86::VFNMSUB231SDm, 0 }, { X86::VFNMSUB231SDr_Int, X86::VFNMSUB231SDm_Int, TB_NO_REVERSE }, + { X86::VFNMSUB231SHZr, X86::VFNMSUB231SHZm, 0 }, + { X86::VFNMSUB231SHZr_Int, X86::VFNMSUB231SHZm_Int, TB_NO_REVERSE }, { X86::VFNMSUB231SSZr, X86::VFNMSUB231SSZm, 0 }, { X86::VFNMSUB231SSZr_Int, X86::VFNMSUB231SSZm_Int, TB_NO_REVERSE }, { X86::VFNMSUB231SSr, X86::VFNMSUB231SSm, 0 }, @@ -4599,6 +4677,12 @@ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFMADD132PDZ256rkz, X86::VFMADD132PDZ256mkz, 0 }, { X86::VFMADD132PDZrk, X86::VFMADD132PDZmk, 0 }, { X86::VFMADD132PDZrkz, X86::VFMADD132PDZmkz, 0 }, + { X86::VFMADD132PHZ128rk, X86::VFMADD132PHZ128mk, 0 }, + { X86::VFMADD132PHZ128rkz, X86::VFMADD132PHZ128mkz, 0 }, + { X86::VFMADD132PHZ256rk, X86::VFMADD132PHZ256mk, 0 }, + { X86::VFMADD132PHZ256rkz, X86::VFMADD132PHZ256mkz, 0 }, + { X86::VFMADD132PHZrk, X86::VFMADD132PHZmk, 0 }, + { X86::VFMADD132PHZrkz, X86::VFMADD132PHZmkz, 0 }, { X86::VFMADD132PSZ128rk, X86::VFMADD132PSZ128mk, 0 }, { X86::VFMADD132PSZ128rkz, X86::VFMADD132PSZ128mkz, 0 }, { X86::VFMADD132PSZ256rk, X86::VFMADD132PSZ256mk, 0 }, @@ -4607,6 +4691,8 @@ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFMADD132PSZrkz, X86::VFMADD132PSZmkz, 0 }, { X86::VFMADD132SDZr_Intk, X86::VFMADD132SDZm_Intk, TB_NO_REVERSE }, { X86::VFMADD132SDZr_Intkz, X86::VFMADD132SDZm_Intkz, TB_NO_REVERSE }, + { X86::VFMADD132SHZr_Intk, X86::VFMADD132SHZm_Intk, TB_NO_REVERSE }, + { X86::VFMADD132SHZr_Intkz, X86::VFMADD132SHZm_Intkz, TB_NO_REVERSE }, { X86::VFMADD132SSZr_Intk, X86::VFMADD132SSZm_Intk, TB_NO_REVERSE }, { X86::VFMADD132SSZr_Intkz, X86::VFMADD132SSZm_Intkz, TB_NO_REVERSE }, { X86::VFMADD213PDZ128rk, X86::VFMADD213PDZ128mk, 0 }, @@ -4615,6 +4701,12 @@ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFMADD213PDZ256rkz, X86::VFMADD213PDZ256mkz, 0 }, { X86::VFMADD213PDZrk, X86::VFMADD213PDZmk, 0 }, { X86::VFMADD213PDZrkz, X86::VFMADD213PDZmkz, 0 }, + { X86::VFMADD213PHZ128rk, 
X86::VFMADD213PHZ128mk, 0 }, + { X86::VFMADD213PHZ128rkz, X86::VFMADD213PHZ128mkz, 0 }, + { X86::VFMADD213PHZ256rk, X86::VFMADD213PHZ256mk, 0 }, + { X86::VFMADD213PHZ256rkz, X86::VFMADD213PHZ256mkz, 0 }, + { X86::VFMADD213PHZrk, X86::VFMADD213PHZmk, 0 }, + { X86::VFMADD213PHZrkz, X86::VFMADD213PHZmkz, 0 }, { X86::VFMADD213PSZ128rk, X86::VFMADD213PSZ128mk, 0 }, { X86::VFMADD213PSZ128rkz, X86::VFMADD213PSZ128mkz, 0 }, { X86::VFMADD213PSZ256rk, X86::VFMADD213PSZ256mk, 0 }, @@ -4623,6 +4715,8 @@ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFMADD213PSZrkz, X86::VFMADD213PSZmkz, 0 }, { X86::VFMADD213SDZr_Intk, X86::VFMADD213SDZm_Intk, TB_NO_REVERSE }, { X86::VFMADD213SDZr_Intkz, X86::VFMADD213SDZm_Intkz, TB_NO_REVERSE }, + { X86::VFMADD213SHZr_Intk, X86::VFMADD213SHZm_Intk, TB_NO_REVERSE }, + { X86::VFMADD213SHZr_Intkz, X86::VFMADD213SHZm_Intkz, TB_NO_REVERSE }, { X86::VFMADD213SSZr_Intk, X86::VFMADD213SSZm_Intk, TB_NO_REVERSE }, { X86::VFMADD213SSZr_Intkz, X86::VFMADD213SSZm_Intkz, TB_NO_REVERSE }, { X86::VFMADD231PDZ128rk, X86::VFMADD231PDZ128mk, 0 }, @@ -4631,6 +4725,12 @@ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFMADD231PDZ256rkz, X86::VFMADD231PDZ256mkz, 0 }, { X86::VFMADD231PDZrk, X86::VFMADD231PDZmk, 0 }, { X86::VFMADD231PDZrkz, X86::VFMADD231PDZmkz, 0 }, + { X86::VFMADD231PHZ128rk, X86::VFMADD231PHZ128mk, 0 }, + { X86::VFMADD231PHZ128rkz, X86::VFMADD231PHZ128mkz, 0 }, + { X86::VFMADD231PHZ256rk, X86::VFMADD231PHZ256mk, 0 }, + { X86::VFMADD231PHZ256rkz, X86::VFMADD231PHZ256mkz, 0 }, + { X86::VFMADD231PHZrk, X86::VFMADD231PHZmk, 0 }, + { X86::VFMADD231PHZrkz, X86::VFMADD231PHZmkz, 0 }, { X86::VFMADD231PSZ128rk, X86::VFMADD231PSZ128mk, 0 }, { X86::VFMADD231PSZ128rkz, X86::VFMADD231PSZ128mkz, 0 }, { X86::VFMADD231PSZ256rk, X86::VFMADD231PSZ256mk, 0 }, @@ -4639,6 +4739,8 @@ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFMADD231PSZrkz, X86::VFMADD231PSZmkz, 0 }, { X86::VFMADD231SDZr_Intk, X86::VFMADD231SDZm_Intk, TB_NO_REVERSE }, { X86::VFMADD231SDZr_Intkz, X86::VFMADD231SDZm_Intkz, TB_NO_REVERSE }, + { X86::VFMADD231SHZr_Intk, X86::VFMADD231SHZm_Intk, TB_NO_REVERSE }, + { X86::VFMADD231SHZr_Intkz, X86::VFMADD231SHZm_Intkz, TB_NO_REVERSE }, { X86::VFMADD231SSZr_Intk, X86::VFMADD231SSZm_Intk, TB_NO_REVERSE }, { X86::VFMADD231SSZr_Intkz, X86::VFMADD231SSZm_Intkz, TB_NO_REVERSE }, { X86::VFMADDSUB132PDZ128rk, X86::VFMADDSUB132PDZ128mk, 0 }, @@ -4647,6 +4749,12 @@ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFMADDSUB132PDZ256rkz, X86::VFMADDSUB132PDZ256mkz, 0 }, { X86::VFMADDSUB132PDZrk, X86::VFMADDSUB132PDZmk, 0 }, { X86::VFMADDSUB132PDZrkz, X86::VFMADDSUB132PDZmkz, 0 }, + { X86::VFMADDSUB132PHZ128rk, X86::VFMADDSUB132PHZ128mk, 0 }, + { X86::VFMADDSUB132PHZ128rkz, X86::VFMADDSUB132PHZ128mkz, 0 }, + { X86::VFMADDSUB132PHZ256rk, X86::VFMADDSUB132PHZ256mk, 0 }, + { X86::VFMADDSUB132PHZ256rkz, X86::VFMADDSUB132PHZ256mkz, 0 }, + { X86::VFMADDSUB132PHZrk, X86::VFMADDSUB132PHZmk, 0 }, + { X86::VFMADDSUB132PHZrkz, X86::VFMADDSUB132PHZmkz, 0 }, { X86::VFMADDSUB132PSZ128rk, X86::VFMADDSUB132PSZ128mk, 0 }, { X86::VFMADDSUB132PSZ128rkz, X86::VFMADDSUB132PSZ128mkz, 0 }, { X86::VFMADDSUB132PSZ256rk, X86::VFMADDSUB132PSZ256mk, 0 }, @@ -4659,6 +4767,12 @@ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFMADDSUB213PDZ256rkz, X86::VFMADDSUB213PDZ256mkz, 0 }, { X86::VFMADDSUB213PDZrk, X86::VFMADDSUB213PDZmk, 0 }, { X86::VFMADDSUB213PDZrkz, X86::VFMADDSUB213PDZmkz, 0 }, + { X86::VFMADDSUB213PHZ128rk, 
X86::VFMADDSUB213PHZ128mk, 0 }, + { X86::VFMADDSUB213PHZ128rkz, X86::VFMADDSUB213PHZ128mkz, 0 }, + { X86::VFMADDSUB213PHZ256rk, X86::VFMADDSUB213PHZ256mk, 0 }, + { X86::VFMADDSUB213PHZ256rkz, X86::VFMADDSUB213PHZ256mkz, 0 }, + { X86::VFMADDSUB213PHZrk, X86::VFMADDSUB213PHZmk, 0 }, + { X86::VFMADDSUB213PHZrkz, X86::VFMADDSUB213PHZmkz, 0 }, { X86::VFMADDSUB213PSZ128rk, X86::VFMADDSUB213PSZ128mk, 0 }, { X86::VFMADDSUB213PSZ128rkz, X86::VFMADDSUB213PSZ128mkz, 0 }, { X86::VFMADDSUB213PSZ256rk, X86::VFMADDSUB213PSZ256mk, 0 }, @@ -4671,6 +4785,12 @@ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFMADDSUB231PDZ256rkz, X86::VFMADDSUB231PDZ256mkz, 0 }, { X86::VFMADDSUB231PDZrk, X86::VFMADDSUB231PDZmk, 0 }, { X86::VFMADDSUB231PDZrkz, X86::VFMADDSUB231PDZmkz, 0 }, + { X86::VFMADDSUB231PHZ128rk, X86::VFMADDSUB231PHZ128mk, 0 }, + { X86::VFMADDSUB231PHZ128rkz, X86::VFMADDSUB231PHZ128mkz, 0 }, + { X86::VFMADDSUB231PHZ256rk, X86::VFMADDSUB231PHZ256mk, 0 }, + { X86::VFMADDSUB231PHZ256rkz, X86::VFMADDSUB231PHZ256mkz, 0 }, + { X86::VFMADDSUB231PHZrk, X86::VFMADDSUB231PHZmk, 0 }, + { X86::VFMADDSUB231PHZrkz, X86::VFMADDSUB231PHZmkz, 0 }, { X86::VFMADDSUB231PSZ128rk, X86::VFMADDSUB231PSZ128mk, 0 }, { X86::VFMADDSUB231PSZ128rkz, X86::VFMADDSUB231PSZ128mkz, 0 }, { X86::VFMADDSUB231PSZ256rk, X86::VFMADDSUB231PSZ256mk, 0 }, @@ -4683,6 +4803,12 @@ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFMSUB132PDZ256rkz, X86::VFMSUB132PDZ256mkz, 0 }, { X86::VFMSUB132PDZrk, X86::VFMSUB132PDZmk, 0 }, { X86::VFMSUB132PDZrkz, X86::VFMSUB132PDZmkz, 0 }, + { X86::VFMSUB132PHZ128rk, X86::VFMSUB132PHZ128mk, 0 }, + { X86::VFMSUB132PHZ128rkz, X86::VFMSUB132PHZ128mkz, 0 }, + { X86::VFMSUB132PHZ256rk, X86::VFMSUB132PHZ256mk, 0 }, + { X86::VFMSUB132PHZ256rkz, X86::VFMSUB132PHZ256mkz, 0 }, + { X86::VFMSUB132PHZrk, X86::VFMSUB132PHZmk, 0 }, + { X86::VFMSUB132PHZrkz, X86::VFMSUB132PHZmkz, 0 }, { X86::VFMSUB132PSZ128rk, X86::VFMSUB132PSZ128mk, 0 }, { X86::VFMSUB132PSZ128rkz, X86::VFMSUB132PSZ128mkz, 0 }, { X86::VFMSUB132PSZ256rk, X86::VFMSUB132PSZ256mk, 0 }, @@ -4691,6 +4817,8 @@ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFMSUB132PSZrkz, X86::VFMSUB132PSZmkz, 0 }, { X86::VFMSUB132SDZr_Intk, X86::VFMSUB132SDZm_Intk, TB_NO_REVERSE }, { X86::VFMSUB132SDZr_Intkz, X86::VFMSUB132SDZm_Intkz, TB_NO_REVERSE }, + { X86::VFMSUB132SHZr_Intk, X86::VFMSUB132SHZm_Intk, TB_NO_REVERSE }, + { X86::VFMSUB132SHZr_Intkz, X86::VFMSUB132SHZm_Intkz, TB_NO_REVERSE }, { X86::VFMSUB132SSZr_Intk, X86::VFMSUB132SSZm_Intk, TB_NO_REVERSE }, { X86::VFMSUB132SSZr_Intkz, X86::VFMSUB132SSZm_Intkz, TB_NO_REVERSE }, { X86::VFMSUB213PDZ128rk, X86::VFMSUB213PDZ128mk, 0 }, @@ -4699,6 +4827,12 @@ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFMSUB213PDZ256rkz, X86::VFMSUB213PDZ256mkz, 0 }, { X86::VFMSUB213PDZrk, X86::VFMSUB213PDZmk, 0 }, { X86::VFMSUB213PDZrkz, X86::VFMSUB213PDZmkz, 0 }, + { X86::VFMSUB213PHZ128rk, X86::VFMSUB213PHZ128mk, 0 }, + { X86::VFMSUB213PHZ128rkz, X86::VFMSUB213PHZ128mkz, 0 }, + { X86::VFMSUB213PHZ256rk, X86::VFMSUB213PHZ256mk, 0 }, + { X86::VFMSUB213PHZ256rkz, X86::VFMSUB213PHZ256mkz, 0 }, + { X86::VFMSUB213PHZrk, X86::VFMSUB213PHZmk, 0 }, + { X86::VFMSUB213PHZrkz, X86::VFMSUB213PHZmkz, 0 }, { X86::VFMSUB213PSZ128rk, X86::VFMSUB213PSZ128mk, 0 }, { X86::VFMSUB213PSZ128rkz, X86::VFMSUB213PSZ128mkz, 0 }, { X86::VFMSUB213PSZ256rk, X86::VFMSUB213PSZ256mk, 0 }, @@ -4707,6 +4841,8 @@ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFMSUB213PSZrkz, 
X86::VFMSUB213PSZmkz, 0 }, { X86::VFMSUB213SDZr_Intk, X86::VFMSUB213SDZm_Intk, TB_NO_REVERSE }, { X86::VFMSUB213SDZr_Intkz, X86::VFMSUB213SDZm_Intkz, TB_NO_REVERSE }, + { X86::VFMSUB213SHZr_Intk, X86::VFMSUB213SHZm_Intk, TB_NO_REVERSE }, + { X86::VFMSUB213SHZr_Intkz, X86::VFMSUB213SHZm_Intkz, TB_NO_REVERSE }, { X86::VFMSUB213SSZr_Intk, X86::VFMSUB213SSZm_Intk, TB_NO_REVERSE }, { X86::VFMSUB213SSZr_Intkz, X86::VFMSUB213SSZm_Intkz, TB_NO_REVERSE }, { X86::VFMSUB231PDZ128rk, X86::VFMSUB231PDZ128mk, 0 }, @@ -4715,6 +4851,12 @@ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFMSUB231PDZ256rkz, X86::VFMSUB231PDZ256mkz, 0 }, { X86::VFMSUB231PDZrk, X86::VFMSUB231PDZmk, 0 }, { X86::VFMSUB231PDZrkz, X86::VFMSUB231PDZmkz, 0 }, + { X86::VFMSUB231PHZ128rk, X86::VFMSUB231PHZ128mk, 0 }, + { X86::VFMSUB231PHZ128rkz, X86::VFMSUB231PHZ128mkz, 0 }, + { X86::VFMSUB231PHZ256rk, X86::VFMSUB231PHZ256mk, 0 }, + { X86::VFMSUB231PHZ256rkz, X86::VFMSUB231PHZ256mkz, 0 }, + { X86::VFMSUB231PHZrk, X86::VFMSUB231PHZmk, 0 }, + { X86::VFMSUB231PHZrkz, X86::VFMSUB231PHZmkz, 0 }, { X86::VFMSUB231PSZ128rk, X86::VFMSUB231PSZ128mk, 0 }, { X86::VFMSUB231PSZ128rkz, X86::VFMSUB231PSZ128mkz, 0 }, { X86::VFMSUB231PSZ256rk, X86::VFMSUB231PSZ256mk, 0 }, @@ -4723,6 +4865,8 @@ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFMSUB231PSZrkz, X86::VFMSUB231PSZmkz, 0 }, { X86::VFMSUB231SDZr_Intk, X86::VFMSUB231SDZm_Intk, TB_NO_REVERSE }, { X86::VFMSUB231SDZr_Intkz, X86::VFMSUB231SDZm_Intkz, TB_NO_REVERSE }, + { X86::VFMSUB231SHZr_Intk, X86::VFMSUB231SHZm_Intk, TB_NO_REVERSE }, + { X86::VFMSUB231SHZr_Intkz, X86::VFMSUB231SHZm_Intkz, TB_NO_REVERSE }, { X86::VFMSUB231SSZr_Intk, X86::VFMSUB231SSZm_Intk, TB_NO_REVERSE }, { X86::VFMSUB231SSZr_Intkz, X86::VFMSUB231SSZm_Intkz, TB_NO_REVERSE }, { X86::VFMSUBADD132PDZ128rk, X86::VFMSUBADD132PDZ128mk, 0 }, @@ -4731,6 +4875,12 @@ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFMSUBADD132PDZ256rkz, X86::VFMSUBADD132PDZ256mkz, 0 }, { X86::VFMSUBADD132PDZrk, X86::VFMSUBADD132PDZmk, 0 }, { X86::VFMSUBADD132PDZrkz, X86::VFMSUBADD132PDZmkz, 0 }, + { X86::VFMSUBADD132PHZ128rk, X86::VFMSUBADD132PHZ128mk, 0 }, + { X86::VFMSUBADD132PHZ128rkz, X86::VFMSUBADD132PHZ128mkz, 0 }, + { X86::VFMSUBADD132PHZ256rk, X86::VFMSUBADD132PHZ256mk, 0 }, + { X86::VFMSUBADD132PHZ256rkz, X86::VFMSUBADD132PHZ256mkz, 0 }, + { X86::VFMSUBADD132PHZrk, X86::VFMSUBADD132PHZmk, 0 }, + { X86::VFMSUBADD132PHZrkz, X86::VFMSUBADD132PHZmkz, 0 }, { X86::VFMSUBADD132PSZ128rk, X86::VFMSUBADD132PSZ128mk, 0 }, { X86::VFMSUBADD132PSZ128rkz, X86::VFMSUBADD132PSZ128mkz, 0 }, { X86::VFMSUBADD132PSZ256rk, X86::VFMSUBADD132PSZ256mk, 0 }, @@ -4743,6 +4893,12 @@ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFMSUBADD213PDZ256rkz, X86::VFMSUBADD213PDZ256mkz, 0 }, { X86::VFMSUBADD213PDZrk, X86::VFMSUBADD213PDZmk, 0 }, { X86::VFMSUBADD213PDZrkz, X86::VFMSUBADD213PDZmkz, 0 }, + { X86::VFMSUBADD213PHZ128rk, X86::VFMSUBADD213PHZ128mk, 0 }, + { X86::VFMSUBADD213PHZ128rkz, X86::VFMSUBADD213PHZ128mkz, 0 }, + { X86::VFMSUBADD213PHZ256rk, X86::VFMSUBADD213PHZ256mk, 0 }, + { X86::VFMSUBADD213PHZ256rkz, X86::VFMSUBADD213PHZ256mkz, 0 }, + { X86::VFMSUBADD213PHZrk, X86::VFMSUBADD213PHZmk, 0 }, + { X86::VFMSUBADD213PHZrkz, X86::VFMSUBADD213PHZmkz, 0 }, { X86::VFMSUBADD213PSZ128rk, X86::VFMSUBADD213PSZ128mk, 0 }, { X86::VFMSUBADD213PSZ128rkz, X86::VFMSUBADD213PSZ128mkz, 0 }, { X86::VFMSUBADD213PSZ256rk, X86::VFMSUBADD213PSZ256mk, 0 }, @@ -4755,6 +4911,12 @@ static const 
X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFMSUBADD231PDZ256rkz, X86::VFMSUBADD231PDZ256mkz, 0 }, { X86::VFMSUBADD231PDZrk, X86::VFMSUBADD231PDZmk, 0 }, { X86::VFMSUBADD231PDZrkz, X86::VFMSUBADD231PDZmkz, 0 }, + { X86::VFMSUBADD231PHZ128rk, X86::VFMSUBADD231PHZ128mk, 0 }, + { X86::VFMSUBADD231PHZ128rkz, X86::VFMSUBADD231PHZ128mkz, 0 }, + { X86::VFMSUBADD231PHZ256rk, X86::VFMSUBADD231PHZ256mk, 0 }, + { X86::VFMSUBADD231PHZ256rkz, X86::VFMSUBADD231PHZ256mkz, 0 }, + { X86::VFMSUBADD231PHZrk, X86::VFMSUBADD231PHZmk, 0 }, + { X86::VFMSUBADD231PHZrkz, X86::VFMSUBADD231PHZmkz, 0 }, { X86::VFMSUBADD231PSZ128rk, X86::VFMSUBADD231PSZ128mk, 0 }, { X86::VFMSUBADD231PSZ128rkz, X86::VFMSUBADD231PSZ128mkz, 0 }, { X86::VFMSUBADD231PSZ256rk, X86::VFMSUBADD231PSZ256mk, 0 }, @@ -4767,6 +4929,12 @@ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFNMADD132PDZ256rkz, X86::VFNMADD132PDZ256mkz, 0 }, { X86::VFNMADD132PDZrk, X86::VFNMADD132PDZmk, 0 }, { X86::VFNMADD132PDZrkz, X86::VFNMADD132PDZmkz, 0 }, + { X86::VFNMADD132PHZ128rk, X86::VFNMADD132PHZ128mk, 0 }, + { X86::VFNMADD132PHZ128rkz, X86::VFNMADD132PHZ128mkz, 0 }, + { X86::VFNMADD132PHZ256rk, X86::VFNMADD132PHZ256mk, 0 }, + { X86::VFNMADD132PHZ256rkz, X86::VFNMADD132PHZ256mkz, 0 }, + { X86::VFNMADD132PHZrk, X86::VFNMADD132PHZmk, 0 }, + { X86::VFNMADD132PHZrkz, X86::VFNMADD132PHZmkz, 0 }, { X86::VFNMADD132PSZ128rk, X86::VFNMADD132PSZ128mk, 0 }, { X86::VFNMADD132PSZ128rkz, X86::VFNMADD132PSZ128mkz, 0 }, { X86::VFNMADD132PSZ256rk, X86::VFNMADD132PSZ256mk, 0 }, @@ -4775,6 +4943,8 @@ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFNMADD132PSZrkz, X86::VFNMADD132PSZmkz, 0 }, { X86::VFNMADD132SDZr_Intk, X86::VFNMADD132SDZm_Intk, TB_NO_REVERSE }, { X86::VFNMADD132SDZr_Intkz, X86::VFNMADD132SDZm_Intkz, TB_NO_REVERSE }, + { X86::VFNMADD132SHZr_Intk, X86::VFNMADD132SHZm_Intk, TB_NO_REVERSE }, + { X86::VFNMADD132SHZr_Intkz, X86::VFNMADD132SHZm_Intkz, TB_NO_REVERSE }, { X86::VFNMADD132SSZr_Intk, X86::VFNMADD132SSZm_Intk, TB_NO_REVERSE }, { X86::VFNMADD132SSZr_Intkz, X86::VFNMADD132SSZm_Intkz, TB_NO_REVERSE }, { X86::VFNMADD213PDZ128rk, X86::VFNMADD213PDZ128mk, 0 }, @@ -4783,6 +4953,12 @@ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFNMADD213PDZ256rkz, X86::VFNMADD213PDZ256mkz, 0 }, { X86::VFNMADD213PDZrk, X86::VFNMADD213PDZmk, 0 }, { X86::VFNMADD213PDZrkz, X86::VFNMADD213PDZmkz, 0 }, + { X86::VFNMADD213PHZ128rk, X86::VFNMADD213PHZ128mk, 0 }, + { X86::VFNMADD213PHZ128rkz, X86::VFNMADD213PHZ128mkz, 0 }, + { X86::VFNMADD213PHZ256rk, X86::VFNMADD213PHZ256mk, 0 }, + { X86::VFNMADD213PHZ256rkz, X86::VFNMADD213PHZ256mkz, 0 }, + { X86::VFNMADD213PHZrk, X86::VFNMADD213PHZmk, 0 }, + { X86::VFNMADD213PHZrkz, X86::VFNMADD213PHZmkz, 0 }, { X86::VFNMADD213PSZ128rk, X86::VFNMADD213PSZ128mk, 0 }, { X86::VFNMADD213PSZ128rkz, X86::VFNMADD213PSZ128mkz, 0 }, { X86::VFNMADD213PSZ256rk, X86::VFNMADD213PSZ256mk, 0 }, @@ -4791,6 +4967,8 @@ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFNMADD213PSZrkz, X86::VFNMADD213PSZmkz, 0 }, { X86::VFNMADD213SDZr_Intk, X86::VFNMADD213SDZm_Intk, TB_NO_REVERSE }, { X86::VFNMADD213SDZr_Intkz, X86::VFNMADD213SDZm_Intkz, TB_NO_REVERSE }, + { X86::VFNMADD213SHZr_Intk, X86::VFNMADD213SHZm_Intk, TB_NO_REVERSE }, + { X86::VFNMADD213SHZr_Intkz, X86::VFNMADD213SHZm_Intkz, TB_NO_REVERSE }, { X86::VFNMADD213SSZr_Intk, X86::VFNMADD213SSZm_Intk, TB_NO_REVERSE }, { X86::VFNMADD213SSZr_Intkz, X86::VFNMADD213SSZm_Intkz, TB_NO_REVERSE }, { X86::VFNMADD231PDZ128rk, 
X86::VFNMADD231PDZ128mk, 0 }, @@ -4799,6 +4977,12 @@ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFNMADD231PDZ256rkz, X86::VFNMADD231PDZ256mkz, 0 }, { X86::VFNMADD231PDZrk, X86::VFNMADD231PDZmk, 0 }, { X86::VFNMADD231PDZrkz, X86::VFNMADD231PDZmkz, 0 }, + { X86::VFNMADD231PHZ128rk, X86::VFNMADD231PHZ128mk, 0 }, + { X86::VFNMADD231PHZ128rkz, X86::VFNMADD231PHZ128mkz, 0 }, + { X86::VFNMADD231PHZ256rk, X86::VFNMADD231PHZ256mk, 0 }, + { X86::VFNMADD231PHZ256rkz, X86::VFNMADD231PHZ256mkz, 0 }, + { X86::VFNMADD231PHZrk, X86::VFNMADD231PHZmk, 0 }, + { X86::VFNMADD231PHZrkz, X86::VFNMADD231PHZmkz, 0 }, { X86::VFNMADD231PSZ128rk, X86::VFNMADD231PSZ128mk, 0 }, { X86::VFNMADD231PSZ128rkz, X86::VFNMADD231PSZ128mkz, 0 }, { X86::VFNMADD231PSZ256rk, X86::VFNMADD231PSZ256mk, 0 }, @@ -4807,6 +4991,8 @@ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFNMADD231PSZrkz, X86::VFNMADD231PSZmkz, 0 }, { X86::VFNMADD231SDZr_Intk, X86::VFNMADD231SDZm_Intk, TB_NO_REVERSE }, { X86::VFNMADD231SDZr_Intkz, X86::VFNMADD231SDZm_Intkz, TB_NO_REVERSE }, + { X86::VFNMADD231SHZr_Intk, X86::VFNMADD231SHZm_Intk, TB_NO_REVERSE }, + { X86::VFNMADD231SHZr_Intkz, X86::VFNMADD231SHZm_Intkz, TB_NO_REVERSE }, { X86::VFNMADD231SSZr_Intk, X86::VFNMADD231SSZm_Intk, TB_NO_REVERSE }, { X86::VFNMADD231SSZr_Intkz, X86::VFNMADD231SSZm_Intkz, TB_NO_REVERSE }, { X86::VFNMSUB132PDZ128rk, X86::VFNMSUB132PDZ128mk, 0 }, @@ -4815,6 +5001,12 @@ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFNMSUB132PDZ256rkz, X86::VFNMSUB132PDZ256mkz, 0 }, { X86::VFNMSUB132PDZrk, X86::VFNMSUB132PDZmk, 0 }, { X86::VFNMSUB132PDZrkz, X86::VFNMSUB132PDZmkz, 0 }, + { X86::VFNMSUB132PHZ128rk, X86::VFNMSUB132PHZ128mk, 0 }, + { X86::VFNMSUB132PHZ128rkz, X86::VFNMSUB132PHZ128mkz, 0 }, + { X86::VFNMSUB132PHZ256rk, X86::VFNMSUB132PHZ256mk, 0 }, + { X86::VFNMSUB132PHZ256rkz, X86::VFNMSUB132PHZ256mkz, 0 }, + { X86::VFNMSUB132PHZrk, X86::VFNMSUB132PHZmk, 0 }, + { X86::VFNMSUB132PHZrkz, X86::VFNMSUB132PHZmkz, 0 }, { X86::VFNMSUB132PSZ128rk, X86::VFNMSUB132PSZ128mk, 0 }, { X86::VFNMSUB132PSZ128rkz, X86::VFNMSUB132PSZ128mkz, 0 }, { X86::VFNMSUB132PSZ256rk, X86::VFNMSUB132PSZ256mk, 0 }, @@ -4823,6 +5015,8 @@ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFNMSUB132PSZrkz, X86::VFNMSUB132PSZmkz, 0 }, { X86::VFNMSUB132SDZr_Intk, X86::VFNMSUB132SDZm_Intk, TB_NO_REVERSE }, { X86::VFNMSUB132SDZr_Intkz, X86::VFNMSUB132SDZm_Intkz, TB_NO_REVERSE }, + { X86::VFNMSUB132SHZr_Intk, X86::VFNMSUB132SHZm_Intk, TB_NO_REVERSE }, + { X86::VFNMSUB132SHZr_Intkz, X86::VFNMSUB132SHZm_Intkz, TB_NO_REVERSE }, { X86::VFNMSUB132SSZr_Intk, X86::VFNMSUB132SSZm_Intk, TB_NO_REVERSE }, { X86::VFNMSUB132SSZr_Intkz, X86::VFNMSUB132SSZm_Intkz, TB_NO_REVERSE }, { X86::VFNMSUB213PDZ128rk, X86::VFNMSUB213PDZ128mk, 0 }, @@ -4831,6 +5025,12 @@ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFNMSUB213PDZ256rkz, X86::VFNMSUB213PDZ256mkz, 0 }, { X86::VFNMSUB213PDZrk, X86::VFNMSUB213PDZmk, 0 }, { X86::VFNMSUB213PDZrkz, X86::VFNMSUB213PDZmkz, 0 }, + { X86::VFNMSUB213PHZ128rk, X86::VFNMSUB213PHZ128mk, 0 }, + { X86::VFNMSUB213PHZ128rkz, X86::VFNMSUB213PHZ128mkz, 0 }, + { X86::VFNMSUB213PHZ256rk, X86::VFNMSUB213PHZ256mk, 0 }, + { X86::VFNMSUB213PHZ256rkz, X86::VFNMSUB213PHZ256mkz, 0 }, + { X86::VFNMSUB213PHZrk, X86::VFNMSUB213PHZmk, 0 }, + { X86::VFNMSUB213PHZrkz, X86::VFNMSUB213PHZmkz, 0 }, { X86::VFNMSUB213PSZ128rk, X86::VFNMSUB213PSZ128mk, 0 }, { X86::VFNMSUB213PSZ128rkz, X86::VFNMSUB213PSZ128mkz, 0 }, { 
X86::VFNMSUB213PSZ256rk, X86::VFNMSUB213PSZ256mk, 0 }, @@ -4839,6 +5039,8 @@ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFNMSUB213PSZrkz, X86::VFNMSUB213PSZmkz, 0 }, { X86::VFNMSUB213SDZr_Intk, X86::VFNMSUB213SDZm_Intk, TB_NO_REVERSE }, { X86::VFNMSUB213SDZr_Intkz, X86::VFNMSUB213SDZm_Intkz, TB_NO_REVERSE }, + { X86::VFNMSUB213SHZr_Intk, X86::VFNMSUB213SHZm_Intk, TB_NO_REVERSE }, + { X86::VFNMSUB213SHZr_Intkz, X86::VFNMSUB213SHZm_Intkz, TB_NO_REVERSE }, { X86::VFNMSUB213SSZr_Intk, X86::VFNMSUB213SSZm_Intk, TB_NO_REVERSE }, { X86::VFNMSUB213SSZr_Intkz, X86::VFNMSUB213SSZm_Intkz, TB_NO_REVERSE }, { X86::VFNMSUB231PDZ128rk, X86::VFNMSUB231PDZ128mk, 0 }, @@ -4847,6 +5049,12 @@ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFNMSUB231PDZ256rkz, X86::VFNMSUB231PDZ256mkz, 0 }, { X86::VFNMSUB231PDZrk, X86::VFNMSUB231PDZmk, 0 }, { X86::VFNMSUB231PDZrkz, X86::VFNMSUB231PDZmkz, 0 }, + { X86::VFNMSUB231PHZ128rk, X86::VFNMSUB231PHZ128mk, 0 }, + { X86::VFNMSUB231PHZ128rkz, X86::VFNMSUB231PHZ128mkz, 0 }, + { X86::VFNMSUB231PHZ256rk, X86::VFNMSUB231PHZ256mk, 0 }, + { X86::VFNMSUB231PHZ256rkz, X86::VFNMSUB231PHZ256mkz, 0 }, + { X86::VFNMSUB231PHZrk, X86::VFNMSUB231PHZmk, 0 }, + { X86::VFNMSUB231PHZrkz, X86::VFNMSUB231PHZmkz, 0 }, { X86::VFNMSUB231PSZ128rk, X86::VFNMSUB231PSZ128mk, 0 }, { X86::VFNMSUB231PSZ128rkz, X86::VFNMSUB231PSZ128mkz, 0 }, { X86::VFNMSUB231PSZ256rk, X86::VFNMSUB231PSZ256mk, 0 }, @@ -4855,6 +5063,8 @@ static const X86MemoryFoldTableEntry MemoryFoldTable4[] = { { X86::VFNMSUB231PSZrkz, X86::VFNMSUB231PSZmkz, 0 }, { X86::VFNMSUB231SDZr_Intk, X86::VFNMSUB231SDZm_Intk, TB_NO_REVERSE }, { X86::VFNMSUB231SDZr_Intkz, X86::VFNMSUB231SDZm_Intkz, TB_NO_REVERSE }, + { X86::VFNMSUB231SHZr_Intk, X86::VFNMSUB231SHZm_Intk, TB_NO_REVERSE }, + { X86::VFNMSUB231SHZr_Intkz, X86::VFNMSUB231SHZm_Intkz, TB_NO_REVERSE }, { X86::VFNMSUB231SSZr_Intk, X86::VFNMSUB231SSZm_Intk, TB_NO_REVERSE }, { X86::VFNMSUB231SSZr_Intkz, X86::VFNMSUB231SSZm_Intkz, TB_NO_REVERSE }, { X86::VGETEXPSDZrk, X86::VGETEXPSDZmk, TB_NO_REVERSE }, diff --git a/llvm/lib/Target/X86/X86InstrFormats.td b/llvm/lib/Target/X86/X86InstrFormats.td index 6d6a2d6ad099a..1949c9c1a4fb6 100644 --- a/llvm/lib/Target/X86/X86InstrFormats.td +++ b/llvm/lib/Target/X86/X86InstrFormats.td @@ -882,7 +882,6 @@ class AVX512FMA3S<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag>pattern> : I<o, F, outs, ins, asm, pattern>, T8PD, EVEX_4V, Requires<[HasAVX512]>; -class AVX512FMA3Base : T8PD, EVEX_4V; class AVX512<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag>pattern> diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp index 7f0e151b9eba2..baa8f5d7222d9 100644 --- a/llvm/lib/Target/X86/X86InstrInfo.cpp +++ b/llvm/lib/Target/X86/X86InstrInfo.cpp @@ -6137,6 +6137,24 @@ static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI, case X86::VMINSHZrr_Intk: case X86::VMINSHZrr_Intkz: case X86::VMULSHZrr_Intk: case X86::VMULSHZrr_Intkz: case X86::VSUBSHZrr_Intk: case X86::VSUBSHZrr_Intkz: + case X86::VFMADD132SHZr_Int: case X86::VFNMADD132SHZr_Int: + case X86::VFMADD213SHZr_Int: case X86::VFNMADD213SHZr_Int: + case X86::VFMADD231SHZr_Int: case X86::VFNMADD231SHZr_Int: + case X86::VFMSUB132SHZr_Int: case X86::VFNMSUB132SHZr_Int: + case X86::VFMSUB213SHZr_Int: case X86::VFNMSUB213SHZr_Int: + case X86::VFMSUB231SHZr_Int: case X86::VFNMSUB231SHZr_Int: + case X86::VFMADD132SHZr_Intk: case X86::VFNMADD132SHZr_Intk: + case X86::VFMADD213SHZr_Intk: case X86::VFNMADD213SHZr_Intk: + case X86::VFMADD231SHZr_Intk: case 
X86::VFNMADD231SHZr_Intk: + case X86::VFMSUB132SHZr_Intk: case X86::VFNMSUB132SHZr_Intk: + case X86::VFMSUB213SHZr_Intk: case X86::VFNMSUB213SHZr_Intk: + case X86::VFMSUB231SHZr_Intk: case X86::VFNMSUB231SHZr_Intk: + case X86::VFMADD132SHZr_Intkz: case X86::VFNMADD132SHZr_Intkz: + case X86::VFMADD213SHZr_Intkz: case X86::VFNMADD213SHZr_Intkz: + case X86::VFMADD231SHZr_Intkz: case X86::VFNMADD231SHZr_Intkz: + case X86::VFMSUB132SHZr_Intkz: case X86::VFNMSUB132SHZr_Intkz: + case X86::VFMSUB213SHZr_Intkz: case X86::VFNMSUB213SHZr_Intkz: + case X86::VFMSUB231SHZr_Intkz: case X86::VFNMSUB231SHZr_Intkz: return false; default: return true; diff --git a/llvm/lib/Target/X86/X86IntrinsicsInfo.h b/llvm/lib/Target/X86/X86IntrinsicsInfo.h index efc4811084f94..b0e1e808a5369 100644 --- a/llvm/lib/Target/X86/X86IntrinsicsInfo.h +++ b/llvm/lib/Target/X86/X86IntrinsicsInfo.h @@ -1187,6 +1187,12 @@ static const IntrinsicData IntrinsicsWithoutChain[] = { X86ISD::SCALAR_UINT_TO_FP, X86ISD::SCALAR_UINT_TO_FP_RND), X86_INTRINSIC_DATA(avx512fp16_vcvtusi642sh, INTR_TYPE_2OP, X86ISD::SCALAR_UINT_TO_FP, X86ISD::SCALAR_UINT_TO_FP_RND), + X86_INTRINSIC_DATA(avx512fp16_vfmadd_f16, INTR_TYPE_3OP, ISD::FMA, X86ISD::FMADD_RND), + X86_INTRINSIC_DATA(avx512fp16_vfmadd_ph_512, INTR_TYPE_3OP, ISD::FMA, X86ISD::FMADD_RND), + X86_INTRINSIC_DATA(avx512fp16_vfmaddsub_ph_128, INTR_TYPE_3OP, X86ISD::FMADDSUB, 0), + X86_INTRINSIC_DATA(avx512fp16_vfmaddsub_ph_256, INTR_TYPE_3OP, X86ISD::FMADDSUB, 0), + X86_INTRINSIC_DATA(avx512fp16_vfmaddsub_ph_512, INTR_TYPE_3OP, X86ISD::FMADDSUB, + X86ISD::FMADDSUB_RND), X86_INTRINSIC_DATA(bmi_bextr_32, INTR_TYPE_2OP, X86ISD::BEXTR, 0), X86_INTRINSIC_DATA(bmi_bextr_64, INTR_TYPE_2OP, X86ISD::BEXTR, 0), X86_INTRINSIC_DATA(bmi_bzhi_32, INTR_TYPE_2OP, X86ISD::BZHI, 0), diff --git a/llvm/test/CodeGen/X86/avx512fp16-fma-commute.ll b/llvm/test/CodeGen/X86/avx512fp16-fma-commute.ll new file mode 100644 index 0000000000000..3d03494273742 --- /dev/null +++ b/llvm/test/CodeGen/X86/avx512fp16-fma-commute.ll @@ -0,0 +1,1363 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s --mtriple=x86_64-unknown-unknown -mattr=avx512fp16,avx512vl | FileCheck %s + +declare half @llvm.fma.f16(half, half, half) +declare <8 x half> @llvm.fma.v8f16(<8 x half>, <8 x half>, <8 x half>) +declare <16 x half> @llvm.fma.v16f16(<16 x half>, <16 x half>, <16 x half>) +declare <32 x half> @llvm.fma.v32f16(<32 x half>, <32 x half>, <32 x half>) + +define half @fma_123_f16(half %x, half %y, half %z) { +; CHECK-LABEL: fma_123_f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213sh %xmm2, %xmm1, %xmm0 +; CHECK-NEXT: retq + %a = call half @llvm.fma.f16(half %x, half %y, half %z) + ret half %a +} + +define half @fma_213_f16(half %x, half %y, half %z) { +; CHECK-LABEL: fma_213_f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213sh %xmm2, %xmm1, %xmm0 +; CHECK-NEXT: retq + %a = call half @llvm.fma.f16(half %y, half %x, half %z) + ret half %a +} + +define half @fma_231_f16(half %x, half %y, half %z) { +; CHECK-LABEL: fma_231_f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd231sh %xmm1, %xmm2, %xmm0 +; CHECK-NEXT: retq + %a = call half @llvm.fma.f16(half %y, half %z, half %x) + ret half %a +} + +define half @fma_321_f16(half %x, half %y, half %z) { +; CHECK-LABEL: fma_321_f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd231sh %xmm1, %xmm2, %xmm0 +; CHECK-NEXT: retq + %a = call half @llvm.fma.f16(half %z, half %y, half %x) + ret half %a +} + +define half @fma_132_f16(half %x, half %y, half %z) { +; CHECK-LABEL: 
fma_132_f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213sh %xmm1, %xmm2, %xmm0 +; CHECK-NEXT: retq + %a = call half @llvm.fma.f16(half %x, half %z, half %y) + ret half %a +} + +define half @fma_312_f16(half %x, half %y, half %z) { +; CHECK-LABEL: fma_312_f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213sh %xmm1, %xmm2, %xmm0 +; CHECK-NEXT: retq + %a = call half @llvm.fma.f16(half %z, half %x, half %y) + ret half %a +} + +define half @fma_load_123_f16(half %x, half %y, half* %zp) { +; CHECK-LABEL: fma_load_123_f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213sh (%rdi), %xmm1, %xmm0 +; CHECK-NEXT: retq + %z = load half, half* %zp + %a = call half @llvm.fma.f16(half %x, half %y, half %z) + ret half %a +} + +define half @fma_load_213_f16(half %x, half %y, half* %zp) { +; CHECK-LABEL: fma_load_213_f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213sh (%rdi), %xmm1, %xmm0 +; CHECK-NEXT: retq + %z = load half, half* %zp + %a = call half @llvm.fma.f16(half %y, half %x, half %z) + ret half %a +} + +define half @fma_load_231_f16(half %x, half %y, half* %zp) { +; CHECK-LABEL: fma_load_231_f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd231sh (%rdi), %xmm1, %xmm0 +; CHECK-NEXT: retq + %z = load half, half* %zp + %a = call half @llvm.fma.f16(half %y, half %z, half %x) + ret half %a +} + +define half @fma_load_321_f16(half %x, half %y, half* %zp) { +; CHECK-LABEL: fma_load_321_f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd231sh (%rdi), %xmm1, %xmm0 +; CHECK-NEXT: retq + %z = load half, half* %zp + %a = call half @llvm.fma.f16(half %z, half %y, half %x) + ret half %a +} + +define half @fma_load_132_f16(half %x, half %y, half* %zp) { +; CHECK-LABEL: fma_load_132_f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd132sh (%rdi), %xmm1, %xmm0 +; CHECK-NEXT: retq + %z = load half, half* %zp + %a = call half @llvm.fma.f16(half %x, half %z, half %y) + ret half %a +} + +define half @fma_load_312_f16(half %x, half %y, half* %zp) { +; CHECK-LABEL: fma_load_312_f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd132sh (%rdi), %xmm1, %xmm0 +; CHECK-NEXT: retq + %z = load half, half* %zp + %a = call half @llvm.fma.f16(half %z, half %x, half %y) + ret half %a +} + +define <8 x half> @fma_123_v8f16(<8 x half> %x, <8 x half> %y, <8 x half> %z) { +; CHECK-LABEL: fma_123_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213ph %xmm2, %xmm1, %xmm0 +; CHECK-NEXT: retq + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %x, <8 x half> %y, <8 x half> %z) + ret <8 x half> %a +} + +define <8 x half> @fma_213_v8f16(<8 x half> %x, <8 x half> %y, <8 x half> %z) { +; CHECK-LABEL: fma_213_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213ph %xmm2, %xmm1, %xmm0 +; CHECK-NEXT: retq + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %y, <8 x half> %x, <8 x half> %z) + ret <8 x half> %a +} + +define <8 x half> @fma_231_v8f16(<8 x half> %x, <8 x half> %y, <8 x half> %z) { +; CHECK-LABEL: fma_231_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd231ph %xmm1, %xmm2, %xmm0 +; CHECK-NEXT: retq + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %y, <8 x half> %z, <8 x half> %x) + ret <8 x half> %a +} + +define <8 x half> @fma_321_v8f16(<8 x half> %x, <8 x half> %y, <8 x half> %z) { +; CHECK-LABEL: fma_321_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd231ph %xmm1, %xmm2, %xmm0 +; CHECK-NEXT: retq + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %z, <8 x half> %y, <8 x half> %x) + ret <8 x half> %a +} + +define <8 x half> @fma_132_v8f16(<8 x half> %x, <8 x half> %y, <8 x half> %z) { +; CHECK-LABEL: fma_132_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213ph %xmm1, %xmm2, 
%xmm0 +; CHECK-NEXT: retq + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %x, <8 x half> %z, <8 x half> %y) + ret <8 x half> %a +} + +define <8 x half> @fma_312_v8f16(<8 x half> %x, <8 x half> %y, <8 x half> %z) { +; CHECK-LABEL: fma_312_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213ph %xmm1, %xmm2, %xmm0 +; CHECK-NEXT: retq + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %z, <8 x half> %x, <8 x half> %y) + ret <8 x half> %a +} + +define <8 x half> @fma_load_123_v8f16(<8 x half> %x, <8 x half> %y, <8 x half>* %zp) { +; CHECK-LABEL: fma_load_123_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213ph (%rdi), %xmm1, %xmm0 +; CHECK-NEXT: retq + %z = load <8 x half>, <8 x half>* %zp + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %x, <8 x half> %y, <8 x half> %z) + ret <8 x half> %a +} + +define <8 x half> @fma_load_213_v8f16(<8 x half> %x, <8 x half> %y, <8 x half>* %zp) { +; CHECK-LABEL: fma_load_213_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213ph (%rdi), %xmm1, %xmm0 +; CHECK-NEXT: retq + %z = load <8 x half>, <8 x half>* %zp + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %y, <8 x half> %x, <8 x half> %z) + ret <8 x half> %a +} + +define <8 x half> @fma_load_231_v8f16(<8 x half> %x, <8 x half> %y, <8 x half>* %zp) { +; CHECK-LABEL: fma_load_231_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd231ph (%rdi), %xmm1, %xmm0 +; CHECK-NEXT: retq + %z = load <8 x half>, <8 x half>* %zp + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %y, <8 x half> %z, <8 x half> %x) + ret <8 x half> %a +} + +define <8 x half> @fma_load_321_v8f16(<8 x half> %x, <8 x half> %y, <8 x half>* %zp) { +; CHECK-LABEL: fma_load_321_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd231ph (%rdi), %xmm1, %xmm0 +; CHECK-NEXT: retq + %z = load <8 x half>, <8 x half>* %zp + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %z, <8 x half> %y, <8 x half> %x) + ret <8 x half> %a +} + +define <8 x half> @fma_load_132_v8f16(<8 x half> %x, <8 x half> %y, <8 x half>* %zp) { +; CHECK-LABEL: fma_load_132_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd132ph (%rdi), %xmm1, %xmm0 +; CHECK-NEXT: retq + %z = load <8 x half>, <8 x half>* %zp + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %x, <8 x half> %z, <8 x half> %y) + ret <8 x half> %a +} + +define <8 x half> @fma_load_312_v8f16(<8 x half> %x, <8 x half> %y, <8 x half>* %zp) { +; CHECK-LABEL: fma_load_312_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd132ph (%rdi), %xmm1, %xmm0 +; CHECK-NEXT: retq + %z = load <8 x half>, <8 x half>* %zp + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %z, <8 x half> %x, <8 x half> %y) + ret <8 x half> %a +} + +define <8 x half> @fma_mask_123_v8f16(<8 x half> %x, <8 x half> %y, <8 x half> %z, i8 %mask) { +; CHECK-LABEL: fma_mask_123_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd132ph %xmm1, %xmm2, %xmm0 {%k1} +; CHECK-NEXT: retq + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %x, <8 x half> %y, <8 x half> %z) + %b = bitcast i8 %mask to <8 x i1> + %c = select <8 x i1> %b, <8 x half> %a, <8 x half> %x + ret <8 x half> %c +} + +define <8 x half> @fma_mask_213_v8f16(<8 x half> %x, <8 x half> %y, <8 x half> %z, i8 %mask) { +; CHECK-LABEL: fma_mask_213_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd213ph %xmm2, %xmm1, %xmm0 {%k1} +; CHECK-NEXT: retq + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %y, <8 x half> %x, <8 x half> %z) + %b = bitcast i8 %mask to <8 x i1> + %c = select <8 x i1> %b, <8 x half> %a, <8 x half> %x + ret <8 x half> %c +} + +define <8 x half> 
@fma_mask_231_v8f16(<8 x half> %x, <8 x half> %y, <8 x half> %z, i8 %mask) { +; CHECK-LABEL: fma_mask_231_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd231ph %xmm2, %xmm1, %xmm0 {%k1} +; CHECK-NEXT: retq + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %y, <8 x half> %z, <8 x half> %x) + %b = bitcast i8 %mask to <8 x i1> + %c = select <8 x i1> %b, <8 x half> %a, <8 x half> %x + ret <8 x half> %c +} + +define <8 x half> @fma_mask_321_v8f16(<8 x half> %x, <8 x half> %y, <8 x half> %z, i8 %mask) { +; CHECK-LABEL: fma_mask_321_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd231ph %xmm1, %xmm2, %xmm0 {%k1} +; CHECK-NEXT: retq + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %z, <8 x half> %y, <8 x half> %x) + %b = bitcast i8 %mask to <8 x i1> + %c = select <8 x i1> %b, <8 x half> %a, <8 x half> %x + ret <8 x half> %c +} + +define <8 x half> @fma_mask_132_v8f16(<8 x half> %x, <8 x half> %y, <8 x half> %z, i8 %mask) { +; CHECK-LABEL: fma_mask_132_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd132ph %xmm2, %xmm1, %xmm0 {%k1} +; CHECK-NEXT: retq + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %x, <8 x half> %z, <8 x half> %y) + %b = bitcast i8 %mask to <8 x i1> + %c = select <8 x i1> %b, <8 x half> %a, <8 x half> %x + ret <8 x half> %c +} + +define <8 x half> @fma_mask_312_v8f16(<8 x half> %x, <8 x half> %y, <8 x half> %z, i8 %mask) { +; CHECK-LABEL: fma_mask_312_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd213ph %xmm1, %xmm2, %xmm0 {%k1} +; CHECK-NEXT: retq + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %z, <8 x half> %x, <8 x half> %y) + %b = bitcast i8 %mask to <8 x i1> + %c = select <8 x i1> %b, <8 x half> %a, <8 x half> %x + ret <8 x half> %c +} + +define <8 x half> @fma_maskz_123_v8f16(<8 x half> %x, <8 x half> %y, <8 x half> %z, i8 %mask) { +; CHECK-LABEL: fma_maskz_123_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd213ph %xmm2, %xmm1, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %x, <8 x half> %y, <8 x half> %z) + %b = bitcast i8 %mask to <8 x i1> + %c = select <8 x i1> %b, <8 x half> %a, <8 x half> zeroinitializer + ret <8 x half> %c +} + +define <8 x half> @fma_maskz_213_v8f16(<8 x half> %x, <8 x half> %y, <8 x half> %z, i8 %mask) { +; CHECK-LABEL: fma_maskz_213_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd213ph %xmm2, %xmm1, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %y, <8 x half> %x, <8 x half> %z) + %b = bitcast i8 %mask to <8 x i1> + %c = select <8 x i1> %b, <8 x half> %a, <8 x half> zeroinitializer + ret <8 x half> %c +} + +define <8 x half> @fma_maskz_231_v8f16(<8 x half> %x, <8 x half> %y, <8 x half> %z, i8 %mask) { +; CHECK-LABEL: fma_maskz_231_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd231ph %xmm1, %xmm2, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %y, <8 x half> %z, <8 x half> %x) + %b = bitcast i8 %mask to <8 x i1> + %c = select <8 x i1> %b, <8 x half> %a, <8 x half> zeroinitializer + ret <8 x half> %c +} + +define <8 x half> @fma_maskz_321_v8f16(<8 x half> %x, <8 x half> %y, <8 x half> %z, i8 %mask) { +; CHECK-LABEL: fma_maskz_321_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd231ph %xmm1, %xmm2, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %z, <8 x 
half> %y, <8 x half> %x) + %b = bitcast i8 %mask to <8 x i1> + %c = select <8 x i1> %b, <8 x half> %a, <8 x half> zeroinitializer + ret <8 x half> %c +} + +define <8 x half> @fma_maskz_132_v8f16(<8 x half> %x, <8 x half> %y, <8 x half> %z, i8 %mask) { +; CHECK-LABEL: fma_maskz_132_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd213ph %xmm1, %xmm2, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %x, <8 x half> %z, <8 x half> %y) + %b = bitcast i8 %mask to <8 x i1> + %c = select <8 x i1> %b, <8 x half> %a, <8 x half> zeroinitializer + ret <8 x half> %c +} + +define <8 x half> @fma_maskz_312_v8f16(<8 x half> %x, <8 x half> %y, <8 x half> %z, i8 %mask) { +; CHECK-LABEL: fma_maskz_312_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd213ph %xmm1, %xmm2, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %z, <8 x half> %x, <8 x half> %y) + %b = bitcast i8 %mask to <8 x i1> + %c = select <8 x i1> %b, <8 x half> %a, <8 x half> zeroinitializer + ret <8 x half> %c +} + +define <8 x half> @fma_mask_load_123_v8f16(<8 x half> %x, <8 x half> %y, <8 x half>* %zp, i8 %mask) { +; CHECK-LABEL: fma_mask_load_123_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd213ph (%rdi), %xmm1, %xmm0 {%k1} +; CHECK-NEXT: retq + %z = load <8 x half>, <8 x half>* %zp + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %x, <8 x half> %y, <8 x half> %z) + %b = bitcast i8 %mask to <8 x i1> + %c = select <8 x i1> %b, <8 x half> %a, <8 x half> %x + ret <8 x half> %c +} + +define <8 x half> @fma_mask_load_213_v8f16(<8 x half> %x, <8 x half> %y, <8 x half>* %zp, i8 %mask) { +; CHECK-LABEL: fma_mask_load_213_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd213ph (%rdi), %xmm1, %xmm0 {%k1} +; CHECK-NEXT: retq + %z = load <8 x half>, <8 x half>* %zp + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %y, <8 x half> %x, <8 x half> %z) + %b = bitcast i8 %mask to <8 x i1> + %c = select <8 x i1> %b, <8 x half> %a, <8 x half> %x + ret <8 x half> %c +} + +define <8 x half> @fma_mask_load_231_v8f16(<8 x half> %x, <8 x half> %y, <8 x half>* %zp, i8 %mask) { +; CHECK-LABEL: fma_mask_load_231_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd231ph (%rdi), %xmm1, %xmm0 {%k1} +; CHECK-NEXT: retq + %z = load <8 x half>, <8 x half>* %zp + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %y, <8 x half> %z, <8 x half> %x) + %b = bitcast i8 %mask to <8 x i1> + %c = select <8 x i1> %b, <8 x half> %a, <8 x half> %x + ret <8 x half> %c +} + +define <8 x half> @fma_mask_load_321_v8f16(<8 x half> %x, <8 x half> %y, <8 x half>* %zp, i8 %mask) { +; CHECK-LABEL: fma_mask_load_321_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd231ph (%rdi), %xmm1, %xmm0 {%k1} +; CHECK-NEXT: retq + %z = load <8 x half>, <8 x half>* %zp + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %z, <8 x half> %y, <8 x half> %x) + %b = bitcast i8 %mask to <8 x i1> + %c = select <8 x i1> %b, <8 x half> %a, <8 x half> %x + ret <8 x half> %c +} + +define <8 x half> @fma_mask_load_132_v8f16(<8 x half> %x, <8 x half> %y, <8 x half>* %zp, i8 %mask) { +; CHECK-LABEL: fma_mask_load_132_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd132ph (%rdi), %xmm1, %xmm0 {%k1} +; CHECK-NEXT: retq + %z = load <8 x half>, <8 x half>* %zp + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %x, <8 x half> %z, <8 x half> %y) + %b = bitcast i8 
%mask to <8 x i1> + %c = select <8 x i1> %b, <8 x half> %a, <8 x half> %x + ret <8 x half> %c +} + +define <8 x half> @fma_mask_load_312_v8f16(<8 x half> %x, <8 x half> %y, <8 x half>* %zp, i8 %mask) { +; CHECK-LABEL: fma_mask_load_312_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd132ph (%rdi), %xmm1, %xmm0 {%k1} +; CHECK-NEXT: retq + %z = load <8 x half>, <8 x half>* %zp + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %z, <8 x half> %x, <8 x half> %y) + %b = bitcast i8 %mask to <8 x i1> + %c = select <8 x i1> %b, <8 x half> %a, <8 x half> %x + ret <8 x half> %c +} + +define <8 x half> @fma_maskz_load_123_v8f16(<8 x half> %x, <8 x half> %y, <8 x half>* %zp, i8 %mask) { +; CHECK-LABEL: fma_maskz_load_123_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd213ph (%rdi), %xmm1, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %z = load <8 x half>, <8 x half>* %zp + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %x, <8 x half> %y, <8 x half> %z) + %b = bitcast i8 %mask to <8 x i1> + %c = select <8 x i1> %b, <8 x half> %a, <8 x half> zeroinitializer + ret <8 x half> %c +} + +define <8 x half> @fma_maskz_load_213_v8f16(<8 x half> %x, <8 x half> %y, <8 x half>* %zp, i8 %mask) { +; CHECK-LABEL: fma_maskz_load_213_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd213ph (%rdi), %xmm1, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %z = load <8 x half>, <8 x half>* %zp + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %y, <8 x half> %x, <8 x half> %z) + %b = bitcast i8 %mask to <8 x i1> + %c = select <8 x i1> %b, <8 x half> %a, <8 x half> zeroinitializer + ret <8 x half> %c +} + +define <8 x half> @fma_maskz_load_231_v8f16(<8 x half> %x, <8 x half> %y, <8 x half>* %zp, i8 %mask) { +; CHECK-LABEL: fma_maskz_load_231_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd231ph (%rdi), %xmm1, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %z = load <8 x half>, <8 x half>* %zp + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %y, <8 x half> %z, <8 x half> %x) + %b = bitcast i8 %mask to <8 x i1> + %c = select <8 x i1> %b, <8 x half> %a, <8 x half> zeroinitializer + ret <8 x half> %c +} + +define <8 x half> @fma_maskz_load_321_v8f16(<8 x half> %x, <8 x half> %y, <8 x half>* %zp, i8 %mask) { +; CHECK-LABEL: fma_maskz_load_321_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd231ph (%rdi), %xmm1, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %z = load <8 x half>, <8 x half>* %zp + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %z, <8 x half> %y, <8 x half> %x) + %b = bitcast i8 %mask to <8 x i1> + %c = select <8 x i1> %b, <8 x half> %a, <8 x half> zeroinitializer + ret <8 x half> %c +} + +define <8 x half> @fma_maskz_load_132_v8f16(<8 x half> %x, <8 x half> %y, <8 x half>* %zp, i8 %mask) { +; CHECK-LABEL: fma_maskz_load_132_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd132ph (%rdi), %xmm1, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %z = load <8 x half>, <8 x half>* %zp + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %x, <8 x half> %z, <8 x half> %y) + %b = bitcast i8 %mask to <8 x i1> + %c = select <8 x i1> %b, <8 x half> %a, <8 x half> zeroinitializer + ret <8 x half> %c +} + +define <8 x half> @fma_maskz_load_312_v8f16(<8 x half> %x, <8 x half> %y, <8 x half>* %zp, i8 %mask) { +; CHECK-LABEL: fma_maskz_load_312_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd132ph (%rdi), %xmm1, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %z = load <8 x half>, <8 x 
half>* %zp + %a = call <8 x half> @llvm.fma.v8f16(<8 x half> %z, <8 x half> %x, <8 x half> %y) + %b = bitcast i8 %mask to <8 x i1> + %c = select <8 x i1> %b, <8 x half> %a, <8 x half> zeroinitializer + ret <8 x half> %c +} + +define <16 x half> @fma_123_v16f16(<16 x half> %x, <16 x half> %y, <16 x half> %z) { +; CHECK-LABEL: fma_123_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213ph %ymm2, %ymm1, %ymm0 +; CHECK-NEXT: retq + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %x, <16 x half> %y, <16 x half> %z) + ret <16 x half> %a +} + +define <16 x half> @fma_213_v16f16(<16 x half> %x, <16 x half> %y, <16 x half> %z) { +; CHECK-LABEL: fma_213_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213ph %ymm2, %ymm1, %ymm0 +; CHECK-NEXT: retq + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %y, <16 x half> %x, <16 x half> %z) + ret <16 x half> %a +} + +define <16 x half> @fma_231_v16f16(<16 x half> %x, <16 x half> %y, <16 x half> %z) { +; CHECK-LABEL: fma_231_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd231ph %ymm1, %ymm2, %ymm0 +; CHECK-NEXT: retq + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %y, <16 x half> %z, <16 x half> %x) + ret <16 x half> %a +} + +define <16 x half> @fma_321_v16f16(<16 x half> %x, <16 x half> %y, <16 x half> %z) { +; CHECK-LABEL: fma_321_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd231ph %ymm1, %ymm2, %ymm0 +; CHECK-NEXT: retq + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %z, <16 x half> %y, <16 x half> %x) + ret <16 x half> %a +} + +define <16 x half> @fma_132_v16f16(<16 x half> %x, <16 x half> %y, <16 x half> %z) { +; CHECK-LABEL: fma_132_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213ph %ymm1, %ymm2, %ymm0 +; CHECK-NEXT: retq + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %x, <16 x half> %z, <16 x half> %y) + ret <16 x half> %a +} + +define <16 x half> @fma_312_v16f16(<16 x half> %x, <16 x half> %y, <16 x half> %z) { +; CHECK-LABEL: fma_312_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213ph %ymm1, %ymm2, %ymm0 +; CHECK-NEXT: retq + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %z, <16 x half> %x, <16 x half> %y) + ret <16 x half> %a +} + +define <16 x half> @fma_load_123_v16f16(<16 x half> %x, <16 x half> %y, <16 x half>* %zp) { +; CHECK-LABEL: fma_load_123_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213ph (%rdi), %ymm1, %ymm0 +; CHECK-NEXT: retq + %z = load <16 x half>, <16 x half>* %zp + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %x, <16 x half> %y, <16 x half> %z) + ret <16 x half> %a +} + +define <16 x half> @fma_load_213_v16f16(<16 x half> %x, <16 x half> %y, <16 x half>* %zp) { +; CHECK-LABEL: fma_load_213_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213ph (%rdi), %ymm1, %ymm0 +; CHECK-NEXT: retq + %z = load <16 x half>, <16 x half>* %zp + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %y, <16 x half> %x, <16 x half> %z) + ret <16 x half> %a +} + +define <16 x half> @fma_load_231_v16f16(<16 x half> %x, <16 x half> %y, <16 x half>* %zp) { +; CHECK-LABEL: fma_load_231_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd231ph (%rdi), %ymm1, %ymm0 +; CHECK-NEXT: retq + %z = load <16 x half>, <16 x half>* %zp + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %y, <16 x half> %z, <16 x half> %x) + ret <16 x half> %a +} + +define <16 x half> @fma_load_321_v16f16(<16 x half> %x, <16 x half> %y, <16 x half>* %zp) { +; CHECK-LABEL: fma_load_321_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd231ph (%rdi), %ymm1, %ymm0 +; CHECK-NEXT: retq + %z = load <16 x half>, <16 x half>* %zp + %a = call <16 x half> 
@llvm.fma.v16f16(<16 x half> %z, <16 x half> %y, <16 x half> %x) + ret <16 x half> %a +} + +define <16 x half> @fma_load_132_v16f16(<16 x half> %x, <16 x half> %y, <16 x half>* %zp) { +; CHECK-LABEL: fma_load_132_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd132ph (%rdi), %ymm1, %ymm0 +; CHECK-NEXT: retq + %z = load <16 x half>, <16 x half>* %zp + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %x, <16 x half> %z, <16 x half> %y) + ret <16 x half> %a +} + +define <16 x half> @fma_load_312_v16f16(<16 x half> %x, <16 x half> %y, <16 x half>* %zp) { +; CHECK-LABEL: fma_load_312_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd132ph (%rdi), %ymm1, %ymm0 +; CHECK-NEXT: retq + %z = load <16 x half>, <16 x half>* %zp + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %z, <16 x half> %x, <16 x half> %y) + ret <16 x half> %a +} + +define <16 x half> @fma_mask_123_v16f16(<16 x half> %x, <16 x half> %y, <16 x half> %z, i16 %mask) { +; CHECK-LABEL: fma_mask_123_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd132ph %ymm1, %ymm2, %ymm0 {%k1} +; CHECK-NEXT: retq + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %x, <16 x half> %y, <16 x half> %z) + %b = bitcast i16 %mask to <16 x i1> + %c = select <16 x i1> %b, <16 x half> %a, <16 x half> %x + ret <16 x half> %c +} + +define <16 x half> @fma_mask_213_v16f16(<16 x half> %x, <16 x half> %y, <16 x half> %z, i16 %mask) { +; CHECK-LABEL: fma_mask_213_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd213ph %ymm2, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %y, <16 x half> %x, <16 x half> %z) + %b = bitcast i16 %mask to <16 x i1> + %c = select <16 x i1> %b, <16 x half> %a, <16 x half> %x + ret <16 x half> %c +} + +define <16 x half> @fma_mask_231_v16f16(<16 x half> %x, <16 x half> %y, <16 x half> %z, i16 %mask) { +; CHECK-LABEL: fma_mask_231_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd231ph %ymm2, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %y, <16 x half> %z, <16 x half> %x) + %b = bitcast i16 %mask to <16 x i1> + %c = select <16 x i1> %b, <16 x half> %a, <16 x half> %x + ret <16 x half> %c +} + +define <16 x half> @fma_mask_321_v16f16(<16 x half> %x, <16 x half> %y, <16 x half> %z, i16 %mask) { +; CHECK-LABEL: fma_mask_321_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd231ph %ymm1, %ymm2, %ymm0 {%k1} +; CHECK-NEXT: retq + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %z, <16 x half> %y, <16 x half> %x) + %b = bitcast i16 %mask to <16 x i1> + %c = select <16 x i1> %b, <16 x half> %a, <16 x half> %x + ret <16 x half> %c +} + +define <16 x half> @fma_mask_132_v16f16(<16 x half> %x, <16 x half> %y, <16 x half> %z, i16 %mask) { +; CHECK-LABEL: fma_mask_132_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd132ph %ymm2, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %x, <16 x half> %z, <16 x half> %y) + %b = bitcast i16 %mask to <16 x i1> + %c = select <16 x i1> %b, <16 x half> %a, <16 x half> %x + ret <16 x half> %c +} + +define <16 x half> @fma_mask_312_v16f16(<16 x half> %x, <16 x half> %y, <16 x half> %z, i16 %mask) { +; CHECK-LABEL: fma_mask_312_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd213ph %ymm1, %ymm2, %ymm0 {%k1} +; CHECK-NEXT: retq + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %z, <16 x half> %x, <16 x 
half> %y) + %b = bitcast i16 %mask to <16 x i1> + %c = select <16 x i1> %b, <16 x half> %a, <16 x half> %x + ret <16 x half> %c +} + +define <16 x half> @fma_maskz_123_v16f16(<16 x half> %x, <16 x half> %y, <16 x half> %z, i16 %mask) { +; CHECK-LABEL: fma_maskz_123_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd213ph %ymm2, %ymm1, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %x, <16 x half> %y, <16 x half> %z) + %b = bitcast i16 %mask to <16 x i1> + %c = select <16 x i1> %b, <16 x half> %a, <16 x half> zeroinitializer + ret <16 x half> %c +} + +define <16 x half> @fma_maskz_213_v16f16(<16 x half> %x, <16 x half> %y, <16 x half> %z, i16 %mask) { +; CHECK-LABEL: fma_maskz_213_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd213ph %ymm2, %ymm1, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %y, <16 x half> %x, <16 x half> %z) + %b = bitcast i16 %mask to <16 x i1> + %c = select <16 x i1> %b, <16 x half> %a, <16 x half> zeroinitializer + ret <16 x half> %c +} + +define <16 x half> @fma_maskz_231_v16f16(<16 x half> %x, <16 x half> %y, <16 x half> %z, i16 %mask) { +; CHECK-LABEL: fma_maskz_231_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd231ph %ymm1, %ymm2, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %y, <16 x half> %z, <16 x half> %x) + %b = bitcast i16 %mask to <16 x i1> + %c = select <16 x i1> %b, <16 x half> %a, <16 x half> zeroinitializer + ret <16 x half> %c +} + +define <16 x half> @fma_maskz_321_v16f16(<16 x half> %x, <16 x half> %y, <16 x half> %z, i16 %mask) { +; CHECK-LABEL: fma_maskz_321_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd231ph %ymm1, %ymm2, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %z, <16 x half> %y, <16 x half> %x) + %b = bitcast i16 %mask to <16 x i1> + %c = select <16 x i1> %b, <16 x half> %a, <16 x half> zeroinitializer + ret <16 x half> %c +} + +define <16 x half> @fma_maskz_132_v16f16(<16 x half> %x, <16 x half> %y, <16 x half> %z, i16 %mask) { +; CHECK-LABEL: fma_maskz_132_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd213ph %ymm1, %ymm2, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %x, <16 x half> %z, <16 x half> %y) + %b = bitcast i16 %mask to <16 x i1> + %c = select <16 x i1> %b, <16 x half> %a, <16 x half> zeroinitializer + ret <16 x half> %c +} + +define <16 x half> @fma_maskz_312_v16f16(<16 x half> %x, <16 x half> %y, <16 x half> %z, i16 %mask) { +; CHECK-LABEL: fma_maskz_312_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd213ph %ymm1, %ymm2, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %z, <16 x half> %x, <16 x half> %y) + %b = bitcast i16 %mask to <16 x i1> + %c = select <16 x i1> %b, <16 x half> %a, <16 x half> zeroinitializer + ret <16 x half> %c +} + +define <16 x half> @fma_mask_load_123_v16f16(<16 x half> %x, <16 x half> %y, <16 x half>* %zp, i16 %mask) { +; CHECK-LABEL: fma_mask_load_123_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd213ph (%rdi), %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %z = load <16 x half>, <16 x half>* %zp + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %x, <16 x half> %y, <16 x half> %z) + %b = bitcast i16 %mask to <16 x i1> + %c = select <16 x 
i1> %b, <16 x half> %a, <16 x half> %x + ret <16 x half> %c +} + +define <16 x half> @fma_mask_load_213_v16f16(<16 x half> %x, <16 x half> %y, <16 x half>* %zp, i16 %mask) { +; CHECK-LABEL: fma_mask_load_213_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd213ph (%rdi), %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %z = load <16 x half>, <16 x half>* %zp + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %y, <16 x half> %x, <16 x half> %z) + %b = bitcast i16 %mask to <16 x i1> + %c = select <16 x i1> %b, <16 x half> %a, <16 x half> %x + ret <16 x half> %c +} + +define <16 x half> @fma_mask_load_231_v16f16(<16 x half> %x, <16 x half> %y, <16 x half>* %zp, i16 %mask) { +; CHECK-LABEL: fma_mask_load_231_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd231ph (%rdi), %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %z = load <16 x half>, <16 x half>* %zp + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %y, <16 x half> %z, <16 x half> %x) + %b = bitcast i16 %mask to <16 x i1> + %c = select <16 x i1> %b, <16 x half> %a, <16 x half> %x + ret <16 x half> %c +} + +define <16 x half> @fma_mask_load_321_v16f16(<16 x half> %x, <16 x half> %y, <16 x half>* %zp, i16 %mask) { +; CHECK-LABEL: fma_mask_load_321_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd231ph (%rdi), %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %z = load <16 x half>, <16 x half>* %zp + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %z, <16 x half> %y, <16 x half> %x) + %b = bitcast i16 %mask to <16 x i1> + %c = select <16 x i1> %b, <16 x half> %a, <16 x half> %x + ret <16 x half> %c +} + +define <16 x half> @fma_mask_load_132_v16f16(<16 x half> %x, <16 x half> %y, <16 x half>* %zp, i16 %mask) { +; CHECK-LABEL: fma_mask_load_132_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd132ph (%rdi), %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %z = load <16 x half>, <16 x half>* %zp + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %x, <16 x half> %z, <16 x half> %y) + %b = bitcast i16 %mask to <16 x i1> + %c = select <16 x i1> %b, <16 x half> %a, <16 x half> %x + ret <16 x half> %c +} + +define <16 x half> @fma_mask_load_312_v16f16(<16 x half> %x, <16 x half> %y, <16 x half>* %zp, i16 %mask) { +; CHECK-LABEL: fma_mask_load_312_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd132ph (%rdi), %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %z = load <16 x half>, <16 x half>* %zp + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %z, <16 x half> %x, <16 x half> %y) + %b = bitcast i16 %mask to <16 x i1> + %c = select <16 x i1> %b, <16 x half> %a, <16 x half> %x + ret <16 x half> %c +} + +define <16 x half> @fma_maskz_load_123_v16f16(<16 x half> %x, <16 x half> %y, <16 x half>* %zp, i16 %mask) { +; CHECK-LABEL: fma_maskz_load_123_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd213ph (%rdi), %ymm1, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %z = load <16 x half>, <16 x half>* %zp + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %x, <16 x half> %y, <16 x half> %z) + %b = bitcast i16 %mask to <16 x i1> + %c = select <16 x i1> %b, <16 x half> %a, <16 x half> zeroinitializer + ret <16 x half> %c +} + +define <16 x half> @fma_maskz_load_213_v16f16(<16 x half> %x, <16 x half> %y, <16 x half>* %zp, i16 %mask) { +; CHECK-LABEL: fma_maskz_load_213_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd213ph (%rdi), %ymm1, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %z = load <16 
x half>, <16 x half>* %zp + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %y, <16 x half> %x, <16 x half> %z) + %b = bitcast i16 %mask to <16 x i1> + %c = select <16 x i1> %b, <16 x half> %a, <16 x half> zeroinitializer + ret <16 x half> %c +} + +define <16 x half> @fma_maskz_load_231_v16f16(<16 x half> %x, <16 x half> %y, <16 x half>* %zp, i16 %mask) { +; CHECK-LABEL: fma_maskz_load_231_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd231ph (%rdi), %ymm1, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %z = load <16 x half>, <16 x half>* %zp + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %y, <16 x half> %z, <16 x half> %x) + %b = bitcast i16 %mask to <16 x i1> + %c = select <16 x i1> %b, <16 x half> %a, <16 x half> zeroinitializer + ret <16 x half> %c +} + +define <16 x half> @fma_maskz_load_321_v16f16(<16 x half> %x, <16 x half> %y, <16 x half>* %zp, i16 %mask) { +; CHECK-LABEL: fma_maskz_load_321_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd231ph (%rdi), %ymm1, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %z = load <16 x half>, <16 x half>* %zp + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %z, <16 x half> %y, <16 x half> %x) + %b = bitcast i16 %mask to <16 x i1> + %c = select <16 x i1> %b, <16 x half> %a, <16 x half> zeroinitializer + ret <16 x half> %c +} + +define <16 x half> @fma_maskz_load_132_v16f16(<16 x half> %x, <16 x half> %y, <16 x half>* %zp, i16 %mask) { +; CHECK-LABEL: fma_maskz_load_132_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd132ph (%rdi), %ymm1, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %z = load <16 x half>, <16 x half>* %zp + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %x, <16 x half> %z, <16 x half> %y) + %b = bitcast i16 %mask to <16 x i1> + %c = select <16 x i1> %b, <16 x half> %a, <16 x half> zeroinitializer + ret <16 x half> %c +} + +define <16 x half> @fma_maskz_load_312_v16f16(<16 x half> %x, <16 x half> %y, <16 x half>* %zp, i16 %mask) { +; CHECK-LABEL: fma_maskz_load_312_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd132ph (%rdi), %ymm1, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %z = load <16 x half>, <16 x half>* %zp + %a = call <16 x half> @llvm.fma.v16f16(<16 x half> %z, <16 x half> %x, <16 x half> %y) + %b = bitcast i16 %mask to <16 x i1> + %c = select <16 x i1> %b, <16 x half> %a, <16 x half> zeroinitializer + ret <16 x half> %c +} + +define <32 x half> @fma_123_v32f16(<32 x half> %x, <32 x half> %y, <32 x half> %z) { +; CHECK-LABEL: fma_123_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213ph %zmm2, %zmm1, %zmm0 +; CHECK-NEXT: retq + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %x, <32 x half> %y, <32 x half> %z) + ret <32 x half> %a +} + +define <32 x half> @fma_213_v32f16(<32 x half> %x, <32 x half> %y, <32 x half> %z) { +; CHECK-LABEL: fma_213_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213ph %zmm2, %zmm1, %zmm0 +; CHECK-NEXT: retq + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %y, <32 x half> %x, <32 x half> %z) + ret <32 x half> %a +} + +define <32 x half> @fma_231_v32f16(<32 x half> %x, <32 x half> %y, <32 x half> %z) { +; CHECK-LABEL: fma_231_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd231ph %zmm1, %zmm2, %zmm0 +; CHECK-NEXT: retq + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %y, <32 x half> %z, <32 x half> %x) + ret <32 x half> %a +} + +define <32 x half> @fma_321_v32f16(<32 x half> %x, <32 x half> %y, <32 x half> %z) { +; CHECK-LABEL: fma_321_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: 
vfmadd231ph %zmm1, %zmm2, %zmm0 +; CHECK-NEXT: retq + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %z, <32 x half> %y, <32 x half> %x) + ret <32 x half> %a +} + +define <32 x half> @fma_132_v32f16(<32 x half> %x, <32 x half> %y, <32 x half> %z) { +; CHECK-LABEL: fma_132_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213ph %zmm1, %zmm2, %zmm0 +; CHECK-NEXT: retq + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %x, <32 x half> %z, <32 x half> %y) + ret <32 x half> %a +} + +define <32 x half> @fma_312_v32f16(<32 x half> %x, <32 x half> %y, <32 x half> %z) { +; CHECK-LABEL: fma_312_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213ph %zmm1, %zmm2, %zmm0 +; CHECK-NEXT: retq + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %z, <32 x half> %x, <32 x half> %y) + ret <32 x half> %a +} + +define <32 x half> @fma_load_123_v32f16(<32 x half> %x, <32 x half> %y, <32 x half>* %zp) { +; CHECK-LABEL: fma_load_123_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213ph (%rdi), %zmm1, %zmm0 +; CHECK-NEXT: retq + %z = load <32 x half>, <32 x half>* %zp + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %x, <32 x half> %y, <32 x half> %z) + ret <32 x half> %a +} + +define <32 x half> @fma_load_213_v32f16(<32 x half> %x, <32 x half> %y, <32 x half>* %zp) { +; CHECK-LABEL: fma_load_213_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213ph (%rdi), %zmm1, %zmm0 +; CHECK-NEXT: retq + %z = load <32 x half>, <32 x half>* %zp + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %y, <32 x half> %x, <32 x half> %z) + ret <32 x half> %a +} + +define <32 x half> @fma_load_231_v32f16(<32 x half> %x, <32 x half> %y, <32 x half>* %zp) { +; CHECK-LABEL: fma_load_231_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd231ph (%rdi), %zmm1, %zmm0 +; CHECK-NEXT: retq + %z = load <32 x half>, <32 x half>* %zp + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %y, <32 x half> %z, <32 x half> %x) + ret <32 x half> %a +} + +define <32 x half> @fma_load_321_v32f16(<32 x half> %x, <32 x half> %y, <32 x half>* %zp) { +; CHECK-LABEL: fma_load_321_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd231ph (%rdi), %zmm1, %zmm0 +; CHECK-NEXT: retq + %z = load <32 x half>, <32 x half>* %zp + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %z, <32 x half> %y, <32 x half> %x) + ret <32 x half> %a +} + +define <32 x half> @fma_load_132_v32f16(<32 x half> %x, <32 x half> %y, <32 x half>* %zp) { +; CHECK-LABEL: fma_load_132_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd132ph (%rdi), %zmm1, %zmm0 +; CHECK-NEXT: retq + %z = load <32 x half>, <32 x half>* %zp + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %x, <32 x half> %z, <32 x half> %y) + ret <32 x half> %a +} + +define <32 x half> @fma_load_312_v32f16(<32 x half> %x, <32 x half> %y, <32 x half>* %zp) { +; CHECK-LABEL: fma_load_312_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd132ph (%rdi), %zmm1, %zmm0 +; CHECK-NEXT: retq + %z = load <32 x half>, <32 x half>* %zp + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %z, <32 x half> %x, <32 x half> %y) + ret <32 x half> %a +} + +define <32 x half> @fma_mask_123_v32f16(<32 x half> %x, <32 x half> %y, <32 x half> %z, i32 %mask) { +; CHECK-LABEL: fma_mask_123_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd132ph %zmm1, %zmm2, %zmm0 {%k1} +; CHECK-NEXT: retq + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %x, <32 x half> %y, <32 x half> %z) + %b = bitcast i32 %mask to <32 x i1> + %c = select <32 x i1> %b, <32 x half> %a, <32 x half> %x + ret <32 x half> %c +} + +define <32 x half> 
@fma_mask_213_v32f16(<32 x half> %x, <32 x half> %y, <32 x half> %z, i32 %mask) { +; CHECK-LABEL: fma_mask_213_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd213ph %zmm2, %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %y, <32 x half> %x, <32 x half> %z) + %b = bitcast i32 %mask to <32 x i1> + %c = select <32 x i1> %b, <32 x half> %a, <32 x half> %x + ret <32 x half> %c +} + +define <32 x half> @fma_mask_231_v32f16(<32 x half> %x, <32 x half> %y, <32 x half> %z, i32 %mask) { +; CHECK-LABEL: fma_mask_231_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd231ph %zmm2, %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %y, <32 x half> %z, <32 x half> %x) + %b = bitcast i32 %mask to <32 x i1> + %c = select <32 x i1> %b, <32 x half> %a, <32 x half> %x + ret <32 x half> %c +} + +define <32 x half> @fma_mask_321_v32f16(<32 x half> %x, <32 x half> %y, <32 x half> %z, i32 %mask) { +; CHECK-LABEL: fma_mask_321_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd231ph %zmm1, %zmm2, %zmm0 {%k1} +; CHECK-NEXT: retq + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %z, <32 x half> %y, <32 x half> %x) + %b = bitcast i32 %mask to <32 x i1> + %c = select <32 x i1> %b, <32 x half> %a, <32 x half> %x + ret <32 x half> %c +} + +define <32 x half> @fma_mask_132_v32f16(<32 x half> %x, <32 x half> %y, <32 x half> %z, i32 %mask) { +; CHECK-LABEL: fma_mask_132_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd132ph %zmm2, %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %x, <32 x half> %z, <32 x half> %y) + %b = bitcast i32 %mask to <32 x i1> + %c = select <32 x i1> %b, <32 x half> %a, <32 x half> %x + ret <32 x half> %c +} + +define <32 x half> @fma_mask_312_v32f16(<32 x half> %x, <32 x half> %y, <32 x half> %z, i32 %mask) { +; CHECK-LABEL: fma_mask_312_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd213ph %zmm1, %zmm2, %zmm0 {%k1} +; CHECK-NEXT: retq + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %z, <32 x half> %x, <32 x half> %y) + %b = bitcast i32 %mask to <32 x i1> + %c = select <32 x i1> %b, <32 x half> %a, <32 x half> %x + ret <32 x half> %c +} + +define <32 x half> @fma_maskz_123_v32f16(<32 x half> %x, <32 x half> %y, <32 x half> %z, i32 %mask) { +; CHECK-LABEL: fma_maskz_123_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd213ph %zmm2, %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %x, <32 x half> %y, <32 x half> %z) + %b = bitcast i32 %mask to <32 x i1> + %c = select <32 x i1> %b, <32 x half> %a, <32 x half> zeroinitializer + ret <32 x half> %c +} + +define <32 x half> @fma_maskz_213_v32f16(<32 x half> %x, <32 x half> %y, <32 x half> %z, i32 %mask) { +; CHECK-LABEL: fma_maskz_213_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd213ph %zmm2, %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %y, <32 x half> %x, <32 x half> %z) + %b = bitcast i32 %mask to <32 x i1> + %c = select <32 x i1> %b, <32 x half> %a, <32 x half> zeroinitializer + ret <32 x half> %c +} + +define <32 x half> @fma_maskz_231_v32f16(<32 x half> %x, <32 x half> %y, <32 x half> %z, i32 %mask) { +; CHECK-LABEL: fma_maskz_231_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd231ph 
%zmm1, %zmm2, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %y, <32 x half> %z, <32 x half> %x) + %b = bitcast i32 %mask to <32 x i1> + %c = select <32 x i1> %b, <32 x half> %a, <32 x half> zeroinitializer + ret <32 x half> %c +} + +define <32 x half> @fma_maskz_321_v32f16(<32 x half> %x, <32 x half> %y, <32 x half> %z, i32 %mask) { +; CHECK-LABEL: fma_maskz_321_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd231ph %zmm1, %zmm2, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %z, <32 x half> %y, <32 x half> %x) + %b = bitcast i32 %mask to <32 x i1> + %c = select <32 x i1> %b, <32 x half> %a, <32 x half> zeroinitializer + ret <32 x half> %c +} + +define <32 x half> @fma_maskz_132_v32f16(<32 x half> %x, <32 x half> %y, <32 x half> %z, i32 %mask) { +; CHECK-LABEL: fma_maskz_132_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd213ph %zmm1, %zmm2, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %x, <32 x half> %z, <32 x half> %y) + %b = bitcast i32 %mask to <32 x i1> + %c = select <32 x i1> %b, <32 x half> %a, <32 x half> zeroinitializer + ret <32 x half> %c +} + +define <32 x half> @fma_maskz_312_v32f16(<32 x half> %x, <32 x half> %y, <32 x half> %z, i32 %mask) { +; CHECK-LABEL: fma_maskz_312_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %edi, %k1 +; CHECK-NEXT: vfmadd213ph %zmm1, %zmm2, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %z, <32 x half> %x, <32 x half> %y) + %b = bitcast i32 %mask to <32 x i1> + %c = select <32 x i1> %b, <32 x half> %a, <32 x half> zeroinitializer + ret <32 x half> %c +} + +define <32 x half> @fma_mask_load_123_v32f16(<32 x half> %x, <32 x half> %y, <32 x half>* %zp, i32 %mask) { +; CHECK-LABEL: fma_mask_load_123_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd213ph (%rdi), %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %z = load <32 x half>, <32 x half>* %zp + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %x, <32 x half> %y, <32 x half> %z) + %b = bitcast i32 %mask to <32 x i1> + %c = select <32 x i1> %b, <32 x half> %a, <32 x half> %x + ret <32 x half> %c +} + +define <32 x half> @fma_mask_load_213_v32f16(<32 x half> %x, <32 x half> %y, <32 x half>* %zp, i32 %mask) { +; CHECK-LABEL: fma_mask_load_213_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd213ph (%rdi), %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %z = load <32 x half>, <32 x half>* %zp + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %y, <32 x half> %x, <32 x half> %z) + %b = bitcast i32 %mask to <32 x i1> + %c = select <32 x i1> %b, <32 x half> %a, <32 x half> %x + ret <32 x half> %c +} + +define <32 x half> @fma_mask_load_231_v32f16(<32 x half> %x, <32 x half> %y, <32 x half>* %zp, i32 %mask) { +; CHECK-LABEL: fma_mask_load_231_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd231ph (%rdi), %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %z = load <32 x half>, <32 x half>* %zp + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %y, <32 x half> %z, <32 x half> %x) + %b = bitcast i32 %mask to <32 x i1> + %c = select <32 x i1> %b, <32 x half> %a, <32 x half> %x + ret <32 x half> %c +} + +define <32 x half> @fma_mask_load_321_v32f16(<32 x half> %x, <32 x half> %y, <32 x half>* %zp, i32 %mask) { +; CHECK-LABEL: fma_mask_load_321_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; 
CHECK-NEXT: vfmadd231ph (%rdi), %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %z = load <32 x half>, <32 x half>* %zp + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %z, <32 x half> %y, <32 x half> %x) + %b = bitcast i32 %mask to <32 x i1> + %c = select <32 x i1> %b, <32 x half> %a, <32 x half> %x + ret <32 x half> %c +} + +define <32 x half> @fma_mask_load_132_v32f16(<32 x half> %x, <32 x half> %y, <32 x half>* %zp, i32 %mask) { +; CHECK-LABEL: fma_mask_load_132_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd132ph (%rdi), %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %z = load <32 x half>, <32 x half>* %zp + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %x, <32 x half> %z, <32 x half> %y) + %b = bitcast i32 %mask to <32 x i1> + %c = select <32 x i1> %b, <32 x half> %a, <32 x half> %x + ret <32 x half> %c +} + +define <32 x half> @fma_mask_load_312_v32f16(<32 x half> %x, <32 x half> %y, <32 x half>* %zp, i32 %mask) { +; CHECK-LABEL: fma_mask_load_312_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd132ph (%rdi), %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %z = load <32 x half>, <32 x half>* %zp + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %z, <32 x half> %x, <32 x half> %y) + %b = bitcast i32 %mask to <32 x i1> + %c = select <32 x i1> %b, <32 x half> %a, <32 x half> %x + ret <32 x half> %c +} + +define <32 x half> @fma_maskz_load_123_v32f16(<32 x half> %x, <32 x half> %y, <32 x half>* %zp, i32 %mask) { +; CHECK-LABEL: fma_maskz_load_123_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd213ph (%rdi), %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %z = load <32 x half>, <32 x half>* %zp + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %x, <32 x half> %y, <32 x half> %z) + %b = bitcast i32 %mask to <32 x i1> + %c = select <32 x i1> %b, <32 x half> %a, <32 x half> zeroinitializer + ret <32 x half> %c +} + +define <32 x half> @fma_maskz_load_213_v32f16(<32 x half> %x, <32 x half> %y, <32 x half>* %zp, i32 %mask) { +; CHECK-LABEL: fma_maskz_load_213_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd213ph (%rdi), %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %z = load <32 x half>, <32 x half>* %zp + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %y, <32 x half> %x, <32 x half> %z) + %b = bitcast i32 %mask to <32 x i1> + %c = select <32 x i1> %b, <32 x half> %a, <32 x half> zeroinitializer + ret <32 x half> %c +} + +define <32 x half> @fma_maskz_load_231_v32f16(<32 x half> %x, <32 x half> %y, <32 x half>* %zp, i32 %mask) { +; CHECK-LABEL: fma_maskz_load_231_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd231ph (%rdi), %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %z = load <32 x half>, <32 x half>* %zp + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %y, <32 x half> %z, <32 x half> %x) + %b = bitcast i32 %mask to <32 x i1> + %c = select <32 x i1> %b, <32 x half> %a, <32 x half> zeroinitializer + ret <32 x half> %c +} + +define <32 x half> @fma_maskz_load_321_v32f16(<32 x half> %x, <32 x half> %y, <32 x half>* %zp, i32 %mask) { +; CHECK-LABEL: fma_maskz_load_321_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd231ph (%rdi), %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %z = load <32 x half>, <32 x half>* %zp + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %z, <32 x half> %y, <32 x half> %x) + %b = bitcast i32 %mask to <32 x i1> + %c = select <32 x i1> %b, <32 x half> %a, <32 x half> zeroinitializer + ret 
<32 x half> %c +} + +define <32 x half> @fma_maskz_load_132_v32f16(<32 x half> %x, <32 x half> %y, <32 x half>* %zp, i32 %mask) { +; CHECK-LABEL: fma_maskz_load_132_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd132ph (%rdi), %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %z = load <32 x half>, <32 x half>* %zp + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %x, <32 x half> %z, <32 x half> %y) + %b = bitcast i32 %mask to <32 x i1> + %c = select <32 x i1> %b, <32 x half> %a, <32 x half> zeroinitializer + ret <32 x half> %c +} + +define <32 x half> @fma_maskz_load_312_v32f16(<32 x half> %x, <32 x half> %y, <32 x half>* %zp, i32 %mask) { +; CHECK-LABEL: fma_maskz_load_312_v32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: kmovd %esi, %k1 +; CHECK-NEXT: vfmadd132ph (%rdi), %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %z = load <32 x half>, <32 x half>* %zp + %a = call <32 x half> @llvm.fma.v32f16(<32 x half> %z, <32 x half> %x, <32 x half> %y) + %b = bitcast i32 %mask to <32 x i1> + %c = select <32 x i1> %b, <32 x half> %a, <32 x half> zeroinitializer + ret <32 x half> %c +} diff --git a/llvm/test/CodeGen/X86/avx512fp16-fma-intrinsics.ll b/llvm/test/CodeGen/X86/avx512fp16-fma-intrinsics.ll new file mode 100644 index 0000000000000..0729a9ee40857 --- /dev/null +++ b/llvm/test/CodeGen/X86/avx512fp16-fma-intrinsics.ll @@ -0,0 +1,585 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512fp16 --show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512fp16 --show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64 + + +declare <32 x half> @llvm.x86.avx512fp16.vfmadd.ph.512(<32 x half>, <32 x half>, <32 x half>, i32) + +define <32 x half> @test_x86_vfnmadd_ph_z(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { +; CHECK-LABEL: test_x86_vfnmadd_ph_z: +; CHECK: # %bb.0: +; CHECK-NEXT: vfnmadd213ph %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf6,0x75,0x48,0xac,0xc2] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %1 = fsub <32 x half> , %a1 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a0, <32 x half> %1, <32 x half> %a2) + ret <32 x half> %2 +} + +define <32 x half> @test_mask_vfnmadd_ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 %mask) { +; X86-LABEL: test_mask_vfnmadd_ph: +; X86: # %bb.0: +; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfnmadd132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x9c,0xc1] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_mask_vfnmadd_ph: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfnmadd132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x9c,0xc1] +; X64-NEXT: retq # encoding: [0xc3] + %1 = fsub <32 x half> , %a1 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a0, <32 x half> %1, <32 x half> %a2) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @test_x86_vfnmsubph_z(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { +; CHECK-LABEL: test_x86_vfnmsubph_z: +; CHECK: # %bb.0: +; CHECK-NEXT: vfnmsub213ph %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf6,0x75,0x48,0xae,0xc2] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %1 = fsub <32 x half> , %a1 + %2 = fsub <32 x half> , %a2 + %3 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a0, <32 x 
half> %1, <32 x half> %2) + ret <32 x half> %3 +} + +define <32 x half> @test_mask_vfnmsub_ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 %mask) { +; X86-LABEL: test_mask_vfnmsub_ph: +; X86: # %bb.0: +; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfnmsub132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x9e,0xc1] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_mask_vfnmsub_ph: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfnmsub132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x9e,0xc1] +; X64-NEXT: retq # encoding: [0xc3] + %1 = fsub <32 x half> , %a1 + %2 = fsub <32 x half> , %a2 + %3 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a0, <32 x half> %1, <32 x half> %2) + %4 = bitcast i32 %mask to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %3, <32 x half> %a0 + ret <32 x half> %5 +} + +define <32 x half> @test_x86_vfmaddsubph_z(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { +; CHECK-LABEL: test_x86_vfmaddsubph_z: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmaddsub213ph %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf6,0x75,0x48,0xa6,0xc2] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %res = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 4) #2 + ret <32 x half> %res +} + +define <32 x half> @test_mask_fmaddsub_ph(<32 x half> %a, <32 x half> %b, <32 x half> %c, i32 %mask) { +; X86-LABEL: test_mask_fmaddsub_ph: +; X86: # %bb.0: +; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfmaddsub132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x96,0xc1] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_mask_fmaddsub_ph: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfmaddsub132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x96,0xc1] +; X64-NEXT: retq # encoding: [0xc3] + %res = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a, <32 x half> %b, <32 x half> %c, i32 4) + %bc = bitcast i32 %mask to <32 x i1> + %sel = select <32 x i1> %bc, <32 x half> %res, <32 x half> %a + ret <32 x half> %sel +} + +declare <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half>, <32 x half>, <32 x half>, i32) nounwind readnone + +define <32 x half>@test_int_x86_avx512_mask_vfmaddsub_ph_512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 %x3){ +; X86-LABEL: test_int_x86_avx512_mask_vfmaddsub_ph_512: +; X86: # %bb.0: +; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfmaddsub132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x96,0xc1] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_mask_vfmaddsub_ph_512: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfmaddsub132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x96,0xc1] +; X64-NEXT: retq # encoding: [0xc3] + %res = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 4) + %bc = bitcast i32 %x3 to <32 x i1> + %sel = select <32 x i1> %bc, <32 x half> %res, <32 x half> %x0 + ret <32 x half> %sel +} + +define <32 x half>@test_int_x86_avx512_mask3_vfmaddsub_ph_512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 %x3){ +; X86-LABEL: test_int_x86_avx512_mask3_vfmaddsub_ph_512: +; 
X86: # %bb.0: +; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfmaddsub231ph %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x49,0xb6,0xd1] +; X86-NEXT: vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_mask3_vfmaddsub_ph_512: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfmaddsub231ph %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x49,0xb6,0xd1] +; X64-NEXT: vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %res = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 4) + %bc = bitcast i32 %x3 to <32 x i1> + %sel = select <32 x i1> %bc, <32 x half> %res, <32 x half> %x2 + ret <32 x half> %sel +} + +define <32 x half>@test_int_x86_avx512_maskz_vfmaddsub_ph_512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 %x3){ +; X86-LABEL: test_int_x86_avx512_maskz_vfmaddsub_ph_512: +; X86: # %bb.0: +; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfmaddsub213ph %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf6,0x75,0xc9,0xa6,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_maskz_vfmaddsub_ph_512: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfmaddsub213ph %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf6,0x75,0xc9,0xa6,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %res = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 4) + %bc = bitcast i32 %x3 to <32 x i1> + %sel = select <32 x i1> %bc, <32 x half> %res, <32 x half> zeroinitializer + ret <32 x half> %sel +} + +define <32 x half>@test_int_x86_avx512_mask3_vfmsubadd_ph_512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 %x3){ +; X86-LABEL: test_int_x86_avx512_mask3_vfmsubadd_ph_512: +; X86: # %bb.0: +; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfmsubadd231ph %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x49,0xb7,0xd1] +; X86-NEXT: vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_mask3_vfmsubadd_ph_512: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfmsubadd231ph %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x49,0xb7,0xd1] +; X64-NEXT: vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %neg = fneg <32 x half> %x2 + %res = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %x0, <32 x half> %x1, <32 x half> %neg, i32 4) + %bc = bitcast i32 %x3 to <32 x i1> + %sel = select <32 x i1> %bc, <32 x half> %res, <32 x half> %x2 + ret <32 x half> %sel +} + +define <32 x half> @test_mask_round_vfmadd512_ph_rrb_rne(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 %mask) { +; X86-LABEL: test_mask_round_vfmadd512_ph_rrb_rne: +; X86: # %bb.0: +; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfmadd132ph {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x19,0x98,0xc1] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_mask_round_vfmadd512_ph_rrb_rne: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, 
%k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfmadd132ph {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x19,0x98,0xc1] +; X64-NEXT: retq # encoding: [0xc3] + %res = call <32 x half> @llvm.x86.avx512fp16.vfmadd.ph.512(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 8) nounwind + %bc = bitcast i32 %mask to <32 x i1> + %sel = select <32 x i1> %bc, <32 x half> %res, <32 x half> %a0 + ret <32 x half> %sel +} + +define <32 x half> @test_mask_round_vfmadd512_ph_rrb_rtn(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 %mask) { +; X86-LABEL: test_mask_round_vfmadd512_ph_rrb_rtn: +; X86: # %bb.0: +; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfmadd132ph {rd-sae}, %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x39,0x98,0xc1] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_mask_round_vfmadd512_ph_rrb_rtn: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfmadd132ph {rd-sae}, %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x39,0x98,0xc1] +; X64-NEXT: retq # encoding: [0xc3] + %res = call <32 x half> @llvm.x86.avx512fp16.vfmadd.ph.512(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 9) nounwind + %bc = bitcast i32 %mask to <32 x i1> + %sel = select <32 x i1> %bc, <32 x half> %res, <32 x half> %a0 + ret <32 x half> %sel +} + +define <32 x half> @test_mask_round_vfmadd512_ph_rrb_rtp(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 %mask) { +; X86-LABEL: test_mask_round_vfmadd512_ph_rrb_rtp: +; X86: # %bb.0: +; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfmadd132ph {ru-sae}, %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x59,0x98,0xc1] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_mask_round_vfmadd512_ph_rrb_rtp: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfmadd132ph {ru-sae}, %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x59,0x98,0xc1] +; X64-NEXT: retq # encoding: [0xc3] + %res = call <32 x half> @llvm.x86.avx512fp16.vfmadd.ph.512(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 10) nounwind + %bc = bitcast i32 %mask to <32 x i1> + %sel = select <32 x i1> %bc, <32 x half> %res, <32 x half> %a0 + ret <32 x half> %sel +} + +define <32 x half> @test_mask_round_vfmadd512_ph_rrb_rtz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 %mask) { +; X86-LABEL: test_mask_round_vfmadd512_ph_rrb_rtz: +; X86: # %bb.0: +; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfmadd132ph {rz-sae}, %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x79,0x98,0xc1] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_mask_round_vfmadd512_ph_rrb_rtz: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfmadd132ph {rz-sae}, %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x79,0x98,0xc1] +; X64-NEXT: retq # encoding: [0xc3] + %res = call <32 x half> @llvm.x86.avx512fp16.vfmadd.ph.512(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 11) nounwind + %bc = bitcast i32 %mask to <32 x i1> + %sel = select <32 x i1> %bc, <32 x half> %res, <32 x half> %a0 + ret <32 x half> %sel +} + +define <32 x half> @test_mask_round_vfmadd512_ph_rrb_current(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 %mask) { +; X86-LABEL: test_mask_round_vfmadd512_ph_rrb_current: +; X86: # %bb.0: +; X86-NEXT: kmovd 
{{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfmadd132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x98,0xc1] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_mask_round_vfmadd512_ph_rrb_current: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfmadd132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x98,0xc1] +; X64-NEXT: retq # encoding: [0xc3] + %res = call <32 x half> @llvm.x86.avx512fp16.vfmadd.ph.512(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 4) nounwind + %bc = bitcast i32 %mask to <32 x i1> + %sel = select <32 x i1> %bc, <32 x half> %res, <32 x half> %a0 + ret <32 x half> %sel +} + +define <32 x half> @test_mask_round_vfmadd512_ph_rrbz_rne(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { +; CHECK-LABEL: test_mask_round_vfmadd512_ph_rrbz_rne: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213ph {rn-sae}, %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf6,0x75,0x18,0xa8,0xc2] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %res = call <32 x half> @llvm.x86.avx512fp16.vfmadd.ph.512(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 8) nounwind + ret <32 x half> %res +} + +define <32 x half> @test_mask_round_vfmadd512_ph_rrbz_rtn(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { +; CHECK-LABEL: test_mask_round_vfmadd512_ph_rrbz_rtn: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213ph {rd-sae}, %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf6,0x75,0x38,0xa8,0xc2] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %res = call <32 x half> @llvm.x86.avx512fp16.vfmadd.ph.512(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 9) nounwind + ret <32 x half> %res +} + +define <32 x half> @test_mask_round_vfmadd512_ph_rrbz_rtp(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { +; CHECK-LABEL: test_mask_round_vfmadd512_ph_rrbz_rtp: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213ph {ru-sae}, %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf6,0x75,0x58,0xa8,0xc2] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %res = call <32 x half> @llvm.x86.avx512fp16.vfmadd.ph.512(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 10) nounwind + ret <32 x half> %res +} + +define <32 x half> @test_mask_round_vfmadd512_ph_rrbz_rtz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { +; CHECK-LABEL: test_mask_round_vfmadd512_ph_rrbz_rtz: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213ph {rz-sae}, %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf6,0x75,0x78,0xa8,0xc2] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %res = call <32 x half> @llvm.x86.avx512fp16.vfmadd.ph.512(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 11) nounwind + ret <32 x half> %res +} + +define <32 x half> @test_mask_round_vfmadd512_ph_rrbz_current(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { +; CHECK-LABEL: test_mask_round_vfmadd512_ph_rrbz_current: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213ph %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf6,0x75,0x48,0xa8,0xc2] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %res = call <32 x half> @llvm.x86.avx512fp16.vfmadd.ph.512(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 4) nounwind + ret <32 x half> %res +} + +define <32 x half>@test_int_x86_avx512_mask3_vfmsub_ph_512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 %x3){ +; X86-LABEL: test_int_x86_avx512_mask3_vfmsub_ph_512: +; X86: # %bb.0: +; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfmsub231ph %zmm1, %zmm0, %zmm2 {%k1} # encoding: 
[0x62,0xf6,0x7d,0x49,0xba,0xd1] +; X86-NEXT: vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_mask3_vfmsub_ph_512: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfmsub231ph %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x49,0xba,0xd1] +; X64-NEXT: vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %1 = fsub <32 x half> , %x2 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %x0, <32 x half> %x1, <32 x half> %1) + %3 = bitcast i32 %x3 to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %x2 + ret <32 x half> %4 +} + +define <32 x half>@test_int_x86_avx512_mask_vfmadd_ph_512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 %x3){ +; X86-LABEL: test_int_x86_avx512_mask_vfmadd_ph_512: +; X86: # %bb.0: +; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfmadd132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x98,0xc1] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_mask_vfmadd_ph_512: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfmadd132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x98,0xc1] +; X64-NEXT: retq # encoding: [0xc3] + %res = call <32 x half> @llvm.x86.avx512fp16.vfmadd.ph.512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 4) + %bc = bitcast i32 %x3 to <32 x i1> + %sel = select <32 x i1> %bc, <32 x half> %res, <32 x half> %x0 + ret <32 x half> %sel +} + +define <32 x half>@test_int_x86_avx512_mask3_vfmadd_ph_512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 %x3){ +; X86-LABEL: test_int_x86_avx512_mask3_vfmadd_ph_512: +; X86: # %bb.0: +; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfmadd231ph %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x49,0xb8,0xd1] +; X86-NEXT: vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_mask3_vfmadd_ph_512: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfmadd231ph %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x49,0xb8,0xd1] +; X64-NEXT: vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %1 = call <32 x half> @llvm.fma.v32f16(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2) + %2 = bitcast i32 %x3 to <32 x i1> + %3 = select <32 x i1> %2, <32 x half> %1, <32 x half> %x2 + ret <32 x half> %3 +} + +define <32 x half> @test_int_x86_avx512_maskz_vfmadd_ph_512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 %x3) { +; X86-LABEL: test_int_x86_avx512_maskz_vfmadd_ph_512: +; X86: # %bb.0: +; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfmadd213ph %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf6,0x75,0xc9,0xa8,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_maskz_vfmadd_ph_512: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfmadd213ph %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf6,0x75,0xc9,0xa8,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %1 = call <32 x half> @llvm.fma.v32f16(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2) + %2 = bitcast i32 %x3 to <32 x i1> + %3 = 
select <32 x i1> %2, <32 x half> %1, <32 x half> zeroinitializer + ret <32 x half> %3 +} + +define <32 x half>@test_int_x86_avx512_mask_vfnmsub_ph_512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 %x3){ +; X86-LABEL: test_int_x86_avx512_mask_vfnmsub_ph_512: +; X86: # %bb.0: +; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfnmsub132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x9e,0xc1] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_mask_vfnmsub_ph_512: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfnmsub132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x9e,0xc1] +; X64-NEXT: retq # encoding: [0xc3] + %1 = fsub <32 x half> , %x1 + %2 = fsub <32 x half> , %x2 + %3 = call <32 x half> @llvm.fma.v32f16(<32 x half> %x0, <32 x half> %1, <32 x half> %2) + %4 = bitcast i32 %x3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %3, <32 x half> %x0 + ret <32 x half> %5 +} + +define <32 x half>@test_int_x86_avx512_mask3_vfnmsub_ph_512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 %x3){ +; X86-LABEL: test_int_x86_avx512_mask3_vfnmsub_ph_512: +; X86: # %bb.0: +; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfnmsub231ph %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x49,0xbe,0xd1] +; X86-NEXT: vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_mask3_vfnmsub_ph_512: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfnmsub231ph %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x49,0xbe,0xd1] +; X64-NEXT: vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %1 = fsub <32 x half> , %x0 + %2 = fsub <32 x half> , %x2 + %3 = call <32 x half> @llvm.fma.v32f16(<32 x half> %1, <32 x half> %x1, <32 x half> %2) + %4 = bitcast i32 %x3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %3, <32 x half> %x2 + ret <32 x half> %5 +} + +define <32 x half>@test_int_x86_avx512_mask_vfnmadd_ph_512(<32 x half> %x0, <32 x half> %x1, <32 x half> %x2, i32 %x3){ +; X86-LABEL: test_int_x86_avx512_mask_vfnmadd_ph_512: +; X86: # %bb.0: +; X86-NEXT: kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfnmadd132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x9c,0xc1] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_mask_vfnmadd_ph_512: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfnmadd132ph %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x49,0x9c,0xc1] +; X64-NEXT: retq # encoding: [0xc3] + %1 = fsub <32 x half> , %x1 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %x0, <32 x half> %1, <32 x half> %x2) + %3 = bitcast i32 %x3 to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %x0 + ret <32 x half> %4 +} + +define <32 x half> @test_x86_fma_vfnmadd_ph_512(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) #0 { +; CHECK-LABEL: test_x86_fma_vfnmadd_ph_512: +; CHECK: # %bb.0: +; CHECK-NEXT: vfnmadd213ph %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf6,0x75,0x48,0xac,0xc2] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %1 = fsub <32 x half> , %a0 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %1, <32 x half> %a1, <32 x half> %a2) + ret <32 x half> %2 +} + 
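+; Hand-written illustrative sketch (not autogenerated by update_llc_test_checks.py):
+; the vfnmadd/vfnmsub tests in this file build the negated-multiplicand forms by
+; negating an fma operand (an fsub from a negative-zero splat in the checked-in IR)
+; and then calling the plain fma intrinsic; for a single half element the same idea is:
+;   %na = fneg half %a                                        ; flip the sign of a
+;   %r  = call half @llvm.fma.f16(half %na, half %b, half %c) ; computes -(a*b) + c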
+define <32 x half> @test_x86_fma_vfnmsub_ph_512(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) #0 { +; CHECK-LABEL: test_x86_fma_vfnmsub_ph_512: +; CHECK: # %bb.0: +; CHECK-NEXT: vfnmsub213ph %zmm0, %zmm1, %zmm0 # encoding: [0x62,0xf6,0x75,0x48,0xae,0xc0] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %1 = fsub <32 x half> , %a0 + %2 = fsub <32 x half> , %a0 + %3 = call <32 x half> @llvm.fma.v32f16(<32 x half> %1, <32 x half> %a1, <32 x half> %2) + ret <32 x half> %3 +} + +define <8 x half>@test_int_x86_avx512_mask3_vfmadd_sh(<8 x half> %x0, <8 x half> %x1, half *%ptr_b, i8 %x3, i32 %x4) { +; X86-LABEL: test_int_x86_avx512_mask3_vfmadd_sh: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04] +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x08] +; X86-NEXT: vfmadd231sh (%eax), %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf6,0x7d,0x09,0xb9,0x08] +; X86-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_mask3_vfmadd_sh: +; X64: # %bb.0: +; X64-NEXT: kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce] +; X64-NEXT: vfmadd231sh (%rdi), %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf6,0x7d,0x09,0xb9,0x0f] +; X64-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1] +; X64-NEXT: retq # encoding: [0xc3] + %q = load half, half* %ptr_b + %vecinit.i = insertelement <8 x half> undef, half %q, i32 0 + %1 = extractelement <8 x half> %x0, i64 0 + %2 = extractelement <8 x half> %vecinit.i, i64 0 + %3 = extractelement <8 x half> %x1, i64 0 + %4 = call half @llvm.fma.f16(half %1, half %2, half %3) + %5 = bitcast i8 %x3 to <8 x i1> + %6 = extractelement <8 x i1> %5, i64 0 + %7 = select i1 %6, half %4, half %3 + %8 = insertelement <8 x half> %x1, half %7, i64 0 + ret <8 x half> %8 +} + +define <8 x half>@test_int_x86_avx512_maskz_vfmadd_sh(<8 x half> %x0, <8 x half> %x1, <8 x half> %x2, i8 %x3, i32 %x4 ){ +; X86-LABEL: test_int_x86_avx512_maskz_vfmadd_sh: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfmadd213sh %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf6,0x75,0x89,0xa9,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_maskz_vfmadd_sh: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfmadd213sh %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf6,0x75,0x89,0xa9,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %1 = extractelement <8 x half> %x0, i64 0 + %2 = extractelement <8 x half> %x1, i64 0 + %3 = extractelement <8 x half> %x2, i64 0 + %4 = call half @llvm.fma.f16(half %1, half %2, half %3) + %5 = bitcast i8 %x3 to <8 x i1> + %6 = extractelement <8 x i1> %5, i64 0 + %7 = select i1 %6, half %4, half 0.000000e+00 + %8 = insertelement <8 x half> %x0, half %7, i64 0 + %9 = extractelement <8 x half> %x0, i64 0 + %10 = extractelement <8 x half> %x1, i64 0 + %11 = extractelement <8 x half> %x2, i64 0 + %12 = call half @llvm.x86.avx512fp16.vfmadd.f16(half %9, half %10, half %11, i32 3) + %13 = bitcast i8 %x3 to <8 x i1> + %14 = extractelement <8 x i1> %13, i64 0 + %15 = select i1 %14, half %12, half 0.000000e+00 + %16 = insertelement <8 x half> %x0, half %15, i64 0 + %res2 = fadd <8 x half> %8, %16 + ret <8 x half> %8 +} + +define void @fmadd_sh_mask_memfold(half* %a, half* %b, i8 %c) { +; X86-LABEL: fmadd_sh_mask_memfold: +; X86: # %bb.0: +; X86-NEXT: kmovb 
{{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x0c] +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08] +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04] +; X86-NEXT: vmovsh (%ecx), %xmm0 # encoding: [0x62,0xf5,0x7e,0x08,0x10,0x01] +; X86-NEXT: vmovsh (%eax), %xmm1 # encoding: [0x62,0xf5,0x7e,0x08,0x10,0x08] +; X86-NEXT: vfmadd213sh %xmm0, %xmm0, %xmm1 # encoding: [0x62,0xf6,0x7d,0x08,0xa9,0xc8] +; X86-NEXT: vmovsh %xmm1, %xmm0, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7e,0x09,0x10,0xc1] +; X86-NEXT: vmovsh %xmm0, (%ecx) # encoding: [0x62,0xf5,0x7e,0x08,0x11,0x01] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: fmadd_sh_mask_memfold: +; X64: # %bb.0: +; X64-NEXT: vmovsh (%rdi), %xmm0 # encoding: [0x62,0xf5,0x7e,0x08,0x10,0x07] +; X64-NEXT: vmovsh (%rsi), %xmm1 # encoding: [0x62,0xf5,0x7e,0x08,0x10,0x0e] +; X64-NEXT: vfmadd213sh %xmm0, %xmm0, %xmm1 # encoding: [0x62,0xf6,0x7d,0x08,0xa9,0xc8] +; X64-NEXT: kmovd %edx, %k1 # encoding: [0xc5,0xfb,0x92,0xca] +; X64-NEXT: vmovsh %xmm1, %xmm0, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7e,0x09,0x10,0xc1] +; X64-NEXT: vmovsh %xmm0, (%rdi) # encoding: [0x62,0xf5,0x7e,0x08,0x11,0x07] +; X64-NEXT: retq # encoding: [0xc3] + %a.val = load half, half* %a + %av0 = insertelement <8 x half> undef, half %a.val, i32 0 + %av1 = insertelement <8 x half> %av0, half 0.000000e+00, i32 1 + %av2 = insertelement <8 x half> %av1, half 0.000000e+00, i32 2 + %av3 = insertelement <8 x half> %av2, half 0.000000e+00, i32 3 + %av4 = insertelement <8 x half> %av3, half 0.000000e+00, i32 4 + %av5 = insertelement <8 x half> %av4, half 0.000000e+00, i32 5 + %av6 = insertelement <8 x half> %av5, half 0.000000e+00, i32 6 + %av = insertelement <8 x half> %av6, half 0.000000e+00, i32 7 + + %b.val = load half, half* %b + %bv0 = insertelement <8 x half> undef, half %b.val, i32 0 + %bv1 = insertelement <8 x half> %bv0, half 0.000000e+00, i32 1 + %bv2 = insertelement <8 x half> %bv1, half 0.000000e+00, i32 2 + %bv3 = insertelement <8 x half> %bv2, half 0.000000e+00, i32 3 + %bv4 = insertelement <8 x half> %bv3, half 0.000000e+00, i32 4 + %bv5 = insertelement <8 x half> %bv4, half 0.000000e+00, i32 5 + %bv6 = insertelement <8 x half> %bv5, half 0.000000e+00, i32 6 + %bv = insertelement <8 x half> %bv6, half 0.000000e+00, i32 7 + %1 = extractelement <8 x half> %av, i64 0 + %2 = extractelement <8 x half> %bv, i64 0 + %3 = extractelement <8 x half> %av, i64 0 + %4 = call half @llvm.fma.f16(half %1, half %2, half %3) + %5 = bitcast i8 %c to <8 x i1> + %6 = extractelement <8 x i1> %5, i64 0 + %7 = select i1 %6, half %4, half %1 + %8 = insertelement <8 x half> %av, half %7, i64 0 + %sr = extractelement <8 x half> %8, i32 0 + store half %sr, half* %a + ret void +} + +declare half @llvm.fma.f16(half, half, half) +declare half @llvm.x86.avx512fp16.vfmadd.f16(half, half, half, i32) + +declare <32 x half> @llvm.fma.v32f16(<32 x half>, <32 x half>, <32 x half>) diff --git a/llvm/test/CodeGen/X86/avx512fp16vl-fma-intrinsics.ll b/llvm/test/CodeGen/X86/avx512fp16vl-fma-intrinsics.ll new file mode 100644 index 0000000000000..237c9aa0309a5 --- /dev/null +++ b/llvm/test/CodeGen/X86/avx512fp16vl-fma-intrinsics.ll @@ -0,0 +1,530 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512vl -mattr=+avx512fp16 --show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl -mattr=+avx512fp16 
--show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64 + + +define <16 x half> @test_x86_vfnmadd_ph_z_256(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2) { +; CHECK-LABEL: test_x86_vfnmadd_ph_z_256: +; CHECK: # %bb.0: +; CHECK-NEXT: vfnmadd213ph %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf6,0x75,0x28,0xac,0xc2] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %1 = fsub <16 x half> , %a1 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a0, <16 x half> %1, <16 x half> %a2) + ret <16 x half> %2 +} + +define <16 x half> @test_mask_vfnmadd_ph_256(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2, i16 %mask) { +; X86-LABEL: test_mask_vfnmadd_ph_256: +; X86: # %bb.0: +; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfnmadd132ph %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x29,0x9c,0xc1] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_mask_vfnmadd_ph_256: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfnmadd132ph %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x29,0x9c,0xc1] +; X64-NEXT: retq # encoding: [0xc3] + %1 = fsub <16 x half> , %a1 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a0, <16 x half> %1, <16 x half> %a2) + %3 = bitcast i16 %mask to <16 x i1> + %4 = select <16 x i1> %3, <16 x half> %2, <16 x half> %a0 + ret <16 x half> %4 +} + +define <16 x half> @test_x86_vfnmsubph_z_256(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2) { +; CHECK-LABEL: test_x86_vfnmsubph_z_256: +; CHECK: # %bb.0: +; CHECK-NEXT: vfnmsub213ph %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf6,0x75,0x28,0xae,0xc2] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %1 = fsub <16 x half> , %a1 + %2 = fsub <16 x half> , %a2 + %3 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a0, <16 x half> %1, <16 x half> %2) + ret <16 x half> %3 +} + +define <16 x half> @test_mask_vfnmsub_ph_256(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2, i16 %mask) { +; X86-LABEL: test_mask_vfnmsub_ph_256: +; X86: # %bb.0: +; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfnmsub132ph %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x29,0x9e,0xc1] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_mask_vfnmsub_ph_256: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfnmsub132ph %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x29,0x9e,0xc1] +; X64-NEXT: retq # encoding: [0xc3] + %1 = fsub <16 x half> , %a1 + %2 = fsub <16 x half> , %a2 + %3 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a0, <16 x half> %1, <16 x half> %2) + %4 = bitcast i16 %mask to <16 x i1> + %5 = select <16 x i1> %4, <16 x half> %3, <16 x half> %a0 + ret <16 x half> %5 +} + +define <16 x half>@test_int_x86_avx512_mask3_vfmaddsub_ph_256(<16 x half> %x0, <16 x half> %x1, <16 x half> %x2, i16 %x3){ +; X86-LABEL: test_int_x86_avx512_mask3_vfmaddsub_ph_256: +; X86: # %bb.0: +; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfmaddsub231ph %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x29,0xb6,0xd1] +; X86-NEXT: vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_mask3_vfmaddsub_ph_256: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfmaddsub231ph %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x29,0xb6,0xd1] +; X64-NEXT: 
vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %res = call <16 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.256(<16 x half> %x0, <16 x half> %x1, <16 x half> %x2) + %bc = bitcast i16 %x3 to <16 x i1> + %sel = select <16 x i1> %bc, <16 x half> %res, <16 x half> %x2 + ret <16 x half> %sel +} +declare <16 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.256(<16 x half>, <16 x half>, <16 x half>) + +define <16 x half>@test_int_x86_avx512_maskz_vfmaddsub_ph_256(<16 x half> %x0, <16 x half> %x1, <16 x half> %x2, i16 %x3){ +; X86-LABEL: test_int_x86_avx512_maskz_vfmaddsub_ph_256: +; X86: # %bb.0: +; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfmaddsub213ph %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf6,0x75,0xa9,0xa6,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_maskz_vfmaddsub_ph_256: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfmaddsub213ph %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf6,0x75,0xa9,0xa6,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %res = call <16 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.256(<16 x half> %x0, <16 x half> %x1, <16 x half> %x2) + %bc = bitcast i16 %x3 to <16 x i1> + %sel = select <16 x i1> %bc, <16 x half> %res, <16 x half> zeroinitializer + ret <16 x half> %sel +} + +define <16 x half>@test_int_x86_avx512_mask3_vfmsubadd_ph_256(<16 x half> %x0, <16 x half> %x1, <16 x half> %x2, i16 %x3){ +; X86-LABEL: test_int_x86_avx512_mask3_vfmsubadd_ph_256: +; X86: # %bb.0: +; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfmsubadd231ph %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x29,0xb7,0xd1] +; X86-NEXT: vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_mask3_vfmsubadd_ph_256: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfmsubadd231ph %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x29,0xb7,0xd1] +; X64-NEXT: vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %neg = fneg <16 x half> %x2 + %res = call <16 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.256(<16 x half> %x0, <16 x half> %x1, <16 x half> %neg) + %bc = bitcast i16 %x3 to <16 x i1> + %sel = select <16 x i1> %bc, <16 x half> %res, <16 x half> %x2 + ret <16 x half> %sel +} + +define <16 x half>@test_int_x86_avx512_mask3_vfmsub_ph_256(<16 x half> %x0, <16 x half> %x1, <16 x half> %x2, i16 %x3){ +; X86-LABEL: test_int_x86_avx512_mask3_vfmsub_ph_256: +; X86: # %bb.0: +; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfmsub231ph %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x29,0xba,0xd1] +; X86-NEXT: vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_mask3_vfmsub_ph_256: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfmsub231ph %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x29,0xba,0xd1] +; X64-NEXT: vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %1 = fsub <16 x half> , %x2 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %x0, <16 x half> %x1, <16 x half> 
%1) + %3 = bitcast i16 %x3 to <16 x i1> + %4 = select <16 x i1> %3, <16 x half> %2, <16 x half> %x2 + ret <16 x half> %4 +} + +define <16 x half>@test_int_x86_avx512_mask3_vfmadd_ph_256(<16 x half> %x0, <16 x half> %x1, <16 x half> %x2, i16 %x3){ +; X86-LABEL: test_int_x86_avx512_mask3_vfmadd_ph_256: +; X86: # %bb.0: +; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfmadd231ph %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x29,0xb8,0xd1] +; X86-NEXT: vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_mask3_vfmadd_ph_256: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfmadd231ph %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x29,0xb8,0xd1] +; X64-NEXT: vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %1 = call <16 x half> @llvm.fma.v16f16(<16 x half> %x0, <16 x half> %x1, <16 x half> %x2) + %2 = bitcast i16 %x3 to <16 x i1> + %3 = select <16 x i1> %2, <16 x half> %1, <16 x half> %x2 + ret <16 x half> %3 +} + +define <16 x half> @test_int_x86_avx512_maskz_vfmadd_ph_256(<16 x half> %x0, <16 x half> %x1, <16 x half> %x2, i16 %x3) { +; X86-LABEL: test_int_x86_avx512_maskz_vfmadd_ph_256: +; X86: # %bb.0: +; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfmadd213ph %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf6,0x75,0xa9,0xa8,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_maskz_vfmadd_ph_256: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfmadd213ph %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf6,0x75,0xa9,0xa8,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %1 = call <16 x half> @llvm.fma.v16f16(<16 x half> %x0, <16 x half> %x1, <16 x half> %x2) + %2 = bitcast i16 %x3 to <16 x i1> + %3 = select <16 x i1> %2, <16 x half> %1, <16 x half> zeroinitializer + ret <16 x half> %3 +} + +define <16 x half>@test_int_x86_avx512_mask_vfnmsub_ph_256(<16 x half> %x0, <16 x half> %x1, <16 x half> %x2, i16 %x3){ +; X86-LABEL: test_int_x86_avx512_mask_vfnmsub_ph_256: +; X86: # %bb.0: +; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfnmsub132ph %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x29,0x9e,0xc1] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_mask_vfnmsub_ph_256: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfnmsub132ph %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x29,0x9e,0xc1] +; X64-NEXT: retq # encoding: [0xc3] + %1 = fsub <16 x half> , %x1 + %2 = fsub <16 x half> , %x2 + %3 = call <16 x half> @llvm.fma.v16f16(<16 x half> %x0, <16 x half> %1, <16 x half> %2) + %4 = bitcast i16 %x3 to <16 x i1> + %5 = select <16 x i1> %4, <16 x half> %3, <16 x half> %x0 + ret <16 x half> %5 +} + +define <16 x half>@test_int_x86_avx512_mask3_vfnmsub_ph_256(<16 x half> %x0, <16 x half> %x1, <16 x half> %x2, i16 %x3){ +; X86-LABEL: test_int_x86_avx512_mask3_vfnmsub_ph_256: +; X86: # %bb.0: +; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfnmsub231ph %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x29,0xbe,0xd1] +; X86-NEXT: vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2] +; X86-NEXT: 
retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_mask3_vfnmsub_ph_256: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfnmsub231ph %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x29,0xbe,0xd1] +; X64-NEXT: vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %1 = fsub <16 x half> , %x0 + %2 = fsub <16 x half> , %x2 + %3 = call <16 x half> @llvm.fma.v16f16(<16 x half> %1, <16 x half> %x1, <16 x half> %2) + %4 = bitcast i16 %x3 to <16 x i1> + %5 = select <16 x i1> %4, <16 x half> %3, <16 x half> %x2 + ret <16 x half> %5 +} + +define <16 x half>@test_int_x86_avx512_mask_vfnmadd_ph_256(<16 x half> %x0, <16 x half> %x1, <16 x half> %x2, i16 %x3){ +; X86-LABEL: test_int_x86_avx512_mask_vfnmadd_ph_256: +; X86: # %bb.0: +; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfnmadd132ph %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x29,0x9c,0xc1] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_mask_vfnmadd_ph_256: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfnmadd132ph %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x29,0x9c,0xc1] +; X64-NEXT: retq # encoding: [0xc3] + %1 = fsub <16 x half> , %x1 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %x0, <16 x half> %1, <16 x half> %x2) + %3 = bitcast i16 %x3 to <16 x i1> + %4 = select <16 x i1> %3, <16 x half> %2, <16 x half> %x0 + ret <16 x half> %4 +} + +define <16 x half> @test_x86_fma_vfnmadd_ph_256(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2) #0 { +; CHECK-LABEL: test_x86_fma_vfnmadd_ph_256: +; CHECK: # %bb.0: +; CHECK-NEXT: vfnmadd213ph %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf6,0x75,0x28,0xac,0xc2] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %1 = fsub <16 x half> , %a0 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %1, <16 x half> %a1, <16 x half> %a2) + ret <16 x half> %2 +} + +define <16 x half> @test_x86_fma_vfnmsub_ph_256(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2) #0 { +; CHECK-LABEL: test_x86_fma_vfnmsub_ph_256: +; CHECK: # %bb.0: +; CHECK-NEXT: vfnmsub213ph %ymm0, %ymm1, %ymm0 # encoding: [0x62,0xf6,0x75,0x28,0xae,0xc0] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %1 = fsub <16 x half> , %a0 + %2 = fsub <16 x half> , %a0 + %3 = call <16 x half> @llvm.fma.v16f16(<16 x half> %1, <16 x half> %a1, <16 x half> %2) + ret <16 x half> %3 +} + +declare <16 x half> @llvm.fma.v16f16(<16 x half>, <16 x half>, <16 x half>) + +define <8 x half> @test_x86_vfnmadd_ph_z_128(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) { +; CHECK-LABEL: test_x86_vfnmadd_ph_z_128: +; CHECK: # %bb.0: +; CHECK-NEXT: vfnmadd213ph %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf6,0x75,0x08,0xac,0xc2] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %1 = fsub <8 x half> , %a1 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a0, <8 x half> %1, <8 x half> %a2) + ret <8 x half> %2 +} + +define <8 x half> @test_mask_vfnmadd_ph_128(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2, i8 %mask) { +; X86-LABEL: test_mask_vfnmadd_ph_128: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfnmadd132ph %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x09,0x9c,0xc1] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_mask_vfnmadd_ph_128: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; 
X64-NEXT: vfnmadd132ph %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x09,0x9c,0xc1] +; X64-NEXT: retq # encoding: [0xc3] + %1 = fsub <8 x half> , %a1 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a0, <8 x half> %1, <8 x half> %a2) + %3 = bitcast i8 %mask to <8 x i1> + %4 = select <8 x i1> %3, <8 x half> %2, <8 x half> %a0 + ret <8 x half> %4 +} + +define <8 x half> @test_x86_vfnmsubph_z_128(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) { +; CHECK-LABEL: test_x86_vfnmsubph_z_128: +; CHECK: # %bb.0: +; CHECK-NEXT: vfnmsub213ph %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf6,0x75,0x08,0xae,0xc2] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %1 = fsub <8 x half> , %a1 + %2 = fsub <8 x half> , %a2 + %3 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a0, <8 x half> %1, <8 x half> %2) + ret <8 x half> %3 +} + +define <8 x half> @test_mask_vfnmsub_ph_128(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2, i8 %mask) { +; X86-LABEL: test_mask_vfnmsub_ph_128: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfnmsub132ph %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x09,0x9e,0xc1] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_mask_vfnmsub_ph_128: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfnmsub132ph %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x09,0x9e,0xc1] +; X64-NEXT: retq # encoding: [0xc3] + %1 = fsub <8 x half> , %a1 + %2 = fsub <8 x half> , %a2 + %3 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a0, <8 x half> %1, <8 x half> %2) + %4 = bitcast i8 %mask to <8 x i1> + %5 = select <8 x i1> %4, <8 x half> %3, <8 x half> %a0 + ret <8 x half> %5 +} + +define <8 x half>@test_int_x86_avx512_mask3_vfmaddsub_ph_128(<8 x half> %x0, <8 x half> %x1, <8 x half> %x2, i8 %x3){ +; X86-LABEL: test_int_x86_avx512_mask3_vfmaddsub_ph_128: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfmaddsub231ph %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x09,0xb6,0xd1] +; X86-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_mask3_vfmaddsub_ph_128: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfmaddsub231ph %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x09,0xb6,0xd1] +; X64-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %res = call <8 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.128(<8 x half> %x0, <8 x half> %x1, <8 x half> %x2) + %bc = bitcast i8 %x3 to <8 x i1> + %sel = select <8 x i1> %bc, <8 x half> %res, <8 x half> %x2 + ret <8 x half> %sel +} +declare <8 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.128(<8 x half>, <8 x half>, <8 x half>) + +define <8 x half>@test_int_x86_avx512_maskz_vfmaddsub_ph_128(<8 x half> %x0, <8 x half> %x1, <8 x half> %x2, i8 %x3){ +; X86-LABEL: test_int_x86_avx512_maskz_vfmaddsub_ph_128: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfmaddsub213ph %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf6,0x75,0x89,0xa6,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_maskz_vfmaddsub_ph_128: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfmaddsub213ph %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: 
[0x62,0xf6,0x75,0x89,0xa6,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %res = call <8 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.128(<8 x half> %x0, <8 x half> %x1, <8 x half> %x2) + %bc = bitcast i8 %x3 to <8 x i1> + %sel = select <8 x i1> %bc, <8 x half> %res, <8 x half> zeroinitializer + ret <8 x half> %sel +} + +define <8 x half>@test_int_x86_avx512_mask3_vfmsubadd_ph_128(<8 x half> %x0, <8 x half> %x1, <8 x half> %x2, i8 %x3){ +; X86-LABEL: test_int_x86_avx512_mask3_vfmsubadd_ph_128: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfmsubadd231ph %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x09,0xb7,0xd1] +; X86-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_mask3_vfmsubadd_ph_128: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfmsubadd231ph %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x09,0xb7,0xd1] +; X64-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %neg = fneg <8 x half> %x2 + %res = call <8 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.128(<8 x half> %x0, <8 x half> %x1, <8 x half> %neg) + %bc = bitcast i8 %x3 to <8 x i1> + %sel = select <8 x i1> %bc, <8 x half> %res, <8 x half> %x2 + ret <8 x half> %sel +} + +define <8 x half>@test_int_x86_avx512_mask3_vfmsub_ph_128(<8 x half> %x0, <8 x half> %x1, <8 x half> %x2, i8 %x3){ +; X86-LABEL: test_int_x86_avx512_mask3_vfmsub_ph_128: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfmsub231ph %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x09,0xba,0xd1] +; X86-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_mask3_vfmsub_ph_128: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfmsub231ph %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x09,0xba,0xd1] +; X64-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %1 = fsub <8 x half> , %x2 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %x0, <8 x half> %x1, <8 x half> %1) + %3 = bitcast i8 %x3 to <8 x i1> + %4 = select <8 x i1> %3, <8 x half> %2, <8 x half> %x2 + ret <8 x half> %4 +} + +define <8 x half>@test_int_x86_avx512_mask3_vfmadd_ph_128(<8 x half> %x0, <8 x half> %x1, <8 x half> %x2, i8 %x3){ +; X86-LABEL: test_int_x86_avx512_mask3_vfmadd_ph_128: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfmadd231ph %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x09,0xb8,0xd1] +; X86-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_mask3_vfmadd_ph_128: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfmadd231ph %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x09,0xb8,0xd1] +; X64-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %1 = call <8 x half> @llvm.fma.v8f16(<8 x half> %x0, <8 x half> %x1, <8 x half> %x2) + %2 = bitcast i8 %x3 to <8 x i1> + %3 = select <8 x i1> %2, <8 
x half> %1, <8 x half> %x2 + ret <8 x half> %3 +} + +define <8 x half> @test_int_x86_avx512_maskz_vfmadd_ph_128(<8 x half> %x0, <8 x half> %x1, <8 x half> %x2, i8 %x3) { +; X86-LABEL: test_int_x86_avx512_maskz_vfmadd_ph_128: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfmadd213ph %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf6,0x75,0x89,0xa8,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_maskz_vfmadd_ph_128: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfmadd213ph %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf6,0x75,0x89,0xa8,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %1 = call <8 x half> @llvm.fma.v8f16(<8 x half> %x0, <8 x half> %x1, <8 x half> %x2) + %2 = bitcast i8 %x3 to <8 x i1> + %3 = select <8 x i1> %2, <8 x half> %1, <8 x half> zeroinitializer + ret <8 x half> %3 +} + +define <8 x half>@test_int_x86_avx512_mask_vfnmsub_ph_128(<8 x half> %x0, <8 x half> %x1, <8 x half> %x2, i8 %x3){ +; X86-LABEL: test_int_x86_avx512_mask_vfnmsub_ph_128: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfnmsub132ph %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x09,0x9e,0xc1] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_mask_vfnmsub_ph_128: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfnmsub132ph %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x09,0x9e,0xc1] +; X64-NEXT: retq # encoding: [0xc3] + %1 = fsub <8 x half> , %x1 + %2 = fsub <8 x half> , %x2 + %3 = call <8 x half> @llvm.fma.v8f16(<8 x half> %x0, <8 x half> %1, <8 x half> %2) + %4 = bitcast i8 %x3 to <8 x i1> + %5 = select <8 x i1> %4, <8 x half> %3, <8 x half> %x0 + ret <8 x half> %5 +} + +define <8 x half>@test_int_x86_avx512_mask3_vfnmsub_ph_128(<8 x half> %x0, <8 x half> %x1, <8 x half> %x2, i8 %x3){ +; X86-LABEL: test_int_x86_avx512_mask3_vfnmsub_ph_128: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfnmsub231ph %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x09,0xbe,0xd1] +; X86-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_mask3_vfnmsub_ph_128: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] +; X64-NEXT: vfnmsub231ph %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf6,0x7d,0x09,0xbe,0xd1] +; X64-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2] +; X64-NEXT: retq # encoding: [0xc3] + %1 = fsub <8 x half> , %x0 + %2 = fsub <8 x half> , %x2 + %3 = call <8 x half> @llvm.fma.v8f16(<8 x half> %1, <8 x half> %x1, <8 x half> %2) + %4 = bitcast i8 %x3 to <8 x i1> + %5 = select <8 x i1> %4, <8 x half> %3, <8 x half> %x2 + ret <8 x half> %5 +} + +define <8 x half>@test_int_x86_avx512_mask_vfnmadd_ph_128(<8 x half> %x0, <8 x half> %x1, <8 x half> %x2, i8 %x3){ +; X86-LABEL: test_int_x86_avx512_mask_vfnmadd_ph_128: +; X86: # %bb.0: +; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04] +; X86-NEXT: vfnmadd132ph %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x09,0x9c,0xc1] +; X86-NEXT: retl # encoding: [0xc3] +; +; X64-LABEL: test_int_x86_avx512_mask_vfnmadd_ph_128: +; X64: # %bb.0: +; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf] 
+; X64-NEXT: vfnmadd132ph %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf6,0x6d,0x09,0x9c,0xc1] +; X64-NEXT: retq # encoding: [0xc3] + %1 = fsub <8 x half> , %x1 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %x0, <8 x half> %1, <8 x half> %x2) + %3 = bitcast i8 %x3 to <8 x i1> + %4 = select <8 x i1> %3, <8 x half> %2, <8 x half> %x0 + ret <8 x half> %4 +} + +define <8 x half> @test_x86_fma_vfnmadd_ph_128(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) #0 { +; CHECK-LABEL: test_x86_fma_vfnmadd_ph_128: +; CHECK: # %bb.0: +; CHECK-NEXT: vfnmadd213ph %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf6,0x75,0x08,0xac,0xc2] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %1 = fsub <8 x half> , %a0 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %1, <8 x half> %a1, <8 x half> %a2) + ret <8 x half> %2 +} + +define <8 x half> @test_x86_fma_vfnmsub_ph_128(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) #0 { +; CHECK-LABEL: test_x86_fma_vfnmsub_ph_128: +; CHECK: # %bb.0: +; CHECK-NEXT: vfnmsub213ph %xmm0, %xmm1, %xmm0 # encoding: [0x62,0xf6,0x75,0x08,0xae,0xc0] +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %1 = fsub <8 x half> , %a0 + %2 = fsub <8 x half> , %a0 + %3 = call <8 x half> @llvm.fma.v8f16(<8 x half> %1, <8 x half> %a1, <8 x half> %2) + ret <8 x half> %3 +} + +declare <8 x half> @llvm.fma.v8f16(<8 x half>, <8 x half>, <8 x half>) diff --git a/llvm/test/CodeGen/X86/fp-strict-scalar-fp16.ll b/llvm/test/CodeGen/X86/fp-strict-scalar-fp16.ll index e7e52f153bc35..cb334aba458f7 100644 --- a/llvm/test/CodeGen/X86/fp-strict-scalar-fp16.ll +++ b/llvm/test/CodeGen/X86/fp-strict-scalar-fp16.ll @@ -11,6 +11,7 @@ declare double @llvm.experimental.constrained.fpext.f64.f16(half, metadata) declare half @llvm.experimental.constrained.fptrunc.f16.f32(float, metadata, metadata) declare half @llvm.experimental.constrained.fptrunc.f16.f64(double, metadata, metadata) declare half @llvm.experimental.constrained.sqrt.f16(half, metadata, metadata) +declare half @llvm.experimental.constrained.fma.f16(half, half, half, metadata, metadata) define half @fadd_f16(half %a, half %b) nounwind strictfp { ; X86-LABEL: fadd_f16: @@ -197,4 +198,22 @@ define void @fsqrt_f16(half* %a) nounwind strictfp { ret void } +define half @fma_f16(half %a, half %b, half %c) nounwind strictfp { +; X86-LABEL: fma_f16: +; X86: # %bb.0: +; X86-NEXT: vmovsh {{[0-9]+}}(%esp), %xmm1 +; X86-NEXT: vmovsh {{[0-9]+}}(%esp), %xmm0 +; X86-NEXT: vfmadd213sh {{[0-9]+}}(%esp), %xmm1, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: fma_f16: +; X64: # %bb.0: +; X64-NEXT: vfmadd213sh %xmm2, %xmm1, %xmm0 +; X64-NEXT: retq + %res = call half @llvm.experimental.constrained.fma.f16(half %a, half %b, half %c, + metadata !"round.dynamic", + metadata !"fpexcept.strict") #0 + ret half %res +} + attributes #0 = { strictfp } diff --git a/llvm/test/CodeGen/X86/stack-folding-fp-avx512fp16-fma.ll b/llvm/test/CodeGen/X86/stack-folding-fp-avx512fp16-fma.ll new file mode 100644 index 0000000000000..a0cc87f87db8f --- /dev/null +++ b/llvm/test/CodeGen/X86/stack-folding-fp-avx512fp16-fma.ll @@ -0,0 +1,2526 @@ +; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512fp16 < %s | FileCheck %s + +target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-unknown" + +; Stack reload folding tests. +; +; By including a nop call with sideeffects we can force a partial register spill of the +; relevant registers and check that the reload is correctly folded into the instruction. 
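+;
+; Each test below follows the same shape: the inline "nop" asm clobbers
+; xmm3-xmm31 and the flags (xmm2-xmm31 in the masked variants, where one
+; operand is instead loaded from memory), so one FMA operand cannot survive
+; in a register and must be spilled to the stack. The CHECK line then matches
+; the vfmadd/vfmsub/vfnmadd/vfnmsub instruction with a {{-?[0-9]*}}(%rsp)
+; memory operand and the "64-byte Folded Reload" comment, confirming that the
+; reload was folded into the FMA rather than emitted as a separate load.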
+ +define <32 x half> @stack_fold_fmadd123ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmadd123ph: + ;CHECK: vfmadd213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) + ret <32 x half> %2 +} +declare <32 x half> @llvm.fma.v32f16(<32 x half>, <32 x half>, <32 x half>) + +define <32 x half> @stack_fold_fmadd213ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmadd213ph: + ;CHECK: vfmadd213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a1, <32 x half> %a0, <32 x half> %a2) + ret <32 x half> %2 +} + +define <32 x half> @stack_fold_fmadd231ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmadd231ph: + ;CHECK: vfmadd231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a1, <32 x half> %a2, <32 x half> %a0) + ret <32 x half> %2 +} + +define <32 x half> @stack_fold_fmadd321ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmadd321ph: + ;CHECK: vfmadd231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a2, <32 x half> %a1, <32 x half> %a0) + ret <32 x half> %2 +} + +define <32 x half> @stack_fold_fmadd132ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmadd132ph: + ;CHECK: vfmadd132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a0, <32 x half> %a2, <32 x half> %a1) + ret <32 x half> %2 +} + +define <32 x half> 
@stack_fold_fmadd312ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmadd312ph: + ;CHECK: vfmadd132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a2, <32 x half> %a0, <32 x half> %a1) + ret <32 x half> %2 +} + +define <32 x half> @stack_fold_fmadd123ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fmadd123ph_mask: + ;CHECK: vfmadd213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fmadd213ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fmadd213ph_mask: + ;CHECK: vfmadd213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a1, <32 x half> %a0, <32 x half> %a2) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fmadd231ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fmadd231ph_mask: + ;CHECK: vfmadd231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a1, <32 x half> %a2, <32 x half> %a0) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fmadd321ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fmadd321ph_mask: + ;CHECK: vfmadd231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded 
Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a2, <32 x half> %a1, <32 x half> %a0) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fmadd132ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fmadd132ph_mask: + ;CHECK: vfmadd132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a0, <32 x half> %a2, <32 x half> %a1) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fmadd312ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fmadd312ph_mask: + ;CHECK: vfmadd132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a2, <32 x half> %a0, <32 x half> %a1) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fmadd123ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fmadd123ph_maskz: + ;CHECK: vfmadd213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fmadd213ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fmadd213ph_maskz: + ;CHECK: vfmadd213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", 
"=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a1, <32 x half> %a0, <32 x half> %a2) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fmadd231ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fmadd231ph_maskz: + ;CHECK: vfmadd231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a1, <32 x half> %a2, <32 x half> %a0) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fmadd321ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fmadd321ph_maskz: + ;CHECK: vfmadd231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a2, <32 x half> %a1, <32 x half> %a0) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fmadd132ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fmadd132ph_maskz: + ;CHECK: vfmadd132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a0, <32 x half> %a2, <32 x half> %a1) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fmadd312ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fmadd312ph_maskz: + ;CHECK: vfmadd132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", 
"=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a2, <32 x half> %a0, <32 x half> %a1) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fmsub123ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmsub123ph: + ;CHECK: vfmsub213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <32 x half> %a2 + %3 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a0, <32 x half> %a1, <32 x half> %2) + ret <32 x half> %3 +} + +define <32 x half> @stack_fold_fmsub213ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmsub213ph: + ;CHECK: vfmsub213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <32 x half> %a2 + %3 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a1, <32 x half> %a0, <32 x half> %2) + ret <32 x half> %3 +} + +define <32 x half> @stack_fold_fmsub231ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmsub231ph: + ;CHECK: vfmsub231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <32 x half> %a0 + %3 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a1, <32 x half> %a2, <32 x half> %2) + ret <32 x half> %3 +} + +define <32 x half> @stack_fold_fmsub321ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmsub321ph: + ;CHECK: vfmsub231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <32 x half> %a0 + %3 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a2, <32 x half> %a1, <32 x half> %2) + ret <32 x half> %3 +} + +define <32 x half> @stack_fold_fmsub132ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmsub132ph: + ;CHECK: vfmsub132ph {{-?[0-9]*}}(%rsp), 
{{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <32 x half> %a1 + %3 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a0, <32 x half> %a2, <32 x half> %2) + ret <32 x half> %3 +} + +define <32 x half> @stack_fold_fmsub312ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmsub312ph: + ;CHECK: vfmsub132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <32 x half> %a1 + %3 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a2, <32 x half> %a0, <32 x half> %2) + ret <32 x half> %3 +} + +define <32 x half> @stack_fold_fmsub123ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fmsub123ph_mask: + ;CHECK: vfmsub213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %neg = fneg <32 x half> %a2 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a0, <32 x half> %a1, <32 x half> %neg) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fmsub213ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fmsub213ph_mask: + ;CHECK: vfmsub213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %neg = fneg <32 x half> %a2 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a1, <32 x half> %a0, <32 x half> %neg) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fmsub231ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fmsub231ph_mask: + ;CHECK: vfmsub231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", 
"=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %neg = fneg <32 x half> %a0 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a1, <32 x half> %a2, <32 x half> %neg) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fmsub321ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fmsub321ph_mask: + ;CHECK: vfmsub231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %neg = fneg <32 x half> %a0 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a2, <32 x half> %a1, <32 x half> %neg) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fmsub132ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fmsub132ph_mask: + ;CHECK: vfmsub132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %neg = fneg <32 x half> %a1 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a0, <32 x half> %a2, <32 x half> %neg) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fmsub312ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fmsub312ph_mask: + ;CHECK: vfmsub132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %neg = fneg <32 x half> %a1 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a2, <32 x half> %a0, <32 x half> %neg) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fmsub123ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fmsub123ph_maskz: + ;CHECK: vfmsub213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded 
Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <32 x half> %a2 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a0, <32 x half> %a1, <32 x half> %neg) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fmsub213ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fmsub213ph_maskz: + ;CHECK: vfmsub213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <32 x half> %a2 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a1, <32 x half> %a0, <32 x half> %neg) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fmsub231ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fmsub231ph_maskz: + ;CHECK: vfmsub231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <32 x half> %a0 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a1, <32 x half> %a2, <32 x half> %neg) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fmsub321ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fmsub321ph_maskz: + ;CHECK: vfmsub231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <32 x half> %a0 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a2, <32 x half> %a1, <32 x half> %neg) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fmsub132ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fmsub132ph_maskz: + ;CHECK: vfmsub132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} 
{{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <32 x half> %a1 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a0, <32 x half> %a2, <32 x half> %neg) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fmsub312ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fmsub312ph_maskz: + ;CHECK: vfmsub132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <32 x half> %a1 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %a2, <32 x half> %a0, <32 x half> %neg) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fnmadd123ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmadd123ph: + ;CHECK: vfnmadd213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <32 x half> %a0 + %3 = call <32 x half> @llvm.fma.v32f16(<32 x half> %2, <32 x half> %a1, <32 x half> %a2) + ret <32 x half> %3 +} + +define <32 x half> @stack_fold_fnmadd213ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmadd213ph: + ;CHECK: vfnmadd213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <32 x half> %a1 + %3 = call <32 x half> @llvm.fma.v32f16(<32 x half> %2, <32 x half> %a0, <32 x half> %a2) + ret <32 x half> %3 +} + +define <32 x half> @stack_fold_fnmadd231ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmadd231ph: + ;CHECK: vfnmadd231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <32 x half> 
%a1 + %3 = call <32 x half> @llvm.fma.v32f16(<32 x half> %2, <32 x half> %a2, <32 x half> %a0) + ret <32 x half> %3 +} + +define <32 x half> @stack_fold_fnmadd321ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmadd321ph: + ;CHECK: vfnmadd231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <32 x half> %a2 + %3 = call <32 x half> @llvm.fma.v32f16(<32 x half> %2, <32 x half> %a1, <32 x half> %a0) + ret <32 x half> %3 +} + +define <32 x half> @stack_fold_fnmadd132ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmadd132ph: + ;CHECK: vfnmadd132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <32 x half> %a0 + %3 = call <32 x half> @llvm.fma.v32f16(<32 x half> %2, <32 x half> %a2, <32 x half> %a1) + ret <32 x half> %3 +} + +define <32 x half> @stack_fold_fnmadd312ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmadd312ph: + ;CHECK: vfnmadd132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <32 x half> %a2 + %3 = call <32 x half> @llvm.fma.v32f16(<32 x half> %2, <32 x half> %a0, <32 x half> %a1) + ret <32 x half> %3 +} + +define <32 x half> @stack_fold_fnmadd123ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fnmadd123ph_mask: + ;CHECK: vfnmadd213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %neg = fneg <32 x half> %a0 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %neg, <32 x half> %a1, <32 x half> %a2) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fnmadd213ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fnmadd213ph_mask: + ;CHECK: vfnmadd213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", 
"=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %neg = fneg <32 x half> %a1 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %neg, <32 x half> %a0, <32 x half> %a2) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fnmadd231ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fnmadd231ph_mask: + ;CHECK: vfnmadd231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %neg = fneg <32 x half> %a1 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %neg, <32 x half> %a2, <32 x half> %a0) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fnmadd321ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fnmadd321ph_mask: + ;CHECK: vfnmadd231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %neg = fneg <32 x half> %a2 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %neg, <32 x half> %a1, <32 x half> %a0) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fnmadd132ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fnmadd132ph_mask: + ;CHECK: vfnmadd132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %neg = fneg <32 x half> %a0 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %neg, <32 x half> %a2, <32 x half> %a1) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fnmadd312ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fnmadd312ph_mask: + ;CHECK: vfnmadd132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte 
Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %neg = fneg <32 x half> %a2 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %neg, <32 x half> %a0, <32 x half> %a1) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fnmadd123ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd123ph_maskz: + ;CHECK: vfnmadd213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <32 x half> %a0 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %neg, <32 x half> %a1, <32 x half> %a2) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fnmadd213ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd213ph_maskz: + ;CHECK: vfnmadd213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <32 x half> %a1 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %neg, <32 x half> %a0, <32 x half> %a2) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fnmadd231ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd231ph_maskz: + ;CHECK: vfnmadd231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <32 x half> %a1 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %neg, <32 x half> %a2, <32 x half> %a0) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fnmadd321ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd321ph_maskz: + ;CHECK: vfnmadd231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, 
{{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <32 x half> %a2 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %neg, <32 x half> %a1, <32 x half> %a0) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fnmadd132ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd132ph_maskz: + ;CHECK: vfnmadd132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <32 x half> %a0 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %neg, <32 x half> %a2, <32 x half> %a1) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fnmadd312ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd312ph_maskz: + ;CHECK: vfnmadd132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <32 x half> %a2 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %neg, <32 x half> %a0, <32 x half> %a1) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fnmsub123ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmsub123ph: + ;CHECK: vfnmsub213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <32 x half> %a0 + %3 = fneg <32 x half> %a2 + %4 = call <32 x half> @llvm.fma.v32f16(<32 x half> %2, <32 x half> %a1, <32 x half> %3) + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fnmsub213ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmsub213ph: + ;CHECK: vfnmsub213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", 
"=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <32 x half> %a1 + %3 = fneg <32 x half> %a2 + %4 = call <32 x half> @llvm.fma.v32f16(<32 x half> %2, <32 x half> %a0, <32 x half> %3) + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fnmsub231ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmsub231ph: + ;CHECK: vfnmsub231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <32 x half> %a1 + %3 = fneg <32 x half> %a0 + %4 = call <32 x half> @llvm.fma.v32f16(<32 x half> %2, <32 x half> %a2, <32 x half> %3) + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fnmsub321ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmsub321ph: + ;CHECK: vfnmsub231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <32 x half> %a2 + %3 = fneg <32 x half> %a0 + %4 = call <32 x half> @llvm.fma.v32f16(<32 x half> %2, <32 x half> %a1, <32 x half> %3) + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fnmsub132ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmsub132ph: + ;CHECK: vfnmsub132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <32 x half> %a0 + %3 = fneg <32 x half> %a1 + %4 = call <32 x half> @llvm.fma.v32f16(<32 x half> %2, <32 x half> %a2, <32 x half> %3) + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fnmsub312ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmsub312ph: + ;CHECK: vfnmsub132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <32 x half> %a2 + %3 = fneg <32 x half> %a1 + %4 = call <32 x half> @llvm.fma.v32f16(<32 x half> %2, <32 x half> %a0, <32 x half> %3) + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fnmsub123ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: 
stack_fold_fnmsub123ph_mask: + ;CHECK: vfnmsub213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %neg = fneg <32 x half> %a2 + %neg1 = fneg <32 x half> %a0 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %neg1, <32 x half> %a1, <32 x half> %neg) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fnmsub213ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fnmsub213ph_mask: + ;CHECK: vfnmsub213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %neg = fneg <32 x half> %a2 + %neg1 = fneg <32 x half> %a1 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %neg1, <32 x half> %a0, <32 x half> %neg) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fnmsub231ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fnmsub231ph_mask: + ;CHECK: vfnmsub231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %neg = fneg <32 x half> %a0 + %neg1 = fneg <32 x half> %a1 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %neg1, <32 x half> %a2, <32 x half> %neg) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fnmsub321ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fnmsub321ph_mask: + ;CHECK: vfnmsub231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %neg = fneg <32 x half> %a0 + %neg1 = fneg <32 x half> %a2 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %neg1, <32 x half> %a1, <32 x half> %neg) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> 
%3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fnmsub132ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fnmsub132ph_mask: + ;CHECK: vfnmsub132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %neg = fneg <32 x half> %a1 + %neg1 = fneg <32 x half> %a0 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %neg1, <32 x half> %a2, <32 x half> %neg) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fnmsub312ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fnmsub312ph_mask: + ;CHECK: vfnmsub132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %neg = fneg <32 x half> %a1 + %neg1 = fneg <32 x half> %a2 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %neg1, <32 x half> %a0, <32 x half> %neg) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fnmsub123ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fnmsub123ph_maskz: + ;CHECK: vfnmsub213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <32 x half> %a2 + %neg1 = fneg <32 x half> %a0 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %neg1, <32 x half> %a1, <32 x half> %neg) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fnmsub213ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fnmsub213ph_maskz: + ;CHECK: vfnmsub213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <32 x half> %a2 + %neg1 = fneg <32 x half> %a1 + %2 = call <32 
x half> @llvm.fma.v32f16(<32 x half> %neg1, <32 x half> %a0, <32 x half> %neg) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fnmsub231ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fnmsub231ph_maskz: + ;CHECK: vfnmsub231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <32 x half> %a0 + %neg1 = fneg <32 x half> %a1 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %neg1, <32 x half> %a2, <32 x half> %neg) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fnmsub321ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fnmsub321ph_maskz: + ;CHECK: vfnmsub231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <32 x half> %a0 + %neg1 = fneg <32 x half> %a2 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %neg1, <32 x half> %a1, <32 x half> %neg) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fnmsub132ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fnmsub132ph_maskz: + ;CHECK: vfnmsub132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <32 x half> %a1 + %neg1 = fneg <32 x half> %a0 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %neg1, <32 x half> %a2, <32 x half> %neg) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fnmsub312ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fnmsub312ph_maskz: + ;CHECK: vfnmsub132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", 
"=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <32 x half> %a1 + %neg1 = fneg <32 x half> %a2 + %2 = call <32 x half> @llvm.fma.v32f16(<32 x half> %neg1, <32 x half> %a0, <32 x half> %neg) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define half @stack_fold_fmadd123sh(half %a0, half %a1, half %a2) { + ;CHECK-LABEL: stack_fold_fmadd123sh: + ;CHECK: vfmadd213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 2-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call half @llvm.fma.f16(half %a0, half %a1, half %a2) + ret half %2 +} +declare half @llvm.fma.f16(half, half, half) + +define half @stack_fold_fmadd213sh(half %a0, half %a1, half %a2) { + ;CHECK-LABEL: stack_fold_fmadd213sh: + ;CHECK: vfmadd213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 2-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call half @llvm.fma.f16(half %a1, half %a0, half %a2) + ret half %2 +} + +define half @stack_fold_fmadd231sh(half %a0, half %a1, half %a2) { + ;CHECK-LABEL: stack_fold_fmadd231sh: + ;CHECK: vfmadd231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 2-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call half @llvm.fma.f16(half %a1, half %a2, half %a0) + ret half %2 +} + +define half @stack_fold_fmadd321sh(half %a0, half %a1, half %a2) { + ;CHECK-LABEL: stack_fold_fmadd321sh: + ;CHECK: vfmadd231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 2-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call half @llvm.fma.f16(half %a2, half %a1, half %a0) + ret half %2 +} + +define half @stack_fold_fmadd132sh(half %a0, half %a1, half %a2) { + ;CHECK-LABEL: stack_fold_fmadd132sh: + ;CHECK: vfmadd132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 2-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", 
"=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call half @llvm.fma.f16(half %a0, half %a2, half %a1) + ret half %2 +} + +define half @stack_fold_fmadd312sh(half %a0, half %a1, half %a2) { + ;CHECK-LABEL: stack_fold_fmadd312sh: + ;CHECK: vfmadd132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 2-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call half @llvm.fma.f16(half %a2, half %a0, half %a1) + ret half %2 +} + +define half @stack_fold_fmsub123sh(half %a0, half %a1, half %a2) { + ;CHECK-LABEL: stack_fold_fmsub123sh: + ;CHECK: vfmsub213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 2-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg half %a2 + %3 = call half @llvm.fma.f16(half %a0, half %a1, half %2) + ret half %3 +} + +define half @stack_fold_fmsub213sh(half %a0, half %a1, half %a2) { + ;CHECK-LABEL: stack_fold_fmsub213sh: + ;CHECK: vfmsub213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 2-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg half %a2 + %3 = call half @llvm.fma.f16(half %a1, half %a0, half %2) + ret half %3 +} + +define half @stack_fold_fmsub231sh(half %a0, half %a1, half %a2) { + ;CHECK-LABEL: stack_fold_fmsub231sh: + ;CHECK: vfmsub231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 2-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg half %a0 + %3 = call half @llvm.fma.f16(half %a1, half %a2, half %2) + ret half %3 +} + +define half @stack_fold_fmsub321sh(half %a0, half %a1, half %a2) { + ;CHECK-LABEL: stack_fold_fmsub321sh: + ;CHECK: vfmsub231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 2-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg half %a0 + %3 = call half @llvm.fma.f16(half %a2, half %a1, half %2) + ret half %3 +} + +define half 
@stack_fold_fmsub132sh(half %a0, half %a1, half %a2) { + ;CHECK-LABEL: stack_fold_fmsub132sh: + ;CHECK: vfmsub132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 2-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg half %a1 + %3 = call half @llvm.fma.f16(half %a0, half %a2, half %2) + ret half %3 +} + +define half @stack_fold_fmsub312sh(half %a0, half %a1, half %a2) { + ;CHECK-LABEL: stack_fold_fmsub312sh: + ;CHECK: vfmsub132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 2-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg half %a1 + %3 = call half @llvm.fma.f16(half %a2, half %a0, half %2) + ret half %3 +} + +define half @stack_fold_fnmadd123sh(half %a0, half %a1, half %a2) { + ;CHECK-LABEL: stack_fold_fnmadd123sh: + ;CHECK: vfnmadd213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 2-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg half %a0 + %3 = call half @llvm.fma.f16(half %2, half %a1, half %a2) + ret half %3 +} + +define half @stack_fold_fnmadd213sh(half %a0, half %a1, half %a2) { + ;CHECK-LABEL: stack_fold_fnmadd213sh: + ;CHECK: vfnmadd213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 2-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg half %a1 + %3 = call half @llvm.fma.f16(half %2, half %a0, half %a2) + ret half %3 +} + +define half @stack_fold_fnmadd231sh(half %a0, half %a1, half %a2) { + ;CHECK-LABEL: stack_fold_fnmadd231sh: + ;CHECK: vfnmadd231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 2-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg half %a1 + %3 = call half @llvm.fma.f16(half %2, half %a2, half %a0) + ret half %3 +} + +define half @stack_fold_fnmadd321sh(half %a0, half %a1, half %a2) { + ;CHECK-LABEL: stack_fold_fnmadd321sh: + ;CHECK: vfnmadd231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 2-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", 
"=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg half %a2 + %3 = call half @llvm.fma.f16(half %2, half %a1, half %a0) + ret half %3 +} + +define half @stack_fold_fnmadd132sh(half %a0, half %a1, half %a2) { + ;CHECK-LABEL: stack_fold_fnmadd132sh: + ;CHECK: vfnmadd132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 2-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg half %a0 + %3 = call half @llvm.fma.f16(half %2, half %a2, half %a1) + ret half %3 +} + +define half @stack_fold_fnmadd312sh(half %a0, half %a1, half %a2) { + ;CHECK-LABEL: stack_fold_fnmadd312sh: + ;CHECK: vfnmadd132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 2-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg half %a2 + %3 = call half @llvm.fma.f16(half %2, half %a0, half %a1) + ret half %3 +} + +define half @stack_fold_fnmsub123sh(half %a0, half %a1, half %a2) { + ;CHECK-LABEL: stack_fold_fnmsub123sh: + ;CHECK: vfnmsub213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 2-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg half %a0 + %3 = fneg half %a2 + %4 = call half @llvm.fma.f16(half %2, half %a1, half %3) + ret half %4 +} + +define half @stack_fold_fnmsub213sh(half %a0, half %a1, half %a2) { + ;CHECK-LABEL: stack_fold_fnmsub213sh: + ;CHECK: vfnmsub213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 2-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg half %a1 + %3 = fneg half %a2 + %4 = call half @llvm.fma.f16(half %2, half %a0, half %3) + ret half %4 +} + +define half @stack_fold_fnmsub231sh(half %a0, half %a1, half %a2) { + ;CHECK-LABEL: stack_fold_fnmsub231sh: + ;CHECK: vfnmsub231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 2-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg half %a1 + %3 = fneg 
half %a0 + %4 = call half @llvm.fma.f16(half %2, half %a2, half %3) + ret half %4 +} + +define half @stack_fold_fnmsub321sh(half %a0, half %a1, half %a2) { + ;CHECK-LABEL: stack_fold_fnmsub321sh: + ;CHECK: vfnmsub231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 2-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg half %a2 + %3 = fneg half %a0 + %4 = call half @llvm.fma.f16(half %2, half %a1, half %3) + ret half %4 +} + +define half @stack_fold_fnmsub132sh(half %a0, half %a1, half %a2) { + ;CHECK-LABEL: stack_fold_fnmsub132sh: + ;CHECK: vfnmsub132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 2-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg half %a0 + %3 = fneg half %a1 + %4 = call half @llvm.fma.f16(half %2, half %a2, half %3) + ret half %4 +} + +define half @stack_fold_fnmsub312sh(half %a0, half %a1, half %a2) { + ;CHECK-LABEL: stack_fold_fnmsub312sh: + ;CHECK: vfnmsub132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 2-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg half %a2 + %3 = fneg half %a1 + %4 = call half @llvm.fma.f16(half %2, half %a0, half %3) + ret half %4 +} + +define <8 x half> @stack_fold_fmadd123sh_int(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v) { + ;CHECK-LABEL: stack_fold_fmadd123sh_int: + ;CHECK: vfmadd213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %2 = call half @llvm.fma.f16(half %a0, half %a1, half %a2) + %res = insertelement <8 x half> %a0v, half %2, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmadd213sh_int(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v) { + ;CHECK-LABEL: stack_fold_fmadd213sh_int: + ;CHECK: vfmadd213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + 
%a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %2 = call half @llvm.fma.f16(half %a1, half %a0, half %a2) + %res = insertelement <8 x half> %a0v, half %2, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmadd231sh_int(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v) { + ;CHECK-LABEL: stack_fold_fmadd231sh_int: + ;CHECK: vfmadd231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %2 = call half @llvm.fma.f16(half %a1, half %a2, half %a0) + %res = insertelement <8 x half> %a0v, half %2, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmadd321sh_int(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v) { + ;CHECK-LABEL: stack_fold_fmadd321sh_int: + ;CHECK: vfmadd231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %2 = call half @llvm.fma.f16(half %a2, half %a1, half %a0) + %res = insertelement <8 x half> %a0v, half %2, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmadd132sh_int(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v) { + ;CHECK-LABEL: stack_fold_fmadd132sh_int: + ;CHECK: vfmadd132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %2 = call half @llvm.fma.f16(half %a0, half %a2, half %a1) + %res = insertelement <8 x half> %a0v, half %2, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmadd312sh_int(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v) { + ;CHECK-LABEL: stack_fold_fmadd312sh_int: + ;CHECK: vfmadd132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %2 = call half @llvm.fma.f16(half %a2, 
half %a0, half %a1) + %res = insertelement <8 x half> %a0v, half %2, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmsub123sh_int(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v) { + ;CHECK-LABEL: stack_fold_fmsub123sh_int: + ;CHECK: vfmsub213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a2 + %2 = call half @llvm.fma.f16(half %a0, half %a1, half %neg) + %res = insertelement <8 x half> %a0v, half %2, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmsub213sh_int(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v) { + ;CHECK-LABEL: stack_fold_fmsub213sh_int: + ;CHECK: vfmsub213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a2 + %2 = call half @llvm.fma.f16(half %a1, half %a0, half %neg) + %res = insertelement <8 x half> %a0v, half %2, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmsub231sh_int(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v) { + ;CHECK-LABEL: stack_fold_fmsub231sh_int: + ;CHECK: vfmsub231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a0 + %2 = call half @llvm.fma.f16(half %a1, half %a2, half %neg) + %res = insertelement <8 x half> %a0v, half %2, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmsub321sh_int(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v) { + ;CHECK-LABEL: stack_fold_fmsub321sh_int: + ;CHECK: vfmsub231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a0 + %2 = call half @llvm.fma.f16(half %a2, half %a1, half %neg) + %res = 
insertelement <8 x half> %a0v, half %2, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmsub132sh_int(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v) { + ;CHECK-LABEL: stack_fold_fmsub132sh_int: + ;CHECK: vfmsub132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a1 + %2 = call half @llvm.fma.f16(half %a0, half %a2, half %neg) + %res = insertelement <8 x half> %a0v, half %2, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmsub312sh_int(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v) { + ;CHECK-LABEL: stack_fold_fmsub312sh_int: + ;CHECK: vfmsub132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a1 + %2 = call half @llvm.fma.f16(half %a2, half %a0, half %neg) + %res = insertelement <8 x half> %a0v, half %2, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmadd123sh_int(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v) { + ;CHECK-LABEL: stack_fold_fnmadd123sh_int: + ;CHECK: vfnmadd213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg1 = fneg half %a0 + %2 = call half @llvm.fma.f16(half %neg1, half %a1, half %a2) + %res = insertelement <8 x half> %a0v, half %2, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmadd213sh_int(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v) { + ;CHECK-LABEL: stack_fold_fnmadd213sh_int: + ;CHECK: vfnmadd213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg1 = fneg half %a1 + %2 = call half @llvm.fma.f16(half %neg1, half %a0, half %a2) + %res = insertelement <8 x half> 
%a0v, half %2, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmadd231sh_int(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v) { + ;CHECK-LABEL: stack_fold_fnmadd231sh_int: + ;CHECK: vfnmadd231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg1 = fneg half %a1 + %2 = call half @llvm.fma.f16(half %neg1, half %a2, half %a0) + %res = insertelement <8 x half> %a0v, half %2, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmadd321sh_int(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v) { + ;CHECK-LABEL: stack_fold_fnmadd321sh_int: + ;CHECK: vfnmadd231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg1 = fneg half %a2 + %2 = call half @llvm.fma.f16(half %neg1, half %a1, half %a0) + %res = insertelement <8 x half> %a0v, half %2, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmadd132sh_int(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v) { + ;CHECK-LABEL: stack_fold_fnmadd132sh_int: + ;CHECK: vfnmadd132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg1 = fneg half %a0 + %2 = call half @llvm.fma.f16(half %neg1, half %a2, half %a1) + %res = insertelement <8 x half> %a0v, half %2, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmadd312sh_int(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v) { + ;CHECK-LABEL: stack_fold_fnmadd312sh_int: + ;CHECK: vfnmadd132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg1 = fneg half %a2 + %2 = call half @llvm.fma.f16(half %neg1, half %a0, half %a1) + %res = insertelement <8 x half> %a0v, half %2, 
i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmsub123sh_int(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v) { + ;CHECK-LABEL: stack_fold_fnmsub123sh_int: + ;CHECK: vfnmsub213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a2 + %neg1 = fneg half %a0 + %2 = call half @llvm.fma.f16(half %neg1, half %a1, half %neg) + %res = insertelement <8 x half> %a0v, half %2, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmsub213sh_int(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v) { + ;CHECK-LABEL: stack_fold_fnmsub213sh_int: + ;CHECK: vfnmsub213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a2 + %neg1 = fneg half %a1 + %2 = call half @llvm.fma.f16(half %neg1, half %a0, half %neg) + %res = insertelement <8 x half> %a0v, half %2, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmsub231sh_int(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v) { + ;CHECK-LABEL: stack_fold_fnmsub231sh_int: + ;CHECK: vfnmsub231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a0 + %neg1 = fneg half %a1 + %2 = call half @llvm.fma.f16(half %neg1, half %a2, half %neg) + %res = insertelement <8 x half> %a0v, half %2, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmsub321sh_int(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v) { + ;CHECK-LABEL: stack_fold_fnmsub321sh_int: + ;CHECK: vfnmsub231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a0 + %neg1 = fneg half %a2 + %2 = call half 
@llvm.fma.f16(half %neg1, half %a1, half %neg) + %res = insertelement <8 x half> %a0v, half %2, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmsub132sh_int(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v) { + ;CHECK-LABEL: stack_fold_fnmsub132sh_int: + ;CHECK: vfnmsub132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a1 + %neg1 = fneg half %a0 + %2 = call half @llvm.fma.f16(half %neg1, half %a2, half %neg) + %res = insertelement <8 x half> %a0v, half %2, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmsub312sh_int(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v) { + ;CHECK-LABEL: stack_fold_fnmsub312sh_int: + ;CHECK: vfnmsub132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a1 + %neg1 = fneg half %a2 + %2 = call half @llvm.fma.f16(half %neg1, half %a0, half %neg) + %res = insertelement <8 x half> %a0v, half %2, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmadd123sh_intk(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmadd123sh_intk: + ;CHECK: vfmadd213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %2 = call half @llvm.fma.f16(half %a0, half %a1, half %a2) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half %a0 + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmadd213sh_intk(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmadd213sh_intk: + ;CHECK: vfmadd213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", 
"=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %2 = call half @llvm.fma.f16(half %a1, half %a0, half %a2) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half %a0 + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmadd231sh_intk(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmadd231sh_intk: + ;CHECK: vfmadd231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %2 = call half @llvm.fma.f16(half %a1, half %a2, half %a0) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half %a0 + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmadd321sh_intk(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmadd321sh_intk: + ;CHECK: vfmadd231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %2 = call half @llvm.fma.f16(half %a2, half %a1, half %a0) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half %a0 + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmadd132sh_intk(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmadd132sh_intk: + ;CHECK: vfmadd132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %2 = call half @llvm.fma.f16(half %a0, half %a2, half %a1) + %3 = load i8, i8* 
%mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half %a0 + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmadd312sh_intk(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmadd312sh_intk: + ;CHECK: vfmadd132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %2 = call half @llvm.fma.f16(half %a2, half %a0, half %a1) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half %a0 + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmsub123sh_intk(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmsub123sh_intk: + ;CHECK: vfmsub213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a2 + %2 = call half @llvm.fma.f16(half %a0, half %a1, half %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half %a0 + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmsub213sh_intk(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmsub213sh_intk: + ;CHECK: vfmsub213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a2 + %2 = call half @llvm.fma.f16(half %a1, half %a0, half %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half %a0 + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmsub231sh_intk(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmsub231sh_intk: + ;CHECK: vfmsub231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} 
{{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a0 + %2 = call half @llvm.fma.f16(half %a1, half %a2, half %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half %a0 + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmsub321sh_intk(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmsub321sh_intk: + ;CHECK: vfmsub231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a0 + %2 = call half @llvm.fma.f16(half %a2, half %a1, half %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half %a0 + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmsub132sh_intk(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmsub132sh_intk: + ;CHECK: vfmsub132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a1 + %2 = call half @llvm.fma.f16(half %a0, half %a2, half %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half %a0 + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmsub312sh_intk(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmsub312sh_intk: + ;CHECK: vfmsub132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 
= extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a1 + %2 = call half @llvm.fma.f16(half %a2, half %a0, half %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half %a0 + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmadd123sh_intk(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd123sh_intk: + ;CHECK: vfnmadd213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg1 = fneg half %a0 + %2 = call half @llvm.fma.f16(half %neg1, half %a1, half %a2) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half %a0 + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmadd213sh_intk(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd213sh_intk: + ;CHECK: vfnmadd213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg1 = fneg half %a1 + %2 = call half @llvm.fma.f16(half %neg1, half %a0, half %a2) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half %a0 + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmadd231sh_intk(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd231sh_intk: + ;CHECK: vfnmadd231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg1 = fneg half %a1 + %2 = call half @llvm.fma.f16(half %neg1, half %a2, half %a0) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half %a0 + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define 
<8 x half> @stack_fold_fnmadd321sh_intk(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd321sh_intk: + ;CHECK: vfnmadd231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg1 = fneg half %a2 + %2 = call half @llvm.fma.f16(half %neg1, half %a1, half %a0) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half %a0 + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmadd132sh_intk(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd132sh_intk: + ;CHECK: vfnmadd132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg1 = fneg half %a0 + %2 = call half @llvm.fma.f16(half %neg1, half %a2, half %a1) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half %a0 + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmadd312sh_intk(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd312sh_intk: + ;CHECK: vfnmadd132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg1 = fneg half %a2 + %2 = call half @llvm.fma.f16(half %neg1, half %a0, half %a1) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half %a0 + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmsub123sh_intk(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmsub123sh_intk: + ;CHECK: vfnmsub213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", 
"=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a2 + %neg1 = fneg half %a0 + %2 = call half @llvm.fma.f16(half %neg1, half %a1, half %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half %a0 + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmsub213sh_intk(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmsub213sh_intk: + ;CHECK: vfnmsub213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a2 + %neg1 = fneg half %a1 + %2 = call half @llvm.fma.f16(half %neg1, half %a0, half %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half %a0 + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmsub231sh_intk(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmsub231sh_intk: + ;CHECK: vfnmsub231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a0 + %neg1 = fneg half %a1 + %2 = call half @llvm.fma.f16(half %neg1, half %a2, half %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half %a0 + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmsub321sh_intk(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmsub321sh_intk: + ;CHECK: vfnmsub231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = 
extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a0 + %neg1 = fneg half %a2 + %2 = call half @llvm.fma.f16(half %neg1, half %a1, half %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half %a0 + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmsub132sh_intk(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmsub132sh_intk: + ;CHECK: vfnmsub132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a1 + %neg1 = fneg half %a0 + %2 = call half @llvm.fma.f16(half %neg1, half %a2, half %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half %a0 + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmsub312sh_intk(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmsub312sh_intk: + ;CHECK: vfnmsub132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a1 + %neg1 = fneg half %a2 + %2 = call half @llvm.fma.f16(half %neg1, half %a0, half %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half %a0 + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmadd123sh_intkz(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmadd123sh_intkz: + ;CHECK: vfmadd213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %2 = call half @llvm.fma.f16(half %a0, half %a1, half %a2) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half zeroinitializer + %res = insertelement <8 x 
half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmadd213sh_intkz(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmadd213sh_intkz: + ;CHECK: vfmadd213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %2 = call half @llvm.fma.f16(half %a1, half %a0, half %a2) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half zeroinitializer + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmadd231sh_intkz(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmadd231sh_intkz: + ;CHECK: vfmadd231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %2 = call half @llvm.fma.f16(half %a1, half %a2, half %a0) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half zeroinitializer + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmadd321sh_intkz(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmadd321sh_intkz: + ;CHECK: vfmadd231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %2 = call half @llvm.fma.f16(half %a2, half %a1, half %a0) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half zeroinitializer + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmadd132sh_intkz(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmadd132sh_intkz: + ;CHECK: vfmadd132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", 
"=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %2 = call half @llvm.fma.f16(half %a0, half %a2, half %a1) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half zeroinitializer + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmadd312sh_intkz(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmadd312sh_intkz: + ;CHECK: vfmadd132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %2 = call half @llvm.fma.f16(half %a2, half %a0, half %a1) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half zeroinitializer + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmsub123sh_intkz(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmsub123sh_intkz: + ;CHECK: vfmsub213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a2 + %2 = call half @llvm.fma.f16(half %a0, half %a1, half %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half zeroinitializer + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmsub213sh_intkz(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmsub213sh_intkz: + ;CHECK: vfmsub213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + 
%neg = fneg half %a2 + %2 = call half @llvm.fma.f16(half %a1, half %a0, half %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half zeroinitializer + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmsub231sh_intkz(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmsub231sh_intkz: + ;CHECK: vfmsub231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a0 + %2 = call half @llvm.fma.f16(half %a1, half %a2, half %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half zeroinitializer + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmsub321sh_intkz(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmsub321sh_intkz: + ;CHECK: vfmsub231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a0 + %2 = call half @llvm.fma.f16(half %a2, half %a1, half %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half zeroinitializer + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fmsub132sh_intkz(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmsub132sh_intkz: + ;CHECK: vfmsub132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a1 + %2 = call half @llvm.fma.f16(half %a0, half %a2, half %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half zeroinitializer + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> 
@stack_fold_fmsub312sh_intkz(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmsub312sh_intkz: + ;CHECK: vfmsub132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a1 + %2 = call half @llvm.fma.f16(half %a2, half %a0, half %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half zeroinitializer + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmadd123sh_intkz(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd123sh_intkz: + ;CHECK: vfnmadd213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg1 = fneg half %a0 + %2 = call half @llvm.fma.f16(half %neg1, half %a1, half %a2) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half zeroinitializer + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmadd213sh_intkz(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd213sh_intkz: + ;CHECK: vfnmadd213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg1 = fneg half %a1 + %2 = call half @llvm.fma.f16(half %neg1, half %a0, half %a2) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half zeroinitializer + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmadd231sh_intkz(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd231sh_intkz: + ;CHECK: vfnmadd231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", 
"=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg1 = fneg half %a1 + %2 = call half @llvm.fma.f16(half %neg1, half %a2, half %a0) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half zeroinitializer + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmadd321sh_intkz(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd321sh_intkz: + ;CHECK: vfnmadd231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg1 = fneg half %a2 + %2 = call half @llvm.fma.f16(half %neg1, half %a1, half %a0) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half zeroinitializer + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmadd132sh_intkz(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd132sh_intkz: + ;CHECK: vfnmadd132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg1 = fneg half %a0 + %2 = call half @llvm.fma.f16(half %neg1, half %a2, half %a1) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half zeroinitializer + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmadd312sh_intkz(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd312sh_intkz: + ;CHECK: vfnmadd132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x 
half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg1 = fneg half %a2 + %2 = call half @llvm.fma.f16(half %neg1, half %a0, half %a1) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half zeroinitializer + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmsub123sh_intkz(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmsub123sh_intkz: + ;CHECK: vfnmsub213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a2 + %neg1 = fneg half %a0 + %2 = call half @llvm.fma.f16(half %neg1, half %a1, half %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half zeroinitializer + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmsub213sh_intkz(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmsub213sh_intkz: + ;CHECK: vfnmsub213sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a2 + %neg1 = fneg half %a1 + %2 = call half @llvm.fma.f16(half %neg1, half %a0, half %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half zeroinitializer + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmsub231sh_intkz(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmsub231sh_intkz: + ;CHECK: vfnmsub231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a0 + %neg1 = fneg half %a1 + %2 = call half @llvm.fma.f16(half %neg1, half %a2, half %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, 
half %2, half zeroinitializer + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmsub321sh_intkz(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmsub321sh_intkz: + ;CHECK: vfnmsub231sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a0 + %neg1 = fneg half %a2 + %2 = call half @llvm.fma.f16(half %neg1, half %a1, half %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half zeroinitializer + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmsub132sh_intkz(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmsub132sh_intkz: + ;CHECK: vfnmsub132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a1 + %neg1 = fneg half %a0 + %2 = call half @llvm.fma.f16(half %neg1, half %a2, half %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half zeroinitializer + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <8 x half> @stack_fold_fnmsub312sh_intkz(<8 x half> %a0v, <8 x half> %a1v, <8 x half> %a2v, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmsub312sh_intkz: + ;CHECK: vfnmsub132sh {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = extractelement <8 x half> %a0v, i64 0 + %a1 = extractelement <8 x half> %a1v, i64 0 + %a2 = extractelement <8 x half> %a2v, i64 0 + %neg = fneg half %a1 + %neg1 = fneg half %a2 + %2 = call half @llvm.fma.f16(half %neg1, half %a0, half %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = extractelement <8 x i1> %4, i64 0 + %6 = select i1 %5, half %2, half zeroinitializer + %res = insertelement <8 x half> %a0v, half %6, i64 0 + ret <8 x half> %res +} + +define <32 x half> @stack_fold_fmaddsub123ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmaddsub123ph: + ;CHECK: 
vfmaddsub213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 4) + ret <32 x half> %2 +} +declare <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half>, <32 x half>, <32 x half>, i32) + +define <32 x half> @stack_fold_fmaddsub213ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmaddsub213ph: + ;CHECK: vfmaddsub213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a1, <32 x half> %a0, <32 x half> %a2, i32 4) + ret <32 x half> %2 +} + +define <32 x half> @stack_fold_fmaddsub231ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmaddsub231ph: + ;CHECK: vfmaddsub231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a1, <32 x half> %a2, <32 x half> %a0, i32 4) + ret <32 x half> %2 +} + +define <32 x half> @stack_fold_fmaddsub321ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmaddsub321ph: + ;CHECK: vfmaddsub231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a2, <32 x half> %a1, <32 x half> %a0, i32 4) + ret <32 x half> %2 +} + +define <32 x half> @stack_fold_fmaddsub132ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmaddsub132ph: + ;CHECK: vfmaddsub132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a0, <32 x half> %a2, <32 x half> %a1, i32 4) + ret <32 x 
half> %2 +} + +define <32 x half> @stack_fold_fmaddsub312ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmaddsub312ph: + ;CHECK: vfmaddsub132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a2, <32 x half> %a0, <32 x half> %a1, i32 4) + ret <32 x half> %2 +} + +define <32 x half> @stack_fold_fmaddsub123ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fmaddsub123ph_mask: + ;CHECK: vfmaddsub213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 4) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fmaddsub213ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fmaddsub213ph_mask: + ;CHECK: vfmaddsub213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a1, <32 x half> %a0, <32 x half> %a2, i32 4) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fmaddsub231ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fmaddsub231ph_mask: + ;CHECK: vfmaddsub231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a1, <32 x half> %a2, <32 x half> %a0, i32 4) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fmaddsub321ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> 
%a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fmaddsub321ph_mask: + ;CHECK: vfmaddsub231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a2, <32 x half> %a1, <32 x half> %a0, i32 4) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fmaddsub132ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fmaddsub132ph_mask: + ;CHECK: vfmaddsub132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a0, <32 x half> %a2, <32 x half> %a1, i32 4) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fmaddsub312ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fmaddsub312ph_mask: + ;CHECK: vfmaddsub132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a2, <32 x half> %a0, <32 x half> %a1, i32 4) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fmaddsub123ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fmaddsub123ph_maskz: + ;CHECK: vfmaddsub213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32 4) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> 
@stack_fold_fmaddsub213ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fmaddsub213ph_maskz: + ;CHECK: vfmaddsub213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a1, <32 x half> %a0, <32 x half> %a2, i32 4) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fmaddsub231ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fmaddsub231ph_maskz: + ;CHECK: vfmaddsub231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a1, <32 x half> %a2, <32 x half> %a0, i32 4) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fmaddsub321ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fmaddsub321ph_maskz: + ;CHECK: vfmaddsub231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a2, <32 x half> %a1, <32 x half> %a0, i32 4) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fmaddsub132ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fmaddsub132ph_maskz: + ;CHECK: vfmaddsub132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a0, <32 x half> %a2, <32 x half> %a1, i32 4) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer 
+ ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fmaddsub312ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fmaddsub312ph_maskz: + ;CHECK: vfmaddsub132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a2, <32 x half> %a0, <32 x half> %a1, i32 4) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fmsubadd123ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmsubadd123ph: + ;CHECK: vfmsubadd213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <32 x half> %a2 + %3 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a0, <32 x half> %a1, <32 x half> %2, i32 4) + ret <32 x half> %3 +} + +define <32 x half> @stack_fold_fmsubadd213ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmsubadd213ph: + ;CHECK: vfmsubadd213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <32 x half> %a2 + %3 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a1, <32 x half> %a0, <32 x half> %2, i32 4) + ret <32 x half> %3 +} + +define <32 x half> @stack_fold_fmsubadd231ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmsubadd231ph: + ;CHECK: vfmsubadd231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <32 x half> %a0 + %3 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a1, <32 x half> %a2, <32 x half> %2, i32 4) + ret <32 x half> %3 +} + +define <32 x half> @stack_fold_fmsubadd321ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmsubadd321ph: + ;CHECK: vfmsubadd231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", 
"=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <32 x half> %a0 + %3 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a2, <32 x half> %a1, <32 x half> %2, i32 4) + ret <32 x half> %3 +} + +define <32 x half> @stack_fold_fmsubadd132ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmsubadd132ph: + ;CHECK: vfmsubadd132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <32 x half> %a1 + %3 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a0, <32 x half> %a2, <32 x half> %2, i32 4) + ret <32 x half> %3 +} + +define <32 x half> @stack_fold_fmsubadd312ph(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmsubadd312ph: + ;CHECK: vfmsubadd132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <32 x half> %a1 + %3 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a2, <32 x half> %a0, <32 x half> %2, i32 4) + ret <32 x half> %3 +} + +define <32 x half> @stack_fold_fmsubadd123ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fmsubadd123ph_mask: + ;CHECK: vfmsubadd213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %neg = fneg <32 x half> %a2 + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a0, <32 x half> %a1, <32 x half> %neg, i32 4) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fmsubadd213ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fmsubadd213ph_mask: + ;CHECK: vfmsubadd213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %neg = 
fneg <32 x half> %a2 + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a1, <32 x half> %a0, <32 x half> %neg, i32 4) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fmsubadd231ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fmsubadd231ph_mask: + ;CHECK: vfmsubadd231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %neg = fneg <32 x half> %a0 + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a1, <32 x half> %a2, <32 x half> %neg, i32 4) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fmsubadd321ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fmsubadd321ph_mask: + ;CHECK: vfmsubadd231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %neg = fneg <32 x half> %a0 + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a2, <32 x half> %a1, <32 x half> %neg, i32 4) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fmsubadd132ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fmsubadd132ph_mask: + ;CHECK: vfmsubadd132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %neg = fneg <32 x half> %a1 + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a0, <32 x half> %a2, <32 x half> %neg, i32 4) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fmsubadd312ph_mask(<32 x half>* %p, <32 x half> %a1, <32 x half> %a2, i32 %mask) { + ;CHECK-LABEL: stack_fold_fmsubadd312ph_mask: + ;CHECK: vfmsubadd132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", 
"=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <32 x half>, <32 x half>* %p + %neg = fneg <32 x half> %a1 + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a2, <32 x half> %a0, <32 x half> %neg, i32 4) + %3 = bitcast i32 %mask to <32 x i1> + %4 = select <32 x i1> %3, <32 x half> %2, <32 x half> %a0 + ret <32 x half> %4 +} + +define <32 x half> @stack_fold_fmsubadd123ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fmsubadd123ph_maskz: + ;CHECK: vfmsubadd213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <32 x half> %a2 + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a0, <32 x half> %a1, <32 x half> %neg, i32 4) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fmsubadd213ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fmsubadd213ph_maskz: + ;CHECK: vfmsubadd213ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <32 x half> %a2 + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a1, <32 x half> %a0, <32 x half> %neg, i32 4) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fmsubadd231ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fmsubadd231ph_maskz: + ;CHECK: vfmsubadd231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <32 x half> %a0 + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a1, <32 x half> %a2, <32 x half> %neg, i32 4) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fmsubadd321ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fmsubadd321ph_maskz: + 
;CHECK: vfmsubadd231ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <32 x half> %a0 + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a2, <32 x half> %a1, <32 x half> %neg, i32 4) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fmsubadd132ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fmsubadd132ph_maskz: + ;CHECK: vfmsubadd132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <32 x half> %a1 + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a0, <32 x half> %a2, <32 x half> %neg, i32 4) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} + +define <32 x half> @stack_fold_fmsubadd312ph_maskz(<32 x half> %a0, <32 x half> %a1, <32 x half> %a2, i32* %mask) { + ;CHECK-LABEL: stack_fold_fmsubadd312ph_maskz: + ;CHECK: vfmsubadd132ph {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 64-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <32 x half> %a1 + %2 = call <32 x half> @llvm.x86.avx512fp16.vfmaddsub.ph.512(<32 x half> %a2, <32 x half> %a0, <32 x half> %neg, i32 4) + %3 = load i32, i32* %mask + %4 = bitcast i32 %3 to <32 x i1> + %5 = select <32 x i1> %4, <32 x half> %2, <32 x half> zeroinitializer + ret <32 x half> %5 +} diff --git a/llvm/test/CodeGen/X86/stack-folding-fp-avx512fp16vl-fma.ll b/llvm/test/CodeGen/X86/stack-folding-fp-avx512fp16vl-fma.ll new file mode 100644 index 0000000000000..0415d47009d5a --- /dev/null +++ b/llvm/test/CodeGen/X86/stack-folding-fp-avx512fp16vl-fma.ll @@ -0,0 +1,1595 @@ +; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512fp16,+avx512vl < %s | FileCheck %s + +target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-unknown" + +; Stack reload folding tests. +; +; By including a nop call with sideeffects we can force a partial register spill of the +; relevant registers and check that the reload is correctly folded into the instruction. 
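+;
+; A condensed, illustrative sketch of the pattern every test in this file follows
+; (not itself one of the tests; the clobber list is abbreviated here): the inline
+; asm "nop" clobbers xmm2/xmm3 through xmm31 and the flags, so a vector argument
+; that is live across it has to be spilled to a stack slot, and the FMA that
+; consumes it is expected to fold the 16-byte reload straight into its memory
+; operand, which the CHECK lines match as a "Folded Reload". Roughly:
+;
+;   %spill = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},...,~{xmm31},~{flags}"()
+;   %r = call <8 x half> @llvm.fma.v8f16(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2)
+;   ret <8 x half> %r
+;
+; should lower to a single vfmadd213ph with an (%rsp) memory operand rather than
+; a separate reload followed by a register-register FMA.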
+ +define <8 x half> @stack_fold_fmadd123ph(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmadd123ph: + ;CHECK: vfmadd213ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) + ret <8 x half> %2 +} +declare <8 x half> @llvm.fma.v8f16(<8 x half>, <8 x half>, <8 x half>) + +define <8 x half> @stack_fold_fmadd213ph(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmadd213ph: + ;CHECK: vfmadd213ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a1, <8 x half> %a0, <8 x half> %a2) + ret <8 x half> %2 +} + +define <8 x half> @stack_fold_fmadd231ph(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmadd231ph: + ;CHECK: vfmadd231ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a1, <8 x half> %a2, <8 x half> %a0) + ret <8 x half> %2 +} + +define <8 x half> @stack_fold_fmadd321ph(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmadd321ph: + ;CHECK: vfmadd231ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a2, <8 x half> %a1, <8 x half> %a0) + ret <8 x half> %2 +} + +define <8 x half> @stack_fold_fmadd132ph(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmadd132ph: + ;CHECK: vfmadd132ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a0, <8 x half> %a2, <8 x half> %a1) + ret <8 x half> %2 +} + +define <8 x half> @stack_fold_fmadd312ph(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) { + 
;CHECK-LABEL: stack_fold_fmadd312ph: + ;CHECK: vfmadd132ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a2, <8 x half> %a0, <8 x half> %a1) + ret <8 x half> %2 +} + +define <8 x half> @stack_fold_fmadd123ph_mask(<8 x half>* %p, <8 x half> %a1, <8 x half> %a2, i8 %mask) { + ;CHECK-LABEL: stack_fold_fmadd123ph_mask: + ;CHECK: vfmadd213ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <8 x half>, <8 x half>* %p + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) + %3 = bitcast i8 %mask to <8 x i1> + %4 = select <8 x i1> %3, <8 x half> %2, <8 x half> %a0 + ret <8 x half> %4 +} + +define <8 x half> @stack_fold_fmadd213ph_mask(<8 x half>* %p, <8 x half> %a1, <8 x half> %a2, i8 %mask) { + ;CHECK-LABEL: stack_fold_fmadd213ph_mask: + ;CHECK: vfmadd213ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <8 x half>, <8 x half>* %p + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a1, <8 x half> %a0, <8 x half> %a2) + %3 = bitcast i8 %mask to <8 x i1> + %4 = select <8 x i1> %3, <8 x half> %2, <8 x half> %a0 + ret <8 x half> %4 +} + +define <8 x half> @stack_fold_fmadd231ph_mask(<8 x half>* %p, <8 x half> %a1, <8 x half> %a2, i8 %mask) { + ;CHECK-LABEL: stack_fold_fmadd231ph_mask: + ;CHECK: vfmadd231ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <8 x half>, <8 x half>* %p + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a1, <8 x half> %a2, <8 x half> %a0) + %3 = bitcast i8 %mask to <8 x i1> + %4 = select <8 x i1> %3, <8 x half> %2, <8 x half> %a0 + ret <8 x half> %4 +} + +define <8 x half> @stack_fold_fmadd321ph_mask(<8 x half>* %p, <8 x half> %a1, <8 x half> %a2, i8 %mask) { + ;CHECK-LABEL: stack_fold_fmadd321ph_mask: + ;CHECK: vfmadd231ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", 
"=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <8 x half>, <8 x half>* %p + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a2, <8 x half> %a1, <8 x half> %a0) + %3 = bitcast i8 %mask to <8 x i1> + %4 = select <8 x i1> %3, <8 x half> %2, <8 x half> %a0 + ret <8 x half> %4 +} + +define <8 x half> @stack_fold_fmadd132ph_mask(<8 x half>* %p, <8 x half> %a1, <8 x half> %a2, i8 %mask) { + ;CHECK-LABEL: stack_fold_fmadd132ph_mask: + ;CHECK: vfmadd132ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <8 x half>, <8 x half>* %p + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a0, <8 x half> %a2, <8 x half> %a1) + %3 = bitcast i8 %mask to <8 x i1> + %4 = select <8 x i1> %3, <8 x half> %2, <8 x half> %a0 + ret <8 x half> %4 +} + +define <8 x half> @stack_fold_fmadd312ph_mask(<8 x half>* %p, <8 x half> %a1, <8 x half> %a2, i8 %mask) { + ;CHECK-LABEL: stack_fold_fmadd312ph_mask: + ;CHECK: vfmadd132ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <8 x half>, <8 x half>* %p + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a2, <8 x half> %a0, <8 x half> %a1) + %3 = bitcast i8 %mask to <8 x i1> + %4 = select <8 x i1> %3, <8 x half> %2, <8 x half> %a0 + ret <8 x half> %4 +} + +define <8 x half> @stack_fold_fmadd123ph_maskz(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmadd123ph_maskz: + ;CHECK: vfmadd213ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = select <8 x i1> %4, <8 x half> %2, <8 x half> zeroinitializer + ret <8 x half> %5 +} + +define <8 x half> @stack_fold_fmadd213ph_maskz(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmadd213ph_maskz: + ;CHECK: vfmadd213ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", 
"=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a1, <8 x half> %a0, <8 x half> %a2) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = select <8 x i1> %4, <8 x half> %2, <8 x half> zeroinitializer + ret <8 x half> %5 +} + +define <8 x half> @stack_fold_fmadd231ph_maskz(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmadd231ph_maskz: + ;CHECK: vfmadd231ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a1, <8 x half> %a2, <8 x half> %a0) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = select <8 x i1> %4, <8 x half> %2, <8 x half> zeroinitializer + ret <8 x half> %5 +} + +define <8 x half> @stack_fold_fmadd321ph_maskz(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmadd321ph_maskz: + ;CHECK: vfmadd231ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a2, <8 x half> %a1, <8 x half> %a0) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = select <8 x i1> %4, <8 x half> %2, <8 x half> zeroinitializer + ret <8 x half> %5 +} + +define <8 x half> @stack_fold_fmadd132ph_maskz(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmadd132ph_maskz: + ;CHECK: vfmadd132ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a0, <8 x half> %a2, <8 x half> %a1) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = select <8 x i1> %4, <8 x half> %2, <8 x half> zeroinitializer + ret <8 x half> %5 +} + +define <8 x half> @stack_fold_fmadd312ph_maskz(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmadd312ph_maskz: + ;CHECK: vfmadd132ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", 
"=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a2, <8 x half> %a0, <8 x half> %a1) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = select <8 x i1> %4, <8 x half> %2, <8 x half> zeroinitializer + ret <8 x half> %5 +} + +define <8 x half> @stack_fold_fmsub123ph(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmsub123ph: + ;CHECK: vfmsub213ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <8 x half> %a2 + %3 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a0, <8 x half> %a1, <8 x half> %2) + ret <8 x half> %3 +} + +define <8 x half> @stack_fold_fmsub213ph(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmsub213ph: + ;CHECK: vfmsub213ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <8 x half> %a2 + %3 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a1, <8 x half> %a0, <8 x half> %2) + ret <8 x half> %3 +} + +define <8 x half> @stack_fold_fmsub231ph(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmsub231ph: + ;CHECK: vfmsub231ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <8 x half> %a0 + %3 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a1, <8 x half> %a2, <8 x half> %2) + ret <8 x half> %3 +} + +define <8 x half> @stack_fold_fmsub321ph(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmsub321ph: + ;CHECK: vfmsub231ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <8 x half> %a0 + %3 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a2, <8 x half> %a1, <8 x half> %2) + ret <8 x half> %3 +} + +define <8 x half> @stack_fold_fmsub132ph(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmsub132ph: + ;CHECK: vfmsub132ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte 
Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <8 x half> %a1 + %3 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a0, <8 x half> %a2, <8 x half> %2) + ret <8 x half> %3 +} + +define <8 x half> @stack_fold_fmsub312ph(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmsub312ph: + ;CHECK: vfmsub132ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <8 x half> %a1 + %3 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a2, <8 x half> %a0, <8 x half> %2) + ret <8 x half> %3 +} + +define <8 x half> @stack_fold_fmsub123ph_mask(<8 x half>* %p, <8 x half> %a1, <8 x half> %a2, i8 %mask) { + ;CHECK-LABEL: stack_fold_fmsub123ph_mask: + ;CHECK: vfmsub213ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <8 x half>, <8 x half>* %p + %neg = fneg <8 x half> %a2 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a0, <8 x half> %a1, <8 x half> %neg) + %3 = bitcast i8 %mask to <8 x i1> + %4 = select <8 x i1> %3, <8 x half> %2, <8 x half> %a0 + ret <8 x half> %4 +} + +define <8 x half> @stack_fold_fmsub213ph_mask(<8 x half>* %p, <8 x half> %a1, <8 x half> %a2, i8 %mask) { + ;CHECK-LABEL: stack_fold_fmsub213ph_mask: + ;CHECK: vfmsub213ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <8 x half>, <8 x half>* %p + %neg = fneg <8 x half> %a2 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a1, <8 x half> %a0, <8 x half> %neg) + %3 = bitcast i8 %mask to <8 x i1> + %4 = select <8 x i1> %3, <8 x half> %2, <8 x half> %a0 + ret <8 x half> %4 +} + +define <8 x half> @stack_fold_fmsub231ph_mask(<8 x half>* %p, <8 x half> %a1, <8 x half> %a2, i8 %mask) { + ;CHECK-LABEL: stack_fold_fmsub231ph_mask: + ;CHECK: vfmsub231ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load 
<8 x half>, <8 x half>* %p + %neg = fneg <8 x half> %a0 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a1, <8 x half> %a2, <8 x half> %neg) + %3 = bitcast i8 %mask to <8 x i1> + %4 = select <8 x i1> %3, <8 x half> %2, <8 x half> %a0 + ret <8 x half> %4 +} + +define <8 x half> @stack_fold_fmsub321ph_mask(<8 x half>* %p, <8 x half> %a1, <8 x half> %a2, i8 %mask) { + ;CHECK-LABEL: stack_fold_fmsub321ph_mask: + ;CHECK: vfmsub231ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <8 x half>, <8 x half>* %p + %neg = fneg <8 x half> %a0 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a2, <8 x half> %a1, <8 x half> %neg) + %3 = bitcast i8 %mask to <8 x i1> + %4 = select <8 x i1> %3, <8 x half> %2, <8 x half> %a0 + ret <8 x half> %4 +} + +define <8 x half> @stack_fold_fmsub132ph_mask(<8 x half>* %p, <8 x half> %a1, <8 x half> %a2, i8 %mask) { + ;CHECK-LABEL: stack_fold_fmsub132ph_mask: + ;CHECK: vfmsub132ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <8 x half>, <8 x half>* %p + %neg = fneg <8 x half> %a1 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a0, <8 x half> %a2, <8 x half> %neg) + %3 = bitcast i8 %mask to <8 x i1> + %4 = select <8 x i1> %3, <8 x half> %2, <8 x half> %a0 + ret <8 x half> %4 +} + +define <8 x half> @stack_fold_fmsub312ph_mask(<8 x half>* %p, <8 x half> %a1, <8 x half> %a2, i8 %mask) { + ;CHECK-LABEL: stack_fold_fmsub312ph_mask: + ;CHECK: vfmsub132ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <8 x half>, <8 x half>* %p + %neg = fneg <8 x half> %a1 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a2, <8 x half> %a0, <8 x half> %neg) + %3 = bitcast i8 %mask to <8 x i1> + %4 = select <8 x i1> %3, <8 x half> %2, <8 x half> %a0 + ret <8 x half> %4 +} + +define <8 x half> @stack_fold_fmsub123ph_maskz(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmsub123ph_maskz: + ;CHECK: vfmsub213ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <8 x half> %a2 + %2 = call <8 
x half> @llvm.fma.v8f16(<8 x half> %a0, <8 x half> %a1, <8 x half> %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = select <8 x i1> %4, <8 x half> %2, <8 x half> zeroinitializer + ret <8 x half> %5 +} + +define <8 x half> @stack_fold_fmsub213ph_maskz(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmsub213ph_maskz: + ;CHECK: vfmsub213ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <8 x half> %a2 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a1, <8 x half> %a0, <8 x half> %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = select <8 x i1> %4, <8 x half> %2, <8 x half> zeroinitializer + ret <8 x half> %5 +} + +define <8 x half> @stack_fold_fmsub231ph_maskz(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmsub231ph_maskz: + ;CHECK: vfmsub231ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <8 x half> %a0 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a1, <8 x half> %a2, <8 x half> %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = select <8 x i1> %4, <8 x half> %2, <8 x half> zeroinitializer + ret <8 x half> %5 +} + +define <8 x half> @stack_fold_fmsub321ph_maskz(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmsub321ph_maskz: + ;CHECK: vfmsub231ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <8 x half> %a0 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a2, <8 x half> %a1, <8 x half> %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = select <8 x i1> %4, <8 x half> %2, <8 x half> zeroinitializer + ret <8 x half> %5 +} + +define <8 x half> @stack_fold_fmsub132ph_maskz(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmsub132ph_maskz: + ;CHECK: vfmsub132ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <8 x half> %a1 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a0, <8 x half> 
%a2, <8 x half> %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = select <8 x i1> %4, <8 x half> %2, <8 x half> zeroinitializer + ret <8 x half> %5 +} + +define <8 x half> @stack_fold_fmsub312ph_maskz(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2, i8* %mask) { + ;CHECK-LABEL: stack_fold_fmsub312ph_maskz: + ;CHECK: vfmsub132ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <8 x half> %a1 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %a2, <8 x half> %a0, <8 x half> %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = select <8 x i1> %4, <8 x half> %2, <8 x half> zeroinitializer + ret <8 x half> %5 +} + +define <8 x half> @stack_fold_fnmadd123ph(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmadd123ph: + ;CHECK: vfnmadd213ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <8 x half> %a0 + %3 = call <8 x half> @llvm.fma.v8f16(<8 x half> %2, <8 x half> %a1, <8 x half> %a2) + ret <8 x half> %3 +} + +define <8 x half> @stack_fold_fnmadd213ph(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmadd213ph: + ;CHECK: vfnmadd213ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <8 x half> %a1 + %3 = call <8 x half> @llvm.fma.v8f16(<8 x half> %2, <8 x half> %a0, <8 x half> %a2) + ret <8 x half> %3 +} + +define <8 x half> @stack_fold_fnmadd231ph(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmadd231ph: + ;CHECK: vfnmadd231ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <8 x half> %a1 + %3 = call <8 x half> @llvm.fma.v8f16(<8 x half> %2, <8 x half> %a2, <8 x half> %a0) + ret <8 x half> %3 +} + +define <8 x half> @stack_fold_fnmadd321ph(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmadd321ph: + ;CHECK: vfnmadd231ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", 
"=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <8 x half> %a2 + %3 = call <8 x half> @llvm.fma.v8f16(<8 x half> %2, <8 x half> %a1, <8 x half> %a0) + ret <8 x half> %3 +} + +define <8 x half> @stack_fold_fnmadd132ph(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmadd132ph: + ;CHECK: vfnmadd132ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <8 x half> %a0 + %3 = call <8 x half> @llvm.fma.v8f16(<8 x half> %2, <8 x half> %a2, <8 x half> %a1) + ret <8 x half> %3 +} + +define <8 x half> @stack_fold_fnmadd312ph(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmadd312ph: + ;CHECK: vfnmadd132ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <8 x half> %a2 + %3 = call <8 x half> @llvm.fma.v8f16(<8 x half> %2, <8 x half> %a0, <8 x half> %a1) + ret <8 x half> %3 +} + +define <8 x half> @stack_fold_fnmadd123ph_mask(<8 x half>* %p, <8 x half> %a1, <8 x half> %a2, i8 %mask) { + ;CHECK-LABEL: stack_fold_fnmadd123ph_mask: + ;CHECK: vfnmadd213ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <8 x half>, <8 x half>* %p + %neg = fneg <8 x half> %a0 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %neg, <8 x half> %a1, <8 x half> %a2) + %3 = bitcast i8 %mask to <8 x i1> + %4 = select <8 x i1> %3, <8 x half> %2, <8 x half> %a0 + ret <8 x half> %4 +} + +define <8 x half> @stack_fold_fnmadd213ph_mask(<8 x half>* %p, <8 x half> %a1, <8 x half> %a2, i8 %mask) { + ;CHECK-LABEL: stack_fold_fnmadd213ph_mask: + ;CHECK: vfnmadd213ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <8 x half>, <8 x half>* %p + %neg = fneg <8 x half> %a1 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %neg, <8 x half> %a0, <8 x half> %a2) + %3 = bitcast i8 %mask to <8 x i1> + %4 = select <8 x i1> %3, <8 x half> %2, <8 x 
half> %a0 + ret <8 x half> %4 +} + +define <8 x half> @stack_fold_fnmadd231ph_mask(<8 x half>* %p, <8 x half> %a1, <8 x half> %a2, i8 %mask) { + ;CHECK-LABEL: stack_fold_fnmadd231ph_mask: + ;CHECK: vfnmadd231ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <8 x half>, <8 x half>* %p + %neg = fneg <8 x half> %a1 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %neg, <8 x half> %a2, <8 x half> %a0) + %3 = bitcast i8 %mask to <8 x i1> + %4 = select <8 x i1> %3, <8 x half> %2, <8 x half> %a0 + ret <8 x half> %4 +} + +define <8 x half> @stack_fold_fnmadd321ph_mask(<8 x half>* %p, <8 x half> %a1, <8 x half> %a2, i8 %mask) { + ;CHECK-LABEL: stack_fold_fnmadd321ph_mask: + ;CHECK: vfnmadd231ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <8 x half>, <8 x half>* %p + %neg = fneg <8 x half> %a2 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %neg, <8 x half> %a1, <8 x half> %a0) + %3 = bitcast i8 %mask to <8 x i1> + %4 = select <8 x i1> %3, <8 x half> %2, <8 x half> %a0 + ret <8 x half> %4 +} + +define <8 x half> @stack_fold_fnmadd132ph_mask(<8 x half>* %p, <8 x half> %a1, <8 x half> %a2, i8 %mask) { + ;CHECK-LABEL: stack_fold_fnmadd132ph_mask: + ;CHECK: vfnmadd132ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <8 x half>, <8 x half>* %p + %neg = fneg <8 x half> %a0 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %neg, <8 x half> %a2, <8 x half> %a1) + %3 = bitcast i8 %mask to <8 x i1> + %4 = select <8 x i1> %3, <8 x half> %2, <8 x half> %a0 + ret <8 x half> %4 +} + +define <8 x half> @stack_fold_fnmadd312ph_mask(<8 x half>* %p, <8 x half> %a1, <8 x half> %a2, i8 %mask) { + ;CHECK-LABEL: stack_fold_fnmadd312ph_mask: + ;CHECK: vfnmadd132ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <8 x half>, <8 x half>* %p + %neg = fneg <8 x half> %a2 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %neg, <8 x half> %a0, <8 x half> %a1) + %3 = bitcast i8 %mask to <8 x i1> + %4 = select <8 x i1> %3, <8 x half> %2, <8 x half> %a0 + ret <8 
x half> %4 +} + +define <8 x half> @stack_fold_fnmadd123ph_maskz(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd123ph_maskz: + ;CHECK: vfnmadd213ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <8 x half> %a0 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %neg, <8 x half> %a1, <8 x half> %a2) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = select <8 x i1> %4, <8 x half> %2, <8 x half> zeroinitializer + ret <8 x half> %5 +} + +define <8 x half> @stack_fold_fnmadd213ph_maskz(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd213ph_maskz: + ;CHECK: vfnmadd213ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <8 x half> %a1 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %neg, <8 x half> %a0, <8 x half> %a2) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = select <8 x i1> %4, <8 x half> %2, <8 x half> zeroinitializer + ret <8 x half> %5 +} + +define <8 x half> @stack_fold_fnmadd231ph_maskz(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd231ph_maskz: + ;CHECK: vfnmadd231ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <8 x half> %a1 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %neg, <8 x half> %a2, <8 x half> %a0) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = select <8 x i1> %4, <8 x half> %2, <8 x half> zeroinitializer + ret <8 x half> %5 +} + +define <8 x half> @stack_fold_fnmadd321ph_maskz(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd321ph_maskz: + ;CHECK: vfnmadd231ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <8 x half> %a2 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %neg, <8 x half> %a1, <8 x half> %a0) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = select <8 x i1> %4, <8 x half> %2, <8 x half> zeroinitializer + ret <8 x half> %5 +} + +define <8 x half> 
@stack_fold_fnmadd132ph_maskz(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd132ph_maskz: + ;CHECK: vfnmadd132ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <8 x half> %a0 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %neg, <8 x half> %a2, <8 x half> %a1) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = select <8 x i1> %4, <8 x half> %2, <8 x half> zeroinitializer + ret <8 x half> %5 +} + +define <8 x half> @stack_fold_fnmadd312ph_maskz(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd312ph_maskz: + ;CHECK: vfnmadd132ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <8 x half> %a2 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %neg, <8 x half> %a0, <8 x half> %a1) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = select <8 x i1> %4, <8 x half> %2, <8 x half> zeroinitializer + ret <8 x half> %5 +} + +define <8 x half> @stack_fold_fnmsub123ph(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmsub123ph: + ;CHECK: vfnmsub213ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <8 x half> %a0 + %3 = fneg <8 x half> %a2 + %4 = call <8 x half> @llvm.fma.v8f16(<8 x half> %2, <8 x half> %a1, <8 x half> %3) + ret <8 x half> %4 +} + +define <8 x half> @stack_fold_fnmsub213ph(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmsub213ph: + ;CHECK: vfnmsub213ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <8 x half> %a1 + %3 = fneg <8 x half> %a2 + %4 = call <8 x half> @llvm.fma.v8f16(<8 x half> %2, <8 x half> %a0, <8 x half> %3) + ret <8 x half> %4 +} + +define <8 x half> @stack_fold_fnmsub231ph(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmsub231ph: + ;CHECK: vfnmsub231ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", 
"=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <8 x half> %a1 + %3 = fneg <8 x half> %a0 + %4 = call <8 x half> @llvm.fma.v8f16(<8 x half> %2, <8 x half> %a2, <8 x half> %3) + ret <8 x half> %4 +} + +define <8 x half> @stack_fold_fnmsub321ph(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmsub321ph: + ;CHECK: vfnmsub231ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <8 x half> %a2 + %3 = fneg <8 x half> %a0 + %4 = call <8 x half> @llvm.fma.v8f16(<8 x half> %2, <8 x half> %a1, <8 x half> %3) + ret <8 x half> %4 +} + +define <8 x half> @stack_fold_fnmsub132ph(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmsub132ph: + ;CHECK: vfnmsub132ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <8 x half> %a0 + %3 = fneg <8 x half> %a1 + %4 = call <8 x half> @llvm.fma.v8f16(<8 x half> %2, <8 x half> %a2, <8 x half> %3) + ret <8 x half> %4 +} + +define <8 x half> @stack_fold_fnmsub312ph(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmsub312ph: + ;CHECK: vfnmsub132ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <8 x half> %a2 + %3 = fneg <8 x half> %a1 + %4 = call <8 x half> @llvm.fma.v8f16(<8 x half> %2, <8 x half> %a0, <8 x half> %3) + ret <8 x half> %4 +} + +define <8 x half> @stack_fold_fnmsub123ph_mask(<8 x half>* %p, <8 x half> %a1, <8 x half> %a2, i8 %mask) { + ;CHECK-LABEL: stack_fold_fnmsub123ph_mask: + ;CHECK: vfnmsub213ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <8 x half>, <8 x half>* %p + %neg = fneg <8 x half> %a2 + %neg1 = fneg <8 x half> %a0 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %neg1, <8 x half> %a1, <8 x half> %neg) + %3 = bitcast i8 %mask to <8 x i1> + %4 = select <8 x i1> %3, <8 x half> %2, <8 x half> %a0 + ret <8 x half> %4 +} + +define <8 
x half> @stack_fold_fnmsub213ph_mask(<8 x half>* %p, <8 x half> %a1, <8 x half> %a2, i8 %mask) { + ;CHECK-LABEL: stack_fold_fnmsub213ph_mask: + ;CHECK: vfnmsub213ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <8 x half>, <8 x half>* %p + %neg = fneg <8 x half> %a2 + %neg1 = fneg <8 x half> %a1 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %neg1, <8 x half> %a0, <8 x half> %neg) + %3 = bitcast i8 %mask to <8 x i1> + %4 = select <8 x i1> %3, <8 x half> %2, <8 x half> %a0 + ret <8 x half> %4 +} + +define <8 x half> @stack_fold_fnmsub231ph_mask(<8 x half>* %p, <8 x half> %a1, <8 x half> %a2, i8 %mask) { + ;CHECK-LABEL: stack_fold_fnmsub231ph_mask: + ;CHECK: vfnmsub231ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <8 x half>, <8 x half>* %p + %neg = fneg <8 x half> %a0 + %neg1 = fneg <8 x half> %a1 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %neg1, <8 x half> %a2, <8 x half> %neg) + %3 = bitcast i8 %mask to <8 x i1> + %4 = select <8 x i1> %3, <8 x half> %2, <8 x half> %a0 + ret <8 x half> %4 +} + +define <8 x half> @stack_fold_fnmsub321ph_mask(<8 x half>* %p, <8 x half> %a1, <8 x half> %a2, i8 %mask) { + ;CHECK-LABEL: stack_fold_fnmsub321ph_mask: + ;CHECK: vfnmsub231ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <8 x half>, <8 x half>* %p + %neg = fneg <8 x half> %a0 + %neg1 = fneg <8 x half> %a2 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %neg1, <8 x half> %a1, <8 x half> %neg) + %3 = bitcast i8 %mask to <8 x i1> + %4 = select <8 x i1> %3, <8 x half> %2, <8 x half> %a0 + ret <8 x half> %4 +} + +define <8 x half> @stack_fold_fnmsub132ph_mask(<8 x half>* %p, <8 x half> %a1, <8 x half> %a2, i8 %mask) { + ;CHECK-LABEL: stack_fold_fnmsub132ph_mask: + ;CHECK: vfnmsub132ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <8 x half>, <8 x half>* %p + %neg = fneg <8 x half> %a1 + %neg1 = fneg <8 x half> %a0 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %neg1, <8 x half> %a2, <8 x half> %neg) + %3 = bitcast i8 
%mask to <8 x i1> + %4 = select <8 x i1> %3, <8 x half> %2, <8 x half> %a0 + ret <8 x half> %4 +} + +define <8 x half> @stack_fold_fnmsub312ph_mask(<8 x half>* %p, <8 x half> %a1, <8 x half> %a2, i8 %mask) { + ;CHECK-LABEL: stack_fold_fnmsub312ph_mask: + ;CHECK: vfnmsub132ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <8 x half>, <8 x half>* %p + %neg = fneg <8 x half> %a1 + %neg1 = fneg <8 x half> %a2 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %neg1, <8 x half> %a0, <8 x half> %neg) + %3 = bitcast i8 %mask to <8 x i1> + %4 = select <8 x i1> %3, <8 x half> %2, <8 x half> %a0 + ret <8 x half> %4 +} + +define <8 x half> @stack_fold_fnmsub123ph_maskz(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmsub123ph_maskz: + ;CHECK: vfnmsub213ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <8 x half> %a2 + %neg1 = fneg <8 x half> %a0 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %neg1, <8 x half> %a1, <8 x half> %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = select <8 x i1> %4, <8 x half> %2, <8 x half> zeroinitializer + ret <8 x half> %5 +} + +define <8 x half> @stack_fold_fnmsub213ph_maskz(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmsub213ph_maskz: + ;CHECK: vfnmsub213ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <8 x half> %a2 + %neg1 = fneg <8 x half> %a1 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %neg1, <8 x half> %a0, <8 x half> %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = select <8 x i1> %4, <8 x half> %2, <8 x half> zeroinitializer + ret <8 x half> %5 +} + +define <8 x half> @stack_fold_fnmsub231ph_maskz(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmsub231ph_maskz: + ;CHECK: vfnmsub231ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <8 x half> %a0 + %neg1 = fneg <8 x half> %a1 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> 
%neg1, <8 x half> %a2, <8 x half> %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = select <8 x i1> %4, <8 x half> %2, <8 x half> zeroinitializer + ret <8 x half> %5 +} + +define <8 x half> @stack_fold_fnmsub321ph_maskz(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmsub321ph_maskz: + ;CHECK: vfnmsub231ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <8 x half> %a0 + %neg1 = fneg <8 x half> %a2 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %neg1, <8 x half> %a1, <8 x half> %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = select <8 x i1> %4, <8 x half> %2, <8 x half> zeroinitializer + ret <8 x half> %5 +} + +define <8 x half> @stack_fold_fnmsub132ph_maskz(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmsub132ph_maskz: + ;CHECK: vfnmsub132ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <8 x half> %a1 + %neg1 = fneg <8 x half> %a0 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %neg1, <8 x half> %a2, <8 x half> %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = select <8 x i1> %4, <8 x half> %2, <8 x half> zeroinitializer + ret <8 x half> %5 +} + +define <8 x half> @stack_fold_fnmsub312ph_maskz(<8 x half> %a0, <8 x half> %a1, <8 x half> %a2, i8* %mask) { + ;CHECK-LABEL: stack_fold_fnmsub312ph_maskz: + ;CHECK: vfnmsub132ph {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 16-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <8 x half> %a1 + %neg1 = fneg <8 x half> %a2 + %2 = call <8 x half> @llvm.fma.v8f16(<8 x half> %neg1, <8 x half> %a0, <8 x half> %neg) + %3 = load i8, i8* %mask + %4 = bitcast i8 %3 to <8 x i1> + %5 = select <8 x i1> %4, <8 x half> %2, <8 x half> zeroinitializer + ret <8 x half> %5 +} + +define <16 x half> @stack_fold_fmadd123ph_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmadd123ph_ymm: + ;CHECK: vfmadd213ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> 
%a0, <16 x half> %a1, <16 x half> %a2) + ret <16 x half> %2 +} +declare <16 x half> @llvm.fma.v16f16(<16 x half>, <16 x half>, <16 x half>) + +define <16 x half> @stack_fold_fmadd213ph_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmadd213ph_ymm: + ;CHECK: vfmadd213ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a1, <16 x half> %a0, <16 x half> %a2) + ret <16 x half> %2 +} + +define <16 x half> @stack_fold_fmadd231ph_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmadd231ph_ymm: + ;CHECK: vfmadd231ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a1, <16 x half> %a2, <16 x half> %a0) + ret <16 x half> %2 +} + +define <16 x half> @stack_fold_fmadd321ph_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmadd321ph_ymm: + ;CHECK: vfmadd231ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a2, <16 x half> %a1, <16 x half> %a0) + ret <16 x half> %2 +} + +define <16 x half> @stack_fold_fmadd132ph_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmadd132ph_ymm: + ;CHECK: vfmadd132ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a0, <16 x half> %a2, <16 x half> %a1) + ret <16 x half> %2 +} + +define <16 x half> @stack_fold_fmadd312ph_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmadd312ph_ymm: + ;CHECK: vfmadd132ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a2, 
<16 x half> %a0, <16 x half> %a1) + ret <16 x half> %2 +} + +define <16 x half> @stack_fold_fmadd123ph_mask_ymm(<16 x half>* %p, <16 x half> %a1, <16 x half> %a2, i16 %mask) { + ;CHECK-LABEL: stack_fold_fmadd123ph_mask_ymm: + ;CHECK: vfmadd213ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <16 x half>, <16 x half>* %p + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2) + %3 = bitcast i16 %mask to <16 x i1> + %4 = select <16 x i1> %3, <16 x half> %2, <16 x half> %a0 + ret <16 x half> %4 +} + +define <16 x half> @stack_fold_fmadd213ph_mask_ymm(<16 x half>* %p, <16 x half> %a1, <16 x half> %a2, i16 %mask) { + ;CHECK-LABEL: stack_fold_fmadd213ph_mask_ymm: + ;CHECK: vfmadd213ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <16 x half>, <16 x half>* %p + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a1, <16 x half> %a0, <16 x half> %a2) + %3 = bitcast i16 %mask to <16 x i1> + %4 = select <16 x i1> %3, <16 x half> %2, <16 x half> %a0 + ret <16 x half> %4 +} + +define <16 x half> @stack_fold_fmadd231ph_mask_ymm(<16 x half>* %p, <16 x half> %a1, <16 x half> %a2, i16 %mask) { + ;CHECK-LABEL: stack_fold_fmadd231ph_mask_ymm: + ;CHECK: vfmadd231ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <16 x half>, <16 x half>* %p + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a1, <16 x half> %a2, <16 x half> %a0) + %3 = bitcast i16 %mask to <16 x i1> + %4 = select <16 x i1> %3, <16 x half> %2, <16 x half> %a0 + ret <16 x half> %4 +} + +define <16 x half> @stack_fold_fmadd321ph_mask_ymm(<16 x half>* %p, <16 x half> %a1, <16 x half> %a2, i16 %mask) { + ;CHECK-LABEL: stack_fold_fmadd321ph_mask_ymm: + ;CHECK: vfmadd231ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <16 x half>, <16 x half>* %p + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a2, <16 x half> %a1, <16 x half> %a0) + %3 = bitcast i16 %mask to <16 x i1> + %4 = select <16 x i1> %3, <16 x half> %2, <16 x half> %a0 + ret <16 x 
half> %4 +} + +define <16 x half> @stack_fold_fmadd132ph_mask_ymm(<16 x half>* %p, <16 x half> %a1, <16 x half> %a2, i16 %mask) { + ;CHECK-LABEL: stack_fold_fmadd132ph_mask_ymm: + ;CHECK: vfmadd132ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <16 x half>, <16 x half>* %p + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a0, <16 x half> %a2, <16 x half> %a1) + %3 = bitcast i16 %mask to <16 x i1> + %4 = select <16 x i1> %3, <16 x half> %2, <16 x half> %a0 + ret <16 x half> %4 +} + +define <16 x half> @stack_fold_fmadd312ph_mask_ymm(<16 x half>* %p, <16 x half> %a1, <16 x half> %a2, i16 %mask) { + ;CHECK-LABEL: stack_fold_fmadd312ph_mask_ymm: + ;CHECK: vfmadd132ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <16 x half>, <16 x half>* %p + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a2, <16 x half> %a0, <16 x half> %a1) + %3 = bitcast i16 %mask to <16 x i1> + %4 = select <16 x i1> %3, <16 x half> %2, <16 x half> %a0 + ret <16 x half> %4 +} + +define <16 x half> @stack_fold_fmadd123ph_maskz_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2, i16* %mask) { + ;CHECK-LABEL: stack_fold_fmadd123ph_maskz_ymm: + ;CHECK: vfmadd213ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2) + %3 = load i16, i16* %mask + %4 = bitcast i16 %3 to <16 x i1> + %5 = select <16 x i1> %4, <16 x half> %2, <16 x half> zeroinitializer + ret <16 x half> %5 +} + +define <16 x half> @stack_fold_fmadd213ph_maskz_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2, i16* %mask) { + ;CHECK-LABEL: stack_fold_fmadd213ph_maskz_ymm: + ;CHECK: vfmadd213ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a1, <16 x half> %a0, <16 x half> %a2) + %3 = load i16, i16* %mask + %4 = bitcast i16 %3 to <16 x i1> + %5 = select <16 x i1> %4, <16 x half> %2, <16 x half> zeroinitializer + ret <16 x half> %5 +} + +define <16 x half> 
@stack_fold_fmadd231ph_maskz_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2, i16* %mask) { + ;CHECK-LABEL: stack_fold_fmadd231ph_maskz_ymm: + ;CHECK: vfmadd231ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a1, <16 x half> %a2, <16 x half> %a0) + %3 = load i16, i16* %mask + %4 = bitcast i16 %3 to <16 x i1> + %5 = select <16 x i1> %4, <16 x half> %2, <16 x half> zeroinitializer + ret <16 x half> %5 +} + +define <16 x half> @stack_fold_fmadd321ph_maskz_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2, i16* %mask) { + ;CHECK-LABEL: stack_fold_fmadd321ph_maskz_ymm: + ;CHECK: vfmadd231ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a2, <16 x half> %a1, <16 x half> %a0) + %3 = load i16, i16* %mask + %4 = bitcast i16 %3 to <16 x i1> + %5 = select <16 x i1> %4, <16 x half> %2, <16 x half> zeroinitializer + ret <16 x half> %5 +} + +define <16 x half> @stack_fold_fmadd132ph_maskz_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2, i16* %mask) { + ;CHECK-LABEL: stack_fold_fmadd132ph_maskz_ymm: + ;CHECK: vfmadd132ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a0, <16 x half> %a2, <16 x half> %a1) + %3 = load i16, i16* %mask + %4 = bitcast i16 %3 to <16 x i1> + %5 = select <16 x i1> %4, <16 x half> %2, <16 x half> zeroinitializer + ret <16 x half> %5 +} + +define <16 x half> @stack_fold_fmadd312ph_maskz_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2, i16* %mask) { + ;CHECK-LABEL: stack_fold_fmadd312ph_maskz_ymm: + ;CHECK: vfmadd132ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a2, <16 x half> %a0, <16 x half> %a1) + %3 = load i16, i16* %mask + %4 = bitcast i16 %3 to <16 x i1> + %5 = select <16 x i1> %4, <16 x half> %2, <16 x half> zeroinitializer + ret <16 x half> %5 +} + +define <16 x half> @stack_fold_fmsub123ph_ymm(<16 x half> %a0, <16 x half> %a1, <16 x 
half> %a2) { + ;CHECK-LABEL: stack_fold_fmsub123ph_ymm: + ;CHECK: vfmsub213ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <16 x half> %a2 + %3 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a0, <16 x half> %a1, <16 x half> %2) + ret <16 x half> %3 +} + +define <16 x half> @stack_fold_fmsub213ph_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmsub213ph_ymm: + ;CHECK: vfmsub213ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <16 x half> %a2 + %3 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a1, <16 x half> %a0, <16 x half> %2) + ret <16 x half> %3 +} + +define <16 x half> @stack_fold_fmsub231ph_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmsub231ph_ymm: + ;CHECK: vfmsub231ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <16 x half> %a0 + %3 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a1, <16 x half> %a2, <16 x half> %2) + ret <16 x half> %3 +} + +define <16 x half> @stack_fold_fmsub321ph_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmsub321ph_ymm: + ;CHECK: vfmsub231ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <16 x half> %a0 + %3 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a2, <16 x half> %a1, <16 x half> %2) + ret <16 x half> %3 +} + +define <16 x half> @stack_fold_fmsub132ph_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmsub132ph_ymm: + ;CHECK: vfmsub132ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <16 x half> %a1 + %3 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a0, <16 x half> %a2, <16 x half> %2) + ret <16 x half> %3 +} + +define <16 x half> 
@stack_fold_fmsub312ph_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2) { + ;CHECK-LABEL: stack_fold_fmsub312ph_ymm: + ;CHECK: vfmsub132ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <16 x half> %a1 + %3 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a2, <16 x half> %a0, <16 x half> %2) + ret <16 x half> %3 +} + +define <16 x half> @stack_fold_fmsub123ph_mask_ymm(<16 x half>* %p, <16 x half> %a1, <16 x half> %a2, i16 %mask) { + ;CHECK-LABEL: stack_fold_fmsub123ph_mask_ymm: + ;CHECK: vfmsub213ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <16 x half>, <16 x half>* %p + %neg = fneg <16 x half> %a2 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a0, <16 x half> %a1, <16 x half> %neg) + %3 = bitcast i16 %mask to <16 x i1> + %4 = select <16 x i1> %3, <16 x half> %2, <16 x half> %a0 + ret <16 x half> %4 +} + +define <16 x half> @stack_fold_fmsub213ph_mask_ymm(<16 x half>* %p, <16 x half> %a1, <16 x half> %a2, i16 %mask) { + ;CHECK-LABEL: stack_fold_fmsub213ph_mask_ymm: + ;CHECK: vfmsub213ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <16 x half>, <16 x half>* %p + %neg = fneg <16 x half> %a2 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a1, <16 x half> %a0, <16 x half> %neg) + %3 = bitcast i16 %mask to <16 x i1> + %4 = select <16 x i1> %3, <16 x half> %2, <16 x half> %a0 + ret <16 x half> %4 +} + +define <16 x half> @stack_fold_fmsub231ph_mask_ymm(<16 x half>* %p, <16 x half> %a1, <16 x half> %a2, i16 %mask) { + ;CHECK-LABEL: stack_fold_fmsub231ph_mask_ymm: + ;CHECK: vfmsub231ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <16 x half>, <16 x half>* %p + %neg = fneg <16 x half> %a0 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a1, <16 x half> %a2, <16 x half> %neg) + %3 = bitcast i16 %mask to <16 x i1> + %4 = select <16 x i1> %3, <16 x half> %2, <16 x half> %a0 + ret <16 x half> %4 +} + +define <16 x half> @stack_fold_fmsub321ph_mask_ymm(<16 x half>* %p, <16 x half> %a1, <16 x half> %a2, i16 %mask) { + 
;CHECK-LABEL: stack_fold_fmsub321ph_mask_ymm: + ;CHECK: vfmsub231ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <16 x half>, <16 x half>* %p + %neg = fneg <16 x half> %a0 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a2, <16 x half> %a1, <16 x half> %neg) + %3 = bitcast i16 %mask to <16 x i1> + %4 = select <16 x i1> %3, <16 x half> %2, <16 x half> %a0 + ret <16 x half> %4 +} + +define <16 x half> @stack_fold_fmsub132ph_mask_ymm(<16 x half>* %p, <16 x half> %a1, <16 x half> %a2, i16 %mask) { + ;CHECK-LABEL: stack_fold_fmsub132ph_mask_ymm: + ;CHECK: vfmsub132ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <16 x half>, <16 x half>* %p + %neg = fneg <16 x half> %a1 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a0, <16 x half> %a2, <16 x half> %neg) + %3 = bitcast i16 %mask to <16 x i1> + %4 = select <16 x i1> %3, <16 x half> %2, <16 x half> %a0 + ret <16 x half> %4 +} + +define <16 x half> @stack_fold_fmsub312ph_mask_ymm(<16 x half>* %p, <16 x half> %a1, <16 x half> %a2, i16 %mask) { + ;CHECK-LABEL: stack_fold_fmsub312ph_mask_ymm: + ;CHECK: vfmsub132ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <16 x half>, <16 x half>* %p + %neg = fneg <16 x half> %a1 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a2, <16 x half> %a0, <16 x half> %neg) + %3 = bitcast i16 %mask to <16 x i1> + %4 = select <16 x i1> %3, <16 x half> %2, <16 x half> %a0 + ret <16 x half> %4 +} + +define <16 x half> @stack_fold_fmsub123ph_maskz_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2, i16* %mask) { + ;CHECK-LABEL: stack_fold_fmsub123ph_maskz_ymm: + ;CHECK: vfmsub213ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <16 x half> %a2 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a0, <16 x half> %a1, <16 x half> %neg) + %3 = load i16, i16* %mask + %4 = bitcast i16 %3 to <16 x i1> + %5 = select <16 x i1> %4, <16 x half> %2, <16 x half> zeroinitializer + ret <16 x half> %5 +} + +define <16 x half> 
@stack_fold_fmsub213ph_maskz_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2, i16* %mask) { + ;CHECK-LABEL: stack_fold_fmsub213ph_maskz_ymm: + ;CHECK: vfmsub213ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <16 x half> %a2 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a1, <16 x half> %a0, <16 x half> %neg) + %3 = load i16, i16* %mask + %4 = bitcast i16 %3 to <16 x i1> + %5 = select <16 x i1> %4, <16 x half> %2, <16 x half> zeroinitializer + ret <16 x half> %5 +} + +define <16 x half> @stack_fold_fmsub231ph_maskz_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2, i16* %mask) { + ;CHECK-LABEL: stack_fold_fmsub231ph_maskz_ymm: + ;CHECK: vfmsub231ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <16 x half> %a0 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a1, <16 x half> %a2, <16 x half> %neg) + %3 = load i16, i16* %mask + %4 = bitcast i16 %3 to <16 x i1> + %5 = select <16 x i1> %4, <16 x half> %2, <16 x half> zeroinitializer + ret <16 x half> %5 +} + +define <16 x half> @stack_fold_fmsub321ph_maskz_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2, i16* %mask) { + ;CHECK-LABEL: stack_fold_fmsub321ph_maskz_ymm: + ;CHECK: vfmsub231ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <16 x half> %a0 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a2, <16 x half> %a1, <16 x half> %neg) + %3 = load i16, i16* %mask + %4 = bitcast i16 %3 to <16 x i1> + %5 = select <16 x i1> %4, <16 x half> %2, <16 x half> zeroinitializer + ret <16 x half> %5 +} + +define <16 x half> @stack_fold_fmsub132ph_maskz_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2, i16* %mask) { + ;CHECK-LABEL: stack_fold_fmsub132ph_maskz_ymm: + ;CHECK: vfmsub132ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <16 x half> %a1 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a0, <16 x half> %a2, <16 x half> %neg) + %3 = load i16, i16* %mask + %4 = bitcast i16 %3 to <16 x i1> + %5 = select <16 x i1> %4, <16 x half> %2, <16 x half> 
zeroinitializer + ret <16 x half> %5 +} + +define <16 x half> @stack_fold_fmsub312ph_maskz_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2, i16* %mask) { + ;CHECK-LABEL: stack_fold_fmsub312ph_maskz_ymm: + ;CHECK: vfmsub132ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <16 x half> %a1 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %a2, <16 x half> %a0, <16 x half> %neg) + %3 = load i16, i16* %mask + %4 = bitcast i16 %3 to <16 x i1> + %5 = select <16 x i1> %4, <16 x half> %2, <16 x half> zeroinitializer + ret <16 x half> %5 +} + +define <16 x half> @stack_fold_fnmadd123ph_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmadd123ph_ymm: + ;CHECK: vfnmadd213ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <16 x half> %a0 + %3 = call <16 x half> @llvm.fma.v16f16(<16 x half> %2, <16 x half> %a1, <16 x half> %a2) + ret <16 x half> %3 +} + +define <16 x half> @stack_fold_fnmadd213ph_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmadd213ph_ymm: + ;CHECK: vfnmadd213ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <16 x half> %a1 + %3 = call <16 x half> @llvm.fma.v16f16(<16 x half> %2, <16 x half> %a0, <16 x half> %a2) + ret <16 x half> %3 +} + +define <16 x half> @stack_fold_fnmadd231ph_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmadd231ph_ymm: + ;CHECK: vfnmadd231ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <16 x half> %a1 + %3 = call <16 x half> @llvm.fma.v16f16(<16 x half> %2, <16 x half> %a2, <16 x half> %a0) + ret <16 x half> %3 +} + +define <16 x half> @stack_fold_fnmadd321ph_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmadd321ph_ymm: + ;CHECK: vfnmadd231ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", 
"=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <16 x half> %a2 + %3 = call <16 x half> @llvm.fma.v16f16(<16 x half> %2, <16 x half> %a1, <16 x half> %a0) + ret <16 x half> %3 +} + +define <16 x half> @stack_fold_fnmadd132ph_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmadd132ph_ymm: + ;CHECK: vfnmadd132ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <16 x half> %a0 + %3 = call <16 x half> @llvm.fma.v16f16(<16 x half> %2, <16 x half> %a2, <16 x half> %a1) + ret <16 x half> %3 +} + +define <16 x half> @stack_fold_fnmadd312ph_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmadd312ph_ymm: + ;CHECK: vfnmadd132ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <16 x half> %a2 + %3 = call <16 x half> @llvm.fma.v16f16(<16 x half> %2, <16 x half> %a0, <16 x half> %a1) + ret <16 x half> %3 +} + +define <16 x half> @stack_fold_fnmadd123ph_mask_ymm(<16 x half>* %p, <16 x half> %a1, <16 x half> %a2, i16 %mask) { + ;CHECK-LABEL: stack_fold_fnmadd123ph_mask_ymm: + ;CHECK: vfnmadd213ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <16 x half>, <16 x half>* %p + %neg = fneg <16 x half> %a0 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %neg, <16 x half> %a1, <16 x half> %a2) + %3 = bitcast i16 %mask to <16 x i1> + %4 = select <16 x i1> %3, <16 x half> %2, <16 x half> %a0 + ret <16 x half> %4 +} + +define <16 x half> @stack_fold_fnmadd213ph_mask_ymm(<16 x half>* %p, <16 x half> %a1, <16 x half> %a2, i16 %mask) { + ;CHECK-LABEL: stack_fold_fnmadd213ph_mask_ymm: + ;CHECK: vfnmadd213ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <16 x half>, <16 x half>* %p + %neg = fneg <16 x half> %a1 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %neg, <16 x half> %a0, <16 x 
half> %a2) + %3 = bitcast i16 %mask to <16 x i1> + %4 = select <16 x i1> %3, <16 x half> %2, <16 x half> %a0 + ret <16 x half> %4 +} + +define <16 x half> @stack_fold_fnmadd231ph_mask_ymm(<16 x half>* %p, <16 x half> %a1, <16 x half> %a2, i16 %mask) { + ;CHECK-LABEL: stack_fold_fnmadd231ph_mask_ymm: + ;CHECK: vfnmadd231ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <16 x half>, <16 x half>* %p + %neg = fneg <16 x half> %a1 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %neg, <16 x half> %a2, <16 x half> %a0) + %3 = bitcast i16 %mask to <16 x i1> + %4 = select <16 x i1> %3, <16 x half> %2, <16 x half> %a0 + ret <16 x half> %4 +} + +define <16 x half> @stack_fold_fnmadd321ph_mask_ymm(<16 x half>* %p, <16 x half> %a1, <16 x half> %a2, i16 %mask) { + ;CHECK-LABEL: stack_fold_fnmadd321ph_mask_ymm: + ;CHECK: vfnmadd231ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <16 x half>, <16 x half>* %p + %neg = fneg <16 x half> %a2 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %neg, <16 x half> %a1, <16 x half> %a0) + %3 = bitcast i16 %mask to <16 x i1> + %4 = select <16 x i1> %3, <16 x half> %2, <16 x half> %a0 + ret <16 x half> %4 +} + +define <16 x half> @stack_fold_fnmadd132ph_mask_ymm(<16 x half>* %p, <16 x half> %a1, <16 x half> %a2, i16 %mask) { + ;CHECK-LABEL: stack_fold_fnmadd132ph_mask_ymm: + ;CHECK: vfnmadd132ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <16 x half>, <16 x half>* %p + %neg = fneg <16 x half> %a0 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %neg, <16 x half> %a2, <16 x half> %a1) + %3 = bitcast i16 %mask to <16 x i1> + %4 = select <16 x i1> %3, <16 x half> %2, <16 x half> %a0 + ret <16 x half> %4 +} + +define <16 x half> @stack_fold_fnmadd312ph_mask_ymm(<16 x half>* %p, <16 x half> %a1, <16 x half> %a2, i16 %mask) { + ;CHECK-LABEL: stack_fold_fnmadd312ph_mask_ymm: + ;CHECK: vfnmadd132ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <16 x half>, <16 x half>* %p + %neg = fneg <16 x 
half> %a2 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %neg, <16 x half> %a0, <16 x half> %a1) + %3 = bitcast i16 %mask to <16 x i1> + %4 = select <16 x i1> %3, <16 x half> %2, <16 x half> %a0 + ret <16 x half> %4 +} + +define <16 x half> @stack_fold_fnmadd123ph_maskz_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2, i16* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd123ph_maskz_ymm: + ;CHECK: vfnmadd213ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <16 x half> %a0 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %neg, <16 x half> %a1, <16 x half> %a2) + %3 = load i16, i16* %mask + %4 = bitcast i16 %3 to <16 x i1> + %5 = select <16 x i1> %4, <16 x half> %2, <16 x half> zeroinitializer + ret <16 x half> %5 +} + +define <16 x half> @stack_fold_fnmadd213ph_maskz_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2, i16* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd213ph_maskz_ymm: + ;CHECK: vfnmadd213ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <16 x half> %a1 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %neg, <16 x half> %a0, <16 x half> %a2) + %3 = load i16, i16* %mask + %4 = bitcast i16 %3 to <16 x i1> + %5 = select <16 x i1> %4, <16 x half> %2, <16 x half> zeroinitializer + ret <16 x half> %5 +} + +define <16 x half> @stack_fold_fnmadd231ph_maskz_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2, i16* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd231ph_maskz_ymm: + ;CHECK: vfnmadd231ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <16 x half> %a1 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %neg, <16 x half> %a2, <16 x half> %a0) + %3 = load i16, i16* %mask + %4 = bitcast i16 %3 to <16 x i1> + %5 = select <16 x i1> %4, <16 x half> %2, <16 x half> zeroinitializer + ret <16 x half> %5 +} + +define <16 x half> @stack_fold_fnmadd321ph_maskz_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2, i16* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd321ph_maskz_ymm: + ;CHECK: vfnmadd231ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", 
"=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <16 x half> %a2 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %neg, <16 x half> %a1, <16 x half> %a0) + %3 = load i16, i16* %mask + %4 = bitcast i16 %3 to <16 x i1> + %5 = select <16 x i1> %4, <16 x half> %2, <16 x half> zeroinitializer + ret <16 x half> %5 +} + +define <16 x half> @stack_fold_fnmadd132ph_maskz_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2, i16* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd132ph_maskz_ymm: + ;CHECK: vfnmadd132ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <16 x half> %a0 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %neg, <16 x half> %a2, <16 x half> %a1) + %3 = load i16, i16* %mask + %4 = bitcast i16 %3 to <16 x i1> + %5 = select <16 x i1> %4, <16 x half> %2, <16 x half> zeroinitializer + ret <16 x half> %5 +} + +define <16 x half> @stack_fold_fnmadd312ph_maskz_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2, i16* %mask) { + ;CHECK-LABEL: stack_fold_fnmadd312ph_maskz_ymm: + ;CHECK: vfnmadd132ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <16 x half> %a2 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %neg, <16 x half> %a0, <16 x half> %a1) + %3 = load i16, i16* %mask + %4 = bitcast i16 %3 to <16 x i1> + %5 = select <16 x i1> %4, <16 x half> %2, <16 x half> zeroinitializer + ret <16 x half> %5 +} + +define <16 x half> @stack_fold_fnmsub123ph_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmsub123ph_ymm: + ;CHECK: vfnmsub213ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <16 x half> %a0 + %3 = fneg <16 x half> %a2 + %4 = call <16 x half> @llvm.fma.v16f16(<16 x half> %2, <16 x half> %a1, <16 x half> %3) + ret <16 x half> %4 +} + +define <16 x half> @stack_fold_fnmsub213ph_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmsub213ph_ymm: + ;CHECK: vfnmsub213ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", 
"=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <16 x half> %a1 + %3 = fneg <16 x half> %a2 + %4 = call <16 x half> @llvm.fma.v16f16(<16 x half> %2, <16 x half> %a0, <16 x half> %3) + ret <16 x half> %4 +} + +define <16 x half> @stack_fold_fnmsub231ph_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmsub231ph_ymm: + ;CHECK: vfnmsub231ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <16 x half> %a1 + %3 = fneg <16 x half> %a0 + %4 = call <16 x half> @llvm.fma.v16f16(<16 x half> %2, <16 x half> %a2, <16 x half> %3) + ret <16 x half> %4 +} + +define <16 x half> @stack_fold_fnmsub321ph_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmsub321ph_ymm: + ;CHECK: vfnmsub231ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <16 x half> %a2 + %3 = fneg <16 x half> %a0 + %4 = call <16 x half> @llvm.fma.v16f16(<16 x half> %2, <16 x half> %a1, <16 x half> %3) + ret <16 x half> %4 +} + +define <16 x half> @stack_fold_fnmsub132ph_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmsub132ph_ymm: + ;CHECK: vfnmsub132ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <16 x half> %a0 + %3 = fneg <16 x half> %a1 + %4 = call <16 x half> @llvm.fma.v16f16(<16 x half> %2, <16 x half> %a2, <16 x half> %3) + ret <16 x half> %4 +} + +define <16 x half> @stack_fold_fnmsub312ph_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2) { + ;CHECK-LABEL: stack_fold_fnmsub312ph_ymm: + ;CHECK: vfnmsub132ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %2 = fneg <16 x half> %a2 + %3 = fneg <16 x half> %a1 + %4 = call <16 x half> @llvm.fma.v16f16(<16 x half> %2, <16 x half> %a0, <16 x half> %3) + ret <16 x half> %4 +} + +define <16 x half> @stack_fold_fnmsub123ph_mask_ymm(<16 x half>* %p, <16 x half> %a1, <16 x half> %a2, i16 
%mask) { + ;CHECK-LABEL: stack_fold_fnmsub123ph_mask_ymm: + ;CHECK: vfnmsub213ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <16 x half>, <16 x half>* %p + %neg = fneg <16 x half> %a2 + %neg1 = fneg <16 x half> %a0 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %neg1, <16 x half> %a1, <16 x half> %neg) + %3 = bitcast i16 %mask to <16 x i1> + %4 = select <16 x i1> %3, <16 x half> %2, <16 x half> %a0 + ret <16 x half> %4 +} + +define <16 x half> @stack_fold_fnmsub213ph_mask_ymm(<16 x half>* %p, <16 x half> %a1, <16 x half> %a2, i16 %mask) { + ;CHECK-LABEL: stack_fold_fnmsub213ph_mask_ymm: + ;CHECK: vfnmsub213ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <16 x half>, <16 x half>* %p + %neg = fneg <16 x half> %a2 + %neg1 = fneg <16 x half> %a1 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %neg1, <16 x half> %a0, <16 x half> %neg) + %3 = bitcast i16 %mask to <16 x i1> + %4 = select <16 x i1> %3, <16 x half> %2, <16 x half> %a0 + ret <16 x half> %4 +} + +define <16 x half> @stack_fold_fnmsub231ph_mask_ymm(<16 x half>* %p, <16 x half> %a1, <16 x half> %a2, i16 %mask) { + ;CHECK-LABEL: stack_fold_fnmsub231ph_mask_ymm: + ;CHECK: vfnmsub231ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <16 x half>, <16 x half>* %p + %neg = fneg <16 x half> %a0 + %neg1 = fneg <16 x half> %a1 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %neg1, <16 x half> %a2, <16 x half> %neg) + %3 = bitcast i16 %mask to <16 x i1> + %4 = select <16 x i1> %3, <16 x half> %2, <16 x half> %a0 + ret <16 x half> %4 +} + +define <16 x half> @stack_fold_fnmsub321ph_mask_ymm(<16 x half>* %p, <16 x half> %a1, <16 x half> %a2, i16 %mask) { + ;CHECK-LABEL: stack_fold_fnmsub321ph_mask_ymm: + ;CHECK: vfnmsub231ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <16 x half>, <16 x half>* %p + %neg = fneg <16 x half> %a0 + %neg1 = fneg <16 x half> %a2 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %neg1, <16 x half> %a1, <16 x half> %neg) + %3 = 
bitcast i16 %mask to <16 x i1> + %4 = select <16 x i1> %3, <16 x half> %2, <16 x half> %a0 + ret <16 x half> %4 +} + +define <16 x half> @stack_fold_fnmsub132ph_mask_ymm(<16 x half>* %p, <16 x half> %a1, <16 x half> %a2, i16 %mask) { + ;CHECK-LABEL: stack_fold_fnmsub132ph_mask_ymm: + ;CHECK: vfnmsub132ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <16 x half>, <16 x half>* %p + %neg = fneg <16 x half> %a1 + %neg1 = fneg <16 x half> %a0 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %neg1, <16 x half> %a2, <16 x half> %neg) + %3 = bitcast i16 %mask to <16 x i1> + %4 = select <16 x i1> %3, <16 x half> %2, <16 x half> %a0 + ret <16 x half> %4 +} + +define <16 x half> @stack_fold_fnmsub312ph_mask_ymm(<16 x half>* %p, <16 x half> %a1, <16 x half> %a2, i16 %mask) { + ;CHECK-LABEL: stack_fold_fnmsub312ph_mask_ymm: + ;CHECK: vfnmsub132ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %a0 = load <16 x half>, <16 x half>* %p + %neg = fneg <16 x half> %a1 + %neg1 = fneg <16 x half> %a2 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %neg1, <16 x half> %a0, <16 x half> %neg) + %3 = bitcast i16 %mask to <16 x i1> + %4 = select <16 x i1> %3, <16 x half> %2, <16 x half> %a0 + ret <16 x half> %4 +} + +define <16 x half> @stack_fold_fnmsub123ph_maskz_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2, i16* %mask) { + ;CHECK-LABEL: stack_fold_fnmsub123ph_maskz_ymm: + ;CHECK: vfnmsub213ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <16 x half> %a2 + %neg1 = fneg <16 x half> %a0 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %neg1, <16 x half> %a1, <16 x half> %neg) + %3 = load i16, i16* %mask + %4 = bitcast i16 %3 to <16 x i1> + %5 = select <16 x i1> %4, <16 x half> %2, <16 x half> zeroinitializer + ret <16 x half> %5 +} + +define <16 x half> @stack_fold_fnmsub213ph_maskz_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2, i16* %mask) { + ;CHECK-LABEL: stack_fold_fnmsub213ph_maskz_ymm: + ;CHECK: vfnmsub213ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", 
"=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <16 x half> %a2 + %neg1 = fneg <16 x half> %a1 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %neg1, <16 x half> %a0, <16 x half> %neg) + %3 = load i16, i16* %mask + %4 = bitcast i16 %3 to <16 x i1> + %5 = select <16 x i1> %4, <16 x half> %2, <16 x half> zeroinitializer + ret <16 x half> %5 +} + +define <16 x half> @stack_fold_fnmsub231ph_maskz_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2, i16* %mask) { + ;CHECK-LABEL: stack_fold_fnmsub231ph_maskz_ymm: + ;CHECK: vfnmsub231ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <16 x half> %a0 + %neg1 = fneg <16 x half> %a1 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %neg1, <16 x half> %a2, <16 x half> %neg) + %3 = load i16, i16* %mask + %4 = bitcast i16 %3 to <16 x i1> + %5 = select <16 x i1> %4, <16 x half> %2, <16 x half> zeroinitializer + ret <16 x half> %5 +} + +define <16 x half> @stack_fold_fnmsub321ph_maskz_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2, i16* %mask) { + ;CHECK-LABEL: stack_fold_fnmsub321ph_maskz_ymm: + ;CHECK: vfnmsub231ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <16 x half> %a0 + %neg1 = fneg <16 x half> %a2 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %neg1, <16 x half> %a1, <16 x half> %neg) + %3 = load i16, i16* %mask + %4 = bitcast i16 %3 to <16 x i1> + %5 = select <16 x i1> %4, <16 x half> %2, <16 x half> zeroinitializer + ret <16 x half> %5 +} + +define <16 x half> @stack_fold_fnmsub132ph_maskz_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2, i16* %mask) { + ;CHECK-LABEL: stack_fold_fnmsub132ph_maskz_ymm: + ;CHECK: vfnmsub132ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <16 x half> %a1 + %neg1 = fneg <16 x half> %a0 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %neg1, <16 x half> %a2, <16 x half> %neg) + %3 = load i16, i16* %mask + %4 = bitcast i16 %3 to <16 x i1> + %5 = select <16 x i1> %4, <16 x half> %2, <16 x half> zeroinitializer + ret <16 x half> %5 +} + +define <16 x half> @stack_fold_fnmsub312ph_maskz_ymm(<16 x half> %a0, <16 x half> %a1, <16 x half> %a2, i16* %mask) { + ;CHECK-LABEL: 
stack_fold_fnmsub312ph_maskz_ymm: + ;CHECK: vfnmsub132ph {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{{%k[0-7]}}} {z} {{.*#+}} 32-byte Folded Reload + %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() + %neg = fneg <16 x half> %a1 + %neg1 = fneg <16 x half> %a2 + %2 = call <16 x half> @llvm.fma.v16f16(<16 x half> %neg1, <16 x half> %a0, <16 x half> %neg) + %3 = load i16, i16* %mask + %4 = bitcast i16 %3 to <16 x i1> + %5 = select <16 x i1> %4, <16 x half> %2, <16 x half> zeroinitializer + ret <16 x half> %5 +} diff --git a/llvm/test/CodeGen/X86/vec-strict-128-fp16.ll b/llvm/test/CodeGen/X86/vec-strict-128-fp16.ll index f73742947b5cc..2c3d7ceb37d03 100644 --- a/llvm/test/CodeGen/X86/vec-strict-128-fp16.ll +++ b/llvm/test/CodeGen/X86/vec-strict-128-fp16.ll @@ -17,6 +17,7 @@ declare double @llvm.experimental.constrained.fpext.f64.f16(half, metadata) declare <2 x float> @llvm.experimental.constrained.fpext.v2f32.v2f16(<2 x half>, metadata) declare <4 x float> @llvm.experimental.constrained.fpext.v4f32.v4f16(<4 x half>, metadata) declare <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f16(<2 x half>, metadata) +declare <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half>, <8 x half>, <8 x half>, metadata, metadata) define <8 x half> @f2(<8 x half> %a, <8 x half> %b) #0 { ; CHECK-LABEL: f2: @@ -101,6 +102,17 @@ define <2 x double> @f12(<2 x double> %a0, <8 x half> %a1) #0 { ret <2 x double> %res } +define <8 x half> @f13(<8 x half> %a, <8 x half> %b, <8 x half> %c) #0 { +; CHECK-LABEL: f13: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213ph %xmm2, %xmm1, %xmm0 +; CHECK-NEXT: ret{{[l|q]}} + %res = call <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half> %a, <8 x half> %b, <8 x half> %c, + metadata !"round.dynamic", + metadata !"fpexcept.strict") #0 + ret <8 x half> %res +} + define <2 x double> @f15(<2 x half> %a) #0 { ; CHECK-LABEL: f15: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/X86/vec-strict-256-fp16.ll b/llvm/test/CodeGen/X86/vec-strict-256-fp16.ll index d5868287823fb..a2e02508327c8 100644 --- a/llvm/test/CodeGen/X86/vec-strict-256-fp16.ll +++ b/llvm/test/CodeGen/X86/vec-strict-256-fp16.ll @@ -7,6 +7,7 @@ declare <16 x half> @llvm.experimental.constrained.fsub.v16f16(<16 x half>, <16 declare <16 x half> @llvm.experimental.constrained.fmul.v16f16(<16 x half>, <16 x half>, metadata, metadata) declare <16 x half> @llvm.experimental.constrained.fdiv.v16f16(<16 x half>, <16 x half>, metadata, metadata) declare <16 x half> @llvm.experimental.constrained.sqrt.v16f16(<16 x half>, metadata, metadata) +declare <16 x half> @llvm.experimental.constrained.fma.v16f16(<16 x half>, <16 x half>, <16 x half>, metadata, metadata) declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f16(<4 x half>, metadata) declare <8 x float> @llvm.experimental.constrained.fpext.v8f32.v8f16(<8 x half>, metadata) declare <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f64(<4 x double>, metadata, metadata) @@ -98,6 +99,17 @@ define <4 x half> @f12(<4 x double> %a) #0 { ret <4 x half> %ret } +define <16 x half> @f13(<16 x half> %a, <16 x half> %b, <16 x half> %c) #0 { +; CHECK-LABEL: f13: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213ph %ymm2, %ymm1, %ymm0 +; CHECK-NEXT: ret{{[l|q]}} + %res = call 
<16 x half> @llvm.experimental.constrained.fma.v16f16(<16 x half> %a, <16 x half> %b, <16 x half> %c, + metadata !"round.dynamic", + metadata !"fpexcept.strict") #0 + ret <16 x half> %res +} + define <8 x float> @f14(<8 x half> %a) #0 { ; CHECK-LABEL: f14: ; CHECK: # %bb.0: diff --git a/llvm/test/CodeGen/X86/vec-strict-512-fp16.ll b/llvm/test/CodeGen/X86/vec-strict-512-fp16.ll index 6273a525b15d6..dfbc11a43d3d7 100644 --- a/llvm/test/CodeGen/X86/vec-strict-512-fp16.ll +++ b/llvm/test/CodeGen/X86/vec-strict-512-fp16.ll @@ -11,6 +11,7 @@ declare <8 x double> @llvm.experimental.constrained.fpext.v8f64.v8f16(<8 x half> declare <16 x float> @llvm.experimental.constrained.fpext.v16f32.v16f16(<16 x half>, metadata) declare <8 x half> @llvm.experimental.constrained.fptrunc.v8f16.v8f64(<8 x double>, metadata, metadata) declare <16 x half> @llvm.experimental.constrained.fptrunc.v16f16.v16f32(<16 x float>, metadata, metadata) +declare <32 x half> @llvm.experimental.constrained.fma.v32f16(<32 x half>, <32 x half>, <32 x half>, metadata, metadata) declare <32 x half> @llvm.experimental.constrained.ceil.v32f16(<32 x half>, metadata) declare <32 x half> @llvm.experimental.constrained.floor.v32f16(<32 x half>, metadata) declare <32 x half> @llvm.experimental.constrained.trunc.v32f16(<32 x half>, metadata) @@ -97,6 +98,17 @@ define <8 x half> @f12(<8 x double> %a) #0 { ret <8 x half> %ret } +define <32 x half> @f13(<32 x half> %a, <32 x half> %b, <32 x half> %c) #0 { +; CHECK-LABEL: f13: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213ph %zmm2, %zmm1, %zmm0 +; CHECK-NEXT: ret{{[l|q]}} + %res = call <32 x half> @llvm.experimental.constrained.fma.v32f16(<32 x half> %a, <32 x half> %b, <32 x half> %c, + metadata !"round.dynamic", + metadata !"fpexcept.strict") #0 + ret <32 x half> %res +} + define <16 x float> @f14(<16 x half> %a) #0 { ; CHECK-LABEL: f14: ; CHECK: # %bb.0: diff --git a/llvm/test/MC/Disassembler/X86/avx512fp16.txt b/llvm/test/MC/Disassembler/X86/avx512fp16.txt index 67514e50b1e12..ca1772175f95d 100644 --- a/llvm/test/MC/Disassembler/X86/avx512fp16.txt +++ b/llvm/test/MC/Disassembler/X86/avx512fp16.txt @@ -1764,3 +1764,723 @@ # ATT: vsqrtsh -256(%rdx), %xmm29, %xmm30 {%k7} {z} # INTEL: vsqrtsh xmm30 {k7} {z}, xmm29, word ptr [rdx - 256] 0x62,0x65,0x16,0x87,0x51,0x72,0x80 + +# ATT: vfmadd132ph %zmm28, %zmm29, %zmm30 +# INTEL: vfmadd132ph zmm30, zmm29, zmm28 +0x62,0x06,0x15,0x40,0x98,0xf4 + +# ATT: vfmadd132ph {rn-sae}, %zmm28, %zmm29, %zmm30 +# INTEL: vfmadd132ph zmm30, zmm29, zmm28, {rn-sae} +0x62,0x06,0x15,0x10,0x98,0xf4 + +# ATT: vfmadd132ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +# INTEL: vfmadd132ph zmm30 {k7}, zmm29, zmmword ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x47,0x98,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vfmadd132ph (%r9){1to32}, %zmm29, %zmm30 +# INTEL: vfmadd132ph zmm30, zmm29, word ptr [r9]{1to32} +0x62,0x46,0x15,0x50,0x98,0x31 + +# ATT: vfmadd132ph 8128(%rcx), %zmm29, %zmm30 +# INTEL: vfmadd132ph zmm30, zmm29, zmmword ptr [rcx + 8128] +0x62,0x66,0x15,0x40,0x98,0x71,0x7f + +# ATT: vfmadd132ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +# INTEL: vfmadd132ph zmm30 {k7} {z}, zmm29, word ptr [rdx - 256]{1to32} +0x62,0x66,0x15,0xd7,0x98,0x72,0x80 + +# ATT: vfmadd132sh %xmm28, %xmm29, %xmm30 +# INTEL: vfmadd132sh xmm30, xmm29, xmm28 +0x62,0x06,0x15,0x00,0x99,0xf4 + +# ATT: vfmadd132sh {rn-sae}, %xmm28, %xmm29, %xmm30 +# INTEL: vfmadd132sh xmm30, xmm29, xmm28, {rn-sae} +0x62,0x06,0x15,0x10,0x99,0xf4 + +# ATT: vfmadd132sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} +# INTEL: 
vfmadd132sh xmm30 {k7}, xmm29, word ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x07,0x99,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vfmadd132sh (%r9), %xmm29, %xmm30 +# INTEL: vfmadd132sh xmm30, xmm29, word ptr [r9] +0x62,0x46,0x15,0x00,0x99,0x31 + +# ATT: vfmadd132sh 254(%rcx), %xmm29, %xmm30 +# INTEL: vfmadd132sh xmm30, xmm29, word ptr [rcx + 254] +0x62,0x66,0x15,0x00,0x99,0x71,0x7f + +# ATT: vfmadd132sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} +# INTEL: vfmadd132sh xmm30 {k7} {z}, xmm29, word ptr [rdx - 256] +0x62,0x66,0x15,0x87,0x99,0x72,0x80 + +# ATT: vfmadd213ph %zmm28, %zmm29, %zmm30 +# INTEL: vfmadd213ph zmm30, zmm29, zmm28 +0x62,0x06,0x15,0x40,0xa8,0xf4 + +# ATT: vfmadd213ph {rn-sae}, %zmm28, %zmm29, %zmm30 +# INTEL: vfmadd213ph zmm30, zmm29, zmm28, {rn-sae} +0x62,0x06,0x15,0x10,0xa8,0xf4 + +# ATT: vfmadd213ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +# INTEL: vfmadd213ph zmm30 {k7}, zmm29, zmmword ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x47,0xa8,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vfmadd213ph (%r9){1to32}, %zmm29, %zmm30 +# INTEL: vfmadd213ph zmm30, zmm29, word ptr [r9]{1to32} +0x62,0x46,0x15,0x50,0xa8,0x31 + +# ATT: vfmadd213ph 8128(%rcx), %zmm29, %zmm30 +# INTEL: vfmadd213ph zmm30, zmm29, zmmword ptr [rcx + 8128] +0x62,0x66,0x15,0x40,0xa8,0x71,0x7f + +# ATT: vfmadd213ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +# INTEL: vfmadd213ph zmm30 {k7} {z}, zmm29, word ptr [rdx - 256]{1to32} +0x62,0x66,0x15,0xd7,0xa8,0x72,0x80 + +# ATT: vfmadd213sh %xmm28, %xmm29, %xmm30 +# INTEL: vfmadd213sh xmm30, xmm29, xmm28 +0x62,0x06,0x15,0x00,0xa9,0xf4 + +# ATT: vfmadd213sh {rn-sae}, %xmm28, %xmm29, %xmm30 +# INTEL: vfmadd213sh xmm30, xmm29, xmm28, {rn-sae} +0x62,0x06,0x15,0x10,0xa9,0xf4 + +# ATT: vfmadd213sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} +# INTEL: vfmadd213sh xmm30 {k7}, xmm29, word ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x07,0xa9,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vfmadd213sh (%r9), %xmm29, %xmm30 +# INTEL: vfmadd213sh xmm30, xmm29, word ptr [r9] +0x62,0x46,0x15,0x00,0xa9,0x31 + +# ATT: vfmadd213sh 254(%rcx), %xmm29, %xmm30 +# INTEL: vfmadd213sh xmm30, xmm29, word ptr [rcx + 254] +0x62,0x66,0x15,0x00,0xa9,0x71,0x7f + +# ATT: vfmadd213sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} +# INTEL: vfmadd213sh xmm30 {k7} {z}, xmm29, word ptr [rdx - 256] +0x62,0x66,0x15,0x87,0xa9,0x72,0x80 + +# ATT: vfmadd231ph %zmm28, %zmm29, %zmm30 +# INTEL: vfmadd231ph zmm30, zmm29, zmm28 +0x62,0x06,0x15,0x40,0xb8,0xf4 + +# ATT: vfmadd231ph {rn-sae}, %zmm28, %zmm29, %zmm30 +# INTEL: vfmadd231ph zmm30, zmm29, zmm28, {rn-sae} +0x62,0x06,0x15,0x10,0xb8,0xf4 + +# ATT: vfmadd231ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +# INTEL: vfmadd231ph zmm30 {k7}, zmm29, zmmword ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x47,0xb8,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vfmadd231ph (%r9){1to32}, %zmm29, %zmm30 +# INTEL: vfmadd231ph zmm30, zmm29, word ptr [r9]{1to32} +0x62,0x46,0x15,0x50,0xb8,0x31 + +# ATT: vfmadd231ph 8128(%rcx), %zmm29, %zmm30 +# INTEL: vfmadd231ph zmm30, zmm29, zmmword ptr [rcx + 8128] +0x62,0x66,0x15,0x40,0xb8,0x71,0x7f + +# ATT: vfmadd231ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +# INTEL: vfmadd231ph zmm30 {k7} {z}, zmm29, word ptr [rdx - 256]{1to32} +0x62,0x66,0x15,0xd7,0xb8,0x72,0x80 + +# ATT: vfmadd231sh %xmm28, %xmm29, %xmm30 +# INTEL: vfmadd231sh xmm30, xmm29, xmm28 +0x62,0x06,0x15,0x00,0xb9,0xf4 + +# ATT: vfmadd231sh {rn-sae}, %xmm28, %xmm29, %xmm30 +# INTEL: vfmadd231sh xmm30, xmm29, xmm28, {rn-sae} +0x62,0x06,0x15,0x10,0xb9,0xf4 + +# ATT: vfmadd231sh 
268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} +# INTEL: vfmadd231sh xmm30 {k7}, xmm29, word ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x07,0xb9,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vfmadd231sh (%r9), %xmm29, %xmm30 +# INTEL: vfmadd231sh xmm30, xmm29, word ptr [r9] +0x62,0x46,0x15,0x00,0xb9,0x31 + +# ATT: vfmadd231sh 254(%rcx), %xmm29, %xmm30 +# INTEL: vfmadd231sh xmm30, xmm29, word ptr [rcx + 254] +0x62,0x66,0x15,0x00,0xb9,0x71,0x7f + +# ATT: vfmadd231sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} +# INTEL: vfmadd231sh xmm30 {k7} {z}, xmm29, word ptr [rdx - 256] +0x62,0x66,0x15,0x87,0xb9,0x72,0x80 + +# ATT: vfmaddsub132ph %zmm28, %zmm29, %zmm30 +# INTEL: vfmaddsub132ph zmm30, zmm29, zmm28 +0x62,0x06,0x15,0x40,0x96,0xf4 + +# ATT: vfmaddsub132ph {rn-sae}, %zmm28, %zmm29, %zmm30 +# INTEL: vfmaddsub132ph zmm30, zmm29, zmm28, {rn-sae} +0x62,0x06,0x15,0x10,0x96,0xf4 + +# ATT: vfmaddsub132ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +# INTEL: vfmaddsub132ph zmm30 {k7}, zmm29, zmmword ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x47,0x96,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vfmaddsub132ph (%r9){1to32}, %zmm29, %zmm30 +# INTEL: vfmaddsub132ph zmm30, zmm29, word ptr [r9]{1to32} +0x62,0x46,0x15,0x50,0x96,0x31 + +# ATT: vfmaddsub132ph 8128(%rcx), %zmm29, %zmm30 +# INTEL: vfmaddsub132ph zmm30, zmm29, zmmword ptr [rcx + 8128] +0x62,0x66,0x15,0x40,0x96,0x71,0x7f + +# ATT: vfmaddsub132ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +# INTEL: vfmaddsub132ph zmm30 {k7} {z}, zmm29, word ptr [rdx - 256]{1to32} +0x62,0x66,0x15,0xd7,0x96,0x72,0x80 + +# ATT: vfmaddsub213ph %zmm28, %zmm29, %zmm30 +# INTEL: vfmaddsub213ph zmm30, zmm29, zmm28 +0x62,0x06,0x15,0x40,0xa6,0xf4 + +# ATT: vfmaddsub213ph {rn-sae}, %zmm28, %zmm29, %zmm30 +# INTEL: vfmaddsub213ph zmm30, zmm29, zmm28, {rn-sae} +0x62,0x06,0x15,0x10,0xa6,0xf4 + +# ATT: vfmaddsub213ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +# INTEL: vfmaddsub213ph zmm30 {k7}, zmm29, zmmword ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x47,0xa6,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vfmaddsub213ph (%r9){1to32}, %zmm29, %zmm30 +# INTEL: vfmaddsub213ph zmm30, zmm29, word ptr [r9]{1to32} +0x62,0x46,0x15,0x50,0xa6,0x31 + +# ATT: vfmaddsub213ph 8128(%rcx), %zmm29, %zmm30 +# INTEL: vfmaddsub213ph zmm30, zmm29, zmmword ptr [rcx + 8128] +0x62,0x66,0x15,0x40,0xa6,0x71,0x7f + +# ATT: vfmaddsub213ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +# INTEL: vfmaddsub213ph zmm30 {k7} {z}, zmm29, word ptr [rdx - 256]{1to32} +0x62,0x66,0x15,0xd7,0xa6,0x72,0x80 + +# ATT: vfmaddsub231ph %zmm28, %zmm29, %zmm30 +# INTEL: vfmaddsub231ph zmm30, zmm29, zmm28 +0x62,0x06,0x15,0x40,0xb6,0xf4 + +# ATT: vfmaddsub231ph {rn-sae}, %zmm28, %zmm29, %zmm30 +# INTEL: vfmaddsub231ph zmm30, zmm29, zmm28, {rn-sae} +0x62,0x06,0x15,0x10,0xb6,0xf4 + +# ATT: vfmaddsub231ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +# INTEL: vfmaddsub231ph zmm30 {k7}, zmm29, zmmword ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x47,0xb6,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vfmaddsub231ph (%r9){1to32}, %zmm29, %zmm30 +# INTEL: vfmaddsub231ph zmm30, zmm29, word ptr [r9]{1to32} +0x62,0x46,0x15,0x50,0xb6,0x31 + +# ATT: vfmaddsub231ph 8128(%rcx), %zmm29, %zmm30 +# INTEL: vfmaddsub231ph zmm30, zmm29, zmmword ptr [rcx + 8128] +0x62,0x66,0x15,0x40,0xb6,0x71,0x7f + +# ATT: vfmaddsub231ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +# INTEL: vfmaddsub231ph zmm30 {k7} {z}, zmm29, word ptr [rdx - 256]{1to32} +0x62,0x66,0x15,0xd7,0xb6,0x72,0x80 + +# ATT: vfmsub132ph %zmm28, %zmm29, %zmm30 +# INTEL: vfmsub132ph zmm30, zmm29, zmm28 
+0x62,0x06,0x15,0x40,0x9a,0xf4 + +# ATT: vfmsub132ph {rn-sae}, %zmm28, %zmm29, %zmm30 +# INTEL: vfmsub132ph zmm30, zmm29, zmm28, {rn-sae} +0x62,0x06,0x15,0x10,0x9a,0xf4 + +# ATT: vfmsub132ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +# INTEL: vfmsub132ph zmm30 {k7}, zmm29, zmmword ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x47,0x9a,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vfmsub132ph (%r9){1to32}, %zmm29, %zmm30 +# INTEL: vfmsub132ph zmm30, zmm29, word ptr [r9]{1to32} +0x62,0x46,0x15,0x50,0x9a,0x31 + +# ATT: vfmsub132ph 8128(%rcx), %zmm29, %zmm30 +# INTEL: vfmsub132ph zmm30, zmm29, zmmword ptr [rcx + 8128] +0x62,0x66,0x15,0x40,0x9a,0x71,0x7f + +# ATT: vfmsub132ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +# INTEL: vfmsub132ph zmm30 {k7} {z}, zmm29, word ptr [rdx - 256]{1to32} +0x62,0x66,0x15,0xd7,0x9a,0x72,0x80 + +# ATT: vfmsub132sh %xmm28, %xmm29, %xmm30 +# INTEL: vfmsub132sh xmm30, xmm29, xmm28 +0x62,0x06,0x15,0x00,0x9b,0xf4 + +# ATT: vfmsub132sh {rn-sae}, %xmm28, %xmm29, %xmm30 +# INTEL: vfmsub132sh xmm30, xmm29, xmm28, {rn-sae} +0x62,0x06,0x15,0x10,0x9b,0xf4 + +# ATT: vfmsub132sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} +# INTEL: vfmsub132sh xmm30 {k7}, xmm29, word ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x07,0x9b,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vfmsub132sh (%r9), %xmm29, %xmm30 +# INTEL: vfmsub132sh xmm30, xmm29, word ptr [r9] +0x62,0x46,0x15,0x00,0x9b,0x31 + +# ATT: vfmsub132sh 254(%rcx), %xmm29, %xmm30 +# INTEL: vfmsub132sh xmm30, xmm29, word ptr [rcx + 254] +0x62,0x66,0x15,0x00,0x9b,0x71,0x7f + +# ATT: vfmsub132sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} +# INTEL: vfmsub132sh xmm30 {k7} {z}, xmm29, word ptr [rdx - 256] +0x62,0x66,0x15,0x87,0x9b,0x72,0x80 + +# ATT: vfmsub213ph %zmm28, %zmm29, %zmm30 +# INTEL: vfmsub213ph zmm30, zmm29, zmm28 +0x62,0x06,0x15,0x40,0xaa,0xf4 + +# ATT: vfmsub213ph {rn-sae}, %zmm28, %zmm29, %zmm30 +# INTEL: vfmsub213ph zmm30, zmm29, zmm28, {rn-sae} +0x62,0x06,0x15,0x10,0xaa,0xf4 + +# ATT: vfmsub213ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +# INTEL: vfmsub213ph zmm30 {k7}, zmm29, zmmword ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x47,0xaa,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vfmsub213ph (%r9){1to32}, %zmm29, %zmm30 +# INTEL: vfmsub213ph zmm30, zmm29, word ptr [r9]{1to32} +0x62,0x46,0x15,0x50,0xaa,0x31 + +# ATT: vfmsub213ph 8128(%rcx), %zmm29, %zmm30 +# INTEL: vfmsub213ph zmm30, zmm29, zmmword ptr [rcx + 8128] +0x62,0x66,0x15,0x40,0xaa,0x71,0x7f + +# ATT: vfmsub213ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +# INTEL: vfmsub213ph zmm30 {k7} {z}, zmm29, word ptr [rdx - 256]{1to32} +0x62,0x66,0x15,0xd7,0xaa,0x72,0x80 + +# ATT: vfmsub213sh %xmm28, %xmm29, %xmm30 +# INTEL: vfmsub213sh xmm30, xmm29, xmm28 +0x62,0x06,0x15,0x00,0xab,0xf4 + +# ATT: vfmsub213sh {rn-sae}, %xmm28, %xmm29, %xmm30 +# INTEL: vfmsub213sh xmm30, xmm29, xmm28, {rn-sae} +0x62,0x06,0x15,0x10,0xab,0xf4 + +# ATT: vfmsub213sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} +# INTEL: vfmsub213sh xmm30 {k7}, xmm29, word ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x07,0xab,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vfmsub213sh (%r9), %xmm29, %xmm30 +# INTEL: vfmsub213sh xmm30, xmm29, word ptr [r9] +0x62,0x46,0x15,0x00,0xab,0x31 + +# ATT: vfmsub213sh 254(%rcx), %xmm29, %xmm30 +# INTEL: vfmsub213sh xmm30, xmm29, word ptr [rcx + 254] +0x62,0x66,0x15,0x00,0xab,0x71,0x7f + +# ATT: vfmsub213sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} +# INTEL: vfmsub213sh xmm30 {k7} {z}, xmm29, word ptr [rdx - 256] +0x62,0x66,0x15,0x87,0xab,0x72,0x80 + +# ATT: vfmsub231ph %zmm28, %zmm29, 
%zmm30 +# INTEL: vfmsub231ph zmm30, zmm29, zmm28 +0x62,0x06,0x15,0x40,0xba,0xf4 + +# ATT: vfmsub231ph {rn-sae}, %zmm28, %zmm29, %zmm30 +# INTEL: vfmsub231ph zmm30, zmm29, zmm28, {rn-sae} +0x62,0x06,0x15,0x10,0xba,0xf4 + +# ATT: vfmsub231ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +# INTEL: vfmsub231ph zmm30 {k7}, zmm29, zmmword ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x47,0xba,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vfmsub231ph (%r9){1to32}, %zmm29, %zmm30 +# INTEL: vfmsub231ph zmm30, zmm29, word ptr [r9]{1to32} +0x62,0x46,0x15,0x50,0xba,0x31 + +# ATT: vfmsub231ph 8128(%rcx), %zmm29, %zmm30 +# INTEL: vfmsub231ph zmm30, zmm29, zmmword ptr [rcx + 8128] +0x62,0x66,0x15,0x40,0xba,0x71,0x7f + +# ATT: vfmsub231ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +# INTEL: vfmsub231ph zmm30 {k7} {z}, zmm29, word ptr [rdx - 256]{1to32} +0x62,0x66,0x15,0xd7,0xba,0x72,0x80 + +# ATT: vfmsub231sh %xmm28, %xmm29, %xmm30 +# INTEL: vfmsub231sh xmm30, xmm29, xmm28 +0x62,0x06,0x15,0x00,0xbb,0xf4 + +# ATT: vfmsub231sh {rn-sae}, %xmm28, %xmm29, %xmm30 +# INTEL: vfmsub231sh xmm30, xmm29, xmm28, {rn-sae} +0x62,0x06,0x15,0x10,0xbb,0xf4 + +# ATT: vfmsub231sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} +# INTEL: vfmsub231sh xmm30 {k7}, xmm29, word ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x07,0xbb,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vfmsub231sh (%r9), %xmm29, %xmm30 +# INTEL: vfmsub231sh xmm30, xmm29, word ptr [r9] +0x62,0x46,0x15,0x00,0xbb,0x31 + +# ATT: vfmsub231sh 254(%rcx), %xmm29, %xmm30 +# INTEL: vfmsub231sh xmm30, xmm29, word ptr [rcx + 254] +0x62,0x66,0x15,0x00,0xbb,0x71,0x7f + +# ATT: vfmsub231sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} +# INTEL: vfmsub231sh xmm30 {k7} {z}, xmm29, word ptr [rdx - 256] +0x62,0x66,0x15,0x87,0xbb,0x72,0x80 + +# ATT: vfmsubadd132ph %zmm28, %zmm29, %zmm30 +# INTEL: vfmsubadd132ph zmm30, zmm29, zmm28 +0x62,0x06,0x15,0x40,0x97,0xf4 + +# ATT: vfmsubadd132ph {rn-sae}, %zmm28, %zmm29, %zmm30 +# INTEL: vfmsubadd132ph zmm30, zmm29, zmm28, {rn-sae} +0x62,0x06,0x15,0x10,0x97,0xf4 + +# ATT: vfmsubadd132ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +# INTEL: vfmsubadd132ph zmm30 {k7}, zmm29, zmmword ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x47,0x97,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vfmsubadd132ph (%r9){1to32}, %zmm29, %zmm30 +# INTEL: vfmsubadd132ph zmm30, zmm29, word ptr [r9]{1to32} +0x62,0x46,0x15,0x50,0x97,0x31 + +# ATT: vfmsubadd132ph 8128(%rcx), %zmm29, %zmm30 +# INTEL: vfmsubadd132ph zmm30, zmm29, zmmword ptr [rcx + 8128] +0x62,0x66,0x15,0x40,0x97,0x71,0x7f + +# ATT: vfmsubadd132ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +# INTEL: vfmsubadd132ph zmm30 {k7} {z}, zmm29, word ptr [rdx - 256]{1to32} +0x62,0x66,0x15,0xd7,0x97,0x72,0x80 + +# ATT: vfmsubadd213ph %zmm28, %zmm29, %zmm30 +# INTEL: vfmsubadd213ph zmm30, zmm29, zmm28 +0x62,0x06,0x15,0x40,0xa7,0xf4 + +# ATT: vfmsubadd213ph {rn-sae}, %zmm28, %zmm29, %zmm30 +# INTEL: vfmsubadd213ph zmm30, zmm29, zmm28, {rn-sae} +0x62,0x06,0x15,0x10,0xa7,0xf4 + +# ATT: vfmsubadd213ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +# INTEL: vfmsubadd213ph zmm30 {k7}, zmm29, zmmword ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x47,0xa7,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vfmsubadd213ph (%r9){1to32}, %zmm29, %zmm30 +# INTEL: vfmsubadd213ph zmm30, zmm29, word ptr [r9]{1to32} +0x62,0x46,0x15,0x50,0xa7,0x31 + +# ATT: vfmsubadd213ph 8128(%rcx), %zmm29, %zmm30 +# INTEL: vfmsubadd213ph zmm30, zmm29, zmmword ptr [rcx + 8128] +0x62,0x66,0x15,0x40,0xa7,0x71,0x7f + +# ATT: vfmsubadd213ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} 
{z} +# INTEL: vfmsubadd213ph zmm30 {k7} {z}, zmm29, word ptr [rdx - 256]{1to32} +0x62,0x66,0x15,0xd7,0xa7,0x72,0x80 + +# ATT: vfmsubadd231ph %zmm28, %zmm29, %zmm30 +# INTEL: vfmsubadd231ph zmm30, zmm29, zmm28 +0x62,0x06,0x15,0x40,0xb7,0xf4 + +# ATT: vfmsubadd231ph {rn-sae}, %zmm28, %zmm29, %zmm30 +# INTEL: vfmsubadd231ph zmm30, zmm29, zmm28, {rn-sae} +0x62,0x06,0x15,0x10,0xb7,0xf4 + +# ATT: vfmsubadd231ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +# INTEL: vfmsubadd231ph zmm30 {k7}, zmm29, zmmword ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x47,0xb7,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vfmsubadd231ph (%r9){1to32}, %zmm29, %zmm30 +# INTEL: vfmsubadd231ph zmm30, zmm29, word ptr [r9]{1to32} +0x62,0x46,0x15,0x50,0xb7,0x31 + +# ATT: vfmsubadd231ph 8128(%rcx), %zmm29, %zmm30 +# INTEL: vfmsubadd231ph zmm30, zmm29, zmmword ptr [rcx + 8128] +0x62,0x66,0x15,0x40,0xb7,0x71,0x7f + +# ATT: vfmsubadd231ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +# INTEL: vfmsubadd231ph zmm30 {k7} {z}, zmm29, word ptr [rdx - 256]{1to32} +0x62,0x66,0x15,0xd7,0xb7,0x72,0x80 + +# ATT: vfnmadd132ph %zmm28, %zmm29, %zmm30 +# INTEL: vfnmadd132ph zmm30, zmm29, zmm28 +0x62,0x06,0x15,0x40,0x9c,0xf4 + +# ATT: vfnmadd132ph {rn-sae}, %zmm28, %zmm29, %zmm30 +# INTEL: vfnmadd132ph zmm30, zmm29, zmm28, {rn-sae} +0x62,0x06,0x15,0x10,0x9c,0xf4 + +# ATT: vfnmadd132ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +# INTEL: vfnmadd132ph zmm30 {k7}, zmm29, zmmword ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x47,0x9c,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vfnmadd132ph (%r9){1to32}, %zmm29, %zmm30 +# INTEL: vfnmadd132ph zmm30, zmm29, word ptr [r9]{1to32} +0x62,0x46,0x15,0x50,0x9c,0x31 + +# ATT: vfnmadd132ph 8128(%rcx), %zmm29, %zmm30 +# INTEL: vfnmadd132ph zmm30, zmm29, zmmword ptr [rcx + 8128] +0x62,0x66,0x15,0x40,0x9c,0x71,0x7f + +# ATT: vfnmadd132ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +# INTEL: vfnmadd132ph zmm30 {k7} {z}, zmm29, word ptr [rdx - 256]{1to32} +0x62,0x66,0x15,0xd7,0x9c,0x72,0x80 + +# ATT: vfnmadd132sh %xmm28, %xmm29, %xmm30 +# INTEL: vfnmadd132sh xmm30, xmm29, xmm28 +0x62,0x06,0x15,0x00,0x9d,0xf4 + +# ATT: vfnmadd132sh {rn-sae}, %xmm28, %xmm29, %xmm30 +# INTEL: vfnmadd132sh xmm30, xmm29, xmm28, {rn-sae} +0x62,0x06,0x15,0x10,0x9d,0xf4 + +# ATT: vfnmadd132sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} +# INTEL: vfnmadd132sh xmm30 {k7}, xmm29, word ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x07,0x9d,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vfnmadd132sh (%r9), %xmm29, %xmm30 +# INTEL: vfnmadd132sh xmm30, xmm29, word ptr [r9] +0x62,0x46,0x15,0x00,0x9d,0x31 + +# ATT: vfnmadd132sh 254(%rcx), %xmm29, %xmm30 +# INTEL: vfnmadd132sh xmm30, xmm29, word ptr [rcx + 254] +0x62,0x66,0x15,0x00,0x9d,0x71,0x7f + +# ATT: vfnmadd132sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} +# INTEL: vfnmadd132sh xmm30 {k7} {z}, xmm29, word ptr [rdx - 256] +0x62,0x66,0x15,0x87,0x9d,0x72,0x80 + +# ATT: vfnmadd213ph %zmm28, %zmm29, %zmm30 +# INTEL: vfnmadd213ph zmm30, zmm29, zmm28 +0x62,0x06,0x15,0x40,0xac,0xf4 + +# ATT: vfnmadd213ph {rn-sae}, %zmm28, %zmm29, %zmm30 +# INTEL: vfnmadd213ph zmm30, zmm29, zmm28, {rn-sae} +0x62,0x06,0x15,0x10,0xac,0xf4 + +# ATT: vfnmadd213ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +# INTEL: vfnmadd213ph zmm30 {k7}, zmm29, zmmword ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x47,0xac,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vfnmadd213ph (%r9){1to32}, %zmm29, %zmm30 +# INTEL: vfnmadd213ph zmm30, zmm29, word ptr [r9]{1to32} +0x62,0x46,0x15,0x50,0xac,0x31 + +# ATT: vfnmadd213ph 8128(%rcx), %zmm29, %zmm30 +# 
INTEL: vfnmadd213ph zmm30, zmm29, zmmword ptr [rcx + 8128] +0x62,0x66,0x15,0x40,0xac,0x71,0x7f + +# ATT: vfnmadd213ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +# INTEL: vfnmadd213ph zmm30 {k7} {z}, zmm29, word ptr [rdx - 256]{1to32} +0x62,0x66,0x15,0xd7,0xac,0x72,0x80 + +# ATT: vfnmadd213sh %xmm28, %xmm29, %xmm30 +# INTEL: vfnmadd213sh xmm30, xmm29, xmm28 +0x62,0x06,0x15,0x00,0xad,0xf4 + +# ATT: vfnmadd213sh {rn-sae}, %xmm28, %xmm29, %xmm30 +# INTEL: vfnmadd213sh xmm30, xmm29, xmm28, {rn-sae} +0x62,0x06,0x15,0x10,0xad,0xf4 + +# ATT: vfnmadd213sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} +# INTEL: vfnmadd213sh xmm30 {k7}, xmm29, word ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x07,0xad,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vfnmadd213sh (%r9), %xmm29, %xmm30 +# INTEL: vfnmadd213sh xmm30, xmm29, word ptr [r9] +0x62,0x46,0x15,0x00,0xad,0x31 + +# ATT: vfnmadd213sh 254(%rcx), %xmm29, %xmm30 +# INTEL: vfnmadd213sh xmm30, xmm29, word ptr [rcx + 254] +0x62,0x66,0x15,0x00,0xad,0x71,0x7f + +# ATT: vfnmadd213sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} +# INTEL: vfnmadd213sh xmm30 {k7} {z}, xmm29, word ptr [rdx - 256] +0x62,0x66,0x15,0x87,0xad,0x72,0x80 + +# ATT: vfnmadd231ph %zmm28, %zmm29, %zmm30 +# INTEL: vfnmadd231ph zmm30, zmm29, zmm28 +0x62,0x06,0x15,0x40,0xbc,0xf4 + +# ATT: vfnmadd231ph {rn-sae}, %zmm28, %zmm29, %zmm30 +# INTEL: vfnmadd231ph zmm30, zmm29, zmm28, {rn-sae} +0x62,0x06,0x15,0x10,0xbc,0xf4 + +# ATT: vfnmadd231ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +# INTEL: vfnmadd231ph zmm30 {k7}, zmm29, zmmword ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x47,0xbc,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vfnmadd231ph (%r9){1to32}, %zmm29, %zmm30 +# INTEL: vfnmadd231ph zmm30, zmm29, word ptr [r9]{1to32} +0x62,0x46,0x15,0x50,0xbc,0x31 + +# ATT: vfnmadd231ph 8128(%rcx), %zmm29, %zmm30 +# INTEL: vfnmadd231ph zmm30, zmm29, zmmword ptr [rcx + 8128] +0x62,0x66,0x15,0x40,0xbc,0x71,0x7f + +# ATT: vfnmadd231ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +# INTEL: vfnmadd231ph zmm30 {k7} {z}, zmm29, word ptr [rdx - 256]{1to32} +0x62,0x66,0x15,0xd7,0xbc,0x72,0x80 + +# ATT: vfnmadd231sh %xmm28, %xmm29, %xmm30 +# INTEL: vfnmadd231sh xmm30, xmm29, xmm28 +0x62,0x06,0x15,0x00,0xbd,0xf4 + +# ATT: vfnmadd231sh {rn-sae}, %xmm28, %xmm29, %xmm30 +# INTEL: vfnmadd231sh xmm30, xmm29, xmm28, {rn-sae} +0x62,0x06,0x15,0x10,0xbd,0xf4 + +# ATT: vfnmadd231sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} +# INTEL: vfnmadd231sh xmm30 {k7}, xmm29, word ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x07,0xbd,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vfnmadd231sh (%r9), %xmm29, %xmm30 +# INTEL: vfnmadd231sh xmm30, xmm29, word ptr [r9] +0x62,0x46,0x15,0x00,0xbd,0x31 + +# ATT: vfnmadd231sh 254(%rcx), %xmm29, %xmm30 +# INTEL: vfnmadd231sh xmm30, xmm29, word ptr [rcx + 254] +0x62,0x66,0x15,0x00,0xbd,0x71,0x7f + +# ATT: vfnmadd231sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} +# INTEL: vfnmadd231sh xmm30 {k7} {z}, xmm29, word ptr [rdx - 256] +0x62,0x66,0x15,0x87,0xbd,0x72,0x80 + +# ATT: vfnmsub132ph %zmm28, %zmm29, %zmm30 +# INTEL: vfnmsub132ph zmm30, zmm29, zmm28 +0x62,0x06,0x15,0x40,0x9e,0xf4 + +# ATT: vfnmsub132ph {rn-sae}, %zmm28, %zmm29, %zmm30 +# INTEL: vfnmsub132ph zmm30, zmm29, zmm28, {rn-sae} +0x62,0x06,0x15,0x10,0x9e,0xf4 + +# ATT: vfnmsub132ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +# INTEL: vfnmsub132ph zmm30 {k7}, zmm29, zmmword ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x47,0x9e,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vfnmsub132ph (%r9){1to32}, %zmm29, %zmm30 +# INTEL: vfnmsub132ph zmm30, zmm29, word ptr 
[r9]{1to32} +0x62,0x46,0x15,0x50,0x9e,0x31 + +# ATT: vfnmsub132ph 8128(%rcx), %zmm29, %zmm30 +# INTEL: vfnmsub132ph zmm30, zmm29, zmmword ptr [rcx + 8128] +0x62,0x66,0x15,0x40,0x9e,0x71,0x7f + +# ATT: vfnmsub132ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +# INTEL: vfnmsub132ph zmm30 {k7} {z}, zmm29, word ptr [rdx - 256]{1to32} +0x62,0x66,0x15,0xd7,0x9e,0x72,0x80 + +# ATT: vfnmsub132sh %xmm28, %xmm29, %xmm30 +# INTEL: vfnmsub132sh xmm30, xmm29, xmm28 +0x62,0x06,0x15,0x00,0x9f,0xf4 + +# ATT: vfnmsub132sh {rn-sae}, %xmm28, %xmm29, %xmm30 +# INTEL: vfnmsub132sh xmm30, xmm29, xmm28, {rn-sae} +0x62,0x06,0x15,0x10,0x9f,0xf4 + +# ATT: vfnmsub132sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} +# INTEL: vfnmsub132sh xmm30 {k7}, xmm29, word ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x07,0x9f,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vfnmsub132sh (%r9), %xmm29, %xmm30 +# INTEL: vfnmsub132sh xmm30, xmm29, word ptr [r9] +0x62,0x46,0x15,0x00,0x9f,0x31 + +# ATT: vfnmsub132sh 254(%rcx), %xmm29, %xmm30 +# INTEL: vfnmsub132sh xmm30, xmm29, word ptr [rcx + 254] +0x62,0x66,0x15,0x00,0x9f,0x71,0x7f + +# ATT: vfnmsub132sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} +# INTEL: vfnmsub132sh xmm30 {k7} {z}, xmm29, word ptr [rdx - 256] +0x62,0x66,0x15,0x87,0x9f,0x72,0x80 + +# ATT: vfnmsub213ph %zmm28, %zmm29, %zmm30 +# INTEL: vfnmsub213ph zmm30, zmm29, zmm28 +0x62,0x06,0x15,0x40,0xae,0xf4 + +# ATT: vfnmsub213ph {rn-sae}, %zmm28, %zmm29, %zmm30 +# INTEL: vfnmsub213ph zmm30, zmm29, zmm28, {rn-sae} +0x62,0x06,0x15,0x10,0xae,0xf4 + +# ATT: vfnmsub213ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +# INTEL: vfnmsub213ph zmm30 {k7}, zmm29, zmmword ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x47,0xae,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vfnmsub213ph (%r9){1to32}, %zmm29, %zmm30 +# INTEL: vfnmsub213ph zmm30, zmm29, word ptr [r9]{1to32} +0x62,0x46,0x15,0x50,0xae,0x31 + +# ATT: vfnmsub213ph 8128(%rcx), %zmm29, %zmm30 +# INTEL: vfnmsub213ph zmm30, zmm29, zmmword ptr [rcx + 8128] +0x62,0x66,0x15,0x40,0xae,0x71,0x7f + +# ATT: vfnmsub213ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +# INTEL: vfnmsub213ph zmm30 {k7} {z}, zmm29, word ptr [rdx - 256]{1to32} +0x62,0x66,0x15,0xd7,0xae,0x72,0x80 + +# ATT: vfnmsub213sh %xmm28, %xmm29, %xmm30 +# INTEL: vfnmsub213sh xmm30, xmm29, xmm28 +0x62,0x06,0x15,0x00,0xaf,0xf4 + +# ATT: vfnmsub213sh {rn-sae}, %xmm28, %xmm29, %xmm30 +# INTEL: vfnmsub213sh xmm30, xmm29, xmm28, {rn-sae} +0x62,0x06,0x15,0x10,0xaf,0xf4 + +# ATT: vfnmsub213sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} +# INTEL: vfnmsub213sh xmm30 {k7}, xmm29, word ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x07,0xaf,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vfnmsub213sh (%r9), %xmm29, %xmm30 +# INTEL: vfnmsub213sh xmm30, xmm29, word ptr [r9] +0x62,0x46,0x15,0x00,0xaf,0x31 + +# ATT: vfnmsub213sh 254(%rcx), %xmm29, %xmm30 +# INTEL: vfnmsub213sh xmm30, xmm29, word ptr [rcx + 254] +0x62,0x66,0x15,0x00,0xaf,0x71,0x7f + +# ATT: vfnmsub213sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} +# INTEL: vfnmsub213sh xmm30 {k7} {z}, xmm29, word ptr [rdx - 256] +0x62,0x66,0x15,0x87,0xaf,0x72,0x80 + +# ATT: vfnmsub231ph %zmm28, %zmm29, %zmm30 +# INTEL: vfnmsub231ph zmm30, zmm29, zmm28 +0x62,0x06,0x15,0x40,0xbe,0xf4 + +# ATT: vfnmsub231ph {rn-sae}, %zmm28, %zmm29, %zmm30 +# INTEL: vfnmsub231ph zmm30, zmm29, zmm28, {rn-sae} +0x62,0x06,0x15,0x10,0xbe,0xf4 + +# ATT: vfnmsub231ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +# INTEL: vfnmsub231ph zmm30 {k7}, zmm29, zmmword ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x47,0xbe,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# 
ATT: vfnmsub231ph (%r9){1to32}, %zmm29, %zmm30 +# INTEL: vfnmsub231ph zmm30, zmm29, word ptr [r9]{1to32} +0x62,0x46,0x15,0x50,0xbe,0x31 + +# ATT: vfnmsub231ph 8128(%rcx), %zmm29, %zmm30 +# INTEL: vfnmsub231ph zmm30, zmm29, zmmword ptr [rcx + 8128] +0x62,0x66,0x15,0x40,0xbe,0x71,0x7f + +# ATT: vfnmsub231ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +# INTEL: vfnmsub231ph zmm30 {k7} {z}, zmm29, word ptr [rdx - 256]{1to32} +0x62,0x66,0x15,0xd7,0xbe,0x72,0x80 + +# ATT: vfnmsub231sh %xmm28, %xmm29, %xmm30 +# INTEL: vfnmsub231sh xmm30, xmm29, xmm28 +0x62,0x06,0x15,0x00,0xbf,0xf4 + +# ATT: vfnmsub231sh {rn-sae}, %xmm28, %xmm29, %xmm30 +# INTEL: vfnmsub231sh xmm30, xmm29, xmm28, {rn-sae} +0x62,0x06,0x15,0x10,0xbf,0xf4 + +# ATT: vfnmsub231sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} +# INTEL: vfnmsub231sh xmm30 {k7}, xmm29, word ptr [rbp + 8*r14 + 268435456] +0x62,0x26,0x15,0x07,0xbf,0xb4,0xf5,0x00,0x00,0x00,0x10 + +# ATT: vfnmsub231sh (%r9), %xmm29, %xmm30 +# INTEL: vfnmsub231sh xmm30, xmm29, word ptr [r9] +0x62,0x46,0x15,0x00,0xbf,0x31 + +# ATT: vfnmsub231sh 254(%rcx), %xmm29, %xmm30 +# INTEL: vfnmsub231sh xmm30, xmm29, word ptr [rcx + 254] +0x62,0x66,0x15,0x00,0xbf,0x71,0x7f + +# ATT: vfnmsub231sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} +# INTEL: vfnmsub231sh xmm30 {k7} {z}, xmm29, word ptr [rdx - 256] +0x62,0x66,0x15,0x87,0xbf,0x72,0x80 diff --git a/llvm/test/MC/Disassembler/X86/avx512fp16vl.txt b/llvm/test/MC/Disassembler/X86/avx512fp16vl.txt index 8f480fc13d82f..390622b2d4824 100644 --- a/llvm/test/MC/Disassembler/X86/avx512fp16vl.txt +++ b/llvm/test/MC/Disassembler/X86/avx512fp16vl.txt @@ -1492,3 +1492,723 @@ # ATT: vsqrtph -256(%edx){1to16}, %ymm6 {%k7} {z} # INTEL: vsqrtph ymm6 {k7} {z}, word ptr [edx - 256]{1to16} 0x62,0xf5,0x7c,0xbf,0x51,0x72,0x80 + +# ATT: vfmadd132ph %ymm4, %ymm5, %ymm6 +# INTEL: vfmadd132ph ymm6, ymm5, ymm4 +0x62,0xf6,0x55,0x28,0x98,0xf4 + +# ATT: vfmadd132ph %xmm4, %xmm5, %xmm6 +# INTEL: vfmadd132ph xmm6, xmm5, xmm4 +0x62,0xf6,0x55,0x08,0x98,0xf4 + +# ATT: vfmadd132ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +# INTEL: vfmadd132ph ymm6 {k7}, ymm5, ymmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x2f,0x98,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfmadd132ph (%ecx){1to16}, %ymm5, %ymm6 +# INTEL: vfmadd132ph ymm6, ymm5, word ptr [ecx]{1to16} +0x62,0xf6,0x55,0x38,0x98,0x31 + +# ATT: vfmadd132ph 4064(%ecx), %ymm5, %ymm6 +# INTEL: vfmadd132ph ymm6, ymm5, ymmword ptr [ecx + 4064] +0x62,0xf6,0x55,0x28,0x98,0x71,0x7f + +# ATT: vfmadd132ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +# INTEL: vfmadd132ph ymm6 {k7} {z}, ymm5, word ptr [edx - 256]{1to16} +0x62,0xf6,0x55,0xbf,0x98,0x72,0x80 + +# ATT: vfmadd132ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +# INTEL: vfmadd132ph xmm6 {k7}, xmm5, xmmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x0f,0x98,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfmadd132ph (%ecx){1to8}, %xmm5, %xmm6 +# INTEL: vfmadd132ph xmm6, xmm5, word ptr [ecx]{1to8} +0x62,0xf6,0x55,0x18,0x98,0x31 + +# ATT: vfmadd132ph 2032(%ecx), %xmm5, %xmm6 +# INTEL: vfmadd132ph xmm6, xmm5, xmmword ptr [ecx + 2032] +0x62,0xf6,0x55,0x08,0x98,0x71,0x7f + +# ATT: vfmadd132ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +# INTEL: vfmadd132ph xmm6 {k7} {z}, xmm5, word ptr [edx - 256]{1to8} +0x62,0xf6,0x55,0x9f,0x98,0x72,0x80 + +# ATT: vfmadd213ph %ymm4, %ymm5, %ymm6 +# INTEL: vfmadd213ph ymm6, ymm5, ymm4 +0x62,0xf6,0x55,0x28,0xa8,0xf4 + +# ATT: vfmadd213ph %xmm4, %xmm5, %xmm6 +# INTEL: vfmadd213ph xmm6, xmm5, xmm4 +0x62,0xf6,0x55,0x08,0xa8,0xf4 + +# ATT: vfmadd213ph 
268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +# INTEL: vfmadd213ph ymm6 {k7}, ymm5, ymmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x2f,0xa8,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfmadd213ph (%ecx){1to16}, %ymm5, %ymm6 +# INTEL: vfmadd213ph ymm6, ymm5, word ptr [ecx]{1to16} +0x62,0xf6,0x55,0x38,0xa8,0x31 + +# ATT: vfmadd213ph 4064(%ecx), %ymm5, %ymm6 +# INTEL: vfmadd213ph ymm6, ymm5, ymmword ptr [ecx + 4064] +0x62,0xf6,0x55,0x28,0xa8,0x71,0x7f + +# ATT: vfmadd213ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +# INTEL: vfmadd213ph ymm6 {k7} {z}, ymm5, word ptr [edx - 256]{1to16} +0x62,0xf6,0x55,0xbf,0xa8,0x72,0x80 + +# ATT: vfmadd213ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +# INTEL: vfmadd213ph xmm6 {k7}, xmm5, xmmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x0f,0xa8,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfmadd213ph (%ecx){1to8}, %xmm5, %xmm6 +# INTEL: vfmadd213ph xmm6, xmm5, word ptr [ecx]{1to8} +0x62,0xf6,0x55,0x18,0xa8,0x31 + +# ATT: vfmadd213ph 2032(%ecx), %xmm5, %xmm6 +# INTEL: vfmadd213ph xmm6, xmm5, xmmword ptr [ecx + 2032] +0x62,0xf6,0x55,0x08,0xa8,0x71,0x7f + +# ATT: vfmadd213ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +# INTEL: vfmadd213ph xmm6 {k7} {z}, xmm5, word ptr [edx - 256]{1to8} +0x62,0xf6,0x55,0x9f,0xa8,0x72,0x80 + +# ATT: vfmadd231ph %ymm4, %ymm5, %ymm6 +# INTEL: vfmadd231ph ymm6, ymm5, ymm4 +0x62,0xf6,0x55,0x28,0xb8,0xf4 + +# ATT: vfmadd231ph %xmm4, %xmm5, %xmm6 +# INTEL: vfmadd231ph xmm6, xmm5, xmm4 +0x62,0xf6,0x55,0x08,0xb8,0xf4 + +# ATT: vfmadd231ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +# INTEL: vfmadd231ph ymm6 {k7}, ymm5, ymmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x2f,0xb8,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfmadd231ph (%ecx){1to16}, %ymm5, %ymm6 +# INTEL: vfmadd231ph ymm6, ymm5, word ptr [ecx]{1to16} +0x62,0xf6,0x55,0x38,0xb8,0x31 + +# ATT: vfmadd231ph 4064(%ecx), %ymm5, %ymm6 +# INTEL: vfmadd231ph ymm6, ymm5, ymmword ptr [ecx + 4064] +0x62,0xf6,0x55,0x28,0xb8,0x71,0x7f + +# ATT: vfmadd231ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +# INTEL: vfmadd231ph ymm6 {k7} {z}, ymm5, word ptr [edx - 256]{1to16} +0x62,0xf6,0x55,0xbf,0xb8,0x72,0x80 + +# ATT: vfmadd231ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +# INTEL: vfmadd231ph xmm6 {k7}, xmm5, xmmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x0f,0xb8,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfmadd231ph (%ecx){1to8}, %xmm5, %xmm6 +# INTEL: vfmadd231ph xmm6, xmm5, word ptr [ecx]{1to8} +0x62,0xf6,0x55,0x18,0xb8,0x31 + +# ATT: vfmadd231ph 2032(%ecx), %xmm5, %xmm6 +# INTEL: vfmadd231ph xmm6, xmm5, xmmword ptr [ecx + 2032] +0x62,0xf6,0x55,0x08,0xb8,0x71,0x7f + +# ATT: vfmadd231ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +# INTEL: vfmadd231ph xmm6 {k7} {z}, xmm5, word ptr [edx - 256]{1to8} +0x62,0xf6,0x55,0x9f,0xb8,0x72,0x80 + +# ATT: vfmaddsub132ph %ymm4, %ymm5, %ymm6 +# INTEL: vfmaddsub132ph ymm6, ymm5, ymm4 +0x62,0xf6,0x55,0x28,0x96,0xf4 + +# ATT: vfmaddsub132ph %xmm4, %xmm5, %xmm6 +# INTEL: vfmaddsub132ph xmm6, xmm5, xmm4 +0x62,0xf6,0x55,0x08,0x96,0xf4 + +# ATT: vfmaddsub132ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +# INTEL: vfmaddsub132ph ymm6 {k7}, ymm5, ymmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x2f,0x96,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfmaddsub132ph (%ecx){1to16}, %ymm5, %ymm6 +# INTEL: vfmaddsub132ph ymm6, ymm5, word ptr [ecx]{1to16} +0x62,0xf6,0x55,0x38,0x96,0x31 + +# ATT: vfmaddsub132ph 4064(%ecx), %ymm5, %ymm6 +# INTEL: vfmaddsub132ph ymm6, ymm5, ymmword ptr [ecx + 4064] +0x62,0xf6,0x55,0x28,0x96,0x71,0x7f + +# ATT: vfmaddsub132ph 
-256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +# INTEL: vfmaddsub132ph ymm6 {k7} {z}, ymm5, word ptr [edx - 256]{1to16} +0x62,0xf6,0x55,0xbf,0x96,0x72,0x80 + +# ATT: vfmaddsub132ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +# INTEL: vfmaddsub132ph xmm6 {k7}, xmm5, xmmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x0f,0x96,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfmaddsub132ph (%ecx){1to8}, %xmm5, %xmm6 +# INTEL: vfmaddsub132ph xmm6, xmm5, word ptr [ecx]{1to8} +0x62,0xf6,0x55,0x18,0x96,0x31 + +# ATT: vfmaddsub132ph 2032(%ecx), %xmm5, %xmm6 +# INTEL: vfmaddsub132ph xmm6, xmm5, xmmword ptr [ecx + 2032] +0x62,0xf6,0x55,0x08,0x96,0x71,0x7f + +# ATT: vfmaddsub132ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +# INTEL: vfmaddsub132ph xmm6 {k7} {z}, xmm5, word ptr [edx - 256]{1to8} +0x62,0xf6,0x55,0x9f,0x96,0x72,0x80 + +# ATT: vfmaddsub213ph %ymm4, %ymm5, %ymm6 +# INTEL: vfmaddsub213ph ymm6, ymm5, ymm4 +0x62,0xf6,0x55,0x28,0xa6,0xf4 + +# ATT: vfmaddsub213ph %xmm4, %xmm5, %xmm6 +# INTEL: vfmaddsub213ph xmm6, xmm5, xmm4 +0x62,0xf6,0x55,0x08,0xa6,0xf4 + +# ATT: vfmaddsub213ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +# INTEL: vfmaddsub213ph ymm6 {k7}, ymm5, ymmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x2f,0xa6,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfmaddsub213ph (%ecx){1to16}, %ymm5, %ymm6 +# INTEL: vfmaddsub213ph ymm6, ymm5, word ptr [ecx]{1to16} +0x62,0xf6,0x55,0x38,0xa6,0x31 + +# ATT: vfmaddsub213ph 4064(%ecx), %ymm5, %ymm6 +# INTEL: vfmaddsub213ph ymm6, ymm5, ymmword ptr [ecx + 4064] +0x62,0xf6,0x55,0x28,0xa6,0x71,0x7f + +# ATT: vfmaddsub213ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +# INTEL: vfmaddsub213ph ymm6 {k7} {z}, ymm5, word ptr [edx - 256]{1to16} +0x62,0xf6,0x55,0xbf,0xa6,0x72,0x80 + +# ATT: vfmaddsub213ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +# INTEL: vfmaddsub213ph xmm6 {k7}, xmm5, xmmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x0f,0xa6,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfmaddsub213ph (%ecx){1to8}, %xmm5, %xmm6 +# INTEL: vfmaddsub213ph xmm6, xmm5, word ptr [ecx]{1to8} +0x62,0xf6,0x55,0x18,0xa6,0x31 + +# ATT: vfmaddsub213ph 2032(%ecx), %xmm5, %xmm6 +# INTEL: vfmaddsub213ph xmm6, xmm5, xmmword ptr [ecx + 2032] +0x62,0xf6,0x55,0x08,0xa6,0x71,0x7f + +# ATT: vfmaddsub213ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +# INTEL: vfmaddsub213ph xmm6 {k7} {z}, xmm5, word ptr [edx - 256]{1to8} +0x62,0xf6,0x55,0x9f,0xa6,0x72,0x80 + +# ATT: vfmaddsub231ph %ymm4, %ymm5, %ymm6 +# INTEL: vfmaddsub231ph ymm6, ymm5, ymm4 +0x62,0xf6,0x55,0x28,0xb6,0xf4 + +# ATT: vfmaddsub231ph %xmm4, %xmm5, %xmm6 +# INTEL: vfmaddsub231ph xmm6, xmm5, xmm4 +0x62,0xf6,0x55,0x08,0xb6,0xf4 + +# ATT: vfmaddsub231ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +# INTEL: vfmaddsub231ph ymm6 {k7}, ymm5, ymmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x2f,0xb6,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfmaddsub231ph (%ecx){1to16}, %ymm5, %ymm6 +# INTEL: vfmaddsub231ph ymm6, ymm5, word ptr [ecx]{1to16} +0x62,0xf6,0x55,0x38,0xb6,0x31 + +# ATT: vfmaddsub231ph 4064(%ecx), %ymm5, %ymm6 +# INTEL: vfmaddsub231ph ymm6, ymm5, ymmword ptr [ecx + 4064] +0x62,0xf6,0x55,0x28,0xb6,0x71,0x7f + +# ATT: vfmaddsub231ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +# INTEL: vfmaddsub231ph ymm6 {k7} {z}, ymm5, word ptr [edx - 256]{1to16} +0x62,0xf6,0x55,0xbf,0xb6,0x72,0x80 + +# ATT: vfmaddsub231ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +# INTEL: vfmaddsub231ph xmm6 {k7}, xmm5, xmmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x0f,0xb6,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfmaddsub231ph (%ecx){1to8}, %xmm5, 
%xmm6 +# INTEL: vfmaddsub231ph xmm6, xmm5, word ptr [ecx]{1to8} +0x62,0xf6,0x55,0x18,0xb6,0x31 + +# ATT: vfmaddsub231ph 2032(%ecx), %xmm5, %xmm6 +# INTEL: vfmaddsub231ph xmm6, xmm5, xmmword ptr [ecx + 2032] +0x62,0xf6,0x55,0x08,0xb6,0x71,0x7f + +# ATT: vfmaddsub231ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +# INTEL: vfmaddsub231ph xmm6 {k7} {z}, xmm5, word ptr [edx - 256]{1to8} +0x62,0xf6,0x55,0x9f,0xb6,0x72,0x80 + +# ATT: vfmsub132ph %ymm4, %ymm5, %ymm6 +# INTEL: vfmsub132ph ymm6, ymm5, ymm4 +0x62,0xf6,0x55,0x28,0x9a,0xf4 + +# ATT: vfmsub132ph %xmm4, %xmm5, %xmm6 +# INTEL: vfmsub132ph xmm6, xmm5, xmm4 +0x62,0xf6,0x55,0x08,0x9a,0xf4 + +# ATT: vfmsub132ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +# INTEL: vfmsub132ph ymm6 {k7}, ymm5, ymmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x2f,0x9a,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfmsub132ph (%ecx){1to16}, %ymm5, %ymm6 +# INTEL: vfmsub132ph ymm6, ymm5, word ptr [ecx]{1to16} +0x62,0xf6,0x55,0x38,0x9a,0x31 + +# ATT: vfmsub132ph 4064(%ecx), %ymm5, %ymm6 +# INTEL: vfmsub132ph ymm6, ymm5, ymmword ptr [ecx + 4064] +0x62,0xf6,0x55,0x28,0x9a,0x71,0x7f + +# ATT: vfmsub132ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +# INTEL: vfmsub132ph ymm6 {k7} {z}, ymm5, word ptr [edx - 256]{1to16} +0x62,0xf6,0x55,0xbf,0x9a,0x72,0x80 + +# ATT: vfmsub132ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +# INTEL: vfmsub132ph xmm6 {k7}, xmm5, xmmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x0f,0x9a,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfmsub132ph (%ecx){1to8}, %xmm5, %xmm6 +# INTEL: vfmsub132ph xmm6, xmm5, word ptr [ecx]{1to8} +0x62,0xf6,0x55,0x18,0x9a,0x31 + +# ATT: vfmsub132ph 2032(%ecx), %xmm5, %xmm6 +# INTEL: vfmsub132ph xmm6, xmm5, xmmword ptr [ecx + 2032] +0x62,0xf6,0x55,0x08,0x9a,0x71,0x7f + +# ATT: vfmsub132ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +# INTEL: vfmsub132ph xmm6 {k7} {z}, xmm5, word ptr [edx - 256]{1to8} +0x62,0xf6,0x55,0x9f,0x9a,0x72,0x80 + +# ATT: vfmsub213ph %ymm4, %ymm5, %ymm6 +# INTEL: vfmsub213ph ymm6, ymm5, ymm4 +0x62,0xf6,0x55,0x28,0xaa,0xf4 + +# ATT: vfmsub213ph %xmm4, %xmm5, %xmm6 +# INTEL: vfmsub213ph xmm6, xmm5, xmm4 +0x62,0xf6,0x55,0x08,0xaa,0xf4 + +# ATT: vfmsub213ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +# INTEL: vfmsub213ph ymm6 {k7}, ymm5, ymmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x2f,0xaa,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfmsub213ph (%ecx){1to16}, %ymm5, %ymm6 +# INTEL: vfmsub213ph ymm6, ymm5, word ptr [ecx]{1to16} +0x62,0xf6,0x55,0x38,0xaa,0x31 + +# ATT: vfmsub213ph 4064(%ecx), %ymm5, %ymm6 +# INTEL: vfmsub213ph ymm6, ymm5, ymmword ptr [ecx + 4064] +0x62,0xf6,0x55,0x28,0xaa,0x71,0x7f + +# ATT: vfmsub213ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +# INTEL: vfmsub213ph ymm6 {k7} {z}, ymm5, word ptr [edx - 256]{1to16} +0x62,0xf6,0x55,0xbf,0xaa,0x72,0x80 + +# ATT: vfmsub213ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +# INTEL: vfmsub213ph xmm6 {k7}, xmm5, xmmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x0f,0xaa,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfmsub213ph (%ecx){1to8}, %xmm5, %xmm6 +# INTEL: vfmsub213ph xmm6, xmm5, word ptr [ecx]{1to8} +0x62,0xf6,0x55,0x18,0xaa,0x31 + +# ATT: vfmsub213ph 2032(%ecx), %xmm5, %xmm6 +# INTEL: vfmsub213ph xmm6, xmm5, xmmword ptr [ecx + 2032] +0x62,0xf6,0x55,0x08,0xaa,0x71,0x7f + +# ATT: vfmsub213ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +# INTEL: vfmsub213ph xmm6 {k7} {z}, xmm5, word ptr [edx - 256]{1to8} +0x62,0xf6,0x55,0x9f,0xaa,0x72,0x80 + +# ATT: vfmsub231ph %ymm4, %ymm5, %ymm6 +# INTEL: vfmsub231ph ymm6, ymm5, ymm4 
+0x62,0xf6,0x55,0x28,0xba,0xf4 + +# ATT: vfmsub231ph %xmm4, %xmm5, %xmm6 +# INTEL: vfmsub231ph xmm6, xmm5, xmm4 +0x62,0xf6,0x55,0x08,0xba,0xf4 + +# ATT: vfmsub231ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +# INTEL: vfmsub231ph ymm6 {k7}, ymm5, ymmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x2f,0xba,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfmsub231ph (%ecx){1to16}, %ymm5, %ymm6 +# INTEL: vfmsub231ph ymm6, ymm5, word ptr [ecx]{1to16} +0x62,0xf6,0x55,0x38,0xba,0x31 + +# ATT: vfmsub231ph 4064(%ecx), %ymm5, %ymm6 +# INTEL: vfmsub231ph ymm6, ymm5, ymmword ptr [ecx + 4064] +0x62,0xf6,0x55,0x28,0xba,0x71,0x7f + +# ATT: vfmsub231ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +# INTEL: vfmsub231ph ymm6 {k7} {z}, ymm5, word ptr [edx - 256]{1to16} +0x62,0xf6,0x55,0xbf,0xba,0x72,0x80 + +# ATT: vfmsub231ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +# INTEL: vfmsub231ph xmm6 {k7}, xmm5, xmmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x0f,0xba,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfmsub231ph (%ecx){1to8}, %xmm5, %xmm6 +# INTEL: vfmsub231ph xmm6, xmm5, word ptr [ecx]{1to8} +0x62,0xf6,0x55,0x18,0xba,0x31 + +# ATT: vfmsub231ph 2032(%ecx), %xmm5, %xmm6 +# INTEL: vfmsub231ph xmm6, xmm5, xmmword ptr [ecx + 2032] +0x62,0xf6,0x55,0x08,0xba,0x71,0x7f + +# ATT: vfmsub231ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +# INTEL: vfmsub231ph xmm6 {k7} {z}, xmm5, word ptr [edx - 256]{1to8} +0x62,0xf6,0x55,0x9f,0xba,0x72,0x80 + +# ATT: vfmsubadd132ph %ymm4, %ymm5, %ymm6 +# INTEL: vfmsubadd132ph ymm6, ymm5, ymm4 +0x62,0xf6,0x55,0x28,0x97,0xf4 + +# ATT: vfmsubadd132ph %xmm4, %xmm5, %xmm6 +# INTEL: vfmsubadd132ph xmm6, xmm5, xmm4 +0x62,0xf6,0x55,0x08,0x97,0xf4 + +# ATT: vfmsubadd132ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +# INTEL: vfmsubadd132ph ymm6 {k7}, ymm5, ymmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x2f,0x97,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfmsubadd132ph (%ecx){1to16}, %ymm5, %ymm6 +# INTEL: vfmsubadd132ph ymm6, ymm5, word ptr [ecx]{1to16} +0x62,0xf6,0x55,0x38,0x97,0x31 + +# ATT: vfmsubadd132ph 4064(%ecx), %ymm5, %ymm6 +# INTEL: vfmsubadd132ph ymm6, ymm5, ymmword ptr [ecx + 4064] +0x62,0xf6,0x55,0x28,0x97,0x71,0x7f + +# ATT: vfmsubadd132ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +# INTEL: vfmsubadd132ph ymm6 {k7} {z}, ymm5, word ptr [edx - 256]{1to16} +0x62,0xf6,0x55,0xbf,0x97,0x72,0x80 + +# ATT: vfmsubadd132ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +# INTEL: vfmsubadd132ph xmm6 {k7}, xmm5, xmmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x0f,0x97,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfmsubadd132ph (%ecx){1to8}, %xmm5, %xmm6 +# INTEL: vfmsubadd132ph xmm6, xmm5, word ptr [ecx]{1to8} +0x62,0xf6,0x55,0x18,0x97,0x31 + +# ATT: vfmsubadd132ph 2032(%ecx), %xmm5, %xmm6 +# INTEL: vfmsubadd132ph xmm6, xmm5, xmmword ptr [ecx + 2032] +0x62,0xf6,0x55,0x08,0x97,0x71,0x7f + +# ATT: vfmsubadd132ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +# INTEL: vfmsubadd132ph xmm6 {k7} {z}, xmm5, word ptr [edx - 256]{1to8} +0x62,0xf6,0x55,0x9f,0x97,0x72,0x80 + +# ATT: vfmsubadd213ph %ymm4, %ymm5, %ymm6 +# INTEL: vfmsubadd213ph ymm6, ymm5, ymm4 +0x62,0xf6,0x55,0x28,0xa7,0xf4 + +# ATT: vfmsubadd213ph %xmm4, %xmm5, %xmm6 +# INTEL: vfmsubadd213ph xmm6, xmm5, xmm4 +0x62,0xf6,0x55,0x08,0xa7,0xf4 + +# ATT: vfmsubadd213ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +# INTEL: vfmsubadd213ph ymm6 {k7}, ymm5, ymmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x2f,0xa7,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfmsubadd213ph (%ecx){1to16}, %ymm5, %ymm6 +# INTEL: vfmsubadd213ph ymm6, ymm5, word ptr 
[ecx]{1to16} +0x62,0xf6,0x55,0x38,0xa7,0x31 + +# ATT: vfmsubadd213ph 4064(%ecx), %ymm5, %ymm6 +# INTEL: vfmsubadd213ph ymm6, ymm5, ymmword ptr [ecx + 4064] +0x62,0xf6,0x55,0x28,0xa7,0x71,0x7f + +# ATT: vfmsubadd213ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +# INTEL: vfmsubadd213ph ymm6 {k7} {z}, ymm5, word ptr [edx - 256]{1to16} +0x62,0xf6,0x55,0xbf,0xa7,0x72,0x80 + +# ATT: vfmsubadd213ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +# INTEL: vfmsubadd213ph xmm6 {k7}, xmm5, xmmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x0f,0xa7,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfmsubadd213ph (%ecx){1to8}, %xmm5, %xmm6 +# INTEL: vfmsubadd213ph xmm6, xmm5, word ptr [ecx]{1to8} +0x62,0xf6,0x55,0x18,0xa7,0x31 + +# ATT: vfmsubadd213ph 2032(%ecx), %xmm5, %xmm6 +# INTEL: vfmsubadd213ph xmm6, xmm5, xmmword ptr [ecx + 2032] +0x62,0xf6,0x55,0x08,0xa7,0x71,0x7f + +# ATT: vfmsubadd213ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +# INTEL: vfmsubadd213ph xmm6 {k7} {z}, xmm5, word ptr [edx - 256]{1to8} +0x62,0xf6,0x55,0x9f,0xa7,0x72,0x80 + +# ATT: vfmsubadd231ph %ymm4, %ymm5, %ymm6 +# INTEL: vfmsubadd231ph ymm6, ymm5, ymm4 +0x62,0xf6,0x55,0x28,0xb7,0xf4 + +# ATT: vfmsubadd231ph %xmm4, %xmm5, %xmm6 +# INTEL: vfmsubadd231ph xmm6, xmm5, xmm4 +0x62,0xf6,0x55,0x08,0xb7,0xf4 + +# ATT: vfmsubadd231ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +# INTEL: vfmsubadd231ph ymm6 {k7}, ymm5, ymmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x2f,0xb7,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfmsubadd231ph (%ecx){1to16}, %ymm5, %ymm6 +# INTEL: vfmsubadd231ph ymm6, ymm5, word ptr [ecx]{1to16} +0x62,0xf6,0x55,0x38,0xb7,0x31 + +# ATT: vfmsubadd231ph 4064(%ecx), %ymm5, %ymm6 +# INTEL: vfmsubadd231ph ymm6, ymm5, ymmword ptr [ecx + 4064] +0x62,0xf6,0x55,0x28,0xb7,0x71,0x7f + +# ATT: vfmsubadd231ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +# INTEL: vfmsubadd231ph ymm6 {k7} {z}, ymm5, word ptr [edx - 256]{1to16} +0x62,0xf6,0x55,0xbf,0xb7,0x72,0x80 + +# ATT: vfmsubadd231ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +# INTEL: vfmsubadd231ph xmm6 {k7}, xmm5, xmmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x0f,0xb7,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfmsubadd231ph (%ecx){1to8}, %xmm5, %xmm6 +# INTEL: vfmsubadd231ph xmm6, xmm5, word ptr [ecx]{1to8} +0x62,0xf6,0x55,0x18,0xb7,0x31 + +# ATT: vfmsubadd231ph 2032(%ecx), %xmm5, %xmm6 +# INTEL: vfmsubadd231ph xmm6, xmm5, xmmword ptr [ecx + 2032] +0x62,0xf6,0x55,0x08,0xb7,0x71,0x7f + +# ATT: vfmsubadd231ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +# INTEL: vfmsubadd231ph xmm6 {k7} {z}, xmm5, word ptr [edx - 256]{1to8} +0x62,0xf6,0x55,0x9f,0xb7,0x72,0x80 + +# ATT: vfnmadd132ph %ymm4, %ymm5, %ymm6 +# INTEL: vfnmadd132ph ymm6, ymm5, ymm4 +0x62,0xf6,0x55,0x28,0x9c,0xf4 + +# ATT: vfnmadd132ph %xmm4, %xmm5, %xmm6 +# INTEL: vfnmadd132ph xmm6, xmm5, xmm4 +0x62,0xf6,0x55,0x08,0x9c,0xf4 + +# ATT: vfnmadd132ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +# INTEL: vfnmadd132ph ymm6 {k7}, ymm5, ymmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x2f,0x9c,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfnmadd132ph (%ecx){1to16}, %ymm5, %ymm6 +# INTEL: vfnmadd132ph ymm6, ymm5, word ptr [ecx]{1to16} +0x62,0xf6,0x55,0x38,0x9c,0x31 + +# ATT: vfnmadd132ph 4064(%ecx), %ymm5, %ymm6 +# INTEL: vfnmadd132ph ymm6, ymm5, ymmword ptr [ecx + 4064] +0x62,0xf6,0x55,0x28,0x9c,0x71,0x7f + +# ATT: vfnmadd132ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +# INTEL: vfnmadd132ph ymm6 {k7} {z}, ymm5, word ptr [edx - 256]{1to16} +0x62,0xf6,0x55,0xbf,0x9c,0x72,0x80 + +# ATT: vfnmadd132ph 268435456(%esp,%esi,8), %xmm5, 
%xmm6 {%k7} +# INTEL: vfnmadd132ph xmm6 {k7}, xmm5, xmmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x0f,0x9c,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfnmadd132ph (%ecx){1to8}, %xmm5, %xmm6 +# INTEL: vfnmadd132ph xmm6, xmm5, word ptr [ecx]{1to8} +0x62,0xf6,0x55,0x18,0x9c,0x31 + +# ATT: vfnmadd132ph 2032(%ecx), %xmm5, %xmm6 +# INTEL: vfnmadd132ph xmm6, xmm5, xmmword ptr [ecx + 2032] +0x62,0xf6,0x55,0x08,0x9c,0x71,0x7f + +# ATT: vfnmadd132ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +# INTEL: vfnmadd132ph xmm6 {k7} {z}, xmm5, word ptr [edx - 256]{1to8} +0x62,0xf6,0x55,0x9f,0x9c,0x72,0x80 + +# ATT: vfnmadd213ph %ymm4, %ymm5, %ymm6 +# INTEL: vfnmadd213ph ymm6, ymm5, ymm4 +0x62,0xf6,0x55,0x28,0xac,0xf4 + +# ATT: vfnmadd213ph %xmm4, %xmm5, %xmm6 +# INTEL: vfnmadd213ph xmm6, xmm5, xmm4 +0x62,0xf6,0x55,0x08,0xac,0xf4 + +# ATT: vfnmadd213ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +# INTEL: vfnmadd213ph ymm6 {k7}, ymm5, ymmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x2f,0xac,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfnmadd213ph (%ecx){1to16}, %ymm5, %ymm6 +# INTEL: vfnmadd213ph ymm6, ymm5, word ptr [ecx]{1to16} +0x62,0xf6,0x55,0x38,0xac,0x31 + +# ATT: vfnmadd213ph 4064(%ecx), %ymm5, %ymm6 +# INTEL: vfnmadd213ph ymm6, ymm5, ymmword ptr [ecx + 4064] +0x62,0xf6,0x55,0x28,0xac,0x71,0x7f + +# ATT: vfnmadd213ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +# INTEL: vfnmadd213ph ymm6 {k7} {z}, ymm5, word ptr [edx - 256]{1to16} +0x62,0xf6,0x55,0xbf,0xac,0x72,0x80 + +# ATT: vfnmadd213ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +# INTEL: vfnmadd213ph xmm6 {k7}, xmm5, xmmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x0f,0xac,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfnmadd213ph (%ecx){1to8}, %xmm5, %xmm6 +# INTEL: vfnmadd213ph xmm6, xmm5, word ptr [ecx]{1to8} +0x62,0xf6,0x55,0x18,0xac,0x31 + +# ATT: vfnmadd213ph 2032(%ecx), %xmm5, %xmm6 +# INTEL: vfnmadd213ph xmm6, xmm5, xmmword ptr [ecx + 2032] +0x62,0xf6,0x55,0x08,0xac,0x71,0x7f + +# ATT: vfnmadd213ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +# INTEL: vfnmadd213ph xmm6 {k7} {z}, xmm5, word ptr [edx - 256]{1to8} +0x62,0xf6,0x55,0x9f,0xac,0x72,0x80 + +# ATT: vfnmadd231ph %ymm4, %ymm5, %ymm6 +# INTEL: vfnmadd231ph ymm6, ymm5, ymm4 +0x62,0xf6,0x55,0x28,0xbc,0xf4 + +# ATT: vfnmadd231ph %xmm4, %xmm5, %xmm6 +# INTEL: vfnmadd231ph xmm6, xmm5, xmm4 +0x62,0xf6,0x55,0x08,0xbc,0xf4 + +# ATT: vfnmadd231ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +# INTEL: vfnmadd231ph ymm6 {k7}, ymm5, ymmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x2f,0xbc,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfnmadd231ph (%ecx){1to16}, %ymm5, %ymm6 +# INTEL: vfnmadd231ph ymm6, ymm5, word ptr [ecx]{1to16} +0x62,0xf6,0x55,0x38,0xbc,0x31 + +# ATT: vfnmadd231ph 4064(%ecx), %ymm5, %ymm6 +# INTEL: vfnmadd231ph ymm6, ymm5, ymmword ptr [ecx + 4064] +0x62,0xf6,0x55,0x28,0xbc,0x71,0x7f + +# ATT: vfnmadd231ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +# INTEL: vfnmadd231ph ymm6 {k7} {z}, ymm5, word ptr [edx - 256]{1to16} +0x62,0xf6,0x55,0xbf,0xbc,0x72,0x80 + +# ATT: vfnmadd231ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +# INTEL: vfnmadd231ph xmm6 {k7}, xmm5, xmmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x0f,0xbc,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfnmadd231ph (%ecx){1to8}, %xmm5, %xmm6 +# INTEL: vfnmadd231ph xmm6, xmm5, word ptr [ecx]{1to8} +0x62,0xf6,0x55,0x18,0xbc,0x31 + +# ATT: vfnmadd231ph 2032(%ecx), %xmm5, %xmm6 +# INTEL: vfnmadd231ph xmm6, xmm5, xmmword ptr [ecx + 2032] +0x62,0xf6,0x55,0x08,0xbc,0x71,0x7f + +# ATT: vfnmadd231ph -256(%edx){1to8}, %xmm5, %xmm6 
{%k7} {z} +# INTEL: vfnmadd231ph xmm6 {k7} {z}, xmm5, word ptr [edx - 256]{1to8} +0x62,0xf6,0x55,0x9f,0xbc,0x72,0x80 + +# ATT: vfnmsub132ph %ymm4, %ymm5, %ymm6 +# INTEL: vfnmsub132ph ymm6, ymm5, ymm4 +0x62,0xf6,0x55,0x28,0x9e,0xf4 + +# ATT: vfnmsub132ph %xmm4, %xmm5, %xmm6 +# INTEL: vfnmsub132ph xmm6, xmm5, xmm4 +0x62,0xf6,0x55,0x08,0x9e,0xf4 + +# ATT: vfnmsub132ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +# INTEL: vfnmsub132ph ymm6 {k7}, ymm5, ymmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x2f,0x9e,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfnmsub132ph (%ecx){1to16}, %ymm5, %ymm6 +# INTEL: vfnmsub132ph ymm6, ymm5, word ptr [ecx]{1to16} +0x62,0xf6,0x55,0x38,0x9e,0x31 + +# ATT: vfnmsub132ph 4064(%ecx), %ymm5, %ymm6 +# INTEL: vfnmsub132ph ymm6, ymm5, ymmword ptr [ecx + 4064] +0x62,0xf6,0x55,0x28,0x9e,0x71,0x7f + +# ATT: vfnmsub132ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +# INTEL: vfnmsub132ph ymm6 {k7} {z}, ymm5, word ptr [edx - 256]{1to16} +0x62,0xf6,0x55,0xbf,0x9e,0x72,0x80 + +# ATT: vfnmsub132ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +# INTEL: vfnmsub132ph xmm6 {k7}, xmm5, xmmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x0f,0x9e,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfnmsub132ph (%ecx){1to8}, %xmm5, %xmm6 +# INTEL: vfnmsub132ph xmm6, xmm5, word ptr [ecx]{1to8} +0x62,0xf6,0x55,0x18,0x9e,0x31 + +# ATT: vfnmsub132ph 2032(%ecx), %xmm5, %xmm6 +# INTEL: vfnmsub132ph xmm6, xmm5, xmmword ptr [ecx + 2032] +0x62,0xf6,0x55,0x08,0x9e,0x71,0x7f + +# ATT: vfnmsub132ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +# INTEL: vfnmsub132ph xmm6 {k7} {z}, xmm5, word ptr [edx - 256]{1to8} +0x62,0xf6,0x55,0x9f,0x9e,0x72,0x80 + +# ATT: vfnmsub213ph %ymm4, %ymm5, %ymm6 +# INTEL: vfnmsub213ph ymm6, ymm5, ymm4 +0x62,0xf6,0x55,0x28,0xae,0xf4 + +# ATT: vfnmsub213ph %xmm4, %xmm5, %xmm6 +# INTEL: vfnmsub213ph xmm6, xmm5, xmm4 +0x62,0xf6,0x55,0x08,0xae,0xf4 + +# ATT: vfnmsub213ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +# INTEL: vfnmsub213ph ymm6 {k7}, ymm5, ymmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x2f,0xae,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfnmsub213ph (%ecx){1to16}, %ymm5, %ymm6 +# INTEL: vfnmsub213ph ymm6, ymm5, word ptr [ecx]{1to16} +0x62,0xf6,0x55,0x38,0xae,0x31 + +# ATT: vfnmsub213ph 4064(%ecx), %ymm5, %ymm6 +# INTEL: vfnmsub213ph ymm6, ymm5, ymmword ptr [ecx + 4064] +0x62,0xf6,0x55,0x28,0xae,0x71,0x7f + +# ATT: vfnmsub213ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +# INTEL: vfnmsub213ph ymm6 {k7} {z}, ymm5, word ptr [edx - 256]{1to16} +0x62,0xf6,0x55,0xbf,0xae,0x72,0x80 + +# ATT: vfnmsub213ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +# INTEL: vfnmsub213ph xmm6 {k7}, xmm5, xmmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x0f,0xae,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfnmsub213ph (%ecx){1to8}, %xmm5, %xmm6 +# INTEL: vfnmsub213ph xmm6, xmm5, word ptr [ecx]{1to8} +0x62,0xf6,0x55,0x18,0xae,0x31 + +# ATT: vfnmsub213ph 2032(%ecx), %xmm5, %xmm6 +# INTEL: vfnmsub213ph xmm6, xmm5, xmmword ptr [ecx + 2032] +0x62,0xf6,0x55,0x08,0xae,0x71,0x7f + +# ATT: vfnmsub213ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +# INTEL: vfnmsub213ph xmm6 {k7} {z}, xmm5, word ptr [edx - 256]{1to8} +0x62,0xf6,0x55,0x9f,0xae,0x72,0x80 + +# ATT: vfnmsub231ph %ymm4, %ymm5, %ymm6 +# INTEL: vfnmsub231ph ymm6, ymm5, ymm4 +0x62,0xf6,0x55,0x28,0xbe,0xf4 + +# ATT: vfnmsub231ph %xmm4, %xmm5, %xmm6 +# INTEL: vfnmsub231ph xmm6, xmm5, xmm4 +0x62,0xf6,0x55,0x08,0xbe,0xf4 + +# ATT: vfnmsub231ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +# INTEL: vfnmsub231ph ymm6 {k7}, ymm5, ymmword ptr [esp + 8*esi + 
268435456] +0x62,0xf6,0x55,0x2f,0xbe,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfnmsub231ph (%ecx){1to16}, %ymm5, %ymm6 +# INTEL: vfnmsub231ph ymm6, ymm5, word ptr [ecx]{1to16} +0x62,0xf6,0x55,0x38,0xbe,0x31 + +# ATT: vfnmsub231ph 4064(%ecx), %ymm5, %ymm6 +# INTEL: vfnmsub231ph ymm6, ymm5, ymmword ptr [ecx + 4064] +0x62,0xf6,0x55,0x28,0xbe,0x71,0x7f + +# ATT: vfnmsub231ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +# INTEL: vfnmsub231ph ymm6 {k7} {z}, ymm5, word ptr [edx - 256]{1to16} +0x62,0xf6,0x55,0xbf,0xbe,0x72,0x80 + +# ATT: vfnmsub231ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +# INTEL: vfnmsub231ph xmm6 {k7}, xmm5, xmmword ptr [esp + 8*esi + 268435456] +0x62,0xf6,0x55,0x0f,0xbe,0xb4,0xf4,0x00,0x00,0x00,0x10 + +# ATT: vfnmsub231ph (%ecx){1to8}, %xmm5, %xmm6 +# INTEL: vfnmsub231ph xmm6, xmm5, word ptr [ecx]{1to8} +0x62,0xf6,0x55,0x18,0xbe,0x31 + +# ATT: vfnmsub231ph 2032(%ecx), %xmm5, %xmm6 +# INTEL: vfnmsub231ph xmm6, xmm5, xmmword ptr [ecx + 2032] +0x62,0xf6,0x55,0x08,0xbe,0x71,0x7f + +# ATT: vfnmsub231ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +# INTEL: vfnmsub231ph xmm6 {k7} {z}, xmm5, word ptr [edx - 256]{1to8} +0x62,0xf6,0x55,0x9f,0xbe,0x72,0x80 diff --git a/llvm/test/MC/X86/avx512fp16.s b/llvm/test/MC/X86/avx512fp16.s index b358705fbedc8..6f3165d5994ad 100644 --- a/llvm/test/MC/X86/avx512fp16.s +++ b/llvm/test/MC/X86/avx512fp16.s @@ -1763,3 +1763,723 @@ // CHECK: vsqrtsh -256(%rdx), %xmm29, %xmm30 {%k7} {z} // CHECK: encoding: [0x62,0x65,0x16,0x87,0x51,0x72,0x80] vsqrtsh -256(%rdx), %xmm29, %xmm30 {%k7} {z} + +// CHECK: vfmadd132ph %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x40,0x98,0xf4] + vfmadd132ph %zmm28, %zmm29, %zmm30 + +// CHECK: vfmadd132ph {rn-sae}, %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x10,0x98,0xf4] + vfmadd132ph {rn-sae}, %zmm28, %zmm29, %zmm30 + +// CHECK: vfmadd132ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +// CHECK: encoding: [0x62,0x26,0x15,0x47,0x98,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmadd132ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} + +// CHECK: vfmadd132ph (%r9){1to32}, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x50,0x98,0x31] + vfmadd132ph (%r9){1to32}, %zmm29, %zmm30 + +// CHECK: vfmadd132ph 8128(%rcx), %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x40,0x98,0x71,0x7f] + vfmadd132ph 8128(%rcx), %zmm29, %zmm30 + +// CHECK: vfmadd132ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +// CHECK: encoding: [0x62,0x66,0x15,0xd7,0x98,0x72,0x80] + vfmadd132ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} + +// CHECK: vfmadd132sh %xmm28, %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x00,0x99,0xf4] + vfmadd132sh %xmm28, %xmm29, %xmm30 + +// CHECK: vfmadd132sh {rn-sae}, %xmm28, %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x10,0x99,0xf4] + vfmadd132sh {rn-sae}, %xmm28, %xmm29, %xmm30 + +// CHECK: vfmadd132sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} +// CHECK: encoding: [0x62,0x26,0x15,0x07,0x99,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmadd132sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} + +// CHECK: vfmadd132sh (%r9), %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x00,0x99,0x31] + vfmadd132sh (%r9), %xmm29, %xmm30 + +// CHECK: vfmadd132sh 254(%rcx), %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x00,0x99,0x71,0x7f] + vfmadd132sh 254(%rcx), %xmm29, %xmm30 + +// CHECK: vfmadd132sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} +// CHECK: encoding: [0x62,0x66,0x15,0x87,0x99,0x72,0x80] + vfmadd132sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} + +// CHECK: vfmadd213ph %zmm28, %zmm29, %zmm30 +// CHECK: 
encoding: [0x62,0x06,0x15,0x40,0xa8,0xf4] + vfmadd213ph %zmm28, %zmm29, %zmm30 + +// CHECK: vfmadd213ph {rn-sae}, %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x10,0xa8,0xf4] + vfmadd213ph {rn-sae}, %zmm28, %zmm29, %zmm30 + +// CHECK: vfmadd213ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +// CHECK: encoding: [0x62,0x26,0x15,0x47,0xa8,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmadd213ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} + +// CHECK: vfmadd213ph (%r9){1to32}, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x50,0xa8,0x31] + vfmadd213ph (%r9){1to32}, %zmm29, %zmm30 + +// CHECK: vfmadd213ph 8128(%rcx), %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x40,0xa8,0x71,0x7f] + vfmadd213ph 8128(%rcx), %zmm29, %zmm30 + +// CHECK: vfmadd213ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +// CHECK: encoding: [0x62,0x66,0x15,0xd7,0xa8,0x72,0x80] + vfmadd213ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} + +// CHECK: vfmadd213sh %xmm28, %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x00,0xa9,0xf4] + vfmadd213sh %xmm28, %xmm29, %xmm30 + +// CHECK: vfmadd213sh {rn-sae}, %xmm28, %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x10,0xa9,0xf4] + vfmadd213sh {rn-sae}, %xmm28, %xmm29, %xmm30 + +// CHECK: vfmadd213sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} +// CHECK: encoding: [0x62,0x26,0x15,0x07,0xa9,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmadd213sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} + +// CHECK: vfmadd213sh (%r9), %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x00,0xa9,0x31] + vfmadd213sh (%r9), %xmm29, %xmm30 + +// CHECK: vfmadd213sh 254(%rcx), %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x00,0xa9,0x71,0x7f] + vfmadd213sh 254(%rcx), %xmm29, %xmm30 + +// CHECK: vfmadd213sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} +// CHECK: encoding: [0x62,0x66,0x15,0x87,0xa9,0x72,0x80] + vfmadd213sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} + +// CHECK: vfmadd231ph %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x40,0xb8,0xf4] + vfmadd231ph %zmm28, %zmm29, %zmm30 + +// CHECK: vfmadd231ph {rn-sae}, %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x10,0xb8,0xf4] + vfmadd231ph {rn-sae}, %zmm28, %zmm29, %zmm30 + +// CHECK: vfmadd231ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +// CHECK: encoding: [0x62,0x26,0x15,0x47,0xb8,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmadd231ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} + +// CHECK: vfmadd231ph (%r9){1to32}, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x50,0xb8,0x31] + vfmadd231ph (%r9){1to32}, %zmm29, %zmm30 + +// CHECK: vfmadd231ph 8128(%rcx), %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x40,0xb8,0x71,0x7f] + vfmadd231ph 8128(%rcx), %zmm29, %zmm30 + +// CHECK: vfmadd231ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +// CHECK: encoding: [0x62,0x66,0x15,0xd7,0xb8,0x72,0x80] + vfmadd231ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} + +// CHECK: vfmadd231sh %xmm28, %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x00,0xb9,0xf4] + vfmadd231sh %xmm28, %xmm29, %xmm30 + +// CHECK: vfmadd231sh {rn-sae}, %xmm28, %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x10,0xb9,0xf4] + vfmadd231sh {rn-sae}, %xmm28, %xmm29, %xmm30 + +// CHECK: vfmadd231sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} +// CHECK: encoding: [0x62,0x26,0x15,0x07,0xb9,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmadd231sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} + +// CHECK: vfmadd231sh (%r9), %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x00,0xb9,0x31] + vfmadd231sh (%r9), %xmm29, %xmm30 + +// CHECK: vfmadd231sh 254(%rcx), 
%xmm29, %xmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x00,0xb9,0x71,0x7f] + vfmadd231sh 254(%rcx), %xmm29, %xmm30 + +// CHECK: vfmadd231sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} +// CHECK: encoding: [0x62,0x66,0x15,0x87,0xb9,0x72,0x80] + vfmadd231sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} + +// CHECK: vfmaddsub132ph %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x40,0x96,0xf4] + vfmaddsub132ph %zmm28, %zmm29, %zmm30 + +// CHECK: vfmaddsub132ph {rn-sae}, %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x10,0x96,0xf4] + vfmaddsub132ph {rn-sae}, %zmm28, %zmm29, %zmm30 + +// CHECK: vfmaddsub132ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +// CHECK: encoding: [0x62,0x26,0x15,0x47,0x96,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmaddsub132ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} + +// CHECK: vfmaddsub132ph (%r9){1to32}, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x50,0x96,0x31] + vfmaddsub132ph (%r9){1to32}, %zmm29, %zmm30 + +// CHECK: vfmaddsub132ph 8128(%rcx), %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x40,0x96,0x71,0x7f] + vfmaddsub132ph 8128(%rcx), %zmm29, %zmm30 + +// CHECK: vfmaddsub132ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +// CHECK: encoding: [0x62,0x66,0x15,0xd7,0x96,0x72,0x80] + vfmaddsub132ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} + +// CHECK: vfmaddsub213ph %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x40,0xa6,0xf4] + vfmaddsub213ph %zmm28, %zmm29, %zmm30 + +// CHECK: vfmaddsub213ph {rn-sae}, %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x10,0xa6,0xf4] + vfmaddsub213ph {rn-sae}, %zmm28, %zmm29, %zmm30 + +// CHECK: vfmaddsub213ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +// CHECK: encoding: [0x62,0x26,0x15,0x47,0xa6,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmaddsub213ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} + +// CHECK: vfmaddsub213ph (%r9){1to32}, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x50,0xa6,0x31] + vfmaddsub213ph (%r9){1to32}, %zmm29, %zmm30 + +// CHECK: vfmaddsub213ph 8128(%rcx), %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x40,0xa6,0x71,0x7f] + vfmaddsub213ph 8128(%rcx), %zmm29, %zmm30 + +// CHECK: vfmaddsub213ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +// CHECK: encoding: [0x62,0x66,0x15,0xd7,0xa6,0x72,0x80] + vfmaddsub213ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} + +// CHECK: vfmaddsub231ph %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x40,0xb6,0xf4] + vfmaddsub231ph %zmm28, %zmm29, %zmm30 + +// CHECK: vfmaddsub231ph {rn-sae}, %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x10,0xb6,0xf4] + vfmaddsub231ph {rn-sae}, %zmm28, %zmm29, %zmm30 + +// CHECK: vfmaddsub231ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +// CHECK: encoding: [0x62,0x26,0x15,0x47,0xb6,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmaddsub231ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} + +// CHECK: vfmaddsub231ph (%r9){1to32}, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x50,0xb6,0x31] + vfmaddsub231ph (%r9){1to32}, %zmm29, %zmm30 + +// CHECK: vfmaddsub231ph 8128(%rcx), %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x40,0xb6,0x71,0x7f] + vfmaddsub231ph 8128(%rcx), %zmm29, %zmm30 + +// CHECK: vfmaddsub231ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +// CHECK: encoding: [0x62,0x66,0x15,0xd7,0xb6,0x72,0x80] + vfmaddsub231ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} + +// CHECK: vfmsub132ph %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x40,0x9a,0xf4] + vfmsub132ph %zmm28, %zmm29, %zmm30 + +// CHECK: vfmsub132ph {rn-sae}, %zmm28, %zmm29, %zmm30 
+// CHECK: encoding: [0x62,0x06,0x15,0x10,0x9a,0xf4] + vfmsub132ph {rn-sae}, %zmm28, %zmm29, %zmm30 + +// CHECK: vfmsub132ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +// CHECK: encoding: [0x62,0x26,0x15,0x47,0x9a,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmsub132ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} + +// CHECK: vfmsub132ph (%r9){1to32}, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x50,0x9a,0x31] + vfmsub132ph (%r9){1to32}, %zmm29, %zmm30 + +// CHECK: vfmsub132ph 8128(%rcx), %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x40,0x9a,0x71,0x7f] + vfmsub132ph 8128(%rcx), %zmm29, %zmm30 + +// CHECK: vfmsub132ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +// CHECK: encoding: [0x62,0x66,0x15,0xd7,0x9a,0x72,0x80] + vfmsub132ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} + +// CHECK: vfmsub132sh %xmm28, %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x00,0x9b,0xf4] + vfmsub132sh %xmm28, %xmm29, %xmm30 + +// CHECK: vfmsub132sh {rn-sae}, %xmm28, %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x10,0x9b,0xf4] + vfmsub132sh {rn-sae}, %xmm28, %xmm29, %xmm30 + +// CHECK: vfmsub132sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} +// CHECK: encoding: [0x62,0x26,0x15,0x07,0x9b,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmsub132sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} + +// CHECK: vfmsub132sh (%r9), %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x00,0x9b,0x31] + vfmsub132sh (%r9), %xmm29, %xmm30 + +// CHECK: vfmsub132sh 254(%rcx), %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x00,0x9b,0x71,0x7f] + vfmsub132sh 254(%rcx), %xmm29, %xmm30 + +// CHECK: vfmsub132sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} +// CHECK: encoding: [0x62,0x66,0x15,0x87,0x9b,0x72,0x80] + vfmsub132sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} + +// CHECK: vfmsub213ph %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x40,0xaa,0xf4] + vfmsub213ph %zmm28, %zmm29, %zmm30 + +// CHECK: vfmsub213ph {rn-sae}, %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x10,0xaa,0xf4] + vfmsub213ph {rn-sae}, %zmm28, %zmm29, %zmm30 + +// CHECK: vfmsub213ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +// CHECK: encoding: [0x62,0x26,0x15,0x47,0xaa,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmsub213ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} + +// CHECK: vfmsub213ph (%r9){1to32}, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x50,0xaa,0x31] + vfmsub213ph (%r9){1to32}, %zmm29, %zmm30 + +// CHECK: vfmsub213ph 8128(%rcx), %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x40,0xaa,0x71,0x7f] + vfmsub213ph 8128(%rcx), %zmm29, %zmm30 + +// CHECK: vfmsub213ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +// CHECK: encoding: [0x62,0x66,0x15,0xd7,0xaa,0x72,0x80] + vfmsub213ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} + +// CHECK: vfmsub213sh %xmm28, %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x00,0xab,0xf4] + vfmsub213sh %xmm28, %xmm29, %xmm30 + +// CHECK: vfmsub213sh {rn-sae}, %xmm28, %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x10,0xab,0xf4] + vfmsub213sh {rn-sae}, %xmm28, %xmm29, %xmm30 + +// CHECK: vfmsub213sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} +// CHECK: encoding: [0x62,0x26,0x15,0x07,0xab,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmsub213sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} + +// CHECK: vfmsub213sh (%r9), %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x00,0xab,0x31] + vfmsub213sh (%r9), %xmm29, %xmm30 + +// CHECK: vfmsub213sh 254(%rcx), %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x00,0xab,0x71,0x7f] + vfmsub213sh 254(%rcx), %xmm29, %xmm30 + +// CHECK: vfmsub213sh 
-256(%rdx), %xmm29, %xmm30 {%k7} {z} +// CHECK: encoding: [0x62,0x66,0x15,0x87,0xab,0x72,0x80] + vfmsub213sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} + +// CHECK: vfmsub231ph %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x40,0xba,0xf4] + vfmsub231ph %zmm28, %zmm29, %zmm30 + +// CHECK: vfmsub231ph {rn-sae}, %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x10,0xba,0xf4] + vfmsub231ph {rn-sae}, %zmm28, %zmm29, %zmm30 + +// CHECK: vfmsub231ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +// CHECK: encoding: [0x62,0x26,0x15,0x47,0xba,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmsub231ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} + +// CHECK: vfmsub231ph (%r9){1to32}, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x50,0xba,0x31] + vfmsub231ph (%r9){1to32}, %zmm29, %zmm30 + +// CHECK: vfmsub231ph 8128(%rcx), %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x40,0xba,0x71,0x7f] + vfmsub231ph 8128(%rcx), %zmm29, %zmm30 + +// CHECK: vfmsub231ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +// CHECK: encoding: [0x62,0x66,0x15,0xd7,0xba,0x72,0x80] + vfmsub231ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} + +// CHECK: vfmsub231sh %xmm28, %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x00,0xbb,0xf4] + vfmsub231sh %xmm28, %xmm29, %xmm30 + +// CHECK: vfmsub231sh {rn-sae}, %xmm28, %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x10,0xbb,0xf4] + vfmsub231sh {rn-sae}, %xmm28, %xmm29, %xmm30 + +// CHECK: vfmsub231sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} +// CHECK: encoding: [0x62,0x26,0x15,0x07,0xbb,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmsub231sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} + +// CHECK: vfmsub231sh (%r9), %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x00,0xbb,0x31] + vfmsub231sh (%r9), %xmm29, %xmm30 + +// CHECK: vfmsub231sh 254(%rcx), %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x00,0xbb,0x71,0x7f] + vfmsub231sh 254(%rcx), %xmm29, %xmm30 + +// CHECK: vfmsub231sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} +// CHECK: encoding: [0x62,0x66,0x15,0x87,0xbb,0x72,0x80] + vfmsub231sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} + +// CHECK: vfmsubadd132ph %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x40,0x97,0xf4] + vfmsubadd132ph %zmm28, %zmm29, %zmm30 + +// CHECK: vfmsubadd132ph {rn-sae}, %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x10,0x97,0xf4] + vfmsubadd132ph {rn-sae}, %zmm28, %zmm29, %zmm30 + +// CHECK: vfmsubadd132ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +// CHECK: encoding: [0x62,0x26,0x15,0x47,0x97,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmsubadd132ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} + +// CHECK: vfmsubadd132ph (%r9){1to32}, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x50,0x97,0x31] + vfmsubadd132ph (%r9){1to32}, %zmm29, %zmm30 + +// CHECK: vfmsubadd132ph 8128(%rcx), %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x40,0x97,0x71,0x7f] + vfmsubadd132ph 8128(%rcx), %zmm29, %zmm30 + +// CHECK: vfmsubadd132ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +// CHECK: encoding: [0x62,0x66,0x15,0xd7,0x97,0x72,0x80] + vfmsubadd132ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} + +// CHECK: vfmsubadd213ph %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x40,0xa7,0xf4] + vfmsubadd213ph %zmm28, %zmm29, %zmm30 + +// CHECK: vfmsubadd213ph {rn-sae}, %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x10,0xa7,0xf4] + vfmsubadd213ph {rn-sae}, %zmm28, %zmm29, %zmm30 + +// CHECK: vfmsubadd213ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +// CHECK: encoding: 
[0x62,0x26,0x15,0x47,0xa7,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmsubadd213ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} + +// CHECK: vfmsubadd213ph (%r9){1to32}, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x50,0xa7,0x31] + vfmsubadd213ph (%r9){1to32}, %zmm29, %zmm30 + +// CHECK: vfmsubadd213ph 8128(%rcx), %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x40,0xa7,0x71,0x7f] + vfmsubadd213ph 8128(%rcx), %zmm29, %zmm30 + +// CHECK: vfmsubadd213ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +// CHECK: encoding: [0x62,0x66,0x15,0xd7,0xa7,0x72,0x80] + vfmsubadd213ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} + +// CHECK: vfmsubadd231ph %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x40,0xb7,0xf4] + vfmsubadd231ph %zmm28, %zmm29, %zmm30 + +// CHECK: vfmsubadd231ph {rn-sae}, %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x10,0xb7,0xf4] + vfmsubadd231ph {rn-sae}, %zmm28, %zmm29, %zmm30 + +// CHECK: vfmsubadd231ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +// CHECK: encoding: [0x62,0x26,0x15,0x47,0xb7,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmsubadd231ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} + +// CHECK: vfmsubadd231ph (%r9){1to32}, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x50,0xb7,0x31] + vfmsubadd231ph (%r9){1to32}, %zmm29, %zmm30 + +// CHECK: vfmsubadd231ph 8128(%rcx), %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x40,0xb7,0x71,0x7f] + vfmsubadd231ph 8128(%rcx), %zmm29, %zmm30 + +// CHECK: vfmsubadd231ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +// CHECK: encoding: [0x62,0x66,0x15,0xd7,0xb7,0x72,0x80] + vfmsubadd231ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} + +// CHECK: vfnmadd132ph %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x40,0x9c,0xf4] + vfnmadd132ph %zmm28, %zmm29, %zmm30 + +// CHECK: vfnmadd132ph {rn-sae}, %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x10,0x9c,0xf4] + vfnmadd132ph {rn-sae}, %zmm28, %zmm29, %zmm30 + +// CHECK: vfnmadd132ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +// CHECK: encoding: [0x62,0x26,0x15,0x47,0x9c,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfnmadd132ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} + +// CHECK: vfnmadd132ph (%r9){1to32}, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x50,0x9c,0x31] + vfnmadd132ph (%r9){1to32}, %zmm29, %zmm30 + +// CHECK: vfnmadd132ph 8128(%rcx), %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x40,0x9c,0x71,0x7f] + vfnmadd132ph 8128(%rcx), %zmm29, %zmm30 + +// CHECK: vfnmadd132ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +// CHECK: encoding: [0x62,0x66,0x15,0xd7,0x9c,0x72,0x80] + vfnmadd132ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} + +// CHECK: vfnmadd132sh %xmm28, %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x00,0x9d,0xf4] + vfnmadd132sh %xmm28, %xmm29, %xmm30 + +// CHECK: vfnmadd132sh {rn-sae}, %xmm28, %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x10,0x9d,0xf4] + vfnmadd132sh {rn-sae}, %xmm28, %xmm29, %xmm30 + +// CHECK: vfnmadd132sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} +// CHECK: encoding: [0x62,0x26,0x15,0x07,0x9d,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfnmadd132sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} + +// CHECK: vfnmadd132sh (%r9), %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x00,0x9d,0x31] + vfnmadd132sh (%r9), %xmm29, %xmm30 + +// CHECK: vfnmadd132sh 254(%rcx), %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x00,0x9d,0x71,0x7f] + vfnmadd132sh 254(%rcx), %xmm29, %xmm30 + +// CHECK: vfnmadd132sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} +// CHECK: encoding: 
[0x62,0x66,0x15,0x87,0x9d,0x72,0x80] + vfnmadd132sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} + +// CHECK: vfnmadd213ph %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x40,0xac,0xf4] + vfnmadd213ph %zmm28, %zmm29, %zmm30 + +// CHECK: vfnmadd213ph {rn-sae}, %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x10,0xac,0xf4] + vfnmadd213ph {rn-sae}, %zmm28, %zmm29, %zmm30 + +// CHECK: vfnmadd213ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +// CHECK: encoding: [0x62,0x26,0x15,0x47,0xac,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfnmadd213ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} + +// CHECK: vfnmadd213ph (%r9){1to32}, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x50,0xac,0x31] + vfnmadd213ph (%r9){1to32}, %zmm29, %zmm30 + +// CHECK: vfnmadd213ph 8128(%rcx), %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x40,0xac,0x71,0x7f] + vfnmadd213ph 8128(%rcx), %zmm29, %zmm30 + +// CHECK: vfnmadd213ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +// CHECK: encoding: [0x62,0x66,0x15,0xd7,0xac,0x72,0x80] + vfnmadd213ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} + +// CHECK: vfnmadd213sh %xmm28, %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x00,0xad,0xf4] + vfnmadd213sh %xmm28, %xmm29, %xmm30 + +// CHECK: vfnmadd213sh {rn-sae}, %xmm28, %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x10,0xad,0xf4] + vfnmadd213sh {rn-sae}, %xmm28, %xmm29, %xmm30 + +// CHECK: vfnmadd213sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} +// CHECK: encoding: [0x62,0x26,0x15,0x07,0xad,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfnmadd213sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} + +// CHECK: vfnmadd213sh (%r9), %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x00,0xad,0x31] + vfnmadd213sh (%r9), %xmm29, %xmm30 + +// CHECK: vfnmadd213sh 254(%rcx), %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x00,0xad,0x71,0x7f] + vfnmadd213sh 254(%rcx), %xmm29, %xmm30 + +// CHECK: vfnmadd213sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} +// CHECK: encoding: [0x62,0x66,0x15,0x87,0xad,0x72,0x80] + vfnmadd213sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} + +// CHECK: vfnmadd231ph %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x40,0xbc,0xf4] + vfnmadd231ph %zmm28, %zmm29, %zmm30 + +// CHECK: vfnmadd231ph {rn-sae}, %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x10,0xbc,0xf4] + vfnmadd231ph {rn-sae}, %zmm28, %zmm29, %zmm30 + +// CHECK: vfnmadd231ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +// CHECK: encoding: [0x62,0x26,0x15,0x47,0xbc,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfnmadd231ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} + +// CHECK: vfnmadd231ph (%r9){1to32}, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x50,0xbc,0x31] + vfnmadd231ph (%r9){1to32}, %zmm29, %zmm30 + +// CHECK: vfnmadd231ph 8128(%rcx), %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x40,0xbc,0x71,0x7f] + vfnmadd231ph 8128(%rcx), %zmm29, %zmm30 + +// CHECK: vfnmadd231ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +// CHECK: encoding: [0x62,0x66,0x15,0xd7,0xbc,0x72,0x80] + vfnmadd231ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} + +// CHECK: vfnmadd231sh %xmm28, %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x00,0xbd,0xf4] + vfnmadd231sh %xmm28, %xmm29, %xmm30 + +// CHECK: vfnmadd231sh {rn-sae}, %xmm28, %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x10,0xbd,0xf4] + vfnmadd231sh {rn-sae}, %xmm28, %xmm29, %xmm30 + +// CHECK: vfnmadd231sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} +// CHECK: encoding: [0x62,0x26,0x15,0x07,0xbd,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfnmadd231sh 268435456(%rbp,%r14,8), 
%xmm29, %xmm30 {%k7} + +// CHECK: vfnmadd231sh (%r9), %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x00,0xbd,0x31] + vfnmadd231sh (%r9), %xmm29, %xmm30 + +// CHECK: vfnmadd231sh 254(%rcx), %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x00,0xbd,0x71,0x7f] + vfnmadd231sh 254(%rcx), %xmm29, %xmm30 + +// CHECK: vfnmadd231sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} +// CHECK: encoding: [0x62,0x66,0x15,0x87,0xbd,0x72,0x80] + vfnmadd231sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} + +// CHECK: vfnmsub132ph %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x40,0x9e,0xf4] + vfnmsub132ph %zmm28, %zmm29, %zmm30 + +// CHECK: vfnmsub132ph {rn-sae}, %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x10,0x9e,0xf4] + vfnmsub132ph {rn-sae}, %zmm28, %zmm29, %zmm30 + +// CHECK: vfnmsub132ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +// CHECK: encoding: [0x62,0x26,0x15,0x47,0x9e,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfnmsub132ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} + +// CHECK: vfnmsub132ph (%r9){1to32}, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x50,0x9e,0x31] + vfnmsub132ph (%r9){1to32}, %zmm29, %zmm30 + +// CHECK: vfnmsub132ph 8128(%rcx), %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x40,0x9e,0x71,0x7f] + vfnmsub132ph 8128(%rcx), %zmm29, %zmm30 + +// CHECK: vfnmsub132ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +// CHECK: encoding: [0x62,0x66,0x15,0xd7,0x9e,0x72,0x80] + vfnmsub132ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} + +// CHECK: vfnmsub132sh %xmm28, %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x00,0x9f,0xf4] + vfnmsub132sh %xmm28, %xmm29, %xmm30 + +// CHECK: vfnmsub132sh {rn-sae}, %xmm28, %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x10,0x9f,0xf4] + vfnmsub132sh {rn-sae}, %xmm28, %xmm29, %xmm30 + +// CHECK: vfnmsub132sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} +// CHECK: encoding: [0x62,0x26,0x15,0x07,0x9f,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfnmsub132sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} + +// CHECK: vfnmsub132sh (%r9), %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x00,0x9f,0x31] + vfnmsub132sh (%r9), %xmm29, %xmm30 + +// CHECK: vfnmsub132sh 254(%rcx), %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x00,0x9f,0x71,0x7f] + vfnmsub132sh 254(%rcx), %xmm29, %xmm30 + +// CHECK: vfnmsub132sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} +// CHECK: encoding: [0x62,0x66,0x15,0x87,0x9f,0x72,0x80] + vfnmsub132sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} + +// CHECK: vfnmsub213ph %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x40,0xae,0xf4] + vfnmsub213ph %zmm28, %zmm29, %zmm30 + +// CHECK: vfnmsub213ph {rn-sae}, %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x10,0xae,0xf4] + vfnmsub213ph {rn-sae}, %zmm28, %zmm29, %zmm30 + +// CHECK: vfnmsub213ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +// CHECK: encoding: [0x62,0x26,0x15,0x47,0xae,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfnmsub213ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} + +// CHECK: vfnmsub213ph (%r9){1to32}, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x50,0xae,0x31] + vfnmsub213ph (%r9){1to32}, %zmm29, %zmm30 + +// CHECK: vfnmsub213ph 8128(%rcx), %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x40,0xae,0x71,0x7f] + vfnmsub213ph 8128(%rcx), %zmm29, %zmm30 + +// CHECK: vfnmsub213ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +// CHECK: encoding: [0x62,0x66,0x15,0xd7,0xae,0x72,0x80] + vfnmsub213ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} + +// CHECK: vfnmsub213sh %xmm28, %xmm29, %xmm30 +// CHECK: encoding: 
[0x62,0x06,0x15,0x00,0xaf,0xf4] + vfnmsub213sh %xmm28, %xmm29, %xmm30 + +// CHECK: vfnmsub213sh {rn-sae}, %xmm28, %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x10,0xaf,0xf4] + vfnmsub213sh {rn-sae}, %xmm28, %xmm29, %xmm30 + +// CHECK: vfnmsub213sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} +// CHECK: encoding: [0x62,0x26,0x15,0x07,0xaf,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfnmsub213sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} + +// CHECK: vfnmsub213sh (%r9), %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x00,0xaf,0x31] + vfnmsub213sh (%r9), %xmm29, %xmm30 + +// CHECK: vfnmsub213sh 254(%rcx), %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x00,0xaf,0x71,0x7f] + vfnmsub213sh 254(%rcx), %xmm29, %xmm30 + +// CHECK: vfnmsub213sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} +// CHECK: encoding: [0x62,0x66,0x15,0x87,0xaf,0x72,0x80] + vfnmsub213sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} + +// CHECK: vfnmsub231ph %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x40,0xbe,0xf4] + vfnmsub231ph %zmm28, %zmm29, %zmm30 + +// CHECK: vfnmsub231ph {rn-sae}, %zmm28, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x10,0xbe,0xf4] + vfnmsub231ph {rn-sae}, %zmm28, %zmm29, %zmm30 + +// CHECK: vfnmsub231ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} +// CHECK: encoding: [0x62,0x26,0x15,0x47,0xbe,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfnmsub231ph 268435456(%rbp,%r14,8), %zmm29, %zmm30 {%k7} + +// CHECK: vfnmsub231ph (%r9){1to32}, %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x50,0xbe,0x31] + vfnmsub231ph (%r9){1to32}, %zmm29, %zmm30 + +// CHECK: vfnmsub231ph 8128(%rcx), %zmm29, %zmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x40,0xbe,0x71,0x7f] + vfnmsub231ph 8128(%rcx), %zmm29, %zmm30 + +// CHECK: vfnmsub231ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} +// CHECK: encoding: [0x62,0x66,0x15,0xd7,0xbe,0x72,0x80] + vfnmsub231ph -256(%rdx){1to32}, %zmm29, %zmm30 {%k7} {z} + +// CHECK: vfnmsub231sh %xmm28, %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x00,0xbf,0xf4] + vfnmsub231sh %xmm28, %xmm29, %xmm30 + +// CHECK: vfnmsub231sh {rn-sae}, %xmm28, %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x06,0x15,0x10,0xbf,0xf4] + vfnmsub231sh {rn-sae}, %xmm28, %xmm29, %xmm30 + +// CHECK: vfnmsub231sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} +// CHECK: encoding: [0x62,0x26,0x15,0x07,0xbf,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfnmsub231sh 268435456(%rbp,%r14,8), %xmm29, %xmm30 {%k7} + +// CHECK: vfnmsub231sh (%r9), %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x46,0x15,0x00,0xbf,0x31] + vfnmsub231sh (%r9), %xmm29, %xmm30 + +// CHECK: vfnmsub231sh 254(%rcx), %xmm29, %xmm30 +// CHECK: encoding: [0x62,0x66,0x15,0x00,0xbf,0x71,0x7f] + vfnmsub231sh 254(%rcx), %xmm29, %xmm30 + +// CHECK: vfnmsub231sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} +// CHECK: encoding: [0x62,0x66,0x15,0x87,0xbf,0x72,0x80] + vfnmsub231sh -256(%rdx), %xmm29, %xmm30 {%k7} {z} diff --git a/llvm/test/MC/X86/avx512fp16vl.s b/llvm/test/MC/X86/avx512fp16vl.s index 91c45a56a2e8a..a3f888e045393 100644 --- a/llvm/test/MC/X86/avx512fp16vl.s +++ b/llvm/test/MC/X86/avx512fp16vl.s @@ -1491,3 +1491,723 @@ // CHECK: vsqrtph -256(%edx){1to16}, %ymm6 {%k7} {z} // CHECK: encoding: [0x62,0xf5,0x7c,0xbf,0x51,0x72,0x80] vsqrtph -256(%edx){1to16}, %ymm6 {%k7} {z} + +// CHECK: vfmadd132ph %ymm4, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0x98,0xf4] + vfmadd132ph %ymm4, %ymm5, %ymm6 + +// CHECK: vfmadd132ph %xmm4, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0x98,0xf4] + vfmadd132ph %xmm4, %xmm5, %xmm6 + +// CHECK: vfmadd132ph 
268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x2f,0x98,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmadd132ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} + +// CHECK: vfmadd132ph (%ecx){1to16}, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x38,0x98,0x31] + vfmadd132ph (%ecx){1to16}, %ymm5, %ymm6 + +// CHECK: vfmadd132ph 4064(%ecx), %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0x98,0x71,0x7f] + vfmadd132ph 4064(%ecx), %ymm5, %ymm6 + +// CHECK: vfmadd132ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0xbf,0x98,0x72,0x80] + vfmadd132ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} + +// CHECK: vfmadd132ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x0f,0x98,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmadd132ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} + +// CHECK: vfmadd132ph (%ecx){1to8}, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0x98,0x31] + vfmadd132ph (%ecx){1to8}, %xmm5, %xmm6 + +// CHECK: vfmadd132ph 2032(%ecx), %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0x98,0x71,0x7f] + vfmadd132ph 2032(%ecx), %xmm5, %xmm6 + +// CHECK: vfmadd132ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0x9f,0x98,0x72,0x80] + vfmadd132ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} + +// CHECK: vfmadd213ph %ymm4, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0xa8,0xf4] + vfmadd213ph %ymm4, %ymm5, %ymm6 + +// CHECK: vfmadd213ph %xmm4, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xa8,0xf4] + vfmadd213ph %xmm4, %xmm5, %xmm6 + +// CHECK: vfmadd213ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x2f,0xa8,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmadd213ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} + +// CHECK: vfmadd213ph (%ecx){1to16}, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x38,0xa8,0x31] + vfmadd213ph (%ecx){1to16}, %ymm5, %ymm6 + +// CHECK: vfmadd213ph 4064(%ecx), %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0xa8,0x71,0x7f] + vfmadd213ph 4064(%ecx), %ymm5, %ymm6 + +// CHECK: vfmadd213ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0xbf,0xa8,0x72,0x80] + vfmadd213ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} + +// CHECK: vfmadd213ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x0f,0xa8,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmadd213ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} + +// CHECK: vfmadd213ph (%ecx){1to8}, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xa8,0x31] + vfmadd213ph (%ecx){1to8}, %xmm5, %xmm6 + +// CHECK: vfmadd213ph 2032(%ecx), %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xa8,0x71,0x7f] + vfmadd213ph 2032(%ecx), %xmm5, %xmm6 + +// CHECK: vfmadd213ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0x9f,0xa8,0x72,0x80] + vfmadd213ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} + +// CHECK: vfmadd231ph %ymm4, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0xb8,0xf4] + vfmadd231ph %ymm4, %ymm5, %ymm6 + +// CHECK: vfmadd231ph %xmm4, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xb8,0xf4] + vfmadd231ph %xmm4, %xmm5, %xmm6 + +// CHECK: vfmadd231ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x2f,0xb8,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmadd231ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} + +// CHECK: vfmadd231ph (%ecx){1to16}, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x38,0xb8,0x31] + vfmadd231ph (%ecx){1to16}, 
%ymm5, %ymm6 + +// CHECK: vfmadd231ph 4064(%ecx), %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0xb8,0x71,0x7f] + vfmadd231ph 4064(%ecx), %ymm5, %ymm6 + +// CHECK: vfmadd231ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0xbf,0xb8,0x72,0x80] + vfmadd231ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} + +// CHECK: vfmadd231ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x0f,0xb8,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmadd231ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} + +// CHECK: vfmadd231ph (%ecx){1to8}, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xb8,0x31] + vfmadd231ph (%ecx){1to8}, %xmm5, %xmm6 + +// CHECK: vfmadd231ph 2032(%ecx), %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xb8,0x71,0x7f] + vfmadd231ph 2032(%ecx), %xmm5, %xmm6 + +// CHECK: vfmadd231ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0x9f,0xb8,0x72,0x80] + vfmadd231ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} + +// CHECK: vfmaddsub132ph %ymm4, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0x96,0xf4] + vfmaddsub132ph %ymm4, %ymm5, %ymm6 + +// CHECK: vfmaddsub132ph %xmm4, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0x96,0xf4] + vfmaddsub132ph %xmm4, %xmm5, %xmm6 + +// CHECK: vfmaddsub132ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x2f,0x96,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmaddsub132ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} + +// CHECK: vfmaddsub132ph (%ecx){1to16}, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x38,0x96,0x31] + vfmaddsub132ph (%ecx){1to16}, %ymm5, %ymm6 + +// CHECK: vfmaddsub132ph 4064(%ecx), %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0x96,0x71,0x7f] + vfmaddsub132ph 4064(%ecx), %ymm5, %ymm6 + +// CHECK: vfmaddsub132ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0xbf,0x96,0x72,0x80] + vfmaddsub132ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} + +// CHECK: vfmaddsub132ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x0f,0x96,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmaddsub132ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} + +// CHECK: vfmaddsub132ph (%ecx){1to8}, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0x96,0x31] + vfmaddsub132ph (%ecx){1to8}, %xmm5, %xmm6 + +// CHECK: vfmaddsub132ph 2032(%ecx), %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0x96,0x71,0x7f] + vfmaddsub132ph 2032(%ecx), %xmm5, %xmm6 + +// CHECK: vfmaddsub132ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0x9f,0x96,0x72,0x80] + vfmaddsub132ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} + +// CHECK: vfmaddsub213ph %ymm4, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0xa6,0xf4] + vfmaddsub213ph %ymm4, %ymm5, %ymm6 + +// CHECK: vfmaddsub213ph %xmm4, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xa6,0xf4] + vfmaddsub213ph %xmm4, %xmm5, %xmm6 + +// CHECK: vfmaddsub213ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x2f,0xa6,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmaddsub213ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} + +// CHECK: vfmaddsub213ph (%ecx){1to16}, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x38,0xa6,0x31] + vfmaddsub213ph (%ecx){1to16}, %ymm5, %ymm6 + +// CHECK: vfmaddsub213ph 4064(%ecx), %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0xa6,0x71,0x7f] + vfmaddsub213ph 4064(%ecx), %ymm5, %ymm6 + +// CHECK: vfmaddsub213ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} 
{z} +// CHECK: encoding: [0x62,0xf6,0x55,0xbf,0xa6,0x72,0x80] + vfmaddsub213ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} + +// CHECK: vfmaddsub213ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x0f,0xa6,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmaddsub213ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} + +// CHECK: vfmaddsub213ph (%ecx){1to8}, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xa6,0x31] + vfmaddsub213ph (%ecx){1to8}, %xmm5, %xmm6 + +// CHECK: vfmaddsub213ph 2032(%ecx), %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xa6,0x71,0x7f] + vfmaddsub213ph 2032(%ecx), %xmm5, %xmm6 + +// CHECK: vfmaddsub213ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0x9f,0xa6,0x72,0x80] + vfmaddsub213ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} + +// CHECK: vfmaddsub231ph %ymm4, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0xb6,0xf4] + vfmaddsub231ph %ymm4, %ymm5, %ymm6 + +// CHECK: vfmaddsub231ph %xmm4, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xb6,0xf4] + vfmaddsub231ph %xmm4, %xmm5, %xmm6 + +// CHECK: vfmaddsub231ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x2f,0xb6,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmaddsub231ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} + +// CHECK: vfmaddsub231ph (%ecx){1to16}, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x38,0xb6,0x31] + vfmaddsub231ph (%ecx){1to16}, %ymm5, %ymm6 + +// CHECK: vfmaddsub231ph 4064(%ecx), %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0xb6,0x71,0x7f] + vfmaddsub231ph 4064(%ecx), %ymm5, %ymm6 + +// CHECK: vfmaddsub231ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0xbf,0xb6,0x72,0x80] + vfmaddsub231ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} + +// CHECK: vfmaddsub231ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x0f,0xb6,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmaddsub231ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} + +// CHECK: vfmaddsub231ph (%ecx){1to8}, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xb6,0x31] + vfmaddsub231ph (%ecx){1to8}, %xmm5, %xmm6 + +// CHECK: vfmaddsub231ph 2032(%ecx), %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xb6,0x71,0x7f] + vfmaddsub231ph 2032(%ecx), %xmm5, %xmm6 + +// CHECK: vfmaddsub231ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0x9f,0xb6,0x72,0x80] + vfmaddsub231ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} + +// CHECK: vfmsub132ph %ymm4, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0x9a,0xf4] + vfmsub132ph %ymm4, %ymm5, %ymm6 + +// CHECK: vfmsub132ph %xmm4, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0x9a,0xf4] + vfmsub132ph %xmm4, %xmm5, %xmm6 + +// CHECK: vfmsub132ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x2f,0x9a,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmsub132ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} + +// CHECK: vfmsub132ph (%ecx){1to16}, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x38,0x9a,0x31] + vfmsub132ph (%ecx){1to16}, %ymm5, %ymm6 + +// CHECK: vfmsub132ph 4064(%ecx), %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0x9a,0x71,0x7f] + vfmsub132ph 4064(%ecx), %ymm5, %ymm6 + +// CHECK: vfmsub132ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0xbf,0x9a,0x72,0x80] + vfmsub132ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} + +// CHECK: vfmsub132ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +// CHECK: encoding: 
[0x62,0xf6,0x55,0x0f,0x9a,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmsub132ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} + +// CHECK: vfmsub132ph (%ecx){1to8}, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0x9a,0x31] + vfmsub132ph (%ecx){1to8}, %xmm5, %xmm6 + +// CHECK: vfmsub132ph 2032(%ecx), %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0x9a,0x71,0x7f] + vfmsub132ph 2032(%ecx), %xmm5, %xmm6 + +// CHECK: vfmsub132ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0x9f,0x9a,0x72,0x80] + vfmsub132ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} + +// CHECK: vfmsub213ph %ymm4, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0xaa,0xf4] + vfmsub213ph %ymm4, %ymm5, %ymm6 + +// CHECK: vfmsub213ph %xmm4, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xaa,0xf4] + vfmsub213ph %xmm4, %xmm5, %xmm6 + +// CHECK: vfmsub213ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x2f,0xaa,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmsub213ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} + +// CHECK: vfmsub213ph (%ecx){1to16}, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x38,0xaa,0x31] + vfmsub213ph (%ecx){1to16}, %ymm5, %ymm6 + +// CHECK: vfmsub213ph 4064(%ecx), %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0xaa,0x71,0x7f] + vfmsub213ph 4064(%ecx), %ymm5, %ymm6 + +// CHECK: vfmsub213ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0xbf,0xaa,0x72,0x80] + vfmsub213ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} + +// CHECK: vfmsub213ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x0f,0xaa,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmsub213ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} + +// CHECK: vfmsub213ph (%ecx){1to8}, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xaa,0x31] + vfmsub213ph (%ecx){1to8}, %xmm5, %xmm6 + +// CHECK: vfmsub213ph 2032(%ecx), %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xaa,0x71,0x7f] + vfmsub213ph 2032(%ecx), %xmm5, %xmm6 + +// CHECK: vfmsub213ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0x9f,0xaa,0x72,0x80] + vfmsub213ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} + +// CHECK: vfmsub231ph %ymm4, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0xba,0xf4] + vfmsub231ph %ymm4, %ymm5, %ymm6 + +// CHECK: vfmsub231ph %xmm4, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xba,0xf4] + vfmsub231ph %xmm4, %xmm5, %xmm6 + +// CHECK: vfmsub231ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x2f,0xba,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmsub231ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} + +// CHECK: vfmsub231ph (%ecx){1to16}, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x38,0xba,0x31] + vfmsub231ph (%ecx){1to16}, %ymm5, %ymm6 + +// CHECK: vfmsub231ph 4064(%ecx), %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0xba,0x71,0x7f] + vfmsub231ph 4064(%ecx), %ymm5, %ymm6 + +// CHECK: vfmsub231ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0xbf,0xba,0x72,0x80] + vfmsub231ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} + +// CHECK: vfmsub231ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x0f,0xba,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmsub231ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} + +// CHECK: vfmsub231ph (%ecx){1to8}, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xba,0x31] + vfmsub231ph (%ecx){1to8}, %xmm5, %xmm6 + +// CHECK: vfmsub231ph 2032(%ecx), %xmm5, %xmm6 +// 
CHECK: encoding: [0x62,0xf6,0x55,0x08,0xba,0x71,0x7f] + vfmsub231ph 2032(%ecx), %xmm5, %xmm6 + +// CHECK: vfmsub231ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0x9f,0xba,0x72,0x80] + vfmsub231ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} + +// CHECK: vfmsubadd132ph %ymm4, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0x97,0xf4] + vfmsubadd132ph %ymm4, %ymm5, %ymm6 + +// CHECK: vfmsubadd132ph %xmm4, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0x97,0xf4] + vfmsubadd132ph %xmm4, %xmm5, %xmm6 + +// CHECK: vfmsubadd132ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x2f,0x97,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmsubadd132ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} + +// CHECK: vfmsubadd132ph (%ecx){1to16}, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x38,0x97,0x31] + vfmsubadd132ph (%ecx){1to16}, %ymm5, %ymm6 + +// CHECK: vfmsubadd132ph 4064(%ecx), %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0x97,0x71,0x7f] + vfmsubadd132ph 4064(%ecx), %ymm5, %ymm6 + +// CHECK: vfmsubadd132ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0xbf,0x97,0x72,0x80] + vfmsubadd132ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} + +// CHECK: vfmsubadd132ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x0f,0x97,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmsubadd132ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} + +// CHECK: vfmsubadd132ph (%ecx){1to8}, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0x97,0x31] + vfmsubadd132ph (%ecx){1to8}, %xmm5, %xmm6 + +// CHECK: vfmsubadd132ph 2032(%ecx), %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0x97,0x71,0x7f] + vfmsubadd132ph 2032(%ecx), %xmm5, %xmm6 + +// CHECK: vfmsubadd132ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0x9f,0x97,0x72,0x80] + vfmsubadd132ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} + +// CHECK: vfmsubadd213ph %ymm4, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0xa7,0xf4] + vfmsubadd213ph %ymm4, %ymm5, %ymm6 + +// CHECK: vfmsubadd213ph %xmm4, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xa7,0xf4] + vfmsubadd213ph %xmm4, %xmm5, %xmm6 + +// CHECK: vfmsubadd213ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x2f,0xa7,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmsubadd213ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} + +// CHECK: vfmsubadd213ph (%ecx){1to16}, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x38,0xa7,0x31] + vfmsubadd213ph (%ecx){1to16}, %ymm5, %ymm6 + +// CHECK: vfmsubadd213ph 4064(%ecx), %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0xa7,0x71,0x7f] + vfmsubadd213ph 4064(%ecx), %ymm5, %ymm6 + +// CHECK: vfmsubadd213ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0xbf,0xa7,0x72,0x80] + vfmsubadd213ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} + +// CHECK: vfmsubadd213ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x0f,0xa7,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmsubadd213ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} + +// CHECK: vfmsubadd213ph (%ecx){1to8}, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xa7,0x31] + vfmsubadd213ph (%ecx){1to8}, %xmm5, %xmm6 + +// CHECK: vfmsubadd213ph 2032(%ecx), %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xa7,0x71,0x7f] + vfmsubadd213ph 2032(%ecx), %xmm5, %xmm6 + +// CHECK: vfmsubadd213ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +// CHECK: encoding: 
[0x62,0xf6,0x55,0x9f,0xa7,0x72,0x80] + vfmsubadd213ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} + +// CHECK: vfmsubadd231ph %ymm4, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0xb7,0xf4] + vfmsubadd231ph %ymm4, %ymm5, %ymm6 + +// CHECK: vfmsubadd231ph %xmm4, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xb7,0xf4] + vfmsubadd231ph %xmm4, %xmm5, %xmm6 + +// CHECK: vfmsubadd231ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x2f,0xb7,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmsubadd231ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} + +// CHECK: vfmsubadd231ph (%ecx){1to16}, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x38,0xb7,0x31] + vfmsubadd231ph (%ecx){1to16}, %ymm5, %ymm6 + +// CHECK: vfmsubadd231ph 4064(%ecx), %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0xb7,0x71,0x7f] + vfmsubadd231ph 4064(%ecx), %ymm5, %ymm6 + +// CHECK: vfmsubadd231ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0xbf,0xb7,0x72,0x80] + vfmsubadd231ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} + +// CHECK: vfmsubadd231ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x0f,0xb7,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmsubadd231ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} + +// CHECK: vfmsubadd231ph (%ecx){1to8}, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xb7,0x31] + vfmsubadd231ph (%ecx){1to8}, %xmm5, %xmm6 + +// CHECK: vfmsubadd231ph 2032(%ecx), %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xb7,0x71,0x7f] + vfmsubadd231ph 2032(%ecx), %xmm5, %xmm6 + +// CHECK: vfmsubadd231ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0x9f,0xb7,0x72,0x80] + vfmsubadd231ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} + +// CHECK: vfnmadd132ph %ymm4, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0x9c,0xf4] + vfnmadd132ph %ymm4, %ymm5, %ymm6 + +// CHECK: vfnmadd132ph %xmm4, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0x9c,0xf4] + vfnmadd132ph %xmm4, %xmm5, %xmm6 + +// CHECK: vfnmadd132ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x2f,0x9c,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfnmadd132ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} + +// CHECK: vfnmadd132ph (%ecx){1to16}, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x38,0x9c,0x31] + vfnmadd132ph (%ecx){1to16}, %ymm5, %ymm6 + +// CHECK: vfnmadd132ph 4064(%ecx), %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0x9c,0x71,0x7f] + vfnmadd132ph 4064(%ecx), %ymm5, %ymm6 + +// CHECK: vfnmadd132ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0xbf,0x9c,0x72,0x80] + vfnmadd132ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} + +// CHECK: vfnmadd132ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x0f,0x9c,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfnmadd132ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} + +// CHECK: vfnmadd132ph (%ecx){1to8}, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0x9c,0x31] + vfnmadd132ph (%ecx){1to8}, %xmm5, %xmm6 + +// CHECK: vfnmadd132ph 2032(%ecx), %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0x9c,0x71,0x7f] + vfnmadd132ph 2032(%ecx), %xmm5, %xmm6 + +// CHECK: vfnmadd132ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0x9f,0x9c,0x72,0x80] + vfnmadd132ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} + +// CHECK: vfnmadd213ph %ymm4, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0xac,0xf4] + vfnmadd213ph %ymm4, %ymm5, %ymm6 + +// 
CHECK: vfnmadd213ph %xmm4, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xac,0xf4] + vfnmadd213ph %xmm4, %xmm5, %xmm6 + +// CHECK: vfnmadd213ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x2f,0xac,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfnmadd213ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} + +// CHECK: vfnmadd213ph (%ecx){1to16}, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x38,0xac,0x31] + vfnmadd213ph (%ecx){1to16}, %ymm5, %ymm6 + +// CHECK: vfnmadd213ph 4064(%ecx), %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0xac,0x71,0x7f] + vfnmadd213ph 4064(%ecx), %ymm5, %ymm6 + +// CHECK: vfnmadd213ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0xbf,0xac,0x72,0x80] + vfnmadd213ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} + +// CHECK: vfnmadd213ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x0f,0xac,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfnmadd213ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} + +// CHECK: vfnmadd213ph (%ecx){1to8}, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xac,0x31] + vfnmadd213ph (%ecx){1to8}, %xmm5, %xmm6 + +// CHECK: vfnmadd213ph 2032(%ecx), %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xac,0x71,0x7f] + vfnmadd213ph 2032(%ecx), %xmm5, %xmm6 + +// CHECK: vfnmadd213ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0x9f,0xac,0x72,0x80] + vfnmadd213ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} + +// CHECK: vfnmadd231ph %ymm4, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0xbc,0xf4] + vfnmadd231ph %ymm4, %ymm5, %ymm6 + +// CHECK: vfnmadd231ph %xmm4, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xbc,0xf4] + vfnmadd231ph %xmm4, %xmm5, %xmm6 + +// CHECK: vfnmadd231ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x2f,0xbc,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfnmadd231ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} + +// CHECK: vfnmadd231ph (%ecx){1to16}, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x38,0xbc,0x31] + vfnmadd231ph (%ecx){1to16}, %ymm5, %ymm6 + +// CHECK: vfnmadd231ph 4064(%ecx), %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0xbc,0x71,0x7f] + vfnmadd231ph 4064(%ecx), %ymm5, %ymm6 + +// CHECK: vfnmadd231ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0xbf,0xbc,0x72,0x80] + vfnmadd231ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} + +// CHECK: vfnmadd231ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x0f,0xbc,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfnmadd231ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} + +// CHECK: vfnmadd231ph (%ecx){1to8}, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xbc,0x31] + vfnmadd231ph (%ecx){1to8}, %xmm5, %xmm6 + +// CHECK: vfnmadd231ph 2032(%ecx), %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xbc,0x71,0x7f] + vfnmadd231ph 2032(%ecx), %xmm5, %xmm6 + +// CHECK: vfnmadd231ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0x9f,0xbc,0x72,0x80] + vfnmadd231ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} + +// CHECK: vfnmsub132ph %ymm4, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0x9e,0xf4] + vfnmsub132ph %ymm4, %ymm5, %ymm6 + +// CHECK: vfnmsub132ph %xmm4, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0x9e,0xf4] + vfnmsub132ph %xmm4, %xmm5, %xmm6 + +// CHECK: vfnmsub132ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x2f,0x9e,0xb4,0xf4,0x00,0x00,0x00,0x10] + 
vfnmsub132ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} + +// CHECK: vfnmsub132ph (%ecx){1to16}, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x38,0x9e,0x31] + vfnmsub132ph (%ecx){1to16}, %ymm5, %ymm6 + +// CHECK: vfnmsub132ph 4064(%ecx), %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0x9e,0x71,0x7f] + vfnmsub132ph 4064(%ecx), %ymm5, %ymm6 + +// CHECK: vfnmsub132ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0xbf,0x9e,0x72,0x80] + vfnmsub132ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} + +// CHECK: vfnmsub132ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x0f,0x9e,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfnmsub132ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} + +// CHECK: vfnmsub132ph (%ecx){1to8}, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0x9e,0x31] + vfnmsub132ph (%ecx){1to8}, %xmm5, %xmm6 + +// CHECK: vfnmsub132ph 2032(%ecx), %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0x9e,0x71,0x7f] + vfnmsub132ph 2032(%ecx), %xmm5, %xmm6 + +// CHECK: vfnmsub132ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0x9f,0x9e,0x72,0x80] + vfnmsub132ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} + +// CHECK: vfnmsub213ph %ymm4, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0xae,0xf4] + vfnmsub213ph %ymm4, %ymm5, %ymm6 + +// CHECK: vfnmsub213ph %xmm4, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xae,0xf4] + vfnmsub213ph %xmm4, %xmm5, %xmm6 + +// CHECK: vfnmsub213ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x2f,0xae,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfnmsub213ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} + +// CHECK: vfnmsub213ph (%ecx){1to16}, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x38,0xae,0x31] + vfnmsub213ph (%ecx){1to16}, %ymm5, %ymm6 + +// CHECK: vfnmsub213ph 4064(%ecx), %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0xae,0x71,0x7f] + vfnmsub213ph 4064(%ecx), %ymm5, %ymm6 + +// CHECK: vfnmsub213ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0xbf,0xae,0x72,0x80] + vfnmsub213ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} + +// CHECK: vfnmsub213ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x0f,0xae,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfnmsub213ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} + +// CHECK: vfnmsub213ph (%ecx){1to8}, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xae,0x31] + vfnmsub213ph (%ecx){1to8}, %xmm5, %xmm6 + +// CHECK: vfnmsub213ph 2032(%ecx), %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xae,0x71,0x7f] + vfnmsub213ph 2032(%ecx), %xmm5, %xmm6 + +// CHECK: vfnmsub213ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0x9f,0xae,0x72,0x80] + vfnmsub213ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} + +// CHECK: vfnmsub231ph %ymm4, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x28,0xbe,0xf4] + vfnmsub231ph %ymm4, %ymm5, %ymm6 + +// CHECK: vfnmsub231ph %xmm4, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xbe,0xf4] + vfnmsub231ph %xmm4, %xmm5, %xmm6 + +// CHECK: vfnmsub231ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x2f,0xbe,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfnmsub231ph 268435456(%esp,%esi,8), %ymm5, %ymm6 {%k7} + +// CHECK: vfnmsub231ph (%ecx){1to16}, %ymm5, %ymm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x38,0xbe,0x31] + vfnmsub231ph (%ecx){1to16}, %ymm5, %ymm6 + +// CHECK: vfnmsub231ph 4064(%ecx), %ymm5, %ymm6 +// CHECK: encoding: 
[0x62,0xf6,0x55,0x28,0xbe,0x71,0x7f] + vfnmsub231ph 4064(%ecx), %ymm5, %ymm6 + +// CHECK: vfnmsub231ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0xbf,0xbe,0x72,0x80] + vfnmsub231ph -256(%edx){1to16}, %ymm5, %ymm6 {%k7} {z} + +// CHECK: vfnmsub231ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} +// CHECK: encoding: [0x62,0xf6,0x55,0x0f,0xbe,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfnmsub231ph 268435456(%esp,%esi,8), %xmm5, %xmm6 {%k7} + +// CHECK: vfnmsub231ph (%ecx){1to8}, %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xbe,0x31] + vfnmsub231ph (%ecx){1to8}, %xmm5, %xmm6 + +// CHECK: vfnmsub231ph 2032(%ecx), %xmm5, %xmm6 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xbe,0x71,0x7f] + vfnmsub231ph 2032(%ecx), %xmm5, %xmm6 + +// CHECK: vfnmsub231ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} +// CHECK: encoding: [0x62,0xf6,0x55,0x9f,0xbe,0x72,0x80] + vfnmsub231ph -256(%edx){1to8}, %xmm5, %xmm6 {%k7} {z} diff --git a/llvm/test/MC/X86/intel-syntax-avx512fp16.s b/llvm/test/MC/X86/intel-syntax-avx512fp16.s index 36ca110e12e6e..e2fb2e4ddde2e 100644 --- a/llvm/test/MC/X86/intel-syntax-avx512fp16.s +++ b/llvm/test/MC/X86/intel-syntax-avx512fp16.s @@ -1635,3 +1635,723 @@ // CHECK: vsqrtsh xmm6 {k7} {z}, xmm5, word ptr [edx - 256] // CHECK: encoding: [0x62,0xf5,0x56,0x8f,0x51,0x72,0x80] vsqrtsh xmm6 {k7} {z}, xmm5, word ptr [edx - 256] + +// CHECK: vfmadd132ph zmm6, zmm5, zmm4 +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0x98,0xf4] + vfmadd132ph zmm6, zmm5, zmm4 + +// CHECK: vfmadd132ph zmm6, zmm5, zmm4, {rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0x98,0xf4] + vfmadd132ph zmm6, zmm5, zmm4, {rn-sae} + +// CHECK: vfmadd132ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x4f,0x98,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmadd132ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vfmadd132ph zmm6, zmm5, word ptr [ecx]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0x58,0x98,0x31] + vfmadd132ph zmm6, zmm5, word ptr [ecx]{1to32} + +// CHECK: vfmadd132ph zmm6, zmm5, zmmword ptr [ecx + 8128] +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0x98,0x71,0x7f] + vfmadd132ph zmm6, zmm5, zmmword ptr [ecx + 8128] + +// CHECK: vfmadd132ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0xdf,0x98,0x72,0x80] + vfmadd132ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} + +// CHECK: vfmadd132sh xmm6, xmm5, xmm4 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0x99,0xf4] + vfmadd132sh xmm6, xmm5, xmm4 + +// CHECK: vfmadd132sh xmm6, xmm5, xmm4, {rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0x99,0xf4] + vfmadd132sh xmm6, xmm5, xmm4, {rn-sae} + +// CHECK: vfmadd132sh xmm6 {k7}, xmm5, word ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x0f,0x99,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmadd132sh xmm6 {k7}, xmm5, word ptr [esp + 8*esi + 268435456] + +// CHECK: vfmadd132sh xmm6, xmm5, word ptr [ecx] +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0x99,0x31] + vfmadd132sh xmm6, xmm5, word ptr [ecx] + +// CHECK: vfmadd132sh xmm6, xmm5, word ptr [ecx + 254] +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0x99,0x71,0x7f] + vfmadd132sh xmm6, xmm5, word ptr [ecx + 254] + +// CHECK: vfmadd132sh xmm6 {k7} {z}, xmm5, word ptr [edx - 256] +// CHECK: encoding: [0x62,0xf6,0x55,0x8f,0x99,0x72,0x80] + vfmadd132sh xmm6 {k7} {z}, xmm5, word ptr [edx - 256] + +// CHECK: vfmadd213ph zmm6, zmm5, zmm4 +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0xa8,0xf4] + vfmadd213ph zmm6, zmm5, zmm4 + +// CHECK: vfmadd213ph zmm6, zmm5, zmm4, 
{rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xa8,0xf4] + vfmadd213ph zmm6, zmm5, zmm4, {rn-sae} + +// CHECK: vfmadd213ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x4f,0xa8,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmadd213ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vfmadd213ph zmm6, zmm5, word ptr [ecx]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0x58,0xa8,0x31] + vfmadd213ph zmm6, zmm5, word ptr [ecx]{1to32} + +// CHECK: vfmadd213ph zmm6, zmm5, zmmword ptr [ecx + 8128] +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0xa8,0x71,0x7f] + vfmadd213ph zmm6, zmm5, zmmword ptr [ecx + 8128] + +// CHECK: vfmadd213ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0xdf,0xa8,0x72,0x80] + vfmadd213ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} + +// CHECK: vfmadd213sh xmm6, xmm5, xmm4 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xa9,0xf4] + vfmadd213sh xmm6, xmm5, xmm4 + +// CHECK: vfmadd213sh xmm6, xmm5, xmm4, {rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xa9,0xf4] + vfmadd213sh xmm6, xmm5, xmm4, {rn-sae} + +// CHECK: vfmadd213sh xmm6 {k7}, xmm5, word ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x0f,0xa9,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmadd213sh xmm6 {k7}, xmm5, word ptr [esp + 8*esi + 268435456] + +// CHECK: vfmadd213sh xmm6, xmm5, word ptr [ecx] +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xa9,0x31] + vfmadd213sh xmm6, xmm5, word ptr [ecx] + +// CHECK: vfmadd213sh xmm6, xmm5, word ptr [ecx + 254] +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xa9,0x71,0x7f] + vfmadd213sh xmm6, xmm5, word ptr [ecx + 254] + +// CHECK: vfmadd213sh xmm6 {k7} {z}, xmm5, word ptr [edx - 256] +// CHECK: encoding: [0x62,0xf6,0x55,0x8f,0xa9,0x72,0x80] + vfmadd213sh xmm6 {k7} {z}, xmm5, word ptr [edx - 256] + +// CHECK: vfmadd231ph zmm6, zmm5, zmm4 +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0xb8,0xf4] + vfmadd231ph zmm6, zmm5, zmm4 + +// CHECK: vfmadd231ph zmm6, zmm5, zmm4, {rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xb8,0xf4] + vfmadd231ph zmm6, zmm5, zmm4, {rn-sae} + +// CHECK: vfmadd231ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x4f,0xb8,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmadd231ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vfmadd231ph zmm6, zmm5, word ptr [ecx]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0x58,0xb8,0x31] + vfmadd231ph zmm6, zmm5, word ptr [ecx]{1to32} + +// CHECK: vfmadd231ph zmm6, zmm5, zmmword ptr [ecx + 8128] +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0xb8,0x71,0x7f] + vfmadd231ph zmm6, zmm5, zmmword ptr [ecx + 8128] + +// CHECK: vfmadd231ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0xdf,0xb8,0x72,0x80] + vfmadd231ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} + +// CHECK: vfmadd231sh xmm6, xmm5, xmm4 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xb9,0xf4] + vfmadd231sh xmm6, xmm5, xmm4 + +// CHECK: vfmadd231sh xmm6, xmm5, xmm4, {rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xb9,0xf4] + vfmadd231sh xmm6, xmm5, xmm4, {rn-sae} + +// CHECK: vfmadd231sh xmm6 {k7}, xmm5, word ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x0f,0xb9,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmadd231sh xmm6 {k7}, xmm5, word ptr [esp + 8*esi + 268435456] + +// CHECK: vfmadd231sh xmm6, xmm5, word ptr [ecx] +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xb9,0x31] + vfmadd231sh xmm6, xmm5, word ptr [ecx] + +// CHECK: vfmadd231sh xmm6, xmm5, word ptr [ecx 
+ 254] +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xb9,0x71,0x7f] + vfmadd231sh xmm6, xmm5, word ptr [ecx + 254] + +// CHECK: vfmadd231sh xmm6 {k7} {z}, xmm5, word ptr [edx - 256] +// CHECK: encoding: [0x62,0xf6,0x55,0x8f,0xb9,0x72,0x80] + vfmadd231sh xmm6 {k7} {z}, xmm5, word ptr [edx - 256] + +// CHECK: vfmaddsub132ph zmm6, zmm5, zmm4 +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0x96,0xf4] + vfmaddsub132ph zmm6, zmm5, zmm4 + +// CHECK: vfmaddsub132ph zmm6, zmm5, zmm4, {rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0x96,0xf4] + vfmaddsub132ph zmm6, zmm5, zmm4, {rn-sae} + +// CHECK: vfmaddsub132ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x4f,0x96,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmaddsub132ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vfmaddsub132ph zmm6, zmm5, word ptr [ecx]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0x58,0x96,0x31] + vfmaddsub132ph zmm6, zmm5, word ptr [ecx]{1to32} + +// CHECK: vfmaddsub132ph zmm6, zmm5, zmmword ptr [ecx + 8128] +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0x96,0x71,0x7f] + vfmaddsub132ph zmm6, zmm5, zmmword ptr [ecx + 8128] + +// CHECK: vfmaddsub132ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0xdf,0x96,0x72,0x80] + vfmaddsub132ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} + +// CHECK: vfmaddsub213ph zmm6, zmm5, zmm4 +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0xa6,0xf4] + vfmaddsub213ph zmm6, zmm5, zmm4 + +// CHECK: vfmaddsub213ph zmm6, zmm5, zmm4, {rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xa6,0xf4] + vfmaddsub213ph zmm6, zmm5, zmm4, {rn-sae} + +// CHECK: vfmaddsub213ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x4f,0xa6,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmaddsub213ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vfmaddsub213ph zmm6, zmm5, word ptr [ecx]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0x58,0xa6,0x31] + vfmaddsub213ph zmm6, zmm5, word ptr [ecx]{1to32} + +// CHECK: vfmaddsub213ph zmm6, zmm5, zmmword ptr [ecx + 8128] +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0xa6,0x71,0x7f] + vfmaddsub213ph zmm6, zmm5, zmmword ptr [ecx + 8128] + +// CHECK: vfmaddsub213ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0xdf,0xa6,0x72,0x80] + vfmaddsub213ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} + +// CHECK: vfmaddsub231ph zmm6, zmm5, zmm4 +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0xb6,0xf4] + vfmaddsub231ph zmm6, zmm5, zmm4 + +// CHECK: vfmaddsub231ph zmm6, zmm5, zmm4, {rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xb6,0xf4] + vfmaddsub231ph zmm6, zmm5, zmm4, {rn-sae} + +// CHECK: vfmaddsub231ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x4f,0xb6,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmaddsub231ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vfmaddsub231ph zmm6, zmm5, word ptr [ecx]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0x58,0xb6,0x31] + vfmaddsub231ph zmm6, zmm5, word ptr [ecx]{1to32} + +// CHECK: vfmaddsub231ph zmm6, zmm5, zmmword ptr [ecx + 8128] +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0xb6,0x71,0x7f] + vfmaddsub231ph zmm6, zmm5, zmmword ptr [ecx + 8128] + +// CHECK: vfmaddsub231ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0xdf,0xb6,0x72,0x80] + vfmaddsub231ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} + +// CHECK: vfmsub132ph zmm6, zmm5, zmm4 +// CHECK: encoding: 
[0x62,0xf6,0x55,0x48,0x9a,0xf4] + vfmsub132ph zmm6, zmm5, zmm4 + +// CHECK: vfmsub132ph zmm6, zmm5, zmm4, {rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0x9a,0xf4] + vfmsub132ph zmm6, zmm5, zmm4, {rn-sae} + +// CHECK: vfmsub132ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x4f,0x9a,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmsub132ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vfmsub132ph zmm6, zmm5, word ptr [ecx]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0x58,0x9a,0x31] + vfmsub132ph zmm6, zmm5, word ptr [ecx]{1to32} + +// CHECK: vfmsub132ph zmm6, zmm5, zmmword ptr [ecx + 8128] +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0x9a,0x71,0x7f] + vfmsub132ph zmm6, zmm5, zmmword ptr [ecx + 8128] + +// CHECK: vfmsub132ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0xdf,0x9a,0x72,0x80] + vfmsub132ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} + +// CHECK: vfmsub132sh xmm6, xmm5, xmm4 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0x9b,0xf4] + vfmsub132sh xmm6, xmm5, xmm4 + +// CHECK: vfmsub132sh xmm6, xmm5, xmm4, {rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0x9b,0xf4] + vfmsub132sh xmm6, xmm5, xmm4, {rn-sae} + +// CHECK: vfmsub132sh xmm6 {k7}, xmm5, word ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x0f,0x9b,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmsub132sh xmm6 {k7}, xmm5, word ptr [esp + 8*esi + 268435456] + +// CHECK: vfmsub132sh xmm6, xmm5, word ptr [ecx] +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0x9b,0x31] + vfmsub132sh xmm6, xmm5, word ptr [ecx] + +// CHECK: vfmsub132sh xmm6, xmm5, word ptr [ecx + 254] +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0x9b,0x71,0x7f] + vfmsub132sh xmm6, xmm5, word ptr [ecx + 254] + +// CHECK: vfmsub132sh xmm6 {k7} {z}, xmm5, word ptr [edx - 256] +// CHECK: encoding: [0x62,0xf6,0x55,0x8f,0x9b,0x72,0x80] + vfmsub132sh xmm6 {k7} {z}, xmm5, word ptr [edx - 256] + +// CHECK: vfmsub213ph zmm6, zmm5, zmm4 +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0xaa,0xf4] + vfmsub213ph zmm6, zmm5, zmm4 + +// CHECK: vfmsub213ph zmm6, zmm5, zmm4, {rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xaa,0xf4] + vfmsub213ph zmm6, zmm5, zmm4, {rn-sae} + +// CHECK: vfmsub213ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x4f,0xaa,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmsub213ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vfmsub213ph zmm6, zmm5, word ptr [ecx]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0x58,0xaa,0x31] + vfmsub213ph zmm6, zmm5, word ptr [ecx]{1to32} + +// CHECK: vfmsub213ph zmm6, zmm5, zmmword ptr [ecx + 8128] +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0xaa,0x71,0x7f] + vfmsub213ph zmm6, zmm5, zmmword ptr [ecx + 8128] + +// CHECK: vfmsub213ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0xdf,0xaa,0x72,0x80] + vfmsub213ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} + +// CHECK: vfmsub213sh xmm6, xmm5, xmm4 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xab,0xf4] + vfmsub213sh xmm6, xmm5, xmm4 + +// CHECK: vfmsub213sh xmm6, xmm5, xmm4, {rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xab,0xf4] + vfmsub213sh xmm6, xmm5, xmm4, {rn-sae} + +// CHECK: vfmsub213sh xmm6 {k7}, xmm5, word ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x0f,0xab,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmsub213sh xmm6 {k7}, xmm5, word ptr [esp + 8*esi + 268435456] + +// CHECK: vfmsub213sh xmm6, xmm5, word ptr [ecx] +// CHECK: encoding: 
[0x62,0xf6,0x55,0x08,0xab,0x31] + vfmsub213sh xmm6, xmm5, word ptr [ecx] + +// CHECK: vfmsub213sh xmm6, xmm5, word ptr [ecx + 254] +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xab,0x71,0x7f] + vfmsub213sh xmm6, xmm5, word ptr [ecx + 254] + +// CHECK: vfmsub213sh xmm6 {k7} {z}, xmm5, word ptr [edx - 256] +// CHECK: encoding: [0x62,0xf6,0x55,0x8f,0xab,0x72,0x80] + vfmsub213sh xmm6 {k7} {z}, xmm5, word ptr [edx - 256] + +// CHECK: vfmsub231ph zmm6, zmm5, zmm4 +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0xba,0xf4] + vfmsub231ph zmm6, zmm5, zmm4 + +// CHECK: vfmsub231ph zmm6, zmm5, zmm4, {rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xba,0xf4] + vfmsub231ph zmm6, zmm5, zmm4, {rn-sae} + +// CHECK: vfmsub231ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x4f,0xba,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmsub231ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vfmsub231ph zmm6, zmm5, word ptr [ecx]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0x58,0xba,0x31] + vfmsub231ph zmm6, zmm5, word ptr [ecx]{1to32} + +// CHECK: vfmsub231ph zmm6, zmm5, zmmword ptr [ecx + 8128] +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0xba,0x71,0x7f] + vfmsub231ph zmm6, zmm5, zmmword ptr [ecx + 8128] + +// CHECK: vfmsub231ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0xdf,0xba,0x72,0x80] + vfmsub231ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} + +// CHECK: vfmsub231sh xmm6, xmm5, xmm4 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xbb,0xf4] + vfmsub231sh xmm6, xmm5, xmm4 + +// CHECK: vfmsub231sh xmm6, xmm5, xmm4, {rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xbb,0xf4] + vfmsub231sh xmm6, xmm5, xmm4, {rn-sae} + +// CHECK: vfmsub231sh xmm6 {k7}, xmm5, word ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x0f,0xbb,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmsub231sh xmm6 {k7}, xmm5, word ptr [esp + 8*esi + 268435456] + +// CHECK: vfmsub231sh xmm6, xmm5, word ptr [ecx] +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xbb,0x31] + vfmsub231sh xmm6, xmm5, word ptr [ecx] + +// CHECK: vfmsub231sh xmm6, xmm5, word ptr [ecx + 254] +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xbb,0x71,0x7f] + vfmsub231sh xmm6, xmm5, word ptr [ecx + 254] + +// CHECK: vfmsub231sh xmm6 {k7} {z}, xmm5, word ptr [edx - 256] +// CHECK: encoding: [0x62,0xf6,0x55,0x8f,0xbb,0x72,0x80] + vfmsub231sh xmm6 {k7} {z}, xmm5, word ptr [edx - 256] + +// CHECK: vfmsubadd132ph zmm6, zmm5, zmm4 +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0x97,0xf4] + vfmsubadd132ph zmm6, zmm5, zmm4 + +// CHECK: vfmsubadd132ph zmm6, zmm5, zmm4, {rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0x97,0xf4] + vfmsubadd132ph zmm6, zmm5, zmm4, {rn-sae} + +// CHECK: vfmsubadd132ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x4f,0x97,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmsubadd132ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vfmsubadd132ph zmm6, zmm5, word ptr [ecx]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0x58,0x97,0x31] + vfmsubadd132ph zmm6, zmm5, word ptr [ecx]{1to32} + +// CHECK: vfmsubadd132ph zmm6, zmm5, zmmword ptr [ecx + 8128] +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0x97,0x71,0x7f] + vfmsubadd132ph zmm6, zmm5, zmmword ptr [ecx + 8128] + +// CHECK: vfmsubadd132ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0xdf,0x97,0x72,0x80] + vfmsubadd132ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} + +// CHECK: vfmsubadd213ph zmm6, zmm5, zmm4 +// CHECK: encoding: 
[0x62,0xf6,0x55,0x48,0xa7,0xf4] + vfmsubadd213ph zmm6, zmm5, zmm4 + +// CHECK: vfmsubadd213ph zmm6, zmm5, zmm4, {rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xa7,0xf4] + vfmsubadd213ph zmm6, zmm5, zmm4, {rn-sae} + +// CHECK: vfmsubadd213ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x4f,0xa7,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmsubadd213ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vfmsubadd213ph zmm6, zmm5, word ptr [ecx]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0x58,0xa7,0x31] + vfmsubadd213ph zmm6, zmm5, word ptr [ecx]{1to32} + +// CHECK: vfmsubadd213ph zmm6, zmm5, zmmword ptr [ecx + 8128] +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0xa7,0x71,0x7f] + vfmsubadd213ph zmm6, zmm5, zmmword ptr [ecx + 8128] + +// CHECK: vfmsubadd213ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0xdf,0xa7,0x72,0x80] + vfmsubadd213ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} + +// CHECK: vfmsubadd231ph zmm6, zmm5, zmm4 +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0xb7,0xf4] + vfmsubadd231ph zmm6, zmm5, zmm4 + +// CHECK: vfmsubadd231ph zmm6, zmm5, zmm4, {rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xb7,0xf4] + vfmsubadd231ph zmm6, zmm5, zmm4, {rn-sae} + +// CHECK: vfmsubadd231ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x4f,0xb7,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfmsubadd231ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vfmsubadd231ph zmm6, zmm5, word ptr [ecx]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0x58,0xb7,0x31] + vfmsubadd231ph zmm6, zmm5, word ptr [ecx]{1to32} + +// CHECK: vfmsubadd231ph zmm6, zmm5, zmmword ptr [ecx + 8128] +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0xb7,0x71,0x7f] + vfmsubadd231ph zmm6, zmm5, zmmword ptr [ecx + 8128] + +// CHECK: vfmsubadd231ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0xdf,0xb7,0x72,0x80] + vfmsubadd231ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} + +// CHECK: vfnmadd132ph zmm6, zmm5, zmm4 +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0x9c,0xf4] + vfnmadd132ph zmm6, zmm5, zmm4 + +// CHECK: vfnmadd132ph zmm6, zmm5, zmm4, {rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0x9c,0xf4] + vfnmadd132ph zmm6, zmm5, zmm4, {rn-sae} + +// CHECK: vfnmadd132ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x4f,0x9c,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfnmadd132ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vfnmadd132ph zmm6, zmm5, word ptr [ecx]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0x58,0x9c,0x31] + vfnmadd132ph zmm6, zmm5, word ptr [ecx]{1to32} + +// CHECK: vfnmadd132ph zmm6, zmm5, zmmword ptr [ecx + 8128] +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0x9c,0x71,0x7f] + vfnmadd132ph zmm6, zmm5, zmmword ptr [ecx + 8128] + +// CHECK: vfnmadd132ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0xdf,0x9c,0x72,0x80] + vfnmadd132ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} + +// CHECK: vfnmadd132sh xmm6, xmm5, xmm4 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0x9d,0xf4] + vfnmadd132sh xmm6, xmm5, xmm4 + +// CHECK: vfnmadd132sh xmm6, xmm5, xmm4, {rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0x9d,0xf4] + vfnmadd132sh xmm6, xmm5, xmm4, {rn-sae} + +// CHECK: vfnmadd132sh xmm6 {k7}, xmm5, word ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x0f,0x9d,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfnmadd132sh xmm6 {k7}, 
xmm5, word ptr [esp + 8*esi + 268435456] + +// CHECK: vfnmadd132sh xmm6, xmm5, word ptr [ecx] +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0x9d,0x31] + vfnmadd132sh xmm6, xmm5, word ptr [ecx] + +// CHECK: vfnmadd132sh xmm6, xmm5, word ptr [ecx + 254] +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0x9d,0x71,0x7f] + vfnmadd132sh xmm6, xmm5, word ptr [ecx + 254] + +// CHECK: vfnmadd132sh xmm6 {k7} {z}, xmm5, word ptr [edx - 256] +// CHECK: encoding: [0x62,0xf6,0x55,0x8f,0x9d,0x72,0x80] + vfnmadd132sh xmm6 {k7} {z}, xmm5, word ptr [edx - 256] + +// CHECK: vfnmadd213ph zmm6, zmm5, zmm4 +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0xac,0xf4] + vfnmadd213ph zmm6, zmm5, zmm4 + +// CHECK: vfnmadd213ph zmm6, zmm5, zmm4, {rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xac,0xf4] + vfnmadd213ph zmm6, zmm5, zmm4, {rn-sae} + +// CHECK: vfnmadd213ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x4f,0xac,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfnmadd213ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vfnmadd213ph zmm6, zmm5, word ptr [ecx]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0x58,0xac,0x31] + vfnmadd213ph zmm6, zmm5, word ptr [ecx]{1to32} + +// CHECK: vfnmadd213ph zmm6, zmm5, zmmword ptr [ecx + 8128] +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0xac,0x71,0x7f] + vfnmadd213ph zmm6, zmm5, zmmword ptr [ecx + 8128] + +// CHECK: vfnmadd213ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0xdf,0xac,0x72,0x80] + vfnmadd213ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} + +// CHECK: vfnmadd213sh xmm6, xmm5, xmm4 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xad,0xf4] + vfnmadd213sh xmm6, xmm5, xmm4 + +// CHECK: vfnmadd213sh xmm6, xmm5, xmm4, {rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xad,0xf4] + vfnmadd213sh xmm6, xmm5, xmm4, {rn-sae} + +// CHECK: vfnmadd213sh xmm6 {k7}, xmm5, word ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x0f,0xad,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfnmadd213sh xmm6 {k7}, xmm5, word ptr [esp + 8*esi + 268435456] + +// CHECK: vfnmadd213sh xmm6, xmm5, word ptr [ecx] +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xad,0x31] + vfnmadd213sh xmm6, xmm5, word ptr [ecx] + +// CHECK: vfnmadd213sh xmm6, xmm5, word ptr [ecx + 254] +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xad,0x71,0x7f] + vfnmadd213sh xmm6, xmm5, word ptr [ecx + 254] + +// CHECK: vfnmadd213sh xmm6 {k7} {z}, xmm5, word ptr [edx - 256] +// CHECK: encoding: [0x62,0xf6,0x55,0x8f,0xad,0x72,0x80] + vfnmadd213sh xmm6 {k7} {z}, xmm5, word ptr [edx - 256] + +// CHECK: vfnmadd231ph zmm6, zmm5, zmm4 +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0xbc,0xf4] + vfnmadd231ph zmm6, zmm5, zmm4 + +// CHECK: vfnmadd231ph zmm6, zmm5, zmm4, {rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xbc,0xf4] + vfnmadd231ph zmm6, zmm5, zmm4, {rn-sae} + +// CHECK: vfnmadd231ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x4f,0xbc,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfnmadd231ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vfnmadd231ph zmm6, zmm5, word ptr [ecx]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0x58,0xbc,0x31] + vfnmadd231ph zmm6, zmm5, word ptr [ecx]{1to32} + +// CHECK: vfnmadd231ph zmm6, zmm5, zmmword ptr [ecx + 8128] +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0xbc,0x71,0x7f] + vfnmadd231ph zmm6, zmm5, zmmword ptr [ecx + 8128] + +// CHECK: vfnmadd231ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0xdf,0xbc,0x72,0x80] + 
vfnmadd231ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} + +// CHECK: vfnmadd231sh xmm6, xmm5, xmm4 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xbd,0xf4] + vfnmadd231sh xmm6, xmm5, xmm4 + +// CHECK: vfnmadd231sh xmm6, xmm5, xmm4, {rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xbd,0xf4] + vfnmadd231sh xmm6, xmm5, xmm4, {rn-sae} + +// CHECK: vfnmadd231sh xmm6 {k7}, xmm5, word ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x0f,0xbd,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfnmadd231sh xmm6 {k7}, xmm5, word ptr [esp + 8*esi + 268435456] + +// CHECK: vfnmadd231sh xmm6, xmm5, word ptr [ecx] +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xbd,0x31] + vfnmadd231sh xmm6, xmm5, word ptr [ecx] + +// CHECK: vfnmadd231sh xmm6, xmm5, word ptr [ecx + 254] +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xbd,0x71,0x7f] + vfnmadd231sh xmm6, xmm5, word ptr [ecx + 254] + +// CHECK: vfnmadd231sh xmm6 {k7} {z}, xmm5, word ptr [edx - 256] +// CHECK: encoding: [0x62,0xf6,0x55,0x8f,0xbd,0x72,0x80] + vfnmadd231sh xmm6 {k7} {z}, xmm5, word ptr [edx - 256] + +// CHECK: vfnmsub132ph zmm6, zmm5, zmm4 +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0x9e,0xf4] + vfnmsub132ph zmm6, zmm5, zmm4 + +// CHECK: vfnmsub132ph zmm6, zmm5, zmm4, {rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0x9e,0xf4] + vfnmsub132ph zmm6, zmm5, zmm4, {rn-sae} + +// CHECK: vfnmsub132ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x4f,0x9e,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfnmsub132ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vfnmsub132ph zmm6, zmm5, word ptr [ecx]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0x58,0x9e,0x31] + vfnmsub132ph zmm6, zmm5, word ptr [ecx]{1to32} + +// CHECK: vfnmsub132ph zmm6, zmm5, zmmword ptr [ecx + 8128] +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0x9e,0x71,0x7f] + vfnmsub132ph zmm6, zmm5, zmmword ptr [ecx + 8128] + +// CHECK: vfnmsub132ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0xdf,0x9e,0x72,0x80] + vfnmsub132ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} + +// CHECK: vfnmsub132sh xmm6, xmm5, xmm4 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0x9f,0xf4] + vfnmsub132sh xmm6, xmm5, xmm4 + +// CHECK: vfnmsub132sh xmm6, xmm5, xmm4, {rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0x9f,0xf4] + vfnmsub132sh xmm6, xmm5, xmm4, {rn-sae} + +// CHECK: vfnmsub132sh xmm6 {k7}, xmm5, word ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x0f,0x9f,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfnmsub132sh xmm6 {k7}, xmm5, word ptr [esp + 8*esi + 268435456] + +// CHECK: vfnmsub132sh xmm6, xmm5, word ptr [ecx] +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0x9f,0x31] + vfnmsub132sh xmm6, xmm5, word ptr [ecx] + +// CHECK: vfnmsub132sh xmm6, xmm5, word ptr [ecx + 254] +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0x9f,0x71,0x7f] + vfnmsub132sh xmm6, xmm5, word ptr [ecx + 254] + +// CHECK: vfnmsub132sh xmm6 {k7} {z}, xmm5, word ptr [edx - 256] +// CHECK: encoding: [0x62,0xf6,0x55,0x8f,0x9f,0x72,0x80] + vfnmsub132sh xmm6 {k7} {z}, xmm5, word ptr [edx - 256] + +// CHECK: vfnmsub213ph zmm6, zmm5, zmm4 +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0xae,0xf4] + vfnmsub213ph zmm6, zmm5, zmm4 + +// CHECK: vfnmsub213ph zmm6, zmm5, zmm4, {rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xae,0xf4] + vfnmsub213ph zmm6, zmm5, zmm4, {rn-sae} + +// CHECK: vfnmsub213ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x4f,0xae,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfnmsub213ph zmm6 {k7}, 
zmm5, zmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vfnmsub213ph zmm6, zmm5, word ptr [ecx]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0x58,0xae,0x31] + vfnmsub213ph zmm6, zmm5, word ptr [ecx]{1to32} + +// CHECK: vfnmsub213ph zmm6, zmm5, zmmword ptr [ecx + 8128] +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0xae,0x71,0x7f] + vfnmsub213ph zmm6, zmm5, zmmword ptr [ecx + 8128] + +// CHECK: vfnmsub213ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0xdf,0xae,0x72,0x80] + vfnmsub213ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} + +// CHECK: vfnmsub213sh xmm6, xmm5, xmm4 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xaf,0xf4] + vfnmsub213sh xmm6, xmm5, xmm4 + +// CHECK: vfnmsub213sh xmm6, xmm5, xmm4, {rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xaf,0xf4] + vfnmsub213sh xmm6, xmm5, xmm4, {rn-sae} + +// CHECK: vfnmsub213sh xmm6 {k7}, xmm5, word ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x0f,0xaf,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfnmsub213sh xmm6 {k7}, xmm5, word ptr [esp + 8*esi + 268435456] + +// CHECK: vfnmsub213sh xmm6, xmm5, word ptr [ecx] +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xaf,0x31] + vfnmsub213sh xmm6, xmm5, word ptr [ecx] + +// CHECK: vfnmsub213sh xmm6, xmm5, word ptr [ecx + 254] +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xaf,0x71,0x7f] + vfnmsub213sh xmm6, xmm5, word ptr [ecx + 254] + +// CHECK: vfnmsub213sh xmm6 {k7} {z}, xmm5, word ptr [edx - 256] +// CHECK: encoding: [0x62,0xf6,0x55,0x8f,0xaf,0x72,0x80] + vfnmsub213sh xmm6 {k7} {z}, xmm5, word ptr [edx - 256] + +// CHECK: vfnmsub231ph zmm6, zmm5, zmm4 +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0xbe,0xf4] + vfnmsub231ph zmm6, zmm5, zmm4 + +// CHECK: vfnmsub231ph zmm6, zmm5, zmm4, {rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xbe,0xf4] + vfnmsub231ph zmm6, zmm5, zmm4, {rn-sae} + +// CHECK: vfnmsub231ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x4f,0xbe,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfnmsub231ph zmm6 {k7}, zmm5, zmmword ptr [esp + 8*esi + 268435456] + +// CHECK: vfnmsub231ph zmm6, zmm5, word ptr [ecx]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0x58,0xbe,0x31] + vfnmsub231ph zmm6, zmm5, word ptr [ecx]{1to32} + +// CHECK: vfnmsub231ph zmm6, zmm5, zmmword ptr [ecx + 8128] +// CHECK: encoding: [0x62,0xf6,0x55,0x48,0xbe,0x71,0x7f] + vfnmsub231ph zmm6, zmm5, zmmword ptr [ecx + 8128] + +// CHECK: vfnmsub231ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} +// CHECK: encoding: [0x62,0xf6,0x55,0xdf,0xbe,0x72,0x80] + vfnmsub231ph zmm6 {k7} {z}, zmm5, word ptr [edx - 256]{1to32} + +// CHECK: vfnmsub231sh xmm6, xmm5, xmm4 +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xbf,0xf4] + vfnmsub231sh xmm6, xmm5, xmm4 + +// CHECK: vfnmsub231sh xmm6, xmm5, xmm4, {rn-sae} +// CHECK: encoding: [0x62,0xf6,0x55,0x18,0xbf,0xf4] + vfnmsub231sh xmm6, xmm5, xmm4, {rn-sae} + +// CHECK: vfnmsub231sh xmm6 {k7}, xmm5, word ptr [esp + 8*esi + 268435456] +// CHECK: encoding: [0x62,0xf6,0x55,0x0f,0xbf,0xb4,0xf4,0x00,0x00,0x00,0x10] + vfnmsub231sh xmm6 {k7}, xmm5, word ptr [esp + 8*esi + 268435456] + +// CHECK: vfnmsub231sh xmm6, xmm5, word ptr [ecx] +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xbf,0x31] + vfnmsub231sh xmm6, xmm5, word ptr [ecx] + +// CHECK: vfnmsub231sh xmm6, xmm5, word ptr [ecx + 254] +// CHECK: encoding: [0x62,0xf6,0x55,0x08,0xbf,0x71,0x7f] + vfnmsub231sh xmm6, xmm5, word ptr [ecx + 254] + +// CHECK: vfnmsub231sh xmm6 {k7} {z}, xmm5, word ptr [edx - 256] +// CHECK: encoding: [0x62,0xf6,0x55,0x8f,0xbf,0x72,0x80] + 
vfnmsub231sh xmm6 {k7} {z}, xmm5, word ptr [edx - 256] diff --git a/llvm/test/MC/X86/intel-syntax-avx512fp16vl.s b/llvm/test/MC/X86/intel-syntax-avx512fp16vl.s index 6091599b87d66..427cd2f2eaa04 100644 --- a/llvm/test/MC/X86/intel-syntax-avx512fp16vl.s +++ b/llvm/test/MC/X86/intel-syntax-avx512fp16vl.s @@ -1491,3 +1491,723 @@ // CHECK: vsqrtph ymm30 {k7} {z}, word ptr [rdx - 256]{1to16} // CHECK: encoding: [0x62,0x65,0x7c,0xbf,0x51,0x72,0x80] vsqrtph ymm30 {k7} {z}, word ptr [rdx - 256]{1to16} + +// CHECK: vfmadd132ph ymm30, ymm29, ymm28 +// CHECK: encoding: [0x62,0x06,0x15,0x20,0x98,0xf4] + vfmadd132ph ymm30, ymm29, ymm28 + +// CHECK: vfmadd132ph xmm30, xmm29, xmm28 +// CHECK: encoding: [0x62,0x06,0x15,0x00,0x98,0xf4] + vfmadd132ph xmm30, xmm29, xmm28 + +// CHECK: vfmadd132ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x27,0x98,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmadd132ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfmadd132ph ymm30, ymm29, word ptr [r9]{1to16} +// CHECK: encoding: [0x62,0x46,0x15,0x30,0x98,0x31] + vfmadd132ph ymm30, ymm29, word ptr [r9]{1to16} + +// CHECK: vfmadd132ph ymm30, ymm29, ymmword ptr [rcx + 4064] +// CHECK: encoding: [0x62,0x66,0x15,0x20,0x98,0x71,0x7f] + vfmadd132ph ymm30, ymm29, ymmword ptr [rcx + 4064] + +// CHECK: vfmadd132ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} +// CHECK: encoding: [0x62,0x66,0x15,0xb7,0x98,0x72,0x80] + vfmadd132ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} + +// CHECK: vfmadd132ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x07,0x98,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmadd132ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfmadd132ph xmm30, xmm29, word ptr [r9]{1to8} +// CHECK: encoding: [0x62,0x46,0x15,0x10,0x98,0x31] + vfmadd132ph xmm30, xmm29, word ptr [r9]{1to8} + +// CHECK: vfmadd132ph xmm30, xmm29, xmmword ptr [rcx + 2032] +// CHECK: encoding: [0x62,0x66,0x15,0x00,0x98,0x71,0x7f] + vfmadd132ph xmm30, xmm29, xmmword ptr [rcx + 2032] + +// CHECK: vfmadd132ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} +// CHECK: encoding: [0x62,0x66,0x15,0x97,0x98,0x72,0x80] + vfmadd132ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} + +// CHECK: vfmadd213ph ymm30, ymm29, ymm28 +// CHECK: encoding: [0x62,0x06,0x15,0x20,0xa8,0xf4] + vfmadd213ph ymm30, ymm29, ymm28 + +// CHECK: vfmadd213ph xmm30, xmm29, xmm28 +// CHECK: encoding: [0x62,0x06,0x15,0x00,0xa8,0xf4] + vfmadd213ph xmm30, xmm29, xmm28 + +// CHECK: vfmadd213ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x27,0xa8,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmadd213ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfmadd213ph ymm30, ymm29, word ptr [r9]{1to16} +// CHECK: encoding: [0x62,0x46,0x15,0x30,0xa8,0x31] + vfmadd213ph ymm30, ymm29, word ptr [r9]{1to16} + +// CHECK: vfmadd213ph ymm30, ymm29, ymmword ptr [rcx + 4064] +// CHECK: encoding: [0x62,0x66,0x15,0x20,0xa8,0x71,0x7f] + vfmadd213ph ymm30, ymm29, ymmword ptr [rcx + 4064] + +// CHECK: vfmadd213ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} +// CHECK: encoding: [0x62,0x66,0x15,0xb7,0xa8,0x72,0x80] + vfmadd213ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} + +// CHECK: vfmadd213ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x07,0xa8,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmadd213ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] 
+ +// CHECK: vfmadd213ph xmm30, xmm29, word ptr [r9]{1to8} +// CHECK: encoding: [0x62,0x46,0x15,0x10,0xa8,0x31] + vfmadd213ph xmm30, xmm29, word ptr [r9]{1to8} + +// CHECK: vfmadd213ph xmm30, xmm29, xmmword ptr [rcx + 2032] +// CHECK: encoding: [0x62,0x66,0x15,0x00,0xa8,0x71,0x7f] + vfmadd213ph xmm30, xmm29, xmmword ptr [rcx + 2032] + +// CHECK: vfmadd213ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} +// CHECK: encoding: [0x62,0x66,0x15,0x97,0xa8,0x72,0x80] + vfmadd213ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} + +// CHECK: vfmadd231ph ymm30, ymm29, ymm28 +// CHECK: encoding: [0x62,0x06,0x15,0x20,0xb8,0xf4] + vfmadd231ph ymm30, ymm29, ymm28 + +// CHECK: vfmadd231ph xmm30, xmm29, xmm28 +// CHECK: encoding: [0x62,0x06,0x15,0x00,0xb8,0xf4] + vfmadd231ph xmm30, xmm29, xmm28 + +// CHECK: vfmadd231ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x27,0xb8,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmadd231ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfmadd231ph ymm30, ymm29, word ptr [r9]{1to16} +// CHECK: encoding: [0x62,0x46,0x15,0x30,0xb8,0x31] + vfmadd231ph ymm30, ymm29, word ptr [r9]{1to16} + +// CHECK: vfmadd231ph ymm30, ymm29, ymmword ptr [rcx + 4064] +// CHECK: encoding: [0x62,0x66,0x15,0x20,0xb8,0x71,0x7f] + vfmadd231ph ymm30, ymm29, ymmword ptr [rcx + 4064] + +// CHECK: vfmadd231ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} +// CHECK: encoding: [0x62,0x66,0x15,0xb7,0xb8,0x72,0x80] + vfmadd231ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} + +// CHECK: vfmadd231ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x07,0xb8,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmadd231ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfmadd231ph xmm30, xmm29, word ptr [r9]{1to8} +// CHECK: encoding: [0x62,0x46,0x15,0x10,0xb8,0x31] + vfmadd231ph xmm30, xmm29, word ptr [r9]{1to8} + +// CHECK: vfmadd231ph xmm30, xmm29, xmmword ptr [rcx + 2032] +// CHECK: encoding: [0x62,0x66,0x15,0x00,0xb8,0x71,0x7f] + vfmadd231ph xmm30, xmm29, xmmword ptr [rcx + 2032] + +// CHECK: vfmadd231ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} +// CHECK: encoding: [0x62,0x66,0x15,0x97,0xb8,0x72,0x80] + vfmadd231ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} + +// CHECK: vfmaddsub132ph ymm30, ymm29, ymm28 +// CHECK: encoding: [0x62,0x06,0x15,0x20,0x96,0xf4] + vfmaddsub132ph ymm30, ymm29, ymm28 + +// CHECK: vfmaddsub132ph xmm30, xmm29, xmm28 +// CHECK: encoding: [0x62,0x06,0x15,0x00,0x96,0xf4] + vfmaddsub132ph xmm30, xmm29, xmm28 + +// CHECK: vfmaddsub132ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x27,0x96,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmaddsub132ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfmaddsub132ph ymm30, ymm29, word ptr [r9]{1to16} +// CHECK: encoding: [0x62,0x46,0x15,0x30,0x96,0x31] + vfmaddsub132ph ymm30, ymm29, word ptr [r9]{1to16} + +// CHECK: vfmaddsub132ph ymm30, ymm29, ymmword ptr [rcx + 4064] +// CHECK: encoding: [0x62,0x66,0x15,0x20,0x96,0x71,0x7f] + vfmaddsub132ph ymm30, ymm29, ymmword ptr [rcx + 4064] + +// CHECK: vfmaddsub132ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} +// CHECK: encoding: [0x62,0x66,0x15,0xb7,0x96,0x72,0x80] + vfmaddsub132ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} + +// CHECK: vfmaddsub132ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: 
[0x62,0x26,0x15,0x07,0x96,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmaddsub132ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfmaddsub132ph xmm30, xmm29, word ptr [r9]{1to8} +// CHECK: encoding: [0x62,0x46,0x15,0x10,0x96,0x31] + vfmaddsub132ph xmm30, xmm29, word ptr [r9]{1to8} + +// CHECK: vfmaddsub132ph xmm30, xmm29, xmmword ptr [rcx + 2032] +// CHECK: encoding: [0x62,0x66,0x15,0x00,0x96,0x71,0x7f] + vfmaddsub132ph xmm30, xmm29, xmmword ptr [rcx + 2032] + +// CHECK: vfmaddsub132ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} +// CHECK: encoding: [0x62,0x66,0x15,0x97,0x96,0x72,0x80] + vfmaddsub132ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} + +// CHECK: vfmaddsub213ph ymm30, ymm29, ymm28 +// CHECK: encoding: [0x62,0x06,0x15,0x20,0xa6,0xf4] + vfmaddsub213ph ymm30, ymm29, ymm28 + +// CHECK: vfmaddsub213ph xmm30, xmm29, xmm28 +// CHECK: encoding: [0x62,0x06,0x15,0x00,0xa6,0xf4] + vfmaddsub213ph xmm30, xmm29, xmm28 + +// CHECK: vfmaddsub213ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x27,0xa6,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmaddsub213ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfmaddsub213ph ymm30, ymm29, word ptr [r9]{1to16} +// CHECK: encoding: [0x62,0x46,0x15,0x30,0xa6,0x31] + vfmaddsub213ph ymm30, ymm29, word ptr [r9]{1to16} + +// CHECK: vfmaddsub213ph ymm30, ymm29, ymmword ptr [rcx + 4064] +// CHECK: encoding: [0x62,0x66,0x15,0x20,0xa6,0x71,0x7f] + vfmaddsub213ph ymm30, ymm29, ymmword ptr [rcx + 4064] + +// CHECK: vfmaddsub213ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} +// CHECK: encoding: [0x62,0x66,0x15,0xb7,0xa6,0x72,0x80] + vfmaddsub213ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} + +// CHECK: vfmaddsub213ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x07,0xa6,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmaddsub213ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfmaddsub213ph xmm30, xmm29, word ptr [r9]{1to8} +// CHECK: encoding: [0x62,0x46,0x15,0x10,0xa6,0x31] + vfmaddsub213ph xmm30, xmm29, word ptr [r9]{1to8} + +// CHECK: vfmaddsub213ph xmm30, xmm29, xmmword ptr [rcx + 2032] +// CHECK: encoding: [0x62,0x66,0x15,0x00,0xa6,0x71,0x7f] + vfmaddsub213ph xmm30, xmm29, xmmword ptr [rcx + 2032] + +// CHECK: vfmaddsub213ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} +// CHECK: encoding: [0x62,0x66,0x15,0x97,0xa6,0x72,0x80] + vfmaddsub213ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} + +// CHECK: vfmaddsub231ph ymm30, ymm29, ymm28 +// CHECK: encoding: [0x62,0x06,0x15,0x20,0xb6,0xf4] + vfmaddsub231ph ymm30, ymm29, ymm28 + +// CHECK: vfmaddsub231ph xmm30, xmm29, xmm28 +// CHECK: encoding: [0x62,0x06,0x15,0x00,0xb6,0xf4] + vfmaddsub231ph xmm30, xmm29, xmm28 + +// CHECK: vfmaddsub231ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x27,0xb6,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmaddsub231ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfmaddsub231ph ymm30, ymm29, word ptr [r9]{1to16} +// CHECK: encoding: [0x62,0x46,0x15,0x30,0xb6,0x31] + vfmaddsub231ph ymm30, ymm29, word ptr [r9]{1to16} + +// CHECK: vfmaddsub231ph ymm30, ymm29, ymmword ptr [rcx + 4064] +// CHECK: encoding: [0x62,0x66,0x15,0x20,0xb6,0x71,0x7f] + vfmaddsub231ph ymm30, ymm29, ymmword ptr [rcx + 4064] + +// CHECK: vfmaddsub231ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} +// CHECK: encoding: [0x62,0x66,0x15,0xb7,0xb6,0x72,0x80] + vfmaddsub231ph 
ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} + +// CHECK: vfmaddsub231ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x07,0xb6,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmaddsub231ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfmaddsub231ph xmm30, xmm29, word ptr [r9]{1to8} +// CHECK: encoding: [0x62,0x46,0x15,0x10,0xb6,0x31] + vfmaddsub231ph xmm30, xmm29, word ptr [r9]{1to8} + +// CHECK: vfmaddsub231ph xmm30, xmm29, xmmword ptr [rcx + 2032] +// CHECK: encoding: [0x62,0x66,0x15,0x00,0xb6,0x71,0x7f] + vfmaddsub231ph xmm30, xmm29, xmmword ptr [rcx + 2032] + +// CHECK: vfmaddsub231ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} +// CHECK: encoding: [0x62,0x66,0x15,0x97,0xb6,0x72,0x80] + vfmaddsub231ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} + +// CHECK: vfmsub132ph ymm30, ymm29, ymm28 +// CHECK: encoding: [0x62,0x06,0x15,0x20,0x9a,0xf4] + vfmsub132ph ymm30, ymm29, ymm28 + +// CHECK: vfmsub132ph xmm30, xmm29, xmm28 +// CHECK: encoding: [0x62,0x06,0x15,0x00,0x9a,0xf4] + vfmsub132ph xmm30, xmm29, xmm28 + +// CHECK: vfmsub132ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x27,0x9a,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmsub132ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfmsub132ph ymm30, ymm29, word ptr [r9]{1to16} +// CHECK: encoding: [0x62,0x46,0x15,0x30,0x9a,0x31] + vfmsub132ph ymm30, ymm29, word ptr [r9]{1to16} + +// CHECK: vfmsub132ph ymm30, ymm29, ymmword ptr [rcx + 4064] +// CHECK: encoding: [0x62,0x66,0x15,0x20,0x9a,0x71,0x7f] + vfmsub132ph ymm30, ymm29, ymmword ptr [rcx + 4064] + +// CHECK: vfmsub132ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} +// CHECK: encoding: [0x62,0x66,0x15,0xb7,0x9a,0x72,0x80] + vfmsub132ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} + +// CHECK: vfmsub132ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x07,0x9a,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmsub132ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfmsub132ph xmm30, xmm29, word ptr [r9]{1to8} +// CHECK: encoding: [0x62,0x46,0x15,0x10,0x9a,0x31] + vfmsub132ph xmm30, xmm29, word ptr [r9]{1to8} + +// CHECK: vfmsub132ph xmm30, xmm29, xmmword ptr [rcx + 2032] +// CHECK: encoding: [0x62,0x66,0x15,0x00,0x9a,0x71,0x7f] + vfmsub132ph xmm30, xmm29, xmmword ptr [rcx + 2032] + +// CHECK: vfmsub132ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} +// CHECK: encoding: [0x62,0x66,0x15,0x97,0x9a,0x72,0x80] + vfmsub132ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} + +// CHECK: vfmsub213ph ymm30, ymm29, ymm28 +// CHECK: encoding: [0x62,0x06,0x15,0x20,0xaa,0xf4] + vfmsub213ph ymm30, ymm29, ymm28 + +// CHECK: vfmsub213ph xmm30, xmm29, xmm28 +// CHECK: encoding: [0x62,0x06,0x15,0x00,0xaa,0xf4] + vfmsub213ph xmm30, xmm29, xmm28 + +// CHECK: vfmsub213ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x27,0xaa,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmsub213ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfmsub213ph ymm30, ymm29, word ptr [r9]{1to16} +// CHECK: encoding: [0x62,0x46,0x15,0x30,0xaa,0x31] + vfmsub213ph ymm30, ymm29, word ptr [r9]{1to16} + +// CHECK: vfmsub213ph ymm30, ymm29, ymmword ptr [rcx + 4064] +// CHECK: encoding: [0x62,0x66,0x15,0x20,0xaa,0x71,0x7f] + vfmsub213ph ymm30, ymm29, ymmword ptr [rcx + 4064] + +// CHECK: vfmsub213ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} +// CHECK: 
encoding: [0x62,0x66,0x15,0xb7,0xaa,0x72,0x80] + vfmsub213ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} + +// CHECK: vfmsub213ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x07,0xaa,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmsub213ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfmsub213ph xmm30, xmm29, word ptr [r9]{1to8} +// CHECK: encoding: [0x62,0x46,0x15,0x10,0xaa,0x31] + vfmsub213ph xmm30, xmm29, word ptr [r9]{1to8} + +// CHECK: vfmsub213ph xmm30, xmm29, xmmword ptr [rcx + 2032] +// CHECK: encoding: [0x62,0x66,0x15,0x00,0xaa,0x71,0x7f] + vfmsub213ph xmm30, xmm29, xmmword ptr [rcx + 2032] + +// CHECK: vfmsub213ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} +// CHECK: encoding: [0x62,0x66,0x15,0x97,0xaa,0x72,0x80] + vfmsub213ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} + +// CHECK: vfmsub231ph ymm30, ymm29, ymm28 +// CHECK: encoding: [0x62,0x06,0x15,0x20,0xba,0xf4] + vfmsub231ph ymm30, ymm29, ymm28 + +// CHECK: vfmsub231ph xmm30, xmm29, xmm28 +// CHECK: encoding: [0x62,0x06,0x15,0x00,0xba,0xf4] + vfmsub231ph xmm30, xmm29, xmm28 + +// CHECK: vfmsub231ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x27,0xba,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmsub231ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfmsub231ph ymm30, ymm29, word ptr [r9]{1to16} +// CHECK: encoding: [0x62,0x46,0x15,0x30,0xba,0x31] + vfmsub231ph ymm30, ymm29, word ptr [r9]{1to16} + +// CHECK: vfmsub231ph ymm30, ymm29, ymmword ptr [rcx + 4064] +// CHECK: encoding: [0x62,0x66,0x15,0x20,0xba,0x71,0x7f] + vfmsub231ph ymm30, ymm29, ymmword ptr [rcx + 4064] + +// CHECK: vfmsub231ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} +// CHECK: encoding: [0x62,0x66,0x15,0xb7,0xba,0x72,0x80] + vfmsub231ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} + +// CHECK: vfmsub231ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x07,0xba,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmsub231ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfmsub231ph xmm30, xmm29, word ptr [r9]{1to8} +// CHECK: encoding: [0x62,0x46,0x15,0x10,0xba,0x31] + vfmsub231ph xmm30, xmm29, word ptr [r9]{1to8} + +// CHECK: vfmsub231ph xmm30, xmm29, xmmword ptr [rcx + 2032] +// CHECK: encoding: [0x62,0x66,0x15,0x00,0xba,0x71,0x7f] + vfmsub231ph xmm30, xmm29, xmmword ptr [rcx + 2032] + +// CHECK: vfmsub231ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} +// CHECK: encoding: [0x62,0x66,0x15,0x97,0xba,0x72,0x80] + vfmsub231ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} + +// CHECK: vfmsubadd132ph ymm30, ymm29, ymm28 +// CHECK: encoding: [0x62,0x06,0x15,0x20,0x97,0xf4] + vfmsubadd132ph ymm30, ymm29, ymm28 + +// CHECK: vfmsubadd132ph xmm30, xmm29, xmm28 +// CHECK: encoding: [0x62,0x06,0x15,0x00,0x97,0xf4] + vfmsubadd132ph xmm30, xmm29, xmm28 + +// CHECK: vfmsubadd132ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x27,0x97,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmsubadd132ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfmsubadd132ph ymm30, ymm29, word ptr [r9]{1to16} +// CHECK: encoding: [0x62,0x46,0x15,0x30,0x97,0x31] + vfmsubadd132ph ymm30, ymm29, word ptr [r9]{1to16} + +// CHECK: vfmsubadd132ph ymm30, ymm29, ymmword ptr [rcx + 4064] +// CHECK: encoding: [0x62,0x66,0x15,0x20,0x97,0x71,0x7f] + vfmsubadd132ph ymm30, ymm29, ymmword ptr [rcx + 4064] + +// CHECK: 
vfmsubadd132ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} +// CHECK: encoding: [0x62,0x66,0x15,0xb7,0x97,0x72,0x80] + vfmsubadd132ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} + +// CHECK: vfmsubadd132ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x07,0x97,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmsubadd132ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfmsubadd132ph xmm30, xmm29, word ptr [r9]{1to8} +// CHECK: encoding: [0x62,0x46,0x15,0x10,0x97,0x31] + vfmsubadd132ph xmm30, xmm29, word ptr [r9]{1to8} + +// CHECK: vfmsubadd132ph xmm30, xmm29, xmmword ptr [rcx + 2032] +// CHECK: encoding: [0x62,0x66,0x15,0x00,0x97,0x71,0x7f] + vfmsubadd132ph xmm30, xmm29, xmmword ptr [rcx + 2032] + +// CHECK: vfmsubadd132ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} +// CHECK: encoding: [0x62,0x66,0x15,0x97,0x97,0x72,0x80] + vfmsubadd132ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} + +// CHECK: vfmsubadd213ph ymm30, ymm29, ymm28 +// CHECK: encoding: [0x62,0x06,0x15,0x20,0xa7,0xf4] + vfmsubadd213ph ymm30, ymm29, ymm28 + +// CHECK: vfmsubadd213ph xmm30, xmm29, xmm28 +// CHECK: encoding: [0x62,0x06,0x15,0x00,0xa7,0xf4] + vfmsubadd213ph xmm30, xmm29, xmm28 + +// CHECK: vfmsubadd213ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x27,0xa7,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmsubadd213ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfmsubadd213ph ymm30, ymm29, word ptr [r9]{1to16} +// CHECK: encoding: [0x62,0x46,0x15,0x30,0xa7,0x31] + vfmsubadd213ph ymm30, ymm29, word ptr [r9]{1to16} + +// CHECK: vfmsubadd213ph ymm30, ymm29, ymmword ptr [rcx + 4064] +// CHECK: encoding: [0x62,0x66,0x15,0x20,0xa7,0x71,0x7f] + vfmsubadd213ph ymm30, ymm29, ymmword ptr [rcx + 4064] + +// CHECK: vfmsubadd213ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} +// CHECK: encoding: [0x62,0x66,0x15,0xb7,0xa7,0x72,0x80] + vfmsubadd213ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} + +// CHECK: vfmsubadd213ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x07,0xa7,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmsubadd213ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfmsubadd213ph xmm30, xmm29, word ptr [r9]{1to8} +// CHECK: encoding: [0x62,0x46,0x15,0x10,0xa7,0x31] + vfmsubadd213ph xmm30, xmm29, word ptr [r9]{1to8} + +// CHECK: vfmsubadd213ph xmm30, xmm29, xmmword ptr [rcx + 2032] +// CHECK: encoding: [0x62,0x66,0x15,0x00,0xa7,0x71,0x7f] + vfmsubadd213ph xmm30, xmm29, xmmword ptr [rcx + 2032] + +// CHECK: vfmsubadd213ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} +// CHECK: encoding: [0x62,0x66,0x15,0x97,0xa7,0x72,0x80] + vfmsubadd213ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} + +// CHECK: vfmsubadd231ph ymm30, ymm29, ymm28 +// CHECK: encoding: [0x62,0x06,0x15,0x20,0xb7,0xf4] + vfmsubadd231ph ymm30, ymm29, ymm28 + +// CHECK: vfmsubadd231ph xmm30, xmm29, xmm28 +// CHECK: encoding: [0x62,0x06,0x15,0x00,0xb7,0xf4] + vfmsubadd231ph xmm30, xmm29, xmm28 + +// CHECK: vfmsubadd231ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x27,0xb7,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmsubadd231ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfmsubadd231ph ymm30, ymm29, word ptr [r9]{1to16} +// CHECK: encoding: [0x62,0x46,0x15,0x30,0xb7,0x31] + vfmsubadd231ph ymm30, ymm29, word ptr [r9]{1to16} + +// CHECK: vfmsubadd231ph ymm30, 
ymm29, ymmword ptr [rcx + 4064] +// CHECK: encoding: [0x62,0x66,0x15,0x20,0xb7,0x71,0x7f] + vfmsubadd231ph ymm30, ymm29, ymmword ptr [rcx + 4064] + +// CHECK: vfmsubadd231ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} +// CHECK: encoding: [0x62,0x66,0x15,0xb7,0xb7,0x72,0x80] + vfmsubadd231ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} + +// CHECK: vfmsubadd231ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x07,0xb7,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfmsubadd231ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfmsubadd231ph xmm30, xmm29, word ptr [r9]{1to8} +// CHECK: encoding: [0x62,0x46,0x15,0x10,0xb7,0x31] + vfmsubadd231ph xmm30, xmm29, word ptr [r9]{1to8} + +// CHECK: vfmsubadd231ph xmm30, xmm29, xmmword ptr [rcx + 2032] +// CHECK: encoding: [0x62,0x66,0x15,0x00,0xb7,0x71,0x7f] + vfmsubadd231ph xmm30, xmm29, xmmword ptr [rcx + 2032] + +// CHECK: vfmsubadd231ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} +// CHECK: encoding: [0x62,0x66,0x15,0x97,0xb7,0x72,0x80] + vfmsubadd231ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} + +// CHECK: vfnmadd132ph ymm30, ymm29, ymm28 +// CHECK: encoding: [0x62,0x06,0x15,0x20,0x9c,0xf4] + vfnmadd132ph ymm30, ymm29, ymm28 + +// CHECK: vfnmadd132ph xmm30, xmm29, xmm28 +// CHECK: encoding: [0x62,0x06,0x15,0x00,0x9c,0xf4] + vfnmadd132ph xmm30, xmm29, xmm28 + +// CHECK: vfnmadd132ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x27,0x9c,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfnmadd132ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfnmadd132ph ymm30, ymm29, word ptr [r9]{1to16} +// CHECK: encoding: [0x62,0x46,0x15,0x30,0x9c,0x31] + vfnmadd132ph ymm30, ymm29, word ptr [r9]{1to16} + +// CHECK: vfnmadd132ph ymm30, ymm29, ymmword ptr [rcx + 4064] +// CHECK: encoding: [0x62,0x66,0x15,0x20,0x9c,0x71,0x7f] + vfnmadd132ph ymm30, ymm29, ymmword ptr [rcx + 4064] + +// CHECK: vfnmadd132ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} +// CHECK: encoding: [0x62,0x66,0x15,0xb7,0x9c,0x72,0x80] + vfnmadd132ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} + +// CHECK: vfnmadd132ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x07,0x9c,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfnmadd132ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfnmadd132ph xmm30, xmm29, word ptr [r9]{1to8} +// CHECK: encoding: [0x62,0x46,0x15,0x10,0x9c,0x31] + vfnmadd132ph xmm30, xmm29, word ptr [r9]{1to8} + +// CHECK: vfnmadd132ph xmm30, xmm29, xmmword ptr [rcx + 2032] +// CHECK: encoding: [0x62,0x66,0x15,0x00,0x9c,0x71,0x7f] + vfnmadd132ph xmm30, xmm29, xmmword ptr [rcx + 2032] + +// CHECK: vfnmadd132ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} +// CHECK: encoding: [0x62,0x66,0x15,0x97,0x9c,0x72,0x80] + vfnmadd132ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} + +// CHECK: vfnmadd213ph ymm30, ymm29, ymm28 +// CHECK: encoding: [0x62,0x06,0x15,0x20,0xac,0xf4] + vfnmadd213ph ymm30, ymm29, ymm28 + +// CHECK: vfnmadd213ph xmm30, xmm29, xmm28 +// CHECK: encoding: [0x62,0x06,0x15,0x00,0xac,0xf4] + vfnmadd213ph xmm30, xmm29, xmm28 + +// CHECK: vfnmadd213ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x27,0xac,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfnmadd213ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfnmadd213ph ymm30, ymm29, word ptr [r9]{1to16} +// CHECK: encoding: 
[0x62,0x46,0x15,0x30,0xac,0x31] + vfnmadd213ph ymm30, ymm29, word ptr [r9]{1to16} + +// CHECK: vfnmadd213ph ymm30, ymm29, ymmword ptr [rcx + 4064] +// CHECK: encoding: [0x62,0x66,0x15,0x20,0xac,0x71,0x7f] + vfnmadd213ph ymm30, ymm29, ymmword ptr [rcx + 4064] + +// CHECK: vfnmadd213ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} +// CHECK: encoding: [0x62,0x66,0x15,0xb7,0xac,0x72,0x80] + vfnmadd213ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} + +// CHECK: vfnmadd213ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x07,0xac,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfnmadd213ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfnmadd213ph xmm30, xmm29, word ptr [r9]{1to8} +// CHECK: encoding: [0x62,0x46,0x15,0x10,0xac,0x31] + vfnmadd213ph xmm30, xmm29, word ptr [r9]{1to8} + +// CHECK: vfnmadd213ph xmm30, xmm29, xmmword ptr [rcx + 2032] +// CHECK: encoding: [0x62,0x66,0x15,0x00,0xac,0x71,0x7f] + vfnmadd213ph xmm30, xmm29, xmmword ptr [rcx + 2032] + +// CHECK: vfnmadd213ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} +// CHECK: encoding: [0x62,0x66,0x15,0x97,0xac,0x72,0x80] + vfnmadd213ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} + +// CHECK: vfnmadd231ph ymm30, ymm29, ymm28 +// CHECK: encoding: [0x62,0x06,0x15,0x20,0xbc,0xf4] + vfnmadd231ph ymm30, ymm29, ymm28 + +// CHECK: vfnmadd231ph xmm30, xmm29, xmm28 +// CHECK: encoding: [0x62,0x06,0x15,0x00,0xbc,0xf4] + vfnmadd231ph xmm30, xmm29, xmm28 + +// CHECK: vfnmadd231ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x27,0xbc,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfnmadd231ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfnmadd231ph ymm30, ymm29, word ptr [r9]{1to16} +// CHECK: encoding: [0x62,0x46,0x15,0x30,0xbc,0x31] + vfnmadd231ph ymm30, ymm29, word ptr [r9]{1to16} + +// CHECK: vfnmadd231ph ymm30, ymm29, ymmword ptr [rcx + 4064] +// CHECK: encoding: [0x62,0x66,0x15,0x20,0xbc,0x71,0x7f] + vfnmadd231ph ymm30, ymm29, ymmword ptr [rcx + 4064] + +// CHECK: vfnmadd231ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} +// CHECK: encoding: [0x62,0x66,0x15,0xb7,0xbc,0x72,0x80] + vfnmadd231ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} + +// CHECK: vfnmadd231ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x07,0xbc,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfnmadd231ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfnmadd231ph xmm30, xmm29, word ptr [r9]{1to8} +// CHECK: encoding: [0x62,0x46,0x15,0x10,0xbc,0x31] + vfnmadd231ph xmm30, xmm29, word ptr [r9]{1to8} + +// CHECK: vfnmadd231ph xmm30, xmm29, xmmword ptr [rcx + 2032] +// CHECK: encoding: [0x62,0x66,0x15,0x00,0xbc,0x71,0x7f] + vfnmadd231ph xmm30, xmm29, xmmword ptr [rcx + 2032] + +// CHECK: vfnmadd231ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} +// CHECK: encoding: [0x62,0x66,0x15,0x97,0xbc,0x72,0x80] + vfnmadd231ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} + +// CHECK: vfnmsub132ph ymm30, ymm29, ymm28 +// CHECK: encoding: [0x62,0x06,0x15,0x20,0x9e,0xf4] + vfnmsub132ph ymm30, ymm29, ymm28 + +// CHECK: vfnmsub132ph xmm30, xmm29, xmm28 +// CHECK: encoding: [0x62,0x06,0x15,0x00,0x9e,0xf4] + vfnmsub132ph xmm30, xmm29, xmm28 + +// CHECK: vfnmsub132ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x27,0x9e,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfnmsub132ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] + +// 
CHECK: vfnmsub132ph ymm30, ymm29, word ptr [r9]{1to16} +// CHECK: encoding: [0x62,0x46,0x15,0x30,0x9e,0x31] + vfnmsub132ph ymm30, ymm29, word ptr [r9]{1to16} + +// CHECK: vfnmsub132ph ymm30, ymm29, ymmword ptr [rcx + 4064] +// CHECK: encoding: [0x62,0x66,0x15,0x20,0x9e,0x71,0x7f] + vfnmsub132ph ymm30, ymm29, ymmword ptr [rcx + 4064] + +// CHECK: vfnmsub132ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} +// CHECK: encoding: [0x62,0x66,0x15,0xb7,0x9e,0x72,0x80] + vfnmsub132ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} + +// CHECK: vfnmsub132ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x07,0x9e,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfnmsub132ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfnmsub132ph xmm30, xmm29, word ptr [r9]{1to8} +// CHECK: encoding: [0x62,0x46,0x15,0x10,0x9e,0x31] + vfnmsub132ph xmm30, xmm29, word ptr [r9]{1to8} + +// CHECK: vfnmsub132ph xmm30, xmm29, xmmword ptr [rcx + 2032] +// CHECK: encoding: [0x62,0x66,0x15,0x00,0x9e,0x71,0x7f] + vfnmsub132ph xmm30, xmm29, xmmword ptr [rcx + 2032] + +// CHECK: vfnmsub132ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} +// CHECK: encoding: [0x62,0x66,0x15,0x97,0x9e,0x72,0x80] + vfnmsub132ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} + +// CHECK: vfnmsub213ph ymm30, ymm29, ymm28 +// CHECK: encoding: [0x62,0x06,0x15,0x20,0xae,0xf4] + vfnmsub213ph ymm30, ymm29, ymm28 + +// CHECK: vfnmsub213ph xmm30, xmm29, xmm28 +// CHECK: encoding: [0x62,0x06,0x15,0x00,0xae,0xf4] + vfnmsub213ph xmm30, xmm29, xmm28 + +// CHECK: vfnmsub213ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x27,0xae,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfnmsub213ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfnmsub213ph ymm30, ymm29, word ptr [r9]{1to16} +// CHECK: encoding: [0x62,0x46,0x15,0x30,0xae,0x31] + vfnmsub213ph ymm30, ymm29, word ptr [r9]{1to16} + +// CHECK: vfnmsub213ph ymm30, ymm29, ymmword ptr [rcx + 4064] +// CHECK: encoding: [0x62,0x66,0x15,0x20,0xae,0x71,0x7f] + vfnmsub213ph ymm30, ymm29, ymmword ptr [rcx + 4064] + +// CHECK: vfnmsub213ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} +// CHECK: encoding: [0x62,0x66,0x15,0xb7,0xae,0x72,0x80] + vfnmsub213ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16} + +// CHECK: vfnmsub213ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x07,0xae,0xb4,0xf5,0x00,0x00,0x00,0x10] + vfnmsub213ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456] + +// CHECK: vfnmsub213ph xmm30, xmm29, word ptr [r9]{1to8} +// CHECK: encoding: [0x62,0x46,0x15,0x10,0xae,0x31] + vfnmsub213ph xmm30, xmm29, word ptr [r9]{1to8} + +// CHECK: vfnmsub213ph xmm30, xmm29, xmmword ptr [rcx + 2032] +// CHECK: encoding: [0x62,0x66,0x15,0x00,0xae,0x71,0x7f] + vfnmsub213ph xmm30, xmm29, xmmword ptr [rcx + 2032] + +// CHECK: vfnmsub213ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} +// CHECK: encoding: [0x62,0x66,0x15,0x97,0xae,0x72,0x80] + vfnmsub213ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8} + +// CHECK: vfnmsub231ph ymm30, ymm29, ymm28 +// CHECK: encoding: [0x62,0x06,0x15,0x20,0xbe,0xf4] + vfnmsub231ph ymm30, ymm29, ymm28 + +// CHECK: vfnmsub231ph xmm30, xmm29, xmm28 +// CHECK: encoding: [0x62,0x06,0x15,0x00,0xbe,0xf4] + vfnmsub231ph xmm30, xmm29, xmm28 + +// CHECK: vfnmsub231ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456] +// CHECK: encoding: [0x62,0x26,0x15,0x27,0xbe,0xb4,0xf5,0x00,0x00,0x00,0x10] + 
vfnmsub231ph ymm30 {k7}, ymm29, ymmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vfnmsub231ph ymm30, ymm29, word ptr [r9]{1to16}
+// CHECK: encoding: [0x62,0x46,0x15,0x30,0xbe,0x31]
+ vfnmsub231ph ymm30, ymm29, word ptr [r9]{1to16}
+
+// CHECK: vfnmsub231ph ymm30, ymm29, ymmword ptr [rcx + 4064]
+// CHECK: encoding: [0x62,0x66,0x15,0x20,0xbe,0x71,0x7f]
+ vfnmsub231ph ymm30, ymm29, ymmword ptr [rcx + 4064]
+
+// CHECK: vfnmsub231ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16}
+// CHECK: encoding: [0x62,0x66,0x15,0xb7,0xbe,0x72,0x80]
+ vfnmsub231ph ymm30 {k7} {z}, ymm29, word ptr [rdx - 256]{1to16}
+
+// CHECK: vfnmsub231ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456]
+// CHECK: encoding: [0x62,0x26,0x15,0x07,0xbe,0xb4,0xf5,0x00,0x00,0x00,0x10]
+ vfnmsub231ph xmm30 {k7}, xmm29, xmmword ptr [rbp + 8*r14 + 268435456]
+
+// CHECK: vfnmsub231ph xmm30, xmm29, word ptr [r9]{1to8}
+// CHECK: encoding: [0x62,0x46,0x15,0x10,0xbe,0x31]
+ vfnmsub231ph xmm30, xmm29, word ptr [r9]{1to8}
+
+// CHECK: vfnmsub231ph xmm30, xmm29, xmmword ptr [rcx + 2032]
+// CHECK: encoding: [0x62,0x66,0x15,0x00,0xbe,0x71,0x7f]
+ vfnmsub231ph xmm30, xmm29, xmmword ptr [rcx + 2032]
+
+// CHECK: vfnmsub231ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8}
+// CHECK: encoding: [0x62,0x66,0x15,0x97,0xbe,0x72,0x80]
+ vfnmsub231ph xmm30 {k7} {z}, xmm29, word ptr [rdx - 256]{1to8}

From d95d2a8e4aaae9521278c2968fb6f1baa820b817 Mon Sep 17 00:00:00 2001
From: Omar Emara
Date: Thu, 19 Aug 2021 11:47:07 -0700
Subject: [PATCH 7/9] [LLDB][GUI] Add extra keys to text field

This patch adds many new keys to the text field and implements new
behaviors as follows:

```
case KEY_HOME:
case KEY_CTRL_A:
  MoveCursorToStart();
case KEY_END:
case KEY_CTRL_E:
  MoveCursorToEnd();
case KEY_RIGHT:
case KEY_SF:
  MoveCursorRight();
case KEY_LEFT:
case KEY_SR:
  MoveCursorLeft();
case KEY_BACKSPACE:
case KEY_DELETE:
  RemovePreviousChar();
case KEY_DC:
  RemoveNextChar();
case KEY_EOL:
case KEY_CTRL_K:
  ClearToEnd();
case KEY_DL:
case KEY_CLEAR:
  Clear();
```

This patch also refactors scrolling to be dynamic at draw time for
easier handling.

Differential Revision: https://reviews.llvm.org/D108385
---
 lldb/source/Core/IOHandlerCursesGUI.cpp | 101 ++++++++++++++++++------
 1 file changed, 75 insertions(+), 26 deletions(-)

diff --git a/lldb/source/Core/IOHandlerCursesGUI.cpp b/lldb/source/Core/IOHandlerCursesGUI.cpp
index 9dc13ccbb4764..1539175fb60ec 100644
--- a/lldb/source/Core/IOHandlerCursesGUI.cpp
+++ b/lldb/source/Core/IOHandlerCursesGUI.cpp
@@ -85,8 +85,12 @@ using llvm::StringRef;
 // we may want curses to be disabled for some builds for instance, windows
 #if LLDB_ENABLE_CURSES

+#define KEY_CTRL_A 1
+#define KEY_CTRL_E 5
+#define KEY_CTRL_K 11
 #define KEY_RETURN 10
 #define KEY_ESCAPE 27
+#define KEY_DELETE 127

 #define KEY_SHIFT_TAB (KEY_MAX + 1)

@@ -1106,10 +1110,11 @@ class TextFieldDelegate : public FieldDelegate {
   int GetContentLength() { return m_content.length(); }

   void DrawContent(Surface &surface, bool is_selected) {
+    UpdateScrolling(surface.GetWidth());
+
     surface.MoveCursor(0, 0);
     const char *text = m_content.c_str() + m_first_visibile_char;
     surface.PutCString(text, surface.GetWidth());
-    m_last_drawn_content_width = surface.GetWidth();

     // Highlight the cursor.
     surface.MoveCursor(GetCursorXPosition(), 0);
@@ -1156,6 +1161,22 @@ class TextFieldDelegate : public FieldDelegate {
     DrawError(error_surface);
   }

+  // Get the position of the last visible character.
+ int GetLastVisibleCharPosition(int width) { + int position = m_first_visibile_char + width - 1; + return std::min(position, GetContentLength()); + } + + void UpdateScrolling(int width) { + if (m_cursor_position < m_first_visibile_char) { + m_first_visibile_char = m_cursor_position; + return; + } + + if (m_cursor_position > GetLastVisibleCharPosition(width)) + m_first_visibile_char = m_cursor_position - (width - 1); + } + // The cursor is allowed to move one character past the string. // m_cursor_position is in range [0, GetContentLength()]. void MoveCursorRight() { @@ -1168,42 +1189,54 @@ class TextFieldDelegate : public FieldDelegate { m_cursor_position--; } - // If the cursor moved past the last visible character, scroll right by one - // character. - void ScrollRightIfNeeded() { - if (m_cursor_position - m_first_visibile_char == m_last_drawn_content_width) - m_first_visibile_char++; - } + void MoveCursorToStart() { m_cursor_position = 0; } + + void MoveCursorToEnd() { m_cursor_position = GetContentLength(); } void ScrollLeft() { if (m_first_visibile_char > 0) m_first_visibile_char--; } - // If the cursor moved past the first visible character, scroll left by one - // character. - void ScrollLeftIfNeeded() { - if (m_cursor_position < m_first_visibile_char) - m_first_visibile_char--; - } - - // Insert a character at the current cursor position, advance the cursor - // position, and make sure to scroll right if needed. + // Insert a character at the current cursor position and advance the cursor + // position. void InsertChar(char character) { m_content.insert(m_cursor_position, 1, character); m_cursor_position++; - ScrollRightIfNeeded(); + ClearError(); } // Remove the character before the cursor position, retreat the cursor - // position, and make sure to scroll left if needed. - void RemoveChar() { + // position, and scroll left. + void RemovePreviousChar() { if (m_cursor_position == 0) return; m_content.erase(m_cursor_position - 1, 1); m_cursor_position--; ScrollLeft(); + ClearError(); + } + + // Remove the character after the cursor position. + void RemoveNextChar() { + if (m_cursor_position == GetContentLength()) + return; + + m_content.erase(m_cursor_position, 1); + ClearError(); + } + + // Clear characters from the current cursor position to the end. + void ClearToEnd() { + m_content.erase(m_cursor_position); + ClearError(); + } + + void Clear() { + m_content.clear(); + m_cursor_position = 0; + ClearError(); } // True if the key represents a char that can be inserted in the field @@ -1224,17 +1257,36 @@ class TextFieldDelegate : public FieldDelegate { } switch (key) { + case KEY_HOME: + case KEY_CTRL_A: + MoveCursorToStart(); + return eKeyHandled; + case KEY_END: + case KEY_CTRL_E: + MoveCursorToEnd(); + return eKeyHandled; case KEY_RIGHT: + case KEY_SF: MoveCursorRight(); - ScrollRightIfNeeded(); return eKeyHandled; case KEY_LEFT: + case KEY_SR: MoveCursorLeft(); - ScrollLeftIfNeeded(); return eKeyHandled; case KEY_BACKSPACE: - ClearError(); - RemoveChar(); + case KEY_DELETE: + RemovePreviousChar(); + return eKeyHandled; + case KEY_DC: + RemoveNextChar(); + return eKeyHandled; + case KEY_EOL: + case KEY_CTRL_K: + ClearToEnd(); + return eKeyHandled; + case KEY_DL: + case KEY_CLEAR: + Clear(); return eKeyHandled; default: break; @@ -1277,9 +1329,6 @@ class TextFieldDelegate : public FieldDelegate { int m_cursor_position; // The index of the first visible character in the content. int m_first_visibile_char; - // The width of the fields content that was last drawn. 
Width can change, so
-  // this is used to determine if scrolling is needed dynamically.
-  int m_last_drawn_content_width;
   // Optional error message. If empty, field is considered to have no error.
   std::string m_error;
 };

From 8111f2f7eef50f8c0cdfd4b575e275c0c4b7fc41 Mon Sep 17 00:00:00 2001
From: Petr Hosek
Date: Mon, 23 Aug 2021 21:24:24 -0700
Subject: [PATCH 8/9] [profile] Update counter offset to account for binary ids

In the raw profile, binary ids immediately follow the header, so when
computing the counters offset we need to account for the new section.

Differential Revision: https://reviews.llvm.org/D108608
---
 compiler-rt/lib/profile/InstrProfilingFile.c            | 2 +-
 compiler-rt/lib/profile/InstrProfilingPlatformFuchsia.c | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/compiler-rt/lib/profile/InstrProfilingFile.c b/compiler-rt/lib/profile/InstrProfilingFile.c
index fb4c2fecefacb..1c53a3f172f71 100644
--- a/compiler-rt/lib/profile/InstrProfilingFile.c
+++ b/compiler-rt/lib/profile/InstrProfilingFile.c
@@ -591,7 +591,7 @@ static void initializeProfileForContinuousMode(void) {
     return;
   }
   const uint64_t CountersOffsetInBiasMode =
-      sizeof(__llvm_profile_header) +
+      sizeof(__llvm_profile_header) + __llvm_write_binary_ids(NULL) +
       (DataSize * sizeof(__llvm_profile_data));
   /* Update the profile fields based on the current mapping. */
   INSTR_PROF_PROFILE_COUNTER_BIAS_VAR = (intptr_t)Profile -
diff --git a/compiler-rt/lib/profile/InstrProfilingPlatformFuchsia.c b/compiler-rt/lib/profile/InstrProfilingPlatformFuchsia.c
index 0146b14c193fc..7de6aa83f8afa 100644
--- a/compiler-rt/lib/profile/InstrProfilingPlatformFuchsia.c
+++ b/compiler-rt/lib/profile/InstrProfilingPlatformFuchsia.c
@@ -120,7 +120,8 @@ void __llvm_profile_initialize(void) {
   const uint64_t *CountersEnd = __llvm_profile_end_counters();
   const uint64_t DataSize = __llvm_profile_get_data_size(DataBegin, DataEnd);
   const uint64_t CountersOffset =
-      sizeof(__llvm_profile_header) + (DataSize * sizeof(__llvm_profile_data));
+      sizeof(__llvm_profile_header) + __llvm_write_binary_ids(NULL) +
+      (DataSize * sizeof(__llvm_profile_data));
   uint64_t CountersSize = CountersEnd - CountersBegin;

   /* Don't publish a VMO if there are no counters. */

From 945cde8b6a4579406886d08f803b632e00f0b1ec Mon Sep 17 00:00:00 2001
From: Omar Emara
Date: Mon, 23 Aug 2021 21:18:05 -0700
Subject: [PATCH 9/9] [LLDB][GUI] Add submit form key combination

This patch adds a new Alt+Enter key combination to form windows. Once
invoked, the first action is executed without having to navigate to its
button. Field exit callbacks are now also invoked on validation to
support the aforementioned key combination.

One concern for this key combination is its potential use by the window
manager of the host. I am not sure if this will be a problem, but it is
worth taking into consideration.

Differential Revision: https://reviews.llvm.org/D108410
---
 lldb/source/Core/IOHandlerCursesGUI.cpp | 25 +++++++++++++++++++++----
 1 file changed, 21 insertions(+), 4 deletions(-)

diff --git a/lldb/source/Core/IOHandlerCursesGUI.cpp b/lldb/source/Core/IOHandlerCursesGUI.cpp
index 1539175fb60ec..ca06a332e4edd 100644
--- a/lldb/source/Core/IOHandlerCursesGUI.cpp
+++ b/lldb/source/Core/IOHandlerCursesGUI.cpp
@@ -93,6 +93,7 @@ using llvm::StringRef;
 #define KEY_DELETE 127

 #define KEY_SHIFT_TAB (KEY_MAX + 1)
+#define KEY_ALT_ENTER (KEY_MAX + 2)

 namespace curses {
 class Menu;
@@ -2315,6 +2316,7 @@ class FormDelegate {
   // action that requires valid fields.
bool CheckFieldsValidity() { for (int i = 0; i < GetNumberOfFields(); i++) { + GetField(i)->FieldDelegateExitCallback(); if (GetField(i)->FieldDelegateHasError()) { SetError("Some fields are invalid!"); return false; @@ -2649,13 +2651,24 @@ class FormWindowDelegate : public WindowDelegate { Size(width, copy_height)); } + void DrawSubmitHint(Surface &surface, bool is_active) { + surface.MoveCursor(2, surface.GetHeight() - 1); + if (is_active) + surface.AttributeOn(A_BOLD | COLOR_PAIR(BlackOnWhite)); + surface.Printf("[Press Alt+Enter to %s]", + m_delegate_sp->GetAction(0).GetLabel().c_str()); + if (is_active) + surface.AttributeOff(A_BOLD | COLOR_PAIR(BlackOnWhite)); + } + bool WindowDelegateDraw(Window &window, bool force) override { m_delegate_sp->UpdateFieldsVisibility(); window.Erase(); window.DrawTitleBox(m_delegate_sp->GetName().c_str(), - "Press Esc to cancel"); + "Press Esc to Cancel"); + DrawSubmitHint(window, window.IsActive()); Rect content_bounds = window.GetFrame(); content_bounds.Inset(2, 2); @@ -2778,8 +2791,8 @@ class FormWindowDelegate : public WindowDelegate { return eKeyHandled; } - void ExecuteAction(Window &window) { - FormAction &action = m_delegate_sp->GetAction(m_selection_index); + void ExecuteAction(Window &window, int index) { + FormAction &action = m_delegate_sp->GetAction(index); action.Execute(window); if (m_delegate_sp->HasError()) { m_first_visible_line = 0; @@ -2794,10 +2807,13 @@ class FormWindowDelegate : public WindowDelegate { case '\n': case KEY_ENTER: if (m_selection_type == SelectionType::Action) { - ExecuteAction(window); + ExecuteAction(window, m_selection_index); return eKeyHandled; } break; + case KEY_ALT_ENTER: + ExecuteAction(window, 0); + return eKeyHandled; case '\t': return SelectNext(key); case KEY_SHIFT_TAB: @@ -7458,6 +7474,7 @@ void IOHandlerCursesGUI::Activate() { static_assert(LastColorPairIndex == 18, "Color indexes do not match."); define_key("\033[Z", KEY_SHIFT_TAB); + define_key("\033\015", KEY_ALT_ENTER); } }
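
The counter-offset change in PATCH 8/9 above is easier to follow with the raw-profile layout spelled out: the header comes first, binary ids now follow it, then the data records, and only then the counters. The C++ sketch below is not compiler-rt code; every size in it is an invented placeholder, and only the section ordering and the offset formula mirror the patch.

```cpp
#include <cstdint>
#include <iostream>

int main() {
  // Placeholder sizes; the real values come from sizeof(__llvm_profile_header),
  // __llvm_write_binary_ids(NULL), and the number/size of profile data records.
  const uint64_t HeaderSize = 96;
  const uint64_t BinaryIdsSize = 24;
  const uint64_t NumData = 10;
  const uint64_t DataRecordSize = 48;

  // Assumed raw profile layout: header | binary ids | data | counters.
  // Before the patch, the BinaryIdsSize term was missing from this sum.
  const uint64_t CountersOffset =
      HeaderSize + BinaryIdsSize + NumData * DataRecordSize;
  std::cout << "counters section starts at byte " << CountersOffset << "\n";
  return 0;
}
```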
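
The Alt+Enter handling in PATCH 9/9 hinges on the final hunk: define_key teaches curses to report the two-byte sequence ESC ("\033") followed by CR ("\015") as a single synthetic key code above KEY_MAX, the same trick already used for KEY_SHIFT_TAB. The standalone ncurses sketch below is not LLDB code; the demo key name and program structure are illustrative only, and whether a given terminal or window manager actually delivers Alt+Enter as ESC+CR varies, which is the concern noted in the commit message.

```cpp
// Standalone ncurses demo: bind ESC + CR, which many terminals send for
// Alt+Enter, to a synthetic key code above KEY_MAX, mirroring KEY_ALT_ENTER.
#include <ncurses.h>

#define DEMO_KEY_ALT_ENTER (KEY_MAX + 2)

int main() {
  initscr();
  raw();
  noecho();
  keypad(stdscr, TRUE);
  // Teach ncurses to report "\033\015" (ESC followed by CR) as one key press.
  define_key("\033\015", DEMO_KEY_ALT_ENTER);

  printw("Press Alt+Enter to 'submit', or q to quit.\n");
  refresh();
  for (;;) {
    int key = getch();
    if (key == DEMO_KEY_ALT_ENTER)
      printw("Alt+Enter received: a form would run its first action here.\n");
    else if (key == 'q')
      break;
    refresh();
  }
  endwin();
  return 0;
}
```

Built with something like `g++ alt_enter_demo.cpp -lncurses`, the demo only reacts once the full escape sequence arrives, which is why the patch can reuse the ordinary key-handling switch instead of tracking a modifier state.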