
Commit 560564f

[mlir][vector][gpu] Align minf/maxf reduction kind names with arith (#75901)
This is to avoid confusion when dealing with reduction/combining kinds. For example, see a recent PR comment: llvm/llvm-project#75846 (comment).

Previously, they were picked to mostly mirror the names of the llvm vector reduction intrinsics: https://llvm.org/docs/LangRef.html#llvm-vector-reduce-fmin-intrinsic. In isolation, it was not clear whether `<maxf>` had `arith.maxnumf` or `arith.maximumf` semantics. The new reduction kind names map 1:1 to arith ops, which makes it easier to tell and look up their semantics.

Because both the vector and the gpu dialect depend on the arith dialect, it is more natural to align the names with those in arith than with the lowering to llvm intrinsics.

Issue: llvm/llvm-project#72354
1 parent 5136c16 commit 560564f
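For illustration, here is the same floating-point reduction spelled with the old and the new kind names, in the style of the test updates below (value names such as %v and %x are invented for this sketch; the rename does not change the semantics of either op):

// Old spelling (before this commit):
%r = vector.reduction <maxf>, %v : vector<16xf32> into f32
%s = gpu.all_reduce maxf %x uniform {} : (f32) -> (f32)

// New spelling, named after the matching arith op (arith.maxnumf):
%r = vector.reduction <maxnumf>, %v : vector<16xf32> into f32
%s = gpu.all_reduce maxnumf %x uniform {} : (f32) -> (f32)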

26 files changed: +101 −99 lines changed

mlir/include/mlir/Dialect/GPU/IR/GPUOps.td

+6-6
@@ -937,11 +937,11 @@ def GPU_AllReduceOpMul : I32EnumAttrCase<"MUL", 1, "mul">;
 def GPU_AllReduceOpMinUI : I32EnumAttrCase<"MINUI", 2, "minui">;
 def GPU_AllReduceOpMinSI : I32EnumAttrCase<"MINSI", 3, "minsi">;
 // Follows the `arith.minnumf` semantics.
-def GPU_AllReduceOpMinF : I32EnumAttrCase<"MINF", 4, "minf">;
+def GPU_AllReduceOpMinnumF : I32EnumAttrCase<"MINNUMF", 4, "minnumf">;
 def GPU_AllReduceOpMaxUI : I32EnumAttrCase<"MAXUI", 5, "maxui">;
 def GPU_AllReduceOpMaxSI : I32EnumAttrCase<"MAXSI", 6, "maxsi">;
 // Follows the `arith.maxnumf` semantics.
-def GPU_AllReduceOpMaxF : I32EnumAttrCase<"MAXF", 7, "maxf">;
+def GPU_AllReduceOpMaxnumF : I32EnumAttrCase<"MAXNUMF", 7, "maxnumf">;
 def GPU_AllReduceOpAnd : I32EnumAttrCase<"AND", 8, "and">;
 def GPU_AllReduceOpOr : I32EnumAttrCase<"OR", 9, "or">;
 def GPU_AllReduceOpXor : I32EnumAttrCase<"XOR", 10, "xor">;
@@ -957,10 +957,10 @@ def GPU_AllReduceOperation : I32EnumAttr<"AllReduceOperation",
 GPU_AllReduceOpMul,
 GPU_AllReduceOpMinUI,
 GPU_AllReduceOpMinSI,
-GPU_AllReduceOpMinF,
+GPU_AllReduceOpMinnumF,
 GPU_AllReduceOpMaxUI,
 GPU_AllReduceOpMaxSI,
-GPU_AllReduceOpMaxF,
+GPU_AllReduceOpMaxnumF,
 GPU_AllReduceOpAnd,
 GPU_AllReduceOpOr,
 GPU_AllReduceOpXor,
@@ -999,7 +999,7 @@ def GPU_AllReduceOp : GPU_Op<"all_reduce",
 accumulation as code region. The reduction operation must be one of:
 * Integer types: `add`, `mul`, `minui`, `minsi`, `maxui`, `maxsi`, `and`,
 `or`, `xor`
-* Floating point types: `add`, `mul`, `minf`, `maxf`, `minimumf`,
+* Floating point types: `add`, `mul`, `minnumf`, `maxnumf`, `minimumf`,
 `maximumf`

 If `uniform` flag is set either none or all work items of a workgroup
@@ -1039,7 +1039,7 @@ def GPU_SubgroupReduceOp : GPU_Op<"subgroup_reduce", [SameOperandsAndResultType]
 of:
 * Integer types: `add`, `mul`, `minui`, `minsi`, `maxui`, `maxsi`, `and`,
 `or`, `xor`
-* Floating point types: `add`, `mul`, `minf`, `maxf`, `minimumf`,
+* Floating point types: `add`, `mul`, `minnumf`, `maxnumf`, `minimumf`,
 `maximumf`
 }];
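The enum comments above tie the renamed cases to `arith.minnumf`/`arith.maxnumf`. As a rough sketch of why the old `minf`/`maxf` spelling was ambiguous (values are illustrative; see the arith dialect docs for the authoritative semantics), the `*numf` ops return the other operand when one operand is NaN, while the `*imumf` ops propagate NaN:

// Assuming %x is an ordinary f32 value and %nan is a NaN:
%a = arith.maxnumf %x, %nan : f32   // yields %x (NaN is ignored)
%b = arith.maximumf %x, %nan : f32  // yields NaN (NaN propagates)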

mlir/include/mlir/Dialect/Vector/IR/VectorAttributes.td

+4-4
@@ -21,10 +21,10 @@ def COMBINING_KIND_ADD : I32BitEnumAttrCaseBit<"ADD", 0, "add">;
 def COMBINING_KIND_MUL : I32BitEnumAttrCaseBit<"MUL", 1, "mul">;
 def COMBINING_KIND_MINUI : I32BitEnumAttrCaseBit<"MINUI", 2, "minui">;
 def COMBINING_KIND_MINSI : I32BitEnumAttrCaseBit<"MINSI", 3, "minsi">;
-def COMBINING_KIND_MINF : I32BitEnumAttrCaseBit<"MINF", 4, "minf">;
+def COMBINING_KIND_MINNUMF : I32BitEnumAttrCaseBit<"MINNUMF", 4, "minnumf">;
 def COMBINING_KIND_MAXUI : I32BitEnumAttrCaseBit<"MAXUI", 5, "maxui">;
 def COMBINING_KIND_MAXSI : I32BitEnumAttrCaseBit<"MAXSI", 6, "maxsi">;
-def COMBINING_KIND_MAXF : I32BitEnumAttrCaseBit<"MAXF", 7, "maxf">;
+def COMBINING_KIND_MAXNUMF : I32BitEnumAttrCaseBit<"MAXNUMF", 7, "maxnumf">;
 def COMBINING_KIND_AND : I32BitEnumAttrCaseBit<"AND", 8, "and">;
 def COMBINING_KIND_OR : I32BitEnumAttrCaseBit<"OR", 9, "or">;
 def COMBINING_KIND_XOR : I32BitEnumAttrCaseBit<"XOR", 10, "xor">;
@@ -35,8 +35,8 @@ def CombiningKind : I32BitEnumAttr<
 "CombiningKind",
 "Kind of combining function for contractions and reductions",
 [COMBINING_KIND_ADD, COMBINING_KIND_MUL, COMBINING_KIND_MINUI,
-COMBINING_KIND_MINSI, COMBINING_KIND_MINF, COMBINING_KIND_MAXUI,
-COMBINING_KIND_MAXSI, COMBINING_KIND_MAXF, COMBINING_KIND_AND,
+COMBINING_KIND_MINSI, COMBINING_KIND_MINNUMF, COMBINING_KIND_MAXUI,
+COMBINING_KIND_MAXSI, COMBINING_KIND_MAXNUMF, COMBINING_KIND_AND,
 COMBINING_KIND_OR, COMBINING_KIND_XOR,
 COMBINING_KIND_MAXIMUMF, COMBINING_KIND_MINIMUMF]> {
 let cppNamespace = "::mlir::vector";

mlir/include/mlir/Dialect/Vector/IR/VectorOps.td

+12-11
@@ -87,8 +87,8 @@ def Vector_ContractionOp :
 An optional kind attribute may be used to specify the combining function
 between the intermediate result and accumulator argument of rank K. This
 attribute can take the values `add`/`mul`/`minsi`/`minui`/`maxsi`/`maxui`
-/`and`/`or`/`xor` for integers, and `add`/`mul`/`minf`/`maxf`/`minimumf`
-/`maximumf` for floats. The default is `add`.
+/`and`/`or`/`xor` for integers, and `add`/`mul`/`minnumf`/`maxnumf`
+/`minimumf`/`maximumf` for floats. The default is `add`.

 Example:

@@ -150,7 +150,7 @@ def Vector_ContractionOp :
 #contraction_trait = {
 indexing_maps = #contraction_accesses,
 iterator_types = ["reduction"],
-kind = #vector.kind<maxf>
+kind = #vector.kind<maxnumf>
 }
 %6 = vector.contract #contraction_trait %0, %1, %2
 : vector<10xf32>, vector<10xf32> into f32
@@ -234,8 +234,8 @@ def Vector_ReductionOp :
 let description = [{
 Reduces an 1-D vector "horizontally" into a scalar using the given
 operation: `add`/`mul`/`minsi`/`minui`/`maxsi`/`maxui`/`and`/`or`/`xor` for
-integers, and `add`/`mul`/`minf`/`maxf`/`minimumf`/`maximumf` for floats.
-Reductions also allow an optional fused accumulator.
+integers, and `add`/`mul`/`minnumf`/`maxnumf`/`minimumf`/`maximumf` for
+floats. Reductions also allow an optional fused accumulator.

 Note that these operations are restricted to 1-D vectors to remain
 close to the corresponding LLVM intrinsics:
@@ -292,7 +292,7 @@ def Vector_MultiDimReductionOp :
 let description = [{
 Reduces an n-D vector into an (n-k)-D vector (or a scalar when k == n)
 using the given operation: `add`/`mul`/`minsi`/`minui`/`maxsi`/`maxui`
-/`and`/`or`/`xor` for integers, and `add`/`mul`/`minf`/`maxf`/`minimumf`
+/`and`/`or`/`xor` for integers, and `add`/`mul`/`minnumf`/`maxnumf`/`minimumf`
 /`maximumf` for floats.
 Takes an initial accumulator operand.

@@ -942,7 +942,8 @@ def Vector_OuterProductOp :

 An optional kind attribute may be specified to be: `add`/`mul`/`minsi`
 /`minui`/`maxsi`/`maxui`/`and`/`or`/`xor` for integers, and `add`/`mul`
-/`minf`/`maxf`/`minimumf`/`maximumf` for floats. The default is `add`.
+/`minnumf`/`maxnumf`/`minimumf`/`maximumf` for floats. The default is
+`add`.

 Example:

@@ -954,7 +955,7 @@ def Vector_OuterProductOp :
 vector<4xf32>, vector<8xf32>, vector<4x8xf32>
 return %3: vector<4x8xf32>

-%4 = vector.outerproduct %0, %1, %2 {kind = #vector.kind<maxf>}:
+%4 = vector.outerproduct %0, %1, %2 {kind = #vector.kind<maxnumf>}:
 vector<4xf32>, vector<8xf32>, vector<4x8xf32>
 return %3: vector<4x8xf32>

@@ -2769,9 +2770,9 @@ def Vector_ScanOp :
 Performs an inclusive/exclusive scan on an n-D vector along a single
 dimension returning an n-D result vector using the given
 operation (`add`/`mul`/`minsi`/`minui`/`maxsi`/`maxui`/`and`/`or`/`xor` for
-integers, and `add`/`mul`/`minf`/`maxf`/`minimumf`/`maximumf` for floats),
-and a specified value for the initial value. The operator returns the
-result of scan as well as the result of the last reduction in the scan.
+integers, and `add`/`mul`/`minnumf`/`maxnumf`/`minimumf`/`maximumf` for
+floats), and a specified value for the initial value. The operator returns
+the result of scan as well as the result of the last reduction in the scan.

 Example:
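As a usage sketch of the renamed kind on `vector.reduction`, including the optional fused accumulator mentioned in the description above (operand names are invented; the accumulator form is an assumption based on the op's documented syntax rather than something shown in this diff):

%m  = vector.reduction <maxnumf>, %v : vector<8xf32> into f32
%m2 = vector.reduction <maxnumf>, %v, %acc : vector<8xf32> into f32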

mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp

+2-2
@@ -72,13 +72,13 @@ convertReduxKind(gpu::AllReduceOperation mode) {
 return NVVM::ReduxKind::MIN;
 case gpu::AllReduceOperation::MINUI:
 return std::nullopt;
-case gpu::AllReduceOperation::MINF:
+case gpu::AllReduceOperation::MINNUMF:
 return NVVM::ReduxKind::MIN;
 case gpu::AllReduceOperation::MAXSI:
 return NVVM::ReduxKind::MAX;
 case gpu::AllReduceOperation::MAXUI:
 return std::nullopt;
-case gpu::AllReduceOperation::MAXF:
+case gpu::AllReduceOperation::MAXNUMF:
 return NVVM::ReduxKind::MAX;
 case gpu::AllReduceOperation::AND:
 return NVVM::ReduxKind::AND;

mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp

+2-2
@@ -529,7 +529,7 @@ static std::optional<Value> createGroupReduceOp(OpBuilder &builder,
 {ReduceType::MINSI, ElemType::Integer,
 &createGroupReduceOpImpl<spirv::GroupSMinOp,
 spirv::GroupNonUniformSMinOp>},
-{ReduceType::MINF, ElemType::Float,
+{ReduceType::MINNUMF, ElemType::Float,
 &createGroupReduceOpImpl<spirv::GroupFMinOp,
 spirv::GroupNonUniformFMinOp>},
 {ReduceType::MAXUI, ElemType::Integer,
@@ -538,7 +538,7 @@ static std::optional<Value> createGroupReduceOp(OpBuilder &builder,
 {ReduceType::MAXSI, ElemType::Integer,
 &createGroupReduceOpImpl<spirv::GroupSMaxOp,
 spirv::GroupNonUniformSMaxOp>},
-{ReduceType::MAXF, ElemType::Float,
+{ReduceType::MAXNUMF, ElemType::Float,
 &createGroupReduceOpImpl<spirv::GroupFMaxOp,
 spirv::GroupNonUniformFMaxOp>},
 {ReduceType::MINIMUMF, ElemType::Float,

mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp

+4-4
@@ -818,10 +818,10 @@ class VectorReductionOpConversion
 result =
 createFPReductionComparisonOpLowering<LLVM::vector_reduce_fmaximum>(
 rewriter, loc, llvmType, operand, acc, fmf);
-} else if (kind == vector::CombiningKind::MINF) {
+} else if (kind == vector::CombiningKind::MINNUMF) {
 result = createFPReductionComparisonOpLowering<LLVM::vector_reduce_fmin>(
 rewriter, loc, llvmType, operand, acc, fmf);
-} else if (kind == vector::CombiningKind::MAXF) {
+} else if (kind == vector::CombiningKind::MAXNUMF) {
 result = createFPReductionComparisonOpLowering<LLVM::vector_reduce_fmax>(
 rewriter, loc, llvmType, operand, acc, fmf);
 } else
@@ -938,12 +938,12 @@ class MaskedReductionOpConversion
 ReductionNeutralZero>(
 rewriter, loc, llvmType, operand, acc, maskOp.getMask());
 break;
-case vector::CombiningKind::MINF:
+case vector::CombiningKind::MINNUMF:
 result = lowerPredicatedReductionWithStartValue<LLVM::VPReduceFMinOp,
 ReductionNeutralFPMax>(
 rewriter, loc, llvmType, operand, acc, maskOp.getMask());
 break;
-case vector::CombiningKind::MAXF:
+case vector::CombiningKind::MAXNUMF:
 result = lowerPredicatedReductionWithStartValue<LLVM::VPReduceFMaxOp,
 ReductionNeutralFPMin>(
 rewriter, loc, llvmType, operand, acc, maskOp.getMask());

mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp

+2-2
@@ -478,8 +478,8 @@ struct VectorReductionFloatMinMax final

 INT_OR_FLOAT_CASE(MAXIMUMF, SPIRVFMaxOp);
 INT_OR_FLOAT_CASE(MINIMUMF, SPIRVFMinOp);
-INT_OR_FLOAT_CASE(MAXF, SPIRVFMaxOp);
-INT_OR_FLOAT_CASE(MINF, SPIRVFMinOp);
+INT_OR_FLOAT_CASE(MAXNUMF, SPIRVFMaxOp);
+INT_OR_FLOAT_CASE(MINNUMF, SPIRVFMinOp);

 default:
 return rewriter.notifyMatchFailure(reduceOp, "not handled here");

mlir/lib/Dialect/GPU/IR/GPUDialect.cpp

+2-1
@@ -492,7 +492,8 @@ static LogicalResult verifyReduceOpAndType(gpu::AllReduceOperation opName,
 Type resType) {
 using Kind = gpu::AllReduceOperation;
 if (llvm::is_contained(
-{Kind::MINF, Kind::MAXF, Kind::MINIMUMF, Kind::MAXIMUMF}, opName)) {
+{Kind::MINNUMF, Kind::MAXNUMF, Kind::MINIMUMF, Kind::MAXIMUMF},
+opName)) {
 if (!isa<FloatType>(resType))
 return failure();
 }
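The verifier above keeps requiring a floating-point result type for the four float kinds, now under their new names. For example, a hypothetical snippet like the following would be rejected by this check, while the f32 forms in the tests below verify fine:

// Invalid: `minnumf` is a floating-point reduction, so an i32 result fails verification.
%r = gpu.all_reduce minnumf %arg {} : (i32) -> (i32)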

mlir/lib/Dialect/GPU/Transforms/AllReduceLowering.cpp

+2-2
@@ -38,10 +38,10 @@ convertReductionKind(gpu::AllReduceOperation mode) {
 MAP_CASE(MUL);
 MAP_CASE(MINUI);
 MAP_CASE(MINSI);
-MAP_CASE(MINF);
+MAP_CASE(MINNUMF);
 MAP_CASE(MAXSI);
 MAP_CASE(MAXUI);
-MAP_CASE(MAXF);
+MAP_CASE(MAXNUMF);
 MAP_CASE(AND);
 MAP_CASE(OR);
 MAP_CASE(XOR);

mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp

+2-2
@@ -2426,11 +2426,11 @@ bool isCastOfBlockArgument(Operation *op) {
 bool isSupportedPoolKind(vector::CombiningKind kind) {
 switch (kind) {
 case vector::CombiningKind::ADD:
-case vector::CombiningKind::MAXF:
+case vector::CombiningKind::MAXNUMF:
 case vector::CombiningKind::MAXIMUMF:
 case vector::CombiningKind::MAXSI:
 case vector::CombiningKind::MAXUI:
-case vector::CombiningKind::MINF:
+case vector::CombiningKind::MINNUMF:
 case vector::CombiningKind::MINIMUMF:
 case vector::CombiningKind::MINSI:
 case vector::CombiningKind::MINUI:

mlir/lib/Dialect/Vector/IR/VectorOps.cpp

+4-4
@@ -140,8 +140,8 @@ static bool isSupportedCombiningKind(CombiningKind combiningKind,
 case CombiningKind::OR:
 case CombiningKind::XOR:
 return elementType.isIntOrIndex();
-case CombiningKind::MINF:
-case CombiningKind::MAXF:
+case CombiningKind::MINNUMF:
+case CombiningKind::MAXNUMF:
 case CombiningKind::MINIMUMF:
 case CombiningKind::MAXIMUMF:
 return llvm::isa<FloatType>(elementType);
@@ -6233,7 +6233,7 @@ Value mlir::vector::makeArithReduction(OpBuilder &b, Location loc,
 assert(t1.isIntOrIndex() && tAcc.isIntOrIndex() && "expected int values");
 result = b.createOrFold<arith::AndIOp>(loc, v1, acc);
 break;
-case CombiningKind::MAXF:
+case CombiningKind::MAXNUMF:
 assert(llvm::isa<FloatType>(t1) && llvm::isa<FloatType>(tAcc) &&
 "expected float values");
 result = b.createOrFold<arith::MaxNumFOp>(loc, v1, acc, fastmath);
@@ -6243,7 +6243,7 @@ Value mlir::vector::makeArithReduction(OpBuilder &b, Location loc,
 "expected float values");
 result = b.createOrFold<arith::MaximumFOp>(loc, v1, acc, fastmath);
 break;
-case CombiningKind::MINF:
+case CombiningKind::MINNUMF:
 assert(llvm::isa<FloatType>(t1) && llvm::isa<FloatType>(tAcc) &&
 "expected float values");
 result = b.createOrFold<arith::MinNumFOp>(loc, v1, acc, fastmath);
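In `makeArithReduction` above, each combining step for the renamed kinds is emitted as the matching arith op (with any fastmath flags forwarded, as the C++ shows). Conceptually, the scalar combine of one element with the running accumulator looks like this sketch (value names invented):

%acc1 = arith.maxnumf %elt, %acc0 : f32
%acc2 = arith.minnumf %elt, %acc1 : f32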

mlir/lib/Dialect/Vector/Transforms/LowerVectorContract.cpp

+1-1
@@ -139,7 +139,7 @@ createContractArithOp(Location loc, Value x, Value y, Value acc,
 Value mul;

 if (isInt) {
-if (kind == CombiningKind::MINF || kind == CombiningKind::MAXF ||
+if (kind == CombiningKind::MINNUMF || kind == CombiningKind::MAXNUMF ||
 kind == CombiningKind::MINIMUMF || kind == CombiningKind::MAXIMUMF)
 // Only valid for floating point types.
 return std::nullopt;

mlir/lib/Dialect/Vector/Transforms/LowerVectorScan.cpp

+2-2
@@ -45,9 +45,9 @@ static bool isValidKind(bool isInt, vector::CombiningKind kind) {
 enum class KindType { FLOAT, INT, INVALID };
 KindType type{KindType::INVALID};
 switch (kind) {
-case CombiningKind::MINF:
+case CombiningKind::MINNUMF:
 case CombiningKind::MINIMUMF:
-case CombiningKind::MAXF:
+case CombiningKind::MAXNUMF:
 case CombiningKind::MAXIMUMF:
 type = KindType::FLOAT;
 break;

mlir/test/Conversion/GPUToSPIRV/reductions.mlir

+8-8
@@ -331,7 +331,7 @@ gpu.module @kernels {
 gpu.func @test(%arg : f32) kernel
 attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
 // CHECK: %{{.*}} = spirv.GroupFMin <Workgroup> <Reduce> %[[ARG]] : f32
-%reduced = gpu.all_reduce minf %arg uniform {} : (f32) -> (f32)
+%reduced = gpu.all_reduce minnumf %arg uniform {} : (f32) -> (f32)
 gpu.return
 }
 }
@@ -351,7 +351,7 @@ gpu.module @kernels {
 gpu.func @test(%arg : f32) kernel
 attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
 // CHECK: %{{.*}} = spirv.GroupNonUniformFMin "Workgroup" "Reduce" %[[ARG]] : f32
-%reduced = gpu.all_reduce minf %arg {} : (f32) -> (f32)
+%reduced = gpu.all_reduce minnumf %arg {} : (f32) -> (f32)
 gpu.return
 }
 }
@@ -414,7 +414,7 @@ gpu.module @kernels {
 gpu.func @test(%arg : f32) kernel
 attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
 // CHECK: %{{.*}} = spirv.GroupFMin <Subgroup> <Reduce> %[[ARG]] : f32
-%reduced = gpu.subgroup_reduce minf %arg uniform : (f32) -> (f32)
+%reduced = gpu.subgroup_reduce minnumf %arg uniform : (f32) -> (f32)
 gpu.return
 }
 }
@@ -434,7 +434,7 @@ gpu.module @kernels {
 gpu.func @test(%arg : f32) kernel
 attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
 // CHECK: %{{.*}} = spirv.GroupNonUniformFMin "Subgroup" "Reduce" %[[ARG]] : f32
-%reduced = gpu.subgroup_reduce minf %arg : (f32) -> (f32)
+%reduced = gpu.subgroup_reduce minnumf %arg : (f32) -> (f32)
 gpu.return
 }
 }
@@ -498,7 +498,7 @@ gpu.module @kernels {
 gpu.func @test(%arg : f32) kernel
 attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
 // CHECK: %{{.*}} = spirv.GroupFMax <Workgroup> <Reduce> %[[ARG]] : f32
-%reduced = gpu.all_reduce maxf %arg uniform {} : (f32) -> (f32)
+%reduced = gpu.all_reduce maxnumf %arg uniform {} : (f32) -> (f32)
 gpu.return
 }
 }
@@ -518,7 +518,7 @@ gpu.module @kernels {
 gpu.func @test(%arg : f32) kernel
 attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
 // CHECK: %{{.*}} = spirv.GroupNonUniformFMax "Workgroup" "Reduce" %[[ARG]] : f32
-%reduced = gpu.all_reduce maxf %arg {} : (f32) -> (f32)
+%reduced = gpu.all_reduce maxnumf %arg {} : (f32) -> (f32)
 gpu.return
 }
 }
@@ -582,7 +582,7 @@ gpu.module @kernels {
 gpu.func @test(%arg : f32) kernel
 attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
 // CHECK: %{{.*}} = spirv.GroupFMax <Subgroup> <Reduce> %[[ARG]] : f32
-%reduced = gpu.subgroup_reduce maxf %arg uniform : (f32) -> (f32)
+%reduced = gpu.subgroup_reduce maxnumf %arg uniform : (f32) -> (f32)
 gpu.return
 }
 }
@@ -602,7 +602,7 @@ gpu.module @kernels {
 gpu.func @test(%arg : f32) kernel
 attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
 // CHECK: %{{.*}} = spirv.GroupNonUniformFMax "Subgroup" "Reduce" %[[ARG]] : f32
-%reduced = gpu.subgroup_reduce maxf %arg : (f32) -> (f32)
+%reduced = gpu.subgroup_reduce maxnumf %arg : (f32) -> (f32)
 gpu.return
 }
 }

mlir/test/Conversion/VectorToLLVM/vector-reduction-to-llvm.mlir

+2-2
@@ -97,7 +97,7 @@ func.func @masked_reduce_mul_f32(%arg0: vector<16xf32>, %mask : vector<16xi1>) -
 // -----

 func.func @masked_reduce_minf_f32(%arg0: vector<16xf32>, %mask : vector<16xi1>) -> f32 {
-%0 = vector.mask %mask { vector.reduction <minf>, %arg0 : vector<16xf32> into f32 } : vector<16xi1> -> f32
+%0 = vector.mask %mask { vector.reduction <minnumf>, %arg0 : vector<16xf32> into f32 } : vector<16xi1> -> f32
 return %0 : f32
 }

@@ -111,7 +111,7 @@ func.func @masked_reduce_minf_f32(%arg0: vector<16xf32>, %mask : vector<16xi1>)
 // -----

 func.func @masked_reduce_maxf_f32(%arg0: vector<16xf32>, %mask : vector<16xi1>) -> f32 {
-%0 = vector.mask %mask { vector.reduction <maxf>, %arg0 : vector<16xf32> into f32 } : vector<16xi1> -> f32
+%0 = vector.mask %mask { vector.reduction <maxnumf>, %arg0 : vector<16xf32> into f32 } : vector<16xi1> -> f32
 return %0 : f32
 }