Skip to content

Commit e5957ac

Browse files
tungldftynse
authored and committed
[mlir] Fix the wrong computation of dynamic strides for lowering AllocOp to LLVM
Leftover change from before the MLIR merge, reviewed and accepted at tensorflow/mlir#338.
1 parent bc5b721 commit e5957ac

File tree

2 files changed

+8
-7
lines changed

2 files changed

+8
-7
lines changed

mlir/lib/Conversion/StandardToLLVM/ConvertStandardToLLVM.cpp

+5-4
Original file line numberDiff line numberDiff line change
@@ -1054,14 +1054,15 @@ struct AllocOpLowering : public LLVMLegalizationPattern<AllocOp> {
10541054
// Iterate strides in reverse order, compute runningStride and strideValues.
10551055
auto nStrides = strides.size();
10561056
SmallVector<Value, 4> strideValues(nStrides, nullptr);
1057-
for (auto indexedStride : llvm::enumerate(llvm::reverse(strides))) {
1058-
int64_t index = nStrides - 1 - indexedStride.index();
1057+
for (unsigned i = 0; i < nStrides; ++i) {
1058+
int64_t index = nStrides - 1 - i;
10591059
if (strides[index] == MemRefType::getDynamicStrideOrOffset())
10601060
// Identity layout map is enforced in the match function, so we compute:
1061-
// `runningStride *= sizes[index]`
1061+
// `runningStride *= sizes[index + 1]`
10621062
runningStride =
10631063
runningStride
1064-
? rewriter.create<LLVM::MulOp>(loc, runningStride, sizes[index])
1064+
? rewriter.create<LLVM::MulOp>(loc, runningStride,
1065+
sizes[index + 1])
10651066
: createIndexConstant(rewriter, loc, 1);
10661067
else
10671068
runningStride = createIndexConstant(rewriter, loc, strides[index]);

mlir/test/Conversion/StandardToLLVM/convert-memref-ops.mlir

+3-3
Original file line numberDiff line numberDiff line change
@@ -101,8 +101,8 @@ func @mixed_alloc(%arg0: index, %arg1: index) -> memref<?x42x?xf32> {
101101
// CHECK-NEXT: %[[off:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
102102
// CHECK-NEXT: llvm.insertvalue %[[off]], %{{.*}}[2] : !llvm<"{ float*, float*, i64, [3 x i64], [3 x i64] }">
103103
// CHECK-NEXT: %[[st2:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
104-
// CHECK-NEXT: %[[st1:.*]] = llvm.mul %{{.*}}, %[[c42]] : !llvm.i64
105-
// CHECK-NEXT: %[[st0:.*]] = llvm.mul %{{.*}}, %[[M]] : !llvm.i64
104+
// CHECK-NEXT: %[[st1:.*]] = llvm.mul %{{.*}}, %[[N]] : !llvm.i64
105+
// CHECK-NEXT: %[[st0:.*]] = llvm.mul %{{.*}}, %[[c42]] : !llvm.i64
106106
// CHECK-NEXT: llvm.insertvalue %[[M]], %{{.*}}[3, 0] : !llvm<"{ float*, float*, i64, [3 x i64], [3 x i64] }">
107107
// CHECK-NEXT: llvm.insertvalue %[[st0]], %{{.*}}[4, 0] : !llvm<"{ float*, float*, i64, [3 x i64], [3 x i64] }">
108108
// CHECK-NEXT: llvm.insertvalue %[[c42]], %{{.*}}[3, 1] : !llvm<"{ float*, float*, i64, [3 x i64], [3 x i64] }">
@@ -142,7 +142,7 @@ func @dynamic_alloc(%arg0: index, %arg1: index) -> memref<?x?xf32> {
142142
// CHECK-NEXT: %[[off:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64
143143
// CHECK-NEXT: llvm.insertvalue %[[off]], %{{.*}}[2] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
144144
// CHECK-NEXT: %[[st1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64
145-
// CHECK-NEXT: %[[st0:.*]] = llvm.mul %{{.*}}, %[[M]] : !llvm.i64
145+
// CHECK-NEXT: %[[st0:.*]] = llvm.mul %{{.*}}, %[[N]] : !llvm.i64
146146
// CHECK-NEXT: llvm.insertvalue %[[M]], %{{.*}}[3, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
147147
// CHECK-NEXT: llvm.insertvalue %[[st0]], %{{.*}}[4, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">
148148
// CHECK-NEXT: llvm.insertvalue %[[N]], %{{.*}}[3, 1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }">

0 commit comments

Comments
 (0)