
Commit 043d5c5

pytorchbot and Zonglin Peng authored
jarvis-nightly-operators-test-aten-constant-pad-nd-out (#15573)
This PR was created by the merge bot to help merge the original PR into the main branch.

ghstack PR number: #15499 by @zonglinpeng
^ Please use this as the source of truth for the PR details, comments, and reviews
ghstack PR base: https://github.com/pytorch/executorch/tree/gh/zonglinpeng/11/base
ghstack PR head: https://github.com/pytorch/executorch/tree/gh/zonglinpeng/11/head
Merge bot PR base: https://github.com/pytorch/executorch/tree/gh/zonglinpeng/10/orig
Merge bot PR head: https://github.com/pytorch/executorch/tree/gh/zonglinpeng/11/orig
Differential Revision: [D85364553](https://our.internmc.facebook.com/intern/diff/D85364553/)

@diff-train-skip-merge

Co-authored-by: Zonglin Peng <[email protected]>
1 parent 564ac93 commit 043d5c5

File tree

1 file changed: +51 -29 lines changed

backends/cadence/utils/facto_util.py

Lines changed: 51 additions & 29 deletions
@@ -189,47 +189,37 @@ def random_size_constraint(deps: object, r: int, d: int) -> int:
             if index == 0:  # condition
                 tensor_constraints = [
                     cp.Dtype.In(lambda deps: [torch.bool]),
-                    cp.Value.Ge(lambda deps, dtype, struct: -(2**4)),
-                    cp.Value.Le(lambda deps, dtype, struct: 2**4),
+                    cp.Value.Ge(lambda deps, dtype, struct: 0),
+                    cp.Value.Le(lambda deps, dtype, struct: 1),
                     cp.Rank.Ge(lambda deps: 1),
                     cp.Size.Ge(lambda deps, r, d: 1),
                     max_size_constraint,
                 ]
             elif index == 1:  # input tensor(a)
                 tensor_constraints = [
-                    cp.Dtype.In(
-                        lambda deps: [
-                            torch.int8,
-                            torch.int16,
-                            torch.uint8,
-                            torch.uint16,
-                            torch.int32,
-                            torch.float32,
-                        ]
-                    ),
+                    cp.Dtype.In(lambda deps: [torch.float32]),
                     cp.Value.Ge(lambda deps, dtype, struct: -(2**4)),
                     cp.Value.Le(lambda deps, dtype, struct: 2**4),
                     cp.Rank.Ge(lambda deps: 1),
                     cp.Size.Ge(lambda deps, r, d: 1),
+                    cp.Size.In(
+                        lambda deps, r, d: fn.broadcast_with(deps[0].shape, r, d)
+                    ),
                     max_size_constraint,
                 ]
             else:  # input tensor(b)
                 tensor_constraints = [
-                    cp.Dtype.In(
-                        lambda deps: [
-                            torch.int8,
-                            torch.int16,
-                            torch.uint8,
-                            torch.uint16,
-                            torch.int32,
-                            torch.float32,
-                        ]
-                    ),
+                    cp.Dtype.In(lambda deps: [torch.float32]),
                     cp.Dtype.Eq(lambda deps: deps[1].dtype),
                     cp.Value.Ge(lambda deps, dtype, struct: -(2**4)),
                     cp.Value.Le(lambda deps, dtype, struct: 2**4),
                     cp.Rank.Ge(lambda deps: 1),
                     cp.Size.Ge(lambda deps, r, d: 1),
+                    cp.Size.In(
+                        lambda deps, r, d: fn.broadcast_with(
+                            fn.broadcasted_shape(deps[0].shape, deps[1].shape), r, d
+                        )
+                    ),
                     max_size_constraint,
                 ]
         case "embedding.default":
@@ -276,6 +266,9 @@ def random_size_constraint(deps: object, r: int, d: int) -> int:
             tensor_constraints.extend(
                 [
                     cp.Dtype.In(lambda deps: [torch.float32, torch.int32]),
+                    # Avoid NaN/Inf values that expose clamp NaN handling bugs
+                    cp.Value.Ge(lambda deps, dtype, struct: -(2**4)),
+                    cp.Value.Le(lambda deps, dtype, struct: 2**4),
                 ]
             )
         case "rsqrt.default":
@@ -351,12 +344,15 @@ def random_size_constraint(deps: object, r: int, d: int) -> int:
                 ]
             )
         case "constant_pad_nd.default":
-            tensor_constraints.extend(
-                [
-                    cp.Dtype.In(lambda deps: [torch.float32]),
-                    cp.Size.Le(lambda deps, r, d: 2**2),
-                ]
-            )
+            tensor_constraints = [
+                cp.Dtype.In(lambda deps: [torch.float32]),
+                cp.Value.Ge(lambda deps, dtype, struct: -(2**4)),
+                cp.Value.Le(lambda deps, dtype, struct: 2**4),
+                cp.Rank.Ge(lambda deps: 1),
+                cp.Rank.Le(lambda deps: 2),  # Reduced from 3 to 2 (max 2D tensors)
+                cp.Size.Ge(lambda deps, r, d: 1),
+                cp.Size.Le(lambda deps, r, d: 3),  # Max dimension size of 3
+            ]
         case "avg_pool2d.default":
             tensor_constraints.extend(
                 [
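For reference, a case that fits the tightened `constant_pad_nd` envelope (rank at most 2, dimension sizes at most 3, values in [-16, 16]), assuming the standard aten semantics where pad pairs apply from the last dimension backward:

```python
import torch

x = torch.arange(6, dtype=torch.float32).reshape(2, 3)
# pad = (last-dim left, last-dim right, first-dim front, first-dim back)
y = torch.ops.aten.constant_pad_nd(x, [1, 1, 1, 1], 0.0)
print(y.shape)  # torch.Size([4, 5])
```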
@@ -463,6 +459,7 @@ def apply_scalar_contraints(op_name: str) -> list[ScalarDtype]:
         | "mul.Scalar"
         | "div.Scalar"
         | "constant_pad_nd.default"
+        | "clamp.default"
     ):
         return [ScalarDtype.int]
     case "full.default":
@@ -490,7 +487,32 @@ def facto_testcase_gen(  # noqa: C901
                     cp.Size.Le(lambda deps, r, d: 2**2),
                 ]
             )
-        if in_spec.name == "max_val":  # hardtanh
+        # Special handling for clamp.default to ensure min < max with sufficient gap (at least 2) and never None
+        if op_name == "clamp.default":
+            if in_spec.name == "min":
+                # min must always be provided (not None) and bounded, leave room for max
+                spec.inspec[index].constraints.extend(
+                    [
+                        cp.Optional.Eq(lambda deps: False),  # Never None
+                        cp.Value.Ge(lambda deps, dtype: -(2**4)),
+                        cp.Value.Le(
+                            lambda deps, dtype: 2**4 - 2
+                        ),  # Leave room for max (at least 2 units)
+                    ]
+                )
+            elif in_spec.name == "max":
+                # max must always be provided (not None), be >= min + 2 (sufficient gap), and bounded
+                spec.inspec[index].deps = [0, 1]  # deps on input tensor and min
+                spec.inspec[index].constraints.extend(
+                    [
+                        cp.Optional.Eq(lambda deps: False),  # Never None
+                        cp.Value.Ge(
+                            lambda deps, dtype: deps[1] + 2
+                        ),  # max >= min + 2 (sufficient gap)
+                        cp.Value.Le(lambda deps, dtype: 2**4),
+                    ]
+                )
+        elif in_spec.name == "max_val":  # hardtanh
             spec.inspec[index].deps = [0, 1]
             spec.inspec[index].constraints.extend(
                 [cp.Value.Ge(lambda deps, _: deps[1])]
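Together with the `"clamp.default"` arm added to `apply_scalar_contraints` above, generated clamp cases now always carry concrete integer scalars with `max >= min + 2`. A case inside that envelope behaves as expected in eager mode:

```python
import torch

x = torch.tensor([-20.0, 0.0, 20.0])
# min = -3, max = -1 satisfies the generated gap max >= min + 2
print(torch.clamp(x, min=-3, max=-1))  # tensor([-3., -1., -1.])
```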
