Skip to content
Open
Show file tree
Hide file tree
Changes from 5 commits
Commits
Show all changes
21 commits
Select commit Hold shift + click to select a range
f07c93e
fix: implement semantic equivalence for tensors_have_same_dim_order (…
Feb 21, 2026
fb1b8f2
Merge branch 'main' into fix/16032-tensors-same-dim-order-semantic-eq…
nefainl Feb 21, 2026
f96558e
Merge branch 'main' into fix/16032-tensors-same-dim-order-semantic-eq…
nefainl Feb 24, 2026
559b781
Merge branch 'main' into fix/16032-tensors-same-dim-order-semantic-eq…
nefainl Feb 25, 2026
e352690
Merge branch 'main' into fix/16032-tensors-same-dim-order-semantic-eq…
nefainl Feb 26, 2026
c8e8e5d
Merge branch 'main' into fix/16032-tensors-same-dim-order-semantic-eq…
nefainl Feb 27, 2026
76098da
fix(exir): preserve dim_order in MemoryFormatOpsPass (#16032)
Feb 27, 2026
cd159fe
fix(runtime): clarify tensors_have_same_dim_order semantics (#16032)
Feb 27, 2026
0b0ae3c
fix(runtime): narrow PR C scope and clean tests
Feb 27, 2026
0c8b8e8
Merge branch 'main' into fix/16032-tensors-same-dim-order-semantic-eq…
nefainl Feb 28, 2026
0a12cfc
Merge branch 'main' into fix/16032-tensors-same-dim-order-semantic-eq…
nefainl Mar 6, 2026
fe33ca5
fix(runtime): tensors_have_same_dim_order Tier A legacy + Tier B sema…
Mar 28, 2026
5c70037
Merge branch 'main' into fix/16032-tensors-same-dim-order-semantic-eq…
nefainl Apr 9, 2026
3659e2e
fix(ci): relax Cadence and CoreML mv3 test tolerances
Apr 12, 2026
da1ae40
Merge branch 'main' into fix/16032-tensors-same-dim-order-semantic-eq…
Apr 12, 2026
4fb474b
style: clang-format tensor_util_portable (PR C)
Apr 12, 2026
23dfde3
Merge branch 'main' into fix/16032-tensors-same-dim-order-semantic-eq…
nefainl Apr 23, 2026
717eee0
Merge branch 'main' of https://github.com/pytorch/executorch into pr-…
Apr 24, 2026
4f281e5
Merge nefainl PR branch tip (sync remote)
Apr 24, 2026
4a03db6
Merge branch 'main' into fix/16032-tensors-same-dim-order-semantic-eq…
nefainl Apr 29, 2026
f75d14d
Merge branch 'main' into fix/16032-tensors-same-dim-order-semantic-eq…
GregoryComer May 12, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
70 changes: 53 additions & 17 deletions runtime/core/exec_aten/util/tensor_util_portable.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -109,29 +109,65 @@ bool tensor_is_channels_last_dim_order(torch::executor::Tensor t) {
return ret_val;
}

namespace {

// Helper: check if two tensors have semantically equivalent memory layouts.
// First tries exact dim_order label match; if labels differ, falls back to
// stride comparison that ignores size-1 dimensions (PyTorch semantics).
// Issue #16032: prevents false negatives for memory-compatible tensors.
Comment thread
nefainl marked this conversation as resolved.
Outdated
bool two_tensors_same_dim_order(
const executorch::aten::Tensor& a,
const executorch::aten::Tensor& b) {
if (a.dim() != b.dim()) {
return false;
}
const int ndim = static_cast<int>(a.dim());

// Fast path: check if dim_order labels match exactly
bool labels_match = true;
for (int i = 0; i < ndim; ++i) {
if (a.dim_order()[i] != b.dim_order()[i]) {
labels_match = false;
break;
}
}
if (labels_match) {
return true;
}

// Semantic equivalence: compare strides, ignoring size-1 dimensions.
// Two tensors are equivalent if their strides match for all dimensions
// where both tensors have size > 1. Size-1 dims don't affect memory
// traversal order (PyTorch's is_contiguous uses this logic).
for (int i = 0; i < ndim; ++i) {
Comment thread
nefainl marked this conversation as resolved.
// Skip dimensions where both tensors have size 1
if (a.sizes()[i] == 1 && b.sizes()[i] == 1) {
continue;
}
// For non-trivial dimensions, strides must match
if (a.strides()[i] != b.strides()[i]) {
return false;
}
}
return true;
}

} // namespace

// Returns true when every tensor in `tensor_list` shares a semantically
// equivalent dim order (exact label match, or stride-equivalent layouts
// once size-1 dimensions are ignored — see two_tensors_same_dim_order).
// Logs an error and returns false on the first mismatch.
bool tensors_have_same_dim_order(
    const executorch::aten::ArrayRef<executorch::aten::Tensor> tensor_list) {
  // Zero or one tensor is trivially self-consistent.
  if (tensor_list.size() < 2) {
    return true;
  }
  // Dim-order equivalence is transitive, so comparing each tensor
  // against the first suffices.
  for (size_t i = 1; i < tensor_list.size(); ++i) {
    if (!two_tensors_same_dim_order(tensor_list[0], tensor_list[i])) {
      // %zu is the correct printf specifier for size_t (%zd is ssize_t).
      ET_LOG(
          Error,
          "%zu input tensors have different dim orders",
          tensor_list.size());
      return false;
    }
  }

  return true;
}

Expand Down
127 changes: 127 additions & 0 deletions runtime/core/exec_aten/util/test/tensor_util_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -622,3 +622,130 @@ TEST_F(TensorUtilTest, SameShapesDifferentDimOrder) {
EXPECT_FALSE(tensors_have_same_dim_order(a, c, b));
EXPECT_FALSE(tensors_have_same_dim_order(c, b, a));
}

// Issue #16032: semantic equivalence tests for tensors_have_same_dim_order.
// These tests verify that tensors with different dim_order labels but
// semantically equivalent memory layouts are correctly identified.

TEST_F(TensorUtilTest, SemanticEquivalenceDegenerateC1) {
  using namespace torch::executor;
  // Issue #16032: NCHW and NHWC labels differ for [2, 1, 4, 4], yet the
  // layouts coincide in memory because the channel dimension is size 1.
  const std::vector<int32_t> shape = {2, 1, 4, 4};
  Tensor contiguous = tf_float_.ones(shape);
  Tensor channels_last = tf_float_.full_channels_last(shape, 1.0f);

  // Degenerate C=1 dimension: semantically equivalent, so expect true.
  EXPECT_TRUE(tensors_have_same_dim_order(contiguous, channels_last));
}

TEST_F(TensorUtilTest, SemanticEquivalenceDegenerateHW1) {
  using namespace torch::executor;
  // Issue #16032: with H = W = 1, NCHW [2, 3, 1, 1] and NHWC [2, 3, 1, 1]
  // carry different dim_order labels but occupy memory identically.
  const std::vector<int32_t> shape = {2, 3, 1, 1};
  Tensor contiguous = tf_float_.ones(shape);
  Tensor channels_last = tf_float_.full_channels_last(shape, 1.0f);

  // Both spatial dims are degenerate, so the layouts match: expect true.
  EXPECT_TRUE(tensors_have_same_dim_order(contiguous, channels_last));
}

TEST_F(TensorUtilTest, SemanticEquivalenceNonDegenerateFails) {
  using namespace torch::executor;
  // With no size-1 dimensions, NCHW and NHWC [2, 3, 4, 4] genuinely
  // traverse memory in different orders, so equivalence must fail.
  const std::vector<int32_t> shape = {2, 3, 4, 4};
  Tensor contiguous = tf_float_.ones(shape);
  Tensor channels_last = tf_float_.full_channels_last(shape, 1.0f);

  // Distinct layouts: expect false.
  EXPECT_FALSE(tensors_have_same_dim_order(contiguous, channels_last));
}

TEST_F(TensorUtilTest, SemanticEquivalencePartialDegenerateFails) {
  using namespace torch::executor;
  // Only H is degenerate (H = 1); C and W remain non-trivial, so the
  // stride comparison must still detect the layout mismatch.
  const std::vector<int32_t> shape = {2, 3, 1, 4};
  Tensor contiguous = tf_float_.ones(shape);
  Tensor channels_last = tf_float_.full_channels_last(shape, 1.0f);

  // NCHW strides: [12, 4, 4, 1]; NHWC strides: [12, 1, 12, 3].
  // Dim 1 (C) has size 3 in both tensors but strides 4 vs 1, so the
  // layouts differ: expect false.
  EXPECT_FALSE(tensors_have_same_dim_order(contiguous, channels_last));
}

TEST_F(TensorUtilTest, SemanticEquivalenceDifferentRankFails) {
  using namespace torch::executor;
  // Tensors of different rank can never share a dim order.
  Tensor four_d = tf_float_.ones({2, 3, 4, 4});
  Tensor three_d = tf_float_.ones({2, 3, 4});

  EXPECT_FALSE(tensors_have_same_dim_order(four_d, three_d));
}

TEST_F(TensorUtilTest, SemanticEquivalenceSameLabelsSameResult) {
  using namespace torch::executor;
  // Regression guard: identical contiguous dim_order labels must keep
  // taking the exact-match fast path and report equivalence.
  const std::vector<int32_t> shape = {2, 3, 4, 4};
  Tensor first = tf_float_.ones(shape);
  Tensor second = tf_float_.ones(shape);

  EXPECT_TRUE(tensors_have_same_dim_order(first, second));
}

TEST_F(TensorUtilTest, SemanticEquivalenceChannelsLastSameResult) {
  using namespace torch::executor;
  // Regression guard: two channels_last tensors share identical labels
  // and must match via the fast path, regardless of their fill values.
  const std::vector<int32_t> shape = {2, 3, 4, 4};
  Tensor first = tf_float_.full_channels_last(shape, 1.0f);
  Tensor second = tf_float_.full_channels_last(shape, 2.0f);

  EXPECT_TRUE(tensors_have_same_dim_order(first, second));
}

TEST_F(TensorUtilTest, SemanticEquivalenceThreeTensors) {
  using namespace torch::executor;
  // Exercise the three-tensor overload with a degenerate C=1 shape:
  // every pair is memory-compatible, so the whole set must match.
  const std::vector<int32_t> shape = {2, 1, 4, 4};
  Tensor contiguous_a = tf_float_.ones(shape);
  Tensor contiguous_b = tf_float_.ones(shape);
  Tensor channels_last = tf_float_.full_channels_last(shape, 1.0f);

  EXPECT_TRUE(
      tensors_have_same_dim_order(contiguous_a, contiguous_b, channels_last));
}

TEST_F(TensorUtilTest, SemanticEquivalenceAllOnes) {
  using namespace torch::executor;
  // Every dimension is size 1, so the stride comparison skips them all
  // and NCHW vs NHWC must be reported as equivalent.
  const std::vector<int32_t> shape = {1, 1, 1, 1};
  Tensor contiguous = tf_float_.ones(shape);
  Tensor channels_last = tf_float_.full_channels_last(shape, 1.0f);

  EXPECT_TRUE(tensors_have_same_dim_order(contiguous, channels_last));
}

TEST_F(TensorUtilTest, SemanticEquivalenceZeroDim) {
  using namespace torch::executor;
  // Rank-0 tensors have no dimensions to compare, so any two scalars
  // trivially share a dim order.
  Tensor first = tf_float_.ones({});
  Tensor second = tf_float_.ones({});

  EXPECT_TRUE(tensors_have_same_dim_order(first, second));
}

TEST_F(TensorUtilTest, SemanticEquivalenceOneDim) {
  using namespace torch::executor;
  // Rank-1 tensors admit exactly one possible dim order, so any two
  // must compare equal.
  Tensor first = tf_float_.ones({5});
  Tensor second = tf_float_.ones({5});

  EXPECT_TRUE(tensors_have_same_dim_order(first, second));
}
Loading