ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash
---|---|---|---|---|---|---|---|---|---|---
683091d0-4b5c-4f95-8c5d-18017a2e0324 | cpp | google/quiche | qpack_header_table | quiche/quic/core/qpack/qpack_header_table.cc | quiche/quic/core/qpack/qpack_header_table_test.cc | #include "quiche/quic/core/qpack/qpack_header_table.h"
#include <utility>
#include "absl/strings/string_view.h"
#include "quiche/quic/core/qpack/qpack_static_table.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace quic {
QpackEncoderHeaderTable::QpackEncoderHeaderTable()
: static_index_(ObtainQpackStaticTable().GetStaticIndex()),
static_name_index_(ObtainQpackStaticTable().GetStaticNameIndex()) {}
uint64_t QpackEncoderHeaderTable::InsertEntry(absl::string_view name,
absl::string_view value) {
const uint64_t index =
QpackHeaderTableBase<QpackEncoderDynamicTable>::InsertEntry(name, value);
name = dynamic_entries().back()->name();
value = dynamic_entries().back()->value();
auto index_result = dynamic_index_.insert(
std::make_pair(QpackLookupEntry{name, value}, index));
if (!index_result.second) {
QUICHE_DCHECK_GT(index, index_result.first->second);
dynamic_index_.erase(index_result.first);
auto result = dynamic_index_.insert(
std::make_pair(QpackLookupEntry{name, value}, index));
QUICHE_CHECK(result.second);
}
auto name_result = dynamic_name_index_.insert({name, index});
if (!name_result.second) {
QUICHE_DCHECK_GT(index, name_result.first->second);
dynamic_name_index_.erase(name_result.first);
auto result = dynamic_name_index_.insert({name, index});
QUICHE_CHECK(result.second);
}
return index;
}
QpackEncoderHeaderTable::MatchResult QpackEncoderHeaderTable::FindHeaderField(
absl::string_view name, absl::string_view value) const {
QpackLookupEntry query{name, value};
auto index_it = static_index_.find(query);
if (index_it != static_index_.end()) {
    return {/* match_type = */ MatchType::kNameAndValue,
            /* is_static = */ true, index_it->second};
}
index_it = dynamic_index_.find(query);
if (index_it != dynamic_index_.end()) {
    return {/* match_type = */ MatchType::kNameAndValue,
            /* is_static = */ false, index_it->second};
}
return FindHeaderName(name);
}
QpackEncoderHeaderTable::MatchResult QpackEncoderHeaderTable::FindHeaderName(
absl::string_view name) const {
auto name_index_it = static_name_index_.find(name);
if (name_index_it != static_name_index_.end()) {
    return {/* match_type = */ MatchType::kName,
            /* is_static = */ true, name_index_it->second};
}
name_index_it = dynamic_name_index_.find(name);
if (name_index_it != dynamic_name_index_.end()) {
    return {/* match_type = */ MatchType::kName,
            /* is_static = */ false, name_index_it->second};
}
  return {/* match_type = */ MatchType::kNoMatch,
          /* is_static = */ false, /* index = */ 0};
}
uint64_t QpackEncoderHeaderTable::MaxInsertSizeWithoutEvictingGivenEntry(
uint64_t index) const {
QUICHE_DCHECK_LE(dropped_entry_count(), index);
if (index > inserted_entry_count()) {
return dynamic_table_capacity();
}
uint64_t max_insert_size = dynamic_table_capacity() - dynamic_table_size();
uint64_t entry_index = dropped_entry_count();
for (const auto& entry : dynamic_entries()) {
if (entry_index >= index) {
break;
}
++entry_index;
max_insert_size += entry->Size();
}
return max_insert_size;
}
uint64_t QpackEncoderHeaderTable::draining_index(
float draining_fraction) const {
QUICHE_DCHECK_LE(0.0, draining_fraction);
QUICHE_DCHECK_LE(draining_fraction, 1.0);
const uint64_t required_space = draining_fraction * dynamic_table_capacity();
uint64_t space_above_draining_index =
dynamic_table_capacity() - dynamic_table_size();
if (dynamic_entries().empty() ||
space_above_draining_index >= required_space) {
return dropped_entry_count();
}
auto it = dynamic_entries().begin();
uint64_t entry_index = dropped_entry_count();
while (space_above_draining_index < required_space) {
space_above_draining_index += (*it)->Size();
++it;
++entry_index;
if (it == dynamic_entries().end()) {
return inserted_entry_count();
}
}
return entry_index;
}
void QpackEncoderHeaderTable::RemoveEntryFromEnd() {
const QpackEntry* const entry = dynamic_entries().front().get();
const uint64_t index = dropped_entry_count();
auto index_it = dynamic_index_.find({entry->name(), entry->value()});
if (index_it != dynamic_index_.end() && index_it->second == index) {
dynamic_index_.erase(index_it);
}
auto name_it = dynamic_name_index_.find(entry->name());
if (name_it != dynamic_name_index_.end() && name_it->second == index) {
dynamic_name_index_.erase(name_it);
}
QpackHeaderTableBase<QpackEncoderDynamicTable>::RemoveEntryFromEnd();
}
QpackDecoderHeaderTable::QpackDecoderHeaderTable()
: static_entries_(ObtainQpackStaticTable().GetStaticEntries()) {}
QpackDecoderHeaderTable::~QpackDecoderHeaderTable() {
for (auto& entry : observers_) {
entry.second->Cancel();
}
}
uint64_t QpackDecoderHeaderTable::InsertEntry(absl::string_view name,
absl::string_view value) {
const uint64_t index =
QpackHeaderTableBase<QpackDecoderDynamicTable>::InsertEntry(name, value);
while (!observers_.empty()) {
auto it = observers_.begin();
if (it->first > inserted_entry_count()) {
break;
}
Observer* observer = it->second;
observers_.erase(it);
observer->OnInsertCountReachedThreshold();
}
return index;
}
const QpackEntry* QpackDecoderHeaderTable::LookupEntry(bool is_static,
uint64_t index) const {
if (is_static) {
if (index >= static_entries_.size()) {
return nullptr;
}
return &static_entries_[index];
}
if (index < dropped_entry_count()) {
return nullptr;
}
index -= dropped_entry_count();
if (index >= dynamic_entries().size()) {
return nullptr;
}
return &dynamic_entries()[index];
}
void QpackDecoderHeaderTable::RegisterObserver(uint64_t required_insert_count,
Observer* observer) {
QUICHE_DCHECK_GT(required_insert_count, 0u);
observers_.insert({required_insert_count, observer});
}
void QpackDecoderHeaderTable::UnregisterObserver(uint64_t required_insert_count,
Observer* observer) {
auto it = observers_.lower_bound(required_insert_count);
while (it != observers_.end() && it->first == required_insert_count) {
if (it->second == observer) {
observers_.erase(it);
return;
}
++it;
}
QUICHE_NOTREACHED();
}
} | #include "quiche/quic/core/qpack/qpack_header_table.h"
#include <memory>
#include <tuple>
#include <utility>
#include "absl/base/macros.h"
#include "absl/strings/string_view.h"
#include "quiche/http2/hpack/hpack_entry.h"
#include "quiche/quic/core/qpack/qpack_static_table.h"
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace test {
namespace {
using ::testing::_;
using ::testing::FieldsAre;
using ::testing::Mock;
using ::testing::StrictMock;
constexpr uint64_t kMaximumDynamicTableCapacityForTesting = 1024 * 1024;
constexpr bool kStaticEntry = true;
constexpr bool kDynamicEntry = false;
template <typename T>
class QpackHeaderTableTest : public QuicTest {
protected:
~QpackHeaderTableTest() override = default;
void SetUp() override {
ASSERT_TRUE(table_.SetMaximumDynamicTableCapacity(
kMaximumDynamicTableCapacityForTesting));
ASSERT_TRUE(
table_.SetDynamicTableCapacity(kMaximumDynamicTableCapacityForTesting));
}
bool EntryFitsDynamicTableCapacity(absl::string_view name,
absl::string_view value) const {
return table_.EntryFitsDynamicTableCapacity(name, value);
}
void InsertEntry(absl::string_view name, absl::string_view value) {
table_.InsertEntry(name, value);
}
bool SetDynamicTableCapacity(uint64_t capacity) {
return table_.SetDynamicTableCapacity(capacity);
}
uint64_t max_entries() const { return table_.max_entries(); }
uint64_t inserted_entry_count() const {
return table_.inserted_entry_count();
}
uint64_t dropped_entry_count() const { return table_.dropped_entry_count(); }
T table_;
};
using MyTypes =
::testing::Types<QpackEncoderHeaderTable, QpackDecoderHeaderTable>;
TYPED_TEST_SUITE(QpackHeaderTableTest, MyTypes);
TYPED_TEST(QpackHeaderTableTest, MaxEntries) {
TypeParam table1;
table1.SetMaximumDynamicTableCapacity(1024);
EXPECT_EQ(32u, table1.max_entries());
TypeParam table2;
table2.SetMaximumDynamicTableCapacity(500);
EXPECT_EQ(15u, table2.max_entries());
}
TYPED_TEST(QpackHeaderTableTest, SetDynamicTableCapacity) {
EXPECT_TRUE(this->SetDynamicTableCapacity(1024));
EXPECT_EQ(32u * 1024, this->max_entries());
EXPECT_TRUE(this->SetDynamicTableCapacity(500));
EXPECT_EQ(32u * 1024, this->max_entries());
EXPECT_FALSE(this->SetDynamicTableCapacity(
2 * kMaximumDynamicTableCapacityForTesting));
}
TYPED_TEST(QpackHeaderTableTest, EntryFitsDynamicTableCapacity) {
EXPECT_TRUE(this->SetDynamicTableCapacity(39));
EXPECT_TRUE(this->EntryFitsDynamicTableCapacity("foo", "bar"));
EXPECT_TRUE(this->EntryFitsDynamicTableCapacity("foo", "bar2"));
EXPECT_FALSE(this->EntryFitsDynamicTableCapacity("foo", "bar12"));
}
class QpackEncoderHeaderTableTest
: public QpackHeaderTableTest<QpackEncoderHeaderTable> {
protected:
enum MatchType { kNameAndValue, kName, kNoMatch };
~QpackEncoderHeaderTableTest() override = default;
std::tuple<MatchType, bool, uint64_t> FindHeaderField(
absl::string_view name, absl::string_view value) const {
QpackEncoderHeaderTable::MatchResult match_result =
table_.FindHeaderField(name, value);
return {static_cast<MatchType>(match_result.match_type),
match_result.is_static, match_result.index};
}
std::tuple<MatchType, bool, uint64_t> FindHeaderName(
absl::string_view name) const {
QpackEncoderHeaderTable::MatchResult match_result =
table_.FindHeaderName(name);
return {static_cast<MatchType>(match_result.match_type),
match_result.is_static, match_result.index};
}
uint64_t MaxInsertSizeWithoutEvictingGivenEntry(uint64_t index) const {
return table_.MaxInsertSizeWithoutEvictingGivenEntry(index);
}
uint64_t draining_index(float draining_fraction) const {
return table_.draining_index(draining_fraction);
}
};
TEST_F(QpackEncoderHeaderTableTest, FindStaticHeaderField) {
EXPECT_THAT(FindHeaderField(":method", "GET"),
FieldsAre(kNameAndValue, kStaticEntry, 17u));
EXPECT_THAT(FindHeaderField(":method", "POST"),
FieldsAre(kNameAndValue, kStaticEntry, 20u));
EXPECT_THAT(FindHeaderField(":method", "TRACE"),
FieldsAre(kName, kStaticEntry, 15u));
EXPECT_THAT(FindHeaderName(":method"), FieldsAre(kName, kStaticEntry, 15u));
EXPECT_THAT(FindHeaderField("accept-encoding", "gzip, deflate, br"),
FieldsAre(kNameAndValue, kStaticEntry, 31u));
EXPECT_THAT(FindHeaderField("accept-encoding", "compress"),
FieldsAre(kName, kStaticEntry, 31u));
EXPECT_THAT(FindHeaderField("accept-encoding", ""),
FieldsAre(kName, kStaticEntry, 31u));
EXPECT_THAT(FindHeaderName("accept-encoding"),
FieldsAre(kName, kStaticEntry, 31u));
EXPECT_THAT(FindHeaderField("location", ""),
FieldsAre(kNameAndValue, kStaticEntry, 12u));
EXPECT_THAT(FindHeaderField("location", "foo"),
FieldsAre(kName, kStaticEntry, 12u));
EXPECT_THAT(FindHeaderName("location"), FieldsAre(kName, kStaticEntry, 12u));
EXPECT_THAT(FindHeaderField("foo", ""), FieldsAre(kNoMatch, _, _));
EXPECT_THAT(FindHeaderField("foo", "bar"), FieldsAre(kNoMatch, _, _));
EXPECT_THAT(FindHeaderName("foo"), FieldsAre(kNoMatch, _, _));
}
TEST_F(QpackEncoderHeaderTableTest, FindDynamicHeaderField) {
EXPECT_THAT(FindHeaderField("foo", "bar"), FieldsAre(kNoMatch, _, _));
EXPECT_THAT(FindHeaderField("foo", "baz"), FieldsAre(kNoMatch, _, _));
EXPECT_THAT(FindHeaderName("foo"), FieldsAre(kNoMatch, _, _));
InsertEntry("foo", "bar");
EXPECT_THAT(FindHeaderField("foo", "bar"),
FieldsAre(kNameAndValue, kDynamicEntry, 0u));
EXPECT_THAT(FindHeaderField("foo", "baz"),
FieldsAre(kName, kDynamicEntry, 0u));
EXPECT_THAT(FindHeaderName("foo"), FieldsAre(kName, kDynamicEntry, 0u));
InsertEntry("foo", "bar");
EXPECT_THAT(FindHeaderField("foo", "bar"),
FieldsAre(kNameAndValue, kDynamicEntry, 1u));
EXPECT_THAT(FindHeaderField("foo", "baz"),
FieldsAre(kName, kDynamicEntry, 1u));
EXPECT_THAT(FindHeaderName("foo"), FieldsAre(kName, kDynamicEntry, 1u));
}
TEST_F(QpackEncoderHeaderTableTest, FindHeaderFieldPrefersStaticTable) {
InsertEntry(":method", "GET");
EXPECT_THAT(FindHeaderField(":method", "GET"),
FieldsAre(kNameAndValue, kStaticEntry, 17u));
EXPECT_THAT(FindHeaderField(":method", "TRACE"),
FieldsAre(kName, kStaticEntry, 15u));
EXPECT_THAT(FindHeaderName(":method"), FieldsAre(kName, kStaticEntry, 15u));
InsertEntry(":method", "TRACE");
EXPECT_THAT(FindHeaderField(":method", "TRACE"),
FieldsAre(kNameAndValue, kDynamicEntry, 1u));
}
TEST_F(QpackEncoderHeaderTableTest, EvictByInsertion) {
EXPECT_TRUE(SetDynamicTableCapacity(40));
InsertEntry("foo", "bar");
EXPECT_EQ(1u, inserted_entry_count());
EXPECT_EQ(0u, dropped_entry_count());
EXPECT_THAT(FindHeaderField("foo", "bar"),
FieldsAre(kNameAndValue, kDynamicEntry, 0u));
InsertEntry("baz", "qux");
EXPECT_EQ(2u, inserted_entry_count());
EXPECT_EQ(1u, dropped_entry_count());
EXPECT_THAT(FindHeaderField("foo", "bar"), FieldsAre(kNoMatch, _, _));
EXPECT_THAT(FindHeaderField("baz", "qux"),
FieldsAre(kNameAndValue, kDynamicEntry, 1u));
}
TEST_F(QpackEncoderHeaderTableTest, EvictByUpdateTableSize) {
InsertEntry("foo", "bar");
InsertEntry("baz", "qux");
EXPECT_EQ(2u, inserted_entry_count());
EXPECT_EQ(0u, dropped_entry_count());
EXPECT_THAT(FindHeaderField("foo", "bar"),
FieldsAre(kNameAndValue, kDynamicEntry, 0u));
EXPECT_THAT(FindHeaderField("baz", "qux"),
FieldsAre(kNameAndValue, kDynamicEntry, 1u));
EXPECT_TRUE(SetDynamicTableCapacity(40));
EXPECT_EQ(2u, inserted_entry_count());
EXPECT_EQ(1u, dropped_entry_count());
EXPECT_THAT(FindHeaderField("foo", "bar"), FieldsAre(kNoMatch, _, _));
EXPECT_THAT(FindHeaderField("baz", "qux"),
FieldsAre(kNameAndValue, kDynamicEntry, 1u));
EXPECT_TRUE(SetDynamicTableCapacity(20));
EXPECT_EQ(2u, inserted_entry_count());
EXPECT_EQ(2u, dropped_entry_count());
EXPECT_THAT(FindHeaderField("foo", "bar"), FieldsAre(kNoMatch, _, _));
EXPECT_THAT(FindHeaderField("baz", "qux"), FieldsAre(kNoMatch, _, _));
}
TEST_F(QpackEncoderHeaderTableTest, EvictOldestOfIdentical) {
EXPECT_TRUE(SetDynamicTableCapacity(80));
InsertEntry("foo", "bar");
InsertEntry("foo", "bar");
EXPECT_EQ(2u, inserted_entry_count());
EXPECT_EQ(0u, dropped_entry_count());
EXPECT_THAT(FindHeaderField("foo", "bar"),
FieldsAre(kNameAndValue, kDynamicEntry, 1u));
InsertEntry("baz", "qux");
EXPECT_EQ(3u, inserted_entry_count());
EXPECT_EQ(1u, dropped_entry_count());
EXPECT_THAT(FindHeaderField("foo", "bar"),
FieldsAre(kNameAndValue, kDynamicEntry, 1u));
EXPECT_THAT(FindHeaderField("baz", "qux"),
FieldsAre(kNameAndValue, kDynamicEntry, 2u));
}
TEST_F(QpackEncoderHeaderTableTest, EvictOldestOfSameName) {
EXPECT_TRUE(SetDynamicTableCapacity(80));
InsertEntry("foo", "bar");
InsertEntry("foo", "baz");
EXPECT_EQ(2u, inserted_entry_count());
EXPECT_EQ(0u, dropped_entry_count());
EXPECT_THAT(FindHeaderField("foo", "foo"),
FieldsAre(kName, kDynamicEntry, 1u));
InsertEntry("baz", "qux");
EXPECT_EQ(3u, inserted_entry_count());
EXPECT_EQ(1u, dropped_entry_count());
EXPECT_THAT(FindHeaderField("foo", "foo"),
FieldsAre(kName, kDynamicEntry, 1u));
EXPECT_THAT(FindHeaderField("baz", "qux"),
FieldsAre(kNameAndValue, kDynamicEntry, 2u));
}
TEST_F(QpackEncoderHeaderTableTest, MaxInsertSizeWithoutEvictingGivenEntry) {
const uint64_t dynamic_table_capacity = 100;
EXPECT_TRUE(SetDynamicTableCapacity(dynamic_table_capacity));
EXPECT_EQ(dynamic_table_capacity, MaxInsertSizeWithoutEvictingGivenEntry(0));
const uint64_t entry_size1 = QpackEntry::Size("foo", "bar");
InsertEntry("foo", "bar");
EXPECT_EQ(dynamic_table_capacity - entry_size1,
MaxInsertSizeWithoutEvictingGivenEntry(0));
EXPECT_EQ(dynamic_table_capacity, MaxInsertSizeWithoutEvictingGivenEntry(1));
const uint64_t entry_size2 = QpackEntry::Size("baz", "foobar");
InsertEntry("baz", "foobar");
EXPECT_EQ(dynamic_table_capacity, MaxInsertSizeWithoutEvictingGivenEntry(2));
EXPECT_EQ(dynamic_table_capacity - entry_size2,
MaxInsertSizeWithoutEvictingGivenEntry(1));
EXPECT_EQ(dynamic_table_capacity - entry_size2 - entry_size1,
MaxInsertSizeWithoutEvictingGivenEntry(0));
const uint64_t entry_size3 = QpackEntry::Size("last", "entry");
InsertEntry("last", "entry");
EXPECT_EQ(1u, dropped_entry_count());
EXPECT_EQ(dynamic_table_capacity, MaxInsertSizeWithoutEvictingGivenEntry(3));
EXPECT_EQ(dynamic_table_capacity - entry_size3,
MaxInsertSizeWithoutEvictingGivenEntry(2));
EXPECT_EQ(dynamic_table_capacity - entry_size3 - entry_size2,
MaxInsertSizeWithoutEvictingGivenEntry(1));
}
TEST_F(QpackEncoderHeaderTableTest, DrainingIndex) {
EXPECT_TRUE(SetDynamicTableCapacity(4 * QpackEntry::Size("foo", "bar")));
EXPECT_EQ(0u, draining_index(0.0));
EXPECT_EQ(0u, draining_index(1.0));
InsertEntry("foo", "bar");
EXPECT_EQ(0u, draining_index(0.0));
EXPECT_EQ(1u, draining_index(1.0));
InsertEntry("foo", "bar");
EXPECT_EQ(0u, draining_index(0.0));
EXPECT_EQ(0u, draining_index(0.5));
EXPECT_EQ(2u, draining_index(1.0));
InsertEntry("foo", "bar");
InsertEntry("foo", "bar");
EXPECT_EQ(0u, draining_index(0.0));
EXPECT_EQ(2u, draining_index(0.5));
EXPECT_EQ(4u, draining_index(1.0));
}
class MockObserver : public QpackDecoderHeaderTable::Observer {
public:
~MockObserver() override = default;
MOCK_METHOD(void, OnInsertCountReachedThreshold, (), (override));
MOCK_METHOD(void, Cancel, (), (override));
};
class QpackDecoderHeaderTableTest
: public QpackHeaderTableTest<QpackDecoderHeaderTable> {
protected:
~QpackDecoderHeaderTableTest() override = default;
void ExpectEntryAtIndex(bool is_static, uint64_t index,
absl::string_view expected_name,
absl::string_view expected_value) const {
const auto* entry = table_.LookupEntry(is_static, index);
ASSERT_TRUE(entry);
EXPECT_EQ(expected_name, entry->name());
EXPECT_EQ(expected_value, entry->value());
}
void ExpectNoEntryAtIndex(bool is_static, uint64_t index) const {
EXPECT_FALSE(table_.LookupEntry(is_static, index));
}
void RegisterObserver(uint64_t required_insert_count,
QpackDecoderHeaderTable::Observer* observer) {
table_.RegisterObserver(required_insert_count, observer);
}
void UnregisterObserver(uint64_t required_insert_count,
QpackDecoderHeaderTable::Observer* observer) {
table_.UnregisterObserver(required_insert_count, observer);
}
};
TEST_F(QpackDecoderHeaderTableTest, LookupStaticEntry) {
ExpectEntryAtIndex(kStaticEntry, 0, ":authority", "");
ExpectEntryAtIndex(kStaticEntry, 1, ":path", "/");
ExpectEntryAtIndex(kStaticEntry, 98, "x-frame-options", "sameorigin");
ASSERT_EQ(99u, QpackStaticTableVector().size());
ExpectNoEntryAtIndex(kStaticEntry, 99);
}
TEST_F(QpackDecoderHeaderTableTest, InsertAndLookupDynamicEntry) {
ExpectNoEntryAtIndex(kDynamicEntry, 0);
ExpectNoEntryAtIndex(kDynamicEntry, 1);
ExpectNoEntryAtIndex(kDynamicEntry, 2);
ExpectNoEntryAtIndex(kDynamicEntry, 3);
InsertEntry("foo", "bar");
ExpectEntryAtIndex(kDynamicEntry, 0, "foo", "bar");
ExpectNoEntryAtIndex(kDynamicEntry, 1);
ExpectNoEntryAtIndex(kDynamicEntry, 2);
ExpectNoEntryAtIndex(kDynamicEntry, 3);
InsertEntry("baz", "bing");
ExpectEntryAtIndex(kDynamicEntry, 0, "foo", "bar");
ExpectEntryAtIndex(kDynamicEntry, 1, "baz", "bing");
ExpectNoEntryAtIndex(kDynamicEntry, 2);
ExpectNoEntryAtIndex(kDynamicEntry, 3);
InsertEntry("baz", "bing");
ExpectEntryAtIndex(kDynamicEntry, 0, "foo", "bar");
ExpectEntryAtIndex(kDynamicEntry, 1, "baz", "bing");
ExpectEntryAtIndex(kDynamicEntry, 2, "baz", "bing");
ExpectNoEntryAtIndex(kDynamicEntry, 3);
}
TEST_F(QpackDecoderHeaderTableTest, EvictByInsertion) {
EXPECT_TRUE(SetDynamicTableCapacity(40));
InsertEntry("foo", "bar");
EXPECT_EQ(1u, inserted_entry_count());
EXPECT_EQ(0u, dropped_entry_count());
ExpectEntryAtIndex(kDynamicEntry, 0u, "foo", "bar");
InsertEntry("baz", "qux");
EXPECT_EQ(2u, inserted_entry_count());
EXPECT_EQ(1u, dropped_entry_count());
ExpectNoEntryAtIndex(kDynamicEntry, 0u);
ExpectEntryAtIndex(kDynamicEntry, 1u, "baz", "qux");
}
TEST_F(QpackDecoderHeaderTableTest, EvictByUpdateTableSize) {
ExpectNoEntryAtIndex(kDynamicEntry, 0u);
ExpectNoEntryAtIndex(kDynamicEntry, 1u);
InsertEntry("foo", "bar");
InsertEntry("baz", "qux");
EXPECT_EQ(2u, inserted_entry_count());
EXPECT_EQ(0u, dropped_entry_count());
ExpectEntryAtIndex(kDynamicEntry, 0u, "foo", "bar");
ExpectEntryAtIndex(kDynamicEntry, 1u, "baz", "qux");
EXPECT_TRUE(SetDynamicTableCapacity(40));
EXPECT_EQ(2u, inserted_entry_count());
EXPECT_EQ(1u, dropped_entry_count());
ExpectNoEntryAtIndex(kDynamicEntry, 0u);
ExpectEntryAtIndex(kDynamicEntry, 1u, "baz", "qux");
EXPECT_TRUE(SetDynamicTableCapacity(20));
EXPECT_EQ(2u, inserted_entry_count());
EXPECT_EQ(2u, dropped_entry_count());
ExpectNoEntryAtIndex(kDynamicEntry, 0u);
ExpectNoEntryAtIndex(kDynamicEntry, 1u);
}
TEST_F(QpackDecoderHeaderTableTest, EvictOldestOfIdentical) {
EXPECT_TRUE(SetDynamicTableCapacity(80));
InsertEntry("foo", "bar");
InsertEntry("foo", "bar");
EXPECT_EQ(2u, inserted_entry_count());
EXPECT_EQ(0u, dropped_entry_count());
ExpectEntryAtIndex(kDynamicEntry, 0u, "foo", "bar");
ExpectEntryAtIndex(kDynamicEntry, 1u, "foo", "bar");
ExpectNoEntryAtIndex(kDynamicEntry, 2u);
InsertEntry("baz", "qux");
EXPECT_EQ(3u, inserted_entry_count());
EXPECT_EQ(1u, dropped_entry_count());
ExpectNoEntryAtIndex(kDynamicEntry, 0u);
ExpectEntryAtIndex(kDynamicEntry, 1u, "foo", "bar");
ExpectEntryAtIndex(kDynamicEntry, 2u, "baz", "qux");
}
TEST_F(QpackDecoderHeaderTableTest, EvictOldestOfSameName) {
EXPECT_TRUE(SetDynamicTableCapacity(80));
InsertEntry("foo", "bar");
InsertEntry("foo", "baz");
EXPECT_EQ(2u, inserted_entry_count());
EXPECT_EQ(0u, dropped_entry_count());
ExpectEntryAtIndex(kDynamicEntry, 0u, "foo", "bar");
ExpectEntryAtIndex(kDynamicEntry, 1u, "foo", "baz");
ExpectNoEntryAtIndex(kDynamicEntry, 2u);
InsertEntry("baz", "qux");
EXPECT_EQ(3u, inserted_entry_count());
EXPECT_EQ(1u, dropped_entry_count());
ExpectNoEntryAtIndex(kDynamicEntry, 0u);
ExpectEntryAtIndex(kDynamicEntry, 1u, "foo", "baz");
ExpectEntryAtIndex(kDynamicEntry, 2u, "baz", "qux");
}
TEST_F(QpackDecoderHeaderTableTest, RegisterObserver) {
StrictMock<MockObserver> observer1;
RegisterObserver(1, &observer1);
EXPECT_CALL(observer1, OnInsertCountReachedThreshold);
InsertEntry("foo", "bar");
EXPECT_EQ(1u, inserted_entry_count());
Mock::VerifyAndClearExpectations(&observer1);
StrictMock<MockObserver> observer2;
StrictMock<MockObserver> observer3;
RegisterObserver(3, &observer3);
RegisterObserver(2, &observer2);
EXPECT_CALL(observer2, OnInsertCountReachedThreshold);
InsertEntry("foo", "bar");
EXPECT_EQ(2u, inserted_entry_count());
Mock::VerifyAndClearExpectations(&observer3);
EXPECT_CALL(observer3, OnInsertCountReachedThreshold);
InsertEntry("foo", "bar");
EXPECT_EQ(3u, inserted_entry_count());
Mock::VerifyAndClearExpectations(&observer2);
StrictMock<MockObserver> observer4;
StrictMock<MockObserver> observer5;
RegisterObserver(4, &observer4);
RegisterObserver(4, &observer5);
EXPECT_CALL(observer4, OnInsertCountReachedThreshold);
EXPECT_CALL(observer5, OnInsertCountReachedThreshold);
InsertEntry("foo", "bar");
EXPECT_EQ(4u, inserted_entry_count());
Mock::VerifyAndClearExpectations(&observer4);
Mock::VerifyAndClearExpectations(&observer5);
}
TEST_F(QpackDecoderHeaderTableTest, UnregisterObserver) {
StrictMock<MockObserver> observer1;
StrictMock<MockObserver> observer2;
StrictMock<MockObserver> observer3;
StrictMock<MockObserver> observer4;
RegisterObserver(1, &observer1);
RegisterObserver(2, &observer2);
RegisterObserver(2, &observer3);
RegisterObserver(3, &observer4);
UnregisterObserver(2, &observer3);
EXPECT_CALL(observer1, OnInsertCountReachedThreshold);
EXPECT_CALL(observer2, OnInsertCountReachedThreshold);
EXPECT_CALL(observer4, OnInsertCountReachedThreshold);
InsertEntry("foo", "bar");
InsertEntry("foo", "bar");
InsertEntry("foo", "bar");
EXPECT_EQ(3u, inserted_entry_count());
}
TEST_F(QpackDecoderHeaderTableTest, Cancel) {
StrictMock<MockObserver> observer;
auto table = std::make_unique<QpackDecoderHeaderTable>();
table->RegisterObserver(1, &observer);
EXPECT_CALL(observer, Cancel);
table.reset();
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/qpack/qpack_header_table.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/qpack/qpack_header_table_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
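A minimal usage sketch for the QpackEncoderHeaderTable API in the row above (illustrative only; it assumes the QUICHE build environment and uses only calls that the ground-truth unit test also exercises):

```cpp
#include "quiche/quic/core/qpack/qpack_header_table.h"

// Insert one dynamic entry and look it up again.
void EncoderTableSketch() {
  quic::QpackEncoderHeaderTable table;
  // Both setters return bool; the unit test above checks them with ASSERT_TRUE.
  table.SetMaximumDynamicTableCapacity(1024);
  table.SetDynamicTableCapacity(1024);
  table.InsertEntry("foo", "bar");
  // Expect a kNameAndValue match against the dynamic entry at index 0.
  quic::QpackEncoderHeaderTable::MatchResult result =
      table.FindHeaderField("foo", "bar");
  // result.match_type, result.is_static, and result.index describe the match.
  (void)result;
}
```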
77cd2394-1ec2-4b7a-876d-77461d0d270a | cpp | tensorflow/tensorflow | transpose_test_utils | tensorflow/lite/kernels/transpose_test_utils.h | tensorflow/lite/kernels/transpose_test_utils_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_TRANSPOSE_TEST_UTILS_H_
#define TENSORFLOW_LITE_KERNELS_TRANSPOSE_TEST_UTILS_H_
#include <functional>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/types/span.h"
#include "tensorflow/lite/kernels/internal/portable_tensor.h"
#include "tensorflow/lite/kernels/internal/reference/transpose.h"
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
template <typename T>
std::vector<T> RunTestPermutation(const absl::Span<const int> shape,
const absl::Span<const int> perms) {
const int count = absl::c_accumulate(shape, 1, std::multiplies<>{});
std::vector<T> out(count);
std::vector<T> input(count);
absl::c_iota(input, static_cast<T>(0));
const RuntimeShape input_shape(shape.size(), shape.data());
RuntimeShape output_shape(perms.size());
for (int i = 0; i < perms.size(); i++) {
output_shape.SetDim(i, input_shape.Dims(perms[i]));
}
TransposeParams params{};
params.perm_count = static_cast<int8_t>(perms.size());
absl::c_copy(perms, params.perm);
reference_ops::Transpose(params, input_shape, input.data(), output_shape,
out.data());
return out;
}
}
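// Usage sketch (illustrative): RunTestPermutation fills an input of the given
// shape with 0, 1, 2, ... and returns it transposed according to `perms`.
// For example, as the unit test below verifies,
//   tflite::RunTestPermutation<float>({2, 3}, {1, 0})
// returns {0, 3, 1, 4, 2, 5}.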
#endif | #include "tensorflow/lite/kernels/transpose_test_utils.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
using testing::ElementsAreArray;
namespace tflite {
namespace {
TEST(TransposeTest, TestRefOps1D) {
EXPECT_THAT(RunTestPermutation<float>({3}, {0}), ElementsAreArray({0, 1, 2}));
}
TEST(TransposeTest, TestRefOps2D) {
EXPECT_THAT(RunTestPermutation<float>({3, 2}, {1, 0}),
ElementsAreArray({0, 2, 4, 1, 3, 5}));
EXPECT_THAT(RunTestPermutation<float>({3, 2}, {0, 1}),
ElementsAreArray({0, 1, 2, 3, 4, 5}));
}
TEST(TransposeTest, TestRefOps3D) {
{
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4}, {2, 0, 1}),
ElementsAreArray({0, 4, 8, 12, 16, 20, 1, 5, 9, 13, 17, 21,
2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23}));
}
{
std::vector<float> out =
RunTestPermutation<float>({2, 3, 4}, {0, 1, 2});
std::vector<float> ref(out.size());
absl::c_iota(ref, 0);
EXPECT_THAT(out, ElementsAreArray(ref));
}
{
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4}, {1, 2, 0}),
ElementsAreArray({0, 12, 1, 13, 2, 14, 3, 15, 4, 16, 5, 17,
6, 18, 7, 19, 8, 20, 9, 21, 10, 22, 11, 23}));
}
{
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4}, {0, 2, 1}),
ElementsAreArray({0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11,
12, 16, 20, 13, 17, 21, 14, 18, 22, 15, 19, 23}));
}
{
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4}, {1, 0, 2}),
ElementsAreArray({0, 1, 2, 3, 12, 13, 14, 15, 4, 5, 6, 7,
16, 17, 18, 19, 8, 9, 10, 11, 20, 21, 22, 23}));
}
{
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4}, {2, 1, 0}),
ElementsAreArray({0, 12, 4, 16, 8, 20, 1, 13, 5, 17, 9, 21,
2, 14, 6, 18, 10, 22, 3, 15, 7, 19, 11, 23}));
}
}
TEST(TransposeTest, TestRefOps3D_OneInDimension) {
{
EXPECT_THAT(
RunTestPermutation<float>({1, 2, 3}, {2, 0, 1}),
ElementsAreArray({0, 3, 1, 4, 2, 5}));
}
{
EXPECT_THAT(
RunTestPermutation<float>({1, 2, 3}, {1, 2, 0}),
ElementsAreArray({0, 1, 2, 3, 4, 5}));
}
{
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 1}, {1, 2, 0}),
ElementsAreArray({0, 3, 1, 4, 2, 5}));
}
{
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 1}, {2, 0, 1}),
ElementsAreArray({0, 1, 2, 3, 4, 5}));
}
}
template <typename T>
void TransposeTestTestRefOps4D() {
EXPECT_THAT(
RunTestPermutation<T>({2, 3, 4, 5}, {2, 0, 1, 3}),
ElementsAreArray(
{0, 1, 2, 3, 4, 20, 21, 22, 23, 24, 40, 41, 42, 43, 44,
60, 61, 62, 63, 64, 80, 81, 82, 83, 84, 100, 101, 102, 103, 104,
5, 6, 7, 8, 9, 25, 26, 27, 28, 29, 45, 46, 47, 48, 49,
65, 66, 67, 68, 69, 85, 86, 87, 88, 89, 105, 106, 107, 108, 109,
10, 11, 12, 13, 14, 30, 31, 32, 33, 34, 50, 51, 52, 53, 54,
70, 71, 72, 73, 74, 90, 91, 92, 93, 94, 110, 111, 112, 113, 114,
15, 16, 17, 18, 19, 35, 36, 37, 38, 39, 55, 56, 57, 58, 59,
75, 76, 77, 78, 79, 95, 96, 97, 98, 99, 115, 116, 117, 118, 119}));
{
std::vector<T> out = RunTestPermutation<T>({2, 3, 4, 5}, {0, 1, 2, 3});
std::vector<T> ref(out.size());
absl::c_iota(ref, 0);
EXPECT_THAT(out, ElementsAreArray(ref));
}
}
TEST(TransposeTest, TestRefOps4D) { TransposeTestTestRefOps4D<float>(); }
TEST(TransposeTest, TestRefOps4DInt8) { TransposeTestTestRefOps4D<int8_t>(); }
TEST(TransposeTest, TestRefOps4DInt16) { TransposeTestTestRefOps4D<int16_t>(); }
TEST(TransposeTest, TestRefOps1D0) {
EXPECT_THAT(RunTestPermutation<float>({2}, {0}),
ElementsAreArray({0, 1}));
}
TEST(TransposeTest, TestRefOps2D0) {
EXPECT_THAT(RunTestPermutation<float>({2, 3}, {0, 1}),
ElementsAreArray({0, 1, 2, 3, 4, 5}));
}
TEST(TransposeTest, TestRefOps2D1) {
EXPECT_THAT(RunTestPermutation<float>({2, 3}, {1, 0}),
ElementsAreArray({0, 3, 1, 4, 2, 5}));
}
TEST(TransposeTest, TestRefOps3D0) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4}, {0, 1, 2}),
ElementsAreArray({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}));
}
TEST(TransposeTest, TestRefOps3D1) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4}, {0, 2, 1}),
ElementsAreArray({0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11,
12, 16, 20, 13, 17, 21, 14, 18, 22, 15, 19, 23}));
}
TEST(TransposeTest, TestRefOps3D2) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4}, {1, 0, 2}),
ElementsAreArray({0, 1, 2, 3, 12, 13, 14, 15, 4, 5, 6, 7,
16, 17, 18, 19, 8, 9, 10, 11, 20, 21, 22, 23}));
}
TEST(TransposeTest, TestRefOps3D3) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4}, {1, 2, 0}),
ElementsAreArray({0, 12, 1, 13, 2, 14, 3, 15, 4, 16, 5, 17,
6, 18, 7, 19, 8, 20, 9, 21, 10, 22, 11, 23}));
}
TEST(TransposeTest, TestRefOps3D4) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4}, {2, 0, 1}),
ElementsAreArray({0, 4, 8, 12, 16, 20, 1, 5, 9, 13, 17, 21,
2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23}));
}
TEST(TransposeTest, TestRefOps3D5) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4}, {2, 1, 0}),
ElementsAreArray({0, 12, 4, 16, 8, 20, 1, 13, 5, 17, 9, 21,
2, 14, 6, 18, 10, 22, 3, 15, 7, 19, 11, 23}));
}
TEST(TransposeTest, TestRefOps4D0) {
const std::vector<float> ref = [] {
std::vector<float> ref(120);
absl::c_iota(ref, 0);
return ref;
}();
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {0, 1, 2, 3}),
ElementsAreArray(ref));
}
TEST(TransposeTest, TestRefOps4D1) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {0, 1, 3, 2}),
ElementsAreArray(
{0, 5, 10, 15, 1, 6, 11, 16, 2, 7, 12, 17, 3, 8,
13, 18, 4, 9, 14, 19, 20, 25, 30, 35, 21, 26, 31, 36,
22, 27, 32, 37, 23, 28, 33, 38, 24, 29, 34, 39, 40, 45,
50, 55, 41, 46, 51, 56, 42, 47, 52, 57, 43, 48, 53, 58,
44, 49, 54, 59, 60, 65, 70, 75, 61, 66, 71, 76, 62, 67,
72, 77, 63, 68, 73, 78, 64, 69, 74, 79, 80, 85, 90, 95,
81, 86, 91, 96, 82, 87, 92, 97, 83, 88, 93, 98, 84, 89,
94, 99, 100, 105, 110, 115, 101, 106, 111, 116, 102, 107, 112, 117,
103, 108, 113, 118, 104, 109, 114, 119}));
}
TEST(TransposeTest, TestRefOps4D2) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {0, 2, 1, 3}),
ElementsAreArray(
{0, 1, 2, 3, 4, 20, 21, 22, 23, 24, 40, 41, 42, 43, 44,
5, 6, 7, 8, 9, 25, 26, 27, 28, 29, 45, 46, 47, 48, 49,
10, 11, 12, 13, 14, 30, 31, 32, 33, 34, 50, 51, 52, 53, 54,
15, 16, 17, 18, 19, 35, 36, 37, 38, 39, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 80, 81, 82, 83, 84, 100, 101, 102, 103, 104,
65, 66, 67, 68, 69, 85, 86, 87, 88, 89, 105, 106, 107, 108, 109,
70, 71, 72, 73, 74, 90, 91, 92, 93, 94, 110, 111, 112, 113, 114,
75, 76, 77, 78, 79, 95, 96, 97, 98, 99, 115, 116, 117, 118, 119}));
}
TEST(TransposeTest, TestRefOps4D3) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {0, 2, 3, 1}),
ElementsAreArray(
{0, 20, 40, 1, 21, 41, 2, 22, 42, 3, 23, 43, 4, 24, 44,
5, 25, 45, 6, 26, 46, 7, 27, 47, 8, 28, 48, 9, 29, 49,
10, 30, 50, 11, 31, 51, 12, 32, 52, 13, 33, 53, 14, 34, 54,
15, 35, 55, 16, 36, 56, 17, 37, 57, 18, 38, 58, 19, 39, 59,
60, 80, 100, 61, 81, 101, 62, 82, 102, 63, 83, 103, 64, 84, 104,
65, 85, 105, 66, 86, 106, 67, 87, 107, 68, 88, 108, 69, 89, 109,
70, 90, 110, 71, 91, 111, 72, 92, 112, 73, 93, 113, 74, 94, 114,
75, 95, 115, 76, 96, 116, 77, 97, 117, 78, 98, 118, 79, 99, 119}));
}
TEST(TransposeTest, TestRefOps4D4) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {0, 3, 1, 2}),
ElementsAreArray({0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55,
1, 6, 11, 16, 21, 26, 31, 36, 41, 46, 51, 56,
2, 7, 12, 17, 22, 27, 32, 37, 42, 47, 52, 57,
3, 8, 13, 18, 23, 28, 33, 38, 43, 48, 53, 58,
4, 9, 14, 19, 24, 29, 34, 39, 44, 49, 54, 59,
60, 65, 70, 75, 80, 85, 90, 95, 100, 105, 110, 115,
61, 66, 71, 76, 81, 86, 91, 96, 101, 106, 111, 116,
62, 67, 72, 77, 82, 87, 92, 97, 102, 107, 112, 117,
63, 68, 73, 78, 83, 88, 93, 98, 103, 108, 113, 118,
64, 69, 74, 79, 84, 89, 94, 99, 104, 109, 114, 119}));
}
TEST(TransposeTest, TestRefOps4D5) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {0, 3, 2, 1}),
ElementsAreArray(
{0, 20, 40, 5, 25, 45, 10, 30, 50, 15, 35, 55, 1, 21, 41,
6, 26, 46, 11, 31, 51, 16, 36, 56, 2, 22, 42, 7, 27, 47,
12, 32, 52, 17, 37, 57, 3, 23, 43, 8, 28, 48, 13, 33, 53,
18, 38, 58, 4, 24, 44, 9, 29, 49, 14, 34, 54, 19, 39, 59,
60, 80, 100, 65, 85, 105, 70, 90, 110, 75, 95, 115, 61, 81, 101,
66, 86, 106, 71, 91, 111, 76, 96, 116, 62, 82, 102, 67, 87, 107,
72, 92, 112, 77, 97, 117, 63, 83, 103, 68, 88, 108, 73, 93, 113,
78, 98, 118, 64, 84, 104, 69, 89, 109, 74, 94, 114, 79, 99, 119}));
}
TEST(TransposeTest, TestRefOps4D6) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {1, 0, 2, 3}),
ElementsAreArray(
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 60, 61, 62, 63, 64, 65, 66, 67,
68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
36, 37, 38, 39, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 40, 41, 42, 43,
44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
58, 59, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
112, 113, 114, 115, 116, 117, 118, 119}));
}
TEST(TransposeTest, TestRefOps4D7) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {1, 0, 3, 2}),
ElementsAreArray(
{0, 5, 10, 15, 1, 6, 11, 16, 2, 7, 12, 17, 3, 8,
13, 18, 4, 9, 14, 19, 60, 65, 70, 75, 61, 66, 71, 76,
62, 67, 72, 77, 63, 68, 73, 78, 64, 69, 74, 79, 20, 25,
30, 35, 21, 26, 31, 36, 22, 27, 32, 37, 23, 28, 33, 38,
24, 29, 34, 39, 80, 85, 90, 95, 81, 86, 91, 96, 82, 87,
92, 97, 83, 88, 93, 98, 84, 89, 94, 99, 40, 45, 50, 55,
41, 46, 51, 56, 42, 47, 52, 57, 43, 48, 53, 58, 44, 49,
54, 59, 100, 105, 110, 115, 101, 106, 111, 116, 102, 107, 112, 117,
103, 108, 113, 118, 104, 109, 114, 119}));
}
TEST(TransposeTest, TestRefOps4D8) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {1, 2, 0, 3}),
ElementsAreArray(
{0, 1, 2, 3, 4, 60, 61, 62, 63, 64, 5, 6, 7, 8,
9, 65, 66, 67, 68, 69, 10, 11, 12, 13, 14, 70, 71, 72,
73, 74, 15, 16, 17, 18, 19, 75, 76, 77, 78, 79, 20, 21,
22, 23, 24, 80, 81, 82, 83, 84, 25, 26, 27, 28, 29, 85,
86, 87, 88, 89, 30, 31, 32, 33, 34, 90, 91, 92, 93, 94,
35, 36, 37, 38, 39, 95, 96, 97, 98, 99, 40, 41, 42, 43,
44, 100, 101, 102, 103, 104, 45, 46, 47, 48, 49, 105, 106, 107,
108, 109, 50, 51, 52, 53, 54, 110, 111, 112, 113, 114, 55, 56,
57, 58, 59, 115, 116, 117, 118, 119}));
}
TEST(TransposeTest, TestRefOps4D9) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {1, 2, 3, 0}),
ElementsAreArray({0, 60, 1, 61, 2, 62, 3, 63, 4, 64, 5, 65,
6, 66, 7, 67, 8, 68, 9, 69, 10, 70, 11, 71,
12, 72, 13, 73, 14, 74, 15, 75, 16, 76, 17, 77,
18, 78, 19, 79, 20, 80, 21, 81, 22, 82, 23, 83,
24, 84, 25, 85, 26, 86, 27, 87, 28, 88, 29, 89,
30, 90, 31, 91, 32, 92, 33, 93, 34, 94, 35, 95,
36, 96, 37, 97, 38, 98, 39, 99, 40, 100, 41, 101,
42, 102, 43, 103, 44, 104, 45, 105, 46, 106, 47, 107,
48, 108, 49, 109, 50, 110, 51, 111, 52, 112, 53, 113,
54, 114, 55, 115, 56, 116, 57, 117, 58, 118, 59, 119}));
}
TEST(TransposeTest, TestRefOps4D10) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {1, 3, 0, 2}),
ElementsAreArray(
{0, 5, 10, 15, 60, 65, 70, 75, 1, 6, 11, 16, 61, 66,
71, 76, 2, 7, 12, 17, 62, 67, 72, 77, 3, 8, 13, 18,
63, 68, 73, 78, 4, 9, 14, 19, 64, 69, 74, 79, 20, 25,
30, 35, 80, 85, 90, 95, 21, 26, 31, 36, 81, 86, 91, 96,
22, 27, 32, 37, 82, 87, 92, 97, 23, 28, 33, 38, 83, 88,
93, 98, 24, 29, 34, 39, 84, 89, 94, 99, 40, 45, 50, 55,
100, 105, 110, 115, 41, 46, 51, 56, 101, 106, 111, 116, 42, 47,
52, 57, 102, 107, 112, 117, 43, 48, 53, 58, 103, 108, 113, 118,
44, 49, 54, 59, 104, 109, 114, 119}));
}
TEST(TransposeTest, TestRefOps4D11) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {1, 3, 2, 0}),
ElementsAreArray({0, 60, 5, 65, 10, 70, 15, 75, 1, 61, 6, 66,
11, 71, 16, 76, 2, 62, 7, 67, 12, 72, 17, 77,
3, 63, 8, 68, 13, 73, 18, 78, 4, 64, 9, 69,
14, 74, 19, 79, 20, 80, 25, 85, 30, 90, 35, 95,
21, 81, 26, 86, 31, 91, 36, 96, 22, 82, 27, 87,
32, 92, 37, 97, 23, 83, 28, 88, 33, 93, 38, 98,
24, 84, 29, 89, 34, 94, 39, 99, 40, 100, 45, 105,
50, 110, 55, 115, 41, 101, 46, 106, 51, 111, 56, 116,
42, 102, 47, 107, 52, 112, 57, 117, 43, 103, 48, 108,
53, 113, 58, 118, 44, 104, 49, 109, 54, 114, 59, 119}));
}
TEST(TransposeTest, TestRefOps4D12) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {2, 0, 1, 3}),
ElementsAreArray(
{0, 1, 2, 3, 4, 20, 21, 22, 23, 24, 40, 41, 42, 43, 44,
60, 61, 62, 63, 64, 80, 81, 82, 83, 84, 100, 101, 102, 103, 104,
5, 6, 7, 8, 9, 25, 26, 27, 28, 29, 45, 46, 47, 48, 49,
65, 66, 67, 68, 69, 85, 86, 87, 88, 89, 105, 106, 107, 108, 109,
10, 11, 12, 13, 14, 30, 31, 32, 33, 34, 50, 51, 52, 53, 54,
70, 71, 72, 73, 74, 90, 91, 92, 93, 94, 110, 111, 112, 113, 114,
15, 16, 17, 18, 19, 35, 36, 37, 38, 39, 55, 56, 57, 58, 59,
75, 76, 77, 78, 79, 95, 96, 97, 98, 99, 115, 116, 117, 118, 119}));
}
TEST(TransposeTest, TestRefOps4D13) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {2, 0, 3, 1}),
ElementsAreArray(
{0, 20, 40, 1, 21, 41, 2, 22, 42, 3, 23, 43, 4, 24, 44,
60, 80, 100, 61, 81, 101, 62, 82, 102, 63, 83, 103, 64, 84, 104,
5, 25, 45, 6, 26, 46, 7, 27, 47, 8, 28, 48, 9, 29, 49,
65, 85, 105, 66, 86, 106, 67, 87, 107, 68, 88, 108, 69, 89, 109,
10, 30, 50, 11, 31, 51, 12, 32, 52, 13, 33, 53, 14, 34, 54,
70, 90, 110, 71, 91, 111, 72, 92, 112, 73, 93, 113, 74, 94, 114,
15, 35, 55, 16, 36, 56, 17, 37, 57, 18, 38, 58, 19, 39, 59,
75, 95, 115, 76, 96, 116, 77, 97, 117, 78, 98, 118, 79, 99, 119}));
}
TEST(TransposeTest, TestRefOps4D14) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {2, 1, 0, 3}),
ElementsAreArray(
{0, 1, 2, 3, 4, 60, 61, 62, 63, 64, 20, 21, 22, 23, 24,
80, 81, 82, 83, 84, 40, 41, 42, 43, 44, 100, 101, 102, 103, 104,
5, 6, 7, 8, 9, 65, 66, 67, 68, 69, 25, 26, 27, 28, 29,
85, 86, 87, 88, 89, 45, 46, 47, 48, 49, 105, 106, 107, 108, 109,
10, 11, 12, 13, 14, 70, 71, 72, 73, 74, 30, 31, 32, 33, 34,
90, 91, 92, 93, 94, 50, 51, 52, 53, 54, 110, 111, 112, 113, 114,
15, 16, 17, 18, 19, 75, 76, 77, 78, 79, 35, 36, 37, 38, 39,
95, 96, 97, 98, 99, 55, 56, 57, 58, 59, 115, 116, 117, 118, 119}));
}
TEST(TransposeTest, TestRefOps4D15) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {2, 1, 3, 0}),
ElementsAreArray(
{0, 60, 1, 61, 2, 62, 3, 63, 4, 64, 20, 80, 21, 81, 22,
82, 23, 83, 24, 84, 40, 100, 41, 101, 42, 102, 43, 103, 44, 104,
5, 65, 6, 66, 7, 67, 8, 68, 9, 69, 25, 85, 26, 86, 27,
87, 28, 88, 29, 89, 45, 105, 46, 106, 47, 107, 48, 108, 49, 109,
10, 70, 11, 71, 12, 72, 13, 73, 14, 74, 30, 90, 31, 91, 32,
92, 33, 93, 34, 94, 50, 110, 51, 111, 52, 112, 53, 113, 54, 114,
15, 75, 16, 76, 17, 77, 18, 78, 19, 79, 35, 95, 36, 96, 37,
97, 38, 98, 39, 99, 55, 115, 56, 116, 57, 117, 58, 118, 59, 119}));
}
TEST(TransposeTest, TestRefOps4D16) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {2, 3, 0, 1}),
ElementsAreArray(
{0, 20, 40, 60, 80, 100, 1, 21, 41, 61, 81, 101, 2, 22, 42,
62, 82, 102, 3, 23, 43, 63, 83, 103, 4, 24, 44, 64, 84, 104,
5, 25, 45, 65, 85, 105, 6, 26, 46, 66, 86, 106, 7, 27, 47,
67, 87, 107, 8, 28, 48, 68, 88, 108, 9, 29, 49, 69, 89, 109,
10, 30, 50, 70, 90, 110, 11, 31, 51, 71, 91, 111, 12, 32, 52,
72, 92, 112, 13, 33, 53, 73, 93, 113, 14, 34, 54, 74, 94, 114,
15, 35, 55, 75, 95, 115, 16, 36, 56, 76, 96, 116, 17, 37, 57,
77, 97, 117, 18, 38, 58, 78, 98, 118, 19, 39, 59, 79, 99, 119}));
}
TEST(TransposeTest, TestRefOps4D17) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {2, 3, 1, 0}),
ElementsAreArray(
{0, 60, 20, 80, 40, 100, 1, 61, 21, 81, 41, 101, 2, 62, 22,
82, 42, 102, 3, 63, 23, 83, 43, 103, 4, 64, 24, 84, 44, 104,
5, 65, 25, 85, 45, 105, 6, 66, 26, 86, 46, 106, 7, 67, 27,
87, 47, 107, 8, 68, 28, 88, 48, 108, 9, 69, 29, 89, 49, 109,
10, 70, 30, 90, 50, 110, 11, 71, 31, 91, 51, 111, 12, 72, 32,
92, 52, 112, 13, 73, 33, 93, 53, 113, 14, 74, 34, 94, 54, 114,
15, 75, 35, 95, 55, 115, 16, 76, 36, 96, 56, 116, 17, 77, 37,
97, 57, 117, 18, 78, 38, 98, 58, 118, 19, 79, 39, 99, 59, 119}));
}
TEST(TransposeTest, TestRefOps4D18) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {3, 0, 1, 2}),
ElementsAreArray({0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55,
60, 65, 70, 75, 80, 85, 90, 95, 100, 105, 110, 115,
1, 6, 11, 16, 21, 26, 31, 36, 41, 46, 51, 56,
61, 66, 71, 76, 81, 86, 91, 96, 101, 106, 111, 116,
2, 7, 12, 17, 22, 27, 32, 37, 42, 47, 52, 57,
62, 67, 72, 77, 82, 87, 92, 97, 102, 107, 112, 117,
3, 8, 13, 18, 23, 28, 33, 38, 43, 48, 53, 58,
63, 68, 73, 78, 83, 88, 93, 98, 103, 108, 113, 118,
4, 9, 14, 19, 24, 29, 34, 39, 44, 49, 54, 59,
64, 69, 74, 79, 84, 89, 94, 99, 104, 109, 114, 119}));
}
TEST(TransposeTest, TestRefOps4D19) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {3, 0, 2, 1}),
ElementsAreArray(
{0, 20, 40, 5, 25, 45, 10, 30, 50, 15, 35, 55, 60, 80, 100,
65, 85, 105, 70, 90, 110, 75, 95, 115, 1, 21, 41, 6, 26, 46,
11, 31, 51, 16, 36, 56, 61, 81, 101, 66, 86, 106, 71, 91, 111,
76, 96, 116, 2, 22, 42, 7, 27, 47, 12, 32, 52, 17, 37, 57,
62, 82, 102, 67, 87, 107, 72, 92, 112, 77, 97, 117, 3, 23, 43,
8, 28, 48, 13, 33, 53, 18, 38, 58, 63, 83, 103, 68, 88, 108,
73, 93, 113, 78, 98, 118, 4, 24, 44, 9, 29, 49, 14, 34, 54,
19, 39, 59, 64, 84, 104, 69, 89, 109, 74, 94, 114, 79, 99, 119}));
}
TEST(TransposeTest, TestRefOps4D20) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {3, 1, 0, 2}),
ElementsAreArray({0, 5, 10, 15, 60, 65, 70, 75, 20, 25, 30, 35,
80, 85, 90, 95, 40, 45, 50, 55, 100, 105, 110, 115,
1, 6, 11, 16, 61, 66, 71, 76, 21, 26, 31, 36,
81, 86, 91, 96, 41, 46, 51, 56, 101, 106, 111, 116,
2, 7, 12, 17, 62, 67, 72, 77, 22, 27, 32, 37,
82, 87, 92, 97, 42, 47, 52, 57, 102, 107, 112, 117,
3, 8, 13, 18, 63, 68, 73, 78, 23, 28, 33, 38,
83, 88, 93, 98, 43, 48, 53, 58, 103, 108, 113, 118,
4, 9, 14, 19, 64, 69, 74, 79, 24, 29, 34, 39,
84, 89, 94, 99, 44, 49, 54, 59, 104, 109, 114, 119}));
}
TEST(TransposeTest, TestRefOps4D21) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {3, 1, 2, 0}),
ElementsAreArray({0, 60, 5, 65, 10, 70, 15, 75, 20, 80, 25, 85,
30, 90, 35, 95, 40, 100, 45, 105, 50, 110, 55, 115,
1, 61, 6, 66, 11, 71, 16, 76, 21, 81, 26, 86,
31, 91, 36, 96, 41, 101, 46, 106, 51, 111, 56, 116,
2, 62, 7, 67, 12, 72, 17, 77, 22, 82, 27, 87,
32, 92, 37, 97, 42, 102, 47, 107, 52, 112, 57, 117,
3, 63, 8, 68, 13, 73, 18, 78, 23, 83, 28, 88,
33, 93, 38, 98, 43, 103, 48, 108, 53, 113, 58, 118,
4, 64, 9, 69, 14, 74, 19, 79, 24, 84, 29, 89,
34, 94, 39, 99, 44, 104, 49, 109, 54, 114, 59, 119}));
}
TEST(TransposeTest, TestRefOps4D22) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {3, 2, 0, 1}),
ElementsAreArray(
{0, 20, 40, 60, 80, 100, 5, 25, 45, 65, 85, 105, 10, 30, 50,
70, 90, 110, 15, 35, 55, 75, 95, 115, 1, 21, 41, 61, 81, 101,
6, 26, 46, 66, 86, 106, 11, 31, 51, 71, 91, 111, 16, 36, 56,
76, 96, 116, 2, 22, 42, 62, 82, 102, 7, 27, 47, 67, 87, 107,
12, 32, 52, 72, 92, 112, 17, 37, 57, 77, 97, 117, 3, 23, 43,
63, 83, 103, 8, 28, 48, 68, 88, 108, 13, 33, 53, 73, 93, 113,
18, 38, 58, 78, 98, 118, 4, 24, 44, 64, 84, 104, 9, 29, 49,
69, 89, 109, 14, 34, 54, 74, 94, 114, 19, 39, 59, 79, 99, 119}));
}
TEST(TransposeTest, TestRefOps4D23) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5}, {3, 2, 1, 0}),
ElementsAreArray(
{0, 60, 20, 80, 40, 100, 5, 65, 25, 85, 45, 105, 10, 70, 30,
90, 50, 110, 15, 75, 35, 95, 55, 115, 1, 61, 21, 81, 41, 101,
6, 66, 26, 86, 46, 106, 11, 71, 31, 91, 51, 111, 16, 76, 36,
96, 56, 116, 2, 62, 22, 82, 42, 102, 7, 67, 27, 87, 47, 107,
12, 72, 32, 92, 52, 112, 17, 77, 37, 97, 57, 117, 3, 63, 23,
83, 43, 103, 8, 68, 28, 88, 48, 108, 13, 73, 33, 93, 53, 113,
18, 78, 38, 98, 58, 118, 4, 64, 24, 84, 44, 104, 9, 69, 29,
89, 49, 109, 14, 74, 34, 94, 54, 114, 19, 79, 39, 99, 59, 119}));
}
TEST(TransposeTest, TestRefOps5D0) {
const std::vector<float> ref = [] {
std::vector<float> ref(720);
absl::c_iota(ref, 0);
return ref;
}();
EXPECT_THAT(RunTestPermutation<float>({2, 3, 4, 5, 6},
{0, 1, 2, 3, 4}),
ElementsAreArray(ref));
}
TEST(TransposeTest, TestRefOps5D1) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5, 6},
{4, 3, 2, 1, 0}),
ElementsAreArray(
{0, 360, 120, 480, 240, 600, 30, 390, 150, 510, 270, 630, 60, 420,
180, 540, 300, 660, 90, 450, 210, 570, 330, 690, 6, 366, 126, 486,
246, 606, 36, 396, 156, 516, 276, 636, 66, 426, 186, 546, 306, 666,
96, 456, 216, 576, 336, 696, 12, 372, 132, 492, 252, 612, 42, 402,
162, 522, 282, 642, 72, 432, 192, 552, 312, 672, 102, 462, 222, 582,
342, 702, 18, 378, 138, 498, 258, 618, 48, 408, 168, 528, 288, 648,
78, 438, 198, 558, 318, 678, 108, 468, 228, 588, 348, 708, 24, 384,
144, 504, 264, 624, 54, 414, 174, 534, 294, 654, 84, 444, 204, 564,
324, 684, 114, 474, 234, 594, 354, 714, 1, 361, 121, 481, 241, 601,
31, 391, 151, 511, 271, 631, 61, 421, 181, 541, 301, 661, 91, 451,
211, 571, 331, 691, 7, 367, 127, 487, 247, 607, 37, 397, 157, 517,
277, 637, 67, 427, 187, 547, 307, 667, 97, 457, 217, 577, 337, 697,
13, 373, 133, 493, 253, 613, 43, 403, 163, 523, 283, 643, 73, 433,
193, 553, 313, 673, 103, 463, 223, 583, 343, 703, 19, 379, 139, 499,
259, 619, 49, 409, 169, 529, 289, 649, 79, 439, 199, 559, 319, 679,
109, 469, 229, 589, 349, 709, 25, 385, 145, 505, 265, 625, 55, 415,
175, 535, 295, 655, 85, 445, 205, 565, 325, 685, 115, 475, 235, 595,
355, 715, 2, 362, 122, 482, 242, 602, 32, 392, 152, 512, 272, 632,
62, 422, 182, 542, 302, 662, 92, 452, 212, 572, 332, 692, 8, 368,
128, 488, 248, 608, 38, 398, 158, 518, 278, 638, 68, 428, 188, 548,
308, 668, 98, 458, 218, 578, 338, 698, 14, 374, 134, 494, 254, 614,
44, 404, 164, 524, 284, 644, 74, 434, 194, 554, 314, 674, 104, 464,
224, 584, 344, 704, 20, 380, 140, 500, 260, 620, 50, 410, 170, 530,
290, 650, 80, 440, 200, 560, 320, 680, 110, 470, 230, 590, 350, 710,
26, 386, 146, 506, 266, 626, 56, 416, 176, 536, 296, 656, 86, 446,
206, 566, 326, 686, 116, 476, 236, 596, 356, 716, 3, 363, 123, 483,
243, 603, 33, 393, 153, 513, 273, 633, 63, 423, 183, 543, 303, 663,
93, 453, 213, 573, 333, 693, 9, 369, 129, 489, 249, 609, 39, 399,
159, 519, 279, 639, 69, 429, 189, 549, 309, 669, 99, 459, 219, 579,
339, 699, 15, 375, 135, 495, 255, 615, 45, 405, 165, 525, 285, 645,
75, 435, 195, 555, 315, 675, 105, 465, 225, 585, 345, 705, 21, 381,
141, 501, 261, 621, 51, 411, 171, 531, 291, 651, 81, 441, 201, 561,
321, 681, 111, 471, 231, 591, 351, 711, 27, 387, 147, 507, 267, 627,
57, 417, 177, 537, 297, 657, 87, 447, 207, 567, 327, 687, 117, 477,
237, 597, 357, 717, 4, 364, 124, 484, 244, 604, 34, 394, 154, 514,
274, 634, 64, 424, 184, 544, 304, 664, 94, 454, 214, 574, 334, 694,
10, 370, 130, 490, 250, 610, 40, 400, 160, 520, 280, 640, 70, 430,
190, 550, 310, 670, 100, 460, 220, 580, 340, 700, 16, 376, 136, 496,
256, 616, 46, 406, 166, 526, 286, 646, 76, 436, 196, 556, 316, 676,
106, 466, 226, 586, 346, 706, 22, 382, 142, 502, 262, 622, 52, 412,
172, 532, 292, 652, 82, 442, 202, 562, 322, 682, 112, 472, 232, 592,
352, 712, 28, 388, 148, 508, 268, 628, 58, 418, 178, 538, 298, 658,
88, 448, 208, 568, 328, 688, 118, 478, 238, 598, 358, 718, 5, 365,
125, 485, 245, 605, 35, 395, 155, 515, 275, 635, 65, 425, 185, 545,
305, 665, 95, 455, 215, 575, 335, 695, 11, 371, 131, 491, 251, 611,
41, 401, 161, 521, 281, 641, 71, 431, 191, 551, 311, 671, 101, 461,
221, 581, 341, 701, 17, 377, 137, 497, 257, 617, 47, 407, 167, 527,
287, 647, 77, 437, 197, 557, 317, 677, 107, 467, 227, 587, 347, 707,
23, 383, 143, 503, 263, 623, 53, 413, 173, 533, 293, 653, 83, 443,
203, 563, 323, 683, 113, 473, 233, 593, 353, 713, 29, 389, 149, 509,
269, 629, 59, 419, 179, 539, 299, 659, 89, 449, 209, 569, 329, 689,
119, 479, 239, 599, 359, 719}));
}
TEST(TransposeTest, TestRefOps5D2) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5, 6},
{1, 0, 2, 3, 4}),
ElementsAreArray(
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83,
84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
112, 113, 114, 115, 116, 117, 118, 119, 360, 361, 362, 363, 364, 365,
366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379,
380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393,
394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407,
408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421,
422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435,
436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449,
450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, 463,
464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477,
478, 479, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131,
132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145,
146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173,
174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201,
202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215,
216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229,
230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 480, 481, 482, 483,
484, 485, 486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 496, 497,
498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511,
512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 522, 523, 524, 525,
526, 527, 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 538, 539,
540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553,
554, 555, 556, 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, 567,
568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 581,
582, 583, 584, 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595,
596, 597, 598, 599, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249,
250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263,
264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277,
278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291,
292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305,
306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319,
320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333,
334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347,
348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 600, 601,
602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615,
616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629,
630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, 642, 643,
644, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, 657,
658, 659, 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 670, 671,
672, 673, 674, 675, 676, 677, 678, 679, 680, 681, 682, 683, 684, 685,
686, 687, 688, 689, 690, 691, 692, 693, 694, 695, 696, 697, 698, 699,
700, 701, 702, 703, 704, 705, 706, 707, 708, 709, 710, 711, 712, 713,
714, 715, 716, 717, 718, 719}));
}
TEST(TransposeTest, TestRefOps5D3) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5, 6},
{1, 2, 0, 3, 4}),
ElementsAreArray(
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
28, 29, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371,
372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385,
386, 387, 388, 389, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
54, 55, 56, 57, 58, 59, 390, 391, 392, 393, 394, 395, 396, 397,
398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411,
412, 413, 414, 415, 416, 417, 418, 419, 60, 61, 62, 63, 64, 65,
66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 420, 421, 422, 423,
424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437,
438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 90, 91,
92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105,
106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, 463,
464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477,
478, 479, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131,
132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145,
146, 147, 148, 149, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489,
490, 491, 492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503,
504, 505, 506, 507, 508, 509, 150, 151, 152, 153, 154, 155, 156, 157,
158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171,
172, 173, 174, 175, 176, 177, 178, 179, 510, 511, 512, 513, 514, 515,
516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529,
530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 180, 181, 182, 183,
184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197,
198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 540, 541,
542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555,
556, 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569,
210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223,
224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237,
238, 239, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 581,
582, 583, 584, 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595,
596, 597, 598, 599, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249,
250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263,
264, 265, 266, 267, 268, 269, 600, 601, 602, 603, 604, 605, 606, 607,
608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621,
622, 623, 624, 625, 626, 627, 628, 629, 270, 271, 272, 273, 274, 275,
276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 630, 631, 632, 633,
634, 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647,
648, 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, 300, 301,
302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315,
316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329,
660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 670, 671, 672, 673,
674, 675, 676, 677, 678, 679, 680, 681, 682, 683, 684, 685, 686, 687,
688, 689, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341,
342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355,
356, 357, 358, 359, 690, 691, 692, 693, 694, 695, 696, 697, 698, 699,
700, 701, 702, 703, 704, 705, 706, 707, 708, 709, 710, 711, 712, 713,
714, 715, 716, 717, 718, 719}));
}
TEST(TransposeTest, TestRefOps5D4) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5, 6},
{1, 2, 3, 0, 4}),
ElementsAreArray(
{0, 1, 2, 3, 4, 5, 360, 361, 362, 363, 364, 365, 6, 7,
8, 9, 10, 11, 366, 367, 368, 369, 370, 371, 12, 13, 14, 15,
16, 17, 372, 373, 374, 375, 376, 377, 18, 19, 20, 21, 22, 23,
378, 379, 380, 381, 382, 383, 24, 25, 26, 27, 28, 29, 384, 385,
386, 387, 388, 389, 30, 31, 32, 33, 34, 35, 390, 391, 392, 393,
394, 395, 36, 37, 38, 39, 40, 41, 396, 397, 398, 399, 400, 401,
42, 43, 44, 45, 46, 47, 402, 403, 404, 405, 406, 407, 48, 49,
50, 51, 52, 53, 408, 409, 410, 411, 412, 413, 54, 55, 56, 57,
58, 59, 414, 415, 416, 417, 418, 419, 60, 61, 62, 63, 64, 65,
420, 421, 422, 423, 424, 425, 66, 67, 68, 69, 70, 71, 426, 427,
428, 429, 430, 431, 72, 73, 74, 75, 76, 77, 432, 433, 434, 435,
436, 437, 78, 79, 80, 81, 82, 83, 438, 439, 440, 441, 442, 443,
84, 85, 86, 87, 88, 89, 444, 445, 446, 447, 448, 449, 90, 91,
92, 93, 94, 95, 450, 451, 452, 453, 454, 455, 96, 97, 98, 99,
100, 101, 456, 457, 458, 459, 460, 461, 102, 103, 104, 105, 106, 107,
462, 463, 464, 465, 466, 467, 108, 109, 110, 111, 112, 113, 468, 469,
470, 471, 472, 473, 114, 115, 116, 117, 118, 119, 474, 475, 476, 477,
478, 479, 120, 121, 122, 123, 124, 125, 480, 481, 482, 483, 484, 485,
126, 127, 128, 129, 130, 131, 486, 487, 488, 489, 490, 491, 132, 133,
134, 135, 136, 137, 492, 493, 494, 495, 496, 497, 138, 139, 140, 141,
142, 143, 498, 499, 500, 501, 502, 503, 144, 145, 146, 147, 148, 149,
504, 505, 506, 507, 508, 509, 150, 151, 152, 153, 154, 155, 510, 511,
512, 513, 514, 515, 156, 157, 158, 159, 160, 161, 516, 517, 518, 519,
520, 521, 162, 163, 164, 165, 166, 167, 522, 523, 524, 525, 526, 527,
168, 169, 170, 171, 172, 173, 528, 529, 530, 531, 532, 533, 174, 175,
176, 177, 178, 179, 534, 535, 536, 537, 538, 539, 180, 181, 182, 183,
184, 185, 540, 541, 542, 543, 544, 545, 186, 187, 188, 189, 190, 191,
546, 547, 548, 549, 550, 551, 192, 193, 194, 195, 196, 197, 552, 553,
554, 555, 556, 557, 198, 199, 200, 201, 202, 203, 558, 559, 560, 561,
562, 563, 204, 205, 206, 207, 208, 209, 564, 565, 566, 567, 568, 569,
210, 211, 212, 213, 214, 215, 570, 571, 572, 573, 574, 575, 216, 217,
218, 219, 220, 221, 576, 577, 578, 579, 580, 581, 222, 223, 224, 225,
226, 227, 582, 583, 584, 585, 586, 587, 228, 229, 230, 231, 232, 233,
588, 589, 590, 591, 592, 593, 234, 235, 236, 237, 238, 239, 594, 595,
596, 597, 598, 599, 240, 241, 242, 243, 244, 245, 600, 601, 602, 603,
604, 605, 246, 247, 248, 249, 250, 251, 606, 607, 608, 609, 610, 611,
252, 253, 254, 255, 256, 257, 612, 613, 614, 615, 616, 617, 258, 259,
260, 261, 262, 263, 618, 619, 620, 621, 622, 623, 264, 265, 266, 267,
268, 269, 624, 625, 626, 627, 628, 629, 270, 271, 272, 273, 274, 275,
630, 631, 632, 633, 634, 635, 276, 277, 278, 279, 280, 281, 636, 637,
638, 639, 640, 641, 282, 283, 284, 285, 286, 287, 642, 643, 644, 645,
646, 647, 288, 289, 290, 291, 292, 293, 648, 649, 650, 651, 652, 653,
294, 295, 296, 297, 298, 299, 654, 655, 656, 657, 658, 659, 300, 301,
302, 303, 304, 305, 660, 661, 662, 663, 664, 665, 306, 307, 308, 309,
310, 311, 666, 667, 668, 669, 670, 671, 312, 313, 314, 315, 316, 317,
672, 673, 674, 675, 676, 677, 318, 319, 320, 321, 322, 323, 678, 679,
680, 681, 682, 683, 324, 325, 326, 327, 328, 329, 684, 685, 686, 687,
688, 689, 330, 331, 332, 333, 334, 335, 690, 691, 692, 693, 694, 695,
336, 337, 338, 339, 340, 341, 696, 697, 698, 699, 700, 701, 342, 343,
344, 345, 346, 347, 702, 703, 704, 705, 706, 707, 348, 349, 350, 351,
352, 353, 708, 709, 710, 711, 712, 713, 354, 355, 356, 357, 358, 359,
714, 715, 716, 717, 718, 719}));
}
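
// Rotates the axes of a {2, 3, 4, 5, 6} tensor with permutation {1, 2, 3, 4, 0},
// moving the leading dimension to the back; the expected output interleaves the
// two 360-element halves of the input element by element (0, 360, 1, 361, ...).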
TEST(TransposeTest, TestRefOps5D5) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5, 6},
{1, 2, 3, 4, 0}),
ElementsAreArray(
{0, 360, 1, 361, 2, 362, 3, 363, 4, 364, 5, 365, 6, 366,
7, 367, 8, 368, 9, 369, 10, 370, 11, 371, 12, 372, 13, 373,
14, 374, 15, 375, 16, 376, 17, 377, 18, 378, 19, 379, 20, 380,
21, 381, 22, 382, 23, 383, 24, 384, 25, 385, 26, 386, 27, 387,
28, 388, 29, 389, 30, 390, 31, 391, 32, 392, 33, 393, 34, 394,
35, 395, 36, 396, 37, 397, 38, 398, 39, 399, 40, 400, 41, 401,
42, 402, 43, 403, 44, 404, 45, 405, 46, 406, 47, 407, 48, 408,
49, 409, 50, 410, 51, 411, 52, 412, 53, 413, 54, 414, 55, 415,
56, 416, 57, 417, 58, 418, 59, 419, 60, 420, 61, 421, 62, 422,
63, 423, 64, 424, 65, 425, 66, 426, 67, 427, 68, 428, 69, 429,
70, 430, 71, 431, 72, 432, 73, 433, 74, 434, 75, 435, 76, 436,
77, 437, 78, 438, 79, 439, 80, 440, 81, 441, 82, 442, 83, 443,
84, 444, 85, 445, 86, 446, 87, 447, 88, 448, 89, 449, 90, 450,
91, 451, 92, 452, 93, 453, 94, 454, 95, 455, 96, 456, 97, 457,
98, 458, 99, 459, 100, 460, 101, 461, 102, 462, 103, 463, 104, 464,
105, 465, 106, 466, 107, 467, 108, 468, 109, 469, 110, 470, 111, 471,
112, 472, 113, 473, 114, 474, 115, 475, 116, 476, 117, 477, 118, 478,
119, 479, 120, 480, 121, 481, 122, 482, 123, 483, 124, 484, 125, 485,
126, 486, 127, 487, 128, 488, 129, 489, 130, 490, 131, 491, 132, 492,
133, 493, 134, 494, 135, 495, 136, 496, 137, 497, 138, 498, 139, 499,
140, 500, 141, 501, 142, 502, 143, 503, 144, 504, 145, 505, 146, 506,
147, 507, 148, 508, 149, 509, 150, 510, 151, 511, 152, 512, 153, 513,
154, 514, 155, 515, 156, 516, 157, 517, 158, 518, 159, 519, 160, 520,
161, 521, 162, 522, 163, 523, 164, 524, 165, 525, 166, 526, 167, 527,
168, 528, 169, 529, 170, 530, 171, 531, 172, 532, 173, 533, 174, 534,
175, 535, 176, 536, 177, 537, 178, 538, 179, 539, 180, 540, 181, 541,
182, 542, 183, 543, 184, 544, 185, 545, 186, 546, 187, 547, 188, 548,
189, 549, 190, 550, 191, 551, 192, 552, 193, 553, 194, 554, 195, 555,
196, 556, 197, 557, 198, 558, 199, 559, 200, 560, 201, 561, 202, 562,
203, 563, 204, 564, 205, 565, 206, 566, 207, 567, 208, 568, 209, 569,
210, 570, 211, 571, 212, 572, 213, 573, 214, 574, 215, 575, 216, 576,
217, 577, 218, 578, 219, 579, 220, 580, 221, 581, 222, 582, 223, 583,
224, 584, 225, 585, 226, 586, 227, 587, 228, 588, 229, 589, 230, 590,
231, 591, 232, 592, 233, 593, 234, 594, 235, 595, 236, 596, 237, 597,
238, 598, 239, 599, 240, 600, 241, 601, 242, 602, 243, 603, 244, 604,
245, 605, 246, 606, 247, 607, 248, 608, 249, 609, 250, 610, 251, 611,
252, 612, 253, 613, 254, 614, 255, 615, 256, 616, 257, 617, 258, 618,
259, 619, 260, 620, 261, 621, 262, 622, 263, 623, 264, 624, 265, 625,
266, 626, 267, 627, 268, 628, 269, 629, 270, 630, 271, 631, 272, 632,
273, 633, 274, 634, 275, 635, 276, 636, 277, 637, 278, 638, 279, 639,
280, 640, 281, 641, 282, 642, 283, 643, 284, 644, 285, 645, 286, 646,
287, 647, 288, 648, 289, 649, 290, 650, 291, 651, 292, 652, 293, 653,
294, 654, 295, 655, 296, 656, 297, 657, 298, 658, 299, 659, 300, 660,
301, 661, 302, 662, 303, 663, 304, 664, 305, 665, 306, 666, 307, 667,
308, 668, 309, 669, 310, 670, 311, 671, 312, 672, 313, 673, 314, 674,
315, 675, 316, 676, 317, 677, 318, 678, 319, 679, 320, 680, 321, 681,
322, 682, 323, 683, 324, 684, 325, 685, 326, 686, 327, 687, 328, 688,
329, 689, 330, 690, 331, 691, 332, 692, 333, 693, 334, 694, 335, 695,
336, 696, 337, 697, 338, 698, 339, 699, 340, 700, 341, 701, 342, 702,
343, 703, 344, 704, 345, 705, 346, 706, 347, 707, 348, 708, 349, 709,
350, 710, 351, 711, 352, 712, 353, 713, 354, 714, 355, 715, 356, 716,
357, 717, 358, 718, 359, 719}));
}
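
// Identity permutation on a 6-D {2, 3, 4, 5, 6, 7} tensor: the output must match
// the input, i.e. the values 0..5039 in order.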
TEST(TransposeTest, TestRefOps6D0) {
const std::vector<float> ref = [] {
std::vector<float> ref(5040);
absl::c_iota(ref, 0);
return ref;
}();
EXPECT_THAT(RunTestPermutation<float>({2, 3, 4, 5, 6, 7},
{0, 1, 2, 3, 4, 5}),
ElementsAreArray(ref));
}
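
// Reverses all six axes ({5, 4, 3, 2, 1, 0}) of a {2, 3, 4, 5, 6, 7} tensor and
// checks the fully transposed result.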
TEST(TransposeTest, TestRefOps6D1) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5, 6, 7},
{5, 4, 3, 2, 1, 0}),
ElementsAreArray(
{0, 2520, 840, 3360, 1680, 4200, 210, 2730, 1050, 3570, 1890,
4410, 420, 2940, 1260, 3780, 2100, 4620, 630, 3150, 1470, 3990,
2310, 4830, 42, 2562, 882, 3402, 1722, 4242, 252, 2772, 1092,
3612, 1932, 4452, 462, 2982, 1302, 3822, 2142, 4662, 672, 3192,
1512, 4032, 2352, 4872, 84, 2604, 924, 3444, 1764, 4284, 294,
2814, 1134, 3654, 1974, 4494, 504, 3024, 1344, 3864, 2184, 4704,
714, 3234, 1554, 4074, 2394, 4914, 126, 2646, 966, 3486, 1806,
4326, 336, 2856, 1176, 3696, 2016, 4536, 546, 3066, 1386, 3906,
2226, 4746, 756, 3276, 1596, 4116, 2436, 4956, 168, 2688, 1008,
3528, 1848, 4368, 378, 2898, 1218, 3738, 2058, 4578, 588, 3108,
1428, 3948, 2268, 4788, 798, 3318, 1638, 4158, 2478, 4998, 7,
2527, 847, 3367, 1687, 4207, 217, 2737, 1057, 3577, 1897, 4417,
427, 2947, 1267, 3787, 2107, 4627, 637, 3157, 1477, 3997, 2317,
4837, 49, 2569, 889, 3409, 1729, 4249, 259, 2779, 1099, 3619,
1939, 4459, 469, 2989, 1309, 3829, 2149, 4669, 679, 3199, 1519,
4039, 2359, 4879, 91, 2611, 931, 3451, 1771, 4291, 301, 2821,
1141, 3661, 1981, 4501, 511, 3031, 1351, 3871, 2191, 4711, 721,
3241, 1561, 4081, 2401, 4921, 133, 2653, 973, 3493, 1813, 4333,
343, 2863, 1183, 3703, 2023, 4543, 553, 3073, 1393, 3913, 2233,
4753, 763, 3283, 1603, 4123, 2443, 4963, 175, 2695, 1015, 3535,
1855, 4375, 385, 2905, 1225, 3745, 2065, 4585, 595, 3115, 1435,
3955, 2275, 4795, 805, 3325, 1645, 4165, 2485, 5005, 14, 2534,
854, 3374, 1694, 4214, 224, 2744, 1064, 3584, 1904, 4424, 434,
2954, 1274, 3794, 2114, 4634, 644, 3164, 1484, 4004, 2324, 4844,
56, 2576, 896, 3416, 1736, 4256, 266, 2786, 1106, 3626, 1946,
4466, 476, 2996, 1316, 3836, 2156, 4676, 686, 3206, 1526, 4046,
2366, 4886, 98, 2618, 938, 3458, 1778, 4298, 308, 2828, 1148,
3668, 1988, 4508, 518, 3038, 1358, 3878, 2198, 4718, 728, 3248,
1568, 4088, 2408, 4928, 140, 2660, 980, 3500, 1820, 4340, 350,
2870, 1190, 3710, 2030, 4550, 560, 3080, 1400, 3920, 2240, 4760,
770, 3290, 1610, 4130, 2450, 4970, 182, 2702, 1022, 3542, 1862,
4382, 392, 2912, 1232, 3752, 2072, 4592, 602, 3122, 1442, 3962,
2282, 4802, 812, 3332, 1652, 4172, 2492, 5012, 21, 2541, 861,
3381, 1701, 4221, 231, 2751, 1071, 3591, 1911, 4431, 441, 2961,
1281, 3801, 2121, 4641, 651, 3171, 1491, 4011, 2331, 4851, 63,
2583, 903, 3423, 1743, 4263, 273, 2793, 1113, 3633, 1953, 4473,
483, 3003, 1323, 3843, 2163, 4683, 693, 3213, 1533, 4053, 2373,
4893, 105, 2625, 945, 3465, 1785, 4305, 315, 2835, 1155, 3675,
1995, 4515, 525, 3045, 1365, 3885, 2205, 4725, 735, 3255, 1575,
4095, 2415, 4935, 147, 2667, 987, 3507, 1827, 4347, 357, 2877,
1197, 3717, 2037, 4557, 567, 3087, 1407, 3927, 2247, 4767, 777,
3297, 1617, 4137, 2457, 4977, 189, 2709, 1029, 3549, 1869, 4389,
399, 2919, 1239, 3759, 2079, 4599, 609, 3129, 1449, 3969, 2289,
4809, 819, 3339, 1659, 4179, 2499, 5019, 28, 2548, 868, 3388,
1708, 4228, 238, 2758, 1078, 3598, 1918, 4438, 448, 2968, 1288,
3808, 2128, 4648, 658, 3178, 1498, 4018, 2338, 4858, 70, 2590,
910, 3430, 1750, 4270, 280, 2800, 1120, 3640, 1960, 4480, 490,
3010, 1330, 3850, 2170, 4690, 700, 3220, 1540, 4060, 2380, 4900,
112, 2632, 952, 3472, 1792, 4312, 322, 2842, 1162, 3682, 2002,
4522, 532, 3052, 1372, 3892, 2212, 4732, 742, 3262, 1582, 4102,
2422, 4942, 154, 2674, 994, 3514, 1834, 4354, 364, 2884, 1204,
3724, 2044, 4564, 574, 3094, 1414, 3934, 2254, 4774, 784, 3304,
1624, 4144, 2464, 4984, 196, 2716, 1036, 3556, 1876, 4396, 406,
2926, 1246, 3766, 2086, 4606, 616, 3136, 1456, 3976, 2296, 4816,
826, 3346, 1666, 4186, 2506, 5026, 35, 2555, 875, 3395, 1715,
4235, 245, 2765, 1085, 3605, 1925, 4445, 455, 2975, 1295, 3815,
2135, 4655, 665, 3185, 1505, 4025, 2345, 4865, 77, 2597, 917,
3437, 1757, 4277, 287, 2807, 1127, 3647, 1967, 4487, 497, 3017,
1337, 3857, 2177, 4697, 707, 3227, 1547, 4067, 2387, 4907, 119,
2639, 959, 3479, 1799, 4319, 329, 2849, 1169, 3689, 2009, 4529,
539, 3059, 1379, 3899, 2219, 4739, 749, 3269, 1589, 4109, 2429,
4949, 161, 2681, 1001, 3521, 1841, 4361, 371, 2891, 1211, 3731,
2051, 4571, 581, 3101, 1421, 3941, 2261, 4781, 791, 3311, 1631,
4151, 2471, 4991, 203, 2723, 1043, 3563, 1883, 4403, 413, 2933,
1253, 3773, 2093, 4613, 623, 3143, 1463, 3983, 2303, 4823, 833,
3353, 1673, 4193, 2513, 5033, 1, 2521, 841, 3361, 1681, 4201,
211, 2731, 1051, 3571, 1891, 4411, 421, 2941, 1261, 3781, 2101,
4621, 631, 3151, 1471, 3991, 2311, 4831, 43, 2563, 883, 3403,
1723, 4243, 253, 2773, 1093, 3613, 1933, 4453, 463, 2983, 1303,
3823, 2143, 4663, 673, 3193, 1513, 4033, 2353, 4873, 85, 2605,
925, 3445, 1765, 4285, 295, 2815, 1135, 3655, 1975, 4495, 505,
3025, 1345, 3865, 2185, 4705, 715, 3235, 1555, 4075, 2395, 4915,
127, 2647, 967, 3487, 1807, 4327, 337, 2857, 1177, 3697, 2017,
4537, 547, 3067, 1387, 3907, 2227, 4747, 757, 3277, 1597, 4117,
2437, 4957, 169, 2689, 1009, 3529, 1849, 4369, 379, 2899, 1219,
3739, 2059, 4579, 589, 3109, 1429, 3949, 2269, 4789, 799, 3319,
1639, 4159, 2479, 4999, 8, 2528, 848, 3368, 1688, 4208, 218,
2738, 1058, 3578, 1898, 4418, 428, 2948, 1268, 3788, 2108, 4628,
638, 3158, 1478, 3998, 2318, 4838, 50, 2570, 890, 3410, 1730,
4250, 260, 2780, 1100, 3620, 1940, 4460, 470, 2990, 1310, 3830,
2150, 4670, 680, 3200, 1520, 4040, 2360, 4880, 92, 2612, 932,
3452, 1772, 4292, 302, 2822, 1142, 3662, 1982, 4502, 512, 3032,
1352, 3872, 2192, 4712, 722, 3242, 1562, 4082, 2402, 4922, 134,
2654, 974, 3494, 1814, 4334, 344, 2864, 1184, 3704, 2024, 4544,
554, 3074, 1394, 3914, 2234, 4754, 764, 3284, 1604, 4124, 2444,
4964, 176, 2696, 1016, 3536, 1856, 4376, 386, 2906, 1226, 3746,
2066, 4586, 596, 3116, 1436, 3956, 2276, 4796, 806, 3326, 1646,
4166, 2486, 5006, 15, 2535, 855, 3375, 1695, 4215, 225, 2745,
1065, 3585, 1905, 4425, 435, 2955, 1275, 3795, 2115, 4635, 645,
3165, 1485, 4005, 2325, 4845, 57, 2577, 897, 3417, 1737, 4257,
267, 2787, 1107, 3627, 1947, 4467, 477, 2997, 1317, 3837, 2157,
4677, 687, 3207, 1527, 4047, 2367, 4887, 99, 2619, 939, 3459,
1779, 4299, 309, 2829, 1149, 3669, 1989, 4509, 519, 3039, 1359,
3879, 2199, 4719, 729, 3249, 1569, 4089, 2409, 4929, 141, 2661,
981, 3501, 1821, 4341, 351, 2871, 1191, 3711, 2031, 4551, 561,
3081, 1401, 3921, 2241, 4761, 771, 3291, 1611, 4131, 2451, 4971,
183, 2703, 1023, 3543, 1863, 4383, 393, 2913, 1233, 3753, 2073,
4593, 603, 3123, 1443, 3963, 2283, 4803, 813, 3333, 1653, 4173,
2493, 5013, 22, 2542, 862, 3382, 1702, 4222, 232, 2752, 1072,
3592, 1912, 4432, 442, 2962, 1282, 3802, 2122, 4642, 652, 3172,
1492, 4012, 2332, 4852, 64, 2584, 904, 3424, 1744, 4264, 274,
2794, 1114, 3634, 1954, 4474, 484, 3004, 1324, 3844, 2164, 4684,
694, 3214, 1534, 4054, 2374, 4894, 106, 2626, 946, 3466, 1786,
4306, 316, 2836, 1156, 3676, 1996, 4516, 526, 3046, 1366, 3886,
2206, 4726, 736, 3256, 1576, 4096, 2416, 4936, 148, 2668, 988,
3508, 1828, 4348, 358, 2878, 1198, 3718, 2038, 4558, 568, 3088,
1408, 3928, 2248, 4768, 778, 3298, 1618, 4138, 2458, 4978, 190,
2710, 1030, 3550, 1870, 4390, 400, 2920, 1240, 3760, 2080, 4600,
610, 3130, 1450, 3970, 2290, 4810, 820, 3340, 1660, 4180, 2500,
5020, 29, 2549, 869, 3389, 1709, 4229, 239, 2759, 1079, 3599,
1919, 4439, 449, 2969, 1289, 3809, 2129, 4649, 659, 3179, 1499,
4019, 2339, 4859, 71, 2591, 911, 3431, 1751, 4271, 281, 2801,
1121, 3641, 1961, 4481, 491, 3011, 1331, 3851, 2171, 4691, 701,
3221, 1541, 4061, 2381, 4901, 113, 2633, 953, 3473, 1793, 4313,
323, 2843, 1163, 3683, 2003, 4523, 533, 3053, 1373, 3893, 2213,
4733, 743, 3263, 1583, 4103, 2423, 4943, 155, 2675, 995, 3515,
1835, 4355, 365, 2885, 1205, 3725, 2045, 4565, 575, 3095, 1415,
3935, 2255, 4775, 785, 3305, 1625, 4145, 2465, 4985, 197, 2717,
1037, 3557, 1877, 4397, 407, 2927, 1247, 3767, 2087, 4607, 617,
3137, 1457, 3977, 2297, 4817, 827, 3347, 1667, 4187, 2507, 5027,
36, 2556, 876, 3396, 1716, 4236, 246, 2766, 1086, 3606, 1926,
4446, 456, 2976, 1296, 3816, 2136, 4656, 666, 3186, 1506, 4026,
2346, 4866, 78, 2598, 918, 3438, 1758, 4278, 288, 2808, 1128,
3648, 1968, 4488, 498, 3018, 1338, 3858, 2178, 4698, 708, 3228,
1548, 4068, 2388, 4908, 120, 2640, 960, 3480, 1800, 4320, 330,
2850, 1170, 3690, 2010, 4530, 540, 3060, 1380, 3900, 2220, 4740,
750, 3270, 1590, 4110, 2430, 4950, 162, 2682, 1002, 3522, 1842,
4362, 372, 2892, 1212, 3732, 2052, 4572, 582, 3102, 1422, 3942,
2262, 4782, 792, 3312, 1632, 4152, 2472, 4992, 204, 2724, 1044,
3564, 1884, 4404, 414, 2934, 1254, 3774, 2094, 4614, 624, 3144,
1464, 3984, 2304, 4824, 834, 3354, 1674, 4194, 2514, 5034, 2,
2522, 842, 3362, 1682, 4202, 212, 2732, 1052, 3572, 1892, 4412,
422, 2942, 1262, 3782, 2102, 4622, 632, 3152, 1472, 3992, 2312,
4832, 44, 2564, 884, 3404, 1724, 4244, 254, 2774, 1094, 3614,
1934, 4454, 464, 2984, 1304, 3824, 2144, 4664, 674, 3194, 1514,
4034, 2354, 4874, 86, 2606, 926, 3446, 1766, 4286, 296, 2816,
1136, 3656, 1976, 4496, 506, 3026, 1346, 3866, 2186, 4706, 716,
3236, 1556, 4076, 2396, 4916, 128, 2648, 968, 3488, 1808, 4328,
338, 2858, 1178, 3698, 2018, 4538, 548, 3068, 1388, 3908, 2228,
4748, 758, 3278, 1598, 4118, 2438, 4958, 170, 2690, 1010, 3530,
1850, 4370, 380, 2900, 1220, 3740, 2060, 4580, 590, 3110, 1430,
3950, 2270, 4790, 800, 3320, 1640, 4160, 2480, 5000, 9, 2529,
849, 3369, 1689, 4209, 219, 2739, 1059, 3579, 1899, 4419, 429,
2949, 1269, 3789, 2109, 4629, 639, 3159, 1479, 3999, 2319, 4839,
51, 2571, 891, 3411, 1731, 4251, 261, 2781, 1101, 3621, 1941,
4461, 471, 2991, 1311, 3831, 2151, 4671, 681, 3201, 1521, 4041,
2361, 4881, 93, 2613, 933, 3453, 1773, 4293, 303, 2823, 1143,
3663, 1983, 4503, 513, 3033, 1353, 3873, 2193, 4713, 723, 3243,
1563, 4083, 2403, 4923, 135, 2655, 975, 3495, 1815, 4335, 345,
2865, 1185, 3705, 2025, 4545, 555, 3075, 1395, 3915, 2235, 4755,
765, 3285, 1605, 4125, 2445, 4965, 177, 2697, 1017, 3537, 1857,
4377, 387, 2907, 1227, 3747, 2067, 4587, 597, 3117, 1437, 3957,
2277, 4797, 807, 3327, 1647, 4167, 2487, 5007, 16, 2536, 856,
3376, 1696, 4216, 226, 2746, 1066, 3586, 1906, 4426, 436, 2956,
1276, 3796, 2116, 4636, 646, 3166, 1486, 4006, 2326, 4846, 58,
2578, 898, 3418, 1738, 4258, 268, 2788, 1108, 3628, 1948, 4468,
478, 2998, 1318, 3838, 2158, 4678, 688, 3208, 1528, 4048, 2368,
4888, 100, 2620, 940, 3460, 1780, 4300, 310, 2830, 1150, 3670,
1990, 4510, 520, 3040, 1360, 3880, 2200, 4720, 730, 3250, 1570,
4090, 2410, 4930, 142, 2662, 982, 3502, 1822, 4342, 352, 2872,
1192, 3712, 2032, 4552, 562, 3082, 1402, 3922, 2242, 4762, 772,
3292, 1612, 4132, 2452, 4972, 184, 2704, 1024, 3544, 1864, 4384,
394, 2914, 1234, 3754, 2074, 4594, 604, 3124, 1444, 3964, 2284,
4804, 814, 3334, 1654, 4174, 2494, 5014, 23, 2543, 863, 3383,
1703, 4223, 233, 2753, 1073, 3593, 1913, 4433, 443, 2963, 1283,
3803, 2123, 4643, 653, 3173, 1493, 4013, 2333, 4853, 65, 2585,
905, 3425, 1745, 4265, 275, 2795, 1115, 3635, 1955, 4475, 485,
3005, 1325, 3845, 2165, 4685, 695, 3215, 1535, 4055, 2375, 4895,
107, 2627, 947, 3467, 1787, 4307, 317, 2837, 1157, 3677, 1997,
4517, 527, 3047, 1367, 3887, 2207, 4727, 737, 3257, 1577, 4097,
2417, 4937, 149, 2669, 989, 3509, 1829, 4349, 359, 2879, 1199,
3719, 2039, 4559, 569, 3089, 1409, 3929, 2249, 4769, 779, 3299,
1619, 4139, 2459, 4979, 191, 2711, 1031, 3551, 1871, 4391, 401,
2921, 1241, 3761, 2081, 4601, 611, 3131, 1451, 3971, 2291, 4811,
821, 3341, 1661, 4181, 2501, 5021, 30, 2550, 870, 3390, 1710,
4230, 240, 2760, 1080, 3600, 1920, 4440, 450, 2970, 1290, 3810,
2130, 4650, 660, 3180, 1500, 4020, 2340, 4860, 72, 2592, 912,
3432, 1752, 4272, 282, 2802, 1122, 3642, 1962, 4482, 492, 3012,
1332, 3852, 2172, 4692, 702, 3222, 1542, 4062, 2382, 4902, 114,
2634, 954, 3474, 1794, 4314, 324, 2844, 1164, 3684, 2004, 4524,
534, 3054, 1374, 3894, 2214, 4734, 744, 3264, 1584, 4104, 2424,
4944, 156, 2676, 996, 3516, 1836, 4356, 366, 2886, 1206, 3726,
2046, 4566, 576, 3096, 1416, 3936, 2256, 4776, 786, 3306, 1626,
4146, 2466, 4986, 198, 2718, 1038, 3558, 1878, 4398, 408, 2928,
1248, 3768, 2088, 4608, 618, 3138, 1458, 3978, 2298, 4818, 828,
3348, 1668, 4188, 2508, 5028, 37, 2557, 877, 3397, 1717, 4237,
247, 2767, 1087, 3607, 1927, 4447, 457, 2977, 1297, 3817, 2137,
4657, 667, 3187, 1507, 4027, 2347, 4867, 79, 2599, 919, 3439,
1759, 4279, 289, 2809, 1129, 3649, 1969, 4489, 499, 3019, 1339,
3859, 2179, 4699, 709, 3229, 1549, 4069, 2389, 4909, 121, 2641,
961, 3481, 1801, 4321, 331, 2851, 1171, 3691, 2011, 4531, 541,
3061, 1381, 3901, 2221, 4741, 751, 3271, 1591, 4111, 2431, 4951,
163, 2683, 1003, 3523, 1843, 4363, 373, 2893, 1213, 3733, 2053,
4573, 583, 3103, 1423, 3943, 2263, 4783, 793, 3313, 1633, 4153,
2473, 4993, 205, 2725, 1045, 3565, 1885, 4405, 415, 2935, 1255,
3775, 2095, 4615, 625, 3145, 1465, 3985, 2305, 4825, 835, 3355,
1675, 4195, 2515, 5035, 3, 2523, 843, 3363, 1683, 4203, 213,
2733, 1053, 3573, 1893, 4413, 423, 2943, 1263, 3783, 2103, 4623,
633, 3153, 1473, 3993, 2313, 4833, 45, 2565, 885, 3405, 1725,
4245, 255, 2775, 1095, 3615, 1935, 4455, 465, 2985, 1305, 3825,
2145, 4665, 675, 3195, 1515, 4035, 2355, 4875, 87, 2607, 927,
3447, 1767, 4287, 297, 2817, 1137, 3657, 1977, 4497, 507, 3027,
1347, 3867, 2187, 4707, 717, 3237, 1557, 4077, 2397, 4917, 129,
2649, 969, 3489, 1809, 4329, 339, 2859, 1179, 3699, 2019, 4539,
549, 3069, 1389, 3909, 2229, 4749, 759, 3279, 1599, 4119, 2439,
4959, 171, 2691, 1011, 3531, 1851, 4371, 381, 2901, 1221, 3741,
2061, 4581, 591, 3111, 1431, 3951, 2271, 4791, 801, 3321, 1641,
4161, 2481, 5001, 10, 2530, 850, 3370, 1690, 4210, 220, 2740,
1060, 3580, 1900, 4420, 430, 2950, 1270, 3790, 2110, 4630, 640,
3160, 1480, 4000, 2320, 4840, 52, 2572, 892, 3412, 1732, 4252,
262, 2782, 1102, 3622, 1942, 4462, 472, 2992, 1312, 3832, 2152,
4672, 682, 3202, 1522, 4042, 2362, 4882, 94, 2614, 934, 3454,
1774, 4294, 304, 2824, 1144, 3664, 1984, 4504, 514, 3034, 1354,
3874, 2194, 4714, 724, 3244, 1564, 4084, 2404, 4924, 136, 2656,
976, 3496, 1816, 4336, 346, 2866, 1186, 3706, 2026, 4546, 556,
3076, 1396, 3916, 2236, 4756, 766, 3286, 1606, 4126, 2446, 4966,
178, 2698, 1018, 3538, 1858, 4378, 388, 2908, 1228, 3748, 2068,
4588, 598, 3118, 1438, 3958, 2278, 4798, 808, 3328, 1648, 4168,
2488, 5008, 17, 2537, 857, 3377, 1697, 4217, 227, 2747, 1067,
3587, 1907, 4427, 437, 2957, 1277, 3797, 2117, 4637, 647, 3167,
1487, 4007, 2327, 4847, 59, 2579, 899, 3419, 1739, 4259, 269,
2789, 1109, 3629, 1949, 4469, 479, 2999, 1319, 3839, 2159, 4679,
689, 3209, 1529, 4049, 2369, 4889, 101, 2621, 941, 3461, 1781,
4301, 311, 2831, 1151, 3671, 1991, 4511, 521, 3041, 1361, 3881,
2201, 4721, 731, 3251, 1571, 4091, 2411, 4931, 143, 2663, 983,
3503, 1823, 4343, 353, 2873, 1193, 3713, 2033, 4553, 563, 3083,
1403, 3923, 2243, 4763, 773, 3293, 1613, 4133, 2453, 4973, 185,
2705, 1025, 3545, 1865, 4385, 395, 2915, 1235, 3755, 2075, 4595,
605, 3125, 1445, 3965, 2285, 4805, 815, 3335, 1655, 4175, 2495,
5015, 24, 2544, 864, 3384, 1704, 4224, 234, 2754, 1074, 3594,
1914, 4434, 444, 2964, 1284, 3804, 2124, 4644, 654, 3174, 1494,
4014, 2334, 4854, 66, 2586, 906, 3426, 1746, 4266, 276, 2796,
1116, 3636, 1956, 4476, 486, 3006, 1326, 3846, 2166, 4686, 696,
3216, 1536, 4056, 2376, 4896, 108, 2628, 948, 3468, 1788, 4308,
318, 2838, 1158, 3678, 1998, 4518, 528, 3048, 1368, 3888, 2208,
4728, 738, 3258, 1578, 4098, 2418, 4938, 150, 2670, 990, 3510,
1830, 4350, 360, 2880, 1200, 3720, 2040, 4560, 570, 3090, 1410,
3930, 2250, 4770, 780, 3300, 1620, 4140, 2460, 4980, 192, 2712,
1032, 3552, 1872, 4392, 402, 2922, 1242, 3762, 2082, 4602, 612,
3132, 1452, 3972, 2292, 4812, 822, 3342, 1662, 4182, 2502, 5022,
31, 2551, 871, 3391, 1711, 4231, 241, 2761, 1081, 3601, 1921,
4441, 451, 2971, 1291, 3811, 2131, 4651, 661, 3181, 1501, 4021,
2341, 4861, 73, 2593, 913, 3433, 1753, 4273, 283, 2803, 1123,
3643, 1963, 4483, 493, 3013, 1333, 3853, 2173, 4693, 703, 3223,
1543, 4063, 2383, 4903, 115, 2635, 955, 3475, 1795, 4315, 325,
2845, 1165, 3685, 2005, 4525, 535, 3055, 1375, 3895, 2215, 4735,
745, 3265, 1585, 4105, 2425, 4945, 157, 2677, 997, 3517, 1837,
4357, 367, 2887, 1207, 3727, 2047, 4567, 577, 3097, 1417, 3937,
2257, 4777, 787, 3307, 1627, 4147, 2467, 4987, 199, 2719, 1039,
3559, 1879, 4399, 409, 2929, 1249, 3769, 2089, 4609, 619, 3139,
1459, 3979, 2299, 4819, 829, 3349, 1669, 4189, 2509, 5029, 38,
2558, 878, 3398, 1718, 4238, 248, 2768, 1088, 3608, 1928, 4448,
458, 2978, 1298, 3818, 2138, 4658, 668, 3188, 1508, 4028, 2348,
4868, 80, 2600, 920, 3440, 1760, 4280, 290, 2810, 1130, 3650,
1970, 4490, 500, 3020, 1340, 3860, 2180, 4700, 710, 3230, 1550,
4070, 2390, 4910, 122, 2642, 962, 3482, 1802, 4322, 332, 2852,
1172, 3692, 2012, 4532, 542, 3062, 1382, 3902, 2222, 4742, 752,
3272, 1592, 4112, 2432, 4952, 164, 2684, 1004, 3524, 1844, 4364,
374, 2894, 1214, 3734, 2054, 4574, 584, 3104, 1424, 3944, 2264,
4784, 794, 3314, 1634, 4154, 2474, 4994, 206, 2726, 1046, 3566,
1886, 4406, 416, 2936, 1256, 3776, 2096, 4616, 626, 3146, 1466,
3986, 2306, 4826, 836, 3356, 1676, 4196, 2516, 5036, 4, 2524,
844, 3364, 1684, 4204, 214, 2734, 1054, 3574, 1894, 4414, 424,
2944, 1264, 3784, 2104, 4624, 634, 3154, 1474, 3994, 2314, 4834,
46, 2566, 886, 3406, 1726, 4246, 256, 2776, 1096, 3616, 1936,
4456, 466, 2986, 1306, 3826, 2146, 4666, 676, 3196, 1516, 4036,
2356, 4876, 88, 2608, 928, 3448, 1768, 4288, 298, 2818, 1138,
3658, 1978, 4498, 508, 3028, 1348, 3868, 2188, 4708, 718, 3238,
1558, 4078, 2398, 4918, 130, 2650, 970, 3490, 1810, 4330, 340,
2860, 1180, 3700, 2020, 4540, 550, 3070, 1390, 3910, 2230, 4750,
760, 3280, 1600, 4120, 2440, 4960, 172, 2692, 1012, 3532, 1852,
4372, 382, 2902, 1222, 3742, 2062, 4582, 592, 3112, 1432, 3952,
2272, 4792, 802, 3322, 1642, 4162, 2482, 5002, 11, 2531, 851,
3371, 1691, 4211, 221, 2741, 1061, 3581, 1901, 4421, 431, 2951,
1271, 3791, 2111, 4631, 641, 3161, 1481, 4001, 2321, 4841, 53,
2573, 893, 3413, 1733, 4253, 263, 2783, 1103, 3623, 1943, 4463,
473, 2993, 1313, 3833, 2153, 4673, 683, 3203, 1523, 4043, 2363,
4883, 95, 2615, 935, 3455, 1775, 4295, 305, 2825, 1145, 3665,
1985, 4505, 515, 3035, 1355, 3875, 2195, 4715, 725, 3245, 1565,
4085, 2405, 4925, 137, 2657, 977, 3497, 1817, 4337, 347, 2867,
1187, 3707, 2027, 4547, 557, 3077, 1397, 3917, 2237, 4757, 767,
3287, 1607, 4127, 2447, 4967, 179, 2699, 1019, 3539, 1859, 4379,
389, 2909, 1229, 3749, 2069, 4589, 599, 3119, 1439, 3959, 2279,
4799, 809, 3329, 1649, 4169, 2489, 5009, 18, 2538, 858, 3378,
1698, 4218, 228, 2748, 1068, 3588, 1908, 4428, 438, 2958, 1278,
3798, 2118, 4638, 648, 3168, 1488, 4008, 2328, 4848, 60, 2580,
900, 3420, 1740, 4260, 270, 2790, 1110, 3630, 1950, 4470, 480,
3000, 1320, 3840, 2160, 4680, 690, 3210, 1530, 4050, 2370, 4890,
102, 2622, 942, 3462, 1782, 4302, 312, 2832, 1152, 3672, 1992,
4512, 522, 3042, 1362, 3882, 2202, 4722, 732, 3252, 1572, 4092,
2412, 4932, 144, 2664, 984, 3504, 1824, 4344, 354, 2874, 1194,
3714, 2034, 4554, 564, 3084, 1404, 3924, 2244, 4764, 774, 3294,
1614, 4134, 2454, 4974, 186, 2706, 1026, 3546, 1866, 4386, 396,
2916, 1236, 3756, 2076, 4596, 606, 3126, 1446, 3966, 2286, 4806,
816, 3336, 1656, 4176, 2496, 5016, 25, 2545, 865, 3385, 1705,
4225, 235, 2755, 1075, 3595, 1915, 4435, 445, 2965, 1285, 3805,
2125, 4645, 655, 3175, 1495, 4015, 2335, 4855, 67, 2587, 907,
3427, 1747, 4267, 277, 2797, 1117, 3637, 1957, 4477, 487, 3007,
1327, 3847, 2167, 4687, 697, 3217, 1537, 4057, 2377, 4897, 109,
2629, 949, 3469, 1789, 4309, 319, 2839, 1159, 3679, 1999, 4519,
529, 3049, 1369, 3889, 2209, 4729, 739, 3259, 1579, 4099, 2419,
4939, 151, 2671, 991, 3511, 1831, 4351, 361, 2881, 1201, 3721,
2041, 4561, 571, 3091, 1411, 3931, 2251, 4771, 781, 3301, 1621,
4141, 2461, 4981, 193, 2713, 1033, 3553, 1873, 4393, 403, 2923,
1243, 3763, 2083, 4603, 613, 3133, 1453, 3973, 2293, 4813, 823,
3343, 1663, 4183, 2503, 5023, 32, 2552, 872, 3392, 1712, 4232,
242, 2762, 1082, 3602, 1922, 4442, 452, 2972, 1292, 3812, 2132,
4652, 662, 3182, 1502, 4022, 2342, 4862, 74, 2594, 914, 3434,
1754, 4274, 284, 2804, 1124, 3644, 1964, 4484, 494, 3014, 1334,
3854, 2174, 4694, 704, 3224, 1544, 4064, 2384, 4904, 116, 2636,
956, 3476, 1796, 4316, 326, 2846, 1166, 3686, 2006, 4526, 536,
3056, 1376, 3896, 2216, 4736, 746, 3266, 1586, 4106, 2426, 4946,
158, 2678, 998, 3518, 1838, 4358, 368, 2888, 1208, 3728, 2048,
4568, 578, 3098, 1418, 3938, 2258, 4778, 788, 3308, 1628, 4148,
2468, 4988, 200, 2720, 1040, 3560, 1880, 4400, 410, 2930, 1250,
3770, 2090, 4610, 620, 3140, 1460, 3980, 2300, 4820, 830, 3350,
1670, 4190, 2510, 5030, 39, 2559, 879, 3399, 1719, 4239, 249,
2769, 1089, 3609, 1929, 4449, 459, 2979, 1299, 3819, 2139, 4659,
669, 3189, 1509, 4029, 2349, 4869, 81, 2601, 921, 3441, 1761,
4281, 291, 2811, 1131, 3651, 1971, 4491, 501, 3021, 1341, 3861,
2181, 4701, 711, 3231, 1551, 4071, 2391, 4911, 123, 2643, 963,
3483, 1803, 4323, 333, 2853, 1173, 3693, 2013, 4533, 543, 3063,
1383, 3903, 2223, 4743, 753, 3273, 1593, 4113, 2433, 4953, 165,
2685, 1005, 3525, 1845, 4365, 375, 2895, 1215, 3735, 2055, 4575,
585, 3105, 1425, 3945, 2265, 4785, 795, 3315, 1635, 4155, 2475,
4995, 207, 2727, 1047, 3567, 1887, 4407, 417, 2937, 1257, 3777,
2097, 4617, 627, 3147, 1467, 3987, 2307, 4827, 837, 3357, 1677,
4197, 2517, 5037, 5, 2525, 845, 3365, 1685, 4205, 215, 2735,
1055, 3575, 1895, 4415, 425, 2945, 1265, 3785, 2105, 4625, 635,
3155, 1475, 3995, 2315, 4835, 47, 2567, 887, 3407, 1727, 4247,
257, 2777, 1097, 3617, 1937, 4457, 467, 2987, 1307, 3827, 2147,
4667, 677, 3197, 1517, 4037, 2357, 4877, 89, 2609, 929, 3449,
1769, 4289, 299, 2819, 1139, 3659, 1979, 4499, 509, 3029, 1349,
3869, 2189, 4709, 719, 3239, 1559, 4079, 2399, 4919, 131, 2651,
971, 3491, 1811, 4331, 341, 2861, 1181, 3701, 2021, 4541, 551,
3071, 1391, 3911, 2231, 4751, 761, 3281, 1601, 4121, 2441, 4961,
173, 2693, 1013, 3533, 1853, 4373, 383, 2903, 1223, 3743, 2063,
4583, 593, 3113, 1433, 3953, 2273, 4793, 803, 3323, 1643, 4163,
2483, 5003, 12, 2532, 852, 3372, 1692, 4212, 222, 2742, 1062,
3582, 1902, 4422, 432, 2952, 1272, 3792, 2112, 4632, 642, 3162,
1482, 4002, 2322, 4842, 54, 2574, 894, 3414, 1734, 4254, 264,
2784, 1104, 3624, 1944, 4464, 474, 2994, 1314, 3834, 2154, 4674,
684, 3204, 1524, 4044, 2364, 4884, 96, 2616, 936, 3456, 1776,
4296, 306, 2826, 1146, 3666, 1986, 4506, 516, 3036, 1356, 3876,
2196, 4716, 726, 3246, 1566, 4086, 2406, 4926, 138, 2658, 978,
3498, 1818, 4338, 348, 2868, 1188, 3708, 2028, 4548, 558, 3078,
1398, 3918, 2238, 4758, 768, 3288, 1608, 4128, 2448, 4968, 180,
2700, 1020, 3540, 1860, 4380, 390, 2910, 1230, 3750, 2070, 4590,
600, 3120, 1440, 3960, 2280, 4800, 810, 3330, 1650, 4170, 2490,
5010, 19, 2539, 859, 3379, 1699, 4219, 229, 2749, 1069, 3589,
1909, 4429, 439, 2959, 1279, 3799, 2119, 4639, 649, 3169, 1489,
4009, 2329, 4849, 61, 2581, 901, 3421, 1741, 4261, 271, 2791,
1111, 3631, 1951, 4471, 481, 3001, 1321, 3841, 2161, 4681, 691,
3211, 1531, 4051, 2371, 4891, 103, 2623, 943, 3463, 1783, 4303,
313, 2833, 1153, 3673, 1993, 4513, 523, 3043, 1363, 3883, 2203,
4723, 733, 3253, 1573, 4093, 2413, 4933, 145, 2665, 985, 3505,
1825, 4345, 355, 2875, 1195, 3715, 2035, 4555, 565, 3085, 1405,
3925, 2245, 4765, 775, 3295, 1615, 4135, 2455, 4975, 187, 2707,
1027, 3547, 1867, 4387, 397, 2917, 1237, 3757, 2077, 4597, 607,
3127, 1447, 3967, 2287, 4807, 817, 3337, 1657, 4177, 2497, 5017,
26, 2546, 866, 3386, 1706, 4226, 236, 2756, 1076, 3596, 1916,
4436, 446, 2966, 1286, 3806, 2126, 4646, 656, 3176, 1496, 4016,
2336, 4856, 68, 2588, 908, 3428, 1748, 4268, 278, 2798, 1118,
3638, 1958, 4478, 488, 3008, 1328, 3848, 2168, 4688, 698, 3218,
1538, 4058, 2378, 4898, 110, 2630, 950, 3470, 1790, 4310, 320,
2840, 1160, 3680, 2000, 4520, 530, 3050, 1370, 3890, 2210, 4730,
740, 3260, 1580, 4100, 2420, 4940, 152, 2672, 992, 3512, 1832,
4352, 362, 2882, 1202, 3722, 2042, 4562, 572, 3092, 1412, 3932,
2252, 4772, 782, 3302, 1622, 4142, 2462, 4982, 194, 2714, 1034,
3554, 1874, 4394, 404, 2924, 1244, 3764, 2084, 4604, 614, 3134,
1454, 3974, 2294, 4814, 824, 3344, 1664, 4184, 2504, 5024, 33,
2553, 873, 3393, 1713, 4233, 243, 2763, 1083, 3603, 1923, 4443,
453, 2973, 1293, 3813, 2133, 4653, 663, 3183, 1503, 4023, 2343,
4863, 75, 2595, 915, 3435, 1755, 4275, 285, 2805, 1125, 3645,
1965, 4485, 495, 3015, 1335, 3855, 2175, 4695, 705, 3225, 1545,
4065, 2385, 4905, 117, 2637, 957, 3477, 1797, 4317, 327, 2847,
1167, 3687, 2007, 4527, 537, 3057, 1377, 3897, 2217, 4737, 747,
3267, 1587, 4107, 2427, 4947, 159, 2679, 999, 3519, 1839, 4359,
369, 2889, 1209, 3729, 2049, 4569, 579, 3099, 1419, 3939, 2259,
4779, 789, 3309, 1629, 4149, 2469, 4989, 201, 2721, 1041, 3561,
1881, 4401, 411, 2931, 1251, 3771, 2091, 4611, 621, 3141, 1461,
3981, 2301, 4821, 831, 3351, 1671, 4191, 2511, 5031, 40, 2560,
880, 3400, 1720, 4240, 250, 2770, 1090, 3610, 1930, 4450, 460,
2980, 1300, 3820, 2140, 4660, 670, 3190, 1510, 4030, 2350, 4870,
82, 2602, 922, 3442, 1762, 4282, 292, 2812, 1132, 3652, 1972,
4492, 502, 3022, 1342, 3862, 2182, 4702, 712, 3232, 1552, 4072,
2392, 4912, 124, 2644, 964, 3484, 1804, 4324, 334, 2854, 1174,
3694, 2014, 4534, 544, 3064, 1384, 3904, 2224, 4744, 754, 3274,
1594, 4114, 2434, 4954, 166, 2686, 1006, 3526, 1846, 4366, 376,
2896, 1216, 3736, 2056, 4576, 586, 3106, 1426, 3946, 2266, 4786,
796, 3316, 1636, 4156, 2476, 4996, 208, 2728, 1048, 3568, 1888,
4408, 418, 2938, 1258, 3778, 2098, 4618, 628, 3148, 1468, 3988,
2308, 4828, 838, 3358, 1678, 4198, 2518, 5038, 6, 2526, 846,
3366, 1686, 4206, 216, 2736, 1056, 3576, 1896, 4416, 426, 2946,
1266, 3786, 2106, 4626, 636, 3156, 1476, 3996, 2316, 4836, 48,
2568, 888, 3408, 1728, 4248, 258, 2778, 1098, 3618, 1938, 4458,
468, 2988, 1308, 3828, 2148, 4668, 678, 3198, 1518, 4038, 2358,
4878, 90, 2610, 930, 3450, 1770, 4290, 300, 2820, 1140, 3660,
1980, 4500, 510, 3030, 1350, 3870, 2190, 4710, 720, 3240, 1560,
4080, 2400, 4920, 132, 2652, 972, 3492, 1812, 4332, 342, 2862,
1182, 3702, 2022, 4542, 552, 3072, 1392, 3912, 2232, 4752, 762,
3282, 1602, 4122, 2442, 4962, 174, 2694, 1014, 3534, 1854, 4374,
384, 2904, 1224, 3744, 2064, 4584, 594, 3114, 1434, 3954, 2274,
4794, 804, 3324, 1644, 4164, 2484, 5004, 13, 2533, 853, 3373,
1693, 4213, 223, 2743, 1063, 3583, 1903, 4423, 433, 2953, 1273,
3793, 2113, 4633, 643, 3163, 1483, 4003, 2323, 4843, 55, 2575,
895, 3415, 1735, 4255, 265, 2785, 1105, 3625, 1945, 4465, 475,
2995, 1315, 3835, 2155, 4675, 685, 3205, 1525, 4045, 2365, 4885,
97, 2617, 937, 3457, 1777, 4297, 307, 2827, 1147, 3667, 1987,
4507, 517, 3037, 1357, 3877, 2197, 4717, 727, 3247, 1567, 4087,
2407, 4927, 139, 2659, 979, 3499, 1819, 4339, 349, 2869, 1189,
3709, 2029, 4549, 559, 3079, 1399, 3919, 2239, 4759, 769, 3289,
1609, 4129, 2449, 4969, 181, 2701, 1021, 3541, 1861, 4381, 391,
2911, 1231, 3751, 2071, 4591, 601, 3121, 1441, 3961, 2281, 4801,
811, 3331, 1651, 4171, 2491, 5011, 20, 2540, 860, 3380, 1700,
4220, 230, 2750, 1070, 3590, 1910, 4430, 440, 2960, 1280, 3800,
2120, 4640, 650, 3170, 1490, 4010, 2330, 4850, 62, 2582, 902,
3422, 1742, 4262, 272, 2792, 1112, 3632, 1952, 4472, 482, 3002,
1322, 3842, 2162, 4682, 692, 3212, 1532, 4052, 2372, 4892, 104,
2624, 944, 3464, 1784, 4304, 314, 2834, 1154, 3674, 1994, 4514,
524, 3044, 1364, 3884, 2204, 4724, 734, 3254, 1574, 4094, 2414,
4934, 146, 2666, 986, 3506, 1826, 4346, 356, 2876, 1196, 3716,
2036, 4556, 566, 3086, 1406, 3926, 2246, 4766, 776, 3296, 1616,
4136, 2456, 4976, 188, 2708, 1028, 3548, 1868, 4388, 398, 2918,
1238, 3758, 2078, 4598, 608, 3128, 1448, 3968, 2288, 4808, 818,
3338, 1658, 4178, 2498, 5018, 27, 2547, 867, 3387, 1707, 4227,
237, 2757, 1077, 3597, 1917, 4437, 447, 2967, 1287, 3807, 2127,
4647, 657, 3177, 1497, 4017, 2337, 4857, 69, 2589, 909, 3429,
1749, 4269, 279, 2799, 1119, 3639, 1959, 4479, 489, 3009, 1329,
3849, 2169, 4689, 699, 3219, 1539, 4059, 2379, 4899, 111, 2631,
951, 3471, 1791, 4311, 321, 2841, 1161, 3681, 2001, 4521, 531,
3051, 1371, 3891, 2211, 4731, 741, 3261, 1581, 4101, 2421, 4941,
153, 2673, 993, 3513, 1833, 4353, 363, 2883, 1203, 3723, 2043,
4563, 573, 3093, 1413, 3933, 2253, 4773, 783, 3303, 1623, 4143,
2463, 4983, 195, 2715, 1035, 3555, 1875, 4395, 405, 2925, 1245,
3765, 2085, 4605, 615, 3135, 1455, 3975, 2295, 4815, 825, 3345,
1665, 4185, 2505, 5025, 34, 2554, 874, 3394, 1714, 4234, 244,
2764, 1084, 3604, 1924, 4444, 454, 2974, 1294, 3814, 2134, 4654,
664, 3184, 1504, 4024, 2344, 4864, 76, 2596, 916, 3436, 1756,
4276, 286, 2806, 1126, 3646, 1966, 4486, 496, 3016, 1336, 3856,
2176, 4696, 706, 3226, 1546, 4066, 2386, 4906, 118, 2638, 958,
3478, 1798, 4318, 328, 2848, 1168, 3688, 2008, 4528, 538, 3058,
1378, 3898, 2218, 4738, 748, 3268, 1588, 4108, 2428, 4948, 160,
2680, 1000, 3520, 1840, 4360, 370, 2890, 1210, 3730, 2050, 4570,
580, 3100, 1420, 3940, 2260, 4780, 790, 3310, 1630, 4150, 2470,
4990, 202, 2722, 1042, 3562, 1882, 4402, 412, 2932, 1252, 3772,
2092, 4612, 622, 3142, 1462, 3982, 2302, 4822, 832, 3352, 1672,
4192, 2512, 5032, 41, 2561, 881, 3401, 1721, 4241, 251, 2771,
1091, 3611, 1931, 4451, 461, 2981, 1301, 3821, 2141, 4661, 671,
3191, 1511, 4031, 2351, 4871, 83, 2603, 923, 3443, 1763, 4283,
293, 2813, 1133, 3653, 1973, 4493, 503, 3023, 1343, 3863, 2183,
4703, 713, 3233, 1553, 4073, 2393, 4913, 125, 2645, 965, 3485,
1805, 4325, 335, 2855, 1175, 3695, 2015, 4535, 545, 3065, 1385,
3905, 2225, 4745, 755, 3275, 1595, 4115, 2435, 4955, 167, 2687,
1007, 3527, 1847, 4367, 377, 2897, 1217, 3737, 2057, 4577, 587,
3107, 1427, 3947, 2267, 4787, 797, 3317, 1637, 4157, 2477, 4997,
209, 2729, 1049, 3569, 1889, 4409, 419, 2939, 1259, 3779, 2099,
4619, 629, 3149, 1469, 3989, 2309, 4829, 839, 3359, 1679, 4199,
2519, 5039}));
}
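
// Swaps only the two leading axes ({1, 0, 2, 3, 4, 5}) of a {2, 3, 4, 5, 6, 7}
// tensor, so contiguous blocks of 4*5*6*7 = 840 elements are reordered while the
// values inside each block keep their original order.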
TEST(TransposeTest, TestRefOps6D2) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5, 6, 7},
{1, 0, 2, 3, 4, 5}),
ElementsAreArray(
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65,
66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76,
77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87,
88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98,
99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120,
121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131,
132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153,
154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186,
187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197,
198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208,
209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230,
231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241,
242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252,
253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263,
264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285,
286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296,
297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307,
308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318,
319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329,
330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340,
341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351,
352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362,
363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373,
374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384,
385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395,
396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406,
407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417,
418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428,
429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439,
440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450,
451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461,
462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472,
473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 483,
484, 485, 486, 487, 488, 489, 490, 491, 492, 493, 494,
495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505,
506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516,
517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527,
528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 538,
539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 549,
550, 551, 552, 553, 554, 555, 556, 557, 558, 559, 560,
561, 562, 563, 564, 565, 566, 567, 568, 569, 570, 571,
572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582,
583, 584, 585, 586, 587, 588, 589, 590, 591, 592, 593,
594, 595, 596, 597, 598, 599, 600, 601, 602, 603, 604,
605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615,
616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626,
627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637,
638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648,
649, 650, 651, 652, 653, 654, 655, 656, 657, 658, 659,
660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 670,
671, 672, 673, 674, 675, 676, 677, 678, 679, 680, 681,
682, 683, 684, 685, 686, 687, 688, 689, 690, 691, 692,
693, 694, 695, 696, 697, 698, 699, 700, 701, 702, 703,
704, 705, 706, 707, 708, 709, 710, 711, 712, 713, 714,
715, 716, 717, 718, 719, 720, 721, 722, 723, 724, 725,
726, 727, 728, 729, 730, 731, 732, 733, 734, 735, 736,
737, 738, 739, 740, 741, 742, 743, 744, 745, 746, 747,
748, 749, 750, 751, 752, 753, 754, 755, 756, 757, 758,
759, 760, 761, 762, 763, 764, 765, 766, 767, 768, 769,
770, 771, 772, 773, 774, 775, 776, 777, 778, 779, 780,
781, 782, 783, 784, 785, 786, 787, 788, 789, 790, 791,
792, 793, 794, 795, 796, 797, 798, 799, 800, 801, 802,
803, 804, 805, 806, 807, 808, 809, 810, 811, 812, 813,
814, 815, 816, 817, 818, 819, 820, 821, 822, 823, 824,
825, 826, 827, 828, 829, 830, 831, 832, 833, 834, 835,
836, 837, 838, 839, 2520, 2521, 2522, 2523, 2524, 2525, 2526,
2527, 2528, 2529, 2530, 2531, 2532, 2533, 2534, 2535, 2536, 2537,
2538, 2539, 2540, 2541, 2542, 2543, 2544, 2545, 2546, 2547, 2548,
2549, 2550, 2551, 2552, 2553, 2554, 2555, 2556, 2557, 2558, 2559,
2560, 2561, 2562, 2563, 2564, 2565, 2566, 2567, 2568, 2569, 2570,
2571, 2572, 2573, 2574, 2575, 2576, 2577, 2578, 2579, 2580, 2581,
2582, 2583, 2584, 2585, 2586, 2587, 2588, 2589, 2590, 2591, 2592,
2593, 2594, 2595, 2596, 2597, 2598, 2599, 2600, 2601, 2602, 2603,
2604, 2605, 2606, 2607, 2608, 2609, 2610, 2611, 2612, 2613, 2614,
2615, 2616, 2617, 2618, 2619, 2620, 2621, 2622, 2623, 2624, 2625,
2626, 2627, 2628, 2629, 2630, 2631, 2632, 2633, 2634, 2635, 2636,
2637, 2638, 2639, 2640, 2641, 2642, 2643, 2644, 2645, 2646, 2647,
2648, 2649, 2650, 2651, 2652, 2653, 2654, 2655, 2656, 2657, 2658,
2659, 2660, 2661, 2662, 2663, 2664, 2665, 2666, 2667, 2668, 2669,
2670, 2671, 2672, 2673, 2674, 2675, 2676, 2677, 2678, 2679, 2680,
2681, 2682, 2683, 2684, 2685, 2686, 2687, 2688, 2689, 2690, 2691,
2692, 2693, 2694, 2695, 2696, 2697, 2698, 2699, 2700, 2701, 2702,
2703, 2704, 2705, 2706, 2707, 2708, 2709, 2710, 2711, 2712, 2713,
2714, 2715, 2716, 2717, 2718, 2719, 2720, 2721, 2722, 2723, 2724,
2725, 2726, 2727, 2728, 2729, 2730, 2731, 2732, 2733, 2734, 2735,
2736, 2737, 2738, 2739, 2740, 2741, 2742, 2743, 2744, 2745, 2746,
2747, 2748, 2749, 2750, 2751, 2752, 2753, 2754, 2755, 2756, 2757,
2758, 2759, 2760, 2761, 2762, 2763, 2764, 2765, 2766, 2767, 2768,
2769, 2770, 2771, 2772, 2773, 2774, 2775, 2776, 2777, 2778, 2779,
2780, 2781, 2782, 2783, 2784, 2785, 2786, 2787, 2788, 2789, 2790,
2791, 2792, 2793, 2794, 2795, 2796, 2797, 2798, 2799, 2800, 2801,
2802, 2803, 2804, 2805, 2806, 2807, 2808, 2809, 2810, 2811, 2812,
2813, 2814, 2815, 2816, 2817, 2818, 2819, 2820, 2821, 2822, 2823,
2824, 2825, 2826, 2827, 2828, 2829, 2830, 2831, 2832, 2833, 2834,
2835, 2836, 2837, 2838, 2839, 2840, 2841, 2842, 2843, 2844, 2845,
2846, 2847, 2848, 2849, 2850, 2851, 2852, 2853, 2854, 2855, 2856,
2857, 2858, 2859, 2860, 2861, 2862, 2863, 2864, 2865, 2866, 2867,
2868, 2869, 2870, 2871, 2872, 2873, 2874, 2875, 2876, 2877, 2878,
2879, 2880, 2881, 2882, 2883, 2884, 2885, 2886, 2887, 2888, 2889,
2890, 2891, 2892, 2893, 2894, 2895, 2896, 2897, 2898, 2899, 2900,
2901, 2902, 2903, 2904, 2905, 2906, 2907, 2908, 2909, 2910, 2911,
2912, 2913, 2914, 2915, 2916, 2917, 2918, 2919, 2920, 2921, 2922,
2923, 2924, 2925, 2926, 2927, 2928, 2929, 2930, 2931, 2932, 2933,
2934, 2935, 2936, 2937, 2938, 2939, 2940, 2941, 2942, 2943, 2944,
2945, 2946, 2947, 2948, 2949, 2950, 2951, 2952, 2953, 2954, 2955,
2956, 2957, 2958, 2959, 2960, 2961, 2962, 2963, 2964, 2965, 2966,
2967, 2968, 2969, 2970, 2971, 2972, 2973, 2974, 2975, 2976, 2977,
2978, 2979, 2980, 2981, 2982, 2983, 2984, 2985, 2986, 2987, 2988,
2989, 2990, 2991, 2992, 2993, 2994, 2995, 2996, 2997, 2998, 2999,
3000, 3001, 3002, 3003, 3004, 3005, 3006, 3007, 3008, 3009, 3010,
3011, 3012, 3013, 3014, 3015, 3016, 3017, 3018, 3019, 3020, 3021,
3022, 3023, 3024, 3025, 3026, 3027, 3028, 3029, 3030, 3031, 3032,
3033, 3034, 3035, 3036, 3037, 3038, 3039, 3040, 3041, 3042, 3043,
3044, 3045, 3046, 3047, 3048, 3049, 3050, 3051, 3052, 3053, 3054,
3055, 3056, 3057, 3058, 3059, 3060, 3061, 3062, 3063, 3064, 3065,
3066, 3067, 3068, 3069, 3070, 3071, 3072, 3073, 3074, 3075, 3076,
3077, 3078, 3079, 3080, 3081, 3082, 3083, 3084, 3085, 3086, 3087,
3088, 3089, 3090, 3091, 3092, 3093, 3094, 3095, 3096, 3097, 3098,
3099, 3100, 3101, 3102, 3103, 3104, 3105, 3106, 3107, 3108, 3109,
3110, 3111, 3112, 3113, 3114, 3115, 3116, 3117, 3118, 3119, 3120,
3121, 3122, 3123, 3124, 3125, 3126, 3127, 3128, 3129, 3130, 3131,
3132, 3133, 3134, 3135, 3136, 3137, 3138, 3139, 3140, 3141, 3142,
3143, 3144, 3145, 3146, 3147, 3148, 3149, 3150, 3151, 3152, 3153,
3154, 3155, 3156, 3157, 3158, 3159, 3160, 3161, 3162, 3163, 3164,
3165, 3166, 3167, 3168, 3169, 3170, 3171, 3172, 3173, 3174, 3175,
3176, 3177, 3178, 3179, 3180, 3181, 3182, 3183, 3184, 3185, 3186,
3187, 3188, 3189, 3190, 3191, 3192, 3193, 3194, 3195, 3196, 3197,
3198, 3199, 3200, 3201, 3202, 3203, 3204, 3205, 3206, 3207, 3208,
3209, 3210, 3211, 3212, 3213, 3214, 3215, 3216, 3217, 3218, 3219,
3220, 3221, 3222, 3223, 3224, 3225, 3226, 3227, 3228, 3229, 3230,
3231, 3232, 3233, 3234, 3235, 3236, 3237, 3238, 3239, 3240, 3241,
3242, 3243, 3244, 3245, 3246, 3247, 3248, 3249, 3250, 3251, 3252,
3253, 3254, 3255, 3256, 3257, 3258, 3259, 3260, 3261, 3262, 3263,
3264, 3265, 3266, 3267, 3268, 3269, 3270, 3271, 3272, 3273, 3274,
3275, 3276, 3277, 3278, 3279, 3280, 3281, 3282, 3283, 3284, 3285,
3286, 3287, 3288, 3289, 3290, 3291, 3292, 3293, 3294, 3295, 3296,
3297, 3298, 3299, 3300, 3301, 3302, 3303, 3304, 3305, 3306, 3307,
3308, 3309, 3310, 3311, 3312, 3313, 3314, 3315, 3316, 3317, 3318,
3319, 3320, 3321, 3322, 3323, 3324, 3325, 3326, 3327, 3328, 3329,
3330, 3331, 3332, 3333, 3334, 3335, 3336, 3337, 3338, 3339, 3340,
3341, 3342, 3343, 3344, 3345, 3346, 3347, 3348, 3349, 3350, 3351,
3352, 3353, 3354, 3355, 3356, 3357, 3358, 3359, 840, 841, 842,
843, 844, 845, 846, 847, 848, 849, 850, 851, 852, 853,
854, 855, 856, 857, 858, 859, 860, 861, 862, 863, 864,
865, 866, 867, 868, 869, 870, 871, 872, 873, 874, 875,
876, 877, 878, 879, 880, 881, 882, 883, 884, 885, 886,
887, 888, 889, 890, 891, 892, 893, 894, 895, 896, 897,
898, 899, 900, 901, 902, 903, 904, 905, 906, 907, 908,
909, 910, 911, 912, 913, 914, 915, 916, 917, 918, 919,
920, 921, 922, 923, 924, 925, 926, 927, 928, 929, 930,
931, 932, 933, 934, 935, 936, 937, 938, 939, 940, 941,
942, 943, 944, 945, 946, 947, 948, 949, 950, 951, 952,
953, 954, 955, 956, 957, 958, 959, 960, 961, 962, 963,
964, 965, 966, 967, 968, 969, 970, 971, 972, 973, 974,
975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985,
986, 987, 988, 989, 990, 991, 992, 993, 994, 995, 996,
997, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, 1018,
1019, 1020, 1021, 1022, 1023, 1024, 1025, 1026, 1027, 1028, 1029,
1030, 1031, 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039, 1040,
1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, 1050, 1051,
1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062,
1063, 1064, 1065, 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073,
1074, 1075, 1076, 1077, 1078, 1079, 1080, 1081, 1082, 1083, 1084,
1085, 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1093, 1094, 1095,
1096, 1097, 1098, 1099, 1100, 1101, 1102, 1103, 1104, 1105, 1106,
1107, 1108, 1109, 1110, 1111, 1112, 1113, 1114, 1115, 1116, 1117,
1118, 1119, 1120, 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128,
1129, 1130, 1131, 1132, 1133, 1134, 1135, 1136, 1137, 1138, 1139,
1140, 1141, 1142, 1143, 1144, 1145, 1146, 1147, 1148, 1149, 1150,
1151, 1152, 1153, 1154, 1155, 1156, 1157, 1158, 1159, 1160, 1161,
1162, 1163, 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1171, 1172,
1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181, 1182, 1183,
1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, 1192, 1193, 1194,
1195, 1196, 1197, 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205,
1206, 1207, 1208, 1209, 1210, 1211, 1212, 1213, 1214, 1215, 1216,
1217, 1218, 1219, 1220, 1221, 1222, 1223, 1224, 1225, 1226, 1227,
1228, 1229, 1230, 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238,
1239, 1240, 1241, 1242, 1243, 1244, 1245, 1246, 1247, 1248, 1249,
1250, 1251, 1252, 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260,
1261, 1262, 1263, 1264, 1265, 1266, 1267, 1268, 1269, 1270, 1271,
1272, 1273, 1274, 1275, 1276, 1277, 1278, 1279, 1280, 1281, 1282,
1283, 1284, 1285, 1286, 1287, 1288, 1289, 1290, 1291, 1292, 1293,
1294, 1295, 1296, 1297, 1298, 1299, 1300, 1301, 1302, 1303, 1304,
1305, 1306, 1307, 1308, 1309, 1310, 1311, 1312, 1313, 1314, 1315,
1316, 1317, 1318, 1319, 1320, 1321, 1322, 1323, 1324, 1325, 1326,
1327, 1328, 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, 1337,
1338, 1339, 1340, 1341, 1342, 1343, 1344, 1345, 1346, 1347, 1348,
1349, 1350, 1351, 1352, 1353, 1354, 1355, 1356, 1357, 1358, 1359,
1360, 1361, 1362, 1363, 1364, 1365, 1366, 1367, 1368, 1369, 1370,
1371, 1372, 1373, 1374, 1375, 1376, 1377, 1378, 1379, 1380, 1381,
1382, 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1390, 1391, 1392,
1393, 1394, 1395, 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403,
1404, 1405, 1406, 1407, 1408, 1409, 1410, 1411, 1412, 1413, 1414,
1415, 1416, 1417, 1418, 1419, 1420, 1421, 1422, 1423, 1424, 1425,
1426, 1427, 1428, 1429, 1430, 1431, 1432, 1433, 1434, 1435, 1436,
1437, 1438, 1439, 1440, 1441, 1442, 1443, 1444, 1445, 1446, 1447,
1448, 1449, 1450, 1451, 1452, 1453, 1454, 1455, 1456, 1457, 1458,
1459, 1460, 1461, 1462, 1463, 1464, 1465, 1466, 1467, 1468, 1469,
1470, 1471, 1472, 1473, 1474, 1475, 1476, 1477, 1478, 1479, 1480,
1481, 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489, 1490, 1491,
1492, 1493, 1494, 1495, 1496, 1497, 1498, 1499, 1500, 1501, 1502,
1503, 1504, 1505, 1506, 1507, 1508, 1509, 1510, 1511, 1512, 1513,
1514, 1515, 1516, 1517, 1518, 1519, 1520, 1521, 1522, 1523, 1524,
1525, 1526, 1527, 1528, 1529, 1530, 1531, 1532, 1533, 1534, 1535,
1536, 1537, 1538, 1539, 1540, 1541, 1542, 1543, 1544, 1545, 1546,
1547, 1548, 1549, 1550, 1551, 1552, 1553, 1554, 1555, 1556, 1557,
1558, 1559, 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, 1568,
1569, 1570, 1571, 1572, 1573, 1574, 1575, 1576, 1577, 1578, 1579,
1580, 1581, 1582, 1583, 1584, 1585, 1586, 1587, 1588, 1589, 1590,
1591, 1592, 1593, 1594, 1595, 1596, 1597, 1598, 1599, 1600, 1601,
1602, 1603, 1604, 1605, 1606, 1607, 1608, 1609, 1610, 1611, 1612,
1613, 1614, 1615, 1616, 1617, 1618, 1619, 1620, 1621, 1622, 1623,
1624, 1625, 1626, 1627, 1628, 1629, 1630, 1631, 1632, 1633, 1634,
1635, 1636, 1637, 1638, 1639, 1640, 1641, 1642, 1643, 1644, 1645,
1646, 1647, 1648, 1649, 1650, 1651, 1652, 1653, 1654, 1655, 1656,
1657, 1658, 1659, 1660, 1661, 1662, 1663, 1664, 1665, 1666, 1667,
1668, 1669, 1670, 1671, 1672, 1673, 1674, 1675, 1676, 1677, 1678,
1679, 3360, 3361, 3362, 3363, 3364, 3365, 3366, 3367, 3368, 3369,
3370, 3371, 3372, 3373, 3374, 3375, 3376, 3377, 3378, 3379, 3380,
3381, 3382, 3383, 3384, 3385, 3386, 3387, 3388, 3389, 3390, 3391,
3392, 3393, 3394, 3395, 3396, 3397, 3398, 3399, 3400, 3401, 3402,
3403, 3404, 3405, 3406, 3407, 3408, 3409, 3410, 3411, 3412, 3413,
3414, 3415, 3416, 3417, 3418, 3419, 3420, 3421, 3422, 3423, 3424,
3425, 3426, 3427, 3428, 3429, 3430, 3431, 3432, 3433, 3434, 3435,
3436, 3437, 3438, 3439, 3440, 3441, 3442, 3443, 3444, 3445, 3446,
3447, 3448, 3449, 3450, 3451, 3452, 3453, 3454, 3455, 3456, 3457,
3458, 3459, 3460, 3461, 3462, 3463, 3464, 3465, 3466, 3467, 3468,
3469, 3470, 3471, 3472, 3473, 3474, 3475, 3476, 3477, 3478, 3479,
3480, 3481, 3482, 3483, 3484, 3485, 3486, 3487, 3488, 3489, 3490,
3491, 3492, 3493, 3494, 3495, 3496, 3497, 3498, 3499, 3500, 3501,
3502, 3503, 3504, 3505, 3506, 3507, 3508, 3509, 3510, 3511, 3512,
3513, 3514, 3515, 3516, 3517, 3518, 3519, 3520, 3521, 3522, 3523,
3524, 3525, 3526, 3527, 3528, 3529, 3530, 3531, 3532, 3533, 3534,
3535, 3536, 3537, 3538, 3539, 3540, 3541, 3542, 3543, 3544, 3545,
3546, 3547, 3548, 3549, 3550, 3551, 3552, 3553, 3554, 3555, 3556,
3557, 3558, 3559, 3560, 3561, 3562, 3563, 3564, 3565, 3566, 3567,
3568, 3569, 3570, 3571, 3572, 3573, 3574, 3575, 3576, 3577, 3578,
3579, 3580, 3581, 3582, 3583, 3584, 3585, 3586, 3587, 3588, 3589,
3590, 3591, 3592, 3593, 3594, 3595, 3596, 3597, 3598, 3599, 3600,
3601, 3602, 3603, 3604, 3605, 3606, 3607, 3608, 3609, 3610, 3611,
3612, 3613, 3614, 3615, 3616, 3617, 3618, 3619, 3620, 3621, 3622,
3623, 3624, 3625, 3626, 3627, 3628, 3629, 3630, 3631, 3632, 3633,
3634, 3635, 3636, 3637, 3638, 3639, 3640, 3641, 3642, 3643, 3644,
3645, 3646, 3647, 3648, 3649, 3650, 3651, 3652, 3653, 3654, 3655,
3656, 3657, 3658, 3659, 3660, 3661, 3662, 3663, 3664, 3665, 3666,
3667, 3668, 3669, 3670, 3671, 3672, 3673, 3674, 3675, 3676, 3677,
3678, 3679, 3680, 3681, 3682, 3683, 3684, 3685, 3686, 3687, 3688,
3689, 3690, 3691, 3692, 3693, 3694, 3695, 3696, 3697, 3698, 3699,
3700, 3701, 3702, 3703, 3704, 3705, 3706, 3707, 3708, 3709, 3710,
3711, 3712, 3713, 3714, 3715, 3716, 3717, 3718, 3719, 3720, 3721,
3722, 3723, 3724, 3725, 3726, 3727, 3728, 3729, 3730, 3731, 3732,
3733, 3734, 3735, 3736, 3737, 3738, 3739, 3740, 3741, 3742, 3743,
3744, 3745, 3746, 3747, 3748, 3749, 3750, 3751, 3752, 3753, 3754,
3755, 3756, 3757, 3758, 3759, 3760, 3761, 3762, 3763, 3764, 3765,
3766, 3767, 3768, 3769, 3770, 3771, 3772, 3773, 3774, 3775, 3776,
3777, 3778, 3779, 3780, 3781, 3782, 3783, 3784, 3785, 3786, 3787,
3788, 3789, 3790, 3791, 3792, 3793, 3794, 3795, 3796, 3797, 3798,
3799, 3800, 3801, 3802, 3803, 3804, 3805, 3806, 3807, 3808, 3809,
3810, 3811, 3812, 3813, 3814, 3815, 3816, 3817, 3818, 3819, 3820,
3821, 3822, 3823, 3824, 3825, 3826, 3827, 3828, 3829, 3830, 3831,
3832, 3833, 3834, 3835, 3836, 3837, 3838, 3839, 3840, 3841, 3842,
3843, 3844, 3845, 3846, 3847, 3848, 3849, 3850, 3851, 3852, 3853,
3854, 3855, 3856, 3857, 3858, 3859, 3860, 3861, 3862, 3863, 3864,
3865, 3866, 3867, 3868, 3869, 3870, 3871, 3872, 3873, 3874, 3875,
3876, 3877, 3878, 3879, 3880, 3881, 3882, 3883, 3884, 3885, 3886,
3887, 3888, 3889, 3890, 3891, 3892, 3893, 3894, 3895, 3896, 3897,
3898, 3899, 3900, 3901, 3902, 3903, 3904, 3905, 3906, 3907, 3908,
3909, 3910, 3911, 3912, 3913, 3914, 3915, 3916, 3917, 3918, 3919,
3920, 3921, 3922, 3923, 3924, 3925, 3926, 3927, 3928, 3929, 3930,
3931, 3932, 3933, 3934, 3935, 3936, 3937, 3938, 3939, 3940, 3941,
3942, 3943, 3944, 3945, 3946, 3947, 3948, 3949, 3950, 3951, 3952,
3953, 3954, 3955, 3956, 3957, 3958, 3959, 3960, 3961, 3962, 3963,
3964, 3965, 3966, 3967, 3968, 3969, 3970, 3971, 3972, 3973, 3974,
3975, 3976, 3977, 3978, 3979, 3980, 3981, 3982, 3983, 3984, 3985,
3986, 3987, 3988, 3989, 3990, 3991, 3992, 3993, 3994, 3995, 3996,
3997, 3998, 3999, 4000, 4001, 4002, 4003, 4004, 4005, 4006, 4007,
4008, 4009, 4010, 4011, 4012, 4013, 4014, 4015, 4016, 4017, 4018,
4019, 4020, 4021, 4022, 4023, 4024, 4025, 4026, 4027, 4028, 4029,
4030, 4031, 4032, 4033, 4034, 4035, 4036, 4037, 4038, 4039, 4040,
4041, 4042, 4043, 4044, 4045, 4046, 4047, 4048, 4049, 4050, 4051,
4052, 4053, 4054, 4055, 4056, 4057, 4058, 4059, 4060, 4061, 4062,
4063, 4064, 4065, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073,
4074, 4075, 4076, 4077, 4078, 4079, 4080, 4081, 4082, 4083, 4084,
4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, 4094, 4095,
4096, 4097, 4098, 4099, 4100, 4101, 4102, 4103, 4104, 4105, 4106,
4107, 4108, 4109, 4110, 4111, 4112, 4113, 4114, 4115, 4116, 4117,
4118, 4119, 4120, 4121, 4122, 4123, 4124, 4125, 4126, 4127, 4128,
4129, 4130, 4131, 4132, 4133, 4134, 4135, 4136, 4137, 4138, 4139,
4140, 4141, 4142, 4143, 4144, 4145, 4146, 4147, 4148, 4149, 4150,
4151, 4152, 4153, 4154, 4155, 4156, 4157, 4158, 4159, 4160, 4161,
4162, 4163, 4164, 4165, 4166, 4167, 4168, 4169, 4170, 4171, 4172,
4173, 4174, 4175, 4176, 4177, 4178, 4179, 4180, 4181, 4182, 4183,
4184, 4185, 4186, 4187, 4188, 4189, 4190, 4191, 4192, 4193, 4194,
4195, 4196, 4197, 4198, 4199, 1680, 1681, 1682, 1683, 1684, 1685,
1686, 1687, 1688, 1689, 1690, 1691, 1692, 1693, 1694, 1695, 1696,
1697, 1698, 1699, 1700, 1701, 1702, 1703, 1704, 1705, 1706, 1707,
1708, 1709, 1710, 1711, 1712, 1713, 1714, 1715, 1716, 1717, 1718,
1719, 1720, 1721, 1722, 1723, 1724, 1725, 1726, 1727, 1728, 1729,
1730, 1731, 1732, 1733, 1734, 1735, 1736, 1737, 1738, 1739, 1740,
1741, 1742, 1743, 1744, 1745, 1746, 1747, 1748, 1749, 1750, 1751,
1752, 1753, 1754, 1755, 1756, 1757, 1758, 1759, 1760, 1761, 1762,
1763, 1764, 1765, 1766, 1767, 1768, 1769, 1770, 1771, 1772, 1773,
1774, 1775, 1776, 1777, 1778, 1779, 1780, 1781, 1782, 1783, 1784,
1785, 1786, 1787, 1788, 1789, 1790, 1791, 1792, 1793, 1794, 1795,
1796, 1797, 1798, 1799, 1800, 1801, 1802, 1803, 1804, 1805, 1806,
1807, 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1815, 1816, 1817,
1818, 1819, 1820, 1821, 1822, 1823, 1824, 1825, 1826, 1827, 1828,
1829, 1830, 1831, 1832, 1833, 1834, 1835, 1836, 1837, 1838, 1839,
1840, 1841, 1842, 1843, 1844, 1845, 1846, 1847, 1848, 1849, 1850,
1851, 1852, 1853, 1854, 1855, 1856, 1857, 1858, 1859, 1860, 1861,
1862, 1863, 1864, 1865, 1866, 1867, 1868, 1869, 1870, 1871, 1872,
1873, 1874, 1875, 1876, 1877, 1878, 1879, 1880, 1881, 1882, 1883,
1884, 1885, 1886, 1887, 1888, 1889, 1890, 1891, 1892, 1893, 1894,
1895, 1896, 1897, 1898, 1899, 1900, 1901, 1902, 1903, 1904, 1905,
1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, 1914, 1915, 1916,
1917, 1918, 1919, 1920, 1921, 1922, 1923, 1924, 1925, 1926, 1927,
1928, 1929, 1930, 1931, 1932, 1933, 1934, 1935, 1936, 1937, 1938,
1939, 1940, 1941, 1942, 1943, 1944, 1945, 1946, 1947, 1948, 1949,
1950, 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, 1959, 1960,
1961, 1962, 1963, 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971,
1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982,
1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993,
1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015,
2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023, 2024, 2025, 2026,
2027, 2028, 2029, 2030, 2031, 2032, 2033, 2034, 2035, 2036, 2037,
2038, 2039, 2040, 2041, 2042, 2043, 2044, 2045, 2046, 2047, 2048,
2049, 2050, 2051, 2052, 2053, 2054, 2055, 2056, 2057, 2058, 2059,
2060, 2061, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069, 2070,
2071, 2072, 2073, 2074, 2075, 2076, 2077, 2078, 2079, 2080, 2081,
2082, 2083, 2084, 2085, 2086, 2087, 2088, 2089, 2090, 2091, 2092,
2093, 2094, 2095, 2096, 2097, 2098, 2099, 2100, 2101, 2102, 2103,
2104, 2105, 2106, 2107, 2108, 2109, 2110, 2111, 2112, 2113, 2114,
2115, 2116, 2117, 2118, 2119, 2120, 2121, 2122, 2123, 2124, 2125,
2126, 2127, 2128, 2129, 2130, 2131, 2132, 2133, 2134, 2135, 2136,
2137, 2138, 2139, 2140, 2141, 2142, 2143, 2144, 2145, 2146, 2147,
2148, 2149, 2150, 2151, 2152, 2153, 2154, 2155, 2156, 2157, 2158,
2159, 2160, 2161, 2162, 2163, 2164, 2165, 2166, 2167, 2168, 2169,
2170, 2171, 2172, 2173, 2174, 2175, 2176, 2177, 2178, 2179, 2180,
2181, 2182, 2183, 2184, 2185, 2186, 2187, 2188, 2189, 2190, 2191,
2192, 2193, 2194, 2195, 2196, 2197, 2198, 2199, 2200, 2201, 2202,
2203, 2204, 2205, 2206, 2207, 2208, 2209, 2210, 2211, 2212, 2213,
2214, 2215, 2216, 2217, 2218, 2219, 2220, 2221, 2222, 2223, 2224,
2225, 2226, 2227, 2228, 2229, 2230, 2231, 2232, 2233, 2234, 2235,
2236, 2237, 2238, 2239, 2240, 2241, 2242, 2243, 2244, 2245, 2246,
2247, 2248, 2249, 2250, 2251, 2252, 2253, 2254, 2255, 2256, 2257,
2258, 2259, 2260, 2261, 2262, 2263, 2264, 2265, 2266, 2267, 2268,
2269, 2270, 2271, 2272, 2273, 2274, 2275, 2276, 2277, 2278, 2279,
2280, 2281, 2282, 2283, 2284, 2285, 2286, 2287, 2288, 2289, 2290,
2291, 2292, 2293, 2294, 2295, 2296, 2297, 2298, 2299, 2300, 2301,
2302, 2303, 2304, 2305, 2306, 2307, 2308, 2309, 2310, 2311, 2312,
2313, 2314, 2315, 2316, 2317, 2318, 2319, 2320, 2321, 2322, 2323,
2324, 2325, 2326, 2327, 2328, 2329, 2330, 2331, 2332, 2333, 2334,
2335, 2336, 2337, 2338, 2339, 2340, 2341, 2342, 2343, 2344, 2345,
2346, 2347, 2348, 2349, 2350, 2351, 2352, 2353, 2354, 2355, 2356,
2357, 2358, 2359, 2360, 2361, 2362, 2363, 2364, 2365, 2366, 2367,
2368, 2369, 2370, 2371, 2372, 2373, 2374, 2375, 2376, 2377, 2378,
2379, 2380, 2381, 2382, 2383, 2384, 2385, 2386, 2387, 2388, 2389,
2390, 2391, 2392, 2393, 2394, 2395, 2396, 2397, 2398, 2399, 2400,
2401, 2402, 2403, 2404, 2405, 2406, 2407, 2408, 2409, 2410, 2411,
2412, 2413, 2414, 2415, 2416, 2417, 2418, 2419, 2420, 2421, 2422,
2423, 2424, 2425, 2426, 2427, 2428, 2429, 2430, 2431, 2432, 2433,
2434, 2435, 2436, 2437, 2438, 2439, 2440, 2441, 2442, 2443, 2444,
2445, 2446, 2447, 2448, 2449, 2450, 2451, 2452, 2453, 2454, 2455,
2456, 2457, 2458, 2459, 2460, 2461, 2462, 2463, 2464, 2465, 2466,
2467, 2468, 2469, 2470, 2471, 2472, 2473, 2474, 2475, 2476, 2477,
2478, 2479, 2480, 2481, 2482, 2483, 2484, 2485, 2486, 2487, 2488,
2489, 2490, 2491, 2492, 2493, 2494, 2495, 2496, 2497, 2498, 2499,
2500, 2501, 2502, 2503, 2504, 2505, 2506, 2507, 2508, 2509, 2510,
2511, 2512, 2513, 2514, 2515, 2516, 2517, 2518, 2519, 4200, 4201,
4202, 4203, 4204, 4205, 4206, 4207, 4208, 4209, 4210, 4211, 4212,
4213, 4214, 4215, 4216, 4217, 4218, 4219, 4220, 4221, 4222, 4223,
4224, 4225, 4226, 4227, 4228, 4229, 4230, 4231, 4232, 4233, 4234,
4235, 4236, 4237, 4238, 4239, 4240, 4241, 4242, 4243, 4244, 4245,
4246, 4247, 4248, 4249, 4250, 4251, 4252, 4253, 4254, 4255, 4256,
4257, 4258, 4259, 4260, 4261, 4262, 4263, 4264, 4265, 4266, 4267,
4268, 4269, 4270, 4271, 4272, 4273, 4274, 4275, 4276, 4277, 4278,
4279, 4280, 4281, 4282, 4283, 4284, 4285, 4286, 4287, 4288, 4289,
4290, 4291, 4292, 4293, 4294, 4295, 4296, 4297, 4298, 4299, 4300,
4301, 4302, 4303, 4304, 4305, 4306, 4307, 4308, 4309, 4310, 4311,
4312, 4313, 4314, 4315, 4316, 4317, 4318, 4319, 4320, 4321, 4322,
4323, 4324, 4325, 4326, 4327, 4328, 4329, 4330, 4331, 4332, 4333,
4334, 4335, 4336, 4337, 4338, 4339, 4340, 4341, 4342, 4343, 4344,
4345, 4346, 4347, 4348, 4349, 4350, 4351, 4352, 4353, 4354, 4355,
4356, 4357, 4358, 4359, 4360, 4361, 4362, 4363, 4364, 4365, 4366,
4367, 4368, 4369, 4370, 4371, 4372, 4373, 4374, 4375, 4376, 4377,
4378, 4379, 4380, 4381, 4382, 4383, 4384, 4385, 4386, 4387, 4388,
4389, 4390, 4391, 4392, 4393, 4394, 4395, 4396, 4397, 4398, 4399,
4400, 4401, 4402, 4403, 4404, 4405, 4406, 4407, 4408, 4409, 4410,
4411, 4412, 4413, 4414, 4415, 4416, 4417, 4418, 4419, 4420, 4421,
4422, 4423, 4424, 4425, 4426, 4427, 4428, 4429, 4430, 4431, 4432,
4433, 4434, 4435, 4436, 4437, 4438, 4439, 4440, 4441, 4442, 4443,
4444, 4445, 4446, 4447, 4448, 4449, 4450, 4451, 4452, 4453, 4454,
4455, 4456, 4457, 4458, 4459, 4460, 4461, 4462, 4463, 4464, 4465,
4466, 4467, 4468, 4469, 4470, 4471, 4472, 4473, 4474, 4475, 4476,
4477, 4478, 4479, 4480, 4481, 4482, 4483, 4484, 4485, 4486, 4487,
4488, 4489, 4490, 4491, 4492, 4493, 4494, 4495, 4496, 4497, 4498,
4499, 4500, 4501, 4502, 4503, 4504, 4505, 4506, 4507, 4508, 4509,
4510, 4511, 4512, 4513, 4514, 4515, 4516, 4517, 4518, 4519, 4520,
4521, 4522, 4523, 4524, 4525, 4526, 4527, 4528, 4529, 4530, 4531,
4532, 4533, 4534, 4535, 4536, 4537, 4538, 4539, 4540, 4541, 4542,
4543, 4544, 4545, 4546, 4547, 4548, 4549, 4550, 4551, 4552, 4553,
4554, 4555, 4556, 4557, 4558, 4559, 4560, 4561, 4562, 4563, 4564,
4565, 4566, 4567, 4568, 4569, 4570, 4571, 4572, 4573, 4574, 4575,
4576, 4577, 4578, 4579, 4580, 4581, 4582, 4583, 4584, 4585, 4586,
4587, 4588, 4589, 4590, 4591, 4592, 4593, 4594, 4595, 4596, 4597,
4598, 4599, 4600, 4601, 4602, 4603, 4604, 4605, 4606, 4607, 4608,
4609, 4610, 4611, 4612, 4613, 4614, 4615, 4616, 4617, 4618, 4619,
4620, 4621, 4622, 4623, 4624, 4625, 4626, 4627, 4628, 4629, 4630,
4631, 4632, 4633, 4634, 4635, 4636, 4637, 4638, 4639, 4640, 4641,
4642, 4643, 4644, 4645, 4646, 4647, 4648, 4649, 4650, 4651, 4652,
4653, 4654, 4655, 4656, 4657, 4658, 4659, 4660, 4661, 4662, 4663,
4664, 4665, 4666, 4667, 4668, 4669, 4670, 4671, 4672, 4673, 4674,
4675, 4676, 4677, 4678, 4679, 4680, 4681, 4682, 4683, 4684, 4685,
4686, 4687, 4688, 4689, 4690, 4691, 4692, 4693, 4694, 4695, 4696,
4697, 4698, 4699, 4700, 4701, 4702, 4703, 4704, 4705, 4706, 4707,
4708, 4709, 4710, 4711, 4712, 4713, 4714, 4715, 4716, 4717, 4718,
4719, 4720, 4721, 4722, 4723, 4724, 4725, 4726, 4727, 4728, 4729,
4730, 4731, 4732, 4733, 4734, 4735, 4736, 4737, 4738, 4739, 4740,
4741, 4742, 4743, 4744, 4745, 4746, 4747, 4748, 4749, 4750, 4751,
4752, 4753, 4754, 4755, 4756, 4757, 4758, 4759, 4760, 4761, 4762,
4763, 4764, 4765, 4766, 4767, 4768, 4769, 4770, 4771, 4772, 4773,
4774, 4775, 4776, 4777, 4778, 4779, 4780, 4781, 4782, 4783, 4784,
4785, 4786, 4787, 4788, 4789, 4790, 4791, 4792, 4793, 4794, 4795,
4796, 4797, 4798, 4799, 4800, 4801, 4802, 4803, 4804, 4805, 4806,
4807, 4808, 4809, 4810, 4811, 4812, 4813, 4814, 4815, 4816, 4817,
4818, 4819, 4820, 4821, 4822, 4823, 4824, 4825, 4826, 4827, 4828,
4829, 4830, 4831, 4832, 4833, 4834, 4835, 4836, 4837, 4838, 4839,
4840, 4841, 4842, 4843, 4844, 4845, 4846, 4847, 4848, 4849, 4850,
4851, 4852, 4853, 4854, 4855, 4856, 4857, 4858, 4859, 4860, 4861,
4862, 4863, 4864, 4865, 4866, 4867, 4868, 4869, 4870, 4871, 4872,
4873, 4874, 4875, 4876, 4877, 4878, 4879, 4880, 4881, 4882, 4883,
4884, 4885, 4886, 4887, 4888, 4889, 4890, 4891, 4892, 4893, 4894,
4895, 4896, 4897, 4898, 4899, 4900, 4901, 4902, 4903, 4904, 4905,
4906, 4907, 4908, 4909, 4910, 4911, 4912, 4913, 4914, 4915, 4916,
4917, 4918, 4919, 4920, 4921, 4922, 4923, 4924, 4925, 4926, 4927,
4928, 4929, 4930, 4931, 4932, 4933, 4934, 4935, 4936, 4937, 4938,
4939, 4940, 4941, 4942, 4943, 4944, 4945, 4946, 4947, 4948, 4949,
4950, 4951, 4952, 4953, 4954, 4955, 4956, 4957, 4958, 4959, 4960,
4961, 4962, 4963, 4964, 4965, 4966, 4967, 4968, 4969, 4970, 4971,
4972, 4973, 4974, 4975, 4976, 4977, 4978, 4979, 4980, 4981, 4982,
4983, 4984, 4985, 4986, 4987, 4988, 4989, 4990, 4991, 4992, 4993,
4994, 4995, 4996, 4997, 4998, 4999, 5000, 5001, 5002, 5003, 5004,
5005, 5006, 5007, 5008, 5009, 5010, 5011, 5012, 5013, 5014, 5015,
5016, 5017, 5018, 5019, 5020, 5021, 5022, 5023, 5024, 5025, 5026,
5027, 5028, 5029, 5030, 5031, 5032, 5033, 5034, 5035, 5036, 5037,
5038, 5039}));
}
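// Permutes a float tensor of shape {2, 3, 4, 5, 6, 7} with axis order
// {1, 2, 0, 3, 4, 5}, i.e. the leading dimension is moved to the third
// output position, and compares the flattened result against the reference
// values listed below.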
TEST(TransposeTest, TestRefOps6D3) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5, 6, 7},
{1, 2, 0, 3, 4, 5}),
ElementsAreArray(
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65,
66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76,
77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87,
88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98,
99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120,
121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131,
132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153,
154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164,
165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186,
187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197,
198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208,
209, 2520, 2521, 2522, 2523, 2524, 2525, 2526, 2527, 2528, 2529,
2530, 2531, 2532, 2533, 2534, 2535, 2536, 2537, 2538, 2539, 2540,
2541, 2542, 2543, 2544, 2545, 2546, 2547, 2548, 2549, 2550, 2551,
2552, 2553, 2554, 2555, 2556, 2557, 2558, 2559, 2560, 2561, 2562,
2563, 2564, 2565, 2566, 2567, 2568, 2569, 2570, 2571, 2572, 2573,
2574, 2575, 2576, 2577, 2578, 2579, 2580, 2581, 2582, 2583, 2584,
2585, 2586, 2587, 2588, 2589, 2590, 2591, 2592, 2593, 2594, 2595,
2596, 2597, 2598, 2599, 2600, 2601, 2602, 2603, 2604, 2605, 2606,
2607, 2608, 2609, 2610, 2611, 2612, 2613, 2614, 2615, 2616, 2617,
2618, 2619, 2620, 2621, 2622, 2623, 2624, 2625, 2626, 2627, 2628,
2629, 2630, 2631, 2632, 2633, 2634, 2635, 2636, 2637, 2638, 2639,
2640, 2641, 2642, 2643, 2644, 2645, 2646, 2647, 2648, 2649, 2650,
2651, 2652, 2653, 2654, 2655, 2656, 2657, 2658, 2659, 2660, 2661,
2662, 2663, 2664, 2665, 2666, 2667, 2668, 2669, 2670, 2671, 2672,
2673, 2674, 2675, 2676, 2677, 2678, 2679, 2680, 2681, 2682, 2683,
2684, 2685, 2686, 2687, 2688, 2689, 2690, 2691, 2692, 2693, 2694,
2695, 2696, 2697, 2698, 2699, 2700, 2701, 2702, 2703, 2704, 2705,
2706, 2707, 2708, 2709, 2710, 2711, 2712, 2713, 2714, 2715, 2716,
2717, 2718, 2719, 2720, 2721, 2722, 2723, 2724, 2725, 2726, 2727,
2728, 2729, 210, 211, 212, 213, 214, 215, 216, 217, 218,
219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229,
230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240,
241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262,
263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273,
274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295,
296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306,
307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317,
318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328,
329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339,
340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350,
351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361,
362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372,
373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383,
384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394,
395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405,
406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416,
417, 418, 419, 2730, 2731, 2732, 2733, 2734, 2735, 2736, 2737,
2738, 2739, 2740, 2741, 2742, 2743, 2744, 2745, 2746, 2747, 2748,
2749, 2750, 2751, 2752, 2753, 2754, 2755, 2756, 2757, 2758, 2759,
2760, 2761, 2762, 2763, 2764, 2765, 2766, 2767, 2768, 2769, 2770,
2771, 2772, 2773, 2774, 2775, 2776, 2777, 2778, 2779, 2780, 2781,
2782, 2783, 2784, 2785, 2786, 2787, 2788, 2789, 2790, 2791, 2792,
2793, 2794, 2795, 2796, 2797, 2798, 2799, 2800, 2801, 2802, 2803,
2804, 2805, 2806, 2807, 2808, 2809, 2810, 2811, 2812, 2813, 2814,
2815, 2816, 2817, 2818, 2819, 2820, 2821, 2822, 2823, 2824, 2825,
2826, 2827, 2828, 2829, 2830, 2831, 2832, 2833, 2834, 2835, 2836,
2837, 2838, 2839, 2840, 2841, 2842, 2843, 2844, 2845, 2846, 2847,
2848, 2849, 2850, 2851, 2852, 2853, 2854, 2855, 2856, 2857, 2858,
2859, 2860, 2861, 2862, 2863, 2864, 2865, 2866, 2867, 2868, 2869,
2870, 2871, 2872, 2873, 2874, 2875, 2876, 2877, 2878, 2879, 2880,
2881, 2882, 2883, 2884, 2885, 2886, 2887, 2888, 2889, 2890, 2891,
2892, 2893, 2894, 2895, 2896, 2897, 2898, 2899, 2900, 2901, 2902,
2903, 2904, 2905, 2906, 2907, 2908, 2909, 2910, 2911, 2912, 2913,
2914, 2915, 2916, 2917, 2918, 2919, 2920, 2921, 2922, 2923, 2924,
2925, 2926, 2927, 2928, 2929, 2930, 2931, 2932, 2933, 2934, 2935,
2936, 2937, 2938, 2939, 420, 421, 422, 423, 424, 425, 426,
427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437,
438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448,
449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459,
460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470,
471, 472, 473, 474, 475, 476, 477, 478, 479, 480, 481,
482, 483, 484, 485, 486, 487, 488, 489, 490, 491, 492,
493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503,
504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514,
515, 516, 517, 518, 519, 520, 521, 522, 523, 524, 525,
526, 527, 528, 529, 530, 531, 532, 533, 534, 535, 536,
537, 538, 539, 540, 541, 542, 543, 544, 545, 546, 547,
548, 549, 550, 551, 552, 553, 554, 555, 556, 557, 558,
559, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569,
570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580,
581, 582, 583, 584, 585, 586, 587, 588, 589, 590, 591,
592, 593, 594, 595, 596, 597, 598, 599, 600, 601, 602,
603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613,
614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624,
625, 626, 627, 628, 629, 2940, 2941, 2942, 2943, 2944, 2945,
2946, 2947, 2948, 2949, 2950, 2951, 2952, 2953, 2954, 2955, 2956,
2957, 2958, 2959, 2960, 2961, 2962, 2963, 2964, 2965, 2966, 2967,
2968, 2969, 2970, 2971, 2972, 2973, 2974, 2975, 2976, 2977, 2978,
2979, 2980, 2981, 2982, 2983, 2984, 2985, 2986, 2987, 2988, 2989,
2990, 2991, 2992, 2993, 2994, 2995, 2996, 2997, 2998, 2999, 3000,
3001, 3002, 3003, 3004, 3005, 3006, 3007, 3008, 3009, 3010, 3011,
3012, 3013, 3014, 3015, 3016, 3017, 3018, 3019, 3020, 3021, 3022,
3023, 3024, 3025, 3026, 3027, 3028, 3029, 3030, 3031, 3032, 3033,
3034, 3035, 3036, 3037, 3038, 3039, 3040, 3041, 3042, 3043, 3044,
3045, 3046, 3047, 3048, 3049, 3050, 3051, 3052, 3053, 3054, 3055,
3056, 3057, 3058, 3059, 3060, 3061, 3062, 3063, 3064, 3065, 3066,
3067, 3068, 3069, 3070, 3071, 3072, 3073, 3074, 3075, 3076, 3077,
3078, 3079, 3080, 3081, 3082, 3083, 3084, 3085, 3086, 3087, 3088,
3089, 3090, 3091, 3092, 3093, 3094, 3095, 3096, 3097, 3098, 3099,
3100, 3101, 3102, 3103, 3104, 3105, 3106, 3107, 3108, 3109, 3110,
3111, 3112, 3113, 3114, 3115, 3116, 3117, 3118, 3119, 3120, 3121,
3122, 3123, 3124, 3125, 3126, 3127, 3128, 3129, 3130, 3131, 3132,
3133, 3134, 3135, 3136, 3137, 3138, 3139, 3140, 3141, 3142, 3143,
3144, 3145, 3146, 3147, 3148, 3149, 630, 631, 632, 633, 634,
635, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645,
646, 647, 648, 649, 650, 651, 652, 653, 654, 655, 656,
657, 658, 659, 660, 661, 662, 663, 664, 665, 666, 667,
668, 669, 670, 671, 672, 673, 674, 675, 676, 677, 678,
679, 680, 681, 682, 683, 684, 685, 686, 687, 688, 689,
690, 691, 692, 693, 694, 695, 696, 697, 698, 699, 700,
701, 702, 703, 704, 705, 706, 707, 708, 709, 710, 711,
712, 713, 714, 715, 716, 717, 718, 719, 720, 721, 722,
723, 724, 725, 726, 727, 728, 729, 730, 731, 732, 733,
734, 735, 736, 737, 738, 739, 740, 741, 742, 743, 744,
745, 746, 747, 748, 749, 750, 751, 752, 753, 754, 755,
756, 757, 758, 759, 760, 761, 762, 763, 764, 765, 766,
767, 768, 769, 770, 771, 772, 773, 774, 775, 776, 777,
778, 779, 780, 781, 782, 783, 784, 785, 786, 787, 788,
789, 790, 791, 792, 793, 794, 795, 796, 797, 798, 799,
800, 801, 802, 803, 804, 805, 806, 807, 808, 809, 810,
811, 812, 813, 814, 815, 816, 817, 818, 819, 820, 821,
822, 823, 824, 825, 826, 827, 828, 829, 830, 831, 832,
833, 834, 835, 836, 837, 838, 839, 3150, 3151, 3152, 3153,
3154, 3155, 3156, 3157, 3158, 3159, 3160, 3161, 3162, 3163, 3164,
3165, 3166, 3167, 3168, 3169, 3170, 3171, 3172, 3173, 3174, 3175,
3176, 3177, 3178, 3179, 3180, 3181, 3182, 3183, 3184, 3185, 3186,
3187, 3188, 3189, 3190, 3191, 3192, 3193, 3194, 3195, 3196, 3197,
3198, 3199, 3200, 3201, 3202, 3203, 3204, 3205, 3206, 3207, 3208,
3209, 3210, 3211, 3212, 3213, 3214, 3215, 3216, 3217, 3218, 3219,
3220, 3221, 3222, 3223, 3224, 3225, 3226, 3227, 3228, 3229, 3230,
3231, 3232, 3233, 3234, 3235, 3236, 3237, 3238, 3239, 3240, 3241,
3242, 3243, 3244, 3245, 3246, 3247, 3248, 3249, 3250, 3251, 3252,
3253, 3254, 3255, 3256, 3257, 3258, 3259, 3260, 3261, 3262, 3263,
3264, 3265, 3266, 3267, 3268, 3269, 3270, 3271, 3272, 3273, 3274,
3275, 3276, 3277, 3278, 3279, 3280, 3281, 3282, 3283, 3284, 3285,
3286, 3287, 3288, 3289, 3290, 3291, 3292, 3293, 3294, 3295, 3296,
3297, 3298, 3299, 3300, 3301, 3302, 3303, 3304, 3305, 3306, 3307,
3308, 3309, 3310, 3311, 3312, 3313, 3314, 3315, 3316, 3317, 3318,
3319, 3320, 3321, 3322, 3323, 3324, 3325, 3326, 3327, 3328, 3329,
3330, 3331, 3332, 3333, 3334, 3335, 3336, 3337, 3338, 3339, 3340,
3341, 3342, 3343, 3344, 3345, 3346, 3347, 3348, 3349, 3350, 3351,
3352, 3353, 3354, 3355, 3356, 3357, 3358, 3359, 840, 841, 842,
843, 844, 845, 846, 847, 848, 849, 850, 851, 852, 853,
854, 855, 856, 857, 858, 859, 860, 861, 862, 863, 864,
865, 866, 867, 868, 869, 870, 871, 872, 873, 874, 875,
876, 877, 878, 879, 880, 881, 882, 883, 884, 885, 886,
887, 888, 889, 890, 891, 892, 893, 894, 895, 896, 897,
898, 899, 900, 901, 902, 903, 904, 905, 906, 907, 908,
909, 910, 911, 912, 913, 914, 915, 916, 917, 918, 919,
920, 921, 922, 923, 924, 925, 926, 927, 928, 929, 930,
931, 932, 933, 934, 935, 936, 937, 938, 939, 940, 941,
942, 943, 944, 945, 946, 947, 948, 949, 950, 951, 952,
953, 954, 955, 956, 957, 958, 959, 960, 961, 962, 963,
964, 965, 966, 967, 968, 969, 970, 971, 972, 973, 974,
975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985,
986, 987, 988, 989, 990, 991, 992, 993, 994, 995, 996,
997, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, 1018,
1019, 1020, 1021, 1022, 1023, 1024, 1025, 1026, 1027, 1028, 1029,
1030, 1031, 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039, 1040,
1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, 3360, 3361,
3362, 3363, 3364, 3365, 3366, 3367, 3368, 3369, 3370, 3371, 3372,
3373, 3374, 3375, 3376, 3377, 3378, 3379, 3380, 3381, 3382, 3383,
3384, 3385, 3386, 3387, 3388, 3389, 3390, 3391, 3392, 3393, 3394,
3395, 3396, 3397, 3398, 3399, 3400, 3401, 3402, 3403, 3404, 3405,
3406, 3407, 3408, 3409, 3410, 3411, 3412, 3413, 3414, 3415, 3416,
3417, 3418, 3419, 3420, 3421, 3422, 3423, 3424, 3425, 3426, 3427,
3428, 3429, 3430, 3431, 3432, 3433, 3434, 3435, 3436, 3437, 3438,
3439, 3440, 3441, 3442, 3443, 3444, 3445, 3446, 3447, 3448, 3449,
3450, 3451, 3452, 3453, 3454, 3455, 3456, 3457, 3458, 3459, 3460,
3461, 3462, 3463, 3464, 3465, 3466, 3467, 3468, 3469, 3470, 3471,
3472, 3473, 3474, 3475, 3476, 3477, 3478, 3479, 3480, 3481, 3482,
3483, 3484, 3485, 3486, 3487, 3488, 3489, 3490, 3491, 3492, 3493,
3494, 3495, 3496, 3497, 3498, 3499, 3500, 3501, 3502, 3503, 3504,
3505, 3506, 3507, 3508, 3509, 3510, 3511, 3512, 3513, 3514, 3515,
3516, 3517, 3518, 3519, 3520, 3521, 3522, 3523, 3524, 3525, 3526,
3527, 3528, 3529, 3530, 3531, 3532, 3533, 3534, 3535, 3536, 3537,
3538, 3539, 3540, 3541, 3542, 3543, 3544, 3545, 3546, 3547, 3548,
3549, 3550, 3551, 3552, 3553, 3554, 3555, 3556, 3557, 3558, 3559,
3560, 3561, 3562, 3563, 3564, 3565, 3566, 3567, 3568, 3569, 1050,
1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061,
1062, 1063, 1064, 1065, 1066, 1067, 1068, 1069, 1070, 1071, 1072,
1073, 1074, 1075, 1076, 1077, 1078, 1079, 1080, 1081, 1082, 1083,
1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1093, 1094,
1095, 1096, 1097, 1098, 1099, 1100, 1101, 1102, 1103, 1104, 1105,
1106, 1107, 1108, 1109, 1110, 1111, 1112, 1113, 1114, 1115, 1116,
1117, 1118, 1119, 1120, 1121, 1122, 1123, 1124, 1125, 1126, 1127,
1128, 1129, 1130, 1131, 1132, 1133, 1134, 1135, 1136, 1137, 1138,
1139, 1140, 1141, 1142, 1143, 1144, 1145, 1146, 1147, 1148, 1149,
1150, 1151, 1152, 1153, 1154, 1155, 1156, 1157, 1158, 1159, 1160,
1161, 1162, 1163, 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1171,
1172, 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181, 1182,
1183, 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, 1192, 1193,
1194, 1195, 1196, 1197, 1198, 1199, 1200, 1201, 1202, 1203, 1204,
1205, 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1213, 1214, 1215,
1216, 1217, 1218, 1219, 1220, 1221, 1222, 1223, 1224, 1225, 1226,
1227, 1228, 1229, 1230, 1231, 1232, 1233, 1234, 1235, 1236, 1237,
1238, 1239, 1240, 1241, 1242, 1243, 1244, 1245, 1246, 1247, 1248,
1249, 1250, 1251, 1252, 1253, 1254, 1255, 1256, 1257, 1258, 1259,
3570, 3571, 3572, 3573, 3574, 3575, 3576, 3577, 3578, 3579, 3580,
3581, 3582, 3583, 3584, 3585, 3586, 3587, 3588, 3589, 3590, 3591,
3592, 3593, 3594, 3595, 3596, 3597, 3598, 3599, 3600, 3601, 3602,
3603, 3604, 3605, 3606, 3607, 3608, 3609, 3610, 3611, 3612, 3613,
3614, 3615, 3616, 3617, 3618, 3619, 3620, 3621, 3622, 3623, 3624,
3625, 3626, 3627, 3628, 3629, 3630, 3631, 3632, 3633, 3634, 3635,
3636, 3637, 3638, 3639, 3640, 3641, 3642, 3643, 3644, 3645, 3646,
3647, 3648, 3649, 3650, 3651, 3652, 3653, 3654, 3655, 3656, 3657,
3658, 3659, 3660, 3661, 3662, 3663, 3664, 3665, 3666, 3667, 3668,
3669, 3670, 3671, 3672, 3673, 3674, 3675, 3676, 3677, 3678, 3679,
3680, 3681, 3682, 3683, 3684, 3685, 3686, 3687, 3688, 3689, 3690,
3691, 3692, 3693, 3694, 3695, 3696, 3697, 3698, 3699, 3700, 3701,
3702, 3703, 3704, 3705, 3706, 3707, 3708, 3709, 3710, 3711, 3712,
3713, 3714, 3715, 3716, 3717, 3718, 3719, 3720, 3721, 3722, 3723,
3724, 3725, 3726, 3727, 3728, 3729, 3730, 3731, 3732, 3733, 3734,
3735, 3736, 3737, 3738, 3739, 3740, 3741, 3742, 3743, 3744, 3745,
3746, 3747, 3748, 3749, 3750, 3751, 3752, 3753, 3754, 3755, 3756,
3757, 3758, 3759, 3760, 3761, 3762, 3763, 3764, 3765, 3766, 3767,
3768, 3769, 3770, 3771, 3772, 3773, 3774, 3775, 3776, 3777, 3778,
3779, 1260, 1261, 1262, 1263, 1264, 1265, 1266, 1267, 1268, 1269,
1270, 1271, 1272, 1273, 1274, 1275, 1276, 1277, 1278, 1279, 1280,
1281, 1282, 1283, 1284, 1285, 1286, 1287, 1288, 1289, 1290, 1291,
1292, 1293, 1294, 1295, 1296, 1297, 1298, 1299, 1300, 1301, 1302,
1303, 1304, 1305, 1306, 1307, 1308, 1309, 1310, 1311, 1312, 1313,
1314, 1315, 1316, 1317, 1318, 1319, 1320, 1321, 1322, 1323, 1324,
1325, 1326, 1327, 1328, 1329, 1330, 1331, 1332, 1333, 1334, 1335,
1336, 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344, 1345, 1346,
1347, 1348, 1349, 1350, 1351, 1352, 1353, 1354, 1355, 1356, 1357,
1358, 1359, 1360, 1361, 1362, 1363, 1364, 1365, 1366, 1367, 1368,
1369, 1370, 1371, 1372, 1373, 1374, 1375, 1376, 1377, 1378, 1379,
1380, 1381, 1382, 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1390,
1391, 1392, 1393, 1394, 1395, 1396, 1397, 1398, 1399, 1400, 1401,
1402, 1403, 1404, 1405, 1406, 1407, 1408, 1409, 1410, 1411, 1412,
1413, 1414, 1415, 1416, 1417, 1418, 1419, 1420, 1421, 1422, 1423,
1424, 1425, 1426, 1427, 1428, 1429, 1430, 1431, 1432, 1433, 1434,
1435, 1436, 1437, 1438, 1439, 1440, 1441, 1442, 1443, 1444, 1445,
1446, 1447, 1448, 1449, 1450, 1451, 1452, 1453, 1454, 1455, 1456,
1457, 1458, 1459, 1460, 1461, 1462, 1463, 1464, 1465, 1466, 1467,
1468, 1469, 3780, 3781, 3782, 3783, 3784, 3785, 3786, 3787, 3788,
3789, 3790, 3791, 3792, 3793, 3794, 3795, 3796, 3797, 3798, 3799,
3800, 3801, 3802, 3803, 3804, 3805, 3806, 3807, 3808, 3809, 3810,
3811, 3812, 3813, 3814, 3815, 3816, 3817, 3818, 3819, 3820, 3821,
3822, 3823, 3824, 3825, 3826, 3827, 3828, 3829, 3830, 3831, 3832,
3833, 3834, 3835, 3836, 3837, 3838, 3839, 3840, 3841, 3842, 3843,
3844, 3845, 3846, 3847, 3848, 3849, 3850, 3851, 3852, 3853, 3854,
3855, 3856, 3857, 3858, 3859, 3860, 3861, 3862, 3863, 3864, 3865,
3866, 3867, 3868, 3869, 3870, 3871, 3872, 3873, 3874, 3875, 3876,
3877, 3878, 3879, 3880, 3881, 3882, 3883, 3884, 3885, 3886, 3887,
3888, 3889, 3890, 3891, 3892, 3893, 3894, 3895, 3896, 3897, 3898,
3899, 3900, 3901, 3902, 3903, 3904, 3905, 3906, 3907, 3908, 3909,
3910, 3911, 3912, 3913, 3914, 3915, 3916, 3917, 3918, 3919, 3920,
3921, 3922, 3923, 3924, 3925, 3926, 3927, 3928, 3929, 3930, 3931,
3932, 3933, 3934, 3935, 3936, 3937, 3938, 3939, 3940, 3941, 3942,
3943, 3944, 3945, 3946, 3947, 3948, 3949, 3950, 3951, 3952, 3953,
3954, 3955, 3956, 3957, 3958, 3959, 3960, 3961, 3962, 3963, 3964,
3965, 3966, 3967, 3968, 3969, 3970, 3971, 3972, 3973, 3974, 3975,
3976, 3977, 3978, 3979, 3980, 3981, 3982, 3983, 3984, 3985, 3986,
3987, 3988, 3989, 1470, 1471, 1472, 1473, 1474, 1475, 1476, 1477,
1478, 1479, 1480, 1481, 1482, 1483, 1484, 1485, 1486, 1487, 1488,
1489, 1490, 1491, 1492, 1493, 1494, 1495, 1496, 1497, 1498, 1499,
1500, 1501, 1502, 1503, 1504, 1505, 1506, 1507, 1508, 1509, 1510,
1511, 1512, 1513, 1514, 1515, 1516, 1517, 1518, 1519, 1520, 1521,
1522, 1523, 1524, 1525, 1526, 1527, 1528, 1529, 1530, 1531, 1532,
1533, 1534, 1535, 1536, 1537, 1538, 1539, 1540, 1541, 1542, 1543,
1544, 1545, 1546, 1547, 1548, 1549, 1550, 1551, 1552, 1553, 1554,
1555, 1556, 1557, 1558, 1559, 1560, 1561, 1562, 1563, 1564, 1565,
1566, 1567, 1568, 1569, 1570, 1571, 1572, 1573, 1574, 1575, 1576,
1577, 1578, 1579, 1580, 1581, 1582, 1583, 1584, 1585, 1586, 1587,
1588, 1589, 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1597, 1598,
1599, 1600, 1601, 1602, 1603, 1604, 1605, 1606, 1607, 1608, 1609,
1610, 1611, 1612, 1613, 1614, 1615, 1616, 1617, 1618, 1619, 1620,
1621, 1622, 1623, 1624, 1625, 1626, 1627, 1628, 1629, 1630, 1631,
1632, 1633, 1634, 1635, 1636, 1637, 1638, 1639, 1640, 1641, 1642,
1643, 1644, 1645, 1646, 1647, 1648, 1649, 1650, 1651, 1652, 1653,
1654, 1655, 1656, 1657, 1658, 1659, 1660, 1661, 1662, 1663, 1664,
1665, 1666, 1667, 1668, 1669, 1670, 1671, 1672, 1673, 1674, 1675,
1676, 1677, 1678, 1679, 3990, 3991, 3992, 3993, 3994, 3995, 3996,
3997, 3998, 3999, 4000, 4001, 4002, 4003, 4004, 4005, 4006, 4007,
4008, 4009, 4010, 4011, 4012, 4013, 4014, 4015, 4016, 4017, 4018,
4019, 4020, 4021, 4022, 4023, 4024, 4025, 4026, 4027, 4028, 4029,
4030, 4031, 4032, 4033, 4034, 4035, 4036, 4037, 4038, 4039, 4040,
4041, 4042, 4043, 4044, 4045, 4046, 4047, 4048, 4049, 4050, 4051,
4052, 4053, 4054, 4055, 4056, 4057, 4058, 4059, 4060, 4061, 4062,
4063, 4064, 4065, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073,
4074, 4075, 4076, 4077, 4078, 4079, 4080, 4081, 4082, 4083, 4084,
4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, 4094, 4095,
4096, 4097, 4098, 4099, 4100, 4101, 4102, 4103, 4104, 4105, 4106,
4107, 4108, 4109, 4110, 4111, 4112, 4113, 4114, 4115, 4116, 4117,
4118, 4119, 4120, 4121, 4122, 4123, 4124, 4125, 4126, 4127, 4128,
4129, 4130, 4131, 4132, 4133, 4134, 4135, 4136, 4137, 4138, 4139,
4140, 4141, 4142, 4143, 4144, 4145, 4146, 4147, 4148, 4149, 4150,
4151, 4152, 4153, 4154, 4155, 4156, 4157, 4158, 4159, 4160, 4161,
4162, 4163, 4164, 4165, 4166, 4167, 4168, 4169, 4170, 4171, 4172,
4173, 4174, 4175, 4176, 4177, 4178, 4179, 4180, 4181, 4182, 4183,
4184, 4185, 4186, 4187, 4188, 4189, 4190, 4191, 4192, 4193, 4194,
4195, 4196, 4197, 4198, 4199, 1680, 1681, 1682, 1683, 1684, 1685,
1686, 1687, 1688, 1689, 1690, 1691, 1692, 1693, 1694, 1695, 1696,
1697, 1698, 1699, 1700, 1701, 1702, 1703, 1704, 1705, 1706, 1707,
1708, 1709, 1710, 1711, 1712, 1713, 1714, 1715, 1716, 1717, 1718,
1719, 1720, 1721, 1722, 1723, 1724, 1725, 1726, 1727, 1728, 1729,
1730, 1731, 1732, 1733, 1734, 1735, 1736, 1737, 1738, 1739, 1740,
1741, 1742, 1743, 1744, 1745, 1746, 1747, 1748, 1749, 1750, 1751,
1752, 1753, 1754, 1755, 1756, 1757, 1758, 1759, 1760, 1761, 1762,
1763, 1764, 1765, 1766, 1767, 1768, 1769, 1770, 1771, 1772, 1773,
1774, 1775, 1776, 1777, 1778, 1779, 1780, 1781, 1782, 1783, 1784,
1785, 1786, 1787, 1788, 1789, 1790, 1791, 1792, 1793, 1794, 1795,
1796, 1797, 1798, 1799, 1800, 1801, 1802, 1803, 1804, 1805, 1806,
1807, 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1815, 1816, 1817,
1818, 1819, 1820, 1821, 1822, 1823, 1824, 1825, 1826, 1827, 1828,
1829, 1830, 1831, 1832, 1833, 1834, 1835, 1836, 1837, 1838, 1839,
1840, 1841, 1842, 1843, 1844, 1845, 1846, 1847, 1848, 1849, 1850,
1851, 1852, 1853, 1854, 1855, 1856, 1857, 1858, 1859, 1860, 1861,
1862, 1863, 1864, 1865, 1866, 1867, 1868, 1869, 1870, 1871, 1872,
1873, 1874, 1875, 1876, 1877, 1878, 1879, 1880, 1881, 1882, 1883,
1884, 1885, 1886, 1887, 1888, 1889, 4200, 4201, 4202, 4203, 4204,
4205, 4206, 4207, 4208, 4209, 4210, 4211, 4212, 4213, 4214, 4215,
4216, 4217, 4218, 4219, 4220, 4221, 4222, 4223, 4224, 4225, 4226,
4227, 4228, 4229, 4230, 4231, 4232, 4233, 4234, 4235, 4236, 4237,
4238, 4239, 4240, 4241, 4242, 4243, 4244, 4245, 4246, 4247, 4248,
4249, 4250, 4251, 4252, 4253, 4254, 4255, 4256, 4257, 4258, 4259,
4260, 4261, 4262, 4263, 4264, 4265, 4266, 4267, 4268, 4269, 4270,
4271, 4272, 4273, 4274, 4275, 4276, 4277, 4278, 4279, 4280, 4281,
4282, 4283, 4284, 4285, 4286, 4287, 4288, 4289, 4290, 4291, 4292,
4293, 4294, 4295, 4296, 4297, 4298, 4299, 4300, 4301, 4302, 4303,
4304, 4305, 4306, 4307, 4308, 4309, 4310, 4311, 4312, 4313, 4314,
4315, 4316, 4317, 4318, 4319, 4320, 4321, 4322, 4323, 4324, 4325,
4326, 4327, 4328, 4329, 4330, 4331, 4332, 4333, 4334, 4335, 4336,
4337, 4338, 4339, 4340, 4341, 4342, 4343, 4344, 4345, 4346, 4347,
4348, 4349, 4350, 4351, 4352, 4353, 4354, 4355, 4356, 4357, 4358,
4359, 4360, 4361, 4362, 4363, 4364, 4365, 4366, 4367, 4368, 4369,
4370, 4371, 4372, 4373, 4374, 4375, 4376, 4377, 4378, 4379, 4380,
4381, 4382, 4383, 4384, 4385, 4386, 4387, 4388, 4389, 4390, 4391,
4392, 4393, 4394, 4395, 4396, 4397, 4398, 4399, 4400, 4401, 4402,
4403, 4404, 4405, 4406, 4407, 4408, 4409, 1890, 1891, 1892, 1893,
1894, 1895, 1896, 1897, 1898, 1899, 1900, 1901, 1902, 1903, 1904,
1905, 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, 1914, 1915,
1916, 1917, 1918, 1919, 1920, 1921, 1922, 1923, 1924, 1925, 1926,
1927, 1928, 1929, 1930, 1931, 1932, 1933, 1934, 1935, 1936, 1937,
1938, 1939, 1940, 1941, 1942, 1943, 1944, 1945, 1946, 1947, 1948,
1949, 1950, 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, 1959,
1960, 1961, 1962, 1963, 1964, 1965, 1966, 1967, 1968, 1969, 1970,
1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981,
1982, 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990, 1991, 1992,
1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014,
2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023, 2024, 2025,
2026, 2027, 2028, 2029, 2030, 2031, 2032, 2033, 2034, 2035, 2036,
2037, 2038, 2039, 2040, 2041, 2042, 2043, 2044, 2045, 2046, 2047,
2048, 2049, 2050, 2051, 2052, 2053, 2054, 2055, 2056, 2057, 2058,
2059, 2060, 2061, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069,
2070, 2071, 2072, 2073, 2074, 2075, 2076, 2077, 2078, 2079, 2080,
2081, 2082, 2083, 2084, 2085, 2086, 2087, 2088, 2089, 2090, 2091,
2092, 2093, 2094, 2095, 2096, 2097, 2098, 2099, 4410, 4411, 4412,
4413, 4414, 4415, 4416, 4417, 4418, 4419, 4420, 4421, 4422, 4423,
4424, 4425, 4426, 4427, 4428, 4429, 4430, 4431, 4432, 4433, 4434,
4435, 4436, 4437, 4438, 4439, 4440, 4441, 4442, 4443, 4444, 4445,
4446, 4447, 4448, 4449, 4450, 4451, 4452, 4453, 4454, 4455, 4456,
4457, 4458, 4459, 4460, 4461, 4462, 4463, 4464, 4465, 4466, 4467,
4468, 4469, 4470, 4471, 4472, 4473, 4474, 4475, 4476, 4477, 4478,
4479, 4480, 4481, 4482, 4483, 4484, 4485, 4486, 4487, 4488, 4489,
4490, 4491, 4492, 4493, 4494, 4495, 4496, 4497, 4498, 4499, 4500,
4501, 4502, 4503, 4504, 4505, 4506, 4507, 4508, 4509, 4510, 4511,
4512, 4513, 4514, 4515, 4516, 4517, 4518, 4519, 4520, 4521, 4522,
4523, 4524, 4525, 4526, 4527, 4528, 4529, 4530, 4531, 4532, 4533,
4534, 4535, 4536, 4537, 4538, 4539, 4540, 4541, 4542, 4543, 4544,
4545, 4546, 4547, 4548, 4549, 4550, 4551, 4552, 4553, 4554, 4555,
4556, 4557, 4558, 4559, 4560, 4561, 4562, 4563, 4564, 4565, 4566,
4567, 4568, 4569, 4570, 4571, 4572, 4573, 4574, 4575, 4576, 4577,
4578, 4579, 4580, 4581, 4582, 4583, 4584, 4585, 4586, 4587, 4588,
4589, 4590, 4591, 4592, 4593, 4594, 4595, 4596, 4597, 4598, 4599,
4600, 4601, 4602, 4603, 4604, 4605, 4606, 4607, 4608, 4609, 4610,
4611, 4612, 4613, 4614, 4615, 4616, 4617, 4618, 4619, 2100, 2101,
2102, 2103, 2104, 2105, 2106, 2107, 2108, 2109, 2110, 2111, 2112,
2113, 2114, 2115, 2116, 2117, 2118, 2119, 2120, 2121, 2122, 2123,
2124, 2125, 2126, 2127, 2128, 2129, 2130, 2131, 2132, 2133, 2134,
2135, 2136, 2137, 2138, 2139, 2140, 2141, 2142, 2143, 2144, 2145,
2146, 2147, 2148, 2149, 2150, 2151, 2152, 2153, 2154, 2155, 2156,
2157, 2158, 2159, 2160, 2161, 2162, 2163, 2164, 2165, 2166, 2167,
2168, 2169, 2170, 2171, 2172, 2173, 2174, 2175, 2176, 2177, 2178,
2179, 2180, 2181, 2182, 2183, 2184, 2185, 2186, 2187, 2188, 2189,
2190, 2191, 2192, 2193, 2194, 2195, 2196, 2197, 2198, 2199, 2200,
2201, 2202, 2203, 2204, 2205, 2206, 2207, 2208, 2209, 2210, 2211,
2212, 2213, 2214, 2215, 2216, 2217, 2218, 2219, 2220, 2221, 2222,
2223, 2224, 2225, 2226, 2227, 2228, 2229, 2230, 2231, 2232, 2233,
2234, 2235, 2236, 2237, 2238, 2239, 2240, 2241, 2242, 2243, 2244,
2245, 2246, 2247, 2248, 2249, 2250, 2251, 2252, 2253, 2254, 2255,
2256, 2257, 2258, 2259, 2260, 2261, 2262, 2263, 2264, 2265, 2266,
2267, 2268, 2269, 2270, 2271, 2272, 2273, 2274, 2275, 2276, 2277,
2278, 2279, 2280, 2281, 2282, 2283, 2284, 2285, 2286, 2287, 2288,
2289, 2290, 2291, 2292, 2293, 2294, 2295, 2296, 2297, 2298, 2299,
2300, 2301, 2302, 2303, 2304, 2305, 2306, 2307, 2308, 2309, 4620,
4621, 4622, 4623, 4624, 4625, 4626, 4627, 4628, 4629, 4630, 4631,
4632, 4633, 4634, 4635, 4636, 4637, 4638, 4639, 4640, 4641, 4642,
4643, 4644, 4645, 4646, 4647, 4648, 4649, 4650, 4651, 4652, 4653,
4654, 4655, 4656, 4657, 4658, 4659, 4660, 4661, 4662, 4663, 4664,
4665, 4666, 4667, 4668, 4669, 4670, 4671, 4672, 4673, 4674, 4675,
4676, 4677, 4678, 4679, 4680, 4681, 4682, 4683, 4684, 4685, 4686,
4687, 4688, 4689, 4690, 4691, 4692, 4693, 4694, 4695, 4696, 4697,
4698, 4699, 4700, 4701, 4702, 4703, 4704, 4705, 4706, 4707, 4708,
4709, 4710, 4711, 4712, 4713, 4714, 4715, 4716, 4717, 4718, 4719,
4720, 4721, 4722, 4723, 4724, 4725, 4726, 4727, 4728, 4729, 4730,
4731, 4732, 4733, 4734, 4735, 4736, 4737, 4738, 4739, 4740, 4741,
4742, 4743, 4744, 4745, 4746, 4747, 4748, 4749, 4750, 4751, 4752,
4753, 4754, 4755, 4756, 4757, 4758, 4759, 4760, 4761, 4762, 4763,
4764, 4765, 4766, 4767, 4768, 4769, 4770, 4771, 4772, 4773, 4774,
4775, 4776, 4777, 4778, 4779, 4780, 4781, 4782, 4783, 4784, 4785,
4786, 4787, 4788, 4789, 4790, 4791, 4792, 4793, 4794, 4795, 4796,
4797, 4798, 4799, 4800, 4801, 4802, 4803, 4804, 4805, 4806, 4807,
4808, 4809, 4810, 4811, 4812, 4813, 4814, 4815, 4816, 4817, 4818,
4819, 4820, 4821, 4822, 4823, 4824, 4825, 4826, 4827, 4828, 4829,
2310, 2311, 2312, 2313, 2314, 2315, 2316, 2317, 2318, 2319, 2320,
2321, 2322, 2323, 2324, 2325, 2326, 2327, 2328, 2329, 2330, 2331,
2332, 2333, 2334, 2335, 2336, 2337, 2338, 2339, 2340, 2341, 2342,
2343, 2344, 2345, 2346, 2347, 2348, 2349, 2350, 2351, 2352, 2353,
2354, 2355, 2356, 2357, 2358, 2359, 2360, 2361, 2362, 2363, 2364,
2365, 2366, 2367, 2368, 2369, 2370, 2371, 2372, 2373, 2374, 2375,
2376, 2377, 2378, 2379, 2380, 2381, 2382, 2383, 2384, 2385, 2386,
2387, 2388, 2389, 2390, 2391, 2392, 2393, 2394, 2395, 2396, 2397,
2398, 2399, 2400, 2401, 2402, 2403, 2404, 2405, 2406, 2407, 2408,
2409, 2410, 2411, 2412, 2413, 2414, 2415, 2416, 2417, 2418, 2419,
2420, 2421, 2422, 2423, 2424, 2425, 2426, 2427, 2428, 2429, 2430,
2431, 2432, 2433, 2434, 2435, 2436, 2437, 2438, 2439, 2440, 2441,
2442, 2443, 2444, 2445, 2446, 2447, 2448, 2449, 2450, 2451, 2452,
2453, 2454, 2455, 2456, 2457, 2458, 2459, 2460, 2461, 2462, 2463,
2464, 2465, 2466, 2467, 2468, 2469, 2470, 2471, 2472, 2473, 2474,
2475, 2476, 2477, 2478, 2479, 2480, 2481, 2482, 2483, 2484, 2485,
2486, 2487, 2488, 2489, 2490, 2491, 2492, 2493, 2494, 2495, 2496,
2497, 2498, 2499, 2500, 2501, 2502, 2503, 2504, 2505, 2506, 2507,
2508, 2509, 2510, 2511, 2512, 2513, 2514, 2515, 2516, 2517, 2518,
2519, 4830, 4831, 4832, 4833, 4834, 4835, 4836, 4837, 4838, 4839,
4840, 4841, 4842, 4843, 4844, 4845, 4846, 4847, 4848, 4849, 4850,
4851, 4852, 4853, 4854, 4855, 4856, 4857, 4858, 4859, 4860, 4861,
4862, 4863, 4864, 4865, 4866, 4867, 4868, 4869, 4870, 4871, 4872,
4873, 4874, 4875, 4876, 4877, 4878, 4879, 4880, 4881, 4882, 4883,
4884, 4885, 4886, 4887, 4888, 4889, 4890, 4891, 4892, 4893, 4894,
4895, 4896, 4897, 4898, 4899, 4900, 4901, 4902, 4903, 4904, 4905,
4906, 4907, 4908, 4909, 4910, 4911, 4912, 4913, 4914, 4915, 4916,
4917, 4918, 4919, 4920, 4921, 4922, 4923, 4924, 4925, 4926, 4927,
4928, 4929, 4930, 4931, 4932, 4933, 4934, 4935, 4936, 4937, 4938,
4939, 4940, 4941, 4942, 4943, 4944, 4945, 4946, 4947, 4948, 4949,
4950, 4951, 4952, 4953, 4954, 4955, 4956, 4957, 4958, 4959, 4960,
4961, 4962, 4963, 4964, 4965, 4966, 4967, 4968, 4969, 4970, 4971,
4972, 4973, 4974, 4975, 4976, 4977, 4978, 4979, 4980, 4981, 4982,
4983, 4984, 4985, 4986, 4987, 4988, 4989, 4990, 4991, 4992, 4993,
4994, 4995, 4996, 4997, 4998, 4999, 5000, 5001, 5002, 5003, 5004,
5005, 5006, 5007, 5008, 5009, 5010, 5011, 5012, 5013, 5014, 5015,
5016, 5017, 5018, 5019, 5020, 5021, 5022, 5023, 5024, 5025, 5026,
5027, 5028, 5029, 5030, 5031, 5032, 5033, 5034, 5035, 5036, 5037,
5038, 5039}));
}
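// Same {2, 3, 4, 5, 6, 7} input, but with axis order {1, 2, 3, 0, 4, 5}:
// the leading dimension is moved to the fourth output position. The expected
// flattened output is listed below.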
TEST(TransposeTest, TestRefOps6D4) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5, 6, 7},
{1, 2, 3, 0, 4, 5}),
ElementsAreArray(
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
33, 34, 35, 36, 37, 38, 39, 40, 41, 2520, 2521,
2522, 2523, 2524, 2525, 2526, 2527, 2528, 2529, 2530, 2531, 2532,
2533, 2534, 2535, 2536, 2537, 2538, 2539, 2540, 2541, 2542, 2543,
2544, 2545, 2546, 2547, 2548, 2549, 2550, 2551, 2552, 2553, 2554,
2555, 2556, 2557, 2558, 2559, 2560, 2561, 42, 43, 44, 45,
46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
79, 80, 81, 82, 83, 2562, 2563, 2564, 2565, 2566, 2567,
2568, 2569, 2570, 2571, 2572, 2573, 2574, 2575, 2576, 2577, 2578,
2579, 2580, 2581, 2582, 2583, 2584, 2585, 2586, 2587, 2588, 2589,
2590, 2591, 2592, 2593, 2594, 2595, 2596, 2597, 2598, 2599, 2600,
2601, 2602, 2603, 84, 85, 86, 87, 88, 89, 90, 91,
92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102,
103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113,
114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124,
125, 2604, 2605, 2606, 2607, 2608, 2609, 2610, 2611, 2612, 2613,
2614, 2615, 2616, 2617, 2618, 2619, 2620, 2621, 2622, 2623, 2624,
2625, 2626, 2627, 2628, 2629, 2630, 2631, 2632, 2633, 2634, 2635,
2636, 2637, 2638, 2639, 2640, 2641, 2642, 2643, 2644, 2645, 126,
127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137,
138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
160, 161, 162, 163, 164, 165, 166, 167, 2646, 2647, 2648,
2649, 2650, 2651, 2652, 2653, 2654, 2655, 2656, 2657, 2658, 2659,
2660, 2661, 2662, 2663, 2664, 2665, 2666, 2667, 2668, 2669, 2670,
2671, 2672, 2673, 2674, 2675, 2676, 2677, 2678, 2679, 2680, 2681,
2682, 2683, 2684, 2685, 2686, 2687, 168, 169, 170, 171, 172,
173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183,
184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
206, 207, 208, 209, 2688, 2689, 2690, 2691, 2692, 2693, 2694,
2695, 2696, 2697, 2698, 2699, 2700, 2701, 2702, 2703, 2704, 2705,
2706, 2707, 2708, 2709, 2710, 2711, 2712, 2713, 2714, 2715, 2716,
2717, 2718, 2719, 2720, 2721, 2722, 2723, 2724, 2725, 2726, 2727,
2728, 2729, 210, 211, 212, 213, 214, 215, 216, 217, 218,
219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229,
230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240,
241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
2730, 2731, 2732, 2733, 2734, 2735, 2736, 2737, 2738, 2739, 2740,
2741, 2742, 2743, 2744, 2745, 2746, 2747, 2748, 2749, 2750, 2751,
2752, 2753, 2754, 2755, 2756, 2757, 2758, 2759, 2760, 2761, 2762,
2763, 2764, 2765, 2766, 2767, 2768, 2769, 2770, 2771, 252, 253,
254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264,
265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
287, 288, 289, 290, 291, 292, 293, 2772, 2773, 2774, 2775,
2776, 2777, 2778, 2779, 2780, 2781, 2782, 2783, 2784, 2785, 2786,
2787, 2788, 2789, 2790, 2791, 2792, 2793, 2794, 2795, 2796, 2797,
2798, 2799, 2800, 2801, 2802, 2803, 2804, 2805, 2806, 2807, 2808,
2809, 2810, 2811, 2812, 2813, 294, 295, 296, 297, 298, 299,
300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310,
311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321,
322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332,
333, 334, 335, 2814, 2815, 2816, 2817, 2818, 2819, 2820, 2821,
2822, 2823, 2824, 2825, 2826, 2827, 2828, 2829, 2830, 2831, 2832,
2833, 2834, 2835, 2836, 2837, 2838, 2839, 2840, 2841, 2842, 2843,
2844, 2845, 2846, 2847, 2848, 2849, 2850, 2851, 2852, 2853, 2854,
2855, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345,
346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356,
357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367,
368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 2856,
2857, 2858, 2859, 2860, 2861, 2862, 2863, 2864, 2865, 2866, 2867,
2868, 2869, 2870, 2871, 2872, 2873, 2874, 2875, 2876, 2877, 2878,
2879, 2880, 2881, 2882, 2883, 2884, 2885, 2886, 2887, 2888, 2889,
2890, 2891, 2892, 2893, 2894, 2895, 2896, 2897, 378, 379, 380,
381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391,
392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402,
403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413,
414, 415, 416, 417, 418, 419, 2898, 2899, 2900, 2901, 2902,
2903, 2904, 2905, 2906, 2907, 2908, 2909, 2910, 2911, 2912, 2913,
2914, 2915, 2916, 2917, 2918, 2919, 2920, 2921, 2922, 2923, 2924,
2925, 2926, 2927, 2928, 2929, 2930, 2931, 2932, 2933, 2934, 2935,
2936, 2937, 2938, 2939, 420, 421, 422, 423, 424, 425, 426,
427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437,
438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448,
449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459,
460, 461, 2940, 2941, 2942, 2943, 2944, 2945, 2946, 2947, 2948,
2949, 2950, 2951, 2952, 2953, 2954, 2955, 2956, 2957, 2958, 2959,
2960, 2961, 2962, 2963, 2964, 2965, 2966, 2967, 2968, 2969, 2970,
2971, 2972, 2973, 2974, 2975, 2976, 2977, 2978, 2979, 2980, 2981,
462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472,
473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 483,
484, 485, 486, 487, 488, 489, 490, 491, 492, 493, 494,
495, 496, 497, 498, 499, 500, 501, 502, 503, 2982, 2983,
2984, 2985, 2986, 2987, 2988, 2989, 2990, 2991, 2992, 2993, 2994,
2995, 2996, 2997, 2998, 2999, 3000, 3001, 3002, 3003, 3004, 3005,
3006, 3007, 3008, 3009, 3010, 3011, 3012, 3013, 3014, 3015, 3016,
3017, 3018, 3019, 3020, 3021, 3022, 3023, 504, 505, 506, 507,
508, 509, 510, 511, 512, 513, 514, 515, 516, 517, 518,
519, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529,
530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 540,
541, 542, 543, 544, 545, 3024, 3025, 3026, 3027, 3028, 3029,
3030, 3031, 3032, 3033, 3034, 3035, 3036, 3037, 3038, 3039, 3040,
3041, 3042, 3043, 3044, 3045, 3046, 3047, 3048, 3049, 3050, 3051,
3052, 3053, 3054, 3055, 3056, 3057, 3058, 3059, 3060, 3061, 3062,
3063, 3064, 3065, 546, 547, 548, 549, 550, 551, 552, 553,
554, 555, 556, 557, 558, 559, 560, 561, 562, 563, 564,
565, 566, 567, 568, 569, 570, 571, 572, 573, 574, 575,
576, 577, 578, 579, 580, 581, 582, 583, 584, 585, 586,
587, 3066, 3067, 3068, 3069, 3070, 3071, 3072, 3073, 3074, 3075,
3076, 3077, 3078, 3079, 3080, 3081, 3082, 3083, 3084, 3085, 3086,
3087, 3088, 3089, 3090, 3091, 3092, 3093, 3094, 3095, 3096, 3097,
3098, 3099, 3100, 3101, 3102, 3103, 3104, 3105, 3106, 3107, 588,
589, 590, 591, 592, 593, 594, 595, 596, 597, 598, 599,
600, 601, 602, 603, 604, 605, 606, 607, 608, 609, 610,
611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621,
622, 623, 624, 625, 626, 627, 628, 629, 3108, 3109, 3110,
3111, 3112, 3113, 3114, 3115, 3116, 3117, 3118, 3119, 3120, 3121,
3122, 3123, 3124, 3125, 3126, 3127, 3128, 3129, 3130, 3131, 3132,
3133, 3134, 3135, 3136, 3137, 3138, 3139, 3140, 3141, 3142, 3143,
3144, 3145, 3146, 3147, 3148, 3149, 630, 631, 632, 633, 634,
635, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645,
646, 647, 648, 649, 650, 651, 652, 653, 654, 655, 656,
657, 658, 659, 660, 661, 662, 663, 664, 665, 666, 667,
668, 669, 670, 671, 3150, 3151, 3152, 3153, 3154, 3155, 3156,
3157, 3158, 3159, 3160, 3161, 3162, 3163, 3164, 3165, 3166, 3167,
3168, 3169, 3170, 3171, 3172, 3173, 3174, 3175, 3176, 3177, 3178,
3179, 3180, 3181, 3182, 3183, 3184, 3185, 3186, 3187, 3188, 3189,
3190, 3191, 672, 673, 674, 675, 676, 677, 678, 679, 680,
681, 682, 683, 684, 685, 686, 687, 688, 689, 690, 691,
692, 693, 694, 695, 696, 697, 698, 699, 700, 701, 702,
703, 704, 705, 706, 707, 708, 709, 710, 711, 712, 713,
3192, 3193, 3194, 3195, 3196, 3197, 3198, 3199, 3200, 3201, 3202,
3203, 3204, 3205, 3206, 3207, 3208, 3209, 3210, 3211, 3212, 3213,
3214, 3215, 3216, 3217, 3218, 3219, 3220, 3221, 3222, 3223, 3224,
3225, 3226, 3227, 3228, 3229, 3230, 3231, 3232, 3233, 714, 715,
716, 717, 718, 719, 720, 721, 722, 723, 724, 725, 726,
727, 728, 729, 730, 731, 732, 733, 734, 735, 736, 737,
738, 739, 740, 741, 742, 743, 744, 745, 746, 747, 748,
749, 750, 751, 752, 753, 754, 755, 3234, 3235, 3236, 3237,
3238, 3239, 3240, 3241, 3242, 3243, 3244, 3245, 3246, 3247, 3248,
3249, 3250, 3251, 3252, 3253, 3254, 3255, 3256, 3257, 3258, 3259,
3260, 3261, 3262, 3263, 3264, 3265, 3266, 3267, 3268, 3269, 3270,
3271, 3272, 3273, 3274, 3275, 756, 757, 758, 759, 760, 761,
762, 763, 764, 765, 766, 767, 768, 769, 770, 771, 772,
773, 774, 775, 776, 777, 778, 779, 780, 781, 782, 783,
784, 785, 786, 787, 788, 789, 790, 791, 792, 793, 794,
795, 796, 797, 3276, 3277, 3278, 3279, 3280, 3281, 3282, 3283,
3284, 3285, 3286, 3287, 3288, 3289, 3290, 3291, 3292, 3293, 3294,
3295, 3296, 3297, 3298, 3299, 3300, 3301, 3302, 3303, 3304, 3305,
3306, 3307, 3308, 3309, 3310, 3311, 3312, 3313, 3314, 3315, 3316,
3317, 798, 799, 800, 801, 802, 803, 804, 805, 806, 807,
808, 809, 810, 811, 812, 813, 814, 815, 816, 817, 818,
819, 820, 821, 822, 823, 824, 825, 826, 827, 828, 829,
830, 831, 832, 833, 834, 835, 836, 837, 838, 839, 3318,
3319, 3320, 3321, 3322, 3323, 3324, 3325, 3326, 3327, 3328, 3329,
3330, 3331, 3332, 3333, 3334, 3335, 3336, 3337, 3338, 3339, 3340,
3341, 3342, 3343, 3344, 3345, 3346, 3347, 3348, 3349, 3350, 3351,
3352, 3353, 3354, 3355, 3356, 3357, 3358, 3359, 840, 841, 842,
843, 844, 845, 846, 847, 848, 849, 850, 851, 852, 853,
854, 855, 856, 857, 858, 859, 860, 861, 862, 863, 864,
865, 866, 867, 868, 869, 870, 871, 872, 873, 874, 875,
876, 877, 878, 879, 880, 881, 3360, 3361, 3362, 3363, 3364,
3365, 3366, 3367, 3368, 3369, 3370, 3371, 3372, 3373, 3374, 3375,
3376, 3377, 3378, 3379, 3380, 3381, 3382, 3383, 3384, 3385, 3386,
3387, 3388, 3389, 3390, 3391, 3392, 3393, 3394, 3395, 3396, 3397,
3398, 3399, 3400, 3401, 882, 883, 884, 885, 886, 887, 888,
889, 890, 891, 892, 893, 894, 895, 896, 897, 898, 899,
900, 901, 902, 903, 904, 905, 906, 907, 908, 909, 910,
911, 912, 913, 914, 915, 916, 917, 918, 919, 920, 921,
922, 923, 3402, 3403, 3404, 3405, 3406, 3407, 3408, 3409, 3410,
3411, 3412, 3413, 3414, 3415, 3416, 3417, 3418, 3419, 3420, 3421,
3422, 3423, 3424, 3425, 3426, 3427, 3428, 3429, 3430, 3431, 3432,
3433, 3434, 3435, 3436, 3437, 3438, 3439, 3440, 3441, 3442, 3443,
924, 925, 926, 927, 928, 929, 930, 931, 932, 933, 934,
935, 936, 937, 938, 939, 940, 941, 942, 943, 944, 945,
946, 947, 948, 949, 950, 951, 952, 953, 954, 955, 956,
957, 958, 959, 960, 961, 962, 963, 964, 965, 3444, 3445,
3446, 3447, 3448, 3449, 3450, 3451, 3452, 3453, 3454, 3455, 3456,
3457, 3458, 3459, 3460, 3461, 3462, 3463, 3464, 3465, 3466, 3467,
3468, 3469, 3470, 3471, 3472, 3473, 3474, 3475, 3476, 3477, 3478,
3479, 3480, 3481, 3482, 3483, 3484, 3485, 966, 967, 968, 969,
970, 971, 972, 973, 974, 975, 976, 977, 978, 979, 980,
981, 982, 983, 984, 985, 986, 987, 988, 989, 990, 991,
992, 993, 994, 995, 996, 997, 998, 999, 1000, 1001, 1002,
1003, 1004, 1005, 1006, 1007, 3486, 3487, 3488, 3489, 3490, 3491,
3492, 3493, 3494, 3495, 3496, 3497, 3498, 3499, 3500, 3501, 3502,
3503, 3504, 3505, 3506, 3507, 3508, 3509, 3510, 3511, 3512, 3513,
3514, 3515, 3516, 3517, 3518, 3519, 3520, 3521, 3522, 3523, 3524,
3525, 3526, 3527, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023, 1024, 1025, 1026,
1027, 1028, 1029, 1030, 1031, 1032, 1033, 1034, 1035, 1036, 1037,
1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048,
1049, 3528, 3529, 3530, 3531, 3532, 3533, 3534, 3535, 3536, 3537,
3538, 3539, 3540, 3541, 3542, 3543, 3544, 3545, 3546, 3547, 3548,
3549, 3550, 3551, 3552, 3553, 3554, 3555, 3556, 3557, 3558, 3559,
3560, 3561, 3562, 3563, 3564, 3565, 3566, 3567, 3568, 3569, 1050,
1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061,
1062, 1063, 1064, 1065, 1066, 1067, 1068, 1069, 1070, 1071, 1072,
1073, 1074, 1075, 1076, 1077, 1078, 1079, 1080, 1081, 1082, 1083,
1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091, 3570, 3571, 3572,
3573, 3574, 3575, 3576, 3577, 3578, 3579, 3580, 3581, 3582, 3583,
3584, 3585, 3586, 3587, 3588, 3589, 3590, 3591, 3592, 3593, 3594,
3595, 3596, 3597, 3598, 3599, 3600, 3601, 3602, 3603, 3604, 3605,
3606, 3607, 3608, 3609, 3610, 3611, 1092, 1093, 1094, 1095, 1096,
1097, 1098, 1099, 1100, 1101, 1102, 1103, 1104, 1105, 1106, 1107,
1108, 1109, 1110, 1111, 1112, 1113, 1114, 1115, 1116, 1117, 1118,
1119, 1120, 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129,
1130, 1131, 1132, 1133, 3612, 3613, 3614, 3615, 3616, 3617, 3618,
3619, 3620, 3621, 3622, 3623, 3624, 3625, 3626, 3627, 3628, 3629,
3630, 3631, 3632, 3633, 3634, 3635, 3636, 3637, 3638, 3639, 3640,
3641, 3642, 3643, 3644, 3645, 3646, 3647, 3648, 3649, 3650, 3651,
3652, 3653, 1134, 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142,
1143, 1144, 1145, 1146, 1147, 1148, 1149, 1150, 1151, 1152, 1153,
1154, 1155, 1156, 1157, 1158, 1159, 1160, 1161, 1162, 1163, 1164,
1165, 1166, 1167, 1168, 1169, 1170, 1171, 1172, 1173, 1174, 1175,
3654, 3655, 3656, 3657, 3658, 3659, 3660, 3661, 3662, 3663, 3664,
3665, 3666, 3667, 3668, 3669, 3670, 3671, 3672, 3673, 3674, 3675,
3676, 3677, 3678, 3679, 3680, 3681, 3682, 3683, 3684, 3685, 3686,
3687, 3688, 3689, 3690, 3691, 3692, 3693, 3694, 3695, 1176, 1177,
1178, 1179, 1180, 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188,
1189, 1190, 1191, 1192, 1193, 1194, 1195, 1196, 1197, 1198, 1199,
1200, 1201, 1202, 1203, 1204, 1205, 1206, 1207, 1208, 1209, 1210,
1211, 1212, 1213, 1214, 1215, 1216, 1217, 3696, 3697, 3698, 3699,
3700, 3701, 3702, 3703, 3704, 3705, 3706, 3707, 3708, 3709, 3710,
3711, 3712, 3713, 3714, 3715, 3716, 3717, 3718, 3719, 3720, 3721,
3722, 3723, 3724, 3725, 3726, 3727, 3728, 3729, 3730, 3731, 3732,
3733, 3734, 3735, 3736, 3737, 1218, 1219, 1220, 1221, 1222, 1223,
1224, 1225, 1226, 1227, 1228, 1229, 1230, 1231, 1232, 1233, 1234,
1235, 1236, 1237, 1238, 1239, 1240, 1241, 1242, 1243, 1244, 1245,
1246, 1247, 1248, 1249, 1250, 1251, 1252, 1253, 1254, 1255, 1256,
1257, 1258, 1259, 3738, 3739, 3740, 3741, 3742, 3743, 3744, 3745,
3746, 3747, 3748, 3749, 3750, 3751, 3752, 3753, 3754, 3755, 3756,
3757, 3758, 3759, 3760, 3761, 3762, 3763, 3764, 3765, 3766, 3767,
3768, 3769, 3770, 3771, 3772, 3773, 3774, 3775, 3776, 3777, 3778,
3779, 1260, 1261, 1262, 1263, 1264, 1265, 1266, 1267, 1268, 1269,
1270, 1271, 1272, 1273, 1274, 1275, 1276, 1277, 1278, 1279, 1280,
1281, 1282, 1283, 1284, 1285, 1286, 1287, 1288, 1289, 1290, 1291,
1292, 1293, 1294, 1295, 1296, 1297, 1298, 1299, 1300, 1301, 3780,
3781, 3782, 3783, 3784, 3785, 3786, 3787, 3788, 3789, 3790, 3791,
3792, 3793, 3794, 3795, 3796, 3797, 3798, 3799, 3800, 3801, 3802,
3803, 3804, 3805, 3806, 3807, 3808, 3809, 3810, 3811, 3812, 3813,
3814, 3815, 3816, 3817, 3818, 3819, 3820, 3821, 1302, 1303, 1304,
1305, 1306, 1307, 1308, 1309, 1310, 1311, 1312, 1313, 1314, 1315,
1316, 1317, 1318, 1319, 1320, 1321, 1322, 1323, 1324, 1325, 1326,
1327, 1328, 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, 1337,
1338, 1339, 1340, 1341, 1342, 1343, 3822, 3823, 3824, 3825, 3826,
3827, 3828, 3829, 3830, 3831, 3832, 3833, 3834, 3835, 3836, 3837,
3838, 3839, 3840, 3841, 3842, 3843, 3844, 3845, 3846, 3847, 3848,
3849, 3850, 3851, 3852, 3853, 3854, 3855, 3856, 3857, 3858, 3859,
3860, 3861, 3862, 3863, 1344, 1345, 1346, 1347, 1348, 1349, 1350,
1351, 1352, 1353, 1354, 1355, 1356, 1357, 1358, 1359, 1360, 1361,
1362, 1363, 1364, 1365, 1366, 1367, 1368, 1369, 1370, 1371, 1372,
1373, 1374, 1375, 1376, 1377, 1378, 1379, 1380, 1381, 1382, 1383,
1384, 1385, 3864, 3865, 3866, 3867, 3868, 3869, 3870, 3871, 3872,
3873, 3874, 3875, 3876, 3877, 3878, 3879, 3880, 3881, 3882, 3883,
3884, 3885, 3886, 3887, 3888, 3889, 3890, 3891, 3892, 3893, 3894,
3895, 3896, 3897, 3898, 3899, 3900, 3901, 3902, 3903, 3904, 3905,
1386, 1387, 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, 1396,
1397, 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1405, 1406, 1407,
1408, 1409, 1410, 1411, 1412, 1413, 1414, 1415, 1416, 1417, 1418,
1419, 1420, 1421, 1422, 1423, 1424, 1425, 1426, 1427, 3906, 3907,
3908, 3909, 3910, 3911, 3912, 3913, 3914, 3915, 3916, 3917, 3918,
3919, 3920, 3921, 3922, 3923, 3924, 3925, 3926, 3927, 3928, 3929,
3930, 3931, 3932, 3933, 3934, 3935, 3936, 3937, 3938, 3939, 3940,
3941, 3942, 3943, 3944, 3945, 3946, 3947, 1428, 1429, 1430, 1431,
1432, 1433, 1434, 1435, 1436, 1437, 1438, 1439, 1440, 1441, 1442,
1443, 1444, 1445, 1446, 1447, 1448, 1449, 1450, 1451, 1452, 1453,
1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, 1462, 1463, 1464,
1465, 1466, 1467, 1468, 1469, 3948, 3949, 3950, 3951, 3952, 3953,
3954, 3955, 3956, 3957, 3958, 3959, 3960, 3961, 3962, 3963, 3964,
3965, 3966, 3967, 3968, 3969, 3970, 3971, 3972, 3973, 3974, 3975,
3976, 3977, 3978, 3979, 3980, 3981, 3982, 3983, 3984, 3985, 3986,
3987, 3988, 3989, 1470, 1471, 1472, 1473, 1474, 1475, 1476, 1477,
1478, 1479, 1480, 1481, 1482, 1483, 1484, 1485, 1486, 1487, 1488,
1489, 1490, 1491, 1492, 1493, 1494, 1495, 1496, 1497, 1498, 1499,
1500, 1501, 1502, 1503, 1504, 1505, 1506, 1507, 1508, 1509, 1510,
1511, 3990, 3991, 3992, 3993, 3994, 3995, 3996, 3997, 3998, 3999,
4000, 4001, 4002, 4003, 4004, 4005, 4006, 4007, 4008, 4009, 4010,
4011, 4012, 4013, 4014, 4015, 4016, 4017, 4018, 4019, 4020, 4021,
4022, 4023, 4024, 4025, 4026, 4027, 4028, 4029, 4030, 4031, 1512,
1513, 1514, 1515, 1516, 1517, 1518, 1519, 1520, 1521, 1522, 1523,
1524, 1525, 1526, 1527, 1528, 1529, 1530, 1531, 1532, 1533, 1534,
1535, 1536, 1537, 1538, 1539, 1540, 1541, 1542, 1543, 1544, 1545,
1546, 1547, 1548, 1549, 1550, 1551, 1552, 1553, 4032, 4033, 4034,
4035, 4036, 4037, 4038, 4039, 4040, 4041, 4042, 4043, 4044, 4045,
4046, 4047, 4048, 4049, 4050, 4051, 4052, 4053, 4054, 4055, 4056,
4057, 4058, 4059, 4060, 4061, 4062, 4063, 4064, 4065, 4066, 4067,
4068, 4069, 4070, 4071, 4072, 4073, 1554, 1555, 1556, 1557, 1558,
1559, 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, 1568, 1569,
1570, 1571, 1572, 1573, 1574, 1575, 1576, 1577, 1578, 1579, 1580,
1581, 1582, 1583, 1584, 1585, 1586, 1587, 1588, 1589, 1590, 1591,
1592, 1593, 1594, 1595, 4074, 4075, 4076, 4077, 4078, 4079, 4080,
4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091,
4092, 4093, 4094, 4095, 4096, 4097, 4098, 4099, 4100, 4101, 4102,
4103, 4104, 4105, 4106, 4107, 4108, 4109, 4110, 4111, 4112, 4113,
4114, 4115, 1596, 1597, 1598, 1599, 1600, 1601, 1602, 1603, 1604,
1605, 1606, 1607, 1608, 1609, 1610, 1611, 1612, 1613, 1614, 1615,
1616, 1617, 1618, 1619, 1620, 1621, 1622, 1623, 1624, 1625, 1626,
1627, 1628, 1629, 1630, 1631, 1632, 1633, 1634, 1635, 1636, 1637,
4116, 4117, 4118, 4119, 4120, 4121, 4122, 4123, 4124, 4125, 4126,
4127, 4128, 4129, 4130, 4131, 4132, 4133, 4134, 4135, 4136, 4137,
4138, 4139, 4140, 4141, 4142, 4143, 4144, 4145, 4146, 4147, 4148,
4149, 4150, 4151, 4152, 4153, 4154, 4155, 4156, 4157, 1638, 1639,
1640, 1641, 1642, 1643, 1644, 1645, 1646, 1647, 1648, 1649, 1650,
1651, 1652, 1653, 1654, 1655, 1656, 1657, 1658, 1659, 1660, 1661,
1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, 1670, 1671, 1672,
1673, 1674, 1675, 1676, 1677, 1678, 1679, 4158, 4159, 4160, 4161,
4162, 4163, 4164, 4165, 4166, 4167, 4168, 4169, 4170, 4171, 4172,
4173, 4174, 4175, 4176, 4177, 4178, 4179, 4180, 4181, 4182, 4183,
4184, 4185, 4186, 4187, 4188, 4189, 4190, 4191, 4192, 4193, 4194,
4195, 4196, 4197, 4198, 4199, 1680, 1681, 1682, 1683, 1684, 1685,
1686, 1687, 1688, 1689, 1690, 1691, 1692, 1693, 1694, 1695, 1696,
1697, 1698, 1699, 1700, 1701, 1702, 1703, 1704, 1705, 1706, 1707,
1708, 1709, 1710, 1711, 1712, 1713, 1714, 1715, 1716, 1717, 1718,
1719, 1720, 1721, 4200, 4201, 4202, 4203, 4204, 4205, 4206, 4207,
4208, 4209, 4210, 4211, 4212, 4213, 4214, 4215, 4216, 4217, 4218,
4219, 4220, 4221, 4222, 4223, 4224, 4225, 4226, 4227, 4228, 4229,
4230, 4231, 4232, 4233, 4234, 4235, 4236, 4237, 4238, 4239, 4240,
4241, 1722, 1723, 1724, 1725, 1726, 1727, 1728, 1729, 1730, 1731,
1732, 1733, 1734, 1735, 1736, 1737, 1738, 1739, 1740, 1741, 1742,
1743, 1744, 1745, 1746, 1747, 1748, 1749, 1750, 1751, 1752, 1753,
1754, 1755, 1756, 1757, 1758, 1759, 1760, 1761, 1762, 1763, 4242,
4243, 4244, 4245, 4246, 4247, 4248, 4249, 4250, 4251, 4252, 4253,
4254, 4255, 4256, 4257, 4258, 4259, 4260, 4261, 4262, 4263, 4264,
4265, 4266, 4267, 4268, 4269, 4270, 4271, 4272, 4273, 4274, 4275,
4276, 4277, 4278, 4279, 4280, 4281, 4282, 4283, 1764, 1765, 1766,
1767, 1768, 1769, 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777,
1778, 1779, 1780, 1781, 1782, 1783, 1784, 1785, 1786, 1787, 1788,
1789, 1790, 1791, 1792, 1793, 1794, 1795, 1796, 1797, 1798, 1799,
1800, 1801, 1802, 1803, 1804, 1805, 4284, 4285, 4286, 4287, 4288,
4289, 4290, 4291, 4292, 4293, 4294, 4295, 4296, 4297, 4298, 4299,
4300, 4301, 4302, 4303, 4304, 4305, 4306, 4307, 4308, 4309, 4310,
4311, 4312, 4313, 4314, 4315, 4316, 4317, 4318, 4319, 4320, 4321,
4322, 4323, 4324, 4325, 1806, 1807, 1808, 1809, 1810, 1811, 1812,
1813, 1814, 1815, 1816, 1817, 1818, 1819, 1820, 1821, 1822, 1823,
1824, 1825, 1826, 1827, 1828, 1829, 1830, 1831, 1832, 1833, 1834,
1835, 1836, 1837, 1838, 1839, 1840, 1841, 1842, 1843, 1844, 1845,
1846, 1847, 4326, 4327, 4328, 4329, 4330, 4331, 4332, 4333, 4334,
4335, 4336, 4337, 4338, 4339, 4340, 4341, 4342, 4343, 4344, 4345,
4346, 4347, 4348, 4349, 4350, 4351, 4352, 4353, 4354, 4355, 4356,
4357, 4358, 4359, 4360, 4361, 4362, 4363, 4364, 4365, 4366, 4367,
1848, 1849, 1850, 1851, 1852, 1853, 1854, 1855, 1856, 1857, 1858,
1859, 1860, 1861, 1862, 1863, 1864, 1865, 1866, 1867, 1868, 1869,
1870, 1871, 1872, 1873, 1874, 1875, 1876, 1877, 1878, 1879, 1880,
1881, 1882, 1883, 1884, 1885, 1886, 1887, 1888, 1889, 4368, 4369,
4370, 4371, 4372, 4373, 4374, 4375, 4376, 4377, 4378, 4379, 4380,
4381, 4382, 4383, 4384, 4385, 4386, 4387, 4388, 4389, 4390, 4391,
4392, 4393, 4394, 4395, 4396, 4397, 4398, 4399, 4400, 4401, 4402,
4403, 4404, 4405, 4406, 4407, 4408, 4409, 1890, 1891, 1892, 1893,
1894, 1895, 1896, 1897, 1898, 1899, 1900, 1901, 1902, 1903, 1904,
1905, 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, 1914, 1915,
1916, 1917, 1918, 1919, 1920, 1921, 1922, 1923, 1924, 1925, 1926,
1927, 1928, 1929, 1930, 1931, 4410, 4411, 4412, 4413, 4414, 4415,
4416, 4417, 4418, 4419, 4420, 4421, 4422, 4423, 4424, 4425, 4426,
4427, 4428, 4429, 4430, 4431, 4432, 4433, 4434, 4435, 4436, 4437,
4438, 4439, 4440, 4441, 4442, 4443, 4444, 4445, 4446, 4447, 4448,
4449, 4450, 4451, 1932, 1933, 1934, 1935, 1936, 1937, 1938, 1939,
1940, 1941, 1942, 1943, 1944, 1945, 1946, 1947, 1948, 1949, 1950,
1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, 1959, 1960, 1961,
1962, 1963, 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971, 1972,
1973, 4452, 4453, 4454, 4455, 4456, 4457, 4458, 4459, 4460, 4461,
4462, 4463, 4464, 4465, 4466, 4467, 4468, 4469, 4470, 4471, 4472,
4473, 4474, 4475, 4476, 4477, 4478, 4479, 4480, 4481, 4482, 4483,
4484, 4485, 4486, 4487, 4488, 4489, 4490, 4491, 4492, 4493, 1974,
1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982, 1983, 1984, 1985,
1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996,
1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 4494, 4495, 4496,
4497, 4498, 4499, 4500, 4501, 4502, 4503, 4504, 4505, 4506, 4507,
4508, 4509, 4510, 4511, 4512, 4513, 4514, 4515, 4516, 4517, 4518,
4519, 4520, 4521, 4522, 4523, 4524, 4525, 4526, 4527, 4528, 4529,
4530, 4531, 4532, 4533, 4534, 4535, 2016, 2017, 2018, 2019, 2020,
2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029, 2030, 2031,
2032, 2033, 2034, 2035, 2036, 2037, 2038, 2039, 2040, 2041, 2042,
2043, 2044, 2045, 2046, 2047, 2048, 2049, 2050, 2051, 2052, 2053,
2054, 2055, 2056, 2057, 4536, 4537, 4538, 4539, 4540, 4541, 4542,
4543, 4544, 4545, 4546, 4547, 4548, 4549, 4550, 4551, 4552, 4553,
4554, 4555, 4556, 4557, 4558, 4559, 4560, 4561, 4562, 4563, 4564,
4565, 4566, 4567, 4568, 4569, 4570, 4571, 4572, 4573, 4574, 4575,
4576, 4577, 2058, 2059, 2060, 2061, 2062, 2063, 2064, 2065, 2066,
2067, 2068, 2069, 2070, 2071, 2072, 2073, 2074, 2075, 2076, 2077,
2078, 2079, 2080, 2081, 2082, 2083, 2084, 2085, 2086, 2087, 2088,
2089, 2090, 2091, 2092, 2093, 2094, 2095, 2096, 2097, 2098, 2099,
4578, 4579, 4580, 4581, 4582, 4583, 4584, 4585, 4586, 4587, 4588,
4589, 4590, 4591, 4592, 4593, 4594, 4595, 4596, 4597, 4598, 4599,
4600, 4601, 4602, 4603, 4604, 4605, 4606, 4607, 4608, 4609, 4610,
4611, 4612, 4613, 4614, 4615, 4616, 4617, 4618, 4619, 2100, 2101,
2102, 2103, 2104, 2105, 2106, 2107, 2108, 2109, 2110, 2111, 2112,
2113, 2114, 2115, 2116, 2117, 2118, 2119, 2120, 2121, 2122, 2123,
2124, 2125, 2126, 2127, 2128, 2129, 2130, 2131, 2132, 2133, 2134,
2135, 2136, 2137, 2138, 2139, 2140, 2141, 4620, 4621, 4622, 4623,
4624, 4625, 4626, 4627, 4628, 4629, 4630, 4631, 4632, 4633, 4634,
4635, 4636, 4637, 4638, 4639, 4640, 4641, 4642, 4643, 4644, 4645,
4646, 4647, 4648, 4649, 4650, 4651, 4652, 4653, 4654, 4655, 4656,
4657, 4658, 4659, 4660, 4661, 2142, 2143, 2144, 2145, 2146, 2147,
2148, 2149, 2150, 2151, 2152, 2153, 2154, 2155, 2156, 2157, 2158,
2159, 2160, 2161, 2162, 2163, 2164, 2165, 2166, 2167, 2168, 2169,
2170, 2171, 2172, 2173, 2174, 2175, 2176, 2177, 2178, 2179, 2180,
2181, 2182, 2183, 4662, 4663, 4664, 4665, 4666, 4667, 4668, 4669,
4670, 4671, 4672, 4673, 4674, 4675, 4676, 4677, 4678, 4679, 4680,
4681, 4682, 4683, 4684, 4685, 4686, 4687, 4688, 4689, 4690, 4691,
4692, 4693, 4694, 4695, 4696, 4697, 4698, 4699, 4700, 4701, 4702,
4703, 2184, 2185, 2186, 2187, 2188, 2189, 2190, 2191, 2192, 2193,
2194, 2195, 2196, 2197, 2198, 2199, 2200, 2201, 2202, 2203, 2204,
2205, 2206, 2207, 2208, 2209, 2210, 2211, 2212, 2213, 2214, 2215,
2216, 2217, 2218, 2219, 2220, 2221, 2222, 2223, 2224, 2225, 4704,
4705, 4706, 4707, 4708, 4709, 4710, 4711, 4712, 4713, 4714, 4715,
4716, 4717, 4718, 4719, 4720, 4721, 4722, 4723, 4724, 4725, 4726,
4727, 4728, 4729, 4730, 4731, 4732, 4733, 4734, 4735, 4736, 4737,
4738, 4739, 4740, 4741, 4742, 4743, 4744, 4745, 2226, 2227, 2228,
2229, 2230, 2231, 2232, 2233, 2234, 2235, 2236, 2237, 2238, 2239,
2240, 2241, 2242, 2243, 2244, 2245, 2246, 2247, 2248, 2249, 2250,
2251, 2252, 2253, 2254, 2255, 2256, 2257, 2258, 2259, 2260, 2261,
2262, 2263, 2264, 2265, 2266, 2267, 4746, 4747, 4748, 4749, 4750,
4751, 4752, 4753, 4754, 4755, 4756, 4757, 4758, 4759, 4760, 4761,
4762, 4763, 4764, 4765, 4766, 4767, 4768, 4769, 4770, 4771, 4772,
4773, 4774, 4775, 4776, 4777, 4778, 4779, 4780, 4781, 4782, 4783,
4784, 4785, 4786, 4787, 2268, 2269, 2270, 2271, 2272, 2273, 2274,
2275, 2276, 2277, 2278, 2279, 2280, 2281, 2282, 2283, 2284, 2285,
2286, 2287, 2288, 2289, 2290, 2291, 2292, 2293, 2294, 2295, 2296,
2297, 2298, 2299, 2300, 2301, 2302, 2303, 2304, 2305, 2306, 2307,
2308, 2309, 4788, 4789, 4790, 4791, 4792, 4793, 4794, 4795, 4796,
4797, 4798, 4799, 4800, 4801, 4802, 4803, 4804, 4805, 4806, 4807,
4808, 4809, 4810, 4811, 4812, 4813, 4814, 4815, 4816, 4817, 4818,
4819, 4820, 4821, 4822, 4823, 4824, 4825, 4826, 4827, 4828, 4829,
2310, 2311, 2312, 2313, 2314, 2315, 2316, 2317, 2318, 2319, 2320,
2321, 2322, 2323, 2324, 2325, 2326, 2327, 2328, 2329, 2330, 2331,
2332, 2333, 2334, 2335, 2336, 2337, 2338, 2339, 2340, 2341, 2342,
2343, 2344, 2345, 2346, 2347, 2348, 2349, 2350, 2351, 4830, 4831,
4832, 4833, 4834, 4835, 4836, 4837, 4838, 4839, 4840, 4841, 4842,
4843, 4844, 4845, 4846, 4847, 4848, 4849, 4850, 4851, 4852, 4853,
4854, 4855, 4856, 4857, 4858, 4859, 4860, 4861, 4862, 4863, 4864,
4865, 4866, 4867, 4868, 4869, 4870, 4871, 2352, 2353, 2354, 2355,
2356, 2357, 2358, 2359, 2360, 2361, 2362, 2363, 2364, 2365, 2366,
2367, 2368, 2369, 2370, 2371, 2372, 2373, 2374, 2375, 2376, 2377,
2378, 2379, 2380, 2381, 2382, 2383, 2384, 2385, 2386, 2387, 2388,
2389, 2390, 2391, 2392, 2393, 4872, 4873, 4874, 4875, 4876, 4877,
4878, 4879, 4880, 4881, 4882, 4883, 4884, 4885, 4886, 4887, 4888,
4889, 4890, 4891, 4892, 4893, 4894, 4895, 4896, 4897, 4898, 4899,
4900, 4901, 4902, 4903, 4904, 4905, 4906, 4907, 4908, 4909, 4910,
4911, 4912, 4913, 2394, 2395, 2396, 2397, 2398, 2399, 2400, 2401,
2402, 2403, 2404, 2405, 2406, 2407, 2408, 2409, 2410, 2411, 2412,
2413, 2414, 2415, 2416, 2417, 2418, 2419, 2420, 2421, 2422, 2423,
2424, 2425, 2426, 2427, 2428, 2429, 2430, 2431, 2432, 2433, 2434,
2435, 4914, 4915, 4916, 4917, 4918, 4919, 4920, 4921, 4922, 4923,
4924, 4925, 4926, 4927, 4928, 4929, 4930, 4931, 4932, 4933, 4934,
4935, 4936, 4937, 4938, 4939, 4940, 4941, 4942, 4943, 4944, 4945,
4946, 4947, 4948, 4949, 4950, 4951, 4952, 4953, 4954, 4955, 2436,
2437, 2438, 2439, 2440, 2441, 2442, 2443, 2444, 2445, 2446, 2447,
2448, 2449, 2450, 2451, 2452, 2453, 2454, 2455, 2456, 2457, 2458,
2459, 2460, 2461, 2462, 2463, 2464, 2465, 2466, 2467, 2468, 2469,
2470, 2471, 2472, 2473, 2474, 2475, 2476, 2477, 4956, 4957, 4958,
4959, 4960, 4961, 4962, 4963, 4964, 4965, 4966, 4967, 4968, 4969,
4970, 4971, 4972, 4973, 4974, 4975, 4976, 4977, 4978, 4979, 4980,
4981, 4982, 4983, 4984, 4985, 4986, 4987, 4988, 4989, 4990, 4991,
4992, 4993, 4994, 4995, 4996, 4997, 2478, 2479, 2480, 2481, 2482,
2483, 2484, 2485, 2486, 2487, 2488, 2489, 2490, 2491, 2492, 2493,
2494, 2495, 2496, 2497, 2498, 2499, 2500, 2501, 2502, 2503, 2504,
2505, 2506, 2507, 2508, 2509, 2510, 2511, 2512, 2513, 2514, 2515,
2516, 2517, 2518, 2519, 4998, 4999, 5000, 5001, 5002, 5003, 5004,
5005, 5006, 5007, 5008, 5009, 5010, 5011, 5012, 5013, 5014, 5015,
5016, 5017, 5018, 5019, 5020, 5021, 5022, 5023, 5024, 5025, 5026,
5027, 5028, 5029, 5030, 5031, 5032, 5033, 5034, 5035, 5036, 5037,
5038, 5039}));
}
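// Illustrative sketch (not part of the original test suite): the long
// ElementsAreArray literals in these 6-D cases all encode the result of
// transposing a row-major tensor whose flat contents are 0, 1, 2, ... under
// the given permutation. A hypothetical helper like the one below reproduces
// those expected values, assuming the usual TFLite convention
// output_shape[i] = input_shape[perm[i]] and that <vector> is already
// included by this file. For instance, ReferenceTransposeIota(
// {2, 3, 4, 5, 6, 7}, {1, 2, 3, 4, 5, 0}) begins 0, 2520, 1, 2521, ...,
// matching the data in TestRefOps6D6 below.
namespace {

std::vector<float> ReferenceTransposeIota(const std::vector<int>& shape,
                                          const std::vector<int>& perm) {
  const int dims = static_cast<int>(shape.size());

  // Row-major strides of the input tensor.
  std::vector<int> in_stride(dims, 1);
  for (int i = dims - 2; i >= 0; --i) {
    in_stride[i] = in_stride[i + 1] * shape[i + 1];
  }

  // Shape of the transposed output and total element count.
  std::vector<int> out_shape(dims);
  int size = 1;
  for (int i = 0; i < dims; ++i) {
    out_shape[i] = shape[perm[i]];
    size *= shape[i];
  }

  std::vector<float> out(size);
  std::vector<int> out_index(dims, 0);
  for (int o = 0; o < size; ++o) {
    // The output coordinate along axis k equals the input coordinate along
    // axis perm[k], so the source flat offset is a dot product with the
    // permuted input strides.
    int in_offset = 0;
    for (int k = 0; k < dims; ++k) {
      in_offset += out_index[k] * in_stride[perm[k]];
    }
    out[o] = static_cast<float>(in_offset);

    // Advance the output coordinate in row-major order.
    for (int k = dims - 1; k >= 0; --k) {
      if (++out_index[k] < out_shape[k]) break;
      out_index[k] = 0;
    }
  }
  return out;
}

}  // namespace

// 6-D transpose of a {2, 3, 4, 5, 6, 7} tensor with permutation
// {1, 2, 3, 4, 0, 5}: the original axis 0 (stride 2520 = 3*4*5*6*7) lands in
// the second-to-last output position, so the expected data below alternates
// runs of seven consecutive input offsets with runs shifted by 2520.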
TEST(TransposeTest, TestRefOps6D5) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5, 6, 7},
{1, 2, 3, 4, 0, 5}),
ElementsAreArray(
{0, 1, 2, 3, 4, 5, 6, 2520, 2521, 2522, 2523,
2524, 2525, 2526, 7, 8, 9, 10, 11, 12, 13, 2527,
2528, 2529, 2530, 2531, 2532, 2533, 14, 15, 16, 17, 18,
19, 20, 2534, 2535, 2536, 2537, 2538, 2539, 2540, 21, 22,
23, 24, 25, 26, 27, 2541, 2542, 2543, 2544, 2545, 2546,
2547, 28, 29, 30, 31, 32, 33, 34, 2548, 2549, 2550,
2551, 2552, 2553, 2554, 35, 36, 37, 38, 39, 40, 41,
2555, 2556, 2557, 2558, 2559, 2560, 2561, 42, 43, 44, 45,
46, 47, 48, 2562, 2563, 2564, 2565, 2566, 2567, 2568, 49,
50, 51, 52, 53, 54, 55, 2569, 2570, 2571, 2572, 2573,
2574, 2575, 56, 57, 58, 59, 60, 61, 62, 2576, 2577,
2578, 2579, 2580, 2581, 2582, 63, 64, 65, 66, 67, 68,
69, 2583, 2584, 2585, 2586, 2587, 2588, 2589, 70, 71, 72,
73, 74, 75, 76, 2590, 2591, 2592, 2593, 2594, 2595, 2596,
77, 78, 79, 80, 81, 82, 83, 2597, 2598, 2599, 2600,
2601, 2602, 2603, 84, 85, 86, 87, 88, 89, 90, 2604,
2605, 2606, 2607, 2608, 2609, 2610, 91, 92, 93, 94, 95,
96, 97, 2611, 2612, 2613, 2614, 2615, 2616, 2617, 98, 99,
100, 101, 102, 103, 104, 2618, 2619, 2620, 2621, 2622, 2623,
2624, 105, 106, 107, 108, 109, 110, 111, 2625, 2626, 2627,
2628, 2629, 2630, 2631, 112, 113, 114, 115, 116, 117, 118,
2632, 2633, 2634, 2635, 2636, 2637, 2638, 119, 120, 121, 122,
123, 124, 125, 2639, 2640, 2641, 2642, 2643, 2644, 2645, 126,
127, 128, 129, 130, 131, 132, 2646, 2647, 2648, 2649, 2650,
2651, 2652, 133, 134, 135, 136, 137, 138, 139, 2653, 2654,
2655, 2656, 2657, 2658, 2659, 140, 141, 142, 143, 144, 145,
146, 2660, 2661, 2662, 2663, 2664, 2665, 2666, 147, 148, 149,
150, 151, 152, 153, 2667, 2668, 2669, 2670, 2671, 2672, 2673,
154, 155, 156, 157, 158, 159, 160, 2674, 2675, 2676, 2677,
2678, 2679, 2680, 161, 162, 163, 164, 165, 166, 167, 2681,
2682, 2683, 2684, 2685, 2686, 2687, 168, 169, 170, 171, 172,
173, 174, 2688, 2689, 2690, 2691, 2692, 2693, 2694, 175, 176,
177, 178, 179, 180, 181, 2695, 2696, 2697, 2698, 2699, 2700,
2701, 182, 183, 184, 185, 186, 187, 188, 2702, 2703, 2704,
2705, 2706, 2707, 2708, 189, 190, 191, 192, 193, 194, 195,
2709, 2710, 2711, 2712, 2713, 2714, 2715, 196, 197, 198, 199,
200, 201, 202, 2716, 2717, 2718, 2719, 2720, 2721, 2722, 203,
204, 205, 206, 207, 208, 209, 2723, 2724, 2725, 2726, 2727,
2728, 2729, 210, 211, 212, 213, 214, 215, 216, 2730, 2731,
2732, 2733, 2734, 2735, 2736, 217, 218, 219, 220, 221, 222,
223, 2737, 2738, 2739, 2740, 2741, 2742, 2743, 224, 225, 226,
227, 228, 229, 230, 2744, 2745, 2746, 2747, 2748, 2749, 2750,
231, 232, 233, 234, 235, 236, 237, 2751, 2752, 2753, 2754,
2755, 2756, 2757, 238, 239, 240, 241, 242, 243, 244, 2758,
2759, 2760, 2761, 2762, 2763, 2764, 245, 246, 247, 248, 249,
250, 251, 2765, 2766, 2767, 2768, 2769, 2770, 2771, 252, 253,
254, 255, 256, 257, 258, 2772, 2773, 2774, 2775, 2776, 2777,
2778, 259, 260, 261, 262, 263, 264, 265, 2779, 2780, 2781,
2782, 2783, 2784, 2785, 266, 267, 268, 269, 270, 271, 272,
2786, 2787, 2788, 2789, 2790, 2791, 2792, 273, 274, 275, 276,
277, 278, 279, 2793, 2794, 2795, 2796, 2797, 2798, 2799, 280,
281, 282, 283, 284, 285, 286, 2800, 2801, 2802, 2803, 2804,
2805, 2806, 287, 288, 289, 290, 291, 292, 293, 2807, 2808,
2809, 2810, 2811, 2812, 2813, 294, 295, 296, 297, 298, 299,
300, 2814, 2815, 2816, 2817, 2818, 2819, 2820, 301, 302, 303,
304, 305, 306, 307, 2821, 2822, 2823, 2824, 2825, 2826, 2827,
308, 309, 310, 311, 312, 313, 314, 2828, 2829, 2830, 2831,
2832, 2833, 2834, 315, 316, 317, 318, 319, 320, 321, 2835,
2836, 2837, 2838, 2839, 2840, 2841, 322, 323, 324, 325, 326,
327, 328, 2842, 2843, 2844, 2845, 2846, 2847, 2848, 329, 330,
331, 332, 333, 334, 335, 2849, 2850, 2851, 2852, 2853, 2854,
2855, 336, 337, 338, 339, 340, 341, 342, 2856, 2857, 2858,
2859, 2860, 2861, 2862, 343, 344, 345, 346, 347, 348, 349,
2863, 2864, 2865, 2866, 2867, 2868, 2869, 350, 351, 352, 353,
354, 355, 356, 2870, 2871, 2872, 2873, 2874, 2875, 2876, 357,
358, 359, 360, 361, 362, 363, 2877, 2878, 2879, 2880, 2881,
2882, 2883, 364, 365, 366, 367, 368, 369, 370, 2884, 2885,
2886, 2887, 2888, 2889, 2890, 371, 372, 373, 374, 375, 376,
377, 2891, 2892, 2893, 2894, 2895, 2896, 2897, 378, 379, 380,
381, 382, 383, 384, 2898, 2899, 2900, 2901, 2902, 2903, 2904,
385, 386, 387, 388, 389, 390, 391, 2905, 2906, 2907, 2908,
2909, 2910, 2911, 392, 393, 394, 395, 396, 397, 398, 2912,
2913, 2914, 2915, 2916, 2917, 2918, 399, 400, 401, 402, 403,
404, 405, 2919, 2920, 2921, 2922, 2923, 2924, 2925, 406, 407,
408, 409, 410, 411, 412, 2926, 2927, 2928, 2929, 2930, 2931,
2932, 413, 414, 415, 416, 417, 418, 419, 2933, 2934, 2935,
2936, 2937, 2938, 2939, 420, 421, 422, 423, 424, 425, 426,
2940, 2941, 2942, 2943, 2944, 2945, 2946, 427, 428, 429, 430,
431, 432, 433, 2947, 2948, 2949, 2950, 2951, 2952, 2953, 434,
435, 436, 437, 438, 439, 440, 2954, 2955, 2956, 2957, 2958,
2959, 2960, 441, 442, 443, 444, 445, 446, 447, 2961, 2962,
2963, 2964, 2965, 2966, 2967, 448, 449, 450, 451, 452, 453,
454, 2968, 2969, 2970, 2971, 2972, 2973, 2974, 455, 456, 457,
458, 459, 460, 461, 2975, 2976, 2977, 2978, 2979, 2980, 2981,
462, 463, 464, 465, 466, 467, 468, 2982, 2983, 2984, 2985,
2986, 2987, 2988, 469, 470, 471, 472, 473, 474, 475, 2989,
2990, 2991, 2992, 2993, 2994, 2995, 476, 477, 478, 479, 480,
481, 482, 2996, 2997, 2998, 2999, 3000, 3001, 3002, 483, 484,
485, 486, 487, 488, 489, 3003, 3004, 3005, 3006, 3007, 3008,
3009, 490, 491, 492, 493, 494, 495, 496, 3010, 3011, 3012,
3013, 3014, 3015, 3016, 497, 498, 499, 500, 501, 502, 503,
3017, 3018, 3019, 3020, 3021, 3022, 3023, 504, 505, 506, 507,
508, 509, 510, 3024, 3025, 3026, 3027, 3028, 3029, 3030, 511,
512, 513, 514, 515, 516, 517, 3031, 3032, 3033, 3034, 3035,
3036, 3037, 518, 519, 520, 521, 522, 523, 524, 3038, 3039,
3040, 3041, 3042, 3043, 3044, 525, 526, 527, 528, 529, 530,
531, 3045, 3046, 3047, 3048, 3049, 3050, 3051, 532, 533, 534,
535, 536, 537, 538, 3052, 3053, 3054, 3055, 3056, 3057, 3058,
539, 540, 541, 542, 543, 544, 545, 3059, 3060, 3061, 3062,
3063, 3064, 3065, 546, 547, 548, 549, 550, 551, 552, 3066,
3067, 3068, 3069, 3070, 3071, 3072, 553, 554, 555, 556, 557,
558, 559, 3073, 3074, 3075, 3076, 3077, 3078, 3079, 560, 561,
562, 563, 564, 565, 566, 3080, 3081, 3082, 3083, 3084, 3085,
3086, 567, 568, 569, 570, 571, 572, 573, 3087, 3088, 3089,
3090, 3091, 3092, 3093, 574, 575, 576, 577, 578, 579, 580,
3094, 3095, 3096, 3097, 3098, 3099, 3100, 581, 582, 583, 584,
585, 586, 587, 3101, 3102, 3103, 3104, 3105, 3106, 3107, 588,
589, 590, 591, 592, 593, 594, 3108, 3109, 3110, 3111, 3112,
3113, 3114, 595, 596, 597, 598, 599, 600, 601, 3115, 3116,
3117, 3118, 3119, 3120, 3121, 602, 603, 604, 605, 606, 607,
608, 3122, 3123, 3124, 3125, 3126, 3127, 3128, 609, 610, 611,
612, 613, 614, 615, 3129, 3130, 3131, 3132, 3133, 3134, 3135,
616, 617, 618, 619, 620, 621, 622, 3136, 3137, 3138, 3139,
3140, 3141, 3142, 623, 624, 625, 626, 627, 628, 629, 3143,
3144, 3145, 3146, 3147, 3148, 3149, 630, 631, 632, 633, 634,
635, 636, 3150, 3151, 3152, 3153, 3154, 3155, 3156, 637, 638,
639, 640, 641, 642, 643, 3157, 3158, 3159, 3160, 3161, 3162,
3163, 644, 645, 646, 647, 648, 649, 650, 3164, 3165, 3166,
3167, 3168, 3169, 3170, 651, 652, 653, 654, 655, 656, 657,
3171, 3172, 3173, 3174, 3175, 3176, 3177, 658, 659, 660, 661,
662, 663, 664, 3178, 3179, 3180, 3181, 3182, 3183, 3184, 665,
666, 667, 668, 669, 670, 671, 3185, 3186, 3187, 3188, 3189,
3190, 3191, 672, 673, 674, 675, 676, 677, 678, 3192, 3193,
3194, 3195, 3196, 3197, 3198, 679, 680, 681, 682, 683, 684,
685, 3199, 3200, 3201, 3202, 3203, 3204, 3205, 686, 687, 688,
689, 690, 691, 692, 3206, 3207, 3208, 3209, 3210, 3211, 3212,
693, 694, 695, 696, 697, 698, 699, 3213, 3214, 3215, 3216,
3217, 3218, 3219, 700, 701, 702, 703, 704, 705, 706, 3220,
3221, 3222, 3223, 3224, 3225, 3226, 707, 708, 709, 710, 711,
712, 713, 3227, 3228, 3229, 3230, 3231, 3232, 3233, 714, 715,
716, 717, 718, 719, 720, 3234, 3235, 3236, 3237, 3238, 3239,
3240, 721, 722, 723, 724, 725, 726, 727, 3241, 3242, 3243,
3244, 3245, 3246, 3247, 728, 729, 730, 731, 732, 733, 734,
3248, 3249, 3250, 3251, 3252, 3253, 3254, 735, 736, 737, 738,
739, 740, 741, 3255, 3256, 3257, 3258, 3259, 3260, 3261, 742,
743, 744, 745, 746, 747, 748, 3262, 3263, 3264, 3265, 3266,
3267, 3268, 749, 750, 751, 752, 753, 754, 755, 3269, 3270,
3271, 3272, 3273, 3274, 3275, 756, 757, 758, 759, 760, 761,
762, 3276, 3277, 3278, 3279, 3280, 3281, 3282, 763, 764, 765,
766, 767, 768, 769, 3283, 3284, 3285, 3286, 3287, 3288, 3289,
770, 771, 772, 773, 774, 775, 776, 3290, 3291, 3292, 3293,
3294, 3295, 3296, 777, 778, 779, 780, 781, 782, 783, 3297,
3298, 3299, 3300, 3301, 3302, 3303, 784, 785, 786, 787, 788,
789, 790, 3304, 3305, 3306, 3307, 3308, 3309, 3310, 791, 792,
793, 794, 795, 796, 797, 3311, 3312, 3313, 3314, 3315, 3316,
3317, 798, 799, 800, 801, 802, 803, 804, 3318, 3319, 3320,
3321, 3322, 3323, 3324, 805, 806, 807, 808, 809, 810, 811,
3325, 3326, 3327, 3328, 3329, 3330, 3331, 812, 813, 814, 815,
816, 817, 818, 3332, 3333, 3334, 3335, 3336, 3337, 3338, 819,
820, 821, 822, 823, 824, 825, 3339, 3340, 3341, 3342, 3343,
3344, 3345, 826, 827, 828, 829, 830, 831, 832, 3346, 3347,
3348, 3349, 3350, 3351, 3352, 833, 834, 835, 836, 837, 838,
839, 3353, 3354, 3355, 3356, 3357, 3358, 3359, 840, 841, 842,
843, 844, 845, 846, 3360, 3361, 3362, 3363, 3364, 3365, 3366,
847, 848, 849, 850, 851, 852, 853, 3367, 3368, 3369, 3370,
3371, 3372, 3373, 854, 855, 856, 857, 858, 859, 860, 3374,
3375, 3376, 3377, 3378, 3379, 3380, 861, 862, 863, 864, 865,
866, 867, 3381, 3382, 3383, 3384, 3385, 3386, 3387, 868, 869,
870, 871, 872, 873, 874, 3388, 3389, 3390, 3391, 3392, 3393,
3394, 875, 876, 877, 878, 879, 880, 881, 3395, 3396, 3397,
3398, 3399, 3400, 3401, 882, 883, 884, 885, 886, 887, 888,
3402, 3403, 3404, 3405, 3406, 3407, 3408, 889, 890, 891, 892,
893, 894, 895, 3409, 3410, 3411, 3412, 3413, 3414, 3415, 896,
897, 898, 899, 900, 901, 902, 3416, 3417, 3418, 3419, 3420,
3421, 3422, 903, 904, 905, 906, 907, 908, 909, 3423, 3424,
3425, 3426, 3427, 3428, 3429, 910, 911, 912, 913, 914, 915,
916, 3430, 3431, 3432, 3433, 3434, 3435, 3436, 917, 918, 919,
920, 921, 922, 923, 3437, 3438, 3439, 3440, 3441, 3442, 3443,
924, 925, 926, 927, 928, 929, 930, 3444, 3445, 3446, 3447,
3448, 3449, 3450, 931, 932, 933, 934, 935, 936, 937, 3451,
3452, 3453, 3454, 3455, 3456, 3457, 938, 939, 940, 941, 942,
943, 944, 3458, 3459, 3460, 3461, 3462, 3463, 3464, 945, 946,
947, 948, 949, 950, 951, 3465, 3466, 3467, 3468, 3469, 3470,
3471, 952, 953, 954, 955, 956, 957, 958, 3472, 3473, 3474,
3475, 3476, 3477, 3478, 959, 960, 961, 962, 963, 964, 965,
3479, 3480, 3481, 3482, 3483, 3484, 3485, 966, 967, 968, 969,
970, 971, 972, 3486, 3487, 3488, 3489, 3490, 3491, 3492, 973,
974, 975, 976, 977, 978, 979, 3493, 3494, 3495, 3496, 3497,
3498, 3499, 980, 981, 982, 983, 984, 985, 986, 3500, 3501,
3502, 3503, 3504, 3505, 3506, 987, 988, 989, 990, 991, 992,
993, 3507, 3508, 3509, 3510, 3511, 3512, 3513, 994, 995, 996,
997, 998, 999, 1000, 3514, 3515, 3516, 3517, 3518, 3519, 3520,
1001, 1002, 1003, 1004, 1005, 1006, 1007, 3521, 3522, 3523, 3524,
3525, 3526, 3527, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 3528,
3529, 3530, 3531, 3532, 3533, 3534, 1015, 1016, 1017, 1018, 1019,
1020, 1021, 3535, 3536, 3537, 3538, 3539, 3540, 3541, 1022, 1023,
1024, 1025, 1026, 1027, 1028, 3542, 3543, 3544, 3545, 3546, 3547,
3548, 1029, 1030, 1031, 1032, 1033, 1034, 1035, 3549, 3550, 3551,
3552, 3553, 3554, 3555, 1036, 1037, 1038, 1039, 1040, 1041, 1042,
3556, 3557, 3558, 3559, 3560, 3561, 3562, 1043, 1044, 1045, 1046,
1047, 1048, 1049, 3563, 3564, 3565, 3566, 3567, 3568, 3569, 1050,
1051, 1052, 1053, 1054, 1055, 1056, 3570, 3571, 3572, 3573, 3574,
3575, 3576, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 3577, 3578,
3579, 3580, 3581, 3582, 3583, 1064, 1065, 1066, 1067, 1068, 1069,
1070, 3584, 3585, 3586, 3587, 3588, 3589, 3590, 1071, 1072, 1073,
1074, 1075, 1076, 1077, 3591, 3592, 3593, 3594, 3595, 3596, 3597,
1078, 1079, 1080, 1081, 1082, 1083, 1084, 3598, 3599, 3600, 3601,
3602, 3603, 3604, 1085, 1086, 1087, 1088, 1089, 1090, 1091, 3605,
3606, 3607, 3608, 3609, 3610, 3611, 1092, 1093, 1094, 1095, 1096,
1097, 1098, 3612, 3613, 3614, 3615, 3616, 3617, 3618, 1099, 1100,
1101, 1102, 1103, 1104, 1105, 3619, 3620, 3621, 3622, 3623, 3624,
3625, 1106, 1107, 1108, 1109, 1110, 1111, 1112, 3626, 3627, 3628,
3629, 3630, 3631, 3632, 1113, 1114, 1115, 1116, 1117, 1118, 1119,
3633, 3634, 3635, 3636, 3637, 3638, 3639, 1120, 1121, 1122, 1123,
1124, 1125, 1126, 3640, 3641, 3642, 3643, 3644, 3645, 3646, 1127,
1128, 1129, 1130, 1131, 1132, 1133, 3647, 3648, 3649, 3650, 3651,
3652, 3653, 1134, 1135, 1136, 1137, 1138, 1139, 1140, 3654, 3655,
3656, 3657, 3658, 3659, 3660, 1141, 1142, 1143, 1144, 1145, 1146,
1147, 3661, 3662, 3663, 3664, 3665, 3666, 3667, 1148, 1149, 1150,
1151, 1152, 1153, 1154, 3668, 3669, 3670, 3671, 3672, 3673, 3674,
1155, 1156, 1157, 1158, 1159, 1160, 1161, 3675, 3676, 3677, 3678,
3679, 3680, 3681, 1162, 1163, 1164, 1165, 1166, 1167, 1168, 3682,
3683, 3684, 3685, 3686, 3687, 3688, 1169, 1170, 1171, 1172, 1173,
1174, 1175, 3689, 3690, 3691, 3692, 3693, 3694, 3695, 1176, 1177,
1178, 1179, 1180, 1181, 1182, 3696, 3697, 3698, 3699, 3700, 3701,
3702, 1183, 1184, 1185, 1186, 1187, 1188, 1189, 3703, 3704, 3705,
3706, 3707, 3708, 3709, 1190, 1191, 1192, 1193, 1194, 1195, 1196,
3710, 3711, 3712, 3713, 3714, 3715, 3716, 1197, 1198, 1199, 1200,
1201, 1202, 1203, 3717, 3718, 3719, 3720, 3721, 3722, 3723, 1204,
1205, 1206, 1207, 1208, 1209, 1210, 3724, 3725, 3726, 3727, 3728,
3729, 3730, 1211, 1212, 1213, 1214, 1215, 1216, 1217, 3731, 3732,
3733, 3734, 3735, 3736, 3737, 1218, 1219, 1220, 1221, 1222, 1223,
1224, 3738, 3739, 3740, 3741, 3742, 3743, 3744, 1225, 1226, 1227,
1228, 1229, 1230, 1231, 3745, 3746, 3747, 3748, 3749, 3750, 3751,
1232, 1233, 1234, 1235, 1236, 1237, 1238, 3752, 3753, 3754, 3755,
3756, 3757, 3758, 1239, 1240, 1241, 1242, 1243, 1244, 1245, 3759,
3760, 3761, 3762, 3763, 3764, 3765, 1246, 1247, 1248, 1249, 1250,
1251, 1252, 3766, 3767, 3768, 3769, 3770, 3771, 3772, 1253, 1254,
1255, 1256, 1257, 1258, 1259, 3773, 3774, 3775, 3776, 3777, 3778,
3779, 1260, 1261, 1262, 1263, 1264, 1265, 1266, 3780, 3781, 3782,
3783, 3784, 3785, 3786, 1267, 1268, 1269, 1270, 1271, 1272, 1273,
3787, 3788, 3789, 3790, 3791, 3792, 3793, 1274, 1275, 1276, 1277,
1278, 1279, 1280, 3794, 3795, 3796, 3797, 3798, 3799, 3800, 1281,
1282, 1283, 1284, 1285, 1286, 1287, 3801, 3802, 3803, 3804, 3805,
3806, 3807, 1288, 1289, 1290, 1291, 1292, 1293, 1294, 3808, 3809,
3810, 3811, 3812, 3813, 3814, 1295, 1296, 1297, 1298, 1299, 1300,
1301, 3815, 3816, 3817, 3818, 3819, 3820, 3821, 1302, 1303, 1304,
1305, 1306, 1307, 1308, 3822, 3823, 3824, 3825, 3826, 3827, 3828,
1309, 1310, 1311, 1312, 1313, 1314, 1315, 3829, 3830, 3831, 3832,
3833, 3834, 3835, 1316, 1317, 1318, 1319, 1320, 1321, 1322, 3836,
3837, 3838, 3839, 3840, 3841, 3842, 1323, 1324, 1325, 1326, 1327,
1328, 1329, 3843, 3844, 3845, 3846, 3847, 3848, 3849, 1330, 1331,
1332, 1333, 1334, 1335, 1336, 3850, 3851, 3852, 3853, 3854, 3855,
3856, 1337, 1338, 1339, 1340, 1341, 1342, 1343, 3857, 3858, 3859,
3860, 3861, 3862, 3863, 1344, 1345, 1346, 1347, 1348, 1349, 1350,
3864, 3865, 3866, 3867, 3868, 3869, 3870, 1351, 1352, 1353, 1354,
1355, 1356, 1357, 3871, 3872, 3873, 3874, 3875, 3876, 3877, 1358,
1359, 1360, 1361, 1362, 1363, 1364, 3878, 3879, 3880, 3881, 3882,
3883, 3884, 1365, 1366, 1367, 1368, 1369, 1370, 1371, 3885, 3886,
3887, 3888, 3889, 3890, 3891, 1372, 1373, 1374, 1375, 1376, 1377,
1378, 3892, 3893, 3894, 3895, 3896, 3897, 3898, 1379, 1380, 1381,
1382, 1383, 1384, 1385, 3899, 3900, 3901, 3902, 3903, 3904, 3905,
1386, 1387, 1388, 1389, 1390, 1391, 1392, 3906, 3907, 3908, 3909,
3910, 3911, 3912, 1393, 1394, 1395, 1396, 1397, 1398, 1399, 3913,
3914, 3915, 3916, 3917, 3918, 3919, 1400, 1401, 1402, 1403, 1404,
1405, 1406, 3920, 3921, 3922, 3923, 3924, 3925, 3926, 1407, 1408,
1409, 1410, 1411, 1412, 1413, 3927, 3928, 3929, 3930, 3931, 3932,
3933, 1414, 1415, 1416, 1417, 1418, 1419, 1420, 3934, 3935, 3936,
3937, 3938, 3939, 3940, 1421, 1422, 1423, 1424, 1425, 1426, 1427,
3941, 3942, 3943, 3944, 3945, 3946, 3947, 1428, 1429, 1430, 1431,
1432, 1433, 1434, 3948, 3949, 3950, 3951, 3952, 3953, 3954, 1435,
1436, 1437, 1438, 1439, 1440, 1441, 3955, 3956, 3957, 3958, 3959,
3960, 3961, 1442, 1443, 1444, 1445, 1446, 1447, 1448, 3962, 3963,
3964, 3965, 3966, 3967, 3968, 1449, 1450, 1451, 1452, 1453, 1454,
1455, 3969, 3970, 3971, 3972, 3973, 3974, 3975, 1456, 1457, 1458,
1459, 1460, 1461, 1462, 3976, 3977, 3978, 3979, 3980, 3981, 3982,
1463, 1464, 1465, 1466, 1467, 1468, 1469, 3983, 3984, 3985, 3986,
3987, 3988, 3989, 1470, 1471, 1472, 1473, 1474, 1475, 1476, 3990,
3991, 3992, 3993, 3994, 3995, 3996, 1477, 1478, 1479, 1480, 1481,
1482, 1483, 3997, 3998, 3999, 4000, 4001, 4002, 4003, 1484, 1485,
1486, 1487, 1488, 1489, 1490, 4004, 4005, 4006, 4007, 4008, 4009,
4010, 1491, 1492, 1493, 1494, 1495, 1496, 1497, 4011, 4012, 4013,
4014, 4015, 4016, 4017, 1498, 1499, 1500, 1501, 1502, 1503, 1504,
4018, 4019, 4020, 4021, 4022, 4023, 4024, 1505, 1506, 1507, 1508,
1509, 1510, 1511, 4025, 4026, 4027, 4028, 4029, 4030, 4031, 1512,
1513, 1514, 1515, 1516, 1517, 1518, 4032, 4033, 4034, 4035, 4036,
4037, 4038, 1519, 1520, 1521, 1522, 1523, 1524, 1525, 4039, 4040,
4041, 4042, 4043, 4044, 4045, 1526, 1527, 1528, 1529, 1530, 1531,
1532, 4046, 4047, 4048, 4049, 4050, 4051, 4052, 1533, 1534, 1535,
1536, 1537, 1538, 1539, 4053, 4054, 4055, 4056, 4057, 4058, 4059,
1540, 1541, 1542, 1543, 1544, 1545, 1546, 4060, 4061, 4062, 4063,
4064, 4065, 4066, 1547, 1548, 1549, 1550, 1551, 1552, 1553, 4067,
4068, 4069, 4070, 4071, 4072, 4073, 1554, 1555, 1556, 1557, 1558,
1559, 1560, 4074, 4075, 4076, 4077, 4078, 4079, 4080, 1561, 1562,
1563, 1564, 1565, 1566, 1567, 4081, 4082, 4083, 4084, 4085, 4086,
4087, 1568, 1569, 1570, 1571, 1572, 1573, 1574, 4088, 4089, 4090,
4091, 4092, 4093, 4094, 1575, 1576, 1577, 1578, 1579, 1580, 1581,
4095, 4096, 4097, 4098, 4099, 4100, 4101, 1582, 1583, 1584, 1585,
1586, 1587, 1588, 4102, 4103, 4104, 4105, 4106, 4107, 4108, 1589,
1590, 1591, 1592, 1593, 1594, 1595, 4109, 4110, 4111, 4112, 4113,
4114, 4115, 1596, 1597, 1598, 1599, 1600, 1601, 1602, 4116, 4117,
4118, 4119, 4120, 4121, 4122, 1603, 1604, 1605, 1606, 1607, 1608,
1609, 4123, 4124, 4125, 4126, 4127, 4128, 4129, 1610, 1611, 1612,
1613, 1614, 1615, 1616, 4130, 4131, 4132, 4133, 4134, 4135, 4136,
1617, 1618, 1619, 1620, 1621, 1622, 1623, 4137, 4138, 4139, 4140,
4141, 4142, 4143, 1624, 1625, 1626, 1627, 1628, 1629, 1630, 4144,
4145, 4146, 4147, 4148, 4149, 4150, 1631, 1632, 1633, 1634, 1635,
1636, 1637, 4151, 4152, 4153, 4154, 4155, 4156, 4157, 1638, 1639,
1640, 1641, 1642, 1643, 1644, 4158, 4159, 4160, 4161, 4162, 4163,
4164, 1645, 1646, 1647, 1648, 1649, 1650, 1651, 4165, 4166, 4167,
4168, 4169, 4170, 4171, 1652, 1653, 1654, 1655, 1656, 1657, 1658,
4172, 4173, 4174, 4175, 4176, 4177, 4178, 1659, 1660, 1661, 1662,
1663, 1664, 1665, 4179, 4180, 4181, 4182, 4183, 4184, 4185, 1666,
1667, 1668, 1669, 1670, 1671, 1672, 4186, 4187, 4188, 4189, 4190,
4191, 4192, 1673, 1674, 1675, 1676, 1677, 1678, 1679, 4193, 4194,
4195, 4196, 4197, 4198, 4199, 1680, 1681, 1682, 1683, 1684, 1685,
1686, 4200, 4201, 4202, 4203, 4204, 4205, 4206, 1687, 1688, 1689,
1690, 1691, 1692, 1693, 4207, 4208, 4209, 4210, 4211, 4212, 4213,
1694, 1695, 1696, 1697, 1698, 1699, 1700, 4214, 4215, 4216, 4217,
4218, 4219, 4220, 1701, 1702, 1703, 1704, 1705, 1706, 1707, 4221,
4222, 4223, 4224, 4225, 4226, 4227, 1708, 1709, 1710, 1711, 1712,
1713, 1714, 4228, 4229, 4230, 4231, 4232, 4233, 4234, 1715, 1716,
1717, 1718, 1719, 1720, 1721, 4235, 4236, 4237, 4238, 4239, 4240,
4241, 1722, 1723, 1724, 1725, 1726, 1727, 1728, 4242, 4243, 4244,
4245, 4246, 4247, 4248, 1729, 1730, 1731, 1732, 1733, 1734, 1735,
4249, 4250, 4251, 4252, 4253, 4254, 4255, 1736, 1737, 1738, 1739,
1740, 1741, 1742, 4256, 4257, 4258, 4259, 4260, 4261, 4262, 1743,
1744, 1745, 1746, 1747, 1748, 1749, 4263, 4264, 4265, 4266, 4267,
4268, 4269, 1750, 1751, 1752, 1753, 1754, 1755, 1756, 4270, 4271,
4272, 4273, 4274, 4275, 4276, 1757, 1758, 1759, 1760, 1761, 1762,
1763, 4277, 4278, 4279, 4280, 4281, 4282, 4283, 1764, 1765, 1766,
1767, 1768, 1769, 1770, 4284, 4285, 4286, 4287, 4288, 4289, 4290,
1771, 1772, 1773, 1774, 1775, 1776, 1777, 4291, 4292, 4293, 4294,
4295, 4296, 4297, 1778, 1779, 1780, 1781, 1782, 1783, 1784, 4298,
4299, 4300, 4301, 4302, 4303, 4304, 1785, 1786, 1787, 1788, 1789,
1790, 1791, 4305, 4306, 4307, 4308, 4309, 4310, 4311, 1792, 1793,
1794, 1795, 1796, 1797, 1798, 4312, 4313, 4314, 4315, 4316, 4317,
4318, 1799, 1800, 1801, 1802, 1803, 1804, 1805, 4319, 4320, 4321,
4322, 4323, 4324, 4325, 1806, 1807, 1808, 1809, 1810, 1811, 1812,
4326, 4327, 4328, 4329, 4330, 4331, 4332, 1813, 1814, 1815, 1816,
1817, 1818, 1819, 4333, 4334, 4335, 4336, 4337, 4338, 4339, 1820,
1821, 1822, 1823, 1824, 1825, 1826, 4340, 4341, 4342, 4343, 4344,
4345, 4346, 1827, 1828, 1829, 1830, 1831, 1832, 1833, 4347, 4348,
4349, 4350, 4351, 4352, 4353, 1834, 1835, 1836, 1837, 1838, 1839,
1840, 4354, 4355, 4356, 4357, 4358, 4359, 4360, 1841, 1842, 1843,
1844, 1845, 1846, 1847, 4361, 4362, 4363, 4364, 4365, 4366, 4367,
1848, 1849, 1850, 1851, 1852, 1853, 1854, 4368, 4369, 4370, 4371,
4372, 4373, 4374, 1855, 1856, 1857, 1858, 1859, 1860, 1861, 4375,
4376, 4377, 4378, 4379, 4380, 4381, 1862, 1863, 1864, 1865, 1866,
1867, 1868, 4382, 4383, 4384, 4385, 4386, 4387, 4388, 1869, 1870,
1871, 1872, 1873, 1874, 1875, 4389, 4390, 4391, 4392, 4393, 4394,
4395, 1876, 1877, 1878, 1879, 1880, 1881, 1882, 4396, 4397, 4398,
4399, 4400, 4401, 4402, 1883, 1884, 1885, 1886, 1887, 1888, 1889,
4403, 4404, 4405, 4406, 4407, 4408, 4409, 1890, 1891, 1892, 1893,
1894, 1895, 1896, 4410, 4411, 4412, 4413, 4414, 4415, 4416, 1897,
1898, 1899, 1900, 1901, 1902, 1903, 4417, 4418, 4419, 4420, 4421,
4422, 4423, 1904, 1905, 1906, 1907, 1908, 1909, 1910, 4424, 4425,
4426, 4427, 4428, 4429, 4430, 1911, 1912, 1913, 1914, 1915, 1916,
1917, 4431, 4432, 4433, 4434, 4435, 4436, 4437, 1918, 1919, 1920,
1921, 1922, 1923, 1924, 4438, 4439, 4440, 4441, 4442, 4443, 4444,
1925, 1926, 1927, 1928, 1929, 1930, 1931, 4445, 4446, 4447, 4448,
4449, 4450, 4451, 1932, 1933, 1934, 1935, 1936, 1937, 1938, 4452,
4453, 4454, 4455, 4456, 4457, 4458, 1939, 1940, 1941, 1942, 1943,
1944, 1945, 4459, 4460, 4461, 4462, 4463, 4464, 4465, 1946, 1947,
1948, 1949, 1950, 1951, 1952, 4466, 4467, 4468, 4469, 4470, 4471,
4472, 1953, 1954, 1955, 1956, 1957, 1958, 1959, 4473, 4474, 4475,
4476, 4477, 4478, 4479, 1960, 1961, 1962, 1963, 1964, 1965, 1966,
4480, 4481, 4482, 4483, 4484, 4485, 4486, 1967, 1968, 1969, 1970,
1971, 1972, 1973, 4487, 4488, 4489, 4490, 4491, 4492, 4493, 1974,
1975, 1976, 1977, 1978, 1979, 1980, 4494, 4495, 4496, 4497, 4498,
4499, 4500, 1981, 1982, 1983, 1984, 1985, 1986, 1987, 4501, 4502,
4503, 4504, 4505, 4506, 4507, 1988, 1989, 1990, 1991, 1992, 1993,
1994, 4508, 4509, 4510, 4511, 4512, 4513, 4514, 1995, 1996, 1997,
1998, 1999, 2000, 2001, 4515, 4516, 4517, 4518, 4519, 4520, 4521,
2002, 2003, 2004, 2005, 2006, 2007, 2008, 4522, 4523, 4524, 4525,
4526, 4527, 4528, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 4529,
4530, 4531, 4532, 4533, 4534, 4535, 2016, 2017, 2018, 2019, 2020,
2021, 2022, 4536, 4537, 4538, 4539, 4540, 4541, 4542, 2023, 2024,
2025, 2026, 2027, 2028, 2029, 4543, 4544, 4545, 4546, 4547, 4548,
4549, 2030, 2031, 2032, 2033, 2034, 2035, 2036, 4550, 4551, 4552,
4553, 4554, 4555, 4556, 2037, 2038, 2039, 2040, 2041, 2042, 2043,
4557, 4558, 4559, 4560, 4561, 4562, 4563, 2044, 2045, 2046, 2047,
2048, 2049, 2050, 4564, 4565, 4566, 4567, 4568, 4569, 4570, 2051,
2052, 2053, 2054, 2055, 2056, 2057, 4571, 4572, 4573, 4574, 4575,
4576, 4577, 2058, 2059, 2060, 2061, 2062, 2063, 2064, 4578, 4579,
4580, 4581, 4582, 4583, 4584, 2065, 2066, 2067, 2068, 2069, 2070,
2071, 4585, 4586, 4587, 4588, 4589, 4590, 4591, 2072, 2073, 2074,
2075, 2076, 2077, 2078, 4592, 4593, 4594, 4595, 4596, 4597, 4598,
2079, 2080, 2081, 2082, 2083, 2084, 2085, 4599, 4600, 4601, 4602,
4603, 4604, 4605, 2086, 2087, 2088, 2089, 2090, 2091, 2092, 4606,
4607, 4608, 4609, 4610, 4611, 4612, 2093, 2094, 2095, 2096, 2097,
2098, 2099, 4613, 4614, 4615, 4616, 4617, 4618, 4619, 2100, 2101,
2102, 2103, 2104, 2105, 2106, 4620, 4621, 4622, 4623, 4624, 4625,
4626, 2107, 2108, 2109, 2110, 2111, 2112, 2113, 4627, 4628, 4629,
4630, 4631, 4632, 4633, 2114, 2115, 2116, 2117, 2118, 2119, 2120,
4634, 4635, 4636, 4637, 4638, 4639, 4640, 2121, 2122, 2123, 2124,
2125, 2126, 2127, 4641, 4642, 4643, 4644, 4645, 4646, 4647, 2128,
2129, 2130, 2131, 2132, 2133, 2134, 4648, 4649, 4650, 4651, 4652,
4653, 4654, 2135, 2136, 2137, 2138, 2139, 2140, 2141, 4655, 4656,
4657, 4658, 4659, 4660, 4661, 2142, 2143, 2144, 2145, 2146, 2147,
2148, 4662, 4663, 4664, 4665, 4666, 4667, 4668, 2149, 2150, 2151,
2152, 2153, 2154, 2155, 4669, 4670, 4671, 4672, 4673, 4674, 4675,
2156, 2157, 2158, 2159, 2160, 2161, 2162, 4676, 4677, 4678, 4679,
4680, 4681, 4682, 2163, 2164, 2165, 2166, 2167, 2168, 2169, 4683,
4684, 4685, 4686, 4687, 4688, 4689, 2170, 2171, 2172, 2173, 2174,
2175, 2176, 4690, 4691, 4692, 4693, 4694, 4695, 4696, 2177, 2178,
2179, 2180, 2181, 2182, 2183, 4697, 4698, 4699, 4700, 4701, 4702,
4703, 2184, 2185, 2186, 2187, 2188, 2189, 2190, 4704, 4705, 4706,
4707, 4708, 4709, 4710, 2191, 2192, 2193, 2194, 2195, 2196, 2197,
4711, 4712, 4713, 4714, 4715, 4716, 4717, 2198, 2199, 2200, 2201,
2202, 2203, 2204, 4718, 4719, 4720, 4721, 4722, 4723, 4724, 2205,
2206, 2207, 2208, 2209, 2210, 2211, 4725, 4726, 4727, 4728, 4729,
4730, 4731, 2212, 2213, 2214, 2215, 2216, 2217, 2218, 4732, 4733,
4734, 4735, 4736, 4737, 4738, 2219, 2220, 2221, 2222, 2223, 2224,
2225, 4739, 4740, 4741, 4742, 4743, 4744, 4745, 2226, 2227, 2228,
2229, 2230, 2231, 2232, 4746, 4747, 4748, 4749, 4750, 4751, 4752,
2233, 2234, 2235, 2236, 2237, 2238, 2239, 4753, 4754, 4755, 4756,
4757, 4758, 4759, 2240, 2241, 2242, 2243, 2244, 2245, 2246, 4760,
4761, 4762, 4763, 4764, 4765, 4766, 2247, 2248, 2249, 2250, 2251,
2252, 2253, 4767, 4768, 4769, 4770, 4771, 4772, 4773, 2254, 2255,
2256, 2257, 2258, 2259, 2260, 4774, 4775, 4776, 4777, 4778, 4779,
4780, 2261, 2262, 2263, 2264, 2265, 2266, 2267, 4781, 4782, 4783,
4784, 4785, 4786, 4787, 2268, 2269, 2270, 2271, 2272, 2273, 2274,
4788, 4789, 4790, 4791, 4792, 4793, 4794, 2275, 2276, 2277, 2278,
2279, 2280, 2281, 4795, 4796, 4797, 4798, 4799, 4800, 4801, 2282,
2283, 2284, 2285, 2286, 2287, 2288, 4802, 4803, 4804, 4805, 4806,
4807, 4808, 2289, 2290, 2291, 2292, 2293, 2294, 2295, 4809, 4810,
4811, 4812, 4813, 4814, 4815, 2296, 2297, 2298, 2299, 2300, 2301,
2302, 4816, 4817, 4818, 4819, 4820, 4821, 4822, 2303, 2304, 2305,
2306, 2307, 2308, 2309, 4823, 4824, 4825, 4826, 4827, 4828, 4829,
2310, 2311, 2312, 2313, 2314, 2315, 2316, 4830, 4831, 4832, 4833,
4834, 4835, 4836, 2317, 2318, 2319, 2320, 2321, 2322, 2323, 4837,
4838, 4839, 4840, 4841, 4842, 4843, 2324, 2325, 2326, 2327, 2328,
2329, 2330, 4844, 4845, 4846, 4847, 4848, 4849, 4850, 2331, 2332,
2333, 2334, 2335, 2336, 2337, 4851, 4852, 4853, 4854, 4855, 4856,
4857, 2338, 2339, 2340, 2341, 2342, 2343, 2344, 4858, 4859, 4860,
4861, 4862, 4863, 4864, 2345, 2346, 2347, 2348, 2349, 2350, 2351,
4865, 4866, 4867, 4868, 4869, 4870, 4871, 2352, 2353, 2354, 2355,
2356, 2357, 2358, 4872, 4873, 4874, 4875, 4876, 4877, 4878, 2359,
2360, 2361, 2362, 2363, 2364, 2365, 4879, 4880, 4881, 4882, 4883,
4884, 4885, 2366, 2367, 2368, 2369, 2370, 2371, 2372, 4886, 4887,
4888, 4889, 4890, 4891, 4892, 2373, 2374, 2375, 2376, 2377, 2378,
2379, 4893, 4894, 4895, 4896, 4897, 4898, 4899, 2380, 2381, 2382,
2383, 2384, 2385, 2386, 4900, 4901, 4902, 4903, 4904, 4905, 4906,
2387, 2388, 2389, 2390, 2391, 2392, 2393, 4907, 4908, 4909, 4910,
4911, 4912, 4913, 2394, 2395, 2396, 2397, 2398, 2399, 2400, 4914,
4915, 4916, 4917, 4918, 4919, 4920, 2401, 2402, 2403, 2404, 2405,
2406, 2407, 4921, 4922, 4923, 4924, 4925, 4926, 4927, 2408, 2409,
2410, 2411, 2412, 2413, 2414, 4928, 4929, 4930, 4931, 4932, 4933,
4934, 2415, 2416, 2417, 2418, 2419, 2420, 2421, 4935, 4936, 4937,
4938, 4939, 4940, 4941, 2422, 2423, 2424, 2425, 2426, 2427, 2428,
4942, 4943, 4944, 4945, 4946, 4947, 4948, 2429, 2430, 2431, 2432,
2433, 2434, 2435, 4949, 4950, 4951, 4952, 4953, 4954, 4955, 2436,
2437, 2438, 2439, 2440, 2441, 2442, 4956, 4957, 4958, 4959, 4960,
4961, 4962, 2443, 2444, 2445, 2446, 2447, 2448, 2449, 4963, 4964,
4965, 4966, 4967, 4968, 4969, 2450, 2451, 2452, 2453, 2454, 2455,
2456, 4970, 4971, 4972, 4973, 4974, 4975, 4976, 2457, 2458, 2459,
2460, 2461, 2462, 2463, 4977, 4978, 4979, 4980, 4981, 4982, 4983,
2464, 2465, 2466, 2467, 2468, 2469, 2470, 4984, 4985, 4986, 4987,
4988, 4989, 4990, 2471, 2472, 2473, 2474, 2475, 2476, 2477, 4991,
4992, 4993, 4994, 4995, 4996, 4997, 2478, 2479, 2480, 2481, 2482,
2483, 2484, 4998, 4999, 5000, 5001, 5002, 5003, 5004, 2485, 2486,
2487, 2488, 2489, 2490, 2491, 5005, 5006, 5007, 5008, 5009, 5010,
5011, 2492, 2493, 2494, 2495, 2496, 2497, 2498, 5012, 5013, 5014,
5015, 5016, 5017, 5018, 2499, 2500, 2501, 2502, 2503, 2504, 2505,
5019, 5020, 5021, 5022, 5023, 5024, 5025, 2506, 2507, 2508, 2509,
2510, 2511, 2512, 5026, 5027, 5028, 5029, 5030, 5031, 5032, 2513,
2514, 2515, 2516, 2517, 2518, 2519, 5033, 5034, 5035, 5036, 5037,
5038, 5039}));
}
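// 6-D transpose of the same {2, 3, 4, 5, 6, 7} tensor with permutation
// {1, 2, 3, 4, 5, 0}: axis 0 becomes the innermost output axis, so the
// expected data below interleaves each input offset v with v + 2520.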
TEST(TransposeTest, TestRefOps6D6) {
EXPECT_THAT(
RunTestPermutation<float>({2, 3, 4, 5, 6, 7},
{1, 2, 3, 4, 5, 0}),
ElementsAreArray(
{0, 2520, 1, 2521, 2, 2522, 3, 2523, 4, 2524, 5,
2525, 6, 2526, 7, 2527, 8, 2528, 9, 2529, 10, 2530,
11, 2531, 12, 2532, 13, 2533, 14, 2534, 15, 2535, 16,
2536, 17, 2537, 18, 2538, 19, 2539, 20, 2540, 21, 2541,
22, 2542, 23, 2543, 24, 2544, 25, 2545, 26, 2546, 27,
2547, 28, 2548, 29, 2549, 30, 2550, 31, 2551, 32, 2552,
33, 2553, 34, 2554, 35, 2555, 36, 2556, 37, 2557, 38,
2558, 39, 2559, 40, 2560, 41, 2561, 42, 2562, 43, 2563,
44, 2564, 45, 2565, 46, 2566, 47, 2567, 48, 2568, 49,
2569, 50, 2570, 51, 2571, 52, 2572, 53, 2573, 54, 2574,
55, 2575, 56, 2576, 57, 2577, 58, 2578, 59, 2579, 60,
2580, 61, 2581, 62, 2582, 63, 2583, 64, 2584, 65, 2585,
66, 2586, 67, 2587, 68, 2588, 69, 2589, 70, 2590, 71,
2591, 72, 2592, 73, 2593, 74, 2594, 75, 2595, 76, 2596,
77, 2597, 78, 2598, 79, 2599, 80, 2600, 81, 2601, 82,
2602, 83, 2603, 84, 2604, 85, 2605, 86, 2606, 87, 2607,
88, 2608, 89, 2609, 90, 2610, 91, 2611, 92, 2612, 93,
2613, 94, 2614, 95, 2615, 96, 2616, 97, 2617, 98, 2618,
99, 2619, 100, 2620, 101, 2621, 102, 2622, 103, 2623, 104,
2624, 105, 2625, 106, 2626, 107, 2627, 108, 2628, 109, 2629,
110, 2630, 111, 2631, 112, 2632, 113, 2633, 114, 2634, 115,
2635, 116, 2636, 117, 2637, 118, 2638, 119, 2639, 120, 2640,
121, 2641, 122, 2642, 123, 2643, 124, 2644, 125, 2645, 126,
2646, 127, 2647, 128, 2648, 129, 2649, 130, 2650, 131, 2651,
132, 2652, 133, 2653, 134, 2654, 135, 2655, 136, 2656, 137,
2657, 138, 2658, 139, 2659, 140, 2660, 141, 2661, 142, 2662,
143, 2663, 144, 2664, 145, 2665, 146, 2666, 147, 2667, 148,
2668, 149, 2669, 150, 2670, 151, 2671, 152, 2672, 153, 2673,
154, 2674, 155, 2675, 156, 2676, 157, 2677, 158, 2678, 159,
2679, 160, 2680, 161, 2681, 162, 2682, 163, 2683, 164, 2684,
165, 2685, 166, 2686, 167, 2687, 168, 2688, 169, 2689, 170,
2690, 171, 2691, 172, 2692, 173, 2693, 174, 2694, 175, 2695,
176, 2696, 177, 2697, 178, 2698, 179, 2699, 180, 2700, 181,
2701, 182, 2702, 183, 2703, 184, 2704, 185, 2705, 186, 2706,
187, 2707, 188, 2708, 189, 2709, 190, 2710, 191, 2711, 192,
2712, 193, 2713, 194, 2714, 195, 2715, 196, 2716, 197, 2717,
198, 2718, 199, 2719, 200, 2720, 201, 2721, 202, 2722, 203,
2723, 204, 2724, 205, 2725, 206, 2726, 207, 2727, 208, 2728,
209, 2729, 210, 2730, 211, 2731, 212, 2732, 213, 2733, 214,
2734, 215, 2735, 216, 2736, 217, 2737, 218, 2738, 219, 2739,
220, 2740, 221, 2741, 222, 2742, 223, 2743, 224, 2744, 225,
2745, 226, 2746, 227, 2747, 228, 2748, 229, 2749, 230, 2750,
231, 2751, 232, 2752, 233, 2753, 234, 2754, 235, 2755, 236,
2756, 237, 2757, 238, 2758, 239, 2759, 240, 2760, 241, 2761,
242, 2762, 243, 2763, 244, 2764, 245, 2765, 246, 2766, 247,
2767, 248, 2768, 249, 2769, 250, 2770, 251, 2771, 252, 2772,
253, 2773, 254, 2774, 255, 2775, 256, 2776, 257, 2777, 258,
2778, 259, 2779, 260, 2780, 261, 2781, 262, 2782, 263, 2783,
264, 2784, 265, 2785, 266, 2786, 267, 2787, 268, 2788, 269,
2789, 270, 2790, 271, 2791, 272, 2792, 273, 2793, 274, 2794,
275, 2795, 276, 2796, 277, 2797, 278, 2798, 279, 2799, 280,
2800, 281, 2801, 282, 2802, 283, 2803, 284, 2804, 285, 2805,
286, 2806, 287, 2807, 288, 2808, 289, 2809, 290, 2810, 291,
2811, 292, 2812, 293, 2813, 294, 2814, 295, 2815, 296, 2816,
297, 2817, 298, 2818, 299, 2819, 300, 2820, 301, 2821, 302,
2822, 303, 2823, 304, 2824, 305, 2825, 306, 2826, 307, 2827,
308, 2828, 309, 2829, 310, 2830, 311, 2831, 312, 2832, 313,
2833, 314, 2834, 315, 2835, 316, 2836, 317, 2837, 318, 2838,
319, 2839, 320, 2840, 321, 2841, 322, 2842, 323, 2843, 324,
2844, 325, 2845, 326, 2846, 327, 2847, 328, 2848, 329, 2849,
330, 2850, 331, 2851, 332, 2852, 333, 2853, 334, 2854, 335,
2855, 336, 2856, 337, 2857, 338, 2858, 339, 2859, 340, 2860,
341, 2861, 342, 2862, 343, 2863, 344, 2864, 345, 2865, 346,
2866, 347, 2867, 348, 2868, 349, 2869, 350, 2870, 351, 2871,
352, 2872, 353, 2873, 354, 2874, 355, 2875, 356, 2876, 357,
2877, 358, 2878, 359, 2879, 360, 2880, 361, 2881, 362, 2882,
363, 2883, 364, 2884, 365, 2885, 366, 2886, 367, 2887, 368,
2888, 369, 2889, 370, 2890, 371, 2891, 372, 2892, 373, 2893,
374, 2894, 375, 2895, 376, 2896, 377, 2897, 378, 2898, 379,
2899, 380, 2900, 381, 2901, 382, 2902, 383, 2903, 384, 2904,
385, 2905, 386, 2906, 387, 2907, 388, 2908, 389, 2909, 390,
2910, 391, 2911, 392, 2912, 393, 2913, 394, 2914, 395, 2915,
396, 2916, 397, 2917, 398, 2918, 399, 2919, 400, 2920, 401,
2921, 402, 2922, 403, 2923, 404, 2924, 405, 2925, 406, 2926,
407, 2927, 408, 2928, 409, 2929, 410, 2930, 411, 2931, 412,
2932, 413, 2933, 414, 2934, 415, 2935, 416, 2936, 417, 2937,
418, 2938, 419, 2939, 420, 2940, 421, 2941, 422, 2942, 423,
2943, 424, 2944, 425, 2945, 426, 2946, 427, 2947, 428, 2948,
429, 2949, 430, 2950, 431, 2951, 432, 2952, 433, 2953, 434,
2954, 435, 2955, 436, 2956, 437, 2957, 438, 2958, 439, 2959,
440, 2960, 441, 2961, 442, 2962, 443, 2963, 444, 2964, 445,
2965, 446, 2966, 447, 2967, 448, 2968, 449, 2969, 450, 2970,
451, 2971, 452, 2972, 453, 2973, 454, 2974, 455, 2975, 456,
2976, 457, 2977, 458, 2978, 459, 2979, 460, 2980, 461, 2981,
462, 2982, 463, 2983, 464, 2984, 465, 2985, 466, 2986, 467,
2987, 468, 2988, 469, 2989, 470, 2990, 471, 2991, 472, 2992,
473, 2993, 474, 2994, 475, 2995, 476, 2996, 477, 2997, 478,
2998, 479, 2999, 480, 3000, 481, 3001, 482, 3002, 483, 3003,
484, 3004, 485, 3005, 486, 3006, 487, 3007, 488, 3008, 489,
3009, 490, 3010, 491, 3011, 492, 3012, 493, 3013, 494, 3014,
495, 3015, 496, 3016, 497, 3017, 498, 3018, 499, 3019, 500,
3020, 501, 3021, 502, 3022, 503, 3023, 504, 3024, 505, 3025,
506, 3026, 507, 3027, 508, 3028, 509, 3029, 510, 3030, 511,
3031, 512, 3032, 513, 3033, 514, 3034, 515, 3035, 516, 3036,
517, 3037, 518, 3038, 519, 3039, 520, 3040, 521, 3041, 522,
3042, 523, 3043, 524, 3044, 525, 3045, 526, 3046, 527, 3047,
528, 3048, 529, 3049, 530, 3050, 531, 3051, 532, 3052, 533,
3053, 534, 3054, 535, 3055, 536, 3056, 537, 3057, 538, 3058,
539, 3059, 540, 3060, 541, 3061, 542, 3062, 543, 3063, 544,
3064, 545, 3065, 546, 3066, 547, 3067, 548, 3068, 549, 3069,
550, 3070, 551, 3071, 552, 3072, 553, 3073, 554, 3074, 555,
3075, 556, 3076, 557, 3077, 558, 3078, 559, 3079, 560, 3080,
561, 3081, 562, 3082, 563, 3083, 564, 3084, 565, 3085, 566,
3086, 567, 3087, 568, 3088, 569, 3089, 570, 3090, 571, 3091,
572, 3092, 573, 3093, 574, 3094, 575, 3095, 576, 3096, 577,
3097, 578, 3098, 579, 3099, 580, 3100, 581, 3101, 582, 3102,
583, 3103, 584, 3104, 585, 3105, 586, 3106, 587, 3107, 588,
3108, 589, 3109, 590, 3110, 591, 3111, 592, 3112, 593, 3113,
594, 3114, 595, 3115, 596, 3116, 597, 3117, 598, 3118, 599,
3119, 600, 3120, 601, 3121, 602, 3122, 603, 3123, 604, 3124,
605, 3125, 606, 3126, 607, 3127, 608, 3128, 609, 3129, 610,
3130, 611, 3131, 612, 3132, 613, 3133, 614, 3134, 615, 3135,
616, 3136, 617, 3137, 618, 3138, 619, 3139, 620, 3140, 621,
3141, 622, 3142, 623, 3143, 624, 3144, 625, 3145, 626, 3146,
627, 3147, 628, 3148, 629, 3149, 630, 3150, 631, 3151, 632,
3152, 633, 3153, 634, 3154, 635, 3155, 636, 3156, 637, 3157,
638, 3158, 639, 3159, 640, 3160, 641, 3161, 642, 3162, 643,
3163, 644, 3164, 645, 3165, 646, 3166, 647, 3167, 648, 3168,
649, 3169, 650, 3170, 651, 3171, 652, 3172, 653, 3173, 654,
3174, 655, 3175, 656, 3176, 657, 3177, 658, 3178, 659, 3179,
660, 3180, 661, 3181, 662, 3182, 663, 3183, 664, 3184, 665,
3185, 666, 3186, 667, 3187, 668, 3188, 669, 3189, 670, 3190,
671, 3191, 672, 3192, 673, 3193, 674, 3194, 675, 3195, 676,
3196, 677, 3197, 678, 3198, 679, 3199, 680, 3200, 681, 3201,
682, 3202, 683, 3203, 684, 3204, 685, 3205, 686, 3206, 687,
3207, 688, 3208, 689, 3209, 690, 3210, 691, 3211, 692, 3212,
693, 3213, 694, 3214, 695, 3215, 696, 3216, 697, 3217, 698,
3218, 699, 3219, 700, 3220, 701, 3221, 702, 3222, 703, 3223,
704, 3224, 705, 3225, 706, 3226, 707, 3227, 708, 3228, 709,
3229, 710, 3230, 711, 3231, 712, 3232, 713, 3233, 714, 3234,
715, 3235, 716, 3236, 717, 3237, 718, 3238, 719, 3239, 720,
3240, 721, 3241, 722, 3242, 723, 3243, 724, 3244, 725, 3245,
726, 3246, 727, 3247, 728, 3248, 729, 3249, 730, 3250, 731,
3251, 732, 3252, 733, 3253, 734, 3254, 735, 3255, 736, 3256,
737, 3257, 738, 3258, 739, 3259, 740, 3260, 741, 3261, 742,
3262, 743, 3263, 744, 3264, 745, 3265, 746, 3266, 747, 3267,
748, 3268, 749, 3269, 750, 3270, 751, 3271, 752, 3272, 753,
3273, 754, 3274, 755, 3275, 756, 3276, 757, 3277, 758, 3278,
759, 3279, 760, 3280, 761, 3281, 762, 3282, 763, 3283, 764,
3284, 765, 3285, 766, 3286, 767, 3287, 768, 3288, 769, 3289,
770, 3290, 771, 3291, 772, 3292, 773, 3293, 774, 3294, 775,
3295, 776, 3296, 777, 3297, 778, 3298, 779, 3299, 780, 3300,
781, 3301, 782, 3302, 783, 3303, 784, 3304, 785, 3305, 786,
3306, 787, 3307, 788, 3308, 789, 3309, 790, 3310, 791, 3311,
792, 3312, 793, 3313, 794, 3314, 795, 3315, 796, 3316, 797,
3317, 798, 3318, 799, 3319, 800, 3320, 801, 3321, 802, 3322,
803, 3323, 804, 3324, 805, 3325, 806, 3326, 807, 3327, 808,
3328, 809, 3329, 810, 3330, 811, 3331, 812, 3332, 813, 3333,
814, 3334, 815, 3335, 816, 3336, 817, 3337, 818, 3338, 819,
3339, 820, 3340, 821, 3341, 822, 3342, 823, 3343, 824, 3344,
825, 3345, 826, 3346, 827, 3347, 828, 3348, 829, 3349, 830,
3350, 831, 3351, 832, 3352, 833, 3353, 834, 3354, 835, 3355,
836, 3356, 837, 3357, 838, 3358, 839, 3359, 840, 3360, 841,
3361, 842, 3362, 843, 3363, 844, 3364, 845, 3365, 846, 3366,
847, 3367, 848, 3368, 849, 3369, 850, 3370, 851, 3371, 852,
3372, 853, 3373, 854, 3374, 855, 3375, 856, 3376, 857, 3377,
858, 3378, 859, 3379, 860, 3380, 861, 3381, 862, 3382, 863,
3383, 864, 3384, 865, 3385, 866, 3386, 867, 3387, 868, 3388,
869, 3389, 870, 3390, 871, 3391, 872, 3392, 873, 3393, 874,
3394, 875, 3395, 876, 3396, 877, 3397, 878, 3398, 879, 3399,
880, 3400, 881, 3401, 882, 3402, 883, 3403, 884, 3404, 885,
3405, 886, 3406, 887, 3407, 888, 3408, 889, 3409, 890, 3410,
891, 3411, 892, 3412, 893, 3413, 894, 3414, 895, 3415, 896,
3416, 897, 3417, 898, 3418, 899, 3419, 900, 3420, 901, 3421,
902, 3422, 903, 3423, 904, 3424, 905, 3425, 906, 3426, 907,
3427, 908, 3428, 909, 3429, 910, 3430, 911, 3431, 912, 3432,
913, 3433, 914, 3434, 915, 3435, 916, 3436, 917, 3437, 918,
3438, 919, 3439, 920, 3440, 921, 3441, 922, 3442, 923, 3443,
924, 3444, 925, 3445, 926, 3446, 927, 3447, 928, 3448, 929,
3449, 930, 3450, 931, 3451, 932, 3452, 933, 3453, 934, 3454,
935, 3455, 936, 3456, 937, 3457, 938, 3458, 939, 3459, 940,
3460, 941, 3461, 942, 3462, 943, 3463, 944, 3464, 945, 3465,
946, 3466, 947, 3467, 948, 3468, 949, 3469, 950, 3470, 951,
3471, 952, 3472, 953, 3473, 954, 3474, 955, 3475, 956, 3476,
957, 3477, 958, 3478, 959, 3479, 960, 3480, 961, 3481, 962,
3482, 963, 3483, 964, 3484, 965, 3485, 966, 3486, 967, 3487,
968, 3488, 969, 3489, 970, 3490, 971, 3491, 972, 3492, 973,
3493, 974, 3494, 975, 3495, 976, 3496, 977, 3497, 978, 3498,
979, 3499, 980, 3500, 981, 3501, 982, 3502, 983, 3503, 984,
3504, 985, 3505, 986, 3506, 987, 3507, 988, 3508, 989, 3509,
990, 3510, 991, 3511, 992, 3512, 993, 3513, 994, 3514, 995,
3515, 996, 3516, 997, 3517, 998, 3518, 999, 3519, 1000, 3520,
1001, 3521, 1002, 3522, 1003, 3523, 1004, 3524, 1005, 3525, 1006,
3526, 1007, 3527, 1008, 3528, 1009, 3529, 1010, 3530, 1011, 3531,
1012, 3532, 1013, 3533, 1014, 3534, 1015, 3535, 1016, 3536, 1017,
3537, 1018, 3538, 1019, 3539, 1020, 3540, 1021, 3541, 1022, 3542,
1023, 3543, 1024, 3544, 1025, 3545, 1026, 3546, 1027, 3547, 1028,
3548, 1029, 3549, 1030, 3550, 1031, 3551, 1032, 3552, 1033, 3553,
1034, 3554, 1035, 3555, 1036, 3556, 1037, 3557, 1038, 3558, 1039,
3559, 1040, 3560, 1041, 3561, 1042, 3562, 1043, 3563, 1044, 3564,
1045, 3565, 1046, 3566, 1047, 3567, 1048, 3568, 1049, 3569, 1050,
3570, 1051, 3571, 1052, 3572, 1053, 3573, 1054, 3574, 1055, 3575,
1056, 3576, 1057, 3577, 1058, 3578, 1059, 3579, 1060, 3580, 1061,
3581, 1062, 3582, 1063, 3583, 1064, 3584, 1065, 3585, 1066, 3586,
1067, 3587, 1068, 3588, 1069, 3589, 1070, 3590, 1071, 3591, 1072,
3592, 1073, 3593, 1074, 3594, 1075, 3595, 1076, 3596, 1077, 3597,
1078, 3598, 1079, 3599, 1080, 3600, 1081, 3601, 1082, 3602, 1083,
3603, 1084, 3604, 1085, 3605, 1086, 3606, 1087, 3607, 1088, 3608,
1089, 3609, 1090, 3610, 1091, 3611, 1092, 3612, 1093, 3613, 1094,
3614, 1095, 3615, 1096, 3616, 1097, 3617, 1098, 3618, 1099, 3619,
1100, 3620, 1101, 3621, 1102, 3622, 1103, 3623, 1104, 3624, 1105,
3625, 1106, 3626, 1107, 3627, 1108, 3628, 1109, 3629, 1110, 3630,
1111, 3631, 1112, 3632, 1113, 3633, 1114, 3634, 1115, 3635, 1116,
3636, 1117, 3637, 1118, 3638, 1119, 3639, 1120, 3640, 1121, 3641,
1122, 3642, 1123, 3643, 1124, 3644, 1125, 3645, 1126, 3646, 1127,
3647, 1128, 3648, 1129, 3649, 1130, 3650, 1131, 3651, 1132, 3652,
1133, 3653, 1134, 3654, 1135, 3655, 1136, 3656, 1137, 3657, 1138,
3658, 1139, 3659, 1140, 3660, 1141, 3661, 1142, 3662, 1143, 3663,
1144, 3664, 1145, 3665, 1146, 3666, 1147, 3667, 1148, 3668, 1149,
3669, 1150, 3670, 1151, 3671, 1152, 3672, 1153, 3673, 1154, 3674,
1155, 3675, 1156, 3676, 1157, 3677, 1158, 3678, 1159, 3679, 1160,
3680, 1161, 3681, 1162, 3682, 1163, 3683, 1164, 3684, 1165, 3685,
1166, 3686, 1167, 3687, 1168, 3688, 1169, 3689, 1170, 3690, 1171,
3691, 1172, 3692, 1173, 3693, 1174, 3694, 1175, 3695, 1176, 3696,
1177, 3697, 1178, 3698, 1179, 3699, 1180, 3700, 1181, 3701, 1182,
3702, 1183, 3703, 1184, 3704, 1185, 3705, 1186, 3706, 1187, 3707,
1188, 3708, 1189, 3709, 1190, 3710, 1191, 3711, 1192, 3712, 1193,
3713, 1194, 3714, 1195, 3715, 1196, 3716, 1197, 3717, 1198, 3718,
1199, 3719, 1200, 3720, 1201, 3721, 1202, 3722, 1203, 3723, 1204,
3724, 1205, 3725, 1206, 3726, 1207, 3727, 1208, 3728, 1209, 3729,
1210, 3730, 1211, 3731, 1212, 3732, 1213, 3733, 1214, 3734, 1215,
3735, 1216, 3736, 1217, 3737, 1218, 3738, 1219, 3739, 1220, 3740,
1221, 3741, 1222, 3742, 1223, 3743, 1224, 3744, 1225, 3745, 1226,
3746, 1227, 3747, 1228, 3748, 1229, 3749, 1230, 3750, 1231, 3751,
1232, 3752, 1233, 3753, 1234, 3754, 1235, 3755, 1236, 3756, 1237,
3757, 1238, 3758, 1239, 3759, 1240, 3760, 1241, 3761, 1242, 3762,
1243, 3763, 1244, 3764, 1245, 3765, 1246, 3766, 1247, 3767, 1248,
3768, 1249, 3769, 1250, 3770, 1251, 3771, 1252, 3772, 1253, 3773,
1254, 3774, 1255, 3775, 1256, 3776, 1257, 3777, 1258, 3778, 1259,
3779, 1260, 3780, 1261, 3781, 1262, 3782, 1263, 3783, 1264, 3784,
1265, 3785, 1266, 3786, 1267, 3787, 1268, 3788, 1269, 3789, 1270,
3790, 1271, 3791, 1272, 3792, 1273, 3793, 1274, 3794, 1275, 3795,
1276, 3796, 1277, 3797, 1278, 3798, 1279, 3799, 1280, 3800, 1281,
3801, 1282, 3802, 1283, 3803, 1284, 3804, 1285, 3805, 1286, 3806,
1287, 3807, 1288, 3808, 1289, 3809, 1290, 3810, 1291, 3811, 1292,
3812, 1293, 3813, 1294, 3814, 1295, 3815, 1296, 3816, 1297, 3817,
1298, 3818, 1299, 3819, 1300, 3820, 1301, 3821, 1302, 3822, 1303,
3823, 1304, 3824, 1305, 3825, 1306, 3826, 1307, 3827, 1308, 3828,
1309, 3829, 1310, 3830, 1311, 3831, 1312, 3832, 1313, 3833, 1314,
3834, 1315, 3835, 1316, 3836, 1317, 3837, 1318, 3838, 1319, 3839,
1320, 3840, 1321, 3841, 1322, 3842, 1323, 3843, 1324, 3844, 1325,
3845, 1326, 3846, 1327, 3847, 1328, 3848, 1329, 3849, 1330, 3850,
1331, 3851, 1332, 3852, 1333, 3853, 1334, 3854, 1335, 3855, 1336,
3856, 1337, 3857, 1338, 3858, 1339, 3859, 1340, 3860, 1341, 3861,
1342, 3862, 1343, 3863, 1344, 3864, 1345, 3865, 1346, 3866, 1347,
3867, 1348, 3868, 1349, 3869, 1350, 3870, 1351, 3871, 1352, 3872,
1353, 3873, 1354, 3874, 1355, 3875, 1356, 3876, 1357, 3877, 1358,
3878, 1359, 3879, 1360, 3880, 1361, 3881, 1362, 3882, 1363, 3883,
1364, 3884, 1365, 3885, 1366, 3886, 1367, 3887, 1368, 3888, 1369,
3889, 1370, 3890, 1371, 3891, 1372, 3892, 1373, 3893, 1374, 3894,
1375, 3895, 1376, 3896, 1377, 3897, 1378, 3898, 1379, 3899, 1380,
3900, 1381, 3901, 1382, 3902, 1383, 3903, 1384, 3904, 1385, 3905,
1386, 3906, 1387, 3907, 1388, 3908, 1389, 3909, 1390, 3910, 1391,
3911, 1392, 3912, 1393, 3913, 1394, 3914, 1395, 3915, 1396, 3916,
1397, 3917, 1398, 3918, 1399, 3919, 1400, 3920, 1401, 3921, 1402,
3922, 1403, 3923, 1404, 3924, 1405, 3925, 1406, 3926, 1407, 3927,
1408, 3928, 1409, 3929, 1410, 3930, 1411, 3931, 1412, 3932, 1413,
3933, 1414, 3934, 1415, 3935, 1416, 3936, 1417, 3937, 1418, 3938,
1419, 3939, 1420, 3940, 1421, 3941, 1422, 3942, 1423, 3943, 1424,
3944, 1425, 3945, 1426, 3946, 1427, 3947, 1428, 3948, 1429, 3949,
1430, 3950, 1431, 3951, 1432, 3952, 1433, 3953, 1434, 3954, 1435,
3955, 1436, 3956, 1437, 3957, 1438, 3958, 1439, 3959, 1440, 3960,
1441, 3961, 1442, 3962, 1443, 3963, 1444, 3964, 1445, 3965, 1446,
3966, 1447, 3967, 1448, 3968, 1449, 3969, 1450, 3970, 1451, 3971,
1452, 3972, 1453, 3973, 1454, 3974, 1455, 3975, 1456, 3976, 1457,
3977, 1458, 3978, 1459, 3979, 1460, 3980, 1461, 3981, 1462, 3982,
1463, 3983, 1464, 3984, 1465, 3985, 1466, 3986, 1467, 3987, 1468,
3988, 1469, 3989, 1470, 3990, 1471, 3991, 1472, 3992, 1473, 3993,
1474, 3994, 1475, 3995, 1476, 3996, 1477, 3997, 1478, 3998, 1479,
3999, 1480, 4000, 1481, 4001, 1482, 4002, 1483, 4003, 1484, 4004,
1485, 4005, 1486, 4006, 1487, 4007, 1488, 4008, 1489, 4009, 1490,
4010, 1491, 4011, 1492, 4012, 1493, 4013, 1494, 4014, 1495, 4015,
1496, 4016, 1497, 4017, 1498, 4018, 1499, 4019, 1500, 4020, 1501,
4021, 1502, 4022, 1503, 4023, 1504, 4024, 1505, 4025, 1506, 4026,
1507, 4027, 1508, 4028, 1509, 4029, 1510, 4030, 1511, 4031, 1512,
4032, 1513, 4033, 1514, 4034, 1515, 4035, 1516, 4036, 1517, 4037,
1518, 4038, 1519, 4039, 1520, 4040, 1521, 4041, 1522, 4042, 1523,
4043, 1524, 4044, 1525, 4045, 1526, 4046, 1527, 4047, 1528, 4048,
1529, 4049, 1530, 4050, 1531, 4051, 1532, 4052, 1533, 4053, 1534,
4054, 1535, 4055, 1536, 4056, 1537, 4057, 1538, 4058, 1539, 4059,
1540, 4060, 1541, 4061, 1542, 4062, 1543, 4063, 1544, 4064, 1545,
4065, 1546, 4066, 1547, 4067, 1548, 4068, 1549, 4069, 1550, 4070,
1551, 4071, 1552, 4072, 1553, 4073, 1554, 4074, 1555, 4075, 1556,
4076, 1557, 4077, 1558, 4078, 1559, 4079, 1560, 4080, 1561, 4081,
1562, 4082, 1563, 4083, 1564, 4084, 1565, 4085, 1566, 4086, 1567,
4087, 1568, 4088, 1569, 4089, 1570, 4090, 1571, 4091, 1572, 4092,
1573, 4093, 1574, 4094, 1575, 4095, 1576, 4096, 1577, 4097, 1578,
4098, 1579, 4099, 1580, 4100, 1581, 4101, 1582, 4102, 1583, 4103,
1584, 4104, 1585, 4105, 1586, 4106, 1587, 4107, 1588, 4108, 1589,
4109, 1590, 4110, 1591, 4111, 1592, 4112, 1593, 4113, 1594, 4114,
1595, 4115, 1596, 4116, 1597, 4117, 1598, 4118, 1599, 4119, 1600,
4120, 1601, 4121, 1602, 4122, 1603, 4123, 1604, 4124, 1605, 4125,
1606, 4126, 1607, 4127, 1608, 4128, 1609, 4129, 1610, 4130, 1611,
4131, 1612, 4132, 1613, 4133, 1614, 4134, 1615, 4135, 1616, 4136,
1617, 4137, 1618, 4138, 1619, 4139, 1620, 4140, 1621, 4141, 1622,
4142, 1623, 4143, 1624, 4144, 1625, 4145, 1626, 4146, 1627, 4147,
1628, 4148, 1629, 4149, 1630, 4150, 1631, 4151, 1632, 4152, 1633,
4153, 1634, 4154, 1635, 4155, 1636, 4156, 1637, 4157, 1638, 4158,
1639, 4159, 1640, 4160, 1641, 4161, 1642, 4162, 1643, 4163, 1644,
4164, 1645, 4165, 1646, 4166, 1647, 4167, 1648, 4168, 1649, 4169,
1650, 4170, 1651, 4171, 1652, 4172, 1653, 4173, 1654, 4174, 1655,
4175, 1656, 4176, 1657, 4177, 1658, 4178, 1659, 4179, 1660, 4180,
1661, 4181, 1662, 4182, 1663, 4183, 1664, 4184, 1665, 4185, 1666,
4186, 1667, 4187, 1668, 4188, 1669, 4189, 1670, 4190, 1671, 4191,
1672, 4192, 1673, 4193, 1674, 4194, 1675, 4195, 1676, 4196, 1677,
4197, 1678, 4198, 1679, 4199, 1680, 4200, 1681, 4201, 1682, 4202,
1683, 4203, 1684, 4204, 1685, 4205, 1686, 4206, 1687, 4207, 1688,
4208, 1689, 4209, 1690, 4210, 1691, 4211, 1692, 4212, 1693, 4213,
1694, 4214, 1695, 4215, 1696, 4216, 1697, 4217, 1698, 4218, 1699,
4219, 1700, 4220, 1701, 4221, 1702, 4222, 1703, 4223, 1704, 4224,
1705, 4225, 1706, 4226, 1707, 4227, 1708, 4228, 1709, 4229, 1710,
4230, 1711, 4231, 1712, 4232, 1713, 4233, 1714, 4234, 1715, 4235,
1716, 4236, 1717, 4237, 1718, 4238, 1719, 4239, 1720, 4240, 1721,
4241, 1722, 4242, 1723, 4243, 1724, 4244, 1725, 4245, 1726, 4246,
1727, 4247, 1728, 4248, 1729, 4249, 1730, 4250, 1731, 4251, 1732,
4252, 1733, 4253, 1734, 4254, 1735, 4255, 1736, 4256, 1737, 4257,
1738, 4258, 1739, 4259, 1740, 4260, 1741, 4261, 1742, 4262, 1743,
4263, 1744, 4264, 1745, 4265, 1746, 4266, 1747, 4267, 1748, 4268,
1749, 4269, 1750, 4270, 1751, 4271, 1752, 4272, 1753, 4273, 1754,
4274, 1755, 4275, 1756, 4276, 1757, 4277, 1758, 4278, 1759, 4279,
1760, 4280, 1761, 4281, 1762, 4282, 1763, 4283, 1764, 4284, 1765,
4285, 1766, 4286, 1767, 4287, 1768, 4288, 1769, 4289, 1770, 4290,
1771, 4291, 1772, 4292, 1773, 4293, 1774, 4294, 1775, 4295, 1776,
4296, 1777, 4297, 1778, 4298, 1779, 4299, 1780, 4300, 1781, 4301,
1782, 4302, 1783, 4303, 1784, 4304, 1785, 4305, 1786, 4306, 1787,
4307, 1788, 4308, 1789, 4309, 1790, 4310, 1791, 4311, 1792, 4312,
1793, 4313, 1794, 4314, 1795, 4315, 1796, 4316, 1797, 4317, 1798,
4318, 1799, 4319, 1800, 4320, 1801, 4321, 1802, 4322, 1803, 4323,
1804, 4324, 1805, 4325, 1806, 4326, 1807, 4327, 1808, 4328, 1809,
4329, 1810, 4330, 1811, 4331, 1812, 4332, 1813, 4333, 1814, 4334,
1815, 4335, 1816, 4336, 1817, 4337, 1818, 4338, 1819, 4339, 1820,
4340, 1821, 4341, 1822, 4342, 1823, 4343, 1824, 4344, 1825, 4345,
1826, 4346, 1827, 4347, 1828, 4348, 1829, 4349, 1830, 4350, 1831,
4351, 1832, 4352, 1833, 4353, 1834, 4354, 1835, 4355, 1836, 4356,
1837, 4357, 1838, 4358, 1839, 4359, 1840, 4360, 1841, 4361, 1842,
4362, 1843, 4363, 1844, 4364, 1845, 4365, 1846, 4366, 1847, 4367,
1848, 4368, 1849, 4369, 1850, 4370, 1851, 4371, 1852, 4372, 1853,
4373, 1854, 4374, 1855, 4375, 1856, 4376, 1857, 4377, 1858, 4378,
1859, 4379, 1860, 4380, 1861, 4381, 1862, 4382, 1863, 4383, 1864,
4384, 1865, 4385, 1866, 4386, 1867, 4387, 1868, 4388, 1869, 4389,
1870, 4390, 1871, 4391, 1872, 4392, 1873, 4393, 1874, 4394, 1875,
4395, 1876, 4396, 1877, 4397, 1878, 4398, 1879, 4399, 1880, 4400,
1881, 4401, 1882, 4402, 1883, 4403, 1884, 4404, 1885, 4405, 1886,
4406, 1887, 4407, 1888, 4408, 1889, 4409, 1890, 4410, 1891, 4411,
1892, 4412, 1893, 4413, 1894, 4414, 1895, 4415, 1896, 4416, 1897,
4417, 1898, 4418, 1899, 4419, 1900, 4420, 1901, 4421, 1902, 4422,
1903, 4423, 1904, 4424, 1905, 4425, 1906, 4426, 1907, 4427, 1908,
4428, 1909, 4429, 1910, 4430, 1911, 4431, 1912, 4432, 1913, 4433,
1914, 4434, 1915, 4435, 1916, 4436, 1917, 4437, 1918, 4438, 1919,
4439, 1920, 4440, 1921, 4441, 1922, 4442, 1923, 4443, 1924, 4444,
1925, 4445, 1926, 4446, 1927, 4447, 1928, 4448, 1929, 4449, 1930,
4450, 1931, 4451, 1932, 4452, 1933, 4453, 1934, 4454, 1935, 4455,
1936, 4456, 1937, 4457, 1938, 4458, 1939, 4459, 1940, 4460, 1941,
4461, 1942, 4462, 1943, 4463, 1944, 4464, 1945, 4465, 1946, 4466,
1947, 4467, 1948, 4468, 1949, 4469, 1950, 4470, 1951, 4471, 1952,
4472, 1953, 4473, 1954, 4474, 1955, 4475, 1956, 4476, 1957, 4477,
1958, 4478, 1959, 4479, 1960, 4480, 1961, 4481, 1962, 4482, 1963,
4483, 1964, 4484, 1965, 4485, 1966, 4486, 1967, 4487, 1968, 4488,
1969, 4489, 1970, 4490, 1971, 4491, 1972, 4492, 1973, 4493, 1974,
4494, 1975, 4495, 1976, 4496, 1977, 4497, 1978, 4498, 1979, 4499,
1980, 4500, 1981, 4501, 1982, 4502, 1983, 4503, 1984, 4504, 1985,
4505, 1986, 4506, 1987, 4507, 1988, 4508, 1989, 4509, 1990, 4510,
1991, 4511, 1992, 4512, 1993, 4513, 1994, 4514, 1995, 4515, 1996,
4516, 1997, 4517, 1998, 4518, 1999, 4519, 2000, 4520, 2001, 4521,
2002, 4522, 2003, 4523, 2004, 4524, 2005, 4525, 2006, 4526, 2007,
4527, 2008, 4528, 2009, 4529, 2010, 4530, 2011, 4531, 2012, 4532,
2013, 4533, 2014, 4534, 2015, 4535, 2016, 4536, 2017, 4537, 2018,
4538, 2019, 4539, 2020, 4540, 2021, 4541, 2022, 4542, 2023, 4543,
2024, 4544, 2025, 4545, 2026, 4546, 2027, 4547, 2028, 4548, 2029,
4549, 2030, 4550, 2031, 4551, 2032, 4552, 2033, 4553, 2034, 4554,
2035, 4555, 2036, 4556, 2037, 4557, 2038, 4558, 2039, 4559, 2040,
4560, 2041, 4561, 2042, 4562, 2043, 4563, 2044, 4564, 2045, 4565,
2046, 4566, 2047, 4567, 2048, 4568, 2049, 4569, 2050, 4570, 2051,
4571, 2052, 4572, 2053, 4573, 2054, 4574, 2055, 4575, 2056, 4576,
2057, 4577, 2058, 4578, 2059, 4579, 2060, 4580, 2061, 4581, 2062,
4582, 2063, 4583, 2064, 4584, 2065, 4585, 2066, 4586, 2067, 4587,
2068, 4588, 2069, 4589, 2070, 4590, 2071, 4591, 2072, 4592, 2073,
4593, 2074, 4594, 2075, 4595, 2076, 4596, 2077, 4597, 2078, 4598,
2079, 4599, 2080, 4600, 2081, 4601, 2082, 4602, 2083, 4603, 2084,
4604, 2085, 4605, 2086, 4606, 2087, 4607, 2088, 4608, 2089, 4609,
2090, 4610, 2091, 4611, 2092, 4612, 2093, 4613, 2094, 4614, 2095,
4615, 2096, 4616, 2097, 4617, 2098, 4618, 2099, 4619, 2100, 4620,
2101, 4621, 2102, 4622, 2103, 4623, 2104, 4624, 2105, 4625, 2106,
4626, 2107, 4627, 2108, 4628, 2109, 4629, 2110, 4630, 2111, 4631,
2112, 4632, 2113, 4633, 2114, 4634, 2115, 4635, 2116, 4636, 2117,
4637, 2118, 4638, 2119, 4639, 2120, 4640, 2121, 4641, 2122, 4642,
2123, 4643, 2124, 4644, 2125, 4645, 2126, 4646, 2127, 4647, 2128,
4648, 2129, 4649, 2130, 4650, 2131, 4651, 2132, 4652, 2133, 4653,
2134, 4654, 2135, 4655, 2136, 4656, 2137, 4657, 2138, 4658, 2139,
4659, 2140, 4660, 2141, 4661, 2142, 4662, 2143, 4663, 2144, 4664,
2145, 4665, 2146, 4666, 2147, 4667, 2148, 4668, 2149, 4669, 2150,
4670, 2151, 4671, 2152, 4672, 2153, 4673, 2154, 4674, 2155, 4675,
2156, 4676, 2157, 4677, 2158, 4678, 2159, 4679, 2160, 4680, 2161,
4681, 2162, 4682, 2163, 4683, 2164, 4684, 2165, 4685, 2166, 4686,
2167, 4687, 2168, 4688, 2169, 4689, 2170, 4690, 2171, 4691, 2172,
4692, 2173, 4693, 2174, 4694, 2175, 4695, 2176, 4696, 2177, 4697,
2178, 4698, 2179, 4699, 2180, 4700, 2181, 4701, 2182, 4702, 2183,
4703, 2184, 4704, 2185, 4705, 2186, 4706, 2187, 4707, 2188, 4708,
2189, 4709, 2190, 4710, 2191, 4711, 2192, 4712, 2193, 4713, 2194,
4714, 2195, 4715, 2196, 4716, 2197, 4717, 2198, 4718, 2199, 4719,
2200, 4720, 2201, 4721, 2202, 4722, 2203, 4723, 2204, 4724, 2205,
4725, 2206, 4726, 2207, 4727, 2208, 4728, 2209, 4729, 2210, 4730,
2211, 4731, 2212, 4732, 2213, 4733, 2214, 4734, 2215, 4735, 2216,
4736, 2217, 4737, 2218, 4738, 2219, 4739, 2220, 4740, 2221, 4741,
2222, 4742, 2223, 4743, 2224, 4744, 2225, 4745, 2226, 4746, 2227,
4747, 2228, 4748, 2229, 4749, 2230, 4750, 2231, 4751, 2232, 4752,
2233, 4753, 2234, 4754, 2235, 4755, 2236, 4756, 2237, 4757, 2238,
4758, 2239, 4759, 2240, 4760, 2241, 4761, 2242, 4762, 2243, 4763,
2244, 4764, 2245, 4765, 2246, 4766, 2247, 4767, 2248, 4768, 2249,
4769, 2250, 4770, 2251, 4771, 2252, 4772, 2253, 4773, 2254, 4774,
2255, 4775, 2256, 4776, 2257, 4777, 2258, 4778, 2259, 4779, 2260,
4780, 2261, 4781, 2262, 4782, 2263, 4783, 2264, 4784, 2265, 4785,
2266, 4786, 2267, 4787, 2268, 4788, 2269, 4789, 2270, 4790, 2271,
4791, 2272, 4792, 2273, 4793, 2274, 4794, 2275, 4795, 2276, 4796,
2277, 4797, 2278, 4798, 2279, 4799, 2280, 4800, 2281, 4801, 2282,
4802, 2283, 4803, 2284, 4804, 2285, 4805, 2286, 4806, 2287, 4807,
2288, 4808, 2289, 4809, 2290, 4810, 2291, 4811, 2292, 4812, 2293,
4813, 2294, 4814, 2295, 4815, 2296, 4816, 2297, 4817, 2298, 4818,
2299, 4819, 2300, 4820, 2301, 4821, 2302, 4822, 2303, 4823, 2304,
4824, 2305, 4825, 2306, 4826, 2307, 4827, 2308, 4828, 2309, 4829,
2310, 4830, 2311, 4831, 2312, 4832, 2313, 4833, 2314, 4834, 2315,
4835, 2316, 4836, 2317, 4837, 2318, 4838, 2319, 4839, 2320, 4840,
2321, 4841, 2322, 4842, 2323, 4843, 2324, 4844, 2325, 4845, 2326,
4846, 2327, 4847, 2328, 4848, 2329, 4849, 2330, 4850, 2331, 4851,
2332, 4852, 2333, 4853, 2334, 4854, 2335, 4855, 2336, 4856, 2337,
4857, 2338, 4858, 2339, 4859, 2340, 4860, 2341, 4861, 2342, 4862,
2343, 4863, 2344, 4864, 2345, 4865, 2346, 4866, 2347, 4867, 2348,
4868, 2349, 4869, 2350, 4870, 2351, 4871, 2352, 4872, 2353, 4873,
2354, 4874, 2355, 4875, 2356, 4876, 2357, 4877, 2358, 4878, 2359,
4879, 2360, 4880, 2361, 4881, 2362, 4882, 2363, 4883, 2364, 4884,
2365, 4885, 2366, 4886, 2367, 4887, 2368, 4888, 2369, 4889, 2370,
4890, 2371, 4891, 2372, 4892, 2373, 4893, 2374, 4894, 2375, 4895,
2376, 4896, 2377, 4897, 2378, 4898, 2379, 4899, 2380, 4900, 2381,
4901, 2382, 4902, 2383, 4903, 2384, 4904, 2385, 4905, 2386, 4906,
2387, 4907, 2388, 4908, 2389, 4909, 2390, 4910, 2391, 4911, 2392,
4912, 2393, 4913, 2394, 4914, 2395, 4915, 2396, 4916, 2397, 4917,
2398, 4918, 2399, 4919, 2400, 4920, 2401, 4921, 2402, 4922, 2403,
4923, 2404, 4924, 2405, 4925, 2406, 4926, 2407, 4927, 2408, 4928,
2409, 4929, 2410, 4930, 2411, 4931, 2412, 4932, 2413, 4933, 2414,
4934, 2415, 4935, 2416, 4936, 2417, 4937, 2418, 4938, 2419, 4939,
2420, 4940, 2421, 4941, 2422, 4942, 2423, 4943, 2424, 4944, 2425,
4945, 2426, 4946, 2427, 4947, 2428, 4948, 2429, 4949, 2430, 4950,
2431, 4951, 2432, 4952, 2433, 4953, 2434, 4954, 2435, 4955, 2436,
4956, 2437, 4957, 2438, 4958, 2439, 4959, 2440, 4960, 2441, 4961,
2442, 4962, 2443, 4963, 2444, 4964, 2445, 4965, 2446, 4966, 2447,
4967, 2448, 4968, 2449, 4969, 2450, 4970, 2451, 4971, 2452, 4972,
2453, 4973, 2454, 4974, 2455, 4975, 2456, 4976, 2457, 4977, 2458,
4978, 2459, 4979, 2460, 4980, 2461, 4981, 2462, 4982, 2463, 4983,
2464, 4984, 2465, 4985, 2466, 4986, 2467, 4987, 2468, 4988, 2469,
4989, 2470, 4990, 2471, 4991, 2472, 4992, 2473, 4993, 2474, 4994,
2475, 4995, 2476, 4996, 2477, 4997, 2478, 4998, 2479, 4999, 2480,
5000, 2481, 5001, 2482, 5002, 2483, 5003, 2484, 5004, 2485, 5005,
2486, 5006, 2487, 5007, 2488, 5008, 2489, 5009, 2490, 5010, 2491,
5011, 2492, 5012, 2493, 5013, 2494, 5014, 2495, 5015, 2496, 5016,
2497, 5017, 2498, 5018, 2499, 5019, 2500, 5020, 2501, 5021, 2502,
5022, 2503, 5023, 2504, 5024, 2505, 5025, 2506, 5026, 2507, 5027,
2508, 5028, 2509, 5029, 2510, 5030, 2511, 5031, 2512, 5032, 2513,
5033, 2514, 5034, 2515, 5035, 2516, 5036, 2517, 5037, 2518, 5038,
2519, 5039}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/transpose_test_utils.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/transpose_test_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b177da21-0cd6-4142-be0f-c1ea4d838d88 | cpp | google/quiche | oblivious_http_gateway | quiche/oblivious_http/oblivious_http_gateway.cc | quiche/oblivious_http/oblivious_http_gateway_test.cc | #include "quiche/oblivious_http/oblivious_http_gateway.h"
#include <stdint.h>
#include <memory>
#include <string>
#include <utility>
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "quiche/common/quiche_crypto_logging.h"
#include "quiche/common/quiche_random.h"
namespace quiche {
ObliviousHttpGateway::ObliviousHttpGateway(
bssl::UniquePtr<EVP_HPKE_KEY> recipient_key,
const ObliviousHttpHeaderKeyConfig& ohttp_key_config,
QuicheRandom* quiche_random)
: server_hpke_key_(std::move(recipient_key)),
ohttp_key_config_(ohttp_key_config),
quiche_random_(quiche_random) {}
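// Create() below validates the caller-supplied HPKE private key: it rejects an
// empty key, initializes an EVP_HPKE_KEY using the KEM from the key config,
// and falls back to the process-wide QuicheRandom when no RNG is supplied.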
absl::StatusOr<ObliviousHttpGateway> ObliviousHttpGateway::Create(
absl::string_view hpke_private_key,
const ObliviousHttpHeaderKeyConfig& ohttp_key_config,
QuicheRandom* quiche_random) {
if (hpke_private_key.empty()) {
return absl::InvalidArgumentError("Invalid/Empty HPKE private key.");
}
bssl::UniquePtr<EVP_HPKE_KEY> recipient_key(EVP_HPKE_KEY_new());
if (recipient_key == nullptr) {
return SslErrorAsStatus(
"Failed to initialize ObliviousHttpGateway/Server's Key.");
}
if (!EVP_HPKE_KEY_init(
recipient_key.get(), ohttp_key_config.GetHpkeKem(),
reinterpret_cast<const uint8_t*>(hpke_private_key.data()),
hpke_private_key.size())) {
return SslErrorAsStatus("Failed to import HPKE private key.");
}
if (quiche_random == nullptr) quiche_random = QuicheRandom::GetInstance();
return ObliviousHttpGateway(std::move(recipient_key), ohttp_key_config,
quiche_random);
}
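// The two methods below are thin wrappers: request decryption delegates to
// ObliviousHttpRequest::CreateServerObliviousRequest with the gateway's HPKE
// key, and response creation delegates to
// ObliviousHttpResponse::CreateServerObliviousResponse. A typical round trip
// (sketch): decrypt an encapsulated request, release its Context, then pass
// that Context back in when building the encrypted response.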
absl::StatusOr<ObliviousHttpRequest>
ObliviousHttpGateway::DecryptObliviousHttpRequest(
absl::string_view encrypted_data, absl::string_view request_label) const {
return ObliviousHttpRequest::CreateServerObliviousRequest(
encrypted_data, *(server_hpke_key_), ohttp_key_config_, request_label);
}
absl::StatusOr<ObliviousHttpResponse>
ObliviousHttpGateway::CreateObliviousHttpResponse(
std::string plaintext_data,
ObliviousHttpRequest::Context& oblivious_http_request_context,
absl::string_view response_label) const {
return ObliviousHttpResponse::CreateServerObliviousResponse(
std::move(plaintext_data), oblivious_http_request_context, response_label,
quiche_random_);
}
} | #include "quiche/oblivious_http/oblivious_http_gateway.h"
#include <stdint.h>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/escaping.h"
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/platform/api/quiche_thread.h"
#include "quiche/common/quiche_random.h"
#include "quiche/oblivious_http/buffers/oblivious_http_request.h"
namespace quiche {
namespace {
std::string GetHpkePrivateKey() {
absl::string_view hpke_key_hex =
"b77431ecfa8f4cfc30d6e467aafa06944dffe28cb9dd1409e33a3045f5adc8a1";
std::string hpke_key_bytes;
EXPECT_TRUE(absl::HexStringToBytes(hpke_key_hex, &hpke_key_bytes));
return hpke_key_bytes;
}
std::string GetHpkePublicKey() {
absl::string_view public_key =
"6d21cfe09fbea5122f9ebc2eb2a69fcc4f06408cd54aac934f012e76fcdcef62";
std::string public_key_bytes;
EXPECT_TRUE(absl::HexStringToBytes(public_key, &public_key_bytes));
return public_key_bytes;
}
const ObliviousHttpHeaderKeyConfig GetOhttpKeyConfig(uint8_t key_id,
uint16_t kem_id,
uint16_t kdf_id,
uint16_t aead_id) {
auto ohttp_key_config =
ObliviousHttpHeaderKeyConfig::Create(key_id, kem_id, kdf_id, aead_id);
EXPECT_TRUE(ohttp_key_config.ok());
return std::move(ohttp_key_config.value());
}
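// The helpers above pin a fixed hex-encoded X25519 HPKE key pair and build a
// key config; the tests below decrypt pre-computed encapsulated requests and
// exercise the full decrypt-then-respond flow against that key material.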
TEST(ObliviousHttpGateway, TestProvisioningKeyAndDecapsulate) {
constexpr absl::string_view kX25519SecretKey =
"3c168975674b2fa8e465970b79c8dcf09f1c741626480bd4c6162fc5b6a98e1a";
std::string x25519_secret_key_bytes;
ASSERT_TRUE(
absl::HexStringToBytes(kX25519SecretKey, &x25519_secret_key_bytes));
auto instance = ObliviousHttpGateway::Create(
x25519_secret_key_bytes,
GetOhttpKeyConfig(
1, EVP_HPKE_DHKEM_X25519_HKDF_SHA256, EVP_HPKE_HKDF_SHA256,
EVP_HPKE_AES_128_GCM));
constexpr absl::string_view kEncapsulatedRequest =
"010020000100014b28f881333e7c164ffc499ad9796f877f4e1051ee6d31bad19dec96c2"
"08b4726374e469135906992e1268c594d2a10c695d858c40a026e7965e7d86b83dd440b2"
"c0185204b4d63525";
std::string encapsulated_request_bytes;
ASSERT_TRUE(absl::HexStringToBytes(kEncapsulatedRequest,
&encapsulated_request_bytes));
auto decrypted_req =
instance->DecryptObliviousHttpRequest(encapsulated_request_bytes);
ASSERT_TRUE(decrypted_req.ok());
ASSERT_FALSE(decrypted_req->GetPlaintextData().empty());
}
TEST(ObliviousHttpGateway, TestDecryptingMultipleRequestsWithSingleInstance) {
auto instance = ObliviousHttpGateway::Create(
GetHpkePrivateKey(),
GetOhttpKeyConfig(1, EVP_HPKE_DHKEM_X25519_HKDF_SHA256,
EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_256_GCM));
absl::string_view encrypted_req_1 =
"010020000100025f20b60306b61ad9ecad389acd752ca75c4e2969469809fe3d84aae137"
"f73e4ccfe9ba71f12831fdce6c8202fbd38a84c5d8a73ac4c8ea6c10592594845f";
std::string encrypted_req_1_bytes;
ASSERT_TRUE(absl::HexStringToBytes(encrypted_req_1, &encrypted_req_1_bytes));
auto decapsulated_req_1 =
instance->DecryptObliviousHttpRequest(encrypted_req_1_bytes);
ASSERT_TRUE(decapsulated_req_1.ok());
ASSERT_FALSE(decapsulated_req_1->GetPlaintextData().empty());
absl::string_view encrypted_req_2 =
"01002000010002285ebc2fcad72cc91b378050cac29a62feea9cd97829335ee9fc87e672"
"4fa13ff2efdff620423d54225d3099088e7b32a5165f805a5d922918865a0a447a";
std::string encrypted_req_2_bytes;
ASSERT_TRUE(absl::HexStringToBytes(encrypted_req_2, &encrypted_req_2_bytes));
auto decapsulated_req_2 =
instance->DecryptObliviousHttpRequest(encrypted_req_2_bytes);
ASSERT_TRUE(decapsulated_req_2.ok());
ASSERT_FALSE(decapsulated_req_2->GetPlaintextData().empty());
}
TEST(ObliviousHttpGateway, TestInvalidHPKEKey) {
EXPECT_EQ(ObliviousHttpGateway::Create(
"Invalid HPKE key",
GetOhttpKeyConfig(70, EVP_HPKE_DHKEM_X25519_HKDF_SHA256,
EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_256_GCM))
.status()
.code(),
absl::StatusCode::kInternal);
EXPECT_EQ(ObliviousHttpGateway::Create(
"",
GetOhttpKeyConfig(70, EVP_HPKE_DHKEM_X25519_HKDF_SHA256,
EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_256_GCM))
.status()
.code(),
absl::StatusCode::kInvalidArgument);
}
TEST(ObliviousHttpGateway, TestObliviousResponseHandling) {
auto ohttp_key_config =
GetOhttpKeyConfig(3, EVP_HPKE_DHKEM_X25519_HKDF_SHA256,
EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_256_GCM);
auto instance =
ObliviousHttpGateway::Create(GetHpkePrivateKey(), ohttp_key_config);
ASSERT_TRUE(instance.ok());
  auto encapsulate_request_on_client =
      ObliviousHttpRequest::CreateClientObliviousRequest(
          "test", GetHpkePublicKey(), ohttp_key_config);
  ASSERT_TRUE(encapsulate_request_on_client.ok());
  auto decapsulated_req_on_server = instance->DecryptObliviousHttpRequest(
      encapsulate_request_on_client->EncapsulateAndSerialize());
ASSERT_TRUE(decapsulated_req_on_server.ok());
auto server_request_context =
std::move(decapsulated_req_on_server.value()).ReleaseContext();
auto encapsulate_resp_on_gateway = instance->CreateObliviousHttpResponse(
"some response", server_request_context);
ASSERT_TRUE(encapsulate_resp_on_gateway.ok());
ASSERT_FALSE(encapsulate_resp_on_gateway->EncapsulateAndSerialize().empty());
}
TEST(ObliviousHttpGateway,
TestHandlingMultipleResponsesForMultipleRequestsWithSingleInstance) {
auto instance = ObliviousHttpGateway::Create(
GetHpkePrivateKey(),
GetOhttpKeyConfig(1, EVP_HPKE_DHKEM_X25519_HKDF_SHA256,
EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_256_GCM),
QuicheRandom::GetInstance());
std::string encrypted_request_1_bytes;
ASSERT_TRUE(
absl::HexStringToBytes("010020000100025f20b60306b61ad9ecad389acd752ca75c4"
"e2969469809fe3d84aae137"
"f73e4ccfe9ba71f12831fdce6c8202fbd38a84c5d8a73ac4c"
"8ea6c10592594845f",
&encrypted_request_1_bytes));
auto decrypted_request_1 =
instance->DecryptObliviousHttpRequest(encrypted_request_1_bytes);
ASSERT_TRUE(decrypted_request_1.ok());
std::string encrypted_request_2_bytes;
ASSERT_TRUE(
absl::HexStringToBytes("01002000010002285ebc2fcad72cc91b378050cac29a62fee"
"a9cd97829335ee9fc87e672"
"4fa13ff2efdff620423d54225d3099088e7b32a5165f805a5"
"d922918865a0a447a",
&encrypted_request_2_bytes));
auto decrypted_request_2 =
instance->DecryptObliviousHttpRequest(encrypted_request_2_bytes);
ASSERT_TRUE(decrypted_request_2.ok());
auto oblivious_request_context_1 =
std::move(decrypted_request_1.value()).ReleaseContext();
auto encrypted_response_1 = instance->CreateObliviousHttpResponse(
"test response 1", oblivious_request_context_1);
ASSERT_TRUE(encrypted_response_1.ok());
ASSERT_FALSE(encrypted_response_1->EncapsulateAndSerialize().empty());
auto oblivious_request_context_2 =
std::move(decrypted_request_2.value()).ReleaseContext();
auto encrypted_response_2 = instance->CreateObliviousHttpResponse(
"test response 2", oblivious_request_context_2);
ASSERT_TRUE(encrypted_response_2.ok());
ASSERT_FALSE(encrypted_response_2->EncapsulateAndSerialize().empty());
}
TEST(ObliviousHttpGateway, TestWithMultipleThreads) {
class TestQuicheThread : public QuicheThread {
public:
TestQuicheThread(const ObliviousHttpGateway& gateway_receiver,
std::string request_payload, std::string response_payload)
: QuicheThread("gateway_thread"),
gateway_receiver_(gateway_receiver),
request_payload_(request_payload),
response_payload_(response_payload) {}
protected:
void Run() override {
auto decrypted_request =
gateway_receiver_.DecryptObliviousHttpRequest(request_payload_);
ASSERT_TRUE(decrypted_request.ok());
ASSERT_FALSE(decrypted_request->GetPlaintextData().empty());
auto gateway_request_context =
std::move(decrypted_request.value()).ReleaseContext();
auto encrypted_response = gateway_receiver_.CreateObliviousHttpResponse(
response_payload_, gateway_request_context);
ASSERT_TRUE(encrypted_response.ok());
ASSERT_FALSE(encrypted_response->EncapsulateAndSerialize().empty());
}
private:
const ObliviousHttpGateway& gateway_receiver_;
std::string request_payload_, response_payload_;
};
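  // Each thread performs an independent decrypt-and-respond round trip against
  // the shared gateway instance, exercising concurrent use of the same HPKE
  // key from multiple threads.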
auto gateway_receiver = ObliviousHttpGateway::Create(
GetHpkePrivateKey(),
GetOhttpKeyConfig(1, EVP_HPKE_DHKEM_X25519_HKDF_SHA256,
EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_256_GCM),
QuicheRandom::GetInstance());
std::string request_payload_1;
ASSERT_TRUE(
absl::HexStringToBytes("010020000100025f20b60306b61ad9ecad389acd752ca75c4"
"e2969469809fe3d84aae137"
"f73e4ccfe9ba71f12831fdce6c8202fbd38a84c5d8a73ac4c"
"8ea6c10592594845f",
&request_payload_1));
TestQuicheThread t1(*gateway_receiver, request_payload_1, "test response 1");
std::string request_payload_2;
ASSERT_TRUE(
absl::HexStringToBytes("01002000010002285ebc2fcad72cc91b378050cac29a62fee"
"a9cd97829335ee9fc87e672"
"4fa13ff2efdff620423d54225d3099088e7b32a5165f805a5"
"d922918865a0a447a",
&request_payload_2));
TestQuicheThread t2(*gateway_receiver, request_payload_2, "test response 2");
t1.Start();
t2.Start();
t1.Join();
t2.Join();
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/oblivious_http/oblivious_http_gateway.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/oblivious_http/oblivious_http_gateway_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
05ba9773-8dd8-4974-a475-170f6c840a77 | cpp | tensorflow/tensorflow | random_op | tensorflow/core/kernels/random_op.cc | tensorflow/core/kernels/random_op_test.cc | #define EIGEN_USE_THREADS
#include <algorithm>
#include <cmath>
#include <memory>
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/kernels/random_op_cpu.h"
#include "tensorflow/core/lib/hash/crc32c.h"
#include "tensorflow/core/lib/random/random_distributions.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/guarded_philox_random.h"
#include "tensorflow/core/util/work_sharder.h"
#if EIGEN_COMP_GNUC && __cplusplus > 199711L
#define DISABLE_FLOAT_EQUALITY_WARNING \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wfloat-equal\"")
#define ENABLE_FLOAT_EQUALITY_WARNING _Pragma("GCC diagnostic pop")
#else
#define DISABLE_FLOAT_EQUALITY_WARNING
#define ENABLE_FLOAT_EQUALITY_WARNING
#endif
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
namespace {
static Status AllocateOutputWithShape(OpKernelContext* ctx, const Tensor& shape,
int index, Tensor** output) {
TensorShape tensor_shape;
TF_RETURN_IF_ERROR(tensor::MakeShape(shape, &tensor_shape));
return ctx->allocate_output(index, tensor_shape, output);
}
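// PhiloxRandomOp fills its output with samples drawn from the templated
// Distribution, reserving 256 Philox samples per output element so results do
// not depend on how the work is sharded.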
template <typename Device, class Distribution>
class PhiloxRandomOp : public OpKernel {
public:
typedef typename Distribution::ResultElementType T;
explicit PhiloxRandomOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
OP_REQUIRES_OK(ctx, generator_.Init(ctx));
}
void Compute(OpKernelContext* ctx) override {
const Tensor& shape = ctx->input(0);
Tensor* output;
OP_REQUIRES_OK(ctx, AllocateOutputWithShape(ctx, shape, 0, &output));
auto output_flat = output->flat<T>();
functor::FillPhiloxRandom<Device, Distribution>()(
ctx, ctx->eigen_device<Device>(), nullptr, nullptr,
generator_.ReserveRandomOutputs(output_flat.size(), 256),
output_flat.data(), output_flat.size(), Distribution());
}
private:
GuardedPhiloxRandom generator_;
};
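// RandomUniformIntOp validates that minval/maxval are scalars and that
// minval < maxval before sampling integers uniformly from [minval, maxval).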
template <typename Device, class IntType>
class RandomUniformIntOp : public OpKernel {
public:
explicit RandomUniformIntOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
OP_REQUIRES_OK(ctx, generator_.Init(ctx));
}
void Compute(OpKernelContext* ctx) override {
const Tensor& shape = ctx->input(0);
const Tensor& minval = ctx->input(1);
const Tensor& maxval = ctx->input(2);
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(minval.shape()),
errors::InvalidArgument("minval must be 0-D, got shape ",
minval.shape().DebugString()));
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(maxval.shape()),
errors::InvalidArgument("maxval must be 0-D, got shape ",
maxval.shape().DebugString()));
Tensor* output;
OP_REQUIRES_OK(ctx, AllocateOutputWithShape(ctx, shape, 0, &output));
if (output->NumElements() == 0) return;
IntType lo = minval.scalar<IntType>()();
IntType hi = maxval.scalar<IntType>()();
OP_REQUIRES(
ctx, lo < hi,
errors::InvalidArgument("Need minval < maxval, got ", lo, " >= ", hi));
typedef random::UniformDistribution<random::PhiloxRandom, IntType>
Distribution;
Distribution dist(lo, hi);
auto output_flat = output->flat<IntType>();
functor::FillPhiloxRandom<Device, Distribution>()(
ctx, ctx->eigen_device<Device>(), nullptr, nullptr,
generator_.ReserveRandomOutputs(output_flat.size(), 256),
output_flat.data(), output_flat.size(), dist);
}
private:
GuardedPhiloxRandom generator_;
};
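// RandomGammaOp samples Gamma(alpha, 1) variates. For alpha == 1 it uses the
// exponential inverse-CDF (-log1p(-u)); otherwise it uses the Marsaglia-Tsang
// squeeze/rejection method, boosting by u^(1/alpha) when alpha < 1. The work
// is sharded across the CPU thread pool.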
template <typename T>
class RandomGammaOp : public OpKernel {
public:
explicit RandomGammaOp(OpKernelConstruction* context) : OpKernel(context) {
OP_REQUIRES_OK(context, generator_.Init(context));
}
void Compute(OpKernelContext* ctx) override {
const Tensor& shape_t = ctx->input(0);
const Tensor& alpha_t = ctx->input(1);
OP_REQUIRES(ctx,
TensorShapeUtils::IsVector(shape_t.shape()) &&
(shape_t.dtype() == DataType::DT_INT32 ||
shape_t.dtype() == DataType::DT_INT64),
errors::InvalidArgument(
"shape must be a vector of {int32,int64}, got shape: ",
shape_t.DebugString()));
TensorShape samples_shape;
if (shape_t.dtype() == DataType::DT_INT32) {
auto vec = shape_t.flat<int32>();
OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape(vec.data(), vec.size(),
&samples_shape));
} else if (shape_t.dtype() == DataType::DT_INT64) {
auto vec = shape_t.flat<int64_t>();
OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape(vec.data(), vec.size(),
&samples_shape));
}
const int64_t samples_per_alpha = samples_shape.num_elements();
OP_REQUIRES_OK(ctx, samples_shape.AppendShapeWithStatus(alpha_t.shape()));
Tensor* samples_t = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, samples_shape, &samples_t));
if (samples_shape.num_elements() == 0) return;
using random::PhiloxRandom;
typedef random::NormalDistribution<PhiloxRandom, double> Normal;
typedef random::UniformDistribution<PhiloxRandom, double> Uniform;
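// UNIFORM(X) draws one double-precision uniform sample into X, refilling a
// batch of Uniform::kResultElementCount samples whenever the batch runs out.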
#define UNIFORM(X) \
if (uniform_remaining == 0) { \
uniform_remaining = Uniform::kResultElementCount; \
uniform_result = uniform(&gen); \
} \
uniform_remaining--; \
double X = uniform_result[uniform_remaining]
static constexpr int kReservedSamplesPerOutput = 256;
const auto alpha_flat = alpha_t.flat<T>().data();
const int64_t num_alphas = alpha_t.NumElements();
OP_REQUIRES(ctx, num_alphas > 0,
errors::InvalidArgument(
"Input alpha should have non-zero element count, got: ",
num_alphas));
auto samples_flat = samples_t->flat<T>().data();
PhiloxRandom rng = generator_.ReserveRandomOutputs(
samples_per_alpha * num_alphas, kReservedSamplesPerOutput);
auto DoWork = [samples_per_alpha, num_alphas, &rng, samples_flat,
alpha_flat](int64_t start_output, int64_t limit_output) {
using Eigen::numext::exp;
using Eigen::numext::log;
using Eigen::numext::log1p;
using Eigen::numext::pow;
Normal normal;
Uniform uniform;
typename Normal::ResultType norm_result;
typename Uniform::ResultType uniform_result;
      for (int64_t output_idx = start_output; output_idx < limit_output;
           /* output_idx is advanced inside the per-sample loops below */) {
int64_t alpha_idx = output_idx / samples_per_alpha;
T* const samples_alpha_offset = samples_flat + alpha_idx;
const double alpha = static_cast<double>(alpha_flat[alpha_idx]);
DISABLE_FLOAT_EQUALITY_WARNING
if (alpha == static_cast<double>(1.0)) {
ENABLE_FLOAT_EQUALITY_WARNING
for (int64_t sample_idx = output_idx % samples_per_alpha;
sample_idx < samples_per_alpha && output_idx < limit_output;
sample_idx++, output_idx++) {
PhiloxRandom gen = rng;
gen.Skip(kReservedSamplesPerOutput * output_idx);
int16_t uniform_remaining = 0;
UNIFORM(u);
const double res = -log1p(-u);
samples_alpha_offset[sample_idx * num_alphas] = static_cast<T>(res);
}
} else {
const bool alpha_less_than_one = alpha < 1;
const double d = alpha + (alpha_less_than_one ? 2.0 / 3 : -1.0 / 3);
const double c = 1.0 / 3 / sqrt(d);
for (int64_t sample_idx = output_idx % samples_per_alpha;
sample_idx < samples_per_alpha && output_idx < limit_output;
sample_idx++, output_idx++) {
PhiloxRandom gen = rng;
gen.Skip(kReservedSamplesPerOutput * output_idx);
int16_t norm_remaining = 0;
int16_t uniform_remaining = 0;
while (true) {
if (norm_remaining == 0) {
norm_remaining = Normal::kResultElementCount;
norm_result = normal(&gen);
}
norm_remaining--;
const double x = norm_result[norm_remaining];
double v = 1 + c * x;
if (v <= 0) {
continue;
}
v = v * v * v;
UNIFORM(u);
if ((u < 1 - 0.0331 * (x * x) * (x * x)) ||
(log(u) < 0.5 * x * x + d * (1 - v + log(v)))) {
double res = d * v;
if (alpha_less_than_one) {
UNIFORM(b);
res *= pow(b, 1 / alpha);
}
samples_alpha_offset[sample_idx * num_alphas] =
static_cast<T>(res);
break;
}
}
}
}
}
};
#undef UNIFORM
static const int kElementCost = 85 + 2 * Normal::kElementCost +
Uniform::kElementCost +
3 * PhiloxRandom::kElementCost;
auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads());
Shard(worker_threads.num_threads, worker_threads.workers,
num_alphas * samples_per_alpha, kElementCost, DoWork);
}
private:
GuardedPhiloxRandom generator_;
RandomGammaOp(const RandomGammaOp&) = delete;
void operator=(const RandomGammaOp&) = delete;
};
}
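// The REGISTER* macros below instantiate the FillPhiloxRandom functors and
// register the CPU kernels (and, in the guarded section further down, the GPU
// kernels) for each supported dtype.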
#define REGISTER(TYPE) \
template struct functor::FillPhiloxRandom< \
CPUDevice, random::UniformDistribution<random::PhiloxRandom, TYPE>>; \
template struct functor::FillPhiloxRandom< \
CPUDevice, random::NormalDistribution<random::PhiloxRandom, TYPE>>; \
template struct functor::FillPhiloxRandom< \
CPUDevice, \
random::TruncatedNormalDistribution< \
random::SingleSampleAdapter<random::PhiloxRandom>, TYPE>>; \
REGISTER_KERNEL_BUILDER( \
Name("RandomUniform") \
.Device(DEVICE_CPU) \
.HostMemory("shape") \
.TypeConstraint<TYPE>("dtype"), \
PhiloxRandomOp<CPUDevice, random::UniformDistribution< \
random::PhiloxRandom, TYPE>>); \
REGISTER_KERNEL_BUILDER( \
Name("RandomStandardNormal") \
.Device(DEVICE_CPU) \
.HostMemory("shape") \
.TypeConstraint<TYPE>("dtype"), \
PhiloxRandomOp<CPUDevice, \
random::NormalDistribution<random::PhiloxRandom, TYPE>>); \
REGISTER_KERNEL_BUILDER( \
Name("TruncatedNormal") \
.Device(DEVICE_CPU) \
.HostMemory("shape") \
.TypeConstraint<TYPE>("dtype"), \
PhiloxRandomOp< \
CPUDevice, \
random::TruncatedNormalDistribution< \
random::SingleSampleAdapter<random::PhiloxRandom>, TYPE>>); \
REGISTER_KERNEL_BUILDER( \
Name("RandomGamma").Device(DEVICE_CPU).TypeConstraint<TYPE>("T"), \
RandomGammaOp<TYPE>)
#define REGISTER_FULL_INT(IntType) \
template struct functor::FillPhiloxRandom< \
CPUDevice, \
random::UniformFullIntDistribution<random::PhiloxRandom, IntType>>
#define REGISTER_INT(IntType) \
REGISTER_FULL_INT(IntType); \
template struct functor::FillPhiloxRandom< \
CPUDevice, random::UniformDistribution<random::PhiloxRandom, IntType>>; \
REGISTER_KERNEL_BUILDER(Name("RandomUniformInt") \
.Device(DEVICE_CPU) \
.HostMemory("shape") \
.HostMemory("minval") \
.HostMemory("maxval") \
.TypeConstraint<IntType>("Tout"), \
RandomUniformIntOp<CPUDevice, IntType>);
TF_CALL_half(REGISTER);
TF_CALL_bfloat16(REGISTER);
TF_CALL_float(REGISTER);
TF_CALL_double(REGISTER);
TF_CALL_int32(REGISTER_INT);
TF_CALL_int64(REGISTER_INT);
TF_CALL_uint32(REGISTER_FULL_INT);
TF_CALL_uint64(REGISTER_FULL_INT);
#undef REGISTER
#undef REGISTER_INT
#undef REGISTER_FULL_INT
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER(TYPE) \
REGISTER_KERNEL_BUILDER( \
Name("RandomUniform") \
.Device(DEVICE_GPU) \
.HostMemory("shape") \
.TypeConstraint<int32>("T") \
.TypeConstraint<TYPE>("dtype"), \
PhiloxRandomOp<GPUDevice, random::UniformDistribution< \
random::PhiloxRandom, TYPE>>); \
REGISTER_KERNEL_BUILDER( \
Name("RandomStandardNormal") \
.Device(DEVICE_GPU) \
.HostMemory("shape") \
.TypeConstraint<int32>("T") \
.TypeConstraint<TYPE>("dtype"), \
PhiloxRandomOp<GPUDevice, \
random::NormalDistribution<random::PhiloxRandom, TYPE>>); \
REGISTER_KERNEL_BUILDER( \
Name("TruncatedNormal") \
.Device(DEVICE_GPU) \
.HostMemory("shape") \
.TypeConstraint<int32>("T") \
.TypeConstraint<TYPE>("dtype"), \
PhiloxRandomOp< \
GPUDevice, \
random::TruncatedNormalDistribution< \
random::SingleSampleAdapter<random::PhiloxRandom>, TYPE>>);
#define REGISTER_FULL_INT(IntType) \
template struct functor::FillPhiloxRandom< \
GPUDevice, \
random::UniformFullIntDistribution<random::PhiloxRandom, IntType>>
#define REGISTER_INT(IntType) \
REGISTER_FULL_INT(IntType); \
template struct functor::FillPhiloxRandom< \
GPUDevice, random::UniformDistribution<random::PhiloxRandom, IntType>>; \
REGISTER_KERNEL_BUILDER(Name("RandomUniformInt") \
.Device(DEVICE_GPU) \
.HostMemory("shape") \
.HostMemory("minval") \
.HostMemory("maxval") \
.TypeConstraint<int32>("T") \
.TypeConstraint<IntType>("Tout"), \
RandomUniformIntOp<GPUDevice, IntType>);
TF_CALL_half(REGISTER);
TF_CALL_bfloat16(REGISTER);
TF_CALL_float(REGISTER);
TF_CALL_double(REGISTER);
TF_CALL_int32(REGISTER_INT);
TF_CALL_int64(REGISTER_INT);
TF_CALL_uint32(REGISTER_FULL_INT);
TF_CALL_uint64(REGISTER_FULL_INT);
#undef REGISTER
#undef REGISTER_INT
#undef REGISTER_FULL_INT
#endif
} | #include <random>
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/math/math_util.h"
#include "tensorflow/core/lib/random/philox_random.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
Tensor VecShape(int64_t v) {
if (v >= std::numeric_limits<int32>::max()) {
Tensor shape(DT_INT64, TensorShape({1}));
shape.vec<int64_t>()(0) = v;
return shape;
} else {
Tensor shape(DT_INT32, TensorShape({1}));
shape.vec<int32>()(0) = v;
return shape;
}
}
Graph* RandomUniform(int64_t n) {
Graph* g = new Graph(OpRegistry::Global());
test::graph::RandomUniform(g, test::graph::Constant(g, VecShape(n)),
DT_FLOAT);
return g;
}
Graph* RandomNormal(int64_t n) {
Graph* g = new Graph(OpRegistry::Global());
test::graph::RandomGaussian(g, test::graph::Constant(g, VecShape(n)),
DT_FLOAT);
return g;
}
Graph* TruncatedNormal(int64_t n) {
Graph* g = new Graph(OpRegistry::Global());
test::graph::TruncatedNormal(g, test::graph::Constant(g, VecShape(n)),
DT_FLOAT);
return g;
}
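// BM_RNG expands to one benchmark per (device, generator) pair; each benchmark
// builds a graph producing `arg` random values and sweeps arg from 1<<20 to
// 8<<20.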
#define BM_RNG(DEVICE, RNG) \
void BM_##DEVICE##_##RNG(::testing::benchmark::State& state) { \
const int arg = state.range(0); \
    test::Benchmark(#DEVICE, RNG(arg), false)                                  \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * arg); \
} \
BENCHMARK(BM_##DEVICE##_##RNG)->Range(1 << 20, 8 << 20);
BM_RNG(cpu, RandomUniform);
BM_RNG(cpu, RandomNormal);
BM_RNG(cpu, TruncatedNormal);
BM_RNG(gpu, RandomUniform);
BM_RNG(gpu, RandomNormal);
BM_RNG(gpu, TruncatedNormal);
Tensor VecAlphas(int64_t n) {
Tensor alphas(DT_DOUBLE, TensorShape({n}));
for (int i = 0; i < n; i++) {
alphas.vec<double>()(i) =
0.25 + MathUtil::IPow(1.1, i % 2 == 0 ? i : n - i);
}
return alphas;
}
void BM_cpu_RandomGamma(::testing::benchmark::State& state) {
const int nsamp = state.range(0);
const int nalpha = state.range(1);
Graph* g = new Graph(OpRegistry::Global());
test::graph::RandomGamma(g, test::graph::Constant(g, VecShape(nsamp)),
test::graph::Constant(g, VecAlphas(nalpha)));
test::Benchmark("cpu", g, false).Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * nsamp *
nalpha);
}
BENCHMARK(BM_cpu_RandomGamma)->RangePair(1 << 14, 4 << 15, 2, 50);
void BM_PhiloxRandom(::testing::benchmark::State& state) {
int count = 2 << 20;
random::PhiloxRandom gen(0x12345);
for (auto s : state) {
for (int j = 0; j < count; j += 4) {
auto samples = gen();
tensorflow::testing::DoNotOptimize(samples);
}
}
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * count);
}
BENCHMARK(BM_PhiloxRandom);
void BM_StdMTRandom(::testing::benchmark::State& state) {
int count = 2 << 20;
std::mt19937 gen(0x12345);
for (auto s : state) {
for (int j = 0; j < count; ++j) {
uint_fast32_t sample = gen();
tensorflow::testing::DoNotOptimize(sample);
}
}
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * count);
}
BENCHMARK(BM_StdMTRandom);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/random_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/random_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f92f7a68-f778-4514-9db2-42387ef2abfa | cpp | google/tensorstore | json_gtest | tensorstore/internal/json_gtest.cc | tensorstore/internal/json_gtest_test.cc | #include "tensorstore/internal/json_gtest.h"
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json/same.h"
#include "tensorstore/internal/json_pointer.h"
#include "tensorstore/util/quote_string.h"
namespace tensorstore {
namespace {
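// JsonMatcherImpl compares JSON values structurally via JsonSame and, on
// mismatch, reports a JSON Patch style diff produced by nlohmann's json::diff.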
class JsonMatcherImpl : public ::testing::MatcherInterface<::nlohmann::json> {
public:
JsonMatcherImpl(::nlohmann::json value) : value_(std::move(value)) {}
bool MatchAndExplain(
::nlohmann::json value_untyped,
::testing::MatchResultListener* listener) const override {
if (!internal_json::JsonSame(value_, value_untyped)) {
if (listener->IsInterested()) {
*listener << "where the difference is:\n"
<< ::nlohmann::json::diff(value_, value_untyped)
                         .dump(/*indent=*/2, /*indent_char=*/' ',
                               /*ensure_ascii=*/true,
                               ::nlohmann::json::error_handler_t::ignore);
}
return false;
}
return true;
}
void DescribeTo(std::ostream* os) const override {
*os << "matches json " << value_;
}
void DescribeNegationTo(std::ostream* os) const override {
*os << "does not match json " << value_;
}
private:
::nlohmann::json value_;
};
}
::testing::Matcher<::nlohmann::json> MatchesJson(::nlohmann::json j) {
return ::testing::MakeMatcher(new JsonMatcherImpl(std::move(j)));
}
namespace {
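// JsonPointerMatcherImpl resolves a JSON Pointer via json_pointer::Dereference
// and applies the wrapped matcher to the resolved sub-value, explaining both
// resolution failures and sub-matcher failures.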
class JsonPointerMatcherImpl
: public ::testing::MatcherInterface<::nlohmann::json> {
public:
JsonPointerMatcherImpl(std::string sub_value_pointer,
::testing::Matcher<::nlohmann::json> sub_value_matcher)
: sub_value_pointer_(std::move(sub_value_pointer)),
sub_value_matcher_(std::move(sub_value_matcher)) {}
bool MatchAndExplain(
::nlohmann::json value_untyped,
::testing::MatchResultListener* listener) const override {
auto sub_value =
json_pointer::Dereference(value_untyped, sub_value_pointer_);
if (!sub_value.ok()) {
if (listener->IsInterested()) {
*listener << "where the pointer could not be resolved: "
<< sub_value.status();
}
return false;
}
if (listener->IsInterested()) {
::testing::StringMatchResultListener s;
if (!sub_value_matcher_.MatchAndExplain(**sub_value, &s)) {
*listener << "whose sub value doesn't match";
auto str = s.str();
if (!str.empty()) {
*listener << ", " << str;
}
return false;
}
return true;
}
return sub_value_matcher_.Matches(**sub_value);
}
void DescribeTo(std::ostream* os) const override {
*os << "has sub value " << tensorstore::QuoteString(sub_value_pointer_)
<< " that ";
sub_value_matcher_.DescribeTo(os);
}
void DescribeNegationTo(std::ostream* os) const override {
*os << "does not have sub value "
<< tensorstore::QuoteString(sub_value_pointer_) << " that ";
sub_value_matcher_.DescribeTo(os);
}
private:
std::string sub_value_pointer_;
::testing::Matcher<nlohmann::json> sub_value_matcher_;
};
}
::testing::Matcher<::nlohmann::json> JsonSubValueMatches(
std::string json_pointer,
::testing::Matcher<::nlohmann::json> value_matcher) {
return ::testing::MakeMatcher(new JsonPointerMatcherImpl(
std::move(json_pointer), std::move(value_matcher)));
}
::testing::Matcher<::nlohmann::json> JsonSubValueMatches(
std::string json_pointer, ::nlohmann::json value_matcher) {
return JsonSubValueMatches(std::move(json_pointer),
MatchesJson(std::move(value_matcher)));
}
::testing::Matcher<::nlohmann::json> JsonSubValuesMatch(
std::vector<std::pair<std::string, ::nlohmann::json>> matchers) {
std::vector<::testing::Matcher<::nlohmann::json>> all;
all.reserve(matchers.size());
for (const auto& p : matchers) {
all.push_back(JsonSubValueMatches(p.first, p.second));
}
return ::testing::AllOfArray(all);
}
} | #include "tensorstore/internal/json_gtest.h"
#include <sstream>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
namespace {
using ::tensorstore::JsonSubValueMatches;
using ::tensorstore::JsonSubValuesMatch;
using ::tensorstore::MatchesJson;
template <typename MatcherType>
std::string Describe(const MatcherType& m) {
std::ostringstream ss;
m.DescribeTo(&ss);
return ss.str();
}
template <typename MatcherType, typename Value>
std::string Explain(const MatcherType& m, const Value& x) {
testing::StringMatchResultListener listener;
ExplainMatchResult(m, x, &listener);
return listener.str();
}
TEST(JsonSubValueMatchesTest, Example) {
::nlohmann::json obj{{"a", 123}, {"b", {{"c", "xyz"}}}};
EXPECT_THAT(obj, JsonSubValueMatches("/a", 123));
EXPECT_THAT(obj, JsonSubValueMatches("/b/c", "xyz"));
EXPECT_THAT(obj,
JsonSubValueMatches("/b/c", ::testing::Not(MatchesJson("xy"))));
EXPECT_THAT(Describe(JsonSubValueMatches("/a", 123)),
"has sub value \"/a\" that matches json 123");
EXPECT_THAT(Explain(JsonSubValueMatches("/a", 124), obj),
::testing::StartsWith(
"whose sub value doesn't match, where the difference is:"));
}
TEST(JsonSubValuesMatchTest, Example) {
::nlohmann::json obj{{"a", 123}, {"b", {{"c", "xyz"}}}};
EXPECT_THAT(obj, JsonSubValuesMatch({{"/a", 123}, {"/b/c", "xyz"}}));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_gtest.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_gtest_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
94696138-88ad-406c-84d3-0739ede2539f | cpp | tensorflow/tensorflow | full_type_util | tensorflow/core/framework/full_type_util.cc | tensorflow/core/framework/full_type_util_test.cc | #include "tensorflow/core/framework/full_type_util.h"
#include <algorithm>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/hash.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace full_type {
OpTypeConstructor NoOp() {
return nullptr;
}
OpTypeConstructor NoOutputs() {
return [](OpDef* op_def) {
op_def->mutable_output_arg();
return absl::OkStatus();
};
}
OpTypeConstructor Nullary(FullTypeId t) {
return [t](OpDef* op_def) {
FullTypeDef* tdef =
op_def->mutable_output_arg(0)->mutable_experimental_full_type();
tdef->set_type_id(t);
return absl::OkStatus();
};
}
OpTypeConstructor Unary(FullTypeId t, const string& var_name) {
return [t, var_name](OpDef* op_def) {
FullTypeDef* tdef =
op_def->mutable_output_arg(0)->mutable_experimental_full_type();
tdef->set_type_id(t);
FullTypeDef* arg = tdef->add_args();
arg->set_type_id(TFT_VAR);
arg->set_s(var_name);
return absl::OkStatus();
};
}
OpTypeConstructor UnaryGeneric(FullTypeId t) {
return [t](OpDef* op_def) {
FullTypeDef* tdef =
op_def->mutable_output_arg(0)->mutable_experimental_full_type();
tdef->set_type_id(t);
FullTypeDef* arg = tdef->add_args();
arg->set_type_id(TFT_ANY);
return absl::OkStatus();
};
}
OpTypeConstructor UnaryTensorContainer(FullTypeId t, FullTypeId dtype) {
return [t, dtype](OpDef* op_def) {
FullTypeDef* tdef =
op_def->mutable_output_arg(0)->mutable_experimental_full_type();
tdef->set_type_id(t);
FullTypeDef* arg = tdef->add_args();
arg->set_type_id(TFT_TENSOR);
FullTypeDef* targ = arg->add_args();
targ->set_type_id(dtype);
return absl::OkStatus();
};
}
OpTypeConstructor UnaryTensorContainer(FullTypeId t, const string& var_name) {
return [t, var_name](OpDef* op_def) {
FullTypeDef* tdef =
op_def->mutable_output_arg(0)->mutable_experimental_full_type();
tdef->set_type_id(t);
FullTypeDef* targ = tdef->add_args();
targ->set_type_id(TFT_TENSOR);
FullTypeDef* varg = targ->add_args();
varg->set_type_id(TFT_VAR);
varg->set_s(var_name);
return absl::OkStatus();
};
}
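// VariadicTensorContainer builds a TFT_FOR_EACH(TFT_PRODUCT, TFT_TENSOR(VAR),
// VAR) expansion, so the container type is later specialized once per element
// of the variadic type attribute named var_name.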
OpTypeConstructor VariadicTensorContainer(FullTypeId t,
const string& var_name) {
return [t, var_name](OpDef* op_def) {
FullTypeDef* tdef =
op_def->mutable_output_arg(0)->mutable_experimental_full_type();
tdef->set_type_id(t);
FullTypeDef* for_each = tdef->add_args();
for_each->set_type_id(TFT_FOR_EACH);
for_each->add_args()->set_type_id(TFT_PRODUCT);
FullTypeDef* tpl = for_each->add_args();
tpl->set_type_id(TFT_TENSOR);
FullTypeDef* targ = tpl->add_args();
targ->set_type_id(TFT_VAR);
targ->set_s(var_name);
FullTypeDef* tvar = for_each->add_args();
tvar->set_type_id(TFT_VAR);
tvar->set_s(var_name);
return absl::OkStatus();
};
}
namespace {
typedef absl::flat_hash_map<StringPiece, const AttrValue*> AttrMap;
inline Status SubstituteFromAttrs(AttrMap& attrs, FullTypeDef& t);
Status SubstituteVar(AttrMap& attrs, FullTypeDef& t) {
if (t.args_size() != 0) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("Unexpected Var type, expected args_size 0, found ",
t.args_size()));
}
StringPiece var_name = t.s();
if (!attrs.contains(var_name)) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("could not find an attribute for key '", var_name, "'"));
}
const AttrValue* attr = attrs.at(var_name);
const auto attr_type = attr->value_case();
if (attr_type == AttrValue::kType) {
map_dtype_to_tensor(attr->type(), t);
} else if (attr_type == AttrValue::kList) {
const auto& attr_list = attr->list();
if (attr_list.type_size() != 1) {
return Status(absl::StatusCode::kUnimplemented,
                    absl::StrCat("list attributes with other than one type "
                                 "element are unsupported\n",
attr_list.DebugString(), "\nkey=", var_name));
}
map_dtype_to_tensor(attr_list.type(0), t);
} else {
return Status(absl::StatusCode::kUnimplemented,
absl::StrCat("unsupported attribute type ",
attr->DebugString(), " for name ", var_name));
}
t.clear_s();
return absl::OkStatus();
}
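// SubstituteForEach expects exactly three args (container, template, variable)
// and expands the template once per type in the bound attribute, temporarily
// rebinding the variable for each list element.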
Status SubstituteForEach(AttrMap& attrs, FullTypeDef& t) {
if (t.args_size() != 3) {
return Status(absl::StatusCode::kInvalidArgument,
absl::StrCat("illegal FOR_EACH type, expected 3 args, got ",
t.args_size()));
}
const auto& cont = t.args(0);
const auto& tmpl = t.args(1);
const auto& t_var = t.args(2);
StringPiece var_name = t_var.s();
if (!attrs.contains(var_name)) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("could not find an attribute for key '", var_name, "'"));
}
const AttrValue* attr = attrs.at(var_name);
FullTypeDef result;
result.set_type_id(cont.type_id());
const auto attr_type = attr->value_case();
if (attr_type == AttrValue::kType) {
FullTypeDef* target = result.add_args();
*target = tmpl;
TF_RETURN_WITH_CONTEXT_IF_ERROR(
SubstituteFromAttrs(attrs, *target), "while substituting '", var_name,
"' from\n", attr->DebugString(), "\ninto ", target->DebugString());
} else if (attr_type == AttrValue::kList) {
const auto& attr_list = attr->list();
int tsize = attr_list.type_size();
if (tsize == 0) {
return Status(absl::StatusCode::kUnimplemented,
absl::StrCat("unsupported list attribute type\n",
attr_list.DebugString(), "\nkey=", var_name));
}
AttrValue replacement;
attrs[var_name] = &replacement;
for (int i = 0; i < tsize; i++) {
replacement.set_type(attr_list.type(i));
FullTypeDef* target = result.add_args();
*target = tmpl;
TF_RETURN_WITH_CONTEXT_IF_ERROR(SubstituteFromAttrs(attrs, *target),
"while substituting '", var_name,
"' from\n", attr->DebugString(), "\n[", i,
"] into\n", target->DebugString());
}
attrs[var_name] = attr;
} else {
return Status(absl::StatusCode::kUnimplemented,
absl::StrCat("unsupported attribute type\n",
attr->DebugString(), "\nfor name ", var_name));
}
t = result;
return absl::OkStatus();
}
Status SubstituteGeneric(AttrMap& attrs, FullTypeDef& t) {
int nargs = t.args_size();
for (int j = 0; j < nargs; j++) {
FullTypeDef* arg_t = t.mutable_args(j);
TF_RETURN_WITH_CONTEXT_IF_ERROR(SubstituteFromAttrs(attrs, *arg_t),
"while substituting arg ", j, ": ",
arg_t->DebugString());
if (arg_t->type_id() == TFT_TENSOR && arg_t->args_size() &&
arg_t->args(0).type_id() == TFT_LEGACY_VARIANT) {
t.clear_args();
break;
}
}
return absl::OkStatus();
}
inline Status SubstituteFromAttrs(AttrMap& attrs, FullTypeDef& t) {
switch (t.type_id()) {
case TFT_VAR:
return SubstituteVar(attrs, t);
case TFT_FOR_EACH:
return SubstituteForEach(attrs, t);
default:
return SubstituteGeneric(attrs, t);
}
return absl::OkStatus();
}
}
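// SpecializeType builds a TFT_PRODUCT whose args mirror the op's output args,
// substituting type variables from the node's attributes and falling back to
// attribute defaults declared in the OpDef.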
Status SpecializeType(const AttrSlice& attrs, const OpDef& op_def,
FullTypeDef& target) {
target.Clear();
target.set_type_id(TFT_PRODUCT);
AttrMap map;
for (const auto& attr : attrs) {
map.emplace(attr.first, &attr.second);
}
for (const auto& attr_def : op_def.attr()) {
if (attr_def.has_default_value() && !attrs.Find(attr_def.name())) {
map.emplace(attr_def.name(), &attr_def.default_value());
}
}
int nargs = op_def.output_arg_size();
for (int i = 0; i < nargs; i++) {
auto& t = *(target.add_args());
t = op_def.output_arg(i).experimental_full_type();
TF_RETURN_WITH_CONTEXT_IF_ERROR(
SubstituteFromAttrs(map, t), "while expanding vars of\n",
t.DebugString(), "\nfrom\n", attrs.SummarizeNode());
}
return absl::OkStatus();
}
const FullTypeDef& GetArgDefaultUnset(const FullTypeDef& t, int i) {
static FullTypeDef* unset_type = []() {
FullTypeDef* t = new FullTypeDef();
return t;
}();
if (i < t.args_size()) {
return t.args(i);
}
return *unset_type;
}
const FullTypeDef& GetArgDefaultAny(const FullTypeDef& t, int i) {
static FullTypeDef* any_type = []() {
FullTypeDef* t = new FullTypeDef();
t->set_type_id(TFT_ANY);
return t;
}();
if (i < t.args_size()) {
const FullTypeDef& f_val = t.args(i);
if (f_val.type_id() == TFT_UNSET) {
return *any_type;
}
return f_val;
}
return *any_type;
}
bool IsEqual(const FullTypeDef& lhs, const FullTypeDef& rhs) {
if (lhs.type_id() != rhs.type_id()) {
return false;
}
const auto& lhs_s = lhs.s();
const auto& rhs_s = rhs.s();
if (lhs_s.empty()) {
if (!rhs_s.empty()) {
return false;
}
} else if (rhs_s != lhs_s) {
return false;
}
for (int i = 0; i < std::max(lhs.args_size(), rhs.args_size()); i++) {
const FullTypeDef& lhs_arg = GetArgDefaultAny(lhs, i);
const FullTypeDef& rhs_arg = GetArgDefaultAny(rhs, i);
if (!IsEqual(lhs_arg, rhs_arg)) {
return false;
}
}
return true;
}
uint64_t Hash(const FullTypeDef& arg) {
uint64_t val = Hash64Combine(arg.type_id(), 0);
const auto& arg_s = arg.s();
val = Hash64Combine(val, Hash64(arg_s));
for (int i = 0, e = arg.args_size(); i < e; ++i) {
const FullTypeDef& arg_arg = GetArgDefaultAny(arg, i);
val = Hash64Combine(val, Hash(arg_arg));
}
return val;
}
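// IsSubtype implements a structural check in which TFT_ANY and TFT_UNSET act
// as top types, TENSOR[LEGACY_VARIANT] accepts any lhs, TFT_ENCODED defers to
// its second arg, and the comparison direction of args flips with `covariant`.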
bool IsSubtype(const FullTypeDef& lhs, const FullTypeDef& rhs, bool covariant) {
if (rhs.type_id() == TFT_ANY) {
return true;
}
if (rhs.type_id() == TFT_UNSET) {
return true;
}
if ((rhs.type_id() == TFT_TENSOR) &&
(GetArgDefaultUnset(rhs, 0).type_id() == TFT_LEGACY_VARIANT)) {
return true;
}
if (lhs.type_id() == TFT_ENCODED) {
return IsSubtype(GetArgDefaultAny(lhs, 1), rhs, true);
}
if (lhs.type_id() != rhs.type_id()) {
return false;
}
for (int i = 0; i < std::max(lhs.args_size(), rhs.args_size()); i++) {
const FullTypeDef& lhs_arg = GetArgDefaultAny(lhs, i);
const FullTypeDef& rhs_arg = GetArgDefaultAny(rhs, i);
if (covariant) {
if (!IsSubtype(lhs_arg, rhs_arg)) {
return false;
}
} else {
if (!IsSubtype(rhs_arg, lhs_arg)) {
return false;
}
}
}
return true;
}
}
} | #include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace full_type {
namespace {
TEST(Nullary, Basic) {
OpTypeConstructor ctor = Nullary(TFT_TENSOR);
OpDef op;
op.add_output_arg();
TF_ASSERT_OK(ctor(&op));
const FullTypeDef& t = op.output_arg(0).experimental_full_type();
EXPECT_EQ(t.type_id(), TFT_TENSOR);
EXPECT_EQ(t.args_size(), 0);
}
TEST(Unary, Basic) {
OpTypeConstructor ctor = Unary(TFT_TENSOR, "T");
OpDef op;
op.add_output_arg();
TF_ASSERT_OK(ctor(&op));
const FullTypeDef& t = op.output_arg(0).experimental_full_type();
EXPECT_EQ(t.type_id(), TFT_TENSOR);
EXPECT_EQ(t.args_size(), 1);
EXPECT_EQ(t.args(0).type_id(), TFT_VAR);
EXPECT_EQ(t.args(0).args_size(), 0);
EXPECT_EQ(t.args(0).s(), "T");
}
TEST(UnaryGeneric, Basic) {
OpTypeConstructor ctor = UnaryGeneric(TFT_TENSOR);
OpDef op;
op.add_output_arg();
TF_ASSERT_OK(ctor(&op));
const FullTypeDef& t = op.output_arg(0).experimental_full_type();
EXPECT_EQ(t.type_id(), TFT_TENSOR);
EXPECT_EQ(t.args_size(), 1);
EXPECT_EQ(t.args(0).type_id(), TFT_ANY);
EXPECT_EQ(t.args(0).args_size(), 0);
}
TEST(UnaryTensorContainer, Fixed) {
OpTypeConstructor ctor = UnaryTensorContainer(TFT_ARRAY, TFT_INT32);
OpDef op;
op.add_output_arg();
TF_ASSERT_OK(ctor(&op));
const FullTypeDef& t = op.output_arg(0).experimental_full_type();
EXPECT_EQ(t.type_id(), TFT_ARRAY);
EXPECT_EQ(t.args_size(), 1);
EXPECT_EQ(t.args(0).type_id(), TFT_TENSOR);
EXPECT_EQ(t.args(0).args_size(), 1);
EXPECT_EQ(t.args(0).args(0).type_id(), TFT_INT32);
EXPECT_EQ(t.args(0).args(0).args_size(), 0);
}
TEST(UnaryTensorContainer, Dependent) {
OpTypeConstructor ctor = UnaryTensorContainer(TFT_ARRAY, "T");
OpDef op;
op.add_output_arg();
TF_ASSERT_OK(ctor(&op));
const FullTypeDef& t = op.output_arg(0).experimental_full_type();
EXPECT_EQ(t.type_id(), TFT_ARRAY);
EXPECT_EQ(t.args_size(), 1);
EXPECT_EQ(t.args(0).type_id(), TFT_TENSOR);
EXPECT_EQ(t.args(0).args_size(), 1);
EXPECT_EQ(t.args(0).args(0).type_id(), TFT_VAR);
EXPECT_EQ(t.args(0).args(0).args_size(), 0);
EXPECT_EQ(t.args(0).args(0).s(), "T");
}
TEST(VariadicTensorContainer, Basic) {
OpTypeConstructor ctor = VariadicTensorContainer(TFT_ARRAY, "T");
OpDef op;
op.add_output_arg();
TF_ASSERT_OK(ctor(&op));
const FullTypeDef& t = op.output_arg(0).experimental_full_type();
EXPECT_EQ(t.type_id(), TFT_ARRAY);
EXPECT_EQ(t.args_size(), 1);
EXPECT_EQ(t.args(0).type_id(), TFT_FOR_EACH);
EXPECT_EQ(t.args(0).args_size(), 3);
EXPECT_EQ(t.args(0).args(0).type_id(), TFT_PRODUCT);
EXPECT_EQ(t.args(0).args(0).args_size(), 0);
EXPECT_EQ(t.args(0).args(1).type_id(), TFT_TENSOR);
EXPECT_EQ(t.args(0).args(1).args_size(), 1);
EXPECT_EQ(t.args(0).args(1).args(0).type_id(), TFT_VAR);
EXPECT_EQ(t.args(0).args(1).args(0).args_size(), 0);
EXPECT_EQ(t.args(0).args(1).args(0).s(), "T");
EXPECT_EQ(t.args(0).args(2).type_id(), TFT_VAR);
EXPECT_EQ(t.args(0).args(2).args_size(), 0);
EXPECT_EQ(t.args(0).args(2).s(), "T");
}
TEST(SpecializeType, Fixed) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_ARRAY);
t->add_args()->set_type_id(TFT_TENSOR);
t->mutable_args(0)->add_args()->set_type_id(TFT_INT32);
t->add_args()->set_type_id(TFT_DATASET);
t->mutable_args(1)->add_args()->set_type_id(TFT_FLOAT);
AttrSlice empty;
FullTypeDef ft;
TF_ASSERT_OK(SpecializeType(empty, op, ft));
EXPECT_EQ(ft.type_id(), TFT_PRODUCT);
EXPECT_EQ(ft.args_size(), 1);
const FullTypeDef& t_actual = ft.args(0);
EXPECT_EQ(t_actual.type_id(), TFT_ARRAY);
EXPECT_EQ(t_actual.args_size(), 2);
EXPECT_EQ(t_actual.args(0).type_id(), TFT_TENSOR);
EXPECT_EQ(t_actual.args(0).args_size(), 1);
EXPECT_EQ(t_actual.args(0).args(0).type_id(), TFT_INT32);
EXPECT_EQ(t_actual.args(0).args(0).args_size(), 0);
EXPECT_EQ(t_actual.args(1).type_id(), TFT_DATASET);
EXPECT_EQ(t_actual.args(1).args_size(), 1);
EXPECT_EQ(t_actual.args(1).args(0).type_id(), TFT_FLOAT);
EXPECT_EQ(t_actual.args(1).args(0).args_size(), 0);
}
TEST(SpecializeType, Idempotence) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_ARRAY);
t->add_args()->set_type_id(TFT_TENSOR);
t->mutable_args(0)->add_args()->set_type_id(TFT_INT32);
t->add_args()->set_type_id(TFT_DATASET);
t->mutable_args(1)->add_args()->set_type_id(TFT_FLOAT);
AttrSlice empty;
FullTypeDef ft;
TF_ASSERT_OK(SpecializeType(empty, op, ft));
TF_ASSERT_OK(SpecializeType(empty, op, ft));
EXPECT_EQ(ft.type_id(), TFT_PRODUCT);
EXPECT_EQ(ft.args_size(), 1);
}
TEST(SpecializeType, VarExpandsFromSingleAttribute) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_ARRAY);
t->add_args()->set_type_id(TFT_TENSOR);
t->mutable_args(0)->add_args()->set_type_id(TFT_VAR);
t->mutable_args(0)->mutable_args(0)->set_s("T");
AttrValue attr;
attr.set_type(DT_INT32);
NodeDef ndef;
(*ndef.mutable_attr())["T"] = attr;
AttrSlice attrs(ndef);
FullTypeDef ft;
TF_ASSERT_OK(SpecializeType(attrs, op, ft));
EXPECT_EQ(ft.type_id(), TFT_PRODUCT);
EXPECT_EQ(ft.args_size(), 1);
const FullTypeDef& t_actual = ft.args(0);
EXPECT_EQ(t_actual.type_id(), TFT_ARRAY);
EXPECT_EQ(t_actual.args_size(), 1);
EXPECT_EQ(t_actual.args(0).type_id(), TFT_TENSOR);
EXPECT_EQ(t_actual.args(0).args_size(), 1);
EXPECT_EQ(t_actual.args(0).args(0).type_id(), TFT_INT32);
EXPECT_EQ(t_actual.args(0).args(0).args_size(), 0);
}
TEST(SpecializeType, VarExpandsFromDefaultForSingleAttribute) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_ARRAY);
t->add_args()->set_type_id(TFT_TENSOR);
t->mutable_args(0)->add_args()->set_type_id(TFT_VAR);
t->mutable_args(0)->mutable_args(0)->set_s("T");
AttrValue attr;
attr.set_type(DT_INT32);
OpDef::AttrDef* attr_with_default = op.add_attr();
attr_with_default->set_name("T");
(*attr_with_default->mutable_default_value()) = attr;
NodeDef ndef;
AttrSlice attrs(ndef);
FullTypeDef ft;
TF_ASSERT_OK(SpecializeType(attrs, op, ft));
EXPECT_EQ(ft.type_id(), TFT_PRODUCT);
EXPECT_EQ(ft.args_size(), 1);
const FullTypeDef& t_actual = ft.args(0);
EXPECT_EQ(t_actual.type_id(), TFT_ARRAY);
EXPECT_EQ(t_actual.args_size(), 1);
EXPECT_EQ(t_actual.args(0).type_id(), TFT_TENSOR);
EXPECT_EQ(t_actual.args(0).args_size(), 1);
EXPECT_EQ(t_actual.args(0).args(0).type_id(), TFT_INT32);
EXPECT_EQ(t_actual.args(0).args(0).args_size(), 0);
}
TEST(SpecializeType, VarExpandsFromSingleElementTypeListAttribute) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_ARRAY);
t->add_args()->set_type_id(TFT_TENSOR);
t->mutable_args(0)->add_args()->set_type_id(TFT_VAR);
t->mutable_args(0)->mutable_args(0)->set_s("T");
AttrValue attr;
attr.mutable_list()->add_type(DT_INT32);
NodeDef ndef;
(*ndef.mutable_attr())["T"] = attr;
AttrSlice attrs(ndef);
FullTypeDef ft;
TF_ASSERT_OK(SpecializeType(attrs, op, ft));
EXPECT_EQ(ft.type_id(), TFT_PRODUCT);
EXPECT_EQ(ft.args_size(), 1);
const FullTypeDef& t_actual = ft.args(0);
EXPECT_EQ(t_actual.type_id(), TFT_ARRAY);
EXPECT_EQ(t_actual.args_size(), 1);
EXPECT_EQ(t_actual.args(0).type_id(), TFT_TENSOR);
EXPECT_EQ(t_actual.args(0).args_size(), 1);
EXPECT_EQ(t_actual.args(0).args(0).type_id(), TFT_INT32);
EXPECT_EQ(t_actual.args(0).args(0).args_size(), 0);
}
TEST(SpecializeType, VarRejectsMultipleElementTypeListAttribute) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_ARRAY);
t->add_args()->set_type_id(TFT_TENSOR);
t->mutable_args(0)->add_args()->set_type_id(TFT_VAR);
t->mutable_args(0)->mutable_args(0)->set_s("T");
AttrValue attr;
attr.mutable_list()->add_type(DT_INT32);
attr.mutable_list()->add_type(DT_FLOAT);
NodeDef ndef;
(*ndef.mutable_attr())["T"] = attr;
AttrSlice attrs(ndef);
FullTypeDef ft;
EXPECT_FALSE(SpecializeType(attrs, op, ft).ok());
}
TEST(SpecializeType, VarRejectsEmptyTypeListAttribute) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_ARRAY);
t->add_args()->set_type_id(TFT_TENSOR);
t->mutable_args(0)->add_args()->set_type_id(TFT_VAR);
t->mutable_args(0)->mutable_args(0)->set_s("T");
AttrValue attr;
attr.mutable_list();
NodeDef ndef;
(*ndef.mutable_attr())["T"] = attr;
AttrSlice attrs(ndef);
FullTypeDef ft;
EXPECT_FALSE(SpecializeType(attrs, op, ft).ok());
}
TEST(SpecializeType, ForEachExpandsFromSingleAttribute) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_FOR_EACH);
t->add_args()->set_type_id(TFT_PRODUCT);
t->add_args()->set_type_id(TFT_TENSOR);
t->mutable_args(1)->add_args()->set_type_id(TFT_VAR);
t->mutable_args(1)->mutable_args(0)->set_s("T");
t->add_args()->set_type_id(TFT_VAR);
t->mutable_args(2)->set_s("T");
AttrValue attr;
attr.set_type(DT_INT32);
NodeDef ndef;
(*ndef.mutable_attr())["T"] = attr;
AttrSlice attrs(ndef);
FullTypeDef ft;
TF_ASSERT_OK(SpecializeType(attrs, op, ft));
EXPECT_EQ(ft.type_id(), TFT_PRODUCT);
EXPECT_EQ(ft.args_size(), 1);
const FullTypeDef& t_actual = ft.args(0);
EXPECT_EQ(t_actual.type_id(), TFT_PRODUCT);
EXPECT_EQ(t_actual.args_size(), 1);
EXPECT_EQ(t_actual.args(0).type_id(), TFT_TENSOR);
EXPECT_EQ(t_actual.args(0).args_size(), 1);
EXPECT_EQ(t_actual.args(0).args(0).type_id(), TFT_INT32);
EXPECT_EQ(t_actual.args(0).args(0).args_size(), 0);
}
TEST(SpecializeType, ForEachExpandsFromListAttribute) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_FOR_EACH);
t->add_args()->set_type_id(TFT_PRODUCT);
t->add_args()->set_type_id(TFT_TENSOR);
t->mutable_args(1)->add_args()->set_type_id(TFT_VAR);
t->mutable_args(1)->mutable_args(0)->set_s("T");
t->add_args()->set_type_id(TFT_VAR);
t->mutable_args(2)->set_s("T");
AttrValue attr;
attr.mutable_list()->add_type(DT_INT32);
attr.mutable_list()->add_type(DT_FLOAT);
NodeDef ndef;
(*ndef.mutable_attr())["T"] = attr;
AttrSlice attrs(ndef);
FullTypeDef ft;
TF_ASSERT_OK(SpecializeType(attrs, op, ft));
EXPECT_EQ(ft.type_id(), TFT_PRODUCT);
EXPECT_EQ(ft.args_size(), 1);
const FullTypeDef& t_actual = ft.args(0);
EXPECT_EQ(t_actual.type_id(), TFT_PRODUCT);
EXPECT_EQ(t_actual.args_size(), 2);
EXPECT_EQ(t_actual.args(0).type_id(), TFT_TENSOR);
EXPECT_EQ(t_actual.args(0).args_size(), 1);
EXPECT_EQ(t_actual.args(0).args(0).type_id(), TFT_INT32);
EXPECT_EQ(t_actual.args(0).args(0).args_size(), 0);
EXPECT_EQ(t_actual.args(1).type_id(), TFT_TENSOR);
EXPECT_EQ(t_actual.args(1).args_size(), 1);
EXPECT_EQ(t_actual.args(1).args(0).type_id(), TFT_FLOAT);
EXPECT_EQ(t_actual.args(1).args(0).args_size(), 0);
}
TEST(SpecializeType, ForEachDistributesNestedVar) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_FOR_EACH);
t->add_args()->set_type_id(TFT_PRODUCT);
t->add_args()->set_type_id(TFT_TENSOR);
t->mutable_args(1)->add_args()->set_type_id(TFT_VAR);
t->mutable_args(1)->mutable_args(0)->set_s("ForEachTarget");
t->mutable_args(1)->add_args()->set_type_id(TFT_VAR);
t->mutable_args(1)->mutable_args(1)->set_s("GlobalVar");
t->add_args()->set_type_id(TFT_VAR);
t->mutable_args(2)->set_s("ForEachTarget");
NodeDef ndef;
AttrValue attr;
attr.mutable_list()->add_type(DT_INT32);
attr.mutable_list()->add_type(DT_INT64);
(*ndef.mutable_attr())["ForEachTarget"] = attr;
attr.set_type(DT_FLOAT);
(*ndef.mutable_attr())["GlobalVar"] = attr;
AttrSlice attrs(ndef);
FullTypeDef ft;
TF_ASSERT_OK(SpecializeType(attrs, op, ft));
EXPECT_EQ(ft.type_id(), TFT_PRODUCT);
EXPECT_EQ(ft.args_size(), 1);
const FullTypeDef& t_actual = ft.args(0);
EXPECT_EQ(t_actual.type_id(), TFT_PRODUCT);
EXPECT_EQ(t_actual.args_size(), 2);
EXPECT_EQ(t_actual.args(0).type_id(), TFT_TENSOR);
EXPECT_EQ(t_actual.args(0).args_size(), 2);
EXPECT_EQ(t_actual.args(0).args(0).type_id(), TFT_INT32);
EXPECT_EQ(t_actual.args(0).args(0).args_size(), 0);
EXPECT_EQ(t_actual.args(0).args(1).type_id(), TFT_FLOAT);
EXPECT_EQ(t_actual.args(0).args(1).args_size(), 0);
EXPECT_EQ(t_actual.args(1).type_id(), TFT_TENSOR);
EXPECT_EQ(t_actual.args(1).args_size(), 2);
EXPECT_EQ(t_actual.args(1).args(0).type_id(), TFT_INT64);
EXPECT_EQ(t_actual.args(1).args(0).args_size(), 0);
EXPECT_EQ(t_actual.args(1).args(1).type_id(), TFT_FLOAT);
EXPECT_EQ(t_actual.args(1).args(1).args_size(), 0);
}
TEST(SpecializeType, ForEachDistributesNestedForEach) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_FOR_EACH);
t->add_args()->set_type_id(TFT_PRODUCT);
FullTypeDef* inner = t->add_args();
inner->set_type_id(TFT_FOR_EACH);
inner->add_args()->set_type_id(TFT_PRODUCT);
inner->add_args()->set_type_id(TFT_ARRAY);
inner->mutable_args(1)->add_args()->set_type_id(TFT_VAR);
inner->mutable_args(1)->mutable_args(0)->set_s("InnerForEach");
inner->mutable_args(1)->add_args()->set_type_id(TFT_VAR);
inner->mutable_args(1)->mutable_args(1)->set_s("OuterForEach");
inner->add_args()->set_type_id(TFT_VAR);
inner->mutable_args(2)->set_s("InnerForEach");
t->add_args()->set_type_id(TFT_VAR);
t->mutable_args(2)->set_s("OuterForEach");
NodeDef ndef;
AttrValue attr;
attr.mutable_list()->add_type(DT_INT32);
attr.mutable_list()->add_type(DT_INT64);
(*ndef.mutable_attr())["OuterForEach"] = attr;
attr.set_type(DT_FLOAT);
(*ndef.mutable_attr())["InnerForEach"] = attr;
AttrSlice attrs(ndef);
FullTypeDef ft;
TF_ASSERT_OK(SpecializeType(attrs, op, ft));
EXPECT_EQ(ft.type_id(), TFT_PRODUCT);
EXPECT_EQ(ft.args_size(), 1);
const FullTypeDef& t_actual = ft.args(0);
EXPECT_EQ(t_actual.type_id(), TFT_PRODUCT);
EXPECT_EQ(t_actual.args_size(), 2);
EXPECT_EQ(t_actual.args(0).type_id(), TFT_PRODUCT);
EXPECT_EQ(t_actual.args(0).args_size(), 1);
EXPECT_EQ(t_actual.args(0).args(0).type_id(), TFT_ARRAY);
EXPECT_EQ(t_actual.args(0).args(0).args_size(), 2);
EXPECT_EQ(t_actual.args(0).args(0).args(0).type_id(), TFT_FLOAT);
EXPECT_EQ(t_actual.args(0).args(0).args(0).args_size(), 0);
EXPECT_EQ(t_actual.args(0).args(0).args(1).type_id(), TFT_INT32);
EXPECT_EQ(t_actual.args(0).args(0).args(1).args_size(), 0);
EXPECT_EQ(t_actual.args(1).type_id(), TFT_PRODUCT);
EXPECT_EQ(t_actual.args(1).args_size(), 1);
EXPECT_EQ(t_actual.args(1).args(0).type_id(), TFT_ARRAY);
EXPECT_EQ(t_actual.args(1).args(0).args_size(), 2);
EXPECT_EQ(t_actual.args(1).args(0).args(0).type_id(), TFT_FLOAT);
EXPECT_EQ(t_actual.args(1).args(0).args(0).args_size(), 0);
EXPECT_EQ(t_actual.args(1).args(0).args(1).type_id(), TFT_INT64);
EXPECT_EQ(t_actual.args(1).args(0).args(1).args_size(), 0);
}
TEST(SpecializeType, ForEachOverridesTargetOfNestedForEach) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_FOR_EACH);
t->add_args()->set_type_id(TFT_PRODUCT);
FullTypeDef* inner = t->add_args();
inner->set_type_id(TFT_FOR_EACH);
inner->add_args()->set_type_id(TFT_PRODUCT);
inner->add_args()->set_type_id(TFT_ARRAY);
inner->mutable_args(1)->add_args()->set_type_id(TFT_VAR);
inner->mutable_args(1)->mutable_args(0)->set_s("T");
inner->add_args()->set_type_id(TFT_VAR);
inner->mutable_args(2)->set_s("T");
t->add_args()->set_type_id(TFT_VAR);
t->mutable_args(2)->set_s("T");
NodeDef ndef;
AttrValue attr;
attr.mutable_list()->add_type(DT_FLOAT);
attr.mutable_list()->add_type(DT_DOUBLE);
(*ndef.mutable_attr())["T"] = attr;
AttrSlice attrs(ndef);
FullTypeDef ft;
TF_ASSERT_OK(SpecializeType(attrs, op, ft));
EXPECT_EQ(ft.type_id(), TFT_PRODUCT);
EXPECT_EQ(ft.args_size(), 1);
const FullTypeDef& t_actual = ft.args(0);
EXPECT_EQ(t_actual.type_id(), TFT_PRODUCT);
EXPECT_EQ(t_actual.args_size(), 2);
EXPECT_EQ(t_actual.args(0).type_id(), TFT_PRODUCT);
EXPECT_EQ(t_actual.args(0).args_size(), 1);
EXPECT_EQ(t_actual.args(0).args(0).type_id(), TFT_ARRAY);
EXPECT_EQ(t_actual.args(0).args(0).args_size(), 1);
EXPECT_EQ(t_actual.args(0).args(0).args(0).type_id(), TFT_FLOAT);
EXPECT_EQ(t_actual.args(0).args(0).args(0).args_size(), 0);
EXPECT_EQ(t_actual.args(1).type_id(), TFT_PRODUCT);
EXPECT_EQ(t_actual.args(1).args_size(), 1);
EXPECT_EQ(t_actual.args(1).args(0).type_id(), TFT_ARRAY);
EXPECT_EQ(t_actual.args(1).args(0).args_size(), 1);
EXPECT_EQ(t_actual.args(1).args(0).args(0).type_id(), TFT_DOUBLE);
EXPECT_EQ(t_actual.args(1).args(0).args(0).args_size(), 0);
}
TEST(SpecializeType, ForEachRejectsMalformedInput) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_FOR_EACH);
t->add_args()->set_type_id(TFT_PRODUCT);
NodeDef ndef;
AttrSlice attrs(ndef);
FullTypeDef ft;
EXPECT_FALSE(SpecializeType(attrs, op, ft).ok());
}
TEST(SpecializeType, VarShouldHaveNoArgs) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_VAR);
t->add_args()->set_type_id(TFT_PRODUCT);
NodeDef ndef;
AttrSlice attrs(ndef);
FullTypeDef ft;
EXPECT_FALSE(SpecializeType(attrs, op, ft).ok());
}
TEST(SpecializeType, RemovesLegacyVariant) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_ARRAY);
t->add_args()->set_type_id(TFT_TENSOR);
t->mutable_args(0)->add_args()->set_type_id(TFT_LEGACY_VARIANT);
t->add_args()->set_type_id(TFT_TENSOR);
t->mutable_args(1)->add_args()->set_type_id(TFT_FLOAT);
AttrSlice empty;
FullTypeDef ft;
TF_ASSERT_OK(SpecializeType(empty, op, ft));
EXPECT_EQ(ft.type_id(), TFT_PRODUCT);
EXPECT_EQ(ft.args_size(), 1);
const FullTypeDef& t_actual = ft.args(0);
EXPECT_EQ(t_actual.type_id(), TFT_ARRAY);
EXPECT_EQ(t_actual.args_size(), 0);
}
TEST(SpecializeType, RemovesLegacyVariantAfterExpansion) {
OpDef op;
FullTypeDef* t = op.add_output_arg()->mutable_experimental_full_type();
t->set_type_id(TFT_ARRAY);
t->add_args()->set_type_id(TFT_TENSOR);
t->mutable_args(0)->add_args()->set_type_id(TFT_VAR);
t->mutable_args(0)->mutable_args(0)->set_s("T");
AttrValue attr;
attr.set_type(DT_VARIANT);
NodeDef ndef;
(*ndef.mutable_attr())["T"] = attr;
AttrSlice attrs(ndef);
FullTypeDef ft;
TF_ASSERT_OK(SpecializeType(attrs, op, ft));
EXPECT_EQ(ft.type_id(), TFT_PRODUCT);
EXPECT_EQ(ft.args_size(), 1);
const FullTypeDef& t_actual = ft.args(0);
EXPECT_EQ(t_actual.type_id(), TFT_ARRAY);
EXPECT_EQ(t_actual.args_size(), 0);
}
TEST(GetArgDefaults, DefaultUnsetFromNoArgs) {
FullTypeDef t;
const auto& d = GetArgDefaultUnset(t, 0);
EXPECT_EQ(d.type_id(), TFT_UNSET);
}
TEST(GetArgDefaults, DefaultUnsetFromOutOfBounds) {
FullTypeDef t;
t.add_args()->set_type_id(TFT_TENSOR);
const auto& d = GetArgDefaultUnset(t, 1);
EXPECT_EQ(d.type_id(), TFT_UNSET);
}
TEST(GetArgDefaults, NoDefaultUnsetFromArg) {
FullTypeDef t;
t.add_args()->set_type_id(TFT_TENSOR);
t.mutable_args(0)->add_args();
const auto& d = GetArgDefaultUnset(t, 0);
EXPECT_EQ(d.type_id(), TFT_TENSOR);
EXPECT_EQ(d.args_size(), 1);
}
TEST(GetArgDefaults, DefaultAnyFromNoArgs) {
FullTypeDef t;
const auto& d = GetArgDefaultAny(t, 0);
EXPECT_EQ(d.type_id(), TFT_ANY);
}
TEST(GetArgDefaults, DefaultAnyFromOutOfBounds) {
FullTypeDef t;
t.add_args()->set_type_id(TFT_TENSOR);
const auto& d = GetArgDefaultAny(t, 1);
EXPECT_EQ(d.type_id(), TFT_ANY);
}
TEST(GetArgDefaults, DefaultAnyFromUnset) {
FullTypeDef t;
t.add_args();
const auto& d = GetArgDefaultAny(t, 0);
EXPECT_EQ(d.type_id(), TFT_ANY);
}
TEST(GetArgDefaults, NoDefaultAnyFromArg) {
FullTypeDef t;
t.add_args()->set_type_id(TFT_TENSOR);
t.mutable_args(0)->add_args();
const auto& d = GetArgDefaultAny(t, 0);
EXPECT_EQ(d.type_id(), TFT_TENSOR);
EXPECT_EQ(d.args_size(), 1);
}
TEST(IsEqual, Reflexivity) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
t.add_args()->set_type_id(TFT_INT64);
EXPECT_TRUE(IsEqual(t, t));
}
TEST(IsEqual, Copy) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
t.add_args()->set_type_id(TFT_INT64);
FullTypeDef u;
u = t;
EXPECT_TRUE(IsEqual(t, u));
EXPECT_TRUE(IsEqual(u, t));
}
TEST(IsEqual, DifferentTypesNotEqual) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
t.add_args()->set_type_id(TFT_INT64);
FullTypeDef u;
u = t;
u.set_type_id(TFT_ARRAY);
EXPECT_FALSE(IsEqual(t, u));
EXPECT_FALSE(IsEqual(u, t));
}
TEST(IsEqual, DifferentAritiesNotEqual) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
t.add_args()->set_type_id(TFT_INT64);
FullTypeDef u;
u = t;
u.add_args()->set_type_id(TFT_FLOAT);
EXPECT_FALSE(IsEqual(t, u));
EXPECT_FALSE(IsEqual(u, t));
}
TEST(IsEqual, MissingArgsEquivalentToAny) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
FullTypeDef u;
u = t;
u.add_args()->set_type_id(TFT_ANY);
EXPECT_TRUE(IsEqual(t, u));
EXPECT_TRUE(IsEqual(u, t));
}
TEST(IsEqual, DifferentArgsNotEqual) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
t.add_args()->set_type_id(TFT_INT64);
FullTypeDef u;
u = t;
u.mutable_args(1)->set_type_id(TFT_FLOAT);
EXPECT_FALSE(IsEqual(t, u));
EXPECT_FALSE(IsEqual(u, t));
}
TEST(IsEqual, DifferentStringValuesNotEqual) {
FullTypeDef t;
t.set_type_id(TFT_VAR);
t.set_s("T");
FullTypeDef u;
u = t;
u.set_type_id(TFT_VAR);
u.set_s("U");
EXPECT_FALSE(IsEqual(t, u));
EXPECT_FALSE(IsEqual(u, t));
}
TEST(IsSubtype, Reflexivity) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
t.add_args()->set_type_id(TFT_INT64);
EXPECT_TRUE(IsSubtype(t, t));
}
TEST(IsSubtype, Copy) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
t.add_args()->set_type_id(TFT_INT64);
FullTypeDef u;
u = t;
EXPECT_TRUE(IsSubtype(t, u));
}
TEST(IsSubtype, Any) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
t.add_args()->set_type_id(TFT_INT64);
FullTypeDef u;
u.set_type_id(TFT_ANY);
EXPECT_TRUE(IsSubtype(t, u));
EXPECT_FALSE(IsSubtype(u, t));
}
TEST(IsSubtype, Unset) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
t.add_args()->set_type_id(TFT_INT64);
FullTypeDef u;
u.set_type_id(TFT_UNSET);
EXPECT_TRUE(IsSubtype(t, u));
EXPECT_FALSE(IsSubtype(u, t));
}
TEST(IsSubtype, Covariance) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_ARRAY);
t.mutable_args(0)->add_args()->set_type_id(TFT_INT32);
FullTypeDef u;
u.set_type_id(TFT_TENSOR);
u.add_args()->set_type_id(TFT_ANY);
EXPECT_TRUE(IsSubtype(t, u, true));
EXPECT_FALSE(IsSubtype(u, t, true));
EXPECT_FALSE(IsSubtype(t, u, false));
EXPECT_TRUE(IsSubtype(u, t, false));
}
TEST(IsSubtype, DifferentTypesNotSubtype) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
t.add_args()->set_type_id(TFT_INT64);
FullTypeDef u;
u = t;
u.set_type_id(TFT_ARRAY);
EXPECT_FALSE(IsSubtype(t, u));
EXPECT_FALSE(IsSubtype(u, t));
}
TEST(IsSubtype, DifferentAritiesDefaultToAny) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
t.add_args()->set_type_id(TFT_INT64);
FullTypeDef u;
u = t;
u.add_args()->set_type_id(TFT_FLOAT);
EXPECT_FALSE(IsSubtype(t, u));
EXPECT_TRUE(IsSubtype(u, t));
}
TEST(IsSubtype, DifferentArgsNotSubtype) {
FullTypeDef t;
t.set_type_id(TFT_TENSOR);
t.add_args()->set_type_id(TFT_INT32);
t.add_args()->set_type_id(TFT_INT64);
FullTypeDef u;
u = t;
u.mutable_args(1)->set_type_id(TFT_FLOAT);
EXPECT_FALSE(IsSubtype(t, u));
EXPECT_FALSE(IsSubtype(u, t));
}
}
}
} |
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/full_type_util.cc |
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/full_type_util_test.cc |
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
896b5f77-9df8-45ee-8777-37ac3e99637f | cpp | google/libaddressinput | address_normalizer | cpp/src/address_normalizer.cc | cpp/test/address_normalizer_test.cc |
#include <libaddressinput/address_normalizer.h>
#include <libaddressinput/address_data.h>
#include <libaddressinput/address_field.h>
#include <libaddressinput/preload_supplier.h>
#include <cassert>
#include <cstddef>
#include <string>
#include <vector>
#include "lookup_key.h"
#include "rule.h"
#include "util/size.h"
#include "util/string_compare.h"
namespace i18n {
namespace addressinput {
AddressNormalizer::AddressNormalizer(const PreloadSupplier* supplier)
: supplier_(supplier),
compare_(new StringCompare) {
assert(supplier_ != nullptr);
}
AddressNormalizer::~AddressNormalizer() = default;
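// Walks the region hierarchy one level at a time (admin area, locality, ...).
// A field that matches a sub-key, its local name, or its Latin name is
// rewritten to the Latin name (if that is what matched) or to the canonical
// sub-key. Stops at the first empty field or the first level with no match.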
void AddressNormalizer::Normalize(AddressData* address) const {
assert(address != nullptr);
assert(supplier_->IsLoaded(address->region_code));
AddressData region_address;
region_address.region_code = address->region_code;
LookupKey parent_key;
parent_key.FromAddress(region_address);
const Rule* parent_rule = supplier_->GetRule(parent_key);
assert(parent_rule != nullptr);
std::vector<std::string> languages(parent_rule->GetLanguages());
if (languages.empty()) {
languages.emplace_back("");
} else {
languages[0] = "";
}
LookupKey lookup_key;
for (size_t depth = 1; depth < size(LookupKey::kHierarchy); ++depth) {
AddressField field = LookupKey::kHierarchy[depth];
if (address->IsFieldEmpty(field)) {
return;
}
const std::string& field_value = address->GetFieldValue(field);
bool no_match_found_yet = true;
for (const auto& sub_key : parent_rule->GetSubKeys()) {
if (!no_match_found_yet) {
break;
}
for (const std::string& language_tag : languages) {
lookup_key.set_language(language_tag);
lookup_key.FromLookupKey(parent_key, sub_key);
const Rule* rule = supplier_->GetRule(lookup_key);
if (rule == nullptr) continue;
bool matches_latin_name =
compare_->NaturalEquals(field_value, rule->GetLatinName());
bool matches_local_name_id =
compare_->NaturalEquals(field_value, sub_key) ||
compare_->NaturalEquals(field_value, rule->GetName());
if (matches_latin_name || matches_local_name_id) {
address->SetFieldValue(
field, matches_latin_name ? rule->GetLatinName() : sub_key);
no_match_found_yet = false;
parent_key.FromLookupKey(parent_key, sub_key);
parent_rule = supplier_->GetRule(parent_key);
assert(parent_rule != nullptr);
break;
}
}
}
if (no_match_found_yet) {
return;
}
}
}
}
} |
#include <libaddressinput/address_normalizer.h>
#include <libaddressinput/address_data.h>
#include <libaddressinput/callback.h>
#include <libaddressinput/null_storage.h>
#include <libaddressinput/preload_supplier.h>
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "testdata_source.h"
namespace {
using i18n::addressinput::AddressData;
using i18n::addressinput::AddressNormalizer;
using i18n::addressinput::BuildCallback;
using i18n::addressinput::NullStorage;
using i18n::addressinput::PreloadSupplier;
using i18n::addressinput::TestdataSource;
class AddressNormalizerTest : public testing::Test {
public:
AddressNormalizerTest(const AddressNormalizerTest&) = delete;
AddressNormalizerTest& operator=(const AddressNormalizerTest&) = delete;
protected:
AddressNormalizerTest()
: supplier_(new TestdataSource(true), new NullStorage),
loaded_(BuildCallback(this, &AddressNormalizerTest::OnLoaded)),
normalizer_(&supplier_) {}
PreloadSupplier supplier_;
const std::unique_ptr<const PreloadSupplier::Callback> loaded_;
const AddressNormalizer normalizer_;
private:
void OnLoaded(bool success, const std::string& region_code, int num_rules) {
ASSERT_TRUE(success);
ASSERT_FALSE(region_code.empty());
ASSERT_LT(0, num_rules);
}
};
TEST_F(AddressNormalizerTest, CountryWithNoLanguageNoAdminArea) {
supplier_.LoadRules("IR", *loaded_);
AddressData address{
.region_code = "IR",
.administrative_area = "Tehran",
};
normalizer_.Normalize(&address);
EXPECT_EQ("Tehran", address.administrative_area);
}
TEST_F(AddressNormalizerTest, BrazilAdminAreaAndLocality) {
supplier_.LoadRules("BR", *loaded_);
AddressData address{
.region_code = "BR",
.administrative_area = "Maranhão",
.locality = "Cantanhede",
};
normalizer_.Normalize(&address);
EXPECT_EQ("MA", address.administrative_area);
EXPECT_EQ("Cantanhede", address.locality);
}
TEST_F(AddressNormalizerTest, FrenchCanadaNameLanguageNotConsistent) {
supplier_.LoadRules("CA", *loaded_);
AddressData address{
.region_code = "CA",
.administrative_area = "Nouveau-Brunswick",
.language_code = "en-CA",
};
normalizer_.Normalize(&address);
EXPECT_EQ("NB", address.administrative_area);
}
TEST_F(AddressNormalizerTest, FrenchCanadaName) {
supplier_.LoadRules("CA", *loaded_);
AddressData address{
.region_code = "CA",
.administrative_area = "Nouveau-Brunswick",
.language_code = "fr-CA",
};
normalizer_.Normalize(&address);
EXPECT_EQ("NB", address.administrative_area);
}
TEST_F(AddressNormalizerTest, FrenchCanadaNameLanguageNotListed) {
supplier_.LoadRules("CA", *loaded_);
AddressData address{
.region_code = "CA",
.administrative_area = "Colombie-Britannique",
.language_code = "fa-CA",
};
normalizer_.Normalize(&address);
EXPECT_EQ("BC", address.administrative_area);
}
TEST_F(AddressNormalizerTest, CaliforniaShortNameCa) {
supplier_.LoadRules("US", *loaded_);
AddressData address{
.region_code = "US",
.administrative_area = "California",
.locality = "Mountain View",
.language_code = "en-US",
};
normalizer_.Normalize(&address);
EXPECT_EQ("CA", address.administrative_area);
}
TEST_F(AddressNormalizerTest, CountryWithNonStandardData) {
supplier_.LoadRules("HK", *loaded_);
AddressData address{
.region_code = "HK",
.administrative_area = "香港島",
};
normalizer_.Normalize(&address);
EXPECT_EQ("香港島", address.administrative_area);
}
TEST_F(AddressNormalizerTest, GangwonLatinNameStaysUnchanged) {
supplier_.LoadRules("KR", *loaded_);
AddressData address{
.region_code = "KR",
.administrative_area = "Gangwon",
.language_code = "ko-Latn",
};
normalizer_.Normalize(&address);
EXPECT_EQ("Gangwon", address.administrative_area);
}
TEST_F(AddressNormalizerTest, GangwonKoreanName) {
supplier_.LoadRules("KR", *loaded_);
AddressData address{
.region_code = "KR",
.administrative_area = "강원",
.language_code = "ko-KR",
};
normalizer_.Normalize(&address);
EXPECT_EQ("강원도", address.administrative_area);
}
TEST_F(AddressNormalizerTest, DontSwitchLatinScriptForUnknownLanguage) {
supplier_.LoadRules("KR", *loaded_);
AddressData address{
.region_code = "KR",
.administrative_area = "Gangwon",
};
normalizer_.Normalize(&address);
EXPECT_EQ("Gangwon", address.administrative_area);
}
TEST_F(AddressNormalizerTest, DontSwitchLocalScriptForUnknownLanguage) {
supplier_.LoadRules("KR", *loaded_);
AddressData address{
.region_code = "KR",
.administrative_area = "강원",
};
normalizer_.Normalize(&address);
EXPECT_EQ("강원도", address.administrative_area);
}
} |
https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/address_normalizer.cc |
https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/address_normalizer_test.cc |
2610f7b1043d6784ada41392fc9392d1ea09ea07
5e5b458c-749f-4599-ab52-44f385a4c5f4 | cpp | tensorflow/tensorflow | stream_executor_util | third_party/xla/xla/service/gpu/stream_executor_util.cc | third_party/xla/xla/service/gpu/stream_executor_util_test.cc |
#include "xla/service/gpu/stream_executor_util.h"
#include <cstdint>
#include <iterator>
#include <limits>
#include <map>
#include <memory>
#include <optional>
#include <random>
#include <sstream>
#include <string_view>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/const_init.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "Eigen/Core"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/data_type.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/kernel_spec.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/typed_kernel_factory.h"
#include "xla/tsl/protobuf/dnn.pb.h"
#include "xla/tsl/util/proto/proto_utils.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/ml_dtypes.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
absl::StatusOr<se::dnn::VersionInfo> GetDnnVersionInfo(
stream_executor::StreamExecutor* stream_exec) {
if (!stream_exec) {
return absl::InvalidArgumentError("StreamExecutor is null");
}
stream_executor::dnn::DnnSupport* dnn = stream_exec->AsDnn();
if (!dnn) {
return absl::FailedPreconditionError(
"DNN library initialization failed. Look at the errors above for more "
"details.");
}
return dnn->GetVersion();
}
se::dnn::VersionInfo GetDnnVersionInfoOrDefault(
stream_executor::StreamExecutor* stream_exec,
se::dnn::VersionInfo fallback_version) {
return GetDnnVersionInfo(stream_exec).value_or(fallback_version);
}
namespace {
using se::dnn::DataLayout;
using se::dnn::DataLayoutString;
using se::dnn::FilterLayout;
using se::dnn::FilterLayoutString;
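// Returns the smallest non-negative integer that does not appear in `vals`
// (or vals.size() if 0..size-1 are all present). Used below to pick the
// position of the extra vectorized sub-dimension in the *YX4 / *YX32 layouts.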
int64_t FindMissingDnum(absl::Span<const int64_t> vals) {
for (int i = 0; i < vals.size(); i++) {
if (!absl::c_linear_search(vals, i)) {
return i;
}
}
return vals.size();
}
absl::StatusOr<Layout> DataLayoutToXlaLayout(
DataLayout data_layout, int64_t batch_dimension, int64_t feature_dimension,
absl::Span<int64_t const> spatial_dimensions) {
std::vector<int64_t> layout;
switch (data_layout) {
case DataLayout::kBatchDepthYX:
layout.push_back(batch_dimension);
layout.push_back(feature_dimension);
layout.insert(layout.end(), spatial_dimensions.begin(),
spatial_dimensions.end());
break;
case DataLayout::kBatchDepthYX4:
case DataLayout::kBatchDepthYX32:
layout.push_back(batch_dimension);
layout.push_back(feature_dimension);
layout.insert(layout.end(), spatial_dimensions.begin(),
spatial_dimensions.end());
layout.push_back(FindMissingDnum(layout));
break;
case DataLayout::kBatchYXDepth:
layout.push_back(batch_dimension);
layout.insert(layout.end(), spatial_dimensions.begin(),
spatial_dimensions.end());
layout.push_back(feature_dimension);
break;
default:
return Internal("Invalid layout %s", DataLayoutString(data_layout));
}
return LayoutUtil::MakeLayoutFromMajorToMinor(layout);
}
}
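// Maps a StreamExecutor (DataLayout, FilterLayout, DataLayout) triple plus the
// convolution dimension numbers to concrete XLA layouts for the input, filter
// and output shapes.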
absl::StatusOr<std::tuple<Layout, Layout, Layout>>
StreamExecutorConvLayoutsToXlaLayouts(const ConvolutionDimensionNumbers& dnums,
DataLayout input, FilterLayout filter,
DataLayout output) {
TF_ASSIGN_OR_RETURN(
Layout input_layout,
DataLayoutToXlaLayout(input, dnums.input_batch_dimension(),
dnums.input_feature_dimension(),
dnums.input_spatial_dimensions()));
TF_ASSIGN_OR_RETURN(
Layout output_layout,
DataLayoutToXlaLayout(input, dnums.output_batch_dimension(),
dnums.output_feature_dimension(),
dnums.output_spatial_dimensions()));
std::vector<int64_t> filter_layout;
switch (filter) {
case FilterLayout::kOutputInputYX:
filter_layout.push_back(dnums.kernel_output_feature_dimension());
filter_layout.push_back(dnums.kernel_input_feature_dimension());
filter_layout.insert(filter_layout.end(),
dnums.kernel_spatial_dimensions().begin(),
dnums.kernel_spatial_dimensions().end());
break;
case FilterLayout::kOutputInputYX4:
filter_layout.push_back(dnums.kernel_output_feature_dimension());
filter_layout.push_back(dnums.kernel_input_feature_dimension());
filter_layout.insert(filter_layout.end(),
dnums.kernel_spatial_dimensions().begin(),
dnums.kernel_spatial_dimensions().end());
filter_layout.push_back(FindMissingDnum(filter_layout));
break;
case FilterLayout::kOutputYXInput:
filter_layout.push_back(dnums.kernel_output_feature_dimension());
filter_layout.insert(filter_layout.end(),
dnums.kernel_spatial_dimensions().begin(),
dnums.kernel_spatial_dimensions().end());
filter_layout.push_back(dnums.kernel_input_feature_dimension());
break;
default:
return Internal("Invalid filter layout %s for conv with dnums %s,",
FilterLayoutString(filter),
ConvolutionDimensionNumbersToString(dnums));
}
return std::make_tuple(input_layout,
LayoutUtil::MakeLayoutFromMajorToMinor(filter_layout),
output_layout);
}
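// Inverse of the above: inspects the actual XLA layouts of the input, filter
// and output shapes, compares them against the candidate NCHW, NCHW_VECT_C and
// NHWC layouts, and returns the matching StreamExecutor layout enums. For
// vectorized layouts the vector size (4 vs 32) is read off the most-minor
// dimension.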
absl::StatusOr<std::tuple<DataLayout, FilterLayout, DataLayout>>
XlaConvShapesToStreamExecutorLayouts(const ConvolutionDimensionNumbers& dnums,
const Shape& input, const Shape& filter,
const Shape& output) {
CHECK(input.has_layout());
CHECK(filter.has_layout());
CHECK(output.has_layout());
Layout nchw_input, nchw_filter, nchw_output;
std::tie(nchw_input, nchw_filter, nchw_output) =
StreamExecutorConvLayoutsToXlaLayouts(dnums, DataLayout::kBatchDepthYX,
FilterLayout::kOutputInputYX,
DataLayout::kBatchDepthYX)
.value();
Layout nchw_vect_input, nchw_vect_filter, nchw_vect_output;
std::tie(nchw_vect_input, nchw_vect_filter, nchw_vect_output) =
StreamExecutorConvLayoutsToXlaLayouts(dnums, DataLayout::kBatchDepthYX4,
FilterLayout::kOutputInputYX4,
DataLayout::kBatchDepthYX4)
.value();
Layout nhwc_input, nhwc_filter, nhwc_output;
std::tie(nhwc_input, nhwc_filter, nhwc_output) =
StreamExecutorConvLayoutsToXlaLayouts(dnums, DataLayout::kBatchYXDepth,
FilterLayout::kOutputYXInput,
DataLayout::kBatchYXDepth)
.value();
DataLayout input_layout;
if (LayoutUtil::Equal(input.layout(), nchw_input)) {
input_layout = DataLayout::kBatchDepthYX;
} else if (LayoutUtil::Equal(input.layout(), nchw_vect_input)) {
int64_t vect_size = input.dimensions(input.layout().minor_to_major(0));
if (vect_size == 4) {
input_layout = DataLayout::kBatchDepthYX4;
} else if (vect_size == 32) {
input_layout = DataLayout::kBatchDepthYX32;
} else {
return Internal(
"Invalid input shape %s for conv with dnums %s. Most-minor dim "
"should be 4 or 32, but was %d.",
ShapeUtil::HumanStringWithLayout(input),
ConvolutionDimensionNumbersToString(dnums), vect_size);
}
} else if (LayoutUtil::Equal(input.layout(), nhwc_input)) {
input_layout = DataLayout::kBatchYXDepth;
} else {
return Internal(
"Invalid input layout %s for conv with dnums %s; expected one of (%s, "
"%s, %s)",
LayoutUtil::HumanString(input.layout()),
ConvolutionDimensionNumbersToString(dnums), nchw_input.ToString(),
nchw_vect_input.ToString(), nhwc_input.ToString());
}
FilterLayout filter_layout;
if (LayoutUtil::Equal(filter.layout(), nchw_filter)) {
filter_layout = FilterLayout::kOutputInputYX;
} else if (LayoutUtil::Equal(filter.layout(), nchw_vect_filter)) {
int64_t vect_size = filter.dimensions(filter.layout().minor_to_major(0));
if (vect_size == 4) {
filter_layout = FilterLayout::kOutputInputYX4;
} else if (vect_size == 32) {
filter_layout = FilterLayout::kOutputInputYX32;
} else {
return Internal(
"Invalid filter shape %s for conv with dnums %s. Most-minor dim "
"should be 4 or 32, but was %d.",
ShapeUtil::HumanStringWithLayout(filter),
ConvolutionDimensionNumbersToString(dnums), vect_size);
}
} else if (LayoutUtil::Equal(filter.layout(), nhwc_filter)) {
filter_layout = FilterLayout::kOutputYXInput;
} else {
return Internal(
"Invalid filter layout %s for conv with dnums %s, expected one of (%s, "
"%s, %s)",
LayoutUtil::HumanString(filter.layout()),
ConvolutionDimensionNumbersToString(dnums), nchw_filter.ToString(),
nchw_vect_filter.ToString(), nhwc_filter.ToString());
}
DataLayout output_layout;
if (LayoutUtil::Equal(output.layout(), nchw_output)) {
output_layout = DataLayout::kBatchDepthYX;
} else if (LayoutUtil::Equal(output.layout(), nchw_vect_output)) {
int64_t vect_size = output.dimensions(output.layout().minor_to_major(0));
if (vect_size == 4) {
output_layout = DataLayout::kBatchDepthYX4;
} else if (vect_size == 32) {
output_layout = DataLayout::kBatchDepthYX32;
} else {
return Internal(
"Invalid output shape %s for conv with dnums %s. Most-minor dim "
"should be 4 or 32, but was %d.",
ShapeUtil::HumanStringWithLayout(output),
ConvolutionDimensionNumbersToString(dnums), vect_size);
}
} else if (LayoutUtil::Equal(output.layout(), nhwc_output)) {
output_layout = DataLayout::kBatchYXDepth;
} else {
return Internal("Invalid output layout %s for conv with dnums %s",
LayoutUtil::HumanString(output.layout()),
ConvolutionDimensionNumbersToString(dnums));
}
return std::make_tuple(input_layout, filter_layout, output_layout);
}
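// FindVectorizedDim returns the first dimension index that is neither d0, d1,
// nor one of ds; FindVectorizedFeatureDims uses it to locate, for each of the
// input/filter/output shapes, the vectorized feature sub-dimension (e.g. the
// trailing dim of an NCHW_VECT_C shape), if present.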
static std::optional<int64_t> FindVectorizedDim(int64_t rank, int64_t d0,
int64_t d1,
absl::Span<const int64_t> ds) {
for (int64_t i = 0; i < rank; i++) {
if (i == d0 || i == d1 || absl::c_linear_search(ds, i)) {
continue;
}
return i;
}
return std::nullopt;
}
std::tuple<std::optional<int64_t>, std::optional<int64_t>,
std::optional<int64_t>>
FindVectorizedFeatureDims(const ConvolutionDimensionNumbers& dnums,
const Shape& input, const Shape& filter,
const Shape& output) {
return {
FindVectorizedDim(input.dimensions_size(), dnums.input_batch_dimension(),
dnums.input_feature_dimension(),
dnums.input_spatial_dimensions()),
FindVectorizedDim(filter.dimensions_size(),
dnums.kernel_input_feature_dimension(),
dnums.kernel_output_feature_dimension(),
dnums.kernel_spatial_dimensions()),
FindVectorizedDim(
output.dimensions_size(), dnums.output_batch_dimension(),
dnums.output_feature_dimension(), dnums.output_spatial_dimensions()),
};
}
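// Returns a process-wide mutex keyed by (platform, device ordinal) for the
// given stream executor; the map of mutexes is created lazily and never freed.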
absl::Mutex& GetGpuMutex(const se::StreamExecutor* stream_exec) {
static absl::Mutex mu(absl::kConstInit);
static auto* mutexes =
new std::map<std::pair<const se::Platform*, int64_t>,
absl::Mutex>();
absl::MutexLock global_lock(&mu);
auto it = mutexes
->emplace(std::piecewise_construct,
std::make_tuple(stream_exec->GetPlatform(),
stream_exec->device_ordinal()),
std::make_tuple())
.first;
return it->second;
}
absl::StatusOr<std::unique_ptr<se::Kernel>> CreateKernel(
absl::string_view kernel_name, uint64_t num_args, absl::string_view ptx,
absl::Span<const uint8_t> cubin_data, se::StreamExecutor* stream_exec,
uint32_t shared_mem_bytes) {
se::MultiKernelLoaderSpec loader_spec(num_args);
loader_spec.AddCudaPtxInMemory(ptx, kernel_name);
if (!cubin_data.empty()) {
loader_spec.AddCudaCubinInMemory(cubin_data, kernel_name);
}
TF_ASSIGN_OR_RETURN(std::unique_ptr<se::Kernel> kernel,
stream_exec->LoadKernel(loader_spec));
se::KernelMetadata m;
m.set_shared_memory_bytes(shared_mem_bytes);
kernel->set_metadata(m);
return kernel;
}
absl::Status ExecuteKernelOnStream(const se::Kernel& kernel,
absl::Span<const se::DeviceMemoryBase> args,
const LaunchDimensions& dims,
se::Stream* stream) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<se::KernelArgsPackedArrayBase> kernel_args,
se::PackKernelArgs(args, kernel.metadata()));
return stream->Launch(dims.thread_counts_per_block(), dims.block_counts(),
kernel, *kernel_args);
}
absl::Status ExecuteKernelOnStream(const se::Kernel& kernel,
absl::Span<const se::DeviceMemoryBase> args,
const LaunchDimensions& dims,
const se::ClusterDim& cluster_dim,
se::Stream* stream) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<se::KernelArgsPackedArrayBase> kernel_args,
se::PackKernelArgs(args, kernel.metadata()));
return stream->Launch(dims.thread_counts_per_block(), dims.block_counts(),
cluster_dim, kernel, *kernel_args);
}
template <typename T, typename Generator>
typename std::enable_if<std::is_integral<T>::value,
T>::type static UniformDistribution(T lhs, T rhs,
Generator* gen) =
delete;
template <typename T, typename Generator>
typename std::enable_if<std::is_floating_point<T>::value,
T>::type static UniformDistribution(T lhs, T rhs,
Generator* gen) {
return std::uniform_real_distribution<T>(lhs, rhs)(*gen);
}
namespace repeat_buffer_kernel {
void* kernel();
}
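// Fills a device buffer with deterministic pseudo-random values of type T.
// A fixed-size host buffer is generated once per T and copied in up to two
// chunks starting at offset *rng_state; under CUDA, any remaining bytes are
// filled by RepeatBufferKernel, which repeats the initial pattern across the
// rest of the buffer. *rng_state is advanced so successive buffers receive
// different slices of the pattern.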
template <typename T>
static void InitializeTypedBuffer(se::Stream* stream,
se::DeviceMemoryBase buffer,
int64_t* rng_state) {
constexpr int host_buffer_size = 10069;
static std::vector<T>* host_buffer = [&] {
auto* ret = new std::vector<T>(host_buffer_size);
std::mt19937 gen;
for (auto& element : *ret) {
constexpr bool kIsIntegral = std::numeric_limits<T>::is_integer;
constexpr bool kIsLowRange =
!kIsIntegral && std::numeric_limits<T>::max_exponent <=
std::numeric_limits<Eigen::half>::max_exponent;
using RandomType = typename std::conditional<std::is_same_v<T, double>,
double, float>::type;
auto upper_bound = RandomType(kIsLowRange ? 0.1 : 1.0);
auto rand_val = UniformDistribution(RandomType(0), upper_bound, &gen);
element = T(kIsIntegral ? rand_val + 0.5 : rand_val);
}
return ret;
}();
CHECK_EQ(0, buffer.size() % sizeof(T));
int64_t elements_to_fill = buffer.size() / sizeof(T);
int64_t host_index = *rng_state;
CHECK_LT(host_index, host_buffer_size);
*rng_state = (*rng_state + elements_to_fill) % host_buffer_size;
int64_t first_size =
std::min<int64_t>(host_buffer_size - host_index, elements_to_fill);
TF_CHECK_OK(stream->Memcpy(&buffer, host_buffer->data() + host_index,
first_size * sizeof(T)));
elements_to_fill -= first_size;
if (elements_to_fill == 0) {
return;
}
int64_t second_size = std::min<int64_t>(host_index, elements_to_fill);
CHECK_LE(first_size + second_size, host_buffer_size);
se::DeviceMemoryBase mem =
buffer.GetByteSlice(first_size * sizeof(T), second_size * sizeof(T));
TF_CHECK_OK(stream->Memcpy(&mem, host_buffer->data(), mem.size()));
elements_to_fill -= second_size;
if (elements_to_fill == 0) {
return;
}
#ifdef GOOGLE_CUDA
CHECK_EQ(elements_to_fill, buffer.size() / sizeof(T) - host_buffer_size);
se::StreamExecutor* executor = stream->parent();
auto kernel =
se::TypedKernelFactory<se::DeviceMemoryBase, int64_t, int64_t>::Create(
executor, "RepeatBufferKernel", repeat_buffer_kernel::kernel());
if (!kernel.ok()) {
LOG(FATAL) << "Could not create RepeatBufferKernel: " << kernel.status();
}
constexpr int64_t host_buffer_bytes = host_buffer_size * sizeof(T);
constexpr int threads_per_block = 256;
constexpr int blocks_per_grid =
(host_buffer_bytes + threads_per_block - 1) / threads_per_block;
TF_CHECK_OK(stream->ThenLaunch(se::ThreadDim(threads_per_block, 1, 1),
se::BlockDim(blocks_per_grid, 1, 1), *kernel,
buffer, host_buffer_bytes,
static_cast<int64_t>(buffer.size())));
#endif
}
void InitializeBuffer(se::Stream* stream, PrimitiveType buffer_type,
int64_t* rng_state, se::DeviceMemoryBase buffer) {
return primitive_util::PrimitiveTypeSwitch<void>(
[&](auto primitive_type_constant) -> void {
if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant) ||
primitive_util::IsIntegralType(primitive_type_constant)) {
using NativeT = typename primitive_util::PrimitiveTypeToNative<
primitive_type_constant>::type;
return InitializeTypedBuffer<NativeT>(stream, buffer, rng_state);
}
if constexpr (primitive_util::IsComplexType(primitive_type_constant)) {
using NativeT = typename primitive_util::PrimitiveTypeToNative<
primitive_type_constant>::type;
return InitializeTypedBuffer<typename NativeT::value_type>(
stream, buffer, rng_state);
}
if constexpr (primitive_type_constant == PRED) {
return InitializeTypedBuffer<int8_t>(stream, buffer, rng_state);
}
LOG(FATAL) << "Unexpected type: "
<< primitive_util::LowercasePrimitiveTypeName(buffer_type);
},
buffer_type);
}
absl::StatusOr<se::dnn::ConvolutionKind> GetDNNConvKindFromCudnnConvKind(
CudnnConvKind kind) {
switch (kind) {
case CudnnConvKind::kBackwardFilter:
return se::dnn::BACKWARD_FILTER;
case CudnnConvKind::kBackwardInput:
return se::dnn::BACKWARD_DATA;
case CudnnConvKind::kForward:
return se::dnn::FORWARD;
case CudnnConvKind::kForwardActivation:
return se::dnn::FORWARD_BIAS_ACTIVATION;
case CudnnConvKind::kForwardGraph:
return se::dnn::FORWARD_GRAPH;
default:
break;
}
return Internal("Unexpected convolution kind");
}
absl::StatusOr<se::dnn::NormKind> GetDNNNormKindFromCudnnNormKind(
CudnnNormKind kind) {
switch (kind) {
case CudnnNormKind::kLayerForwardInfer:
return se::dnn::LAYER_FWD_INFER;
case CudnnNormKind::kLayerForwardTrain:
return se::dnn::LAYER_FWD_TRAIN;
case CudnnNormKind::kLayerBackward:
return se::dnn::LAYER_BWD;
default:
return Internal("Unexpected norm kind");
}
}
absl::StatusOr<se::dnn::FMHAMaskKind> GetDNNFmhaMaskKindFromCudnnFmhaMaskKind(
CudnnfMHAMaskKind kind) {
switch (kind) {
case CudnnfMHAMaskKind::kNoMask:
return se::dnn::NO_MASK;
case CudnnfMHAMaskKind::kPadding:
return se::dnn::PADDING;
case CudnnfMHAMaskKind::kCausal:
return se::dnn::CAUSAL;
case CudnnfMHAMaskKind::kPaddingCausal:
return se::dnn::PADDING_CAUSAL;
case CudnnfMHAMaskKind::kAlibi:
return se::dnn::ALIBI;
default:
return Internal("Unexpected fmha mask kind");
}
}
absl::StatusOr<se::dnn::DataType> GetDNNDataTypeFromPrimitiveType(
PrimitiveType type) {
switch (type) {
case F16:
return se::dnn::ToDataType<Eigen::half>::value;
case F32:
return se::dnn::ToDataType<float>::value;
case F64:
return se::dnn::ToDataType<double>::value;
case S8:
return se::dnn::ToDataType<int8_t>::value;
case S32:
return se::dnn::ToDataType<int32_t>::value;
case BF16:
return se::dnn::ToDataType<Eigen::bfloat16>::value;
case F8E4M3FN:
return se::dnn::ToDataType<tsl::float8_e4m3fn>::value;
case F8E5M2:
return se::dnn::ToDataType<tsl::float8_e5m2>::value;
default:
break;
}
return Internal("Unsupported datatype");
}
bool RequireDeterminism(const HloModuleConfig& config) {
return config.debug_options().xla_gpu_deterministic_ops() ||
config.debug_options().xla_gpu_exclude_nondeterministic_ops();
}
namespace {
std::vector<AutotuneResult> KeepNonFailures(
absl::Span<AutotuneResult const> profile_results) {
std::vector<AutotuneResult> filtered_results;
absl::c_copy_if(profile_results, std::back_inserter(filtered_results),
[](const AutotuneResult& r) {
return !r.has_failure() ||
r.failure().kind() == AutotuneResult::WRONG_RESULT;
});
return filtered_results;
}
absl::Status AllAlgorithmsFailedInternalError(
std::optional<std::string_view> instr_str,
absl::Span<AutotuneResult const> profile_results) {
std::ostringstream msg;
if (instr_str.has_value()) {
msg << "All algorithms tried for " << instr_str.value()
<< " failed. Falling back to default algorithm. Per-algorithm "
"errors:";
} else {
msg << "All algorithms failed. Falling back to the default algorithm. "
<< "Per-algorithm errors:";
}
for (const auto& result : profile_results) {
msg << "\n " << result.failure().msg();
}
return Internal("%s", msg.str());
}
absl::Status NoAlgorithmSuppliedInternalError(
std::optional<std::string_view> instr_str) {
std::ostringstream msg;
if (instr_str.has_value()) {
msg << "There are no algorithm candidates for computing: \n "
<< instr_str.value()
<< "\nThis likely means that the instruction shape is not supported by "
"the target GPU library.";
} else {
msg << "There are no algorithm candidates for computing the instruction.\n"
"This likely means that the instruction shape is not supported by "
"the target GPU library.";
}
return Internal("%s", msg.str());
}
void SortAutotuningResultsByRunTime(std::vector<AutotuneResult>& results) {
absl::c_sort(results,
[](const AutotuneResult& lhs, const AutotuneResult& rhs) {
return tsl::proto_utils::FromDurationProto(lhs.run_time()) <
tsl::proto_utils::FromDurationProto(rhs.run_time());
});
}
absl::Span<AutotuneResult const> TopResultsWithinMeasurementError(
std::vector<AutotuneResult>& results_sorted_by_runtime) {
constexpr absl::Duration kMeasurementError = absl::Microseconds(4);
absl::Duration min_time = tsl::proto_utils::FromDurationProto(
results_sorted_by_runtime.front().run_time());
absl::Duration limit_time = min_time + kMeasurementError;
auto limit_time_it = absl::c_find_if(
results_sorted_by_runtime, [limit_time](const AutotuneResult& x) {
return tsl::proto_utils::FromDurationProto(x.run_time()) > limit_time;
});
return absl::MakeSpan(&*results_sorted_by_runtime.begin(), &*limit_time_it);
}
}
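// Picks the best autotuning result: failures (other than WRONG_RESULT) are
// dropped; if deterministic ops are required, the first remaining candidate is
// returned as-is; otherwise candidates are sorted by run time and, among those
// within ~4us of the fastest, the one with the smallest scratch allocation
// wins.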
absl::StatusOr<AutotuneResult> PickBestResult(
absl::Span<AutotuneResult const> profile_results,
std::optional<std::string_view> instr_str,
HloModuleConfig hlo_module_config) {
if (profile_results.empty()) {
return NoAlgorithmSuppliedInternalError(instr_str);
}
std::vector<AutotuneResult> filtered_results =
KeepNonFailures(profile_results);
if (filtered_results.empty()) {
return AllAlgorithmsFailedInternalError(instr_str, profile_results);
}
if (RequireDeterminism(hlo_module_config)) {
return *filtered_results.begin();
}
SortAutotuningResultsByRunTime(filtered_results);
auto top_within_error = TopResultsWithinMeasurementError(filtered_results);
return *absl::c_min_element(top_within_error, [](const AutotuneResult& lhs,
const AutotuneResult& rhs) {
return lhs.scratch_bytes() < rhs.scratch_bytes();
});
}
}
} |
#include "xla/service/gpu/stream_executor_util.h"
#include <cstdint>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/time/time.h"
#include "xla/autotuning.pb.h"
#include "xla/service/hlo_module_config.h"
#include "xla/tsl/util/proto/proto_utils.h"
namespace xla::gpu {
namespace {
struct Result {
int64_t run_time_ns;
int64_t scratch_bytes;
bool operator==(const Result& other) const {
return other.run_time_ns == run_time_ns &&
other.scratch_bytes == scratch_bytes;
};
explicit operator AutotuneResult() const {
AutotuneResult result;
*result.mutable_run_time() =
tsl::proto_utils::ToDurationProto(absl::Nanoseconds(run_time_ns));
result.set_scratch_bytes(scratch_bytes);
return result;
}
};
static Result ATRToResult(AutotuneResult atr) {
return Result{.run_time_ns = absl::ToInt64Nanoseconds(
tsl::proto_utils::FromDurationProto(atr.run_time())),
.scratch_bytes = atr.scratch_bytes()};
}
std::vector<AutotuneResult> Results(const std::vector<Result>& stats) {
std::vector<AutotuneResult> results;
for (const auto& s : stats) results.push_back(AutotuneResult(s));
return results;
}
TEST(StreamExecutorTest, PickBestResult) {
absl::StatusOr<AutotuneResult> atr;
atr = PickBestResult(Results({{9000, 0}, {1000, 0}, {16000, 0}}), "", {});
EXPECT_EQ(ATRToResult(atr.value()), Result({1000, 0}));
atr = PickBestResult(Results({{4700, 0}, {4600, 0}, {4500, 0}}), "", {});
EXPECT_EQ(ATRToResult(atr.value()), Result({4500, 0}));
atr = PickBestResult(Results({{4700, 0}, {4600, 2}, {4500, 1}}), "", {});
EXPECT_EQ(ATRToResult(atr.value()), Result({4700, 0}));
atr = PickBestResult(Results({{5000, 1}, {6000, 0}, {7500, 0}}), "", {});
EXPECT_EQ(ATRToResult(atr.value()), Result({6000, 0}));
}
}
} |
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/stream_executor_util.cc |
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/stream_executor_util_test.cc |
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
dde88d8c-a50e-4c90-9f5c-0a007cee16b9 | cpp | tensorflow/tensorflow | colocate_predecessor_trees_pass | tensorflow/core/common_runtime/colocate_predecessor_trees_pass.cc | tensorflow/core/common_runtime/colocate_predecessor_trees_pass_test.cc |
#include "tensorflow/core/common_runtime/colocate_predecessor_trees_pass.h"
#include <optional>
#include <queue>
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/util/device_name_utils.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/config/flags.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace {
constexpr absl::string_view kClassAttr = "_class";
constexpr absl::string_view kFill = "Fill";
bool IsValidFillOp(const Node& node) {
if (node.type_string() != kFill) {
return false;
}
if (node.IsArg()) {
return false;
}
if (node.has_assigned_device_name()) {
return false;
}
if (!node.requested_device().empty()) {
return false;
}
if (HasNodeAttr(node.def(), kClassAttr)) {
return false;
}
if (!KernelDefAvailable(DeviceType(DEVICE_CPU), node.def())) {
return false;
}
return true;
}
bool IsValidIdentityNode(const Node& node) {
if (!node.IsIdentity()) {
return false;
}
if (node.requested_device().empty()) {
return false;
}
auto device_name = node.requested_device();
DeviceNameUtils::ParsedName parsed_device_name;
DeviceNameUtils::ParseFullName(device_name, &parsed_device_name);
if (parsed_device_name.type != DEVICE_CPU) {
return false;
}
if (node.IsArg()) {
return false;
}
if (!KernelDefAvailable(DeviceType(DEVICE_CPU), node.def())) {
return false;
}
return true;
}
std::optional<std::string> GetColocateStringName(const Node& fill_node) {
std::string device = "";
std::string colocation_prefix = "loc:@";
std::string colocation_name = "";
for (auto output_node : fill_node.out_nodes()) {
if (!IsValidIdentityNode(*output_node)) return std::nullopt;
if (device.empty()) {
device = output_node->requested_device();
colocation_name = absl::StrCat(colocation_prefix, output_node->name());
} else if (device != output_node->requested_device()) {
return std::nullopt;
}
}
if (colocation_name.empty()) return std::nullopt;
return colocation_name;
}
bool AreAllInNodesQualifiedConst(const Node& node) {
for (auto in_node : node.in_nodes()) {
if (!in_node->IsConstant()) {
return false;
}
if (in_node->IsArg()) {
return false;
}
if (in_node->has_assigned_device_name()) {
return false;
}
if (!in_node->requested_device().empty()) {
return false;
}
if (HasNodeAttr(in_node->def(), kClassAttr)) {
return false;
}
if (!KernelDefAvailable(DeviceType(DEVICE_CPU), in_node->def())) {
return false;
}
}
return true;
}
bool ShouldRunPass(const GraphOptimizationPassOptions& options) {
if (!flags::Global().enable_tf2min_ici_weight.value()) {
VLOG(1) << "ColocatePredecessorTreesPass is disabled.";
return false;
}
VLOG(1) << "ColocatePredecessorTreesPass is enabled.";
if (options.graph == nullptr) {
LOG(INFO) << "No graph in colocate_predecessor_trees_pass.\n";
return false;
}
return true;
}
void LogGraphProperties(bool is_graph_changed, bool has_valid_fill_op,
bool has_colocation_name, bool has_qualified_const,
Graph* graph,
const GraphOptimizationPassOptions& options) {
if (is_graph_changed) {
VLOG(1) << "Graph is changed by ColocatePredecessorTreesPass.";
VLOG(1) << DumpGraphToFile("graph_changed_after_colocate_predecessor_trees",
*graph, options.flib_def);
} else {
VLOG(1) << "Graph is not changed by ColocatePredecessorTreesPass.";
VLOG(1) << "has_valid_fill_op: " << has_valid_fill_op;
VLOG(1) << "has_colocation_name: " << has_colocation_name;
VLOG(1) << "has_qualified_const: " << has_qualified_const;
VLOG(1) << DumpGraphToFile(
"graph_not_changed_after_colocate_predecessor_trees", *graph,
options.flib_def);
}
}
}
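// The pass itself: for every eligible Fill node (unplaced, no existing _class
// attr) whose inputs are all unconstrained Const nodes and whose outputs are
// all Identity nodes requesting the same CPU device, attach a
// "loc:@<identity>" colocation attribute to the Fill, its inputs and its
// outputs so the whole predecessor tree is placed together.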
Status ColocatePredecessorTreesPass::Run(
const GraphOptimizationPassOptions& options) {
if (!ShouldRunPass(options)) {
return absl::OkStatus();
}
Graph* graph = options.graph->get();
VLOG(1) << DumpGraphToFile("graph_before_colocate_predecessor_trees", *graph,
options.flib_def);
bool is_graph_changed = false;
bool has_valid_fill_op = false;
bool has_colocation_name = false;
bool has_qualified_const = false;
for (Node* node : graph->nodes()) {
if (!IsValidFillOp(*node)) {
continue;
}
has_valid_fill_op = true;
auto colocation_name = GetColocateStringName(*node);
if (!colocation_name.has_value()) continue;
has_colocation_name = true;
if (!AreAllInNodesQualifiedConst(*node)) continue;
has_qualified_const = true;
is_graph_changed = true;
node->AddAttr(std::string(kClassAttr), {*colocation_name});
for (auto in_node : node->in_nodes()) {
in_node->AddAttr(std::string(kClassAttr), {*colocation_name});
}
for (auto out_node : node->out_nodes()) {
out_node->AddAttr(std::string(kClassAttr), {*colocation_name});
}
}
LogGraphProperties(is_graph_changed, has_valid_fill_op, has_colocation_name,
has_qualified_const, graph, options);
return absl::OkStatus();
}
REGISTER_OPTIMIZATION(OptimizationPassRegistry::PRE_PLACEMENT, 50,
ColocatePredecessorTreesPass);
} | #include "tensorflow/core/common_runtime/colocate_predecessor_trees_pass.h"
#include <memory>
#include <string>
#include "tensorflow/cc/framework/scope.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/graph_def_builder_util.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/config/flags.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/test.h"
namespace tensorflow {
const char kCpu0[] = "/job:tpu_host_worker/replica:0/task:0/device:CPU:0";
const char kCpu1[] = "/job:tpu_host_worker/replica:0/task:0/device:CPU:1";
const char kClassAttr[] = "_class";
Node* GetNode(const Graph& graph, const std::string& name) {
for (Node* node : graph.nodes()) {
if (node->name() == name) return node;
}
return nullptr;
}
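// Each test below builds a small graph of the shape
//   Const, Const -> Fill -> Identity(-ies)
// sets requested devices on selected nodes, runs the pass, and checks whether
// the _class (colocation) attribute was propagated.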
TEST(ColocatePredecessorTreesPassTest, ICIFlagFalse) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* const_0 = ops::SourceOp("Const", builder.opts()
.WithName("const_0")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(1.0)));
Node* const_1 = ops::SourceOp("Const", builder.opts()
.WithName("const_1")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(2.0)));
Node* fill =
ops::BinaryOp("Fill", const_0, const_1, builder.opts().WithName("fill"));
ops::UnaryOp("Identity", fill, builder.opts().WithName("identity"));
ops::UnaryOp("Identity", fill, builder.opts().WithName("identity_1"));
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
GetNode(*graph, "identity")->set_requested_device(kCpu0);
GetNode(*graph, "identity_1")->set_requested_device(kCpu0);
GraphDef before;
graph->ToGraphDef(&before);
GraphOptimizationPassOptions options;
options.graph = &graph;
ColocatePredecessorTreesPass pass;
TF_ASSERT_OK(pass.Run(options));
EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "const_0")->def(), kClassAttr));
EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "const_1")->def(), kClassAttr));
EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "fill")->def(), kClassAttr));
EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "identity")->def(), kClassAttr));
}
TEST(ColocatePredecessorTreesPassTest, SimpleExample) {
flags::Global().enable_tf2min_ici_weight.reset(true);
auto graph = std::make_unique<Graph>(OpRegistry::Global());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* const_0 = ops::SourceOp("Const", builder.opts()
.WithName("const_0")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(1.0)));
Node* const_1 = ops::SourceOp("Const", builder.opts()
.WithName("const_1")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(2.0)));
Node* fill =
ops::BinaryOp("Fill", const_0, const_1, builder.opts().WithName("fill"));
ops::UnaryOp("Identity", fill, builder.opts().WithName("identity"));
ops::UnaryOp("Identity", fill, builder.opts().WithName("identity_1"));
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
GetNode(*graph, "identity")->set_requested_device(kCpu0);
GetNode(*graph, "identity_1")->set_requested_device(kCpu0);
GraphDef before;
graph->ToGraphDef(&before);
GraphOptimizationPassOptions options;
options.graph = &graph;
ColocatePredecessorTreesPass pass;
TF_ASSERT_OK(pass.Run(options));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "const_0")->def(), kClassAttr));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "const_1")->def(), kClassAttr));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "fill")->def(), kClassAttr));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "identity")->def(), kClassAttr));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "identity_1")->def(), kClassAttr));
std::string expected_colocation_info = "loc:@identity";
const AttrValue* input_value;
TF_EXPECT_OK(
GetNode(*graph, "const_0")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
TF_EXPECT_OK(
GetNode(*graph, "const_1")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
TF_EXPECT_OK(GetNode(*graph, "fill")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
TF_EXPECT_OK(
GetNode(*graph, "identity")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
TF_EXPECT_OK(
GetNode(*graph, "identity_1")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
}
TEST(ColocatePredecessorTreesPassTest, PropagateTwoTrees) {
flags::Global().enable_tf2min_ici_weight.reset(true);
auto graph = std::make_unique<Graph>(OpRegistry::Global());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* const_0 = ops::SourceOp("Const", builder.opts()
.WithName("const_0")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(1.0)));
Node* const_1 = ops::SourceOp("Const", builder.opts()
.WithName("const_1")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(2.0)));
Node* fill =
ops::BinaryOp("Fill", const_0, const_1, builder.opts().WithName("fill"));
ops::UnaryOp("Identity", fill, builder.opts().WithName("identity"));
Node* const_2 = ops::SourceOp("Const", builder.opts()
.WithName("const_2")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(1.0)));
Node* const_3 = ops::SourceOp("Const", builder.opts()
.WithName("const_3")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(2.0)));
Node* fill_1 = ops::BinaryOp("Fill", const_2, const_3,
builder.opts().WithName("fill_1"));
ops::UnaryOp("Identity", fill_1, builder.opts().WithName("identity_1"));
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
GetNode(*graph, "identity")->set_requested_device(kCpu0);
GetNode(*graph, "identity_1")->set_requested_device(kCpu0);
GraphDef before;
graph->ToGraphDef(&before);
GraphOptimizationPassOptions options;
options.graph = &graph;
ColocatePredecessorTreesPass pass;
TF_ASSERT_OK(pass.Run(options));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "const_0")->def(), kClassAttr));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "const_1")->def(), kClassAttr));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "fill")->def(), kClassAttr));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "identity")->def(), kClassAttr));
std::string expected_colocation_info = "loc:@identity";
const AttrValue* input_value;
TF_EXPECT_OK(
GetNode(*graph, "const_0")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
TF_EXPECT_OK(
GetNode(*graph, "const_1")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
TF_EXPECT_OK(GetNode(*graph, "fill")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
TF_EXPECT_OK(
GetNode(*graph, "identity")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "const_2")->def(), kClassAttr));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "const_3")->def(), kClassAttr));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "fill_1")->def(), kClassAttr));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "identity_1")->def(), kClassAttr));
std::string expected_colocation_info_1 = "loc:@identity_1";
TF_EXPECT_OK(
GetNode(*graph, "const_2")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info_1);
TF_EXPECT_OK(
GetNode(*graph, "const_3")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info_1);
TF_EXPECT_OK(
GetNode(*graph, "fill_1")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info_1);
TF_EXPECT_OK(
GetNode(*graph, "identity_1")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info_1);
}
TEST(ColocatePredecessorTreesPassTest, RootHasMultipleOutputs) {
flags::Global().enable_tf2min_ici_weight.reset(true);
auto graph = std::make_unique<Graph>(OpRegistry::Global());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* const_0 = ops::SourceOp("Const", builder.opts()
.WithName("const_0")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(1.0)));
Node* const_1 = ops::SourceOp("Const", builder.opts()
.WithName("const_1")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(2.0)));
Node* fill =
ops::BinaryOp("Fill", const_0, const_1, builder.opts().WithName("fill"));
Node* identity =
ops::UnaryOp("Identity", fill, builder.opts().WithName("identity"));
ops::UnaryOp("Identity", fill, builder.opts().WithName("identity_0"));
ops::UnaryOp("Identity", identity, builder.opts().WithName("identity_1"));
ops::UnaryOp("Identity", identity, builder.opts().WithName("identity_2"));
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
GetNode(*graph, "identity")->set_requested_device(kCpu0);
GetNode(*graph, "identity_0")->set_requested_device(kCpu0);
GraphDef before;
graph->ToGraphDef(&before);
GraphOptimizationPassOptions options;
options.graph = &graph;
ColocatePredecessorTreesPass pass;
TF_ASSERT_OK(pass.Run(options));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "const_0")->def(), kClassAttr));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "const_1")->def(), kClassAttr));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "fill")->def(), kClassAttr));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "identity")->def(), kClassAttr));
EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "identity_0")->def(), kClassAttr));
EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "identity_1")->def(), kClassAttr));
EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "identity_2")->def(), kClassAttr));
std::string expected_colocation_info = "loc:@identity";
const AttrValue* input_value;
TF_EXPECT_OK(
GetNode(*graph, "const_0")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
TF_EXPECT_OK(
GetNode(*graph, "const_1")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
TF_EXPECT_OK(GetNode(*graph, "fill")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
TF_EXPECT_OK(
GetNode(*graph, "identity")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
TF_EXPECT_OK(
GetNode(*graph, "identity_0")->attrs().Find(kClassAttr, &input_value));
EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
}
TEST(ColocatePredecessorTreesPassTest, ConstHasDeviceAttr) {
flags::Global().enable_tf2min_ici_weight.reset(true);
auto graph = std::make_unique<Graph>(OpRegistry::Global());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* const_0 = ops::SourceOp("Const", builder.opts()
.WithName("const_0")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(1.0)));
Node* const_1 = ops::SourceOp("Const", builder.opts()
.WithName("const_1")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(2.0)));
Node* fill =
ops::BinaryOp("Fill", const_0, const_1, builder.opts().WithName("fill"));
ops::UnaryOp("Identity", fill, builder.opts().WithName("identity"));
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
GetNode(*graph, "identity")->set_requested_device(kCpu0);
GetNode(*graph, "const_0")->set_requested_device(kCpu1);
GraphDef before;
graph->ToGraphDef(&before);
GraphOptimizationPassOptions options;
options.graph = &graph;
ColocatePredecessorTreesPass pass;
TF_ASSERT_OK(pass.Run(options));
EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "const_0")->def(), kClassAttr));
EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "const_1")->def(), kClassAttr));
EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "fill")->def(), kClassAttr));
EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "identity")->def(), kClassAttr));
}
TEST(ColocatePredecessorTreesPassTest, ConstHasColocationInfo) {
flags::Global().enable_tf2min_ici_weight.reset(true);
auto graph = std::make_unique<Graph>(OpRegistry::Global());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* const_0 =
ops::SourceOp("Const", builder.opts()
.WithName("const_0")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(1.0))
.WithAttr("_class", {"loc:@fill"}));
Node* const_1 = ops::SourceOp("Const", builder.opts()
.WithName("const_1")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(2.0)));
Node* fill =
ops::BinaryOp("Fill", const_0, const_1, builder.opts().WithName("fill"));
Node* identity =
ops::UnaryOp("Identity", fill, builder.opts().WithName("identity"));
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
GetNode(*graph, "identity")->set_requested_device(kCpu0);
GraphDef before;
graph->ToGraphDef(&before);
GraphOptimizationPassOptions options;
options.graph = &graph;
ColocatePredecessorTreesPass pass;
TF_ASSERT_OK(pass.Run(options));
EXPECT_TRUE(HasNodeAttr(const_0->def(), kClassAttr));
EXPECT_FALSE(HasNodeAttr(const_1->def(), kClassAttr));
EXPECT_FALSE(HasNodeAttr(fill->def(), kClassAttr));
EXPECT_FALSE(HasNodeAttr(identity->def(), kClassAttr));
}
TEST(ColocatePredecessorTreesPassTest, InputArg) {
flags::Global().enable_tf2min_ici_weight.reset(true);
auto graph = std::make_unique<Graph>(OpRegistry::Global());
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* arg_0 = ops::SourceOp("_Arg", builder.opts()
.WithName("arg_0")
.WithAttr("T", DT_INT32)
.WithAttr("index", 0));
Node* const_0 = ops::SourceOp("Const", builder.opts()
.WithName("const_0")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor(2.0)));
Node* fill =
ops::BinaryOp("Fill", arg_0, const_0, builder.opts().WithName("fill"));
ops::UnaryOp("Identity", fill, builder.opts().WithName("identity"));
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
GetNode(*graph, "identity")->set_requested_device(kCpu0);
GraphDef before;
graph->ToGraphDef(&before);
GraphOptimizationPassOptions options;
options.graph = &graph;
ColocatePredecessorTreesPass pass;
TF_ASSERT_OK(pass.Run(options));
EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "arg_0")->def(), kClassAttr));
EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "const_0")->def(), kClassAttr));
EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "fill")->def(), kClassAttr));
EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "identity")->def(), kClassAttr));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/colocate_predecessor_trees_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/colocate_predecessor_trees_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4f659819-6eca-4f9c-a250-a5fa655698cb | cpp | abseil/abseil-cpp | layout | absl/container/internal/layout.h | absl/container/internal/layout_test.cc | #ifndef ABSL_CONTAINER_INTERNAL_LAYOUT_H_
#define ABSL_CONTAINER_INTERNAL_LAYOUT_H_
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <array>
#include <string>
#include <tuple>
#include <type_traits>
#include <typeinfo>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/debugging/internal/demangle.h"
#include "absl/meta/type_traits.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "absl/utility/utility.h"
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
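// Aligned<T, N> can be used in place of T in a Layout element list to request
// that the corresponding array be aligned to at least N bytes; N must be a
// multiple of alignof(T) (see the AlignOf specialization below), and the
// element may not be const-qualified (see NotAligned).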
template <class T, size_t N>
struct Aligned;
namespace internal_layout {
template <class T>
struct NotAligned {};
template <class T, size_t N>
struct NotAligned<const Aligned<T, N>> {
static_assert(sizeof(T) == 0, "Aligned<T, N> cannot be const-qualified");
};
template <size_t>
using IntToSize = size_t;
template <class T>
struct Type : NotAligned<T> {
using type = T;
};
template <class T, size_t N>
struct Type<Aligned<T, N>> {
using type = T;
};
template <class T>
struct SizeOf : NotAligned<T>, std::integral_constant<size_t, sizeof(T)> {};
template <class T, size_t N>
struct SizeOf<Aligned<T, N>> : std::integral_constant<size_t, sizeof(T)> {};
template <class T>
struct AlignOf : NotAligned<T> {
static constexpr size_t value = alignof(T);
};
template <class T, size_t N>
struct AlignOf<Aligned<T, N>> {
static_assert(N % alignof(T) == 0,
"Custom alignment can't be lower than the type's alignment");
static constexpr size_t value = N;
};
template <class T, class... Ts>
using Contains = absl::disjunction<std::is_same<T, Ts>...>;
template <class From, class To>
using CopyConst =
typename std::conditional<std::is_const<From>::value, const To, To>::type;
template <class T>
using SliceType = Span<T>;
namespace adl_barrier {
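// Find(Needle(), Ts()...) is the zero-based index of Needle within Ts...;
// a duplicate occurrence of Needle trips the static_assert.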
template <class Needle, class... Ts>
constexpr size_t Find(Needle, Needle, Ts...) {
static_assert(!Contains<Needle, Ts...>(), "Duplicate element type");
return 0;
}
template <class Needle, class T, class... Ts>
constexpr size_t Find(Needle, T, Ts...) {
return adl_barrier::Find(Needle(), Ts()...) + 1;
}
constexpr bool IsPow2(size_t n) { return !(n & (n - 1)); }
constexpr size_t Align(size_t n, size_t m) { return (n + m - 1) & ~(m - 1); }
constexpr size_t Min(size_t a, size_t b) { return b < a ? b : a; }
constexpr size_t Max(size_t a) { return a; }
template <class... Ts>
constexpr size_t Max(size_t a, size_t b, Ts... rest) {
return adl_barrier::Max(b < a ? a : b, rest...);
}
template <class T>
std::string TypeName() {
std::string out;
#if ABSL_INTERNAL_HAS_RTTI
absl::StrAppend(&out, "<",
absl::debugging_internal::DemangleString(typeid(T).name()),
">");
#endif
return out;
}
}
template <bool C>
using EnableIf = typename std::enable_if<C, int>::type;
template <class T>
using IsLegalElementType = std::integral_constant<
bool, !std::is_reference<T>::value && !std::is_volatile<T>::value &&
!std::is_reference<typename Type<T>::type>::value &&
!std::is_volatile<typename Type<T>::type>::value &&
adl_barrier::IsPow2(AlignOf<T>::value)>;
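// LayoutImpl is the implementation behind Layout: it computes the offset and
// size of each array in a packed sequence of arrays with element types
// Elements..., where the sizes listed in StaticSizeSeq are fixed at compile
// time and the remaining RuntimeSizeSeq sizes are passed to the constructor.
// Offsets respect each element type's (possibly overridden) alignment.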
template <class Elements, class StaticSizeSeq, class RuntimeSizeSeq,
class SizeSeq, class OffsetSeq>
class LayoutImpl;
template <class... Elements, size_t... StaticSizeSeq, size_t... RuntimeSizeSeq,
size_t... SizeSeq, size_t... OffsetSeq>
class LayoutImpl<
std::tuple<Elements...>, absl::index_sequence<StaticSizeSeq...>,
absl::index_sequence<RuntimeSizeSeq...>, absl::index_sequence<SizeSeq...>,
absl::index_sequence<OffsetSeq...>> {
private:
static_assert(sizeof...(Elements) > 0, "At least one field is required");
static_assert(absl::conjunction<IsLegalElementType<Elements>...>::value,
"Invalid element type (see IsLegalElementType)");
static_assert(sizeof...(StaticSizeSeq) <= sizeof...(Elements),
"Too many static sizes specified");
enum {
NumTypes = sizeof...(Elements),
NumStaticSizes = sizeof...(StaticSizeSeq),
NumRuntimeSizes = sizeof...(RuntimeSizeSeq),
NumSizes = sizeof...(SizeSeq),
NumOffsets = sizeof...(OffsetSeq),
};
static_assert(NumStaticSizes + NumRuntimeSizes == NumSizes, "Internal error");
static_assert(NumSizes <= NumTypes, "Internal error");
static_assert(NumOffsets == adl_barrier::Min(NumTypes, NumSizes + 1),
"Internal error");
static_assert(NumTypes > 0, "Internal error");
static constexpr std::array<size_t, sizeof...(StaticSizeSeq)> kStaticSizes = {
StaticSizeSeq...};
template <class T>
static constexpr size_t ElementIndex() {
static_assert(Contains<Type<T>, Type<typename Type<Elements>::type>...>(),
"Type not found");
return adl_barrier::Find(Type<T>(),
Type<typename Type<Elements>::type>()...);
}
template <size_t N>
using ElementAlignment =
AlignOf<typename std::tuple_element<N, std::tuple<Elements...>>::type>;
public:
using ElementTypes = std::tuple<typename Type<Elements>::type...>;
template <size_t N>
using ElementType = typename std::tuple_element<N, ElementTypes>::type;
constexpr explicit LayoutImpl(IntToSize<RuntimeSizeSeq>... sizes)
: size_{sizes...} {}
static constexpr size_t Alignment() {
return adl_barrier::Max(AlignOf<Elements>::value...);
}
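  // Offset<N>() is the byte offset of the N-th array: the end of the previous
  // array rounded up to the alignment of the N-th element type.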
template <size_t N, EnableIf<N == 0> = 0>
constexpr size_t Offset() const {
return 0;
}
template <size_t N, EnableIf<N != 0> = 0>
constexpr size_t Offset() const {
static_assert(N < NumOffsets, "Index out of bounds");
return adl_barrier::Align(
Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * Size<N - 1>(),
ElementAlignment<N>::value);
}
template <class T>
constexpr size_t Offset() const {
return Offset<ElementIndex<T>()>();
}
constexpr std::array<size_t, NumOffsets> Offsets() const {
return {{Offset<OffsetSeq>()...}};
}
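  // Size<N>() is the number of elements in the N-th array: a compile-time
  // constant for statically sized arrays, otherwise the value stored in size_.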
template <size_t N, EnableIf<(N < NumStaticSizes)> = 0>
constexpr size_t Size() const {
return kStaticSizes[N];
}
template <size_t N, EnableIf<(N >= NumStaticSizes)> = 0>
constexpr size_t Size() const {
static_assert(N < NumSizes, "Index out of bounds");
return size_[N - NumStaticSizes];
}
template <class T>
constexpr size_t Size() const {
return Size<ElementIndex<T>()>();
}
constexpr std::array<size_t, NumSizes> Sizes() const {
return {{Size<SizeSeq>()...}};
}
template <size_t N, class Char>
CopyConst<Char, ElementType<N>>* Pointer(Char* p) const {
using C = typename std::remove_const<Char>::type;
static_assert(
std::is_same<C, char>() || std::is_same<C, unsigned char>() ||
std::is_same<C, signed char>(),
"The argument must be a pointer to [const] [signed|unsigned] char");
constexpr size_t alignment = Alignment();
(void)alignment;
assert(reinterpret_cast<uintptr_t>(p) % alignment == 0);
return reinterpret_cast<CopyConst<Char, ElementType<N>>*>(p + Offset<N>());
}
template <class T, class Char>
CopyConst<Char, T>* Pointer(Char* p) const {
return Pointer<ElementIndex<T>()>(p);
}
template <class Char>
auto Pointers(Char* p) const {
return std::tuple<CopyConst<Char, ElementType<OffsetSeq>>*...>(
Pointer<OffsetSeq>(p)...);
}
template <size_t N, class Char>
SliceType<CopyConst<Char, ElementType<N>>> Slice(Char* p) const {
return SliceType<CopyConst<Char, ElementType<N>>>(Pointer<N>(p), Size<N>());
}
template <class T, class Char>
SliceType<CopyConst<Char, T>> Slice(Char* p) const {
return Slice<ElementIndex<T>()>(p);
}
template <class Char>
auto Slices(ABSL_ATTRIBUTE_UNUSED Char* p) const {
return std::tuple<SliceType<CopyConst<Char, ElementType<SizeSeq>>>...>(
Slice<SizeSeq>(p)...);
}
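  // Total number of bytes occupied by all arrays, including interior padding;
  // requires that every array size is known (NumTypes == NumSizes).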
constexpr size_t AllocSize() const {
static_assert(NumTypes == NumSizes, "You must specify sizes of all fields");
return Offset<NumTypes - 1>() +
SizeOf<ElementType<NumTypes - 1>>::value * Size<NumTypes - 1>();
}
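  // Under AddressSanitizer, marks the padding bytes between adjacent arrays as
  // unaddressable so accidental reads or writes into padding are reported.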
template <class Char, size_t N = NumOffsets - 1, EnableIf<N == 0> = 0>
void PoisonPadding(const Char* p) const {
Pointer<0>(p);
}
template <class Char, size_t N = NumOffsets - 1, EnableIf<N != 0> = 0>
void PoisonPadding(const Char* p) const {
static_assert(N < NumOffsets, "Index out of bounds");
(void)p;
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
PoisonPadding<Char, N - 1>(p);
if (ElementAlignment<N - 1>::value % ElementAlignment<N>::value) {
size_t start =
Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * Size<N - 1>();
ASAN_POISON_MEMORY_REGION(p + start, Offset<N>() - start);
}
#endif
}
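  // Human-readable description of the layout, e.g. "@0<char>(1)[3]; @4<int>(4)[5]":
  // each array's offset, element type (when RTTI is available), element size,
  // and, when known, element count.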
std::string DebugString() const {
const auto offsets = Offsets();
const size_t sizes[] = {SizeOf<ElementType<OffsetSeq>>::value...};
const std::string types[] = {
adl_barrier::TypeName<ElementType<OffsetSeq>>()...};
std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")");
for (size_t i = 0; i != NumOffsets - 1; ++i) {
absl::StrAppend(&res, "[", DebugSize(i), "]; @", offsets[i + 1],
types[i + 1], "(", sizes[i + 1], ")");
}
int last = static_cast<int>(NumSizes) - 1;
if (NumTypes == NumSizes && last >= 0) {
absl::StrAppend(&res, "[", DebugSize(static_cast<size_t>(last)), "]");
}
return res;
}
private:
size_t DebugSize(size_t n) const {
if (n < NumStaticSizes) {
return kStaticSizes[n];
} else {
return size_[n - NumStaticSizes];
}
}
size_t size_[NumRuntimeSizes > 0 ? NumRuntimeSizes : 1];
};
template <class... Elements, size_t... StaticSizeSeq, size_t... RuntimeSizeSeq,
size_t... SizeSeq, size_t... OffsetSeq>
constexpr std::array<size_t, sizeof...(StaticSizeSeq)> LayoutImpl<
std::tuple<Elements...>, absl::index_sequence<StaticSizeSeq...>,
absl::index_sequence<RuntimeSizeSeq...>, absl::index_sequence<SizeSeq...>,
absl::index_sequence<OffsetSeq...>>::kStaticSizes;
template <class StaticSizeSeq, size_t NumRuntimeSizes, class... Ts>
using LayoutType = LayoutImpl<
std::tuple<Ts...>, StaticSizeSeq,
absl::make_index_sequence<NumRuntimeSizes>,
absl::make_index_sequence<NumRuntimeSizes + StaticSizeSeq::size()>,
absl::make_index_sequence<adl_barrier::Min(
sizeof...(Ts), NumRuntimeSizes + StaticSizeSeq::size() + 1)>>;
template <class StaticSizeSeq, class... Ts>
class LayoutWithStaticSizes
: public LayoutType<StaticSizeSeq,
sizeof...(Ts) - adl_barrier::Min(sizeof...(Ts),
StaticSizeSeq::size()),
Ts...> {
private:
using Super =
LayoutType<StaticSizeSeq,
sizeof...(Ts) -
adl_barrier::Min(sizeof...(Ts), StaticSizeSeq::size()),
Ts...>;
public:
template <size_t NumSizes>
using PartialType =
internal_layout::LayoutType<StaticSizeSeq, NumSizes, Ts...>;
template <class... Sizes>
static constexpr PartialType<sizeof...(Sizes)> Partial(Sizes&&... sizes) {
static_assert(sizeof...(Sizes) + StaticSizeSeq::size() <= sizeof...(Ts),
"");
return PartialType<sizeof...(Sizes)>(
static_cast<size_t>(std::forward<Sizes>(sizes))...);
}
using Super::Super;
};
}
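// Layout<Ts...> is the public interface. A minimal usage sketch (illustrative
// only; the variable names below are not part of the API): to place a size_t
// array of length 3 and a double array of length 5 in one allocation,
//
//   using L = Layout<size_t, double>;
//   constexpr L layout(3, 5);
//   unsigned char* buf = new unsigned char[layout.AllocSize()];
//   size_t* sizes = layout.Pointer<0>(buf);
//   double* values = layout.Pointer<1>(buf);
//
// L::Partial(...) accepts a prefix of the sizes when only some are known, and
// WithStaticSizes<...> fixes a prefix of the sizes at compile time.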
template <class... Ts>
class Layout : public internal_layout::LayoutWithStaticSizes<
absl::make_index_sequence<0>, Ts...> {
private:
using Super =
internal_layout::LayoutWithStaticSizes<absl::make_index_sequence<0>,
Ts...>;
public:
template <class StaticSizeSeq>
using WithStaticSizeSequence =
internal_layout::LayoutWithStaticSizes<StaticSizeSeq, Ts...>;
template <size_t... StaticSizes>
using WithStaticSizes =
WithStaticSizeSequence<std::index_sequence<StaticSizes...>>;
using Super::Super;
};
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/container/internal/layout.h"
#include <stddef.h>
#include <cstdint>
#include <cstring>
#include <initializer_list>
#include <memory>
#include <ostream>
#include <string>
#include <tuple>
#include <type_traits>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/log/check.h"
#include "absl/types/span.h"
#include "absl/utility/utility.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace {
using ::absl::Span;
using ::testing::ElementsAre;
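// Byte distance between two pointers; CHECK-fails if `to` precedes `from`.
// Used below to verify field offsets within an allocation.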
size_t Distance(const void* from, const void* to) {
CHECK_LE(from, to) << "Distance must be non-negative";
return static_cast<const char*>(to) - static_cast<const char*>(from);
}
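// Type<Expected>(val) statically asserts that the deduced type of `val` is
// exactly Expected and returns it unchanged; the tests use it to pin down the
// exact pointer and Span types produced by Layout.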
template <class Expected, class Actual>
Expected Type(Actual val) {
static_assert(std::is_same<Expected, Actual>(), "");
return val;
}
struct alignas(8) Int128 {
uint64_t a, b;
friend bool operator==(Int128 lhs, Int128 rhs) {
return std::tie(lhs.a, lhs.b) == std::tie(rhs.a, rhs.b);
}
static std::string Name() {
return internal_layout::adl_barrier::TypeName<Int128>();
}
};
struct alignas(8) Int64 {
int64_t a;
friend bool operator==(Int64 lhs, Int64 rhs) { return lhs.a == rhs.a; }
};
static_assert(sizeof(int8_t) == 1, "");
static_assert(alignof(int8_t) == 1, "");
static_assert(sizeof(int16_t) == 2, "");
static_assert(alignof(int16_t) == 2, "");
static_assert(sizeof(int32_t) == 4, "");
static_assert(alignof(int32_t) == 4, "");
static_assert(sizeof(Int64) == 8, "");
static_assert(alignof(Int64) == 8, "");
static_assert(sizeof(Int128) == 16, "");
static_assert(alignof(Int128) == 8, "");
template <class Expected, class Actual>
void SameType() {
static_assert(std::is_same<Expected, Actual>(), "");
}
TEST(Layout, ElementType) {
{
using L = Layout<int32_t>;
SameType<int32_t, L::ElementType<0>>();
SameType<int32_t, decltype(L::Partial())::ElementType<0>>();
SameType<int32_t, decltype(L::Partial(0))::ElementType<0>>();
}
{
using L = Layout<int32_t, int32_t>;
SameType<int32_t, L::ElementType<0>>();
SameType<int32_t, L::ElementType<1>>();
SameType<int32_t, decltype(L::Partial())::ElementType<0>>();
SameType<int32_t, decltype(L::Partial())::ElementType<1>>();
SameType<int32_t, decltype(L::Partial(0))::ElementType<0>>();
SameType<int32_t, decltype(L::Partial(0))::ElementType<1>>();
}
{
using L = Layout<int8_t, int32_t, Int128>;
SameType<int8_t, L::ElementType<0>>();
SameType<int32_t, L::ElementType<1>>();
SameType<Int128, L::ElementType<2>>();
SameType<int8_t, decltype(L::Partial())::ElementType<0>>();
SameType<int8_t, decltype(L::Partial(0))::ElementType<0>>();
SameType<int32_t, decltype(L::Partial(0))::ElementType<1>>();
SameType<int8_t, decltype(L::Partial(0, 0))::ElementType<0>>();
SameType<int32_t, decltype(L::Partial(0, 0))::ElementType<1>>();
SameType<Int128, decltype(L::Partial(0, 0))::ElementType<2>>();
SameType<int8_t, decltype(L::Partial(0, 0, 0))::ElementType<0>>();
SameType<int32_t, decltype(L::Partial(0, 0, 0))::ElementType<1>>();
SameType<Int128, decltype(L::Partial(0, 0, 0))::ElementType<2>>();
}
}
TEST(Layout, ElementTypes) {
{
using L = Layout<int32_t>;
SameType<std::tuple<int32_t>, L::ElementTypes>();
SameType<std::tuple<int32_t>, decltype(L::Partial())::ElementTypes>();
SameType<std::tuple<int32_t>, decltype(L::Partial(0))::ElementTypes>();
}
{
using L = Layout<int32_t, int32_t>;
SameType<std::tuple<int32_t, int32_t>, L::ElementTypes>();
SameType<std::tuple<int32_t, int32_t>,
decltype(L::Partial())::ElementTypes>();
SameType<std::tuple<int32_t, int32_t>,
decltype(L::Partial(0))::ElementTypes>();
}
{
using L = Layout<int8_t, int32_t, Int128>;
SameType<std::tuple<int8_t, int32_t, Int128>, L::ElementTypes>();
SameType<std::tuple<int8_t, int32_t, Int128>,
decltype(L::Partial())::ElementTypes>();
SameType<std::tuple<int8_t, int32_t, Int128>,
decltype(L::Partial(0))::ElementTypes>();
SameType<std::tuple<int8_t, int32_t, Int128>,
decltype(L::Partial(0, 0))::ElementTypes>();
SameType<std::tuple<int8_t, int32_t, Int128>,
decltype(L::Partial(0, 0, 0))::ElementTypes>();
}
}
TEST(Layout, OffsetByIndex) {
{
using L = Layout<int32_t>;
EXPECT_EQ(0, L::Partial().Offset<0>());
EXPECT_EQ(0, L::Partial(3).Offset<0>());
EXPECT_EQ(0, L(3).Offset<0>());
}
{
using L = Layout<int32_t, int32_t>;
EXPECT_EQ(0, L::Partial().Offset<0>());
EXPECT_EQ(0, L::Partial(3).Offset<0>());
EXPECT_EQ(12, L::Partial(3).Offset<1>());
EXPECT_EQ(0, L::Partial(3, 5).Offset<0>());
EXPECT_EQ(12, L::Partial(3, 5).Offset<1>());
EXPECT_EQ(0, L(3, 5).Offset<0>());
EXPECT_EQ(12, L(3, 5).Offset<1>());
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(0, L::Partial().Offset<0>());
EXPECT_EQ(0, L::Partial(0).Offset<0>());
EXPECT_EQ(0, L::Partial(0).Offset<1>());
EXPECT_EQ(0, L::Partial(1).Offset<0>());
EXPECT_EQ(4, L::Partial(1).Offset<1>());
EXPECT_EQ(0, L::Partial(5).Offset<0>());
EXPECT_EQ(8, L::Partial(5).Offset<1>());
EXPECT_EQ(0, L::Partial(0, 0).Offset<0>());
EXPECT_EQ(0, L::Partial(0, 0).Offset<1>());
EXPECT_EQ(0, L::Partial(0, 0).Offset<2>());
EXPECT_EQ(0, L::Partial(1, 0).Offset<0>());
EXPECT_EQ(4, L::Partial(1, 0).Offset<1>());
EXPECT_EQ(8, L::Partial(1, 0).Offset<2>());
EXPECT_EQ(0, L::Partial(5, 3).Offset<0>());
EXPECT_EQ(8, L::Partial(5, 3).Offset<1>());
EXPECT_EQ(24, L::Partial(5, 3).Offset<2>());
EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<0>());
EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<1>());
EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<2>());
EXPECT_EQ(0, L::Partial(1, 0, 0).Offset<0>());
EXPECT_EQ(4, L::Partial(1, 0, 0).Offset<1>());
EXPECT_EQ(8, L::Partial(1, 0, 0).Offset<2>());
EXPECT_EQ(0, L::Partial(5, 3, 1).Offset<0>());
EXPECT_EQ(24, L::Partial(5, 3, 1).Offset<2>());
EXPECT_EQ(8, L::Partial(5, 3, 1).Offset<1>());
EXPECT_EQ(0, L(5, 3, 1).Offset<0>());
EXPECT_EQ(24, L(5, 3, 1).Offset<2>());
EXPECT_EQ(8, L(5, 3, 1).Offset<1>());
}
}
TEST(Layout, OffsetByType) {
{
using L = Layout<int32_t>;
EXPECT_EQ(0, L::Partial().Offset<int32_t>());
EXPECT_EQ(0, L::Partial(3).Offset<int32_t>());
EXPECT_EQ(0, L(3).Offset<int32_t>());
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(0, L::Partial().Offset<int8_t>());
EXPECT_EQ(0, L::Partial(0).Offset<int8_t>());
EXPECT_EQ(0, L::Partial(0).Offset<int32_t>());
EXPECT_EQ(0, L::Partial(1).Offset<int8_t>());
EXPECT_EQ(4, L::Partial(1).Offset<int32_t>());
EXPECT_EQ(0, L::Partial(5).Offset<int8_t>());
EXPECT_EQ(8, L::Partial(5).Offset<int32_t>());
EXPECT_EQ(0, L::Partial(0, 0).Offset<int8_t>());
EXPECT_EQ(0, L::Partial(0, 0).Offset<int32_t>());
EXPECT_EQ(0, L::Partial(0, 0).Offset<Int128>());
EXPECT_EQ(0, L::Partial(1, 0).Offset<int8_t>());
EXPECT_EQ(4, L::Partial(1, 0).Offset<int32_t>());
EXPECT_EQ(8, L::Partial(1, 0).Offset<Int128>());
EXPECT_EQ(0, L::Partial(5, 3).Offset<int8_t>());
EXPECT_EQ(8, L::Partial(5, 3).Offset<int32_t>());
EXPECT_EQ(24, L::Partial(5, 3).Offset<Int128>());
EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<int8_t>());
EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<int32_t>());
EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<Int128>());
EXPECT_EQ(0, L::Partial(1, 0, 0).Offset<int8_t>());
EXPECT_EQ(4, L::Partial(1, 0, 0).Offset<int32_t>());
EXPECT_EQ(8, L::Partial(1, 0, 0).Offset<Int128>());
EXPECT_EQ(0, L::Partial(5, 3, 1).Offset<int8_t>());
EXPECT_EQ(24, L::Partial(5, 3, 1).Offset<Int128>());
EXPECT_EQ(8, L::Partial(5, 3, 1).Offset<int32_t>());
EXPECT_EQ(0, L(5, 3, 1).Offset<int8_t>());
EXPECT_EQ(24, L(5, 3, 1).Offset<Int128>());
EXPECT_EQ(8, L(5, 3, 1).Offset<int32_t>());
}
}
TEST(Layout, Offsets) {
{
using L = Layout<int32_t>;
EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0));
EXPECT_THAT(L::Partial(3).Offsets(), ElementsAre(0));
EXPECT_THAT(L(3).Offsets(), ElementsAre(0));
}
{
using L = Layout<int32_t, int32_t>;
EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0));
EXPECT_THAT(L::Partial(3).Offsets(), ElementsAre(0, 12));
EXPECT_THAT(L::Partial(3, 5).Offsets(), ElementsAre(0, 12));
EXPECT_THAT(L(3, 5).Offsets(), ElementsAre(0, 12));
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0));
EXPECT_THAT(L::Partial(1).Offsets(), ElementsAre(0, 4));
EXPECT_THAT(L::Partial(5).Offsets(), ElementsAre(0, 8));
EXPECT_THAT(L::Partial(0, 0).Offsets(), ElementsAre(0, 0, 0));
EXPECT_THAT(L::Partial(1, 0).Offsets(), ElementsAre(0, 4, 8));
EXPECT_THAT(L::Partial(5, 3).Offsets(), ElementsAre(0, 8, 24));
EXPECT_THAT(L::Partial(0, 0, 0).Offsets(), ElementsAre(0, 0, 0));
EXPECT_THAT(L::Partial(1, 0, 0).Offsets(), ElementsAre(0, 4, 8));
EXPECT_THAT(L::Partial(5, 3, 1).Offsets(), ElementsAre(0, 8, 24));
EXPECT_THAT(L(5, 3, 1).Offsets(), ElementsAre(0, 8, 24));
}
}
TEST(Layout, StaticOffsets) {
using L = Layout<int8_t, int32_t, Int128>;
{
using SL = L::WithStaticSizes<>;
EXPECT_THAT(SL::Partial().Offsets(), ElementsAre(0));
EXPECT_THAT(SL::Partial(5).Offsets(), ElementsAre(0, 8));
EXPECT_THAT(SL::Partial(5, 3, 1).Offsets(), ElementsAre(0, 8, 24));
EXPECT_THAT(SL(5, 3, 1).Offsets(), ElementsAre(0, 8, 24));
}
{
using SL = L::WithStaticSizes<5>;
EXPECT_THAT(SL::Partial().Offsets(), ElementsAre(0, 8));
EXPECT_THAT(SL::Partial(3).Offsets(), ElementsAre(0, 8, 24));
EXPECT_THAT(SL::Partial(3, 1).Offsets(), ElementsAre(0, 8, 24));
EXPECT_THAT(SL(3, 1).Offsets(), ElementsAre(0, 8, 24));
}
{
using SL = L::WithStaticSizes<5, 3>;
EXPECT_THAT(SL::Partial().Offsets(), ElementsAre(0, 8, 24));
EXPECT_THAT(SL::Partial(1).Offsets(), ElementsAre(0, 8, 24));
EXPECT_THAT(SL(1).Offsets(), ElementsAre(0, 8, 24));
}
{
using SL = L::WithStaticSizes<5, 3, 1>;
EXPECT_THAT(SL::Partial().Offsets(), ElementsAre(0, 8, 24));
EXPECT_THAT(SL().Offsets(), ElementsAre(0, 8, 24));
}
}
TEST(Layout, AllocSize) {
{
using L = Layout<int32_t>;
EXPECT_EQ(0, L::Partial(0).AllocSize());
EXPECT_EQ(12, L::Partial(3).AllocSize());
EXPECT_EQ(12, L(3).AllocSize());
}
{
using L = Layout<int32_t, int32_t>;
EXPECT_EQ(32, L::Partial(3, 5).AllocSize());
EXPECT_EQ(32, L(3, 5).AllocSize());
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(0, L::Partial(0, 0, 0).AllocSize());
EXPECT_EQ(8, L::Partial(1, 0, 0).AllocSize());
EXPECT_EQ(8, L::Partial(0, 1, 0).AllocSize());
EXPECT_EQ(16, L::Partial(0, 0, 1).AllocSize());
EXPECT_EQ(24, L::Partial(1, 1, 1).AllocSize());
EXPECT_EQ(136, L::Partial(3, 5, 7).AllocSize());
EXPECT_EQ(136, L(3, 5, 7).AllocSize());
}
}
TEST(Layout, StaticAllocSize) {
using L = Layout<int8_t, int32_t, Int128>;
{
using SL = L::WithStaticSizes<>;
EXPECT_EQ(136, SL::Partial(3, 5, 7).AllocSize());
EXPECT_EQ(136, SL(3, 5, 7).AllocSize());
}
{
using SL = L::WithStaticSizes<3>;
EXPECT_EQ(136, SL::Partial(5, 7).AllocSize());
EXPECT_EQ(136, SL(5, 7).AllocSize());
}
{
using SL = L::WithStaticSizes<3, 5>;
EXPECT_EQ(136, SL::Partial(7).AllocSize());
EXPECT_EQ(136, SL(7).AllocSize());
}
{
using SL = L::WithStaticSizes<3, 5, 7>;
EXPECT_EQ(136, SL::Partial().AllocSize());
EXPECT_EQ(136, SL().AllocSize());
}
}
TEST(Layout, SizeByIndex) {
{
using L = Layout<int32_t>;
EXPECT_EQ(0, L::Partial(0).Size<0>());
EXPECT_EQ(3, L::Partial(3).Size<0>());
EXPECT_EQ(3, L(3).Size<0>());
}
{
using L = Layout<int32_t, int32_t>;
EXPECT_EQ(0, L::Partial(0).Size<0>());
EXPECT_EQ(3, L::Partial(3).Size<0>());
EXPECT_EQ(3, L::Partial(3, 5).Size<0>());
EXPECT_EQ(5, L::Partial(3, 5).Size<1>());
EXPECT_EQ(3, L(3, 5).Size<0>());
EXPECT_EQ(5, L(3, 5).Size<1>());
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(3, L::Partial(3).Size<0>());
EXPECT_EQ(3, L::Partial(3, 5).Size<0>());
EXPECT_EQ(5, L::Partial(3, 5).Size<1>());
EXPECT_EQ(3, L::Partial(3, 5, 7).Size<0>());
EXPECT_EQ(5, L::Partial(3, 5, 7).Size<1>());
EXPECT_EQ(7, L::Partial(3, 5, 7).Size<2>());
EXPECT_EQ(3, L(3, 5, 7).Size<0>());
EXPECT_EQ(5, L(3, 5, 7).Size<1>());
EXPECT_EQ(7, L(3, 5, 7).Size<2>());
}
}
TEST(Layout, SizeByType) {
{
using L = Layout<int32_t>;
EXPECT_EQ(0, L::Partial(0).Size<int32_t>());
EXPECT_EQ(3, L::Partial(3).Size<int32_t>());
EXPECT_EQ(3, L(3).Size<int32_t>());
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(3, L::Partial(3).Size<int8_t>());
EXPECT_EQ(3, L::Partial(3, 5).Size<int8_t>());
EXPECT_EQ(5, L::Partial(3, 5).Size<int32_t>());
EXPECT_EQ(3, L::Partial(3, 5, 7).Size<int8_t>());
EXPECT_EQ(5, L::Partial(3, 5, 7).Size<int32_t>());
EXPECT_EQ(7, L::Partial(3, 5, 7).Size<Int128>());
EXPECT_EQ(3, L(3, 5, 7).Size<int8_t>());
EXPECT_EQ(5, L(3, 5, 7).Size<int32_t>());
EXPECT_EQ(7, L(3, 5, 7).Size<Int128>());
}
}
TEST(Layout, Sizes) {
{
using L = Layout<int32_t>;
EXPECT_THAT(L::Partial().Sizes(), ElementsAre());
EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3));
EXPECT_THAT(L(3).Sizes(), ElementsAre(3));
}
{
using L = Layout<int32_t, int32_t>;
EXPECT_THAT(L::Partial().Sizes(), ElementsAre());
EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3));
EXPECT_THAT(L::Partial(3, 5).Sizes(), ElementsAre(3, 5));
EXPECT_THAT(L(3, 5).Sizes(), ElementsAre(3, 5));
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_THAT(L::Partial().Sizes(), ElementsAre());
EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3));
EXPECT_THAT(L::Partial(3, 5).Sizes(), ElementsAre(3, 5));
EXPECT_THAT(L::Partial(3, 5, 7).Sizes(), ElementsAre(3, 5, 7));
EXPECT_THAT(L(3, 5, 7).Sizes(), ElementsAre(3, 5, 7));
}
}
TEST(Layout, StaticSize) {
using L = Layout<int8_t, int32_t, Int128>;
{
using SL = L::WithStaticSizes<>;
EXPECT_THAT(SL::Partial().Sizes(), ElementsAre());
EXPECT_THAT(SL::Partial(3).Size<0>(), 3);
EXPECT_THAT(SL::Partial(3).Size<int8_t>(), 3);
EXPECT_THAT(SL::Partial(3).Sizes(), ElementsAre(3));
EXPECT_THAT(SL::Partial(3, 5, 7).Size<0>(), 3);
EXPECT_THAT(SL::Partial(3, 5, 7).Size<int8_t>(), 3);
EXPECT_THAT(SL::Partial(3, 5, 7).Size<2>(), 7);
EXPECT_THAT(SL::Partial(3, 5, 7).Size<Int128>(), 7);
EXPECT_THAT(SL::Partial(3, 5, 7).Sizes(), ElementsAre(3, 5, 7));
EXPECT_THAT(SL(3, 5, 7).Size<0>(), 3);
EXPECT_THAT(SL(3, 5, 7).Size<int8_t>(), 3);
EXPECT_THAT(SL(3, 5, 7).Size<2>(), 7);
EXPECT_THAT(SL(3, 5, 7).Size<Int128>(), 7);
EXPECT_THAT(SL(3, 5, 7).Sizes(), ElementsAre(3, 5, 7));
}
}
TEST(Layout, PointerByIndex) {
alignas(max_align_t) const unsigned char p[100] = {0};
{
using L = Layout<int32_t>;
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<0>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3).Pointer<0>(p))));
}
{
using L = Layout<int32_t, int32_t>;
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<0>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
EXPECT_EQ(12,
Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<1>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<0>(p))));
EXPECT_EQ(
12, Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3, 5).Pointer<0>(p))));
EXPECT_EQ(12, Distance(p, Type<const int32_t*>(L(3, 5).Pointer<1>(p))));
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<0>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<0>(p))));
EXPECT_EQ(4,
Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<0>(p))));
EXPECT_EQ(8,
Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<1>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<0>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int32_t*>(L::Partial(0, 0).Pointer<1>(p))));
EXPECT_EQ(0,
Distance(p, Type<const Int128*>(L::Partial(0, 0).Pointer<2>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<0>(p))));
EXPECT_EQ(
4, Distance(p, Type<const int32_t*>(L::Partial(1, 0).Pointer<1>(p))));
EXPECT_EQ(8,
Distance(p, Type<const Int128*>(L::Partial(1, 0).Pointer<2>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<0>(p))));
EXPECT_EQ(
8, Distance(p, Type<const int32_t*>(L::Partial(5, 3).Pointer<1>(p))));
EXPECT_EQ(24,
Distance(p, Type<const Int128*>(L::Partial(5, 3).Pointer<2>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int8_t*>(L::Partial(0, 0, 0).Pointer<0>(p))));
EXPECT_EQ(
0,
Distance(p, Type<const int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
EXPECT_EQ(
0, Distance(p, Type<const Int128*>(L::Partial(0, 0, 0).Pointer<2>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int8_t*>(L::Partial(1, 0, 0).Pointer<0>(p))));
EXPECT_EQ(
4,
Distance(p, Type<const int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
EXPECT_EQ(
8, Distance(p, Type<const Int128*>(L::Partial(1, 0, 0).Pointer<2>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int8_t*>(L::Partial(5, 3, 1).Pointer<0>(p))));
EXPECT_EQ(
24,
Distance(p, Type<const Int128*>(L::Partial(5, 3, 1).Pointer<2>(p))));
EXPECT_EQ(
8,
Distance(p, Type<const int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L(5, 3, 1).Pointer<0>(p))));
EXPECT_EQ(24, Distance(p, Type<const Int128*>(L(5, 3, 1).Pointer<2>(p))));
EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<1>(p))));
}
}
TEST(Layout, PointerByType) {
alignas(max_align_t) const unsigned char p[100] = {0};
{
using L = Layout<int32_t>;
EXPECT_EQ(
0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<int32_t>(p))));
EXPECT_EQ(
0,
Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3).Pointer<int32_t>(p))));
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(
0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<int8_t>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<int8_t>(p))));
EXPECT_EQ(
0,
Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<int8_t>(p))));
EXPECT_EQ(
4,
Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<int8_t>(p))));
EXPECT_EQ(
8,
Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
EXPECT_EQ(
0,
Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(
L::Partial(0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(
0,
Distance(p, Type<const Int128*>(L::Partial(0, 0).Pointer<Int128>(p))));
EXPECT_EQ(
0,
Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
EXPECT_EQ(4, Distance(p, Type<const int32_t*>(
L::Partial(1, 0).Pointer<int32_t>(p))));
EXPECT_EQ(
8,
Distance(p, Type<const Int128*>(L::Partial(1, 0).Pointer<Int128>(p))));
EXPECT_EQ(
0,
Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
EXPECT_EQ(8, Distance(p, Type<const int32_t*>(
L::Partial(5, 3).Pointer<int32_t>(p))));
EXPECT_EQ(
24,
Distance(p, Type<const Int128*>(L::Partial(5, 3).Pointer<Int128>(p))));
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(
L::Partial(0, 0, 0).Pointer<int8_t>(p))));
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(
L::Partial(0, 0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<const Int128*>(
L::Partial(0, 0, 0).Pointer<Int128>(p))));
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(
L::Partial(1, 0, 0).Pointer<int8_t>(p))));
EXPECT_EQ(4, Distance(p, Type<const int32_t*>(
L::Partial(1, 0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(8, Distance(p, Type<const Int128*>(
L::Partial(1, 0, 0).Pointer<Int128>(p))));
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(
L::Partial(5, 3, 1).Pointer<int8_t>(p))));
EXPECT_EQ(24, Distance(p, Type<const Int128*>(
L::Partial(5, 3, 1).Pointer<Int128>(p))));
EXPECT_EQ(8, Distance(p, Type<const int32_t*>(
L::Partial(5, 3, 1).Pointer<int32_t>(p))));
EXPECT_EQ(24,
Distance(p, Type<const Int128*>(L(5, 3, 1).Pointer<Int128>(p))));
EXPECT_EQ(
8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<int32_t>(p))));
}
}
TEST(Layout, MutablePointerByIndex) {
alignas(max_align_t) unsigned char p[100] = {0};
{
using L = Layout<int32_t>;
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial().Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(3).Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L(3).Pointer<0>(p))));
}
{
using L = Layout<int32_t, int32_t>;
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial().Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(3).Pointer<0>(p))));
EXPECT_EQ(12, Distance(p, Type<int32_t*>(L::Partial(3).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(3, 5).Pointer<0>(p))));
EXPECT_EQ(12, Distance(p, Type<int32_t*>(L::Partial(3, 5).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L(3, 5).Pointer<0>(p))));
EXPECT_EQ(12, Distance(p, Type<int32_t*>(L(3, 5).Pointer<1>(p))));
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial().Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0).Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1).Pointer<0>(p))));
EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5).Pointer<0>(p))));
EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0, 0).Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0, 0).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<Int128*>(L::Partial(0, 0).Pointer<2>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1, 0).Pointer<0>(p))));
EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1, 0).Pointer<1>(p))));
EXPECT_EQ(8, Distance(p, Type<Int128*>(L::Partial(1, 0).Pointer<2>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5, 3).Pointer<0>(p))));
EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<1>(p))));
EXPECT_EQ(24, Distance(p, Type<Int128*>(L::Partial(5, 3).Pointer<2>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<0>(p))));
EXPECT_EQ(0,
Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<Int128*>(L::Partial(0, 0, 0).Pointer<2>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<0>(p))));
EXPECT_EQ(4,
Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
EXPECT_EQ(8, Distance(p, Type<Int128*>(L::Partial(1, 0, 0).Pointer<2>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<0>(p))));
EXPECT_EQ(24,
Distance(p, Type<Int128*>(L::Partial(5, 3, 1).Pointer<2>(p))));
EXPECT_EQ(8,
Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L(5, 3, 1).Pointer<0>(p))));
EXPECT_EQ(24, Distance(p, Type<Int128*>(L(5, 3, 1).Pointer<2>(p))));
EXPECT_EQ(8, Distance(p, Type<int32_t*>(L(5, 3, 1).Pointer<1>(p))));
}
}
TEST(Layout, MutablePointerByType) {
alignas(max_align_t) unsigned char p[100] = {0};
{
using L = Layout<int32_t>;
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial().Pointer<int32_t>(p))));
EXPECT_EQ(0,
Distance(p, Type<int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L(3).Pointer<int32_t>(p))));
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial().Pointer<int8_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0).Pointer<int8_t>(p))));
EXPECT_EQ(0,
Distance(p, Type<int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1).Pointer<int8_t>(p))));
EXPECT_EQ(4,
Distance(p, Type<int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5).Pointer<int8_t>(p))));
EXPECT_EQ(8,
Distance(p, Type<int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
EXPECT_EQ(0,
Distance(p, Type<int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
EXPECT_EQ(
0, Distance(p, Type<int32_t*>(L::Partial(0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(0,
Distance(p, Type<Int128*>(L::Partial(0, 0).Pointer<Int128>(p))));
EXPECT_EQ(0,
Distance(p, Type<int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
EXPECT_EQ(
4, Distance(p, Type<int32_t*>(L::Partial(1, 0).Pointer<int32_t>(p))));
EXPECT_EQ(8,
Distance(p, Type<Int128*>(L::Partial(1, 0).Pointer<Int128>(p))));
EXPECT_EQ(0,
Distance(p, Type<int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
EXPECT_EQ(
8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<int32_t>(p))));
EXPECT_EQ(24,
Distance(p, Type<Int128*>(L::Partial(5, 3).Pointer<Int128>(p))));
EXPECT_EQ(
0, Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<int8_t>(p))));
EXPECT_EQ(
0,
Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(
0, Distance(p, Type<Int128*>(L::Partial(0, 0, 0).Pointer<Int128>(p))));
EXPECT_EQ(
0, Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<int8_t>(p))));
EXPECT_EQ(
4,
Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(
8, Distance(p, Type<Int128*>(L::Partial(1, 0, 0).Pointer<Int128>(p))));
EXPECT_EQ(
0, Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<int8_t>(p))));
EXPECT_EQ(
24, Distance(p, Type<Int128*>(L::Partial(5, 3, 1).Pointer<Int128>(p))));
EXPECT_EQ(
8,
Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L(5, 3, 1).Pointer<int8_t>(p))));
EXPECT_EQ(24, Distance(p, Type<Int128*>(L(5, 3, 1).Pointer<Int128>(p))));
EXPECT_EQ(8, Distance(p, Type<int32_t*>(L(5, 3, 1).Pointer<int32_t>(p))));
}
}
TEST(Layout, Pointers) {
alignas(max_align_t) const unsigned char p[100] = {0};
using L = Layout<int8_t, int8_t, Int128>;
{
const auto x = L::Partial();
EXPECT_EQ(std::make_tuple(x.Pointer<0>(p)),
Type<std::tuple<const int8_t*>>(x.Pointers(p)));
}
{
const auto x = L::Partial(1);
EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)),
(Type<std::tuple<const int8_t*, const int8_t*>>(x.Pointers(p))));
}
{
const auto x = L::Partial(1, 2);
EXPECT_EQ(
std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
(Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
x.Pointers(p))));
}
{
const auto x = L::Partial(1, 2, 3);
EXPECT_EQ(
std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
(Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
x.Pointers(p))));
}
{
const L x(1, 2, 3);
EXPECT_EQ(
std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
(Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
x.Pointers(p))));
}
}
TEST(Layout, MutablePointers) {
alignas(max_align_t) unsigned char p[100] = {0};
using L = Layout<int8_t, int8_t, Int128>;
{
const auto x = L::Partial();
EXPECT_EQ(std::make_tuple(x.Pointer<0>(p)),
Type<std::tuple<int8_t*>>(x.Pointers(p)));
}
{
const auto x = L::Partial(1);
EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)),
(Type<std::tuple<int8_t*, int8_t*>>(x.Pointers(p))));
}
{
const auto x = L::Partial(1, 2);
EXPECT_EQ(
std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
(Type<std::tuple<int8_t*, int8_t*, Int128*>>(x.Pointers(p))));
}
{
const auto x = L::Partial(1, 2, 3);
EXPECT_EQ(
std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
(Type<std::tuple<int8_t*, int8_t*, Int128*>>(x.Pointers(p))));
}
{
const L x(1, 2, 3);
EXPECT_EQ(
std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
(Type<std::tuple<int8_t*, int8_t*, Int128*>>(x.Pointers(p))));
}
}
TEST(Layout, StaticPointers) {
alignas(max_align_t) const unsigned char p[100] = {0};
using L = Layout<int8_t, int8_t, Int128>;
{
const auto x = L::WithStaticSizes<>::Partial();
EXPECT_EQ(std::make_tuple(x.Pointer<0>(p)),
Type<std::tuple<const int8_t*>>(x.Pointers(p)));
}
{
const auto x = L::WithStaticSizes<>::Partial(1);
EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)),
(Type<std::tuple<const int8_t*, const int8_t*>>(x.Pointers(p))));
}
{
const auto x = L::WithStaticSizes<1>::Partial();
EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)),
(Type<std::tuple<const int8_t*, const int8_t*>>(x.Pointers(p))));
}
{
const auto x = L::WithStaticSizes<>::Partial(1, 2, 3);
EXPECT_EQ(
std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
(Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
x.Pointers(p))));
}
{
const auto x = L::WithStaticSizes<1>::Partial(2, 3);
EXPECT_EQ(
std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
(Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
x.Pointers(p))));
}
{
const auto x = L::WithStaticSizes<1, 2>::Partial(3);
EXPECT_EQ(
std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
(Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
x.Pointers(p))));
}
{
const auto x = L::WithStaticSizes<1, 2, 3>::Partial();
EXPECT_EQ(
std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
(Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
x.Pointers(p))));
}
{
const L::WithStaticSizes<1, 2, 3> x;
EXPECT_EQ(
std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
(Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
x.Pointers(p))));
}
}
TEST(Layout, SliceByIndexSize) {
alignas(max_align_t) const unsigned char p[100] = {0};
{
using L = Layout<int32_t>;
EXPECT_EQ(0, L::Partial(0).Slice<0>(p).size());
EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
EXPECT_EQ(3, L(3).Slice<0>(p).size());
}
{
using L = Layout<int32_t, int32_t>;
EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size());
EXPECT_EQ(5, L(3, 5).Slice<1>(p).size());
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
EXPECT_EQ(3, L::Partial(3, 5).Slice<0>(p).size());
EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size());
EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<0>(p).size());
EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<1>(p).size());
EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<2>(p).size());
EXPECT_EQ(3, L(3, 5, 7).Slice<0>(p).size());
EXPECT_EQ(5, L(3, 5, 7).Slice<1>(p).size());
EXPECT_EQ(7, L(3, 5, 7).Slice<2>(p).size());
}
}
TEST(Layout, SliceByTypeSize) {
alignas(max_align_t) const unsigned char p[100] = {0};
{
using L = Layout<int32_t>;
EXPECT_EQ(0, L::Partial(0).Slice<int32_t>(p).size());
EXPECT_EQ(3, L::Partial(3).Slice<int32_t>(p).size());
EXPECT_EQ(3, L(3).Slice<int32_t>(p).size());
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(3, L::Partial(3).Slice<int8_t>(p).size());
EXPECT_EQ(3, L::Partial(3, 5).Slice<int8_t>(p).size());
EXPECT_EQ(5, L::Partial(3, 5).Slice<int32_t>(p).size());
EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<int8_t>(p).size());
EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<int32_t>(p).size());
EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<Int128>(p).size());
EXPECT_EQ(3, L(3, 5, 7).Slice<int8_t>(p).size());
EXPECT_EQ(5, L(3, 5, 7).Slice<int32_t>(p).size());
EXPECT_EQ(7, L(3, 5, 7).Slice<Int128>(p).size());
}
}
TEST(Layout, MutableSliceByIndexSize) {
alignas(max_align_t) unsigned char p[100] = {0};
{
using L = Layout<int32_t>;
EXPECT_EQ(0, L::Partial(0).Slice<0>(p).size());
EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
EXPECT_EQ(3, L(3).Slice<0>(p).size());
}
{
using L = Layout<int32_t, int32_t>;
EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size());
EXPECT_EQ(5, L(3, 5).Slice<1>(p).size());
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
EXPECT_EQ(3, L::Partial(3, 5).Slice<0>(p).size());
EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size());
EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<0>(p).size());
EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<1>(p).size());
EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<2>(p).size());
EXPECT_EQ(3, L(3, 5, 7).Slice<0>(p).size());
EXPECT_EQ(5, L(3, 5, 7).Slice<1>(p).size());
EXPECT_EQ(7, L(3, 5, 7).Slice<2>(p).size());
}
}
TEST(Layout, MutableSliceByTypeSize) {
alignas(max_align_t) unsigned char p[100] = {0};
{
using L = Layout<int32_t>;
EXPECT_EQ(0, L::Partial(0).Slice<int32_t>(p).size());
EXPECT_EQ(3, L::Partial(3).Slice<int32_t>(p).size());
EXPECT_EQ(3, L(3).Slice<int32_t>(p).size());
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(3, L::Partial(3).Slice<int8_t>(p).size());
EXPECT_EQ(3, L::Partial(3, 5).Slice<int8_t>(p).size());
EXPECT_EQ(5, L::Partial(3, 5).Slice<int32_t>(p).size());
EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<int8_t>(p).size());
EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<int32_t>(p).size());
EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<Int128>(p).size());
EXPECT_EQ(3, L(3, 5, 7).Slice<int8_t>(p).size());
EXPECT_EQ(5, L(3, 5, 7).Slice<int32_t>(p).size());
EXPECT_EQ(7, L(3, 5, 7).Slice<Int128>(p).size());
}
}
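// With WithStaticSizes<3, 5>, the first two slice sizes are fixed at compile time; only the last field's size comes from the runtime argument to Partial().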
TEST(Layout, StaticSliceSize) {
alignas(max_align_t) const unsigned char cp[100] = {0};
alignas(max_align_t) unsigned char p[100] = {0};
using L = Layout<int8_t, int32_t, Int128>;
using SL = L::WithStaticSizes<3, 5>;
EXPECT_EQ(3, SL::Partial().Slice<0>(cp).size());
EXPECT_EQ(3, SL::Partial().Slice<int8_t>(cp).size());
EXPECT_EQ(3, SL::Partial(7).Slice<0>(cp).size());
EXPECT_EQ(3, SL::Partial(7).Slice<int8_t>(cp).size());
EXPECT_EQ(5, SL::Partial().Slice<1>(cp).size());
EXPECT_EQ(5, SL::Partial().Slice<int32_t>(cp).size());
EXPECT_EQ(5, SL::Partial(7).Slice<1>(cp).size());
EXPECT_EQ(5, SL::Partial(7).Slice<int32_t>(cp).size());
EXPECT_EQ(7, SL::Partial(7).Slice<2>(cp).size());
EXPECT_EQ(7, SL::Partial(7).Slice<Int128>(cp).size());
EXPECT_EQ(3, SL::Partial().Slice<0>(p).size());
EXPECT_EQ(3, SL::Partial().Slice<int8_t>(p).size());
EXPECT_EQ(3, SL::Partial(7).Slice<0>(p).size());
EXPECT_EQ(3, SL::Partial(7).Slice<int8_t>(p).size());
EXPECT_EQ(5, SL::Partial().Slice<1>(p).size());
EXPECT_EQ(5, SL::Partial().Slice<int32_t>(p).size());
EXPECT_EQ(5, SL::Partial(7).Slice<1>(p).size());
EXPECT_EQ(5, SL::Partial(7).Slice<int32_t>(p).size());
EXPECT_EQ(7, SL::Partial(7).Slice<2>(p).size());
EXPECT_EQ(7, SL::Partial(7).Slice<Int128>(p).size());
}
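// Slice<N>(p).data() must point at field N's byte offset within the buffer.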
TEST(Layout, SliceByIndexData) {
alignas(max_align_t) const unsigned char p[100] = {0};
{
using L = Layout<int32_t>;
EXPECT_EQ(
0, Distance(
p, Type<Span<const int32_t>>(L::Partial(0).Slice<0>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
EXPECT_EQ(0,
Distance(p, Type<Span<const int32_t>>(L(3).Slice<0>(p)).data()));
}
{
using L = Layout<int32_t, int32_t>;
EXPECT_EQ(
0, Distance(
p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(
p, Type<Span<const int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
EXPECT_EQ(
12,
Distance(
p, Type<Span<const int32_t>>(L::Partial(3, 5).Slice<1>(p)).data()));
EXPECT_EQ(
0, Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<0>(p)).data()));
EXPECT_EQ(
12, Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<1>(p)).data()));
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(
0, Distance(
p, Type<Span<const int8_t>>(L::Partial(0).Slice<0>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<const int8_t>>(L::Partial(1).Slice<0>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<const int8_t>>(L::Partial(5).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(
p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(
p, Type<Span<const int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
EXPECT_EQ(
0,
Distance(
p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
EXPECT_EQ(
4,
Distance(
p, Type<Span<const int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
EXPECT_EQ(
0,
Distance(
p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
EXPECT_EQ(
8,
Distance(
p, Type<Span<const int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<const int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<const int32_t>>(L::Partial(0, 0, 0).Slice<1>(p)).data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<const Int128>>(L::Partial(0, 0, 0).Slice<2>(p)).data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<const int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
EXPECT_EQ(
4,
Distance(
p,
Type<Span<const int32_t>>(L::Partial(1, 0, 0).Slice<1>(p)).data()));
EXPECT_EQ(
8,
Distance(
p,
Type<Span<const Int128>>(L::Partial(1, 0, 0).Slice<2>(p)).data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<const int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
EXPECT_EQ(
24,
Distance(
p,
Type<Span<const Int128>>(L::Partial(5, 3, 1).Slice<2>(p)).data()));
EXPECT_EQ(
8,
Distance(
p,
Type<Span<const int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<const int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
EXPECT_EQ(
24,
Distance(p, Type<Span<const Int128>>(L(5, 3, 1).Slice<2>(p)).data()));
EXPECT_EQ(
8,
Distance(p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
}
}
TEST(Layout, SliceByTypeData) {
alignas(max_align_t) const unsigned char p[100] = {0};
{
using L = Layout<int32_t>;
EXPECT_EQ(
0,
Distance(
p,
Type<Span<const int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<const int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<const int32_t>>(L(3).Slice<int32_t>(p)).data()));
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(
0,
Distance(
p,
Type<Span<const int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<const int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<const int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<int8_t>(p))
.data()));
EXPECT_EQ(0, Distance(p, Type<Span<const int32_t>>(
L::Partial(0, 0).Slice<int32_t>(p))
.data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<int8_t>(p))
.data()));
EXPECT_EQ(4, Distance(p, Type<Span<const int32_t>>(
L::Partial(1, 0).Slice<int32_t>(p))
.data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<int8_t>(p))
.data()));
EXPECT_EQ(8, Distance(p, Type<Span<const int32_t>>(
L::Partial(5, 3).Slice<int32_t>(p))
.data()));
EXPECT_EQ(0, Distance(p, Type<Span<const int8_t>>(
L::Partial(0, 0, 0).Slice<int8_t>(p))
.data()));
EXPECT_EQ(0, Distance(p, Type<Span<const int32_t>>(
L::Partial(0, 0, 0).Slice<int32_t>(p))
.data()));
EXPECT_EQ(0, Distance(p, Type<Span<const Int128>>(
L::Partial(0, 0, 0).Slice<Int128>(p))
.data()));
EXPECT_EQ(0, Distance(p, Type<Span<const int8_t>>(
L::Partial(1, 0, 0).Slice<int8_t>(p))
.data()));
EXPECT_EQ(4, Distance(p, Type<Span<const int32_t>>(
L::Partial(1, 0, 0).Slice<int32_t>(p))
.data()));
EXPECT_EQ(8, Distance(p, Type<Span<const Int128>>(
L::Partial(1, 0, 0).Slice<Int128>(p))
.data()));
EXPECT_EQ(0, Distance(p, Type<Span<const int8_t>>(
L::Partial(5, 3, 1).Slice<int8_t>(p))
.data()));
EXPECT_EQ(24, Distance(p, Type<Span<const Int128>>(
L::Partial(5, 3, 1).Slice<Int128>(p))
.data()));
EXPECT_EQ(8, Distance(p, Type<Span<const int32_t>>(
L::Partial(5, 3, 1).Slice<int32_t>(p))
.data()));
EXPECT_EQ(
0,
Distance(p,
Type<Span<const int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
EXPECT_EQ(
24,
Distance(p,
Type<Span<const Int128>>(L(5, 3, 1).Slice<Int128>(p)).data()));
EXPECT_EQ(
8,
Distance(
p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
}
}
TEST(Layout, MutableSliceByIndexData) {
alignas(max_align_t) unsigned char p[100] = {0};
{
using L = Layout<int32_t>;
EXPECT_EQ(
0, Distance(p, Type<Span<int32_t>>(L::Partial(0).Slice<0>(p)).data()));
EXPECT_EQ(
0, Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
EXPECT_EQ(0, Distance(p, Type<Span<int32_t>>(L(3).Slice<0>(p)).data()));
}
{
using L = Layout<int32_t, int32_t>;
EXPECT_EQ(
0, Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
EXPECT_EQ(
12,
Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<1>(p)).data()));
EXPECT_EQ(0, Distance(p, Type<Span<int32_t>>(L(3, 5).Slice<0>(p)).data()));
EXPECT_EQ(12, Distance(p, Type<Span<int32_t>>(L(3, 5).Slice<1>(p)).data()));
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(
0, Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<0>(p)).data()));
EXPECT_EQ(
0, Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<0>(p)).data()));
EXPECT_EQ(
0, Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
EXPECT_EQ(
4,
Distance(p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
EXPECT_EQ(
8,
Distance(p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<1>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<Int128>>(L::Partial(0, 0, 0).Slice<2>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
EXPECT_EQ(
4, Distance(
p, Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<1>(p)).data()));
EXPECT_EQ(
8, Distance(
p, Type<Span<Int128>>(L::Partial(1, 0, 0).Slice<2>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
EXPECT_EQ(
24, Distance(
p, Type<Span<Int128>>(L::Partial(5, 3, 1).Slice<2>(p)).data()));
EXPECT_EQ(
8, Distance(
p, Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data()));
EXPECT_EQ(0,
Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
EXPECT_EQ(24,
Distance(p, Type<Span<Int128>>(L(5, 3, 1).Slice<2>(p)).data()));
EXPECT_EQ(8,
Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
}
}
TEST(Layout, MutableSliceByTypeData) {
alignas(max_align_t) unsigned char p[100] = {0};
{
using L = Layout<int32_t>;
EXPECT_EQ(
0, Distance(
p, Type<Span<int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
EXPECT_EQ(0,
Distance(p, Type<Span<int32_t>>(L(3).Slice<int32_t>(p)).data()));
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(
0,
Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0,
Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0,
Distance(p,
Type<Span<int8_t>>(L::Partial(0, 0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
Distance(p,
Type<Span<int8_t>>(L::Partial(1, 0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
4,
Distance(
p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
Distance(p,
Type<Span<int8_t>>(L::Partial(5, 3).Slice<int8_t>(p)).data()));
EXPECT_EQ(
8,
Distance(
p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<Int128>>(L::Partial(0, 0, 0).Slice<Int128>(p)).data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
4,
Distance(
p,
Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
8,
Distance(
p,
Type<Span<Int128>>(L::Partial(1, 0, 0).Slice<Int128>(p)).data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<int8_t>(p)).data()));
EXPECT_EQ(
24,
Distance(
p,
Type<Span<Int128>>(L::Partial(5, 3, 1).Slice<Int128>(p)).data()));
EXPECT_EQ(
8,
Distance(
p,
Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0, Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
EXPECT_EQ(
24,
Distance(p, Type<Span<Int128>>(L(5, 3, 1).Slice<Int128>(p)).data()));
EXPECT_EQ(
8,
Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
}
}
TEST(Layout, StaticSliceData) {
alignas(max_align_t) const unsigned char cp[100] = {0};
alignas(max_align_t) unsigned char p[100] = {0};
using L = Layout<int8_t, int32_t, Int128>;
using SL = L::WithStaticSizes<3, 5>;
EXPECT_EQ(0, Distance(cp, SL::Partial().Slice<0>(cp).data()));
EXPECT_EQ(0, Distance(cp, SL::Partial().Slice<int8_t>(cp).data()));
EXPECT_EQ(0, Distance(cp, SL::Partial(7).Slice<0>(cp).data()));
EXPECT_EQ(0, Distance(cp, SL::Partial(7).Slice<int8_t>(cp).data()));
EXPECT_EQ(4, Distance(cp, SL::Partial().Slice<1>(cp).data()));
EXPECT_EQ(4, Distance(cp, SL::Partial().Slice<int32_t>(cp).data()));
EXPECT_EQ(4, Distance(cp, SL::Partial(7).Slice<1>(cp).data()));
EXPECT_EQ(4, Distance(cp, SL::Partial(7).Slice<int32_t>(cp).data()));
EXPECT_EQ(24, Distance(cp, SL::Partial(7).Slice<2>(cp).data()));
EXPECT_EQ(24, Distance(cp, SL::Partial(7).Slice<Int128>(cp).data()));
EXPECT_EQ(0, Distance(p, SL::Partial().Slice<0>(p).data()));
EXPECT_EQ(0, Distance(p, SL::Partial().Slice<int8_t>(p).data()));
EXPECT_EQ(0, Distance(p, SL::Partial(7).Slice<0>(p).data()));
EXPECT_EQ(0, Distance(p, SL::Partial(7).Slice<int8_t>(p).data()));
EXPECT_EQ(4, Distance(p, SL::Partial().Slice<1>(p).data()));
EXPECT_EQ(4, Distance(p, SL::Partial().Slice<int32_t>(p).data()));
EXPECT_EQ(4, Distance(p, SL::Partial(7).Slice<1>(p).data()));
EXPECT_EQ(4, Distance(p, SL::Partial(7).Slice<int32_t>(p).data()));
EXPECT_EQ(24, Distance(p, SL::Partial(7).Slice<2>(p).data()));
EXPECT_EQ(24, Distance(p, SL::Partial(7).Slice<Int128>(p).data()));
}
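// IsSameSlice compares spans structurally (same data pointer and size); Tuple() applies per-element matchers to a std::tuple.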
MATCHER_P(IsSameSlice, slice, "") {
return arg.size() == slice.size() && arg.data() == slice.data();
}
template <typename... M>
class TupleMatcher {
public:
explicit TupleMatcher(M... matchers) : matchers_(std::move(matchers)...) {}
template <typename Tuple>
  bool MatchAndExplain(const Tuple& p,
                       testing::MatchResultListener*) const {
static_assert(std::tuple_size<Tuple>::value == sizeof...(M), "");
return MatchAndExplainImpl(
p, absl::make_index_sequence<std::tuple_size<Tuple>::value>{});
}
void DescribeTo(::std::ostream* os) const {}
void DescribeNegationTo(::std::ostream* os) const {}
private:
template <typename Tuple, size_t... Is>
bool MatchAndExplainImpl(const Tuple& p, absl::index_sequence<Is...>) const {
return std::min(
{true, testing::SafeMatcherCast<
const typename std::tuple_element<Is, Tuple>::type&>(
std::get<Is>(matchers_))
.Matches(std::get<Is>(p))...});
}
std::tuple<M...> matchers_;
};
template <typename... M>
testing::PolymorphicMatcher<TupleMatcher<M...>> Tuple(M... matchers) {
return testing::MakePolymorphicMatcher(
TupleMatcher<M...>(std::move(matchers)...));
}
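// Slices() must return a tuple whose elements are exactly the spans produced by the individual Slice<N>() calls.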
TEST(Layout, Slices) {
alignas(max_align_t) const unsigned char p[100] = {0};
using L = Layout<int8_t, int8_t, Int128>;
{
const auto x = L::Partial();
EXPECT_THAT(Type<std::tuple<>>(x.Slices(p)), Tuple());
}
{
const auto x = L::Partial(1);
EXPECT_THAT(Type<std::tuple<Span<const int8_t>>>(x.Slices(p)),
Tuple(IsSameSlice(x.Slice<0>(p))));
}
{
const auto x = L::Partial(1, 2);
EXPECT_THAT(
(Type<std::tuple<Span<const int8_t>, Span<const int8_t>>>(x.Slices(p))),
Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p))));
}
{
const auto x = L::Partial(1, 2, 3);
EXPECT_THAT((Type<std::tuple<Span<const int8_t>, Span<const int8_t>,
Span<const Int128>>>(x.Slices(p))),
Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
IsSameSlice(x.Slice<2>(p))));
}
{
const L x(1, 2, 3);
EXPECT_THAT((Type<std::tuple<Span<const int8_t>, Span<const int8_t>,
Span<const Int128>>>(x.Slices(p))),
Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
IsSameSlice(x.Slice<2>(p))));
}
}
TEST(Layout, MutableSlices) {
alignas(max_align_t) unsigned char p[100] = {0};
using L = Layout<int8_t, int8_t, Int128>;
{
const auto x = L::Partial();
EXPECT_THAT(Type<std::tuple<>>(x.Slices(p)), Tuple());
}
{
const auto x = L::Partial(1);
EXPECT_THAT(Type<std::tuple<Span<int8_t>>>(x.Slices(p)),
Tuple(IsSameSlice(x.Slice<0>(p))));
}
{
const auto x = L::Partial(1, 2);
EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>>>(x.Slices(p))),
Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p))));
}
{
const auto x = L::Partial(1, 2, 3);
EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(
x.Slices(p))),
Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
IsSameSlice(x.Slice<2>(p))));
}
{
const L x(1, 2, 3);
EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(
x.Slices(p))),
Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
IsSameSlice(x.Slice<2>(p))));
}
}
TEST(Layout, StaticSlices) {
alignas(max_align_t) const unsigned char cp[100] = {0};
alignas(max_align_t) unsigned char p[100] = {0};
using SL = Layout<int8_t, int8_t, Int128>::WithStaticSizes<1, 2>;
{
const auto x = SL::Partial();
EXPECT_THAT(
(Type<std::tuple<Span<const int8_t>, Span<const int8_t>>>(
x.Slices(cp))),
Tuple(IsSameSlice(x.Slice<0>(cp)), IsSameSlice(x.Slice<1>(cp))));
EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>>>(x.Slices(p))),
Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p))));
}
{
const auto x = SL::Partial(3);
EXPECT_THAT((Type<std::tuple<Span<const int8_t>, Span<const int8_t>,
Span<const Int128>>>(x.Slices(cp))),
Tuple(IsSameSlice(x.Slice<0>(cp)), IsSameSlice(x.Slice<1>(cp)),
IsSameSlice(x.Slice<2>(cp))));
EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(
x.Slices(p))),
Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
IsSameSlice(x.Slice<2>(p))));
}
{
const SL x(3);
EXPECT_THAT((Type<std::tuple<Span<const int8_t>, Span<const int8_t>,
Span<const Int128>>>(x.Slices(cp))),
Tuple(IsSameSlice(x.Slice<0>(cp)), IsSameSlice(x.Slice<1>(cp)),
IsSameSlice(x.Slice<2>(cp))));
EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(
x.Slices(p))),
Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
IsSameSlice(x.Slice<2>(p))));
}
}
TEST(Layout, UnalignedTypes) {
constexpr Layout<unsigned char, unsigned char, unsigned char> x(1, 2, 3);
alignas(max_align_t) unsigned char p[x.AllocSize() + 1];
EXPECT_THAT(x.Pointers(p + 1), Tuple(p + 1, p + 2, p + 4));
}
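// Aligned<unsigned char, 8> forces the second array to start on an 8-byte boundary, so AllocSize() includes the padding in between.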
TEST(Layout, CustomAlignment) {
constexpr Layout<unsigned char, Aligned<unsigned char, 8>> x(1, 2);
alignas(max_align_t) unsigned char p[x.AllocSize()];
EXPECT_EQ(10, x.AllocSize());
EXPECT_THAT(x.Pointers(p), Tuple(p + 0, p + 8));
}
TEST(Layout, OverAligned) {
constexpr size_t M = alignof(max_align_t);
constexpr Layout<unsigned char, Aligned<unsigned char, 2 * M>> x(1, 3);
#ifdef __GNUC__
__attribute__((aligned(2 * M))) unsigned char p[x.AllocSize()];
#else
alignas(2 * M) unsigned char p[x.AllocSize()];
#endif
EXPECT_EQ(2 * M + 3, x.AllocSize());
EXPECT_THAT(x.Pointers(p), Tuple(p + 0, p + 2 * M));
}
TEST(Layout, Alignment) {
static_assert(Layout<int8_t>::Alignment() == 1, "");
static_assert(Layout<int32_t>::Alignment() == 4, "");
static_assert(Layout<Int64>::Alignment() == 8, "");
static_assert(Layout<Aligned<int8_t, 64>>::Alignment() == 64, "");
static_assert(Layout<int8_t, int32_t, Int64>::Alignment() == 8, "");
static_assert(Layout<int8_t, Int64, int32_t>::Alignment() == 8, "");
static_assert(Layout<int32_t, int8_t, Int64>::Alignment() == 8, "");
static_assert(Layout<int32_t, Int64, int8_t>::Alignment() == 8, "");
static_assert(Layout<Int64, int8_t, int32_t>::Alignment() == 8, "");
static_assert(Layout<Int64, int32_t, int8_t>::Alignment() == 8, "");
static_assert(Layout<Int64, int32_t, int8_t>::Alignment() == 8, "");
static_assert(
Layout<Aligned<int8_t, 64>>::WithStaticSizes<>::Alignment() == 64, "");
static_assert(
Layout<Aligned<int8_t, 64>>::WithStaticSizes<2>::Alignment() == 64, "");
}
TEST(Layout, StaticAlignment) {
static_assert(Layout<int8_t>::WithStaticSizes<>::Alignment() == 1, "");
static_assert(Layout<int8_t>::WithStaticSizes<0>::Alignment() == 1, "");
static_assert(Layout<int8_t>::WithStaticSizes<7>::Alignment() == 1, "");
static_assert(Layout<int32_t>::WithStaticSizes<>::Alignment() == 4, "");
static_assert(Layout<int32_t>::WithStaticSizes<0>::Alignment() == 4, "");
static_assert(Layout<int32_t>::WithStaticSizes<3>::Alignment() == 4, "");
static_assert(
Layout<Aligned<int8_t, 64>>::WithStaticSizes<>::Alignment() == 64, "");
static_assert(
Layout<Aligned<int8_t, 64>>::WithStaticSizes<0>::Alignment() == 64, "");
static_assert(
Layout<Aligned<int8_t, 64>>::WithStaticSizes<2>::Alignment() == 64, "");
static_assert(
Layout<int32_t, Int64, int8_t>::WithStaticSizes<>::Alignment() == 8, "");
static_assert(
Layout<int32_t, Int64, int8_t>::WithStaticSizes<0, 0, 0>::Alignment() ==
8,
"");
static_assert(
Layout<int32_t, Int64, int8_t>::WithStaticSizes<1, 1, 1>::Alignment() ==
8,
"");
}
TEST(Layout, ConstexprPartial) {
constexpr size_t M = alignof(max_align_t);
constexpr Layout<unsigned char, Aligned<unsigned char, 2 * M>> x(1, 3);
static_assert(x.Partial(1).template Offset<1>() == 2 * M, "");
}
TEST(Layout, StaticConstexpr) {
constexpr size_t M = alignof(max_align_t);
using L = Layout<unsigned char, Aligned<unsigned char, 2 * M>>;
using SL = L::WithStaticSizes<1, 3>;
constexpr SL x;
static_assert(x.Offset<1>() == 2 * M, "");
}
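// ASan-based helpers: check that PoisonPadding() poisons exactly the listed padding regions and leaves all other bytes unpoisoned.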
struct Region {
size_t from;
size_t to;
};
void ExpectRegionPoisoned(const unsigned char* p, size_t n, bool poisoned) {
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
for (size_t i = 0; i != n; ++i) {
EXPECT_EQ(poisoned, __asan_address_is_poisoned(p + i));
}
#endif
}
template <size_t N>
void ExpectPoisoned(const unsigned char (&buf)[N],
std::initializer_list<Region> reg) {
size_t prev = 0;
for (const Region& r : reg) {
ExpectRegionPoisoned(buf + prev, r.from - prev, false);
ExpectRegionPoisoned(buf + r.from, r.to - r.from, true);
prev = r.to;
}
ExpectRegionPoisoned(buf + prev, N - prev, false);
}
TEST(Layout, PoisonPadding) {
using L = Layout<int8_t, Int64, int32_t, Int128>;
constexpr size_t n = L::Partial(1, 2, 3, 4).AllocSize();
{
constexpr auto x = L::Partial();
alignas(max_align_t) const unsigned char c[n] = {};
x.PoisonPadding(c);
EXPECT_EQ(x.Slices(c), x.Slices(c));
ExpectPoisoned(c, {});
}
{
constexpr auto x = L::Partial(1);
alignas(max_align_t) const unsigned char c[n] = {};
x.PoisonPadding(c);
EXPECT_EQ(x.Slices(c), x.Slices(c));
ExpectPoisoned(c, {{1, 8}});
}
{
constexpr auto x = L::Partial(1, 2);
alignas(max_align_t) const unsigned char c[n] = {};
x.PoisonPadding(c);
EXPECT_EQ(x.Slices(c), x.Slices(c));
ExpectPoisoned(c, {{1, 8}});
}
{
constexpr auto x = L::Partial(1, 2, 3);
alignas(max_align_t) const unsigned char c[n] = {};
x.PoisonPadding(c);
EXPECT_EQ(x.Slices(c), x.Slices(c));
ExpectPoisoned(c, {{1, 8}, {36, 40}});
}
{
constexpr auto x = L::Partial(1, 2, 3, 4);
alignas(max_align_t) const unsigned char c[n] = {};
x.PoisonPadding(c);
EXPECT_EQ(x.Slices(c), x.Slices(c));
ExpectPoisoned(c, {{1, 8}, {36, 40}});
}
{
constexpr L x(1, 2, 3, 4);
alignas(max_align_t) const unsigned char c[n] = {};
x.PoisonPadding(c);
EXPECT_EQ(x.Slices(c), x.Slices(c));
ExpectPoisoned(c, {{1, 8}, {36, 40}});
}
}
TEST(Layout, StaticPoisonPadding) {
using L = Layout<int8_t, Int64, int32_t, Int128>;
using SL = L::WithStaticSizes<1, 2>;
constexpr size_t n = L::Partial(1, 2, 3, 4).AllocSize();
{
constexpr auto x = SL::Partial();
alignas(max_align_t) const unsigned char c[n] = {};
x.PoisonPadding(c);
EXPECT_EQ(x.Slices(c), x.Slices(c));
ExpectPoisoned(c, {{1, 8}});
}
{
constexpr auto x = SL::Partial(3);
alignas(max_align_t) const unsigned char c[n] = {};
x.PoisonPadding(c);
EXPECT_EQ(x.Slices(c), x.Slices(c));
ExpectPoisoned(c, {{1, 8}, {36, 40}});
}
{
constexpr auto x = SL::Partial(3, 4);
alignas(max_align_t) const unsigned char c[n] = {};
x.PoisonPadding(c);
EXPECT_EQ(x.Slices(c), x.Slices(c));
ExpectPoisoned(c, {{1, 8}, {36, 40}});
}
{
constexpr SL x(3, 4);
alignas(max_align_t) const unsigned char c[n] = {};
x.PoisonPadding(c);
EXPECT_EQ(x.Slices(c), x.Slices(c));
ExpectPoisoned(c, {{1, 8}, {36, 40}});
}
}
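// DebugString() renders each field as @offset<type>(element size), appending [count] once the field's size is known.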
TEST(Layout, DebugString) {
{
constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial();
EXPECT_EQ("@0<signed char>(1)", x.DebugString());
}
{
constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1);
EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)", x.DebugString());
}
{
constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2);
EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)",
x.DebugString());
}
{
constexpr auto x =
Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3);
EXPECT_EQ(
"@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
"@16" +
Int128::Name() + "(16)",
x.DebugString());
}
{
constexpr auto x =
Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3, 4);
EXPECT_EQ(
"@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
"@16" +
Int128::Name() + "(16)[4]",
x.DebugString());
}
{
constexpr Layout<int8_t, int32_t, int8_t, Int128> x(1, 2, 3, 4);
EXPECT_EQ(
"@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
"@16" +
Int128::Name() + "(16)[4]",
x.DebugString());
}
}
TEST(Layout, StaticDebugString) {
{
constexpr auto x =
Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<>::Partial();
EXPECT_EQ("@0<signed char>(1)", x.DebugString());
}
{
constexpr auto x =
Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<>::Partial(1);
EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)", x.DebugString());
}
{
constexpr auto x =
Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<1>::Partial();
EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)", x.DebugString());
}
{
constexpr auto x =
Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<>::Partial(1,
2);
EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)",
x.DebugString());
}
{
constexpr auto x =
Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<1>::Partial(2);
EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)",
x.DebugString());
}
{
constexpr auto x = Layout<int8_t, int32_t, int8_t,
Int128>::WithStaticSizes<1, 2>::Partial();
EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)",
x.DebugString());
}
{
constexpr auto x = Layout<int8_t, int32_t, int8_t,
Int128>::WithStaticSizes<1, 2, 3, 4>::Partial();
EXPECT_EQ(
"@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
"@16" +
Int128::Name() + "(16)[4]",
x.DebugString());
}
{
constexpr Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<1, 2, 3,
4>
x;
EXPECT_EQ(
"@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
"@16" +
Int128::Name() + "(16)[4]",
x.DebugString());
}
}
TEST(Layout, CharTypes) {
constexpr Layout<int32_t> x(1);
alignas(max_align_t) char c[x.AllocSize()] = {};
alignas(max_align_t) unsigned char uc[x.AllocSize()] = {};
alignas(max_align_t) signed char sc[x.AllocSize()] = {};
alignas(max_align_t) const char cc[x.AllocSize()] = {};
alignas(max_align_t) const unsigned char cuc[x.AllocSize()] = {};
alignas(max_align_t) const signed char csc[x.AllocSize()] = {};
Type<int32_t*>(x.Pointer<0>(c));
Type<int32_t*>(x.Pointer<0>(uc));
Type<int32_t*>(x.Pointer<0>(sc));
Type<const int32_t*>(x.Pointer<0>(cc));
Type<const int32_t*>(x.Pointer<0>(cuc));
Type<const int32_t*>(x.Pointer<0>(csc));
Type<int32_t*>(x.Pointer<int32_t>(c));
Type<int32_t*>(x.Pointer<int32_t>(uc));
Type<int32_t*>(x.Pointer<int32_t>(sc));
Type<const int32_t*>(x.Pointer<int32_t>(cc));
Type<const int32_t*>(x.Pointer<int32_t>(cuc));
Type<const int32_t*>(x.Pointer<int32_t>(csc));
Type<std::tuple<int32_t*>>(x.Pointers(c));
Type<std::tuple<int32_t*>>(x.Pointers(uc));
Type<std::tuple<int32_t*>>(x.Pointers(sc));
Type<std::tuple<const int32_t*>>(x.Pointers(cc));
Type<std::tuple<const int32_t*>>(x.Pointers(cuc));
Type<std::tuple<const int32_t*>>(x.Pointers(csc));
Type<Span<int32_t>>(x.Slice<0>(c));
Type<Span<int32_t>>(x.Slice<0>(uc));
Type<Span<int32_t>>(x.Slice<0>(sc));
Type<Span<const int32_t>>(x.Slice<0>(cc));
Type<Span<const int32_t>>(x.Slice<0>(cuc));
Type<Span<const int32_t>>(x.Slice<0>(csc));
Type<std::tuple<Span<int32_t>>>(x.Slices(c));
Type<std::tuple<Span<int32_t>>>(x.Slices(uc));
Type<std::tuple<Span<int32_t>>>(x.Slices(sc));
Type<std::tuple<Span<const int32_t>>>(x.Slices(cc));
Type<std::tuple<Span<const int32_t>>>(x.Slices(cuc));
Type<std::tuple<Span<const int32_t>>>(x.Slices(csc));
}
TEST(Layout, ConstElementType) {
constexpr Layout<const int32_t> x(1);
alignas(int32_t) char c[x.AllocSize()] = {};
const char* cc = c;
const int32_t* p = reinterpret_cast<const int32_t*>(cc);
EXPECT_EQ(alignof(int32_t), x.Alignment());
EXPECT_EQ(0, x.Offset<0>());
EXPECT_EQ(0, x.Offset<const int32_t>());
EXPECT_THAT(x.Offsets(), ElementsAre(0));
EXPECT_EQ(1, x.Size<0>());
EXPECT_EQ(1, x.Size<const int32_t>());
EXPECT_THAT(x.Sizes(), ElementsAre(1));
EXPECT_EQ(sizeof(int32_t), x.AllocSize());
EXPECT_EQ(p, Type<const int32_t*>(x.Pointer<0>(c)));
EXPECT_EQ(p, Type<const int32_t*>(x.Pointer<0>(cc)));
EXPECT_EQ(p, Type<const int32_t*>(x.Pointer<const int32_t>(c)));
EXPECT_EQ(p, Type<const int32_t*>(x.Pointer<const int32_t>(cc)));
EXPECT_THAT(Type<std::tuple<const int32_t*>>(x.Pointers(c)), Tuple(p));
EXPECT_THAT(Type<std::tuple<const int32_t*>>(x.Pointers(cc)), Tuple(p));
EXPECT_THAT(Type<Span<const int32_t>>(x.Slice<0>(c)),
IsSameSlice(Span<const int32_t>(p, 1)));
EXPECT_THAT(Type<Span<const int32_t>>(x.Slice<0>(cc)),
IsSameSlice(Span<const int32_t>(p, 1)));
EXPECT_THAT(Type<Span<const int32_t>>(x.Slice<const int32_t>(c)),
IsSameSlice(Span<const int32_t>(p, 1)));
EXPECT_THAT(Type<Span<const int32_t>>(x.Slice<const int32_t>(cc)),
IsSameSlice(Span<const int32_t>(p, 1)));
EXPECT_THAT(Type<std::tuple<Span<const int32_t>>>(x.Slices(c)),
Tuple(IsSameSlice(Span<const int32_t>(p, 1))));
EXPECT_THAT(Type<std::tuple<Span<const int32_t>>>(x.Slices(cc)),
Tuple(IsSameSlice(Span<const int32_t>(p, 1))));
}
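// End-to-end usage examples: a length-prefixed string stored in a single allocation described by Layout<size_t, char>.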
namespace example {
class CompactString {
public:
CompactString(const char* s = "") {
const size_t size = strlen(s);
const L layout(1, size + 1);
p_.reset(new unsigned char[layout.AllocSize()]);
layout.PoisonPadding(p_.get());
*layout.Pointer<size_t>(p_.get()) = size;
memcpy(layout.Pointer<char>(p_.get()), s, size + 1);
}
size_t size() const {
return *L::Partial().Pointer<size_t>(p_.get());
}
const char* c_str() const {
return L::Partial(1).Pointer<char>(p_.get());
}
private:
using L = Layout<size_t, char>;
std::unique_ptr<unsigned char[]> p_;
};
TEST(CompactString, Works) {
CompactString s = "hello";
EXPECT_EQ(5, s.size());
EXPECT_STREQ("hello", s.c_str());
}
class StaticCompactString {
public:
StaticCompactString(const char* s = "") {
const size_t size = strlen(s);
const SL layout(size + 1);
p_.reset(new unsigned char[layout.AllocSize()]);
layout.PoisonPadding(p_.get());
*layout.Pointer<size_t>(p_.get()) = size;
memcpy(layout.Pointer<char>(p_.get()), s, size + 1);
}
size_t size() const { return *SL::Partial().Pointer<size_t>(p_.get()); }
const char* c_str() const { return SL::Partial().Pointer<char>(p_.get()); }
private:
using SL = Layout<size_t, char>::WithStaticSizes<1>;
std::unique_ptr<unsigned char[]> p_;
};
TEST(StaticCompactString, Works) {
StaticCompactString s = "hello";
EXPECT_EQ(5, s.size());
EXPECT_STREQ("hello", s.c_str());
}
}
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/container/internal/layout.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/container/internal/layout_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
555aed98-8f55-421e-badb-302e32069668 | cpp | tensorflow/tensorflow | reduce_scatter_decomposer | third_party/xla/xla/service/reduce_scatter_decomposer.cc | third_party/xla/xla/service/reduce_scatter_decomposer_test.cc | #include "xla/service/reduce_scatter_decomposer.h"
#include <sys/types.h>
#include <limits>
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/literal_util.h"
#include "xla/service/collective_decomposer_utils.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape_util.h"
namespace xla {
absl::StatusOr<bool> ReduceScatterDecomposer::Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) {
bool changed = false;
int64_t next_channel_id = hlo_query::NextChannelId(*module);
for (HloComputation *computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction *instruction :
computation->MakeInstructionPostOrder()) {
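      // Only array-shaped reduce-scatter instructions are decomposed; tuple-shaped (variadic) ones are left untouched.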
auto *rs = DynCast<HloReduceScatterInstruction>(instruction);
if (!rs || !rs->shape().IsArray()) {
continue;
}
std::optional<int64_t> channel_id;
if (rs->channel_id()) {
channel_id = next_channel_id++;
}
if (should_decompose_ && !should_decompose_(rs)) {
continue;
}
VLOG(2) << "Decompose: " << rs->ToString();
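      // Lower the reduce-scatter to an all-reduce over the full operand, followed by a dynamic-slice that extracts this participant's shard along the scatter dimension.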
HloComputation *apply_clone = module->AddComputationAndUnifyNamesAndIds(
rs->to_apply()->Clone(), false);
HloInstruction *ar =
computation->AddInstruction(HloInstruction::CreateAllReduce(
rs->operand(0)->shape(), rs->operands(), apply_clone,
rs->device_list(), rs->constrain_layout(), channel_id,
rs->use_global_device_ids()));
apply_clone->SetCollectiveCallInstruction(ar);
TF_ASSIGN_OR_RETURN(
CollectiveOpGroupMode group_mode,
GetCollectiveOpGroupMode(rs->channel_id().has_value(),
rs->use_global_device_ids()));
TF_ASSIGN_OR_RETURN(
std::vector<HloInstruction *> start_indices,
CreateStartIndicesForCollectiveDecomposition(
group_mode, rs->replica_groups(), rs->shape(),
rs->scatter_dimension(), computation, update_layout_));
HloInstruction *ds =
computation->AddInstruction(HloInstruction::CreateDynamicSlice(
rs->shape(), ar, start_indices, rs->shape().dimensions()));
TF_RETURN_IF_ERROR(rs->ReplaceAllUsesWith(ds));
TF_RETURN_IF_ERROR(computation->RemoveInstruction(rs));
changed = true;
}
}
return changed;
}
} | #include "xla/service/reduce_scatter_decomposer.h"
#include <utility>
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal_util.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
class ReduceScatterDecomposerTest : public HloTestBase {
public:
enum class PassAction {
kNoChange,
kTrivialGroups,
kTableLookup,
};
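  // Runs the decomposer and, unless kNoChange is expected, verifies that the root becomes a dynamic-slice of an all-reduce whose offset along the shard dimension is the participant id (used directly or looked up in a constant table) times shard_size.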
void RunPass(
absl::string_view hlo_module, PassAction action,
CollectiveOpGroupMode mode = CollectiveOpGroupMode::kCrossReplica,
int64_t shard_size = 0, int64_t shard_dimension = 0,
int64_t replica_count = 2,
std::function<bool(const HloInstruction *)> should_decompose =
[](const HloInstruction *) { return true; }) {
const int64_t partition_count = 2;
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndReturnVerifiedModule(hlo_module, replica_count,
partition_count));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ReduceScatterDecomposer(nullptr,
should_decompose)
.Run(module.get()));
if (action == PassAction::kNoChange) {
ASSERT_FALSE(changed);
return;
}
ASSERT_TRUE(changed);
Literal multiplier = LiteralUtil::CreateR0<uint32_t>(shard_size);
::testing::Matcher<const ::xla::HloInstruction *> id_matcher = [&]() {
switch (mode) {
case CollectiveOpGroupMode::kCrossPartition:
return op::PartitionId();
case CollectiveOpGroupMode::kCrossReplica:
return op::ReplicaId();
case CollectiveOpGroupMode::kCrossReplicaAndPartition:
return op::ReplicaId();
case CollectiveOpGroupMode::kFlattenedID: {
return op::Add(
op::Multiply(op::ReplicaId(),
op::Constant(LiteralUtil::CreateR0<uint32_t>(
partition_count))),
op::PartitionId());
}
}
}();
auto root = module->entry_computation()->root_instruction();
const Shape &shape = root->shape();
::testing::Matcher<const ::xla::HloInstruction *> slice_index = id_matcher;
if (action == PassAction::kTableLookup) {
slice_index = op::Reshape(op::DynamicSlice(op::Constant(), id_matcher));
}
if (mode == CollectiveOpGroupMode::kCrossReplicaAndPartition) {
slice_index = op::Add(
op::Multiply(
slice_index,
op::Constant(LiteralUtil::CreateR0<uint32_t>(partition_count))),
op::PartitionId());
}
auto zero_matcher = op::Constant(LiteralUtil::Zero(U32));
std::vector<::testing::Matcher<const ::xla::HloInstruction *>> ds_operands(
shape.rank() + 1, zero_matcher);
ds_operands[0] = op::AllReduce(op::Parameter(0));
ds_operands[shard_dimension + 1] =
op::Multiply(slice_index, op::Constant(std::move(multiplier)));
EXPECT_THAT(root, op::DynamicSlice(ds_operands));
}
};
TEST_F(ReduceScatterDecomposerTest, TrivialReplicaID) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
ROOT rs = f32[4] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={0}, to_apply=sum
}
)";
RunPass(hlo_string, PassAction::kTrivialGroups,
CollectiveOpGroupMode::kCrossReplica,
4);
}
TEST_F(ReduceScatterDecomposerTest, TableLookupReplicaId) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
ROOT rs = f32[4] reduce-scatter(p0), replica_groups={{1, 0}}, dimensions={0}, to_apply=sum
}
)";
RunPass(hlo_string, PassAction::kTableLookup,
CollectiveOpGroupMode::kCrossReplica,
4);
}
TEST_F(ReduceScatterDecomposerTest, TrivialCrossReplicaAndPartition) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[4, 8] parameter(0)
ROOT rs = f32[4, 2] reduce-scatter(p0), replica_groups={{0, 1}}, channel_id=1, dimensions={1}, to_apply=sum
}
)";
RunPass(hlo_string, PassAction::kTrivialGroups,
CollectiveOpGroupMode::kCrossReplicaAndPartition,
2, 1);
}
TEST_F(ReduceScatterDecomposerTest,
TrivialCrossReplicaAndPartition_SingleReplica) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[4, 8] parameter(0)
ROOT rs = f32[4, 4] reduce-scatter(p0), replica_groups={{0}}, channel_id=1, dimensions={1}, to_apply=sum
}
)";
RunPass(hlo_string, PassAction::kTrivialGroups,
CollectiveOpGroupMode::kCrossPartition,
4, 1, 1);
}
TEST_F(ReduceScatterDecomposerTest, TableLookupFlattenedId) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[4, 8] parameter(0)
ROOT rs = f32[4, 2] reduce-scatter(p0), replica_groups={{1,0, 3, 2}}, channel_id=1, dimensions={1}, to_apply=sum, use_global_device_ids=true
}
)";
RunPass(hlo_string, PassAction::kTableLookup,
CollectiveOpGroupMode::kFlattenedID,
2, 1);
}
TEST_F(ReduceScatterDecomposerTest, NoChange) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[4, 8] parameter(0)
ROOT rs = (f32[4, 2], f32[4,2]) reduce-scatter(p0, p0), replica_groups={{1,0, 3, 2}}, channel_id=1, dimensions={1}, to_apply=sum, use_global_device_ids=true
}
)";
RunPass(hlo_string, PassAction::kNoChange);
}
TEST_F(ReduceScatterDecomposerTest, NoChangeWithShouldDecompose) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[4, 8] parameter(0)
ROOT rs = f32[4, 4] reduce-scatter(p0), replica_groups={{0,1}, {2,3}}, channel_id=1, dimensions={1}, to_apply=sum, use_global_device_ids=true
}
)";
RunPass(hlo_string, PassAction::kNoChange,
CollectiveOpGroupMode::kCrossReplica,
0, 0,
2, [](const HloInstruction *) { return false; });
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_scatter_decomposer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_scatter_decomposer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c035c168-6335-49bb-a102-f0b941fe3711 | cpp | tensorflow/tensorflow | hash_utils | tensorflow/core/data/hash_utils.cc | tensorflow/core/data/hash_utils_test.cc | #include "tensorflow/core/data/hash_utils.h"
#include <array>
#include <memory>
#include <queue>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/proto_serialization.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/regexp.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/util/work_sharder.h"
namespace tensorflow {
namespace data {
namespace {
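// Seed operands of these ops are excluded from the hash so that graphs differing only in random seeds hash identically.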
constexpr std::array<const char*, 3> kOpsWithSeed = {
"AnonymousRandomSeedGenerator",
"ShuffleDataset",
"ShuffleAndRepeatDataset"
};
constexpr char kSeedInputName[] = "seed";
constexpr char kSeed2InputName[] = "seed2";
constexpr char kSeedGeneratorInputName[] = "seed_generator";
template <std::size_t SIZE>
bool IsNodeOfType(const NodeDef& node,
const std::array<const char*, SIZE>& op_types) {
for (const auto& type : op_types) {
if (MatchesAnyVersion(type, node.op())) {
return true;
}
}
return false;
}
Status GetSink(const GraphDef& graph_def, const NodeDef** sink) {
  *sink = nullptr;
  for (auto& node : graph_def.node()) {
    if (node.op() == kRetvalOp) {
      *sink = &node;
      break;
    }
  }
  // Check the dereferenced output; the `sink` pointer itself is never null.
  if (*sink == nullptr) {
    return errors::Internal("Cannot find sink node for dataset graph.");
  }
  return absl::OkStatus();
}
Status ShouldIgnoreInput(const NodeDef& node, int i, bool* result) {
*result = false;
if (IsNodeOfType(node, kOpsWithSeed)) {
const OpRegistrationData* reg;
auto status = OpRegistry::Global()->LookUp(node.op(), ®);
if (status.ok()) {
if (reg->op_def.input_arg_size() > i) {
const std::string input_arg_name = reg->op_def.input_arg(i).name();
if (input_arg_name == kSeedInputName ||
input_arg_name == kSeed2InputName ||
input_arg_name == kSeedGeneratorInputName) {
VLOG(2) << "Ignoring arg: " << input_arg_name
<< " from node: " << node.name();
*result = true;
return absl::OkStatus();
}
}
} else if (errors::IsNotFound(status)) {
LOG(WARNING) << "Cannot find " << node.op()
<< " in global op registry, so cannot determine which "
"inputs are seeds.";
} else {
return status;
}
}
return absl::OkStatus();
}
Status ParseInputNodeName(absl::string_view input_name,
absl::string_view* node_name,
absl::string_view* suffix, bool* is_control_input) {
if (input_name[0] == '^') {
*node_name = input_name.substr(1);
*is_control_input = true;
return absl::OkStatus();
}
std::pair<absl::string_view, absl::string_view> node_spec =
absl::StrSplit(input_name, absl::MaxSplits(':', 1));
*node_name = node_spec.first;
*suffix = node_spec.second;
*is_control_input = false;
return absl::OkStatus();
}
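// Computes a structural fingerprint of the graph rooted at a given node, ignoring node names, seed inputs, colocation attributes, and the "metadata" attribute of dataset ops.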
class GraphHasher {
using NodeCache = absl::flat_hash_map<const NodeDef*, uint64>;
using FunctionCache = absl::flat_hash_map<const FunctionDef*, uint64>;
using AttrCache =
absl::flat_hash_map<std::pair<const NodeDef*, bool>, uint64>;
public:
explicit GraphHasher(const GraphDef* graph, const NodeDef* root,
const FunctionLibraryDefinition* flib)
: graph_(graph), root_(root), flib_(flib) {
node_cache_ = std::make_shared<NodeCache>();
function_cache_ = std::make_shared<FunctionCache>();
attr_cache_ = std::make_shared<AttrCache>();
}
explicit GraphHasher(const GraphDef* graph, const NodeDef* root,
const FunctionLibraryDefinition* flib,
std::shared_ptr<NodeCache> node_cache,
std::shared_ptr<FunctionCache> function_cache,
std::shared_ptr<AttrCache> attr_cache)
: graph_(graph),
root_(root),
flib_(flib),
node_cache_(node_cache),
function_cache_(function_cache),
attr_cache_(attr_cache) {}
Status Init() {
absl::flat_hash_map<absl::string_view, const NodeDef*> node_def_by_name;
node_def_by_name.reserve(graph_->node_size());
for (const auto& node : graph_->node()) {
auto result = node_def_by_name.emplace(node.name(), &node);
if (TF_PREDICT_FALSE(!result.second)) {
auto node_name_formatter =
[](std::string* out,
const decltype(node_def_by_name)::value_type& item) {
absl::StrAppend(out, "'", item.first, "'");
};
return errors::Internal(
"Encountered graph with duplicate node name '", node.name(),
"' in [", absl::StrJoin(node_def_by_name, ",", node_name_formatter),
"]");
}
}
absl::flat_hash_set<absl::string_view> visited;
std::queue<const NodeDef*> bfs_queue;
bfs_queue.push(root_);
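    // BFS from the root: record each node's data and control inputs (skipping seeds) and remember cycle-forming back edges so that hashing terminates.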
while (!bfs_queue.empty()) {
const NodeDef* node = bfs_queue.front();
bfs_queue.pop();
if (visited.contains(node->name())) {
continue;
}
visited.insert(node->name());
NodeRep node_rep;
for (int i = 0; i < node->input_size(); ++i) {
DCHECK_GT(node->input(i).length(), 0);
bool should_ignore_input = false;
TF_RETURN_IF_ERROR(ShouldIgnoreInput(*node, i, &should_ignore_input));
if (should_ignore_input) continue;
absl::string_view node_name, suffix;
bool is_control_input;
TF_RETURN_IF_ERROR(ParseInputNodeName(node->input(i), &node_name,
&suffix, &is_control_input));
auto* input_node = gtl::FindPtrOrNull(node_def_by_name, node_name);
if (input_node == nullptr) {
return errors::Internal("Graph node [", node->name(), "] has input [",
node_name, "] that doesn't exist in graph");
}
if (visited.contains(node_name)) {
EdgeRep cycle_edge(node, input_node);
cycle_forming_edges_.insert(cycle_edge.GetHash());
continue;
}
if (is_control_input) {
node_rep.node_control_inputs.push_back(input_node);
} else {
node_rep.node_inputs.push_back(std::make_pair(input_node, suffix));
bfs_queue.push(input_node);
}
}
nodes_[node] = node_rep;
}
return absl::OkStatus();
}
Status HashRoot(uint64* hash) { return HashNode(root_, hash); }
Status CheckEqual(GraphHasher* that) {
return CheckNodesEqual(root_, that, that->root_);
}
private:
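  // A node's hash combines its non-input hash (op, attrs, device), its control inputs, and its data inputs; inputs reached over cycle-forming edges contribute only their non-input hash.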
Status HashNode(const NodeDef* node, uint64* hash) {
auto it = node_cache_->find(node);
if (it != node_cache_->end()) {
*hash = it->second;
return absl::OkStatus();
}
NodeRep* node_rep = gtl::FindOrNull(nodes_, node);
if (node_rep == nullptr) {
return errors::InvalidArgument("Could not find node: ", node->name());
}
uint64 non_input_hash;
TF_RETURN_IF_ERROR(
        HashNodeNonInput(node, /*hash_functions=*/true, &non_input_hash));
uint64 control_inputs_hash;
TF_RETURN_IF_ERROR(
HashControlInputs(node_rep->node_control_inputs, &control_inputs_hash));
uint64 inputs_hash = 0;
for (const auto& input : node_rep->node_inputs) {
uint64 node_hash = 0;
EdgeRep edge(node, input.first);
if (cycle_forming_edges_.contains(edge.GetHash())) {
TF_RETURN_IF_ERROR(
            HashNodeNonInput(input.first, /*hash_functions=*/true, &node_hash));
} else {
TF_RETURN_IF_ERROR(HashNode(input.first, &node_hash));
}
inputs_hash = Hash64Combine(
inputs_hash, Hash64Combine(node_hash, Hash64(input.second.data(),
input.second.size())));
}
*hash = Hash64Combine(non_input_hash,
Hash64Combine(control_inputs_hash, inputs_hash));
auto result = node_cache_->emplace(node, *hash);
if (!result.second) {
return errors::Internal(absl::StrCat("Computed the hash for node ",
node->DebugString(), " twice!"));
}
return absl::OkStatus();
}
Status CheckNodesEqual(const NodeDef* this_node, GraphHasher* that,
const NodeDef* that_node) {
Status s = CheckNodesEqualHelper(this_node, that, that_node);
if (!s.ok()) {
return errors::FailedPrecondition("Nodes ", this_node->name(), " and ",
that_node->name(),
" are not the same:\n", s);
}
return s;
}
Status CheckNodesEqualHelper(const NodeDef* this_node, GraphHasher* that,
const NodeDef* that_node) {
TF_RETURN_IF_ERROR(CheckNodesEqualNonInput(this_node, that, that_node,
                                               /*compare_functions=*/true));
TF_RETURN_IF_ERROR(
CheckControlInputsEqual(nodes_[this_node].node_control_inputs, that,
that->nodes_[that_node].node_control_inputs));
auto& this_node_inputs = nodes_[this_node].node_inputs;
auto& that_node_inputs = that->nodes_[that_node].node_inputs;
if (this_node_inputs.size() != that_node_inputs.size()) {
return errors::FailedPrecondition(
"Nodes have different numbers of node inputs: ",
this_node_inputs.size(), " vs ", that_node_inputs.size());
}
for (int i = 0; i < this_node_inputs.size(); ++i) {
const NodeDef* this_input = this_node_inputs[i].first;
const NodeDef* that_input = that_node_inputs[i].first;
if (is_cycle_forming_edge(this_node, this_input)) {
TF_RETURN_IF_ERROR(CheckNodesEqualNonInput(this_input, that, that_input,
                                                    /*compare_functions=*/true));
} else {
TF_RETURN_IF_ERROR(CheckNodesEqual(this_input, that, that_input));
}
absl::string_view this_input_suffix = this_node_inputs[i].second;
absl::string_view that_input_suffix = that_node_inputs[i].second;
if (this_input_suffix != that_input_suffix) {
return errors::FailedPrecondition(
"Node inputs ", this_input->name(), " and ", that_input->name(),
" have different suffixes: ", this_input_suffix, " vs ",
that_input_suffix);
}
}
return absl::OkStatus();
}
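  // Hashes everything about a node except its inputs: the op (or the function it calls), its relevant attributes, and its device string.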
Status HashNodeNonInput(const NodeDef* node, bool hash_functions,
uint64* hash) {
auto iter = attr_cache_->find(std::make_pair(node, hash_functions));
if (iter != attr_cache_->end()) {
*hash = iter->second;
return absl::OkStatus();
}
uint64 attrs_hash = 0;
const OpRegistrationData* reg;
TF_RETURN_IF_ERROR(flib_->LookUp(node->op(), ®));
uint64 op_hash = 0;
if (reg->is_function_op) {
if (hash_functions) {
TF_RETURN_IF_ERROR(HashFunction(node->op(), node->attr(), &op_hash));
}
} else {
op_hash = Hash64(node->op());
}
for (const auto& attr : reg->op_def.attr()) {
const auto& attr_key = attr.name();
if (DatasetOpKernel::IsDatasetOp(reg->op_def) && attr_key == "metadata")
continue;
auto node_attr_iter = node->attr().find(attr_key);
if (node_attr_iter == node->attr().end()) {
continue;
}
const auto& attr_value = node_attr_iter->second;
if (attr_key == kColocationAttrName ||
attr_key == kColocationGroupPrefix) {
continue;
}
uint64 attr_hash = 0;
TF_RETURN_IF_ERROR(
HashAttr(attr_key, attr_value, hash_functions, &attr_hash));
attrs_hash = Hash64Combine(attrs_hash, attr_hash);
}
uint64 device_hash = Hash64(node->device());
*hash = Hash64Combine(op_hash, Hash64Combine(attrs_hash, device_hash));
auto result =
attr_cache_->emplace(std::make_pair(node, hash_functions), *hash);
if (!result.second) {
      return errors::Internal(absl::StrCat(
          "Computed the hash for non-input node: ", node->DebugString(),
          " and hash function bool: ", hash_functions, " twice!"));
}
return absl::OkStatus();
}
Status CheckNodesEqualNonInput(const NodeDef* this_node, GraphHasher* that,
const NodeDef* that_node,
bool compare_functions) {
const OpRegistrationData* reg;
TF_RETURN_IF_ERROR(flib_->LookUp(this_node->op(), ®));
if (reg->is_function_op) {
if (compare_functions) {
TF_RETURN_IF_ERROR(
CheckFunctionsEqual(this_node->op(), this_node->attr(), that,
that_node->op(), that_node->attr()));
}
} else {
if (this_node->op() != that_node->op()) {
return errors::FailedPrecondition(
"ops for nodes ", this_node->name(), " and ", that_node->name(),
" are different: ", this_node->op(), " != ", that_node->op());
}
}
for (const auto& attr : reg->op_def.attr()) {
const auto& attr_key = attr.name();
const bool this_has_attr = this_node->attr().contains(attr_key);
const bool that_has_attr = that_node->attr().contains(attr_key);
if (this_has_attr != that_has_attr) {
return errors::FailedPrecondition(
"attr with key ", attr_key, " is different for nodes ",
this_node->name(), " and ", that_node->name(),
". Present in former: ", this_has_attr,
". Present in latter: ", that_has_attr);
}
if (!this_has_attr) {
continue;
}
if (attr_key == kColocationAttrName ||
attr_key == kColocationGroupPrefix) {
continue;
}
const auto& this_attr = this_node->attr().at(attr_key);
const auto& that_attr = that_node->attr().at(attr_key);
TF_RETURN_IF_ERROR(CheckAttrsEqual(attr_key, this_attr, that, that_attr,
compare_functions));
}
if (this_node->device() != that_node->device()) {
return errors::FailedPrecondition(
"Devices are different for nodes ", this_node->name(), " and ",
that_node->name(), ": ", this_node->device(), " vs ",
that_node->device());
}
return absl::OkStatus();
}
Status HashAttr(const std::string& attr_name, const AttrValue& attr_value,
bool hash_functions, uint64* hash) {
uint64 value_hash = 0;
if (attr_value.has_func()) {
if (hash_functions) {
TF_RETURN_IF_ERROR(HashFunction(attr_value.func(), &value_hash));
}
} else if (attr_value.has_list() && attr_value.list().func_size() > 0) {
if (hash_functions) {
for (auto& func : attr_value.list().func()) {
uint64 func_hash;
TF_RETURN_IF_ERROR(HashFunction(func, &func_hash));
value_hash = Hash64Combine(value_hash, func_hash);
}
}
} else {
value_hash = DeterministicProtoHash64(attr_value);
}
*hash = Hash64Combine(Hash64(attr_name), value_hash);
return absl::OkStatus();
}
Status CheckAttrsEqual(const std::string& attr_name,
const AttrValue& this_attr, GraphHasher* that,
const AttrValue& that_attr, bool compare_functions) {
if (this_attr.has_func() != that_attr.has_func()) {
return errors::FailedPrecondition(
"AttrValues are of different types: ", this_attr.DebugString(),
" vs ", that_attr.DebugString());
}
if (this_attr.has_func()) {
if (compare_functions) {
TF_RETURN_IF_ERROR(
CheckFunctionsEqual(this_attr.func(), that, that_attr.func()));
}
return absl::OkStatus();
}
if (this_attr.has_list() != that_attr.has_list()) {
return errors::FailedPrecondition(
"AttrValues are of different types: ", this_attr.DebugString(),
" vs ", that_attr.DebugString());
}
if (this_attr.has_list()) {
if (this_attr.list().func_size() != that_attr.list().func_size()) {
return errors::FailedPrecondition(
"AttrValues have func lists of different sizes: ",
this_attr.DebugString(), " vs ", that_attr.DebugString());
}
if (compare_functions) {
for (int i = 0; i < this_attr.list().func_size(); ++i) {
TF_RETURN_IF_ERROR(CheckFunctionsEqual(this_attr.list().func(i), that,
that_attr.list().func(i)));
}
}
return absl::OkStatus();
}
uint64 this_hash, that_hash;
TF_RETURN_IF_ERROR(
        HashAttr(attr_name, this_attr, /*hash_functions=*/true, &this_hash));
TF_RETURN_IF_ERROR(that->HashAttr(attr_name, that_attr,
                                      /*hash_functions=*/true, &that_hash));
if (this_hash != that_hash) {
return errors::FailedPrecondition(
"AttrValues are different: ", this_attr.DebugString(), " vs ",
that_attr.DebugString());
}
return absl::OkStatus();
}
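  // A function is hashed by instantiating its body and hashing the subgraph rooted at each return node, combined with a hash of its control returns.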
Status HashFunction(const NameAttrList& func, uint64* hash) {
return HashFunction(func.name(), func.attr(), hash);
}
Status HashFunction(const std::string& name, const AttrValueMap& attrs,
uint64* hash) {
const FunctionDef* fdef = flib_->Find(name);
auto it = function_cache_->find(fdef);
if (it != function_cache_->end()) {
*hash = it->second;
return absl::OkStatus();
}
std::unique_ptr<FunctionBody> fbody;
TF_RETURN_IF_ERROR(
FunctionDefToBodyHelper(*fdef, AttrSlice(&attrs), flib_, &fbody));
GraphDef graph_def = fbody->graph->ToGraphDefDebug();
uint64 ret_nodes_hash = 0;
for (const auto& ret_node : fbody->ret_nodes) {
uint64 ret_node_hash = 0;
GraphHasher hasher(&graph_def, &ret_node->def(), flib_, node_cache_,
function_cache_, attr_cache_);
TF_RETURN_IF_ERROR(hasher.Init());
TF_RETURN_IF_ERROR(hasher.HashRoot(&ret_node_hash));
ret_nodes_hash = Hash64Combine(ret_nodes_hash, ret_node_hash);
}
std::vector<const NodeDef*> control_rets;
control_rets.reserve(fbody->control_ret_nodes.size());
for (const auto& control_ret_node : fbody->control_ret_nodes) {
control_rets.push_back(&control_ret_node->def());
}
uint64 control_ret_nodes_hash = 0;
TF_RETURN_IF_ERROR(
HashControlInputs(control_rets, &control_ret_nodes_hash));
*hash = Hash64Combine(ret_nodes_hash, control_ret_nodes_hash);
auto result = function_cache_->emplace(fdef, *hash);
if (!result.second) {
return errors::Internal(
absl::StrCat("Computed the hash for function ", name, " twice!"));
}
return absl::OkStatus();
}
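// Structural equality check for two functions; the helper instantiates both
// function bodies and compares them return node by return node, plus their
// control returns. Failures are wrapped with the two function names.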
Status CheckFunctionsEqual(const NameAttrList& this_func, GraphHasher* that,
const NameAttrList& that_func) {
return CheckFunctionsEqual(this_func.name(), this_func.attr(), that,
that_func.name(), that_func.attr());
}
Status CheckFunctionsEqual(const std::string& this_name,
const AttrValueMap& this_attrs, GraphHasher* that,
const std::string& that_name,
const AttrValueMap& that_attrs) {
Status s = CheckFunctionsEqualHelper(this_name, this_attrs, that, that_name,
that_attrs);
if (!s.ok()) {
return errors::FailedPrecondition("Functions ", this_name, " and ",
that_name, " are not the same:\n", s);
}
return s;
}
Status CheckFunctionsEqualHelper(const std::string& this_name,
const AttrValueMap& this_attrs,
GraphHasher* that,
const std::string& that_name,
const AttrValueMap& that_attrs) {
const FunctionDef* this_fdef = flib_->Find(this_name);
const FunctionDef* that_fdef = that->flib_->Find(that_name);
std::unique_ptr<FunctionBody> this_fbody;
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
*this_fdef, AttrSlice(&this_attrs), flib_, &this_fbody));
GraphDef this_graph_def = this_fbody->graph->ToGraphDefDebug();
std::unique_ptr<FunctionBody> that_fbody;
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
*that_fdef, AttrSlice(&that_attrs), that->flib_, &that_fbody));
GraphDef that_graph_def = that_fbody->graph->ToGraphDefDebug();
if (this_fbody->ret_nodes.size() != that_fbody->ret_nodes.size()) {
return errors::FailedPrecondition(
"Different numbers of ret nodes for functions ", this_name, " and ",
that_name, ": ", this_fbody->ret_nodes.size(), " vs ",
that_fbody->ret_nodes.size());
}
for (int i = 0; i < this_fbody->ret_nodes.size(); ++i) {
const NodeDef* this_root = &this_fbody->ret_nodes[i]->def();
const NodeDef* that_root = &that_fbody->ret_nodes[i]->def();
GraphHasher this_hasher(&this_graph_def, this_root, flib_, node_cache_,
function_cache_, attr_cache_);
TF_RETURN_IF_ERROR(this_hasher.Init());
GraphHasher that_hasher(&that_graph_def, that_root, that->flib_,
node_cache_, function_cache_, attr_cache_);
TF_RETURN_IF_ERROR(that_hasher.Init());
TF_RETURN_IF_ERROR(this_hasher.CheckEqual(&that_hasher));
}
std::vector<const NodeDef*> this_control_rets;
this_control_rets.reserve(this_fbody->control_ret_nodes.size());
for (const auto& control_ret_node : this_fbody->control_ret_nodes) {
this_control_rets.push_back(&control_ret_node->def());
}
std::vector<const NodeDef*> that_control_rets;
that_control_rets.reserve(that_fbody->control_ret_nodes.size());
for (const auto& control_ret_node : that_fbody->control_ret_nodes) {
that_control_rets.push_back(&control_ret_node->def());
}
TF_RETURN_IF_ERROR(
CheckControlInputsEqual(this_control_rets, that, that_control_rets));
return absl::OkStatus();
}
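// Order-independent hash over control inputs (Hash64CombineUnordered), so the
// ordering of control dependencies does not affect the result.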
Status HashControlInputs(const std::vector<const NodeDef*>& inputs,
uint64* hash) {
*hash = 0;
for (const NodeDef* input : inputs) {
uint64 node_hash = 0;
    TF_RETURN_IF_ERROR(
        HashNodeNonInput(input, /*hash_functions=*/false, &node_hash));
*hash = Hash64CombineUnordered(*hash, node_hash);
}
return absl::OkStatus();
}
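// Matches two sets of control inputs by their node hashes and reports any
// dependencies that could not be paired up.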
Status CheckControlInputsEqual(
const std::vector<const NodeDef*>& this_inputs, GraphHasher* that,
const std::vector<const NodeDef*>& that_inputs) {
absl::flat_hash_map<uint64, const NodeDef*> this_hashes;
for (const NodeDef* input : this_inputs) {
uint64 node_hash = 0;
    TF_RETURN_IF_ERROR(
        HashNodeNonInput(input, /*hash_functions=*/false, &node_hash));
this_hashes[node_hash] = input;
}
absl::flat_hash_map<uint64, const NodeDef*> that_hashes;
for (const NodeDef* input : that_inputs) {
uint64 node_hash = 0;
    TF_RETURN_IF_ERROR(
        HashNodeNonInput(input, /*hash_functions=*/false, &node_hash));
auto this_iter = this_hashes.find(node_hash);
if (this_iter != this_hashes.end()) {
this_hashes.erase(this_iter);
} else {
that_hashes[node_hash] = input;
}
}
if (!this_hashes.empty()) {
auto formatter = [](string* out,
const decltype(this_hashes)::value_type& item) {
out->append(item.second->name());
};
return errors::FailedPrecondition(
"Control dependencies are different. One node has dependencies [",
absl::StrJoin(this_hashes, ", ", formatter),
"], which don't match any of the other node's dependencies [",
absl::StrJoin(that_hashes, ", ", formatter), "]");
}
return absl::OkStatus();
}
private:
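  // Membership test against the set of edges previously marked as
  // cycle-forming.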
bool is_cycle_forming_edge(const NodeDef* start, const NodeDef* end) {
EdgeRep edge(start, end);
return cycle_forming_edges_.contains(edge.GetHash());
}
struct NodeRep {
std::vector<const NodeDef*> node_control_inputs;
std::vector<std::pair<const NodeDef*, absl::string_view>> node_inputs;
};
struct EdgeRep {
const NodeDef* start_node;
const NodeDef* end_node;
EdgeRep(const NodeDef* start, const NodeDef* end)
: start_node(start), end_node(end) {}
uint64 GetHash() {
return Hash64Combine(absl::Hash<const NodeDef*>()(start_node),
absl::Hash<const NodeDef*>()(end_node));
}
};
const GraphDef* const graph_;
const NodeDef* const root_;
const FunctionLibraryDefinition* const flib_;
absl::flat_hash_set<uint64> cycle_forming_edges_;
absl::flat_hash_map<const NodeDef*, NodeRep> nodes_;
std::shared_ptr<NodeCache> node_cache_;
std::shared_ptr<FunctionCache> function_cache_;
std::shared_ptr<AttrCache> attr_cache_;
};
}
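// Public entry points.
// Hashes a tensor's dtype, shape, and contents (string tensors element-wise;
// DT_RESOURCE and DT_VARIANT are not supported).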
Status HashTensor(const Tensor& tensor, uint64* hash) {
const tstring* s = nullptr;
*hash = Hash64Combine(0, tensor.dtype());
for (int i = 0; i < tensor.shape().dims(); ++i) {
*hash = Hash64Combine(*hash, tensor.shape().dim_size(i));
}
switch (tensor.dtype()) {
case DT_RESOURCE:
case DT_VARIANT:
return errors::Unimplemented("Hashing ", DataTypeString(tensor.dtype()),
" is not supported.");
case DT_STRING:
s = tensor.flat<tstring>().data();
for (int i = 0; i < tensor.NumElements(); ++i, ++s) {
*hash = Hash64Combine(*hash, Hash64(s->data(), s->size()));
}
break;
default:
*hash = Hash64(tensor.tensor_data().data(), tensor.tensor_data().size());
}
return absl::OkStatus();
}
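// Hashes the subgraph rooted at `node`, using the graph's function library
// (or the explicitly supplied one).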
Status HashNode(const GraphDef& graph, const NodeDef& node, uint64* hash) {
const FunctionLibraryDefinition flib_def(OpRegistry::Global(),
graph.library());
return HashNode(graph, node, flib_def, hash);
}
Status HashNode(const GraphDef& graph, const NodeDef& node,
const FunctionLibraryDefinition& flib_def, uint64* hash) {
GraphHasher hasher(&graph, &node, &flib_def);
TF_RETURN_IF_ERROR(hasher.Init());
return hasher.HashRoot(hash);
}
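// Hashes a whole graph by hashing the subgraph rooted at its sink node.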
Status HashGraph(const GraphDef& graph_def, uint64* hash) {
const NodeDef* sink = nullptr;
TF_RETURN_IF_ERROR(GetSink(graph_def, &sink));
return HashNode(graph_def, *sink, hash);
}
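// Structural-equality counterparts of the hashing entry points above;
// differences are reported as FailedPrecondition errors.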
Status CheckGraphsEqual(const GraphDef& a, const GraphDef& b) {
const NodeDef* sink_a;
TF_RETURN_IF_ERROR(GetSink(a, &sink_a));
const NodeDef* sink_b;
TF_RETURN_IF_ERROR(GetSink(b, &sink_b));
return CheckSubgraphsEqual(a, sink_a, b, sink_b);
}
Status CheckSubgraphsEqual(const GraphDef& a, const NodeDef* node_a,
const GraphDef& b, const NodeDef* node_b) {
const FunctionLibraryDefinition flib_def_a(OpRegistry::Global(), a.library());
GraphHasher hasher_a(&a, node_a, &flib_def_a);
TF_RETURN_IF_ERROR(hasher_a.Init());
const FunctionLibraryDefinition flib_def_b(OpRegistry::Global(), b.library());
GraphHasher hasher_b(&b, node_b, &flib_def_b);
TF_RETURN_IF_ERROR(hasher_b.Init());
return hasher_a.CheckEqual(&hasher_b);
}
}
} | #include "tensorflow/core/data/hash_utils.h"
#include <utility>
#include <vector>
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/util/work_sharder.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::ContainsRegex;
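// Test fixture helpers: GetHash wraps a FunctionDef in a RemoteCall node and
// hashes it; CheckEqual builds two RemoteCall nodes in one graph and compares
// the subgraphs rooted at them.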
class DatasetHashUtilsTest : public ::testing::Test {
protected:
uint64 GetHash(const FunctionDefLibrary& library, const FunctionDef& fn) {
GraphDef graph_def;
*graph_def.mutable_library() = library;
NodeDef* node = graph_def.add_node();
node->set_op("RemoteCall");
NameAttrList func;
func.set_name(fn.signature().name());
AddNodeAttr("f", func, node);
uint64 hash = 0;
TF_CHECK_OK(HashNode(graph_def, *node, &hash));
return hash;
}
Status CheckEqual(const FunctionDefLibrary& library, const FunctionDef& fn1,
const FunctionDef& fn2) {
GraphDef graph_def;
*graph_def.mutable_library() = library;
NodeDef* node1 = graph_def.add_node();
node1->set_name("RemoteCall");
node1->set_op("RemoteCall");
NameAttrList func1;
func1.set_name(fn1.signature().name());
AddNodeAttr("f", func1, node1);
NodeDef* node2 = graph_def.add_node();
    node2->set_name("RemoteCall2");
node2->set_op("RemoteCall");
NameAttrList func2;
func2.set_name(fn2.signature().name());
AddNodeAttr("f", func2, node2);
return CheckSubgraphsEqual(graph_def, node1, graph_def, node2);
}
uint64 GetHash(const GraphDef& graph, const NodeDef& node) {
uint64 hash = 0;
TF_CHECK_OK(HashNode(graph, node, &hash));
return hash;
}
uint64 GetHash(const Tensor& tensor) {
uint64 hash = 0;
TF_CHECK_OK(HashTensor(tensor, &hash));
return hash;
}
};
TEST_F(DatasetHashUtilsTest, HashFunctionSameFunctionDifferentNames) {
FunctionDefLibrary fl;
FunctionDef* f1 = fl.add_function();
*f1 = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
FunctionDef* f2 = fl.add_function();
*f2 = FunctionDefHelper::Create(
"AddAndMul2", {"input: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"input", "input"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"input", "input"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
EXPECT_EQ(GetHash(fl, *f1), GetHash(fl, *f2));
TF_EXPECT_OK(CheckEqual(fl, *f1, *f2));
}
TEST_F(DatasetHashUtilsTest, HashFunctionDifferentFunctions) {
FunctionDefLibrary fl;
FunctionDef* f1 = fl.add_function();
*f1 = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
FunctionDef* f2 = fl.add_function();
*f2 = FunctionDefHelper::Create(
"AddAndAdd", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
EXPECT_NE(GetHash(fl, *f1), GetHash(fl, *f2));
Status s = CheckEqual(fl, *f1, *f2);
EXPECT_NE(s.code(), error::OK);
EXPECT_THAT(s.message(), ContainsRegex("Add"));
}
TEST_F(DatasetHashUtilsTest, HashFunctionDifferentInternalNodeNames) {
FunctionDefLibrary fl;
FunctionDef* f1 = fl.add_function();
*f1 = FunctionDefHelper::Create(
"AddAndMul", {"i: float", "j: float", "k: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "j"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"add:z:0", "k"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "ret"}});
FunctionDef* f2 = fl.add_function();
*f2 = FunctionDefHelper::Create(
"AddAndMul2", {"a: float", "b: float", "c: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"a", "b"}, {{"T", DT_FLOAT}}},
{{"mul"}, "Mul", {"add:z:0", "c"}, {{"T", DT_FLOAT}}}},
{{"o", "mul:z:0"}},
{{"must_execute", "mul"}});
EXPECT_EQ(GetHash(fl, *f1), GetHash(fl, *f2));
TF_EXPECT_OK(CheckEqual(fl, *f1, *f2));
}
TEST_F(DatasetHashUtilsTest, HashGraphWithMultipleCycles) {
uint64 hash = 0;
for (int i = 0; i < 1000; ++i) {
GraphDef g;
NodeDef* output_node = g.add_node();
TF_CHECK_OK(NodeDefBuilder("O", "Add")
.Input("A", 0, DT_FLOAT)
.Input("D", 0, DT_FLOAT)
.Finalize(output_node));
TF_CHECK_OK(NodeDefBuilder("A", "Abs")
.Input("B", 0, DT_FLOAT)
.Finalize(g.add_node()));
TF_CHECK_OK(NodeDefBuilder("B", "Add")
.Input("C", 0, DT_FLOAT)
.Input("D", 0, DT_FLOAT)
.Finalize(g.add_node()));
TF_CHECK_OK(NodeDefBuilder("C", "Ceil")
.Input("A", 0, DT_FLOAT)
.Finalize(g.add_node()));
TF_CHECK_OK(NodeDefBuilder("D", "Cos")
.Input("E", 0, DT_FLOAT)
.Finalize(g.add_node()));
TF_CHECK_OK(NodeDefBuilder("E", "Floor")
.Input("B", 0, DT_FLOAT)
.Finalize(g.add_node()));
uint64 t = GetHash(g, *output_node);
if (hash == 0) {
hash = t;
} else {
EXPECT_EQ(t, hash);
}
}
}
TEST_F(DatasetHashUtilsTest, HashNodeSameGraphDifferentNames) {
GraphDef gd;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "Const")
.Attr("value", 2)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "Add")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.Input(n2->name(), 0, DT_INT32)
.Finalize(n3));
NodeDef* n4 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_3/node_7", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n4));
NodeDef* n5 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_4/node_9", "Const")
.Attr("value", 2)
.Device("CPU:0")
.Finalize(n5));
NodeDef* n6 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_5/node_11", "Add")
.Device("CPU:0")
.Input(n4->name(), 0, DT_INT32)
.Input(n5->name(), 0, DT_INT32)
.Finalize(n6));
uint64 hash1 = GetHash(gd, *n3);
uint64 hash2 = GetHash(gd, *n6);
EXPECT_EQ(hash1, hash2);
TF_EXPECT_OK(CheckSubgraphsEqual(gd, n3, gd, n6));
}
TEST_F(DatasetHashUtilsTest, HashNodeDifferentGraphs) {
GraphDef gd;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "Const")
.Attr("value", 2)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "Add")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.Input(n2->name(), 0, DT_INT32)
.Finalize(n3));
NodeDef* n4 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_4", "Mul")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.Input(n2->name(), 0, DT_INT32)
.Finalize(n4));
uint64 hash1 = GetHash(gd, *n3);
uint64 hash2 = GetHash(gd, *n4);
EXPECT_NE(hash1, hash2);
Status s = CheckSubgraphsEqual(gd, n3, gd, n4);
EXPECT_NE(s.code(), error::OK);
EXPECT_THAT(s.message(), ContainsRegex("Add"));
EXPECT_THAT(s.message(), ContainsRegex("Mul"));
}
TEST_F(DatasetHashUtilsTest, HashSameGraphDifferentSeeds) {
GraphDef gd;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
NodeDef* seed = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/seed", "Const")
.Attr("value", 123)
.Device("CPU:0")
.Finalize(seed));
NodeDef* seed2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/seed2", "Const")
.Attr("value", 456)
.Device("CPU:0")
.Finalize(seed2));
NodeDef* range_ds = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/range", "RangeDataset")
.Input(n1->name(), 0, DT_INT64)
.Input(n1->name(), 0, DT_INT64)
.Input(n1->name(), 0, DT_INT64)
.Device("CPU:0")
.Finalize(range_ds));
NodeDef* shuffle_ds = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/shuffle", "ShuffleDataset")
.Input(range_ds->name(), 0, DT_VARIANT)
.Input(n1->name(), 0, DT_INT64)
.Input(seed->name(), 0, DT_INT64)
.Input(seed2->name(), 0, DT_INT64)
.Device("CPU:0")
.Finalize(shuffle_ds));
NodeDef* different_seed = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/different_seed", "Const")
.Attr("value", 789)
.Device("CPU:0")
.Finalize(different_seed));
NodeDef* different_seed2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/different_seed2", "Const")
.Attr("value", 654)
.Device("CPU:0")
.Finalize(different_seed2));
NodeDef* range_ds_2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/range_2", "RangeDataset")
.Input(n1->name(), 0, DT_INT64)
.Input(n1->name(), 0, DT_INT64)
.Input(n1->name(), 0, DT_INT64)
.Device("CPU:0")
.Finalize(range_ds_2));
NodeDef* shuffle_ds_2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/shuffle_2", "ShuffleDataset")
.Input(range_ds_2->name(), 0, DT_VARIANT)
.Input(n1->name(), 0, DT_INT64)
.Input(different_seed->name(), 0, DT_INT64)
.Input(different_seed2->name(), 0, DT_INT64)
.Device("CPU:0")
.Finalize(shuffle_ds_2));
uint64 hash1 = GetHash(gd, *shuffle_ds);
uint64 hash2 = GetHash(gd, *shuffle_ds_2);
EXPECT_EQ(hash1, hash2);
TF_EXPECT_OK(CheckSubgraphsEqual(gd, shuffle_ds, gd, shuffle_ds_2));
}
TEST_F(DatasetHashUtilsTest, HashNodeSameGraphDifferentColocationNames) {
GraphDef gd;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Attr("_class", {"graph_1/node_2"})
.Device("CPU:0")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "Const")
.Attr("value", 2)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "Add")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.Input(n2->name(), 0, DT_INT32)
.Finalize(n3));
NodeDef* n4 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_3/node_7", "Const")
.Attr("value", 1)
.Attr("_class", {"graph_3/node_9"})
.Device("CPU:0")
.Finalize(n4));
NodeDef* n5 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_4/node_9", "Const")
.Attr("value", 2)
.Device("CPU:0")
.Finalize(n5));
NodeDef* n6 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_5/node_11", "Add")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.Input(n2->name(), 0, DT_INT32)
.Finalize(n6));
uint64 hash1 = GetHash(gd, *n3);
uint64 hash2 = GetHash(gd, *n6);
EXPECT_EQ(hash1, hash2);
TF_EXPECT_OK(CheckSubgraphsEqual(gd, n3, gd, n6));
}
TEST_F(DatasetHashUtilsTest, HashNodeReversedOrder) {
GraphDef gd;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "Const")
.Attr("value", 2)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "Add")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.Input(n2->name(), 0, DT_INT32)
.Finalize(n3));
NodeDef* n4 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_4", "Add")
.Device("CPU:0")
.Input(n2->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Finalize(n4));
uint64 hash1 = GetHash(gd, *n3);
uint64 hash2 = GetHash(gd, *n4);
EXPECT_NE(hash1, hash2);
Status s = CheckSubgraphsEqual(gd, n3, gd, n4);
EXPECT_NE(s.code(), error::OK);
EXPECT_THAT(s.message(), ContainsRegex("AttrValues are different"));
}
TEST_F(DatasetHashUtilsTest, HashNodeInputPortChanged) {
GraphDef gd;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "Const")
.Attr("value", 2)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "Add")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.Input(n2->name(), 0, DT_INT32)
.Finalize(n3));
NodeDef* n4 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_4", "Add")
.Device("CPU:0")
.Input(n1->name(), 1, DT_INT32)
.Input(n2->name(), 2, DT_INT32)
.Finalize(n4));
uint64 hash1 = GetHash(gd, *n3);
uint64 hash2 = GetHash(gd, *n4);
EXPECT_NE(hash1, hash2);
Status s = CheckSubgraphsEqual(gd, n3, gd, n4);
EXPECT_NE(s.code(), error::OK);
EXPECT_THAT(s.message(), ContainsRegex("Node inputs"));
}
TEST_F(DatasetHashUtilsTest, HashNodeSameFunctionDifferentNames) {
GraphDef gd;
FunctionDefLibrary* fl1 = gd.mutable_library();
FunctionDef* f1 = fl1->add_function();
*f1 = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
FunctionDef* f2 = fl1->add_function();
*f2 = FunctionDefHelper::Create(
"AddAndMul2", {"input: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"input", "input"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"input", "input"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
AttrValue a1;
NameAttrList* nal1 = a1.mutable_func();
nal1->set_name("AddAndMul");
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
std::vector<NodeDefBuilder::NodeOut> func_inputs;
func_inputs.emplace_back(n1->name(), 0, DT_FLOAT);
func_inputs.emplace_back(n1->name(), 0, DT_FLOAT);
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "For")
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(func_inputs)
.Attr("body", a1)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
AttrValue a2;
NameAttrList* nal2 = a2.mutable_func();
nal2->set_name("AddAndMul2");
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "For")
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(func_inputs)
.Attr("body", a2)
.Device("CPU:0")
.Finalize(n3));
uint64 hash1 = GetHash(gd, *n2);
uint64 hash2 = GetHash(gd, *n3);
EXPECT_EQ(hash1, hash2);
TF_EXPECT_OK(CheckSubgraphsEqual(gd, n2, gd, n3));
}
TEST_F(DatasetHashUtilsTest, HashNodeSameFunctionListsDifferentNames) {
GraphDef gd;
FunctionDefLibrary* fl1 = gd.mutable_library();
FunctionDef* f1 = fl1->add_function();
*f1 = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
FunctionDef* f2 = fl1->add_function();
*f2 = FunctionDefHelper::Create(
"AddAndMul2", {"input: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"input", "input"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"input", "input"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
AttrValue a1;
AttrValue_ListValue* list1 = a1.mutable_list();
NameAttrList* nal1 = list1->add_func();
nal1->set_name("AddAndMul");
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
std::vector<NodeDefBuilder::NodeOut> func_inputs;
func_inputs.emplace_back(n1->name(), 0, DT_FLOAT);
func_inputs.emplace_back(n1->name(), 0, DT_FLOAT);
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "For")
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(func_inputs)
.Attr("body", a1)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
AttrValue a2;
AttrValue_ListValue* list2 = a2.mutable_list();
NameAttrList* nal2 = list2->add_func();
nal2->set_name("AddAndMul2");
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "For")
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(func_inputs)
.Attr("body", a2)
.Device("CPU:0")
.Finalize(n3));
uint64 hash1 = GetHash(gd, *n2);
uint64 hash2 = GetHash(gd, *n3);
EXPECT_EQ(hash1, hash2);
TF_EXPECT_OK(CheckSubgraphsEqual(gd, n2, gd, n3));
}
TEST_F(DatasetHashUtilsTest, HashNodeSameFunctionsOps) {
GraphDef gd;
FunctionDefLibrary* fl1 = gd.mutable_library();
FunctionDef* f1 = fl1->add_function();
FunctionDef func = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
*f1 = func;
FunctionDef* f2 = fl1->add_function();
func = FunctionDefHelper::Create(
"AddAndMul2", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
*f2 = func;
FunctionLibraryDefinition flib(OpRegistry::Global(), gd.library());
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "AddAndMul", &flib)
.Input(n1->name(), 0, DT_FLOAT)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "AddAndMul2", &flib)
.Input(n1->name(), 0, DT_FLOAT)
.Device("CPU:0")
.Finalize(n3));
uint64 hash1 = GetHash(gd, *n2);
uint64 hash2 = GetHash(gd, *n3);
EXPECT_EQ(hash1, hash2);
TF_EXPECT_OK(CheckSubgraphsEqual(gd, n2, gd, n3));
}
TEST_F(DatasetHashUtilsTest, HashNodeDifferentFunctionsOps) {
GraphDef gd;
FunctionDefLibrary* fl1 = gd.mutable_library();
FunctionDef* f1 = fl1->add_function();
FunctionDef func = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
*f1 = func;
FunctionDef* f2 = fl1->add_function();
func = FunctionDefHelper::Create(
"AddAndMul2", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "ret"}});
*f2 = func;
FunctionLibraryDefinition flib(OpRegistry::Global(), gd.library());
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "AddAndMul", &flib)
.Input(n1->name(), 0, DT_FLOAT)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "AddAndMul2", &flib)
.Input(n1->name(), 0, DT_FLOAT)
.Device("CPU:0")
.Finalize(n3));
uint64 hash1 = GetHash(gd, *n2);
uint64 hash2 = GetHash(gd, *n3);
EXPECT_NE(hash1, hash2);
Status s = CheckSubgraphsEqual(gd, n2, gd, n3);
EXPECT_NE(s.code(), error::OK);
EXPECT_THAT(
s.message(),
ContainsRegex("Functions AddAndMul and AddAndMul2 are not the same"));
}
TEST_F(DatasetHashUtilsTest, HashNodeDifferentFunctions) {
GraphDef gd;
FunctionDefLibrary* fl1 = gd.mutable_library();
FunctionDef* f1 = fl1->add_function();
FunctionDef func = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
*f1 = func;
FunctionDef* f2 = fl1->add_function();
func = FunctionDefHelper::Create(
"AddAndMul2", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "ret"}});
*f2 = func;
AttrValue a1;
NameAttrList* nal1 = a1.mutable_func();
nal1->set_name("AddAndMul");
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
std::vector<NodeDefBuilder::NodeOut> func_inputs;
func_inputs.emplace_back(n1->name(), 0, DT_FLOAT);
func_inputs.emplace_back(n1->name(), 0, DT_FLOAT);
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "For")
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(func_inputs)
.Attr("body", a1)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
AttrValue a2;
NameAttrList* nal2 = a2.mutable_func();
nal2->set_name("AddAndMul2");
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "For")
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(func_inputs)
.Attr("body", a2)
.Device("CPU:0")
.Finalize(n3));
uint64 hash1 = GetHash(gd, *n2);
uint64 hash2 = GetHash(gd, *n3);
EXPECT_NE(hash1, hash2);
Status s = CheckSubgraphsEqual(gd, n2, gd, n3);
EXPECT_NE(s.code(), error::OK);
EXPECT_THAT(
s.message(),
ContainsRegex("Functions AddAndMul and AddAndMul2 are not the same"));
}
TEST_F(DatasetHashUtilsTest, HashNodeDifferentFunctionLists) {
GraphDef gd;
FunctionDefLibrary* fl1 = gd.mutable_library();
FunctionDef* f1 = fl1->add_function();
FunctionDef func = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
*f1 = func;
FunctionDef* f2 = fl1->add_function();
func = FunctionDefHelper::Create(
"AddAndMul2", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "ret"}});
*f2 = func;
AttrValue a1;
AttrValue_ListValue* list1 = a1.mutable_list();
NameAttrList* nal1 = list1->add_func();
nal1->set_name("AddAndMul");
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
std::vector<NodeDefBuilder::NodeOut> func_inputs;
func_inputs.emplace_back(n1->name(), 0, DT_FLOAT);
func_inputs.emplace_back(n1->name(), 0, DT_FLOAT);
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "For")
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(func_inputs)
.Attr("body", a1)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
AttrValue a2;
AttrValue_ListValue* list2 = a2.mutable_list();
NameAttrList* nal2 = list2->add_func();
nal2->set_name("AddAndMul2");
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "For")
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(func_inputs)
.Attr("body", a2)
.Device("CPU:0")
.Finalize(n3));
uint64 hash1 = GetHash(gd, *n2);
uint64 hash2 = GetHash(gd, *n3);
EXPECT_NE(hash1, hash2);
Status s = CheckSubgraphsEqual(gd, n2, gd, n3);
EXPECT_NE(s.code(), error::OK);
EXPECT_THAT(
s.message(),
ContainsRegex("Functions AddAndMul and AddAndMul2 are not the same"));
}
TEST_F(DatasetHashUtilsTest, HashNodeDifferentControlInputs) {
GraphDef gd;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "Const")
.Attr("value", 2)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "Const")
.Attr("value", 10)
.Device("CPU:0")
.Finalize(n3));
NodeDef* n4 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_4", "Identity")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.ControlInput(n2->name())
.Finalize(n4));
NodeDef* n5 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_5", "Identity")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.ControlInput(n3->name())
.Finalize(n5));
uint64 hash1 = GetHash(gd, *n4);
uint64 hash2 = GetHash(gd, *n5);
EXPECT_NE(hash1, hash2);
Status s = CheckSubgraphsEqual(gd, n4, gd, n5);
EXPECT_NE(s.code(), error::OK);
EXPECT_THAT(s.message(), ContainsRegex("Control dependencies are different"));
}
TEST_F(DatasetHashUtilsTest, HashNodeControlInputDifferentOrdering) {
GraphDef gd;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "Const")
.Attr("value", 2)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "Const")
.Attr("value", 10)
.Device("CPU:0")
.Finalize(n3));
NodeDef* n4 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_4", "Identity")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.ControlInput(n2->name())
.ControlInput(n3->name())
.Finalize(n4));
NodeDef* n5 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_5", "Identity")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.ControlInput(n3->name())
.ControlInput(n2->name())
.Finalize(n5));
uint64 hash1 = GetHash(gd, *n4);
uint64 hash2 = GetHash(gd, *n5);
EXPECT_EQ(hash1, hash2);
TF_EXPECT_OK(CheckSubgraphsEqual(gd, n4, gd, n5));
}
TEST_F(DatasetHashUtilsTest, HashNodeDifferentGraphSamePartialGraph) {
GraphDef gd;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "Const")
.Attr("value", 2)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "Add")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.Input(n2->name(), 0, DT_INT32)
.Finalize(n3));
uint64 hash1 = GetHash(gd, *n1);
n3->Clear();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "Mul")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.Input(n2->name(), 0, DT_INT32)
.Finalize(n3));
uint64 hash2 = GetHash(gd, *n1);
EXPECT_EQ(hash1, hash2);
}
TEST_F(DatasetHashUtilsTest, HashNodeWithManyControlDependencies) {
GraphDef gd;
NodeDef* n;
for (int i = 0; i < 1000; ++i) {
n = gd.add_node();
NodeDefBuilder ndb(absl::StrCat("graph_1/node_", i), "Const");
ndb.Attr("value", 1);
ndb.Device("CPU:0");
for (int j = 0; j < i; ++j) {
ndb.ControlInput(absl::StrCat("graph_1/node_", j));
}
TF_CHECK_OK(ndb.Finalize(n));
}
GetHash(gd, *n);
}
TEST_F(DatasetHashUtilsTest, HashFunctionsWithControlDependencyLoop) {
GraphDef gd;
FunctionDefLibrary* fl1 = gd.mutable_library();
FunctionDef* f1 = fl1->add_function();
AttrValue a1;
NameAttrList* nal1 = a1.mutable_func();
nal1->set_name("AddAndMul");
std::pair<string, FunctionDefHelper::AttrValueWrapper> func_attr = {
"body", FunctionDefHelper::AttrValueWrapper(*nal1)};
FunctionDef func = FunctionDefHelper::Create(
"AddAndMul",
{"i: float", "j: int32"},
{"o: float"},
{},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}, {"ret"}},
{{"for"}, "For", {"j", "j", "j"}, {func_attr, {"T", DT_FLOAT}}, {"ret"}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
*f1 = func;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
std::vector<NodeDefBuilder::NodeOut> func_inputs;
func_inputs.emplace_back(n1->name(), 0, DT_FLOAT);
func_inputs.emplace_back(n1->name(), 0, DT_FLOAT);
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "For")
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(func_inputs)
.ControlInput("graph_1/node_2")
.Attr("body", a1)
.Device("CPU:0")
.Finalize(n2));
GetHash(gd, *n2);
}
TEST_F(DatasetHashUtilsTest, HashNodeWithControlDependencyLoop) {
GraphDef gd;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.ControlInput("graph_1/node_2")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "Const")
.Attr("value", 2)
.Device("CPU:0")
.ControlInput("graph_1/node_1")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "Add")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.Input(n2->name(), 0, DT_INT32)
.ControlInput("graph_1/node_1")
.ControlInput("graph_1/node_2")
.Finalize(n3));
GetHash(gd, *n3);
}
TEST_F(DatasetHashUtilsTest, HashNodeWithControlDependencyLoopDifferentNames) {
GraphDef gd1;
NodeDef* n1 = gd1.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.ControlInput("graph_1/node_2")
.Finalize(n1));
NodeDef* n2 = gd1.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "Const")
.Attr("value", 2)
.Device("CPU:0")
.ControlInput("graph_1/node_1")
.Finalize(n2));
NodeDef* n3 = gd1.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "Add")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.Input(n2->name(), 0, DT_INT32)
.ControlInput("graph_1/node_1")
.ControlInput("graph_1/node_2")
.Finalize(n3));
GraphDef gd2;
NodeDef* n4 = gd2.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_4", "Const")
.Attr("value", 1)
.Device("CPU:0")
.ControlInput("graph_1/node_5")
.Finalize(n4));
NodeDef* n5 = gd2.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_5", "Const")
.Attr("value", 2)
.Device("CPU:0")
.ControlInput("graph_1/node_4")
.Finalize(n5));
NodeDef* n6 = gd2.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_6", "Add")
.Device("CPU:0")
.Input(n4->name(), 0, DT_INT32)
.Input(n5->name(), 0, DT_INT32)
.ControlInput("graph_1/node_4")
.ControlInput("graph_1/node_5")
.Finalize(n6));
EXPECT_EQ(GetHash(gd1, *n3), GetHash(gd2, *n6));
}
TEST_F(DatasetHashUtilsTest, HashInt32Tensor) {
Tensor s1(42);
Tensor s2(42);
Tensor s3(43);
EXPECT_EQ(GetHash(s1), GetHash(s2));
EXPECT_NE(GetHash(s1), GetHash(s3));
Tensor v1(DT_INT32, TensorShape({2}));
v1.vec<int32>()(0) = 0;
v1.vec<int32>()(1) = 1;
Tensor v2(DT_INT32, TensorShape({2}));
v2.vec<int32>()(0) = 0;
v2.vec<int32>()(1) = 1;
Tensor v3(DT_INT32, TensorShape({2}));
v3.vec<int32>()(0) = 0;
v3.vec<int32>()(1) = 2;
EXPECT_EQ(GetHash(v1), GetHash(v2));
EXPECT_NE(GetHash(v1), GetHash(v3));
}
TEST_F(DatasetHashUtilsTest, HashStringTensor) {
Tensor s1("hello");
Tensor s2("hello");
Tensor s3("world");
EXPECT_EQ(GetHash(s1), GetHash(s2));
EXPECT_NE(GetHash(s1), GetHash(s3));
Tensor v1(DT_STRING, TensorShape({2}));
v1.vec<tstring>()(0) = "hello";
v1.vec<tstring>()(1) = "world";
Tensor v2(DT_STRING, TensorShape({2}));
v2.vec<tstring>()(0) = "hello";
v2.vec<tstring>()(1) = "world";
Tensor v3(DT_STRING, TensorShape({2}));
v3.vec<tstring>()(0) = "hello";
v3.vec<tstring>()(1) = "universe";
EXPECT_EQ(GetHash(v1), GetHash(v2));
EXPECT_NE(GetHash(v1), GetHash(v3));
}
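// Benchmarks: hash graphs containing 100 parallel, chained, or composed
// (nested) PartitionedCall function calls.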
static void BM_ParallelFunctionCallsGraph(benchmark::State& state) {
GraphDef graph_def;
FunctionDefLibrary* fl = graph_def.mutable_library();
FunctionDef* fd = fl->add_function();
*fd = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
NodeDef* input = graph_def.add_node();
input->set_name("InputPlaceholder");
input->set_op("Placeholder");
AddNodeAttr("dtype", DT_FLOAT, input);
NodeDef* target = graph_def.add_node();
target->set_name("Target");
target->set_op("NoOp");
ConfigProto config_pb;
config_pb.mutable_device_count()->insert({"CPU", 1});
config_pb.mutable_device_count()->insert({"GPU", 1});
config_pb.set_allow_soft_placement(true);
for (int i = 0; i < 100; ++i) {
NodeDef* node = graph_def.add_node();
node->set_name(absl::StrCat("PartitionedCall_", i));
node->set_op("PartitionedCall");
*node->add_input() = input->name();
AddNodeAttr("Tin", DT_FLOAT, node);
AddNodeAttr("Tout", DT_FLOAT, node);
AddNodeAttr("config", "", node);
AddNodeAttr("config_proto", config_pb.SerializeAsString(), node);
NameAttrList func;
func.set_name(fd->signature().name());
AddNodeAttr("f", func, node);
*target->add_input() = absl::StrCat("^", node->name());
}
uint64 hash_value;
for (auto _ : state) {
TF_CHECK_OK(HashNode(graph_def, *target, &hash_value));
}
}
BENCHMARK(BM_ParallelFunctionCallsGraph);
static void BM_ChainedFunctionCallsGraph(benchmark::State& state) {
GraphDef graph_def;
FunctionDefLibrary* fl = graph_def.mutable_library();
FunctionDef* fd = fl->add_function();
*fd = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
NodeDef* input = graph_def.add_node();
input->set_name("InputPlaceholder");
input->set_op("Placeholder");
AddNodeAttr("dtype", DT_FLOAT, input);
ConfigProto config_pb;
config_pb.mutable_device_count()->insert({"CPU", 1});
config_pb.mutable_device_count()->insert({"GPU", 1});
config_pb.set_allow_soft_placement(true);
for (int i = 0; i < 100; ++i) {
NodeDef* node = graph_def.add_node();
node->set_name(absl::StrCat("PartitionedCall_", i));
node->set_op("PartitionedCall");
if (i > 0) {
*node->add_input() = absl::StrCat("PartitionedCall_", i - 1);
} else {
*node->add_input() = input->name();
}
AddNodeAttr("Tin", DT_FLOAT, node);
AddNodeAttr("Tout", DT_FLOAT, node);
AddNodeAttr("config", "", node);
AddNodeAttr("config_proto", config_pb.SerializeAsString(), node);
NameAttrList func;
func.set_name(fd->signature().name());
AddNodeAttr("f", func, node);
}
const NodeDef& target = graph_def.node(graph_def.node_size() - 1);
uint64 hash_value;
for (auto _ : state) {
TF_CHECK_OK(HashNode(graph_def, target, &hash_value));
}
}
BENCHMARK(BM_ChainedFunctionCallsGraph);
static void BM_ComposedFunctionCallsGraph(benchmark::State& state) {
GraphDef graph_def;
FunctionDefLibrary* fl = graph_def.mutable_library();
FunctionDef* fd = fl->add_function();
*fd = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
ConfigProto config_pb;
config_pb.mutable_device_count()->insert({"CPU", 1});
config_pb.mutable_device_count()->insert({"GPU", 1});
config_pb.set_allow_soft_placement(true);
for (int i = 0; i < 99; ++i) {
NameAttrList func;
func.set_name(fd->signature().name());
FunctionDef* fd = fl->add_function();
*fd = FunctionDefHelper::Create(
absl::StrCat("F_", i),
{"i: float"},
{"o: float"},
{},
{
{
{"inner_call"},
"PartitionedCall",
{"i"},
{{"Ti", DT_FLOAT},
{"Tout", DT_FLOAT},
{"config", ""},
{"config_proto", config_pb.SerializeAsString()},
{"f", func}},
},
},
{{"o", "inner_call:o:0"}},
{{"must_execute", "inner_call"}});
}
NodeDef* input = graph_def.add_node();
input->set_name("InputPlaceholder");
input->set_op("Placeholder");
AddNodeAttr("dtype", DT_FLOAT, input);
NodeDef* node = graph_def.add_node();
node->set_name("PartitionedCall_start");
node->set_op("PartitionedCall");
*node->add_input() = input->name();
AddNodeAttr("Tin", DT_FLOAT, node);
AddNodeAttr("Tout", DT_FLOAT, node);
AddNodeAttr("config", "", node);
AddNodeAttr("config_proto", config_pb.SerializeAsString(), node);
NameAttrList func;
func.set_name(fd->signature().name());
AddNodeAttr("f", func, node);
const NodeDef& target = graph_def.node(graph_def.node_size() - 1);
uint64 hash_value;
for (auto _ : state) {
TF_CHECK_OK(HashNode(graph_def, target, &hash_value));
}
}
BENCHMARK(BM_ComposedFunctionCallsGraph);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/hash_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/hash_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
aa9eb069-6e9e-4c5a-9df4-5d8c7ab16540 | cpp | tensorflow/tensorflow | xplane_to_profile_instructions | third_party/xla/xla/python/xplane_to_profile_instructions.cc | third_party/xla/xla/python/xplane_to_profile_instructions_test.cc | #include "xla/python/xplane_to_profile_instructions.h"
#include <cstdint>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/types/optional.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo.pb.h"
#include "xla/tsl/profiler/convert/xla_op_utils.h"
#include "xla/tsl/profiler/utils/file_system_utils.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/profiler/utils/xplane_utils.h"
#include "xla/tsl/profiler/utils/xplane_visitor.h"
#include "xla/xla.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/types.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace xla {
namespace {
constexpr char kXPlanePb[] = "xplane.pb";
constexpr char kCostNameSep[] = "::";
using tensorflow::profiler::XPlane;
using tensorflow::profiler::XSpace;
using tsl::profiler::CreateTfXPlaneVisitor;
using tsl::profiler::FindPlanesWithPrefix;
using tsl::profiler::FindPlaneWithName;
using tsl::profiler::GetStatTypeStr;
using tsl::profiler::HostEventType;
using tsl::profiler::IsInternalEvent;
using tsl::profiler::ProfilerJoinPath;
using tsl::profiler::StatType;
using tsl::profiler::XEventMetadataVisitor;
using tsl::profiler::XEventVisitor;
using tsl::profiler::XLineVisitor;
using tsl::profiler::XPlaneVisitor;
using tsl::profiler::XStatVisitor;
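// Walks one device plane and records the duration (in microseconds) of every
// HLO op event, keyed by "<fingerprint>::<hlo_op>" when the op's module has a
// known fingerprint and by the HLO op name otherwise. The async-op line and
// internal events are skipped.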
void GetXPlaneLatencyInfo(
const XPlaneVisitor& xplane,
const absl::flat_hash_map<std::string, std::string>& hlo_module_info,
absl::flat_hash_map<std::string, HloLatencyInfo>* hlo_latency_info) {
xplane.ForEachLine([hlo_latency_info,
hlo_module_info](const XLineVisitor& xline) {
if (xline.DisplayName() == tsl::profiler::kXlaAsyncOpLineName) {
return;
}
xline.ForEachEvent([hlo_latency_info,
hlo_module_info](const XEventVisitor& xevent) {
int64_t event_type =
xevent.Type().value_or(HostEventType::kUnknownHostEventType);
if (IsInternalEvent(event_type)) return;
std::optional<std::string> hlo_name = std::nullopt;
std::optional<std::string> hlo_module_name = std::nullopt;
std::optional<std::string> fingerprint = std::nullopt;
std::optional<int64_t> program_id = std::nullopt;
auto for_each_stat = [&](const XStatVisitor& stat) {
if (stat.ValueCase() == tsl::profiler::XStat::VALUE_NOT_SET) return;
if (stat.Name() == GetStatTypeStr(StatType::kHloOp)) {
hlo_name = stat.ToString();
}
if (stat.Name() == GetStatTypeStr(StatType::kProgramId)) {
program_id = stat.IntValue();
}
if (stat.Name() == GetStatTypeStr(StatType::kHloModule)) {
hlo_module_name = stat.ToString();
}
};
xevent.Metadata().ForEachStat(for_each_stat);
xevent.ForEachStat(for_each_stat);
if (!hlo_name.has_value() || !hlo_module_name.has_value()) {
return;
}
if (hlo_module_name.has_value()) {
std::string fingerprint_key = hlo_module_name.value();
if (program_id.has_value()) {
fingerprint_key = tsl::profiler::HloModuleNameWithProgramId(
hlo_module_name.value(), program_id.value());
}
if (hlo_module_info.contains(fingerprint_key)) {
fingerprint = hlo_module_info.at(fingerprint_key);
}
}
double latency = static_cast<double>(xevent.DurationNs()) / 1e3;
std::string key = hlo_name.value();
if (fingerprint.has_value()) {
key = absl::StrCat(fingerprint.value(), kCostNameSep, hlo_name.value());
}
(*hlo_latency_info)[key].durations.emplace_back(latency);
});
});
}
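// Rebuilds an HloModule from its proto; returns nullptr if the config or the
// module cannot be created.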
std::unique_ptr<xla::HloModule> CreateModuleFromProto(
const xla::HloModuleProto& proto) {
auto config = xla::HloModule::CreateModuleConfigFromProto(proto, {});
if (config.ok()) {
auto module = xla::HloModule::CreateFromProto(proto, config.value());
if (module.ok()) {
return std::move(*module);
}
}
return nullptr;
}
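// Returns the "fingerprint_before_lhs" frontend attribute of the module's
// entry root instruction, if present.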
std::optional<std::string> GetHloModuleFingerprint(
const xla::HloModuleProto& hlo_module_proto) {
std::unique_ptr<xla::HloModule> hlo_module =
CreateModuleFromProto(hlo_module_proto);
if (hlo_module == nullptr) {
return std::nullopt;
}
const auto& map = hlo_module->entry_computation()
->root_instruction()
->frontend_attributes()
.map();
auto it = map.find("fingerprint_before_lhs");
if (it != map.end()) {
return it->second;
}
return std::nullopt;
}
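// Parses the HloProtos stored in the metadata plane and maps each module's
// name-with-program-id key to its fingerprint.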
void GetXPlaneHloModuleInfo(
const XPlaneVisitor& xplane,
absl::flat_hash_map<std::string, std::string>* hlo_module_info) {
xplane.ForEachEventMetadata([&](const XEventMetadataVisitor& event_metadata) {
event_metadata.ForEachStat([&](const XStatVisitor& stat) {
xla::HloProto hlo_proto;
if (tsl::ParseProtoUnlimited(&hlo_proto, stat.BytesValue().data(),
stat.BytesValue().size())) {
const xla::HloModuleProto& hlo_module_proto = hlo_proto.hlo_module();
std::optional<std::string> fingerprint =
GetHloModuleFingerprint(hlo_module_proto);
if (fingerprint.has_value()) {
std::string key_with_id = tsl::profiler::HloModuleNameWithProgramId(
hlo_module_proto.name(), hlo_module_proto.id());
(*hlo_module_info)[key_with_id] = fingerprint.value();
}
}
});
});
}
}
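// Reads every xplane.pb file found under `logdir` and aggregates its latency
// data into `profiled_instructions_proto`.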
absl::Status ConvertXplaneUnderLogdirToProfiledInstructionsProto(
const std::string& logdir, tensorflow::profiler::ProfiledInstructionsProto*
profiled_instructions_proto) {
std::vector<std::string> children_path;
TF_RETURN_IF_ERROR(tsl::Env::Default()->GetChildren(logdir, &children_path));
if (children_path.empty()) {
return absl::NotFoundError(
absl::StrCat("Could not find file under: ", logdir));
}
std::vector<tensorflow::profiler::XSpace> xspaces;
for (const std::string& child_path : children_path) {
if (absl::StrContains(child_path, kXPlanePb)) {
std::string xspace_path = ProfilerJoinPath(logdir, child_path);
tensorflow::profiler::XSpace xspace;
TF_RETURN_IF_ERROR(
ReadBinaryProto(tsl::Env::Default(), xspace_path, &xspace));
xspaces.emplace_back(xspace);
}
}
return ConvertXplaneToProfiledInstructionsProto(xspaces,
profiled_instructions_proto);
}
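// Collects per-op latencies from the GPU planes of every XSpace (falling back
// to TPU, then custom planes) and emits one cost entry per op with the mean
// duration in microseconds.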
absl::Status ConvertXplaneToProfiledInstructionsProto(
std::vector<tensorflow::profiler::XSpace> xspaces,
tensorflow::profiler::ProfiledInstructionsProto*
profiled_instructions_proto) {
absl::flat_hash_map<std::string, HloLatencyInfo> hlo_latency_info;
absl::flat_hash_map<std::string, std::string> hlo_module_info;
for (const XSpace& xspace : xspaces) {
const XPlane* metadata_plane =
FindPlaneWithName(xspace, tsl::profiler::kMetadataPlaneName);
if (metadata_plane != nullptr) {
XPlaneVisitor xplane = CreateTfXPlaneVisitor(metadata_plane);
GetXPlaneHloModuleInfo(xplane, &hlo_module_info);
}
std::vector<const XPlane*> device_planes =
FindPlanesWithPrefix(xspace, tsl::profiler::kGpuPlanePrefix);
if (device_planes.empty()) {
device_planes =
FindPlanesWithPrefix(xspace, tsl::profiler::kTpuPlanePrefix);
}
if (device_planes.empty()) {
device_planes =
FindPlanesWithPrefix(xspace, tsl::profiler::kCustomPlanePrefix);
}
for (const XPlane* device_plane : device_planes) {
XPlaneVisitor xplane = CreateTfXPlaneVisitor(device_plane);
GetXPlaneLatencyInfo(xplane, hlo_module_info, &hlo_latency_info);
}
}
for (const auto& iter : hlo_latency_info) {
auto* cost = profiled_instructions_proto->add_costs();
std::vector<double> durations = iter.second.durations;
double sum = std::accumulate(durations.begin(), durations.end(), 0.0);
cost->set_cost_us(sum / durations.size());
cost->set_name(iter.first);
}
return absl::OkStatus();
}
} | #include "xla/python/xplane_to_profile_instructions.h"
#include <cstdint>
#include <memory>
#include <string>
#include "xla/service/hlo.pb.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/tsl/profiler/convert/xla_op_utils.h"
#include "xla/tsl/profiler/rpc/client/save_profile.h"
#include "xla/tsl/profiler/utils/file_system_utils.h"
#include "xla/tsl/profiler/utils/xplane_builder.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "tsl/platform/test.h"
#include "tsl/profiler/protobuf/profiled_instructions.pb.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace xla {
namespace {
using tensorflow::profiler::XSpace;
using tsl::profiler::GetStatTypeStr;
using tsl::profiler::GpuPlaneName;
using tsl::profiler::kHostThreadsPlaneName;
using tsl::profiler::kMetadataPlaneName;
using tsl::profiler::StatType;
using tsl::profiler::XEventBuilder;
using tsl::profiler::XLineBuilder;
using tsl::profiler::XPlaneBuilder;
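// Builds a synthetic XSpace: a host plane with two thread events, plus two
// GPU "custom-call" events (durations given in nanoseconds) attributed to
// module "test_module" with program id 1.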
void CreateXSpace(XSpace* space, int first_device_latency,
int second_device_latency) {
XPlaneBuilder host_plane(space->add_planes());
host_plane.SetName(kHostThreadsPlaneName);
XLineBuilder thread1 = host_plane.GetOrCreateLine(10);
thread1.SetName("thread1");
XEventBuilder event1 =
thread1.AddEvent(*host_plane.GetOrCreateEventMetadata("event1"));
event1.SetTimestampNs(150000);
event1.SetDurationNs(10000);
event1.AddStatValue(*host_plane.GetOrCreateStatMetadata("tf_op"),
*host_plane.GetOrCreateStatMetadata("Relu"));
XLineBuilder thread2 = host_plane.GetOrCreateLine(20);
thread2.SetName("thread2");
XEventBuilder event2 =
thread2.AddEvent(*host_plane.GetOrCreateEventMetadata("event2"));
event2.SetTimestampNs(160000);
event2.SetDurationNs(10000);
event2.AddStatValue(*host_plane.GetOrCreateStatMetadata("tf_op"),
*host_plane.GetOrCreateStatMetadata("Conv2D"));
int64_t program_id = 1;
XPlaneBuilder device_plane(space->add_planes());
device_plane.SetName(GpuPlaneName(0));
device_plane.SetId(0);
XLineBuilder stream1 = device_plane.GetOrCreateLine(30);
stream1.SetName("gpu stream 1");
XEventBuilder event3 =
stream1.AddEvent(*device_plane.GetOrCreateEventMetadata("kernel1"));
event3.SetTimestampNs(180000);
event3.SetDurationNs(first_device_latency);
event3.AddStatValue(
*device_plane.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kHloOp)),
*device_plane.GetOrCreateStatMetadata("custom-call"));
event3.AddStatValue(*device_plane.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kHloModule)),
*device_plane.GetOrCreateStatMetadata("test_module"));
event3.AddStatValue(*device_plane.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kProgramId)),
program_id);
XPlaneBuilder device_plane_2(space->add_planes());
device_plane_2.SetName(GpuPlaneName(1));
device_plane_2.SetId(0);
XLineBuilder stream2 = device_plane.GetOrCreateLine(30);
stream2.SetName("gpu stream 1");
XEventBuilder event5 =
stream1.AddEvent(*device_plane.GetOrCreateEventMetadata("kernel1"));
event5.SetTimestampNs(180000);
event5.SetDurationNs(second_device_latency);
event5.AddStatValue(
*device_plane.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kHloOp)),
*device_plane.GetOrCreateStatMetadata("custom-call"));
event5.AddStatValue(*device_plane.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kHloModule)),
*device_plane.GetOrCreateStatMetadata("test_module"));
event5.AddStatValue(*device_plane.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kProgramId)),
program_id);
}
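// Like CreateXSpace, but first adds a metadata plane carrying an HloProto
// whose root instruction has the fingerprint "08a5", so latencies are keyed
// by fingerprint.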
void CreateXSpaceWithFingerprint(XSpace* space, int first_device_latency,
int second_device_latency) {
XPlaneBuilder metadata_plane(space->add_planes());
metadata_plane.SetName(kMetadataPlaneName);
const char* hlo_text = R"(
HloModule test_module
apply_op {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT apply_op = f32[] add(x, y)
}
ENTRY ar {
p0 = f32[32] parameter(0)
p1 = f32[32, 32] parameter(1)
p2 = f32[32, 32] parameter(2)
p3 = f32[32] parameter(3)
dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
dot1 = f32[32,32]{1,0} custom-call(dot0, p2), custom_call_target="__cublas$gemm"
dot2 = f32[32,32]{1,0} custom-call(dot1, p2), custom_call_target="__cublas$gemm"
dot3 = f32[32,32]{1,0} custom-call(dot2, p2), custom_call_target="__cublas$gemm"
dot4 = f32[32,32]{1,0} custom-call(dot3, p2), custom_call_target="__cublas$gemm"
dot5 = f32[32,32]{1,0} custom-call(dot4, p2), custom_call_target="__cublas$gemm"
dot6 = f32[32,32]{1,0} custom-call(dot5, p2), custom_call_target="__cublas$gemm"
ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op
ar-done = f32[32] all-reduce-done(ar-start)
%ag-start = (f32[32], f32[64]) all-gather-start(p3), dimensions={0}
%ag-done = f32[64] all-gather-done(%ag-start)
add0 = f32[32,32] add(dot0, dot1)
add1 = f32[32,32] add(add0, dot2)
add2 = f32[32,32] add(add1, dot3)
add3 = f32[32,32] add(add2, dot4)
add4 = f32[32,32] add(add3, dot5)
add5 = f32[32,32] add(add4, dot6)
ROOT t = (f32[32], f32[64], f32[32,32]) tuple(ar-done, %ag-done, add5)
})";
xla::HloModuleConfig config;
auto module = std::make_unique<VerifiedHloModule>(
"test_module", config, false,
true,
ShapeUtil::ByteSizeOfElements);
if (module->ParseHloStringAndVerifyModule(hlo_text).ok()) {
HloInstruction* root = module->entry_computation()->root_instruction();
FrontendAttributes attributes;
(*attributes.mutable_map())["fingerprint_before_lhs"] = "08a5";
root->add_frontend_attributes(attributes);
xla::HloModuleProto hlo_module_proto = module->ToProto();
hlo_module_proto.set_id(1);
xla::HloProto hlo_proto;
*hlo_proto.mutable_hlo_module() = hlo_module_proto;
int64_t program_id = 1;
tsl::profiler::XEventMetadata* event_metadata =
metadata_plane.GetOrCreateEventMetadata(program_id);
event_metadata->set_name(tsl::profiler::HloModuleNameWithProgramId(
hlo_proto.hlo_module().name(), program_id));
tsl::profiler::XStatsBuilder<tsl::profiler::XEventMetadata> event_stats(
event_metadata, &metadata_plane);
auto* hlo_proto_stat = metadata_plane.GetOrCreateStatMetadata(
GetStatTypeStr(tsl::profiler::StatType::kHloProto));
event_stats.AddStatValue(*hlo_proto_stat, hlo_proto);
}
return CreateXSpace(space, first_device_latency, second_device_latency);
}
TEST(XplaneToProfiledInstructionsProtoTest,
ConvertXplaneUnderLogdirToProfiledInstructionsProto) {
tensorflow::profiler::ProfiledInstructionsProto profile_proto;
std::string logdir = testing::TempDir() + "/logdir";
std::string run = tsl::profiler::GetCurrentTimeStampAsString();
const std::string path = tsl::profiler::ProfilerJoinPath(logdir, run);
XSpace xspace_first_host;
CreateXSpace(&xspace_first_host, 10000, 10000);
auto status =
tsl::profiler::SaveXSpace(logdir, run, "host_0", xspace_first_host);
EXPECT_TRUE(status.ok());
XSpace xspace_2nd_host;
CreateXSpace(&xspace_2nd_host, 15000, 5000);
status = tsl::profiler::SaveXSpace(logdir, run, "host_1", xspace_2nd_host);
EXPECT_TRUE(status.ok());
EXPECT_TRUE(
ConvertXplaneUnderLogdirToProfiledInstructionsProto(path, &profile_proto)
.ok());
EXPECT_EQ(profile_proto.costs_size(), 1);
EXPECT_EQ(profile_proto.costs(0).cost_us(), 10);
EXPECT_EQ(profile_proto.costs(0).name(), "custom-call");
}
TEST(XplaneToProfiledInstructionsProtoTest,
ConvertXplaneUnderLogdirToProfiledInstructionsProtoWithFingerprint) {
tensorflow::profiler::ProfiledInstructionsProto profile_proto;
std::string logdir = testing::TempDir() + "/logdir";
std::string run = tsl::profiler::GetCurrentTimeStampAsString();
const std::string path = tsl::profiler::ProfilerJoinPath(logdir, run);
XSpace xspace_first_host;
CreateXSpaceWithFingerprint(&xspace_first_host, 10000, 10000);
auto status =
tsl::profiler::SaveXSpace(logdir, run, "host_0", xspace_first_host);
EXPECT_TRUE(status.ok());
XSpace xspace_2nd_host;
CreateXSpaceWithFingerprint(&xspace_2nd_host, 15000, 5000);
status = tsl::profiler::SaveXSpace(logdir, run, "host_1", xspace_2nd_host);
EXPECT_TRUE(status.ok());
EXPECT_TRUE(
ConvertXplaneUnderLogdirToProfiledInstructionsProto(path, &profile_proto)
.ok());
EXPECT_EQ(profile_proto.costs_size(), 1);
EXPECT_EQ(profile_proto.costs(0).cost_us(), 10);
EXPECT_EQ(profile_proto.costs(0).name(), "08a5::custom-call");
}
TEST(XplaneToProfiledInstructionsProtoTest,
ConvertXplaneToProfiledInstructionsProto) {
tensorflow::profiler::ProfiledInstructionsProto profile_proto;
XSpace xspace_a;
CreateXSpace(&xspace_a, 10000, 10000);
XSpace xspace_b;
CreateXSpace(&xspace_b, 15000, 5000);
EXPECT_TRUE(ConvertXplaneToProfiledInstructionsProto({xspace_a, xspace_b},
&profile_proto)
.ok());
EXPECT_EQ(profile_proto.costs_size(), 1);
EXPECT_EQ(profile_proto.costs(0).cost_us(), 10);
EXPECT_EQ(profile_proto.costs(0).name(), "custom-call");
}
TEST(XplaneToProfiledInstructionsProtoTest,
ConvertXplaneToProfiledInstructionsProtoWithFingerprint) {
tensorflow::profiler::ProfiledInstructionsProto profile_proto;
XSpace xspace_a;
CreateXSpaceWithFingerprint(&xspace_a, 10000, 10000);
XSpace xspace_b;
CreateXSpaceWithFingerprint(&xspace_b, 15000, 5000);
EXPECT_TRUE(ConvertXplaneToProfiledInstructionsProto({xspace_a, xspace_b},
&profile_proto)
.ok());
EXPECT_EQ(profile_proto.costs_size(), 1);
EXPECT_EQ(profile_proto.costs(0).cost_us(), 10);
EXPECT_EQ(profile_proto.costs(0).name(), "08a5::custom-call");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/xplane_to_profile_instructions.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/xplane_to_profile_instructions_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2a27a67d-619c-4efb-a55a-b7f6ba66a70d | cpp | google/quiche | ping_payload_decoder | quiche/http2/decoder/payload_decoders/ping_payload_decoder.cc | quiche/http2/decoder/payload_decoders/ping_payload_decoder_test.cc | #include "quiche/http2/decoder/payload_decoders/ping_payload_decoder.h"
#include "quiche/http2/decoder/http2_frame_decoder_listener.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
namespace {
constexpr auto kOpaqueSize = Http2PingFields::EncodedSize();
}
DecodeStatus PingPayloadDecoder::StartDecodingPayload(FrameDecoderState* state,
DecodeBuffer* db) {
const Http2FrameHeader& frame_header = state->frame_header();
const uint32_t total_length = frame_header.payload_length;
QUICHE_DVLOG(2) << "PingPayloadDecoder::StartDecodingPayload: "
<< frame_header;
QUICHE_DCHECK_EQ(Http2FrameType::PING, frame_header.type);
QUICHE_DCHECK_LE(db->Remaining(), total_length);
QUICHE_DCHECK_EQ(0, frame_header.flags & ~(Http2FrameFlag::ACK));
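  // Fast path: the full 8-byte opaque payload is already in the decode buffer
  // and the declared frame length is exactly right, so the listener can be
  // notified directly without buffering any partial state.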
if (db->Remaining() == kOpaqueSize && total_length == kOpaqueSize) {
static_assert(sizeof(Http2PingFields) == kOpaqueSize,
"If not, then can't enter this block!");
auto* ping = reinterpret_cast<const Http2PingFields*>(db->cursor());
if (frame_header.IsAck()) {
state->listener()->OnPingAck(frame_header, *ping);
} else {
state->listener()->OnPing(frame_header, *ping);
}
db->AdvanceCursor(kOpaqueSize);
return DecodeStatus::kDecodeDone;
}
state->InitializeRemainders();
return HandleStatus(
state, state->StartDecodingStructureInPayload(&ping_fields_, db));
}
DecodeStatus PingPayloadDecoder::ResumeDecodingPayload(FrameDecoderState* state,
DecodeBuffer* db) {
QUICHE_DVLOG(2) << "ResumeDecodingPayload: remaining_payload="
<< state->remaining_payload();
QUICHE_DCHECK_EQ(Http2FrameType::PING, state->frame_header().type);
QUICHE_DCHECK_LE(db->Remaining(), state->frame_header().payload_length);
return HandleStatus(
state, state->ResumeDecodingStructureInPayload(&ping_fields_, db));
}
DecodeStatus PingPayloadDecoder::HandleStatus(FrameDecoderState* state,
DecodeStatus status) {
QUICHE_DVLOG(2) << "HandleStatus: status=" << status
<< "; remaining_payload=" << state->remaining_payload();
if (status == DecodeStatus::kDecodeDone) {
if (state->remaining_payload() == 0) {
const Http2FrameHeader& frame_header = state->frame_header();
if (frame_header.IsAck()) {
state->listener()->OnPingAck(frame_header, ping_fields_);
} else {
state->listener()->OnPing(frame_header, ping_fields_);
}
return DecodeStatus::kDecodeDone;
}
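    // The 8-byte opaque field was fully decoded but payload bytes remain, so
    // the frame is longer than a valid PING frame: report a size error.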
return state->ReportFrameSizeError();
}
QUICHE_DCHECK(
(status == DecodeStatus::kDecodeInProgress &&
state->remaining_payload() > 0) ||
(status == DecodeStatus::kDecodeError && state->remaining_payload() == 0))
<< "\n status=" << status
<< "; remaining_payload=" << state->remaining_payload();
return status;
}
} | #include "quiche/http2/decoder/payload_decoders/ping_payload_decoder.h"
#include <stddef.h>
#include "quiche/http2/decoder/http2_frame_decoder_listener.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/test_tools/frame_parts.h"
#include "quiche/http2/test_tools/frame_parts_collector.h"
#include "quiche/http2/test_tools/http2_frame_builder.h"
#include "quiche/http2/test_tools/http2_random.h"
#include "quiche/http2/test_tools/http2_structures_test_util.h"
#include "quiche/http2/test_tools/payload_decoder_base_test_util.h"
#include "quiche/http2/test_tools/random_decoder_test_base.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace test {
class PingPayloadDecoderPeer {
public:
static constexpr Http2FrameType FrameType() { return Http2FrameType::PING; }
static constexpr uint8_t FlagsAffectingPayloadDecoding() { return 0; }
};
namespace {
struct Listener : public FramePartsCollector {
void OnPing(const Http2FrameHeader& header,
const Http2PingFields& ping) override {
QUICHE_VLOG(1) << "OnPing: " << header << "; " << ping;
StartAndEndFrame(header)->OnPing(header, ping);
}
void OnPingAck(const Http2FrameHeader& header,
const Http2PingFields& ping) override {
QUICHE_VLOG(1) << "OnPingAck: " << header << "; " << ping;
StartAndEndFrame(header)->OnPingAck(header, ping);
}
void OnFrameSizeError(const Http2FrameHeader& header) override {
QUICHE_VLOG(1) << "OnFrameSizeError: " << header;
FrameError(header)->OnFrameSizeError(header);
}
};
class PingPayloadDecoderTest
: public AbstractPayloadDecoderTest<PingPayloadDecoder,
PingPayloadDecoderPeer, Listener> {
protected:
Http2PingFields RandPingFields() {
Http2PingFields fields;
test::Randomize(&fields, RandomPtr());
return fields;
}
};
TEST_F(PingPayloadDecoderTest, WrongSize) {
auto approve_size = [](size_t size) {
return size != Http2PingFields::EncodedSize();
};
Http2FrameBuilder fb;
fb.Append(RandPingFields());
fb.Append(RandPingFields());
fb.Append(RandPingFields());
EXPECT_TRUE(VerifyDetectsFrameSizeError(0, fb.buffer(), approve_size));
}
TEST_F(PingPayloadDecoderTest, Ping) {
for (int n = 0; n < 100; ++n) {
Http2PingFields fields = RandPingFields();
Http2FrameBuilder fb;
fb.Append(fields);
Http2FrameHeader header(fb.size(), Http2FrameType::PING,
RandFlags() & ~Http2FrameFlag::ACK, RandStreamId());
set_frame_header(header);
FrameParts expected(header);
expected.SetOptPing(fields);
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(fb.buffer(), expected));
}
}
TEST_F(PingPayloadDecoderTest, PingAck) {
for (int n = 0; n < 100; ++n) {
Http2PingFields fields;
Randomize(&fields, RandomPtr());
Http2FrameBuilder fb;
fb.Append(fields);
Http2FrameHeader header(fb.size(), Http2FrameType::PING,
RandFlags() | Http2FrameFlag::ACK, RandStreamId());
set_frame_header(header);
FrameParts expected(header);
expected.SetOptPing(fields);
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(fb.buffer(), expected));
}
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/decoder/payload_decoders/ping_payload_decoder.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/decoder/payload_decoders/ping_payload_decoder_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
7706d990-1619-4cbd-8817-82820269533e | cpp | tensorflow/tensorflow | reporter | third_party/xla/xla/tsl/util/reporter.cc | tensorflow/core/util/reporter_test.cc | #include "xla/tsl/util/reporter.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/str_util.h"
namespace tsl {
TestReportFile::TestReportFile(const string& fname, const string& test_name)
: closed_(true), fname_(fname), test_name_(test_name) {}
absl::Status TestReportFile::Append(const string& content) {
if (closed_) return absl::OkStatus();
return log_file_->Append(content);
}
absl::Status TestReportFile::Close() {
if (closed_) return absl::OkStatus();
closed_ = true;
return log_file_->Close();
}
absl::Status TestReportFile::Initialize() {
if (fname_.empty()) {
return absl::OkStatus();
}
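  // Benchmark names may contain '/' separators; they are joined back with
  // "__" so the report maps to a single flat file name.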
string mangled_fname = strings::StrCat(
fname_, absl::StrJoin(str_util::Split(test_name_, '/'), "__"));
Env* env = Env::Default();
if (env->FileExists(mangled_fname).ok()) {
return errors::InvalidArgument(
"Cannot create TestReportFile, file exists: ", mangled_fname);
}
TF_RETURN_IF_ERROR(env->NewWritableFile(mangled_fname, &log_file_));
TF_RETURN_IF_ERROR(log_file_->Flush());
closed_ = false;
return absl::OkStatus();
}
TestReporter::TestReporter(const string& fname, const string& test_name)
: report_file_(fname, test_name) {
benchmark_entry_.set_name(test_name);
}
absl::Status TestReporter::Close() {
if (report_file_.IsClosed()) return absl::OkStatus();
tensorflow::BenchmarkEntries entries;
*entries.add_entry() = benchmark_entry_;
TF_RETURN_IF_ERROR(report_file_.Append(entries.SerializeAsString()));
benchmark_entry_.Clear();
return report_file_.Close();
}
absl::Status TestReporter::Benchmark(int64_t iters, double cpu_time,
double wall_time, double throughput) {
if (report_file_.IsClosed()) return absl::OkStatus();
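  // Times are stored per iteration: the given cpu_time and wall_time are
  // divided by iters before being recorded.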
benchmark_entry_.set_iters(iters);
benchmark_entry_.set_cpu_time(cpu_time / iters);
benchmark_entry_.set_wall_time(wall_time / iters);
benchmark_entry_.set_throughput(throughput);
return absl::OkStatus();
}
absl::Status TestReporter::SetProperty(const string& name,
const string& value) {
if (report_file_.IsClosed()) return absl::OkStatus();
(*benchmark_entry_.mutable_extras())[name].set_string_value(value);
return absl::OkStatus();
}
absl::Status TestReporter::SetProperty(const string& name, double value) {
if (report_file_.IsClosed()) return absl::OkStatus();
(*benchmark_entry_.mutable_extras())[name].set_double_value(value);
return absl::OkStatus();
}
absl::Status TestReporter::AddMetric(const string& name, double value) {
if (report_file_.IsClosed()) return absl::OkStatus();
auto* metric = benchmark_entry_.add_metrics();
metric->set_name(name);
metric->set_value(value);
return absl::OkStatus();
}
absl::Status TestReporter::Initialize() { return report_file_.Initialize(); }
} | #define _XOPEN_SOURCE  // for setenv() and unsetenv() used in the tests below
#include <cstdlib>
#include "tensorflow/core/util/reporter.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
static void ExpectHasSubstr(StringPiece s, StringPiece expected) {
EXPECT_TRUE(absl::StrContains(s, expected))
<< s << " does not contain " << expected;
}
TEST(TestReporter, NoLogging) {
TestReporter test_reporter("b1");
TF_EXPECT_OK(test_reporter.Initialize());
TF_EXPECT_OK(test_reporter.Close());
}
TEST(TestReporter, UsesEnv) {
const char* old_env = std::getenv(TestReporter::kTestReporterEnv);
setenv(TestReporter::kTestReporterEnv, "/cant/find/me:!", 1);
CHECK_EQ(string(std::getenv(TestReporter::kTestReporterEnv)),
string("/cant/find/me:!"));
TestReporter test_reporter("b1");
Status s = test_reporter.Initialize();
ExpectHasSubstr(s.ToString(), "/cant/find/me");
unsetenv(TestReporter::kTestReporterEnv);
CHECK_EQ(std::getenv(TestReporter::kTestReporterEnv), nullptr);
TestReporter test_reporter_empty("b1");
s = test_reporter_empty.Initialize();
TF_EXPECT_OK(s);
s = test_reporter_empty.Close();
TF_EXPECT_OK(s);
if (old_env == nullptr) {
unsetenv(TestReporter::kTestReporterEnv);
} else {
setenv(TestReporter::kTestReporterEnv, old_env, 1);
}
}
TEST(TestReporter, CreateTwiceFails) {
{
TestReporter test_reporter(
strings::StrCat(testing::TmpDir(), "/test_reporter_dupe"), "t1");
TF_EXPECT_OK(test_reporter.Initialize());
}
{
TestReporter test_reporter(
strings::StrCat(testing::TmpDir(), "/test_reporter_dupe"), "t1");
Status s = test_reporter.Initialize();
ExpectHasSubstr(s.ToString(), "file exists:");
}
}
TEST(TestReporter, CreateCloseCreateAgainSkipsSecond) {
TestReporter test_reporter(
strings::StrCat(testing::TmpDir(), "/test_reporter_create_close"), "t1");
TF_EXPECT_OK(test_reporter.Initialize());
TF_EXPECT_OK(test_reporter.Close());
TF_EXPECT_OK(test_reporter.Benchmark(1, 1.0, 2.0, 3.0));
TF_EXPECT_OK(test_reporter.Close());
Status s = test_reporter.Initialize();
ExpectHasSubstr(s.ToString(), "file exists:");
}
TEST(TestReporter, Benchmark) {
string fname =
strings::StrCat(testing::TmpDir(), "/test_reporter_benchmarks_");
TestReporter test_reporter(fname, "b1/2/3");
TF_EXPECT_OK(test_reporter.Initialize());
TF_EXPECT_OK(test_reporter.Benchmark(1, 1.0, 2.0, 3.0));
TF_EXPECT_OK(test_reporter.Close());
string expected_fname = strings::StrCat(fname, "b1__2__3");
string read;
TF_EXPECT_OK(ReadFileToString(Env::Default(), expected_fname, &read));
BenchmarkEntries benchmark_entries;
ASSERT_TRUE(benchmark_entries.ParseFromString(read));
ASSERT_EQ(1, benchmark_entries.entry_size());
const BenchmarkEntry& benchmark_entry = benchmark_entries.entry(0);
EXPECT_EQ(benchmark_entry.name(), "b1/2/3");
EXPECT_EQ(benchmark_entry.iters(), 1);
EXPECT_EQ(benchmark_entry.cpu_time(), 1.0);
EXPECT_EQ(benchmark_entry.wall_time(), 2.0);
EXPECT_EQ(benchmark_entry.throughput(), 3.0);
}
TEST(TestReporter, SetProperties) {
string fname =
strings::StrCat(testing::TmpDir(), "/test_reporter_benchmarks_");
TestReporter test_reporter(fname, "b2/3/4");
TF_EXPECT_OK(test_reporter.Initialize());
TF_EXPECT_OK(test_reporter.SetProperty("string_prop", "abc"));
TF_EXPECT_OK(test_reporter.SetProperty("double_prop", 4.0));
TF_EXPECT_OK(test_reporter.Close());
string expected_fname = strings::StrCat(fname, "b2__3__4");
string read;
TF_EXPECT_OK(ReadFileToString(Env::Default(), expected_fname, &read));
BenchmarkEntries benchmark_entries;
ASSERT_TRUE(benchmark_entries.ParseFromString(read));
ASSERT_EQ(1, benchmark_entries.entry_size());
const BenchmarkEntry& benchmark_entry = benchmark_entries.entry(0);
const auto& extras = benchmark_entry.extras();
ASSERT_EQ(2, extras.size());
EXPECT_EQ("abc", extras.at("string_prop").string_value());
EXPECT_EQ(4.0, extras.at("double_prop").double_value());
}
TEST(TestReporter, AddMetrics) {
string fname =
strings::StrCat(testing::TmpDir(), "/test_reporter_benchmarks_");
TestReporter test_reporter(fname, "b3/4/5");
TF_EXPECT_OK(test_reporter.Initialize());
TF_EXPECT_OK(test_reporter.AddMetric("metric1", 2.0));
TF_EXPECT_OK(test_reporter.AddMetric("metric2", 3.0));
TF_EXPECT_OK(test_reporter.Close());
string expected_fname = strings::StrCat(fname, "b3__4__5");
string read;
TF_EXPECT_OK(ReadFileToString(Env::Default(), expected_fname, &read));
BenchmarkEntries benchmark_entries;
ASSERT_TRUE(benchmark_entries.ParseFromString(read));
ASSERT_EQ(1, benchmark_entries.entry_size());
const BenchmarkEntry& benchmark_entry = benchmark_entries.entry(0);
const auto& metrics = benchmark_entry.metrics();
ASSERT_EQ(2, metrics.size());
EXPECT_EQ("metric1", metrics.at(0).name());
EXPECT_EQ(2.0, metrics.at(0).value());
EXPECT_EQ("metric2", metrics.at(1).name());
EXPECT_EQ(3.0, metrics.at(1).value());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/util/reporter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/reporter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3fc1de5f-22d4-46d6-9a7d-6cf17b61d645 | cpp | tensorflow/tensorflow | attribute_map_internal | tensorflow/lite/core/async/interop/attribute_map_internal.cc | tensorflow/lite/core/async/interop/attribute_map_internal_test.cc | #include "tensorflow/lite/core/async/interop/attribute_map_internal.h"
#include "tensorflow/lite/core/async/interop/reconcile_fns.h"
namespace tflite {
namespace interop {
bool AttributeMap::ReconcileAttributes(const AttributeMap* other,
AttributeMap* merged,
AttributeMap* conflict) const {
if (other == nullptr || merged == nullptr) return false;
if (type_ != other->type_) return false;
merged->type_ = type_;
if (conflict) conflict->type_ = type_;
return tflite::interop::ReconcileGeneralAttributeKeys(
type_, &attrs_, &other->attrs_, &merged->attrs_,
conflict ? &conflict->attrs_ : nullptr);
}
bool AttributeMap::CheckAttributeCoverage(const AttributeMap* other,
AttributeMap* conflict) const {
if (other == nullptr) return false;
if (type_ != other->type_) return false;
if (conflict) conflict->type_ = type_;
return tflite::interop::CheckGeneralAttributeKeysCoverage(
type_, &attrs_, &other->attrs_, conflict ? &conflict->attrs_ : nullptr);
}
}
} | #include "tensorflow/lite/core/async/interop/attribute_map_internal.h"
#include <cstdint>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/async/interop/c/types.h"
namespace tflite {
namespace interop {
namespace {
TEST(AttributeMapTest, TypeTest) {
{
auto attrs = AttributeMap(kTfLiteAttrMapTypeBuffer);
EXPECT_TRUE(attrs.IsBufferAttributeMap());
EXPECT_FALSE(attrs.IsSyncAttributeMap());
}
{
auto attrs = AttributeMap(kTfLiteAttrMapTypeSync);
EXPECT_TRUE(attrs.IsSyncAttributeMap());
EXPECT_FALSE(attrs.IsBufferAttributeMap());
}
}
TEST(AttributeMapTest, AccessorTest) {
auto attrs = AttributeMap(kTfLiteAttrMapTypeBuffer);
{
attrs.SetAttr(kTfLiteBufferAttrKeyAlignment, size_t(8));
size_t result;
EXPECT_TRUE(attrs.GetAttr(kTfLiteBufferAttrKeyAlignment, &result));
EXPECT_EQ(8, result);
}
{
attrs.SetCustomAttr("Foo", 12);
int result;
EXPECT_FALSE(attrs.GetCustomAttr("Bar", &result));
EXPECT_TRUE(attrs.GetCustomAttr("Foo", &result));
EXPECT_EQ(12, result);
}
}
TEST(AttributeMapTest, ReconcileFailDifferentTypes) {
auto attrs1 = AttributeMap(kTfLiteAttrMapTypeBuffer);
auto attrs2 = AttributeMap(kTfLiteAttrMapTypeSync);
auto attrs3 = AttributeMap(kTfLiteAttrMapTypeBuffer);
EXPECT_FALSE(
attrs1.ReconcileAttributes(&attrs2, &attrs3, nullptr));
EXPECT_FALSE(attrs1.CheckAttributeCoverage(&attrs2, &attrs3));
}
TEST(AttributeMapTest, NullptrTest) {
auto attrs1 = AttributeMap(kTfLiteAttrMapTypeBuffer);
auto attrs2 = AttributeMap(kTfLiteAttrMapTypeBuffer);
EXPECT_FALSE(attrs1.ReconcileAttributes(nullptr, &attrs2,
nullptr));
EXPECT_FALSE(attrs1.ReconcileAttributes(&attrs2, nullptr,
nullptr));
EXPECT_FALSE(attrs1.CheckAttributeCoverage(nullptr,
nullptr));
}
TEST(AttributeMapTest, ReconcileDifferentTypes) {
auto attrs1 = AttributeMap(kTfLiteAttrMapTypeBuffer);
auto attrs2 = AttributeMap(kTfLiteAttrMapTypeSync);
auto attrs3 = AttributeMap(kTfLiteAttrMapTypeBuffer);
EXPECT_FALSE(attrs1.ReconcileAttributes(&attrs2, &attrs3,
nullptr));
}
TEST(AttributeMapTest, ReconcileTest) {
auto attrs1 = AttributeMap(kTfLiteAttrMapTypeBuffer);
attrs1.SetAttr(kTfLiteBufferAttrKeyAlignment, size_t(8));
auto attrs2 = AttributeMap(kTfLiteAttrMapTypeBuffer);
attrs2.SetAttr(kTfLiteBufferAttrKeyAlignment, size_t(4));
auto attrs3 = AttributeMap(kTfLiteAttrMapTypeSync);
auto attrs4 = AttributeMap(kTfLiteAttrMapTypeSync);
EXPECT_TRUE(attrs1.ReconcileAttributes(&attrs2, &attrs3, &attrs4));
EXPECT_TRUE(attrs3.IsBufferAttributeMap());
EXPECT_TRUE(attrs4.IsBufferAttributeMap());
size_t result;
EXPECT_TRUE(attrs3.GetAttr(kTfLiteBufferAttrKeyAlignment, &result));
EXPECT_EQ(8, result);
}
TEST(AttributeMapTest, CoverageTest) {
auto attrs1 = AttributeMap(kTfLiteAttrMapTypeBuffer);
attrs1.SetAttr(kTfLiteBufferAttrKeyAlignment, size_t(8));
auto attrs2 = AttributeMap(kTfLiteAttrMapTypeBuffer);
attrs2.SetAttr(kTfLiteBufferAttrKeyAlignment, size_t(4));
auto attrs3 = AttributeMap(kTfLiteAttrMapTypeSync);
EXPECT_TRUE(attrs1.CheckAttributeCoverage(&attrs2, &attrs3));
EXPECT_TRUE(attrs3.IsBufferAttributeMap());
}
TEST(AttributeMapTest, CoverageFailedTest) {
auto attrs1 = AttributeMap(kTfLiteAttrMapTypeBuffer);
attrs1.SetAttr(kTfLiteBufferAttrKeyAlignment, size_t(10));
auto attrs2 = AttributeMap(kTfLiteAttrMapTypeBuffer);
attrs2.SetAttr(kTfLiteBufferAttrKeyAlignment, size_t(4));
auto conflict = AttributeMap(kTfLiteAttrMapTypeSync);
EXPECT_FALSE(attrs1.CheckAttributeCoverage(&attrs2, &conflict));
EXPECT_TRUE(conflict.IsBufferAttributeMap());
size_t result;
EXPECT_TRUE(conflict.GetAttr(kTfLiteBufferAttrKeyAlignment, &result));
EXPECT_EQ(4, result);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/async/interop/attribute_map_internal.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/async/interop/attribute_map_internal_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4e72922f-72c7-4080-9b76-440f602382cd | cpp | tensorflow/tensorflow | grpc_tensor_coding | tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding.cc | tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding_test.cc | #include "tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding.h"
#include "grpcpp/support/byte_buffer.h"
#include "grpcpp/support/slice.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_reference.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/io/proto_encode_helper.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/protobuf/worker.pb.h"
namespace tensorflow {
namespace grpc {
void EncodeRecvTensorResponseToByteBuffer(const RecvTensorResponse& proto,
::grpc::ByteBuffer* result) {
::grpc::Slice slice(proto.ByteSizeLong());
proto.SerializeWithCachedSizesToArray(
const_cast<uint8*>(reinterpret_cast<const uint8*>(slice.begin())));
::grpc::ByteBuffer tmp(&slice, 1);
result->Swap(&tmp);
}
static int VarLengthEncodingSize(uint32 tag, size_t bytes) {
return core::VarintLength(tag << 3) + core::VarintLength(bytes) + bytes;
}
static int SkeletonEncodingSizeUpperBound(const Tensor& val) {
static const int kVarintMax64 = 10;
const int ndims = val.shape().dims();
return (2 * kVarintMax64) +
(ndims * (4 * kVarintMax64));
}
static void EncodeSkeleton(const Tensor& val, io::ProtoEncodeHelper* e) {
e->WriteUint64(TensorProto::kDtypeFieldNumber, val.dtype());
const int ndims = val.shape().dims();
int tensor_shape_bytes = 0;
for (int d = 0; d < ndims; d++) {
int64_t dim_size = val.shape().dim_size(d);
tensor_shape_bytes +=
2 +
1 +
core::VarintLength(dim_size);
}
if (tensor_shape_bytes > 0) {
e->WriteVarlengthBeginning(TensorProto::kTensorShapeFieldNumber,
tensor_shape_bytes);
for (int d = 0; d < ndims; d++) {
int64_t dim_size = val.shape().dim_size(d);
int64_t dim_varlen = 1 +
core::VarintLength(dim_size);
e->WriteVarlengthBeginning(TensorShapeProto::kDimFieldNumber, dim_varlen);
e->WriteUint64(TensorShapeProto_Dim::kSizeFieldNumber, dim_size);
}
}
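  // Debug-only consistency check between the hand-written skeleton encoding
  // above and the protobuf library's encoding of the same dtype and shape.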
#ifndef NDEBUG
{
TensorProto skeleton;
skeleton.set_dtype(val.dtype());
val.shape().AsProto(skeleton.mutable_tensor_shape());
string tensor_except_contents;
skeleton.AppendToString(&tensor_except_contents);
TensorProto skeleton2;
skeleton2.ParseFromString(string(e->data(), e->size()));
string out;
skeleton.AppendToString(&out);
DCHECK_EQ(tensor_except_contents, out) << skeleton.DebugString() << " vs\n"
<< skeleton2.DebugString();
}
#endif
}
void EncodeTensorToByteBuffer(bool is_dead, const Tensor& val, bool require_ack,
::grpc::ByteBuffer* result) {
const int kLargeTensorBytes = 1024;
const int64_t kProtoBufLimitBytes = 1LL << 31;
if (val.TotalBytes() > kProtoBufLimitBytes) {
size_t exceeded_bytes = val.TotalBytes() - kProtoBufLimitBytes;
LOG(FATAL) << "Cannot encode a Tensor that exceeds the 2GB protobuf limit. "
"Exceeded bytes: "
<< exceeded_bytes
<< ", tensor shape: " << val.shape().AsProto().DebugString();
}
RecvTensorResponse response;
if (is_dead) {
response.set_is_dead(is_dead);
}
response.set_require_ack(require_ack);
response.set_send_start_micros(Env::Default()->NowMicros());
if (!DataTypeCanUseMemcpy(val.dtype())) {
val.AsProtoTensorContent(response.mutable_tensor());
EncodeRecvTensorResponseToByteBuffer(response, result);
} else {
absl::InlinedVector<char, 128UL> skeleton(
SkeletonEncodingSizeUpperBound(val));
io::ProtoEncodeHelper e_skeleton(skeleton.data(), skeleton.size());
EncodeSkeleton(val, &e_skeleton);
StringPiece tdata = val.tensor_data();
uint32 overall_tensor_proto_bytesize =
(e_skeleton.size() +
VarLengthEncodingSize(TensorProto::kTensorContentFieldNumber,
tdata.size()));
string header;
response.AppendToString(&header);
size_t expected_size =
(header.size() +
VarLengthEncodingSize(RecvTensorResponse::kTensorFieldNumber,
overall_tensor_proto_bytesize));
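    // For tensors larger than kLargeTensorBytes, skip copying the contents:
    // the tensor's own buffer is exposed to gRPC as a second, zero-copy slice.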
bool share_tensor_slice_memory = (tdata.size() > kLargeTensorBytes);
size_t encoder_size = expected_size - tdata.size();
absl::InlinedVector<char, 1024UL> space(encoder_size);
io::ProtoEncodeHelper e(space.data(), space.size());
e.WriteRawBytes(header);
e.WriteVarlengthBeginning(RecvTensorResponse::kTensorFieldNumber,
overall_tensor_proto_bytesize);
e.WriteRawBytes(StringPiece(e_skeleton.data(), e_skeleton.size()));
e.WriteVarlengthBeginning(TensorProto::kTensorContentFieldNumber,
tdata.size());
::grpc::Slice slices[2];
int num_slices = 0;
{
size_t slice_len =
e.size() + (share_tensor_slice_memory ? 0 : tdata.size());
slices[0] = ::grpc::Slice(slice_len);
memcpy(const_cast<uint8_t*>(slices[0].begin()), e.data(), e.size());
if (!share_tensor_slice_memory) {
memcpy(const_cast<uint8_t*>(slices[0].begin()) + e.size(), tdata.data(),
tdata.size());
}
num_slices += 1;
}
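    // The zero-copy slice aliases the tensor's buffer, so take a reference and
    // release it from the slice's cleanup callback once gRPC is done with it.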
if (share_tensor_slice_memory) {
const TensorBuffer* buf = DMAHelper::buffer(&val);
buf->Ref();
slices[1] = ::grpc::Slice(
const_cast<void*>(static_cast<const void*>(tdata.data())),
tdata.size(),
[](void* backing) { static_cast<TensorBuffer*>(backing)->Unref(); },
const_cast<TensorBuffer*>(buf));
num_slices += 1;
}
size_t total_bytes = 0;
for (int i = 0; i < num_slices; i++) {
total_bytes += slices[i].size();
}
CHECK_EQ(total_bytes, expected_size);
::grpc::ByteBuffer tmp(&slices[0], num_slices);
result->Swap(&tmp);
}
}
}
} | #include "tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding.h"
#include "grpcpp/support/byte_buffer.h"
#include "grpcpp/support/slice.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/worker.pb.h"
namespace tensorflow {
class GrpcTensorCodingTest : public ::testing::Test {
public:
void Validate(const Tensor& t, bool is_dead) {
::grpc::ByteBuffer buf;
grpc::EncodeTensorToByteBuffer(is_dead, t, false, &buf);
std::vector<::grpc::Slice> slices;
(void)buf.Dump(&slices);
string tmp;
for (const auto& s : slices) {
tmp.append(reinterpret_cast<const char*>(s.begin()), s.size());
}
RecvTensorResponse response;
EXPECT_TRUE(response.ParseFromString(tmp));
EXPECT_EQ(response.is_dead(), is_dead);
Tensor result_tensor;
EXPECT_TRUE(result_tensor.FromProto(response.tensor()));
EXPECT_EQ(t.dtype(), result_tensor.dtype());
EXPECT_EQ(t.shape().DebugString(), result_tensor.shape().DebugString());
EXPECT_EQ(t.DebugString(), result_tensor.DebugString());
}
template <typename T>
void DoTest(DataType dt) {
gtl::InlinedVector<T, 4> v;
for (int elems = 0; elems <= 10000; elems++) {
if (elems < 100 || (elems % 1000 == 0)) {
Tensor a(dt, TensorShape({1, static_cast<int64_t>(v.size())}));
test::FillValues<T>(&a, v);
Validate(a, (elems == 0));
}
v.push_back(static_cast<T>(elems));
}
}
void DoTestForStrings(DataType dt) {
absl::InlinedVector<tstring, 4UL> v;
for (int elems = 0; elems <= 10000; elems++) {
if (elems < 100 || (elems % 1000 == 0)) {
Tensor a(dt, TensorShape({1, static_cast<int64_t>(v.size())}));
test::FillValues<tstring>(&a, v);
Validate(a, (elems == 0));
}
v.push_back(strings::StrCat("This is string ", elems));
}
}
};
TEST_F(GrpcTensorCodingTest, Simple) {
DoTest<float>(DT_FLOAT);
DoTest<double>(DT_DOUBLE);
DoTest<int32>(DT_INT32);
DoTest<uint16>(DT_UINT16);
DoTest<uint8>(DT_UINT8);
DoTest<int16>(DT_INT16);
DoTest<int8>(DT_INT8);
DoTest<complex64>(DT_COMPLEX64);
DoTest<complex128>(DT_COMPLEX128);
DoTest<int64_t>(DT_INT64);
DoTest<bool>(DT_BOOL);
DoTest<qint8>(DT_QINT8);
DoTest<quint8>(DT_QUINT8);
DoTest<qint16>(DT_QINT16);
DoTest<quint16>(DT_QUINT16);
DoTest<qint32>(DT_QINT32);
DoTest<bfloat16>(DT_BFLOAT16);
DoTest<Eigen::half>(DT_HALF);
}
TEST_F(GrpcTensorCodingTest, StringTensor) { DoTestForStrings(DT_STRING); }
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/rpc/grpc_tensor_coding_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e623aace-d631-4870-923c-4ed013853e68 | cpp | tensorflow/tensorflow | all_reduce_promotion | third_party/xla/xla/service/all_reduce_promotion.cc | third_party/xla/xla/service/all_reduce_promotion_test.cc | #include "xla/service/all_reduce_promotion.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
bool IsAllReduce(const HloInstruction* inst) {
return inst->opcode() == HloOpcode::kAllReduce ||
inst->opcode() == HloOpcode::kReduceScatter;
}
std::unique_ptr<HloInstruction> CloneAllReduce(
const HloInstruction* inst, const Shape& shape,
absl::Span<HloInstruction* const> operands) {
std::unique_ptr<HloInstruction> new_inst =
inst->CloneWithNewOperands(shape, operands);
HloComputation* to_apply = new_inst->to_apply();
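  // Rebuild the reduction computation so its parameters and root use the
  // promoted element type of the new shape.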
HloComputation* to_apply_promoted = [&]() {
PrimitiveType type = shape.element_type();
std::string name = absl::StrCat(to_apply->name(), "_promoted");
HloComputation::Builder promoted(name);
auto x = promoted.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(type, {}), "x"));
auto y = promoted.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(type, {}), "y"));
promoted.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(type, {}), to_apply->root_instruction()->opcode(),
x, y));
return inst->GetModule()->AddEmbeddedComputation(promoted.Build());
}();
new_inst->set_to_apply(to_apply_promoted);
to_apply_promoted->SetCollectiveCallInstruction(new_inst.get());
return new_inst;
}
}
AllReducePromotion::AllReducePromotion(
absl::Span<std::pair<PrimitiveType, PrimitiveType> const> from_to_types)
: pass_(from_to_types, IsAllReduce, CloneAllReduce) {}
absl::StatusOr<bool> AllReducePromotion::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
return pass_.Run(module, execution_threads);
}
} | #include "xla/service/all_reduce_promotion.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = ::xla::match;
class AllReducePromotionTest : public HloTestBase {
public:
AllReducePromotion pass_{{{U16, U32}, {S16, S32}}};
};
TEST_F(AllReducePromotionTest, SimplePromotionAllReduce) {
absl::string_view hlo_text = R"(
HloModule test
sum {
a = u16[] parameter(0)
b = u16[] parameter(1)
ROOT add.2 = u16[] add(a, b)
}
ENTRY test_computation {
id32 = u32[] replica-id()
id = u16[] convert(id32)
id2 = u16[2] broadcast(id), dimensions={}
a0 = u16[2] constant({10, 15})
a1 = u16[2] add(id2, a0)
ROOT cp = u16[2] all-reduce(a1), replica_groups={}, to_apply=sum
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass_, module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Convert(m::AllReduce(m::Convert().WithShape(U32, {2}))
.WithShape(U32, {2}))
.WithShape(U16, {2})));
}
TEST_F(AllReducePromotionTest, SimplePromotionReduceScatter) {
absl::string_view hlo_text = R"(
HloModule test
sum {
a = u16[] parameter(0)
b = u16[] parameter(1)
ROOT add.2 = u16[] add(a, b)
}
ENTRY test_computation {
id32 = u32[] replica-id()
id = u16[] convert(id32)
id2 = u16[2] broadcast(id), dimensions={}
a0 = u16[2] constant({10, 15})
a1 = u16[2] add(id2, a0)
ROOT cp = u16[1] reduce-scatter(a1), dimensions={0}, replica_groups={}, to_apply=sum
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass_, module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Convert(m::ReduceScatter(m::Convert().WithShape(U32, {2}))
.WithShape(U32, {1}))
.WithShape(U16, {1})));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_promotion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_promotion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fd40ac30-586f-4884-9c6e-7edd9bbe0bf6 | cpp | google/arolla | memory | arolla/util/memory.h | arolla/util/memory_test.cc | #ifndef AROLLA_UTIL_MEMORY_H_
#define AROLLA_UTIL_MEMORY_H_
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <memory>
#include "absl/log/check.h"
namespace arolla {
struct FreeDeleter {
void operator()(const void* ptr) const { std::free(const_cast<void*>(ptr)); }
};
using MallocPtr = std::unique_ptr<void, FreeDeleter>;
struct Alignment {
size_t value;
};
inline MallocPtr AlignedAlloc(Alignment alignment, size_t size) {
DCHECK(alignment.value > 0 && !(alignment.value & (alignment.value - 1)));
if (size == 0) {
size = 1;
}
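  // Alignments of at most sizeof(void*) are already satisfied by plain
  // malloc; stricter alignments go through posix_memalign below.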
if (alignment.value <= sizeof(void*)) {
return MallocPtr(std::malloc(size));
}
void* result = nullptr;
if (posix_memalign(&result, alignment.value, size)) {
result = nullptr;
}
DCHECK(result) << "posix_memalign failed.";
return MallocPtr(result);
}
inline bool IsAlignedPtr(size_t alignment, const void* ptr) {
DCHECK(alignment > 0 && !(alignment & (alignment - 1)));
return (reinterpret_cast<uintptr_t>(ptr) & (alignment - 1)) == 0;
}
inline bool IsAlignedPtr(Alignment alignment, const void* ptr) {
return IsAlignedPtr(alignment.value, ptr);
}
}
#endif | #include "arolla/util/memory.h"
#include <cstdint>
#include <vector>
#include "gtest/gtest.h"
namespace arolla {
namespace {
TEST(Memory, IsAlignedPtr) {
std::vector<char> range(128, 0);
for (auto& ref : range) {
EXPECT_EQ(IsAlignedPtr(32, &ref),
reinterpret_cast<uintptr_t>(&ref) % 32 == 0);
}
}
TEST(Memory, AlignedAlloc) {
std::vector<MallocPtr> ptrs;
for (int i = 0; i < 100; ++i) {
ptrs.push_back(AlignedAlloc(Alignment{64}, 3));
}
for (const auto& ptr : ptrs) {
EXPECT_TRUE(IsAlignedPtr(64, ptr.get()));
}
EXPECT_NE(AlignedAlloc(Alignment{1}, 0).get(), nullptr);
EXPECT_NE(AlignedAlloc(Alignment{1}, 64).get(), nullptr);
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/memory.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/memory_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
c092720c-f1ac-4480-8438-947c924d26ed | cpp | google/tsl | mutex | tsl/platform/default/mutex.cc | tsl/platform/mutex_test.cc | #include "tsl/platform/mutex.h"
#include <time.h>
#include <cstdint>
#include "nsync_cv.h"
#include "nsync_mu.h"
#include "nsync_mu_wait.h"
#include "nsync_time.h"
namespace tsl {
static_assert(sizeof(nsync::nsync_mu) <= sizeof(internal::MuData),
"tsl::internal::MuData needs to be bigger");
static inline nsync::nsync_mu *mu_cast(internal::MuData *mu) {
return reinterpret_cast<nsync::nsync_mu *>(mu);
}
static inline const nsync::nsync_mu *mu_cast(const internal::MuData *mu) {
return reinterpret_cast<const nsync::nsync_mu *>(mu);
}
mutex::mutex() { nsync::nsync_mu_init(mu_cast(&mu_)); }
void mutex::lock() { nsync::nsync_mu_lock(mu_cast(&mu_)); }
bool mutex::try_lock() { return nsync::nsync_mu_trylock(mu_cast(&mu_)) != 0; }
void mutex::unlock() { nsync::nsync_mu_unlock(mu_cast(&mu_)); }
void mutex::assert_held() const TF_ASSERT_EXCLUSIVE_LOCK() {
nsync::nsync_mu_assert_held(mu_cast(&mu_));
}
void mutex::lock_shared() { nsync::nsync_mu_rlock(mu_cast(&mu_)); }
bool mutex::try_lock_shared() {
return nsync::nsync_mu_rtrylock(mu_cast(&mu_)) != 0;
}
void mutex::unlock_shared() { nsync::nsync_mu_runlock(mu_cast(&mu_)); }
void mutex::assert_held_shared() const TF_ASSERT_SHARED_LOCK() {
nsync::nsync_mu_rassert_held(mu_cast(&mu_));
}
static int EvaluateCondition(const void *vcond) {
return static_cast<int>(static_cast<const Condition *>(vcond)->Eval());
}
void mutex::Await(const Condition &cond) {
nsync::nsync_mu_wait(mu_cast(&mu_), &EvaluateCondition, &cond, nullptr);
}
bool mutex::AwaitWithDeadline(const Condition &cond, uint64_t abs_deadline_ns) {
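  // nsync takes the absolute deadline as (seconds, nanoseconds), so split the
  // nanosecond timestamp into those two parts.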
time_t seconds = abs_deadline_ns / (1000 * 1000 * 1000);
nsync::nsync_time abs_time = nsync::nsync_time_s_ns(
seconds, abs_deadline_ns - seconds * (1000 * 1000 * 1000));
return nsync::nsync_mu_wait_with_deadline(mu_cast(&mu_), &EvaluateCondition,
&cond, nullptr, abs_time,
nullptr) == 0;
}
static_assert(sizeof(nsync::nsync_cv) <= sizeof(internal::CVData),
"tsl::internal::CVData needs to be bigger");
static inline nsync::nsync_cv *cv_cast(internal::CVData *cv) {
return reinterpret_cast<nsync::nsync_cv *>(cv);
}
condition_variable::condition_variable() {
nsync::nsync_cv_init(cv_cast(&cv_));
}
void condition_variable::wait(mutex_lock &lock) {
nsync::nsync_cv_wait(cv_cast(&cv_), mu_cast(&lock.mutex()->mu_));
}
void condition_variable::notify_one() { nsync::nsync_cv_signal(cv_cast(&cv_)); }
void condition_variable::notify_all() {
nsync::nsync_cv_broadcast(cv_cast(&cv_));
}
namespace internal {
std::cv_status wait_until_system_clock(
CVData *cv_data, MuData *mu_data,
const std::chrono::system_clock::time_point timeout_time) {
int r = nsync::nsync_cv_wait_with_deadline(cv_cast(cv_data), mu_cast(mu_data),
timeout_time, nullptr);
return r ? std::cv_status::timeout : std::cv_status::no_timeout;
}
}
} | #include "tsl/platform/mutex.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace tsl {
namespace {
class MutexTest : public ::testing::Test {
protected:
mutex_lock GetLock() TF_NO_THREAD_SAFETY_ANALYSIS {
return mutex_lock{mu_};
}
tf_shared_lock GetSharedLock() TF_NO_THREAD_SAFETY_ANALYSIS {
return tf_shared_lock{mu_};
}
bool test_try_lock() {
bool test = mu_.try_lock();
if (test) mu_.unlock();
return test;
}
bool test_try_lock_shared() {
bool test = mu_.try_lock_shared();
if (test) mu_.unlock_shared();
return test;
}
mutex mu_;
};
TEST_F(MutexTest, MovableMutexLockTest) {
EXPECT_TRUE(test_try_lock());
{
mutex_lock lock = GetLock();
EXPECT_FALSE(test_try_lock());
EXPECT_FALSE(test_try_lock_shared());
}
EXPECT_TRUE(test_try_lock());
}
TEST_F(MutexTest, SharedMutexLockTest) {
EXPECT_TRUE(test_try_lock());
{
tf_shared_lock lock = GetSharedLock();
EXPECT_FALSE(test_try_lock());
EXPECT_TRUE(test_try_lock_shared());
}
EXPECT_TRUE(test_try_lock());
}
TEST(ConditionVariableTest, WaitWithPredicate) {
constexpr int kNumThreads = 4;
mutex mu;
condition_variable cv;
bool ready = false;
int count = 0;
tsl::thread::ThreadPool pool(Env::Default(),
"condition_variable_test_wait_with_predicate",
kNumThreads);
for (int i = 0; i < kNumThreads; ++i) {
pool.Schedule([&mu, &cv, &ready, &count]() {
mutex_lock lock(mu);
cv.wait(lock, [&ready] { return ready; });
++count;
cv.notify_one();
});
}
{
mutex_lock lock(mu);
EXPECT_EQ(count, 0);
}
{
mutex_lock lock(mu);
ready = true;
cv.notify_all();
}
{
mutex_lock lock(mu);
cv.wait(lock, [&count, kNumThreads] { return count == kNumThreads; });
EXPECT_EQ(count, kNumThreads);
}
}
TEST(ConditionVariableTest, WaitWithTruePredicateDoesntBlock) {
mutex mu;
mutex_lock lock(mu);
condition_variable cv;
cv.wait(lock, [] { return true; });
EXPECT_TRUE(static_cast<bool>(lock));
}
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/default/mutex.cc | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/mutex_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
5b95d526-b4b8-453b-be5a-fc62526a0718 | cpp | tensorflow/tensorflow | bytecode | tensorflow/core/tfrt/mlrt/bytecode/bytecode.h | tensorflow/core/tfrt/mlrt/bytecode/bytecode_test.cc | #ifndef TENSORFLOW_CORE_TFRT_MLRT_BYTECODE_BYTECODE_H_
#define TENSORFLOW_CORE_TFRT_MLRT_BYTECODE_BYTECODE_H_
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <initializer_list>
#include <iterator>
#include <limits>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/strings/string_view.h"
namespace mlrt {
namespace bc {
using BcAddr_t = uint64_t;
class Buffer {
public:
char* Get(BcAddr_t address) {
DCHECK_LT(address, buffer_.size());
return &buffer_.at(address);
}
char* data() { return buffer_.data(); }
const char* data() const { return buffer_.data(); }
size_t size() const { return buffer_.size(); }
bool empty() const { return buffer_.empty(); }
void shrink_to_fit() { buffer_.shrink_to_fit(); }
private:
static_assert(alignof(std::max_align_t) >= 8,
"The bytecode buffer needs to be at least 8-byte aligned.");
std::vector<char> buffer_;
friend class Allocator;
};
class Allocator {
public:
explicit Allocator(Buffer* buffer) : buffer_(buffer) {
DCHECK(buffer != nullptr);
}
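  // Grows the buffer so that `size` bytes are available at an address aligned
  // to `alignment` (at most 8), and returns that address as an offset into
  // the buffer.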
BcAddr_t Allocate(size_t size, size_t alignment) {
DCHECK_LE(alignment, 8);
size_t next_align =
(buffer_->buffer_.size() + alignment - 1) / alignment * alignment;
buffer_->buffer_.resize(next_align + size);
return next_align;
}
template <typename T>
BcAddr_t Allocate() {
static_assert(std::is_trivial<T>::value, "T must be trivial.");
return Allocate(sizeof(T), alignof(T));
}
size_t size() const { return buffer_->size(); }
char* raw(BcAddr_t address) { return buffer_->Get(address); }
private:
Buffer* buffer_;
};
template <typename T, typename Enable = void>
struct AccessTraits {
using StorageType = T;
static_assert(std::is_trivial<StorageType>::value,
"StorageType must be trivial.");
using ConstructorType = void;
static T Read(const char* p) {
T value;
std::memcpy(&value, p, sizeof(T));
return value;
}
template <typename... Args>
static BcAddr_t Construct(Allocator* allocator, BcAddr_t address,
Args&&... args) {
T value(std::forward<Args>(args)...);
std::memcpy(allocator->raw(address), &value, sizeof(T));
return address;
}
static void Place(Allocator* allocator, BcAddr_t address, const char* data,
size_t size, size_t num = 1) {
CHECK_LE(size, num * sizeof(T));
std::memcpy(allocator->raw(address), data, size);
}
};
template <typename T>
struct AccessTraits<T, std::void_t<typename T::NonTrivialConstructorType>> {
using StorageType = typename T::StorageType;
static_assert(std::is_trivial<StorageType>::value,
"StorageType must be trivial.");
using ConstructorType = typename T::NonTrivialConstructorType;
static T Read(const char* p) {
return T(p);
}
template <typename... Args>
static ConstructorType Construct(Allocator* allocator, BcAddr_t address,
Args&&... args) {
return ConstructorType(allocator, address, std::forward<Args>(args)...);
}
};
template <typename T>
BcAddr_t Allocate(Allocator* allocator) {
return allocator->Allocate<typename AccessTraits<T>::StorageType>();
}
template <typename T, typename... Args>
auto New(Allocator* allocator, Args&&... args) {
auto address = Allocate<T>(allocator);
return AccessTraits<T>::Construct(allocator, address,
std::forward<Args>(args)...);
}
template <typename T>
class ReadIterator {
using StorageType = typename AccessTraits<T>::StorageType;
public:
using difference_type = std::ptrdiff_t;
using value_type = std::remove_cv_t<T>;
using pointer = void;
using reference = value_type;
using iterator_category = std::input_iterator_tag;
explicit ReadIterator(const char* data) : data_(data) {}
const char* data() const { return data_; }
value_type operator*() const { return AccessTraits<T>::Read(data_); }
ReadIterator& operator++() {
data_ += sizeof(StorageType);
return *this;
}
ReadIterator operator++(int) {
ReadIterator r = *this;
data_ += sizeof(StorageType);
return r;
}
ReadIterator& operator+=(difference_type offset) {
data_ += offset * sizeof(StorageType);
return *this;
}
ReadIterator operator+(difference_type offset) const {
ReadIterator r = *this;
r += offset;
return r;
}
ReadIterator& operator--() {
data_ -= sizeof(StorageType);
return *this;
}
ReadIterator operator--(int) {
ReadIterator r = *this;
data_ -= sizeof(StorageType);
return r;
}
ReadIterator& operator-=(difference_type offset) {
data_ -= offset * sizeof(StorageType);
return *this;
}
ReadIterator operator-(difference_type offset) const {
ReadIterator r = *this;
r -= offset;
return r;
}
difference_type operator-(const ReadIterator& other) const {
DCHECK_EQ((data_ - other.data_) % sizeof(StorageType), 0);
return (data_ - other.data_) / sizeof(StorageType);
}
friend bool operator==(const ReadIterator& a, const ReadIterator& b) {
return a.data_ == b.data_;
}
friend bool operator!=(const ReadIterator& a, const ReadIterator& b) {
return !(a == b);
}
friend bool operator<(const ReadIterator& a, const ReadIterator& b) {
return a.data_ < b.data_;
}
friend bool operator<=(const ReadIterator& a, const ReadIterator& b) {
return a.data_ <= b.data_;
}
friend bool operator>(const ReadIterator& a, const ReadIterator& b) {
return a.data_ > b.data_;
}
friend bool operator>=(const ReadIterator& a, const ReadIterator& b) {
return a.data_ >= b.data_;
}
private:
const char* data_ = nullptr;
};
#define DEFINE_BYTECODE_FIELD(Type, name) \
typename ::mlrt::bc::AccessTraits<Type>::StorageType name; \
static const char* name##_pointer(const char* base) { \
return base + offsetof(Self, name); \
} \
static ::mlrt::bc::BcAddr_t name##_address(::mlrt::bc::BcAddr_t base) { \
return base + offsetof(Self, name); \
} \
static Type read_##name(const char* base) { \
return ::mlrt::bc::AccessTraits<Type>::Read(name##_pointer(base)); \
} \
template <typename... Args> \
static auto construct_##name(::mlrt::bc::Allocator* allocator, \
::mlrt::bc::BcAddr_t base, Args&&... args) { \
return ::mlrt::bc::AccessTraits<Type>::Construct( \
allocator, name##_address(base), std::forward<Args>(args)...); \
} \
static_assert( \
std::is_trivial< \
typename ::mlrt::bc::AccessTraits<Type>::StorageType>::value, \
"Bytecode storage types must be trivial.")
template <typename T, typename SizeType = uint32_t>
class Vector {
public:
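  // A vector is serialized as a (size, offset) pair; `offset` is the byte
  // distance from this Storage struct to the first element, so the encoding
  // stays valid wherever the bytecode buffer is placed in memory.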
struct Storage {
using Self = Storage;
DEFINE_BYTECODE_FIELD(SizeType, size);
DEFINE_BYTECODE_FIELD(SizeType, offset);
};
static_assert(std::is_trivial<Storage>::value, "StorageType is trivial");
static_assert(std::is_standard_layout<Storage>::value,
"StorageType has standard layout");
static_assert(sizeof(Storage) == 2 * sizeof(SizeType));
static_assert(alignof(Storage) == alignof(SizeType));
using StorageType = Storage;
using ElementStorageType = typename AccessTraits<T>::StorageType;
using value_type = T;
using iterator = ReadIterator<T>;
using const_iterator = iterator;
class Constructor {
public:
Constructor(Allocator* allocator, BcAddr_t address, size_t size)
: allocator_(allocator), address_(address) {
DCHECK_GE(allocator->size(), address + sizeof(StorageType));
size_t data_start = allocator->Allocate(size * sizeof(ElementStorageType),
alignof(ElementStorageType));
CHECK_LT(size, std::numeric_limits<SizeType>::max());
CHECK_LT(data_start - address,
std::numeric_limits<SizeType>::max());
storage_.size = size;
storage_.offset = data_start - address;
AccessTraits<StorageType>::Construct(allocator, address, storage_);
}
Constructor(Allocator* allocator, BcAddr_t address,
const std::vector<T>& vec)
: Constructor(allocator, address, vec.size()) {
Assign(vec.begin(), vec.end());
}
template <typename... Args>
auto ConstructAt(size_t index, Args&&... args) {
DCHECK_LT(index, size());
return AccessTraits<T>::Construct(allocator_, GetElementAddress(index),
std::forward<Args>(args)...);
}
template <typename V>
void Assign(std::initializer_list<V> ilist) {
DCHECK_EQ(ilist.size(), size());
Assign(ilist.begin(), ilist.end());
}
template <typename Range>
void Assign(const Range& range) {
DCHECK_EQ(std::distance(std::begin(range), std::end(range)), size());
Assign(std::begin(range), std::end(range));
}
template <typename Iter>
void Assign(Iter begin, Iter end) {
size_t i = 0;
for (; begin != end; ++begin) {
ConstructAt(i++, *begin);
}
DCHECK_EQ(i, size());
}
template <
typename U = T,
typename std::enable_if<
std::is_same_v<typename AccessTraits<U>::ConstructorType, void>,
int>::type = 0>
void Place(const char* data, size_t size) {
AccessTraits<U>::Place(allocator_, address_ + storage_.offset, data, size,
storage_.size);
}
size_t size() const { return storage_.size; }
BcAddr_t address() const { return address_; }
private:
BcAddr_t GetElementAddress(size_t index) const {
return address_ + storage_.offset + index * sizeof(ElementStorageType);
}
Allocator* allocator_;
BcAddr_t address_;
Vector::Storage storage_;
};
using NonTrivialConstructorType = Constructor;
explicit Vector(const char* p) : p_(p) {
static_assert(!std::is_trivial_v<Vector>);
DCHECK(p_ != nullptr);
}
Vector() {
static_assert(!std::is_trivial_v<Vector>);
static Storage kEmptyStorage{0, 0};
p_ = reinterpret_cast<const char*>(&kEmptyStorage);
}
const char* data() const { return p_ + offset(); }
size_t size() const { return StorageType::read_size(p_); }
bool empty() const { return size() == 0; }
iterator begin() const { return iterator(data()); }
iterator end() const {
return iterator(data() + size() * sizeof(ElementStorageType));
}
T operator[](size_t index) const {
DCHECK_LT(index, size());
auto iter = begin();
iter += index;
return *iter;
}
private:
SizeType offset() const { return StorageType::read_offset(p_); }
const char* p_;
};
class String : public Vector<char, uint64_t> {
public:
using Base = Vector<char, uint64_t>;
using Base::Base;
class Constructor : public Base::Constructor {
public:
using Base::Constructor::Assign;
Constructor(Allocator* allocator, BcAddr_t address, absl::string_view str)
: Base::Constructor(allocator, address, str.size()) {
Assign(str.begin(), str.end());
}
};
using NonTrivialConstructorType = Constructor;
using Base::data;
using Base::size;
std::string str() const { return std::string(data(), size()); }
absl::string_view Get() const { return absl::string_view(data(), size()); }
operator absl::string_view() const {
return absl::string_view(data(), size());
}
friend bool operator==(String x, absl::string_view y) { return x.Get() == y; }
friend bool operator==(absl::string_view x, String y) { return x == y.Get(); }
};
}
}
#endif | #include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include <array>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
namespace mlrt {
namespace bc {
namespace {
TEST(ByteCodeTest, VectorOfTrivial) {
Buffer buffer;
Allocator alloc(&buffer);
auto ctor = New<Vector<uint32_t>>(&alloc, 4);
for (int i = 0; i < 4; ++i) {
ctor.ConstructAt(i, i);
}
Vector<uint32_t> view(buffer.Get(ctor.address()));
ASSERT_EQ(view.size(), 4);
EXPECT_EQ(view[0], 0);
EXPECT_EQ(view[1], 1);
EXPECT_EQ(view[2], 2);
EXPECT_EQ(view[3], 3);
EXPECT_THAT(view, ::testing::ElementsAreArray({0, 1, 2, 3}));
Vector<uint32_t> empty;
ASSERT_TRUE(empty.empty());
}
TEST(ByteCodeTest, VectorOfVector) {
Buffer buffer;
Allocator alloc(&buffer);
using T = Vector<uint32_t>;
using V = Vector<T>;
auto vctor = New<V>(&alloc, 3);
{
auto tctor = vctor.ConstructAt(0, 2);
tctor.ConstructAt(0, 0);
tctor.ConstructAt(1, 1);
}
{
auto tctor = vctor.ConstructAt(1, 1);
tctor.ConstructAt(0, 2);
}
vctor.ConstructAt(2, 0);
V v(buffer.Get(vctor.address()));
auto t0 = v[0];
ASSERT_EQ(t0.size(), 2);
EXPECT_EQ(t0[0], 0);
EXPECT_EQ(t0[1], 1);
EXPECT_THAT(t0, testing::ElementsAreArray({0, 1}));
auto t1 = v[1];
ASSERT_EQ(t1.size(), 1);
EXPECT_EQ(t1[0], 2);
EXPECT_THAT(t1, testing::ElementsAreArray({2}));
auto t2 = v[2];
ASSERT_EQ(t2.size(), 0);
Vector<Vector<uint32_t>> empty;
ASSERT_TRUE(empty.empty());
}
TEST(ByteCodeTest, String) {
Buffer buffer;
Allocator alloc(&buffer);
auto ctor = New<String>(&alloc, "bytecode string");
String view(buffer.Get(ctor.address()));
EXPECT_EQ(view.str(), "bytecode string");
EXPECT_EQ(view.Get(), "bytecode string");
EXPECT_EQ(absl::string_view(view), "bytecode string");
}
TEST(ByteCodeTest, PlaceVectorOfTrivial) {
Buffer buffer;
Allocator alloc(&buffer);
auto ctor = New<Vector<uint32_t>>(&alloc, 4);
std::array<uint32_t, 4> data = {0, 1, 2, 3};
ctor.Place(reinterpret_cast<const char*>(data.data()),
data.size() * sizeof(uint32_t));
Vector<uint32_t> view(buffer.Get(ctor.address()));
ASSERT_EQ(view.size(), 4);
EXPECT_EQ(view[0], 0);
EXPECT_EQ(view[1], 1);
EXPECT_EQ(view[2], 2);
EXPECT_EQ(view[3], 3);
EXPECT_THAT(view, ::testing::ElementsAreArray({0, 1, 2, 3}));
}
TEST(ByteCodeTest, ReadIteratorDistance) {
Buffer buffer;
Allocator alloc(&buffer);
auto ctor = New<Vector<uint32_t>>(&alloc, 4);
for (int i = 0; i < 4; ++i) {
ctor.ConstructAt(i, i);
}
Vector<uint32_t> view(buffer.Get(ctor.address()));
EXPECT_EQ(view.end() - view.begin(), 4);
}
TEST(ByteCodeTest, ReadIteratorCompare) {
Buffer buffer;
Allocator alloc(&buffer);
auto ctor = New<Vector<uint32_t>>(&alloc, 4);
for (int i = 0; i < 4; ++i) {
ctor.ConstructAt(i, i);
}
Vector<uint32_t> view(buffer.Get(ctor.address()));
EXPECT_GE(view.end(), view.begin());
EXPECT_GT(view.end(), view.begin());
EXPECT_LE(view.begin(), view.end());
EXPECT_LT(view.begin(), view.end());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/bytecode/bytecode.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/bytecode/bytecode_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0dc90f43-3fd2-43ff-b407-9a4a3aaa2aa1 | cpp | tensorflow/tensorflow | shape_tree | third_party/xla/xla/shape_tree.cc | third_party/xla/xla/shape_tree_test.cc | #include "xla/shape_tree.h"
#include <cstddef>
#include <cstdint>
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace internal {
IndexTable::IndexTable(const Shape& shape) : entries_(1) {
size_t next_node_id = 0;
CreateEntry(entries_[0], shape, next_node_id);
}
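// Recursively creates entries for `shape` in pre-order: every node receives
// the next id, and tuple nodes also record where their children begin.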
void IndexTable::CreateEntry(Entry& entry, const Shape& shape,
size_t& next_node_id) {
entry.node_id = next_node_id++;
if (!shape.IsTuple()) return;
size_t children_start_id = entries_.size();
entry.children_start_id = children_start_id;
entries_.resize(entries_.size() + shape.tuple_shapes_size());
for (size_t i = 0; i < shape.tuple_shapes_size(); ++i) {
CreateEntry(entries_[children_start_id + i], shape.tuple_shapes(i),
next_node_id);
}
}
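// Follows each tuple index in `index` from the root entry and returns the
// entry it addresses.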
const IndexTable::Entry& IndexTable::operator[](ShapeIndexView index) const {
const Entry* result = &entries_.front();
for (int64_t i : index) {
CHECK_GE(result->children_start_id, 0);
result = &entries_[result->children_start_id + i];
}
return *result;
}
}
} | #include "xla/shape_tree.h"
#include <iterator>
#include <memory>
#include <utility>
#include <vector>
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/test_benchmark.h"
namespace xla {
namespace {
class ShapeTreeTest : public ::testing::Test {
protected:
ShapeTreeTest() {
array_shape_ = ShapeUtil::MakeShape(F32, {42, 42, 123});
tuple_shape_ =
ShapeUtil::MakeTupleShape({array_shape_, array_shape_, array_shape_});
nested_tuple_shape_ = ShapeUtil::MakeTupleShape(
{array_shape_, ShapeUtil::MakeTupleShape({array_shape_, array_shape_}),
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTupleShape({array_shape_, array_shape_}),
array_shape_})});
}
void TestShapeConstructor(const Shape& shape, int expected_num_nodes);
void TestInitValueConstructor(const Shape& shape, int expected_num_nodes);
Shape array_shape_;
Shape tuple_shape_;
Shape nested_tuple_shape_;
};
TEST_F(ShapeTreeTest, DefaultConstructor) {
ShapeTree<int> int_tree;
EXPECT_TRUE(ShapeUtil::IsEmptyTuple(int_tree.shape()));
ShapeTree<bool> bool_tree;
EXPECT_TRUE(ShapeUtil::IsEmptyTuple(bool_tree.shape()));
}
void ShapeTreeTest::TestShapeConstructor(const Shape& shape,
int expected_num_nodes) {
ShapeTree<int> int_tree(shape);
int num_nodes = 0;
  int_tree.ForEachElement([&num_nodes](const ShapeIndex& /*index*/, int data) {
EXPECT_EQ(0, data);
++num_nodes;
});
EXPECT_EQ(expected_num_nodes, num_nodes);
ShapeTree<bool> bool_tree(shape);
num_nodes = 0;
bool_tree.ForEachElement(
      [&num_nodes](const ShapeIndex& /*index*/, bool data) {
EXPECT_EQ(false, data);
++num_nodes;
});
EXPECT_EQ(expected_num_nodes, num_nodes);
}
TEST_F(ShapeTreeTest, ShapeConstructor) {
TestShapeConstructor(array_shape_, 1);
TestShapeConstructor(tuple_shape_, 4);
TestShapeConstructor(nested_tuple_shape_, 10);
}
void ShapeTreeTest::TestInitValueConstructor(const Shape& shape,
int expected_num_nodes) {
ShapeTree<int> tree(shape, 42);
int num_nodes = 0;
  tree.ForEachElement([&num_nodes](const ShapeIndex& /*index*/, int data) {
EXPECT_EQ(42, data);
++num_nodes;
});
EXPECT_EQ(expected_num_nodes, num_nodes);
num_nodes = 0;
tree.ForEachMutableElement(
      [&num_nodes](const ShapeIndex& /*index*/, int* data) {
EXPECT_EQ(42, *data);
*data = num_nodes;
++num_nodes;
});
EXPECT_EQ(expected_num_nodes, num_nodes);
num_nodes = 0;
  tree.ForEachElement([&num_nodes](const ShapeIndex& /*index*/, int data) {
EXPECT_EQ(num_nodes, data);
++num_nodes;
});
EXPECT_EQ(expected_num_nodes, num_nodes);
}
TEST_F(ShapeTreeTest, InitValueConstructor) {
TestInitValueConstructor(array_shape_, 1);
TestInitValueConstructor(tuple_shape_, 4);
TestInitValueConstructor(nested_tuple_shape_, 10);
}
TEST_F(ShapeTreeTest, EmptyTupleMustHaveNoLeaves) {
ShapeTree<int> shape_tree{ShapeUtil::MakeTupleShape({})};
EXPECT_EQ(0, shape_tree.leaf_count());
}
TEST_F(ShapeTreeTest, NestedEmptyTuple) {
Shape shape(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeTupleShape({}), array_shape_}));
ShapeTree<int> shape_tree{shape};
EXPECT_EQ(ShapeUtil::GetLeafCount(shape), shape_tree.leaf_count());
}
TEST_F(ShapeTreeTest, ArrayShape) {
ShapeTree<int> shape_tree{array_shape_};
*shape_tree.mutable_element({}) = 42;
EXPECT_EQ(42, shape_tree.element({}));
*shape_tree.mutable_element({}) = 123;
EXPECT_EQ(123, shape_tree.element({}));
EXPECT_TRUE(ShapeUtil::Compatible(array_shape_, shape_tree.shape()));
ShapeTree<int> copy{shape_tree};
EXPECT_EQ(123, copy.element({}));
*copy.mutable_element({}) = 99;
EXPECT_EQ(99, copy.element({}));
EXPECT_EQ(123, shape_tree.element({}));
copy = shape_tree;
EXPECT_EQ(123, copy.element({}));
}
TEST_F(ShapeTreeTest, TupleShape) {
ShapeTree<int> shape_tree{tuple_shape_};
*shape_tree.mutable_element({}) = 1;
*shape_tree.mutable_element({0}) = 42;
*shape_tree.mutable_element({1}) = 123;
*shape_tree.mutable_element({2}) = -100;
EXPECT_EQ(1, shape_tree.element({}));
EXPECT_EQ(42, shape_tree.element({0}));
EXPECT_EQ(123, shape_tree.element({1}));
EXPECT_EQ(-100, shape_tree.element({2}));
EXPECT_TRUE(ShapeUtil::Compatible(tuple_shape_, shape_tree.shape()));
int sum = 0;
shape_tree.ForEachElement(
      [&sum](const ShapeIndex& /*index*/, int data) { sum += data; });
EXPECT_EQ(66, sum);
ShapeTree<int> copy{shape_tree};
EXPECT_EQ(1, copy.element({}));
EXPECT_EQ(42, copy.element({0}));
EXPECT_EQ(123, copy.element({1}));
EXPECT_EQ(-100, copy.element({2}));
shape_tree.ForEachMutableElement(
      [](const ShapeIndex& /*index*/, int* data) { *data = 0; });
EXPECT_EQ(0, shape_tree.element({}));
EXPECT_EQ(0, shape_tree.element({0}));
EXPECT_EQ(0, shape_tree.element({1}));
EXPECT_EQ(0, shape_tree.element({2}));
EXPECT_EQ(1, copy.element({}));
EXPECT_EQ(42, copy.element({0}));
EXPECT_EQ(123, copy.element({1}));
EXPECT_EQ(-100, copy.element({2}));
copy = shape_tree;
EXPECT_EQ(0, copy.element({}));
EXPECT_EQ(0, copy.element({0}));
EXPECT_EQ(0, copy.element({1}));
EXPECT_EQ(0, copy.element({2}));
}
TEST_F(ShapeTreeTest, NestedTupleShape) {
ShapeTree<int> shape_tree{nested_tuple_shape_};
*shape_tree.mutable_element({0}) = 42;
*shape_tree.mutable_element({1, 1}) = 123;
*shape_tree.mutable_element({2, 0, 1}) = -100;
EXPECT_EQ(42, shape_tree.element({0}));
EXPECT_EQ(123, shape_tree.element({1, 1}));
EXPECT_EQ(-100, shape_tree.element({2, 0, 1}));
EXPECT_TRUE(ShapeUtil::Compatible(nested_tuple_shape_, shape_tree.shape()));
ShapeTree<int> copy{shape_tree};
EXPECT_EQ(42, copy.element({0}));
EXPECT_EQ(123, copy.element({1, 1}));
EXPECT_EQ(-100, copy.element({2, 0, 1}));
*copy.mutable_element({0}) = 1;
*copy.mutable_element({1, 1}) = 2;
*copy.mutable_element({2, 0, 1}) = 3;
EXPECT_EQ(1, copy.element({0}));
EXPECT_EQ(2, copy.element({1, 1}));
EXPECT_EQ(3, copy.element({2, 0, 1}));
EXPECT_EQ(42, shape_tree.element({0}));
EXPECT_EQ(123, shape_tree.element({1, 1}));
EXPECT_EQ(-100, shape_tree.element({2, 0, 1}));
copy = shape_tree;
EXPECT_EQ(42, copy.element({0}));
EXPECT_EQ(123, copy.element({1, 1}));
EXPECT_EQ(-100, copy.element({2, 0, 1}));
}
TEST_F(ShapeTreeTest, InvalidIndexingTuple) {
ShapeTree<int> shape_tree{tuple_shape_};
#ifndef NDEBUG
EXPECT_DEATH(shape_tree.element({4}), "");
#endif
}
TEST_F(ShapeTreeTest, InvalidIndexingNestedTuple) {
ShapeTree<int> shape_tree{nested_tuple_shape_};
#ifndef NDEBUG
EXPECT_DEATH(shape_tree.element({0, 0}), "");
#endif
}
TEST_F(ShapeTreeTest, ShapeTreeOfNonCopyableType) {
ShapeTree<std::unique_ptr<int>> shape_tree{tuple_shape_};
EXPECT_EQ(shape_tree.element({2}).get(), nullptr);
*shape_tree.mutable_element({2}) = std::make_unique<int>(42);
EXPECT_EQ(*shape_tree.element({2}), 42);
}
TEST_F(ShapeTreeTest, CopySubtreeFromArrayShape) {
ShapeTree<int> source(array_shape_);
*source.mutable_element({}) = 42;
ShapeTree<int> destination(array_shape_, 123);
EXPECT_EQ(destination.element({}), 123);
  destination.CopySubtreeFrom(source, /*source_base_index=*/{},
                              /*target_base_index=*/{});
EXPECT_EQ(destination.element({}), 42);
}
TEST_F(ShapeTreeTest, FullCopySubtreeFromTupleShape) {
ShapeTree<int> source(tuple_shape_);
*source.mutable_element({}) = 10;
*source.mutable_element({0}) = 11;
*source.mutable_element({1}) = 12;
*source.mutable_element({2}) = 13;
ShapeTree<int> destination(tuple_shape_, 0);
  destination.CopySubtreeFrom(source, /*source_base_index=*/{},
                              /*target_base_index=*/{});
EXPECT_EQ(destination.element({}), 10);
EXPECT_EQ(destination.element({0}), 11);
EXPECT_EQ(destination.element({1}), 12);
EXPECT_EQ(destination.element({2}), 13);
}
TEST_F(ShapeTreeTest, SingleElementCopySubtreeFromTupleShape) {
ShapeTree<int> source(tuple_shape_);
*source.mutable_element({}) = 10;
*source.mutable_element({0}) = 11;
*source.mutable_element({1}) = 12;
*source.mutable_element({2}) = 13;
ShapeTree<int> destination(tuple_shape_, 0);
  destination.CopySubtreeFrom(source, /*source_base_index=*/{0},
                              /*target_base_index=*/{1});
EXPECT_EQ(destination.element({}), 0);
EXPECT_EQ(destination.element({0}), 0);
EXPECT_EQ(destination.element({1}), 11);
EXPECT_EQ(destination.element({2}), 0);
}
TEST_F(ShapeTreeTest, CopySubtreeIntoNestedShape) {
ShapeTree<int> source(
ShapeUtil::MakeTupleShape({array_shape_, array_shape_}));
*source.mutable_element({}) = 10;
*source.mutable_element({0}) = 11;
*source.mutable_element({1}) = 12;
ShapeTree<int> destination(nested_tuple_shape_, 0);
  destination.CopySubtreeFrom(source, /*source_base_index=*/{},
                              /*target_base_index=*/{2, 0});
EXPECT_EQ(destination.element({}), 0);
EXPECT_EQ(destination.element({0}), 0);
EXPECT_EQ(destination.element({1}), 0);
EXPECT_EQ(destination.element({1, 0}), 0);
EXPECT_EQ(destination.element({1, 1}), 0);
EXPECT_EQ(destination.element({2}), 0);
EXPECT_EQ(destination.element({2, 0}), 10);
EXPECT_EQ(destination.element({2, 0, 0}), 11);
EXPECT_EQ(destination.element({2, 0, 1}), 12);
EXPECT_EQ(destination.element({2, 1}), 0);
}
TEST_F(ShapeTreeTest, CopySubtreeFromNestedShape) {
ShapeTree<int> source(nested_tuple_shape_, 42);
*source.mutable_element({1}) = 10;
*source.mutable_element({1, 0}) = 11;
*source.mutable_element({1, 1}) = 12;
ShapeTree<int> destination(
ShapeUtil::MakeTupleShape({array_shape_, array_shape_}), 0);
  destination.CopySubtreeFrom(source, /*source_base_index=*/{1},
                              /*target_base_index=*/{});
EXPECT_EQ(destination.element({}), 10);
EXPECT_EQ(destination.element({0}), 11);
EXPECT_EQ(destination.element({1}), 12);
}
TEST_F(ShapeTreeTest, OperatorEquals) {
{
ShapeTree<int> a(array_shape_, 123);
ShapeTree<int> b(array_shape_, 42);
ShapeTree<int> c(array_shape_, 42);
EXPECT_FALSE(a == b);
EXPECT_TRUE(a != b);
EXPECT_TRUE(b == c);
}
{
ShapeTree<int> a(tuple_shape_);
*a.mutable_element({}) = 10;
*a.mutable_element({0}) = 11;
*a.mutable_element({1}) = 12;
ShapeTree<int> b(tuple_shape_);
*b.mutable_element({}) = 10;
*b.mutable_element({0}) = 42;
*b.mutable_element({1}) = 11;
ShapeTree<int> c(tuple_shape_);
*c.mutable_element({}) = 10;
*c.mutable_element({0}) = 42;
*c.mutable_element({1}) = 11;
EXPECT_FALSE(a == b);
EXPECT_TRUE(a != b);
EXPECT_TRUE(b == c);
EXPECT_FALSE(b != c);
}
}
TEST_F(ShapeTreeTest, ConstructWithPointerToShape) {
ShapeTree<int> t(&nested_tuple_shape_, 42);
int num_nodes = 0;
  t.ForEachElement([&num_nodes](const ShapeIndex& /*index*/, int data) {
EXPECT_EQ(42, data);
++num_nodes;
});
EXPECT_EQ(10, num_nodes);
}
TEST_F(ShapeTreeTest, CopyWithPointerToShape) {
ShapeTree<int> source(&nested_tuple_shape_, 0);
ShapeTree<int> dest(source);
EXPECT_EQ(&dest.shape(), &nested_tuple_shape_);
}
TEST_F(ShapeTreeTest, CopyAssignWithPointerToShape) {
ShapeTree<int> source(&nested_tuple_shape_, 0);
ShapeTree<int> dest;
dest = source;
EXPECT_EQ(&dest.shape(), &nested_tuple_shape_);
}
TEST_F(ShapeTreeTest, IterateSimple) {
ShapeTree<int> t(nested_tuple_shape_, 42);
int num_nodes = 0;
for (auto index_to_data : t) {
EXPECT_EQ(42, index_to_data.second);
++num_nodes;
}
EXPECT_EQ(10, num_nodes);
}
TEST_F(ShapeTreeTest, ConstIterate) {
const ShapeTree<int> t(nested_tuple_shape_, 42);
int num_nodes = 0;
for (const auto& index_to_data : t) {
EXPECT_EQ(42, index_to_data.second);
++num_nodes;
}
EXPECT_EQ(10, num_nodes);
}
TEST_F(ShapeTreeTest, IterateAndMutate) {
ShapeTree<int> t(nested_tuple_shape_, 42);
int i = 0;
for (auto& index_to_data : t) {
EXPECT_EQ(42, index_to_data.second);
if (i == 1) {
index_to_data.second = 98;
}
++i;
}
(*t.begin()).second = 78;
EXPECT_EQ(78, (*t.begin()).second);
i = 0;
for (auto& index_to_data : t) {
if (i == 0) {
EXPECT_EQ(78, index_to_data.second);
} else if (i == 1) {
EXPECT_EQ(98, index_to_data.second);
} else {
EXPECT_EQ(42, index_to_data.second);
}
++i;
}
EXPECT_EQ(78, (*t.begin()).second);
EXPECT_EQ(98, (*std::next(t.begin())).second);
}
TEST_F(ShapeTreeTest, IterateOrder) {
ShapeTree<int> t(nested_tuple_shape_, 42);
std::vector<ShapeIndex> v;
v.reserve(t.leaf_count());
for (auto index_to_data : t) {
v.push_back(index_to_data.first);
}
EXPECT_EQ(v, (std::vector<ShapeIndex>{{},
{0},
{1},
{1, 0},
{1, 1},
{2},
{2, 0},
{2, 0, 0},
{2, 0, 1},
{2, 1}}));
}
TEST_F(ShapeTreeTest, ReverseIterateOrder) {
ShapeTree<int> t(nested_tuple_shape_, 42);
std::vector<ShapeIndex> v;
v.reserve(t.leaf_count());
for (auto it = t.rbegin(); it != t.rend(); ++it) {
v.push_back(it->first);
}
EXPECT_EQ(v, (std::vector<ShapeIndex>{
{2, 1},
{2, 0, 1},
{2, 0, 0},
{2, 0},
{2},
{1, 1},
{1, 0},
{1},
{0},
{},
}));
}
TEST_F(ShapeTreeTest, Find) {
ShapeTree<int> t(nested_tuple_shape_, 42);
auto found = t.find({1, 0});
EXPECT_NE(found, t.end());
EXPECT_EQ(found->first, ShapeIndex({1, 0}));
EXPECT_EQ(found->second, 42);
}
TEST_F(ShapeTreeTest, ConstFind) {
const ShapeTree<int> t(nested_tuple_shape_, 42);
auto found = t.find({1, 0});
EXPECT_NE(found, t.end());
EXPECT_EQ(found->first, ShapeIndex({1, 0}));
EXPECT_EQ(found->second, 42);
}
TEST_F(ShapeTreeTest, IterateOrderLeaves) {
ShapeTree<int> t(nested_tuple_shape_, 42);
std::vector<ShapeIndex> v;
const auto& leaves = t.leaves();
v.reserve(t.leaf_count());
for (auto index_to_data : leaves) {
v.push_back(index_to_data.first);
}
EXPECT_EQ(v, (std::vector<ShapeIndex>{
{0}, {1, 0}, {1, 1}, {2, 0, 0}, {2, 0, 1}, {2, 1}}));
}
TEST_F(ShapeTreeTest, ReverseIterateOrderLeaves) {
ShapeTree<int> t(nested_tuple_shape_, 42);
std::vector<ShapeIndex> v;
v.reserve(t.leaf_count());
for (auto it = t.leaf_rbegin(); it != t.leaf_rend(); ++it) {
v.push_back(it->first);
}
EXPECT_EQ(v, (std::vector<ShapeIndex>{
{2, 1},
{2, 0, 1},
{2, 0, 0},
{1, 1},
{1, 0},
{0},
}));
}
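// The BM_* benchmarks below build a tuple shape `depth` levels deep with
// `fan_out` children per level, then measure ShapeTree construction, copy,
// move, and traversal over it.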
void BM_Construct(::testing::benchmark::State& state) {
const int depth = state.range(0);
const int fan_out = state.range(1);
Shape shape = ShapeUtil::MakeShape(F32, {32, 64, 128});
for (int i = 0; i < depth; ++i) {
std::vector<xla::Shape> shapes(fan_out, shape);
shape = ShapeUtil::MakeTupleShape(shapes);
}
for (auto s : state) {
ShapeTree<int> shape_tree(shape);
}
}
void BM_ConstructUnowned(::testing::benchmark::State& state) {
const int depth = state.range(0);
const int fan_out = state.range(1);
Shape shape = ShapeUtil::MakeShape(F32, {32, 64, 128});
for (int i = 0; i < depth; ++i) {
std::vector<xla::Shape> shapes(fan_out, shape);
shape = ShapeUtil::MakeTupleShape(shapes);
}
for (auto s : state) {
ShapeTree<int> shape_tree(&shape);
}
}
void BM_Copy(::testing::benchmark::State& state) {
const int depth = state.range(0);
const int fan_out = state.range(1);
Shape shape = ShapeUtil::MakeShape(F32, {32, 64, 128});
for (int i = 0; i < depth; ++i) {
std::vector<xla::Shape> shapes(fan_out, shape);
shape = ShapeUtil::MakeTupleShape(shapes);
}
ShapeTree<int> shape_tree(shape);
for (auto s : state) {
ShapeTree<int> copy = shape_tree;
tsl::testing::DoNotOptimize(copy);
}
}
void BM_Move(::testing::benchmark::State& state) {
const int depth = state.range(0);
const int fan_out = state.range(1);
Shape shape = ShapeUtil::MakeShape(F32, {32, 64, 128});
for (int i = 0; i < depth; ++i) {
std::vector<xla::Shape> shapes(fan_out, shape);
shape = ShapeUtil::MakeTupleShape(shapes);
}
ShapeTree<int> shape_tree(shape);
for (auto s : state) {
ShapeTree<int> copy = std::move(shape_tree);
shape_tree = std::move(copy);
}
}
void BM_ForEach(::testing::benchmark::State& state) {
const int depth = state.range(0);
const int fan_out = state.range(1);
Shape shape = ShapeUtil::MakeShape(F32, {32, 64, 128});
for (int i = 0; i < depth; ++i) {
std::vector<xla::Shape> shapes(fan_out, shape);
shape = ShapeUtil::MakeTupleShape(shapes);
}
ShapeTree<int> shape_tree(shape);
for (auto s : state) {
shape_tree.ForEachMutableElement([](const ShapeIndex& index, int* data) {
tsl::testing::DoNotOptimize(index);
});
}
}
void BM_Iterate(::testing::benchmark::State& state) {
const int depth = state.range(0);
const int fan_out = state.range(1);
Shape shape = ShapeUtil::MakeShape(F32, {32, 64, 128});
for (int i = 0; i < depth; ++i) {
std::vector<xla::Shape> shapes(fan_out, shape);
shape = ShapeUtil::MakeTupleShape(shapes);
}
ShapeTree<int> shape_tree(shape);
for (auto s : state) {
for (auto& iter : shape_tree) {
tsl::testing::DoNotOptimize(iter.second);
}
}
}
#define BENCHMARK_WITH_ARGS(name) \
BENCHMARK(name)->ArgPair(2, 8)->ArgPair(1, 1000)
BENCHMARK_WITH_ARGS(BM_Construct);
BENCHMARK_WITH_ARGS(BM_ConstructUnowned);
BENCHMARK_WITH_ARGS(BM_Copy);
BENCHMARK_WITH_ARGS(BM_Move);
BENCHMARK_WITH_ARGS(BM_ForEach);
BENCHMARK_WITH_ARGS(BM_Iterate);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/shape_tree.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/shape_tree_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8a6d5e13-b058-42e0-b8ea-d7b62c632acd | cpp | tensorflow/tensorflow | pjrt_c_api_helpers | third_party/xla/xla/pjrt/c/pjrt_c_api_helpers.cc | third_party/xla/xla/pjrt/c/pjrt_c_api_helpers_test.cc | #include "xla/pjrt/c/pjrt_c_api_helpers.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/layout.h"
#include "xla/pjrt/c/pjrt_c_api.h"
#include "xla/pjrt/c/pjrt_c_api_layouts_extension.h"
#include "xla/pjrt/c/pjrt_c_api_profiler_extension.h"
#include "xla/pjrt/distributed/key_value_store_interface.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/pjrt/pjrt_executable.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/primitive_util.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/connected_traceme.h"
#include "tsl/profiler/lib/context_types.h"
namespace pjrt {
const absl::string_view kHloFormat = "hlo";
const absl::string_view kMlirFormat = "mlir";
const absl::string_view kHloWithConfigFormat = "hlo_with_config";
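// The Make*Deleter factories below return callables that invoke the matching
// PJRT_*_Destroy entry point, so C API objects can be owned by smart pointers
// with a custom deleter.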
PJRT_ClientDeleter MakeClientDeleter(const PJRT_Api* api) {
return [api](PJRT_Client* client) -> void {
PJRT_Client_Destroy_Args destroy_args;
destroy_args.struct_size = PJRT_Client_Destroy_Args_STRUCT_SIZE;
destroy_args.extension_start = nullptr;
destroy_args.client = client;
PJRT_Error* error = api->PJRT_Client_Destroy(&destroy_args);
CHECK(error == nullptr);
};
}
PJRT_ErrorDeleter MakeErrorDeleter(const PJRT_Api* api) {
return [api](PJRT_Error* error) -> void {
PJRT_Error_Destroy_Args destroy_args;
destroy_args.struct_size = PJRT_Error_Destroy_Args_STRUCT_SIZE;
destroy_args.extension_start = nullptr;
destroy_args.error = error;
api->PJRT_Error_Destroy(&destroy_args);
};
}
PJRT_BufferDeleter MakeBufferDeleter(const PJRT_Api* api) {
return [api](PJRT_Buffer* buffer) -> void {
PJRT_Buffer_Destroy_Args destroy_args;
destroy_args.struct_size = PJRT_Buffer_Destroy_Args_STRUCT_SIZE;
destroy_args.extension_start = nullptr;
destroy_args.buffer = buffer;
pjrt::LogFatalIfPjrtError(api->PJRT_Buffer_Destroy(&destroy_args), api);
};
}
PJRT_ExecutableDeleter MakeExecutableDeleter(const PJRT_Api* api) {
return [api](PJRT_Executable* executable) -> void {
PJRT_Executable_Destroy_Args args;
args.struct_size = PJRT_Executable_Destroy_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.executable = executable;
pjrt::LogFatalIfPjrtError(api->PJRT_Executable_Destroy(&args), api);
};
}
PJRT_LoadedExecutableDeleter MakeLoadedExecutableDeleter(const PJRT_Api* api) {
return [api](PJRT_LoadedExecutable* executable) -> void {
PJRT_LoadedExecutable_Destroy_Args args;
args.struct_size = PJRT_LoadedExecutable_Destroy_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.executable = executable;
pjrt::LogFatalIfPjrtError(api->PJRT_LoadedExecutable_Destroy(&args), api);
};
}
absl::Status PjrtErrorToStatus(const PJRT_Error* error, const PJRT_Api* api) {
absl::Status status;
if (error != nullptr) {
status = absl::Status(PjrtErrorToStatusCode(error, api),
GetPjrtErrorMessage(error, api));
}
return status;
}
PJRT_TopologyDescriptionDeleter MakeTopologyDescriptionDeleter(
const PJRT_Api* api) {
return [api](PJRT_TopologyDescription* topology) -> void {
PJRT_TopologyDescription_Destroy_Args destroy_args;
destroy_args.struct_size =
PJRT_TopologyDescription_Destroy_Args_STRUCT_SIZE;
destroy_args.extension_start = nullptr;
destroy_args.topology = topology;
pjrt::LogFatalIfPjrtError(
api->PJRT_TopologyDescription_Destroy(&destroy_args), api);
};
}
PJRT_Layouts_MemoryLayoutDeleter MakeMemoryLayoutDeleter(const PJRT_Api* api) {
PJRT_Layouts_Extension* ext_api =
FindExtension<PJRT_Layouts_Extension>(api, PJRT_Extension_Type_Layouts);
CHECK_NE(ext_api, nullptr) << "MakeMemoryLayoutDeleter passed PJRT_Api that "
"doesn't support layouts extension";
return [api, ext_api](PJRT_Layouts_MemoryLayout* layout) -> void {
PJRT_Layouts_MemoryLayout_Destroy_Args args;
args.struct_size = PJRT_Layouts_MemoryLayout_Destroy_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.layout = layout;
pjrt::LogFatalIfPjrtError(ext_api->PJRT_Layouts_MemoryLayout_Destroy(&args),
api);
};
}
PJRT_Error_Code GetErrorCode(const PJRT_Error* error, const PJRT_Api* api) {
PJRT_Error_GetCode_Args args;
args.struct_size = PJRT_Error_GetCode_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.error = error;
pjrt::LogFatalIfPjrtError(api->PJRT_Error_GetCode(&args), api);
return args.code;
}
absl::StatusCode PjrtErrorToStatusCode(const PJRT_Error* error,
const PJRT_Api* api) {
return PjrtErrorCodeToStatusCode(GetErrorCode(error, api));
}
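// PJRT_Error_Code values are defined to match absl::StatusCode numerically, so
// once the value is known to be one of the enumerated cases the conversion is
// a static_cast in either direction (see StatusCodeToPjrtErrorCode below).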
absl::StatusCode PjrtErrorCodeToStatusCode(PJRT_Error_Code code) {
switch (code) {
case PJRT_Error_Code_CANCELLED:
case PJRT_Error_Code_UNKNOWN:
case PJRT_Error_Code_INVALID_ARGUMENT:
case PJRT_Error_Code_DEADLINE_EXCEEDED:
case PJRT_Error_Code_NOT_FOUND:
case PJRT_Error_Code_ALREADY_EXISTS:
case PJRT_Error_Code_PERMISSION_DENIED:
case PJRT_Error_Code_RESOURCE_EXHAUSTED:
case PJRT_Error_Code_FAILED_PRECONDITION:
case PJRT_Error_Code_ABORTED:
case PJRT_Error_Code_OUT_OF_RANGE:
case PJRT_Error_Code_UNIMPLEMENTED:
case PJRT_Error_Code_INTERNAL:
case PJRT_Error_Code_UNAVAILABLE:
case PJRT_Error_Code_DATA_LOSS:
case PJRT_Error_Code_UNAUTHENTICATED:
return static_cast<absl::StatusCode>(code);
}
}
PJRT_Error_Code StatusCodeToPjrtErrorCode(absl::StatusCode code) {
switch (static_cast<tsl::error::Code>(code)) {
case tsl::error::CANCELLED:
case tsl::error::UNKNOWN:
case tsl::error::INVALID_ARGUMENT:
case tsl::error::DEADLINE_EXCEEDED:
case tsl::error::NOT_FOUND:
case tsl::error::ALREADY_EXISTS:
case tsl::error::PERMISSION_DENIED:
case tsl::error::UNAUTHENTICATED:
case tsl::error::RESOURCE_EXHAUSTED:
case tsl::error::FAILED_PRECONDITION:
case tsl::error::ABORTED:
case tsl::error::OUT_OF_RANGE:
case tsl::error::UNIMPLEMENTED:
case tsl::error::INTERNAL:
case tsl::error::UNAVAILABLE:
case tsl::error::DATA_LOSS:
return static_cast<PJRT_Error_Code>(code);
case tsl::error::OK:
CHECK(false) << "Status::OK() cannot be converted to PJRT_Error code, "
"use nullptr instead";
case tensorflow::error::
DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_:
CHECK(false) << "got DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_"
"USE_DEFAULT_IN_SWITCH_INSTEAD_";
case tensorflow::error::Code_INT_MIN_SENTINEL_DO_NOT_USE_:
CHECK(false) << "got Code_INT_MIN_SENTINEL_DO_NOT_USE_";
case tensorflow::error::Code_INT_MAX_SENTINEL_DO_NOT_USE_:
CHECK(false) << "got Code_INT_MAX_SENTINEL_DO_NOT_USE_";
}
}
absl::string_view GetPjrtErrorMessage(const PJRT_Error* error,
const PJRT_Api* api) {
PJRT_Error_Message_Args message_args;
message_args.struct_size = PJRT_Error_Message_Args_STRUCT_SIZE;
message_args.extension_start = nullptr;
message_args.error = error;
api->PJRT_Error_Message(&message_args);
return absl::string_view(message_args.message, message_args.message_size);
}
void LogFatalIfPjrtError(PJRT_Error* error, const PJRT_Api* api) {
std::unique_ptr<PJRT_Error, pjrt::PJRT_ErrorDeleter> _error(
error, MakeErrorDeleter(api));
absl::Status _status = PjrtErrorToStatus(_error.get(), api);
if (!_status.ok()) {
LOG(FATAL) << "Unexpected error status " << _status.message();
}
}
PJRT_EventDeleter MakeEventDeleter(const PJRT_Api* api) {
CHECK(api != nullptr);
return [api](PJRT_Event* managed) {
PJRT_Event_Destroy_Args args;
args.struct_size = PJRT_Event_Destroy_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.event = managed;
LogFatalIfPjrtError(api->PJRT_Event_Destroy(&args), api);
};
}
PJRT_Buffer_Type ConvertToPjRtBufferType(xla::PrimitiveType type) {
switch (type) {
case xla::PrimitiveType::PRIMITIVE_TYPE_INVALID:
return PJRT_Buffer_Type::PJRT_Buffer_Type_INVALID;
case xla::PrimitiveType::PRED:
return PJRT_Buffer_Type::PJRT_Buffer_Type_PRED;
case xla::PrimitiveType::TOKEN:
return PJRT_Buffer_Type::PJRT_Buffer_Type_TOKEN;
case xla::PrimitiveType::S2:
return PJRT_Buffer_Type::PJRT_Buffer_Type_S2;
case xla::PrimitiveType::S4:
return PJRT_Buffer_Type::PJRT_Buffer_Type_S4;
case xla::PrimitiveType::S8:
return PJRT_Buffer_Type::PJRT_Buffer_Type_S8;
case xla::PrimitiveType::S16:
return PJRT_Buffer_Type::PJRT_Buffer_Type_S16;
case xla::PrimitiveType::S32:
return PJRT_Buffer_Type::PJRT_Buffer_Type_S32;
case xla::PrimitiveType::S64:
return PJRT_Buffer_Type::PJRT_Buffer_Type_S64;
case xla::PrimitiveType::U2:
return PJRT_Buffer_Type::PJRT_Buffer_Type_U2;
case xla::PrimitiveType::U4:
return PJRT_Buffer_Type::PJRT_Buffer_Type_U4;
case xla::PrimitiveType::U8:
return PJRT_Buffer_Type::PJRT_Buffer_Type_U8;
case xla::PrimitiveType::U16:
return PJRT_Buffer_Type::PJRT_Buffer_Type_U16;
case xla::PrimitiveType::U32:
return PJRT_Buffer_Type::PJRT_Buffer_Type_U32;
case xla::PrimitiveType::U64:
return PJRT_Buffer_Type::PJRT_Buffer_Type_U64;
case xla::PrimitiveType::F16:
return PJRT_Buffer_Type::PJRT_Buffer_Type_F16;
case xla::PrimitiveType::F32:
return PJRT_Buffer_Type::PJRT_Buffer_Type_F32;
case xla::PrimitiveType::BF16:
return PJRT_Buffer_Type::PJRT_Buffer_Type_BF16;
case xla::PrimitiveType::F64:
return PJRT_Buffer_Type::PJRT_Buffer_Type_F64;
case xla::PrimitiveType::F8E5M2:
return PJRT_Buffer_Type::PJRT_Buffer_Type_F8E5M2;
case xla::PrimitiveType::F8E4M3:
return PJRT_Buffer_Type::PJRT_Buffer_Type_F8E4M3;
case xla::PrimitiveType::F8E4M3FN:
return PJRT_Buffer_Type::PJRT_Buffer_Type_F8E4M3FN;
case xla::PrimitiveType::F8E4M3B11FNUZ:
return PJRT_Buffer_Type::PJRT_Buffer_Type_F8E4M3B11FNUZ;
case xla::PrimitiveType::F8E5M2FNUZ:
return PJRT_Buffer_Type::PJRT_Buffer_Type_F8E5M2FNUZ;
case xla::PrimitiveType::F8E4M3FNUZ:
return PJRT_Buffer_Type::PJRT_Buffer_Type_F8E4M3FNUZ;
case xla::PrimitiveType::F8E3M4:
return PJRT_Buffer_Type::PJRT_Buffer_Type_F8E3M4;
case xla::PrimitiveType::C64:
return PJRT_Buffer_Type::PJRT_Buffer_Type_C64;
case xla::PrimitiveType::C128:
return PJRT_Buffer_Type::PJRT_Buffer_Type_C128;
default:
CHECK(false)
<< "Element type of the shape is not supported in C API layer: "
<< xla::primitive_util::LowercasePrimitiveTypeName(type);
}
}
xla::PrimitiveType ConvertFromPjRtBufferType(PJRT_Buffer_Type type) {
switch (type) {
case PJRT_Buffer_Type::PJRT_Buffer_Type_PRED:
return xla::PrimitiveType::PRED;
case PJRT_Buffer_Type::PJRT_Buffer_Type_TOKEN:
return xla::PrimitiveType::TOKEN;
case PJRT_Buffer_Type::PJRT_Buffer_Type_S2:
return xla::PrimitiveType::S2;
case PJRT_Buffer_Type::PJRT_Buffer_Type_S4:
return xla::PrimitiveType::S4;
case PJRT_Buffer_Type::PJRT_Buffer_Type_S8:
return xla::PrimitiveType::S8;
case PJRT_Buffer_Type::PJRT_Buffer_Type_S16:
return xla::PrimitiveType::S16;
case PJRT_Buffer_Type::PJRT_Buffer_Type_S32:
return xla::PrimitiveType::S32;
case PJRT_Buffer_Type::PJRT_Buffer_Type_S64:
return xla::PrimitiveType::S64;
case PJRT_Buffer_Type::PJRT_Buffer_Type_U2:
return xla::PrimitiveType::U2;
case PJRT_Buffer_Type::PJRT_Buffer_Type_U4:
return xla::PrimitiveType::U4;
case PJRT_Buffer_Type::PJRT_Buffer_Type_U8:
return xla::PrimitiveType::U8;
case PJRT_Buffer_Type::PJRT_Buffer_Type_U16:
return xla::PrimitiveType::U16;
case PJRT_Buffer_Type::PJRT_Buffer_Type_U32:
return xla::PrimitiveType::U32;
case PJRT_Buffer_Type::PJRT_Buffer_Type_U64:
return xla::PrimitiveType::U64;
case PJRT_Buffer_Type::PJRT_Buffer_Type_F16:
return xla::PrimitiveType::F16;
case PJRT_Buffer_Type::PJRT_Buffer_Type_F32:
return xla::PrimitiveType::F32;
case PJRT_Buffer_Type::PJRT_Buffer_Type_BF16:
return xla::PrimitiveType::BF16;
case PJRT_Buffer_Type::PJRT_Buffer_Type_F64:
return xla::PrimitiveType::F64;
case PJRT_Buffer_Type::PJRT_Buffer_Type_C64:
return xla::PrimitiveType::C64;
case PJRT_Buffer_Type::PJRT_Buffer_Type_C128:
return xla::PrimitiveType::C128;
case PJRT_Buffer_Type::PJRT_Buffer_Type_F8E5M2:
return xla::PrimitiveType::F8E5M2;
case PJRT_Buffer_Type::PJRT_Buffer_Type_F8E4M3:
return xla::PrimitiveType::F8E4M3;
case PJRT_Buffer_Type::PJRT_Buffer_Type_F8E4M3FN:
return xla::PrimitiveType::F8E4M3FN;
case PJRT_Buffer_Type::PJRT_Buffer_Type_F8E4M3B11FNUZ:
return xla::PrimitiveType::F8E4M3B11FNUZ;
case PJRT_Buffer_Type::PJRT_Buffer_Type_F8E5M2FNUZ:
return xla::PrimitiveType::F8E5M2FNUZ;
case PJRT_Buffer_Type::PJRT_Buffer_Type_F8E4M3FNUZ:
return xla::PrimitiveType::F8E4M3FNUZ;
case PJRT_Buffer_Type::PJRT_Buffer_Type_F8E3M4:
return xla::PrimitiveType::F8E3M4;
case PJRT_Buffer_Type::PJRT_Buffer_Type_INVALID:
CHECK(false) << "Buffer type is not supported in C API layer.";
}
}
const char* HostBufferSemanticsToString(
xla::PjRtClient::HostBufferSemantics h) {
switch (h) {
case xla::PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall:
return "xla::PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall";
case xla::PjRtClient::HostBufferSemantics::kImmutableZeroCopy:
return "xla::PjRtClient::HostBufferSemantics::kImmutableZeroCopy";
case xla::PjRtClient::HostBufferSemantics::kMutableZeroCopy:
return "xla::PjRtClient::HostBufferSemantics::kMutableZeroCopy";
case xla::PjRtClient::HostBufferSemantics::kImmutableUntilTransferCompletes:
return "xla::PjRtClient::HostBufferSemantics::"
"kImmutableUntilTransferCompletes";
}
}
PJRT_HostBufferSemantics ConvertToPjRtHostBufferSemantics(
xla::PjRtClient::HostBufferSemantics buffer_semantics) {
switch (buffer_semantics) {
case xla::PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall:
return PJRT_HostBufferSemantics::
PJRT_HostBufferSemantics_kImmutableOnlyDuringCall;
case xla::PjRtClient::HostBufferSemantics::kImmutableUntilTransferCompletes:
return PJRT_HostBufferSemantics::
PJRT_HostBufferSemantics_kImmutableUntilTransferCompletes;
case xla::PjRtClient::HostBufferSemantics::kImmutableZeroCopy:
return PJRT_HostBufferSemantics::
PJRT_HostBufferSemantics_kImmutableZeroCopy;
case xla::PjRtClient::HostBufferSemantics::kMutableZeroCopy:
return PJRT_HostBufferSemantics::
PJRT_HostBufferSemantics_kMutableZeroCopy;
default:
CHECK(false)
<< "Input host buffer semantics is not supported in C API layer: "
<< HostBufferSemanticsToString(buffer_semantics);
}
}
xla::PjRtClient::HostBufferSemantics ConvertFromPjRtHostBufferSemantics(
PJRT_HostBufferSemantics buffer_semantics) {
switch (buffer_semantics) {
case PJRT_HostBufferSemantics::
PJRT_HostBufferSemantics_kImmutableOnlyDuringCall:
return xla::PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall;
case PJRT_HostBufferSemantics::
PJRT_HostBufferSemantics_kImmutableUntilTransferCompletes:
return xla::PjRtClient::HostBufferSemantics::
kImmutableUntilTransferCompletes;
case PJRT_HostBufferSemantics::PJRT_HostBufferSemantics_kImmutableZeroCopy:
return xla::PjRtClient::HostBufferSemantics::kImmutableZeroCopy;
case PJRT_HostBufferSemantics::PJRT_HostBufferSemantics_kMutableZeroCopy:
return xla::PjRtClient::HostBufferSemantics::kMutableZeroCopy;
}
}
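// Adapts a PJRT_Event into a PjRtFuture<> by registering an OnReady callback
// that fulfills a promise with the converted status and then destroys the
// event.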
xla::PjRtFuture<> ConvertCEventToCppFuture(PJRT_Event* c_event,
const PJRT_Api* c_api) {
using xla::PjRtFuture;
PJRT_Event_OnReady_Args event_onready_args;
event_onready_args.struct_size = PJRT_Event_OnReady_Args_STRUCT_SIZE;
event_onready_args.extension_start = nullptr;
event_onready_args.event = c_event;
PjRtFuture<>::Promise promise = PjRtFuture<>::CreatePromise();
event_onready_args.user_arg = new std::function<void(PJRT_Error*)>(
[promise, c_event, c_api](PJRT_Error* error) mutable {
if (error != nullptr) {
promise.Set(::pjrt::PjrtErrorToStatus(error, c_api));
::pjrt::MakeErrorDeleter(c_api)(error);
} else {
promise.Set();
}
::pjrt::MakeEventDeleter(c_api)(c_event);
});
event_onready_args.callback = [](PJRT_Error* error, void* arg) {
std::function<void(PJRT_Error*)>* set_future =
reinterpret_cast<std::function<void(PJRT_Error*)>*>(arg);
(*set_future)(error);
delete set_future;
};
PJRT_Error* error = c_api->PJRT_Event_OnReady(&event_onready_args);
if (error != nullptr) {
return PjRtFuture<>(::pjrt::PjrtErrorToStatus(error, c_api));
}
return PjRtFuture<>(std::move(promise));
}
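// Converts one name/value pair to PJRT_NamedValue. The C struct aliases the
// name string and any string or int64-list payload, so callers must keep those
// alive for as long as the PJRT_NamedValue is used.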
static absl::StatusOr<PJRT_NamedValue> ConvertToPjRtNamedValue(
const std::string& name, const xla::PjRtValueType& value) {
PJRT_NamedValue c_value;
c_value.struct_size = PJRT_NamedValue_STRUCT_SIZE;
c_value.extension_start = nullptr;
c_value.name = name.c_str();
c_value.name_size = name.size();
if (std::holds_alternative<std::string>(value)) {
c_value.type = PJRT_NamedValue_Type::PJRT_NamedValue_kString;
const std::string& option_string_value = std::get<std::string>(value);
c_value.string_value = option_string_value.c_str();
c_value.value_size = option_string_value.size();
} else if (std::holds_alternative<int64_t>(value)) {
c_value.type = PJRT_NamedValue_Type::PJRT_NamedValue_kInt64;
c_value.int64_value = std::get<int64_t>(value);
c_value.value_size = 1;
} else if (std::holds_alternative<std::vector<int64_t>>(value)) {
c_value.type = PJRT_NamedValue_Type::PJRT_NamedValue_kInt64List;
const std::vector<int64_t>& option_int_list_value =
std::get<std::vector<int64_t>>(value);
c_value.int64_array_value = option_int_list_value.data();
c_value.value_size = option_int_list_value.size();
} else if (std::holds_alternative<float>(value)) {
c_value.type = PJRT_NamedValue_Type::PJRT_NamedValue_kFloat;
c_value.float_value = std::get<float>(value);
c_value.value_size = 1;
} else if (std::holds_alternative<bool>(value)) {
c_value.type = PJRT_NamedValue_Type::PJRT_NamedValue_kBool;
c_value.bool_value = std::get<bool>(value);
c_value.value_size = 1;
} else {
return tsl::errors::InvalidArgument("Unexpected PjRtValueType: '",
value.index(), " with name: ", name);
}
return c_value;
}
absl::StatusOr<std::vector<PJRT_NamedValue>> ConvertToPjRtNamedValueList(
const absl::flat_hash_map<std::string, xla::PjRtValueType>& cpp_value_map) {
std::vector<PJRT_NamedValue> c_value_list;
c_value_list.reserve(cpp_value_map.size());
for (const auto& [name, value] : cpp_value_map) {
TF_ASSIGN_OR_RETURN(PJRT_NamedValue c_value,
ConvertToPjRtNamedValue(name, value));
c_value_list.push_back(c_value);
}
return c_value_list;
}
absl::flat_hash_map<std::string, xla::PjRtValueType>
ConvertFromPjRtNamedValueList(const PJRT_NamedValue* c_value_list,
size_t list_size) {
absl::flat_hash_map<std::string, xla::PjRtValueType> cpp_value_map;
for (int i = 0; i < list_size; ++i) {
const PJRT_NamedValue& c_value = c_value_list[i];
absl::string_view name = absl::string_view(c_value.name, c_value.name_size);
switch (c_value.type) {
case PJRT_NamedValue_Type::PJRT_NamedValue_kString: {
std::string string_value(c_value.string_value, c_value.value_size);
cpp_value_map[name] = xla::PjRtValueType(string_value);
break;
}
case PJRT_NamedValue_Type::PJRT_NamedValue_kInt64: {
cpp_value_map[name] = xla::PjRtValueType(c_value.int64_value);
break;
}
case PJRT_NamedValue_Type::PJRT_NamedValue_kInt64List: {
const int64_t* array_ptr(c_value.int64_array_value);
std::vector<int64_t> int64_array(array_ptr,
array_ptr + c_value.value_size);
cpp_value_map[name] = xla::PjRtValueType(int64_array);
break;
}
case PJRT_NamedValue_Type::PJRT_NamedValue_kFloat: {
cpp_value_map[name] = xla::PjRtValueType(c_value.float_value);
break;
}
case PJRT_NamedValue_Type::PJRT_NamedValue_kBool: {
cpp_value_map[name] = xla::PjRtValueType(c_value.bool_value);
break;
}
default: {
LOG(FATAL) << "Unexpected PJRT_NamedValue type: " << c_value.type
<< " with name: " << name;
break;
}
}
}
return cpp_value_map;
}
static absl::StatusOr<PJRT_NamedValue_Type> GetPjrtNamedValueType(
xla::PjRtValueType cpp_value) {
if (std::holds_alternative<std::string>(cpp_value)) {
return PJRT_NamedValue_Type::PJRT_NamedValue_kString;
}
if (std::holds_alternative<int64_t>(cpp_value)) {
return PJRT_NamedValue_Type::PJRT_NamedValue_kInt64;
}
if (std::holds_alternative<std::vector<int64_t>>(cpp_value)) {
return PJRT_NamedValue_Type::PJRT_NamedValue_kInt64List;
}
if (std::holds_alternative<float>(cpp_value)) {
return PJRT_NamedValue_Type::PJRT_NamedValue_kFloat;
}
if (std::holds_alternative<bool>(cpp_value)) {
return PJRT_NamedValue_Type::PJRT_NamedValue_kBool;
}
return tsl::errors::InvalidArgument("Unexpected PjRtValueType with index",
cpp_value.index());
}
absl::Status ValidateCreateOptions(
const absl::flat_hash_map<std::string, xla::PjRtValueType>& value_map,
const absl::flat_hash_map<std::string, PJRT_NamedValue_Type>&
expected_name_and_types) {
for (const auto& [name, value] : value_map) {
auto it = expected_name_and_types.find(name);
if (it == expected_name_and_types.end()) {
return tsl::errors::InvalidArgument(
"Unexpected option name passed to PJRT_Client_Create: ", name);
}
TF_ASSIGN_OR_RETURN(PJRT_NamedValue_Type type,
GetPjrtNamedValueType(value));
if (type != it->second) {
return tsl::errors::InvalidArgument(
"Option passed to PJRT_Client_Create with name ", name,
" has type index ", value.index(), " but expected type index is ",
it->second);
}
}
return absl::OkStatus();
}
const std::vector<PJRT_NamedValue>& GetXlaPluginCAttributes() {
constexpr absl::string_view kXlaVersion = "xla_version";
PJRT_NamedValue c_value;
c_value.struct_size = PJRT_NamedValue_STRUCT_SIZE;
c_value.extension_start = nullptr;
c_value.name = kXlaVersion.data();
c_value.name_size = kXlaVersion.size();
c_value.type = PJRT_NamedValue_Type::PJRT_NamedValue_kInt64;
c_value.int64_value = 2;
c_value.value_size = 1;
static const std::vector<PJRT_NamedValue>* c_values =
new std::vector<PJRT_NamedValue>({c_value});
return *c_values;
}
static std::string StructSizeErrorMsg(absl::string_view struct_name,
size_t expected_size,
size_t actual_size) {
std::string error_msg = absl::StrCat(
"Unexpected ", struct_name, " size: expected ", expected_size, ", got ",
actual_size, ". Check installed software versions.");
#if defined(PJRT_API_MAJOR)
absl::StrAppend(&error_msg, " The framework PJRT API version is ",
PJRT_API_MAJOR, ".", PJRT_API_MINOR, ".");
#endif
return error_msg;
}
absl::Status ActualStructSizeIsGreaterOrEqual(absl::string_view struct_name,
size_t expected_size,
size_t actual_size) {
if (actual_size < expected_size) {
return tsl::errors::InvalidArgument(
StructSizeErrorMsg(struct_name, expected_size, actual_size));
}
if (actual_size > expected_size) {
VLOG(2) << StructSizeErrorMsg(struct_name, expected_size, actual_size);
}
return absl::OkStatus();
}
absl::string_view GetPlatformVersion(PJRT_Client* client, const PJRT_Api* api) {
PJRT_Client_PlatformVersion_Args args;
args.struct_size = PJRT_Client_PlatformVersion_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.client = client;
LogFatalIfPjrtError(api->PJRT_Client_PlatformVersion(&args), api);
absl::string_view platform_version(args.platform_version,
args.platform_version_size);
return platform_version;
}
absl::string_view GetPlatformName(PJRT_Client* client, const PJRT_Api* api) {
PJRT_Client_PlatformName_Args args;
args.client = client;
args.struct_size = PJRT_Client_PlatformName_Args_STRUCT_SIZE;
args.extension_start = nullptr;
pjrt::LogFatalIfPjrtError(api->PJRT_Client_PlatformName(&args), api);
absl::string_view platform_name(args.platform_name, args.platform_name_size);
return platform_name;
}
absl::StatusOr<PJRT_TopologyDescription*> GetTopologyDescription(
PJRT_Client* client, const PJRT_Api* api) {
PJRT_Client_TopologyDescription_Args args;
args.struct_size = PJRT_Client_TopologyDescription_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.client = client;
RETURN_STATUS_IF_PJRT_ERROR(api->PJRT_Client_TopologyDescription(&args), api);
return args.topology;
}
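// Releases the PjRtChunk's storage into the returned PJRT_Chunk; the C-side
// deleter invokes (and then frees) a heap-allocated copy of the chunk's
// deleter.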
PJRT_Chunk ConvertFromCppChunk(xla::PjRtChunk chunk) {
PJRT_Chunk c_chunk;
c_chunk.data = chunk.data();
c_chunk.size = static_cast<size_t>(chunk.size());
c_chunk.deleter_arg = new std::function(chunk.deleter());
c_chunk.deleter = [](void* data, void* deleter_arg) {
auto* deleter = reinterpret_cast<std::function<void(void*)>*>(deleter_arg);
(*deleter)(data);
delete deleter;
};
chunk.release();
return c_chunk;
}
xla::PjRtChunk ConvertToCppChunk(const PJRT_Chunk& chunk) {
return xla::PjRtChunk(
chunk.data, chunk.size,
[deleter_arg = chunk.deleter_arg, deleter = chunk.deleter](void* data) {
deleter(data, deleter_arg);
});
}
PJRT_DeviceDescription* GetDeviceDescription(const PJRT_Api* api,
PJRT_Device* device) {
PJRT_Device_GetDescription_Args args;
args.struct_size = PJRT_Device_GetDescription_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.device = device;
pjrt::LogFatalIfPjrtError(api->PJRT_Device_GetDescription(&args), api);
return args.device_description;
}
absl::Span<PJRT_Memory* const> GetAddressableMemories(const PJRT_Api* api,
PJRT_Device* device) {
PJRT_Device_AddressableMemories_Args args;
args.struct_size = PJRT_Device_AddressableMemories_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.device = device;
pjrt::LogFatalIfPjrtError(api->PJRT_Device_AddressableMemories(&args), api);
return absl::MakeSpan(args.memories, args.num_memories);
}
int GetId(const PJRT_Api* api, PJRT_DeviceDescription* device_desc) {
PJRT_DeviceDescription_Id_Args args = PJRT_DeviceDescription_Id_Args{
PJRT_DeviceDescription_Id_Args_STRUCT_SIZE, nullptr, device_desc};
pjrt::LogFatalIfPjrtError(api->PJRT_DeviceDescription_Id(&args), api);
return args.id;
}
static void PjRtValueDeleterCallback(char* value) { delete[] value; }
static PJRT_KeyValueGetCFunc ToKVGetCFunc(
xla::KeyValueStoreInterface* kv_store) {
return [kv_store](PJRT_KeyValueGetCallback_Args* args) -> PJRT_Error* {
absl::StatusOr<std::string> output =
kv_store->Get(std::string_view(args->key, args->key_size),
absl::Milliseconds(args->timeout_in_ms));
if (!output.ok()) {
absl::string_view message = output.status().message();
return (*args->callback_error)(
StatusCodeToPjrtErrorCode(output.status().code()), message.data(),
message.size());
}
args->value = new char[output->size()];
std::copy(output->begin(), output->end(), args->value);
args->value_size = output->size();
args->value_deleter_callback = &PjRtValueDeleterCallback;
return nullptr;
};
}
static PJRT_KeyValuePutCFunc ToKVPutCFunc(
xla::KeyValueStoreInterface* kv_store) {
return [kv_store](PJRT_KeyValuePutCallback_Args* args) -> PJRT_Error* {
absl::Status status =
kv_store->Set(std::string_view(args->key, args->key_size),
std::string_view(args->value, args->value_size));
if (!status.ok()) {
absl::string_view message = status.message();
return (*args->callback_error)(StatusCodeToPjrtErrorCode(status.code()),
message.data(), message.size());
}
return nullptr;
};
}
static PJRT_KeyValueGetCallback ToCKVGetCallback(
PJRT_KeyValueGetCFunc* kv_get_c_func) {
return [](PJRT_KeyValueGetCallback_Args* args) -> PJRT_Error* {
PJRT_KeyValueGetCFunc* kv_get_c_func =
reinterpret_cast<PJRT_KeyValueGetCFunc*>(args->user_arg);
if (kv_get_c_func == nullptr) {
absl::Status status = xla::InvalidArgument(
"got nullptr for PJRT_KeyValueGet_Args.user_arg");
return (*args->callback_error)(StatusCodeToPjrtErrorCode(status.code()),
status.message().data(),
status.message().size());
}
return (*kv_get_c_func)(args);
};
}
static PJRT_KeyValuePutCallback ToCKVPutCallback(
PJRT_KeyValuePutCFunc* kv_put_c_func) {
return [](PJRT_KeyValuePutCallback_Args* args) -> PJRT_Error* {
PJRT_KeyValuePutCFunc* kv_put_c_func =
reinterpret_cast<PJRT_KeyValuePutCFunc*>(args->user_arg);
if (kv_put_c_func == nullptr) {
absl::Status status = xla::InvalidArgument(
"got nullptr for PJRT_KeyValuePut_Args.user_arg");
return (*args->callback_error)(StatusCodeToPjrtErrorCode(status.code()),
status.message().data(),
status.message().size());
}
return (*kv_put_c_func)(args);
};
}
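// Bundles a KeyValueStoreInterface into the C callback form. The returned
// PJRT_KeyValueCallbackData owns the std::function trampolines referenced by
// the C callbacks' user_arg pointers and keeps the store alive.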
std::unique_ptr<PJRT_KeyValueCallbackData> ConvertToCKeyValueCallbacks(
std::shared_ptr<xla::KeyValueStoreInterface> kv_store) {
auto kv_callback_data = std::make_unique<PJRT_KeyValueCallbackData>();
kv_callback_data->kv_get_c_func = ToKVGetCFunc(kv_store.get());
kv_callback_data->kv_put_c_func = ToKVPutCFunc(kv_store.get());
kv_callback_data->c_kv_get =
ToCKVGetCallback(&kv_callback_data->kv_get_c_func);
kv_callback_data->c_kv_put =
ToCKVPutCallback(&kv_callback_data->kv_put_c_func);
kv_callback_data->kv_store = std::move(kv_store);
return kv_callback_data;
}
PJRT_SendCallbackInfo CppSendCallbackToCSendCallback(
xla::SendCallback cpp_send_callback,
PJRT_SendCallbackFunction* send_callback_function) {
return PJRT_SendCallbackInfo{
cpp_send_callback.channel_id,
send_callback_function,
[](PJRT_Chunk* chunk, PJRT_CallbackError* callback_error,
size_t total_size_in_bytes, bool done, void* user_arg) -> PJRT_Error* {
PJRT_SendCallbackFunction* send_callback =
reinterpret_cast<PJRT_SendCallbackFunction*>(user_arg);
return (*send_callback)(chunk, callback_error, total_size_in_bytes,
done);
}};
}
PJRT_RecvCallbackInfo CppRecvCallbackToCRecvCallback(
xla::RecvCallback cpp_recv_callback,
PJRT_RecvCallbackFunction* recv_callback_function) {
return PJRT_RecvCallbackInfo{
cpp_recv_callback.channel_id,
recv_callback_function,
[](PJRT_CopyToDeviceStream* stream, void* user_arg) {
auto* recv_callback =
reinterpret_cast<std::function<void(PJRT_CopyToDeviceStream*)>*>(
user_arg);
(*recv_callback)(stream);
}};
}
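// Converts an xla::Layout into the tiled C layout. The vectors inside the
// returned BufferMemoryLayoutData back the pointers stored in c_layout, so the
// struct must stay alive while the C layout is in use.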
absl::StatusOr<BufferMemoryLayoutData> ConvertToBufferMemoryLayoutData(
const xla::Layout& cpp_layout) {
BufferMemoryLayoutData layout_data;
layout_data.c_layout.type =
PJRT_Buffer_MemoryLayout_Type::PJRT_Buffer_MemoryLayout_Type_Tiled;
PJRT_Buffer_MemoryLayout_Tiled c_tiled;
layout_data.minor_to_major.assign(cpp_layout.minor_to_major().begin(),
cpp_layout.minor_to_major().end());
c_tiled.minor_to_major = layout_data.minor_to_major.data();
c_tiled.minor_to_major_size = layout_data.minor_to_major.size();
c_tiled.num_tiles = cpp_layout.tiles().size();
if (c_tiled.num_tiles >= 0) {
layout_data.tile_dim_sizes.reserve(c_tiled.num_tiles);
for (int i = 0; i < c_tiled.num_tiles; ++i) {
absl::Span<const int64_t> tile_dim = cpp_layout.tiles()[i].dimensions();
layout_data.tile_dims.insert(layout_data.tile_dims.end(),
tile_dim.begin(), tile_dim.end());
layout_data.tile_dim_sizes.push_back(tile_dim.size());
}
c_tiled.tile_dims = layout_data.tile_dims.data();
c_tiled.tile_dim_sizes = layout_data.tile_dim_sizes.data();
}
layout_data.c_layout.tiled = c_tiled;
return layout_data;
}
absl::StatusOr<BufferMemoryLayoutData> ConvertToBufferMemoryLayoutData(
absl::Span<int64_t const> byte_strides) {
BufferMemoryLayoutData layout_data;
layout_data.c_layout.type =
PJRT_Buffer_MemoryLayout_Type::PJRT_Buffer_MemoryLayout_Type_Strides;
layout_data.c_layout.strides.byte_strides = byte_strides.data();
layout_data.c_layout.strides.num_byte_strides = byte_strides.size();
return layout_data;
}
absl::StatusOr<xla::Layout> ConvertToLayout(
const PJRT_Buffer_MemoryLayout_Tiled& c_tiled) {
absl::Span<const int64_t> minor_to_major(c_tiled.minor_to_major,
c_tiled.minor_to_major_size);
absl::InlinedVector<xla::Tile, 1> tiles;
tiles.reserve(c_tiled.num_tiles);
const int64_t* current_tile = c_tiled.tile_dims;
for (int i = 0; i < c_tiled.num_tiles; ++i) {
tiles.push_back(xla::Tile(
absl::Span<const int64_t>(current_tile, c_tiled.tile_dim_sizes[i])));
current_tile += c_tiled.tile_dim_sizes[i];
}
xla::Layout layout = xla::Layout(minor_to_major);
layout.mutable_tiles()->assign(tiles.begin(), tiles.end());
return layout;
}
PJRT_Buffer_Type GetElementType(const PJRT_Api* api, PJRT_Buffer* buffer) {
PJRT_Buffer_ElementType_Args args;
args.struct_size = PJRT_Buffer_ElementType_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer;
LogFatalIfPjrtError(api->PJRT_Buffer_ElementType(&args), api);
return args.type;
}
absl::Span<const int64_t> GetDimensions(const PJRT_Api* api,
PJRT_Buffer* buffer) {
PJRT_Buffer_Dimensions_Args args;
args.struct_size = PJRT_Buffer_Dimensions_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer;
LogFatalIfPjrtError(api->PJRT_Buffer_Dimensions(&args), api);
return {args.dims, args.num_dims};
}
std::unique_ptr<PJRT_Layouts_MemoryLayout, PJRT_Layouts_MemoryLayoutDeleter>
GetMemoryLayout(const PJRT_Api* api, PJRT_Buffer* buffer) {
PJRT_Layouts_PJRT_Buffer_MemoryLayout_Args args;
args.struct_size = PJRT_Layouts_PJRT_Buffer_MemoryLayout_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer;
PJRT_Layouts_Extension* ext_api =
FindExtension<PJRT_Layouts_Extension>(api, PJRT_Extension_Type_Layouts);
CHECK_NE(ext_api, nullptr) << "GetMemoryLayout called with PJRT_Api that "
"doesn't support layouts extension";
LogFatalIfPjrtError(ext_api->PJRT_Layouts_PJRT_Buffer_MemoryLayout(&args),
api);
return std::unique_ptr<PJRT_Layouts_MemoryLayout,
PJRT_Layouts_MemoryLayoutDeleter>(
args.layout, MakeMemoryLayoutDeleter(api));
}
absl::StatusOr<xla::Shape> BuildXlaShapeFromC(
PJRT_Buffer_Type element_type, const int64_t* dims, size_t num_dims,
PJRT_Buffer_MemoryLayout* layout) {
xla::Shape shape =
xla::ShapeUtil::MakeShape(ConvertFromPjRtBufferType(element_type),
absl::Span<const int64_t>(dims, num_dims));
xla::Layout cpp_layout;
if (layout != nullptr) {
switch (layout->type) {
case PJRT_Buffer_MemoryLayout_Type::PJRT_Buffer_MemoryLayout_Type_Tiled: {
TF_ASSIGN_OR_RETURN(cpp_layout, ConvertToLayout(layout->tiled));
break;
}
case PJRT_Buffer_MemoryLayout_Type::
PJRT_Buffer_MemoryLayout_Type_Strides: {
TF_RETURN_IF_ERROR(absl::InvalidArgumentError(
"PJRT_Buffer_MemoryLayout_Type_Strides is not supported to be "
"converted to a xla::Shape"));
break;
}
default: {
TF_RETURN_IF_ERROR(absl::InvalidArgumentError(absl::StrCat(
"Unexpected PJRT_Buffer_MemoryLayout_Type type: ", layout->type)));
}
}
*shape.mutable_layout() = cpp_layout;
}
return shape;
}
absl::string_view PlatformName(const PJRT_Api* api,
const PJRT_TopologyDescription* topo_desc) {
PJRT_TopologyDescription_PlatformName_Args args;
args.struct_size = PJRT_TopologyDescription_PlatformName_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.topology = const_cast<PJRT_TopologyDescription*>(topo_desc);
LogFatalIfPjrtError(api->PJRT_TopologyDescription_PlatformName(&args), api);
return {args.platform_name, args.platform_name_size};
}
absl::Span<PJRT_DeviceDescription* const> DeviceDescriptions(
const PJRT_Api* api, const PJRT_TopologyDescription* topo_desc) {
PJRT_TopologyDescription_GetDeviceDescriptions_Args args;
args.struct_size =
PJRT_TopologyDescription_GetDeviceDescriptions_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.topology = const_cast<PJRT_TopologyDescription*>(topo_desc);
LogFatalIfPjrtError(
api->PJRT_TopologyDescription_GetDeviceDescriptions(&args), api);
return {args.descriptions, args.num_descriptions};
}
absl::StatusOr<xla::CompiledMemoryStats> GetCompiledMemoryStats(
const PJRT_Api* api, PJRT_Executable* executable) {
PJRT_Executable_GetCompiledMemoryStats_Args args;
args.struct_size = PJRT_Executable_GetCompiledMemoryStats_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.executable = executable;
RETURN_STATUS_IF_PJRT_ERROR(
api->PJRT_Executable_GetCompiledMemoryStats(&args), api);
xla::CompiledMemoryStats results;
results.generated_code_size_in_bytes = args.generated_code_size_in_bytes;
results.argument_size_in_bytes = args.argument_size_in_bytes;
results.output_size_in_bytes = args.output_size_in_bytes;
results.alias_size_in_bytes = args.alias_size_in_bytes;
results.temp_size_in_bytes = args.temp_size_in_bytes;
results.host_generated_code_size_in_bytes =
args.host_generated_code_size_in_bytes;
results.host_argument_size_in_bytes = args.host_argument_size_in_bytes;
results.host_output_size_in_bytes = args.host_output_size_in_bytes;
results.host_alias_size_in_bytes = args.host_alias_size_in_bytes;
results.host_temp_size_in_bytes = args.host_temp_size_in_bytes;
return results;
}
PJRT_Profiler_Extension CreatePjrtProfilerExtension(
absl::string_view traceme_name) {
tsl::profiler::TraceMeProducer producer(
traceme_name, tsl::profiler::ContextType::kPjrtLibraryCall);
int64_t traceme_context_id = producer.GetContextId();
PJRT_Profiler_Extension profiler_extension{
PJRT_Profiler_Extension_STRUCT_SIZE,
PJRT_Extension_Type::PJRT_Extension_Type_Profiler,
nullptr,
nullptr,
traceme_context_id,
};
return profiler_extension;
}
} | #include "xla/pjrt/c/pjrt_c_api_helpers.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "xla/layout.h"
#include "xla/pjrt/c/pjrt_c_api.h"
#include "xla/pjrt/c/pjrt_c_api_wrapper_impl.h"
#include "xla/pjrt/distributed/in_memory_key_value_store.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace pjrt {
namespace {
using ::testing::HasSubstr;
TEST(PjRtCApiHelperTest, ConvertValidPjRtValueType) {
std::vector<int64_t> int64_list = {static_cast<int64_t>(1),
static_cast<int64_t>(2)};
absl::flat_hash_map<std::string, xla::PjRtValueType> original_cpp_map = {
{"string", "v1"},
{"int64", static_cast<int64_t>(1)},
{"int64_list", int64_list},
{"float", static_cast<float>(1.0)}};
TF_ASSERT_OK_AND_ASSIGN(std::vector<PJRT_NamedValue> c_map,
ConvertToPjRtNamedValueList(original_cpp_map));
auto converted_back_cpp_map =
ConvertFromPjRtNamedValueList(c_map.data(), c_map.size());
EXPECT_THAT(converted_back_cpp_map,
testing::UnorderedElementsAreArray(original_cpp_map));
}
TEST(PjRtCApiHelperTest, ValidOptionNameAndPjRtValueTypeIndex) {
const auto expected = absl::flat_hash_map<std::string, PJRT_NamedValue_Type>({
{"string", PJRT_NamedValue_Type::PJRT_NamedValue_kString},
{"int64", PJRT_NamedValue_Type::PJRT_NamedValue_kInt64},
});
absl::flat_hash_map<std::string, xla::PjRtValueType> valid_map = {
{"string", static_cast<std::string>("v1")},
{"int64", static_cast<int64_t>(1)}};
TF_EXPECT_OK(ValidateCreateOptions(valid_map, expected));
}
TEST(PjRtCApiHelperTest, InvalidOptionName) {
const auto expected = absl::flat_hash_map<std::string, PJRT_NamedValue_Type>({
{"string", PJRT_NamedValue_Type::PJRT_NamedValue_kString},
{"int64", PJRT_NamedValue_Type::PJRT_NamedValue_kInt64},
});
absl::flat_hash_map<std::string, xla::PjRtValueType> invalid_map = {
{"invalid", "v1"}};
auto status = ValidateCreateOptions(invalid_map, expected);
EXPECT_NE(status, absl::OkStatus());
EXPECT_THAT(status.message(),
HasSubstr("Unexpected option name passed to PJRT_Client_Create"));
}
TEST(PjRtCApiHelperTest, InvalidOptionTypeIndex) {
const auto expected = absl::flat_hash_map<std::string, PJRT_NamedValue_Type>({
{"string", PJRT_NamedValue_Type::PJRT_NamedValue_kString},
{"int64", PJRT_NamedValue_Type::PJRT_NamedValue_kInt64},
});
absl::flat_hash_map<std::string, xla::PjRtValueType> invalid_map = {
{"string", static_cast<int64_t>(1)}};
auto status = ValidateCreateOptions(invalid_map, expected);
EXPECT_NE(status, absl::OkStatus());
EXPECT_THAT(status.message(),
HasSubstr("Option passed to PJRT_Client_Create with name string "
"has type index 2 but expected type index is 0"));
}
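// Wraps an InMemoryKeyValueStore in the C callbacks, converts it back with
// ToCppKeyValueStore, and checks that Set/Get round-trip through the wrappers.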
TEST(PjRtCApiHelperTest, Callback) {
auto kv_store = std::make_shared<xla::InMemoryKeyValueStore>();
auto kv_callback_data = ConvertToCKeyValueCallbacks(kv_store);
auto converted_kv_store = ToCppKeyValueStore(
kv_callback_data->c_kv_get, &kv_callback_data->kv_get_c_func,
kv_callback_data->c_kv_put, &kv_callback_data->kv_put_c_func);
auto s = converted_kv_store->Set("key", "value");
TF_EXPECT_OK(s);
auto v = converted_kv_store->Get("key", absl::Seconds(1));
TF_EXPECT_OK(v.status());
EXPECT_EQ(*v, "value");
}
TEST(PjRtCApiHelperTest, ConvertToCLayoutFromStrides) {
std::vector<int64_t> strides = {4, 8};
absl::StatusOr<BufferMemoryLayoutData> layout_data =
ConvertToBufferMemoryLayoutData(strides);
EXPECT_TRUE(layout_data.ok());
EXPECT_EQ(
layout_data->c_layout.type,
PJRT_Buffer_MemoryLayout_Type::PJRT_Buffer_MemoryLayout_Type_Strides);
EXPECT_EQ(layout_data->c_layout.strides.num_byte_strides, 2);
EXPECT_EQ(layout_data->c_layout.strides.byte_strides[0], strides[0]);
EXPECT_EQ(layout_data->c_layout.strides.byte_strides[1], strides[1]);
}
TEST(PjRtCApiHelperTest, ConvertToCLayoutFromLayoutNoTiles) {
std::vector<int64_t> minor_to_major = {1, 0};
xla::Layout layout(minor_to_major);
TF_ASSERT_OK_AND_ASSIGN(BufferMemoryLayoutData layout_data,
ConvertToBufferMemoryLayoutData(layout));
EXPECT_EQ(layout_data.c_layout.type,
PJRT_Buffer_MemoryLayout_Type::PJRT_Buffer_MemoryLayout_Type_Tiled);
EXPECT_EQ(layout_data.c_layout.tiled.num_tiles, 0);
PJRT_Buffer_MemoryLayout_Tiled tiled = layout_data.c_layout.tiled;
EXPECT_EQ(tiled.minor_to_major_size, 2);
EXPECT_EQ(tiled.minor_to_major[0], minor_to_major[0]);
EXPECT_EQ(tiled.minor_to_major[1], minor_to_major[1]);
}
TEST(PjRtCApiHelperTest, ConvertToCLayoutFromLayoutWithTiles) {
std::vector<int64_t> minor_to_major = {1, 0};
xla::Layout layout(minor_to_major);
std::vector<int64_t> tile_dims_1 = {2, 4};
std::vector<int64_t> tile_dims_2 = {1};
layout.mutable_tiles()->push_back(xla::Tile(tile_dims_1));
layout.mutable_tiles()->push_back(xla::Tile(tile_dims_2));
TF_ASSERT_OK_AND_ASSIGN(BufferMemoryLayoutData layout_data,
ConvertToBufferMemoryLayoutData(layout));
EXPECT_EQ(layout_data.c_layout.type,
PJRT_Buffer_MemoryLayout_Type::PJRT_Buffer_MemoryLayout_Type_Tiled);
PJRT_Buffer_MemoryLayout_Tiled tiled = layout_data.c_layout.tiled;
EXPECT_EQ(tiled.minor_to_major_size, 2);
EXPECT_EQ(tiled.minor_to_major[0], minor_to_major[0]);
EXPECT_EQ(tiled.minor_to_major[1], minor_to_major[1]);
EXPECT_EQ(tiled.num_tiles, 2);
EXPECT_EQ(tiled.tile_dim_sizes[0], tile_dims_1.size());
EXPECT_EQ(tiled.tile_dim_sizes[1], tile_dims_2.size());
EXPECT_EQ(tiled.tile_dims[0], tile_dims_1[0]);
EXPECT_EQ(tiled.tile_dims[1], tile_dims_1[1]);
EXPECT_EQ(tiled.tile_dims[2], tile_dims_2[0]);
}
TEST(PjRtCApiHelperTest, ConvertFromCLayoutToLayout) {
PJRT_Buffer_MemoryLayout c_layout;
c_layout.type =
PJRT_Buffer_MemoryLayout_Type::PJRT_Buffer_MemoryLayout_Type_Tiled;
std::vector<int64_t> minor_to_major = {1, 0};
c_layout.tiled.minor_to_major_size = 2;
c_layout.tiled.minor_to_major = minor_to_major.data();
c_layout.tiled.num_tiles = 2;
std::vector<size_t> tile_dim_sizes = {2, 1};
c_layout.tiled.tile_dim_sizes = tile_dim_sizes.data();
std::vector<int64_t> tile_dims = {2, 4, 1};
c_layout.tiled.tile_dims = tile_dims.data();
TF_ASSERT_OK_AND_ASSIGN(xla::Layout layout, ConvertToLayout(c_layout.tiled));
EXPECT_EQ(layout.ToString(), "{1,0:T(2,4)(1)}");
}
TEST(PjRtCApiHelperTest, ConvertFromCLayoutToLayoutNoTile) {
PJRT_Buffer_MemoryLayout c_layout;
c_layout.type =
PJRT_Buffer_MemoryLayout_Type::PJRT_Buffer_MemoryLayout_Type_Tiled;
c_layout.tiled.num_tiles = 0;
std::vector<int64_t> minor_to_major = {1, 0};
c_layout.tiled.minor_to_major_size = 2;
c_layout.tiled.minor_to_major = minor_to_major.data();
TF_ASSERT_OK_AND_ASSIGN(xla::Layout layout, ConvertToLayout(c_layout.tiled));
EXPECT_EQ(layout.ToString(), "{1,0}");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/c/pjrt_c_api_helpers.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/c/pjrt_c_api_helpers_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
134ec802-5d04-47ce-83bd-c7083b178e4b | cpp | tensorflow/tensorflow | sharding_conversions | third_party/xla/xla/python/ifrt/support/sharding_conversions.cc | third_party/xla/xla/python/ifrt/support/sharding_conversions_test.cc | #include "xla/python/ifrt/support/sharding_conversions.h"
#include <cstdint>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/ir/sharding_param.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace ifrt {
namespace support {
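// Converts an IFRT sharding to an XLA OpSharding proto. Only
// ShardingParamSharding inputs are handled; other sharding types yield an
// InvalidArgumentError.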
absl::StatusOr<OpSharding> ToOpSharding(const Sharding& sharding) {
if (auto* sharding_param_sharding =
llvm::dyn_cast<xla::ifrt::ShardingParamSharding>(&sharding)) {
return ToOpSharding(sharding_param_sharding->sharding_param(),
sharding_param_sharding->devices());
} else {
return absl::InvalidArgumentError(
"Only conversion from `ShardingParamSharding` to `OpSharding` is "
"supported.");
}
}
absl::StatusOr<OpSharding> ToOpSharding(
const ShardingParam& sharding_param,
const tsl::RCReference<xla::ifrt::DeviceList>& device_mapping) {
OpSharding op_sharding;
{
bool all_dim_replicated = true;
for (const int64_t dim_shard : sharding_param.dim_shards()) {
if (dim_shard != 1) {
all_dim_replicated = false;
break;
}
}
if (all_dim_replicated) {
op_sharding.set_type(OpSharding::REPLICATED);
return op_sharding;
}
}
op_sharding.set_type(OpSharding::OTHER);
auto* tile_assignment_dims = op_sharding.mutable_tile_assignment_dimensions();
int64_t cum_size = 1;
tile_assignment_dims->Reserve(sharding_param.dim_shards().size() + 1);
for (const int64_t dim_shard : sharding_param.dim_shards()) {
cum_size *= dim_shard;
tile_assignment_dims->Add(dim_shard);
}
int device_count = 1;
for (const int axis_size : sharding_param.minor_to_major().axis_sizes) {
device_count *= axis_size;
}
if (device_count != cum_size) {
op_sharding.set_replicate_on_last_tile_dim(true);
tile_assignment_dims->Add(device_count / cum_size);
}
llvm::SmallVector<int> logical_device_ids;
sharding_param.minor_to_major().ToDeviceList(logical_device_ids);
auto* tile_assignment_devices = op_sharding.mutable_tile_assignment_devices();
tile_assignment_devices->Reserve(logical_device_ids.size());
const absl::Span<Device* const> device_mapping_devices =
device_mapping->devices();
for (const int logical_device_id : logical_device_ids) {
if (logical_device_id < 0 ||
logical_device_id >= device_mapping_devices.size()) {
return absl::OutOfRangeError(
absl::StrCat("Can't map device with logical id ", logical_device_id,
". The logical device id should be within [0, ",
device_mapping_devices.size(), ")."));
}
tile_assignment_devices->Add(
device_mapping_devices[logical_device_id]->Id().value());
}
return op_sharding;
}
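// Converts a ShardingParam to an HloSharding backed by an iota tile
// assignment; fully replicated params map to HloSharding::Replicate(), and any
// devices left over after the dim shards become a replicated last tile
// dimension.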
absl::StatusOr<HloSharding> ToHloSharding(const ShardingParam& sharding_param) {
auto axis_sizes = sharding_param.minor_to_major().axis_sizes;
llvm::SmallVector<int64_t> reshape_dims;
reshape_dims.reserve(axis_sizes.size());
int device_count = 1;
for (auto axis_size : llvm::reverse(axis_sizes)) {
reshape_dims.push_back(axis_size);
device_count *= axis_size;
}
if (device_count == 1) {
return HloSharding::Replicate();
}
int64_t cum_size = 1;
llvm::SmallVector<int64_t> dims;
dims.reserve(sharding_param.dim_shards().size());
for (const int64_t dim_shard : sharding_param.dim_shards()) {
cum_size *= dim_shard;
dims.push_back(dim_shard);
}
llvm::SmallVector<int, 4> permutation;
int num_axis = sharding_param.minor_to_major().permutation.size();
permutation.reserve(num_axis);
for (const int axis_id :
llvm::reverse(sharding_param.minor_to_major().permutation)) {
permutation.push_back(num_axis - axis_id - 1);
}
if (device_count != cum_size) {
dims.push_back(device_count / cum_size);
return HloSharding::PartialTile(
TileAssignment(dims, reshape_dims, permutation));
} else {
return HloSharding::IotaTile(dims, reshape_dims, permutation);
}
}
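// Converts an HloSharding back to a ShardingParam. Supports replicated
// shardings and tiled shardings with an iota tile assignment (optionally with
// a single REPLICATED subgroup); anything else returns an error.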
absl::StatusOr<ShardingParam> ToShardingParam(const HloSharding& hlo_sharding,
int rank, int num_devices) {
ShardingParam::MinorToMajor minor_to_major;
if (hlo_sharding.IsReplicated() ||
(hlo_sharding.IsTileMaximal() && hlo_sharding.HasUniqueDevice() &&
num_devices == 1)) {
llvm::SmallVector<int64_t> dim_shards(rank, 1);
minor_to_major.permutation.push_back(0);
minor_to_major.axis_sizes.push_back(num_devices);
return ShardingParam(dim_shards, std::move(minor_to_major));
} else if (hlo_sharding.IsTiled()) {
const xla::TileAssignment& tile_assignment = hlo_sharding.tile_assignment();
if (!tile_assignment.iota()) {
return absl::InvalidArgumentError(absl::StrCat(
"Conversion from `HloSharding` without `IotaTileAssignment` is not "
"supported; sharding=",
hlo_sharding.ToString()));
}
if (rank != hlo_sharding.TiledDataRank()) {
return absl::InvalidArgumentError(absl::StrFormat(
    "`TiledData` expected to have %d dimensions, but has %d "
"dimensions; sharding=%s",
rank, hlo_sharding.TiledDataRank(), hlo_sharding.ToString()));
}
if (hlo_sharding.subgroup_types().size() > 1 ||
(hlo_sharding.subgroup_types().size() == 1 &&
hlo_sharding.subgroup_types()[0] != xla::OpSharding::REPLICATED)) {
return absl::InvalidArgumentError(absl::StrCat(
    "Unsupported conversion to `ShardingParam` from `HloSharding` that "
    "has more than one subgroup or a subgroup that is not REPLICATED; "
"sharding=",
hlo_sharding.ToString()));
}
llvm::SmallVector<int64_t> dim_shards(tile_assignment.dimensions().begin(),
tile_assignment.dimensions().end());
if (hlo_sharding.ReplicateOnLastTileDim() ||
(hlo_sharding.subgroup_types().size() == 1 &&
hlo_sharding.subgroup_types()[0] == xla::OpSharding::REPLICATED)) {
dim_shards.pop_back();
}
if (tile_assignment.iota()->reshape_dims().empty()) {
minor_to_major.permutation.push_back(0);
minor_to_major.axis_sizes.push_back(num_devices);
} else {
for (auto reshape_dim :
llvm::reverse(tile_assignment.iota()->reshape_dims())) {
minor_to_major.axis_sizes.push_back(reshape_dim);
}
int num_axis = tile_assignment.iota()->transpose_perm().size();
for (int axis_id :
llvm::reverse(tile_assignment.iota()->transpose_perm())) {
minor_to_major.permutation.push_back(num_axis - axis_id - 1);
}
}
return ShardingParam(dim_shards, std::move(minor_to_major));
}
return absl::UnimplementedError(
absl::StrCat("Unsupported conversion to `ShardingParam` from "
"`HloSharding`; sharding=",
hlo_sharding.ToString()));
}
}
}
} | #include "xla/python/ifrt/support/sharding_conversions.h"
#include <memory>
#include <numeric>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/ir/tile_assignment.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/index_domain.h"
#include "xla/python/ifrt/ir/sharding_param.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/mock.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/ifrt/test_util.h"
#include "xla/shape.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace support {
namespace {
using ::testing::Return;
using ::tsl::testing::StatusIs;
using xla::HloSharding;
absl::StatusOr<HloSharding> ToHloShardingViaOpSharding(
const ShardingParam& sharding_param,
const tsl::RCReference<DeviceList>& device_list) {
TF_ASSIGN_OR_RETURN(xla::OpSharding op_sharding,
ToOpSharding(sharding_param, device_list));
return HloSharding::FromProto(op_sharding);
}
struct ShardingConversionTestClientState {
absl::flat_hash_map<DeviceId, std::unique_ptr<Device>> device_map;
std::vector<Device*> devices;
};
std::shared_ptr<MockClient> MakeTestClient(int num_devices) {
auto state = std::make_shared<ShardingConversionTestClientState>();
state->devices.reserve(num_devices);
for (int i = 0; i < num_devices; ++i) {
auto device = std::make_unique<MockDevice>();
ON_CALL(*device, Id).WillByDefault(Return(DeviceId(i)));
state->devices.push_back(device.get());
state->device_map.insert({DeviceId(i), std::move(device)});
}
auto client = std::make_shared<MockClient>();
ON_CALL(*client, devices)
.WillByDefault(
[state]() -> absl::Span<Device* const> { return state->devices; });
return client;
}
class ShardingConversionsTest : public testing::TestWithParam<int> {
public:
void SetUp() override { client_ = MakeTestClient(GetParam()); }
tsl::RCReference<DeviceList> GetDevices(
absl::Span<const int> device_indices) {
return test_util::GetDevices(client_.get(), device_indices).value();
}
void AssertSameTiling(const ShardingParam& sharding_param,
const HloSharding& hlo_sharding, const Shape& shape) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<const Sharding> sharding,
ShardingParamSharding::Create(
sharding_param, device_list, MemoryKind()));
const xla::Shape xla_shape(PrimitiveType::F16, shape.dims(), {}, {});
TF_ASSERT_OK_AND_ASSIGN(const std::vector<IndexDomain> index_domains,
sharding->IndexDomains(shape));
ASSERT_EQ(index_domains.size(),
hlo_sharding.tile_assignment().num_elements());
const xla::Shape xla_tile_shape = hlo_sharding.TileShape(xla_shape);
for (int i = 0; i < index_domains.size(); ++i) {
SCOPED_TRACE(absl::StrCat("on device ", i));
EXPECT_EQ(index_domains[i].origin().elements(),
hlo_sharding.TileOffsetForDevice(xla_shape, i));
EXPECT_EQ(index_domains[i].shape().dims(), xla_tile_shape.dimensions());
}
}
private:
std::shared_ptr<Client> client_;
};
TEST_P(ShardingConversionsTest, Replicated) {
ShardingParam expected_sharding_param{
{1, 1, 1},
{{0, 1}, {2, 3}}};
TF_EXPECT_OK(expected_sharding_param.verify());
TF_ASSERT_OK_AND_ASSIGN(const HloSharding hlo_iota_sharding,
ToHloSharding(expected_sharding_param));
TF_ASSERT_OK_AND_ASSIGN(
const HloSharding hlo_sharding,
ToHloShardingViaOpSharding(expected_sharding_param,
GetDevices({0, 1, 2, 3, 4, 5})));
EXPECT_EQ(hlo_sharding.ToString(), "{replicated}");
EXPECT_EQ(hlo_sharding, hlo_iota_sharding);
TF_ASSERT_OK_AND_ASSIGN(auto sharding_param,
ToShardingParam(hlo_iota_sharding, 3, 6));
TF_ASSERT_OK_AND_ASSIGN(const HloSharding actual_hlo_sharding,
ToHloSharding(sharding_param));
EXPECT_EQ(hlo_iota_sharding, actual_hlo_sharding);
}
TEST_P(ShardingConversionsTest, SingleDeviceReplicated) {
ShardingParam expected_sharding_param{
{1, 1}, {{0}, {1}}};
TF_EXPECT_OK(expected_sharding_param.verify());
TF_ASSERT_OK_AND_ASSIGN(const HloSharding hlo_iota_sharding,
ToHloSharding(expected_sharding_param));
TF_ASSERT_OK_AND_ASSIGN(
const HloSharding hlo_sharding,
ToHloShardingViaOpSharding(expected_sharding_param, GetDevices({0})));
EXPECT_EQ(hlo_sharding.ToString(), "{replicated}");
EXPECT_EQ(hlo_sharding, hlo_iota_sharding);
TF_ASSERT_OK_AND_ASSIGN(auto sharding_param,
ToShardingParam(hlo_iota_sharding, 2, 1));
EXPECT_EQ(expected_sharding_param, sharding_param);
}
TEST_P(ShardingConversionsTest, Permutation) {
ShardingParam expected_sharding_param{
{2, 1, 3},
{{1, 0}, {3, 2}}};
TF_EXPECT_OK(expected_sharding_param.verify());
TF_ASSERT_OK_AND_ASSIGN(const HloSharding hlo_iota_sharding,
ToHloSharding(expected_sharding_param));
TF_ASSERT_OK_AND_ASSIGN(
const HloSharding hlo_sharding,
ToHloShardingViaOpSharding(expected_sharding_param,
GetDevices({0, 1, 2, 3, 4, 5})));
EXPECT_EQ(hlo_sharding.ToString(), "{devices=[2,1,3]0,3,1,4,2,5}");
EXPECT_EQ(hlo_sharding, hlo_iota_sharding);
TF_ASSERT_OK_AND_ASSIGN(auto sharding_param,
ToShardingParam(hlo_iota_sharding, 3, 6));
EXPECT_EQ(expected_sharding_param, sharding_param);
}
TEST_P(ShardingConversionsTest, Partial) {
ShardingParam expected_sharding_param{
{2, 1}, {{0, 1}, {2, 3}}};
TF_EXPECT_OK(expected_sharding_param.verify());
TF_ASSERT_OK_AND_ASSIGN(const HloSharding hlo_iota_sharding,
ToHloSharding(expected_sharding_param));
TF_ASSERT_OK_AND_ASSIGN(
const HloSharding hlo_sharding,
ToHloShardingViaOpSharding(expected_sharding_param,
GetDevices({0, 1, 2, 3, 4, 5})));
EXPECT_EQ(hlo_sharding.ToString(),
"{devices=[2,1,3]0,1,2,3,4,5 last_tile_dim_replicate}");
EXPECT_EQ(hlo_sharding, hlo_iota_sharding);
TF_ASSERT_OK_AND_ASSIGN(auto sharding_param,
ToShardingParam(hlo_iota_sharding, 2, 6));
TF_ASSERT_OK_AND_ASSIGN(const HloSharding actual_hlo_sharding,
ToHloSharding(sharding_param));
EXPECT_EQ(hlo_iota_sharding, actual_hlo_sharding);
}
TEST_P(ShardingConversionsTest, OneDimToTwoAxes) {
ShardingParam expected_sharding_param{
{4}, {{1, 0}, {2, 2}}};
TF_EXPECT_OK(expected_sharding_param.verify());
TF_ASSERT_OK_AND_ASSIGN(const HloSharding hlo_iota_sharding,
ToHloSharding(expected_sharding_param));
TF_ASSERT_OK_AND_ASSIGN(const HloSharding hlo_sharding,
ToHloShardingViaOpSharding(expected_sharding_param,
GetDevices({0, 1, 2, 3})));
EXPECT_EQ(hlo_sharding.ToString(), "{devices=[4]0,2,1,3}");
EXPECT_EQ(hlo_sharding, hlo_iota_sharding);
TF_ASSERT_OK_AND_ASSIGN(auto sharding_param,
ToShardingParam(hlo_iota_sharding, 1, 4));
EXPECT_EQ(expected_sharding_param, sharding_param);
}
TEST_P(ShardingConversionsTest, NonTrivialDeviceAssignment) {
ShardingParam expected_sharding_param{
{2, 1, 3},
{{1, 0}, {3, 2}}};
TF_EXPECT_OK(expected_sharding_param.verify());
TF_ASSERT_OK_AND_ASSIGN(
const HloSharding hlo_sharding,
ToHloShardingViaOpSharding(expected_sharding_param,
GetDevices({6, 5, 4, 3, 2, 1})));
EXPECT_EQ(hlo_sharding.ToString(), "{devices=[2,1,3]6,3,5,2,4,1}");
}
TEST_P(ShardingConversionsTest, VerifyIncorrectShardings) {
ShardingParam different_permutation_and_axis{
{1, 1}, {{0, 1}, {2}}};
EXPECT_FALSE(different_permutation_and_axis.verify().ok());
ShardingParam too_many_slices{{2, 2},
{{0}, {2}}};
EXPECT_FALSE(too_many_slices.verify().ok());
ShardingParam incorrect_permutation{
{4, 1},
{{0, 1, 1}, {2, 2, 2}}};
EXPECT_FALSE(incorrect_permutation.verify().ok());
}
TEST_P(ShardingConversionsTest, ErrorOnDeviceAssignment) {
ShardingParam sharding_param{{2, 1, 3},
{{1, 0}, {3, 2}}};
TF_EXPECT_OK(sharding_param.verify());
EXPECT_THAT(
ToHloShardingViaOpSharding(sharding_param, GetDevices({6, 5, 4, 3, 2})),
StatusIs(absl::StatusCode::kOutOfRange,
::testing::HasSubstr("Can't map device with logical id 5")));
}
TEST_P(ShardingConversionsTest, ShardingParamFullySharded) {
ShardingParam sharding_param{{2, 3},
{{0, 1}, {2, 3}}};
TF_EXPECT_OK(sharding_param.verify());
TF_ASSERT_OK_AND_ASSIGN(const HloSharding hlo_sharding,
ToHloShardingViaOpSharding(
sharding_param, GetDevices({0, 1, 2, 3, 4, 5})));
AssertSameTiling(sharding_param, hlo_sharding, Shape({6, 6}));
}
TEST_P(ShardingConversionsTest, ShardingParamWithPermutation) {
ShardingParam sharding_param{{2, 3},
{{1, 0}, {3, 2}}};
TF_EXPECT_OK(sharding_param.verify());
TF_ASSERT_OK_AND_ASSIGN(const HloSharding hlo_sharding,
ToHloShardingViaOpSharding(
sharding_param, GetDevices({0, 1, 2, 3, 4, 5})));
AssertSameTiling(sharding_param, hlo_sharding, Shape({6, 6}));
}
TEST_P(ShardingConversionsTest, ShardingParamWithReplication) {
ShardingParam sharding_param{{2, 1},
{{0, 1}, {2, 3}}};
TF_EXPECT_OK(sharding_param.verify());
TF_ASSERT_OK_AND_ASSIGN(const HloSharding hlo_sharding,
ToHloShardingViaOpSharding(
sharding_param, GetDevices({0, 1, 2, 3, 4, 5})));
AssertSameTiling(sharding_param, hlo_sharding, Shape({6, 6}));
}
TEST_P(ShardingConversionsTest, OpShardingReplicated) {
OpSharding op_sharding;
op_sharding.set_type(OpSharding::REPLICATED);
TF_ASSERT_OK_AND_ASSIGN(auto hlo_sharding,
HloSharding::FromProto(op_sharding));
TF_ASSERT_OK_AND_ASSIGN(auto actual, ToShardingParam(hlo_sharding, 2, 6));
ShardingParam expected{{1, 1},
{{0}, {6}}};
TF_EXPECT_OK(expected.verify());
EXPECT_EQ(actual, expected);
}
INSTANTIATE_TEST_SUITE_P(NumDevices, ShardingConversionsTest,
testing::Values(7));
struct HloShardingTestStruct {
HloSharding hlo_sharding;
int rank;
int num_devices;
};
class HloShardingToShardingParamTest
: public testing::TestWithParam<HloShardingTestStruct> {
public:
void SetUp() override {
const auto& param = GetParam();
client_ = MakeTestClient(param.num_devices);
}
tsl::RCReference<DeviceList> GetDevices(
absl::Span<const int> device_indices) {
return test_util::GetDevices(client_.get(), device_indices).value();
}
private:
std::shared_ptr<Client> client_;
};
TEST_P(HloShardingToShardingParamTest, HloShardingToShardingParam) {
const auto& param = GetParam();
TF_ASSERT_OK_AND_ASSIGN(
auto sharding_param,
ToShardingParam(param.hlo_sharding, param.rank, param.num_devices));
EXPECT_TRUE(sharding_param.verify().ok());
TF_ASSERT_OK_AND_ASSIGN(auto actual_hlo_sharding,
ToHloSharding(sharding_param));
EXPECT_EQ(param.hlo_sharding, actual_hlo_sharding);
std::vector<int> device_ids(param.num_devices);
std::iota(device_ids.begin(), device_ids.end(), 0);
TF_ASSERT_OK_AND_ASSIGN(
auto hlo_via_op_sharding,
ToHloShardingViaOpSharding(sharding_param,
GetDevices(absl::MakeSpan(device_ids))));
EXPECT_EQ(param.hlo_sharding, hlo_via_op_sharding);
}
INSTANTIATE_TEST_SUITE_P(
HloShardingConversionTests, HloShardingToShardingParamTest,
testing::ValuesIn<HloShardingTestStruct>({
{HloSharding::IotaTile({4, 2}), 2, 8},
{HloSharding::IotaTile({2, 4}, {4, 2}, {1, 0}), 2, 8},
{HloSharding::IotaTile({8, 1}), 2, 8},
{HloSharding::IotaTile({8, 1}, {4, 2}, {1, 0}), 2, 8},
{HloSharding::PartialTile(TileAssignment({4, 1, 2}, {8}, {0})), 2, 8},
{HloSharding::PartialTile(TileAssignment({2, 1, 4}, {4, 2}, {1, 0})), 2,
8},
{HloSharding::PartialTile(TileAssignment({1, 4, 2}, {8}, {0})), 2, 8},
{HloSharding::PartialTile(TileAssignment({1, 2, 4}, {4, 2}, {1, 0})), 2,
8},
{HloSharding::PartialTile(TileAssignment({4, 3, 2}, {2, 3, 4},
{2, 1, 0})),
2, 24},
{HloSharding::PartialTile(TileAssignment({4, 2, 3}, {6, 4}, {1, 0})), 2,
24},
{HloSharding::PartialTile(TileAssignment({6, 1, 4}, {24}, {0})), 2, 24},
{HloSharding::PartialTile(TileAssignment({12, 1, 2}, {2, 12}, {1, 0})),
2, 24},
{HloSharding::PartialTile(TileAssignment({8, 1, 3}, {6, 4}, {1, 0})), 2,
24},
{HloSharding::PartialTile(TileAssignment({2, 1, 12}, {24}, {0})), 2,
24},
{HloSharding::PartialTile(TileAssignment({3, 1, 8}, {2, 3, 4},
{1, 0, 2})),
2, 24},
{HloSharding::PartialTile(TileAssignment({1, 4, 6}, {6, 4}, {1, 0})), 2,
24},
{HloSharding::PartialTile(TileAssignment({1, 12, 2}, {2, 12}, {1, 0})),
2, 24},
{HloSharding::PartialTile(TileAssignment({3, 2, 1, 4}, {2, 3, 4},
{1, 0, 2})),
3, 24},
{HloSharding::PartialTile(TileAssignment({2, 4, 1, 3}, {2, 3, 4},
{0, 2, 1})),
3, 24},
{HloSharding::PartialTile(TileAssignment({4, 3, 1, 2}, {2, 3, 4},
{2, 1, 0})),
3, 24},
{HloSharding::PartialTile(TileAssignment({12, 1, 1, 2}, {2, 12},
{1, 0})),
3, 24},
}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/support/sharding_conversions.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/support/sharding_conversions_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b6ff3b65-9b13-44b3-8470-4edfba40e5bf | cpp | tensorflow/tensorflow | conv_layout_normalization | third_party/xla/xla/service/gpu/conv_layout_normalization.cc | third_party/xla/xla/service/gpu/conv_layout_normalization_test.cc | #include "xla/service/gpu/conv_layout_normalization.h"
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
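// Rewrites a cuDNN convolution custom call so that every operand and result
// uses the default descending layout: operands are bitcast to normalized
// shapes, the convolution dimension numbers are transposed to match, and the
// result is bitcast back to the original shape. Returns std::nullopt when the
// call is already normalized.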
absl::StatusOr<std::optional<HloInstruction*>> UpdateLayoutForCudnnConvolution(
HloCustomCallInstruction* hlo) {
HloInstruction* lhs = hlo->mutable_operand(0);
HloInstruction* rhs = hlo->mutable_operand(1);
const ConvolutionDimensionNumbers& dim_numbers =
hlo->convolution_dimension_numbers();
auto transpose_dim = [&](int64_t dim, const Shape& unnormalized_shape) {
return unnormalized_shape.rank() -
FindIndex(unnormalized_shape.layout().minor_to_major(), dim) - 1;
};
auto transpose_dims = [&](tsl::protobuf::RepeatedField<int64_t>& dims,
const Shape& unnormalized_shape) {
for (auto& dim : dims) {
dim = transpose_dim(dim, unnormalized_shape);
}
};
const Shape& conv_output_shape =
hlo->shape().IsTuple() ? hlo->shape().tuple_shapes(0) : hlo->shape();
Shape input_shape, filter_shape, output_shape;
TF_ASSIGN_OR_RETURN(
gpu::CudnnConvKind conv_kind,
gpu::GetCudnnConvKind(Cast<HloCustomCallInstruction>(hlo)));
switch (conv_kind) {
case gpu::CudnnConvKind::kForward:
case gpu::CudnnConvKind::kForwardActivation:
case gpu::CudnnConvKind::kForwardGraph: {
input_shape = lhs->shape();
filter_shape = rhs->shape();
output_shape = conv_output_shape;
break;
}
case gpu::CudnnConvKind::kBackwardInput: {
filter_shape = rhs->shape();
output_shape = lhs->shape();
input_shape = conv_output_shape;
break;
}
case gpu::CudnnConvKind::kBackwardFilter: {
input_shape = lhs->shape();
output_shape = rhs->shape();
filter_shape = conv_output_shape;
break;
}
}
ConvolutionDimensionNumbers new_dim_numbers = dim_numbers;
new_dim_numbers.set_input_batch_dimension(
transpose_dim(dim_numbers.input_batch_dimension(), input_shape));
new_dim_numbers.set_input_feature_dimension(
transpose_dim(dim_numbers.input_feature_dimension(), input_shape));
transpose_dims(*new_dim_numbers.mutable_input_spatial_dimensions(),
input_shape);
new_dim_numbers.set_kernel_input_feature_dimension(transpose_dim(
dim_numbers.kernel_input_feature_dimension(), filter_shape));
new_dim_numbers.set_kernel_output_feature_dimension(transpose_dim(
dim_numbers.kernel_output_feature_dimension(), filter_shape));
transpose_dims(*new_dim_numbers.mutable_kernel_spatial_dimensions(),
filter_shape);
new_dim_numbers.set_output_batch_dimension(
transpose_dim(dim_numbers.output_batch_dimension(), output_shape));
new_dim_numbers.set_output_feature_dimension(
transpose_dim(dim_numbers.output_feature_dimension(), output_shape));
transpose_dims(*new_dim_numbers.mutable_output_spatial_dimensions(),
output_shape);
Shape normalized_shape;
if (hlo->shape().IsTuple()) {
TF_RET_CHECK(hlo->shape().tuple_shapes().back().rank() == 1)
<< "The last element in the tuple returned by a convolution Custom "
"Call is expected to be an "
"allocator of rank one";
std::vector<Shape> new_tuple_shape;
for (const Shape& tuple_shape : hlo->shape().tuple_shapes()) {
new_tuple_shape.emplace_back(
ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(
tuple_shape));
}
normalized_shape = ShapeUtil::MakeTupleShape(new_tuple_shape);
} else {
normalized_shape =
ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(
hlo->shape());
}
std::vector<HloInstruction*> normalized_operands;
bool performed_normalization = false;
for (int idx = 0; idx < hlo->operand_count(); idx++) {
HloInstruction* op = hlo->mutable_operand(idx);
const Shape& s = op->shape();
Shape s_reordered =
ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(s);
normalized_operands.emplace_back(MakeBitcastHlo(op, s_reordered));
}
if (!performed_normalization &&
ShapeUtil::Equal(normalized_shape, hlo->shape()) &&
ConvolutionDimensionNumbersToString(new_dim_numbers) ==
ConvolutionDimensionNumbersToString(dim_numbers)) {
return std::nullopt;
}
HloInstruction* normalized_conv = hlo->parent()->AddInstruction(
HloInstruction::CreateCustomCall(normalized_shape, normalized_operands,
hlo->custom_call_target()),
&hlo->metadata());
normalized_conv->set_window(hlo->window());
normalized_conv->set_convolution_dimension_numbers(new_dim_numbers);
normalized_conv->set_feature_group_count(hlo->feature_group_count());
normalized_conv->set_raw_backend_config_string(
hlo->raw_backend_config_string());
*normalized_conv->mutable_precision_config() = hlo->precision_config();
normalized_conv->parent()->parent()->SetAndUniquifyInstrName(normalized_conv,
hlo->name());
HloInstruction* bc_to_orig;
if (normalized_conv->shape().IsTuple()) {
std::vector<HloInstruction*> tuple_elements(
normalized_conv->shape().tuple_shapes_size());
for (int i = 0; i < normalized_conv->shape().tuple_shapes_size(); ++i) {
TF_ASSIGN_OR_RETURN(HloInstruction * normalized_out,
MakeGetTupleElementHlo(normalized_conv, i));
tuple_elements[i] =
MakeBitcastHlo(normalized_out, hlo->shape().tuple_shapes(i));
}
bc_to_orig = MaybeMakeTuple(tuple_elements);
} else {
bc_to_orig = MakeBitcastHlo(normalized_conv, hlo->shape());
}
return bc_to_orig;
}
}
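// Normalizes layouts for GPU custom calls. Only cuDNN convolution custom calls
// are rewritten; any other custom call returns std::nullopt (no change).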
absl::StatusOr<std::optional<HloInstruction*>> NormalizeLayoutForGpuCustomCalls(
HloCustomCallInstruction* hlo) {
if (IsCustomCallToDnnConvolution(*hlo)) {
TF_ASSIGN_OR_RETURN(std::optional<HloInstruction*> bc_to_orig,
UpdateLayoutForCudnnConvolution(hlo));
return bc_to_orig;
}
return std::nullopt;
}
}
} | #include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_macros.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
class ConvolutionLayoutNormalizationTest : public HloTestBase {
public:
se::CudaComputeCapability GetCudaComputeCapability() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
}
};
TEST_F(ConvolutionLayoutNormalizationTest, BackwardInput) {
const char* hlo = R"(
HloModule TestModule
%TestComputation1 (param_0: f32[1,20,257], param_1: f32[31,257,136]) -> (f32[1,23,136], u8[0]) {
%param_0 = f32[1,20,257]{2,1,0} parameter(0)
%copy.3 = f32[1,20,257]{1,2,0} copy(f32[1,20,257]{2,1,0} %param_0)
%param_1 = f32[31,257,136]{2,1,0} parameter(1)
%copy.4 = f32[31,257,136]{0,2,1} copy(f32[31,257,136]{2,1,0} %param_1)
%custom-call.1 = (f32[1,23,136]{1,2,0}, u8[0]{0}) custom-call(f32[1,20,257]{1,2,0} %copy.3, f32[31,257,136]{0,2,1} %copy.4), window={size=31 stride=2 pad=23_23}, dim_labels=b0f_0oi->b0f, custom_call_target="__cudnn$convBackwardInput", backend_config={"cudnn_conv_backend_config":{conv_result_scale:1}}
%get-tuple-element.2 = f32[1,23,136]{1,2,0} get-tuple-element((f32[1,23,136]{1,2,0}, u8[0]{0}) %custom-call.1), index=0
%copy.5 = f32[1,23,136]{2,1,0} copy(f32[1,23,136]{1,2,0} %get-tuple-element.2)
%get-tuple-element.3 = u8[0]{0} get-tuple-element((f32[1,23,136]{1,2,0}, u8[0]{0}) %custom-call.1), index=1
ROOT %tuple.1 = (f32[1,23,136]{2,1,0}, u8[0]{0}) tuple(f32[1,23,136]{2,1,0} %copy.5, u8[0]{0} %get-tuple-element.3)
}
)";
MatchOptimizedHlo(hlo, R"(
)");
}
TEST_F(ConvolutionLayoutNormalizationTest, Forward) {
const char* hlo = R"(
HloModule TestModule
ENTRY %TestComputation {
%param_0 = f32[2,128,1,378]{3,2,1,0} parameter(0)
%param_1 = f32[1,5,128,128]{1,0,2,3} parameter(1)
ROOT %custom-call.1 = (f32[2,128,1,378]{3,2,1,0}, u8[0]{0}) custom-call(%param_0, %param_1), window={size=1x5 pad=0_0x2_2}, dim_labels=bf01_01io->bf01, custom_call_target="__cudnn$convForward", backend_config={"cudnn_conv_backend_config":{conv_result_scale:1}}
}
)";
MatchOptimizedHlo(hlo, R"(
)");
}
TEST_F(ConvolutionLayoutNormalizationTest, DISABLED_ON_GPU_ROCM(FusedConv3D)) {
const char* hlo = R"(
HloModule TestModule
ENTRY TestComputation {
%p0 = f32[8,4,5,5,1] parameter(0)
%p1 = f32[3,3,3,1,32] parameter(1)
%conv = f32[8,4,5,5,32] convolution(p0, p1), window={size=3x3x3 pad=1_1x1_1x1_1}, dim_labels=b012f_012io->b012f
%bias = f32[32] parameter(2)
%broadcasted_bias = f32[8,4,5,5,32] broadcast(%bias), dimensions={4}
%add = f32[8,4,5,5,32] add(%conv, %broadcasted_bias)
%zero = f32[] constant(0)
%zeros = f32[8,4,5,5,32] broadcast(%zero), dimensions={}
ROOT relu = f32[8,4,5,5,32] maximum(%zeros, %add)
}
)";
MatchOptimizedHlo(hlo, R"(
)");
}
TEST_F(ConvolutionLayoutNormalizationTest, GraphConvF8) {
if (!GetCudaComputeCapability().IsAtLeast(
se::CudaComputeCapability::HOPPER)) {
GTEST_SKIP() << "FP8 convolutions require Hopper or newer architecture.";
}
const char* hlo = R"(
HloModule Test
ENTRY %Test (input.1: f8e4m3fn[2,1,378,128], filter.1: f8e4m3fn[1,128,128,5], input_scale.1: f32[], filter_scale.1: f32[], z_scale.1: f32[]) -> (f8e4m3fn[2,1,378,128], f32[], u8[0]{0}) {
%input.1 = f8e4m3fn[2,1,378,128]{3,2,1,0} parameter(0)
%filter.1 = f8e4m3fn[128,1,5,128]{1,0,2,3} parameter(1)
%input_scale.1 = f32[] parameter(2)
%filter_scale.1 = f32[] parameter(3)
%z_scale.1 = f32[] parameter(4)
ROOT %cudnn-conv.3.0 = (f8e4m3fn[2,1,378,128]{3,2,1,0}, f32[], u8[0]{0}) custom-call(%input.1, %filter.1, %input_scale.1, %filter_scale.1, %z_scale.1), window={size=1x5 pad=0_0x2_2}, dim_labels=b01f_o01i->b01f, custom_call_target="__cudnn$convForwardGraph", backend_config={"cudnn_conv_backend_config":{"conv_result_scale":1,"serialized_graph":"28:[f32]conv();30:[f32]scale(28);32:[f32]scale(30);16:[f8e4m3fn]scale(32);25:[f32]amax(32);"}}
})";
MatchOptimizedHlo(hlo, R"(
)");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/conv_layout_normalization.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/conv_layout_normalization_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6deec09b-f59f-458b-9ae1-7ae910be6816 | cpp | google/tensorstore | circular_queue | tensorstore/internal/container/circular_queue.h | tensorstore/internal/container/circular_queue_test.cc | #ifndef TENSORSTORE_INTERNAL_THREAD_CIRCULAR_QUEUE_H_
#define TENSORSTORE_INTERNAL_THREAD_CIRCULAR_QUEUE_H_
#include <stddef.h>
#include <algorithm>
#include <cassert>
#include <memory>
#include <type_traits>
#include "absl/base/attributes.h"
#include "absl/log/absl_check.h"
#include "tensorstore/internal/container/item_traits.h"
namespace tensorstore {
namespace internal_container {
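// A ring-buffer queue indexed by monotonically increasing begin_/end_
// counters. The capacity is always a power of two so indices can be wrapped
// with a bit mask; push_back doubles the capacity when the buffer is full.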
template <typename T, typename Allocator = std::allocator<T>>
class CircularQueue {
using TransferTraits = ItemTraits<T>;
using Storage = typename std::aligned_storage<sizeof(T), alignof(T)>::type;
static_assert(sizeof(T) == sizeof(Storage));
using StorageAllocator =
typename std::allocator_traits<Allocator>::template rebind_alloc<Storage>;
using StorageAllocatorTraits = std::allocator_traits<StorageAllocator>;
static constexpr bool kDestroyIsTrivial =
TransferTraits::template destroy_is_trivial<Allocator>();
public:
explicit CircularQueue(size_t n) : CircularQueue(n, Allocator()) {}
CircularQueue(size_t n, Allocator alloc)
: allocator_(std::move(alloc)),
begin_(0),
end_(0),
mask_(0),
buffer_(nullptr) {
ABSL_CHECK_EQ(n & (n - 1), 0);
internal_resize(n);
}
~CircularQueue() {
clear();
if (buffer_) {
StorageAllocator storage_alloc(allocator_);
StorageAllocatorTraits::deallocate(
storage_alloc, reinterpret_cast<Storage*>(buffer_), mask_ + 1);
}
}
CircularQueue(const CircularQueue&) = delete;
CircularQueue& operator=(const CircularQueue&) = delete;
size_t capacity() const { return mask_ + 1; }
size_t size() const { return end_ - begin_; }
bool empty() const { return !size(); }
T& front() {
ABSL_CHECK(!empty());
return buffer_[begin_ & mask_];
}
const T& front() const {
ABSL_CHECK(!empty());
return buffer_[begin_ & mask_];
}
T& back() {
ABSL_CHECK(!empty());
return buffer_[(end_ - 1) & mask_];
}
const T& back() const {
ABSL_CHECK(!empty());
return buffer_[(end_ - 1) & mask_];
}
T& operator[](size_t i) {
ABSL_CHECK_LT(i, size());
return buffer_[(begin_ + i) & mask_];
}
const T& operator[](size_t i) const {
ABSL_CHECK_LT(i, size());
return buffer_[(begin_ + i) & mask_];
}
void push_back(const T& val) { emplace_back(val); }
void push_back(T&& val) { emplace_back(std::move(val)); }
template <typename... A>
T& emplace_back(A&&... args) {
auto* storage = emplace_back_raw();
TransferTraits::construct(&allocator_, storage, std::forward<A>(args)...);
return *storage;
}
void pop_front() {
ABSL_CHECK(!empty());
auto x = begin_++;
if constexpr (!kDestroyIsTrivial) {
TransferTraits::destroy(&allocator_, buffer_ + (x & mask_));
}
}
void clear() {
if constexpr (!kDestroyIsTrivial) {
for (size_t i = begin_; i < end_; i++) {
TransferTraits::destroy(&allocator_, buffer_ + (i & mask_));
}
}
begin_ = 0;
end_ = 0;
}
private:
T* emplace_back_raw() {
if (size() == capacity()) {
internal_resize((mask_ + 1) * 2);
}
return buffer_ + (end_++ & mask_);
}
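// Reallocates the backing buffer to capacity `c` (a power of two) and moves
// the queued elements to the front of the new buffer.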
void internal_resize(size_t c) {
ABSL_CHECK_EQ(c & (c - 1), 0);
ABSL_CHECK_GT(c, mask_ + 1);
StorageAllocator storage_alloc(allocator_);
T* new_buffer = std::launder(reinterpret_cast<T*>(
StorageAllocatorTraits::allocate(storage_alloc, c)));
size_t j = 0;
for (size_t i = begin_; i < end_; i++) {
auto* storage = buffer_ + (i & mask_);
TransferTraits::transfer(&allocator_, new_buffer + j++, storage);
}
if (buffer_) {
StorageAllocatorTraits::deallocate(
storage_alloc, reinterpret_cast<Storage*>(buffer_), mask_ + 1);
}
begin_ = 0;
end_ = j;
mask_ = c - 1;
buffer_ = new_buffer;
}
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS Allocator allocator_;
size_t begin_;
size_t end_;
size_t mask_;
T* buffer_;
};
}
}
#endif | #include "tensorstore/internal/container/circular_queue.h"
#include <stdint.h>
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal_container::CircularQueue;
TEST(CircularQueue, Basic) {
CircularQueue<int64_t> q(2);
EXPECT_TRUE(q.empty());
EXPECT_THAT(q.size(), 0);
q.push_back(10);
EXPECT_FALSE(q.empty());
EXPECT_EQ(q.front(), 10);
EXPECT_EQ(q.back(), 10);
q.pop_front();
EXPECT_TRUE(q.empty());
for (int i = 0; i < 10; ++i) {
q.push_back(i);
}
EXPECT_FALSE(q.empty());
q.clear();
EXPECT_TRUE(q.empty());
}
TEST(CircularQueue, BasicWithSharedPtr) {
CircularQueue<std::shared_ptr<int64_t>> q(2);
EXPECT_TRUE(q.empty());
EXPECT_THAT(q.size(), 0);
q.push_back(std::make_shared<int64_t>(10));
EXPECT_FALSE(q.empty());
EXPECT_EQ(*q.front(), 10);
EXPECT_EQ(*q.back(), 10);
q.pop_front();
EXPECT_TRUE(q.empty());
for (int i = 0; i < 10; ++i) {
q.push_back(std::make_shared<int64_t>(i));
}
EXPECT_FALSE(q.empty());
q.clear();
EXPECT_TRUE(q.empty());
}
TEST(CircularQueue, Resize) {
CircularQueue<int64_t> q(2);
for (int64_t i = 0; i < 1234; ++i) {
q.push_back(i);
}
EXPECT_FALSE(q.empty());
EXPECT_THAT(q.size(), 1234);
EXPECT_THAT(q.capacity(), ::testing::Gt(1234));
for (int64_t i = 0; i < 1234; ++i) {
EXPECT_THAT(q.front(), i);
q.pop_front();
}
EXPECT_THAT(q.size(), ::testing::Eq(0));
}
class OnlyConstructibleByAllocator {
explicit OnlyConstructibleByAllocator(int i) : i_(i) {}
public:
OnlyConstructibleByAllocator(const OnlyConstructibleByAllocator &other)
: i_(other.i_) {}
OnlyConstructibleByAllocator &operator=(
const OnlyConstructibleByAllocator &other) {
i_ = other.i_;
return *this;
}
int Get() const { return i_; }
bool operator==(int i) const { return i_ == i; }
private:
template <typename T>
friend class OnlyConstructibleAllocator;
int i_;
};
template <typename T = OnlyConstructibleByAllocator>
class OnlyConstructibleAllocator : public std::allocator<T> {
public:
OnlyConstructibleAllocator() = default;
template <class U>
explicit OnlyConstructibleAllocator(const OnlyConstructibleAllocator<U> &) {}
void construct(OnlyConstructibleByAllocator *p, int i) {
new (p) OnlyConstructibleByAllocator(i);
}
template <class U>
struct rebind {
using other = OnlyConstructibleAllocator<U>;
};
};
TEST(CircularQueue, OnlyConstructibleByAllocator) {
CircularQueue<OnlyConstructibleByAllocator, OnlyConstructibleAllocator<>> q(
2);
EXPECT_TRUE(q.empty());
EXPECT_THAT(q.size(), 0);
q.emplace_back(10);
EXPECT_FALSE(q.empty());
EXPECT_EQ(q.front(), 10);
EXPECT_EQ(q.back(), 10);
q.pop_front();
EXPECT_TRUE(q.empty());
for (int i = 0; i < 10; ++i) {
q.emplace_back(i);
}
EXPECT_FALSE(q.empty());
q.clear();
EXPECT_TRUE(q.empty());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container/circular_queue.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container/circular_queue_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
6f7ffe16-c5ff-47d3-844e-a0bf0bc88a11 | cpp | google/langsvr | encode | src/lsp/encode.cc | src/lsp/encode_test.cc | #include "langsvr/lsp/encode.h"
namespace langsvr::lsp {
Result<const json::Value*> Encode(Null, json::Builder& b) {
return b.Null();
}
Result<const json::Value*> Encode(Boolean in, json::Builder& b) {
return b.Bool(in);
}
Result<const json::Value*> Encode(Integer in, json::Builder& b) {
return b.I64(in);
}
Result<const json::Value*> Encode(Uinteger in, json::Builder& b) {
return b.U64(in);
}
Result<const json::Value*> Encode(Decimal in, json::Builder& b) {
return b.F64(in);
}
Result<const json::Value*> Encode(const String& in, json::Builder& b) {
return b.String(in);
}
} | #include "langsvr/json/builder.h"
#include "langsvr/lsp/lsp.h"
#include "gmock/gmock.h"
namespace langsvr::lsp {
namespace {
TEST(EncodeTest, ShowDocumentParams) {
ShowDocumentParams params;
params.uri = "file.txt";
params.selection = Range{{1, 2}, {3, 4}};
auto b = json::Builder::Create();
auto res = Encode(params, *b);
EXPECT_EQ(res, Success);
EXPECT_EQ(
res.Get()->Json(),
R"({"selection":{"end":{"character":4,"line":3},"start":{"character":2,"line":1}},"uri":"file.txt"})");
}
}
} | https://github.com/google/langsvr/blob/303c526231a90049a3e384549720f3fbd453cf66/src/lsp/encode.cc | https://github.com/google/langsvr/blob/303c526231a90049a3e384549720f3fbd453cf66/src/lsp/encode_test.cc | 303c526231a90049a3e384549720f3fbd453cf66 |
92a675e2-12a2-4cc7-94b8-6b41dd74b6c4 | cpp | google/cel-cpp | uint_value | common/values/uint_value.cc | common/values/uint_value_test.cc | #include <cstddef>
#include <cstdint>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "common/any.h"
#include "common/casting.h"
#include "common/json.h"
#include "common/value.h"
#include "internal/number.h"
#include "internal/serialize.h"
#include "internal/status_macros.h"
namespace cel {
namespace {
std::string UintDebugString(int64_t value) { return absl::StrCat(value, "u"); }
}
std::string UintValue::DebugString() const {
return UintDebugString(NativeValue());
}
absl::Status UintValue::SerializeTo(AnyToJsonConverter&,
absl::Cord& value) const {
return internal::SerializeUInt64Value(NativeValue(), value);
}
absl::StatusOr<Json> UintValue::ConvertToJson(AnyToJsonConverter&) const {
return JsonUint(NativeValue());
}
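// Numeric equality: a uint compares equal to double and int values holding the
// same number; all other value kinds compare unequal.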
absl::Status UintValue::Equal(ValueManager&, const Value& other,
Value& result) const {
if (auto other_value = As<UintValue>(other); other_value.has_value()) {
result = BoolValue{NativeValue() == other_value->NativeValue()};
return absl::OkStatus();
}
if (auto other_value = As<DoubleValue>(other); other_value.has_value()) {
result =
BoolValue{internal::Number::FromUint64(NativeValue()) ==
internal::Number::FromDouble(other_value->NativeValue())};
return absl::OkStatus();
}
if (auto other_value = As<IntValue>(other); other_value.has_value()) {
result = BoolValue{internal::Number::FromUint64(NativeValue()) ==
internal::Number::FromInt64(other_value->NativeValue())};
return absl::OkStatus();
}
result = BoolValue{false};
return absl::OkStatus();
}
absl::StatusOr<Value> UintValue::Equal(ValueManager& value_manager,
const Value& other) const {
Value result;
CEL_RETURN_IF_ERROR(Equal(value_manager, other, result));
return result;
}
} | #include <cstdint>
#include <sstream>
#include "absl/hash/hash.h"
#include "absl/strings/cord.h"
#include "absl/types/optional.h"
#include "common/any.h"
#include "common/casting.h"
#include "common/json.h"
#include "common/native_type.h"
#include "common/value.h"
#include "common/value_testing.h"
#include "internal/testing.h"
namespace cel {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::testing::An;
using ::testing::Ne;
using UintValueTest = common_internal::ThreadCompatibleValueTest<>;
TEST_P(UintValueTest, Kind) {
EXPECT_EQ(UintValue(1).kind(), UintValue::kKind);
EXPECT_EQ(Value(UintValue(1)).kind(), UintValue::kKind);
}
TEST_P(UintValueTest, DebugString) {
{
std::ostringstream out;
out << UintValue(1);
EXPECT_EQ(out.str(), "1u");
}
{
std::ostringstream out;
out << Value(UintValue(1));
EXPECT_EQ(out.str(), "1u");
}
}
TEST_P(UintValueTest, ConvertToJson) {
EXPECT_THAT(UintValue(1).ConvertToJson(value_manager()),
IsOkAndHolds(Json(1.0)));
}
TEST_P(UintValueTest, NativeTypeId) {
EXPECT_EQ(NativeTypeId::Of(UintValue(1)), NativeTypeId::For<UintValue>());
EXPECT_EQ(NativeTypeId::Of(Value(UintValue(1))),
NativeTypeId::For<UintValue>());
}
TEST_P(UintValueTest, InstanceOf) {
EXPECT_TRUE(InstanceOf<UintValue>(UintValue(1)));
EXPECT_TRUE(InstanceOf<UintValue>(Value(UintValue(1))));
}
TEST_P(UintValueTest, Cast) {
EXPECT_THAT(Cast<UintValue>(UintValue(1)), An<UintValue>());
EXPECT_THAT(Cast<UintValue>(Value(UintValue(1))), An<UintValue>());
}
TEST_P(UintValueTest, As) {
EXPECT_THAT(As<UintValue>(Value(UintValue(1))), Ne(absl::nullopt));
}
TEST_P(UintValueTest, HashValue) {
EXPECT_EQ(absl::HashOf(UintValue(1)), absl::HashOf(uint64_t{1}));
}
TEST_P(UintValueTest, Equality) {
EXPECT_NE(UintValue(0u), 1u);
EXPECT_NE(1u, UintValue(0u));
EXPECT_NE(UintValue(0u), UintValue(1u));
}
TEST_P(UintValueTest, LessThan) {
EXPECT_LT(UintValue(0), 1);
EXPECT_LT(0, UintValue(1));
EXPECT_LT(UintValue(0), UintValue(1));
}
INSTANTIATE_TEST_SUITE_P(
UintValueTest, UintValueTest,
::testing::Combine(::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting)),
UintValueTest::ToString);
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/uint_value.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/uint_value_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
3c947f36-5411-42e9-ae6b-7264dbade642 | cpp | tensorflow/tensorflow | ir_array | third_party/xla/xla/service/llvm_ir/ir_array.cc | third_party/xla/xla/service/llvm_ir/ir_array_test.cc | #include "xla/service/llvm_ir/ir_array.h"
#include <cstdint>
#include <optional>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/permutation_util.h"
#include "xla/primitive_util.h"
#include "xla/service/llvm_ir/llvm_type_conversion_util.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
namespace llvm_ir {
IrArray::Index::Index(absl::Span<llvm::Value* const> multidim,
llvm::Value* linear, const Shape& shape,
llvm::Type* index_type)
: Index(multidim, shape, index_type) {
CHECK_NE(linear, nullptr);
linear_ = linear;
}
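// Recovers the multidimensional index from a linear index by dividing out the
// dimension sizes in minor-to-major order; the most-major dimension keeps the
// remaining quotient.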
void IrArray::Index::Delinearize(std::vector<llvm::Value*>* multidim,
llvm::Value* linear, const Shape& shape,
llvm::IRBuilder<>* b) const {
int64_t divisor = 1;
const Layout& layout = shape.layout();
for (int64_t i = 0; i < layout.minor_to_major_size(); ++i) {
int64_t dimension = layout.minor_to_major(i);
int64_t size_of_current_dimension = shape.dimensions(dimension);
auto* quot = b->CreateUDiv(linear, GetConstantWithIndexType(divisor));
if (i < layout.minor_to_major_size() - 1) {
(*multidim)[dimension] = b->CreateURem(
quot, GetConstantWithIndexType(size_of_current_dimension));
} else {
(*multidim)[dimension] = quot;
}
divisor *= size_of_current_dimension;
}
}
void IrArray::Index::Delinearize(std::vector<llvm::Value*>* multidim,
llvm::Value* linear, const Shape& shape,
absl::Span<llvm::Value*> dynamic_dims,
llvm::IRBuilder<>* b) const {
CHECK_EQ(shape.dimensions_size(), dynamic_dims.size());
CHECK_EQ(multidim_.size(), shape.rank());
llvm::Value* divisor = GetConstantWithIndexType(1);
const Layout& layout = shape.layout();
for (int64_t i = 0; i < layout.minor_to_major_size(); ++i) {
int64_t dimension = layout.minor_to_major(i);
auto* quot = b->CreateUDiv(linear, divisor, "quot");
if (i < layout.minor_to_major_size() - 1) {
llvm::Value* casted_dynamic_dim =
b->CreateIntCast(dynamic_dims[dimension], quot->getType(),
true);
(*multidim)[dimension] =
b->CreateURem(quot, casted_dynamic_dim, "dim_value");
divisor = b->CreateMul(divisor, casted_dynamic_dim, "divisor");
} else {
(*multidim)[dimension] = quot;
}
}
}
IrArray::Index::Index(llvm::Value* linear, const Shape& shape,
llvm::IRBuilder<>* b)
: multidim_(shape.rank()),
linear_(linear),
layout_(shape.layout()),
dims_(shape.dimensions().begin(), shape.dimensions().end()) {
CHECK_NE(linear, nullptr);
index_type_ = linear->getType();
CHECK(LayoutUtil::HasLayout(shape))
<< "Shape " << ShapeUtil::HumanStringWithLayout(shape)
<< " should have a layout.";
Delinearize(&multidim_, linear, shape, b);
}
IrArray::Index::Index(llvm::Value* linear,
absl::Span<llvm::Value* const> multidim,
const Shape& shape, llvm::IRBuilder<>* b)
: multidim_(shape.rank()),
linear_(linear),
layout_(shape.layout()),
dims_(shape.dimensions().begin(), shape.dimensions().end()) {
CHECK_NE(linear, nullptr);
index_type_ = linear->getType();
CHECK_EQ(multidim.size(), shape.rank());
for (auto dim : multidim) {
if (dim) {
CHECK_EQ(dim->getType(), index_type_);
}
}
CHECK(LayoutUtil::HasLayout(shape))
<< "Shape " << ShapeUtil::HumanStringWithLayout(shape)
<< " should have a layout.";
Delinearize(&multidim_, linear, shape, b);
for (int i = 0; i < multidim.size(); ++i) {
if (multidim[i] != nullptr) {
multidim_[i] = multidim[i];
}
}
}
IrArray::Index::Index(llvm::Value* linear, const Shape& shape,
absl::Span<llvm::Value*> dynamic_dims,
llvm::IRBuilder<>* b)
: multidim_(shape.rank()),
linear_(linear),
layout_(shape.layout()),
dims_(shape.dimensions().begin(), shape.dimensions().end()) {
CHECK_NE(linear, nullptr);
index_type_ = linear->getType();
CHECK(LayoutUtil::HasLayout(shape))
<< "Shape " << ShapeUtil::HumanStringWithLayout(shape)
<< " should have a layout.";
Delinearize(&multidim_, linear, shape, dynamic_dims, b);
}
IrArray::Index::Index(absl::Span<llvm::Value* const> multidim,
absl::Span<int64_t const> dimensions,
llvm::Type* index_type)
: Index(multidim, ShapeUtil::MakeShape(PRED, dimensions),
index_type) {}
IrArray::Index::Index(absl::Span<llvm::Value* const> multidim,
const Shape& shape, llvm::Type* index_type)
: multidim_(multidim.begin(), multidim.end()),
linear_(nullptr),
layout_(shape.layout()),
dims_(shape.dimensions().begin(), shape.dimensions().end()),
index_type_(index_type) {
CHECK_NE(index_type_, nullptr);
CHECK_EQ(shape.dimensions_size(), multidim.size());
for (const auto* dim : multidim) {
CHECK_NE(dim, nullptr);
}
CHECK(LayoutUtil::HasLayout(shape))
<< "Shape " << ShapeUtil::HumanStringWithLayout(shape)
<< " should have a layout.";
}
IrArray::IrArray(llvm::Value* base_ptr, llvm::Type* pointee_type, Shape shape)
: base_ptr_(base_ptr),
pointee_type_(pointee_type),
shape_(std::move(shape)) {
TF_CHECK_OK(ShapeUtil::ValidateShape(shape));
CHECK(base_ptr_->getType()->isPointerTy());
int depth = 0;
element_type_ = pointee_type;
while (llvm::ArrayType* array_type =
llvm::dyn_cast<llvm::ArrayType>(element_type_)) {
element_type_ = array_type->getElementType();
++depth;
}
if (!shape_.IsArray() || ShapeUtil::IsScalar(shape_)) {
DCHECK(depth == 1 || depth == 0) << depth;
} else {
DCHECK_EQ(depth, shape_.rank()) << shape.ShortDebugString();
}
}
bool IrArray::Index::LinearValidOnShape(const Shape& a) const {
auto b = ShapeUtil::MakeShape(a.element_type(), dims_);
*b.mutable_layout() = layout_;
return linear_ != nullptr &&
ShapeUtil::ElementsIn(a) == ShapeUtil::ElementsIn(b) &&
ShapeUtil::ReshapeIsBitcast(a, b);
}
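// Maps an index into the reshape output back to an index into the reshape
// input. Trivial reshapes (only inserting or deleting size-1 dimensions) reuse
// the existing index values; otherwise indices are linearized and
// re-delinearized per group of common factors.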
IrArray::Index IrArray::Index::SourceIndexOfReshape(
const Shape& output_shape, const Shape& input_shape,
llvm::IRBuilder<>* builder) const {
CHECK_EQ(multidim_.size(), output_shape.rank());
std::vector<llvm::Value*> source_multidim_index(
input_shape.rank(), llvm::UndefValue::get(index_type_));
if (std::optional<ShapeUtil::ShapeEqualityDescriptor> trivial_reshape =
ShapeUtil::InsertedOrDeleted1SizedDimensions(input_shape,
output_shape)) {
for (int64_t i = 0, j = 0, k = 0, l = 0; i < source_multidim_index.size();
++i) {
if (j == trivial_reshape->deleted_dimensions.size() ||
trivial_reshape->deleted_dimensions[j] > i) {
while (l < trivial_reshape->inserted_dimensions.size() &&
trivial_reshape->inserted_dimensions[l] == k) {
++k;
++l;
}
source_multidim_index[i] = multidim_[k];
++k;
} else {
source_multidim_index[i] = GetConstantWithIndexType(0);
++j;
}
}
} else {
const auto common_factors =
CommonFactors(input_shape.dimensions(), output_shape.dimensions());
for (ssize_t k = common_factors.size() - 2; k >= 0; --k) {
absl::Span<int64_t const> dimensions = output_shape.dimensions().subspan(
common_factors[k].second,
common_factors[k + 1].second - common_factors[k].second);
llvm::Value* logical_linear_index =
Index(absl::Span<llvm::Value* const>(multidim_).subspan(
common_factors[k].second,
common_factors[k + 1].second - common_factors[k].second),
dimensions, index_type_)
.Linearize(dimensions, builder);
for (int64_t i = common_factors[k + 1].first - 1;
i >= common_factors[k].first; --i) {
llvm::Value* divisor =
GetConstantWithIndexType(input_shape.dimensions(i));
if (input_shape.dimensions(i) == 1) {
source_multidim_index[i] = GetConstantWithIndexType(0);
} else if (i == common_factors[k].first) {
source_multidim_index[i] = logical_linear_index;
} else {
source_multidim_index[i] =
builder->CreateURem(logical_linear_index, divisor);
}
logical_linear_index =
builder->CreateUDiv(logical_linear_index, divisor);
}
}
}
if (linear() != nullptr && LayoutUtil::HasLayout(input_shape) &&
LayoutUtil::HasLayout(output_shape) &&
ShapeUtil::ReshapeIsBitcast(input_shape, output_shape)) {
return Index(source_multidim_index, linear(), input_shape, index_type_);
}
return Index(source_multidim_index, input_shape, index_type_);
}
IrArray::Index IrArray::Index::SourceIndexOfSlice(
const Shape& operand_shape, absl::Span<const int64_t> starts,
absl::Span<const int64_t> strides, llvm::IRBuilder<>* builder) const {
std::vector<llvm::Value*> source_multi_index(multidim_.size());
for (int i = 0; i < multidim_.size(); ++i) {
int64_t stride = strides[i];
if (stride != 1) {
source_multi_index[i] = builder->CreateAdd(
builder->CreateMul(multidim_[i], GetConstantWithIndexType(stride)),
GetConstantWithIndexType(starts[i]));
} else {
source_multi_index[i] =
builder->CreateAdd(multidim_[i], GetConstantWithIndexType(starts[i]));
}
}
return Index(source_multi_index, operand_shape, index_type_);
}
IrArray::Index IrArray::Index::SourceIndexOfTranspose(
const Shape& shape, const Shape& operand_shape,
absl::Span<const int64_t> dimension_mapping) const {
std::vector<llvm::Value*> operand_multidim_index =
PermuteInverse(multidim(), dimension_mapping);
if (linear() != nullptr && LayoutUtil::HasLayout(operand_shape) &&
LayoutUtil::HasLayout(shape) &&
ShapeUtil::TransposeIsBitcast(operand_shape, shape, dimension_mapping)) {
return Index(operand_multidim_index, linear(), operand_shape, index_type_);
}
return Index(operand_multidim_index, operand_shape, index_type_);
}
IrArray::Index IrArray::Index::SourceIndexOfBitcast(
const Shape& shape, const Shape& operand_shape,
llvm::IRBuilder<>* builder) const {
CHECK(LayoutUtil::HasLayout(shape) && LayoutUtil::HasLayout(operand_shape));
const ShapeUtil::BitcastDecomposition decomposition =
ShapeUtil::DecomposeBitcast(operand_shape, shape);
if (std::holds_alternative<ShapeUtil::BitcastDecompositionReshape>(
decomposition)) {
return SourceIndexOfReshape(shape, operand_shape, builder);
}
if (std::holds_alternative<ShapeUtil::BitcastDecompositionTranspose>(
decomposition)) {
const auto& decomposition_transpose =
std::get<ShapeUtil::BitcastDecompositionTranspose>(decomposition);
return SourceIndexOfTranspose(shape, operand_shape,
decomposition_transpose.transpose_dims);
}
CHECK(std::holds_alternative<ShapeUtil::BitcastDecompositionTrt>(
decomposition));
const auto& decomposition_trt =
std::get<ShapeUtil::BitcastDecompositionTrt>(decomposition);
Index index = *this;
if (!decomposition_trt.IsTranspose2Identity()) {
index = index.SourceIndexOfTranspose(shape, decomposition_trt.reshape_shape,
decomposition_trt.transpose2_dims);
}
index =
index.SourceIndexOfReshape(decomposition_trt.reshape_shape,
decomposition_trt.transpose1_shape, builder);
if (!decomposition_trt.IsTranspose1Identity()) {
index = index.SourceIndexOfTranspose(decomposition_trt.transpose1_shape,
operand_shape,
decomposition_trt.transpose1_dims);
}
return index;
}
IrArray::Index IrArray::Index::SourceIndexOfBitcast(
const Shape& operand_shape, llvm::IRBuilder<>* builder) const {
auto shape = ShapeUtil::MakeShape(F32, dims_);
*shape.mutable_layout() = layout_;
return SourceIndexOfBitcast(shape, operand_shape, builder);
}
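// Maps an index into a broadcast result back to the operand by picking out the
// broadcasted output dimensions. If the output carries a linear index and the
// broadcast dimensions form a contiguous block in the output layout that lines
// up with the operand's layout, the operand's linear index is recovered from
// the output's with a division (dropping more-minor output dimensions) and a
// remainder (dropping more-major ones).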
IrArray::Index IrArray::Index::SourceIndexOfBroadcast(
const Shape& shape, const Shape& operand_shape,
absl::Span<const int64_t> dimension_mapping,
llvm::IRBuilder<>* builder) const {
int64_t rank = operand_shape.rank();
std::vector<llvm::Value*> source_index(rank);
for (int64_t i = 0; i < rank; ++i) {
source_index[i] = multidim_[dimension_mapping[i]];
}
if (linear_ == nullptr || !LayoutUtil::HasLayout(operand_shape) ||
!LayoutUtil::HasLayout(shape) || rank == 1) {
return Index(source_index, operand_shape, index_type_);
}
std::vector<int64_t> logical_to_physical =
LayoutUtil::MakeLogicalToPhysical(shape.layout());
int64_t output_rank = shape.rank();
int64_t min_broadcasted_dimension = output_rank;
int64_t max_broadcasted_dimension = -1;
for (int64_t i = 0; i < rank; ++i) {
int64_t physical_dim = logical_to_physical[dimension_mapping[i]];
min_broadcasted_dimension =
std::min(min_broadcasted_dimension, physical_dim);
max_broadcasted_dimension =
std::max(max_broadcasted_dimension, physical_dim);
}
bool contiguous_broadcast_dimensions =
max_broadcasted_dimension - min_broadcasted_dimension == rank - 1;
if (!contiguous_broadcast_dimensions) {
return Index(source_index, operand_shape, index_type_);
}
std::vector<int64_t> operand_logical_to_physical =
LayoutUtil::MakeLogicalToPhysical(operand_shape.layout());
for (int64_t i = 0; i < rank; ++i) {
if (operand_logical_to_physical[i] !=
logical_to_physical[dimension_mapping[i]] - min_broadcasted_dimension) {
return Index(source_index, operand_shape, index_type_);
}
}
llvm::Value* linear = linear_;
int64_t divisor = 1;
for (int64_t i = max_broadcasted_dimension + 1; i < output_rank; ++i) {
divisor *= shape.dimensions(LayoutUtil::Major(shape.layout(), i));
}
if (divisor > 1) {
linear = builder->CreateUDiv(linear, GetConstantWithIndexType(divisor));
}
if (min_broadcasted_dimension > 0) {
int64_t mod = 1;
for (int64_t i = min_broadcasted_dimension; i <= max_broadcasted_dimension;
++i) {
mod *= shape.dimensions(LayoutUtil::Major(shape.layout(), i));
}
linear = builder->CreateURem(linear, GetConstantWithIndexType(mod));
}
return Index(source_index, linear, operand_shape, index_type_);
}
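// Flattens this multidimensional index into a linear offset for the given
// dimension sizes, accumulating index[dim] * multiplier while walking
// dimensions from minor to major and growing the multiplier by each
// dimension's size.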
llvm::Value* IrArray::Index::Linearize(absl::Span<const int64_t> dimensions,
llvm::IRBuilder<>* builder) const {
CHECK_EQ(size(), dimensions.size());
llvm::Value* logical_linear_index = GetConstantWithIndexType(0);
int64_t multiplier = 1;
for (ssize_t i = 0; i < size(); ++i) {
int64_t dimension = layout_.minor_to_major(i);
llvm::Value* addend = builder->CreateMul(
    (*this)[dimension], GetConstantWithIndexType(multiplier), "",
    /*HasNUW=*/true, /*HasNSW=*/true);
addend = builder->CreateZExtOrTrunc(addend, index_type_);
logical_linear_index = builder->CreateAdd(logical_linear_index, addend, "",
                                          /*HasNUW=*/true, /*HasNSW=*/true);
multiplier *= dimensions[dimension];
}
return logical_linear_index;
}
llvm::Value* IrArray::Index::Linearize(
const std::vector<llvm::Value*>& dynamic_dims,
llvm::IRBuilder<>* builder) const {
CHECK_EQ(size(), dynamic_dims.size());
llvm::Value* logical_linear_index = GetConstantWithIndexType(0);
llvm::Value* multiplier = GetConstantWithIndexType(1);
for (ssize_t i = 0; i < size(); ++i) {
int64_t dimension = layout_.minor_to_major(i);
llvm::Value* addend = builder->CreateMul((*this)[dimension], multiplier, "",
                                         /*HasNUW=*/true, /*HasNSW=*/true);
addend = builder->CreateZExtOrTrunc(addend, index_type_);
logical_linear_index = builder->CreateAdd(logical_linear_index, addend, "",
                                          /*HasNUW=*/true, /*HasNSW=*/true);
if (i < size() - 1) {
multiplier = builder->CreateMul(multiplier, dynamic_dims[dimension],
"multiplier");
}
}
return logical_linear_index;
}
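// Emits the address of the element at `index`. Scalars simply return the base
// pointer. When the linear index is valid for this array's layout a flat GEP
// is used; sub-byte types (e.g. int4) are always addressed via a linearized
// index, returning the containing byte's address and the element's position
// within it through *bit_offset. Otherwise a multidimensional GEP is emitted,
// with indices into size-1 dimensions folded to constant zero.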
llvm::Value* IrArray::EmitArrayElementAddress(const IrArray::Index& index,
llvm::IRBuilder<>* b,
absl::string_view name,
bool use_linear_index,
llvm::Value** bit_offset) const {
if (ShapeUtil::IsScalar(shape_)) {
if (primitive_util::IsSubByteNonPredType(shape_.element_type())) {
CHECK_NE(bit_offset, nullptr);
*bit_offset =
b->getInt8(8 - primitive_util::BitWidth(shape_.element_type()));
}
return base_ptr_;
}
CHECK_EQ(index.size(), shape_.rank());
CHECK(index.ShapeIsCompatible(shape_))
<< "Shape " << index.AsShapeWithType(shape_.element_type()).ToString(true)
<< " is not compatible with " << shape_.ToString(true);
if (use_linear_index && index.LinearValidOnShape(shape_)) {
return EmitLinearArrayElementAddress(index, b, name, bit_offset);
}
if (primitive_util::IsSubByteNonPredType(shape_.element_type())) {
IrArray::Index linear_index = index;
if (!index.LinearValidOnShape(shape_)) {
std::vector<int64_t> dimensions;
dimensions.reserve(shape_.rank());
for (int64_t i = 0; i < shape_.rank(); ++i) {
dimensions.push_back(shape_.dimensions(i));
}
llvm::Value* linearized = index.Linearize(dimensions, b);
linear_index = IrArray::Index(linearized, shape_, b);
}
return EmitLinearArrayElementAddress(linear_index, b, name, bit_offset);
}
std::vector<llvm::Value*> actual_index;
for (int64_t i = 0; i < index.size(); ++i) {
auto dim = shape_.dimensions(i);
actual_index.push_back(
dim == 1 ? llvm::ConstantInt::get(index[i]->getType(), 0) : index[i]);
}
CHECK_GT(index.size(), 0);
std::vector<llvm::Value*> gep_indices(
1, llvm::ConstantInt::get(index[0]->getType(), 0));
for (int64_t i = 0; i < shape_.rank(); ++i) {
int64_t dimension = LayoutUtil::Major(shape_.layout(), i);
gep_indices.push_back(actual_index[dimension]);
}
return b->CreateInBoundsGEP(pointee_type_, base_ptr_, gep_indices,
llvm_ir::AsStringRef(name));
}
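// Emits an address from a linear index. For whole-byte element types this is a
// GEP by the linear value (split into two GEPs when the index is an add, so
// the two addends are applied separately). For sub-byte types several elements
// share a byte: byte_offset selects the byte and *bit_offset the element's bit
// position within it, with the first element stored in the high bits.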
llvm::Value* IrArray::EmitLinearArrayElementAddress(
const IrArray::Index& index, llvm::IRBuilder<>* b, absl::string_view name,
llvm::Value** bit_offset) const {
CHECK(index.LinearValidOnShape(shape_));
llvm::Module* module = b->GetInsertBlock()->getParent()->getParent();
llvm::Type* type = PrimitiveTypeToIrType(shape_.element_type(), module);
if (!primitive_util::IsSubByteNonPredType(shape_.element_type())) {
auto linear_index = llvm::dyn_cast<llvm::BinaryOperator>(index.linear());
if (linear_index && (linear_index->getOpcode() == llvm::Instruction::Add)) {
llvm::Value* index_operand_0 = linear_index->getOperand(0);
llvm::Value* index_operand_1 = linear_index->getOperand(1);
llvm::Value* ptr_address =
b->CreateGEP(type, base_ptr_, index_operand_0, "");
return b->CreateInBoundsGEP(type, ptr_address, index_operand_1,
llvm_ir::AsStringRef(name));
} else {
return b->CreateInBoundsGEP(type, base_ptr_, index.linear(),
llvm_ir::AsStringRef(name));
}
}
llvm::Type* index_type = index.linear()->getType();
auto bit_width = primitive_util::BitWidth(shape_.element_type());
llvm::Value* elements_per_byte =
llvm::ConstantInt::get(index_type, 8 / bit_width);
llvm::Value* remainder = b->CreateURem(index.linear(), elements_per_byte);
llvm::Value* byte_offset = b->CreateUDiv(index.linear(), elements_per_byte);
CHECK_NE(bit_offset, nullptr);
*bit_offset = b->CreateIntCast(
b->CreateSub(llvm::ConstantInt::get(index_type, 8 - bit_width),
b->CreateMul(remainder,
llvm::ConstantInt::get(index_type, bit_width))),
b->getInt8Ty(), false);
return b->CreateInBoundsGEP(b->getInt8Ty(), base_ptr_, byte_offset,
llvm_ir::AsStringRef(name));
}
void IrArray::AnnotateLoadStoreInstructionWithMetadata(
llvm::Instruction* instruction) const {
CHECK(llvm::isa<llvm::LoadInst>(instruction) ||
llvm::isa<llvm::StoreInst>(instruction));
CHECK(!llvm::isa<llvm::StoreInst>(instruction) || !is_invariant_)
<< "Trying to create a store to an invariant IRArray.";
for (const auto& kind_md_pair : metadata_) {
instruction->setMetadata(kind_md_pair.first, kind_md_pair.second);
}
}
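// Loads the element at `index`. For sub-byte types the containing byte is
// loaded, shifted right by the element's bit offset, and truncated to the
// element's bit width.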
llvm::Value* IrArray::EmitReadArrayElement(const Index& index,
llvm::IRBuilder<>* b,
absl::string_view name,
bool use_linear_index) const {
llvm::Value* bit_offset = nullptr;
llvm::Value* element_address =
EmitArrayElementAddress(index, b, name, use_linear_index, &bit_offset);
llvm::Type* load_type =
primitive_util::IsSubByteNonPredType(shape_.element_type())
? b->getInt8Ty()
: element_type_;
llvm::LoadInst* load =
b->CreateLoad(load_type, element_address, llvm_ir::AsStringRef(name));
AnnotateLoadStoreInstructionWithMetadata(load);
llvm::Value* elem = load;
if (primitive_util::IsSubByteNonPredType(shape_.element_type())) {
llvm::Value* shifted = b->CreateLShr(load, bit_offset);
elem = b->CreateTrunc(
shifted, b->getIntNTy(primitive_util::BitWidth(shape_.element_type())));
}
return elem;
}
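// Stores `value` at `index`. Sub-byte types use a read-modify-write of the
// containing byte: the new bits are shifted into position and merged with the
// byte's other bits under a rotated mask before the store.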
void IrArray::EmitWriteArrayElement(const Index& index, llvm::Value* value,
llvm::IRBuilder<>* b,
bool use_linear_index) const {
llvm::Value* bit_offset = nullptr;
llvm::Value* element_address =
EmitArrayElementAddress(index, b, "", use_linear_index, &bit_offset);
if (primitive_util::IsSubByteNonPredType(shape_.element_type())) {
llvm::LoadInst* load = b->CreateLoad(b->getInt8Ty(), element_address);
AnnotateLoadStoreInstructionWithMetadata(load);
value = b->CreateIntCast(value, b->getInt8Ty(),
                         /*isSigned=*/false);
value = b->CreateShl(value, bit_offset);
auto bit_width = primitive_util::BitWidth(shape_.element_type());
llvm::Value* mask = b->getInt8(~LsbMask<uint8_t>(bit_width));
mask = b->CreateIntrinsic(b->getInt8Ty(), llvm::Intrinsic::fshl,
{mask, mask, bit_offset});
llvm::Value* masked_load = b->CreateAnd(load, mask);
value = b->CreateOr(masked_load, value);
}
llvm::StoreInst* store = b->CreateStore(value, element_address);
AnnotateLoadStoreInstructionWithMetadata(store);
}
IrArray IrArray::CastToShape(const Shape& new_shape,
llvm::IRBuilder<>* b) const {
if (shape_ == new_shape) return *this;
llvm::Module* module = b->GetInsertBlock()->getParent()->getParent();
llvm::Type* new_ir_type = llvm_ir::ShapeToIrType(new_shape, module);
IrArray new_irarray(base_ptr_, new_ir_type, new_shape);
new_irarray.metadata_ = metadata_;
return new_irarray;
}
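// Two shapes index identically when walking their dimensions from minor to
// major, skipping size-1 dimensions, yields the same sequence of running
// products; degenerate dimensions and where the layout places them are
// irrelevant.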
bool IrArray::Index::ShapeIsCompatible(const Shape& a, const Shape& b) {
const auto get_strides = [](const Shape& shape) {
int rank = shape.dimensions().size();
int64_t stride = 1;
std::vector<int64_t> strides;
for (int i = 0; i < rank; i++) {
auto dim = shape.dimensions(shape.layout().minor_to_major(i));
if (dim != 1) {
stride *= dim;
strides.push_back(stride);
}
}
return strides;
};
return get_strides(a) == get_strides(b);
}
}
}
| #include "xla/service/llvm_ir/ir_array.h"
#include <string>
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/filecheck.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace llvm_ir {
namespace {
class IrArrayTest : public ::testing::Test {
public:
IrArrayTest()
: context_{},
module_{"IrArrayTest module", context_},
builder_{context_} {}
llvm::Function* EmitFunctionAndSetInsertPoint(
llvm::ArrayRef<llvm::Type*> params) {
llvm::FunctionType* function_type =
llvm::FunctionType::get(llvm::Type::getVoidTy(context_), params,
false);
llvm::Function* function = llvm::Function::Create(
function_type, llvm::Function::LinkageTypes::ExternalLinkage,
"test_function", module_);
llvm::BasicBlock* bb = llvm::BasicBlock::Create(context_, "bb", function);
builder_.SetInsertPoint(bb);
return function;
}
protected:
llvm::LLVMContext context_;
llvm::Module module_;
llvm::IRBuilder<> builder_;
};
TEST_F(IrArrayTest, TestShapeIsCompatible) {
xla::Shape a =
ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 10, 20}, {2, 1, 0});
xla::Shape b =
ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 10, 20}, {2, 0, 1});
xla::Shape c =
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 1, 20}, {2, 1, 0});
xla::Shape d =
ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 10, 30}, {2, 1, 0});
xla::Shape e =
ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 10, 30}, {2, 0, 1});
xla::Shape f =
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 1, 30}, {2, 1, 0});
EXPECT_TRUE(IrArray::Index::ShapeIsCompatible(a, b));
EXPECT_TRUE(IrArray::Index::ShapeIsCompatible(a, c));
EXPECT_FALSE(IrArray::Index::ShapeIsCompatible(a, d));
EXPECT_FALSE(IrArray::Index::ShapeIsCompatible(a, e));
EXPECT_FALSE(IrArray::Index::ShapeIsCompatible(a, f));
}
TEST_F(IrArrayTest, EmitArrayElementAddress) {
llvm::Function* function = EmitFunctionAndSetInsertPoint(
{builder_.getPtrTy(), builder_.getInt32Ty()});
llvm::Argument* array_ptr = function->getArg(0);
llvm::Argument* array_index = function->getArg(1);
Shape shape = ShapeUtil::MakeShape(F32, {3, 5});
llvm::Type* type = llvm_ir::ShapeToIrType(shape, &module_);
IrArray ir_array(array_ptr, type, shape);
IrArray::Index index(array_index, shape, &builder_);
ir_array.EmitArrayElementAddress(index, &builder_);
std::string ir_str = DumpToString(&module_);
const char* filecheck_pattern = R"(
CHECK: define void @test_function(ptr %[[ptr:[0-9]+]], i32 %[[idx:[0-9]+]]) {
CHECK: getelementptr inbounds float, ptr %[[ptr]], i32 %[[idx]]
)";
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_match,
RunFileCheck(ir_str, filecheck_pattern));
EXPECT_TRUE(filecheck_match);
}
TEST_F(IrArrayTest, EmitArrayElementAddressNonLinear) {
llvm::Function* function = EmitFunctionAndSetInsertPoint(
{builder_.getPtrTy(), builder_.getInt32Ty()});
llvm::Argument* array_ptr = function->getArg(0);
llvm::Argument* array_index = function->getArg(1);
Shape shape = ShapeUtil::MakeShape(F32, {3, 5});
llvm::Type* type = llvm_ir::ShapeToIrType(shape, &module_);
IrArray ir_array(array_ptr, type, shape);
IrArray::Index index(array_index, shape, &builder_);
ir_array.EmitArrayElementAddress(index, &builder_, "",
                                 /*use_linear_index=*/false);
std::string ir_str = DumpToString(&module_);
const char* filecheck_pattern = R"(
CHECK: define void @test_function(ptr %[[ptr:[0-9]+]], i32 %[[idx:[0-9]+]]) {
CHECK: %[[udiv1:[0-9]+]] = udiv i32 %[[idx]], 1
CHECK: %[[urem:[0-9]+]] = urem i32 %[[udiv1]], 5
CHECK: %[[udiv2:[0-9]+]] = udiv i32 %[[idx]], 5
CHECK: getelementptr inbounds [3 x [5 x float]], ptr %0, i32 0, i32 %[[udiv2]], i32 %[[urem]]
)";
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_match,
RunFileCheck(ir_str, filecheck_pattern));
EXPECT_TRUE(filecheck_match);
}
TEST_F(IrArrayTest, EmitArrayElementAddressInt4) {
llvm::Function* function = EmitFunctionAndSetInsertPoint(
{builder_.getPtrTy(), builder_.getInt32Ty()});
llvm::Argument* array_ptr = function->getArg(0);
llvm::Argument* array_index = function->getArg(1);
Shape shape = ShapeUtil::MakeShape(S4, {3, 5});
llvm::Type* type = llvm_ir::ShapeToIrType(shape, &module_);
IrArray ir_array(array_ptr, type, shape);
IrArray::Index index(array_index, shape, &builder_);
llvm::Value* bit_offset;
ir_array.EmitArrayElementAddress(index, &builder_, "",
                                 /*use_linear_index=*/true,
                                 &bit_offset);
std::string ir_str = DumpToString(&module_);
const char* filecheck_pattern = R"(
CHECK: define void @test_function(ptr %[[ptr:[0-9]+]], i32 %[[idx:[0-9]+]]) {
CHECK: %[[rem:[0-9]+]] = urem i32 %[[idx]], 2
CHECK: %[[div:[0-9]+]] = udiv i32 %[[idx]], 2
CHECK: getelementptr inbounds i8, ptr %[[ptr]], i32 %[[div]]
)";
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_match,
RunFileCheck(ir_str, filecheck_pattern));
EXPECT_TRUE(filecheck_match);
}
TEST_F(IrArrayTest, EmitArrayElementAddressInt4NonLinear) {
llvm::Function* function = EmitFunctionAndSetInsertPoint(
{llvm::PointerType::get(context_, 0), llvm::Type::getInt32Ty(context_),
llvm::Type::getInt32Ty(context_)});
llvm::Argument* array_ptr = function->getArg(0);
llvm::Argument* array_index0 = function->getArg(1);
llvm::Argument* array_index1 = function->getArg(2);
Shape shape = ShapeUtil::MakeShape(S4, {3, 5});
llvm::Type* type = llvm_ir::ShapeToIrType(shape, &module_);
IrArray ir_array(array_ptr, type, shape);
IrArray::Index index({array_index0, array_index1}, shape,
builder_.getInt32Ty());
llvm::Value* bit_offset;
ir_array.EmitArrayElementAddress(index, &builder_, "",
                                 /*use_linear_index=*/false,
                                 &bit_offset);
std::string ir_str = DumpToString(&module_);
const char* filecheck_pattern = R"(
CHECK: define void @test_function(ptr %[[ptr:[0-9]+]], i32 %[[idx0:[0-9]+]], i32 %[[idx1:[0-9]+]]) {
CHECK: %[[mul1:[0-9]+]] = mul nuw nsw i32 %[[idx1]], 1
CHECK: %[[add1:[0-9]+]] = add nuw nsw i32 0, %[[mul1]]
CHECK: %[[mul2:[0-9]+]] = mul nuw nsw i32 %[[idx0]], 5
CHECK: %[[add2:[0-9]+]] = add nuw nsw i32 %[[add1]], %[[mul2]]
CHECK: %[[udiv:[0-9]+]] = udiv i32 %[[add2]], 2
CHECK: %[[gep:[0-9]+]] = getelementptr inbounds i8, ptr %[[ptr]], i32 %[[udiv]]
)";
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_match,
RunFileCheck(ir_str, filecheck_pattern));
EXPECT_TRUE(filecheck_match);
}
TEST_F(IrArrayTest, EmitReadArrayElementInt4) {
llvm::Function* function = EmitFunctionAndSetInsertPoint(
{builder_.getPtrTy(), builder_.getInt32Ty()});
llvm::Argument* array_ptr = function->getArg(0);
llvm::Argument* array_index = function->getArg(1);
Shape shape = ShapeUtil::MakeShape(S4, {3, 5});
llvm::Type* type = llvm_ir::ShapeToIrType(shape, &module_);
IrArray ir_array(array_ptr, type, shape);
IrArray::Index index(array_index, shape, &builder_);
ir_array.EmitReadArrayElement(index, &builder_);
std::string ir_str = DumpToString(&module_);
const char* filecheck_pattern = R"(
CHECK: define void @test_function(ptr %[[ptr:[0-9]+]], i32 %[[idx0:[0-9]+]]) {
COM: Calculate the address.
CHECK: %[[urem:[0-9]+]] = urem i32 %[[idx0]], 2
CHECK: %[[addr:[0-9]+]] = udiv i32 %[[idx0]], 2
CHECK: %[[mul:[0-9]+]] = mul i32 %[[urem]], 4
CHECK: %[[sub:[0-9]+]] = sub i32 4, %[[mul]]
CHECK: %[[trunc:[0-9]+]] = trunc i32 %[[sub]] to i8
CHECK: %[[gep:[0-9]+]] = getelementptr inbounds i8, ptr %[[ptr]], i32 %[[addr]]
COM: Load the element, optionally shift, and truncate.
CHECK: %[[load:[0-9]+]] = load i8, ptr %[[gep]], align 1
CHECK: %[[shift:[0-9]+]] = lshr i8 %[[load]], %[[trunc]]
CHECK: trunc i8 %[[shift]] to i4
)";
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_match,
RunFileCheck(ir_str, filecheck_pattern));
EXPECT_TRUE(filecheck_match);
}
TEST_F(IrArrayTest, EmitWriteArrayElementInt4) {
llvm::Function* function = EmitFunctionAndSetInsertPoint(
{builder_.getPtrTy(), builder_.getInt32Ty(), builder_.getIntNTy(4)});
llvm::Argument* array_ptr = function->getArg(0);
llvm::Argument* array_index = function->getArg(1);
llvm::Argument* val_to_write = function->getArg(2);
Shape shape = ShapeUtil::MakeShape(S4, {3, 5});
llvm::Type* type = llvm_ir::ShapeToIrType(shape, &module_);
IrArray ir_array(array_ptr, type, shape);
IrArray::Index index(array_index, shape, &builder_);
ir_array.EmitWriteArrayElement(index, val_to_write, &builder_);
std::string ir_str = DumpToString(&module_);
const char* filecheck_pattern = R"(
CHECK: define void @test_function(ptr %[[ptr:[0-9]+]], i32 %[[idx0:[0-9]+]], i4 %[[val:[0-9]+]]) {
COM: Calculate the address.
CHECK: %[[urem:[0-9]+]] = urem i32 %[[idx0]], 2
CHECK: %[[addr:[0-9]+]] = udiv i32 %[[idx0]], 2
CHECK: %[[mul:[0-9]+]] = mul i32 %[[urem]], 4
CHECK: %[[sub:[0-9]+]] = sub i32 4, %[[mul]]
CHECK: %[[trunc:[0-9]+]] = trunc i32 %[[sub]] to i8
CHECK: %[[gep:[0-9]+]] = getelementptr inbounds i8, ptr %[[ptr]], i32 %[[addr]]
COM: Load address, replace 4 bits with the value, and write to address.
CHECK: %[[load:[0-9]+]] = load i8, ptr %[[gep]], align 1
CHECK: %[[zext:[0-9]+]] = zext i4 %[[val]] to i8
CHECK: %[[shifted_val:[0-9]+]] = shl i8 %[[zext]], %[[trunc]]
CHECK: %[[mask:[0-9]+]] = call i8 @llvm.fshl.i8(i8 -16, i8 -16, i8 %[[trunc]])
CHECK: %[[and:[0-9]+]] = and i8 %[[load]], %[[mask]]
CHECK: %[[towrite:[0-9]+]] = or i8 %[[and]], %[[shifted_val]]
CHECK: store i8 %[[towrite]], ptr %[[gep]], align 1
)";
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_match,
RunFileCheck(ir_str, filecheck_pattern));
EXPECT_TRUE(filecheck_match);
}
}
}
}
| https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/llvm_ir/ir_array.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/llvm_ir/ir_array_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
a5f1c7f7-4c8d-4a2d-b296-8e3453f03101 | cpp | tensorflow/tensorflow | tpu_metadata_utils | tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_metadata_utils.cc | tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_metadata_utils_test.cc |
#include "tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_metadata_utils.h"
#include <optional>
#include <string>
#include <utility>
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/FormatVariadic.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/attribute_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
namespace mlir {
namespace TFTPU {
namespace {
constexpr char kStepMarkerLocationAttr[] = "step_marker_location";
constexpr char kUseXlaSpmdAttr[] = "use_spmd_for_xla_partitioning";
constexpr char kBadStringArrayElementMsg[] =
"bad '{0}' attribute at index {1}, not a string";
constexpr char kBadArrayElementMsg[] =
"bad '{0}' attribute at index {1} with value '{2}': failed to parse to {3}";
constexpr char kBadArrayAttrLengthMsg[] =
"bad '{0}' attribute, expected array attribute of size {1}, got size {2}";
std::string CreateMissingAttributeMsg(llvm::StringRef attribute) {
return llvm::formatv("requires attribute '{0}'", attribute).str();
}
LogicalResult SetMetadataProtoStepMarkerLocation(
tf_device::ClusterFuncOp op,
tensorflow::tpu::TPUCompileMetadataProto* metadata) {
auto step_marker_location =
op->getAttrOfType<StringAttr>(kStepMarkerLocationAttr);
if (!step_marker_location)
return op.emitOpError(CreateMissingAttributeMsg(kStepMarkerLocationAttr));
xla::DebugOptions::StepMarkerLocation location =
xla::DebugOptions::STEP_MARK_AT_ENTRY;
if (!step_marker_location.getValue().empty() &&
!xla::DebugOptions::StepMarkerLocation_Parse(
std::string(step_marker_location.getValue()), &location))
return op.emitOpError(llvm::formatv("bad '{0}' attribute with value '{1}'",
kStepMarkerLocationAttr,
step_marker_location.getValue()));
metadata->set_step_marker_location(location);
return success();
}
LogicalResult SetOpSharding(Operation* op, Attribute attr, llvm::StringRef name,
int index, xla::OpSharding* sharding_ptr) {
auto sharding_attr = mlir::dyn_cast<StringAttr>(attr);
if (!sharding_attr)
return op->emitOpError(
llvm::formatv(kBadStringArrayElementMsg, name, index));
if (tensorflow::DecodeShardingAttribute(sharding_attr, *sharding_ptr)
.failed()) {
return op->emitOpError(llvm::formatv(kBadArrayElementMsg, name, index,
sharding_attr.getValue(),
"xla::OpSharding"));
}
return success();
}
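// Populates one metadata Arg per cluster operand: its data type (resource
// operands become VARIABLE args, everything else PARAMETER), its shape
// (unknown rank for unranked types), its sharding parsed from the input
// sharding array attribute, whether the corresponding function argument holds
// the same data across replicas, and whether it was marked as a bounded
// dynamic-shape argument.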
LogicalResult SetMetadataProtoArgs(
tf_device::ClusterFuncOp op,
tensorflow::tpu::TPUCompileMetadataProto* metadata) {
auto input_shardings =
op->getAttrOfType<ArrayAttr>(tensorflow::kInputShardingAttr);
if (!input_shardings)
return op.emitOpError(
CreateMissingAttributeMsg(tensorflow::kInputShardingAttr));
if (input_shardings.size() != op.getNumOperands())
return op.emitOpError(
llvm::formatv(kBadArrayAttrLengthMsg, tensorflow::kInputShardingAttr,
op.getNumOperands(), input_shardings.size()));
mlir::StringAttr replication_attr_name = mlir::StringAttr::get(
op.getContext(), "mhlo.is_same_data_across_replicas");
auto dynamic_arg_idx = op->getAttrOfType<ArrayAttr>(TF::kDynamicArgIndexAttr);
llvm::SmallSet<int, 4> dynamic_arg_idx_set;
if (dynamic_arg_idx) {
for (auto idx : dynamic_arg_idx.getValue()) {
dynamic_arg_idx_set.insert(mlir::dyn_cast<IntegerAttr>(idx).getInt());
}
}
for (auto operand_type_and_idx : llvm::enumerate(op.getOperandTypes())) {
Type operand_type = operand_type_and_idx.value();
int index = operand_type_and_idx.index();
tensorflow::tpu::TPUCompileMetadataProto::Arg* arg = metadata->add_args();
tensorflow::DataType dtype;
tensorflow::Status status =
tensorflow::ConvertToDataType(operand_type, &dtype);
if (!status.ok())
return op.emitOpError(
llvm::formatv("failed to determine operand type at index {0}: {1}",
index, status.message()));
arg->set_dtype(dtype);
if (dtype == tensorflow::DT_RESOURCE)
arg->set_kind(tensorflow::tpu::TPUCompileMetadataProto::Arg::VARIABLE);
else
arg->set_kind(tensorflow::tpu::TPUCompileMetadataProto::Arg::PARAMETER);
*arg->mutable_shape() = tensorflow::TensorShapeProto();
if (auto ranked_tensor_type =
mlir::dyn_cast<RankedTensorType>(operand_type)) {
tensorflow::TensorShapeProto shape_proto;
ConvertToTensorShapeProto(ranked_tensor_type.getShape(), &shape_proto);
*arg->mutable_shape() = std::move(shape_proto);
} else {
arg->mutable_shape()->set_unknown_rank(true);
}
if (failed(SetOpSharding(op, input_shardings.getValue()[index],
tensorflow::kInputShardingAttr, index,
arg->mutable_sharding())))
return failure();
auto attr = op.getFuncOp().getArgAttrOfType<mlir::BoolAttr>(
index, replication_attr_name);
arg->set_is_same_data_across_replicas(attr != nullptr && attr.getValue());
arg->mutable_is_bounded_dynamic_dim()->Add(
dynamic_arg_idx_set.contains(index));
}
return success();
}
LogicalResult SetMetadataProtoRetvals(
tf_device::ClusterFuncOp op,
tensorflow::tpu::TPUCompileMetadataProto* metadata) {
auto output_shardings =
op->getAttrOfType<ArrayAttr>(tensorflow::kOutputShardingAttr);
if (!output_shardings)
return op.emitOpError(
CreateMissingAttributeMsg(tensorflow::kOutputShardingAttr));
if (output_shardings.size() != op.getNumResults())
return op.emitOpError(
llvm::formatv(kBadArrayAttrLengthMsg, tensorflow::kOutputShardingAttr,
op.getNumResults(), output_shardings.size()));
for (auto output_sharding_and_idx : llvm::enumerate(output_shardings))
if (failed(SetOpSharding(op, output_sharding_and_idx.value(),
tensorflow::kOutputShardingAttr,
output_sharding_and_idx.index(),
metadata->add_retvals()->mutable_sharding())))
return failure();
return success();
}
}
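// Fills the TPUCompileMetadataProto for a cluster_func op: optional serialized
// compile options, replica and core counts, step-marker location, optional
// device assignment, the SPMD partitioning flag, and finally the per-argument
// and per-result entries.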
LogicalResult SetMetadataProtoFromClusterFuncOp(
tf_device::ClusterFuncOp op, int num_replicas, int num_cores_per_replica,
std::optional<xla::DeviceAssignmentProto>&& xla_device_assignment,
tensorflow::tpu::TPUCompileMetadataProto* metadata) {
if (auto options_attr =
op->getAttrOfType<StringAttr>("tpu_compile_options_proto")) {
if (!metadata->mutable_compile_options()->ParseFromArray(
options_attr.data(), options_attr.size())) {
return failure();
}
}
metadata->set_num_replicas(num_replicas);
metadata->set_num_cores_per_replica(num_cores_per_replica);
if (failed(SetMetadataProtoStepMarkerLocation(op, metadata)))
return failure();
if (xla_device_assignment.has_value())
*metadata->mutable_device_assignment() =
std::move(xla_device_assignment.value());
auto use_spmd_attr = op->getAttrOfType<BoolAttr>(kUseXlaSpmdAttr);
if (!use_spmd_attr)
return op.emitOpError(CreateMissingAttributeMsg(kUseXlaSpmdAttr));
metadata->set_use_spmd_for_xla_partitioning(use_spmd_attr.getValue());
if (failed(SetMetadataProtoArgs(op, metadata))) return failure();
return SetMetadataProtoRetvals(op, metadata);
}
}
}
| #include "tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_metadata_utils.h"
#include <ostream>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/register_common_dialects.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace mlir {
namespace TFTPU {
namespace {
using mlir::DialectRegistry;
using mlir::MLIRContext;
using mlir::ModuleOp;
using mlir::OwningOpRef;
class ProtoStringMatcher {
public:
explicit ProtoStringMatcher(const tsl::protobuf::Message& expected)
: expected_(expected.SerializeAsString()) {}
template <typename Message>
bool MatchAndExplain(const Message& p, testing::MatchResultListener*) const {
return p.SerializeAsString() == expected_;
}
void DescribeTo(::std::ostream* os) const { *os << expected_; }
void DescribeNegationTo(::std::ostream* os) const {
*os << "not equal to expected message: " << expected_;
}
private:
const std::string expected_;
};
inline ::testing::PolymorphicMatcher<ProtoStringMatcher> EqualsProto(
const tsl::protobuf::Message& x) {
return ::testing::MakePolymorphicMatcher(ProtoStringMatcher(x));
}
std::string TestDataPath() {
return tensorflow::GetDataDependencyFilepath(
"tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/testdata/");
}
class TpuMetadataUtilsTest : public ::testing::Test {
public:
TpuMetadataUtilsTest() {
mlir::RegisterCommonToolingDialects(registry_);
context_.appendDialectRegistry(registry_);
context_.loadAllAvailableDialects();
}
absl::StatusOr<std::vector<mlir::tf_device::ClusterFuncOp>> GetClusterFuncOps(
absl::string_view mlir_module_filename) {
TF_RETURN_IF_ERROR(CreateMlirModule(mlir_module_filename));
std::vector<mlir::tf_device::ClusterFuncOp> cluster_func_ops;
mlir_module_->walk([&](mlir::tf_device::ClusterFuncOp op) {
cluster_func_ops.push_back(op);
});
return cluster_func_ops;
}
private:
absl::Status CreateMlirModule(absl::string_view mlir_module_filename) {
std::string mlir_module_path =
absl::StrCat(TestDataPath(), mlir_module_filename);
mlir_module_ =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context_);
if (!mlir_module_) {
return absl::Status(
absl::StatusCode::kNotFound,
absl::StrCat("Could not find MLIR module at ", mlir_module_path));
}
return absl::OkStatus();
}
DialectRegistry registry_;
MLIRContext context_;
OwningOpRef<mlir::ModuleOp> mlir_module_;
};
TEST_F(TpuMetadataUtilsTest, SingleDevice) {
TF_ASSERT_OK_AND_ASSIGN(auto cluster_func_ops,
GetClusterFuncOps("basic_cluster.mlir"));
mlir::tf_device::ClusterFuncOp cluster_func_op = cluster_func_ops.front();
tensorflow::tpu::TPUCompileMetadataProto compile_metadata;
ASSERT_TRUE(mlir::succeeded(SetMetadataProtoFromClusterFuncOp(
cluster_func_op,
/*num_replicas=*/1, /*num_cores_per_replica=*/1, {}, &compile_metadata)));
tensorflow::tpu::TPUCompileMetadataProto expected_compile_metadata;
ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
R"pb(
num_replicas: 1 num_cores_per_replica: 1
)pb",
&expected_compile_metadata));
EXPECT_THAT(compile_metadata, EqualsProto(expected_compile_metadata));
}
TEST_F(TpuMetadataUtilsTest, spmd) {
TF_ASSERT_OK_AND_ASSIGN(auto cluster_func_ops,
GetClusterFuncOps("spmd.mlir"));
mlir::tf_device::ClusterFuncOp cluster_func_op = cluster_func_ops.front();
tensorflow::tpu::TPUCompileMetadataProto compile_metadata;
ASSERT_TRUE(mlir::succeeded(SetMetadataProtoFromClusterFuncOp(
cluster_func_op,
/*num_replicas=*/1, /*num_cores_per_replica=*/2, {}, &compile_metadata)));
tensorflow::tpu::TPUCompileMetadataProto expected_compile_metadata;
ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
R"pb(
args {
dtype: DT_FLOAT
shape { unknown_rank: true }
kind: PARAMETER
sharding {
type: OTHER
tile_assignment_dimensions: 2
tile_assignment_dimensions: 1
tile_assignment_devices: 0
tile_assignment_devices: 1
}
is_bounded_dynamic_dim: false
}
retvals { sharding {} }
num_replicas: 1
num_cores_per_replica: 2
use_spmd_for_xla_partitioning: true
compile_options {}
)pb",
&expected_compile_metadata));
EXPECT_THAT(compile_metadata, EqualsProto(expected_compile_metadata));
}
}
}
}
| https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_metadata_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_metadata_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
73030787-d952-461e-beaf-51cb58b76e6a | cpp | tensorflow/tensorflow | node_def_builder | tensorflow/core/framework/node_def_builder.cc | tensorflow/core/framework/node_def_builder_test.cc |
#include "tensorflow/core/framework/node_def_builder.h"
#include <vector>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
NodeDefBuilder::NodeOut::NodeOut(StringPiece n, int i, DataType dt)
: node(n), index(i), data_type(dt) {}
NodeDefBuilder::NodeOut::NodeOut() {
}
void NodeDefBuilder::NodeOut::Reset(StringPiece n, int i, DataType dt) {
node = string(n);
index = i;
data_type = dt;
}
NodeDefBuilder::NodeDefBuilder(StringPiece name, StringPiece op_name,
const OpRegistryInterface* op_registry,
const NodeDebugInfo* debug) {
node_def_.set_name(string(name));
const Status status = op_registry->LookUpOpDef(string(op_name), &op_def_);
if (status.ok()) {
Initialize();
} else {
errors_.push_back(std::string(status.message()));
inputs_specified_ = 0;
}
if (debug != nullptr) MergeDebugInfo(*debug, &node_def_);
}
NodeDefBuilder::NodeDefBuilder(StringPiece name, StringPiece op_name,
const NodeDebugInfo& debug)
: NodeDefBuilder(name, op_name) {
MergeDebugInfo(debug, &node_def_);
}
NodeDefBuilder::NodeDefBuilder(StringPiece name, const OpDef* op_def)
: op_def_(op_def) {
node_def_.set_name(string(name));
Initialize();
}
void NodeDefBuilder::Initialize() {
inputs_specified_ = 0;
node_def_.set_op(op_def_->name());
}
const OpDef::ArgDef* NodeDefBuilder::NextArgDef() {
if (!NextArgAvailable()) return nullptr;
return &op_def_->input_arg(inputs_specified_++);
}
bool NodeDefBuilder::NextArgAvailable() {
if (op_def_ == nullptr) {
return false;
} else if (inputs_specified_ >= op_def_->input_arg_size()) {
errors_.push_back(strings::StrCat("More Input() calls than the ",
op_def_->input_arg_size(),
" input_args"));
return false;
}
return true;
}
NodeDefBuilder& NodeDefBuilder::Input(FakeInputFunctor fake_input) {
if (NextArgAvailable()) {
Status status = fake_input(*op_def_, inputs_specified_, node_def_, this);
if (!status.ok()) errors_.push_back(std::string(status.message()));
}
return *this;
}
NodeDefBuilder& NodeDefBuilder::Input(StringPiece src_node, int src_index,
DataType dt) {
const OpDef::ArgDef* arg = NextArgDef();
if (arg != nullptr) SingleInput(arg, src_node, src_index, dt);
return *this;
}
NodeDefBuilder& NodeDefBuilder::Input(const NodeOut& src) {
Input(src.node, src.index, src.data_type);
return *this;
}
NodeDefBuilder& NodeDefBuilder::Input(absl::Span<const NodeOut> src_list) {
const OpDef::ArgDef* arg = NextArgDef();
if (arg != nullptr) ListInput(arg, src_list);
return *this;
}
void NodeDefBuilder::SingleInput(const OpDef::ArgDef* input_arg,
StringPiece src_node, int src_index,
DataType dt) {
AddInput(src_node, src_index);
if (!input_arg->number_attr().empty() ||
!input_arg->type_list_attr().empty()) {
errors_.push_back(strings::StrCat("Single tensor passed to '",
input_arg->name(), "', expected list"));
return;
}
if (input_arg->type() != DT_INVALID) {
const DataType expected = MaybeAddRef(input_arg, input_arg->type());
VerifyInputType(input_arg, expected, dt);
} else {
VerifyInputRef(input_arg, dt);
Attr(input_arg->type_attr(), BaseType(dt));
}
}
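// Handles a list-typed input arg. With a number attr, the list length is
// recorded and the single element type is checked or inferred; with a
// type-list attr, each element's base type is recorded; otherwise the op
// expected a single tensor and an error is recorded.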
void NodeDefBuilder::ListInput(const OpDef::ArgDef* input_arg,
absl::Span<const NodeOut> src_list) {
for (const auto& node_out : src_list) {
AddInput(node_out.node, node_out.index);
}
if (!input_arg->number_attr().empty()) {
Attr(input_arg->number_attr(), static_cast<int64_t>(src_list.size()));
if (input_arg->type() != DT_INVALID) {
const DataType expected = MaybeAddRef(input_arg, input_arg->type());
for (const auto& node_out : src_list) {
VerifyInputType(input_arg, expected, node_out.data_type);
}
} else if (!src_list.empty()) {
const DataType base = BaseType(src_list[0].data_type);
Attr(input_arg->type_attr(), base);
const DataType expected = MaybeAddRef(input_arg, base);
for (const auto& node_out : src_list) {
VerifyInputType(input_arg, expected, node_out.data_type);
}
}
} else if (!input_arg->type_list_attr().empty()) {
DataTypeVector type_vec;
type_vec.reserve(src_list.size());
for (const auto& node_out : src_list) {
const DataType dt = node_out.data_type;
VerifyInputRef(input_arg, dt);
type_vec.push_back(BaseType(dt));
}
Attr(input_arg->type_list_attr(), type_vec);
} else {
errors_.push_back(strings::StrCat("List provided to input '",
input_arg->name(),
"' when single Tensor expected"));
}
}
void NodeDefBuilder::AddInput(StringPiece src_node, int src_index) {
if (src_node.empty()) {
errors_.push_back("Empty input node name");
} else if (src_node[0] == '^') {
errors_.push_back(
strings::StrCat("Non-control input starting with ^: ", src_node));
} else if (src_index > 0) {
node_def_.add_input(strings::StrCat(src_node, ":", src_index));
} else {
node_def_.add_input(string(src_node));
}
}
void NodeDefBuilder::VerifyInputType(const OpDef::ArgDef* input_arg,
DataType expected, DataType dt) {
if (!TypesCompatible(expected, dt)) {
errors_.push_back(strings::StrCat("Input '", input_arg->name(), "' passed ",
DataTypeString(dt), " expected ",
DataTypeString(expected)));
}
}
void NodeDefBuilder::VerifyInputRef(const OpDef::ArgDef* input_arg,
DataType dt) {
if (input_arg->is_ref() && !IsRefType(dt)) {
errors_.push_back(strings::StrCat("Input '", input_arg->name(), "' passed ",
DataTypeString(dt),
" expected ref type"));
}
}
NodeDefBuilder& NodeDefBuilder::ControlInput(StringPiece src_node) {
control_inputs_.emplace_back(src_node);
return *this;
}
NodeDefBuilder& NodeDefBuilder::Device(StringPiece device_spec) {
node_def_.set_device(string(device_spec));
return *this;
}
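// Produces the final NodeDef. Accumulated errors (including too few Input()
// calls) are reported as one InvalidArgument status; otherwise the NodeDef is
// copied or moved out, control inputs are appended as "^node" entries, and
// unspecified attrs receive their OpDef defaults.
//
// Typical use (a sketch; assumes an op such as "MatMul" is registered):
//   NodeDef node_def;
//   TF_RETURN_IF_ERROR(NodeDefBuilder("my_matmul", "MatMul")
//                          .Input("a", 0, DT_FLOAT)
//                          .Input("b", 0, DT_FLOAT)
//                          .Attr("transpose_a", false)
//                          .Finalize(&node_def));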
Status NodeDefBuilder::Finalize(NodeDef* node_def, bool consume) {
const std::vector<string>* errors_ptr = &errors_;
std::vector<string> errors_storage;
if (op_def_ != nullptr && inputs_specified_ < op_def_->input_arg_size()) {
errors_storage = errors_;
errors_storage.push_back(
strings::StrCat(inputs_specified_, " inputs specified of ",
op_def_->input_arg_size(), " inputs in Op"));
errors_ptr = &errors_storage;
}
if (!errors_ptr->empty()) {
if (errors_ptr->size() == 1) {
if (op_def_ == nullptr) {
return errors::InvalidArgument((*errors_ptr)[0],
" while building NodeDef '",
node_def_.name(), "'");
}
return errors::InvalidArgument(
(*errors_ptr)[0], " while building NodeDef '", node_def_.name(),
"' using ", SummarizeOpDef(*op_def_));
} else {
if (op_def_ == nullptr) {
return errors::InvalidArgument(
errors_ptr->size(), " errors while building NodeDef '",
node_def_.name(), "':\n", absl::StrJoin(*errors_ptr, "\n"));
}
return errors::InvalidArgument(
errors_ptr->size(), " errors while building NodeDef '",
node_def_.name(), "' using ", SummarizeOpDef(*op_def_), ":\n",
absl::StrJoin(*errors_ptr, "\n"));
}
} else {
NodeDef node_def_backup;
if (node_def == nullptr) node_def = &node_def_backup;
if (consume) {
*node_def = std::move(node_def_);
} else {
*node_def = node_def_;
}
for (const auto& control_input : control_inputs_) {
node_def->add_input(strings::StrCat("^", control_input));
}
AddDefaultsToNodeDef(*op_def_, node_def);
return absl::OkStatus();
}
}
bool NodeDefBuilder::AttrValueAlreadyPresent(StringPiece name,
const AttrValue& value) {
if (const AttrValue* found = AttrSlice(node_def_).Find(name)) {
if (!AreAttrValuesEqual(*found, value)) {
errors_.push_back(strings::StrCat("Inconsistent values for attr '", name,
"' ", SummarizeAttrValue(*found),
" vs. ", SummarizeAttrValue(value)));
}
return true;
}
return false;
}
NodeDefBuilder& NodeDefBuilder::Attr(StringPiece name, const AttrValue& value) {
if (!AttrValueAlreadyPresent(name, value)) {
AddNodeAttr(name, value, &node_def_);
}
return *this;
}
NodeDefBuilder& NodeDefBuilder::Attr(StringPiece name, AttrValue&& value) {
if (!AttrValueAlreadyPresent(name, value)) {
AddNodeAttr(name, std::move(value), &node_def_);
}
return *this;
}
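// Stamps out an Attr() overload for each supported value type; each overload
// wraps the value in an AttrValue via SetAttrValue and defers to the AttrValue
// overload above, which also flags conflicting values for the same attr name.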
#define ATTR(T) \
NodeDefBuilder& NodeDefBuilder::Attr(StringPiece name, T value) { \
AttrValue attr_value; \
SetAttrValue(value, &attr_value); \
return Attr(name, attr_value); \
}
ATTR(StringPiece)
ATTR(const char*)
ATTR(int32_t)
ATTR(int64_t)
ATTR(float)
ATTR(double)
ATTR(bool)
ATTR(DataType)
ATTR(const PartialTensorShape&)
ATTR(const Tensor&)
ATTR(const TensorProto&)
ATTR(const NameAttrList&)
ATTR(absl::Span<const StringPiece>)
ATTR(absl::Span<const char* const>)
ATTR(absl::Span<const string>)
ATTR(absl::Span<const tstring>)
ATTR(absl::Span<const int32>)
ATTR(absl::Span<const int64_t>)
ATTR(absl::Span<const float>)
ATTR(absl::Span<const bool>)
ATTR(const std::vector<bool>&)
ATTR(absl::Span<const DataType>)
ATTR(absl::Span<const TensorShape>)
ATTR(absl::Span<const PartialTensorShape>)
ATTR(absl::Span<const TensorShapeProto>)
ATTR(absl::Span<const Tensor>)
ATTR(absl::Span<const NameAttrList>)
#undef ATTR
}
| #include "tensorflow/core/framework/node_def_builder.h"
#include <memory>
#include <vector>
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class NodeDefBuilderTest : public ::testing::Test {
protected:
void Op(const OpDefBuilder& op_def_builder) {
OpRegistrationData op_reg_data;
TF_EXPECT_OK(op_def_builder.Finalize(&op_reg_data));
op_def_ = op_reg_data.op_def;
}
NodeDefBuilder& Builder() {
EXPECT_FALSE(op_def_.name().empty()) << "Must call Op() before Builder()";
builder_ = std::make_unique<NodeDefBuilder>("n", &op_def_);
return *builder_;
}
void ExpectSuccess(NodeDefBuilder& builder,
DataTypeSlice expected_in_types,
DataTypeSlice expected_out_types, StringPiece proto) {
NodeDef node_def;
Status status = builder.Finalize(&node_def);
TF_EXPECT_OK(status);
if (!status.ok()) return;
NodeDef expected;
protobuf::TextFormat::ParseFromString(strings::StrCat("name: 'n' ", proto),
&expected);
EXPECT_EQ(node_def.DebugString(), expected.DebugString());
DataTypeVector in_types, out_types;
status =
InOutTypesForNode(node_def, builder.op_def(), &in_types, &out_types);
TF_EXPECT_OK(status);
if (!status.ok()) return;
EXPECT_EQ(DataTypeSliceString(expected_in_types),
DataTypeVectorString(in_types));
EXPECT_EQ(DataTypeSliceString(expected_out_types),
DataTypeVectorString(out_types));
status = ValidateNodeDef(node_def, op_def_);
TF_EXPECT_OK(status);
}
void ExpectFailures(NodeDefBuilder& builder,
const std::vector<string>& messages) {
NodeDef node_def;
Status status = builder.Finalize(&node_def);
EXPECT_FALSE(status.ok()) << SummarizeNodeDef(node_def);
if (status.ok()) return;
for (const string& message : messages) {
EXPECT_TRUE(absl::StrContains(status.message(), message))
<< status << ", " << message;
}
}
void ExpectFailure(NodeDefBuilder& builder,
const string& message) {
ExpectFailures(builder, {message});
}
void ExpectInvalid(NodeDefBuilder& builder,
const string& message) {
NodeDef node_def;
Status status = builder.Finalize(&node_def);
if (status.ok()) {
status = ValidateNodeDef(node_def, op_def_);
}
EXPECT_FALSE(status.ok()) << SummarizeNodeDef(node_def);
if (status.ok()) return;
EXPECT_TRUE(absl::StrContains(status.message(), message))
<< "Actual error: " << status.message()
<< "\nDoes not contain: " << message;
}
OpDef op_def_;
std::unique_ptr<NodeDefBuilder> builder_;
};
TEST_F(NodeDefBuilderTest, Simple) {
Op(OpDefBuilder("Simple").Input("a: int32").Output("out: float"));
ExpectSuccess(Builder().Input("x", 0, DT_INT32), {DT_INT32}, {DT_FLOAT},
R"proto(op: "Simple" input: "x")proto");
ExpectSuccess(Builder().Input("y", 2, DT_INT32), {DT_INT32}, {DT_FLOAT},
R"proto(op: "Simple" input: "y:2")proto");
ExpectSuccess(Builder().Input(FakeInput()), {DT_INT32}, {DT_FLOAT}, R"proto(
op: "Simple"
input: "a")proto");
ExpectSuccess(Builder().Input(FakeInput(DT_INT32)), {DT_INT32}, {DT_FLOAT},
R"proto(op: "Simple" input: "a")proto");
ExpectSuccess(Builder().Input(FakeInput(DT_INT32_REF)), {DT_INT32},
{DT_FLOAT}, R"proto(op: "Simple" input: "a")proto");
ExpectSuccess(
Builder().ControlInput("x").Input(FakeInput()).ControlInput("y"),
{DT_INT32}, {DT_FLOAT}, R"proto(
op: "Simple"
input: [ "a", "^x", "^y" ])proto");
ExpectSuccess(Builder().Input(FakeInput()).Device("ddd"), {DT_INT32},
{DT_FLOAT}, R"proto(
op: "Simple" input: "a" device: "ddd")proto");
ExpectFailure(Builder().Input("x", 0, DT_INT32).Input("y", 0, DT_INT32),
"More Input() calls than the 1 input_args while building "
"NodeDef 'n' using Op<name=Simple; signature=a:int32 -> "
"out:float>");
ExpectFailure(Builder(), "0 inputs specified of 1 inputs in Op while");
{
NodeDefBuilder& builder = Builder();
TF_EXPECT_OK(builder.Input(FakeInput()).Finalize(nullptr));
ExpectSuccess(builder, {DT_INT32}, {DT_FLOAT}, R"proto(
op: "Simple"
input: "a")proto");
}
{
NodeDefBuilder& builder = Builder();
ExpectFailure(builder, "0 inputs specified of 1 inputs in Op while");
builder.Input(FakeInput());
ExpectSuccess(builder, {DT_INT32}, {DT_FLOAT}, R"proto(
op: "Simple"
input: "a")proto");
builder.Input(FakeInput(DT_INT32));
ExpectFailure(builder, "More Input() calls than the 1 input_args while");
}
ExpectFailure(Builder().Input("x", 0, DT_FLOAT),
"Input 'a' passed float expected int32 ");
ExpectFailure(Builder().Input("x", 0, DT_FLOAT_REF),
"Input 'a' passed float_ref expected int32 ");
ExpectFailure(Builder().Input(FakeInput(3, DT_FLOAT)),
"List provided to input 'a' when single Tensor expected while");
ExpectFailure(Builder().Input(FakeInput(3)),
"List provided to input 'a' when single Tensor expected while");
ExpectInvalid(Builder().Input(FakeInput()).ControlInput("z:2"),
"Control input '^z:2' must not have ':' in NodeDef:");
ExpectFailure(Builder().Input("", 0, DT_INT32),
"Empty input node name while");
ExpectFailure(Builder().Input("^x", 0, DT_INT32),
"Non-control input starting with ^: ^x while");
}
TEST_F(NodeDefBuilderTest, OpDoesNotExist) {
NodeDefBuilder builder("n", "Op Does Not Exist");
builder.Input(FakeInput())
.Input(FakeInput(12))
.ControlInput("y")
.Attr("foo", 12)
.Device("device");
ExpectFailures(builder, {"Op type not registered 'Op Does Not Exist'",
"while building NodeDef 'n'"});
}
TEST_F(NodeDefBuilderTest, Polymorphic) {
Op(OpDefBuilder("Polymorphic")
.Input("v: T")
.Output("out: T")
.Attr("T: type"));
ExpectSuccess(Builder().Input(FakeInput(DT_INT32)), {DT_INT32}, {DT_INT32},
R"proto(
op: "Polymorphic"
input: "a"
attr {
key: "T"
value { type: DT_INT32 }
})proto");
ExpectSuccess(Builder().Input(FakeInput(DT_FLOAT)), {DT_FLOAT}, {DT_FLOAT},
R"proto(
op: "Polymorphic"
input: "a"
attr {
key: "T"
value { type: DT_FLOAT }
})proto");
ExpectSuccess(Builder().Input(FakeInput(DT_BOOL)).Attr("T", DT_BOOL),
{DT_BOOL}, {DT_BOOL}, R"proto(
op: "Polymorphic"
input: "a"
attr {
key: "T"
value { type: DT_BOOL }
})proto");
ExpectFailure(Builder().Input(FakeInput(DT_BOOL)).Attr("T", DT_STRING),
"Inconsistent values for attr 'T' DT_BOOL vs. DT_STRING while");
ExpectFailure(Builder().Attr("T", DT_STRING).Input(FakeInput(DT_BOOL)),
"Inconsistent values for attr 'T' DT_STRING vs. DT_BOOL while");
ExpectFailure(Builder().Attr("T", 12).Input(FakeInput(DT_BOOL)),
"Inconsistent values for attr 'T' 12 vs. DT_BOOL while");
}
TEST_F(NodeDefBuilderTest, PolymorphicOut) {
Op(OpDefBuilder("PolymorphicOut").Output("out: T").Attr("T: type"));
ExpectSuccess(Builder().Attr("T", DT_INT32), {}, {DT_INT32}, R"proto(
op: "PolymorphicOut"
attr {
key: "T"
value { type: DT_INT32 }
})proto");
ExpectSuccess(Builder().Attr("T", DT_FLOAT), {}, {DT_FLOAT}, R"proto(
op: "PolymorphicOut"
attr {
key: "T"
value { type: DT_FLOAT }
})proto");
ExpectSuccess(Builder().Attr("T", DT_FLOAT).Attr("T", DT_FLOAT), {},
{DT_FLOAT}, R"proto(
op: "PolymorphicOut"
attr {
key: "T"
value { type: DT_FLOAT }
})proto");
ExpectFailure(Builder().Attr("T", DT_BOOL).Attr("T", DT_FLOAT),
"Inconsistent values for attr 'T' DT_BOOL vs. DT_FLOAT while");
ExpectInvalid(Builder(), "NodeDef missing attr 'T' from");
ExpectInvalid(
Builder().Attr("T", {DT_INT32, DT_BOOL}),
"AttrValue had value with type 'list(type)' when 'type' expected");
ExpectInvalid(Builder().Attr("T", 12),
"AttrValue had value with type 'int' when 'type' expected");
}
TEST_F(NodeDefBuilderTest, PolymorphicDefaultOut) {
Op(OpDefBuilder("PolymorphicDefaultOut")
.Output("out: T")
.Attr("T: type = DT_STRING"));
ExpectSuccess(Builder(), {}, {DT_STRING}, R"proto(
op: "PolymorphicDefaultOut"
attr {
key: "T"
value { type: DT_STRING }
})proto");
ExpectSuccess(Builder().Attr("T", DT_BOOL), {}, {DT_BOOL}, R"proto(
op: "PolymorphicDefaultOut"
attr {
key: "T"
value { type: DT_BOOL }
})proto");
}
TEST_F(NodeDefBuilderTest, Binary) {
Op(OpDefBuilder("Binary").Input("a: T").Input("b: T").Output("out: T").Attr(
"T: type"));
ExpectSuccess(Builder().Input(FakeInput(DT_INT32)).Input(FakeInput(DT_INT32)),
{DT_INT32, DT_INT32}, {DT_INT32}, R"proto(
op: "Binary"
input: "a"
input: "b"
attr {
key: "T"
value { type: DT_INT32 }
})proto");
ExpectSuccess(Builder().Input(FakeInput(DT_STRING)).Input(FakeInput()),
{DT_STRING, DT_STRING}, {DT_STRING}, R"proto(
op: "Binary"
input: "a"
input: "b"
attr {
key: "T"
value { type: DT_STRING }
})proto");
ExpectFailure(Builder().Input(FakeInput(DT_BOOL)).Input(FakeInput(DT_STRING)),
"Inconsistent values for attr 'T' DT_BOOL vs. DT_STRING while");
}
TEST_F(NodeDefBuilderTest, Restrict) {
Op(OpDefBuilder("Restrict")
.Input("a: T")
.Output("out: T")
.Attr("T: {string, bool}"));
ExpectSuccess(Builder().Input(FakeInput(DT_STRING)), {DT_STRING}, {DT_STRING},
R"proto(
op: "Restrict"
input: "a"
attr {
key: "T"
value { type: DT_STRING }
})proto");
ExpectInvalid(Builder().Input(FakeInput(DT_INT32)),
"Value for attr 'T' of int32 is not in the list of allowed "
"values: string, bool");
}
TEST_F(NodeDefBuilderTest, TypeList) {
Op(OpDefBuilder("TypeList").Input("a: T").Attr("T: list(type)"));
ExpectSuccess(Builder().Input(FakeInput({DT_STRING, DT_INT32})),
{DT_STRING, DT_INT32}, {}, R"proto(
op: "TypeList"
input: [ "a", "a:1" ]
attr {
key: "T"
value { list { type: [ DT_STRING, DT_INT32 ] } }
}
)proto");
ExpectSuccess(Builder().Input(FakeInput(3, DT_BOOL)),
{DT_BOOL, DT_BOOL, DT_BOOL}, {}, R"proto(
op: "TypeList"
input: [ "a", "a:1", "a:2" ]
attr {
key: "T"
value { list { type: [ DT_BOOL, DT_BOOL, DT_BOOL ] } }
}
)proto");
ExpectInvalid(Builder().Input(FakeInput(0)),
"Length for attr 'T' of 0 must be at least minimum 1");
ExpectInvalid(Builder().Input(FakeInput({})),
"Length for attr 'T' of 0 must be at least minimum 1");
ExpectInvalid(Builder().Input(FakeInput(DT_BOOL)),
"Single tensor passed to 'a', expected list while");
ExpectFailures(Builder().Input(FakeInput()),
{"2 errors while building NodeDef",
"Could not infer list of types for input 'a': "
"No attr named 'T' in NodeDef:",
"0 inputs specified of 1 inputs in Op"});
}
TEST_F(NodeDefBuilderTest, TypeListNoMin) {
Op(OpDefBuilder("TypeListNoMin").Input("a: T").Attr("T: list(type) >= 0"));
ExpectSuccess(Builder().Input(FakeInput(0)), {}, {}, R"proto(
op: "TypeListNoMin"
attr {
key: "T"
value { list {} }
})proto");
ExpectSuccess(Builder().Input(FakeInput(DataTypeVector())), {}, {}, R"proto(
op: "TypeListNoMin"
attr {
key: "T"
value { list {} }
})proto");
ExpectSuccess(Builder().Input(FakeInput({})), {}, {}, R"proto(
op: "TypeListNoMin"
attr {
key: "T"
value { list {} }
})proto");
ExpectSuccess(Builder().Input(FakeInput({DT_BOOL})), {DT_BOOL}, {}, R"proto(
op: "TypeListNoMin"
input: "a"
attr {
key: "T"
value { list { type: DT_BOOL } }
})proto");
}
TEST_F(NodeDefBuilderTest, TypeListTwice) {
Op(OpDefBuilder("TypeListTwice")
.Input("a: T")
.Input("b: T")
.Attr("T: list(type) >= 0"));
ExpectSuccess(Builder()
.Input(FakeInput({DT_INT32, DT_BOOL}))
.Input(FakeInput({DT_INT32, DT_BOOL})),
{DT_INT32, DT_BOOL, DT_INT32, DT_BOOL}, {}, R"proto(
op: "TypeListTwice"
input: [ "a", "a:1", "b", "b:1" ]
attr {
key: "T"
value { list { type: [ DT_INT32, DT_BOOL ] } }
})proto");
ExpectSuccess(
Builder().Input(FakeInput({DT_INT32, DT_BOOL})).Input(FakeInput()),
{DT_INT32, DT_BOOL, DT_INT32, DT_BOOL}, {}, R"proto(
op: "TypeListTwice"
input: [ "a", "a:1", "b", "b:1" ]
attr {
key: "T"
value { list { type: [ DT_INT32, DT_BOOL ] } }
})proto");
ExpectSuccess(Builder().Input(FakeInput(0)).Input(FakeInput(0)), {}, {},
R"proto(
op: "TypeListTwice"
attr {
key: "T"
value { list {} }
})proto");
ExpectSuccess(Builder().Input(FakeInput(0)).Input(FakeInput()), {}, {},
R"proto(
op: "TypeListTwice"
attr {
key: "T"
value { list {} }
})proto");
ExpectFailure(Builder()
.Input(FakeInput({DT_INT32, DT_BOOL}))
.Input(FakeInput({DT_INT32, DT_STRING})),
"Inconsistent values for attr 'T' [DT_INT32, DT_BOOL] vs. "
"[DT_INT32, DT_STRING] while");
}
TEST_F(NodeDefBuilderTest, OutTypeList) {
Op(OpDefBuilder("OutTypeList").Output("out: T").Attr("T: list(type) >= 0"));
ExpectSuccess(Builder().Attr("T", {DT_FLOAT}), {}, {DT_FLOAT}, R"proto(
op: "OutTypeList"
attr {
key: "T"
value { list { type: DT_FLOAT } }
})proto");
ExpectSuccess(Builder().Attr("T", {DT_STRING, DT_BOOL}), {},
{DT_STRING, DT_BOOL}, R"proto(
op: "OutTypeList"
attr {
key: "T"
value { list { type: [ DT_STRING, DT_BOOL ] } }
})proto");
ExpectSuccess(Builder().Attr("T", DataTypeVector()), {}, {}, R"proto(
op: "OutTypeList"
attr {
key: "T"
value { list {} }
})proto");
ExpectInvalid(
Builder().Attr("T", DT_FLOAT),
"AttrValue had value with type 'type' when 'list(type)' expected");
}
TEST_F(NodeDefBuilderTest, TypeListRestrict) {
Op(OpDefBuilder("TypeListRestrict")
.Input("a: T")
.Attr("T: list({string, bool}) >= 0"));
ExpectSuccess(Builder().Input(FakeInput({DT_STRING, DT_BOOL})),
{DT_STRING, DT_BOOL}, {}, R"proto(
op: "TypeListRestrict"
input: [ "a", "a:1" ]
attr {
key: "T"
value { list { type: [ DT_STRING, DT_BOOL ] } }
})proto");
ExpectInvalid(Builder().Input(FakeInput({DT_STRING, DT_INT32})),
"Value for attr 'T' of int32 is not in the list of allowed "
"values: string, bool");
}
TEST_F(NodeDefBuilderTest, OutTypeListRestrict) {
Op(OpDefBuilder("OutTypeListRestrict")
.Output("out: t")
.Attr("t: list({string, bool}) >= 0"));
ExpectSuccess(Builder().Attr("t", {DT_BOOL, DT_STRING}), {},
{DT_BOOL, DT_STRING}, R"proto(
op: "OutTypeListRestrict"
attr {
key: "t"
value { list { type: [ DT_BOOL, DT_STRING ] } }
})proto");
ExpectInvalid(Builder().Attr("t", {DT_STRING, DT_INT32}),
"Value for attr 't' of int32 is not in the list of allowed "
"values: string, bool");
}
TEST_F(NodeDefBuilderTest, Attr) {
Op(OpDefBuilder("Attr").Attr("a: int"));
ExpectSuccess(Builder().Attr("a", 12), {}, {}, R"proto(
op: "Attr"
attr {
key: "a"
value { i: 12 }
})proto");
ExpectInvalid(Builder().Attr("a", "bad"),
"AttrValue had value with type 'string' when 'int' expected");
ExpectInvalid(
Builder().Attr("a", {12}),
"AttrValue had value with type 'list(int)' when 'int' expected");
ExpectInvalid(Builder(), "NodeDef missing attr 'a' from Op<");
ExpectSuccess(Builder().Attr("a", 10).Attr("b", 12), {}, {},
R"proto(
op: "Attr"
attr {
key: "a"
value { i: 10 }
}
attr {
key: "b"
value { i: 12 }
}
)proto");
}
TEST_F(NodeDefBuilderTest, AttrFloat) {
Op(OpDefBuilder("AttrFloat").Attr("a: float"));
ExpectSuccess(Builder().Attr("a", 1.2f ), {}, {}, R"proto(
op: "AttrFloat"
attr {
key: "a"
value { f: 1.2 }
}
)proto");
ExpectSuccess(Builder().Attr("a", 1.2 ), {}, {}, R"proto(
op: "AttrFloat"
attr {
key: "a"
value { f: 1.2 }
}
)proto");
ExpectInvalid(Builder().Attr("a", 12),
"AttrValue had value with type 'int' when 'float' expected");
}
TEST_F(NodeDefBuilderTest, AttrBoolList) {
Op(OpDefBuilder("AttrBoolList").Attr("a: list(bool)"));
ExpectSuccess(Builder().Attr("a", {true, false, true}), {}, {}, R"proto(
op: "AttrBoolList"
attr {
key: "a"
value { list { b: [ true, false, true ] } }
}
)proto");
ExpectSuccess(Builder().Attr("a", std::vector<bool>()), {}, {}, R"proto(
op: "AttrBoolList"
attr {
key: "a"
value { list {} }
}
)proto");
ExpectInvalid(Builder().Attr("a", {0}),
"AttrValue had value with type 'list(int)' when 'list(bool)' "
"expected");
}
TEST_F(NodeDefBuilderTest, AttrMin) {
Op(OpDefBuilder("AttrMin").Attr("a: int >= 5"));
ExpectSuccess(Builder().Attr("a", 12), {}, {}, R"proto(
op: "AttrMin"
attr {
key: "a"
value { i: 12 }
})proto");
ExpectInvalid(Builder().Attr("a", 2),
"Value for attr 'a' of 2 must be at least minimum 5");
}
TEST_F(NodeDefBuilderTest, AttrListMin) {
Op(OpDefBuilder("AttrListMin").Attr("a: list(int) >= 2"));
ExpectSuccess(Builder().Attr("a", {1, 2}), {}, {}, R"proto(
op: "AttrListMin"
attr {
key: "a"
value { list { i: [ 1, 2 ] } }
})proto");
ExpectInvalid(Builder().Attr("a", {17}),
"Length for attr 'a' of 1 must be at least minimum 2");
}
TEST_F(NodeDefBuilderTest, AttrEnum) {
Op(OpDefBuilder("AttrEnum").Attr("a: {'apples', 'oranges'}"));
ExpectSuccess(Builder().Attr("a", "oranges"), {}, {}, R"proto(
op: "AttrEnum"
attr {
key: "a"
value { s: "oranges" }
})proto");
ExpectInvalid(
Builder().Attr("a", "invalid"),
"Value for attr 'a' of \"invalid\" is not in the list of allowed values: "
"\"apples\", \"oranges\"");
}
TEST_F(NodeDefBuilderTest, AttrEnumList) {
Op(OpDefBuilder("AttrEnumList").Attr("a: list({'apples', 'oranges'})"));
ExpectSuccess(Builder().Attr("a", {"oranges", "apples"}), {}, {}, R"proto(
op: "AttrEnumList"
attr {
key: "a"
value { list { s: [ "oranges", "apples" ] } }
})proto");
ExpectInvalid(
Builder().Attr("a", {"apples", "invalid", "oranges"}),
"Value for attr 'a' of \"invalid\" is not in the list of allowed values: "
"\"apples\", \"oranges\"");
}
TEST_F(NodeDefBuilderTest, AttrShape) {
Op(OpDefBuilder("AttrShape").Attr("a: shape"));
ExpectSuccess(Builder().Attr("a", TensorShape({5})), {}, {}, R"proto(
op: "AttrShape"
attr {
key: "a"
value { shape { dim { size: 5 } } }
})proto");
ExpectSuccess(Builder().Attr("a", TensorShape({4, 3, 2})), {}, {}, R"proto(
op: "AttrShape"
attr {
key: "a"
value {
shape {
dim { size: 4 }
dim { size: 3 }
dim { size: 2 }
}
}
})proto");
ExpectSuccess(Builder().Attr("a", TensorShape({3, 2})), {}, {},
R"proto(
op: "AttrShape"
attr {
key: "a"
value {
shape {
dim { size: 3 }
dim { size: 2 }
}
}
})proto");
ExpectSuccess(Builder().Attr("a", TensorShape()), {}, {}, R"proto(
op: "AttrShape"
attr {
key: "a"
value { shape {} }
})proto");
}
TEST_F(NodeDefBuilderTest, AttrDefault) {
Op(OpDefBuilder("AttrDefault").Attr("a: string = 'banana'"));
ExpectSuccess(Builder(), {}, {}, R"proto(
op: "AttrDefault"
attr {
key: "a"
value { s: "banana" }
})proto");
ExpectSuccess(Builder().Attr("a", "kiwi"), {}, {}, R"proto(
op: "AttrDefault"
attr {
key: "a"
value { s: "kiwi" }
})proto");
}
TEST_F(NodeDefBuilderTest, AttrManyDefault) {
Op(OpDefBuilder("AttrManyDefault")
.Attr("a: string = 'banana'")
.Attr("b: string = 'kiwi'"));
ExpectSuccess(Builder(), {}, {}, R"proto(
op: "AttrManyDefault"
attr {
key: "a"
value { s: "banana" }
}
attr {
key: "b"
value { s: "kiwi" }
})proto");
Op(OpDefBuilder("AttrManyDefaultWithMandatory")
.Attr("a: string = 'banana'")
.Attr("b: string = 'kiwi'")
.Attr("c: string"));
ExpectSuccess(Builder().Attr("c", "strawberry"), {}, {}, R"proto(
op: "AttrManyDefaultWithMandatory"
attr {
key: "c"
value { s: "strawberry" }
}
attr {
key: "a"
value { s: "banana" }
}
attr {
key: "b"
value { s: "kiwi" }
})proto");
Op(OpDefBuilder("AttrManyDefaultAndInferred")
.Input("input: T")
.Attr("T: {float, double}")
.Attr("a: string")
.Attr("b: list(string) >= 1")
.Attr("c: bool = true")
.Attr("d: float = 0.3")
.Attr("e: string")
.Attr("f: float = 0.25"));
ExpectSuccess(Builder()
.Input(FakeInput(DT_FLOAT))
.Attr("a", "foo")
.Attr("e", "foo")
.Attr("b", std::vector<string>({"bar", "baz"}))
.Attr("f", 1.0f),
{DT_FLOAT}, {}, R"proto(
op: "AttrManyDefaultAndInferred"
input: "a"
attr {
key: "T"
value { type: DT_FLOAT }
}
attr {
key: "a"
value { s: "foo" }
}
attr {
key: "e"
value { s: "foo" }
}
attr {
key: "b"
value { list { s: "bar" s: "baz" } }
}
attr {
key: "f"
value { f: 1.0 }
}
attr {
key: "c"
value { b: true }
}
attr {
key: "d"
value { f: 0.3 }
})proto");
}
TEST_F(NodeDefBuilderTest, AttrListDefault) {
Op(OpDefBuilder("AttrListDefault").Attr("a: list(int) = [5, 15]"));
ExpectSuccess(Builder(), {}, {}, R"proto(
op: "AttrListDefault"
attr {
key: "a"
value { list { i: [ 5, 15 ] } }
})proto");
ExpectSuccess(Builder().Attr("a", {3}), {}, {}, R"proto(
op: "AttrListDefault"
attr {
key: "a"
value { list { i: 3 } }
})proto");
ExpectSuccess(Builder().Attr("a", std::vector<int>()), {}, {}, R"proto(
op: "AttrListDefault"
attr {
key: "a"
value { list {} }
})proto");
}
TEST_F(NodeDefBuilderTest, AttrEmptyListDefault) {
Op(OpDefBuilder("AttrEmptyListDefault").Attr("a: list(int) = []"));
ExpectSuccess(Builder(), {}, {}, R"proto(
op: "AttrEmptyListDefault"
attr {
key: "a"
value { list {} }
})proto");
ExpectSuccess(Builder().Attr("a", {3}), {}, {}, R"proto(
op: "AttrEmptyListDefault"
attr {
key: "a"
value { list { i: 3 } }
})proto");
ExpectSuccess(Builder().Attr("a", std::vector<int>()), {}, {}, R"proto(
op: "AttrEmptyListDefault"
attr {
key: "a"
value { list {} }
})proto");
}
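// Repeated inputs of the form "a: N*int32": N is inferred from the number of
// tensors passed, must satisfy its declared minimum, and mismatched element
// types or a single non-list input are reported as errors.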
TEST_F(NodeDefBuilderTest, NIntsIn) {
Op(OpDefBuilder("NIntsIn").Input("a: N*int32").Attr("N: int >= 2"));
ExpectSuccess(Builder().Input(FakeInput(2)), {DT_INT32, DT_INT32}, {},
R"proto(
op: "NIntsIn"
input: [ "a", "a:1" ]
attr {
key: "N"
value { i: 2 }
})proto");
ExpectSuccess(Builder().Input(FakeInput(5, DT_INT32)),
{DT_INT32, DT_INT32, DT_INT32, DT_INT32, DT_INT32}, {}, R"proto(
op: "NIntsIn"
input: [ "a", "a:1", "a:2", "a:3", "a:4" ]
attr {
key: "N"
value { i: 5 }
})proto");
ExpectFailures(Builder().Input(FakeInput(2, DT_STRING)),
{"2 errors while building NodeDef",
"Input 'a' passed string expected int32"});
ExpectInvalid(Builder().Input(FakeInput(1)),
"Value for attr 'N' of 1 must be at least minimum 2");
ExpectFailures(
Builder().Input(FakeInput(DT_INT32)),
{"2 errors while building NodeDef",
"Could not infer length of input 'a': No attr named 'N' in NodeDef:",
"0 inputs specified of 1 inputs in Op"});
ExpectFailure(Builder().Input({{"in", 0, DT_INT32}, {"in", 1, DT_STRING}}),
"Input 'a' passed string expected int32 while");
ExpectFailures(
Builder().Input(FakeInput()),
{"2 errors while building NodeDef",
"Could not infer length of input 'a': No attr named 'N' in NodeDef:",
"0 inputs specified of 1 inputs in Op"});
}
TEST_F(NodeDefBuilderTest, NPolymorphicIn) {
Op(OpDefBuilder("NPolymorphicIn")
.Input("a: N*T")
.Attr("T: type")
.Attr("N: int >= 2"));
ExpectSuccess(Builder().Input(FakeInput(2, DT_INT32)), {DT_INT32, DT_INT32},
{}, R"proto(
op: "NPolymorphicIn"
input: [ "a", "a:1" ]
attr {
key: "N"
value { i: 2 }
}
attr {
key: "T"
value { type: DT_INT32 }
})proto");
ExpectSuccess(Builder().Input(FakeInput(3, DT_STRING)),
{DT_STRING, DT_STRING, DT_STRING}, {}, R"proto(
op: "NPolymorphicIn"
input: [ "a", "a:1", "a:2" ]
attr {
key: "N"
value { i: 3 }
}
attr {
key: "T"
value { type: DT_STRING }
})proto");
ExpectFailures(
Builder().Input(FakeInput(2)),
{"2 errors while building NodeDef",
"Could not infer type for input 'a': No attr named 'T' in NodeDef:",
"0 inputs specified of 1 inputs in Op"});
ExpectFailure(Builder().Input(FakeInput({DT_INT32, DT_STRING})),
"Input 'a' passed string expected int32 while");
ExpectFailure(Builder().Input({{"in", 0, DT_INT32}, {"in", 1, DT_STRING}}),
"Input 'a' passed string expected int32 while");
ExpectInvalid(Builder().Input(FakeInput(1, DT_INT32)),
"Value for attr 'N' of 1 must be at least minimum 2");
ExpectFailure(Builder().Input("in", 0, DT_INT32),
"Single tensor passed to 'a', expected list while");
}
TEST_F(NodeDefBuilderTest, NPolymorphicRestrictIn) {
Op(OpDefBuilder("NPolymorphicRestrictIn")
.Input("a: N*T")
.Attr("T: {string, bool}")
.Attr("N: int >= 2"));
ExpectSuccess(Builder().Input(FakeInput(2, DT_BOOL)), {DT_BOOL, DT_BOOL}, {},
R"proto(
op: "NPolymorphicRestrictIn"
input: [ "a", "a:1" ]
attr {
key: "N"
value { i: 2 }
}
attr {
key: "T"
value { type: DT_BOOL }
})proto");
ExpectSuccess(Builder().Input(FakeInput(3, DT_STRING)),
{DT_STRING, DT_STRING, DT_STRING}, {}, R"proto(
op: "NPolymorphicRestrictIn"
input: [ "a", "a:1", "a:2" ]
attr {
key: "N"
value { i: 3 }
}
attr {
key: "T"
value { type: DT_STRING }
})proto");
ExpectInvalid(Builder().Input(FakeInput(2, DT_INT32)),
"Value for attr 'T' of int32 is not in the list of allowed "
"values: string, bool");
}
TEST_F(NodeDefBuilderTest, NInTwice) {
Op(OpDefBuilder("NInTwice")
.Input("a: N*int32")
.Input("b: N*string")
.Attr("N: int >= 0"));
ExpectSuccess(Builder().Input(FakeInput(2)).Input(FakeInput(2)),
{DT_INT32, DT_INT32, DT_STRING, DT_STRING}, {}, R"proto(
op: "NInTwice"
input: [ "a", "a:1", "b", "b:1" ]
attr {
key: "N"
value { i: 2 }
})proto");
ExpectSuccess(Builder().Input(FakeInput(0)).Input(FakeInput()), {}, {},
R"proto(
op: "NInTwice"
attr {
key: "N"
value { i: 0 }
})proto");
ExpectFailure(Builder().Input(FakeInput(3)).Input(FakeInput(1)),
"Inconsistent values for attr 'N' 3 vs. 1 while");
}
TEST_F(NodeDefBuilderTest, NInPolymorphicTwice) {
Op(OpDefBuilder("NInPolymorphicTwice")
.Input("a: N*T")
.Input("b: N*T")
.Attr("T: type")
.Attr("N: int >= 0"));
ExpectSuccess(Builder().Input(FakeInput(2, DT_INT32)).Input(FakeInput()),
{DT_INT32, DT_INT32, DT_INT32, DT_INT32}, {}, R"proto(
op: "NInPolymorphicTwice"
input: [ "a", "a:1", "b", "b:1" ]
attr {
key: "N"
value { i: 2 }
}
attr {
key: "T"
value { type: DT_INT32 }
})proto");
ExpectFailure(
Builder().Input(FakeInput(3, DT_INT32)).Input(FakeInput(1, DT_INT32)),
"Inconsistent values for attr 'N' 3 vs. 1 while");
ExpectFailure(Builder().Input(FakeInput(3, DT_INT32)).Input(FakeInput(1)),
"Inconsistent values for attr 'N' 3 vs. 1 while");
ExpectFailure(
Builder().Input(FakeInput(2, DT_INT32)).Input(FakeInput(2, DT_STRING)),
"Inconsistent values for attr 'T' DT_INT32 vs. DT_STRING while");
ExpectFailure(
Builder().Input(FakeInput(2, DT_INT32)).Input(FakeInput(DT_STRING)),
"Inconsistent values for attr 'T' DT_INT32 vs. DT_STRING while");
}
TEST_F(NodeDefBuilderTest, NInTwoTypeVariables) {
Op(OpDefBuilder("NInTwoTypeVariables")
.Input("a: N*S")
.Input("b: N*T")
.Attr("S: type")
.Attr("T: type")
.Attr("N: int >= 0"));
ExpectSuccess(
Builder().Input(FakeInput(2, DT_INT32)).Input(FakeInput(2, DT_BOOL)),
{DT_INT32, DT_INT32, DT_BOOL, DT_BOOL}, {}, R"proto(
op: "NInTwoTypeVariables"
input: [ "a", "a:1", "b", "b:1" ]
attr {
key: "N"
value { i: 2 }
}
attr {
key: "S"
value { type: DT_INT32 }
}
attr {
key: "T"
value { type: DT_BOOL }
})proto");
ExpectSuccess(
Builder().Input(FakeInput(2, DT_INT32)).Input(FakeInput(DT_BOOL)),
{DT_INT32, DT_INT32, DT_BOOL, DT_BOOL}, {}, R"proto(
op: "NInTwoTypeVariables"
input: [ "a", "a:1", "b", "b:1" ]
attr {
key: "N"
value { i: 2 }
}
attr {
key: "S"
value { type: DT_INT32 }
}
attr {
key: "T"
value { type: DT_BOOL }
})proto");
ExpectFailure(
Builder().Input(FakeInput(3, DT_INT32)).Input(FakeInput(1, DT_STRING)),
"Inconsistent values for attr 'N' 3 vs. 1 while");
}
TEST_F(NodeDefBuilderTest, InPolymorphicTwice) {
Op(OpDefBuilder("InPolymorphicTwice")
.Input("a: N*T")
.Input("b: M*T")
.Attr("T: type")
.Attr("N: int >= 0")
.Attr("M: int >= 0"));
ExpectSuccess(
Builder().Input(FakeInput(1, DT_INT32)).Input(FakeInput(3, DT_INT32)),
{DT_INT32, DT_INT32, DT_INT32, DT_INT32}, {}, R"proto(
op: "InPolymorphicTwice"
input: [ "a", "b", "b:1", "b:2" ]
attr {
key: "N"
value { i: 1 }
}
attr {
key: "T"
value { type: DT_INT32 }
}
attr {
key: "M"
value { i: 3 }
})proto");
ExpectSuccess(Builder().Input(FakeInput(1, DT_BOOL)).Input(FakeInput(0)),
{DT_BOOL}, {}, R"proto(
op: "InPolymorphicTwice"
input: "a"
attr {
key: "N"
value { i: 1 }
}
attr {
key: "T"
value { type: DT_BOOL }
}
attr {
key: "M"
value { i: 0 }
})proto");
ExpectSuccess(Builder().Input(FakeInput(0)).Input(FakeInput(1, DT_BOOL)),
{DT_BOOL}, {}, R"proto(
op: "InPolymorphicTwice"
input: "b"
attr {
key: "N"
value { i: 0 }
}
attr {
key: "M"
value { i: 1 }
}
attr {
key: "T"
value { type: DT_BOOL }
})proto");
ExpectFailure(
Builder().Input(FakeInput(2, DT_INT32)).Input(FakeInput(2, DT_STRING)),
"Inconsistent values for attr 'T' DT_INT32 vs. DT_STRING while");
}
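// For outputs of the form "a: N*int32" there is nothing to infer N from, so it
// must be provided as an attr (and still respects its minimum and int type).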
TEST_F(NodeDefBuilderTest, NIntsOut) {
Op(OpDefBuilder("NIntsOut").Output("a: N*int32").Attr("N: int >= 2"));
ExpectSuccess(Builder().Attr("N", 2), {}, {DT_INT32, DT_INT32}, R"proto(
op: "NIntsOut"
attr {
key: "N"
value { i: 2 }
})proto");
ExpectSuccess(Builder().Attr("N", 3), {}, {DT_INT32, DT_INT32, DT_INT32},
R"proto(
op: "NIntsOut"
attr {
key: "N"
value { i: 3 }
})proto");
ExpectInvalid(Builder().Attr("N", 1),
"Value for attr 'N' of 1 must be at least minimum 2");
ExpectInvalid(
Builder().Attr("N", {3}),
"AttrValue had value with type 'list(int)' when 'int' expected");
ExpectInvalid(Builder(), "NodeDef missing attr 'N' from");
}
TEST_F(NodeDefBuilderTest, NIntsOutDefault) {
Op(OpDefBuilder("NIntsOutDefault")
.Output("a: N*int32")
.Attr("N: int >= 2 = 3"));
ExpectSuccess(Builder(), {}, {DT_INT32, DT_INT32, DT_INT32}, R"proto(
op: "NIntsOutDefault"
attr {
key: "N"
value { i: 3 }
})proto");
ExpectSuccess(Builder().Attr("N", 2), {}, {DT_INT32, DT_INT32}, R"proto(
op: "NIntsOutDefault"
attr {
key: "N"
value { i: 2 }
})proto");
}
TEST_F(NodeDefBuilderTest, NPolymorphicOut) {
Op(OpDefBuilder("NPolymorphicOut")
.Output("a: N*T")
.Attr("T: type")
.Attr("N: int >= 2"));
ExpectSuccess(Builder().Attr("T", DT_INT32).Attr("N", 2), {},
{DT_INT32, DT_INT32}, R"proto(
op: "NPolymorphicOut"
attr {
key: "T"
value { type: DT_INT32 }
}
attr {
key: "N"
value { i: 2 }
})proto");
ExpectSuccess(Builder().Attr("N", 3).Attr("T", DT_STRING), {},
{DT_STRING, DT_STRING, DT_STRING}, R"proto(
op: "NPolymorphicOut"
attr {
key: "N"
value { i: 3 }
}
attr {
key: "T"
value { type: DT_STRING }
})proto");
ExpectInvalid(Builder().Attr("N", 1).Attr("T", DT_STRING),
"Value for attr 'N' of 1 must be at least minimum 2");
ExpectInvalid(
Builder().Attr("N", 3).Attr("T", {DT_STRING}),
"AttrValue had value with type 'list(type)' when 'type' expected");
}
TEST_F(NodeDefBuilderTest, NPolymorphicOutDefault) {
Op(OpDefBuilder("NPolymorphicOutDefault")
.Output("a: N*T")
.Attr("T: type = DT_BOOL")
.Attr("N: int >= 2 = 2"));
ExpectSuccess(Builder(), {}, {DT_BOOL, DT_BOOL}, R"proto(
op: "NPolymorphicOutDefault"
attr {
key: "T"
value { type: DT_BOOL }
}
attr {
key: "N"
value { i: 2 }
})proto");
ExpectSuccess(Builder().Attr("N", 3), {}, {DT_BOOL, DT_BOOL, DT_BOOL},
R"proto(
op: "NPolymorphicOutDefault"
attr {
key: "N"
value { i: 3 }
}
attr {
key: "T"
value { type: DT_BOOL }
})proto");
ExpectSuccess(Builder().Attr("T", DT_INT32), {}, {DT_INT32, DT_INT32},
R"proto(
op: "NPolymorphicOutDefault"
attr {
key: "T"
value { type: DT_INT32 }
}
attr {
key: "N"
value { i: 2 }
})proto");
ExpectSuccess(Builder().Attr("N", 3).Attr("T", DT_INT32), {},
{DT_INT32, DT_INT32, DT_INT32}, R"proto(
op: "NPolymorphicOutDefault"
attr {
key: "N"
value { i: 3 }
}
attr {
key: "T"
value { type: DT_INT32 }
})proto");
}
TEST_F(NodeDefBuilderTest, NPolymorphicRestrictOut) {
Op(OpDefBuilder("NPolymorphicRestrictOut")
.Output("a: N*T")
.Attr("T: {string, bool}")
.Attr("N: int >= 2"));
ExpectSuccess(Builder().Attr("N", 3).Attr("T", DT_BOOL), {},
{DT_BOOL, DT_BOOL, DT_BOOL}, R"proto(
op: "NPolymorphicRestrictOut"
attr {
key: "N"
value { i: 3 }
}
attr {
key: "T"
value { type: DT_BOOL }
})proto");
ExpectInvalid(Builder().Attr("N", 3).Attr("T", DT_INT32),
"Value for attr 'T' of int32 is not in the list of allowed "
"values: string, bool");
}
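// Ref(...) arguments require reference dtypes: Ref(int32) accepts int32_ref
// but rejects bool_ref and plain int32.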
TEST_F(NodeDefBuilderTest, RefIn) {
Op(OpDefBuilder("RefIn").Input("a: Ref(int32)"));
ExpectSuccess(Builder().Input(FakeInput(DT_INT32_REF)), {DT_INT32_REF}, {},
R"proto(
op: "RefIn" input: "a")proto");
ExpectFailure(Builder().Input(FakeInput(DT_BOOL_REF)),
"Input 'a' passed bool_ref expected int32_ref while");
ExpectFailure(Builder().Input(FakeInput(DT_INT32)),
"Input 'a' passed int32 expected int32_ref while");
}
TEST_F(NodeDefBuilderTest, PolymorphicRefIn) {
Op(OpDefBuilder("PolymorphicRefIn").Input("a: Ref(T)").Attr("T: type"));
ExpectSuccess(Builder().Input(FakeInput(DT_BOOL_REF)), {DT_BOOL_REF}, {},
R"proto(
op: "PolymorphicRefIn"
input: "a"
attr {
key: "T"
value { type: DT_BOOL }
})proto");
ExpectFailure(Builder().Input(FakeInput(DT_BOOL)),
"Input 'a' passed bool expected ref type while");
}
TEST_F(NodeDefBuilderTest, RefOut) {
Op(OpDefBuilder("RefOut").Output("a: Ref(string)"));
ExpectSuccess(Builder(), {}, {DT_STRING_REF}, R"proto(
op: "RefOut")proto");
}
TEST_F(NodeDefBuilderTest, PolymorphicRefOut) {
Op(OpDefBuilder("PolymorphicRefOut").Output("a: Ref(t)").Attr("t: type"));
ExpectSuccess(Builder().Attr("t", DT_BOOL), {}, {DT_BOOL_REF}, R"proto(
op: "PolymorphicRefOut"
attr {
key: "t"
value { type: DT_BOOL }
})proto");
}
TEST_F(NodeDefBuilderTest, SpecifyDevice) {
Op(OpDefBuilder("SpecifyDevice"));
ExpectSuccess(Builder().Device("ADevice"), {}, {}, R"proto(
op: "SpecifyDevice"
device: "ADevice")proto");
}
}
}
| https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/node_def_builder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/node_def_builder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
4e2fd81d-47ea-45e7-a4dd-dcd3075df80d | cpp | abseil/abseil-cpp | poisson_distribution | absl/random/poisson_distribution.h | absl/random/poisson_distribution_test.cc |
#ifndef ABSL_RANDOM_POISSON_DISTRIBUTION_H_
#define ABSL_RANDOM_POISSON_DISTRIBUTION_H_
#include <cassert>
#include <cmath>
#include <istream>
#include <limits>
#include <ostream>
#include <type_traits>
#include "absl/random/internal/fast_uniform_bits.h"
#include "absl/random/internal/fastmath.h"
#include "absl/random/internal/generate_real.h"
#include "absl/random/internal/iostream_state_saver.h"
#include "absl/random/internal/traits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
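// poisson_distribution<IntType> produces non-negative integer variates with
// p(n) = exp(-mean) * mean^n / n!. Small and moderate means are sampled with
// the classic product-of-uniforms method (batched via param_type::split_);
// larger means use a rejection sampler driven by the constants precomputed in
// param_type.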
template <typename IntType = int>
class poisson_distribution {
public:
using result_type = IntType;
class param_type {
public:
using distribution_type = poisson_distribution;
explicit param_type(double mean = 1.0);
double mean() const { return mean_; }
friend bool operator==(const param_type& a, const param_type& b) {
return a.mean_ == b.mean_;
}
friend bool operator!=(const param_type& a, const param_type& b) {
return !(a == b);
}
private:
friend class poisson_distribution;
double mean_;
double emu_;
double lmu_;
double s_;
double log_k_;
int split_;
static_assert(random_internal::IsIntegral<IntType>::value,
"Class-template absl::poisson_distribution<> must be "
"parameterized using an integral type.");
};
poisson_distribution() : poisson_distribution(1.0) {}
explicit poisson_distribution(double mean) : param_(mean) {}
explicit poisson_distribution(const param_type& p) : param_(p) {}
void reset() {}
template <typename URBG>
result_type operator()(URBG& g) {
return (*this)(g, param_);
}
template <typename URBG>
result_type operator()(URBG& g,
const param_type& p);
param_type param() const { return param_; }
void param(const param_type& p) { param_ = p; }
result_type(min)() const { return 0; }
result_type(max)() const { return (std::numeric_limits<result_type>::max)(); }
double mean() const { return param_.mean(); }
friend bool operator==(const poisson_distribution& a,
const poisson_distribution& b) {
return a.param_ == b.param_;
}
friend bool operator!=(const poisson_distribution& a,
const poisson_distribution& b) {
return a.param_ != b.param_;
}
private:
param_type param_;
random_internal::FastUniformBits<uint64_t> fast_u64_;
};
template <typename IntType>
poisson_distribution<IntType>::param_type::param_type(double mean)
: mean_(mean), split_(0) {
assert(mean >= 0);
assert(mean <=
static_cast<double>((std::numeric_limits<result_type>::max)()));
assert(mean <= 1e10);
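  // Three regimes: mean < 10 uses a single batch of the product-of-uniforms
  // method; mean <= 50 splits sampling into batches of mean at most 10 so that
  // emu_ = exp(-mean/split) stays comfortably above zero; larger means
  // precompute log(mean), the bound s_, and log_k_ (the log of the
  // unnormalized pmf at the mode) for the rejection sampler.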
if (mean_ < 10) {
split_ = 1;
emu_ = std::exp(-mean_);
} else if (mean_ <= 50) {
split_ = 1 + static_cast<int>(mean_ / 10.0);
emu_ = std::exp(-mean_ / static_cast<double>(split_));
} else {
constexpr double k2E = 0.7357588823428846;
constexpr double kSA = 0.4494580810294493;
lmu_ = std::log(mean_);
double a = mean_ + 0.5;
s_ = kSA + std::sqrt(k2E * a);
const double mode = std::ceil(mean_) - 1;
log_k_ = lmu_ * mode - absl::random_internal::StirlingLogFactorial(mode);
}
}
template <typename IntType>
template <typename URBG>
typename poisson_distribution<IntType>::result_type
poisson_distribution<IntType>::operator()(
URBG& g,
const param_type& p) {
using random_internal::GeneratePositiveTag;
using random_internal::GenerateRealFromBits;
using random_internal::GenerateSignedTag;
if (p.split_ != 0) {
result_type n = 0;
for (int split = p.split_; split > 0; --split) {
double r = 1.0;
do {
r *= GenerateRealFromBits<double, GeneratePositiveTag, true>(
fast_u64_(g));
++n;
} while (r > p.emu_);
--n;
}
return n;
}
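  // Large-mean path: a ratio-of-uniforms style rejection sampler. Draw a
  // positive u and a signed v, form the candidate x = floor(s_ * v / u + mean
  // + 0.5), and accept when 2*log(u) + log_k_ + log(x!) < x * log(mean).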
const double a = p.mean_ + 0.5;
for (;;) {
const double u = GenerateRealFromBits<double, GeneratePositiveTag, false>(
fast_u64_(g));
const double v = GenerateRealFromBits<double, GenerateSignedTag, false>(
fast_u64_(g));
const double x = std::floor(p.s_ * v / u + a);
if (x < 0) continue;
const double rhs = x * p.lmu_;
double s = (x <= 1.0) ? 0.0
: (x == 2.0) ? 0.693147180559945
: absl::random_internal::StirlingLogFactorial(x);
const double lhs = 2.0 * std::log(u) + p.log_k_ + s;
if (lhs < rhs) {
return x > static_cast<double>((max)())
? (max)()
: static_cast<result_type>(x);
}
}
}
template <typename CharT, typename Traits, typename IntType>
std::basic_ostream<CharT, Traits>& operator<<(
std::basic_ostream<CharT, Traits>& os,
const poisson_distribution<IntType>& x) {
auto saver = random_internal::make_ostream_state_saver(os);
os.precision(random_internal::stream_precision_helper<double>::kPrecision);
os << x.mean();
return os;
}
template <typename CharT, typename Traits, typename IntType>
std::basic_istream<CharT, Traits>& operator>>(
std::basic_istream<CharT, Traits>& is,
poisson_distribution<IntType>& x) {
using param_type = typename poisson_distribution<IntType>::param_type;
auto saver = random_internal::make_istream_state_saver(is);
double mean = random_internal::read_floating_point<double>(is);
if (!is.fail()) {
x.param(param_type(mean));
}
return is;
}
ABSL_NAMESPACE_END
}
#endif
|
#include "absl/random/poisson_distribution.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <random>
#include <sstream>
#include <string>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/macros.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/random/internal/chi_square.h"
#include "absl/random/internal/distribution_test_util.h"
#include "absl/random/internal/pcg_engine.h"
#include "absl/random/internal/sequence_urbg.h"
#include "absl/random/random.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/strip.h"
namespace {
using absl::random_internal::kChiSquared;
template <typename IntType>
class PoissonDistributionInterfaceTest : public ::testing::Test {};
using IntTypes = ::testing::Types<int, int8_t, int16_t, int32_t, int64_t,
uint8_t, uint16_t, uint32_t, uint64_t>;
TYPED_TEST_SUITE(PoissonDistributionInterfaceTest, IntTypes);
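// Round-trips each distribution through operator<< / operator>> for a wide
// range of means (clamped to what the result type can represent) and checks
// that every sample stays within [min(), max()].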
TYPED_TEST(PoissonDistributionInterfaceTest, SerializeTest) {
using param_type = typename absl::poisson_distribution<TypeParam>::param_type;
const double kMax =
      std::min(1e10,
static_cast<double>(std::numeric_limits<TypeParam>::max()));
const double kParams[] = {
1,
std::nextafter(1.0, 0.0),
std::nextafter(1.0, 2.0),
1e-8, 1e-4,
0.0000005,
0.2,
0.5,
2,
20,
100, 1e4, 1e8, 1.5e9, 1e20,
std::numeric_limits<double>::max(),
std::numeric_limits<double>::epsilon(),
std::nextafter(std::numeric_limits<double>::min(),
1.0),
std::numeric_limits<double>::min(),
std::numeric_limits<double>::denorm_min(),
std::numeric_limits<double>::min() / 2,
std::nextafter(std::numeric_limits<double>::min(),
0.0),
};
constexpr int kCount = 1000;
absl::InsecureBitGen gen;
for (const double m : kParams) {
const double mean = std::min(kMax, m);
const param_type param(mean);
absl::poisson_distribution<TypeParam> before(mean);
EXPECT_EQ(before.mean(), param.mean());
{
absl::poisson_distribution<TypeParam> via_param(param);
EXPECT_EQ(via_param, before);
EXPECT_EQ(via_param.param(), before.param());
}
auto sample_min = before.max();
auto sample_max = before.min();
for (int i = 0; i < kCount; i++) {
auto sample = before(gen);
EXPECT_GE(sample, before.min());
EXPECT_LE(sample, before.max());
if (sample > sample_max) sample_max = sample;
if (sample < sample_min) sample_min = sample;
}
LOG(INFO) << "Range {" << param.mean() << "}: " << sample_min << ", "
<< sample_max;
std::stringstream ss;
ss << before;
absl::poisson_distribution<TypeParam> after(3.8);
EXPECT_NE(before.mean(), after.mean());
EXPECT_NE(before.param(), after.param());
EXPECT_NE(before, after);
ss >> after;
EXPECT_EQ(before.mean(), after.mean())
<< ss.str() << " "
<< (ss.good() ? "good " : "")
<< (ss.bad() ? "bad " : "")
<< (ss.eof() ? "eof " : "")
<< (ss.fail() ? "fail " : "");
}
}
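// Reference model of a Poisson distribution: exact moments plus an explicit
// CDF table (built out to roughly 50 standard deviations past the mean) used
// to construct buckets for the statistical tests below.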
class PoissonModel {
public:
explicit PoissonModel(double mean) : mean_(mean) {}
double mean() const { return mean_; }
double variance() const { return mean_; }
double stddev() const { return std::sqrt(variance()); }
double skew() const { return 1.0 / mean_; }
double kurtosis() const { return 3.0 + 1.0 / mean_; }
void InitCDF();
struct CDF {
size_t index;
double pmf;
double cdf;
};
CDF InverseCDF(double p) {
CDF target{0, 0, p};
auto it = std::upper_bound(
std::begin(cdf_), std::end(cdf_), target,
[](const CDF& a, const CDF& b) { return a.cdf < b.cdf; });
return *it;
}
void LogCDF() {
LOG(INFO) << "CDF (mean = " << mean_ << ")";
for (const auto c : cdf_) {
LOG(INFO) << c.index << ": pmf=" << c.pmf << " cdf=" << c.cdf;
}
}
private:
const double mean_;
std::vector<CDF> cdf_;
};
void PoissonModel::InitCDF() {
if (!cdf_.empty()) {
return;
}
ABSL_ASSERT(mean_ < 201.0);
const size_t max_i = 50 * stddev() + mean();
const double e_neg_mean = std::exp(-mean());
ABSL_ASSERT(e_neg_mean > 0);
double d = 1;
double last_result = e_neg_mean;
double cumulative = e_neg_mean;
if (e_neg_mean > 1e-10) {
cdf_.push_back({0, e_neg_mean, cumulative});
}
for (size_t i = 1; i < max_i; i++) {
d *= (mean() / i);
double result = e_neg_mean * d;
cumulative += result;
if (result < 1e-10 && result < last_result && cumulative > 0.999999) {
break;
}
if (result > 1e-7) {
cdf_.push_back({i, result, cumulative});
}
last_result = result;
}
ABSL_ASSERT(!cdf_.empty());
}
struct ZParam {
double mean;
double p_fail;
int trials;
size_t samples;
};
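// Z-test: each trial draws `samples` values and checks the sample mean against
// the model within a tolerance derived from p_fail; across `trials`
// repetitions only a proportional number of failures is tolerated.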
class PoissonDistributionZTest : public testing::TestWithParam<ZParam>,
public PoissonModel {
public:
PoissonDistributionZTest() : PoissonModel(GetParam().mean) {}
template <typename D>
bool SingleZTest(const double p, const size_t samples);
absl::random_internal::pcg64_2018_engine rng_{0x2B7E151628AED2A6};
};
template <typename D>
bool PoissonDistributionZTest::SingleZTest(const double p,
const size_t samples) {
D dis(mean());
absl::flat_hash_map<int32_t, int> buckets;
std::vector<double> data;
data.reserve(samples);
for (int j = 0; j < samples; j++) {
const auto x = dis(rng_);
buckets[x]++;
data.push_back(x);
}
const auto m = absl::random_internal::ComputeDistributionMoments(data);
const double max_err = absl::random_internal::MaxErrorTolerance(p);
const double z = absl::random_internal::ZScore(mean(), m);
const bool pass = absl::random_internal::Near("z", z, 0.0, max_err);
if (!pass) {
LOG(INFO)
<< "p=" << p << " max_err=" << max_err << "\n"
" mean=" << m.mean << " vs. " << mean() << "\n"
" stddev=" << std::sqrt(m.variance) << " vs. " << stddev() << "\n"
" skewness=" << m.skewness << " vs. " << skew() << "\n"
" kurtosis=" << m.kurtosis << " vs. " << kurtosis() << "\n"
" z=" << z;
}
return pass;
}
TEST_P(PoissonDistributionZTest, AbslPoissonDistribution) {
const auto& param = GetParam();
const int expected_failures =
std::max(1, static_cast<int>(std::ceil(param.trials * param.p_fail)));
const double p = absl::random_internal::RequiredSuccessProbability(
param.p_fail, param.trials);
int failures = 0;
for (int i = 0; i < param.trials; i++) {
failures +=
SingleZTest<absl::poisson_distribution<int32_t>>(p, param.samples) ? 0
: 1;
}
EXPECT_LE(failures, expected_failures);
}
std::vector<ZParam> GetZParams() {
return std::vector<ZParam>({
ZParam{0.5, 0.01, 100, 1000},
ZParam{1.0, 0.01, 100, 1000},
ZParam{10.0, 0.01, 100, 5000},
ZParam{20.0, 0.01, 100, 10000},
ZParam{50.0, 0.01, 100, 10000},
ZParam{51.0, 0.01, 100, 10000},
ZParam{200.0, 0.05, 10, 100000},
ZParam{100000.0, 0.05, 10, 1000000},
});
}
std::string ZParamName(const ::testing::TestParamInfo<ZParam>& info) {
const auto& p = info.param;
std::string name = absl::StrCat("mean_", absl::SixDigits(p.mean));
return absl::StrReplaceAll(name, {{"+", "_"}, {"-", "_"}, {".", "_"}});
}
INSTANTIATE_TEST_SUITE_P(All, PoissonDistributionZTest,
::testing::ValuesIn(GetZParams()), ZParamName);
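// Chi-squared test: buckets samples using the model's inverse CDF, compares
// observed counts against expected counts at a 0.98 threshold, and allows at
// most 4 of 20 trials to fall below a p-value of 0.005.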
class PoissonDistributionChiSquaredTest : public testing::TestWithParam<double>,
public PoissonModel {
public:
PoissonDistributionChiSquaredTest() : PoissonModel(GetParam()) {}
template <typename D>
double ChiSquaredTestImpl();
private:
void InitChiSquaredTest(const double buckets);
std::vector<size_t> cutoffs_;
std::vector<double> expected_;
absl::random_internal::pcg64_2018_engine rng_{0x2B7E151628AED2A6};
};
void PoissonDistributionChiSquaredTest::InitChiSquaredTest(
const double buckets) {
if (!cutoffs_.empty() && !expected_.empty()) {
return;
}
InitCDF();
double last_cdf = 0;
const double inc = 1.0 / buckets;
for (double p = inc; p <= 1.0; p += inc) {
auto result = InverseCDF(p);
if (!cutoffs_.empty() && cutoffs_.back() == result.index) {
continue;
}
double d = result.cdf - last_cdf;
cutoffs_.push_back(result.index);
expected_.push_back(d);
last_cdf = result.cdf;
}
cutoffs_.push_back(std::numeric_limits<size_t>::max());
expected_.push_back(std::max(0.0, 1.0 - last_cdf));
}
template <typename D>
double PoissonDistributionChiSquaredTest::ChiSquaredTestImpl() {
const int kSamples = 2000;
const int kBuckets = 50;
ABSL_ASSERT(mean() <= 200);
InitChiSquaredTest(kBuckets);
D dis(mean());
std::vector<int32_t> counts(cutoffs_.size(), 0);
for (int j = 0; j < kSamples; j++) {
const size_t x = dis(rng_);
auto it = std::lower_bound(std::begin(cutoffs_), std::end(cutoffs_), x);
counts[std::distance(cutoffs_.begin(), it)]++;
}
std::vector<int32_t> e(expected_.size(), 0);
for (int i = 0; i < e.size(); i++) {
e[i] = kSamples * expected_[i];
}
const int dof = static_cast<int>(counts.size()) - 1;
const double threshold = absl::random_internal::ChiSquareValue(dof, 0.98);
const double chi_square = absl::random_internal::ChiSquare(
std::begin(counts), std::end(counts), std::begin(e), std::end(e));
const double p = absl::random_internal::ChiSquarePValue(chi_square, dof);
if (chi_square > threshold) {
LogCDF();
LOG(INFO) << "VALUES buckets=" << counts.size()
<< " samples=" << kSamples;
for (size_t i = 0; i < counts.size(); i++) {
LOG(INFO) << cutoffs_[i] << ": " << counts[i] << " vs. E=" << e[i];
}
LOG(INFO) << kChiSquared << "(data, dof=" << dof << ") = " << chi_square
<< " (" << p << ")\n"
<< " vs.\n"
<< kChiSquared << " @ 0.98 = " << threshold;
}
return p;
}
TEST_P(PoissonDistributionChiSquaredTest, AbslPoissonDistribution) {
const int kTrials = 20;
ASSERT_LE(mean(), 200.0);
if (mean() > 200.0) {
return;
}
int failures = 0;
for (int i = 0; i < kTrials; i++) {
double p_value = ChiSquaredTestImpl<absl::poisson_distribution<int32_t>>();
if (p_value < 0.005) {
failures++;
}
}
EXPECT_LE(failures, 4);
}
INSTANTIATE_TEST_SUITE_P(All, PoissonDistributionChiSquaredTest,
::testing::Values(0.5, 1.0, 2.0, 10.0, 50.0, 51.0,
200.0));
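// Golden-value tests: with a fixed bit stream the distribution must keep
// producing exactly these values, so any change to the sampling algorithm
// shows up as a mismatch here.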
TEST(PoissonDistributionTest, StabilityTest) {
using testing::ElementsAre;
absl::random_internal::sequence_urbg urbg({
0x035b0dc7e0a18acfull, 0x06cebe0d2653682eull, 0x0061e9b23861596bull,
0x0003eb76f6f7f755ull, 0xFFCEA50FDB2F953Bull, 0xC332DDEFBE6C5AA5ull,
0x6558218568AB9702ull, 0x2AEF7DAD5B6E2F84ull, 0x1521B62829076170ull,
0xECDD4775619F1510ull, 0x13CCA830EB61BD96ull, 0x0334FE1EAA0363CFull,
0xB5735C904C70A239ull, 0xD59E9E0BCBAADE14ull, 0xEECC86BC60622CA7ull,
0x4864f22c059bf29eull, 0x247856d8b862665cull, 0xe46e86e9a1337e10ull,
0xd8c8541f3519b133ull, 0xe75b5162c567b9e4ull, 0xf732e5ded7009c5bull,
0xb170b98353121eacull, 0x1ec2e8986d2362caull, 0x814c8e35fe9a961aull,
0x0c3cd59c9b638a02ull, 0xcb3bb6478a07715cull, 0x1224e62c978bbc7full,
0x671ef2cb04e81f6eull, 0x3c1cbd811eaf1808ull, 0x1bbc23cfa8fac721ull,
0xa4c2cda65e596a51ull, 0xb77216fad37adf91ull, 0x836d794457c08849ull,
0xe083df03475f49d7ull, 0xbc9feb512e6b0d6cull, 0xb12d74fdd718c8c5ull,
0x12ff09653bfbe4caull, 0x8dd03a105bc4ee7eull, 0x5738341045ba0d85ull,
0xf3fd722dc65ad09eull, 0xfa14fd21ea2a5705ull, 0xffe6ea4d6edb0c73ull,
0xD07E9EFE2BF11FB4ull, 0x95DBDA4DAE909198ull, 0xEAAD8E716B93D5A0ull,
0xD08ED1D0AFC725E0ull, 0x8E3C5B2F8E7594B7ull, 0x8FF6E2FBF2122B64ull,
0x8888B812900DF01Cull, 0x4FAD5EA0688FC31Cull, 0xD1CFF191B3A8C1ADull,
0x2F2F2218BE0E1777ull, 0xEA752DFE8B021FA1ull, 0xE5A0CC0FB56F74E8ull,
0x18ACF3D6CE89E299ull, 0xB4A84FE0FD13E0B7ull, 0x7CC43B81D2ADA8D9ull,
0x165FA26680957705ull, 0x93CC7314211A1477ull, 0xE6AD206577B5FA86ull,
0xC75442F5FB9D35CFull, 0xEBCDAF0C7B3E89A0ull, 0xD6411BD3AE1E7E49ull,
0x00250E2D2071B35Eull, 0x226800BB57B8E0AFull, 0x2464369BF009B91Eull,
0x5563911D59DFA6AAull, 0x78C14389D95A537Full, 0x207D5BA202E5B9C5ull,
0x832603766295CFA9ull, 0x11C819684E734A41ull, 0xB3472DCA7B14A94Aull,
});
std::vector<int> output(10);
{
absl::poisson_distribution<int> dist(5);
std::generate(std::begin(output), std::end(output),
[&] { return dist(urbg); });
}
EXPECT_THAT(output,
ElementsAre(1, 0, 0, 4, 2, 10, 3, 3, 7, 12));
{
urbg.reset();
absl::poisson_distribution<int> dist(25);
std::generate(std::begin(output), std::end(output),
[&] { return dist(urbg); });
}
EXPECT_THAT(output,
ElementsAre(9, 35, 18, 10, 35, 18, 10, 35, 18, 10));
{
urbg.reset();
absl::poisson_distribution<int> dist(121);
std::generate(std::begin(output), std::end(output),
[&] { return dist(urbg); });
}
EXPECT_THAT(output,
ElementsAre(161, 122, 129, 124, 112, 112, 117, 120, 130, 114));
}
TEST(PoissonDistributionTest, AlgorithmExpectedValue_1) {
absl::random_internal::sequence_urbg urbg({0x8000000000000001ull});
absl::poisson_distribution<int> dist(5);
EXPECT_EQ(7, dist(urbg));
}
TEST(PoissonDistributionTest, AlgorithmExpectedValue_2) {
absl::random_internal::sequence_urbg urbg({0x8000000000000001ull});
absl::poisson_distribution<int> dist(25);
EXPECT_EQ(36, dist(urbg));
}
TEST(PoissonDistributionTest, AlgorithmExpectedValue_3) {
absl::random_internal::sequence_urbg urbg(
{0x7fffffffffffffffull, 0x8000000000000000ull});
absl::poisson_distribution<int> dist(121);
EXPECT_EQ(121, dist(urbg));
}
}
| https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/poisson_distribution.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/poisson_distribution_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4
de5fe46c-970f-4768-becc-1f57168e0534 | cpp | tensorflow/tensorflow | auto_mixed_precision | tensorflow/core/grappler/optimizers/auto_mixed_precision.cc | tensorflow/core/grappler/optimizers/auto_mixed_precision_test.cc |
#include "tensorflow/core/grappler/optimizers/auto_mixed_precision.h"
#include <fstream>
#include <memory>
#include <string>
#include <unordered_map>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/costs/virtual_placer.h"
#include "tensorflow/core/grappler/devices.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/auto_mixed_precision_lists.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/util/env_var.h"
#include "tensorflow/core/util/util.h"
namespace tensorflow {
namespace grappler {
namespace {
bool ShouldSimulateGpu() {
bool is_enabled = [] {
bool ret = false;
string var;
TF_CHECK_OK(ReadStringFromEnvVar(
"TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_SIMULATE_GPU", "", &var));
TF_CHECK_OK(
ReadBoolFromEnvVar("TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_SIMULATE_GPU",
false, &ret));
return ret;
}();
return is_enabled;
}
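// fp16 is only treated as fast on CUDA GPUs of compute capability 7.0 (Volta)
// or newer, or on an allowlisted set of ROCm architectures; otherwise the
// simulate-GPU environment variable above decides.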
#if GOOGLE_CUDA
const std::pair<int, int> kMinGPUArch = {7, 0};
#else
const std::pair<int, int> kMinGPUArch = {0, 0};
#endif
const char kSuffix[] = "AutoMixedPrecision";
const char kCastToFp16[] = "CastToFp16";
const char kCastToBf16[] = "CastToBf16";
const char kCastToFp32[] = "CastToFp32";
#if GOOGLE_CUDA
std::pair<int, int> GetDeviceGPUArch(
const DeviceProperties& device_properties) {
if (device_properties.type() != "GPU") return {0, 0};
string arch_str = device_properties.environment().at("architecture");
std::vector<string> split_arch_str = str_util::Split(arch_str, '.');
if (split_arch_str.empty()) {
return {0, 0};
}
int major, minor;
if (!strings::safe_strto32(split_arch_str[0], &major)) {
return {0, 0};
}
if (split_arch_str.size() > 1) {
if (strings::safe_strto32(split_arch_str[1], &minor)) {
return {major, minor};
} else {
return {0, 0};
}
} else {
return {major, 0};
}
}
#endif
bool HasFastFP16Support(const DeviceProperties& props) {
#if GOOGLE_CUDA
return GetDeviceGPUArch(props) >= kMinGPUArch;
#elif TENSORFLOW_USE_ROCM
absl::flat_hash_set<std::string> FP16SupportedDevices = {{"gfx906"},
{"gfx908"}};
std::string gcnArchName = props.environment().at("architecture");
std::vector<std::string> gpu_arch = absl::StrSplit(gcnArchName, ":");
return !gpu_arch.empty() && FP16SupportedDevices.contains(gpu_arch[0]);
#endif
return ShouldSimulateGpu();
}
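// TypeAttrId identifies one type attribute of a node: a "type" attr, one
// element of a "list(type)" attr (attr_name plus type_index), or a fixed
// DataType for arguments declared with a concrete type in the OpDef.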
struct TypeAttrId {
static constexpr int kSingleType = -1;
explicit TypeAttrId(const string& _attr_name, int _type_index = kSingleType)
: attr_name(_attr_name),
type_index(_type_index),
fixed_type(DT_INVALID) {}
explicit TypeAttrId(DataType _fixed_type)
: attr_name(), type_index(kSingleType), fixed_type(_fixed_type) {}
bool operator==(const TypeAttrId& other) const {
return attr_name == other.attr_name && type_index == other.type_index &&
fixed_type == other.fixed_type;
}
bool operator<(const TypeAttrId& other) const {
return std::make_tuple(attr_name, type_index, fixed_type) <
std::make_tuple(other.attr_name, other.type_index, other.fixed_type);
}
template <typename H>
friend H AbslHashValue(H h, const TypeAttrId& ta) {
return H::combine(std::move(h), ta.attr_name, ta.type_index, ta.fixed_type);
}
string DebugString() const {
if (!attr_name.empty()) {
if (type_index == kSingleType) {
return attr_name;
} else {
return strings::StrCat(attr_name, "[", type_index, "]");
}
} else {
return tensorflow::DataTypeString(fixed_type);
}
}
string attr_name;
int type_index;
DataType fixed_type;
};
DataType GetDataType(const NodeDef& node, const TypeAttrId& type_attr) {
if (type_attr.attr_name.empty()) {
return type_attr.fixed_type;
}
if (!node.attr().count(type_attr.attr_name)) {
return DT_INVALID;
}
const AttrValue& attr_value = node.attr().at(type_attr.attr_name);
if (type_attr.type_index == TypeAttrId::kSingleType) {
return attr_value.type();
} else {
if (type_attr.type_index < 0 ||
type_attr.type_index >= attr_value.list().type_size()) {
return DT_INVALID;
}
return attr_value.list().type(type_attr.type_index);
}
}
bool SetDataType(NodeDef* node, const TypeAttrId& type_attr, DataType type) {
if (type_attr.attr_name.empty() || !node->attr().count(type_attr.attr_name)) {
return false;
}
AttrValue& attr_value = node->mutable_attr()->at(type_attr.attr_name);
if (type_attr.type_index == TypeAttrId::kSingleType) {
attr_value.set_type(type);
} else {
if (type_attr.type_index < 0 ||
type_attr.type_index >= attr_value.list().type_size()) {
return false;
}
attr_value.mutable_list()->set_type(type_attr.type_index, type);
}
return true;
}
std::vector<std::pair<int, int>> ArgDefIndexes(const NodeDef& node, int arg_idx,
const OpDef::ArgDef& arg_def) {
std::vector<std::pair<int, int>> argdef_inds;
if (!arg_def.type_list_attr().empty()) {
int num_types = node.attr().at(arg_def.type_list_attr()).list().type_size();
for (int type_idx = 0; type_idx < num_types; ++type_idx) {
argdef_inds.push_back({arg_idx, type_idx});
}
} else {
int num_repeat = 1;
if (node.attr().count(arg_def.number_attr())) {
num_repeat = node.attr().at(arg_def.number_attr()).i();
}
argdef_inds.insert(argdef_inds.end(), num_repeat, {arg_idx, -1});
}
return argdef_inds;
}
std::vector<std::pair<int, int>> InputPortArgDefIndexes(const NodeDef& node,
const OpDef& op_def) {
std::vector<std::pair<int, int>> argdef_inds;
argdef_inds.reserve(op_def.input_arg_size());
for (int arg_idx = 0; arg_idx < op_def.input_arg_size(); ++arg_idx) {
const OpDef::ArgDef& arg_def = op_def.input_arg(arg_idx);
auto arg_results = ArgDefIndexes(node, arg_idx, arg_def);
argdef_inds.insert(argdef_inds.end(), arg_results.begin(),
arg_results.end());
}
return argdef_inds;
}
std::vector<std::pair<int, int>> OutputPortArgDefIndexes(const NodeDef& node,
const OpDef& op_def) {
std::vector<std::pair<int, int>> argdef_inds;
argdef_inds.reserve(op_def.output_arg_size());
for (int arg_idx = 0; arg_idx < op_def.output_arg_size(); ++arg_idx) {
const OpDef::ArgDef& arg_def = op_def.output_arg(arg_idx);
auto arg_results = ArgDefIndexes(node, arg_idx, arg_def);
argdef_inds.insert(argdef_inds.end(), arg_results.begin(),
arg_results.end());
}
return argdef_inds;
}
TypeAttrId GetTypeAttrId(const OpDef::ArgDef& arg_def, int arg_type_index) {
if (!arg_def.type_list_attr().empty()) {
return TypeAttrId(arg_def.type_list_attr(), arg_type_index);
} else if (!arg_def.type_attr().empty()) {
return TypeAttrId(arg_def.type_attr());
} else {
return TypeAttrId(arg_def.type());
}
}
std::vector<int> NonControlInputs(const NodeDef& node) {
std::vector<int> pos;
for (int i = 0; i < node.input_size(); i++) {
if (!IsControlInput(node.input(i))) {
pos.push_back(i);
}
}
return pos;
}
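// NodeTypeAttrMap records, for every node, which input and output ports are
// governed by each type attribute (and the reverse port-to-attribute mapping),
// resolved through the graph's function library and op registry.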
class NodeTypeAttrMap {
public:
NodeTypeAttrMap() {}
explicit NodeTypeAttrMap(const GraphDef& graph) { TF_CHECK_OK(Init(graph)); }
Status Init(const GraphDef& graph) {
if (graph_ != nullptr) {
return errors::InvalidArgument("NodeTypeAttrMap is already initialized.");
}
graph_ = &graph;
function_library_.reset(
new FunctionLibraryDefinition(OpRegistry::Global(), graph.library()));
for (const NodeDef& node : graph.node()) {
TF_RETURN_IF_ERROR(AddNode(node));
}
return absl::OkStatus();
}
bool is_initialized() const { return graph_ != nullptr; }
absl::flat_hash_set<TypeAttrId> GetTypeAttrs(const NodeDef& node) const {
DCHECK(is_initialized()) << "NodeTypeAttrMap is not initialized";
absl::flat_hash_set<TypeAttrId> type_attrs;
const auto iter = type2io_.find(&node);
CHECK(iter != type2io_.end());
for (const auto& key_value : iter->second) {
type_attrs.insert(key_value.first);
}
return type_attrs;
}
const absl::flat_hash_set<int>& GetInputPorts(
const NodeDef& node, const TypeAttrId& type_attr) const {
DCHECK(is_initialized()) << "NodeTypeAttrMap is not initialized";
return type2io_.at(&node).at(type_attr).first;
}
const absl::flat_hash_set<int>& GetOutputPorts(
const NodeDef& node, const TypeAttrId& type_attr) const {
DCHECK(is_initialized()) << "NodeTypeAttrMap is not initialized";
return type2io_.at(&node).at(type_attr).second;
}
TypeAttrId GetInputTypeAttr(const NodeDef& node, int port) const {
DCHECK(is_initialized()) << "NodeTypeAttrMap is not initialized";
const auto iter = io2type_.find(&node);
DCHECK(iter != io2type_.end())
<< "Node " << node.name() << " doesn't exist in a graph";
auto type_vec = io2type_.at(&node).first;
CHECK_GE(port, 0);
CHECK_LT(port, type_vec.size());
return type_vec[port];
}
TypeAttrId GetOutputTypeAttr(const NodeDef& node, int port) const {
DCHECK(is_initialized()) << "NodeTypeAttrMap is not initialized";
auto type_vec = io2type_.at(&node).second;
CHECK_GE(port, 0);
CHECK_LT(port, type_vec.size());
return type_vec[port];
}
private:
Status AddNode(const NodeDef& node) {
const OpDef* op_def_ptr = nullptr;
TF_RETURN_IF_ERROR(function_library_->LookUpOpDef(node.op(), &op_def_ptr));
const OpDef& op_def = *op_def_ptr;
auto& type2io_entry = type2io_[&node];
auto& io2type_entry = io2type_[&node];
auto input_arg_inds = InputPortArgDefIndexes(node, op_def);
if (NonControlInputs(node).size() != input_arg_inds.size()) {
return errors::InvalidArgument(
"Expected ", node.op(), " node ", node.name(), " to have ",
input_arg_inds.size(), " non-control input(s), but got ",
node.input_size());
}
io2type_entry.first.reserve(input_arg_inds.size());
for (int i = 0; i < static_cast<int>(input_arg_inds.size()); ++i) {
const auto& arg_inds = input_arg_inds[i];
const OpDef::ArgDef& arg_def = op_def.input_arg(arg_inds.first);
TypeAttrId type_attr = GetTypeAttrId(arg_def, arg_inds.second);
if (!type_attr.attr_name.empty() &&
!node.attr().count(type_attr.attr_name)) {
return errors::InvalidArgument("Type attribute ", type_attr.attr_name,
" is not present in node ", node.name());
}
type2io_entry[type_attr].first.insert(i);
io2type_entry.first.push_back(type_attr);
}
auto output_arg_inds = OutputPortArgDefIndexes(node, op_def);
io2type_entry.second.reserve(output_arg_inds.size());
for (int i = 0; i < static_cast<int>(output_arg_inds.size()); ++i) {
const auto& arg_inds = output_arg_inds[i];
const OpDef::ArgDef& arg_def = op_def.output_arg(arg_inds.first);
TypeAttrId type_attr = GetTypeAttrId(arg_def, arg_inds.second);
if (!type_attr.attr_name.empty() &&
!node.attr().count(type_attr.attr_name)) {
return errors::InvalidArgument("Type attribute ", type_attr.attr_name,
" is not present in node ", node.name());
}
type2io_entry[type_attr].second.insert(i);
io2type_entry.second.push_back(type_attr);
}
for (const auto& attr : node.attr()) {
const string& attr_name = attr.first;
if (!attr_name.empty() && attr_name[0] == '_') continue;
const AttrValue& attr_value = attr.second;
const OpDef::AttrDef* attr_def = FindAttr(attr_name, op_def);
if (!attr_def) {
return errors::InvalidArgument("AttrDef not found for attribute ",
attr_name, " of node ", node.name());
}
if (attr_def->type() == "type") {
type2io_entry[TypeAttrId(attr_name)];
} else if (attr_def->type() == "list(type)") {
for (int i = 0; i < attr_value.list().type_size(); ++i) {
type2io_entry[TypeAttrId(attr_name, i)];
}
}
}
return absl::OkStatus();
}
const GraphDef* graph_ = nullptr;
std::unique_ptr<FunctionLibraryDefinition> function_library_;
typedef absl::flat_hash_set<int> IntSet;
typedef absl::flat_hash_map<TypeAttrId, std::pair<IntSet, IntSet>> Type2IOMap;
absl::flat_hash_map<const NodeDef*, Type2IOMap> type2io_;
typedef std::vector<TypeAttrId> TypeAttrIdVec;
absl::flat_hash_map<const NodeDef*, std::pair<TypeAttrIdVec, TypeAttrIdVec>>
io2type_;
};
struct NodeTypeId {
NodeTypeId(const NodeDef* _node, const TypeAttrId& _type_attr)
: node(_node), type_attr(_type_attr) {}
const NodeDef* node;
TypeAttrId type_attr;
bool operator==(const NodeTypeId& other) const {
return node == other.node && type_attr == other.type_attr;
}
template <typename H>
friend H AbslHashValue(H h, const NodeTypeId& nt) {
return H::combine(std::move(h), nt.node, nt.type_attr);
}
};
struct NodeTypeIdEdge {
NodeTypeIdEdge(const NodeTypeId& _src, const NodeTypeId& _dst)
: src(_src), dst(_dst) {}
NodeTypeId src;
NodeTypeId dst;
};
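// GraphTypeTopologyView is a fanin/fanout view over (node, type attribute)
// pairs rather than whole nodes, so type information can be traced through the
// graph per attribute; ephemeral edges can be added on top without mutating
// the underlying GraphDef.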
class GraphTypeTopologyView {
public:
GraphTypeTopologyView() = default;
explicit GraphTypeTopologyView(bool skip_invalid_edges)
: skip_invalid_edges_(skip_invalid_edges) {}
Status InitializeFromGraph(const GraphDef& graph,
const NodeTypeAttrMap& node_type_map);
Status AddEphemeralEdges(absl::Span<const NodeTypeIdEdge> ephemeral_edges);
bool is_initialized() const { return graph_ != nullptr; }
int num_nodes() const { return num_nodes_; }
const GraphDef* graph() const { return graph_; }
bool HasNode(absl::string_view node_name, const TypeAttrId& type_attr) const;
const NodeTypeId* GetNode(absl::string_view node_name,
const TypeAttrId& type_attr) const;
const NodeTypeId* GetNode(int node_idx) const;
const absl::optional<int> GetNodeIndex(absl::string_view node_name,
const TypeAttrId& type_attr) const;
const absl::optional<int> GetNodeIndex(const NodeTypeId& node) const;
const absl::InlinedVector<int, 4>& GetFanin(int node_idx) const;
const absl::InlinedVector<int, 2>& GetFanout(int node_idx) const;
private:
struct NodeTypeKey : public std::pair<absl::string_view, TypeAttrId> {
typedef std::pair<absl::string_view, TypeAttrId> Base;
using Base::pair;
template <typename H>
friend H AbslHashValue(H h, const NodeTypeKey& nt) {
return H::combine(std::move(h), nt.first, nt.second);
}
};
bool skip_invalid_edges_ = false;
const GraphDef* graph_ = nullptr;
int num_nodes_ = 0;
std::vector<NodeTypeId> node_type_attrs_;
absl::flat_hash_map<absl::string_view, int> node_name_to_index_;
absl::flat_hash_map<NodeTypeKey, int> node_type_name_to_index_;
std::vector<absl::InlinedVector<int, 4>> fanins_;
std::vector<absl::InlinedVector<int, 2>> fanouts_;
absl::InlinedVector<int, 4> empty_fanin_;
absl::InlinedVector<int, 2> empty_fanout_;
};
template <typename T>
inline void SortAndRemoveDuplicates(T* v) {
std::sort(v->begin(), v->end());
v->erase(std::unique(v->begin(), v->end()), v->end());
}
Status GraphTypeTopologyView::InitializeFromGraph(
const GraphDef& graph, const NodeTypeAttrMap& node_type_map) {
if (graph_ != nullptr) {
return errors::InvalidArgument(
"GraphTypeTopologyView is already initialized.");
}
graph_ = &graph;
int num_nodedefs = graph.node_size();
node_name_to_index_.rehash(num_nodedefs);
node_type_attrs_.reserve(num_nodedefs);
node_type_name_to_index_.rehash(num_nodedefs);
for (int node_idx = 0; node_idx < num_nodedefs; ++node_idx) {
const NodeDef& node = graph.node(node_idx);
node_name_to_index_.emplace(node.name(), node_idx);
for (const TypeAttrId& type_attr : node_type_map.GetTypeAttrs(node)) {
int node_type_idx = node_type_attrs_.size();
node_type_name_to_index_.emplace(NodeTypeKey(node.name(), type_attr),
node_type_idx);
node_type_attrs_.emplace_back(&node, type_attr);
}
}
num_nodes_ = node_type_attrs_.size();
fanins_.resize(num_nodes_);
fanouts_.resize(num_nodes_);
for (int node_type_idx = 0; node_type_idx < num_nodes_; ++node_type_idx) {
const NodeTypeId& node_type = node_type_attrs_.at(node_type_idx);
auto input_ports =
node_type_map.GetInputPorts(*node_type.node, node_type.type_attr);
fanins_[node_type_idx].reserve(input_ports.size());
for (int port : input_ports) {
const string& input = node_type.node->input(port);
TensorId tensor = ParseTensorName(input);
const auto it = node_name_to_index_.find(tensor.node());
const bool valid_input = it != node_name_to_index_.end();
if (!valid_input) {
const string error_message = absl::StrCat(
"Non-existent input ", input, " in node ", node_type.node->name());
if (skip_invalid_edges_) {
VLOG(3) << "Skip error: " << error_message;
} else {
return errors::InvalidArgument(error_message);
}
}
if (valid_input) {
const int input_idx = it->second;
const NodeDef& input_node = graph_->node(input_idx);
TypeAttrId input_type_attr =
node_type_map.GetOutputTypeAttr(input_node, tensor.index());
const auto it2 = node_type_name_to_index_.find(
NodeTypeKey(input_node.name(), input_type_attr));
if (it2 == node_type_name_to_index_.end()) {
if (!skip_invalid_edges_) {
return errors::InvalidArgument("Did not find type attr ",
input_type_attr.DebugString(),
" in node ", input_node.name());
}
continue;
}
int input_node_type_idx = it2->second;
fanins_[node_type_idx].push_back(input_node_type_idx);
fanouts_[input_node_type_idx].push_back(node_type_idx);
}
}
SortAndRemoveDuplicates(&fanins_[node_type_idx]);
}
for (int node_type_idx = 0; node_type_idx < num_nodes_; ++node_type_idx) {
SortAndRemoveDuplicates(&fanouts_[node_type_idx]);
}
return absl::OkStatus();
}
Status GraphTypeTopologyView::AddEphemeralEdges(
absl::Span<const NodeTypeIdEdge> ephemeral_edges) {
for (const NodeTypeIdEdge& edge : ephemeral_edges) {
const auto src = node_name_to_index_.find(edge.src.node->name());
const bool valid_src = src != node_name_to_index_.end();
if (!valid_src) {
const string error_message =
absl::StrCat("Non-existent src node: ", edge.src.node->name());
if (skip_invalid_edges_) {
VLOG(0) << "Skip error: " << error_message;
} else {
return errors::InvalidArgument(error_message);
}
}
const auto dst = node_name_to_index_.find(edge.dst.node->name());
const bool valid_dst = dst != node_name_to_index_.end();
if (!valid_dst) {
const string error_message =
absl::StrCat("Non-existent dst node: ", edge.dst.node->name());
if (skip_invalid_edges_) {
VLOG(0) << "Skip error: " << error_message;
} else {
return errors::InvalidArgument(error_message);
}
}
if (valid_dst && valid_src) {
int src_node_type_idx = node_type_name_to_index_.at(
NodeTypeKey(edge.src.node->name(), edge.src.type_attr));
int dst_node_type_idx = node_type_name_to_index_.at(
NodeTypeKey(edge.dst.node->name(), edge.dst.type_attr));
fanins_[dst_node_type_idx].push_back(src_node_type_idx);
fanouts_[src_node_type_idx].push_back(dst_node_type_idx);
}
}
for (int node_type_idx = 0; node_type_idx < num_nodes_; ++node_type_idx) {
SortAndRemoveDuplicates(&fanins_[node_type_idx]);
SortAndRemoveDuplicates(&fanouts_[node_type_idx]);
}
return absl::OkStatus();
}
bool GraphTypeTopologyView::HasNode(absl::string_view node_name,
const TypeAttrId& type_attr) const {
DCHECK(is_initialized()) << "GraphTypeTopologyView is not initialized";
NodeTypeKey key(node_name, type_attr);
const auto it = node_type_name_to_index_.find(key);
return it != node_type_name_to_index_.end();
}
const NodeTypeId* GraphTypeTopologyView::GetNode(
absl::string_view node_name, const TypeAttrId& type_attr) const {
DCHECK(is_initialized()) << "GraphTypeTopologyView is not initialized";
NodeTypeKey key(node_name, type_attr);
const auto it = node_type_name_to_index_.find(key);
return it == node_type_name_to_index_.end()
? nullptr
: &node_type_attrs_.at(it->second);
}
const NodeTypeId* GraphTypeTopologyView::GetNode(int node_idx) const {
DCHECK(is_initialized()) << "GraphTypeTopologyView is not initialized";
DCHECK(node_idx >= 0 && node_idx < num_nodes_) << "node_idx is out of range";
return &node_type_attrs_.at(node_idx);
}
const absl::optional<int> GraphTypeTopologyView::GetNodeIndex(
absl::string_view node_name, const TypeAttrId& type_attr) const {
DCHECK(is_initialized()) << "GraphTypeTopologyView is not initialized";
NodeTypeKey key(node_name, type_attr);
const auto it = node_type_name_to_index_.find(key);
DCHECK(it != node_type_name_to_index_.end())
<< "Node doesn't exist in a graph";
return it == node_type_name_to_index_.end() ? absl::nullopt
: absl::make_optional(it->second);
}
const absl::optional<int> GraphTypeTopologyView::GetNodeIndex(
const NodeTypeId& node) const {
return GetNodeIndex(node.node->name(), node.type_attr);
}
const absl::InlinedVector<int, 4>& GraphTypeTopologyView::GetFanin(
int node_idx) const {
DCHECK(is_initialized()) << "GraphTypeTopologyView is not initialized";
const bool is_valid_node_idx = node_idx >= 0 && node_idx < num_nodes_;
DCHECK(is_valid_node_idx) << "node_idx is out of range";
return is_valid_node_idx ? fanins_[node_idx] : empty_fanin_;
}
const absl::InlinedVector<int, 2>& GraphTypeTopologyView::GetFanout(
int node_idx) const {
DCHECK(is_initialized()) << "GraphTypeTopologyView is not initialized";
const bool is_valid_node_idx = node_idx >= 0 && node_idx < num_nodes_;
DCHECK(is_valid_node_idx) << "node_idx is out of range";
return is_valid_node_idx ? fanouts_[node_idx] : empty_fanout_;
}
enum class TypeTraversalDirection {
kFollowInputs,
kFollowOutputs,
kFollowInputsAndOutputs,
};
struct DfsTypeCallbacks {
DfsTypeCallbacks() = default;
DfsTypeCallbacks(std::function<void(int)> pre, std::function<void(int)> post,
std::function<void(int, int)> back_edge)
: pre_order(std::move(pre)),
post_order(std::move(post)),
on_back_edge(std::move(back_edge)) {}
static DfsTypeCallbacks PreOrder(std::function<void(int)> pre) {
return DfsTypeCallbacks(std::move(pre), nullptr, nullptr);
}
static DfsTypeCallbacks PostOrder(std::function<void(int)> post) {
return DfsTypeCallbacks(nullptr, std::move(post), nullptr);
}
std::function<void(int)> pre_order;
std::function<void(int)> post_order;
std::function<void(int, int)> on_back_edge;
};
struct DfsTypePredicates {
DfsTypePredicates() = default;
DfsTypePredicates(std::function<bool(int)> enter,
std::function<bool(int)> advance)
: enter(std::move(enter)), advance(std::move(advance)) {}
static DfsTypePredicates Enter(std::function<bool(int)> enter) {
return DfsTypePredicates(std::move(enter), nullptr);
}
static DfsTypePredicates Advance(std::function<bool(int)> advance) {
return DfsTypePredicates(nullptr, std::move(advance));
}
std::function<bool(int)> enter;
std::function<bool(int)> advance;
};
struct DfsStackElem {
DfsStackElem(int node, bool children_visited, int src)
: node(node), children_visited(children_visited), src(src) {}
explicit DfsStackElem(int node) : DfsStackElem(node, false, -1) {}
int node;
bool children_visited;
int src;
};
enum class NodeState { kNotVisited, kVisiting, kDone };
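// Iterative depth-first traversal over the (node, type attribute) vertices of
// `graph_type_view`, starting from `from`. `predicates.enter` decides whether
// a vertex is visited at all, `predicates.advance` whether its neighbors are
// pushed; `callbacks.pre_order` / `post_order` run before/after a vertex's
// subtree, and `on_back_edge` fires when the walk reaches a vertex that is
// still being visited (a cycle).
//
// Minimal illustrative sketch (lambda bodies are placeholders), collecting
// every vertex reachable from `root` through inputs:
//
//   absl::flat_hash_set<int> reachable;
//   DfsTypeTraversal(graph_type_view, {&root},
//                    TypeTraversalDirection::kFollowInputs,
//                    DfsTypePredicates::Enter([&](int) { return true; }),
//                    DfsTypeCallbacks::PreOrder(
//                        [&](int idx) { reachable.insert(idx); }));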
void DfsTypeTraversal(const GraphTypeTopologyView& graph_type_view,
const absl::Span<const NodeTypeId* const> from,
const TypeTraversalDirection direction,
const DfsTypePredicates& predicates,
const DfsTypeCallbacks& callbacks) {
std::vector<DfsStackElem> stack;
stack.reserve(from.size());
for (const NodeTypeId* node : from) {
const absl::optional<int> node_idx = graph_type_view.GetNodeIndex(*node);
DCHECK(node_idx.has_value())
<< "Illegal start node: " << node->node->name();
if (node_idx.has_value()) {
stack.emplace_back(node_idx.value());
}
}
absl::flat_hash_map<int, NodeState> node_state;
while (!stack.empty()) {
DfsStackElem w = stack.back();
stack.pop_back();
NodeState& state = node_state[w.node];
if (state == NodeState::kDone) continue;
if (predicates.enter && !predicates.enter(w.node)) {
state = NodeState::kDone;
continue;
}
if (w.children_visited) {
state = NodeState::kDone;
if (callbacks.post_order) {
callbacks.post_order(w.node);
}
continue;
}
if (state == NodeState::kVisiting) {
if (callbacks.on_back_edge) {
callbacks.on_back_edge(w.src, w.node);
}
continue;
}
state = NodeState::kVisiting;
if (callbacks.pre_order) {
callbacks.pre_order(w.node);
}
stack.emplace_back(w.node, true, w.src);
if (predicates.advance && !predicates.advance(w.node)) {
continue;
}
if (direction == TypeTraversalDirection::kFollowInputs ||
direction == TypeTraversalDirection::kFollowInputsAndOutputs) {
for (const int fanin : graph_type_view.GetFanin(w.node)) {
stack.emplace_back(fanin, false, w.node);
}
}
if (direction == TypeTraversalDirection::kFollowOutputs ||
direction == TypeTraversalDirection::kFollowInputsAndOutputs) {
for (const int fanout : graph_type_view.GetFanout(w.node)) {
stack.emplace_back(fanout, false, w.node);
}
}
}
}
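// Returns the set of data types an op attribute may take. An empty
// allowed_values list means the attribute is unconstrained, so every type is
// allowed; otherwise the set is built as a bitmask with one bit per DataType
// enum value (e.g. DT_FLOAT, enum value 1, contributes bit 1).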
DataTypeSet AllowedDataTypes(const OpDef::AttrDef& attr_def) {
const auto& allowed_types = attr_def.allowed_values().list().type();
if (allowed_types.empty()) {
return AllTypes();
}
uint32 dtype_mask = 0;
for (int dtype : allowed_types) {
dtype_mask |= 1u << dtype;
}
return DataTypeSet(dtype_mask);
}
DataTypeSet AllowedDataTypes(const OpDef& op_def, const TypeAttrId& t_attr_id) {
if (t_attr_id.attr_name.empty()) {
return ToSet(t_attr_id.fixed_type);
}
const OpDef::AttrDef* attr_def = FindAttr(t_attr_id.attr_name, op_def);
CHECK(attr_def);
return AllowedDataTypes(*attr_def);
}
Status ValidateLists(const gtl::FlatSet<string>& allow_list,
const gtl::FlatSet<string>& deny_list,
const gtl::FlatSet<string>& infer_list,
const gtl::FlatSet<string>& clear_list) {
std::vector<gtl::FlatSet<string>> lists{allow_list, deny_list, infer_list,
clear_list};
std::multiset<string> counts;
for (const auto& list : lists) {
counts.insert(list.begin(), list.end());
}
bool duplicates = false;
for (const auto& s : counts) {
if (counts.count(s) > 1) {
duplicates = true;
LOG(ERROR) << "Op present in multiple lists: " << s;
}
}
if (duplicates) {
return errors::InvalidArgument("Op lists have conflicting entries");
} else {
return absl::OkStatus();
}
}
bool HasInputOrOutputRefs(const NodeDef& node) {
const OpDef* op_def;
Status status = OpRegistry::Global()->LookUpOpDef(node.op(), &op_def);
if (!status.ok()) {
return true;
}
for (const auto& input : op_def->input_arg()) {
if (input.is_ref()) {
return true;
}
}
for (const auto& output : op_def->output_arg()) {
if (output.is_ref()) {
return true;
}
}
return false;
}
bool CanForceFP16(const NodeDef& node) {
return node.op() != "Const" && node.op() != "SoftmaxCrossEntropyWithLogits" &&
!IsStateful(node) && !HasInputOrOutputRefs(node);
}
int GetCudaVersion(
const std::unordered_map<string, DeviceProperties>& devices) {
for (const auto& device : devices) {
const DeviceProperties& device_properties = device.second;
if (device_properties.type() == "GPU") {
const auto& device_env = device_properties.environment();
auto it = device_env.find("cuda");
if (it != device_env.end()) {
string cuda_version_str = it->second;
return std::stoi(cuda_version_str);
}
}
}
return 0;
}
int GetCudnnVersion(
const std::unordered_map<string, DeviceProperties>& devices) {
for (const auto& device : devices) {
const DeviceProperties& device_properties = device.second;
if (device_properties.type() == "GPU") {
const auto& device_env = device_properties.environment();
auto it = device_env.find("cudnn");
if (it != device_env.end()) {
string cudnn_version_str = it->second;
return std::stoi(cudnn_version_str);
}
}
}
return 0;
}
std::unordered_map<string, DeviceProperties> GetDevices(Cluster* cluster) {
if (!ShouldSimulateGpu()) {
return cluster->GetDevices();
}
bool has_gpu = false;
for (const auto& device : cluster->GetDevices()) {
const DeviceProperties& device_properties = device.second;
if (device_properties.type() == "GPU") {
has_gpu = true;
break;
}
}
if (has_gpu) {
return cluster->GetDevices();
}
std::unordered_map<string, DeviceProperties> devices(cluster->GetDevices());
DeviceProperties gpu_device_properties;
gpu_device_properties.set_type("GPU");
#if GOOGLE_CUDA
gpu_device_properties.set_vendor("NVIDIA");
gpu_device_properties.mutable_environment()->insert({"architecture", "8.0"});
gpu_device_properties.mutable_environment()->insert({"cuda", "11050"});
gpu_device_properties.mutable_environment()->insert({"cudnn", "8302"});
#elif TENSORFLOW_USE_ROCM
gpu_device_properties.set_vendor("Advanced Micro Devices, Inc");
gpu_device_properties.mutable_environment()->insert(
    {"architecture", "gfx908"});
#endif
devices.emplace(std::make_pair("/job:localhost/replica:0/task:0/device:GPU:0",
                               gpu_device_properties));
return devices;
}
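// Implements the rewrite itself. Each (node, type attribute) pair in the
// graph is painted ALLOW or DENY (or left unpainted) based on the four op
// lists (allow/deny/infer/clear) and on how the pairs are connected in
// GraphTypeTopologyView. Float32 type attributes that end up ALLOW are
// switched to target_dtype_ (DT_HALF or DT_BFLOAT16), and Cast nodes are
// inserted on every edge where an ALLOW output meets a non-ALLOW input or
// vice versa.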
class AutoMixedPrecisionImpl {
public:
enum class CastType { FP16, FP32, AUTO };
AutoMixedPrecisionImpl(Cluster* cluster,
const std::unordered_set<string>& nodes_to_preserve,
GraphDef* graph, string id,
AutoMixedPrecisionMode mode)
: devices_(GetDevices(cluster)),
virtual_placer_(devices_),
nodes_to_preserve_(nodes_to_preserve),
graph_(graph),
function_library_(OpRegistry::Global(), graph->library()),
id_(id),
graph_view_(graph),
cuda_version_(GetCudaVersion(devices_)),
cudnn_version_(GetCudnnVersion(devices_)),
num_nonvar_casts_to_f16_(0),
mode_(mode),
target_dtype_((mode_ == AutoMixedPrecisionMode::CUDA ||
mode_ == AutoMixedPrecisionMode::CPU ||
mode_ == AutoMixedPrecisionMode::FP16_CPU)
? DT_HALF
: DT_BFLOAT16) {}
Status Optimize();
private:
typedef absl::flat_hash_set<NodeTypeId> NodeTypeIdSet;
std::unique_ptr<AutoMixedPrecisionLists> get_mixed_precision_lists() const {
switch (mode_) {
case AutoMixedPrecisionMode::CUDA:
return std::make_unique<AutoMixedPrecisionListsFp16>(
cuda_version_, cudnn_version_, AutoMixedPrecisionMode::CUDA);
case AutoMixedPrecisionMode::BF16:
return std::make_unique<AutoMixedPrecisionListsMkl>();
    case AutoMixedPrecisionMode::CPU:
      // The CPU path reuses the FP16 (CUDA) op lists, passing fixed CUDA and
      // cuDNN version numbers rather than detected ones.
      return std::make_unique<AutoMixedPrecisionListsFp16>(
          10000, 8000, AutoMixedPrecisionMode::CPU);
case AutoMixedPrecisionMode::FP16_CPU:
return std::make_unique<AutoMixedPrecisionListsFp16>(
0, 0, AutoMixedPrecisionMode::FP16_CPU);
}
}
Status PrintDebugLogs(bool preop, size_t timestamp);
void LogSkippedNode(const NodeDef& node, const string& device_type) const;
bool MustPreserve(const NodeDef& node) const;
bool IsOnDevice(const NodeDef& node, const string& device_type) const;
bool IsOnSuitableGPUArch(const NodeDef& node) const;
bool ShouldProcess(const NodeDef& node) const;
bool NodeHasF16KernelForTypeAttr(const NodeDef& node, TypeAttrId taid) const;
bool NodeImplicitlyReadsNonResourceVariable(const NodeDef& node) const;
void ConvertBatchNormOpsToV2();
bool SupportsF16(const NodeTypeId& node_type) const;
bool SupportsF16DataType(const NodeTypeId& node_type) const;
bool IsQuantized(const NodeTypeId& node_type) const;
const NodeTypeId* GetTensorListFloat32NodeTypeId(const NodeDef& node) const;
bool IsSourceOrSinkOp(const string& op) const;
void FindFloat32TensorListOpClustersAndDenylistUnsafe(
std::vector<absl::flat_hash_set<const NodeDef*>>* clusters,
absl::flat_hash_set<int>* deny_set) const;
void FindTensorListImplicitFloat32Edges(
const absl::flat_hash_set<const NodeDef*>& tensor_list_nodes,
std::vector<NodeTypeIdEdge>* implicit_fp32_edges) const;
void AddAllowlistOps(absl::flat_hash_set<int>* allow_set) const;
void RemoveAllowsetWithFp32(absl::flat_hash_set<int>* allow_set) const;
void PropagateDenyFwdThroughClearAndInfer(
absl::flat_hash_set<int>* deny_set) const;
void ForceColorMatchBetweenTensorListOps(
const absl::flat_hash_set<const NodeDef*>& tensor_list_nodes,
absl::flat_hash_set<int>* allow_set,
absl::flat_hash_set<int>* deny_set) const;
void AddClearAndInferToAllowIfBetweenAllow(
const absl::flat_hash_set<int>& deny_set,
absl::flat_hash_set<int>* allow_set) const;
void AddInferToAllowIfFollowAllow(const absl::flat_hash_set<int>& deny_set,
absl::flat_hash_set<int>* allow_set) const;
void PropagateAllowThroughClear(const absl::flat_hash_set<int>& deny_set,
absl::flat_hash_set<int>* allow_set) const;
Status ForceColorMatchOnRecurrentEdges(
absl::flat_hash_set<int>* allow_set) const;
void MakeCastsAllowIfAllOutputsAllow(
absl::flat_hash_set<int>* allow_set) const;
NodeDef BuildCastNode(const MutableGraphView::OutputPort& src, bool to_f16,
const string& device) const;
absl::StatusOr<NodeDef*> InsertCastNodeAtFanout(
const absl::flat_hash_set<int>& allow_set, const bool src_is_allow,
const CastType& cast_type, MutableGraphView::OutputPort& src);
absl::StatusOr<DataType> GetCastToType(const NodeDef* node) const;
void CollectOutputPorts(
const TypeAttrId& type_attr, NodeDef* node,
std::vector<MutableGraphView::OutputPort>& output_ports) const;
Status ChangeTypeAttrsAndAddCasts(const absl::flat_hash_set<int>& allow_set);
std::unordered_map<string, DeviceProperties> devices_;
VirtualPlacer virtual_placer_;
std::unordered_set<string> nodes_to_preserve_;
GraphDef* graph_;
FunctionLibraryDefinition function_library_;
string id_;
MutableGraphView graph_view_;
int cuda_version_;
int cudnn_version_;
int num_nonvar_casts_to_f16_;
NodeTypeAttrMap node_type_map_;
GraphTypeTopologyView graph_type_view_;
bool force_all_fp16_;
bool treat_infer_as_deny_;
AutoMixedPrecisionMode mode_;
gtl::FlatSet<string> f16_allowlist_;
gtl::FlatSet<string> f16_denylist_;
gtl::FlatSet<string> f16_inferlist_;
gtl::FlatSet<string> f16_clearlist_;
absl::flat_hash_set<const NodeDef*> should_process_nodes_;
DataType target_dtype_;
};
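// Builds (without adding it to the graph) a Cast NodeDef that converts the
// given output port either to target_dtype_ (to_f16 == true) or back to
// DT_FLOAT. The node name encodes the source node, port and cast direction,
// and an increasing id is appended until the name is unique in the graph.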
NodeDef AutoMixedPrecisionImpl::BuildCastNode(
const MutableGraphView::OutputPort& src, bool to_f16,
const string& device) const {
DataType src_type = to_f16 ? DT_FLOAT : target_dtype_;
DataType dst_type = to_f16 ? target_dtype_ : DT_FLOAT;
const char* cast_string = !to_f16 ? kCastToFp32
: target_dtype_ == DT_HALF ? kCastToFp16
: kCastToBf16;
int id = 0;
std::string name;
do {
name = absl::StrCat(src.node->name(), "-", src.port_id, "-", cast_string,
"-", id, "-", kSuffix);
++id;
} while (graph_view_.GetNode(name));
NodeDef node;
node.set_name(name);
node.set_op("Cast");
node.set_device(device);
node.add_input(strings::StrCat(src.node->name(), ":", src.port_id));
(*node.mutable_attr())["SrcT"].set_type(src_type);
(*node.mutable_attr())["DstT"].set_type(dst_type);
(*node.mutable_attr())["Truncate"].set_b(false);
return node;
}
bool AutoMixedPrecisionImpl::NodeHasF16KernelForTypeAttr(
const NodeDef& node, TypeAttrId taid) const {
NodeDef node_copy(node);
if (node.device().empty()) {
string device_name = virtual_placer_.get_canonical_device_name(node);
node_copy.set_device(device_name);
}
if (!SetDataType(&node_copy, taid, target_dtype_)) {
return false;
}
return IsKernelRegisteredForNode(node_copy).ok();
}
Status AutoMixedPrecisionImpl::PrintDebugLogs(bool preop, size_t timestamp) {
string prepend_path;
TF_RETURN_IF_ERROR(ReadStringFromEnvVar(
"TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_LOG_PATH", "", &prepend_path));
if (prepend_path.empty()) return absl::OkStatus();
string suffix =
strings::StrCat("_", preop ? "preop" : kSuffix, "_", id_, "_", timestamp);
string fname =
io::JoinPath(prepend_path, strings::StrCat("graphdef", suffix, ".pb"));
std::fstream f;
f.open(fname.c_str(), std::fstream::out | std::fstream::binary);
f << graph_->SerializeAsString();
f.close();
LOG(INFO) << "Saved " << (preop ? "pre-optimization" : "post-optimization")
<< " graph as binary to " << fname;
fname = io::JoinPath(prepend_path,
strings::StrCat("graphdef", suffix, ".pb.txt"));
f.open(fname.c_str(), std::fstream::out);
f << graph_->DebugString();
f.close();
LOG(INFO) << "Saved " << (preop ? "pre-optimization" : "post-optimization")
<< " graph as text to " << fname;
if (!preop) {
fname = io::JoinPath(prepend_path,
strings::StrCat("paintbuckets", suffix, ".txt"));
f.open(fname.c_str(), std::fstream::out);
std::unique_ptr<AutoMixedPrecisionLists> mp_lists =
get_mixed_precision_lists();
f << "AllowList:\n";
for (const auto& x : mp_lists->AllowList()) {
f << x << "\n";
}
f << "\nDenyList:\n";
for (const auto& x : mp_lists->DenyList()) {
f << x << "\n";
}
f << "\nInferList:\n";
for (const auto& x : mp_lists->InferList()) {
f << x << "\n";
}
f << "\nClearList:\n";
for (const auto& x : mp_lists->ClearList()) {
f << x << "\n";
}
f.close();
LOG(INFO) << "Saved paint bucket info to " << fname;
}
return absl::OkStatus();
}
void AutoMixedPrecisionImpl::LogSkippedNode(const NodeDef& node,
const string& device_type) const {
VLOG(2) << "Skipping " << node.op() << " node " << node.name()
<< " because it "
<< (MustPreserve(node)
? "must be preserved"
: absl::StrFormat(
"is not on the %s, or the %s arch is not suitable",
device_type, device_type));
}
bool AutoMixedPrecisionImpl::MustPreserve(const NodeDef& node) const {
return nodes_to_preserve_.count(node.name());
}
bool AutoMixedPrecisionImpl::IsOnDevice(const NodeDef& node,
const string& device_type) const {
string device_name;
if (node.device().empty()) {
device_name = virtual_placer_.get_canonical_device_name(node);
} else {
device_name = node.device();
}
string device;
string not_used;
if (DeviceNameUtils::SplitDeviceName(device_name, ¬_used, &device) &&
absl::StrContains(absl::AsciiStrToLower(device),
absl::AsciiStrToLower(device_type))) {
return true;
}
return false;
}
bool AutoMixedPrecisionImpl::IsOnSuitableGPUArch(const NodeDef& node) const {
return HasFastFP16Support(virtual_placer_.get_device(node));
}
bool AutoMixedPrecisionImpl::ShouldProcess(const NodeDef& node) const {
return should_process_nodes_.count(&node);
}
bool IsFloat32(const NodeTypeId& node_type) {
return GetDataType(*node_type.node, node_type.type_attr) ==
DataType::DT_FLOAT;
}
bool IsTensorListOp(const string& op) {
return absl::StrContains(op, "TensorList");
}
bool IsTensorListReaderOp(const string& op) {
static const gtl::FlatSet<string> tensor_list_reader_ops = {
"TensorListConcat", "TensorListConcatV2", "TensorListGather",
"TensorListGetItem", "TensorListPopBack", "TensorListStack"};
return tensor_list_reader_ops.count(op);
}
bool IsTensorListWriterOp(const string& op) {
static const gtl::FlatSet<string> tensor_list_writer_ops = {
"TensorListFromTensor", "TensorListPushBack",
"TensorListPushBackBatch", "TensorListScatter",
"TensorListScatterV2", "TensorListScatterIntoExistingList",
"TensorListSetItem", "TensorListSplit"};
return tensor_list_writer_ops.count(op);
}
bool AutoMixedPrecisionImpl::SupportsF16(const NodeTypeId& node_type) const {
const OpDef* op_def;
Status status =
OpRegistry::Global()->LookUpOpDef(node_type.node->op(), &op_def);
if (!status.ok()) return false;
return AllowedDataTypes(*op_def, node_type.type_attr)
.Contains(target_dtype_) &&
NodeHasF16KernelForTypeAttr(*node_type.node, node_type.type_attr);
}
bool AutoMixedPrecisionImpl::SupportsF16DataType(
const NodeTypeId& node_type) const {
const OpDef* op_def;
Status status =
OpRegistry::Global()->LookUpOpDef(node_type.node->op(), &op_def);
if (!status.ok()) return false;
return AllowedDataTypes(*op_def, node_type.type_attr).Contains(target_dtype_);
}
bool AutoMixedPrecisionImpl::IsQuantized(const NodeTypeId& node_type) const {
for (const TypeAttrId& type_attr :
node_type_map_.GetTypeAttrs(*node_type.node)) {
if (DataTypeIsQuantized(GetDataType(*node_type.node, type_attr))) {
return true;
}
}
return false;
}
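// Upgrades FusedBatchNorm / FusedBatchNormGrad nodes to their V2 variants and
// pins the added "U" type attribute to DT_FLOAT, so the batch-norm statistics
// can remain float32 even if the data type attribute "T" is later changed to
// half precision.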
void AutoMixedPrecisionImpl::ConvertBatchNormOpsToV2() {
for (int node_idx = 0; node_idx < graph_->node_size(); ++node_idx) {
NodeDef* node = graph_->mutable_node(node_idx);
if (!ShouldProcess(*node)) continue;
bool changed = false;
if (node->op() == "FusedBatchNorm") {
VLOG(2) << "Changing op of " << node->op() << " node " << node->name()
<< " to FusedBatchNormV2";
node->set_op("FusedBatchNormV2");
changed = true;
} else if (node->op() == "FusedBatchNormGrad") {
VLOG(2) << "Changing op of " << node->op() << " node " << node->name()
<< " to FusedBatchNormGradV2";
node->set_op("FusedBatchNormGradV2");
changed = true;
}
if (changed) {
(*node->mutable_attr())["U"].set_type(DT_FLOAT);
}
}
}
bool ShouldIgnorePerformance() {
static bool is_enabled = [] {
bool ret = false;
TF_CHECK_OK(ReadBoolFromEnvVar(
"TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_IGNORE_PERFORMANCE",
false, &ret));
return ret;
}();
return is_enabled;
}
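// Top-level driver. In order: build the op lists, pick the nodes eligible for
// processing (device and preserve checks), convert batch-norm ops to V2,
// build the type-attribute topology view, cluster TensorList ops and add
// ephemeral float32 edges for them, then run the painting passes (1: seed
// ALLOW from the allowlist; 2: propagate DENY forward through clear/infer
// ops; 3-5: grow the ALLOW set through clear/infer ops; 6: drop ALLOW entries
// that cannot actually take F16), force color matches on TensorList clusters
// and loop edges, and finally change type attributes and insert Cast nodes.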
Status AutoMixedPrecisionImpl::Optimize() {
string optimization_level;
TF_RETURN_IF_ERROR(ReadStringFromEnvVar(
"TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_LEVEL", "", &optimization_level));
optimization_level = absl::AsciiStrToUpper(optimization_level);
force_all_fp16_ = optimization_level == "UNSAFE_FORCE_ALL";
if (force_all_fp16_ && (mode_ == AutoMixedPrecisionMode::BF16 ||
mode_ == AutoMixedPrecisionMode::FP16_CPU)) {
return errors::InvalidArgument(
"TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_LEVEL cannot be set to "
"UNSAFE_FORCE_ALL when oneDNN is used");
}
treat_infer_as_deny_ = optimization_level == "TREAT_INFER_AS_DENY";
VLOG(2) << "Optimization Level: " << optimization_level;
std::unique_ptr<AutoMixedPrecisionLists> mp_lists =
get_mixed_precision_lists();
f16_allowlist_ = mp_lists->AllowList();
f16_denylist_ = mp_lists->DenyList();
if (treat_infer_as_deny_) {
for (const auto& op : mp_lists->InferList()) {
f16_denylist_.insert(op);
}
} else {
f16_inferlist_ = mp_lists->InferList();
}
f16_clearlist_ = mp_lists->ClearList();
TF_RETURN_IF_ERROR(ValidateLists(f16_allowlist_, f16_denylist_,
f16_inferlist_, f16_clearlist_));
size_t timestamp = Env::Default()->NowMicros() / 1000;
  TF_RETURN_IF_ERROR(PrintDebugLogs(/*preop=*/true, timestamp));
VLOG(2) << "Identifying nodes that should be processed";
for (const NodeDef& node : graph_->node()) {
bool should_process;
string device_type;
switch (mode_) {
case AutoMixedPrecisionMode::CUDA:
device_type = DEVICE_GPU;
should_process =
!MustPreserve(node) && IsOnDevice(node, device_type) &&
(ShouldIgnorePerformance() || IsOnSuitableGPUArch(node));
break;
case AutoMixedPrecisionMode::BF16:
case AutoMixedPrecisionMode::CPU:
case AutoMixedPrecisionMode::FP16_CPU:
device_type = DEVICE_CPU;
should_process = !MustPreserve(node) && IsOnDevice(node, device_type);
break;
}
if (should_process) {
should_process_nodes_.insert(&node);
} else {
LogSkippedNode(node, device_type);
}
}
VLOG(2) << "Converting FusedBatchNorm* ops to V2";
ConvertBatchNormOpsToV2();
VLOG(2) << "Building node type map for graph";
TF_RETURN_IF_ERROR(node_type_map_.Init(*graph_));
VLOG(2) << "Constructing graph type attribute topology view";
TF_RETURN_IF_ERROR(
graph_type_view_.InitializeFromGraph(*graph_, node_type_map_));
absl::flat_hash_set<int> deny_set;
std::vector<absl::flat_hash_set<const NodeDef*>> tensor_list_clusters;
FindFloat32TensorListOpClustersAndDenylistUnsafe(&tensor_list_clusters,
&deny_set);
std::vector<NodeTypeIdEdge> ephemeral_edges;
for (const auto& cluster : tensor_list_clusters) {
VLOG(1) << "Found safe Tensor List cluster of size " << cluster.size();
for (const NodeDef* node : cluster) {
VLOG(2) << " Cluster member: " << node->op() << " node " << node->name();
}
FindTensorListImplicitFloat32Edges(cluster, &ephemeral_edges);
}
TF_RETURN_IF_ERROR(graph_type_view_.AddEphemeralEdges(ephemeral_edges));
absl::flat_hash_set<int> allow_set;
VLOG(2) << "Beginning pass 1 to add allowlist ops";
AddAllowlistOps(&allow_set);
VLOG(2) << "Finished pass 1";
if (allow_set.empty()) {
LOG(INFO) << "No allowlist ops found, nothing to do";
return absl::OkStatus();
}
VLOG(2) << "Beginning pass 2 to propagate deny forwards from denylist ops "
"through clear/inferlist ops";
PropagateDenyFwdThroughClearAndInfer(&deny_set);
VLOG(2) << "Finished pass 2";
VLOG(2) << "Forcing color match between data structure ops";
for (const auto& cluster : tensor_list_clusters) {
ForceColorMatchBetweenTensorListOps(cluster, &allow_set, &deny_set);
}
VLOG(2) << "Beginning pass 3 to set clear and infer nodes to allow if they "
"are between allow ops";
AddClearAndInferToAllowIfBetweenAllow(deny_set, &allow_set);
VLOG(2) << "Finished pass 3";
VLOG(2) << "Beginning pass 4 to add infer list ops to allow if they "
"directly follow allow nodes";
AddInferToAllowIfFollowAllow(deny_set, &allow_set);
VLOG(2) << "Finished pass 4";
VLOG(2) << "Beginning pass 5 to propagate allow from allow nodes through "
"clearlist ops";
PropagateAllowThroughClear(deny_set, &allow_set);
VLOG(2) << "Finished pass 5";
VLOG(2) << "Beginning pass 6 to remove some nodes which could not be changed "
"to F16"
"from allow set";
RemoveAllowsetWithFp32(&allow_set);
VLOG(2) << "Finished pass 6";
VLOG(2) << "Forcing color match between data structure ops";
for (const auto& cluster : tensor_list_clusters) {
ForceColorMatchBetweenTensorListOps(cluster, &allow_set, &deny_set);
}
VLOG(2) << "Forcing color match on loop edges";
TF_RETURN_IF_ERROR(ForceColorMatchOnRecurrentEdges(&allow_set));
VLOG(2) << "Finding existing casts that can be made allow";
MakeCastsAllowIfAllOutputsAllow(&allow_set);
VLOG(2) << "Beginning final pass to change type attributes and insert Cast "
"ops at paint boundaries";
TF_RETURN_IF_ERROR(ChangeTypeAttrsAndAddCasts(allow_set));
VLOG(2) << "Finished final pass";
  TF_RETURN_IF_ERROR(PrintDebugLogs(/*preop=*/false, timestamp));
return absl::OkStatus();
}
const NodeTypeId* AutoMixedPrecisionImpl::GetTensorListFloat32NodeTypeId(
const NodeDef& node) const {
if (!IsTensorListOp(node.op())) return nullptr;
for (const TypeAttrId& type_attr : node_type_map_.GetTypeAttrs(node)) {
const NodeTypeId* node_type =
graph_type_view_.GetNode(node.name(), type_attr);
if (node_type && node_type->type_attr.fixed_type == DT_INVALID &&
node_type->type_attr.type_index == TypeAttrId::kSingleType &&
IsFloat32(*node_type)) {
return node_type;
}
}
return nullptr;
}
bool AutoMixedPrecisionImpl::IsSourceOrSinkOp(const string& op) const {
const gtl::FlatSet<string> source_and_sink_ops = {
"_Arg",
"_Retval",
"OptionalFromValue",
"OptionalGetValue",
"PartitionedCall",
"Placeholder",
"StatefulPartitionedCall",
};
return source_and_sink_ops.count(op) || function_library_.Find(op);
}
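// Clusters TensorList ops that are connected through DT_VARIANT type
// attributes, traversing both inputs and outputs. If a cluster contains a
// node that is not eligible for processing, or the traversal reaches a
// source/sink op (_Arg, Placeholder, function calls, ...), the cluster root's
// float32 element type attribute is added to the deny set, since the list's
// element type cannot then be changed safely.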
void AutoMixedPrecisionImpl::FindFloat32TensorListOpClustersAndDenylistUnsafe(
std::vector<absl::flat_hash_set<const NodeDef*>>* tensor_list_clusters,
absl::flat_hash_set<int>* deny_set) const {
absl::flat_hash_set<const NodeDef*> tensor_list_prop_set;
for (int root_idx = 0; root_idx < graph_type_view_.num_nodes(); ++root_idx) {
const NodeTypeId& root = *graph_type_view_.GetNode(root_idx);
if (!ShouldProcess(*root.node) ||
root.type_attr.fixed_type != DataType::DT_VARIANT ||
!GetTensorListFloat32NodeTypeId(*root.node) ||
tensor_list_prop_set.count(root.node)) {
continue;
}
const NodeTypeId* root_fp32 = GetTensorListFloat32NodeTypeId(*root.node);
const absl::optional<int> maybe_root_fp32_idx =
graph_type_view_.GetNodeIndex(*root_fp32);
DCHECK(maybe_root_fp32_idx.has_value())
<< "Type attribute " << root_fp32->type_attr.DebugString()
<< " of node " << root.node->name() << " not found in graph view";
int root_fp32_idx = maybe_root_fp32_idx.value();
absl::flat_hash_set<const NodeDef*> cluster({root.node});
DfsTypeTraversal(graph_type_view_, {&root},
TypeTraversalDirection::kFollowInputsAndOutputs,
DfsTypePredicates::Enter([&](int idx) -> bool {
const NodeTypeId& item = *graph_type_view_.GetNode(idx);
return !tensor_list_prop_set.count(item.node);
}),
DfsTypeCallbacks::PreOrder([&](int idx) {
const NodeTypeId& item = *graph_type_view_.GetNode(idx);
const NodeDef* node = item.node;
if (GetTensorListFloat32NodeTypeId(*node)) {
cluster.insert(node);
if (!ShouldProcess(*node)) {
deny_set->insert(root_fp32_idx);
}
} else if (IsSourceOrSinkOp(node->op())) {
deny_set->insert(root_fp32_idx);
}
}));
tensor_list_clusters->push_back(cluster);
}
}
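// For every TensorList reader op in the cluster, records an ephemeral float32
// edge from each writer op that reaches it through its inputs. The real graph
// edge between them carries DT_VARIANT, so this is what ties the element
// dtype of writes and reads together for the painting passes.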
void AutoMixedPrecisionImpl::FindTensorListImplicitFloat32Edges(
const absl::flat_hash_set<const NodeDef*>& tensor_list_nodes,
std::vector<NodeTypeIdEdge>* implicit_fp32_edges) const {
for (const NodeDef* root_node : tensor_list_nodes) {
if (!IsTensorListReaderOp(root_node->op())) continue;
NodeTypeId root(root_node, TypeAttrId(DataType::DT_VARIANT));
const NodeTypeId* root_fp32 = GetTensorListFloat32NodeTypeId(*root.node);
CHECK(root_fp32) << "No float32 type attribute found for "
<< root.node->op() << " node " << root.node->name();
DfsTypeTraversal(
graph_type_view_, {&root}, TypeTraversalDirection::kFollowInputs,
DfsTypePredicates::Enter([&](int idx) -> bool {
const NodeTypeId& item = *graph_type_view_.GetNode(idx);
return ShouldProcess(*item.node);
}),
DfsTypeCallbacks::PreOrder([&](int idx) {
const NodeTypeId& item = *graph_type_view_.GetNode(idx);
if (IsTensorListWriterOp(item.node->op())) {
const NodeTypeId* item_fp32 =
GetTensorListFloat32NodeTypeId(*item.node);
CHECK(item_fp32)
<< "No float32 type attribute found for " << item.node->op()
<< " node " << item.node->name();
VLOG(2) << "Adding ephemeral float32 edge from "
<< item_fp32->node->op() << " node "
<< item_fp32->node->name() << " to "
<< root_fp32->node->op() << " node "
<< root_fp32->node->name();
implicit_fp32_edges->emplace_back(*item_fp32, *root_fp32);
}
}));
}
}
void AutoMixedPrecisionImpl::AddAllowlistOps(
absl::flat_hash_set<int>* allow_set) const {
for (int root_idx = 0; root_idx < graph_type_view_.num_nodes(); ++root_idx) {
const NodeTypeId& root = *graph_type_view_.GetNode(root_idx);
if (!ShouldProcess(*root.node)) continue;
bool force_allow = force_all_fp16_ && CanForceFP16(*root.node);
if (f16_allowlist_.count(root.node->op()) || force_allow) {
bool inserted = allow_set->insert(root_idx).second;
if (VLOG_IS_ON(2) && inserted) {
VLOG(2) << "Painting type " << root.type_attr.DebugString()
<< " of node " << root.node->name() << " ALLOW because its op "
<< root.node->op() << " is on the allowlist";
}
}
}
}
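// Propagates DENY forward from denylist ops. First marks every clearlist
// vertex that lies upstream (through inputs) of a deny- or infer-list vertex;
// then walks forward (through outputs) from each denylist vertex and paints
// DENY every marked vertex it reaches.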
void AutoMixedPrecisionImpl::PropagateDenyFwdThroughClearAndInfer(
absl::flat_hash_set<int>* deny_set) const {
if (force_all_fp16_) return;
absl::flat_hash_set<int> upstream_of_deny_or_infer_set;
for (int root_idx = 0; root_idx < graph_type_view_.num_nodes(); ++root_idx) {
const NodeTypeId& root = *graph_type_view_.GetNode(root_idx);
if (!(f16_denylist_.count(root.node->op()) ||
f16_inferlist_.count(root.node->op()))) {
continue;
}
DfsTypeTraversal(graph_type_view_, {&root},
TypeTraversalDirection::kFollowInputs,
DfsTypePredicates::Enter([&](int idx) -> bool {
const NodeTypeId& item = *graph_type_view_.GetNode(idx);
return idx == root_idx ||
(!upstream_of_deny_or_infer_set.count(idx) &&
f16_clearlist_.count(item.node->op()));
}),
DfsTypeCallbacks::PreOrder([&](int idx) {
upstream_of_deny_or_infer_set.insert(idx);
}));
}
for (int root_idx = 0; root_idx < graph_type_view_.num_nodes(); ++root_idx) {
const NodeTypeId& root = *graph_type_view_.GetNode(root_idx);
if (deny_set->count(root_idx) || !f16_denylist_.count(root.node->op())) {
continue;
}
DfsTypeTraversal(
graph_type_view_, {&root}, TypeTraversalDirection::kFollowOutputs,
DfsTypePredicates::Enter([&](int idx) -> bool {
return idx == root_idx || (!deny_set->count(idx) &&
upstream_of_deny_or_infer_set.count(idx));
}),
DfsTypeCallbacks::PreOrder([&](int idx) {
bool inserted = deny_set->insert(idx).second;
if (VLOG_IS_ON(2) && inserted) {
const NodeTypeId& item = *graph_type_view_.GetNode(idx);
VLOG(2) << "Painting type " << item.type_attr.DebugString()
<< " of " << item.node->op() << " node "
<< item.node->name() << " DENY";
}
}));
}
}
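// Paints ALLOW every float32, F16-capable clear/infer vertex that lies on a
// path between two allowlist vertices: vertices reachable forward from an
// allowlist op through such clear/infer ops are collected first, and those
// also reachable backward from an allowlist op are then added to the ALLOW
// set.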
void AutoMixedPrecisionImpl::AddClearAndInferToAllowIfBetweenAllow(
const absl::flat_hash_set<int>& deny_set,
absl::flat_hash_set<int>* allow_set) const {
absl::flat_hash_set<int> downstream_of_allow_set;
for (int root_idx = 0; root_idx < graph_type_view_.num_nodes(); ++root_idx) {
const NodeTypeId& root = *graph_type_view_.GetNode(root_idx);
if (!ShouldProcess(*root.node) || !f16_allowlist_.count(root.node->op())) {
continue;
}
DfsTypeTraversal(
graph_type_view_, {&root}, TypeTraversalDirection::kFollowOutputs,
DfsTypePredicates::Enter([&](int idx) -> bool {
const NodeTypeId& item = *graph_type_view_.GetNode(idx);
return idx == root_idx ||
(!downstream_of_allow_set.count(idx) &&
!f16_allowlist_.count(item.node->op()) &&
!deny_set.count(idx) && ShouldProcess(*item.node) &&
IsFloat32(item) && SupportsF16(item) &&
(f16_clearlist_.count(item.node->op()) ||
f16_inferlist_.count(item.node->op())));
}),
DfsTypeCallbacks::PreOrder(
[&](int idx) { downstream_of_allow_set.insert(idx); }));
}
absl::flat_hash_set<int> upstream_of_allow_set;
for (int root_idx = 0; root_idx < graph_type_view_.num_nodes(); ++root_idx) {
const NodeTypeId& root = *graph_type_view_.GetNode(root_idx);
if (!ShouldProcess(*root.node) || upstream_of_allow_set.count(root_idx) ||
!f16_allowlist_.count(root.node->op())) {
continue;
}
DfsTypeTraversal(
graph_type_view_, {&root}, TypeTraversalDirection::kFollowInputs,
DfsTypePredicates::Enter([&](int idx) -> bool {
return idx == root_idx || (!upstream_of_allow_set.count(idx) &&
downstream_of_allow_set.count(idx));
}),
DfsTypeCallbacks::PreOrder([&](int idx) {
upstream_of_allow_set.insert(idx);
bool inserted = allow_set->insert(idx).second;
if (VLOG_IS_ON(2) && inserted) {
const NodeTypeId& item = *graph_type_view_.GetNode(idx);
VLOG(2) << "Painting type " << item.type_attr.DebugString()
<< " of " << item.node->op() << " node "
<< item.node->name() << " ALLOW";
}
}));
}
}
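// Grows the ALLOW set outward (following both inputs and outputs) from every
// vertex already in it, through float32, F16-capable clearlist ops that are
// not DENY and do not implicitly read a non-resource variable.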
void AutoMixedPrecisionImpl::PropagateAllowThroughClear(
const absl::flat_hash_set<int>& deny_set,
absl::flat_hash_set<int>* allow_set) const {
absl::flat_hash_set<int> clear_prop_set;
for (int root_idx = 0; root_idx < graph_type_view_.num_nodes(); ++root_idx) {
const NodeTypeId& root = *graph_type_view_.GetNode(root_idx);
if (!ShouldProcess(*root.node) || clear_prop_set.count(root_idx) ||
!allow_set->count(root_idx)) {
continue;
}
DfsTypeTraversal(
graph_type_view_, {&root},
TypeTraversalDirection::kFollowInputsAndOutputs,
DfsTypePredicates::Enter([&](int idx) -> bool {
const NodeTypeId& item = *graph_type_view_.GetNode(idx);
return idx == root_idx ||
(!allow_set->count(idx) && !deny_set.count(idx) &&
ShouldProcess(*item.node) && IsFloat32(item) &&
SupportsF16(item) &&
(f16_clearlist_.count(item.node->op())) &&
!NodeImplicitlyReadsNonResourceVariable(*item.node));
}),
DfsTypeCallbacks::PreOrder([&](int idx) {
clear_prop_set.insert(idx);
bool inserted = allow_set->insert(idx).second;
if (VLOG_IS_ON(2) && inserted) {
const NodeTypeId& item = *graph_type_view_.GetNode(idx);
VLOG(2) << "Painting type " << item.type_attr.DebugString()
<< " of " << item.node->op() << " node "
<< item.node->name() << " ALLOW";
}
}));
}
}
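// BF16 mode only: an inferlist vertex is added to the ALLOW set when at least
// one of its fanins is ALLOW and none of its fanins is DENY (provided it is
// float32 and the op supports the target data type).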
void AutoMixedPrecisionImpl::AddInferToAllowIfFollowAllow(
const absl::flat_hash_set<int>& deny_set,
absl::flat_hash_set<int>* allow_set) const {
if (mode_ != AutoMixedPrecisionMode::BF16) {
return;
}
for (int item_idx = 0; item_idx < graph_type_view_.num_nodes(); ++item_idx) {
const NodeTypeId& item = *graph_type_view_.GetNode(item_idx);
if (!ShouldProcess(*item.node) || deny_set.count(item_idx) ||
allow_set->count(item_idx) || !f16_inferlist_.count(item.node->op()) ||
!IsFloat32(item) || !SupportsF16DataType(item)) {
continue;
}
bool has_allow_fanin = false;
for (const int fanin : graph_type_view_.GetFanin(item_idx)) {
if (deny_set.count(fanin)) {
has_allow_fanin = false;
break;
}
if (allow_set->count(fanin)) {
has_allow_fanin = true;
}
}
if (has_allow_fanin) {
bool inserted = allow_set->insert(item_idx).second;
if (VLOG_IS_ON(2) && inserted) {
VLOG(2) << "Painting type " << item.type_attr.DebugString() << " of "
<< item.node->op() << " node " << item.node->name() << " ALLOW";
}
}
}
}
void AutoMixedPrecisionImpl::RemoveAllowsetWithFp32(
absl::flat_hash_set<int>* allow_set) const {
for (int root_idx = 0; root_idx < graph_type_view_.num_nodes(); ++root_idx) {
const NodeTypeId& root = *graph_type_view_.GetNode(root_idx);
if (f16_allowlist_.count(root.node->op()) && allow_set->count(root_idx) &&
(!SupportsF16DataType(root) || IsQuantized(root))) {
auto erased = allow_set->erase(root_idx);
if (VLOG_IS_ON(2) && erased) {
VLOG(2) << "UnPainting type " << root.type_attr.DebugString()
<< " of node " << root.node->name() << " ALLOW because its op "
<< root.node->op() << " is not support F16 DataType";
}
}
}
}
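// Keeps loop-carried values in a single precision: for each NextIteration
// node, if any of its fanout Merge nodes is outside the ALLOW set, the
// NextIteration vertex and all sibling Merge vertices are removed from the
// ALLOW set; otherwise the NextIteration vertex is added to it. This avoids
// inserting casts on the back edge of a loop.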
Status AutoMixedPrecisionImpl::ForceColorMatchOnRecurrentEdges(
absl::flat_hash_set<int>* allow_set) const {
for (const NodeDef& node : graph_->node()) {
if (node.op() == "NextIteration") {
GraphView::OutputPort output_port(&node, 0);
const auto& fanout = graph_view_.GetFanout(output_port);
std::vector<int> merge_idxs;
merge_idxs.reserve(fanout.size());
bool any_merge_is_not_allow = false;
for (const auto& output : fanout) {
const NodeDef& merge_node = *output.node;
if (merge_node.op() != "Merge") {
return errors::FailedPrecondition(
"Expected Merge node after NextIteration, got ", merge_node.op());
}
const absl::optional<int> maybe_merge_idx =
graph_type_view_.GetNodeIndex(merge_node.name(), TypeAttrId("T"));
if (!maybe_merge_idx.has_value()) {
return errors::Internal("Type attribute T of Merge node ",
merge_node.name(),
" not found in graph view");
}
int merge_idx = maybe_merge_idx.value();
merge_idxs.push_back(merge_idx);
any_merge_is_not_allow =
any_merge_is_not_allow || !allow_set->count(merge_idx);
}
const absl::optional<int> maybe_nextiter_idx =
graph_type_view_.GetNodeIndex(node.name(), TypeAttrId("T"));
if (!maybe_nextiter_idx.has_value()) {
return errors::Internal("Type attribute T of NextIteration node ",
node.name(), " not found in graph view");
}
int nextiter_idx = maybe_nextiter_idx.value();
if (any_merge_is_not_allow) {
for (int merge_idx : merge_idxs) {
if (allow_set->erase(merge_idx)) {
VLOG(2) << "Painting type T of Merge node "
<< graph_type_view_.GetNode(merge_idx)->node->name()
<< " DENY to match the color of its sibling Merge nodes "
"with common NextIteration node "
<< node.name();
}
}
if (allow_set->erase(nextiter_idx)) {
VLOG(2) << "Painting type T of NextIteration node " << node.name()
<< " DENY to match the color of its output Merge node(s)";
}
} else {
if (allow_set->insert(nextiter_idx).second) {
VLOG(2) << "Painting type T of NextIteration node " << node.name()
<< " ALLOW to match the color of its output Merge node(s)";
}
}
}
}
return absl::OkStatus();
}
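// Makes every member of a TensorList cluster agree on one color: if any
// member's float32 element type is DENY, the whole cluster becomes DENY;
// otherwise, if any member is ALLOW, the whole cluster becomes ALLOW.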
void AutoMixedPrecisionImpl::ForceColorMatchBetweenTensorListOps(
const absl::flat_hash_set<const NodeDef*>& tensor_list_nodes,
absl::flat_hash_set<int>* allow_set,
absl::flat_hash_set<int>* deny_set) const {
bool any_deny = false;
bool any_allow = false;
std::vector<int> node_type_idxs;
node_type_idxs.reserve(tensor_list_nodes.size());
for (const NodeDef* node : tensor_list_nodes) {
const NodeTypeId& node_type = *GetTensorListFloat32NodeTypeId(*node);
const absl::optional<int> maybe_node_type_idx =
graph_type_view_.GetNodeIndex(node_type);
DCHECK(maybe_node_type_idx.has_value())
<< "Type attribute " << node_type.type_attr.DebugString() << " of node "
<< node->name() << " not found in graph view";
node_type_idxs.push_back(maybe_node_type_idx.value());
}
for (int node_type_idx : node_type_idxs) {
if (deny_set->count(node_type_idx)) {
any_deny = true;
break;
} else if (allow_set->count(node_type_idx)) {
any_allow = true;
}
}
if (!any_deny && !any_allow) return;
for (int node_type_idx : node_type_idxs) {
const NodeTypeId& node_type = *graph_type_view_.GetNode(node_type_idx);
VLOG(2) << "Painting type " << node_type.type_attr.DebugString() << " of "
<< node_type.node->op() << " node " << node_type.node->name() << " "
<< (any_deny ? "DENY" : "ALLOW")
<< " because at least one of its siblings is "
<< (any_deny ? "DENY" : "ALLOW");
if (any_deny) {
allow_set->erase(node_type_idx);
deny_set->insert(node_type_idx);
} else {
allow_set->insert(node_type_idx);
}
}
}
bool AutoMixedPrecisionImpl::NodeImplicitlyReadsNonResourceVariable(
const NodeDef& node) const {
if (node.op() == "Identity" || node.op() == "Enter") {
GraphView::InputPort node_input(&node, 0);
MutableGraphView::OutputPort prev_output =
graph_view_.GetRegularFanin(node_input);
const NodeDef* input = prev_output.node;
if (input && ((node.op() == "Identity" && (input->op() == "Variable" ||
input->op() == "VariableV2")) ||
(node.op() == "Enter" &&
NodeImplicitlyReadsNonResourceVariable(*input)))) {
return true;
}
}
return false;
}
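// Adds pre-existing Cast-to-float32 nodes to the ALLOW set when all of their
// fanouts are ALLOW, so the final pass changes the existing cast's DstT
// instead of stacking an extra float32->F16 cast right after it.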
void AutoMixedPrecisionImpl::MakeCastsAllowIfAllOutputsAllow(
absl::flat_hash_set<int>* allow_set) const {
int num_nodes_preop = graph_->node_size();
for (int node_idx = 0; node_idx < num_nodes_preop; ++node_idx) {
NodeDef* node = graph_->mutable_node(node_idx);
NodeTypeId node_type(node, TypeAttrId("DstT"));
if (node->op() != "Cast" || !IsFloat32(node_type)) {
continue;
}
bool all_fanouts_allow = true;
MutableGraphView::OutputPort src(node, 0);
const auto& fanout = graph_view_.GetFanout(src);
for (const MutableGraphView::InputPort& dst : fanout) {
TypeAttrId dst_type_attr =
node_type_map_.GetInputTypeAttr(*dst.node, dst.port_id);
const absl::optional<int> maybe_dst_type_idx =
graph_type_view_.GetNodeIndex(dst.node->name(), dst_type_attr);
DCHECK(maybe_dst_type_idx.has_value())
<< "Type attribute " << dst_type_attr.DebugString() << " of node "
<< dst.node->name() << " not found in graph view";
int dst_type_idx = maybe_dst_type_idx.value();
bool dst_is_allow = allow_set->count(dst_type_idx);
if (!dst_is_allow) {
all_fanouts_allow = false;
break;
}
}
if (!fanout.empty() && all_fanouts_allow) {
const absl::optional<int> maybe_node_type_idx =
graph_type_view_.GetNodeIndex(node_type);
DCHECK(maybe_node_type_idx.has_value())
<< "Type attribute " << node_type.type_attr.DebugString()
<< " of node " << node_type.node->name()
<< " not found in graph view";
int node_type_idx = maybe_node_type_idx.value();
allow_set->insert(node_type_idx);
}
}
}
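// Inserts at most one Cast node on the fanout of `src` and rewires every
// destination input that needs a conversion to read from it. With
// CastType::AUTO a cast is added only where the src and dst colors differ;
// FP16 / FP32 force the cast direction. Returns the added node, or nullptr if
// no destination required a cast.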
absl::StatusOr<NodeDef*> AutoMixedPrecisionImpl::InsertCastNodeAtFanout(
const absl::flat_hash_set<int>& allow_set, const bool src_is_allow,
const CastType& cast_type, MutableGraphView::OutputPort& src) {
NodeDef* added_cast_node = nullptr;
auto fanout = graph_view_.GetFanout(src);
for (const MutableGraphView::InputPort& dst : fanout) {
TypeAttrId dst_type_attr =
node_type_map_.GetInputTypeAttr(*dst.node, dst.port_id);
const absl::optional<int> maybe_dst_type_idx =
graph_type_view_.GetNodeIndex(dst.node->name(), dst_type_attr);
if (!maybe_dst_type_idx.has_value()) {
return errors::Internal("Type attribute ", dst_type_attr.DebugString(),
" of ", dst.node->op(), " node ",
dst.node->name(), " not found in graph view");
}
int dst_type_idx = maybe_dst_type_idx.value();
bool dst_is_allow = allow_set.count(dst_type_idx);
bool to_f16 = false;
bool should_cast = false;
switch (cast_type) {
case CastType::AUTO:
if (src_is_allow != dst_is_allow) {
to_f16 = dst_is_allow;
should_cast = true;
}
break;
case CastType::FP16:
to_f16 = true;
should_cast = true;
break;
case CastType::FP32:
to_f16 = false;
should_cast = true;
break;
default:
return errors::Internal("Invalid Cast Type: ",
static_cast<int>(cast_type));
}
if (!should_cast) continue;
if (added_cast_node == nullptr) {
VLOG(1) << "Inserting cast to "
<< (to_f16 ? DataTypeString(target_dtype_) : "DT_FLOAT") << " at "
<< src.node->op() << " " << src.node->name() << ":"
<< src.port_id;
added_cast_node =
graph_view_.AddNode(BuildCastNode(src, to_f16, src.node->device()));
if (to_f16 && !IsConstant(*src.node) && !IsVariable(*src.node) &&
!NodeImplicitlyReadsNonResourceVariable(*src.node)) {
++num_nonvar_casts_to_f16_;
}
}
TF_RETURN_IF_ERROR(graph_view_.UpdateRegularFaninByPort(
dst.node->name(), dst.port_id, {added_cast_node->name(), 0}));
}
return added_cast_node;
}
absl::StatusOr<DataType> AutoMixedPrecisionImpl::GetCastToType(
const NodeDef* node) const {
CHECK_EQ(node->op(), "Cast")
<< "Node " << node->name() << " is not a Cast op";
return node->attr().at("DstT").type();
}
void AutoMixedPrecisionImpl::CollectOutputPorts(
const TypeAttrId& type_attr, NodeDef* node,
std::vector<MutableGraphView::OutputPort>& output_ports) const {
for (int port_id : node_type_map_.GetOutputPorts(*node, type_attr)) {
output_ports.emplace_back(node, port_id);
}
}
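// Final pass. For every float32 type attribute in the ALLOW set, either flip
// it to target_dtype_ (the normal path) or, when emulating F16 on CPU, keep
// the op in float32 and bracket it with Cast-to-FP32 nodes at its fanins and
// Cast-to-FP16 nodes at its fanouts; then insert AUTO casts wherever an ALLOW
// output feeds a non-ALLOW input or vice versa.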
Status AutoMixedPrecisionImpl::ChangeTypeAttrsAndAddCasts(
const absl::flat_hash_set<int>& allow_set) {
int num_nodes_changed = 0;
const int num_nodes_preop = graph_->node_size();
bool emulate_f16 = false;
if (mode_ == AutoMixedPrecisionMode::CPU) {
TF_CHECK_OK(
ReadBoolFromEnvVar("TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_EMULATE_FP16",
true, &emulate_f16));
}
VLOG(1) << "Setting emulate_f16 = " << emulate_f16;
for (int node_idx = 0; node_idx < num_nodes_preop; ++node_idx) {
NodeDef* node = graph_->mutable_node(node_idx);
for (const TypeAttrId& type_attr : node_type_map_.GetTypeAttrs(*node)) {
const absl::optional<int> maybe_node_type_idx =
graph_type_view_.GetNodeIndex(node->name(), type_attr);
if (!maybe_node_type_idx.has_value()) {
return errors::Internal("Type attribute ", type_attr.DebugString(),
" of ", node->op(), " node ", node->name(),
" not found in graph view");
}
int node_type_idx = maybe_node_type_idx.value();
if (!IsFloat32(*graph_type_view_.GetNode(node_type_idx))) continue;
bool src_is_allow = allow_set.count(node_type_idx);
std::vector<MutableGraphView::OutputPort> output_ports;
if (src_is_allow) {
if (emulate_f16) {
for (int port_id : node_type_map_.GetInputPorts(*node, type_attr)) {
VLOG(2) << "Cast to F32 at fanin of node " << node->name() << ":"
<< port_id;
MutableGraphView::InputPort dst(node, port_id);
MutableGraphView::OutputPort src = graph_view_.GetRegularFanin(dst);
NodeDef* added_cast_node = graph_view_.AddNode(
BuildCastNode(src, false, src.node->device()));
VLOG(1) << "Inserting cast to DT_FLOAT at " << src.node->op() << " "
<< src.node->name() << ":" << src.port_id;
TF_RETURN_IF_ERROR(graph_view_.UpdateRegularFaninByPort(
dst.node->name(), dst.port_id, {added_cast_node->name(), 0}));
}
for (int port_id : node_type_map_.GetOutputPorts(*node, type_attr)) {
MutableGraphView::OutputPort src(node, port_id);
VLOG(2) << "Cast to F16 at fanout of node " << node->name() << ":"
<< port_id;
TF_ASSIGN_OR_RETURN(NodeDef * added_cast_node,
InsertCastNodeAtFanout(allow_set, src_is_allow,
CastType::FP16, src));
if (added_cast_node != nullptr) {
output_ports.emplace_back(added_cast_node, 0);
}
}
} else {
VLOG(1) << "Changing type " << type_attr.DebugString() << " of "
<< node->op() << " node " << node->name() << " to "
<< DataTypeString(target_dtype_);
if (!SetDataType(node, type_attr, target_dtype_)) {
return errors::Internal("Failed to set type attribute");
}
++num_nodes_changed;
CollectOutputPorts(type_attr, node, output_ports);
}
} else {
CollectOutputPorts(type_attr, node, output_ports);
}
for (auto output_port : output_ports) {
VLOG(2) << "Cast to required data type at fanout of node "
<< output_port.node->name() << ":" << output_port.port_id;
TF_RETURN_IF_ERROR(InsertCastNodeAtFanout(allow_set, src_is_allow,
CastType::AUTO, output_port)
.status());
}
}
}
const char* type_str = target_dtype_ == DT_HALF ? "float16" : "bfloat16";
LOG(INFO) << "Converted " << num_nodes_changed << "/" << num_nodes_preop
<< " nodes to " << type_str << " precision using "
<< num_nonvar_casts_to_f16_ << " cast(s) to " << type_str
<< " (excluding Const and Variable casts)";
return absl::OkStatus();
}
int GetNumGPUs(const Cluster& cluster) {
if (ShouldSimulateGpu()) {
return 1;
}
auto devices = cluster.GetDevices();
int num_gpus = 0;
for (const auto& device : devices) {
const DeviceProperties& device_properties = device.second;
if (device_properties.type() == "GPU" &&
(ShouldIgnorePerformance() || HasFastFP16Support(device_properties))) {
num_gpus++;
}
}
return num_gpus;
}
}
Status AutoMixedPrecision::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* output) {
if (cluster == nullptr) {
return errors::InvalidArgument("cluster == nullptr");
}
#if !defined(INTEL_MKL)
if (mode_ == AutoMixedPrecisionMode::BF16) {
return errors::Unimplemented(
"The auto_mixed_precision_onednn_bfloat16 optimizer cannot be used "
"since this build of TensorFlow is not compiled with oneDNN support "
"for bfloat16. "
"For information on oneDNN builds, see: "
"https:
"tensorflow-installation-guide");
}
#endif
*output = item.graph;
int num_gpus = GetNumGPUs(*cluster);
if (num_gpus < 1 && mode_ == AutoMixedPrecisionMode::CUDA) {
VLOG(1) << "No (suitable) GPUs detected, skipping " << name()
<< " graph optimizer";
return absl::OkStatus();
}
if (mode_ == AutoMixedPrecisionMode::FP16_CPU &&
!IsAMXDataTypeSupportedByOneDNNOnThisCPU(DT_HALF) &&
!IsAVXConvertSupportedByOneDNNOnThisCPU()) {
VLOG(1) << "No support for " << name() << " graph optimizer on CPU";
return absl::OkStatus();
}
if (num_gpus >= 1 && mode_ == AutoMixedPrecisionMode::BF16) {
LOG(WARNING) << "Note: GPUs detected. Using " << name()
<< " graph optimizer configured for BFloat16 on CPUs";
}
AutoMixedPrecisionImpl optimizer(cluster, item.NodesToPreserve(), output,
item.id, mode_);
if (item.id == "tf_graph") {
LOG(INFO) << "Running " << name() << " graph optimizer";
} else {
VLOG(1) << "Running " << name() << " graph optimizer on " << item.id;
}
Status status = optimizer.Optimize();
if (!status.ok()) {
*output = item.graph;
LOG(WARNING) << name() << " graph optimizer FAILED: " << status.ToString();
}
return status;
}
}
} | #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM || INTEL_MKL
#include "tensorflow/core/grappler/optimizers/auto_mixed_precision.h"
#include <utility>
#include <vector>
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/list_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/clusters/single_machine.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/devices.h"
#include "tensorflow/core/grappler/graph_view.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/util/util.h"
namespace tensorflow {
namespace grappler {
namespace {
using ::testing::ContainsRegex;
using ::testing::SizeIs;
template <DataType DTYPE>
Tensor GenerateIdentityMatrix(int64_t height, int64_t width) {
typedef typename EnumToDataType<DTYPE>::Type T;
Tensor tensor(DTYPE, TensorShape{height, width});
for (int64_t i = 0; i < height; ++i) {
for (int64_t j = 0; j < width; ++j) {
tensor.matrix<T>()(i, j) = i == j;
}
}
return tensor;
}
template <DataType DTYPE>
Tensor GenerateRandomTensorInRange(const TensorShape& shape, double minval,
double maxval) {
typedef typename EnumToDataType<DTYPE>::Type T;
Tensor tensor(DTYPE, shape);
for (auto i = 0; i < tensor.NumElements(); i++)
tensor.flat<T>()(i) =
(random::New64() % 65536 / 65536.0) * (maxval - minval) + minval;
return tensor;
}
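// Asserts that the optimized graph has the same structure as the original:
// equal node count and, for each node, identical name, op and inputs. Used by
// tests where the pass is expected to leave the topology unchanged.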
void VerifyGraphsEquivalent(const GraphDef& original_graph,
const GraphDef& optimized_graph,
const string& func) {
EXPECT_EQ(original_graph.node_size(), optimized_graph.node_size()) << func;
GraphView optimized_view(&optimized_graph);
for (int i = 0; i < original_graph.node_size(); ++i) {
const NodeDef& original = original_graph.node(i);
const NodeDef& optimized = *optimized_view.GetNode(original.name());
EXPECT_EQ(original.name(), optimized.name()) << func;
EXPECT_EQ(original.op(), optimized.op()) << func;
EXPECT_EQ(original.input_size(), optimized.input_size()) << func;
if (original.input_size() == optimized.input_size()) {
for (int j = 0; j < original.input_size(); ++j) {
EXPECT_EQ(original.input(j), optimized.input(j)) << func;
}
}
}
}
const std::pair<int, int> kMinGPUArch = {7, 0};
class AutoMixedPrecisionTest : public GrapplerTest {
protected:
void SetMode(AutoMixedPrecisionMode mode) { mode_ = mode; }
void SetUp() override {
if (mode_ == AutoMixedPrecisionMode::CUDA) {
int num_gpus = GetNumAvailableGPUs();
gpu_available_ = (num_gpus > 0);
#if GOOGLE_CUDA
gpu_available_ =
gpu_available_ && (num_gpus == GetNumAvailableGPUs(kMinGPUArch));
#else
gpu_available_ = false;
#endif
if (gpu_available_) {
        virtual_cluster_.reset(new SingleMachine(
            /*timeout_s=*/10, /*num_cpu_cores=*/1, /*num_gpus=*/1));
} else {
DeviceProperties device_properties;
device_properties.set_type("GPU");
#if GOOGLE_CUDA
device_properties.mutable_environment()->insert({"architecture", "7"});
device_properties.mutable_environment()->insert({"cuda", "9010"});
#else
device_properties.mutable_environment()->insert(
{"architecture", "gfx906"});
#endif
virtual_cluster_.reset(
new VirtualCluster({{"/GPU:1", device_properties}}));
}
} else if (mode_ == AutoMixedPrecisionMode::FP16_CPU) {
DeviceProperties device_properties;
device_properties.set_type("CPU");
      virtual_cluster_.reset(new SingleMachine(
          /*timeout_s=*/10, /*num_cpu_cores=*/1, /*num_gpus=*/0));
bool is_fp16_enabled_on_cpu = false;
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
is_fp16_enabled_on_cpu =
IsAMXDataTypeSupportedByOneDNNOnThisCPU(DT_HALF) ||
IsAVXConvertSupportedByOneDNNOnThisCPU();
#endif
if (!IsMKLEnabled() || !is_fp16_enabled_on_cpu) {
GTEST_SKIP() << "This device doesn't support FP16";
}
}
TF_CHECK_OK(virtual_cluster_->Provision());
}
void TearDown() override { TF_CHECK_OK(virtual_cluster_->Shutdown()); }
NodeDef* AddSimpleNode(const string& name, const string& op,
const std::vector<string>& inputs,
GraphDef* graph) const {
std::vector<std::pair<string, AttrValue>> attributes;
if (op == "AddN" || op == "ShapeN") {
AttrValue num_inputs;
num_inputs.set_i(inputs.size());
attributes.emplace_back("N", num_inputs);
}
if (op == "ShapeN") {
AttrValue out_type;
out_type.set_type(DT_INT32);
attributes.emplace_back("out_type", out_type);
}
AttrValue type;
type.set_type(DT_FLOAT);
if (op == "Const" || op == "Placeholder" || op == "VariableV2" ||
op == "VarHandleOp" || op == "ReadVariableOp") {
attributes.emplace_back("dtype", type);
} else if (op == "SparseMatMul") {
attributes.emplace_back("Ta", type);
attributes.emplace_back("Tb", type);
} else if (op == "IdentityN") {
AttrValue type_list;
for (int i = 0; i < static_cast<int>(inputs.size()); ++i) {
type_list.mutable_list()->add_type(DT_FLOAT);
}
attributes.emplace_back("T", type_list);
} else if (op == "StackV2" || op == "StackPopV2") {
attributes.emplace_back("elem_type", type);
} else if (op == "Cast") {
attributes.emplace_back("SrcT", type);
attributes.emplace_back("DstT", type);
} else {
attributes.emplace_back("T", type);
}
return AddNode(name, op, inputs, attributes, graph);
}
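  // Builds an allow -> infer -> allow chain around the unary op under test
  // (MatMul with an identity matrix on both sides, so values pass through
  // numerically unchanged), runs the optimizer, expects both MatMuls and the
  // infer op to be converted to DT_HALF, and compares the outputs against the
  // float32 graph within the given tolerances.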
void TestSimpleUnaryInferOp(
double input_min, double input_max, double atol, double rtol,
const std::function<Output(const tensorflow::Scope&, Output)>&
test_op_factory) {
int size = 128;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output eye = ops::Const(s.WithOpName("eye"),
GenerateIdentityMatrix<DT_FLOAT>(size, size));
Output input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input, eye);
Output infer1 = test_op_factory(s.WithOpName("infer1"), allow1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), infer1, eye);
Output fetch1 = ops::Identity(s.WithOpName("fetch1"), allow2);
GrapplerItem item;
item.fetch = {"fetch1"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto input_tensor = GenerateRandomTensorInRange<DT_FLOAT>(
TensorShape({size, size}), input_min, input_max);
std::vector<std::pair<string, Tensor>> feed = {{"input", input_tensor}};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(),
DT_FLOAT);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
auto tensors = EvaluateNodes(output, item.fetch, feed);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], atol, rtol);
}
}
std::unique_ptr<Cluster> virtual_cluster_;
bool gpu_available_;
AutoMixedPrecisionMode mode_;
};
class AutoMixedPrecisionParamTest
: public AutoMixedPrecisionTest,
public ::testing::WithParamInterface<AutoMixedPrecisionMode> {
protected:
void SetUp() override {
mode_ = GetParam();
AutoMixedPrecisionTest::SetMode(mode_);
AutoMixedPrecisionTest::SetUp();
}
AutoMixedPrecisionMode mode_;
};
TEST_P(AutoMixedPrecisionParamTest, NoOp) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.234f, {32});
Output deny1 = ops::Exp(s.WithOpName("deny1"), input);
Output clr1 = ops::Relu(s.WithOpName("clr1"), deny1);
Output infer1 = ops::Sqrt(s.WithOpName("infer1"), clr1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), infer1);
Output fetch = ops::Identity(s.WithOpName("fetch"), clr2);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
VerifyGraphsEquivalent(item.graph, output, __FUNCTION__);
GraphView output_view(&output);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
}
}
TEST_P(AutoMixedPrecisionParamTest, AlreadyFp16) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f, {32, 32});
Output cst1 = ops::Cast(s.WithOpName("cst1"), input, DT_HALF);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), cst1, cst1);
Output clr1 = ops::Relu(s.WithOpName("clr1"), allow1);
Output cst2 = ops::Cast(s.WithOpName("cst2"), clr1, DT_FLOAT);
Output clr2 = ops::Relu(s.WithOpName("clr2"), cst2);
Output fetch = ops::Identity(s.WithOpName("fetch"), clr2);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
VerifyGraphsEquivalent(item.graph, output, __FUNCTION__);
GraphView output_view(&output);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("cst1")->attr().at("DstT").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("cst2")->attr().at("SrcT").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("cst2")->attr().at("DstT").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
}
}
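// The MatMul and the clear ops adjacent to it become fp16, deny ops (Exp,
// SparseMatMul) and their surroundings stay fp32, and two Casts are inserted
// at the precision boundaries.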
TEST_P(AutoMixedPrecisionParamTest, Simple) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output deny1 = ops::Exp(s.WithOpName("deny1"), input);
Output clr1 = ops::Relu(s.WithOpName("clr1"), deny1);
Output infer1 = ops::Sqrt(s.WithOpName("infer1"), clr1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), infer1);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), clr2, clr2);
Output clr3 = ops::Relu(s.WithOpName("clr3"), allow1);
Output infer2 = ops::Log(s.WithOpName("infer2"), clr3);
Output clr4 = ops::Relu(s.WithOpName("clr4"), infer2);
Output deny2 = ops::SparseMatMul(s.WithOpName("deny2"), clr4, clr4);
Output clr5 = ops::Relu(s.WithOpName("clr5"), deny2);
Output fetch = ops::Identity(s.WithOpName("fetch"), clr5);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("clr3")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer2")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr4")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny2")->attr().at("Ta").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny2")->attr().at("Tb").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr5")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 5e-4);
}
}
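// With TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_LEVEL=TREAT_INFER_AS_DENY, infer
// ops are handled like deny ops, so only the regions around the two MatMuls
// are converted (at the cost of four inserted Casts).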
TEST_P(AutoMixedPrecisionParamTest, NoInferOp) {
setenv("TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_LEVEL", "TREAT_INFER_AS_DENY",
1 /*overwrite*/);
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output deny1 = ops::Exp(s.WithOpName("deny1"), input);
Output clr1 = ops::Relu(s.WithOpName("clr1"), deny1);
Output infer1 = ops::Sqrt(s.WithOpName("infer1"), clr1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), infer1);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), clr2, clr2);
Output clr3 = ops::Relu(s.WithOpName("clr3"), allow1);
Output infer2 = ops::Log(s.WithOpName("infer2"), clr3);
Output clr4 = ops::Relu(s.WithOpName("clr4"), infer2);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), clr4, clr4);
Output infer3 = ops::Log(s.WithOpName("infer3"), allow2);
Output fetch = ops::Identity(s.WithOpName("fetch"), infer3);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 4);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("clr3")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer2")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr4")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer3")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 5e-4);
}
unsetenv("TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_LEVEL");
}
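// Conversion propagates through a chain of clear ops in both directions:
// clr2 and clr4 become fp16 even though only clr1 feeds the MatMul directly.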
TEST_P(AutoMixedPrecisionParamTest, BidirectionalClearChain) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output clr1 = ops::Relu(s.WithOpName("clr1"), input);
Output clr2 = ops::Relu(s.WithOpName("clr2"), input);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), clr1, clr1);
auto clr3 = ops::ShapeN(s.WithOpName("clr3"), {clr1, clr2});
Output clr4 = ops::Relu(s.WithOpName("clr4"), clr2);
Output fetch1 = ops::Identity(s.WithOpName("fetch1"), allow1);
Output fetch2 = ops::Identity(s.WithOpName("fetch2"), clr4);
GrapplerItem item;
item.fetch = {"fetch1", "fetch2"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 3);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("clr3")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("clr4")->attr().at("T").type(), DT_HALF);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
}
}
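// Nodes whose outputs are fetched (allow1, clr2, clr3) must keep fp32, even
// the fetched MatMul; only the non-fetched allow2 is converted.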
TEST_P(AutoMixedPrecisionParamTest, PreserveFetches) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input, input);
Output clr1 = ops::Relu(s.WithOpName("clr1"), allow1);
Output infer1 = ops::Sqrt(s.WithOpName("infer1"), clr1);
Output deny1 = ops::Exp(s.WithOpName("deny1"), infer1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), deny1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), clr2, clr2);
Output clr3 = ops::Relu(s.WithOpName("clr3"), allow2);
Output deny2 = ops::Exp(s.WithOpName("deny2"), clr3);
Output clr4 = ops::Relu(s.WithOpName("clr4"), deny2);
GrapplerItem item;
item.fetch = {"allow1", "clr2", "clr3"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("clr3")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny2")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr4")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 5e-3);
}
}
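// A MatMul explicitly placed on the CPU device is not converted when
// optimizing for GPU; the test is skipped in FP16_CPU mode.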
TEST_P(AutoMixedPrecisionParamTest, PreserveCPUNodes) {
if (mode_ == AutoMixedPrecisionMode::FP16_CPU) {
GTEST_SKIP() << "This test is not required on CPU";
}
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output clr1 = ops::Relu(s.WithOpName("clr1"), input);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), clr1, clr1);
Output infer1 = ops::Tanh(s.WithOpName("infer1"), allow1);
Output allow2 =
ops::MatMul(s.WithOpName("allow2").WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0"),
infer1, infer1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), allow2);
Output fetch = ops::Identity(s.WithOpName("fetch"), clr2);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
}
}
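// An Identity that reads a Variable keeps fp32 (the variable itself is not
// converted), while an Identity fed by a Const may become fp16.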
TEST_P(AutoMixedPrecisionParamTest, PreserveIdentityAfterVariable) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output var1 = ops::Variable(s.WithOpName("var1"), {32, 32}, DT_FLOAT);
Output clr1 = ops::Identity(s.WithOpName("clr1"), var1);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input, clr1);
Output input2 = ops::Const(s.WithOpName("input2"), 1.f / 32, {32, 32});
Output clr2 = ops::Identity(s.WithOpName("clr2"), input2);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), input, clr2);
Output fetch1 = ops::Identity(s.WithOpName("fetch1"), allow1);
Output fetch2 = ops::Identity(s.WithOpName("fetch2"), allow2);
GrapplerItem item;
item.fetch = {"fetch1", "fetch2"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto var1_tensor =
GenerateConstantTensor<DT_FLOAT>(TensorShape({32, 32}), 3.141593f);
std::vector<std::pair<string, Tensor>> feed = {{"var1", var1_tensor}};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, feed);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 5);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("var1")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("input2")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
auto tensors = EvaluateNodes(output, item.fetch, feed);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 5e-3);
}
}
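// FusedBatchNorm and FusedBatchNormGrad are upgraded to their V2 variants so
// the data type T can be fp16 while the statistics type U stays fp32.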
TEST_P(AutoMixedPrecisionParamTest, FusedBatchNorm) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {8, 56, 56, 16});
Output weight = ops::Const(s.WithOpName("weight"), 2.f, {3, 3, 16, 16});
Output scale = ops::Const(s.WithOpName("scale"), 3.f, {16});
Output offset = ops::Const(s.WithOpName("offset"), 4.f, {16});
Output mean = ops::Const(s.WithOpName("mean"), 5.f, {0});
Output variance = ops::Const(s.WithOpName("variance"), 6.f, {0});
Output allow1 =
ops::Conv2D(s.WithOpName("allow1"), input, weight, {1, 1, 1, 1}, "SAME",
ops::Conv2D::DataFormat("NHWC"));
auto fbn1_op =
ops::FusedBatchNorm(s.WithOpName("fbn1"), allow1, scale, offset, mean,
variance, ops::FusedBatchNorm::DataFormat("NHWC"));
Output fbn1 = fbn1_op.y;
Output fbn1_rs1 = fbn1_op.reserve_space_1;
Output fbn1_rs2 = fbn1_op.reserve_space_2;
Output bng1 = ops::FusedBatchNormGrad(
s.WithOpName("bng1"), fbn1, allow1, scale, fbn1_rs1,
fbn1_rs2, ops::FusedBatchNormGrad::DataFormat("NHWC"))
.x_backprop;
Output infer1 = ops::Add(s.WithOpName("infer1"), fbn1, bng1);
Output allow2 =
ops::Conv2D(s.WithOpName("allow2"), infer1, weight, {1, 1, 1, 1}, "SAME",
ops::Conv2D::DataFormat("NHWC"));
Output fetch = ops::Identity(s.WithOpName("fetch"), allow2);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 3);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("fbn1")->op(), "FusedBatchNormV2");
EXPECT_EQ(output_view.GetNode("fbn1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("fbn1")->attr().at("U").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("bng1")->op(), "FusedBatchNormGradV2");
EXPECT_EQ(output_view.GetNode("bng1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("bng1")->attr().at("U").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 1e-2);
}
}
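// List-typed attributes (IdentityN's T) have every element converted, and the
// AddN between the two MatMuls follows along.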
TEST_P(AutoMixedPrecisionParamTest, RepeatedAndListTypeAttrs) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input, input);
auto clr1_op = ops::IdentityN(s.WithOpName("clr1"), {allow1, allow1, allow1});
Output infer1 =
ops::AddN(s.WithOpName("infer1"),
{clr1_op.output[0], clr1_op.output[1], clr1_op.output[2]});
Output allow2 = ops::MatMul(s.WithOpName("allow2"), infer1, infer1);
Output fetch = ops::Identity(s.WithOpName("fetch"), allow2);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
for (auto type : output_view.GetNode("clr1")->attr().at("T").list().type()) {
EXPECT_EQ(type, DT_HALF);
}
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
}
}
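// An existing Cast feeding the MatMul is retargeted to produce fp16 directly,
// so only one additional Cast node is needed.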
TEST_P(AutoMixedPrecisionParamTest, ExistingCast) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), true, {32, 32});
Output cst1 = ops::Cast(s.WithOpName("cst1"), input, DT_FLOAT);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), cst1, cst1);
Output fetch = ops::Identity(s.WithOpName("fetch"), allow1);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 1);
EXPECT_EQ(output_view.GetNode("cst1")->attr().at("SrcT").type(), DT_BOOL);
EXPECT_EQ(output_view.GetNode("cst1")->attr().at("DstT").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
}
}
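// Builds a while-loop structure (Enter/Merge/Switch/NextIteration) with a
// back edge added by editing the GraphDef; the loop plumbing stays fp32 and
// only the MatMul inside the loop is converted.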
TEST_P(AutoMixedPrecisionParamTest, RecurrentEdgeColorMismatch) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output deny1 = ops::Exp(s.WithOpName("deny1"), input);
Output ent1 =
ops::internal::Enter(s.WithOpName("ent1"), deny1, "loop1").output;
Output mrg1 = ops::Merge(s.WithOpName("mrg1"), {ent1, ent1}).output;
Output con1 = ops::Const(s.WithOpName("con1"), false, {});
Output lpc1 = ops::LoopCond(s.WithOpName("lpc1"), con1).output;
auto swt1 = ops::Switch(s.WithOpName("swt1"), mrg1, lpc1);
Output infer1 = ops::Sqrt(s.WithOpName("infer1"), swt1.output_true);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), infer1, infer1);
Output nxt1 = ops::NextIteration(s.WithOpName("nxt1"), allow1);
Output ext1 = ops::internal::Exit(s.WithOpName("ext1"), swt1.output_false);
Output fetch = ops::Identity(s.WithOpName("fetch"), ext1);
auto mrg2 = ops::Merge(s.WithOpName("mrg2"), {ent1, nxt1});
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
NodeMap node_map_original(&item.graph);
auto merge_node = node_map_original.GetNode("mrg1");
merge_node->set_input(1, "nxt1");
auto const_node = node_map_original.GetNode("con1");
const_node->add_input("^mrg1");
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
EXPECT_EQ(output_view.GetNode("deny1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("ent1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("mrg1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("swt1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("nxt1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("ext1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("mrg2")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
}
}
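// TensorList ops whose items flow to or from converted ops switch their
// element_dtype to fp16; the independent tl2 list stays fp32.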
TEST_P(AutoMixedPrecisionParamTest, TensorListSetGet) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
tensorflow::Input shape = {32, 32};
auto tl1 = ops::TensorListReserve(s.WithOpName("tl1"), {32, 32}, 8, DT_FLOAT);
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output idx1 = ops::Const(s.WithOpName("idx1"), 1);
Output idx2 = ops::Const(s.WithOpName("idx2"), 2);
Output idx3 = ops::Const(s.WithOpName("idx3"), 3);
auto tl1w1 =
ops::TensorListSetItem(s.WithOpName("tl1w1"), tl1.handle, idx1, input);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input, input);
auto tl1w2 =
ops::TensorListSetItem(s.WithOpName("tl1w2"), tl1.handle, idx2, allow1);
Output tl1rs =
ops::TensorListResize(s.WithOpName("tl1rs"), tl1w2.output_handle, 6);
Output tl1r1 = ops::TensorListGetItem(s.WithOpName("tl1r1"), tl1rs, idx2,
shape, DT_FLOAT)
.item;
Output infer1 = ops::Tanh(s.WithOpName("infer1"), tl1r1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), infer1, infer1);
auto tl1w3 =
ops::TensorListSetItem(s.WithOpName("tl1w3"), tl1.handle, idx3, allow2);
Output tl1r2 =
ops::TensorListGetItem(s.WithOpName("tl1r2"), tl1w3.output_handle, idx3,
shape, DT_FLOAT)
.item;
auto tl2 = ops::TensorListReserve(s.WithOpName("tl2"), shape, 8, DT_FLOAT);
auto tl2w1 =
ops::TensorListSetItem(s.WithOpName("tl2w1"), tl2.handle, idx1, input);
Output tl2r1 =
ops::TensorListGetItem(s.WithOpName("tl2r1"), tl2w1.output_handle, idx1,
shape, DT_FLOAT)
.item;
Output fetch1 = ops::Identity(s.WithOpName("fetch1"), tl1r2);
Output fetch2 = ops::Identity(s.WithOpName("fetch2"), tl2r1);
GrapplerItem item;
item.fetch = {"fetch1", "fetch2"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
const char* type_key = "element_dtype";
EXPECT_EQ(output_view.GetNode("tl1")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1w1")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1w2")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1r1")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1w3")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl2")->attr().at(type_key).type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("tl2w1")->attr().at(type_key).type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("tl2r1")->attr().at(type_key).type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 5e-4);
}
}
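// Same element_dtype propagation as TensorListSetGet, exercised through
// TensorListPushBack/TensorListPopBack.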
TEST_P(AutoMixedPrecisionParamTest, TensorListPushPop) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
tensorflow::Input shape = {32, 32};
auto tl1 = ops::EmptyTensorList(s.WithOpName("tl1"), {32, 32}, 8, DT_FLOAT);
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
auto tl1w1 =
ops::TensorListPushBack(s.WithOpName("tl1w1"), tl1.handle, input);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input, input);
auto tl1w2 = ops::TensorListPushBack(s.WithOpName("tl1w2"),
tl1w1.output_handle, allow1);
Output tl1r1 = ops::TensorListPopBack(s.WithOpName("tl1r1"),
tl1w2.output_handle, shape, DT_FLOAT)
.tensor;
Output infer1 = ops::Tanh(s.WithOpName("infer1"), tl1r1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), infer1, infer1);
auto tl1w3 =
ops::TensorListPushBack(s.WithOpName("tl1w3"), tl1.handle, allow2);
Output tl1r2 = ops::TensorListPopBack(s.WithOpName("tl1r2"),
tl1w3.output_handle, shape, DT_FLOAT)
.tensor;
auto tl2 = ops::EmptyTensorList(s.WithOpName("tl2"), shape, 8, DT_FLOAT);
auto tl2w1 =
ops::TensorListPushBack(s.WithOpName("tl2w1"), tl2.handle, input);
Output tl2r1 = ops::TensorListPopBack(s.WithOpName("tl2r1"),
tl2w1.output_handle, shape, DT_FLOAT)
.tensor;
Output fetch1 = ops::Identity(s.WithOpName("fetch1"), tl1r2);
Output fetch2 = ops::Identity(s.WithOpName("fetch2"), tl2r1);
GrapplerItem item;
item.fetch = {"fetch1", "fetch2"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
const char* type_key = "element_dtype";
EXPECT_EQ(output_view.GetNode("tl1")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1w1")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1w2")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1r1")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1w3")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl2")->attr().at(type_key).type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("tl2w1")->attr().at(type_key).type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("tl2r1")->attr().at(type_key).type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 5e-4);
}
}
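// TensorListFromTensor/TensorListStack handles fed by the converted MatMul
// switch to a fp16 element_dtype, including the second list built from the
// same tensor.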
TEST_P(AutoMixedPrecisionParamTest, TensorListFromTensor) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
tensorflow::Input shape = {32};
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input, input);
auto tl1 = ops::TensorListFromTensor(s.WithOpName("tl1"), allow1, shape);
Output tl1r1 = ops::TensorListStack(s.WithOpName("tl1r1"), tl1.output_handle,
shape, DT_FLOAT)
.tensor;
Output infer1 = ops::Tanh(s.WithOpName("infer1"), tl1r1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), infer1, infer1);
Output fetch1 = ops::Identity(s.WithOpName("fetch1"), allow2);
auto tl2 = ops::TensorListFromTensor(s.WithOpName("tl2"), allow1, shape);
auto tl2w1 =
ops::TensorListPushBack(s.WithOpName("tl2w1"), tl2.output_handle, input);
GrapplerItem item;
item.fetch = {"fetch1"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
const char* type_key = "element_dtype";
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1r1")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl2")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl2w1")->attr().at(type_key).type(), DT_HALF);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 4e-4);
}
}
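// element_dtype conversion propagates through TensorListPushBackBatch and
// TensorListConcatLists, even across the Stack/Split of the list handles.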
TEST_P(AutoMixedPrecisionParamTest, TensorListPushBackBatchAndConcatLists) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
tensorflow::Input shape = {32, 32};
auto tl1 = ops::EmptyTensorList(s.WithOpName("tl1"), {32, 32}, 8, DT_FLOAT);
auto tl2 = ops::EmptyTensorList(s.WithOpName("tl2"), {32, 32}, 8, DT_FLOAT);
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input, input);
Output tl1_tl2 =
ops::Stack(s.WithOpName("tl1_tl2"), {tl1.handle, tl2.handle});
Output allow1_allow1 =
ops::Stack(s.WithOpName("allow1_allow1"), {allow1, allow1});
auto tl12w1 = ops::TensorListPushBackBatch(s.WithOpName("tl12w1"), tl1_tl2,
allow1_allow1);
OutputList tl12w1_outputs =
ops::Split(s.WithOpName("tl12w1_outputs"), 0, tl12w1.output_handles, 2)
.output;
Output scalar_shape = ops::Const(s.WithOpName("scalar_shape"), 0, {0});
Output tl12w1_output0 = ops::Reshape(s.WithOpName("tl12w1_output0"),
tl12w1_outputs[0], scalar_shape);
Output tl12w1_output1 = ops::Reshape(s.WithOpName("tl12w1_output1"),
tl12w1_outputs[1], scalar_shape);
Output tl3 = ops::TensorListConcatLists(s.WithOpName("tl3"), tl12w1_output0,
tl12w1_output1, DT_FLOAT);
Output tl3r1 =
ops::TensorListPopBack(s.WithOpName("tl3r1"), tl3, shape, DT_FLOAT)
.tensor;
Output infer1 = ops::Tanh(s.WithOpName("infer1"), tl3r1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), infer1, infer1);
Output fetch1 = ops::Identity(s.WithOpName("fetch1"), allow2);
GrapplerItem item;
item.fetch = {"fetch1"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
const char* type_key = "element_dtype";
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl1")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl2")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl3")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl3r1")->attr().at(type_key).type(), DT_HALF);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 5e-4);
}
}
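// A TensorList handle that flows through a function call (Func1) does not
// break the rewrite; the list kept outside the function (tl2) is converted
// to fp16.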
TEST_P(AutoMixedPrecisionParamTest, TensorListThroughFunction) {
FunctionDefLibrary function_lib;
const Tensor kShape = test::AsTensor<int32>({32, 32});
FunctionDef func1 = FunctionDefHelper::Define(
"Func1", {"ihandle: variant", "x: float"},
{"ohandle: variant", "y: float"}, {},
{
{{"tl1w1_handle"},
"TensorListPushBack",
{"ihandle", "x"},
{{"element_dtype", DT_FLOAT}}},
{{"shape"}, "Const", {}, {{"value", kShape}, {"dtype", DT_INT32}}},
{{"tl1r1_handle", "tl1r1_data"},
"TensorListPopBack",
{"tl1w1_handle", "shape"},
{{"element_dtype", DT_FLOAT}}},
{{"ohandle"}, "Identity", {"tl1r1_handle"}, {{"T", DT_VARIANT}}},
{{"y"}, "Identity", {"tl1r1_data"}, {{"T", DT_FLOAT}}},
});
function_lib.add_function()->Swap(&func1);
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
TF_CHECK_OK(s.graph()->AddFunctionLibrary(function_lib));
tensorflow::Input shape = {32, 32};
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input, input);
Output infer1 = ops::Tanh(s.WithOpName("infer1"), allow1);
auto tl1 = ops::EmptyTensorList(s.WithOpName("tl1"), {32, 32}, 8, DT_FLOAT);
auto tl1w1 =
ops::TensorListPushBack(s.WithOpName("tl1w1"), tl1.handle, infer1);
auto _infer1 = tensorflow::ops::AsNodeOut(s, infer1);
auto _tl1w1_handle = tensorflow::ops::AsNodeOut(s, tl1w1.output_handle);
auto builder =
tensorflow::NodeBuilder("Func1", "Func1", s.graph()->op_registry());
tensorflow::Node* func1_op;
TF_CHECK_OK(builder.Input(_tl1w1_handle)
.Input(_infer1)
.Finalize(s.graph(), &func1_op));
Output func1_handle(func1_op, 0);
Output tl1r1 = ops::TensorListPopBack(s.WithOpName("tl1r1"), func1_handle,
shape, DT_FLOAT)
.tensor;
auto tl2 = ops::EmptyTensorList(s.WithOpName("tl2"), {32, 32}, 8, DT_FLOAT);
auto tl2w1 =
ops::TensorListPushBack(s.WithOpName("tl2w1"), tl2.handle, infer1);
Output tl2r1 = ops::TensorListPopBack(s.WithOpName("tl2r1"),
tl2w1.output_handle, shape, DT_FLOAT)
.tensor;
Output allow2 = ops::MatMul(s.WithOpName("allow2"), tl1r1, tl2r1);
Output fetch1 = ops::Identity(s.WithOpName("fetch1"), allow2);
GrapplerItem item;
item.fetch = {"fetch1"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
const char* type_key = "element_dtype";
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl2")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl2w1")->attr().at(type_key).type(), DT_HALF);
EXPECT_EQ(output_view.GetNode("tl2r1")->attr().at(type_key).type(), DT_HALF);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 5e-4);
}
}
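// Returns the CUDA version (e.g. 9010 for CUDA 9.1) reported by the first GPU
// device in the cluster, or 0 if no GPU is found.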
int GetCudaVersion(const Cluster& cluster) {
auto devices = cluster.GetDevices();
for (const auto& device : devices) {
const DeviceProperties& device_properties = device.second;
if (device_properties.type() == "GPU") {
const auto& device_env = device_properties.environment();
auto it = device_env.find("cuda");
if (it != device_env.end()) {
string cuda_version_str = it->second;
return std::stoi(cuda_version_str);
}
}
}
return 0;
}
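// fp16 BatchMatMul is only expected to be converted on CUDA >= 9.1; non-CUDA
// builds report support unconditionally.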
bool IsSupportedGPU(const Cluster& cluster) {
#ifdef GOOGLE_CUDA
return GetCudaVersion(cluster) >= 9010;
#else
return true;
#endif
}
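// On supported GPUs the BatchMatMul is converted to fp16 (adding two Casts);
// otherwise the graph is left untouched.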
TEST_P(AutoMixedPrecisionParamTest, BatchMatMul) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Const(s.WithOpName("input"), 1.f / 33, {64, 32, 32});
Output allow1 = ops::BatchMatMul(s.WithOpName("allow1"), input, input);
Output fetch1 = ops::Identity(s.WithOpName("fetch1"), allow1);
GrapplerItem item;
item.fetch = {"fetch1"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer(mode_);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
if (IsSupportedGPU(*virtual_cluster_.get())) {
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_HALF);
} else {
EXPECT_EQ(output.node_size(), item.graph.node_size());
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_FLOAT);
}
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 3.0e-3);
}
}
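// The tests below run TestSimpleUnaryInferOp on individual unary infer ops,
// each with its own input range and absolute/relative tolerance (a negative
// tolerance falls back to the framework default).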
TEST_P(AutoMixedPrecisionParamTest, EluOp) {
TestSimpleUnaryInferOp(
-5, 5, 1.0e-3, 1.0e-3,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Elu(scope, input);
});
}
TEST_P(AutoMixedPrecisionParamTest, ErfOp) {
TestSimpleUnaryInferOp(
-5, 5, 1.0e-3, -1,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Erf(scope, input);
});
}
TEST_P(AutoMixedPrecisionParamTest, ErfcOp) {
TestSimpleUnaryInferOp(
-5, 5, 1.0e-3, -1,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Erfc(scope, input);
});
}
TEST_P(AutoMixedPrecisionParamTest, InvOp) {
TestSimpleUnaryInferOp(
0.01, 10, -1, 1.0e-3,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Inv(scope, input);
});
}
TEST_P(AutoMixedPrecisionParamTest, LogOp) {
TestSimpleUnaryInferOp(
0.01, 10, 1.0e-3, 2.0e-3,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Log(scope, input);
});
}
TEST_P(AutoMixedPrecisionParamTest, Log1pOp) {
TestSimpleUnaryInferOp(
-0.99, 9, 1.0e-3, 5.0e-3,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Log1p(scope, input);
});
}
TEST_P(AutoMixedPrecisionParamTest, LogSoftmaxOp) {
TestSimpleUnaryInferOp(
-8, 8, -1, 1.0e-2,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::LogSoftmax(scope, input);
});
}
TEST_P(AutoMixedPrecisionParamTest, ReciprocalOp) {
TestSimpleUnaryInferOp(
0.01, 10, -1, 1.0e-3,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Reciprocal(scope, input);
});
}
TEST_P(AutoMixedPrecisionParamTest, SigmoidOp) {
TestSimpleUnaryInferOp(
-5, 5, 1.0e-3, -1,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Sigmoid(scope, input);
});
}
TEST_P(AutoMixedPrecisionParamTest, SoftmaxOp) {
TestSimpleUnaryInferOp(
-8, 8, 2.0e-3, -1,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Softmax(scope, input);
});
}
TEST_P(AutoMixedPrecisionParamTest, SoftplusOp) {
TestSimpleUnaryInferOp(
-5, 5, 2.0e-3, 2.0e-3,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Softplus(scope, input);
});
}
TEST_P(AutoMixedPrecisionParamTest, SqrtOp) {
TestSimpleUnaryInferOp(
0, 10, 1.0e-3, 1.0e-3,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Sqrt(scope, input);
});
}
TEST_P(AutoMixedPrecisionParamTest, TanhOp) {
TestSimpleUnaryInferOp(
-5, 5, 1.0e-3, -1,
[](const tensorflow::Scope& scope, Output input) -> Output {
return ops::Tanh(scope, input);
});
}
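// Run the parameterized tests under every mode available in this build: CUDA
// on GPU builds and FP16_CPU when oneDNN (INTEL_MKL) is enabled.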
constexpr AutoMixedPrecisionMode kTestValues[] = {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
AutoMixedPrecisionMode::CUDA,
#endif
#if INTEL_MKL
AutoMixedPrecisionMode::FP16_CPU,
#endif
};
INSTANTIATE_TEST_SUITE_P(AutoMixedPrecisionTest, AutoMixedPrecisionParamTest,
::testing::ValuesIn(kTestValues));
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
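// Tests for AutoMixedPrecisionMode::CPU: allow ops keep fp32 compute but are
// surrounded by Cast-to-fp16 / Cast-to-fp32 pairs on their inputs and outputs.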
class AutoMixedPrecisionCpuTest : public GrapplerTest {
protected:
void SetUp() override {
virtual_cluster_.reset(new SingleMachine(/*timeout_s=*/10, /*num_cpu_cores=*/1, /*num_gpus=*/0));
TF_CHECK_OK(virtual_cluster_->Provision());
}
void TearDown() override { TF_CHECK_OK(virtual_cluster_->Shutdown()); }
std::unique_ptr<Cluster> virtual_cluster_;
};
TEST_F(AutoMixedPrecisionCpuTest, Simple) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output deny1 = ops::Exp(s.WithOpName("deny1"), input);
Output clr1 = ops::Relu(s.WithOpName("clr1"), deny1);
Output infer1 = ops::Sqrt(s.WithOpName("infer1"), clr1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), infer1);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), clr2, clr2);
Output clr3 = ops::Relu(s.WithOpName("clr3"), allow1);
Output infer2 = ops::Log(s.WithOpName("infer2"), clr3);
Output clr4 = ops::Relu(s.WithOpName("clr4"), infer2);
Output deny2 = ops::SparseMatMul(s.WithOpName("deny2"), clr4, clr4);
Output clr5 = ops::Relu(s.WithOpName("clr5"), deny2);
Output fetch = ops::Identity(s.WithOpName("fetch"), clr5);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer{AutoMixedPrecisionMode::CPU};
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
const int expected_cast_ops = 9;
EXPECT_EQ(output.node_size(), item.graph.node_size() + expected_cast_ops);
GraphView output_view(&output);
auto matmul_op = output_view.GetNode("allow1");
EXPECT_EQ(matmul_op->attr().at("T").type(), DT_FLOAT);
for (auto edge : output_view.GetFaninEdges(*matmul_op, false)) {
EXPECT_EQ(edge.src.node->op(), "Cast");
auto cast_input_edges = output_view.GetFaninEdges(
*output_view.GetNode(edge.src.node->name()), false);
EXPECT_THAT(cast_input_edges, SizeIs(1));
EXPECT_THAT(edge.src.node->name(),
ContainsRegex("^" + cast_input_edges.begin()->src.node->name() +
"-0-CastToFp32-[0-9]-AutoMixedPrecision$"));
EXPECT_EQ(edge.src.node->attr().at("SrcT").type(), DT_HALF);
EXPECT_EQ(edge.src.node->attr().at("DstT").type(), DT_FLOAT);
}
for (auto edge : output_view.GetFanoutEdges(*matmul_op, false)) {
EXPECT_EQ(edge.dst.node->op(), "Cast");
EXPECT_THAT(edge.dst.node->name(),
ContainsRegex("^" + matmul_op->name() +
"-0-CastToFp16-[0-9]-AutoMixedPrecision$"));
EXPECT_EQ(edge.dst.node->attr().at("SrcT").type(), DT_FLOAT);
EXPECT_EQ(edge.dst.node->attr().at("DstT").type(), DT_HALF);
}
}
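// When the converted MatMul feeds both another MatMul and a deny op, Casts
// are placed so the deny op still consumes fp32 and its output is not cast.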
TEST_F(AutoMixedPrecisionCpuTest, MixedFanout) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
Output input1 = ops::Const(s.WithOpName("input1"), 1.f / 32, {32, 32});
Output input2 = ops::Const(s.WithOpName("input2"), 2.f / 32, {32, 32});
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input1, input2);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), allow1, input2);
Output deny = ops::Exp(s.WithOpName("deny"), allow1);
Output infer = ops::Add(s.WithOpName("infer"), deny, allow2);
Output fetch = ops::Identity(s.WithOpName("fetch"), infer);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer{AutoMixedPrecisionMode::CPU};
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
const int expected_cast_ops = 10;
EXPECT_EQ(output.node_size(), item.graph.node_size() + expected_cast_ops);
GraphView output_view(&output);
auto allow1_op = output_view.GetNode("allow1");
for (auto edge : output_view.GetFaninEdges(*allow1_op, false)) {
EXPECT_EQ(edge.src.node->op(), "Cast");
EXPECT_EQ(edge.src.node->attr().at("SrcT").type(), DT_HALF);
EXPECT_EQ(edge.src.node->attr().at("DstT").type(), DT_FLOAT);
}
for (auto edge : output_view.GetFanoutEdges(*allow1_op, false)) {
EXPECT_EQ(edge.dst.node->op(), "Cast");
EXPECT_EQ(edge.dst.node->attr().at("SrcT").type(), DT_FLOAT);
EXPECT_EQ(edge.dst.node->attr().at("DstT").type(), DT_HALF);
}
auto deny_op = output_view.GetNode("deny");
for (auto edge : output_view.GetFaninEdges(*deny_op, false)) {
EXPECT_EQ(edge.src.node->op(), "Cast");
EXPECT_EQ(edge.src.node->attr().at("SrcT").type(), DT_HALF);
EXPECT_EQ(edge.src.node->attr().at("DstT").type(), DT_FLOAT);
}
for (auto edge : output_view.GetFanoutEdges(*deny_op, false)) {
EXPECT_NE(edge.dst.node->op(), "Cast");
}
}
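// Exercises TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_SIMULATE_GPU: with only a
// virtual CPU in the cluster, the CUDA rewrite applies only when the flag is
// set and the ops are not explicitly placed on the CPU device.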
class AutoMixedPrecisionSimulateGpuTest : public GrapplerTest {
protected:
void SetUp() override {
std::unordered_map<string, DeviceProperties> devices;
DeviceProperties cpu_device;
cpu_device.set_type("CPU");
cpu_device.set_frequency(1000);
cpu_device.set_num_cores(4);
cpu_device.set_memory_size(1024 * 1024);
devices["/job:localhost/replica:0/task:0/device:CPU:0"] = cpu_device;
virtual_cluster_.reset(new VirtualCluster(devices));
TF_CHECK_OK(virtual_cluster_->Provision());
}
void TearDown() override {
unsetenv("TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_SIMULATE_GPU");
TF_CHECK_OK(virtual_cluster_->Shutdown());
}
std::unique_ptr<Cluster> virtual_cluster_;
void TestSimple(tensorflow::Scope s, bool is_optimized) {
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output deny1 = ops::Exp(s.WithOpName("deny1"), input);
Output clr1 = ops::Relu(s.WithOpName("clr1"), deny1);
Output infer1 = ops::Sqrt(s.WithOpName("infer1"), clr1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), infer1);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), clr2, clr2);
Output clr3 = ops::Relu(s.WithOpName("clr3"), allow1);
Output infer2 = ops::Log(s.WithOpName("infer2"), clr3);
Output clr4 = ops::Relu(s.WithOpName("clr4"), infer2);
Output deny2 = ops::SparseMatMul(s.WithOpName("deny2"), clr4, clr4);
Output clr5 = ops::Relu(s.WithOpName("clr5"), deny2);
Output fetch = ops::Identity(s.WithOpName("fetch"), clr5);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
GraphDef output;
AutoMixedPrecision optimizer;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
DataType expected_data_type = is_optimized ? DT_HALF : DT_FLOAT;
int expected_graph_size =
is_optimized ? item.graph.node_size() + 2 : item.graph.node_size();
EXPECT_EQ(output.node_size(), expected_graph_size);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(),
DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(),
expected_data_type);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(),
expected_data_type);
EXPECT_EQ(output_view.GetNode("clr3")->attr().at("T").type(),
expected_data_type);
EXPECT_EQ(output_view.GetNode("infer2")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr4")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny2")->attr().at("Ta").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny2")->attr().at("Tb").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr5")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 5e-4);
}
}
};
TEST_F(AutoMixedPrecisionSimulateGpuTest, Simple_NoGpu) {
TestSimple(tensorflow::Scope::NewRootScope(), false);
}
TEST_F(AutoMixedPrecisionSimulateGpuTest, Simple_SimulatedGpu) {
setenv("TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_SIMULATE_GPU", "true",
1 /*overwrite*/);
TestSimple(tensorflow::Scope::NewRootScope(), true);
}
TEST_F(AutoMixedPrecisionSimulateGpuTest, Simple_SimulatedGpu_CpuScope) {
setenv("TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_SIMULATE_GPU", "true",
1 /*overwrite*/);
TestSimple(tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0"),
false);
}
#endif  // GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if INTEL_MKL
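// bfloat16 tests (AutoMixedPrecisionMode::BF16) for oneDNN CPU builds.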
class AutoMixedPrecisionMklTest : public GrapplerTest {
protected:
void SetUp() override {
virtual_cluster_.reset(new SingleMachine(/*timeout_s=*/10, /*num_cpu_cores=*/1, /*num_gpus=*/0));
TF_CHECK_OK(virtual_cluster_->Provision());
}
void TearDown() override { TF_CHECK_OK(virtual_cluster_->Shutdown()); }
std::unique_ptr<Cluster> virtual_cluster_;
};
TEST_F(AutoMixedPrecisionMklTest, AlreadyBf16) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
Output input = ops::Const(s.WithOpName("input"), 1.f, {32, 32});
Output cst1 = ops::Cast(s.WithOpName("cst1"), input, DT_BFLOAT16);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), cst1, cst1);
Output clr1 = ops::Relu(s.WithOpName("clr1"), allow1);
Output cst2 = ops::Cast(s.WithOpName("cst2"), clr1, DT_FLOAT);
Output clr2 = ops::Relu(s.WithOpName("clr2"), cst2);
Output fetch = ops::Identity(s.WithOpName("fetch"), clr2);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer{AutoMixedPrecisionMode::BF16};
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
VerifyGraphsEquivalent(item.graph, output, __FUNCTION__);
GraphView output_view(&output);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("cst1")->attr().at("DstT").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("cst2")->attr().at("SrcT").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("cst2")->attr().at("DstT").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorNear<float>(tensors_expected[i], tensors[i], 1e-6);
}
}
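// bfloat16 counterpart of the fp16 "Simple" test above; note that Log is
// labelled as a deny op in this mode and stays fp32.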
TEST_F(AutoMixedPrecisionMklTest, Simple) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output deny1 = ops::Exp(s.WithOpName("deny1"), input);
Output clr1 = ops::Relu(s.WithOpName("clr1"), deny1);
Output infer1 = ops::Sqrt(s.WithOpName("infer1"), clr1);
Output clr2 = ops::Relu(s.WithOpName("clr2"), infer1);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), clr2, clr2);
Output clr3 = ops::Relu(s.WithOpName("clr3"), allow1);
Output deny2 = ops::Log(s.WithOpName("deny2"), clr3);
Output clr4 = ops::Relu(s.WithOpName("clr4"), deny2);
Output deny3 = ops::SparseMatMul(s.WithOpName("deny3"), clr4, clr4);
Output clr5 = ops::Relu(s.WithOpName("clr5"), deny3);
Output fetch = ops::Identity(s.WithOpName("fetch"), clr5);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer{AutoMixedPrecisionMode::BF16};
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
EXPECT_EQ(output_view.GetNode("input")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr2")->attr().at("T").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("clr3")->attr().at("T").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("deny2")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr4")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny3")->attr().at("Ta").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny3")->attr().at("Tb").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr5")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 5e-4);
}
}
TEST_F(AutoMixedPrecisionMklTest, TensorListSetGet) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
tensorflow::Input shape = {32, 32};
auto tl1 = ops::TensorListReserve(s.WithOpName("tl1"), {32, 32}, 8, DT_FLOAT);
Output input = ops::Const(s.WithOpName("input"), 1.f / 32, {32, 32});
Output idx1 = ops::Const(s.WithOpName("idx1"), 1);
Output idx2 = ops::Const(s.WithOpName("idx2"), 2);
Output idx3 = ops::Const(s.WithOpName("idx3"), 3);
auto tl1w1 =
ops::TensorListSetItem(s.WithOpName("tl1w1"), tl1.handle, idx1, input);
Output allow1 = ops::MatMul(s.WithOpName("allow1"), input, input);
auto tl1w2 =
ops::TensorListSetItem(s.WithOpName("tl1w2"), tl1.handle, idx2, allow1);
Output tl1rs =
ops::TensorListResize(s.WithOpName("tl1rs"), tl1w2.output_handle, 6);
Output tl1r1 = ops::TensorListGetItem(s.WithOpName("tl1r1"), tl1rs, idx2,
shape, DT_FLOAT)
.item;
Output infer1 = ops::Mul(s.WithOpName("infer1"), tl1r1, tl1r1);
Output allow2 = ops::MatMul(s.WithOpName("allow2"), infer1, infer1);
auto tl1w3 =
ops::TensorListSetItem(s.WithOpName("tl1w3"), tl1.handle, idx3, allow2);
Output tl1r2 =
ops::TensorListGetItem(s.WithOpName("tl1r2"), tl1w3.output_handle, idx3,
shape, DT_FLOAT)
.item;
auto tl2 = ops::TensorListReserve(s.WithOpName("tl2"), shape, 8, DT_FLOAT);
auto tl2w1 =
ops::TensorListSetItem(s.WithOpName("tl2w1"), tl2.handle, idx1, input);
Output tl2r1 =
ops::TensorListGetItem(s.WithOpName("tl2r1"), tl2w1.output_handle, idx1,
shape, DT_FLOAT)
.item;
Output fetch1 = ops::Identity(s.WithOpName("fetch1"), tl1r2);
Output fetch2 = ops::Identity(s.WithOpName("fetch2"), tl2r1);
GrapplerItem item;
item.fetch = {"fetch1", "fetch2"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer{AutoMixedPrecisionMode::BF16};
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 2);
const char* type_key = "element_dtype";
EXPECT_EQ(output_view.GetNode("tl1")->attr().at(type_key).type(),
DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("tl1w1")->attr().at(type_key).type(),
DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("allow1")->attr().at("T").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("tl1w2")->attr().at(type_key).type(),
DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("tl1r1")->attr().at(type_key).type(),
DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("infer1")->attr().at("T").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("allow2")->attr().at("T").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("tl1w3")->attr().at(type_key).type(),
DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("tl2")->attr().at(type_key).type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("tl2w1")->attr().at(type_key).type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("tl2r1")->attr().at(type_key).type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 1e-2);
}
}
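// A BiasAdd (infer op) immediately following a converted Conv2D is also
// converted to bfloat16, together with the Relu after it.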
TEST_F(AutoMixedPrecisionMklTest, InferFollowUpStreamAllow) {
if (!IsMKLEnabled())
GTEST_SKIP() << "Test only applicable to MKL auto-mixed precision.";
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
Output input1 = ops::Const(s.WithOpName("input1"), 1.f / 32, {8, 56, 56, 16});
Output weight = ops::Const(s.WithOpName("weight"), 2.f, {3, 3, 16, 16});
Output allow =
ops::Conv2D(s.WithOpName("allow"), input1, weight, {1, 1, 1, 1}, "SAME",
ops::Conv2D::DataFormat("NHWC"));
Output input2 = ops::Const(s.WithOpName("input2"), 1.f / 32, {16});
Output infer = ops::BiasAdd(s.WithOpName("infer"), allow, input2);
Output clr = ops::Relu(s.WithOpName("clr"), infer);
Output fetch = ops::Identity(s.WithOpName("fetch"), clr);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer{AutoMixedPrecisionMode::BF16};
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size() + 4);
EXPECT_EQ(output_view.GetNode("input1")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("weight")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("input2")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("allow")->attr().at("T").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("infer")->attr().at("T").type(), DT_BFLOAT16);
EXPECT_EQ(output_view.GetNode("clr")->attr().at("T").type(), DT_BFLOAT16);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i], -1, 1e-2);
}
}
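// With a deny-listed Pow as producer, the following BiasAdd and Relu must stay
// in float32 and the graph must remain unchanged.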
TEST_F(AutoMixedPrecisionMklTest, InferFollowUpStreamDeny) {
if (!IsMKLEnabled())
GTEST_SKIP() << "Test only applicable to MKL auto-mixed precision.";
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
Output input1 = ops::Const(s.WithOpName("input1"), 1.f / 32, {8, 56, 56, 16});
Output input2 = ops::Const(s.WithOpName("input2"), 1.f, {16});
Output input3 = ops::Const(s.WithOpName("input3"), 1.f / 32, {16});
Output deny = ops::Pow(s.WithOpName("deny"), input1, input2);
Output infer = ops::BiasAdd(s.WithOpName("infer"), deny, input3);
Output clr = ops::Relu(s.WithOpName("clr"), infer);
Output fetch = ops::Identity(s.WithOpName("fetch"), clr);
GrapplerItem item;
item.fetch = {"fetch"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
AutoMixedPrecision optimizer{AutoMixedPrecisionMode::BF16};
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
VLOG(1) << output.DebugString();
GraphView output_view(&output);
EXPECT_EQ(output.node_size(), item.graph.node_size());
EXPECT_EQ(output_view.GetNode("input1")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("input2")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("input3")->attr().at("dtype").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("deny")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("infer")->attr().at("T").type(), DT_FLOAT);
EXPECT_EQ(output_view.GetNode("clr")->attr().at("T").type(), DT_FLOAT);
auto tensors = EvaluateNodes(output, item.fetch);
EXPECT_EQ(tensors.size(), tensors_expected.size());
EXPECT_EQ(tensors.size(), item.fetch.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectClose(tensors_expected[i], tensors[i]);
}
}
#endif
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/auto_mixed_precision.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/auto_mixed_precision_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
93afc18b-9012-463a-bf70-e385a2f499ec | cpp | tensorflow/tensorflow | triton_support | third_party/xla/xla/service/gpu/fusions/triton/triton_support.cc | third_party/xla/xla/service/gpu/fusions/triton/triton_support_test.cc | #include "xla/service/gpu/fusions/triton/triton_support.h"
#include <variant>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
namespace {
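// Returns whether `type` can be emitted by Triton on the given device: FP8
// types require CUDA, and BF16 requires CUDA or a ROCm device with bf16
// support.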
bool IsTritonSupportedDataType(PrimitiveType type,
const se::GpuComputeCapability& gpu_version) {
switch (type) {
case PRED:
case S4:
case S8:
case S16:
case S32:
case S64:
case F16:
case F32:
case F64:
return true;
case F8E5M2:
case F8E4M3FN:
return std::holds_alternative<se::CudaComputeCapability>(gpu_version);
case BF16:
return std::holds_alternative<se::CudaComputeCapability>(gpu_version) ||
(std::holds_alternative<se::RocmComputeCapability>(gpu_version) &&
std::get<se::RocmComputeCapability>(gpu_version)
.has_bf16_dtype_support());
default:
return false;
}
}
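// Unary elementwise ops supported for `element_type`. PRED only gets
// convert/not, FP8 types cannot be negated, and the transcendental functions
// are limited to F32/F64.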
absl::flat_hash_set<HloOpcode> TritonSupportedUnaryElementwiseOps(
PrimitiveType element_type) {
if (element_type == PrimitiveType::PRED) {
return {HloOpcode::kConvert, HloOpcode::kNot};
}
if (element_type == PrimitiveType::U16) {
return {HloOpcode::kAbs};
}
absl::flat_hash_set<HloOpcode> ret{HloOpcode::kAbs, HloOpcode::kConvert};
if (element_type != PrimitiveType::F8E5M2 &&
element_type != PrimitiveType::F8E4M3FN) {
ret.insert(HloOpcode::kNegate);
}
if (primitive_util::IsIntegralType(element_type)) {
ret.insert(HloOpcode::kNot);
}
if (element_type == PrimitiveType::F32 ||
element_type == PrimitiveType::F64) {
absl::flat_hash_set<HloOpcode> additional_opcodes{
HloOpcode::kCos, HloOpcode::kExp, HloOpcode::kExpm1,
HloOpcode::kFloor, HloOpcode::kCeil, HloOpcode::kLog,
HloOpcode::kLog1p, HloOpcode::kRsqrt, HloOpcode::kSin,
HloOpcode::kSqrt, HloOpcode::kCbrt, HloOpcode::kTan,
HloOpcode::kTanh, HloOpcode::kErf};
ret.insert(additional_opcodes.begin(), additional_opcodes.end());
}
if (element_type == PrimitiveType::BF16 ||
element_type == PrimitiveType::F16) {
absl::flat_hash_set<HloOpcode> additional_opcodes{HloOpcode::kFloor,
HloOpcode::kCeil};
ret.insert(additional_opcodes.begin(), additional_opcodes.end());
}
if (primitive_util::IsFloatingPointType(element_type)) {
ret.insert(HloOpcode::kReducePrecision);
}
return ret;
}
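// Decides whether a convert between `input` and `output` is supported. On
// CUDA, conversions involving F8E4M3FN require Hopper or newer, and any FP8
// conversion must have F16, BF16 or F32 on the other side.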
CodegenDecision IsTritonSupportedConversion(
PrimitiveType output, PrimitiveType input,
const se::GpuComputeCapability& gpu_version) {
auto any_is = [=](PrimitiveType compare) {
return input == compare || output == compare;
};
auto error_message = [&]() {
return CodegenDecision::Forbid(
absl::StrCat("Unsupported conversion in Triton: ",
primitive_util::LowercasePrimitiveTypeName(input), " to ",
primitive_util::LowercasePrimitiveTypeName(output)));
};
if (input != output && any_is(PrimitiveType::F8E4M3FN) &&
std::holds_alternative<se::CudaComputeCapability>(gpu_version) &&
!std::get<se::CudaComputeCapability>(gpu_version).IsAtLeastHopper()) {
return error_message();
}
if (input != output &&
(any_is(PrimitiveType::F8E4M3FN) || any_is(PrimitiveType::F8E5M2)) &&
!(any_is(PrimitiveType::F16) || any_is(PrimitiveType::BF16) ||
any_is(PrimitiveType::F32))) {
return error_message();
}
if (IsTritonSupportedDataType(input, gpu_version) &&
(IsTritonSupportedDataType(output, gpu_version) ||
output == PrimitiveType::S4)) {
return CodegenDecision::Allow();
}
return error_message();
}
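// Binary elementwise ops supported for `element_type`. U16 and FP8 types
// support none; PRED gets add/compare/min/max/multiply plus and/or/xor;
// divide is supported for integers and F32/F64; atan2, remainder and power
// only for F32/F64.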
absl::flat_hash_set<HloOpcode> TritonSupportedBinaryElementwiseOps(
PrimitiveType element_type, const se::GpuComputeCapability& gpu_version) {
if (element_type == PrimitiveType::U16 ||
element_type == PrimitiveType::F8E5M2 ||
element_type == PrimitiveType::F8E4M3FN) {
return {};
}
absl::flat_hash_set<HloOpcode> ret{HloOpcode::kAdd, HloOpcode::kCompare,
HloOpcode::kMaximum, HloOpcode::kMinimum,
HloOpcode::kMultiply};
if (element_type == PrimitiveType::PRED) {
ret.insert(HloOpcode::kAnd);
ret.insert(HloOpcode::kOr);
ret.insert(HloOpcode::kXor);
return ret;
}
ret.insert(HloOpcode::kSubtract);
if (primitive_util::IsIntegralType(element_type)) {
ret.insert(HloOpcode::kDivide);
ret.insert(HloOpcode::kAnd);
ret.insert(HloOpcode::kOr);
ret.insert(HloOpcode::kXor);
}
if (element_type == PrimitiveType::F32 ||
element_type == PrimitiveType::F64) {
ret.insert(HloOpcode::kAtan2);
ret.insert(HloOpcode::kDivide);
ret.insert(HloOpcode::kRemainder);
ret.insert(HloOpcode::kPower);
}
return ret;
}
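// Ternary elementwise ops: select and clamp, except that U16 supports neither
// and FP8 types only support select.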
absl::flat_hash_set<HloOpcode> TritonSupportedTernaryElementwiseOps(
PrimitiveType element_type, const se::GpuComputeCapability& gpu_version) {
if (element_type == PrimitiveType::U16) {
return {};
}
if (element_type == PrimitiveType::F8E5M2 ||
element_type == PrimitiveType::F8E4M3FN) {
return {HloOpcode::kSelect};
}
return {HloOpcode::kSelect, HloOpcode::kClamp};
}
bool IsTritonSupportedElementwise(HloOpcode opcode, PrimitiveType element_type,
const se::GpuComputeCapability& gpu_version) {
return TritonSupportedUnaryElementwiseOps(element_type).contains(opcode) ||
TritonSupportedBinaryElementwiseOps(element_type, gpu_version)
.contains(opcode) ||
TritonSupportedTernaryElementwiseOps(element_type, gpu_version)
.contains(opcode);
}
CodegenDecision IsTritonSupportedInstructionImpl(
const HloInstruction& instr, const se::GpuComputeCapability& gpu_version,
bool is_within_reduction_computation);
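// A reduce is supported only if it reduces a single dimension of a single
// operand, its result type is not FP8, and every instruction in its to_apply
// computation is itself supported.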
CodegenDecision CanTritonHandleReduce(
const HloReduceInstruction& reduce,
const se::GpuComputeCapability& gpu_version) {
if (reduce.shape().element_type() == PrimitiveType::F8E4M3FN ||
reduce.shape().element_type() == PrimitiveType::F8E5M2) {
return CodegenDecision::Forbid(
"F8E4M3FN and F8E5M2 are not supported for reductions.");
}
bool is_triton_supported_reduction_computation = absl::c_all_of(
reduce.to_apply()->instructions(), [&](const HloInstruction* instr) {
return IsTritonSupportedInstructionImpl(
*instr, gpu_version,
true)
.CanFuse();
});
if (!is_triton_supported_reduction_computation) {
return CodegenDecision::Forbid(
"Unsupported reduction computation by Triton.");
}
if (reduce.dimensions().size() == 1 && reduce.operand_count() == 2) {
return CodegenDecision::Allow();
}
return CodegenDecision::Forbid(
"Reduction is not a row-reduction of a single operand.");
}
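// Central support check: rejects blanket-unsupported opcodes and unsupported
// 0D tensors, special-cases convert/constant/iota, verifies operand and result
// types, and defers elementwise ops and reductions to the helpers above.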
CodegenDecision IsTritonSupportedInstructionImpl(
const HloInstruction& instr, const se::GpuComputeCapability& gpu_version,
bool is_within_reduction_computation) {
if (internal::IsTritonUnsupportedOpcode(instr.opcode())) {
return CodegenDecision::Forbid("Unsupported opcode.");
}
if (IsUnsupported0DTensor(instr, is_within_reduction_computation)) {
return CodegenDecision::Forbid("Unsupported 0D tensor");
}
if (instr.opcode() == HloOpcode::kConvert) {
return IsTritonSupportedConversion(instr.shape().element_type(),
instr.operand(0)->shape().element_type(),
gpu_version);
}
auto type = instr.shape().element_type();
bool output_type_is_supported = IsTritonSupportedDataType(type, gpu_version);
if (!output_type_is_supported) {
return CodegenDecision::Forbid("Unsupported output data type.");
}
bool input_types_are_supported =
absl::c_all_of(instr.operands(), [&](const HloInstruction* operand) {
return IsTritonSupportedDataType(operand->shape().element_type(),
gpu_version);
});
if (!input_types_are_supported) {
return CodegenDecision::Forbid("Unsupported input data type.");
}
if (instr.opcode() == HloOpcode::kConstant) {
return ShapeUtil::IsScalar(instr.shape())
? CodegenDecision::Allow()
: CodegenDecision::Forbid(
"Only scalar constants are supported in Triton.");
}
if (instr.opcode() == HloOpcode::kIota) {
PrimitiveType element_type = instr.shape().element_type();
return element_type != PrimitiveType::F8E4M3FN &&
element_type != PrimitiveType::F8E5M2
? CodegenDecision::Allow()
: CodegenDecision::Forbid(
"F8E4M3FN and F8E5M2 are not supported for iota.");
}
if (instr.IsElementwise()) {
if (!IsTritonSupportedElementwise(
instr.opcode(),
instr.operand(instr.operand_count() - 1)->shape().element_type(),
gpu_version)) {
return CodegenDecision::Forbid("Unsupported elementwise operation.");
}
return CodegenDecision::Allow();
}
switch (instr.opcode()) {
case HloOpcode::kReduce: {
return CanTritonHandleReduce(*Cast<HloReduceInstruction>(&instr),
gpu_version);
}
case HloOpcode::kSlice:
case HloOpcode::kTranspose:
case HloOpcode::kParameter:
case HloOpcode::kBroadcast:
case HloOpcode::kBitcast:
case HloOpcode::kReshape:
return CodegenDecision::Allow();
default:
VLOG(2) << "Unsupported instruction: " << instr.ToString();
break;
}
return CodegenDecision::Forbid("Unsupported opcode.");
}
}
namespace internal {
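// Opcodes rejected outright, independently of operand types or target device.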
bool IsTritonUnsupportedOpcode(HloOpcode opcode) {
switch (opcode) {
case HloOpcode::kAddDependency:
case HloOpcode::kAfterAll:
case HloOpcode::kBatchNormGrad:
case HloOpcode::kBatchNormInference:
case HloOpcode::kBatchNormTraining:
case HloOpcode::kBitcastConvert:
case HloOpcode::kCall:
case HloOpcode::kCholesky:
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kCollectivePermuteDone:
case HloOpcode::kCollectivePermuteStart:
case HloOpcode::kComplex:
case HloOpcode::kConcatenate:
case HloOpcode::kConditional:
case HloOpcode::kConvolution:
case HloOpcode::kCopy:
case HloOpcode::kCopyDone:
case HloOpcode::kCopyStart:
case HloOpcode::kCustomCall:
case HloOpcode::kDomain:
case HloOpcode::kDot:
case HloOpcode::kDynamicReshape:
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice:
case HloOpcode::kFft:
case HloOpcode::kFusion:
case HloOpcode::kGather:
case HloOpcode::kGetDimensionSize:
case HloOpcode::kGetTupleElement:
case HloOpcode::kInfeed:
case HloOpcode::kMap:
case HloOpcode::kOptimizationBarrier:
case HloOpcode::kOutfeed:
case HloOpcode::kPad:
case HloOpcode::kPartitionId:
case HloOpcode::kRecv:
case HloOpcode::kRecvDone:
case HloOpcode::kReduceWindow:
case HloOpcode::kReplicaId:
case HloOpcode::kReverse:
case HloOpcode::kRngBitGenerator:
case HloOpcode::kRngGetAndUpdateState:
case HloOpcode::kScatter:
case HloOpcode::kSelectAndScatter:
case HloOpcode::kSend:
case HloOpcode::kSendDone:
case HloOpcode::kSetDimensionSize:
case HloOpcode::kSort:
case HloOpcode::kStochasticConvert:
case HloOpcode::kTopK:
case HloOpcode::kTriangularSolve:
case HloOpcode::kTuple:
case HloOpcode::kWhile:
return true;
default:
return false;
}
}
}
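// Triton is only used on ROCm devices and on CUDA devices with compute
// capability 8.0 (Ampere) or newer.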
absl::Status EnsureTritonSupportsComputeCapability(
const se::GpuComputeCapability& gpu_compute_capability) {
auto cuda_compute_capability =
std::get_if<se::CudaComputeCapability>(&gpu_compute_capability);
auto rocm_compute_capability =
std::get_if<se::RocmComputeCapability>(&gpu_compute_capability);
if (!cuda_compute_capability && !rocm_compute_capability) {
return absl::FailedPreconditionError(
"Triton support is only enabled for CUDA and ROCm GPUs.");
}
if (cuda_compute_capability && !cuda_compute_capability->IsAtLeastAmpere()) {
return absl::FailedPreconditionError(
absl::StrCat("CUDA Triton support is only enabled for Ampere GPUs ",
"(compute capability 8.0) and up, but got compute ",
"capability ", cuda_compute_capability->major, ".",
cuda_compute_capability->minor, "."));
}
return absl::OkStatus();
}
CodegenDecision IsTritonSupportedInstruction(
const HloInstruction& instr, const se::GpuComputeCapability& gpu_version) {
CodegenDecision decision = IsTritonSupportedInstructionImpl(
instr, gpu_version, false);
VLOG(2) << "IsTritonSupportedInstruction: " << instr.ToString() << " "
<< bool(decision);
return decision;
}
CodegenDecision IsTritonSupportedComputation(
const HloComputation& computation,
const se::GpuComputeCapability& gpu_compute_capability) {
for (const auto* instruction : computation.instructions()) {
if (CodegenDecision can_codegen =
IsTritonSupportedInstruction(*instruction, gpu_compute_capability);
!can_codegen) {
return can_codegen;
}
}
return CodegenDecision::Allow();
}
bool IsTritonFusedComputation(const HloComputation& computation) {
HloFusionInstruction* fusion =
static_cast<HloFusionInstruction*>(computation.FusionInstruction());
return fusion != nullptr &&
fusion->fusion_kind() == HloInstruction::FusionKind::kCustom &&
fusion->backend_config<gpu::GpuBackendConfig>()
->fusion_backend_config()
.kind() == kTritonGemmFusionKind;
}
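// Rank-0 array tensors are unsupported unless they are constants, appear
// inside a reduction computation, feed only broadcasts, or are non-root
// elementwise ops whose operands are all rank-0.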
bool IsUnsupported0DTensor(const HloInstruction& instr,
bool is_within_reduction_computation) {
if (!instr.shape().IsArray() || instr.shape().rank() != 0 ||
is_within_reduction_computation ||
instr.opcode() == HloOpcode::kConstant) {
return false;
}
if (instr.user_count() > 0 &&
absl::c_all_of(instr.users(), [&](const HloInstruction* user) {
return user->opcode() == HloOpcode::kBroadcast;
})) {
return false;
}
if (instr.IsElementwise() && !instr.IsRoot() &&
absl::c_all_of(instr.operands(), [&](const HloInstruction* operand) {
return operand->shape().IsArray() && operand->shape().rank() == 0;
})) {
return false;
}
return true;
}
}
} | #include "xla/service/gpu/fusions/triton/triton_support.h"
#include <array>
#include <cstdint>
#include <string>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/fusions/triton/triton_fusion_emitter.h"
#include "xla/service/gpu/fusions/triton/triton_test_utils.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/model/tiled_hlo_computation.h"
#include "xla/stream_executor/device_description.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::Not;
using ::tsl::testing::IsOk;
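// All XLA primitive types except the invalid/tuple/opaque/token placeholders.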
std::vector<xla::PrimitiveType> AllXlaDataTypes() {
std::vector<xla::PrimitiveType> xla_data_types;
std::vector<xla::PrimitiveType> to_filter_out = {PRIMITIVE_TYPE_INVALID,
TUPLE, OPAQUE_TYPE, TOKEN};
const tsl::protobuf::EnumDescriptor* xla_type_descriptor =
tsl::protobuf::GetEnumDescriptor<xla::PrimitiveType>();
for (int enum_ix = 0; enum_ix < xla_type_descriptor->value_count();
++enum_ix) {
xla::PrimitiveType xla_type = static_cast<xla::PrimitiveType>(
xla_type_descriptor->value(enum_ix)->number());
if (!absl::c_linear_search(to_filter_out, xla_type)) {
xla_data_types.push_back(xla_type);
}
}
return xla_data_types;
}
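// Returns whether `opcode` accepts operands of `type` at the HLO level; used
// to skip combinations that would not form valid HLO.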
bool DoesOpSupportType(HloOpcode opcode, PrimitiveType type) {
namespace pu = ::xla::primitive_util;
switch (opcode) {
case HloOpcode::kAnd:
case HloOpcode::kOr:
case HloOpcode::kXor:
case HloOpcode::kNot:
return type == PRED || pu::IsIntegralType(type);
case HloOpcode::kCos:
case HloOpcode::kExp:
case HloOpcode::kExpm1:
case HloOpcode::kLog:
case HloOpcode::kLog1p:
case HloOpcode::kRsqrt:
case HloOpcode::kSin:
case HloOpcode::kSqrt:
case HloOpcode::kCbrt:
case HloOpcode::kTan:
case HloOpcode::kTanh:
case HloOpcode::kReal:
case HloOpcode::kImag:
case HloOpcode::kLogistic:
return pu::IsFloatingPointType(type) || pu::IsComplexType(type);
case HloOpcode::kErf:
case HloOpcode::kFloor:
case HloOpcode::kCeil:
case HloOpcode::kIsFinite:
case HloOpcode::kRoundNearestAfz:
case HloOpcode::kRoundNearestEven:
case HloOpcode::kReducePrecision:
return pu::IsFloatingPointType(type);
case HloOpcode::kClz:
case HloOpcode::kShiftRightArithmetic:
case HloOpcode::kShiftRightLogical:
case HloOpcode::kShiftLeft:
case HloOpcode::kPopulationCount:
return pu::IsIntegralType(type);
case HloOpcode::kAbs:
case HloOpcode::kSign:
return pu::IsSignedIntegralType(type) || pu::IsFloatingPointType(type) ||
pu::IsComplexType(type);
case HloOpcode::kPower:
case HloOpcode::kAtan2:
case HloOpcode::kDivide:
case HloOpcode::kRemainder:
case HloOpcode::kSubtract:
case HloOpcode::kNegate:
case HloOpcode::kIota:
return type != PRED;
case HloOpcode::kRng:
return !pu::IsComplexType(type);
default:
return true;
}
}
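// Compute capabilities to run the tests against: a sample ROCm device for
// ROCm builds, otherwise Ampere and Hopper.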
auto AllDevicesToTest() {
using cc = se::GpuComputeCapability;
#ifdef TENSORFLOW_USE_ROCM
se::RocmComputeCapability example_rocm_compute_capability =
TestGpuDeviceInfo::AMDMI210DeviceInfo().rocm_compute_capability();
return std::vector<cc>{cc(example_rocm_compute_capability)};
#else
return std::vector<cc>{cc(se::CudaComputeCapability::Ampere()),
cc(se::CudaComputeCapability::Hopper())};
#endif
}
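// Cartesian product of data types, opcodes and devices, restricted to
// combinations where the opcode accepts the data type.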
auto AllTestCombinationsForOpcodes(absl::Span<const HloOpcode> opcodes) {
std::vector<std::tuple<PrimitiveType, HloOpcode, se::GpuComputeCapability>>
test_combinations;
for (PrimitiveType data_type : AllXlaDataTypes()) {
for (HloOpcode opcode : opcodes) {
if (DoesOpSupportType(opcode, data_type)) {
for (se::GpuComputeCapability cc : AllDevicesToTest()) {
test_combinations.push_back({data_type, opcode, cc});
}
}
}
}
return ::testing::ValuesIn(test_combinations);
}
class TritonSupportTest : public TritonSupportTestBase {
public:
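// Runs Triton codegen for the tested instruction. If the support query says
// the instruction is supported, codegen must succeed; otherwise it must fail
// with an error, except for known-crashing cases where
// `skip_failure_branch_to_avoid_crash` turns the check into an EXPECT_DEATH.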
void RunSupportTest(TestedInstruction ti,
std::vector<int64_t> output_tile_sizes,
se::GpuComputeCapability cc,
bool skip_failure_branch_to_avoid_crash = false) {
if (ti.Instruction().shape().IsArray()) {
ASSERT_EQ(output_tile_sizes.size(), ti.Instruction().shape().rank());
}
BlockLevelParameters block_level_parameters =
FromOutputTileSizes(std::move(output_tile_sizes));
const se::DeviceDescription dev_info =
std::holds_alternative<se::CudaComputeCapability>(cc)
? TestGpuDeviceInfo::RTXA6000DeviceInfo(cc)
: TestGpuDeviceInfo::AMDMI210DeviceInfo();
auto run_triton_codegen = [&]() {
return TritonWrapper("test_fn", &ti.TritonFusion(), cc, dev_info,
block_level_parameters, &llvm_module_,
mlir_context_);
};
if (IsTritonSupportedInstruction(ti.Instruction(), cc)) {
EXPECT_THAT(run_triton_codegen(), IsOk());
} else {
if (skip_failure_branch_to_avoid_crash) {
EXPECT_DEATH(
try { run_triton_codegen().IgnoreError(); } catch (...) {
abort();
},
"");
} else {
EXPECT_THAT(run_triton_codegen(), Not(IsOk()));
}
}
}
};
class TritonSupportTestWithTypeAndOpcodeAndDeviceParam
: public TritonSupportTest,
public ::testing::WithParamInterface<
std::tuple<PrimitiveType, HloOpcode, se::GpuComputeCapability>> {};
using BitcastOrReshapeTest = TritonSupportTestWithTypeAndOpcodeAndDeviceParam;
TEST_P(BitcastOrReshapeTest, IsTritonSupportedBitcastOrReshape) {
auto [data_type, opcode, cc] = GetParam();
const std::string kHloTestTemplate = R"(
ENTRY triton_computation {
parameter_0 = $0[1,16,4] parameter(0)
ROOT bitcast_or_reshape = $0[64] $1(parameter_0)
})";
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
RunSupportTest(std::move(ti), {16}, cc);
}
constexpr std::array kTestedOpsBitcastReshape = {HloOpcode::kBitcast,
HloOpcode::kReshape};
INSTANTIATE_TEST_SUITE_P(
BitcastOrReshapeTestSuite, BitcastOrReshapeTest,
AllTestCombinationsForOpcodes(kTestedOpsBitcastReshape),
TritonSupportTestTypeAndOpcodeAndDeviceToString);
using UnaryElementwiseTest = TritonSupportTestWithTypeAndOpcodeAndDeviceParam;
TEST_P(UnaryElementwiseTest, IsTritonSupportedUnaryElementwise) {
auto [data_type, opcode, cc] = GetParam();
const std::string kDefaultHloTemplate = R"(
ENTRY triton_computation {
parameter_0 = $0[33,68] parameter(0)
ROOT unary = $0[33,68] $1(parameter_0)
})";
const std::string kF64OutputTemplate = R"(
ENTRY triton_computation {
parameter_0 = $0[33,68] parameter(0)
ROOT unary = f64[33,68] $1(parameter_0)
})";
const std::string kPredOutputTemplate = R"(
ENTRY triton_computation {
parameter_0 = $0[33,68] parameter(0)
ROOT unary = pred[33,68] $1(parameter_0)
})";
const std::string kReducePrecisionTemplate = R"(
ENTRY triton_computation {
parameter_0 = $0[33,68] parameter(0)
ROOT unary = $0[33,68] $1(parameter_0), exponent_bits=2, mantissa_bits=2
})";
bool f64_output =
opcode == HloOpcode::kReal || opcode == HloOpcode::kImag ||
(opcode == HloOpcode::kAbs && primitive_util::IsComplexType(data_type));
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(
f64_output ? kF64OutputTemplate
: (opcode == HloOpcode::kIsFinite
? kPredOutputTemplate
: (opcode == HloOpcode::kReducePrecision
? kReducePrecisionTemplate
: kDefaultHloTemplate)),
data_type, opcode));
RunSupportTest(std::move(ti), {1, 32}, cc);
}
constexpr std::array kTestedOpsUnaryElementwise = {HloOpcode::kAbs,
HloOpcode::kCbrt,
HloOpcode::kCeil,
HloOpcode::kClz,
HloOpcode::kCos,
HloOpcode::kErf,
HloOpcode::kExp,
HloOpcode::kExpm1,
HloOpcode::kFloor,
HloOpcode::kImag,
HloOpcode::kIsFinite,
HloOpcode::kLog,
HloOpcode::kLog1p,
HloOpcode::kLogistic,
HloOpcode::kNegate,
HloOpcode::kNot,
HloOpcode::kPopulationCount,
HloOpcode::kReal,
HloOpcode::kReducePrecision,
HloOpcode::kRoundNearestAfz,
HloOpcode::kRoundNearestEven,
HloOpcode::kRsqrt,
HloOpcode::kSign,
HloOpcode::kSin,
HloOpcode::kSqrt,
HloOpcode::kTan,
HloOpcode::kTanh};
INSTANTIATE_TEST_SUITE_P(
UnaryElementwiseTestSuite, UnaryElementwiseTest,
AllTestCombinationsForOpcodes(kTestedOpsUnaryElementwise),
TritonSupportTestTypeAndOpcodeAndDeviceToString);
class ConvertTest
: public TritonSupportTest,
public ::testing::WithParamInterface<
std::tuple<PrimitiveType, PrimitiveType, se::GpuComputeCapability>> {
};
TEST_P(ConvertTest, Convert) {
auto [data_type_in, data_type_out, cc] = GetParam();
const std::string hlo_text = absl::Substitute(
R"(
ENTRY triton_computation {
parameter_0 = $0[33,68] parameter(0)
ROOT convert = $1[33,68] convert(parameter_0)
})",
primitive_util::LowercasePrimitiveTypeName(data_type_in),
primitive_util::LowercasePrimitiveTypeName(data_type_out));
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(
hlo_text, data_type_in,
HloOpcode::kConvert));
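// Some unsupported conversions involving FP8 and F64 crash instead of
// returning an error; exercise those through the death-test path.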
bool skip_failure_branch_to_avoid_crash = false;
PrimitiveType captured_in = data_type_in;
PrimitiveType captured_out = data_type_out;
auto any_is = [=](PrimitiveType compare) {
return captured_in == compare || captured_out == compare;
};
if (data_type_in != data_type_out && any_is(PrimitiveType::F8E4M3FN) &&
std::holds_alternative<se::CudaComputeCapability>(cc) &&
!std::get<se::CudaComputeCapability>(cc).IsAtLeastHopper()) {
skip_failure_branch_to_avoid_crash |=
any_is(F16) || any_is(BF16) || any_is(F32);
skip_failure_branch_to_avoid_crash |=
(data_type_in == PrimitiveType::F8E4M3FN &&
data_type_out == PrimitiveType::F64);
}
skip_failure_branch_to_avoid_crash |=
(any_is(PrimitiveType::F8E4M3FN) && any_is(PrimitiveType::F8E5M2)) ||
(data_type_in == PrimitiveType::F64 &&
(data_type_out == PrimitiveType::F8E4M3FN ||
data_type_out == PrimitiveType::F8E5M2));
skip_failure_branch_to_avoid_crash |=
(data_type_out == PrimitiveType::F64 &&
(data_type_in == PrimitiveType::F8E4M3FN ||
data_type_in == PrimitiveType::F8E5M2));
RunSupportTest(std::move(ti), {1, 32}, cc,
skip_failure_branch_to_avoid_crash);
}
constexpr std::array kTestedOpsConvert = {HloOpcode::kConvert};
INSTANTIATE_TEST_SUITE_P(
ConvertTestSuite, ConvertTest,
::testing::Combine(::testing::ValuesIn(AllXlaDataTypes()),
::testing::ValuesIn(AllXlaDataTypes()),
::testing::ValuesIn(AllDevicesToTest())),
TritonSupportTestTwoTypesAndDeviceToString);
using BinaryElementwiseTest = TritonSupportTestWithTypeAndOpcodeAndDeviceParam;
TEST_P(BinaryElementwiseTest, IsTritonSupportedBinaryElementwise) {
auto [data_type, opcode, cc] = GetParam();
const std::string kHloTestTemplate = R"(
ENTRY triton_computation {
parameter_0 = $0[11,63] parameter(0)
parameter_1 = $0[11,63] parameter(1)
ROOT binary = $0[11,63] $1(parameter_0, parameter_1)
})";
const std::string kHloCompareTestTemplate = R"(
ENTRY triton_computation {
parameter_0 = $0[11,63] parameter(0)
parameter_1 = $0[11,63] parameter(1)
ROOT compare = pred[11,63] $1(parameter_0, parameter_1), direction=GE
})";
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(opcode == HloOpcode::kCompare
? kHloCompareTestTemplate
: kHloTestTemplate,
data_type, opcode));
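// Unsupported divides on BF16/F16/FP8 crash instead of returning an error;
// exercise those through the death-test path.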
bool skip_failure_branch_to_avoid_crash =
opcode == HloOpcode::kDivide &&
(data_type == PrimitiveType::BF16 || data_type == PrimitiveType::F16 ||
data_type == PrimitiveType::F8E5M2 ||
data_type == PrimitiveType::F8E4M3FN);
RunSupportTest(std::move(ti), {1, 32}, cc,
skip_failure_branch_to_avoid_crash);
}
TEST_P(BinaryElementwiseTest, IsTritonSupportedBinaryElementwise0D) {
auto [data_type, opcode, cc] = GetParam();
const std::string kHloTestTemplate = R"(
ENTRY triton_computation {
parameter_0 = $0[] parameter(0)
parameter_1 = $0[] parameter(1)
ROOT binary = $0[] $1(parameter_0, parameter_1)
})";
const std::string kHloCompareTestTemplate = R"(
ENTRY triton_computation {
parameter_0 = $0[] parameter(0)
parameter_1 = $0[] parameter(1)
ROOT compare = pred[] $1(parameter_0, parameter_1), direction=GE
})";
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(opcode == HloOpcode::kCompare
? kHloCompareTestTemplate
: kHloTestTemplate,
data_type, opcode));
RunSupportTest(std::move(ti), {}, cc);
}
constexpr std::array kTestedOpsBinaryElementwise = {
HloOpcode::kAnd,
HloOpcode::kOr,
HloOpcode::kXor,
HloOpcode::kAdd,
HloOpcode::kMultiply,
HloOpcode::kMaximum,
HloOpcode::kMinimum,
HloOpcode::kSubtract,
HloOpcode::kAtan2,
HloOpcode::kDivide,
HloOpcode::kRemainder,
HloOpcode::kPower,
HloOpcode::kShiftLeft,
HloOpcode::kShiftRightArithmetic,
HloOpcode::kShiftRightLogical,
HloOpcode::kCompare};
INSTANTIATE_TEST_SUITE_P(
BinaryElementwiseTestSuite, BinaryElementwiseTest,
AllTestCombinationsForOpcodes(kTestedOpsBinaryElementwise),
TritonSupportTestTypeAndOpcodeAndDeviceToString);
using TernaryElementwiseTest = TritonSupportTestWithTypeAndOpcodeAndDeviceParam;
TEST_P(TernaryElementwiseTest, IsTritonSupportedTernaryElementwise) {
auto [data_type, opcode, cc] = GetParam();
const std::string kHloTestTemplate = R"(
ENTRY triton_computation {
parameter_0 = $2[13,63] parameter(0)
parameter_1 = $0[13,63] parameter(1)
parameter_2 = $0[13,63] parameter(2)
ROOT ternary = $0[13,63] $1(parameter_0, parameter_1, parameter_2)
})";
auto type = primitive_util::LowercasePrimitiveTypeName(data_type);
const std::string hlo_text =
absl::Substitute(kHloTestTemplate, type, HloOpcodeString(opcode),
opcode == HloOpcode::kSelect ? "pred" : type);
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(hlo_text, data_type, opcode));
RunSupportTest(std::move(ti), {1, 32}, cc);
}
constexpr std::array kTestedOpsTernaryElementwise = {HloOpcode::kSelect,
HloOpcode::kClamp};
INSTANTIATE_TEST_SUITE_P(
TernaryElementwiseTestSuite, TernaryElementwiseTest,
AllTestCombinationsForOpcodes(kTestedOpsTernaryElementwise),
TritonSupportTestTypeAndOpcodeAndDeviceToString);
using ReduceTest = TritonSupportTestWithTypeAndOpcodeAndDeviceParam;
TEST_P(ReduceTest, IsTritonSupportedReduction) {
auto [data_type, opcode, cc] = GetParam();
bool dtype_is_complex = data_type == C64 || data_type == C128;
const std::string kHloTestTemplate =
absl::Substitute(R"(
add {
Arg_0 = $0[] parameter(0)
Arg_1 = $0[] parameter(1)
ROOT add = $0[] add(Arg_0, Arg_1)
}
ENTRY triton_computation {
parameter_0 = $0[125,127] parameter(0)
constant_0 = $0[] constant($1)
ROOT reduce = $0[125] reduce(parameter_0, constant_0),
dimensions={1}, to_apply=add
})",
"$0", dtype_is_complex ? "(0, 0)" : "0");
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
RunSupportTest(std::move(ti), {1}, cc);
}
TEST_F(ReduceTest, IsTritonSupportedReductionWithMultidimensionalTile) {
const std::string kHloTestTemplate = R"(
add {
Arg_0 = $0[] parameter(0)
Arg_1 = $0[] parameter(1)
ROOT add = $0[] add(Arg_0, Arg_1)
}
ENTRY triton_computation {
parameter_0 = $0[3,125,127] parameter(0)
constant_0 = $0[] constant(0)
ROOT reduce = $0[3,125] reduce(parameter_0, constant_0),
dimensions={2}, to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, F32,
HloOpcode::kReduce));
RunSupportTest(std::move(ti), {3, 4},
se::CudaComputeCapability::Ampere());
}
TEST_P(
ReduceTest,
UnsupportedReduceWithMoreThanOneReduceDimensionsFailsGracefullyWithTriton) {
auto [data_type, opcode, cc] = GetParam();
bool dtype_is_complex = data_type == C64 || data_type == C128;
const std::string kHloTestTemplate =
absl::Substitute(R"(
add {
Arg_0 = $0[] parameter(0)
Arg_1 = $0[] parameter(1)
ROOT add = $0[] add(Arg_0, Arg_1)
}
ENTRY triton_computation {
parameter_0 = $0[2,125,127] parameter(0)
constant_0 = $0[] constant($1)
ROOT reduce = $0[2] reduce(parameter_0, constant_0),
dimensions={1,2}, to_apply=add
})",
"$0", dtype_is_complex ? "(0, 0)" : "0");
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc));
RunSupportTest(std::move(ti), {1}, cc);
}
TEST_P(ReduceTest, IsTritonSupportedReduceWithNonLastReduceDimension) {
auto [data_type, opcode, cc] = GetParam();
bool dtype_is_complex = data_type == C64 || data_type == C128;
const std::string kHloTestTemplate =
absl::Substitute(R"(
add {
Arg_0 = $0[] parameter(0)
Arg_1 = $0[] parameter(1)
ROOT add = $0[] add(Arg_0, Arg_1)
}
ENTRY triton_computation {
parameter_0 = $0[125,127] parameter(0)
constant_0 = $0[] constant($1)
ROOT reduce = $0[127] reduce(parameter_0, constant_0), dimensions={0}, to_apply=add
})",
"$0", dtype_is_complex ? "(0, 0)" : "0");
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
RunSupportTest(std::move(ti), {1}, cc);
}
TEST_P(ReduceTest,
UnsupportedReduceWithMoreThanOneOperandsFailsGracefullyWithTriton) {
auto [data_type, opcode, cc] = GetParam();
bool dtype_is_complex = data_type == C64 || data_type == C128;
const std::string kHloTestTemplate =
absl::Substitute(R"(
add {
Arg_0 = $0[] parameter(0)
Arg_1 = $0[] parameter(1)
Arg_2 = $0[] parameter(2)
Arg_3 = $0[] parameter(3)
add_0 = $0[] add(Arg_0, Arg_2)
add_1 = $0[] add(Arg_1, Arg_3)
ROOT pair = ($0[], $0[]) tuple(add_0, add_1)
}
ENTRY triton_computation {
parameter_0 = $0[125,127] parameter(0)
constant_0 = $0[] constant($1)
tuple = ($0[125], $0[125]) reduce(
parameter_0, parameter_0, constant_0, constant_0),
dimensions={1}, to_apply=add
ROOT reduce = $0[125] get-tuple-element(tuple), index=0
})",
"$0", dtype_is_complex ? "(0, 0)" : "0");
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc));
RunSupportTest(std::move(ti), {1}, cc);
}
TEST_F(ReduceTest, ReduceWithNonConstReduceValueIsSupportedWithTriton) {
const se::GpuComputeCapability cc = se::CudaComputeCapability::Ampere();
const std::string kHloTestTemplate = R"(
add {
Arg_0 = $0[] parameter(0)
Arg_1 = $0[] parameter(1)
ROOT add = $0[] add(Arg_0, Arg_1)
}
ENTRY triton_computation {
parameter_0 = $0[125,127] parameter(0)
init = $0[] parameter(1)
ROOT reduce = $0[125] reduce(parameter_0, init), dimensions={1}, to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, F32,
HloOpcode::kReduce));
EXPECT_TRUE(IsTritonSupportedInstruction(ti.Instruction(), cc));
RunSupportTest(std::move(ti), {2}, cc);
}
TEST_P(ReduceTest, UnsupportedReductionComputationFailsGracefullyWithTriton) {
auto [data_type, opcode, cc] = GetParam();
bool dtype_is_complex = data_type == C64 || data_type == C128;
const std::string kHloTestTemplate =
absl::Substitute(R"(
custom_call {
Arg_0 = $0[] parameter(0)
Arg_1 = $0[] parameter(1)
ROOT custom_call = $0[] custom-call(Arg_0, Arg_1), custom_call_target="foo"
}
ENTRY triton_computation {
parameter_0 = $0[125,127] parameter(0)
constant_0 = $0[] constant($1)
ROOT reduce = $0[125] reduce(parameter_0, constant_0),
dimensions={1}, to_apply=custom_call
})",
"$0", dtype_is_complex ? "(0, 0)" : "0");
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc));
RunSupportTest(std::move(ti), {1}, cc);
}
constexpr std::array kTestedOpsReduction = {HloOpcode::kReduce};
INSTANTIATE_TEST_SUITE_P(ReduceTestSuite, ReduceTest,
AllTestCombinationsForOpcodes(kTestedOpsReduction),
TritonSupportTestTypeAndOpcodeAndDeviceToString);
using ReductionComputationTest =
TritonSupportTestWithTypeAndOpcodeAndDeviceParam;
TEST_P(ReductionComputationTest, DifferentBinaryOps) {
auto [data_type, opcode, cc] = GetParam();
bool dtype_is_complex = data_type == C64 || data_type == C128;
const std::string kHloTestTemplate = absl::Substitute(
R"(
reduce_computation {
Arg_0 = $0[] parameter(0)
Arg_1 = $0[] parameter(1)
ROOT output = $0[] $1(Arg_0, Arg_1)
}
ENTRY triton_computation {
parameter_0 = $0[125,127] parameter(0)
constant_0 = $0[] constant($2)
ROOT reduce = $0[125] reduce(parameter_0, constant_0),
dimensions={1}, to_apply=reduce_computation
})",
"$0", HloOpcodeString(opcode), dtype_is_complex ? "(0, 0)" : "0");
TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti,
ParseTemplateAndGetInstruction(
kHloTestTemplate, data_type, HloOpcode::kReduce));
bool skip_failure_branch_to_avoid_crash =
opcode == HloOpcode::kDivide &&
(data_type == BF16 || data_type == F16 || data_type == F8E4M3FN ||
data_type == F8E5M2);
RunSupportTest(std::move(ti), {1}, cc,
skip_failure_branch_to_avoid_crash);
}
std::vector<HloOpcode> ExcludeOps(absl::Span<const HloOpcode> all_ops,
absl::Span<const HloOpcode> ops_to_exclude) {
std::vector<HloOpcode> ret;
for (HloOpcode op : all_ops) {
if (!absl::c_linear_search(ops_to_exclude, op)) {
ret.push_back(op);
}
}
return ret;
}
INSTANTIATE_TEST_SUITE_P(
ReductionComputationTestSuite, ReductionComputationTest,
AllTestCombinationsForOpcodes(ExcludeOps(kTestedOpsBinaryElementwise,
{HloOpcode::kCompare})),
TritonSupportTestTypeAndOpcodeAndDeviceToString);
using TransposeTest = TritonSupportTestWithTypeAndOpcodeAndDeviceParam;
TEST_P(TransposeTest, LoadTranspose3D) {
auto [data_type, opcode, cc] = GetParam();
const std::string kHloTestTemplate = R"(
ENTRY triton_computation {
parameter_0 = $0[125,127,37] parameter(0)
ROOT transpose = $0[127,37,125] $1(parameter_0), dimensions={1,2,0}
})";
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
RunSupportTest(std::move(ti), {1, 32, 16}, cc);
}
constexpr std::array kTestedOpsTranspose = {HloOpcode::kTranspose};
INSTANTIATE_TEST_SUITE_P(TransposeTestSuite, TransposeTest,
AllTestCombinationsForOpcodes(kTestedOpsTranspose),
TritonSupportTestTypeAndOpcodeAndDeviceToString);
class TritonSupportTestWithTypeAndDeviceParam
: public TritonSupportTest,
public ::testing::WithParamInterface<
std::tuple<PrimitiveType, se::GpuComputeCapability>> {};
using SliceTest = TritonSupportTestWithTypeAndOpcodeAndDeviceParam;
TEST_P(SliceTest, ContinuousSlice) {
auto [data_type, opcode, cc] = GetParam();
const std::string kHloTestTemplate = (R"(
ENTRY triton_computation {
p = $0[128,32] parameter(0)
ROOT slice = $0[12,5] $1(p), slice={[116:128], [20:25]}
})");
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
RunSupportTest(std::move(ti), {8, 4}, cc);
}
TEST_P(SliceTest, NonContinuousSliceWhereStrideDividesOffsetEvenly) {
auto [data_type, opcode, cc] = GetParam();
const std::string kHloTestTemplate = (R"(
ENTRY triton_computation {
p = f32[16,16,32] parameter(0)
ROOT slice = f32[4,4,8] slice(p), slice={[2:10:2], [2:6], [3:11]}
})");
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
RunSupportTest(std::move(ti), {2, 2, 2}, cc);
}
TEST_P(SliceTest, NonContinuousSliceWhereStrideDoesNotDivideOffsetEvenly) {
auto [data_type, opcode, cc] = GetParam();
const std::string kHloTestTemplate = (R"(
ENTRY triton_computation {
p = f32[16,16,32] parameter(0)
ROOT slice = f32[4,4,8] slice(p), slice={[3:11:2], [2:6], [3:11]}
})");
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
RunSupportTest(std::move(ti), {2, 2, 2}, cc);
}
constexpr std::array kTestedOpsSlice = {HloOpcode::kSlice};
INSTANTIATE_TEST_SUITE_P(SliceTestSuite, SliceTest,
AllTestCombinationsForOpcodes(kTestedOpsSlice),
TritonSupportTestTypeAndOpcodeAndDeviceToString);
using CollectiveTest = TritonSupportTestWithTypeAndDeviceParam;
TEST_P(CollectiveTest, UnsupportedAllGatherFailsGracefullyWithTriton) {
auto [data_type, cc] = GetParam();
const std::string kHloTestTemplate = R"(
ENTRY triton_computation {
input = $0[128,32] parameter(0)
ROOT all-gather = $0[128,128] all-gather(input),
replica_groups={}, dimensions={1}
})";
TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti, ParseTemplateAndGetInstruction(
kHloTestTemplate, data_type,
HloOpcode::kAllGather));
EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc));
RunSupportTest(std::move(ti), {2, 2}, cc);
}
TEST_P(CollectiveTest, UnsupportedAllGatherStartFailsGracefullyWithTriton) {
auto [data_type, cc] = GetParam();
const std::string kHloTestTemplate = R"(
ENTRY triton_computation {
input = $0[128,32] parameter(0)
ROOT all-gather-start = ($0[128,32], $0[256,32]) all-gather-start(input),
replica_groups={{0,1}}, dimensions={0}
})";
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type,
HloOpcode::kAllGatherStart));
EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc));
RunSupportTest(std::move(ti), {2, 2}, cc);
}
TEST_P(CollectiveTest, UnsupportedAllGatherDoneFailsGracefullyWithTriton) {
auto [data_type, cc] = GetParam();
const std::string kHloTestTemplate = R"(
ENTRY triton_computation {
input = ($0[128,32], $0[128,32]) parameter(0)
ROOT all-gather-done = $0[128,32] all-gather-done(input)
})";
TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti, ParseTemplateAndGetInstruction(
kHloTestTemplate, data_type,
HloOpcode::kAllGatherDone));
EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc));
RunSupportTest(std::move(ti), {2, 2}, cc);
}
TEST_P(CollectiveTest, UnsupportedAllReduceFailsGracefullyWithTriton) {
auto [data_type, cc] = GetParam();
const std::string kHloTestTemplate = R"(
apply_op {
x = $0[] parameter(0)
y = $0[] parameter(1)
ROOT apply_op = $0[] add(x, y)
}
ENTRY triton_computation {
input = $0[128,32] parameter(0)
ROOT all-reduce = $0[128,32] all-reduce(input), replica_groups={},
to_apply=apply_op
})";
TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti, ParseTemplateAndGetInstruction(
kHloTestTemplate, data_type,
HloOpcode::kAllReduce));
EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc));
RunSupportTest(std::move(ti), {2, 2}, cc);
}
TEST_P(CollectiveTest,
UnsupportedAllReduceStartAndDoneFailGracefullyWithTriton) {
auto [data_type, cc] = GetParam();
const std::string kHloTestTemplate = R"(
apply_op {
x = $0[] parameter(0)
y = $0[] parameter(1)
ROOT apply_op = $0[] add(x, y)
}
ENTRY triton_computation {
input = $0[128,32] parameter(0)
all-reduce-start = $0[128,32] all-reduce-start(input), replica_groups={},
to_apply=apply_op
ROOT all-reduce-done = $0[128,32] all-reduce-done(all-reduce-start)
})";
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type,
HloOpcode::kAllReduceStart));
EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc));
EXPECT_FALSE(IsTritonSupportedInstruction(
*ti.TritonComputation().root_instruction(), cc));
RunSupportTest(std::move(ti), {2, 2}, cc);
}
TEST_P(CollectiveTest, UnsupportedAllToAllFailsGracefullyWithTriton) {
auto [data_type, cc] = GetParam();
const std::string kHloTestTemplate = R"(
ENTRY triton_computation {
input = $0[128,32] parameter(0)
ROOT a2a = ($0[128,32]) all-to-all(input), replica_groups={}
})";
TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti, ParseTemplateAndGetInstruction(
kHloTestTemplate, data_type,
HloOpcode::kAllToAll));
EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc));
RunSupportTest(std::move(ti), {2, 2}, cc);
}
TEST_P(CollectiveTest, UnsupportedCollectivePermuteFailsGracefullyWithTriton) {
auto [data_type, cc] = GetParam();
const std::string kHloTestTemplate = R"(
ENTRY triton_computation {
a = $0[128,32] parameter(0)
ROOT collective-permute = $0[128,32] collective-permute(a),
source_target_pairs={{1,0}, {0,1}, {2,2}}
})";
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type,
HloOpcode::kCollectivePermute));
EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc));
RunSupportTest(std::move(ti), {2, 2}, cc);
}
TEST_P(CollectiveTest, UnsupportedReduceScatterFailsGracefullyWithTriton) {
auto [data_type, cc] = GetParam();
const std::string kHloTestTemplate = R"(
apply_op {
lhs = $0[] parameter(0)
rhs = $0[] parameter(1)
ROOT apply_op = $0[] add(lhs, rhs)
}
ENTRY triton_computation {
input = $0[8] parameter(0)
ROOT result = $0[4] reduce-scatter(input), replica_groups={},
dimensions={0}, to_apply=apply_op
})";
TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti, ParseTemplateAndGetInstruction(
kHloTestTemplate, data_type,
HloOpcode::kReduceScatter));
EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc));
RunSupportTest(std::move(ti), {1}, cc);
}
TEST_P(CollectiveTest,
UnsupportedAsyncStartAndUpdateAndDoneFailGracefullyWithTriton) {
auto [data_type, cc] = GetParam();
const std::string kHloTestTemplate = R"(
async_computation {
ROOT p0 = $0[10] parameter(0)
}
ENTRY triton_computation {
input = $0[10] parameter(0)
async-start = (($0[10]), $0[10]) async-start(input),
calls=async_computation
async-update = (($0[10]), $0[10]) async-update(async-start),
calls=async_computation
ROOT async-done = $0[10] async-done(async-update), calls=async_computation
})";
TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti, ParseTemplateAndGetInstruction(
kHloTestTemplate, data_type,
HloOpcode::kAsyncStart));
EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc));
EXPECT_FALSE(IsTritonSupportedInstruction(
*ti.TritonComputation().root_instruction(), cc));
EXPECT_FALSE(IsTritonSupportedInstruction(
*ti.TritonComputation().root_instruction()->operand(0), cc));
RunSupportTest(std::move(ti), {1}, cc);
}
constexpr std::array kTestedOpsCollectives = {
HloOpcode::kAllGather, HloOpcode::kAllGatherStart,
HloOpcode::kAllGatherDone, HloOpcode::kAllReduce,
HloOpcode::kAllReduceStart, HloOpcode::kAllReduceDone,
HloOpcode::kAsyncDone, HloOpcode::kAsyncStart,
HloOpcode::kAsyncUpdate, HloOpcode::kAllToAll,
HloOpcode::kCollectivePermute, HloOpcode::kReduceScatter};
INSTANTIATE_TEST_SUITE_P(
CollectiveTestSuite, CollectiveTest,
::testing::Combine(::testing::ValuesIn(AllXlaDataTypes()),
::testing::ValuesIn(AllDevicesToTest())),
TritonSupportTestTypeAndDeviceToString);
using BroadcastTest = TritonSupportTestWithTypeAndDeviceParam;
TEST_P(BroadcastTest, Broadcast) {
auto [data_type, cc] = GetParam();
const std::string kHloTestTemplate = R"(
ENTRY triton_computation {
input = $0[35,131] parameter(0)
ROOT bcast = $0[3,35,131,12] broadcast(input), dimensions={1,2}
})";
TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti, ParseTemplateAndGetInstruction(
kHloTestTemplate, data_type,
HloOpcode::kBroadcast));
RunSupportTest(std::move(ti), {2, 16, 32, 8}, cc);
}
constexpr std::array kTestedOpsBroadcast = {HloOpcode::kBroadcast};
INSTANTIATE_TEST_SUITE_P(
BroadcastTestSuite, BroadcastTest,
::testing::Combine(::testing::ValuesIn(AllXlaDataTypes()),
::testing::ValuesIn(AllDevicesToTest())),
TritonSupportTestTypeAndDeviceToString);
using ParameterTest = TritonSupportTestWithTypeAndDeviceParam;
TEST_P(ParameterTest, Parameter) {
auto [data_type, cc] = GetParam();
const std::string kHloTestTemplate = R"(
ENTRY triton_computation {
input = $0[35,131] parameter(0)
ROOT noop = $0[35,131] convert(input)
})";
TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti, ParseTemplateAndGetInstruction(
kHloTestTemplate, data_type,
HloOpcode::kParameter));
RunSupportTest(std::move(ti), {16, 32}, cc);
}
constexpr std::array kTestedOpsParameter = {HloOpcode::kParameter};
INSTANTIATE_TEST_SUITE_P(
ParameterTestSuite, ParameterTest,
::testing::Combine(::testing::ValuesIn(AllXlaDataTypes()),
::testing::ValuesIn(AllDevicesToTest())),
TritonSupportTestTypeAndDeviceToString);
using ConstantTest = TritonSupportTestWithTypeAndDeviceParam;
TEST_P(ConstantTest, Constant2D) {
auto [data_type, cc] = GetParam();
bool dtype_is_complex = data_type == C64 || data_type == C128;
const std::string kHloTestTemplate =
absl::Substitute(R"(
ENTRY triton_computation {
ROOT const = $0[3,3] constant({{$1,$1,$1},{$1,$1,$1},{$1,$1,$1}})
})",
"$0", dtype_is_complex ? "(0, 0)" : "0");
TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti, ParseTemplateAndGetInstruction(
kHloTestTemplate, data_type,
HloOpcode::kConstant));
RunSupportTest(std::move(ti), {2, 2}, cc);
}
constexpr std::array kTestedOpsConstant = {HloOpcode::kConstant};
INSTANTIATE_TEST_SUITE_P(
ConstantTestSuite, ConstantTest,
::testing::Combine(::testing::ValuesIn(AllXlaDataTypes()),
::testing::ValuesIn(AllDevicesToTest())),
TritonSupportTestTypeAndDeviceToString);
using IotaTest = TritonSupportTestWithTypeAndOpcodeAndDeviceParam;
TEST_P(IotaTest, Iota2D) {
auto [data_type, opcode, cc] = GetParam();
const std::string kHloTestTemplate = R"(
ENTRY triton_computation {
ROOT input = $0[35,131] iota(), iota_dimension=0
})";
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
RunSupportTest(std::move(ti), {16, 32}, cc);
}
constexpr std::array kTestedOpsIota = {HloOpcode::kIota};
INSTANTIATE_TEST_SUITE_P(IotaTestSuite, IotaTest,
AllTestCombinationsForOpcodes(kTestedOpsIota),
TritonSupportTestTypeAndOpcodeAndDeviceToString);
using RngTest = TritonSupportTestWithTypeAndOpcodeAndDeviceParam;
TEST_P(RngTest, Rng) {
auto [data_type, opcode, cc] = GetParam();
const std::string kHloTestTemplate = R"(
ENTRY triton_computation {
low = $0[] parameter(0)
high = $0[] parameter(1)
ROOT root = $0[33,77] rng(low, high), distribution=rng_uniform
})";
TF_ASSERT_OK_AND_ASSIGN(
TestedInstruction ti,
ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode));
RunSupportTest(std::move(ti), {16, 32}, cc);
}
constexpr std::array kTestedOpsRng = {HloOpcode::kRng};
INSTANTIATE_TEST_SUITE_P(RngTestSuite, RngTest,
AllTestCombinationsForOpcodes(kTestedOpsRng),
TritonSupportTestTypeAndOpcodeAndDeviceToString);
constexpr std::array kUnsupportedOps = {HloOpcode::kAddDependency,
HloOpcode::kAfterAll,
HloOpcode::kBatchNormGrad,
HloOpcode::kBatchNormInference,
HloOpcode::kBatchNormTraining,
HloOpcode::kBitcastConvert,
HloOpcode::kCall,
HloOpcode::kCholesky,
HloOpcode::kCollectiveBroadcast,
HloOpcode::kCollectivePermuteDone,
HloOpcode::kCollectivePermuteStart,
HloOpcode::kComplex,
HloOpcode::kConcatenate,
HloOpcode::kConditional,
HloOpcode::kConvolution,
HloOpcode::kCopy,
HloOpcode::kCopyDone,
HloOpcode::kCopyStart,
HloOpcode::kCustomCall,
HloOpcode::kDomain,
HloOpcode::kDot,
HloOpcode::kDynamicReshape,
HloOpcode::kDynamicSlice,
HloOpcode::kDynamicUpdateSlice,
HloOpcode::kFft,
HloOpcode::kFusion,
HloOpcode::kGather,
HloOpcode::kGetDimensionSize,
HloOpcode::kGetTupleElement,
HloOpcode::kInfeed,
HloOpcode::kMap,
HloOpcode::kOptimizationBarrier,
HloOpcode::kOutfeed,
HloOpcode::kPad,
HloOpcode::kPartitionId,
HloOpcode::kRecv,
HloOpcode::kRecvDone,
HloOpcode::kReduceWindow,
HloOpcode::kReplicaId,
HloOpcode::kReverse,
HloOpcode::kRngBitGenerator,
HloOpcode::kRngGetAndUpdateState,
HloOpcode::kScatter,
HloOpcode::kSelectAndScatter,
HloOpcode::kSend,
HloOpcode::kSendDone,
HloOpcode::kSetDimensionSize,
HloOpcode::kSort,
HloOpcode::kStochasticConvert,
HloOpcode::kTopK,
HloOpcode::kTriangularSolve,
HloOpcode::kTuple,
HloOpcode::kWhile};
absl::flat_hash_set<HloOpcode> AllTestedOpcodes() {
absl::flat_hash_set<HloOpcode> ret;
ret.insert(kTestedOpsBitcastReshape.begin(), kTestedOpsBitcastReshape.end());
ret.insert(kTestedOpsUnaryElementwise.begin(),
kTestedOpsUnaryElementwise.end());
ret.insert(kTestedOpsConvert.begin(), kTestedOpsConvert.end());
ret.insert(kTestedOpsBinaryElementwise.begin(),
kTestedOpsBinaryElementwise.end());
ret.insert(kTestedOpsTernaryElementwise.begin(),
kTestedOpsTernaryElementwise.end());
ret.insert(kTestedOpsReduction.begin(), kTestedOpsReduction.end());
ret.insert(kTestedOpsSlice.begin(), kTestedOpsSlice.end());
ret.insert(kTestedOpsTranspose.begin(), kTestedOpsTranspose.end());
ret.insert(kTestedOpsCollectives.begin(), kTestedOpsCollectives.end());
ret.insert(kTestedOpsBroadcast.begin(), kTestedOpsBroadcast.end());
ret.insert(kTestedOpsParameter.begin(), kTestedOpsParameter.end());
ret.insert(kTestedOpsConstant.begin(), kTestedOpsConstant.end());
ret.insert(kTestedOpsIota.begin(), kTestedOpsIota.end());
ret.insert(kTestedOpsRng.begin(), kTestedOpsRng.end());
ret.insert(kUnsupportedOps.begin(), kUnsupportedOps.end());
return ret;
}
TEST(OpCoverage, UnsupportedOpcodes) {
for (HloOpcode opcode : kUnsupportedOps) {
EXPECT_TRUE(internal::IsTritonUnsupportedOpcode(opcode));
}
}
TEST(OpCoverage, AllOpcodesAreTested) {
absl::flat_hash_set<HloOpcode> tested_opcodes = AllTestedOpcodes();
for (int opcode_index = 0; opcode_index < HloOpcodeCount(); ++opcode_index) {
auto opcode = static_cast<HloOpcode>(opcode_index);
EXPECT_TRUE(tested_opcodes.contains(opcode))
<< "Opcode `" << HloOpcodeString(opcode) << "` is not tested.";
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/triton/triton_support.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/triton/triton_support_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
241482d4-5593-45b6-9f17-7c5376ad9853 | cpp | tensorflow/tensorflow | tensor_shape | tensorflow/core/framework/tensor_shape.cc | tensorflow/core/framework/tensor_shape_test.cc | #include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/util/overflow.h"
namespace tensorflow {
static_assert(sizeof(TensorShapeRep) == sizeof(TensorShape),
"TensorShape must have no fields beyond TensorShapeRep");
static_assert(sizeof(TensorShapeRep) == sizeof(PartialTensorShape),
"PartialTensorShape must have no fields beyond TensorShapeRep");
template <class Shape>
static void AppendTo(const TensorShapeBase<Shape>& s,
absl::InlinedVector<int64, 8UL>* vals) {
for (auto dim : s) {
vals->push_back(dim.size);
}
}
void TensorShape::CheckDimsEqual(int NDIMS) const {
CHECK_EQ(NDIMS, dims()) << "Asking for tensor of " << NDIMS << " dimensions"
<< " from a tensor of " << dims() << " dimensions";
}
void TensorShape::CheckDimsAtMost(int NDIMS) const {
CHECK_GE(NDIMS, dims()) << "Asking for tensor of at most " << NDIMS
<< " dimensions from a tensor of " << dims()
<< " dimensions";
}
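// A shape is valid if it has at most MaxDimensions() dimensions, every
// dimension is non-negative (-1 meaning "unknown" is also allowed for partial
// shapes), and the total element count does not overflow int64.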
template <class Shape>
bool TensorShapeBase<Shape>::IsValid() {
if (kIsPartial && unknown_rank()) return dims() == 0;
int64_t num_elements = 1;
if (dims() > MaxDimensions()) return false;
for (auto d : dim_sizes()) {
if (d < (kIsPartial ? -1 : 0)) return false;
if (d == -1) {
num_elements = -1;
} else if (!kIsPartial || num_elements >= 0) {
num_elements = MultiplyWithoutOverflow(num_elements, d);
if (num_elements < 0) return false;
}
}
return true;
}
template <class Shape>
bool TensorShapeBase<Shape>::IsValid(const TensorShapeProto& proto) {
if (kIsPartial && proto.unknown_rank()) return proto.dim_size() == 0;
int64_t num_elements = 1;
if (proto.dim().size() > MaxDimensions()) return false;
for (const auto& d : proto.dim()) {
if (d.size() < (kIsPartial ? -1 : 0)) return false;
if (d.size() == -1) {
num_elements = -1;
} else if (!kIsPartial || num_elements >= 0) {
num_elements = MultiplyWithoutOverflow(num_elements, d.size());
if (num_elements < 0) return false;
}
}
return true;
}
template <class Shape>
Status TensorShapeBase<Shape>::IsValidShape(const TensorShapeProto& proto) {
if (kIsPartial && proto.unknown_rank()) {
if (proto.dim_size() > 0) {
return errors::InvalidArgument(
"An unknown shape must not have any dimensions set.");
}
return absl::OkStatus();
}
int64_t num_elements = 1;
if (proto.dim().size() > MaxDimensions()) {
return errors::InvalidArgument("Shape ", DebugString(proto),
" has too many dimensions");
}
for (const auto& d : proto.dim()) {
if (d.size() < (kIsPartial ? -1 : 0)) {
if (kIsPartial) {
return errors::InvalidArgument(
"Shape ", DebugString(proto),
" has dimensions with values below -1 (where -1 means unknown)");
} else {
return errors::InvalidArgument("Shape ", DebugString(proto),
" is not fully defined");
}
}
if (d.size() == -1) {
num_elements = -1;
} else if (!kIsPartial || num_elements >= 0) {
num_elements = MultiplyWithoutOverflow(num_elements, d.size());
if (num_elements < 0) {
return errors::InvalidArgument(
"Shape ", DebugString(proto),
" is too large (more than 2**63 - 1 entries)");
}
}
}
return absl::OkStatus();
}
template <class Shape>
TensorShapeBase<Shape>::TensorShapeBase(const TensorShapeProto& proto) {
set_tag(REP16);
set_data_type(DT_INVALID);
if (kIsPartial && proto.unknown_rank()) {
set_ndims_byte(kUnknownRank);
set_num_elements(-1);
} else {
set_ndims_byte(0);
set_num_elements(1);
for (const auto& d : proto.dim()) {
AddDim(d.size());
}
}
}
template <class Shape>
Status TensorShapeBase<Shape>::BuildTensorShapeBase(
const TensorShapeProto& proto, TensorShapeBase* out) {
out->set_tag(REP16);
out->set_data_type(DT_INVALID);
if (kIsPartial && proto.unknown_rank()) {
out->set_ndims_byte(kUnknownRank);
out->set_num_elements(-1);
} else {
out->set_ndims_byte(0);
out->set_num_elements(1);
int64_t num_elements_excluding_zero_dims = 1;
Status s = absl::OkStatus();
for (const auto& d : proto.dim()) {
s = out->AddDimWithStatus(d.size());
if (!s.ok()) {
return s;
}
if (d.size() > 0) {
num_elements_excluding_zero_dims =
MultiplyWithoutOverflow(num_elements_excluding_zero_dims, d.size());
if (TF_PREDICT_FALSE(num_elements_excluding_zero_dims < 0)) {
return errors::InvalidArgument(
"Encountered overflow when multiplying shape dimensions");
}
}
}
}
return absl::OkStatus();
}
template <class Shape>
TensorShapeBase<Shape>::TensorShapeBase(absl::Span<const int64_t> dim_sizes) {
set_tag(REP16);
set_data_type(DT_INVALID);
TF_CHECK_OK(InitDims(dim_sizes));
}
template <class Shape>
Status TensorShapeBase<Shape>::BuildTensorShapeBase(
absl::Span<const int64_t> dim_sizes, TensorShapeBase* out) {
out->set_tag(REP16);
out->set_data_type(DT_INVALID);
return out->InitDims(dim_sizes);
}
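// Stores `val` into the 16-bit representation at `dst[dim]`. For partial
// shapes a negative (unknown) size is encoded as the uint16 sentinel; the
// return value reports whether the dimension was unknown.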
static inline bool Set16(bool partial, uint16* dst, int dim, int64_t val) {
if (partial) {
if (val < 0) {
dst[dim] = std::numeric_limits<uint16>::max();
return true;
}
}
dst[dim] = val;
return false;
}
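// Initializes the shape from `dim_sizes`. Ranks 1-4 whose dimensions all fit
// below kMaxSmall use the inline 16-bit representation directly (kMaxSmall is
// chosen so the product of four such dimensions cannot overflow int64);
// everything else falls back to AddDimWithStatus.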
template <class Shape>
Status TensorShapeBase<Shape>::InitDims(absl::Span<const int64_t> dim_sizes) {
DCHECK_EQ(tag(), REP16);
static const int64_t kMaxSmall = 0xd744;
static_assert(kMaxSmall * kMaxSmall * kMaxSmall * kMaxSmall <= kint64max,
"bad overflow check");
bool large_size = false;
for (auto s : dim_sizes) {
if (s > kMaxSmall) {
large_size = true;
break;
}
}
if (!kIsPartial && !large_size) {
for (auto s : dim_sizes) {
if (TF_PREDICT_FALSE(s < 0)) {
return errors::InvalidArgument(
"Expected shape dimensions to be non-negative, got ", s);
}
}
}
if (!large_size) {
uint16* dst = as16()->dims_;
switch (dim_sizes.size()) {
case 1: {
set_ndims_byte(1);
const int64_t size = dim_sizes[0];
const bool neg = Set16(kIsPartial, dst, 0, size);
set_num_elements(neg ? -1 : size);
return absl::OkStatus();
}
case 2: {
set_ndims_byte(2);
const int64_t size0 = dim_sizes[0];
const int64_t size1 = dim_sizes[1];
bool neg = Set16(kIsPartial, dst, 0, size0);
neg |= Set16(kIsPartial, dst, 1, size1);
set_num_elements(neg ? -1 : (size0 * size1));
return absl::OkStatus();
}
case 3: {
set_ndims_byte(3);
const int64_t size0 = dim_sizes[0];
const int64_t size1 = dim_sizes[1];
const int64_t size2 = dim_sizes[2];
bool neg = Set16(kIsPartial, dst, 0, size0);
neg |= Set16(kIsPartial, dst, 1, size1);
neg |= Set16(kIsPartial, dst, 2, size2);
set_num_elements(neg ? -1 : (size0 * size1 * size2));
return absl::OkStatus();
}
case 4: {
set_ndims_byte(4);
const int64_t size0 = dim_sizes[0];
const int64_t size1 = dim_sizes[1];
const int64_t size2 = dim_sizes[2];
const int64_t size3 = dim_sizes[3];
bool neg = Set16(kIsPartial, dst, 0, size0);
neg |= Set16(kIsPartial, dst, 1, size1);
neg |= Set16(kIsPartial, dst, 2, size2);
neg |= Set16(kIsPartial, dst, 3, size3);
set_num_elements(neg ? -1 : (size0 * size1 * size2 * size3));
return absl::OkStatus();
}
}
}
set_ndims_byte(0);
set_num_elements(1);
Status status = absl::OkStatus();
for (int64_t s : dim_sizes) {
status.Update(AddDimWithStatus(internal::SubtleMustCopy(s)));
if (!status.ok()) {
return status;
}
}
return status;
}
template <class Shape>
TensorShapeBase<Shape>::TensorShapeBase() {
set_tag(REP16);
set_data_type(DT_INVALID);
if (kIsPartial) {
set_ndims_byte(kUnknownRank);
set_num_elements(-1);
} else {
set_ndims_byte(0);
set_num_elements(1);
}
}
void TensorShapeRep::DestructorOutOfLine() {
DCHECK(tag() == REP_OUT_OF_LINE);
delete as64()->dims_;
}
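// Copy path taken when either shape uses the out-of-line representation:
// releases or reuses this shape's heap-allocated dims vector and deep-copies
// the source's vector when the source is out of line.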
void TensorShapeRep::SlowCopyFrom(const TensorShapeRep& b) {
if (b.tag() != REP_OUT_OF_LINE) {
if (tag() == REP_OUT_OF_LINE) {
delete as64()->dims_;
}
memcpy(buf(), b.buf(), sizeof(u_.buf));
} else {
set_ndims_byte(b.ndims_byte());
set_data_type(b.data_type());
if (tag() == REP_OUT_OF_LINE) {
*(as64()->dims_) = *(b.as64()->dims_);
} else {
set_tag(REP_OUT_OF_LINE);
as64()->dims_ = new absl::InlinedVector<int64_t, 4UL>(*(b.as64()->dims_));
}
}
}
template <class Shape>
int64_t TensorShapeBase<Shape>::dim_size(int d) const {
if (unknown_rank()) return -1;
CHECK_GE(d, 0);
if (d > 0) CHECK_LT(d, dims());
if (tag() == REP16) {
uint16 dim = as16()->dims_[d];
if (kIsPartial && dim == kUnknownRep16) return -1;
return dim;
} else if (tag() == REP32) {
uint32 dim = as32()->dims_[d];
if (kIsPartial && dim == kUnknownRep32) return -1;
return dim;
} else {
return (*as64()->dims_)[d];
}
}
void TensorShapeRep::Clear() {
ClearAllButDataType();
set_data_type(DT_INVALID);
}
void TensorShapeRep::ClearAllButDataType() {
if (tag() == REP_OUT_OF_LINE) {
delete as64()->dims_;
}
set_tag(REP16);
set_ndims_byte(0);
set_num_elements(1);
}
template <class Shape>
Status TensorShapeBase<Shape>::RecomputeNumElements() {
if (unknown_rank()) {
set_num_elements(-1);
return absl::OkStatus();
}
int64_t n = 1;
for (auto dim : *this) {
if (kIsPartial && dim.size < 0) {
n = -1;
break;
}
n = MultiplyWithoutOverflow(n, dim.size);
if (TF_PREDICT_FALSE(n < 0)) {
return errors::InvalidArgument(
"Shape ", this->DebugString(),
" results in overflow when computing number of elements");
}
}
set_num_elements(n);
return absl::OkStatus();
}
template <class Shape>
void TensorShapeBase<Shape>::AddDim(int64_t size) {
if (!kIsPartial) CHECK_GE(size, 0);
if (unknown_rank()) return;
CHECK_LT(ndims_byte(), MaxDimensions()) << "Too many dimensions in tensor";
int64_t new_num_elements;
if (kIsPartial && (num_elements() < 0 || size < 0)) {
new_num_elements = -1;
} else {
new_num_elements = MultiplyWithoutOverflow(num_elements(), size);
CHECK_LE(0, new_num_elements);
}
UnsafeAddDim(size, new_num_elements);
}
template <class Shape>
Status TensorShapeBase<Shape>::AddDimWithStatus(int64_t size) {
if (!kIsPartial) {
if (TF_PREDICT_FALSE(size < 0)) {
return errors::InvalidArgument("Expected a non-negative size, got ",
size);
}
}
if (unknown_rank()) {
return absl::OkStatus();
}
if (TF_PREDICT_FALSE(ndims_byte() >= MaxDimensions())) {
return errors::InvalidArgument("Too many dimensions in tensor");
}
int64_t new_num_elements;
if (kIsPartial && (num_elements() < 0 || size < 0)) {
new_num_elements = -1;
} else {
new_num_elements = MultiplyWithoutOverflow(num_elements(), size);
if (TF_PREDICT_FALSE(new_num_elements < 0)) {
return errors::InvalidArgument("Encountered overflow when multiplying ",
num_elements(), " with ", size,
", result: ", new_num_elements);
}
}
UnsafeAddDim(size, new_num_elements);
return absl::OkStatus();
}
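// Appends a dimension without any validation; the caller supplies the
// already-computed element count. Upgrades the representation from REP16 to
// REP32 to REP_OUT_OF_LINE as soon as the rank or dimension size no longer
// fits in the current encoding.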
template <class Shape>
void TensorShapeBase<Shape>::UnsafeAddDim(int64_t size,
int64_t new_num_elements) {
const int nd = ndims_byte();
if (tag() == REP16 && nd < 6 && size < kMaxRep16) {
as16()->dims_[nd] =
kIsPartial && size < 0 ? kUnknownRep16 : static_cast<uint16>(size);
} else if (tag() == REP32 && nd < 3 && size < kMaxRep32) {
as32()->dims_[nd] =
kIsPartial && size < 0 ? kUnknownRep32 : static_cast<uint32>(size);
} else if (tag() == REP_OUT_OF_LINE) {
as64()->dims_->push_back(size);
} else {
absl::InlinedVector<int64_t, 8UL> vals;
AppendTo(*this, &vals);
vals.push_back(size);
bool can_be_rep32 = (vals.size() <= 3);
if (can_be_rep32) {
for (size_t i = 0; i < vals.size(); i++) {
if (vals[i] >= kMaxRep32) {
can_be_rep32 = false;
break;
}
}
}
if (can_be_rep32) {
set_tag(REP32);
for (size_t d = 0; d < vals.size(); d++) {
as32()->dims_[d] = kIsPartial && vals[d] < 0
? kUnknownRep32
: static_cast<uint32>(vals[d]);
}
} else {
set_tag(REP_OUT_OF_LINE);
as64()->dims_ =
new absl::InlinedVector<int64_t, 4UL>(vals.begin(), vals.end());
}
}
set_ndims_byte(nd + 1);
set_num_elements(new_num_elements);
}
template <class Shape>
void TensorShapeBase<Shape>::AppendShape(const TensorShapeBase& shape) {
for (auto d : shape) AddDim(d.size);
}
template <class Shape>
Status TensorShapeBase<Shape>::AppendShapeWithStatus(
const TensorShapeBase& shape) {
Status s = absl::OkStatus();
for (auto d : shape) {
s.Update(AddDimWithStatus(d.size));
if (!s.ok()) {
return s;
}
}
return s;
}
template <class Shape>
void TensorShapeBase<Shape>::InsertDim(int d, int64_t size) {
CHECK_GE(d, 0);
CHECK_LE(d, dims());
if (!kIsPartial) CHECK_GE(size, 0);
CHECK_LT(dims(), MaxDimensions());
absl::InlinedVector<int64_t, 8UL> vals;
AppendTo(*this, &vals);
vals.insert(vals.begin() + d, size);
ClearAllButDataType();
for (auto dval : vals) {
AddDim(dval);
}
}
template <class Shape>
Status TensorShapeBase<Shape>::InsertDimWithStatus(int d, int64_t size) {
if (!kIsPartial) {
if (TF_PREDICT_FALSE(size < 0)) {
return errors::InvalidArgument("Expected a non-negative size, got ",
size);
}
}
if (TF_PREDICT_FALSE(d < 0)) {
return errors::Internal("The insertion index must be non-negative, got ",
d);
}
if (TF_PREDICT_FALSE(d > dims())) {
return errors::Internal("The insertion index must be at most ", dims(),
" got ", d);
}
if (TF_PREDICT_FALSE(dims() >= MaxDimensions())) {
return errors::Internal("Shape has ", dims(),
" dimensions which is the maximum allowed");
}
absl::InlinedVector<int64_t, 8UL> vals;
AppendTo(*this, &vals);
vals.insert(vals.begin() + d, size);
ClearAllButDataType();
Status s = absl::OkStatus();
for (auto dval : vals) {
s.Update(AddDimWithStatus(dval));
if (!s.ok()) {
return s;
}
}
return s;
}
template <class Shape>
absl::InlinedVector<int64_t, 4UL> TensorShapeBase<Shape>::dim_sizes() const {
absl::InlinedVector<int64_t, 4UL> result;
for (auto dim : *this) {
result.push_back(dim.size);
}
return result;
}
template <class Shape>
void TensorShapeBase<Shape>::set_dim(int d, int64_t size) {
CHECK_GE(d, 0);
CHECK_LT(d, dims());
if (!kIsPartial) {
CHECK_GE(size, 0);
}
if (tag() == REP16 && size < kMaxRep16) {
as16()->dims_[d] =
kIsPartial && size < 0 ? kUnknownRep16 : static_cast<uint16>(size);
} else if (tag() == REP32 && size < kMaxRep32) {
as32()->dims_[d] =
kIsPartial && size < 0 ? kUnknownRep32 : static_cast<uint32>(size);
} else if (tag() == REP_OUT_OF_LINE) {
(*as64()->dims_)[d] = size;
} else {
absl::InlinedVector<int64_t, 8UL> vals;
AppendTo(*this, &vals);
vals[d] = size;
ClearAllButDataType();
for (auto dval : vals) {
AddDim(dval);
}
}
TF_CHECK_OK(RecomputeNumElements());
}
template <class Shape>
Status TensorShapeBase<Shape>::SetDimWithStatus(int d, int64_t size) {
if (TF_PREDICT_FALSE(d < 0)) {
return errors::InvalidArgument("Index must be non-negative, got ", d);
}
if (TF_PREDICT_FALSE(d >= dims())) {
return errors::InvalidArgument("Index must be less than ", dims(), ", got ",
d);
}
if (TF_PREDICT_FALSE(!kIsPartial && size < 0)) {
return errors::InvalidArgument("Expected a non-negative size, got ", size);
}
if (tag() == REP16 && size < kMaxRep16) {
as16()->dims_[d] =
kIsPartial && size < 0 ? kUnknownRep16 : static_cast<uint16>(size);
} else if (tag() == REP32 && size < kMaxRep32) {
as32()->dims_[d] =
kIsPartial && size < 0 ? kUnknownRep32 : static_cast<uint32>(size);
} else if (tag() == REP_OUT_OF_LINE) {
(*as64()->dims_)[d] = size;
} else {
absl::InlinedVector<int64_t, 8UL> vals;
AppendTo(*this, &vals);
vals[d] = size;
ClearAllButDataType();
Status s = absl::OkStatus();
for (auto dval : vals) {
s.Update(AddDimWithStatus(dval));
if (!s.ok()) {
return s;
}
}
}
return RecomputeNumElements();
}
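// Removes dimensions [begin, end). A negative index i is interpreted as
// dims() + i + 1, so -1 refers to one past the last dimension; an empty or
// inverted range leaves the shape unchanged.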
template <class Shape>
void TensorShapeBase<Shape>::RemoveDimRange(int begin, int end) {
if (unknown_rank()) return;
begin = begin < 0 ? dims() + begin + 1 : begin;
end = end < 0 ? dims() + end + 1 : end;
CHECK_GE(begin, 0);
CHECK_LE(begin, dims());
CHECK_GE(end, 0);
CHECK_LE(end, dims());
if (begin >= end) return;
absl::InlinedVector<int64_t, 8UL> vals;
AppendTo(*this, &vals);
vals.erase(vals.begin() + begin, vals.begin() + end);
ClearAllButDataType();
for (auto dval : vals) {
AddDim(dval);
}
TF_CHECK_OK(RecomputeNumElements());
}
template <class Shape>
Status TensorShapeBase<Shape>::RemoveDimRangeWithStatus(int begin, int end) {
if (unknown_rank()) {
return absl::OkStatus();
}
begin = begin < 0 ? dims() + begin + 1 : begin;
end = end < 0 ? dims() + end + 1 : end;
if (TF_PREDICT_FALSE(begin < 0)) {
return errors::Internal("Start index must be non-negative, got ", begin);
}
if (TF_PREDICT_FALSE(begin > dims())) {
return errors::Internal("Start index must be less than ", dims(), ", got ",
begin);
}
if (TF_PREDICT_FALSE(end < 0)) {
return errors::Internal("End index must be non-negative, got ", end);
}
if (TF_PREDICT_FALSE(end > dims())) {
return errors::Internal("End index must be less than ", dims(), ", got ",
end);
}
if (begin >= end) {
return absl::OkStatus();
}
absl::InlinedVector<int64_t, 8UL> vals;
AppendTo(*this, &vals);
vals.erase(vals.begin() + begin, vals.begin() + end);
ClearAllButDataType();
Status s = absl::OkStatus();
for (auto dval : vals) {
s.Update(AddDimWithStatus(dval));
if (!s.ok()) {
return s;
}
}
return RecomputeNumElements();
}
bool TensorShape::IsSameSize(const TensorShape& b) const {
if (b.dims() != dims()) return false;
for (int d = 0; d < dims(); d++) {
if (dim_size(d) != b.dim_size(d)) return false;
}
return true;
}
template <class Shape>
void TensorShapeBase<Shape>::AsProto(TensorShapeProto* proto) const {
proto->Clear();
if (unknown_rank()) {
proto->set_unknown_rank(true);
} else {
for (int i = 0; i < dims(); i++) {
proto->add_dim()->set_size(dim_size(i));
}
}
}
template <class Shape>
TensorShapeProto TensorShapeBase<Shape>::AsProto() const {
TensorShapeProto out;
AsProto(&out);
return out;
}
template <class Shape>
TensorShapeIter<Shape> TensorShapeBase<Shape>::begin() const {
return TensorShapeIter<Shape>(static_cast<const Shape*>(this), 0);
}
template <class Shape>
TensorShapeIter<Shape> TensorShapeBase<Shape>::end() const {
const int max_dim = unknown_rank() ? -1 : dims();
return TensorShapeIter<Shape>(static_cast<const Shape*>(this), max_dim);
}
string TensorShapeRep::DebugString() const {
const auto& shape = *static_cast<const PartialTensorShape*>(this);
if (shape.unknown_rank()) return "<unknown>";
string s = "[";
for (int i = 0; i < shape.dims(); i++) {
if (i > 0) strings::StrAppend(&s, ",");
int64_t dim = shape.dim_size(i);
if (dim < 0) {
strings::StrAppend(&s, "?");
} else {
strings::StrAppend(&s, dim);
}
}
strings::StrAppend(&s, "]");
return s;
}
string TensorShapeRep::DebugString(const TensorShapeProto& proto) {
string s;
if (proto.unknown_rank()) {
strings::StrAppend(&s, "<unknown>");
if (proto.dim_size() == 0) return s;
}
strings::StrAppend(&s, "[");
bool first = true;
for (const auto& d : proto.dim()) {
if (!first) strings::StrAppend(&s, ",");
if (d.size() == -1) {
strings::StrAppend(&s, "?");
} else {
strings::StrAppend(&s, d.size());
}
first = false;
}
strings::StrAppend(&s, "]");
return s;
}
bool TensorShapeUtils::StartsWith(const TensorShape& shape,
const TensorShape& prefix) {
if (shape.dims() < prefix.dims()) return false;
for (int i = 0; i < prefix.dims(); ++i) {
if (shape.dim_size(i) != prefix.dim_size(i)) return false;
}
return true;
}
bool TensorShapeUtils::EndsWith(const TensorShape& shape,
const TensorShape& suffix) {
const int suffix_size = suffix.dims();
if (shape.dims() < suffix_size) return false;
for (int i = 0; i < suffix_size; ++i) {
if (shape.dim_size(shape.dims() - suffix_size + i) != suffix.dim_size(i)) {
return false;
}
}
return true;
}
template <typename T, class Shape>
Status MakeShapeHelper(const T* dims, int64_t n, Shape* out) {
out->Clear();
if (n > TensorShape::MaxDimensions()) {
return errors::InvalidArgument("Too many dimensions");
}
if (n < 0) {
return errors::InvalidArgument("Negative number of dimensions ", n);
}
for (int64_t i = 0; i < n; ++i) {
T dim = internal::SubtleMustCopy(dims[i]);
int64_t new_num_elements;
if (dim < 0) {
if (!out->kIsPartial) {
return errors::InvalidArgument("Dimension ", dim, " must be >= 0");
}
if (dim < -1) {
return errors::InvalidArgument("Dimension ", dim, " must be >= -1");
}
dim = -1;
new_num_elements = -1;
} else if (out->num_elements() < 0) {
new_num_elements = -1;
} else {
new_num_elements = MultiplyWithoutOverflow(out->num_elements(), dim);
if (TF_PREDICT_FALSE(new_num_elements < 0)) {
TensorShapeProto proto;
for (int64_t j = 0; j < n; ++j) {
proto.add_dim()->set_size(internal::SubtleMustCopy(dims[j]));
}
return errors::InvalidArgument(
"Shape ", TensorShape::DebugString(proto),
" would have more than 2**63 - 1 elements");
}
}
out->UnsafeAddDim(dim, new_num_elements);
}
return absl::OkStatus();
}
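// Instantiates TensorShapeUtils::MakeShape overloads for int32 and int64_t
// inputs producing either a TensorShape or a PartialTensorShape.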
#define MAKE_SHAPE(T, Shape) \
Status TensorShapeUtils::MakeShape(const T* dims, int64_t n, Shape* out) { \
return MakeShapeHelper(dims, n, out); \
} \
Status TensorShapeUtils::MakeShape(gtl::ArraySlice<T> shape, Shape* out) { \
return MakeShapeHelper(shape.data(), shape.size(), out); \
}
MAKE_SHAPE(int32, TensorShape)
MAKE_SHAPE(int64_t, TensorShape)
MAKE_SHAPE(int32, PartialTensorShape)
MAKE_SHAPE(int64_t, PartialTensorShape)
#undef MAKE_SHAPE
string TensorShapeUtils::ShapeListString(
const absl::Span<const TensorShape>& shapes) {
string result = "[";
bool first = true;
for (const TensorShape& shape : shapes) {
strings::StrAppend(&result, (first ? "" : ", "), shape.DebugString());
first = false;
}
strings::StrAppend(&result, "]");
return result;
}
PartialTensorShape PartialTensorShape::Concatenate(int64_t size) const {
PartialTensorShape out = *this;
out.AddDim(size);
return out;
}
Status PartialTensorShape::ConcatenateWithStatus(
int64_t size, PartialTensorShape* out) const {
*out = *this;
return out->AddDimWithStatus(size);
}
PartialTensorShape PartialTensorShape::Concatenate(
const PartialTensorShape& shape) const {
if (unknown_rank() || shape.unknown_rank()) {
return PartialTensorShape();
}
PartialTensorShape out = *this;
for (auto dim : shape) out.AddDim(dim.size);
return out;
}
Status PartialTensorShape::ConcatenateWithStatus(
const PartialTensorShape& shape, PartialTensorShape* out) const {
if (unknown_rank() || shape.unknown_rank()) {
*out = PartialTensorShape();
return absl::OkStatus();
}
*out = *this;
for (auto dim : shape) {
Status s = out->AddDimWithStatus(dim.size);
if (!s.ok()) return s;
}
return absl::OkStatus();
}
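// Merges two partial shapes: an unknown rank defers to the other shape,
// otherwise the ranks must match, unknown dimensions take the other shape's
// size, and conflicting known sizes are an error.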
Status PartialTensorShape::MergeWith(const PartialTensorShape& shape,
PartialTensorShape* result) const {
if (unknown_rank()) {
*result = shape;
return absl::OkStatus();
}
if (shape.unknown_rank()) {
*result = *this;
return absl::OkStatus();
}
const int dims_ = dims();
if (dims_ != shape.dims()) {
return errors::InvalidArgument(
"PartialTensorShape: Incompatible ranks during merge: ", dims_, " vs. ",
shape.dims());
}
if (result == this) {
return errors::Internal(
"PartialTensorShape::MergeWith: Cannot output result to itself");
}
result->Clear();
Status s = absl::OkStatus();
for (int i = 0; i < dims_; ++i) {
const int64_t dim0 = dim_size(i);
const int64_t dim1 = shape.dim_size(i);
if (dim0 >= 0 && dim1 >= 0 && dim0 != dim1) {
return errors::InvalidArgument(
"PartialTensorShape: Incompatible shapes during merge: ",
DebugString(), " vs. ", shape.DebugString());
}
s.Update(result->AddDimWithStatus(dim0 >= 0 ? dim0 : dim1));
if (!s.ok()) {
return s;
}
}
return absl::OkStatus();
}
bool PartialTensorShape::AsTensorShape(TensorShape* shape) const {
if (IsFullyDefined()) {
const TensorShapeRep* rep = this;
*shape = *static_cast<const TensorShape*>(rep);
return true;
}
return false;
}
bool PartialTensorShape::IsIdenticalTo(const PartialTensorShape& shape) const {
if (unknown_rank() || shape.unknown_rank()) {
return unknown_rank() == shape.unknown_rank();
}
if (dims() != shape.dims()) return false;
for (int i = 0; i < dims(); i++) {
if (dim_size(i) != shape.dim_size(i)) return false;
}
return true;
}
bool PartialTensorShape::IsCompatibleWith(
const PartialTensorShape& shape) const {
if (unknown_rank() || shape.unknown_rank()) return true;
if (dims() != shape.dims()) return false;
for (int i = 0; i < dims(); i++) {
const int64_t dim0 = dim_size(i);
const int64_t dim1 = shape.dim_size(i);
if (dim0 >= 0 && dim1 >= 0 && dim0 != dim1) return false;
}
return true;
}
string PartialTensorShapeUtils::PartialShapeListString(
const absl::Span<const PartialTensorShape>& shapes) {
string result = "[";
bool first = true;
for (const PartialTensorShape& shape : shapes) {
strings::StrAppend(&result, (first ? "" : ", "), shape.DebugString());
first = false;
}
strings::StrAppend(&result, "]");
return result;
}
bool PartialTensorShapeUtils::AreCompatible(
const absl::Span<const PartialTensorShape>& shapes0,
const absl::Span<const PartialTensorShape>& shapes1) {
if (shapes0.size() == shapes1.size()) {
for (size_t i = 0; i < shapes0.size(); ++i) {
if (!shapes0[i].IsCompatibleWith(shapes1[i])) {
return false;
}
}
return true;
} else {
return false;
}
}
bool PartialTensorShapeUtils::AreIdentical(
const absl::Span<const PartialTensorShape>& shapes0,
const absl::Span<const PartialTensorShape>& shapes1) {
if (shapes0.size() == shapes1.size()) {
for (size_t i = 0; i < shapes0.size(); ++i) {
if (!shapes0[i].IsIdenticalTo(shapes1[i])) {
return false;
}
}
return true;
} else {
return false;
}
}
Status TensorShapeUtils::NumElements(absl::Span<const int64_t> shape,
int64_t* num_elements) {
int64_t n = 1;
for (auto dim : shape) {
n = MultiplyWithoutOverflow(n, dim);
if (n < 0) {
return errors::InvalidArgument("Can't compute total size of shape [",
absl::StrJoin(shape, ","),
"]; product would overflow int64");
}
}
*num_elements = n;
return absl::OkStatus();
}
template class TensorShapeBase<TensorShape>;
template class TensorShapeBase<PartialTensorShape>;
} | #include "tensorflow/core/framework/tensor_shape.h"
#include <cstdint>
#include <limits>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
class TensorShapeTestHelper {
public:
static void set_data_type(TensorShape* s, DataType t) { s->set_data_type(t); }
static uint8 data_type(const TensorShape* s) { return s->data_type(); }
};
namespace {
TEST(TensorShapeTest, Default) {
TensorShape s;
EXPECT_EQ(s.dims(), 0);
EXPECT_EQ(s.num_elements(), 1);
}
TEST(TensorShapeTest, set_dim) {
TensorShape s({10, 5});
s.set_dim(0, 20);
ASSERT_EQ(2, s.dims());
EXPECT_EQ(20, s.dim_size(0));
EXPECT_EQ(100, s.num_elements());
s.set_dim(1, 2);
ASSERT_EQ(2, s.dims());
EXPECT_EQ(2, s.dim_size(1));
EXPECT_EQ(40, s.num_elements());
}
TEST(TensorShapeTest, RemoveDim) {
TensorShape s({10, 5});
s.RemoveDim(0);
EXPECT_EQ(5, s.num_elements());
ASSERT_EQ(1, s.dims());
}
TEST(TensorShapeTest, RemoveDimWithStatus) {
TensorShape s({10, 5});
TF_EXPECT_OK(s.RemoveDimWithStatus(0));
EXPECT_EQ(s.num_elements(), 5);
EXPECT_EQ(s.dims(), 1);
EXPECT_THAT(
s.RemoveDimWithStatus(-1),
testing::StatusIs(error::Code::INTERNAL,
::testing::ContainsRegex(
"Expected dimension index to be non-negative")));
}
TEST(TensorShapeTest, RemoveAndAddDim) {
TensorShape s({10, 5, 20});
s.RemoveDim(1);
s.AddDim(100);
EXPECT_EQ(20000, s.num_elements());
ASSERT_EQ(3, s.dims());
}
TEST(TensorShapeTest, RemoveLastDims) {
TensorShape s({2, 3, 5, 7});
s.RemoveLastDims(1);
ASSERT_EQ(3, s.dims());
EXPECT_EQ(30, s.num_elements());
s.RemoveLastDims(2);
ASSERT_EQ(1, s.dims());
EXPECT_EQ(2, s.dim_size(0));
}
TEST(TensorShapeTest, RemoveLastDimsWithStatus) {
TensorShape s({2, 3, 5, 7});
TF_EXPECT_OK(s.RemoveLastDimsWithStatus(1));
EXPECT_EQ(s.dims(), 3);
EXPECT_EQ(s.num_elements(), 30);
EXPECT_THAT(
s.RemoveLastDimsWithStatus(4),
testing::StatusIs(error::Code::INTERNAL,
::testing::ContainsRegex(
"Expected dimension index to be at most 3")));
}
TEST(TensorShapeTest, RemoveDimRange) {
TensorShape s0({2, 3, 5, 7});
for (int i = -4; i <= 4; ++i) {
s0.RemoveDimRange(i, i);
ASSERT_EQ(4, s0.dims());
ASSERT_EQ(210, s0.num_elements());
}
s0.RemoveDimRange(3, 1);
ASSERT_EQ(4, s0.dims());
ASSERT_EQ(210, s0.num_elements());
s0.RemoveDimRange(0, 3);
ASSERT_EQ(1, s0.dims());
EXPECT_EQ(7, s0.dim_size(0));
TensorShape s1({2, 3, 5, 7});
s1.RemoveDimRange(2, 3);
ASSERT_EQ(3, s1.dims());
ASSERT_EQ(42, s1.num_elements());
TensorShape s2({2, 3, 5, 7});
s2.RemoveDimRange(-2, -3);
ASSERT_EQ(4, s2.dims());
ASSERT_EQ(210, s2.num_elements());
s2.RemoveDimRange(0, -2);
ASSERT_EQ(1, s2.dims());
ASSERT_EQ(7, s2.dim_size(0));
TensorShape s3({2, 3, 5, 7});
s3.RemoveDimRange(-3, -2);
ASSERT_EQ(3, s3.dims());
ASSERT_EQ(42, s3.num_elements());
}
TEST(TensorShapeTest, RemoveDimRangeWithStatusWithEmptyInterval) {
TensorShape s0({2, 3, 5, 7});
for (int i = -4; i <= 4; ++i) {
TF_EXPECT_OK(s0.RemoveDimRangeWithStatus(i, i));
EXPECT_EQ(s0.dims(), 4);
EXPECT_EQ(s0.num_elements(), 210);
}
}
TEST(TensorShapeTest, RemoveDimRangeWithStatusWithPositiveBeginEnd) {
TensorShape s0({2, 3, 5, 7});
TF_EXPECT_OK(s0.RemoveDimRangeWithStatus(3, 1));
EXPECT_EQ(s0.dims(), 4);
EXPECT_EQ(s0.num_elements(), 210);
TF_EXPECT_OK(s0.RemoveDimRangeWithStatus(0, 3));
EXPECT_EQ(s0.dims(), 1);
EXPECT_EQ(s0.dim_size(0), 7);
TensorShape s1({2, 3, 5, 7});
TF_EXPECT_OK(s1.RemoveDimRangeWithStatus(2, 3));
EXPECT_EQ(s1.dims(), 3);
EXPECT_EQ(s1.num_elements(), 42);
}
TEST(TensorShapeTest, RemoveDimRangeWithStatusWithNegativeBeginEnd) {
TensorShape s2({2, 3, 5, 7});
TF_EXPECT_OK(s2.RemoveDimRangeWithStatus(-2, -3));
EXPECT_EQ(s2.dims(), 4);
EXPECT_EQ(s2.num_elements(), 210);
TF_EXPECT_OK(s2.RemoveDimRangeWithStatus(0, -2));
EXPECT_EQ(s2.dims(), 1);
EXPECT_EQ(s2.dim_size(0), 7);
TensorShape s3({2, 3, 5, 7});
TF_EXPECT_OK(s3.RemoveDimRangeWithStatus(-3, -2));
EXPECT_EQ(s3.dims(), 3);
EXPECT_EQ(s3.num_elements(), 42);
}
TEST(TensorShapeTest, RemoveDimRangeWithStatusWithInvalidBeginEnd) {
TensorShape s3({2, 5, 7});
EXPECT_THAT(s3.RemoveDimRangeWithStatus(-5, 0),
testing::StatusIs(error::Code::INTERNAL,
::testing::ContainsRegex(
"Start index must be non-negative")));
EXPECT_THAT(s3.RemoveDimRangeWithStatus(5, 0),
testing::StatusIs(
error::Code::INTERNAL,
::testing::ContainsRegex("Start index must be less than 3")));
EXPECT_THAT(s3.RemoveDimRangeWithStatus(0, -5),
testing::StatusIs(
error::Code::INTERNAL,
::testing::ContainsRegex("End index must be non-negative")));
EXPECT_THAT(s3.RemoveDimRangeWithStatus(0, 5),
testing::StatusIs(
error::Code::INTERNAL,
::testing::ContainsRegex("End index must be less than 3")));
}
TEST(TensorShapeTest, InsertDimWithStatus) {
TensorShape s({10, 20});
TF_EXPECT_OK(s.InsertDimWithStatus(1, 5));
EXPECT_EQ(s.dims(), 3);
EXPECT_EQ(s.dim_size(1), 5);
TF_EXPECT_OK(s.InsertDimWithStatus(1, 30));
EXPECT_EQ(s.dims(), 4);
EXPECT_EQ(s.dim_size(1), 30);
EXPECT_EQ(s.dim_size(2), 5);
}
TEST(TensorShapeTest, InsertDimWithStatusWithInvalidData) {
TensorShape s({10, 5, 20});
EXPECT_THAT(s.InsertDimWithStatus(1, -5),
testing::StatusIs(
error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex("Expected a non-negative size")));
EXPECT_THAT(
s.InsertDimWithStatus(-1, 5),
testing::StatusIs(error::Code::INTERNAL,
::testing::ContainsRegex(
"The insertion index must be non-negative")));
EXPECT_THAT(s.InsertDimWithStatus(4, 5),
testing::StatusIs(error::Code::INTERNAL,
::testing::ContainsRegex(
"The insertion index must be at most 3")));
}
TEST(TensorShapeTest, InsertDimWithStatusWithTooManyDims) {
TensorShape s({10, 20});
int max_dims_to_add = TensorShape::MaxDimensions() - s.dims();
for (int i = 0; i < max_dims_to_add; ++i) {
TF_EXPECT_OK(s.InsertDimWithStatus(1, 1));
}
EXPECT_THAT(s.InsertDimWithStatus(1, 1),
testing::StatusIs(
error::Code::INTERNAL,
::testing::ContainsRegex(
"Shape has.*dimensions which is the maximum allowed")));
}
TEST(TensorShapeTest, TensorShapeAssignment) {
TensorShape s({std::numeric_limits<int64_t>::max()});
TensorShape s2({1, std::numeric_limits<int64_t>::max()});
TensorShape s3({10});
s = s2;
EXPECT_EQ(s.dims(), 2);
EXPECT_EQ(s.dim_size(0), 1);
EXPECT_EQ(s.dim_size(1), std::numeric_limits<int64_t>::max());
s = s3;
EXPECT_EQ(s.dims(), 1);
EXPECT_EQ(s.dim_size(0), 10);
}
TEST(TensorShapeTest, InvalidShapeProto) {
TensorShapeProto proto;
EXPECT_TRUE(TensorShape::IsValid(proto));
proto.add_dim()->set_size(357);
proto.add_dim()->set_size(982);
EXPECT_TRUE(TensorShape::IsValid(proto));
proto.Clear();
proto.add_dim()->set_size(-357);
proto.add_dim()->set_size(-982);
EXPECT_FALSE(TensorShape::IsValid(proto));
proto.Clear();
proto.add_dim()->set_size(1LL << 35);
proto.add_dim()->set_size((1LL << 35) + 1);
EXPECT_FALSE(TensorShape::IsValid(proto));
}
TEST(TensorShapeTest, TooManyDimsProto) {
TensorShapeProto proto;
EXPECT_TRUE(TensorShape::IsValid(proto));
TF_EXPECT_OK(TensorShape::IsValidShape(proto));
for (int i = 0; i < TensorShape::MaxDimensions(); i++) {
proto.add_dim()->set_size(1);
}
EXPECT_TRUE(TensorShape::IsValid(proto));
TF_EXPECT_OK(TensorShape::IsValidShape(proto));
proto.add_dim()->set_size(1);
EXPECT_FALSE(TensorShape::IsValid(proto));
EXPECT_FALSE(TensorShape::IsValidShape(proto).ok());
}
TEST(TensorShapeTest, SetDimForEmptyTensor) {
TensorShape s({10, 5, 20});
EXPECT_EQ(1000, s.num_elements());
s.set_dim(1, 0);
EXPECT_EQ(0, s.num_elements());
s.set_dim(1, 7);
EXPECT_EQ(1400, s.num_elements());
}
TEST(TensorShapeTest, SetDimWithLargeSizeFormat) {
TensorShape s({std::numeric_limits<uint32_t>::max() - 2});
s.set_dim(0, 5);
EXPECT_EQ(s.dim_size(0), 5);
TensorShape s2({std::numeric_limits<int64_t>::max()});
s2.set_dim(0, 10);
EXPECT_EQ(s2.dim_size(0), 10);
s.set_dim(0, std::numeric_limits<int64_t>::max());
EXPECT_EQ(s.dim_size(0), std::numeric_limits<int64_t>::max());
}
TEST(TensorShapeTest, SetDimWithStatus) {
TensorShape s({10, 5, 20});
TF_EXPECT_OK(s.SetDimWithStatus(1, 2));
EXPECT_EQ(s.dims(), 3);
EXPECT_EQ(s.dim_size(1), 2);
EXPECT_THAT(s.SetDimWithStatus(-1, 2),
testing::StatusIs(
error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex("Index must be non-negative")));
EXPECT_THAT(
s.SetDimWithStatus(4, 2),
testing::StatusIs(error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex("Index must be less than 3")));
EXPECT_THAT(s.SetDimWithStatus(0, -2),
testing::StatusIs(
error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex("Expected a non-negative size")));
}
TEST(TensorShapeTest, SetDimWithStatusWithLargeSizeFormat) {
TensorShape s({std::numeric_limits<uint32_t>::max() - 2});
TF_EXPECT_OK(s.SetDimWithStatus(0, 2));
s.set_dim(0, 5);
EXPECT_EQ(s.dim_size(0), 5);
TensorShape s2({std::numeric_limits<int64_t>::max()});
TF_EXPECT_OK(s2.SetDimWithStatus(0, 10));
EXPECT_EQ(s2.dim_size(0), 10);
TF_EXPECT_OK(s.SetDimWithStatus(0, std::numeric_limits<int64_t>::max()));
EXPECT_EQ(s.dim_size(0), std::numeric_limits<int64_t>::max());
}
TEST(TensorShapeTest, AppendShape64BitIndices) {
TensorShape s({10, 2147483648});
EXPECT_EQ(10, s.dim_size(0));
EXPECT_EQ(2147483648, s.dim_size(1));
TensorShape s2;
s2.AppendShape(s);
EXPECT_EQ(10, s2.dim_size(0));
EXPECT_EQ(2147483648, s2.dim_size(1));
}
TEST(TensorShapeTest, DataType) {
TensorShape s({});
EXPECT_EQ(TensorShapeTestHelper::data_type(&s), DT_INVALID);
TensorShapeTestHelper::set_data_type(&s, DT_INT32);
s.AddDim(1);
EXPECT_EQ(TensorShapeTestHelper::data_type(&s), DT_INT32);
s.AddDim(100000);
EXPECT_EQ(TensorShapeTestHelper::data_type(&s), DT_INT32);
TensorShapeTestHelper::set_data_type(&s, DT_UINT16_REF);
s.AddDim(2);
EXPECT_EQ(TensorShapeTestHelper::data_type(&s), DT_UINT16_REF);
s.AddDim(4);
EXPECT_EQ(TensorShapeTestHelper::data_type(&s), DT_UINT16_REF);
s.AddDim(3);
EXPECT_EQ(TensorShapeTestHelper::data_type(&s), DT_UINT16_REF);
TensorShape s2 = s;
EXPECT_EQ(TensorShapeTestHelper::data_type(&s2), DT_UINT16_REF);
s2.RemoveDim(2);
EXPECT_EQ(TensorShapeTestHelper::data_type(&s2), DT_UINT16_REF);
TensorShapeTestHelper::set_data_type(&s2, DT_FLOAT);
EXPECT_EQ(TensorShapeTestHelper::data_type(&s2), DT_FLOAT);
s2.Clear();
EXPECT_EQ(TensorShapeTestHelper::data_type(&s2), DT_INVALID);
}
TEST(TensorShapeTest, ostream) {
TensorShape s({10, 5, 4});
std::stringstream ss;
ss << s;
EXPECT_EQ(ss.str(), "[10,5,4]");
}
TEST(TensorShapeTest, AddDimWithStatus) {
TensorShape s({10, 5, 20});
Status status = s.AddDimWithStatus(400);
EXPECT_TRUE(status.ok());
EXPECT_EQ(400000, s.num_elements());
ASSERT_EQ(4, s.dims());
status = s.AddDimWithStatus(-1);
EXPECT_EQ(absl::StatusCode::kInvalidArgument, status.code());
TensorShape s2({std::numeric_limits<int64_t>::max()});
EXPECT_THAT(s2.AddDimWithStatus(2),
testing::StatusIs(error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex(
"Encountered overflow when multiplying")));
}
TEST(TensorShapeTest, AppendShapeWithStatus) {
TensorShape s({10, 5, 20});
TensorShape s2({400});
TF_EXPECT_OK(s.AppendShapeWithStatus(s2));
EXPECT_EQ(s.num_elements(), 400000);
EXPECT_EQ(s.dims(), 4);
TensorShape s3({std::numeric_limits<int64_t>::max()});
EXPECT_THAT(s2.AppendShapeWithStatus(s3),
testing::StatusIs(error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex(
"Encountered overflow when multiplying")));
}
TEST(TensorShapeTest, Factory) {
TensorShape s;
Status status = TensorShape::BuildTensorShapeBase({10, 5, 20}, &s);
EXPECT_TRUE(status.ok());
EXPECT_EQ(1000, s.num_elements());
ASSERT_EQ(3, s.dims());
status = TensorShape::BuildTensorShapeBase({-10, 5, 20}, &s);
EXPECT_EQ(absl::StatusCode::kInvalidArgument, status.code());
}
TEST(TensorShapeTest, AsEigenDSizes) {
TensorShape s({10, 5, 20});
Eigen::DSizes<Eigen::DenseIndex, 3> dsizes_expected{10, 5, 20};
EXPECT_EQ(s.AsEigenDSizes<3>(), dsizes_expected);
EXPECT_DEATH(s.AsEigenDSizes<2>(),
"tensor of 2 dimensions from a tensor of 3 dimensions");
Eigen::DSizes<Eigen::DenseIndex, 3> dsizes_out;
TF_EXPECT_OK(s.AsEigenDSizesWithStatus<3>(&dsizes_out));
EXPECT_EQ(dsizes_out, dsizes_expected);
Eigen::DSizes<Eigen::DenseIndex, 2> dsizes_out2;
EXPECT_THAT(s.AsEigenDSizesWithStatus<2>(&dsizes_out2),
testing::StatusIs(
error::Code::INTERNAL,
::testing::ContainsRegex(
"tensor of 2 dimensions from a tensor of 3 dimensions")));
}
TEST(TensorShapeTest, AsEigenDSizesWithPadding) {
TensorShape s({10, 5, 20});
Eigen::DSizes<Eigen::DenseIndex, 5> dsizes_expected{10, 5, 20, 1, 1};
EXPECT_EQ(s.AsEigenDSizesWithPadding<5>(), dsizes_expected);
EXPECT_DEATH(s.AsEigenDSizesWithPadding<2>(),
"at most 2 dimensions from a tensor of 3 dimensions");
Eigen::DSizes<Eigen::DenseIndex, 5> dsizes_out;
TF_EXPECT_OK(s.AsEigenDSizesWithPaddingWithStatus<5>(&dsizes_out));
EXPECT_EQ(dsizes_out, dsizes_expected);
Eigen::DSizes<Eigen::DenseIndex, 2> dsizes_out2;
EXPECT_THAT(s.AsEigenDSizesWithPaddingWithStatus<2>(&dsizes_out2),
testing::StatusIs(
error::Code::INTERNAL,
::testing::ContainsRegex(
"at most 2 dimensions from a tensor of 3 dimensions")));
}
TEST(TensorShapeTest, AsProto) {
TensorShape s({10, 5});
TensorShapeProto sp;
s.AsProto(&sp);
EXPECT_EQ(sp.dim_size(), 2);
EXPECT_EQ(sp.dim(0).size(), 10);
EXPECT_EQ(sp.dim(1).size(), 5);
TensorShapeProto sp2 = s.AsProto();
EXPECT_EQ(sp.DebugString(), sp2.DebugString());
}
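// A simple InlinedVector-backed TensorShape implementation (the "old"
// representation). The Randomized test below cross-checks the compact
// TensorShape against it.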
class TensorShapeIterOld;
class TensorShapeOld {
public:
explicit TensorShapeOld(absl::Span<const int64_t> dim_sizes);
TensorShapeOld(std::initializer_list<int64_t> dim_sizes)
: TensorShapeOld(absl::Span<const int64_t>(dim_sizes)) {}
explicit TensorShapeOld(const TensorShapeProto& proto);
TensorShapeOld();
static bool IsValid(const TensorShapeProto& proto);
static Status IsValidShape(const TensorShapeProto& proto);
void Clear();
void AddDim(int64_t size);
void AppendShape(const TensorShapeOld& shape);
void InsertDim(int d, int64_t size);
void set_dim(int d, int64_t size);
void RemoveDim(int d);
int dims() const { return dim_sizes_.size(); }
int64_t dim_size(int d) const {
DCHECK_GE(d, 0);
DCHECK_LT(d, dims());
return dim_sizes_[d];
}
absl::Span<const int64_t> dim_sizes() const { return dim_sizes_; }
int64_t num_elements() const { return num_elements_; }
bool IsSameSize(const TensorShapeOld& b) const;
bool operator==(const TensorShapeOld& b) const { return IsSameSize(b); }
void AsProto(TensorShapeProto* proto) const;
template <int NDIMS>
Eigen::DSizes<Eigen::DenseIndex, NDIMS> AsEigenDSizes() const;
template <int NDIMS>
Eigen::DSizes<Eigen::DenseIndex, NDIMS> AsEigenDSizesWithPadding() const;
TensorShapeIterOld begin() const;
TensorShapeIterOld end() const;
string DebugString() const;
static string DebugString(const TensorShapeProto& proto);
private:
void recompute_dims();
absl::InlinedVector<int64_t, 4UL> dim_sizes_;
int64_t num_elements_;
};
struct TensorShapeDimOld {
explicit TensorShapeDimOld(int64_t s) : size(s) {}
int64_t size;
};
class TensorShapeIterOld {
public:
TensorShapeIterOld(const TensorShapeOld* shape, int d)
: shape_(shape), d_(d) {}
bool operator==(const TensorShapeIterOld& rhs) {
DCHECK(shape_ == rhs.shape_);
return d_ == rhs.d_;
}
bool operator!=(const TensorShapeIterOld& rhs) {
DCHECK(shape_ == rhs.shape_);
return d_ != rhs.d_;
}
void operator++() { ++d_; }
TensorShapeDimOld operator*() {
return TensorShapeDimOld(shape_->dim_size(d_));
}
private:
const TensorShapeOld* shape_;
int d_;
};
static const int64_t kMaxElements = (1LL << 40);
bool TensorShapeOld::IsValid(const TensorShapeProto& proto) {
int64_t num_elements = 1;
for (const auto& d : proto.dim()) {
if (d.size() < 0) return false;
num_elements *= d.size();
if (num_elements > kMaxElements) return false;
}
return true;
}
Status TensorShapeOld::IsValidShape(const TensorShapeProto& proto) {
int64_t num_elements = 1;
for (const auto& d : proto.dim()) {
if (d.size() < 0) {
return errors::InvalidArgument("Shape ", DebugString(proto),
" has negative dimensions; ",
"perhaps an un-fed placeholder?");
}
num_elements *= d.size();
if (num_elements > kMaxElements) {
return errors::InvalidArgument("Shape ", DebugString(proto),
" is too large (more than ", kMaxElements,
" entries)");
}
}
return absl::OkStatus();
}
TensorShapeOld::TensorShapeOld(const TensorShapeProto& proto) {
dim_sizes_.reserve(proto.dim_size());
num_elements_ = 1;
for (const auto& d : proto.dim()) {
AddDim(d.size());
}
}
TensorShapeOld::TensorShapeOld(absl::Span<const int64_t> dim_sizes) {
dim_sizes_.reserve(dim_sizes.size());
num_elements_ = 1;
for (auto s : dim_sizes) {
AddDim(s);
}
}
TensorShapeOld::TensorShapeOld() : num_elements_(1) {}
void TensorShapeOld::Clear() {
dim_sizes_.clear();
num_elements_ = 1;
}
void TensorShapeOld::AddDim(int64_t size) {
CHECK_GE(size, 0);
dim_sizes_.push_back(size);
num_elements_ *= size;
CHECK_LE(0, num_elements_);
CHECK_LE(num_elements_, kMaxElements);
}
void TensorShapeOld::AppendShape(const TensorShapeOld& shape) {
for (auto d : shape) AddDim(d.size);
}
void TensorShapeOld::InsertDim(int d, int64_t size) {
CHECK_GE(d, 0);
CHECK_LE(d, dims());
CHECK_GE(size, 0);
dim_sizes_.insert(dim_sizes_.begin() + d, size);
num_elements_ *= size;
CHECK_LE(0, num_elements_);
CHECK_LE(num_elements_, kMaxElements);
}
void TensorShapeOld::set_dim(int d, int64_t size) {
CHECK_GE(d, 0);
CHECK_LT(d, dims());
CHECK_GE(size, 0);
dim_sizes_[d] = size;
recompute_dims();
}
void TensorShapeOld::RemoveDim(int d) {
CHECK_GE(d, 0);
CHECK_LT(d, dims());
dim_sizes_.erase(dim_sizes_.begin() + d);
recompute_dims();
}
void TensorShapeOld::recompute_dims() {
num_elements_ = 1;
for (auto s : dim_sizes_) {
num_elements_ *= s;
CHECK_LE(0, num_elements_);
CHECK_LE(num_elements_, kMaxElements);
}
}
bool TensorShapeOld::IsSameSize(const TensorShapeOld& b) const {
if (b.dims() != dims()) return false;
for (int d = 0; d < dims(); d++) {
if (dim_size(d) != b.dim_size(d)) return false;
}
return true;
}
void TensorShapeOld::AsProto(TensorShapeProto* proto) const {
proto->Clear();
for (size_t d = 0; d < dim_sizes_.size(); ++d) {
auto* dim = proto->add_dim();
dim->set_size(dim_sizes_[d]);
}
}
TensorShapeIterOld TensorShapeOld::begin() const {
return TensorShapeIterOld(this, 0);
}
TensorShapeIterOld TensorShapeOld::end() const {
return TensorShapeIterOld(this, dims());
}
string TensorShapeOld::DebugString() const {
return strings::StrCat(
"[", absl::StrJoin(absl::Span<const int64_t>(dim_sizes_), ","), "]");
}
string TensorShapeOld::DebugString(const TensorShapeProto& proto) {
string s = "[";
bool first = true;
for (const auto& d : proto.dim()) {
strings::StrAppend(&s, first ? "" : ",", d.size());
first = false;
}
strings::StrAppend(&s, "]");
return s;
}
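// Produces a random dimension size skewed toward small values (0 or 1 once
// the shape already has at least 100 elements) while keeping the running
// element count non-negative and below 2^34.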
static int64_t SkewedSize(random::SimplePhilox* gen, int64_t current_elements) {
int64_t result = 0;
do {
if (current_elements < 100) {
result = gen->Uniform(100000);
} else {
result = gen->Uniform(2);
}
} while ((result * current_elements >= 1LL << 34) ||
(result * current_elements < 0));
return result;
}
TEST(TensorShapeTest, Randomized) {
random::PhiloxRandom philox(7, 7);
random::SimplePhilox gen(&philox);
TensorShape s;
TensorShapeOld sold;
TensorShapeProto sp;
TensorShapeProto spold;
LOG(INFO) << "Sizes: " << sizeof(TensorShape) << " vs "
<< sizeof(TensorShapeOld);
for (int i = 0; i < 100000; i++) {
s.AsProto(&sp);
sold.AsProto(&spold);
EXPECT_EQ(sp.DebugString(), spold.DebugString());
if ((i % 1000) == 0) {
fprintf(stderr, "ITERATION %d: %s\n", i, sp.DebugString().c_str());
}
EXPECT_EQ(s.num_elements(), sold.num_elements());
TensorShape copy = s;
TensorShape moved(std::move(copy));
EXPECT_EQ(s, moved);
copy = s;
moved = std::move(copy);
EXPECT_EQ(s, moved);
int64_t ne = sold.num_elements();
int r = gen.Uniform(100);
if (r < 10) {
int64_t sz = SkewedSize(&gen, sold.num_elements());
s.AddDim(sz);
sold.AddDim(sz);
} else if (r < 15) {
s.Clear();
sold.Clear();
} else if (r < 35 && s.dims() > 0 && ne > 0 && ne < 100000000) {
int dim = gen.Uniform(s.dims());
s.RemoveDim(dim);
sold.RemoveDim(dim);
} else if (r < 50 && ne > 0 && ne < 100000000) {
int dim = gen.Uniform(s.dims() + 1);
int64_t sz = SkewedSize(&gen, sold.num_elements());
s.InsertDim(dim, sz);
sold.InsertDim(dim, sz);
} else {
std::vector<int64_t> sizes;
const int N = (gen.Uniform(4) == 0) ? gen.Uniform(10) : gen.Uniform(3);
int64_t num_elements = 1;
for (int i = 0; i < N; i++) {
int64_t sz = SkewedSize(&gen, num_elements);
sizes.push_back(sz);
num_elements *= std::max<int64_t>(1, sz);
}
s = TensorShape(sizes);
sold = TensorShapeOld(sizes);
}
}
}
TEST(TensorShapeTest, Large) {
int64_t one = 1;
int64_t max = std::numeric_limits<int64_t>::max();
EXPECT_EQ(TensorShape({max}).num_elements(), max);
EXPECT_EQ(TensorShape({1, max}).num_elements(), max);
EXPECT_EQ(TensorShape({max, 1}).num_elements(), max);
EXPECT_EQ(TensorShape({one << 62}).num_elements(), one << 62);
EXPECT_EQ(TensorShape({one << 20, one << 41}).num_elements(), one << 61);
EXPECT_EQ(TensorShape({1000, 1000, 1000, 1000, 1000, 1000}).num_elements(),
1e18);
}
TEST(TensorShapeTest, Overflow) {
int64_t one = 1;
std::vector<std::vector<int64_t>> overflows = {
{1 << 30, 1 << 30, 1 << 30},
{1 << 5, (one << 60) + 1},
};
for (const auto& overflow : overflows) {
TensorShapeProto proto;
for (auto dim : overflow) {
proto.add_dim()->set_size(dim);
}
EXPECT_EQ(absl::StatusCode::kInvalidArgument,
TensorShape::IsValidShape(proto).code());
TensorShape shape;
EXPECT_EQ(absl::StatusCode::kInvalidArgument,
TensorShapeUtils::MakeShape(overflow, &shape).code());
}
}
TEST(TensorShapeTest, UnknownRank) {
TensorShapeProto proto;
proto.set_unknown_rank(true);
EXPECT_TRUE(TensorShape::IsValid(proto));
TF_EXPECT_OK(TensorShape::IsValidShape(proto));
EXPECT_EQ(TensorShape(), TensorShape(proto));
proto.add_dim()->set_size(7);
EXPECT_TRUE(TensorShape::IsValid(proto));
TF_EXPECT_OK(TensorShape::IsValidShape(proto));
EXPECT_EQ(TensorShape({7}), TensorShape(proto));
}
TEST(TensorShapeUtilsTest, StartsWith) {
EXPECT_TRUE(TensorShapeUtils::StartsWith(TensorShape({}), TensorShape({})));
EXPECT_TRUE(
TensorShapeUtils::StartsWith(TensorShape({2, 3}), TensorShape({})));
EXPECT_TRUE(
TensorShapeUtils::StartsWith(TensorShape({2, 3}), TensorShape({2})));
EXPECT_TRUE(
TensorShapeUtils::StartsWith(TensorShape({2, 3}), TensorShape({2, 3})));
EXPECT_TRUE(TensorShapeUtils::StartsWith(TensorShape({2, 3, 4}),
TensorShape({2, 3})));
EXPECT_FALSE(
TensorShapeUtils::StartsWith(TensorShape({2, 3}), TensorShape({3})));
EXPECT_FALSE(
TensorShapeUtils::StartsWith(TensorShape({2, 3}), TensorShape({2, 4})));
EXPECT_FALSE(TensorShapeUtils::StartsWith(TensorShape({2, 3}),
TensorShape({2, 3, 4})));
EXPECT_FALSE(TensorShapeUtils::StartsWith(TensorShape({2, 3, 4}),
TensorShape({3, 4})));
}
TEST(TensorShapeUtilsTest, EndsWith) {
EXPECT_TRUE(TensorShapeUtils::EndsWith(TensorShape({}), TensorShape({})));
EXPECT_TRUE(TensorShapeUtils::EndsWith(TensorShape({2, 3}), TensorShape({})));
EXPECT_TRUE(
TensorShapeUtils::EndsWith(TensorShape({2, 3}), TensorShape({3})));
EXPECT_TRUE(
TensorShapeUtils::EndsWith(TensorShape({2, 3}), TensorShape({2, 3})));
EXPECT_TRUE(
TensorShapeUtils::EndsWith(TensorShape({2, 3, 4}), TensorShape({3, 4})));
EXPECT_FALSE(
TensorShapeUtils::EndsWith(TensorShape({2, 3}), TensorShape({2})));
EXPECT_FALSE(
TensorShapeUtils::EndsWith(TensorShape({2, 3}), TensorShape({2, 4})));
EXPECT_FALSE(
TensorShapeUtils::EndsWith(TensorShape({2, 3}), TensorShape({2, 3, 4})));
EXPECT_FALSE(
TensorShapeUtils::EndsWith(TensorShape({2, 3, 4}), TensorShape({2, 3})));
}
TEST(TensorShapeUtilsTest, ShapeListString) {
EXPECT_EQ(
TensorShapeUtils::ShapeListString({TensorShape({}), TensorShape({})}),
"[[], []]");
EXPECT_EQ(TensorShapeUtils::ShapeListString(
{TensorShape({2, 3}), TensorShape({4, 5, 6})}),
"[[2,3], [4,5,6]]");
}
TEST(TensorShapeUtilsTest, NumElements) {
int64_t num_elements = 0;
TF_EXPECT_OK(TensorShapeUtils::NumElements({}, &num_elements));
EXPECT_EQ(num_elements, 1);
TF_EXPECT_OK(TensorShapeUtils::NumElements({1}, &num_elements));
EXPECT_EQ(num_elements, 1);
TF_EXPECT_OK(TensorShapeUtils::NumElements({2, 3, 4}, &num_elements));
EXPECT_EQ(num_elements, 24);
int64_t int64_max_val = std::numeric_limits<int64_t>::max();
EXPECT_THAT(
TensorShapeUtils::NumElements({int64_max_val, int64_max_val},
&num_elements),
testing::StatusIs(
error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex(
"Can't compute total size of shape.*product would overflow")));
}
static std::vector<int64_t> MakeSizes(int arg) {
std::vector<int64_t> sizes;
switch (arg) {
case 0:
sizes = {100};
break;
case 1:
sizes = {100, 1000};
break;
case 2:
sizes = {100, 1000000};
break;
case 3:
sizes = {100, 256, 192, 3};
break;
case 4:
sizes = {1, 2, 1ll << 34, 1, 1, 1};
break;
}
return sizes;
}
void BM_TensorShape_Init(::testing::benchmark::State& state) {
const int arg = state.range(0);
auto sizes = MakeSizes(arg);
for (auto s : state) {
TensorShape shape(sizes);
tensorflow::testing::DoNotOptimize(shape.num_elements());
}
}
BENCHMARK(BM_TensorShape_Init)->Arg(0)->Arg(1)->Arg(2)->Arg(3)->Arg(4);
void BM_TensorShape_Assign(::testing::benchmark::State& state) {
const int arg = state.range(0);
TensorShape shape(MakeSizes(arg));
for (auto s : state) {
const TensorShape s2 = shape;
tensorflow::testing::DoNotOptimize(s2);
}
}
BENCHMARK(BM_TensorShape_Assign)->Arg(0)->Arg(1)->Arg(2)->Arg(3)->Arg(4);
void BM_TensorShape_SetDim(::testing::benchmark::State& state) {
const int arg = state.range(0);
TensorShape shape(MakeSizes(arg));
tensorflow::testing::DoNotOptimize(shape);
for (auto s : state) {
shape.set_dim(0, 8);
}
}
BENCHMARK(BM_TensorShape_SetDim)->Arg(0)->Arg(1)->Arg(2)->Arg(3)->Arg(4);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/tensor_shape.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/tensor_shape_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
656417ff-07ef-40b3-a25e-31c722343732 | cpp | tensorflow/tensorflow | hlo_verifier | third_party/xla/xla/service/hlo_verifier.cc | third_party/xla/xla/service/hlo_verifier_test.cc | #include "xla/service/hlo_verifier.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <map>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/permutation_util.h"
#include "xla/primitive_util.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_layout.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
bool IsCallerInstruction(HloInstruction* hlo) {
return HloInstruction::MightHaveCalledComputations(hlo->opcode());
}
absl::Status CheckOperandCount(const HloInstruction* hlo, int expected) {
if (hlo->operand_count() != expected) {
return Internal("Expected %d operands for %s instruction: %s", expected,
HloOpcodeString(hlo->opcode()), hlo->ToString());
}
return absl::OkStatus();
}
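// Returns the number of participants in each subgroup of a collective,
// derived from the module's replica/partition counts or from the first
// replica group, depending on the collective's group mode.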
int64_t GetSubgroupSize(HloCollectiveInstruction* hlo,
CollectiveOpGroupMode group_mode) {
const HloModuleConfig& config = hlo->GetModule()->config();
switch (group_mode) {
case CollectiveOpGroupMode::kCrossReplica:
case CollectiveOpGroupMode::kCrossReplicaAndPartition: {
int64_t replica_subgroup_size =
hlo->replica_groups().empty()
? config.replica_count()
: hlo->replica_groups()[0].replica_ids_size();
if (group_mode == CollectiveOpGroupMode::kCrossReplicaAndPartition) {
replica_subgroup_size *= config.num_partitions();
}
return replica_subgroup_size;
}
case CollectiveOpGroupMode::kFlattenedID:
return hlo->replica_groups()[0].replica_ids_size();
case CollectiveOpGroupMode::kCrossPartition:
return hlo->replica_groups().empty()
? config.num_partitions()
: hlo->replica_groups()[0].replica_ids_size();
}
}
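// Verifies that every computation reachable from `comp` through called
// computations runs on the same execution thread as `comp`, optionally
// skipping the callees of asynchronous ops.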
absl::Status CheckNestedComputationThreadNameEqual(
const HloComputation* comp, bool skip_nested_async_op_check) {
for (const HloInstruction* instr : comp->instructions()) {
if (skip_nested_async_op_check && instr->IsAsynchronous()) {
continue;
}
for (const HloComputation* called_cmp : instr->called_computations()) {
if (called_cmp->execution_thread() != comp->execution_thread()) {
return Internal(
"Nested computations expects same computation's thread name: %s vs "
"%s, in called computation `%s` vs caller computation `%s`",
called_cmp->execution_thread(), comp->execution_thread(),
called_cmp->name(), comp->name());
}
TF_RETURN_IF_ERROR(CheckNestedComputationThreadNameEqual(
called_cmp, skip_nested_async_op_check));
}
}
return absl::OkStatus();
}
}
absl::Status ShapeVerifier::CheckParameterCount(
const HloInstruction* calling_instruction,
const HloComputation* computation, int expected) {
if (computation->num_parameters() != expected) {
return Internal(
"Expected computation %s called from %s to have %d parameters, has %d",
computation->name(), calling_instruction->name(), expected,
computation->num_parameters());
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::Preprocess(HloInstruction* hlo) {
if (!hlo->called_computations().empty() && !IsCallerInstruction(hlo)) {
return Internal(
"Called computations specified for non-caller instruction %s",
hlo->ToString());
}
std::optional<int> arity = HloOpcodeArity(hlo->opcode());
if (arity) {
TF_RETURN_IF_ERROR(CheckOperandCount(hlo, *arity));
}
if (!opts_.allow_unbounded_dynamism && hlo->shape().is_unbounded_dynamic()) {
return InvalidArgument("Unbounded dynamism is disabled for instruction: %s",
hlo->ToString());
}
if (hlo->shape().has_layout()) {
if (hlo->shape().layout().minor_to_major_size() !=
hlo->shape().dimensions_size()) {
return InvalidArgument(
"Instruction has mismatched minor-to-major size and dimension size: "
"%s",
hlo->ToString());
}
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleElementwiseUnary(HloInstruction* hlo) {
return CheckUnaryShape(hlo);
}
absl::Status ShapeVerifier::HandleElementwiseBinary(HloInstruction* hlo) {
return CheckBinaryShape(hlo);
}
absl::Status ShapeVerifier::HandleClamp(HloInstruction* clamp) {
return CheckTernaryShape(clamp);
}
absl::Status ShapeVerifier::HandleSelect(HloInstruction* select) {
return CheckTernaryShape(select);
}
absl::Status ShapeVerifier::HandleConcatenate(HloInstruction* concatenate) {
std::vector<const Shape*> operand_shapes;
for (const HloInstruction* operand : concatenate->operands()) {
operand_shapes.push_back(&operand->shape());
}
return CheckShape(concatenate,
ShapeInference::InferConcatOpShape(
operand_shapes, concatenate->concatenate_dimension()));
}
absl::Status ShapeVerifier::HandleConvert(HloInstruction* convert) {
return CheckShape(convert, ShapeInference::InferConvertShape(
convert->operand(0)->shape(),
convert->shape().element_type()));
}
absl::Status ShapeVerifier::HandleBitcastConvert(HloInstruction* convert) {
return CheckShape(convert, ShapeInference::InferBitcastConvertShape(
convert->operand(0)->shape(),
convert->shape().element_type()));
}
absl::Status ShapeVerifier::HandleStochasticConvert(HloInstruction* convert) {
return CheckShape(
convert, ShapeInference::InferStochasticConvertShape(
convert->operand(0)->shape(), convert->operand(1)->shape(),
convert->shape().element_type()));
}
absl::Status ShapeVerifier::HandleCopy(HloInstruction* copy) {
return CheckUnaryShape(copy);
}
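// Beyond ordinary shape inference, dot verification checks that
// PACKED_NIBBLE precision is only used with two 8-bit integer operands and
// that each sparsity descriptor's metadata operand has the inferred metadata
// shape.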
absl::Status ShapeVerifier::HandleDot(HloInstruction* dot) {
auto sparsity = Cast<HloDotInstruction>(dot)->sparsity();
TF_RETURN_IF_ERROR(
CheckOperandCount(dot, HloDotInstruction::kOperands + sparsity.size()));
TF_ASSIGN_OR_RETURN(
const Shape expected,
ShapeInference::InferDotOpShape(
dot->operand(0)->shape(), dot->operand(1)->shape(),
dot->dot_dimension_numbers(),
dot->shape().element_type(), sparsity));
if (auto nibble_count =
absl::c_count(dot->precision_config().operand_precision(),
PrecisionConfig::PACKED_NIBBLE)) {
if (nibble_count == 1) {
return InvalidArgument("Dot cannot have a single packed nibble argument");
}
if (nibble_count == 2) {
if (!ShapeUtil::ElementIsIntegralWithBits(dot->operand(0)->shape(), 8)) {
return InvalidArgument(
"Packed nibble precision can only apply to 8 bit integers. LHS is "
"%s.",
dot->operand(0)->ToString());
}
if (!ShapeUtil::ElementIsIntegralWithBits(dot->operand(1)->shape(), 8)) {
return InvalidArgument(
"Packed nibble precision can only apply to 8 bit integers. RHS is "
"%s.",
dot->operand(1)->ToString());
}
}
}
for (int i = 0; i < sparsity.size(); ++i) {
const SparsityDescriptor& descriptor = sparsity[i];
TF_RET_CHECK(descriptor.index() == 0 || descriptor.index() == 1);
TF_ASSIGN_OR_RETURN(const Shape expected_metadata_shape,
ShapeInference::InferSparseDotMetadataShape(
dot->operand(descriptor.index())->shape(),
dot->dot_dimension_numbers(), descriptor));
const Shape actual_metadata_shape =
dot->operand(HloDotInstruction::kOperands + i)->shape();
if (!ShapeUtil::Compatible(actual_metadata_shape,
expected_metadata_shape)) {
return Internal(
"Expected sparse dot metadata to have shape equal to %s, actual "
"shape is %s:\n%s",
StringifyShape(expected_metadata_shape),
StringifyShape(actual_metadata_shape), dot->ToString());
}
}
return CheckShape(dot, expected);
}
absl::Status ShapeVerifier::HandleConvolution(HloInstruction* convolution) {
TF_ASSIGN_OR_RETURN(
Shape expected,
ShapeInference::InferConvolveShape(
convolution->operand(0)->shape(), convolution->operand(1)->shape(),
convolution->feature_group_count(), convolution->batch_group_count(),
convolution->window(), convolution->convolution_dimension_numbers(),
convolution->shape().element_type()));
if (auto nibble_count =
absl::c_count(convolution->precision_config().operand_precision(),
PrecisionConfig::PACKED_NIBBLE)) {
if (nibble_count == 1) {
return InvalidArgument(
"Convolution cannot have a single packed nibble argument");
}
if (nibble_count == 2) {
if (convolution->feature_group_count() != 1) {
return InvalidArgument(
"Packed nibble precision does not support feature group count "
"%s.",
convolution->ToString());
}
if (convolution->batch_group_count() != 1) {
return InvalidArgument(
"Packed nibble precision does not support batch group count "
"%s.",
convolution->ToString());
}
if (!ShapeUtil::ElementIsIntegralWithBits(
convolution->operand(0)->shape(), 8)) {
return InvalidArgument(
"Packed nibble precision can only apply to 8 bit integers. LHS is "
"%s.",
convolution->operand(0)->ToString());
}
if (!ShapeUtil::ElementIsIntegralWithBits(
convolution->operand(1)->shape(), 8)) {
return InvalidArgument(
"Packed nibble precision can only apply to 8 bit integers. RHS is "
"%s.",
convolution->operand(1)->ToString());
}
}
}
return CheckShape(convolution, expected);
}
absl::Status ShapeVerifier::HandleFft(HloInstruction* fft) {
TF_ASSIGN_OR_RETURN(
const Shape expected,
ShapeInference::InferFftShape(fft->operand(0)->shape(), fft->fft_type(),
fft->fft_length()));
return CheckShape(fft, expected);
}
absl::Status ShapeVerifier::HandleTriangularSolve(HloInstruction* hlo) {
TF_ASSIGN_OR_RETURN(const Shape expected,
ShapeInference::InferTriangularSolveShape(
hlo->operand(0)->shape(), hlo->operand(1)->shape(),
hlo->triangular_solve_options()));
return CheckShape(hlo, expected);
}
absl::Status ShapeVerifier::HandleCholesky(HloInstruction* hlo) {
TF_RETURN_IF_ERROR(CheckOperandCount(hlo, 1));
TF_ASSIGN_OR_RETURN(const Shape expected, ShapeInference::InferCholeskyShape(
hlo->operand(0)->shape()));
return CheckShape(hlo, expected);
}
absl::Status ShapeVerifier::HandleOptimizationBarrier(HloInstruction* hlo) {
TF_RETURN_IF_ERROR(CheckOperandCount(hlo, 1));
return CheckShape(hlo, hlo->operand(0)->shape());
}
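// In layout-insensitive mode two shapes only need to be compatible;
// otherwise they are compared with the supplied Shape::Equal predicate.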
bool ShapeVerifier::ShapesSame(const Shape& a, const Shape& b,
Shape::Equal equal) {
if (!opts_.layout_sensitive) {
return ShapeUtil::Compatible(a, b);
}
return equal(a, b);
}
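// Validates a collective's replica groups: groups must be non-empty, cover
// each id exactly once, match the expected count for the given group mode,
// and (when requested) be of uniform size. Flattened-id mode requires
// explicitly specified groups.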
static absl::Status CheckReplicaGroups(HloInstruction* hlo,
CollectiveOpGroupMode group_mode,
bool uniform_replica_group_size = true) {
if (!hlo->replica_groups().empty()) {
absl::flat_hash_set<int64_t> replicas_seen;
for (const ReplicaGroup& g : hlo->replica_groups()) {
if (g.replica_ids().empty()) {
return Internal("Instruction cannot have an empty replica group: %s",
hlo->ToString());
}
for (int64_t i : g.replica_ids()) {
if (!replicas_seen.insert(i).second) {
return Internal(
"Replica %d is repeated in instruction's replica-groups: %s", i,
hlo->ToString());
}
}
}
    const int64_t n = replicas_seen.size();
    for (int64_t i = 0; i < n; ++i) {
if (!replicas_seen.count(i)) {
return Internal(
"Replica %d is not named in instruction's replica-groups: %s", i,
hlo->ToString());
}
}
int64_t replica_count = hlo->GetModule()->config().replica_count();
int64_t num_partitions = hlo->GetModule()->config().num_partitions();
switch (group_mode) {
case CollectiveOpGroupMode::kCrossReplica:
case CollectiveOpGroupMode::kCrossReplicaAndPartition: {
TF_RET_CHECK(replica_count == 1 || n == replica_count)
<< "In " << CollectiveOpGroupModeToString(group_mode)
<< " mode, replica groups should contain " << replica_count
<< " replicas, but found " << n << ": " << hlo->ToString();
break;
}
case CollectiveOpGroupMode::kCrossPartition: {
TF_RET_CHECK(num_partitions == 1 || n == num_partitions)
<< "In " << CollectiveOpGroupModeToString(group_mode)
<< " mode, replica groups should contain " << num_partitions
<< " partitions, but found " << n << ": " << hlo->ToString();
break;
}
case CollectiveOpGroupMode::kFlattenedID: {
const int64_t num_flattened_ids = replica_count * num_partitions;
TF_RET_CHECK(num_flattened_ids == 1 || n == num_flattened_ids)
<< "In " << CollectiveOpGroupModeToString(group_mode)
<< " mode, replica groups should contain " << num_flattened_ids
<< " flattened IDs, but found " << n << ": " << hlo->ToString();
break;
}
}
if (uniform_replica_group_size) {
int64_t size = hlo->replica_groups()[0].replica_ids_size();
for (const ReplicaGroup& g : hlo->replica_groups()) {
TF_RET_CHECK(size == g.replica_ids_size())
<< "Replica groups expected to be of uniform size";
}
}
} else {
TF_RET_CHECK(group_mode != CollectiveOpGroupMode::kFlattenedID)
<< "Replica groups must be specified in flattened-id mode";
}
return absl::OkStatus();
}
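// Shared checks for all-gather and all-gather-start: validates replica
// groups and the gather dimension, and requires the inferred shard count to
// match the subgroup size. The shard count is returned via
// computed_shard_count.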
static absl::Status CheckCommonAllGatherInvariants(
HloInstruction* hlo, int64_t* computed_shard_count) {
auto ag = Cast<HloAllGatherInstruction>(hlo);
CHECK_NE(computed_shard_count, nullptr) << "Expected a shard count as input";
TF_ASSIGN_OR_RETURN(CollectiveOpGroupMode group_mode,
GetCollectiveOpGroupMode(ag->channel_id().has_value(),
ag->use_global_device_ids()));
TF_RETURN_IF_ERROR(CheckReplicaGroups(ag, group_mode));
TF_RET_CHECK(ag->all_gather_dimension() >= 0);
TF_RET_CHECK(ag->operand_count() >= 1);
  int64_t shard_count = 0;
for (int64_t i = 0; i < ag->operand_count(); ++i) {
TF_RET_CHECK(ag->all_gather_dimension() < ag->operand(i)->shape().rank());
Shape output_shape;
if (hlo->opcode() == HloOpcode::kAllGather) {
output_shape = (ag->operand_count() == 1) ? ag->shape()
: ag->shape().tuple_shapes(i);
} else {
TF_RET_CHECK(hlo->opcode() == HloOpcode::kAllGatherStart);
output_shape = (ag->operand_count() == 1)
? ag->shape().tuple_shapes(1)
: ag->shape().tuple_shapes(1).tuple_shapes(i);
}
TF_RET_CHECK(ag->all_gather_dimension() < output_shape.rank());
if (i == 0) {
shard_count = CeilOfRatio(
output_shape.dimensions(ag->all_gather_dimension()),
ag->operand(i)->shape().dimensions(ag->all_gather_dimension()));
}
}
int64_t subgroup_size = GetSubgroupSize(ag, group_mode);
TF_RET_CHECK(subgroup_size == 1 || shard_count == subgroup_size)
<< "shard_count = " << shard_count
<< ", subgroup_size = " << subgroup_size << ", " << hlo->ToString();
*computed_shard_count = shard_count;
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleAllGather(HloInstruction* hlo) {
auto ag = Cast<HloAllGatherInstruction>(hlo);
int64_t shard_count;
TF_RETURN_IF_ERROR(CheckCommonAllGatherInvariants(hlo, &shard_count));
std::vector<const Shape*> operand_shapes;
for (const HloInstruction* operand : hlo->operands()) {
operand_shapes.push_back(&operand->shape());
}
return CheckShape(
ag, ShapeInference::InferAllGatherShape(
operand_shapes, ag->all_gather_dimension(), shard_count));
}
absl::Status ShapeVerifier::HandleAllGatherStart(HloInstruction* hlo) {
auto ag = Cast<HloAllGatherInstruction>(hlo);
int64_t shard_count;
TF_RETURN_IF_ERROR(CheckCommonAllGatherInvariants(hlo, &shard_count));
std::vector<const Shape*> operand_shapes;
for (const HloInstruction* operand : hlo->operands()) {
operand_shapes.push_back(&operand->shape());
}
return CheckShape(
ag, ShapeInference::InferAllGatherStartShape(
operand_shapes, ag->all_gather_dimension(), shard_count));
}
absl::Status ShapeVerifier::HandleAllGatherDone(HloInstruction* hlo) {
return CheckShape(
hlo, ShapeInference::InferAllGatherDoneShape(hlo->operand(0)->shape()));
}
absl::Status ShapeVerifier::HandleAllReduce(HloInstruction* hlo) {
auto ar = Cast<HloAllReduceInstruction>(hlo);
TF_ASSIGN_OR_RETURN(CollectiveOpGroupMode group_mode,
GetCollectiveOpGroupMode(ar->channel_id().has_value(),
ar->use_global_device_ids()));
TF_RETURN_IF_ERROR(
CheckReplicaGroups(ar, group_mode, false));
std::vector<const Shape*> operand_shapes;
for (const HloInstruction* operand : hlo->operands()) {
operand_shapes.push_back(&operand->shape());
}
return CheckShape(hlo, ShapeInference::InferAllReduceShape(operand_shapes));
}
absl::Status ShapeVerifier::HandleReduceScatter(HloInstruction* hlo) {
auto ars = Cast<HloReduceScatterInstruction>(hlo);
TF_ASSIGN_OR_RETURN(CollectiveOpGroupMode group_mode,
GetCollectiveOpGroupMode(ars->channel_id().has_value(),
ars->use_global_device_ids()));
TF_RETURN_IF_ERROR(CheckReplicaGroups(ars, group_mode));
TF_RET_CHECK(ars->scatter_dimension() >= 0);
TF_RET_CHECK(ars->operand_count() >= 1);
for (int64_t i = 0; i < ars->operand_count(); ++i) {
TF_RET_CHECK(ars->scatter_dimension() < ars->operand(i)->shape().rank());
const Shape& output_shape = (ars->operand_count() == 1)
? ars->shape()
: ars->shape().tuple_shapes(i);
TF_RET_CHECK(ars->scatter_dimension() < output_shape.rank());
}
const Shape& output0_shape =
(ars->operand_count() == 1) ? ars->shape() : ars->shape().tuple_shapes(0);
int64_t shard_count =
CeilOfRatio(ars->operand(0)->shape().dimensions(ars->scatter_dimension()),
output0_shape.dimensions(ars->scatter_dimension()));
int64_t subgroup_size = GetSubgroupSize(ars, group_mode);
TF_RET_CHECK(subgroup_size == 1 || shard_count == subgroup_size)
<< "shard_count = " << shard_count
<< ", subgroup_size = " << subgroup_size << ", " << hlo->ToString();
std::vector<const Shape*> operand_shapes;
for (const HloInstruction* operand : hlo->operands()) {
operand_shapes.push_back(&operand->shape());
}
return CheckShape(ars,
ShapeInference::InferReduceScatterShape(
operand_shapes, ars->scatter_dimension(), shard_count));
}
absl::Status ShapeVerifier::HandleAllReduceStart(HloInstruction* hlo) {
auto ar = Cast<HloAllReduceInstruction>(hlo);
TF_ASSIGN_OR_RETURN(CollectiveOpGroupMode group_mode,
GetCollectiveOpGroupMode(ar->channel_id().has_value(),
ar->use_global_device_ids()));
TF_RETURN_IF_ERROR(
CheckReplicaGroups(ar, group_mode, false));
std::vector<const Shape*> operand_shapes;
for (const HloInstruction* operand : hlo->operands()) {
operand_shapes.push_back(&operand->shape());
}
return CheckShape(hlo,
ShapeInference::InferAllReduceStartShape(operand_shapes));
}
absl::Status ShapeVerifier::HandleAllReduceDone(HloInstruction* hlo) {
return CheckShape(
hlo, ShapeInference::InferAllReduceDoneShape(hlo->operand(0)->shape()));
}
absl::Status ShapeVerifier::HandleAllToAll(HloInstruction* hlo) {
auto* all_to_all = Cast<HloAllToAllInstruction>(hlo);
TF_ASSIGN_OR_RETURN(CollectiveOpGroupMode group_mode,
GetCollectiveOpGroupMode(
all_to_all->channel_id().has_value(), std::nullopt));
TF_RETURN_IF_ERROR(CheckReplicaGroups(hlo, group_mode));
TF_RET_CHECK(all_to_all != nullptr);
const int64_t split_count = GetSubgroupSize(all_to_all, group_mode);
if (all_to_all->split_dimension()) {
TF_RET_CHECK(hlo->operand_count() == 1);
return CheckShape(
hlo, ShapeInference::InferAllToAllShape(
hlo->operand(0)->shape(), *all_to_all->split_dimension(),
*all_to_all->split_dimension(), split_count));
} else {
TF_RET_CHECK(hlo->operand_count() == split_count);
std::vector<const Shape*> operand_shapes;
for (const HloInstruction* operand : hlo->operands()) {
operand_shapes.push_back(&operand->shape());
}
return CheckShape(hlo,
ShapeInference::InferAllToAllTupleShape(operand_shapes));
}
}
absl::Status ShapeVerifier::HandlePartitionId(HloInstruction* hlo) {
return CheckShape(hlo, ShapeUtil::MakeShape(U32, {}));
}
absl::Status ShapeVerifier::HandleReplicaId(HloInstruction* hlo) {
return CheckShape(hlo, ShapeUtil::MakeShape(U32, {}));
}
namespace {
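// Checks that a collective-permute buffer offset is a tuple whose entries
// (either all arrays or all tuples of per-dimension indices) are consistent
// with the buffer's rank.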
absl::Status CheckBufferOffset(const Shape& buffer_shape,
const Shape& buffer_offset_shape) {
if (!buffer_offset_shape.IsTuple()) {
return Internal("Buffer offset is not tuple.");
}
bool all_is_array =
absl::c_all_of(buffer_offset_shape.tuple_shapes(),
[](const Shape& shape) { return shape.IsArray(); });
bool all_is_tuple =
absl::c_all_of(buffer_offset_shape.tuple_shapes(),
[](const Shape& shape) { return shape.IsTuple(); });
if (!all_is_array && !all_is_tuple) {
return Internal(
"Buffer offset should either be a tuple of arrays or "
" a tuple of tuples.");
}
if (all_is_tuple) {
if (absl::c_any_of(buffer_offset_shape.tuple_shapes(),
[&buffer_shape](const Shape& shape) {
return ShapeUtil::TupleElementCount(shape) !=
buffer_shape.rank();
})) {
return Internal(
"Buffer offset index should have the same number of "
"elements as the buffer's rank.");
}
} else {
if (buffer_offset_shape.tuple_shapes_size() != buffer_shape.rank()) {
return Internal(
"Buffer offset index should have the same number of "
"elements as the buffer's rank.");
}
}
return absl::OkStatus();
}
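// For the four-operand (in-place) form of collective-permute, verifies that
// the input/output buffers and their offset operands are structurally
// consistent; the single-operand form needs no extra checks.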
absl::Status CheckInplaceCollectivePermute(HloInstruction* collective_permute) {
if (collective_permute->operand_count() == 1) {
return absl::OkStatus();
}
if (collective_permute->operand_count() != 4) {
return Internal("Unexpected number of operands: %d.",
collective_permute->operand_count());
}
const Shape& input_buffer_shape = collective_permute->operand(0)->shape();
const Shape& output_buffer_shape = collective_permute->operand(1)->shape();
const Shape& input_offset_shape = collective_permute->operand(2)->shape();
const Shape& output_offset_shape = collective_permute->operand(3)->shape();
if (input_buffer_shape.IsArray() && output_buffer_shape.IsArray()) {
absl::Status check_input_buffer_offset =
CheckBufferOffset(input_buffer_shape, input_offset_shape);
if (!check_input_buffer_offset.ok()) {
return check_input_buffer_offset;
}
absl::Status check_output_buffer_offset =
CheckBufferOffset(output_buffer_shape, output_offset_shape);
if (!check_output_buffer_offset.ok()) {
return check_output_buffer_offset;
}
} else if (input_buffer_shape.IsTuple() && output_buffer_shape.IsTuple()) {
if (ShapeUtil::TupleElementCount(input_buffer_shape) !=
ShapeUtil::TupleElementCount(output_buffer_shape)) {
return Internal("Unmatching input buffers and output buffers.");
}
if (!input_offset_shape.IsTuple() ||
ShapeUtil::TupleElementCount(input_offset_shape) !=
ShapeUtil::TupleElementCount(input_buffer_shape)) {
return Internal("Unmatching input buffers and input offset.");
}
for (int i = 0; i < input_buffer_shape.tuple_shapes_size(); ++i) {
absl::Status check_input_buffer_offset =
CheckBufferOffset(input_buffer_shape.tuple_shapes(i),
input_offset_shape.tuple_shapes(i));
if (!check_input_buffer_offset.ok()) {
return check_input_buffer_offset;
}
}
if (!output_offset_shape.IsTuple() ||
ShapeUtil::TupleElementCount(output_offset_shape) !=
ShapeUtil::TupleElementCount(output_buffer_shape)) {
return Internal("Unmatching output buffers and output offset.");
}
for (int i = 0; i < output_buffer_shape.tuple_shapes_size(); ++i) {
absl::Status check_output_buffer_offset =
CheckBufferOffset(output_buffer_shape.tuple_shapes(i),
output_offset_shape.tuple_shapes(i));
if (!check_output_buffer_offset.ok()) {
return check_output_buffer_offset;
}
}
} else {
return Internal("Unmatching input buffers and output buffers.");
}
return absl::OkStatus();
}
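// Verifies that every source and target id in the source-target pairs is in
// range and appears no more often than the offset operands allow (exactly
// once for the non-in-place form).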
absl::Status CheckDuplicatedSourceOrTarget(HloInstruction* hlo,
CollectiveOpGroupMode group_mode) {
const HloModuleConfig& config = hlo->GetModule()->config();
const int64_t limit = group_mode == CollectiveOpGroupMode::kCrossReplica
? config.replica_count()
: config.num_partitions();
absl::flat_hash_map<int64_t, std::vector<int64_t>> seen_source_to_targets;
absl::flat_hash_map<int64_t, std::vector<int64_t>> seen_target_to_sources;
int allowed_seen_count = 1;
if (hlo->operand_count() == 4) {
if (hlo->operand(0)->shape().IsArray()) {
allowed_seen_count = hlo->operand(2)->shape().tuple_shapes_size();
} else {
allowed_seen_count =
hlo->operand(2)->shape().tuple_shapes(0).tuple_shapes_size();
}
}
for (const auto& p : hlo->source_target_pairs()) {
TF_RET_CHECK(p.first >= 0)
<< "Source " << p.first
<< " in the instruction's source-target pair must be >= 0 : "
<< hlo->ToString();
TF_RET_CHECK(limit == 1 || p.first < limit)
<< "Source " << p.first
<< " in the instruction's source-target pair must be < " << limit
<< " : " << hlo->ToString();
if (seen_source_to_targets.contains(p.first) &&
seen_source_to_targets[p.first].size() == allowed_seen_count) {
if (allowed_seen_count == 1) {
return Internal(
"Source %d appears more than once in instruction's source-target "
"pairs: %s",
p.first, hlo->ToString());
} else {
return Internal(
"Source %d appears more than %d times in instruction's "
"source-target "
"pairs: %s",
p.first, allowed_seen_count, hlo->ToString());
}
} else {
seen_source_to_targets[p.first].push_back(p.second);
}
TF_RET_CHECK(p.second >= 0)
<< "Target " << p.second
<< " in the instruction's source-target pair must be >= 0 : "
<< hlo->ToString();
TF_RET_CHECK(limit == 1 || p.second < limit)
<< "Target " << p.second
<< " in the instruction's source-target pair must be < " << limit
<< " : " << hlo->ToString();
if (seen_target_to_sources.contains(p.second) &&
seen_target_to_sources[p.second].size() == allowed_seen_count) {
if (allowed_seen_count == 1) {
return Internal(
"Target %d appears more than once in instruction's source-target "
"pairs: %s",
p.second, hlo->ToString());
} else {
return Internal(
"Target %d appears more than %d times in instruction's "
"source-target "
"pairs: %s",
p.second, allowed_seen_count, hlo->ToString());
}
} else {
seen_target_to_sources[p.second].push_back(p.first);
}
}
return absl::OkStatus();
}
}
absl::Status ShapeVerifier::HandleCollectiveBroadcast(HloInstruction* hlo) {
std::vector<const Shape*> operand_shapes;
for (const HloInstruction* operand : hlo->operands()) {
operand_shapes.push_back(&operand->shape());
}
return CheckShape(
hlo, ShapeInference::InferCollectiveBroadcastShape(operand_shapes));
}
absl::Status ShapeVerifier::HandleCollectivePermute(HloInstruction* hlo) {
TF_ASSIGN_OR_RETURN(
CollectiveOpGroupMode group_mode,
GetCollectiveOpGroupMode(hlo->channel_id().has_value(),
std::nullopt));
TF_RETURN_IF_ERROR(CheckInplaceCollectivePermute(hlo));
TF_RETURN_IF_ERROR(CheckDuplicatedSourceOrTarget(hlo, group_mode));
std::vector<const Shape*> operand_shapes;
absl::c_transform(
hlo->operands(), std::back_inserter(operand_shapes),
[](const HloInstruction* operand) { return &(operand->shape()); });
return CheckShape(
hlo, ShapeInference::InferCollectivePermuteShape(operand_shapes));
}
absl::Status ShapeVerifier::HandleCollectivePermuteStart(HloInstruction* hlo) {
TF_ASSIGN_OR_RETURN(
CollectiveOpGroupMode group_mode,
GetCollectiveOpGroupMode(hlo->channel_id().has_value(),
std::nullopt));
TF_RETURN_IF_ERROR(CheckInplaceCollectivePermute(hlo));
TF_RETURN_IF_ERROR(CheckDuplicatedSourceOrTarget(hlo, group_mode));
std::vector<const Shape*> operand_shapes;
absl::c_transform(
hlo->operands(), std::back_inserter(operand_shapes),
[](const HloInstruction* operand) { return &(operand->shape()); });
std::vector<Shape> context_shapes;
if (hlo->shape().tuple_shapes_size() > 2) {
context_shapes = std::vector<Shape>(hlo->shape().tuple_shapes().begin() + 2,
hlo->shape().tuple_shapes().end());
}
return CheckShape(hlo, ShapeInference::InferCollectivePermuteStartShape(
operand_shapes, context_shapes));
}
absl::Status ShapeVerifier::HandleCollectivePermuteDone(HloInstruction* hlo) {
return CheckShape(hlo, ShapeInference::InferCollectivePermuteDoneShape(
hlo->operand(0)->shape()));
}
absl::Status ShapeVerifier::HandleReducePrecision(
HloInstruction* reduce_precision) {
return CheckShape(reduce_precision, ShapeInference::InferReducePrecisionShape(
reduce_precision->operand(0)->shape(),
reduce_precision->exponent_bits(),
reduce_precision->mantissa_bits()));
}
absl::Status ShapeVerifier::CheckIsTokenOperand(
const HloInstruction* instruction, int64_t operand_no) {
const HloInstruction* token = instruction->operand(operand_no);
if (!ShapeUtil::Equal(token->shape(), ShapeUtil::MakeTokenShape())) {
return Internal(
"Expected operand %d to be token-shaped, actual shape is "
"%s:\n%s",
operand_no, StringifyShape(token->shape()), instruction->ToString());
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::CheckOperandAndParameter(
const HloInstruction* instruction, int64_t operand_number,
const HloComputation* computation, int64_t parameter_number) {
const HloInstruction* operand = instruction->operand(operand_number);
const HloInstruction* parameter =
computation->parameter_instruction(parameter_number);
if (!ShapesSame(operand->shape(), parameter->shape())) {
return Internal("Operand %s shape does not match parameter's %s in %s",
operand->ToString(), parameter->ToString(),
instruction->ToString());
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleInfeed(HloInstruction* instruction) {
HloInfeedInstruction* infeed = Cast<HloInfeedInstruction>(instruction);
TF_RETURN_IF_ERROR(CheckIsTokenOperand(instruction, 0));
return CheckShape(infeed,
ShapeUtil::MakeTupleShape(
{infeed->infeed_shape(), ShapeUtil::MakeTokenShape()}),
true);
}
absl::Status ShapeVerifier::HandleOutfeed(HloInstruction* instruction) {
HloOutfeedInstruction* outfeed = Cast<HloOutfeedInstruction>(instruction);
TF_RETURN_IF_ERROR(CheckIsTokenOperand(instruction, 1));
if (!ShapesSame(outfeed->outfeed_shape(), outfeed->operand(0)->shape())) {
return Internal(
"Expected outfeed shape to be equal to operand's shape %s, "
"actual shape is %s:\n%s",
StringifyShape(outfeed->operand(0)->shape()),
StringifyShape(outfeed->outfeed_shape()), outfeed->ToString());
}
return CheckShape(outfeed, ShapeUtil::MakeTokenShape());
}
bool ShapeVerifier::HasCompatibleElementTypes(const Shape& shape_0,
const Shape& shape_1,
const Shape& result_shape) {
return ShapeUtil::SameElementType(shape_0, shape_1) &&
(ShapeUtil::SameElementType(shape_0, result_shape) ||
(opts_.allow_mixed_precision &&
ShapeUtil::SameElementTypeIgnoringFpPrecision(shape_0,
result_shape)));
}
absl::Status ShapeVerifier::HandleRng(HloInstruction* instruction) {
TF_RETURN_IF_ERROR(CheckOperandCount(instruction, 2));
const Shape& shape_0 = instruction->operand(0)->shape();
const Shape& shape_1 = instruction->operand(1)->shape();
if (!ShapeUtil::IsScalar(shape_0) || !ShapeUtil::IsScalar(shape_1)) {
return Internal(
"Expected scalar types for the two operands of Rng instruction: %s",
instruction->ToString());
}
if (!HasCompatibleElementTypes(shape_0, shape_1, instruction->shape())) {
return Internal(
"Expected compatible element types for the result and the two operands"
" of Rng instruction: %s",
instruction->ToString());
}
PrimitiveType element_type = shape_0.element_type();
switch (instruction->random_distribution()) {
case RNG_UNIFORM:
if (!primitive_util::IsFloatingPointType(element_type) &&
!primitive_util::IsIntegralType(element_type) &&
element_type != PRED) {
return Internal(
"Element type not supported."
" Expected element to be of floating point type, integral type or"
" predicate type for RngUniform: %s",
instruction->ToString());
}
break;
case RNG_NORMAL:
if (!primitive_util::IsFloatingPointType(element_type)) {
return Internal(
"Element type not supported."
" Expected element to be FloatingPointType for RngNormal: %s",
instruction->ToString());
}
break;
default:
return Internal(
"Invalid Rng distribution %s",
RandomDistribution_Name(instruction->random_distribution()));
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleRngBitGenerator(HloInstruction* hlo) {
if (!hlo->shape().IsTuple()) {
return absl::OkStatus();
}
if (hlo->shape().IsTuple() && hlo->shape().tuple_shapes_size() != 2) {
return Internal(
"Expected tuple shape with 2 elements for RngBitGenerator. Got: %s",
hlo->shape().ToString(true));
}
if (!ShapeUtil::Compatible(hlo->operand(0)->shape(),
hlo->shape().tuple_shapes(0))) {
return Internal(
"Expected state shape to match between input and output for "
"RngBitGenerator. Got %s vs. %s",
hlo->operand(0)->shape().ToString(true),
hlo->shape().tuple_shapes(0).ToString());
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleRngGetAndUpdateState(
HloInstruction* instruction) {
TF_RETURN_IF_ERROR(CheckOperandCount(instruction, 0));
const Shape& result_shape = instruction->shape();
const Shape expected_shape = ShapeUtil::MakeShape(U64, {2});
if (!ShapeUtil::Compatible(result_shape, expected_shape)) {
return Internal(
"Invalid RngGetAndUpdateState, expect result to have shape %s, got %s ",
StringifyShape(expected_shape), StringifyShape(result_shape));
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleReverse(HloInstruction* reverse) {
return CheckShape(
reverse, ShapeInference::InferReverseShape(reverse->operand(0)->shape(),
reverse->dimensions()));
}
absl::Status ShapeVerifier::HandleTopK(HloInstruction* hlo) {
return CheckShape(
hlo, ShapeInference::InferTopKShape(hlo->operand(0)->shape(),
Cast<HloTopKInstruction>(hlo)->k()));
}
absl::Status ShapeVerifier::HandleSort(HloInstruction* hlo) {
HloSortInstruction* sort = Cast<HloSortInstruction>(hlo);
if (sort->operand_count() < 1) {
return Internal("Expected at least 1 operand for %s instruction: %s",
HloOpcodeString(sort->opcode()), sort->ToString());
}
HloComputation* compare = sort->to_apply();
Shape compare_shape = compare->root_instruction()->shape();
if (!ShapeUtil::Compatible(compare_shape, ShapeUtil::MakeShape(PRED, {}))) {
return Internal(
"The Sort compare computation shape does not lead to a scalar "
"predicate shape: %s",
StringifyShape(compare_shape));
}
TF_RETURN_IF_ERROR(
CheckParameterCount(sort, compare, sort->operand_count() * 2));
for (int64_t parameter_idx = 0; parameter_idx < compare->num_parameters();
++parameter_idx) {
int64_t operand_idx = parameter_idx / 2;
Shape expected_scalar_shape = ShapeUtil::MakeShape(
sort->operand(operand_idx)->shape().element_type(), {});
Shape actual_parameter_shape =
compare->parameter_instruction(parameter_idx)->shape();
if (!ShapeUtil::CompatibleIgnoringFpPrecision(expected_scalar_shape,
actual_parameter_shape)) {
return Internal(
"Expected the %lld-th parameter of the compare computation of sort "
"to have shape %s, but got %s",
parameter_idx, StringifyShape(expected_scalar_shape),
StringifyShape(actual_parameter_shape));
}
}
for (int64_t operand = 1; operand < sort->operand_count(); ++operand) {
if (!ShapeUtil::SameDimensions(sort->operand(0)->shape(),
sort->operand(operand)->shape())) {
return Internal(
"Expected sort to have to have the same dimensions for all operands. "
"First operand shape is: %s\n, shape (operand index %lld) is: %s",
StringifyShape(sort->operand(0)->shape()), operand,
StringifyShape(sort->operand(operand)->shape()));
}
}
if (sort->sort_dimension() >= sort->operand(0)->shape().rank()) {
return Internal(
"Expected the sort_dimension %d of sort to be smaller than the rank %d "
"of the operand(s).",
        sort->sort_dimension(), sort->operand(0)->shape().rank());
}
return CheckVariadicShape(sort);
}
absl::Status ShapeVerifier::HandleConstant(HloInstruction* constant) {
if (!Cast<HloConstantInstruction>(constant)->HasLiteral()) {
return Internal("Constant is required to have a valid literal: %s",
constant->ToString());
}
return CheckShape(constant, constant->literal().shape(),
true);
}
absl::Status ShapeVerifier::HandleIota(HloInstruction* hlo) {
auto* iota = Cast<HloIotaInstruction>(hlo);
if (!iota->shape().IsArray()) {
return Internal("Iota does not support non-array result.");
}
const int64_t rank = iota->shape().rank();
if (rank == 0) {
return Internal("Iota does not support scalars.");
}
int64_t iota_dimension = iota->iota_dimension();
if (iota_dimension >= rank || iota_dimension < 0) {
return Internal(
"The iota dimension cannot go beyond the operation rank or be "
"negative.");
}
PrimitiveType primitive_type = iota->shape().element_type();
if (!primitive_util::IsIntegralType(primitive_type) &&
!primitive_util::IsFloatingPointType(primitive_type) &&
!primitive_util::IsComplexType(primitive_type)) {
return InvalidArgument(
"Only support iota of integral, floating point or complex primitive "
"types, got %s",
PrimitiveType_Name(primitive_type));
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleGetTupleElement(
HloInstruction* get_tuple_element) {
return CheckShape(get_tuple_element,
ShapeInference::InferGetTupleElementShape(
get_tuple_element->operand(0)->shape(),
get_tuple_element->tuple_index()));
}
namespace {
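// Checks that the first num_operands_to_check operands have the same element
// type as the corresponding parameters of the to_apply computation.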
absl::Status SameElementTypesForOperandsAndToApplyParameters(
const HloInstruction& instruction, int64_t num_operands_to_check) {
const ProgramShape& to_apply = instruction.to_apply()->ComputeProgramShape();
for (int i = 0; i < num_operands_to_check; ++i) {
const Shape& parameter_shape = to_apply.parameters(i);
const Shape& operand_shape = instruction.operands()[i]->shape();
if (!ShapeUtil::SameElementType(parameter_shape, operand_shape)) {
return InvalidArgument(
"Shape mismatch between to_apply computation"
" parameter and operand %d in %s.",
i, instruction.ToString().c_str());
}
}
return absl::OkStatus();
}
}
absl::Status ShapeVerifier::HandleReduce(HloInstruction* reduce) {
if (reduce->operand_count() % 2 != 0) {
return Internal(
"Expected an even number of operands for %s instruction: %s",
HloOpcodeString(reduce->opcode()), reduce->ToString());
}
std::vector<const Shape*> operand_shapes;
for (const HloInstruction* operand : reduce->operands()) {
operand_shapes.push_back(&operand->shape());
}
TF_RETURN_IF_ERROR(
CheckShape(reduce, ShapeInference::InferReduceShape(
operand_shapes, reduce->dimensions(),
reduce->to_apply()->ComputeProgramShape())));
return opts_.allow_mixed_precision
? absl::OkStatus()
: SameElementTypesForOperandsAndToApplyParameters(
*reduce, reduce->operand_count());
}
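// Bitcast has no separately inferred shape; in layout-sensitive mode the
// physical sizes of operand and result must match, unless differing sizes
// are explicitly allowed and the static array data sizes still agree.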
absl::Status ShapeVerifier::HandleBitcast(HloInstruction* bitcast) {
const Shape& output_shape = bitcast->shape();
const Shape& operand_shape = bitcast->operand(0)->shape();
if (opts_.layout_sensitive &&
opts_.shape_size(output_shape) != opts_.shape_size(operand_shape)) {
if (!opts_.allow_bitcast_to_have_different_size ||
!(output_shape.is_static() && operand_shape.is_static() &&
(ShapeUtil::ArrayDataSize(output_shape) ==
ShapeUtil::ArrayDataSize(operand_shape)))) {
return Internal(
"%s: Bitcast cannot have different shape sizes of output (%d) and "
"operand "
"(%d) (%s) (%s)",
bitcast->ToString(), opts_.shape_size(output_shape),
opts_.shape_size(operand_shape), output_shape.ToString(true),
operand_shape.ToString(true));
}
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleBroadcast(HloInstruction* broadcast) {
const Shape& operand_shape = broadcast->operand(0)->shape();
TF_RET_CHECK(SameElementType(broadcast->shape(), operand_shape))
<< broadcast->ToString();
TF_RET_CHECK(operand_shape.rank() == broadcast->dimensions().size())
<< broadcast->ToString();
for (int64_t operand_dimension = 0; operand_dimension < operand_shape.rank();
++operand_dimension) {
int64_t output_dimension = broadcast->dimensions()[operand_dimension];
TF_RET_CHECK((output_dimension < broadcast->shape().rank()) &&
output_dimension >= 0 &&
(broadcast->shape().dimensions(output_dimension) ==
operand_shape.dimensions(operand_dimension)))
<< broadcast->ToString() << " operand shape " << operand_shape;
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleDynamicReshape(
HloInstruction* dynamic_reshape) {
const Shape& operand_shape = dynamic_reshape->operand(0)->shape();
TF_RET_CHECK(SameElementType(dynamic_reshape->shape(), operand_shape));
TF_RET_CHECK(ShapeUtil::ElementsIn(dynamic_reshape->shape()) ==
ShapeUtil::ElementsIn(operand_shape));
TF_RET_CHECK(dynamic_reshape->shape().rank() + 1 ==
dynamic_reshape->operand_count());
for (int64_t i = 1; i < dynamic_reshape->operand_count(); ++i) {
TF_RET_CHECK(dynamic_reshape->operand(i)->shape().element_type() == S32);
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleReshape(HloInstruction* reshape) {
const Shape& operand_shape = reshape->operand(0)->shape();
TF_RET_CHECK(SameElementType(reshape->shape(), operand_shape));
TF_RET_CHECK(ShapeUtil::ElementsIn(reshape->shape()) ==
ShapeUtil::ElementsIn(operand_shape));
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleTranspose(HloInstruction* transpose) {
return CheckShape(
transpose, ShapeInference::InferTransposeShape(
transpose->operand(0)->shape(), transpose->dimensions()));
}
absl::Status ShapeVerifier::HandleParameter(HloInstruction* hlo) {
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleFusion(HloInstruction* fusion) {
if (fusion->called_computations().size() != 1) {
return Internal("Fusion has a non-unary number of called computations (%s)",
fusion->ToString().c_str());
}
const Shape& root_computation_shape =
fusion->called_computations()[0]->root_instruction()->shape();
if (!ShapesSame(fusion->shape(), root_computation_shape)) {
return Internal(
"Fused computation shape (%s) is not equal to the fusion shape (%s)",
root_computation_shape.ToString(true), fusion->shape().ToString(true));
}
auto& fused_parameters = fusion->fused_parameters();
if (fused_parameters.size() > fusion->operand_count()) {
return Internal(
"Fused parameter count (%d) is greater than the number of operands (%d)"
" passed to the fusion instruction in: %s.",
fused_parameters.size(), fusion->operand_count(),
fusion->ToString().c_str());
}
for (HloInstruction* fused_param : fused_parameters) {
int64_t param_no = fused_param->parameter_number();
if (!ShapesSame(fused_param->shape(), fusion->operand(param_no)->shape())) {
return Internal(
"Shape mismatch between parameter number %d and its operand in "
"%s.",
param_no, fusion->ToString().c_str());
}
}
const HloFusionInstruction* casted_fusion =
DynCast<const HloFusionInstruction>(fusion);
for (const auto& pair : casted_fusion->output_to_operand_aliasing()) {
TF_RET_CHECK(pair.second.first < casted_fusion->operand_count())
<< "Invalid aliasing operand index.";
TF_RET_CHECK(ShapeUtil::IndexIsValid(
casted_fusion->operand(pair.second.first)->shape(), pair.second.second))
<< "Invalid aliasing operand shape index.";
TF_RET_CHECK(ShapeUtil::IndexIsValid(casted_fusion->shape(), pair.first))
<< "Invalid aliasing output shape index.";
const Shape& output_subshape =
ShapeUtil::GetSubshape(casted_fusion->shape(), pair.first);
const Shape& operand_subshape = ShapeUtil::GetSubshape(
casted_fusion->operand(pair.second.first)->shape(), pair.second.second);
if (opts_.layout_sensitive) {
if (casted_fusion->IsFused()) {
TF_RET_CHECK(
Shape::Equal().IgnoreTilesInLayout().IgnoreMemorySpaceInLayout()(
operand_subshape, output_subshape))
<< "Different aliasing shapes: "
<< operand_subshape.ToString(true) << " vs "
<< output_subshape.ToString(true);
} else {
TF_RET_CHECK(Shape::Equal()(operand_subshape, output_subshape))
<< "Different aliasing shapes: "
<< operand_subshape.ToString(true) << " vs "
<< output_subshape.ToString(true);
}
} else {
TF_RET_CHECK(ShapeUtil::Compatible(output_subshape, operand_subshape))
<< "Different aliasing shapes: " << operand_subshape.ToString()
<< " vs " << output_subshape.ToString();
}
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleCall(HloInstruction* call) {
TF_RETURN_IF_ERROR(
CheckParameterCount(call, call->to_apply(), call->operand_count()));
for (int64_t i = 0; i < call->to_apply()->num_parameters(); ++i) {
TF_RETURN_IF_ERROR(CheckOperandAndParameter(call, i, call->to_apply(), i));
}
if (call->is_composite()) {
TF_RET_CHECK(call->has_frontend_attributes())
<< "A composite call op must have frontend attributes";
auto map = call->frontend_attributes().map();
if (auto name = map.find("composite.name");
name == map.end() || name->second.empty()) {
return InvalidArgument(
"A composite call op must have frontend attributes with key "
"composite.name whose value is non-empty");
}
if (auto attributes = map.find("composite.attributes");
attributes != map.end() && attributes->second.empty()) {
return InvalidArgument(
"A composite call op must have frontend attributes with key "
"composite.attributes whose value is default: {} or non-empty");
}
if (auto version_str = map.find("composite.version");
version_str != map.end()) {
int64_t version = 0;
if (!absl::SimpleAtoi(version_str->second, &version) || version < 0) {
return InvalidArgument(
"A composite call op must have frontend attributes with a "
"composite.version whose value is a non-negative integer but got: "
"%s",
version_str->second);
}
}
}
return CheckShape(call, call->to_apply()->root_instruction()->shape());
}
absl::Status ShapeVerifier::HandleCustomCall(HloInstruction* instruction) {
const HloCustomCallInstruction* custom_call =
DynCast<const HloCustomCallInstruction>(instruction);
TF_RET_CHECK(custom_call != nullptr);
if (custom_call->layout_constrained() &&
!custom_call->IsCustomCall("LayoutConstraint")) {
TF_RET_CHECK(LayoutUtil::HasLayout(custom_call->shape()));
TF_RET_CHECK(custom_call->operand_count() ==
custom_call->operand_shapes_with_layout().size());
for (int64_t i = 0; i < custom_call->operand_count(); ++i) {
const Shape& operand_shape_with_layout =
custom_call->operand_shapes_with_layout()[i];
TF_RET_CHECK(ShapeUtil::Compatible(custom_call->operand(i)->shape(),
operand_shape_with_layout))
<< custom_call->operand(i)->shape().ToString(true) << " operand "
<< operand_shape_with_layout.ToString();
TF_RET_CHECK(LayoutUtil::HasLayout(operand_shape_with_layout));
}
}
for (const auto& pair : custom_call->output_to_operand_aliasing()) {
TF_RET_CHECK(pair.second.first < custom_call->operand_count())
<< "Invalid aliasing operand index.";
TF_RET_CHECK(ShapeUtil::IndexIsValid(
custom_call->operand(pair.second.first)->shape(), pair.second.second))
<< "Invalid aliasing operand shape index.";
TF_RET_CHECK(ShapeUtil::IndexIsValid(custom_call->shape(), pair.first))
<< "Invalid aliasing output shape index.";
const Shape& output_subshape =
ShapeUtil::GetSubshape(custom_call->shape(), pair.first);
const Shape& operand_subshape = ShapeUtil::GetSubshape(
custom_call->operand(pair.second.first)->shape(), pair.second.second);
if (opts_.layout_sensitive) {
TF_RET_CHECK(operand_subshape == output_subshape)
<< "Different aliasing shapes: " << operand_subshape.ToString()
<< " vs " << output_subshape.ToString();
} else {
TF_RET_CHECK(ShapeUtil::Compatible(output_subshape, operand_subshape))
<< "Different aliasing shapes: " << operand_subshape.ToString()
<< " vs " << output_subshape.ToString();
}
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleSlice(HloInstruction* slice) {
return CheckShape(slice,
ShapeInference::InferSliceShape(
slice->operand(0)->shape(), slice->slice_starts(),
slice->slice_limits(), slice->slice_strides()));
}
absl::Status ShapeVerifier::HandleDynamicSlice(HloInstruction* dynamic_slice) {
return CheckShape(
dynamic_slice,
ShapeInference::InferDynamicSliceShape(
dynamic_slice->operand(0)->shape(),
Cast<HloDynamicSliceInstruction>(dynamic_slice)->index_shapes(),
dynamic_slice->dynamic_slice_sizes()));
}
absl::Status ShapeVerifier::HandleDynamicUpdateSlice(
HloInstruction* dynamic_update_slice) {
return CheckShape(
dynamic_update_slice,
ShapeInference::InferDynamicUpdateSliceShape(
dynamic_update_slice->operand(0)->shape(),
dynamic_update_slice->operand(1)->shape(),
Cast<HloDynamicUpdateSliceInstruction>(dynamic_update_slice)
->index_shapes()));
}
absl::Status ShapeVerifier::HandleTuple(HloInstruction* tuple) {
return CheckVariadicShape(tuple);
}
absl::Status ShapeVerifier::HandleMap(HloInstruction* map) {
std::vector<const Shape*> operand_shapes;
int64_t max_operand_rank = 0;
for (const HloInstruction* operand : map->operands()) {
operand_shapes.push_back(&operand->shape());
max_operand_rank = std::max(max_operand_rank, operand->shape().rank());
}
std::vector<int64_t> map_dims(max_operand_rank);
std::iota(map_dims.begin(), map_dims.end(), 0);
TF_RETURN_IF_ERROR(CheckShape(
map,
ShapeInference::InferMapShape(
operand_shapes, map->to_apply()->ComputeProgramShape(), map_dims)));
return opts_.allow_mixed_precision
? absl::OkStatus()
: SameElementTypesForOperandsAndToApplyParameters(
*map, map->operand_count());
}
absl::Status ShapeVerifier::HandleReduceWindow(HloInstruction* reduce_window) {
auto reduce_window_instr = Cast<HloReduceWindowInstruction>(reduce_window);
auto input_shapes = reduce_window_instr->input_shapes();
auto init_shapes = reduce_window_instr->init_value_shapes();
TF_RETURN_IF_ERROR(CheckShape(
reduce_window, ShapeInference::InferReduceWindowShape(
input_shapes, init_shapes, reduce_window->window(),
reduce_window->to_apply()->ComputeProgramShape())));
return opts_.allow_mixed_precision
? absl::OkStatus()
: SameElementTypesForOperandsAndToApplyParameters(
*reduce_window, reduce_window->operand_count());
}
absl::Status ShapeVerifier::HandleSelectAndScatter(
HloInstruction* instruction) {
return CheckShape(
instruction,
ShapeInference::InferSelectAndScatterShape(
instruction->operand(0)->shape(),
instruction->select()->ComputeProgramShape(), instruction->window(),
instruction->operand(1)->shape(), instruction->operand(2)->shape(),
instruction->scatter()->ComputeProgramShape()));
}
absl::Status ShapeVerifier::HandleWhile(HloInstruction* xla_while) {
TF_RETURN_IF_ERROR(
CheckParameterCount(xla_while, xla_while->while_body(), 1));
TF_RETURN_IF_ERROR(
CheckParameterCount(xla_while, xla_while->while_condition(), 1));
TF_RETURN_IF_ERROR(
CheckOperandAndParameter(xla_while, 0, xla_while->while_body(), 0));
TF_RETURN_IF_ERROR(
CheckOperandAndParameter(xla_while, 0, xla_while->while_condition(), 0));
const Shape& conditional_shape =
xla_while->while_condition()->root_instruction()->shape();
if (!ShapeUtil::Compatible(conditional_shape,
ShapeUtil::MakeShape(PRED, {}))) {
return Internal(
"Conditional computation shape does not lead to a scalar predicate "
"shape: %s",
StringifyShape(conditional_shape));
}
return CheckShape(xla_while,
xla_while->while_body()->root_instruction()->shape());
}
absl::Status ShapeVerifier::HandleConditional(HloInstruction* conditional) {
if (!ShapeUtil::IsScalar(conditional->operand(0)->shape())) {
return InvalidArgument(
"The first operand of conditional must be a scalar. Got %s",
conditional->operand(0)->shape().DebugString());
}
const int num_branches = conditional->branch_count();
PrimitiveType operand0_type = conditional->operand(0)->shape().element_type();
if (operand0_type == PRED) {
TF_RET_CHECK(num_branches == 2);
} else {
if (operand0_type != S32) {
return InvalidArgument(
"The first operand of indexed conditional must be a scalar of S32. "
"Got type %s.",
PrimitiveType_Name(operand0_type));
}
TF_RET_CHECK(num_branches >= 1);
}
TF_RETURN_IF_ERROR(CheckOperandCount(conditional, num_branches + 1));
for (int j = 0; j < num_branches; ++j) {
TF_RETURN_IF_ERROR(CheckParameterCount(
conditional, conditional->branch_computation(j), 1));
TF_RETURN_IF_ERROR(CheckOperandAndParameter(
conditional, j + 1, conditional->branch_computation(j), 0));
TF_RETURN_IF_ERROR(CheckShape(
conditional,
conditional->branch_computation(j)->root_instruction()->shape()));
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandlePad(HloInstruction* pad) {
return CheckShape(pad, ShapeInference::InferPadShape(pad->operand(0)->shape(),
pad->operand(1)->shape(),
pad->padding_config()));
}
namespace {
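// Checks that an async-update/async-done consumes an async-start or
// async-update and wraps the same async computation as its operand.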
absl::Status CheckAsyncOpOperand(const HloInstruction* async_op) {
const HloInstruction* operand = async_op->operand(0);
if (operand->opcode() != HloOpcode::kAsyncStart &&
operand->opcode() != HloOpcode::kAsyncUpdate) {
return Internal(
"%s expects operand to be async-update or async-done, found "
"%s.",
HloOpcodeString(async_op->opcode()),
HloOpcodeString(operand->opcode()));
}
if (*async_op->async_wrapped_computation() !=
*operand->async_wrapped_computation()) {
return Internal(
"The %s expects its wrapped async computation to be identical to its "
"operand's wrapped async computation (%s vs %s), thread name (%s vs "
"%s).",
HloOpcodeString(async_op->opcode()),
async_op->async_wrapped_instruction()->ToString(),
operand->async_wrapped_instruction()->ToString(),
async_op->async_wrapped_computation()->execution_thread(),
operand->async_wrapped_computation()->execution_thread());
}
return absl::OkStatus();
}
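// Verifies that an async op's execution thread matches its wrapped
// computation's thread, and that nested computations agree as well.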
absl::Status CheckAsyncOpComputationThreadName(const HloInstruction* async_op) {
absl::string_view async_execution_thread = async_op->async_execution_thread();
if (async_execution_thread !=
async_op->async_wrapped_computation()->execution_thread()) {
return Internal(
"%s expects same async thread name as wrapped computation's "
"thread name (%s vs %s).",
HloOpcodeString(async_op->opcode()), async_execution_thread,
async_op->async_wrapped_computation()->execution_thread());
}
return CheckNestedComputationThreadNameEqual(
async_op->async_wrapped_computation(),
false);
}
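// For callable instructions (call, fusion, etc.), verifies that every called
// computation runs on the same execution thread as the caller's parent
// computation.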
absl::Status CheckCallableInstructionThreadName(
const HloInstruction* instruction, bool skip_nested_async_op_check) {
for (const HloComputation* computation : instruction->called_computations()) {
if (instruction->parent() != nullptr) {
if (instruction->parent()->execution_thread() !=
computation->execution_thread()) {
return Internal(
"callable instruction %s expects parent computation thread name "
"same as called computation's thread name (%s vs %s).",
instruction->ToString(), instruction->parent()->execution_thread(),
computation->execution_thread());
}
}
TF_RETURN_IF_ERROR(CheckNestedComputationThreadNameEqual(
computation, skip_nested_async_op_check));
}
return absl::OkStatus();
}
}
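// An async shape must be a tuple of at least (operands, result[, context]):
// element {0} matches the wrapped computation's parameter shapes and element
// {1} matches its root shape.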
absl::Status ShapeVerifier::CheckAsyncOpComputationShapes(
const HloInstruction* async_op, const Shape& async_shape) {
if (!async_shape.IsTuple() || async_shape.tuple_shapes_size() < 2) {
return Internal(
"The %s expects the async shape to be a tuple of at least two "
"elements, found %s.",
HloOpcodeString(async_op->opcode()), async_shape.ToString());
}
ProgramShape computation_shape =
async_op->async_wrapped_computation()->ComputeProgramShape();
Shape param_shape = ShapeUtil::MakeTupleShape(computation_shape.parameters());
if (!ShapesSame(async_shape.tuple_shapes(0), param_shape)) {
return Internal(
"The %s expects the async shape at index {0} to match async "
"computation parameter shape (%s vs %s).",
HloOpcodeString(async_op->opcode()),
async_shape.tuple_shapes(0).ToString(true),
param_shape.ToString(true));
}
if (!ShapesSame(async_shape.tuple_shapes(1), computation_shape.result())) {
return Internal(
"The %s expects the async shape at index {1} to match the async "
"computation root shape (%s vs %s).",
HloOpcodeString(async_op->opcode()),
async_shape.tuple_shapes(1).ToString(true),
computation_shape.result().ToString(true));
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleAsyncStart(HloInstruction* async_start) {
TF_RETURN_IF_ERROR(
CheckAsyncOpComputationShapes(async_start, async_start->shape()));
TF_RETURN_IF_ERROR(CheckAsyncOpComputationThreadName(async_start));
const Shape& param_shape = async_start->shape().tuple_shapes(0);
for (int i = 0; i < async_start->operand_count(); ++i) {
if (!ShapesSame(param_shape.tuple_shapes(i),
async_start->operand(i)->shape())) {
return Internal(
"The %s expects the shape of operand %d to match the async shape at "
"index {0} (%s vs %s).",
HloOpcodeString(async_start->opcode()), i,
async_start->operand(i)->shape().ToString(true),
param_shape.tuple_shapes(i).ToString(true));
}
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::HandleAsyncUpdate(HloInstruction* async_update) {
TF_RETURN_IF_ERROR(CheckAsyncOpComputationThreadName(async_update));
if (!ShapesSame(async_update->operand(0)->shape(), async_update->shape())) {
return Internal(
"The %s expects the shape of operand and output to match (%s vs %s).",
HloOpcodeString(async_update->opcode()),
async_update->operand(0)->shape().ToString(true),
async_update->shape().ToString(true));
}
TF_RETURN_IF_ERROR(
CheckAsyncOpComputationShapes(async_update, async_update->shape()));
return CheckAsyncOpOperand(async_update);
}
absl::Status ShapeVerifier::HandleAsyncDone(HloInstruction* async_done) {
TF_RETURN_IF_ERROR(CheckAsyncOpComputationThreadName(async_done));
TF_RETURN_IF_ERROR(CheckAsyncOpComputationShapes(
async_done, async_done->operand(0)->shape()));
const Shape& root_shape = async_done->operand(0)->shape().tuple_shapes(1);
if (!ShapesSame(root_shape, async_done->shape())) {
return Internal(
"The %s expects the shape of output to match the async shape at index "
"{1} (%s vs %s).",
HloOpcodeString(async_done->opcode()),
async_done->shape().ToString(true), root_shape.ToString(true));
}
return CheckAsyncOpOperand(async_done);
}
absl::Status ShapeVerifier::HandleCopyStart(HloInstruction* copy_start) {
return CheckShape(copy_start,
ShapeUtil::MakeTupleShape({copy_start->operand(0)->shape(),
copy_start->operand(0)->shape(),
ShapeUtil::MakeShape(U32, {})}),
true);
}
absl::Status ShapeVerifier::HandleCopyDone(HloInstruction* copy_done) {
const Shape& operand_shape = copy_done->operand(0)->shape();
const Shape& dest_shape = ShapeUtil::GetTupleElementShape(operand_shape, 0);
const Shape& src_shape = ShapeUtil::GetTupleElementShape(operand_shape, 1);
if (!ShapesSame(dest_shape, src_shape,
Shape::Equal()
.IgnoreMemorySpaceInLayout()
.IgnoreSplitConfigInLayout())) {
return Internal(
"Source and destination buffers in CopyDone arguments need to be the "
"same shape found %s and %s\n%s",
StringifyShape(dest_shape), StringifyShape(src_shape),
copy_done->ToString());
}
return CheckShape(copy_done, ShapeUtil::GetTupleElementShape(
copy_done->operand(0)->shape(), 0));
}
absl::Status ShapeVerifier::HandleSend(HloInstruction* send) {
return CheckShape(send,
ShapeUtil::MakeTupleShape({send->operand(0)->shape(),
ShapeUtil::MakeShape(U32, {}),
ShapeUtil::MakeTokenShape()}),
true);
}
absl::Status ShapeVerifier::HandleSendDone(HloInstruction* send_done) {
return CheckShape(send_done, ShapeUtil::MakeTokenShape());
}
absl::Status ShapeVerifier::HandleRecv(HloInstruction* recv) {
return CheckShape(
recv,
ShapeUtil::MakeTupleShape(
{ShapeUtil::GetTupleElementShape(recv->shape(), 0),
ShapeUtil::MakeShape(U32, {}), ShapeUtil::MakeTokenShape()}),
true);
}
absl::Status ShapeVerifier::HandleRecvDone(HloInstruction* recv_done) {
return CheckShape(
recv_done,
ShapeUtil::MakeTupleShape(
{ShapeUtil::GetTupleElementShape(recv_done->operand(0)->shape(), 0),
ShapeUtil::MakeTokenShape()}));
}
absl::Status ShapeVerifier::HandleBatchNormTraining(
HloInstruction* batch_norm_training) {
return CheckShape(batch_norm_training,
ShapeInference::InferBatchNormTrainingShape(
batch_norm_training->operand(0)->shape(),
batch_norm_training->operand(1)->shape(),
batch_norm_training->operand(2)->shape(),
batch_norm_training->feature_index()));
}
absl::Status ShapeVerifier::HandleBatchNormInference(
HloInstruction* batch_norm_inference) {
return CheckShape(batch_norm_inference,
ShapeInference::InferBatchNormInferenceShape(
batch_norm_inference->operand(0)->shape(),
batch_norm_inference->operand(1)->shape(),
batch_norm_inference->operand(2)->shape(),
batch_norm_inference->operand(3)->shape(),
batch_norm_inference->operand(4)->shape(),
batch_norm_inference->feature_index()));
}
absl::Status ShapeVerifier::HandleBatchNormGrad(
HloInstruction* batch_norm_grad) {
return CheckShape(batch_norm_grad, ShapeInference::InferBatchNormGradShape(
batch_norm_grad->operand(0)->shape(),
batch_norm_grad->operand(1)->shape(),
batch_norm_grad->operand(2)->shape(),
batch_norm_grad->operand(3)->shape(),
batch_norm_grad->operand(4)->shape(),
batch_norm_grad->feature_index()));
}
namespace {
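// For ops that do not implicitly convert precision, checks that all floating
// point subshapes across the operands use a single precision.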
absl::Status CheckMixedPrecisionOperands(const HloInstruction* instruction) {
switch (instruction->opcode()) {
case HloOpcode::kCall:
case HloOpcode::kConditional:
case HloOpcode::kConstant:
case HloOpcode::kConvolution:
case HloOpcode::kDot:
case HloOpcode::kAllReduce:
case HloOpcode::kAllReduceStart:
case HloOpcode::kAllReduceDone:
case HloOpcode::kAllGather:
case HloOpcode::kAllGatherStart:
case HloOpcode::kAllGatherDone:
case HloOpcode::kAsyncDone:
case HloOpcode::kAsyncUpdate:
case HloOpcode::kAsyncStart:
case HloOpcode::kCopyDone:
case HloOpcode::kCopyStart:
case HloOpcode::kCustomCall:
case HloOpcode::kDomain:
case HloOpcode::kFusion:
case HloOpcode::kGetTupleElement:
case HloOpcode::kOptimizationBarrier:
case HloOpcode::kInfeed:
case HloOpcode::kOutfeed:
case HloOpcode::kParameter:
case HloOpcode::kRecv:
case HloOpcode::kRecvDone:
case HloOpcode::kReducePrecision:
case HloOpcode::kReduceWindow:
case HloOpcode::kSend:
case HloOpcode::kSendDone:
case HloOpcode::kSort:
case HloOpcode::kTuple:
case HloOpcode::kWhile:
break;
default: {
PrimitiveType fp_type = PRIMITIVE_TYPE_INVALID;
for (auto operand : instruction->operands()) {
TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
operand->shape(),
[&](const Shape& subshape,
const ShapeIndex& index) -> absl::Status {
if (!ShapeUtil::ElementIsFloating(subshape)) {
return absl::OkStatus();
}
if (fp_type == PRIMITIVE_TYPE_INVALID) {
fp_type = subshape.element_type();
} else if (fp_type != subshape.element_type()) {
return Internal(
"Seen floating point types of different precisions in "
"%s, but mixed precision is disallowed.",
instruction->ToString());
}
return absl::OkStatus();
}));
}
}
}
return absl::OkStatus();
}
}
absl::Status ShapeVerifier::HandleGather(HloInstruction* gather) {
return CheckShape(
gather,
ShapeInference::InferGatherShape(
gather->operand(0)->shape(), gather->operand(1)->shape(),
gather->gather_dimension_numbers(), gather->gather_slice_sizes()));
}
absl::Status ShapeVerifier::HandleScatter(HloInstruction* scatter) {
absl::InlinedVector<const Shape*, 3> arg_shapes;
arg_shapes.reserve(scatter->operand_count());
for (const HloInstruction* operand : scatter->operands()) {
arg_shapes.push_back(&operand->shape());
}
return CheckShape(scatter,
ShapeInference::InferScatterShape(
arg_shapes, scatter->to_apply()->ComputeProgramShape(),
scatter->scatter_dimension_numbers()));
}
absl::Status ShapeVerifier::HandleAfterAll(HloInstruction* token) {
std::vector<const Shape*> operand_shapes;
for (const HloInstruction* operand : token->operands()) {
operand_shapes.push_back(&operand->shape());
}
return CheckShape(token, ShapeUtil::MakeTokenShape());
}
absl::Status ShapeVerifier::HandleAddDependency(
HloInstruction* add_dependency) {
TF_RETURN_IF_ERROR(CheckIsTokenOperand(add_dependency, 1));
return CheckShape(add_dependency, add_dependency->operand(0)->shape());
}
absl::Status ShapeVerifier::HandleGetDimensionSize(HloInstruction* get_size) {
return CheckShape(get_size,
ShapeInference::InferGetDimensionSizeShape(
get_size->operand(0)->shape(), get_size->dimension()));
}
absl::Status ShapeVerifier::HandleSetDimensionSize(HloInstruction* set_size) {
return CheckShape(set_size,
ShapeInference::InferSetDimensionSizeShape(
set_size->operand(0)->shape(),
set_size->operand(1)->shape(), set_size->dimension()));
}
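// Compares an instruction's shape against the inferred shape. Ops that do
// not implicitly convert element types are compared with ShapesSame
// (optionally only on minor-to-major order in layouts); everything else uses
// compatibility, ignoring floating point precision when mixed precision is
// allowed.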
absl::Status ShapeVerifier::CheckShape(
const HloInstruction* instruction, const Shape& inferred_shape,
bool only_compare_minor_to_major_in_layout) {
if (!opts_.allow_mixed_precision) {
TF_RETURN_IF_ERROR(CheckMixedPrecisionOperands(instruction));
}
bool equal = [&] {
switch (instruction->opcode()) {
case HloOpcode::kBitcast:
case HloOpcode::kCall:
case HloOpcode::kConditional:
case HloOpcode::kConstant:
case HloOpcode::kCopyDone:
case HloOpcode::kCopyStart:
case HloOpcode::kCustomCall:
case HloOpcode::kGetTupleElement:
case HloOpcode::kInfeed:
case HloOpcode::kOutfeed:
case HloOpcode::kOptimizationBarrier:
case HloOpcode::kParameter:
case HloOpcode::kRecv:
case HloOpcode::kRecvDone:
case HloOpcode::kSend:
case HloOpcode::kSendDone:
case HloOpcode::kTuple:
case HloOpcode::kWhile: {
Shape::Equal equal;
if (only_compare_minor_to_major_in_layout) {
equal.MinorToMajorOnlyInLayout();
}
return ShapesSame(instruction->shape(), inferred_shape, equal);
}
case HloOpcode::kDynamicUpdateSlice: {
Shape::Equal equal;
if (only_compare_minor_to_major_in_layout) {
equal.MinorToMajorOnlyInLayout();
}
if (instruction->parent()->IsFusionComputation()) {
equal.IgnoreMemorySpaceInLayout().IgnoreTilesInLayout();
}
return ShapesSame(instruction->shape(), inferred_shape, equal);
}
case HloOpcode::kCopy: {
if (opts_.IsLayoutSensitive()) {
if (instruction->shape().has_layout() &&
inferred_shape.has_layout()) {
int64_t instruction_memory_space =
instruction->shape().layout().memory_space();
int64_t operand_memory_space =
inferred_shape.layout().memory_space();
if (instruction_memory_space != operand_memory_space &&
(instruction_memory_space == Layout::kHostMemorySpace ||
operand_memory_space == Layout::kHostMemorySpace)) {
return Shape::Equal().IgnoreMemorySpaceInLayout()(
instruction->shape(), inferred_shape);
}
}
}
[[fallthrough]];
}
default:
if (opts_.allow_mixed_precision) {
return ShapeUtil::CompatibleIgnoringFpPrecision(instruction->shape(),
inferred_shape);
} else {
return ShapeUtil::Compatible(instruction->shape(), inferred_shape);
}
}
}();
if (!equal) {
return Internal(
"Expected instruction to have shape equal to %s, actual "
"shape is %s:\n%s",
StringifyShape(inferred_shape), StringifyShape(instruction->shape()),
instruction->ToString());
}
return absl::OkStatus();
}
absl::Status ShapeVerifier::CheckShape(
const HloInstruction* instruction,
const absl::StatusOr<Shape>& inferred_shape_status) {
if (!inferred_shape_status.ok()) {
absl::Status s = inferred_shape_status.status();
tsl::errors::AppendToMessage(&s, ", for instruction ",
instruction->ToString());
return s;
}
return CheckShape(instruction, inferred_shape_status.value());
}
absl::Status ShapeVerifier::CheckUnaryShape(const HloInstruction* instruction) {
return CheckShape(instruction,
ShapeInference::InferUnaryOpShape(instruction->opcode(),
instruction->operand(0)));
}
absl::Status ShapeVerifier::CheckBinaryShape(
const HloInstruction* instruction) {
return CheckShape(
instruction, ShapeInference::InferBinaryOpShape(instruction->opcode(),
instruction->operand(0),
instruction->operand(1)));
}
absl::Status ShapeVerifier::CheckTernaryShape(
const HloInstruction* instruction) {
return CheckShape(instruction,
ShapeInference::InferTernaryOpShape(
instruction->opcode(), instruction->operand(0),
instruction->operand(1), instruction->operand(2)));
}
absl::Status ShapeVerifier::CheckVariadicShape(
const HloInstruction* instruction) {
return CheckShape(instruction,
ShapeInference::InferVariadicOpShape(
instruction->opcode(), instruction->operands()));
}
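// Verifies that the shapes of the entry computation's root and parameters
// agree with the module's entry computation layout (ignoring tiles, tail
// padding alignment, and memory space), and that every must-alias
// input/output pair has identical shapes.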
absl::Status ShapeVerifier::VerifyEntryComputationLayout(
const HloModule& module) {
const HloComputation* computation = module.entry_computation();
const auto& layout = module.entry_computation_layout();
const ShapeLayout& result_layout = layout.result_layout();
TF_RETURN_IF_ERROR(
ShapeUtil::ValidateShapeWithOptionalLayout(result_layout.shape()));
if (!ShapesSame(computation->root_instruction()->shape(),
result_layout.shape(),
Shape::Equal()
.IgnoreTilesInLayout()
.IgnoreTailPaddingAlignmentInElements()
.IgnoreMemorySpaceInLayout())) {
return Internal(
"Shape of the root instruction of entry computation (%s) should be "
"compatible to one specified in module's entry computation layout (%s)",
StringifyShape(computation->root_instruction()->shape()),
StringifyShape(result_layout.shape()));
}
if (computation->num_parameters() != layout.parameter_count()) {
return Internal(
"Number of parameters in entry computation layout (%d) must be same "
"as number of parameters of entry computation (%d)",
layout.parameter_count(), computation->num_parameters());
}
for (int i = 0; i < computation->num_parameters(); ++i) {
const HloInstruction* parameter = computation->parameter_instruction(i);
TF_RETURN_IF_ERROR(
ShapeUtil::ValidateShapeWithOptionalLayout(layout.parameter_shape(i)));
if (!ShapesSame(parameter->shape(), layout.parameter_shape(i),
Shape::Equal()
.IgnoreTilesInLayout()
.IgnoreTailPaddingAlignmentInElements()
.IgnoreMemorySpaceInLayout())) {
return Internal(
"Shape of the entry computation parameter %d is %s should be "
"compatible to the one specified in module's entry computation "
"layout %s",
i, StringifyShape(parameter->shape()),
StringifyShape(layout.parameter_shape(i)));
}
}
const auto& alias_config = module.input_output_alias_config();
TF_RETURN_IF_ERROR(alias_config.ForEachAliasWithStatus(
[&](ShapeIndex result_index,
HloInputOutputAliasConfig::Alias alias) -> absl::Status {
if (!alias.must_alias()) {
return absl::OkStatus();
}
const Shape& result_shape =
ShapeUtil::GetSubshape(result_layout.shape(), result_index);
const Shape& parameter_shape = ShapeUtil::GetSubshape(
layout.parameter_layout(alias.parameter_number).shape(),
alias.parameter_index);
if (result_shape != parameter_shape) {
return Internal(
"Shape and memory space of the result at index %s (%s) "
"must be the same as the shape and memory spaceof aliased "
"parameter %d at index %s (%s)",
result_index.ToString(), StringifyShape(result_shape),
alias.parameter_number, alias.parameter_index.ToString(),
StringifyShape(parameter_shape));
}
return absl::OkStatus();
}));
return absl::OkStatus();
}
std::string ComputationsToString(
absl::Span<HloComputation* const> computations) {
return absl::StrJoin(computations, ",",
[](std::string* s, const HloComputation* computation) {
absl::StrAppend(s, computation->name());
});
}
absl::Status VerifyInstructionNameUnchanged(const HloModule& module,
const HloVerifierOpts& opts) {
if (!opts.verify_instruction_name_unchanged) {
return absl::OkStatus();
}
for (auto* comp : module.computations()) {
for (auto* inst : comp->instructions()) {
if (inst->metadata().scheduling_name().empty()) {
continue;
}
if (inst->metadata().scheduling_name() != inst->name() &&
(!absl::StrContains(inst->name(), ".remat") &&
!absl::StrContains(inst->name(), ".clone"))) {
return absl::FailedPreconditionError(absl::StrCat(
"Expected instruction name to remain the same. Was '",
            inst->metadata().scheduling_name(), "', but is now '", inst->name(),
            "'."));
}
}
}
return absl::OkStatus();
}
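// Verifies pointer consistency of the module structure: every computation
// points back to the module, every instruction points back to its
// computation, and every operand lives in the same computation as its user.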
absl::Status VerifyHloStructure(HloModule* module) {
for (const HloComputation* computation : module->computations()) {
if (computation == nullptr) {
return Internal("Computation in module %s is a null pointer",
module->name());
}
if (computation->parent() == nullptr) {
return Internal("Computation %s has a null parent pointer",
computation->name());
}
if (computation->parent() != module) {
return Internal("Computation %s parent() does not point to parent module",
computation->name());
}
for (const HloInstruction* instruction : computation->instructions()) {
if (instruction == nullptr) {
return Internal("Instruction in computation %s is a null pointer",
computation->name());
}
if (instruction->parent() == nullptr) {
return Internal("Instruction %s has a null parent pointer",
instruction->name());
}
if (instruction->parent() != computation) {
return Internal(
"Instruction %s parent() does not point to parent computation",
instruction->name());
}
}
}
for (const HloComputation* computation : module->computations()) {
for (const HloInstruction* instruction : computation->instructions()) {
for (int i = 0; i < instruction->operand_count(); ++i) {
const HloInstruction* operand = instruction->operand(i);
if (operand == nullptr) {
return Internal(
"Operand %d (out of %d) of instruction: %s is a null pointer", i,
instruction->operand_count(), instruction->name());
}
if (operand->parent() == nullptr) {
return Internal(
"Operand %d (out of %d) of instruction: %s has a null pointer "
"parent",
i, instruction->operand_count(), instruction->name());
}
if (operand->parent() != instruction->parent()) {
return Internal(
"Operand %d (%s) of instruction %s is in a different "
"computation: %s vs %s",
i, operand->name(), instruction->name(),
operand->parent() ? operand->parent()->name() : "(null)",
instruction->parent()->name());
}
}
}
}
return absl::OkStatus();
}
namespace {
bool ShapeContainsToken(const Shape& shape) {
bool contains_token = false;
ShapeUtil::ForEachSubshape(
shape, [&contains_token](const Shape& subshape, const ShapeIndex&) {
if (subshape.IsToken()) {
contains_token = true;
}
});
return contains_token;
}
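// Returns an error unless the two instructions carry the same channel id.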
absl::Status CheckSameChannel(const HloInstruction* instr1,
const HloInstruction* instr2) {
if (instr1->channel_id() != instr2->channel_id()) {
return Internal(
"Expected to have the same channel id, actual channel ids are: %s "
"(%d), %s (%d)",
instr1->ToString(), *instr1->channel_id(), instr2->ToString(),
*instr2->channel_id());
}
return absl::OkStatus();
}
absl::Status CheckSameIsHostTransfer(const HloInstruction* instr1,
const HloInstruction* instr2) {
const HloSendRecvInstruction* send_recv1 =
DynCast<const HloSendRecvInstruction>(instr1);
const HloSendRecvInstruction* send_recv2 =
DynCast<const HloSendRecvInstruction>(instr2);
TF_RET_CHECK(send_recv1 != nullptr);
TF_RET_CHECK(send_recv2 != nullptr);
if (send_recv1->is_host_transfer() != send_recv2->is_host_transfer()) {
return Internal(
"Expected instructions to have the same is-host-transfer property: "
"%s, "
"%s ",
instr1->ToString(), instr2->ToString());
}
return absl::OkStatus();
}
absl::Status VerifySingleUser(
const HloInstruction* instruction,
const absl::flat_hash_set<HloOpcode>& expected_users) {
TF_RET_CHECK(instruction->users().size() == 1)
<< "The " << instruction->opcode()
<< " instruction requires one consumer, found "
<< instruction->users().size();
const HloInstruction* user = instruction->users().front();
TF_RET_CHECK(expected_users.contains(user->opcode()))
<< "The consumer of a " << instruction->opcode()
<< " instruction needs to be one of ("
<< absl::StrJoin(expected_users, ", ",
[](std::string* out, HloOpcode opcode) {
absl::StrAppend(out, HloOpcodeString(opcode));
})
<< "), found " << user->opcode();
return absl::OkStatus();
}
absl::Status VerifySingleOperand(
const HloInstruction* instruction,
const std::vector<HloOpcode>& expected_operands) {
TF_RET_CHECK(instruction->operands().size() == 1)
<< "The " << instruction->opcode()
<< " instruction requires one consumer, found "
<< instruction->users().size();
const HloInstruction* operand = instruction->operand(0);
TF_RET_CHECK(absl::c_find(expected_operands, operand->opcode()) !=
expected_operands.end())
<< "The operand of a " << instruction->opcode()
<< " instruction needs to be "
<< absl::StrJoin(expected_operands, " or ",
[](std::string* out, HloOpcode opcode) {
absl::StrAppend(out, HloOpcodeString(opcode));
})
<< ", found " << operand->opcode();
return absl::OkStatus();
}
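// Verifies that asynchronous-style instruction pairs (async, all-reduce,
// copy, collective-permute, and send/recv start/update/done) are connected
// to the expected producer and consumer opcodes.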
absl::Status VerifyAsynchronousInstructionPairs(const HloModule& module) {
for (const HloComputation* computation : module.computations()) {
for (const HloInstruction* instruction : computation->instructions()) {
switch (instruction->opcode()) {
case HloOpcode::kAsyncStart: {
TF_RETURN_IF_ERROR(VerifySingleUser(
instruction, {HloOpcode::kAsyncUpdate, HloOpcode::kAsyncDone}));
break;
}
case HloOpcode::kAsyncUpdate: {
TF_RETURN_IF_ERROR(VerifySingleOperand(
instruction, {HloOpcode::kAsyncStart, HloOpcode::kAsyncUpdate}));
TF_RETURN_IF_ERROR(VerifySingleUser(
instruction, {HloOpcode::kAsyncUpdate, HloOpcode::kAsyncDone}));
break;
}
case HloOpcode::kAsyncDone: {
TF_RETURN_IF_ERROR(VerifySingleOperand(
instruction, {HloOpcode::kAsyncStart, HloOpcode::kAsyncUpdate}));
break;
}
case HloOpcode::kAllReduceStart: {
TF_RETURN_IF_ERROR(
VerifySingleUser(instruction, {HloOpcode::kAllReduceDone}));
break;
}
case HloOpcode::kAllReduceDone: {
TF_RETURN_IF_ERROR(
VerifySingleOperand(instruction, {HloOpcode::kAllReduceStart}));
break;
}
case HloOpcode::kCopyStart: {
TF_RETURN_IF_ERROR(
VerifySingleUser(instruction, {HloOpcode::kCopyDone}));
break;
}
case HloOpcode::kCopyDone: {
TF_RETURN_IF_ERROR(
VerifySingleOperand(instruction, {HloOpcode::kCopyStart}));
break;
}
case HloOpcode::kCollectivePermuteStart: {
TF_RETURN_IF_ERROR(VerifySingleUser(
instruction, {HloOpcode::kCollectivePermuteDone}));
break;
}
case HloOpcode::kCollectivePermuteDone: {
TF_RETURN_IF_ERROR(VerifySingleOperand(
instruction, {HloOpcode::kCollectivePermuteStart}));
break;
}
case HloOpcode::kSend: {
TF_RETURN_IF_ERROR(VerifySingleUser(
instruction, {HloOpcode::kSendDone, HloOpcode::kTuple}));
break;
}
case HloOpcode::kSendDone: {
TF_RETURN_IF_ERROR(VerifySingleOperand(
instruction, {HloOpcode::kSend, HloOpcode::kGetTupleElement}));
break;
}
case HloOpcode::kRecv: {
TF_RETURN_IF_ERROR(VerifySingleUser(
instruction, {HloOpcode::kRecvDone, HloOpcode::kTuple}));
break;
}
case HloOpcode::kRecvDone: {
TF_RETURN_IF_ERROR(VerifySingleOperand(
instruction, {HloOpcode::kRecv, HloOpcode::kGetTupleElement}));
break;
}
default:
break;
}
}
}
return absl::OkStatus();
}
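// An async computation must be expandable into a single instruction, i.e.
// contain only its root and parameter instructions.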
absl::Status VerifyAsyncComputation(const HloComputation* async_computation) {
if (!async_computation->CanExpandIntoSingleInstruction()) {
return FailedPrecondition(
"Asynchronous computation %s expected to contain only the root and "
"parameter instructions.",
async_computation->name());
}
return absl::OkStatus();
}
absl::Status VerifyLayoutConstrainedAllReduce(const HloModule& module) {
const HloAllReduceInstruction* reference = nullptr;
for (const HloComputation* computation : module.computations()) {
for (const HloInstruction* instruction : computation->instructions()) {
if ((instruction->opcode() != HloOpcode::kAllReduce) &&
(instruction->opcode() != HloOpcode::kAllReduceStart)) {
continue;
}
auto all_reduce = DynCast<HloAllReduceInstruction>(instruction);
if (!reference) {
reference = all_reduce;
}
if (reference->constrain_layout() != all_reduce->constrain_layout()) {
return FailedPrecondition(
"HloModule has a mix of layout constrained and unconstrained "
"AllReduce instructions.");
}
}
}
return absl::OkStatus();
}
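// Groups channel instructions by channel id and checks that send/recv
// instructions agree with their matching done instructions on channel id and
// host-transfer status, that a host-transfer channel is used by exactly two
// instructions, and (optionally) that a channel id is not shared across
// different non-send/recv opcodes.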
absl::Status VerifyChannels(const HloModule& module,
const HloVerifierOpts& opts) {
absl::flat_hash_map<int64_t, std::vector<const HloInstruction*>>
channel_instructions;
for (const HloComputation* computation : module.computations()) {
for (const HloInstruction* instruction : computation->instructions()) {
auto channel_instr = DynCast<HloChannelInstruction>(instruction);
if (!channel_instr || !channel_instr->channel_id()) {
continue;
}
channel_instructions[*channel_instr->channel_id()].push_back(instruction);
switch (instruction->opcode()) {
case HloOpcode::kSend: {
TF_RET_CHECK(instruction->users().size() == 1);
const HloInstruction* send_done = instruction->users().front();
if (send_done->opcode() == HloOpcode::kSendDone) {
TF_RETURN_IF_ERROR(CheckSameChannel(instruction, send_done));
TF_RETURN_IF_ERROR(CheckSameIsHostTransfer(instruction, send_done));
}
break;
}
case HloOpcode::kRecv: {
TF_RET_CHECK(instruction->users().size() == 1);
const HloInstruction* recv_done = instruction->users().front();
if (recv_done->opcode() == HloOpcode::kRecvDone) {
TF_RETURN_IF_ERROR(CheckSameChannel(instruction, recv_done));
TF_RETURN_IF_ERROR(CheckSameIsHostTransfer(instruction, recv_done));
}
break;
}
case HloOpcode::kSendDone:
case HloOpcode::kRecvDone:
TF_RET_CHECK(instruction->operands().size() == 1);
break;
default:
break;
}
}
}
for (auto& pair : channel_instructions) {
auto& instructions = pair.second;
const HloInstruction* first = instructions[0];
if (const auto* sendrecv = DynCast<HloSendRecvInstruction>(first)) {
absl::flat_hash_set<HloOpcode> opcodes;
for (const HloInstruction* instr : instructions) {
opcodes.insert(instr->opcode());
auto cast = DynCast<HloSendRecvInstruction>(instr);
TF_RET_CHECK(cast != nullptr)
<< "channel " << pair.first
<< " is used for different types of channel instructions";
}
if (sendrecv->is_host_transfer()) {
TF_RET_CHECK(instructions.size() == 2)
<< "channel " << pair.first
<< " is used for multiple host send/recv instructions";
}
} else {
for (const HloInstruction* instr : instructions) {
if (opts.verify_unique_channel_ids) {
TF_RET_CHECK(first->opcode() == instr->opcode())
<< "channel " << pair.first
<< " is used for different types of channel instructions";
}
}
}
}
return absl::OkStatus();
}
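// Checks the internal consistency of a fusion instruction: the fused root and
// each fused parameter appear exactly once, parameter numbers are in range
// and dense, non-root fused instructions have only internal users, and the
// fusion's only called computation is its fused_instructions_computation.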
absl::Status CheckFusionInstruction(HloInstruction* fusion) {
HloComputation* fused_computation = fusion->fused_instructions_computation();
if (fusion != fused_computation->FusionInstruction()) {
return Internal(
"Instruction of fused computation does not match expected "
"instruction "
"%s.",
fusion->ToString());
}
bool root_owned = false;
const auto& fused_parameters = fusion->fused_parameters();
const HloInstruction* fused_root = fusion->fused_expression_root();
std::vector<bool> parameter_owned(fused_parameters.size(), false);
for (auto* instruction : fused_computation->instructions()) {
if (fused_root == instruction) {
if (root_owned) {
return Internal("Root appears more than once in %s.",
fusion->ToString());
}
root_owned = true;
}
for (int i = 0; i < fused_parameters.size(); ++i) {
if (fused_parameters[i] == instruction) {
if (parameter_owned[i]) {
return Internal("Parameter appears more than once in %s.",
fusion->ToString());
}
parameter_owned[i] = true;
}
}
}
if (!root_owned) {
return Internal("Root not found in computation of %s.", fusion->ToString());
}
for (int i = 0; i < parameter_owned.size(); i++) {
if (!parameter_owned[i]) {
return Internal("Parameter %d not found in computation of %s.", i,
fusion->ToString());
}
}
if (fused_root->user_count() != 0) {
return Internal("Root of %s may not have users.", fusion->ToString());
}
for (auto* instruction :
fusion->fused_instructions_computation()->instructions()) {
if (instruction != fused_root) {
if (instruction->user_count() == 0) {
return Internal("Non-root instruction %s in %s must have users.",
instruction->ToString(), fusion->ToString());
}
for (auto& user : instruction->users()) {
if (fused_computation != user->parent()) {
return Internal(
"Non-root instruction %s in %s may not have external users.",
instruction->ToString(), fusion->ToString());
}
}
}
}
CHECK_GE(fusion->operands().size(), fused_parameters.size());
std::vector<bool> parameter_numbers(fused_parameters.size(), false);
for (auto fused_param : fused_parameters) {
int64_t param_no = fused_param->parameter_number();
if (param_no < 0) {
return Internal("Unexpected negative parameter number %d in %s.",
param_no, fusion->ToString());
}
if (param_no >= fused_parameters.size()) {
return Internal(
"Unexpected parameter number %d in %s: higher then number of "
"parameters %lu.",
param_no, fusion->ToString(), fused_parameters.size());
}
if (parameter_numbers[param_no]) {
return Internal(
"Did not expect parameter number %d more than once in %s.", param_no,
fusion->ToString());
}
parameter_numbers[param_no] = true;
}
for (int i = 0; i < parameter_numbers.size(); i++) {
if (!parameter_numbers[i]) {
return Internal("Did not see parameter number %d in %s.", i,
fusion->ToString());
}
}
TF_RET_CHECK(fusion->called_computations() ==
absl::Span<HloComputation* const>(
{fusion->fused_instructions_computation()}))
<< "Fusion HLO calls computations other than the "
"fused_instructions_computation: "
<< fusion->ToString() << " fusion->fused_instructions_computation(): "
<< fusion->fused_instructions_computation()->ToString()
<< " fusion->called_computations(): "
<< ComputationsToString(fusion->called_computations());
for (const auto& fused : fusion->fused_instructions()) {
TF_RET_CHECK(fused->parent() == fusion->fused_instructions_computation())
<< "Fused HLO was missing a parent: " << fused->ToString()
<< " parent: " << fused->parent()
<< " computation: " << fusion->parent();
}
return absl::OkStatus();
}
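// Elementwise instructions may not implicitly broadcast: every operand shape
// must be compatible with the output shape. Compare instructions must also
// use a comparison type consistent with their operand element type.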
absl::Status CheckElementwiseInstruction(HloInstruction* instruction) {
const Shape& out_shape = instruction->shape();
for (HloInstruction* operand : instruction->operands()) {
const Shape& operand_shape = operand->shape();
if (!ShapeUtil::CompatibleIgnoringElementType(operand_shape, out_shape)) {
return FailedPrecondition(
"Implicit broadcast is not allowed in HLO."
"Found different shapes for instruction %s.\n"
"output: %s\noperand: %s\n",
HloOpcodeString(instruction->opcode()),
ShapeUtil::HumanString(out_shape),
ShapeUtil::HumanString(operand_shape));
}
}
if (auto* comparison = DynCast<HloCompareInstruction>(instruction)) {
const Shape& operand_shape = comparison->operand(1)->shape();
PrimitiveType operand_element_type = operand_shape.element_type();
Comparison::Type default_comparison_type =
Comparison::DefaultComparisonType(operand_element_type);
if (primitive_util::IsFloatingPointType(operand_element_type)) {
if (comparison->type() != Comparison::Type::kFloat &&
comparison->type() != Comparison::Type::kFloatTotalOrder) {
return FailedPrecondition(
"Expected comparison type %s or %s.\n"
"actual: %s\noperand: %s\n",
ComparisonTypeToString(Comparison::Type::kFloat),
ComparisonTypeToString(Comparison::Type::kFloatTotalOrder),
ComparisonTypeToString(comparison->type()),
ShapeUtil::HumanString(operand_shape));
}
} else if (comparison->type() != default_comparison_type) {
return FailedPrecondition(
"Expected comparison type %s.\n"
"actual: %s\noperand: %s\n",
ComparisonTypeToString(default_comparison_type),
ComparisonTypeToString(comparison->type()),
ShapeUtil::HumanString(operand_shape));
}
}
return absl::OkStatus();
}
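// Visitor performing per-instruction checks that are not covered by shape
// inference: fusion well-formedness, broadcast dimensions, while/conditional
// arity, consistent sharding, unique instruction names, execution-thread
// agreement, and (in Postprocess) that layout-preserving instructions do not
// change layouts.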
class InstructionVerifier : public DfsHloVisitorWithDefault {
public:
InstructionVerifier(const HloModule* module, const HloVerifierOpts& opts)
: opts_(opts) {
const int64_t num_partitions = module->config().num_partitions();
if (module->config().use_spmd_partitioning() &&
opts.verify_sharding_device_numbers && num_partitions > 1) {
num_devices_ = module->config().num_partitions();
}
}
absl::Status DefaultAction(HloInstruction*) override {
return absl::OkStatus();
}
absl::Status HandleFusion(HloInstruction* fusion) override {
TF_RETURN_IF_ERROR(CheckCallableInstructionThreadName(
fusion, false));
return CheckFusionInstruction(fusion);
}
absl::Status HandleBroadcast(HloInstruction* broadcast) override {
TF_RET_CHECK(broadcast->dimensions().size() ==
broadcast->operand(0)->shape().rank())
<< "Broadcast HLO (" << broadcast->ToShortString()
<< ") has invalid number of dimensions: "
<< broadcast->dimensions().size()
<< " != " << broadcast->operand(0)->shape().rank();
if (opts_.verify_broadcast_dimensions_order) {
TF_RET_CHECK(absl::c_is_sorted(broadcast->dimensions()))
<< "Broadcast dimensions should be ordered, got: "
<< broadcast->ToString();
}
return absl::OkStatus();
}
absl::Status HandleBitcastConvert(HloInstruction* c) override {
return absl::OkStatus();
}
absl::Status HandleWhile(HloInstruction* xla_while) override {
auto* while_cond = xla_while->while_condition();
auto* while_body = xla_while->while_body();
if (while_cond->num_parameters() != 1) {
return FailedPrecondition(
"While condition must have exactly 1 parameter; had %d : %s",
while_cond->num_parameters(), while_cond->ToString());
}
if (while_body->num_parameters() != 1) {
return FailedPrecondition(
"While body must have exactly 1 parameter; had %d : %s",
while_body->num_parameters(), while_body->ToString());
}
if (xla_while->operand_count() != 1) {
return FailedPrecondition(
"While loop must have exactly one operand; had %d : %s",
xla_while->operand_count(), xla_while->ToString());
}
TF_RETURN_IF_ERROR(CheckCallableInstructionThreadName(
xla_while, true));
TF_RETURN_IF_ERROR(VerifyConsistentSharding(
xla_while, {xla_while, xla_while->while_body()->root_instruction(),
xla_while->while_body()->parameter_instruction(0),
xla_while->while_condition()->parameter_instruction(0)}));
return absl::OkStatus();
}
absl::Status HandleCall(HloInstruction* call) override {
return CheckCallableInstructionThreadName(
call, true);
}
absl::Status HandleConditional(HloInstruction* conditional) override {
const std::vector<HloComputation*> branch_computations =
conditional->branch_computations();
std::vector<const HloInstruction*> sharding_check_instructions;
sharding_check_instructions.reserve(branch_computations.size() + 1);
sharding_check_instructions.push_back(conditional);
for (const HloComputation* branch_computation : branch_computations) {
if (branch_computation->num_parameters() != 1) {
return FailedPrecondition(
"Branch computation %s of %s must have 1 parameter instead of %d",
branch_computation->name(), conditional->ToString(),
branch_computation->num_parameters());
}
sharding_check_instructions.push_back(
branch_computation->root_instruction());
}
TF_RETURN_IF_ERROR(CheckCallableInstructionThreadName(
conditional, true));
TF_RETURN_IF_ERROR(
VerifyConsistentSharding(conditional, sharding_check_instructions));
return absl::OkStatus();
}
absl::Status HandleElementwiseUnary(HloInstruction* instruction) override {
return CheckElementwiseInstruction(instruction);
}
absl::Status HandleElementwiseBinary(HloInstruction* instruction) override {
return CheckElementwiseInstruction(instruction);
}
absl::Status HandleGetTupleElement(HloInstruction* gte) override {
TF_RET_CHECK(gte->operand(0)->shape().IsTuple());
return absl::OkStatus();
}
absl::Status HandleTranspose(HloInstruction* transpose) override {
const Shape& shape = transpose->shape();
const HloInstruction* operand = transpose->operand(0);
TF_RET_CHECK(shape.dimensions().size() == transpose->dimensions().size());
TF_RET_CHECK(shape.dimensions().size() ==
transpose->operand(0)->shape().dimensions().size());
TF_RET_CHECK(std::equal(
shape.dimensions().begin(), shape.dimensions().end(),
Permute(operand->shape().dimensions(), transpose->dimensions())
.begin()))
<< "shape: " << shape << ", operand->shape(): " << shape
<< ", dimensions: {" << absl::StrJoin(transpose->dimensions(), ", ")
<< "}";
return absl::OkStatus();
}
absl::Status HandleAllReduce(HloInstruction* crs) override {
if (crs->channel_id().has_value()) {
TF_RET_CHECK(crs->channel_id().value() > 0)
<< "All reduce channel id must be greater than 0 for "
<< crs->ToShortString();
}
return absl::OkStatus();
}
absl::Status HandleReshape(HloInstruction* hlo) override {
if (opts_.verify_reshape_is_bitcast && !hlo->IsFused()) {
TF_RET_CHECK(
ShapeUtil::ReshapeIsBitcast(hlo->operand(0)->shape(), hlo->shape()))
<< "Reshape should be a physical bitcast, got: " << hlo->ToString();
}
return absl::OkStatus();
}
absl::Status HandleCustomCall(HloInstruction* hlo) override {
if (opts_.verify_custom_call_nested_computation_thread_name) {
return CheckCallableInstructionThreadName(
hlo, true);
}
return absl::OkStatus();
}
absl::Status HandleScatter(HloInstruction* scatter) override {
int64_t rank = scatter->operand(0)->shape().rank();
for (int64_t operand_dim :
scatter->scatter_dimension_numbers().scatter_dims_to_operand_dims()) {
if (operand_dim > rank) {
return absl::OutOfRangeError(absl::StrCat(
"The provided scatter_dims_to_operand_dim was out of range.",
" (operand_dim: ", operand_dim, ", rank: ", rank, ")"));
}
}
return absl::OkStatus();
}
absl::Status Preprocess(HloInstruction* instruction) override {
auto [it, inserted] =
instructions_by_name_.emplace(instruction->name(), instruction);
TF_RET_CHECK(inserted) << "HLO has name that is not unique within module:\n"
<< instruction->ToString() << " in computation: "
<< instruction->parent()->name()
<< "\nPrevious HLO with same name:\n"
<< it->second->ToString() << " in computation: "
<< it->second->parent()->name();
if (instruction->has_sharding()) {
absl::Status status =
instruction->sharding().Validate(instruction->shape(), num_devices_);
if (!status.ok()) {
return absl::Status(
status.code(),
absl::StrCat("Invalid sharding for instruction: ",
instruction->ToString(), ": ", status.message()));
}
}
if (instruction->has_to_apply() &&
instruction->to_apply()->execution_thread() !=
instruction->parent()->execution_thread()) {
return Internal(
"%s top_apply computation execution thread does not match (%s vs %s)",
instruction->name(), instruction->to_apply()->execution_thread(),
instruction->parent()->execution_thread());
}
return absl::OkStatus();
}
absl::Status Postprocess(HloInstruction* instruction) override {
if (!opts_.InstructionCanChangeLayout(instruction) &&
LayoutUtil::IsDenseArray(instruction->shape()) &&
instruction->shape().has_layout()) {
const Shape& result_shape = instruction->shape();
const Layout& result_layout = result_shape.layout();
for (HloInstruction* operand : instruction->operands()) {
const Shape& operand_shape = operand->shape();
if (LayoutUtil::IsDenseArray(operand_shape) &&
operand_shape.rank() == result_shape.rank() &&
operand_shape.has_layout()) {
const Layout& operand_layout = operand_shape.layout();
Layout::Equal equal_predicate =
Layout::Equal().IgnoreTiles().IgnoreMemorySpace();
if (instruction->opcode() == HloOpcode::kConvert ||
instruction->opcode() == HloOpcode::kCompare ||
(instruction->opcode() == HloOpcode::kSelect &&
operand_shape.element_type() == PRED)) {
equal_predicate.IgnoreElementSize();
} else if (instruction->opcode() == HloOpcode::kDynamicSlice ||
instruction->opcode() == HloOpcode::kDynamicUpdateSlice ||
instruction->opcode() == HloOpcode::kCopy) {
TF_RETURN_IF_ERROR(HostOffloadInstructionCanChangeMemorySpace(
instruction, operand_layout.memory_space(),
result_layout.memory_space()));
equal_predicate.IgnoreMemorySpace();
}
TF_RET_CHECK(equal_predicate(result_layout, operand_layout))
<< "Instruction shouldn't change layouts "
<< instruction->ToString() << " From " << result_shape << " To "
<< operand_shape;
}
}
}
return absl::OkStatus();
}
private:
static absl::Status VerifyConsistentSharding(
const HloInstruction* parent,
absl::Span<const HloInstruction* const> instructions) {
const HloInstruction* common_sharding_inst = nullptr;
for (const HloInstruction* check_inst : instructions) {
if (!check_inst->has_sharding()) {
continue;
}
if (!common_sharding_inst) {
common_sharding_inst = check_inst;
continue;
}
TF_RET_CHECK(check_inst->sharding() == common_sharding_inst->sharding())
<< "Inconsistent " << parent->opcode()
<< " sharding among instructions: \n"
<< common_sharding_inst->ToString() << "\n"
<< check_inst->ToString();
}
return absl::OkStatus();
}
static absl::Status HostOffloadInstructionCanChangeMemorySpace(
const HloInstruction* instruction, const int64_t operand_memory_space,
const int64_t result_memory_space) {
TF_RET_CHECK(!(operand_memory_space == Layout::kGenericFastMemorySpace &&
result_memory_space != Layout::kGenericFastMemorySpace) ||
(operand_memory_space != Layout::kGenericFastMemorySpace &&
result_memory_space == Layout::kGenericFastMemorySpace))
<< "Instruction shouldn't change layout memory space between generic "
"fast memory space and others for instruction: "
<< instruction->ToString();
if (instruction->opcode() == HloOpcode::kDynamicSlice) {
TF_RET_CHECK(!(operand_memory_space == Layout::kDefaultMemorySpace &&
result_memory_space == Layout::kHostMemorySpace))
<< "DynamicSlice instruction shouldn't change layout memory "
<< "space from device to host: " << instruction->ToString();
} else if (instruction->opcode() == HloOpcode::kDynamicUpdateSlice) {
TF_RET_CHECK(!(operand_memory_space == Layout::kHostMemorySpace &&
result_memory_space == Layout::kDefaultMemorySpace))
<< "DynamicUpdateSlice instruction shouldn't change layout "
<< "memory space from host to device: " << instruction->ToString();
} else if (instruction->opcode() != HloOpcode::kCopy) {
return absl::InvalidArgumentError(
absl::StrCat("Instruction shouldn't change layout memory space: ",
instruction->ToString()));
}
return absl::OkStatus();
}
absl::flat_hash_map<std::string, const HloInstruction*> instructions_by_name_;
const HloVerifierOpts& opts_;
std::optional<int64_t> num_devices_;
};
}
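// Entry point of the pass: unless the verifier is listed in
// xla_disable_hlo_passes, runs the structural, asynchronous-pair, channel,
// name, shape, entry-layout, schedule, and aliasing checks over the module.
// The verifier never mutates the module, so it reports "changed" as false;
// on failure the configured context string is prepended to the error.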
absl::StatusOr<bool> HloVerifier::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
auto disabled = module->config().debug_options().xla_disable_hlo_passes();
if (std::find(disabled.begin(), disabled.end(), name()) != disabled.end()) {
return false;
}
auto status_or_changed = [&]() -> absl::StatusOr<bool> {
TF_RET_CHECK(!module->name().empty());
if (module->entry_computation()->IsFusionComputation()) {
return InvalidArgument(
"Module entry computation cannot be a fusion computation");
}
TF_RETURN_IF_ERROR(VerifyHloStructure(module));
TF_RETURN_IF_ERROR(VerifyAsynchronousInstructionPairs(*module));
TF_RETURN_IF_ERROR(
VerifyChannels(*module, target_metadata_->GetVerifierOpts()));
TF_RETURN_IF_ERROR(VerifyInstructionNameUnchanged(
*module, target_metadata_->GetVerifierOpts()));
std::unique_ptr<ShapeVerifier> shape_verifier =
target_metadata_->GetVerifier();
InstructionVerifier instruction_verifier(
module, target_metadata_->GetVerifierOpts());
for (auto* computation : module->computations(execution_threads)) {
TF_RETURN_IF_ERROR(computation->Accept(shape_verifier.get()));
TF_RETURN_IF_ERROR(computation->Accept(&instruction_verifier));
if (computation->IsAsyncComputation()) {
TF_RETURN_IF_ERROR(VerifyAsyncComputation(computation));
}
}
TF_RETURN_IF_ERROR(shape_verifier->VerifyEntryComputationLayout(*module));
if (module->has_schedule()) {
TF_RETURN_IF_ERROR(module->schedule().Verify());
}
if (HloInstruction::IsThreadIncluded(
module->entry_computation()->execution_thread(),
execution_threads)) {
TF_RETURN_IF_ERROR(module->input_output_alias_config().Verify(
*module, [this](const Shape& shape) -> int64_t {
if (target_metadata_->GetVerifierOpts().IsLayoutSensitive()) {
return target_metadata_->GetVerifierOpts().ShapeSize(shape);
} else {
return 0;
}
}));
}
TF_RETURN_IF_ERROR(module->buffer_donor_config().Verify(*module));
TF_RETURN_IF_ERROR(VerifyLayoutConstrainedAllReduce(*module));
return false;
}();
if (status_or_changed.ok()) {
return status_or_changed.value();
}
return absl::Status(status_or_changed.status().code(),
absl::StrCat("during context [", context_, "]: ",
status_or_changed.status().message()));
}
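// MetadataTracker accumulates coverage statistics for OpMetadata fields over
// all visited instructions and logs them, prefixed with `prefix_`, when it is
// destroyed.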
MetadataTracker::MetadataTracker(absl::string_view prefix) : prefix_(prefix) {}
MetadataTracker::~MetadataTracker() {
if (instruction_count_ == 0) {
return;
}
const std::map<std::string, double> values = {
{"instruction_count", 1.0 * instruction_count_},
{"op_type_coverage", 1.0 * has_op_type_count_ / instruction_count_},
{"op_name_coverage", 1.0 * has_op_name_count_ / instruction_count_},
{"source_file_coverage",
1.0 * has_source_file_count_ / instruction_count_},
{"dummy_source_file_coverage",
1.0 * has_dummy_source_file_count_ / instruction_count_},
{"source_line_coverage",
1.0 * has_source_line_count_ / instruction_count_},
{"creation_pass_coverage",
1.0 * has_creation_pass_id_count_ / instruction_count_},
{"logical_creation_pass_coverage",
1.0 * has_logical_creation_pass_id_count_ / instruction_count_},
{"size_of_generated_code_in_bytes_coverage",
1.0 * has_size_of_generated_code_in_bytes_count_ / instruction_count_},
{"size_of_memory_working_set_in_bytes_coverage",
1.0 * has_size_of_memory_working_set_in_bytes_count_ /
instruction_count_},
{"profile_info_coverage",
1.0 * has_profile_info_count_ / instruction_count_}};
LOG(INFO) << prefix_ << " "
<< absl::StrJoin(values, ",", absl::PairFormatter("="));
}
void MetadataTracker::HandleMetadata(const OpMetadata& metadata) {
++instruction_count_;
if (!metadata.op_type().empty()) {
++has_op_type_count_;
}
if (!metadata.op_name().empty()) {
++has_op_name_count_;
}
if (!metadata.source_file().empty()) {
++has_source_file_count_;
if (absl::StrContains(metadata.source_file(), "dummy")) {
++has_dummy_source_file_count_;
}
}
if (metadata.source_line() != 0) {
++has_source_line_count_;
}
if (metadata.size_of_generated_code_in_bytes() != 0) {
++has_size_of_generated_code_in_bytes_count_;
}
if (metadata.size_of_memory_working_set_in_bytes() != 0) {
++has_size_of_memory_working_set_in_bytes_count_;
}
if (metadata.has_profile_info()) {
++has_profile_info_count_;
}
}
absl::Status MetadataTracker::DefaultAction(HloInstruction* instruction) {
HandleMetadata(instruction->metadata());
return absl::OkStatus();
}
} | #include "xla/service/hlo_verifier.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/log_severity.h"
#include "absl/log/scoped_mock_log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/layout_assignment.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/platform.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
using ::testing::HasSubstr;
std::unique_ptr<HloModule> CreateUnverifiedModule() {
return std::make_unique<HloModule>("module", HloModuleConfig());
}
class HloVerifierTest : public HloTestBase {
public:
HloVerifierTest()
: HloTestBase(false,
false) {}
};
class HloVerifierTestAllowMixedPrecision : public HloTestBase {
public:
HloVerifierTestAllowMixedPrecision()
: HloTestBase(false,
true) {}
};
class HloVerifierTestLayoutSensitive : public HloTestBase {
public:
HloVerifierTestLayoutSensitive()
: HloTestBase(true,
false,
LayoutAssignment::InstructionCanChangeLayout) {}
};
class HloVerifierTestLayoutSensitiveAndAllowMixedPrecision
: public HloTestBase {
public:
HloVerifierTestLayoutSensitiveAndAllowMixedPrecision()
: HloTestBase(true,
true,
LayoutAssignment::InstructionCanChangeLayout) {}
};
class HloVerifierTestLayoutFusion : public HloTestBase {
public:
HloVerifierTestLayoutFusion()
: HloTestBase(true,
false) {}
};
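// The fixtures above construct HloTestBase with different verifier settings:
// layout sensitivity, mixed-precision tolerance, and, for some fixtures,
// LayoutAssignment::InstructionCanChangeLayout as the layout-change
// predicate.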
TEST_F(HloVerifierTest, NullInstructionParent) {
HloComputation::Builder builder(TestName());
const Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "param"));
HloInstruction* negate = builder.AddInstruction(
HloInstruction::CreateUnary(scalar_shape, HloOpcode::kNegate, param));
auto module = CreateUnverifiedModule();
module->AddEntryComputation(builder.Build());
TF_ASSERT_OK(verifier().Run(module.get()).status());
negate->set_parent(nullptr);
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("has a null parent pointer"));
}
TEST_F(HloVerifierTest, NullComputationParent) {
HloComputation::Builder builder(TestName());
const Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "param"));
builder.AddInstruction(
HloInstruction::CreateUnary(scalar_shape, HloOpcode::kNegate, param));
auto module = CreateUnverifiedModule();
HloComputation* computation = module->AddEntryComputation(builder.Build());
TF_ASSERT_OK(verifier().Run(module.get()).status());
computation->set_parent(nullptr);
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("has a null parent pointer"));
}
TEST_F(HloVerifierTest, DifferentOperandParents) {
HloComputation::Builder builder(TestName());
const Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "param"));
HloInstruction* negate = builder.AddInstruction(
HloInstruction::CreateUnary(scalar_shape, HloOpcode::kNegate, param));
auto module = CreateUnverifiedModule();
module->AddEntryComputation(builder.Build());
HloComputation::Builder emb_builder(TestName());
HloInstruction* emb_param = emb_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "param"));
module->AddEmbeddedComputation(emb_builder.Build());
TF_ASSERT_OK(verifier().Run(module.get()).status());
TF_ASSERT_OK(negate->ReplaceOperandWith(0, emb_param));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("is in a different computation"));
}
TEST_F(HloVerifierTest, ResetsShapeVerifierState) {
HloComputation::Builder builder(TestName());
Shape s1 = ShapeUtil::MakeShape(F32, {1});
Shape s2 = ShapeUtil::MakeShape(F32, {2});
HloInstruction* param =
builder.AddInstruction(HloInstruction::CreateParameter(0, s1, "param"));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(s2, HloOpcode::kAdd, param, param));
builder.AddInstruction(
HloInstruction::CreateBinary(s2, HloOpcode::kMultiply, add, add));
auto module = CreateUnverifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_FALSE(verifier().Run(module.get()).status().ok());
EXPECT_FALSE(verifier().Run(module.get()).status().ok());
}
TEST_F(HloVerifierTest, CheckCallOperandParameterShapesMismatch) {
const char* const hlo_string = R"(
HloModule Module
callme {
ROOT param = (s32[], f32[4]) parameter(0)
}
ENTRY entry {
p0 = (f32[4], s32[]) parameter(0)
ROOT mycall = (s32[], f32[4]) call(p0), to_apply=callme
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("shape does not match parameter"));
}
TEST_F(HloVerifierTest, CheckCallThreadMismatch) {
constexpr absl::string_view hlo = R"(
HloModule Module
callme {
ROOT param = (s32[], f32[4]) parameter(0)
}, execution_thread="parallel_thread"
ENTRY entry {
p0 = (s32[], f32[4]) parameter(0)
ROOT mycall = (s32[], f32[4]) call(p0), to_apply=callme
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("mycall top_apply computation execution thread does "
"not match (parallel_thread vs main)"));
}
TEST_F(HloVerifierTest, CompositeCall) {
constexpr absl::string_view hlo = R"(
HloModule Module
add_n {
x = f32[] parameter(0)
constant = f32[] constant(2)
ROOT z = f32[] add(f32[] x, f32[] constant)
}
ENTRY entry {
constant = f32[] constant(42)
ROOT mycall = f32[] call(constant), is_composite=true, to_apply=add_n, frontend_attributes={composite.name="foo.bar",composite.attributes={n = 1 : i32, tensor = dense<1> : tensor<i32>},composite.version="1"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = verifier().Run(module.get()).status();
EXPECT_TRUE(status.ok());
}
TEST_F(HloVerifierTest, CompositeCallMissingFrontendAttributes) {
constexpr absl::string_view hlo = R"(
HloModule Module
add_n {
x = f32[] parameter(0)
constant = f32[] constant(2)
ROOT z = f32[] add(f32[] x, f32[] constant)
}
ENTRY entry {
constant = f32[] constant(42)
ROOT mycall = f32[] call(constant), is_composite=true, to_apply=add_n
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("A composite call op must have frontend attributes"));
}
TEST_F(HloVerifierTest, CompositeCallOptionalAttributesAndVersion) {
constexpr absl::string_view hlo = R"(
HloModule Module
add_n {
x = f32[] parameter(0)
constant = f32[] constant(2)
ROOT z = f32[] add(f32[] x, f32[] constant)
}
ENTRY entry {
constant = f32[] constant(42)
ROOT mycall = f32[] call(constant), is_composite=true, to_apply=add_n, frontend_attributes={composite.name="foo.bar"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = verifier().Run(module.get()).status();
EXPECT_TRUE(status.ok());
}
TEST_F(HloVerifierTest, CompositeCallOptionalAttributes) {
constexpr absl::string_view hlo = R"(
HloModule Module
add_n {
x = f32[] parameter(0)
constant = f32[] constant(2)
ROOT z = f32[] add(f32[] x, f32[] constant)
}
ENTRY entry {
constant = f32[] constant(42)
ROOT mycall = f32[] call(constant), is_composite=true, to_apply=add_n, frontend_attributes={composite.name="foo.bar",composite.version="1"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = verifier().Run(module.get()).status();
EXPECT_TRUE(status.ok());
}
TEST_F(HloVerifierTest, CompositeCallMissingName) {
constexpr absl::string_view hlo = R"(
HloModule Module
add_n {
x = f32[] parameter(0)
constant = f32[] constant(2)
ROOT z = f32[] add(f32[] x, f32[] constant)
}
ENTRY entry {
constant = f32[] constant(42)
ROOT mycall = f32[] call(constant), is_composite=true, to_apply=add_n, frontend_attributes={composite.attributes={n = 1 : i32, tensor = dense<1> : tensor<i32>},composite.version="1"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("A composite call op must have frontend attributes "
"with key composite.name whose value is non-empty"));
}
TEST_F(HloVerifierTest, CompositeCallOptionalVersion) {
constexpr absl::string_view hlo = R"(
HloModule Module
add_n {
x = f32[] parameter(0)
constant = f32[] constant(2)
ROOT z = f32[] add(f32[] x, f32[] constant)
}
ENTRY entry {
constant = f32[] constant(42)
ROOT mycall = f32[] call(constant), is_composite=true, to_apply=add_n, frontend_attributes={composite.attributes={n = 1 : i32, tensor = dense<1> : tensor<i32>},composite.name="foo.bar"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = verifier().Run(module.get()).status();
EXPECT_TRUE(status.ok());
}
TEST_F(HloVerifierTest, CompositeCallNonNegativeVersion) {
constexpr absl::string_view hlo = R"(
HloModule Module
add_n {
x = f32[] parameter(0)
constant = f32[] constant(2)
ROOT z = f32[] add(f32[] x, f32[] constant)
}
ENTRY entry {
constant = f32[] constant(42)
ROOT mycall = f32[] call(constant), is_composite=true, to_apply=add_n, frontend_attributes={composite.attributes={n = 1 : i32, tensor = dense<1> : tensor<i32>},composite.name="foo.bar",composite.version="-1"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr("A composite call op must have frontend attributes with a "
"composite.version whose value is a non-negative integer"));
}
TEST_F(HloVerifierTest, CheckConditionalOperandParameterShapesMismatch) {
const char* const hlo_string = R"(
HloModule Module
true_branch {
tparam = (s32[], f32[4]) parameter(0)
ROOT tgte1 = f32[4] get-tuple-element(tparam), index=1
}
false_branch {
fparam = (s32[], f32[4]) parameter(0)
ROOT fgte1 = f32[4] get-tuple-element(fparam), index=1
}
ENTRY entry {
p0 = (f32[4], s32[]) parameter(0)
constant = pred[] constant(true)
ROOT conditional = f32[4] conditional(constant, p0, p0),
true_computation=true_branch, false_computation=false_branch
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("shape does not match parameter"));
}
TEST_F(HloVerifierTest, CheckConditionalBranchIndexOperandShape) {
const char* const hlo_string = R"(
HloModule Module
branch0 {
tparam = f32[4] parameter(0)
ROOT tgte1 = f32[4] ceil(tparam)
}
branch1 {
fparam = f32[4] parameter(0)
ROOT fgte1 = f32[4] floor(fparam)
}
branch2 {
sparam = f32[4] parameter(0)
ROOT sgte1 = f32[4] ceil(sparam)
}
ENTRY entry {
p0 = f32[4] parameter(0)
b0 = s32[] parameter(1)
ROOT conditional = f32[4] conditional(b0, p0, p0, p0),
branch_computations={branch0, branch1, branch2}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
HloInstruction* condition = FindInstruction(module.get(), "b0");
*condition->mutable_shape() = ShapeUtil::MakeShape(F32, {});
status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr(
"first operand of indexed conditional must be a scalar of S32"));
*condition->mutable_shape() = ShapeUtil::MakeShape(S32, {4});
status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("first operand of conditional must be a scalar"));
}
TEST_F(HloVerifierTest, CheckConditionalBranchThread) {
const char* const hlo_string = R"(
HloModule Module
branch0 {
tparam = f32[4] parameter(0)
ROOT tgte1 = f32[4] ceil(tparam)
}
branch1 {
fparam = f32[4] parameter(0)
ROOT fgte1 = f32[4] floor(fparam)
}, execution_thread="parallel_thread"
branch2 {
sparam = f32[4] parameter(0)
ROOT sgte1 = f32[4] ceil(sparam)
}
ENTRY entry {
p0 = f32[4] parameter(0)
b0 = s32[] parameter(1)
ROOT conditional = f32[4] conditional(b0, p0, p0, p0),
branch_computations={branch0, branch1, branch2}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
EXPECT_THAT(status.message(),
HasSubstr("expects parent computation thread name same as called "
"computation's thread name"));
}
TEST_F(HloVerifierTest, CheckConditionalBranchContainsAsyncThread) {
const char* const hlo_string = R"(
HloModule Module
branch0 {
tparam = f32[4] parameter(0)
ROOT tgte1 = f32[4] ceil(tparam)
}
branch1 {
fparam = f32[4] parameter(0)
%async-start = ((f32[4]), f32[4], s32[]) custom-call-start(f32[4] fparam), async_execution_thread="parallel_thread", custom_call_target="foo"
ROOT %async-done = f32[4] custom-call-done(((f32[4]), f32[4], s32[]) %async-start)
}
branch2 {
sparam = f32[4] parameter(0)
ROOT sgte1 = f32[4] ceil(sparam)
}
ENTRY entry {
p0 = f32[4] parameter(0)
b0 = s32[] parameter(1)
ROOT conditional = f32[4] conditional(b0, p0, p0, p0),
branch_computations={branch0, branch1, branch2}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
TF_ASSERT_OK(verifier().Run(module.get()).status());
}
TEST_F(HloVerifierTest, RngOpnd0NotScalar) {
const char* const hlo_string = R"(
HloModule Module
ENTRY RngOpnd0NotScalar {
constant.0 = f32[] constant(0)
constant.1 = f16[2] constant({1, 3})
ROOT rng.0 = f32[10]{0} rng(f32[] constant.0, f16[2] constant.1),
distribution=rng_uniform
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("Expected scalar type"));
}
TEST_F(HloVerifierTest, RngOperandElementTypesDoNotMatch) {
const char* const hlo_string = R"(
HloModule Module
ENTRY RngOperandElementTypesNotMatch {
constant.0 = f32[] constant(0)
constant.1 = f16[] constant(1)
ROOT rng.0 = f32[10]{0} rng(f32[] constant.0, f16[] constant.1),
distribution=rng_normal
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("Expected compatible element types"));
}
TEST_F(HloVerifierTest, RngMixedPrecisionNotAllowed) {
const char* const hlo_string = R"(
HloModule Module
ENTRY RngResultElementTypeNotMatch {
constant.0 = f32[] constant(0)
constant.1 = f32[] constant(1)
ROOT rng.0 = f16[10]{0} rng(f32[] constant.0, f32[] constant.1),
distribution=rng_normal
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("Expected compatible element types"));
}
TEST_F(HloVerifierTestAllowMixedPrecision, RngMixedPrecisionAllowed) {
const char* const hlo_string = R"(
HloModule Module
ENTRY RngResultElementTypeNotMatch {
constant.0 = f32[] constant(0)
constant.1 = f32[] constant(1)
ROOT rng.0 = f16[10]{0} rng(f32[] constant.0, f32[] constant.1),
distribution=rng_normal
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTest, RngElementTypeNotSupported) {
const char* const hlo_string = R"(
HloModule Module
ENTRY RngElementTypeNotSupported {
constant.0 = s32[] constant(0)
constant.1 = s32[] constant(1)
ROOT rng.0 = s32[10]{0} rng(s32[] constant.0, s32[] constant.1),
distribution=rng_normal
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("Element type not supported"));
}
TEST_F(HloVerifierTest, NegativeInteriorPaddingNotAllowed) {
HloComputation::Builder builder(TestName());
HloInstruction* param =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {100}), "param"));
PaddingConfig padding_config;
padding_config.add_dimensions()->set_interior_padding(-1);
builder.AddInstruction(HloInstruction::CreatePad(
ShapeUtil::MakeShape(F32, {100}), param,
builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(F32))),
padding_config));
auto module = CreateUnverifiedModule();
module->AddEntryComputation(builder.Build());
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Interior padding cannot be negative"));
}
TEST_F(HloVerifierTest, PadNegativeInteriorDilationNotAllowed) {
HloComputation::Builder builder(TestName());
HloInstruction* param =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {100}), "param"));
PaddingConfig padding_config;
padding_config.add_dimensions()->set_interior_padding(-1);
builder.AddInstruction(HloInstruction::CreatePad(
ShapeUtil::MakeShape(F32, {100}), param,
builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(F32).Clone())),
padding_config));
auto module = CreateUnverifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("Interior padding cannot be negative"));
}
TEST_F(HloVerifierTest, DotMixedPrecisionAllowed) {
static const char* const kDotHloString = R"(
HloModule module
ENTRY entry_computation {
a = f32[2,10] parameter(0)
b = bf16[10,2] parameter(1)
ROOT dot = f32[2,2] dot(a, b), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kDotHloString));
auto status = verifier().Run(module.get()).status();
EXPECT_TRUE(status.ok()) << status;
}
static const char* const kConvHloString = R"(
HloModule module
ENTRY entry_computation {
param0 = f16[128,128,56,56] parameter(0)
param1 = f16[3,3,128,128] parameter(1)
zero_f16 = f16[] constant(0)
ROOT conv = f16[128,128,28,28] convolution(param0, param1),
window={size=3x3 stride=2x2}, dim_labels=bf01_01io->bf01
})";
TEST_F(HloVerifierTest, ConvNegativeWindowDilationNotAllowed) {
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kConvHloString));
auto* conv = module->entry_computation()->root_instruction();
Window w = conv->window();
w.mutable_dimensions(0)->set_window_dilation(-1);
conv->set_window(w);
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("non-positive window dilation factor"));
}
TEST_F(HloVerifierTest, ConvNegativeBaseDilationNotAllowed) {
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kConvHloString));
auto* conv = module->entry_computation()->root_instruction();
Window w = conv->window();
w.mutable_dimensions(0)->set_base_dilation(-1);
conv->set_window(w);
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("non-positive base area dilation factor"));
}
static const char* const kAddWithLayoutChangeHlo = R"(
HloModule AddWithLayoutChange
ENTRY AddWithLayoutChange {
par0 = f32[3,4]{1,0} parameter(0)
par1 = f32[3,4]{0,1} parameter(1)
ROOT add0 = f32[3,4]{1,0} add(par0,par1)
}
)";
TEST_F(HloVerifierTest, AddWithLayoutChange) {
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndReturnVerifiedModule(kAddWithLayoutChangeHlo));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTest, ScalarIndexDynamicSlice) {
const char* const kScalarIndexDynamicSlice = R"(
HloModule DynamicSlice_module
ENTRY %DynamicSlice.v5 (original_parameter: s32[2,2,258], start_index: s32[]) -> s32[2,2,258] {
%original_parameter = s32[2,2,258] parameter(0)
%constant = s32[] constant(0)
%start_index = s32[] parameter(1)
ROOT %dynamic-slice = s32[2,2,258] dynamic-slice(s32[2,2,258] %original_parameter, s32[] %constant, s32[] %constant, s32[] %start_index), dynamic_slice_sizes={2,2,258}
}
)";
HloModuleConfig config;
DebugOptions debug_options = config.debug_options();
debug_options.set_xla_allow_scalar_index_dynamic_ops(true);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
kScalarIndexDynamicSlice, config));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTest, ScalarIndexDynamicUpdateSlice) {
const char* const kScalarIndexDynamicSlice = R"(
HloModule DynamicUpdateSlice_module
ENTRY %DynamicUpdateSlice.v4 (input: s32[1,1,25,1], update: s32[1,1,2,1], start_index.0: s32[], start_index.1: s32[], start_index.2: s32[], start_index.3: s32[]) -> s32[1,1,25,1] {
%input = s32[1,1,25,1]{3,2,1,0} parameter(0)
%update = s32[1,1,2,1]{3,2,1,0} parameter(1)
%start_index.0 = s32[] parameter(2)
%start_index.1 = s32[] parameter(3)
%start_index.2 = s32[] parameter(4)
%start_index.3 = s32[] parameter(5)
ROOT %dynamic-update-slice = s32[1,1,25,1]{3,2,1,0} dynamic-update-slice(s32[1,1,25,1]{3,2,1,0} %input, s32[1,1,2,1]{3,2,1,0} %update, s32[] %start_index.0, s32[] %start_index.1, s32[] %start_index.2, s32[] %start_index.3)
}
)";
HloModuleConfig config;
DebugOptions debug_options = config.debug_options();
debug_options.set_xla_allow_scalar_index_dynamic_ops(true);
config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
kScalarIndexDynamicSlice, config));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTestAllowMixedPrecision, DynamicUpdateSliceMixedPrecision) {
const char* const kDynamicUpdateSliceMixedPrecision = R"(
HloModule kDynamicUpdateSliceMixedPrecision
ENTRY %entry (parameter.0: f32[32,511,2048], parameter.1: bf16[32,511,512], parameter.2: s32[], parameter.3: s32[], parameter.4: s32[]) -> bf16[32,511,2048] {
%parameter.0 = f32[32,511,2048] parameter(0)
%parameter.1 = bf16[32,511,512] parameter(1)
%parameter.2 = s32[] parameter(2)
%parameter.3 = s32[] parameter(3)
%parameter.4 = s32[] parameter(4)
ROOT %dus = bf16[32,511,2048] dynamic-update-slice(f32[32,511,2048] %parameter.0, bf16[32,511,512] %parameter.1, s32[] %parameter.2, s32[] %parameter.3, s32[] %parameter.4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(
kDynamicUpdateSliceMixedPrecision));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Expected instruction to have shape equal to "
"f32[32,511,2048], actual shape is bf16[32,511,2048]"));
}
TEST_F(HloVerifierTestLayoutSensitive, AddWithLayoutChangeNotAllowed) {
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndReturnUnverifiedModule(kAddWithLayoutChangeHlo));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Instruction shouldn't change layouts"));
}
TEST_F(HloVerifierTestLayoutSensitive, SliceWithLayoutChangeNotAllowed) {
const char* const kSliceWithLayoutChangeHlo = R"(
HloModule SliceWithLayoutChange
ENTRY SliceWithLayoutChange {
par0 = f32[4,5]{0,1} parameter(0)
par1 = s32[] parameter(1)
par2 = s32[] parameter(2)
ROOT dslice0 = f32[3,4]{1,0} dynamic-slice(par0, par1, par2),
dynamic_slice_sizes={3,4}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndReturnUnverifiedModule(kSliceWithLayoutChangeHlo));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Instruction shouldn't change layouts"));
}
TEST_F(HloVerifierTestLayoutSensitive, ConcatWithLayoutChangeNotAllowed) {
const char* const kConcatWithLayoutChangeHlo = R"(
HloModule ConcatWithLayoutChange
ENTRY ConcatWithLayoutChange {
par0 = f32[3,5]{0,1} parameter(0)
par1 = f32[3,3]{1,0} parameter(1)
ROOT concat0 = f32[3,8]{1,0} concatenate(f32[3,5] par0, f32[3,3] par1),
dimensions={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndReturnUnverifiedModule(kConcatWithLayoutChangeHlo));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Instruction shouldn't change layouts"));
}
TEST_F(HloVerifierTestLayoutSensitive, BitcastNeedsSameNumberOfElements) {
const char* const hlo_string = R"(
HloModule Module
ENTRY BitcastNeedsToBeNoOp {
constant.0 = f32[2] constant({0.0, 0.0})
ROOT bitcast = f32[3] bitcast(constant.0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Bitcast cannot have different shape sizes of output "
"(12) and operand (8)"));
}
TEST_F(HloVerifierTest, SelectMixedPrecisionNotAllowed) {
const char* const hlo_string = R"(
HloModule Module
ENTRY SelectMixedPrecisionNotAllowed {
p0 = pred[32] parameter(0)
p1 = f32[32] parameter(1)
p2 = bf16[32] parameter(2)
ROOT select = f32[32] select(p0, p1, p2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Seen floating point types of different precisions"));
}
TEST_F(HloVerifierTestAllowMixedPrecision, SelectMixedPrecisionAllowed) {
const char* const hlo_string = R"(
HloModule Module
ENTRY SelectMixedPrecisionAllowed {
p0 = pred[32] parameter(0)
p1 = f32[32] parameter(1)
p2 = bf16[32] parameter(2)
ROOT select = f32[32] select(p0, p1, p2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTest, SelectTupleNotAllowed) {
const char* const hlo_string = R"(
HloModule Module
ENTRY SelectWithTuple {
p0 = (f32[], f32[]) parameter(0)
p1 = (f32[], f32[]) parameter(1)
p2 = pred[] parameter(2)
ROOT select = (f32[], f32[]) select(p2, p0, p1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Expected array argument for select"));
}
TEST_F(HloVerifierTestLayoutSensitive, CopyStartAndCopyDone) {
const char* const hlo_string = R"(
HloModule Module
ENTRY CopyStartAndCopyDone {
p0 = f32[2,3]{1,0:S(1)} parameter(0)
copy-start = (f32[2,3]{1,0:S(2)}, f32[2,3]{1,0:S(1)}, u32[]) copy-start(p0)
ROOT copy-done = f32[2,3]{1,0:S(2)} copy-done(copy-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTestLayoutSensitive, CopyStartAndCopyDoneWrongLayout) {
const char* const hlo_string = R"(
HloModule Module
ENTRY CopyStartAndCopyDone {
p0 = f32[2,3]{1,0:S(1)} parameter(0)
copy-start = (f32[2,3]{0,1:S(2)}, f32[2,3]{1,0:S(1)}, u32[]) copy-start(p0)
ROOT copy-done = f32[2,3]{1,0:S(2)} copy-done(copy-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Expected instruction to have shape equal to"));
}
TEST_F(HloVerifierTest, CopyStartAndCopyDoneWrongType) {
const char* const hlo_string = R"(
HloModule Module
ENTRY CopyStartAndCopyDone {
p0 = f32[2,3] parameter(0)
copy-start = f32[2,3] copy-start(p0)
ROOT copy-done = f32[2,3] copy-done(copy-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Expected instruction to have shape equal to "
"(f32[2,3], f32[2,3], u32[])"));
}
TEST_F(HloVerifierTest, CopyStartMultipleCopyDone) {
const char* const hlo_string = R"(
HloModule Module
ENTRY CopyStartAndCopyDone {
p0 = f32[2,3] parameter(0)
copy-start = (f32[2,3], f32[2,3], u32[]) copy-start(p0)
copy-done.1 = f32[2,3] copy-done(copy-start)
copy-done.2 = f32[2,3] copy-done(copy-start)
ROOT tuple = (f32[2,3], f32[2,3]) tuple(copy-done.1, copy-done.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
absl::Status status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr("copy-start instruction requires one consumer, found 2"));
}
TEST_F(HloVerifierTest, CopyDoneNoCopyStart) {
const char* const hlo_string = R"(
HloModule Module
ENTRY CopyStartAndCopyDone {
p0 = f32[2,3] parameter(0)
p1 = u32[] parameter(1)
tuple = (f32[2,3], f32[2,3], u32[]) tuple(p0, p0, p1)
ROOT copy-done = f32[2,3] copy-done(tuple)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("The operand of a copy-done instruction needs to be "
"copy-start, found tuple"));
}
TEST_F(HloVerifierTestLayoutSensitive, AsyncStartAndAsyncDone) {
const char* const hlo_string = R"(
HloModule Module
ENTRY AsyncStartAndAsyncDone {
p0 = f32[2,3]{1,0:S(1)} parameter(0)
async-start = ((f32[2,3]{1,0:S(1)}), f32[2,3]{1,0:S(2)}, u32[]) custom-call-start(p0), custom_call_target="foo"
ROOT async-done = f32[2,3]{1,0:S(2)} custom-call-done(async-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTestLayoutSensitive, AsyncStartAndAsyncUpdateAndAsyncDone) {
const char* const hlo_string = R"(
HloModule Module
ENTRY AsyncStartAndAsyncUpdateAndAsyncDone {
p0 = f32[2,3]{1,0:S(1)} parameter(0)
async-start = ((f32[2,3]{1,0:S(1)}), f32[2,3]{1,0:S(2)}, u32[]) custom-call-start(p0), custom_call_target="foo"
async-update.1 = ((f32[2,3]{1,0:S(1)}), f32[2,3]{1,0:S(2)}, u32[]) custom-call-update(async-start)
async-update.2 = ((f32[2,3]{1,0:S(1)}), f32[2,3]{1,0:S(2)}, u32[]) custom-call-update(async-update.1)
ROOT async-done = f32[2,3]{1,0:S(2)} custom-call-done(async-update.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTestLayoutSensitive,
AsyncStartAndAsyncUpdateAndAsyncDoneWithThreadName) {
const char* const hlo_string = R"(
HloModule Module
ENTRY AsyncStartAndAsyncUpdateAndAsyncDone {
p0 = f32[2,3]{1,0:S(1)} parameter(0)
async-start = ((f32[2,3]{1,0:S(1)}), f32[2,3]{1,0:S(2)}, u32[]) custom-call-start(p0), async_execution_thread="parallel_thread", custom_call_target="foo"
async-update.1 = ((f32[2,3]{1,0:S(1)}), f32[2,3]{1,0:S(2)}, u32[]) custom-call-update(async-start)
async-update.2 = ((f32[2,3]{1,0:S(1)}), f32[2,3]{1,0:S(2)}, u32[]) custom-call-update(async-update.1)
ROOT async-done = f32[2,3]{1,0:S(2)} custom-call-done(async-update.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTest, AsyncStartAndAsyncDoneWrongType) {
const char* const hlo_string = R"(
HloModule Module
ENTRY AsyncStartAndAsyncDone {
p0 = f32[2,3] parameter(0)
async-start = ((f32[2,3]), f32[3,2], u32[]) custom-call-start(p0), custom_call_target="foo"
ROOT async-done = f32[2,3] custom-call-done(async-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("async-done expects the shape of output to match the "
"async shape at index {1}"));
}
TEST_F(HloVerifierTest, AsyncStartMultipleAsyncDone) {
const char* const hlo_string = R"(
HloModule Module
ENTRY AsyncStartAndAsyncDone {
p0 = f32[2,3] parameter(0)
async-start = ((f32[2,3]), f32[2,3], u32[]) custom-call-start(p0), custom_call_target="foo"
async-done.1 = f32[2,3] custom-call-done(async-start)
async-done.2 = f32[2,3] custom-call-done(async-start)
ROOT tuple = (f32[2,3], f32[2,3]) tuple(async-done.1, async-done.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr("async-start instruction requires one consumer, found 2"));
}
TEST_F(HloVerifierTest, AsyncStartNoAsyncDone) {
const char* const hlo_string = R"(
HloModule Module
ENTRY AsyncStartAndAsyncDone {
p0 = f32[2,3] parameter(0)
ROOT async-start = ((f32[2,3]), f32[2,3], u32[]) custom-call-start(p0), custom_call_target="foo"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr("async-start instruction requires one consumer, found 0"));
}
TEST_F(HloVerifierTest, AsyncStartAndAsyncUpdateNoAsyncDone) {
const char* const hlo_string = R"(
HloModule Module
ENTRY AsyncStartAndAsyncDone {
p0 = f32[2,3] parameter(0)
async-start = ((f32[2,3]), f32[2,3], u32[]) custom-call-start(p0), custom_call_target="foo"
ROOT async-update = ((f32[2,3]), f32[2,3], u32[]) custom-call-update(async-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr("async-update instruction requires one consumer, found 0"));
}
TEST_F(HloVerifierTest, AsyncDoneNoAsyncStart) {
const char* const hlo_string = R"(
HloModule Module
ENTRY AsyncDoneNoAsyncStart {
p0 = f32[2,3] parameter(0)
p1 = u32[] parameter(1)
tuple = ((f32[2,3]), f32[2,3], u32[]) tuple(p0, p0, p1)
async-start = ((f32[2,3]), f32[2,3], u32[]) custom-call-start(p0), custom_call_target="foo"
ROOT async-done = f32[2,3] custom-call-done(async-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
HloInstruction* tuple = FindInstruction(module.get(), "tuple");
HloInstruction* async_done = FindInstruction(module.get(), "async-done");
TF_ASSERT_OK(async_done->ReplaceOperandWith(0, tuple));
HloInstruction* async_start = FindInstruction(module.get(), "async-start");
HloComputation* computation =
FindComputation(module.get(), "AsyncDoneNoAsyncStart");
TF_ASSERT_OK(computation->RemoveInstruction(async_start));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("The operand of a async-done instruction needs to be "
"async-start or async-update, found tuple"));
}
TEST_F(HloVerifierTest, AsyncUpdateAndAsyncDoneNoAsyncStart) {
const char* const hlo_string = R"(
HloModule Module
ENTRY AsyncUpdateAndAsyncDoneNoAsyncStart {
p0 = f32[2,3] parameter(0)
p1 = u32[] parameter(1)
tuple = ((f32[2,3]), f32[2,3], u32[]) tuple(p0, p0, p1)
async-start = ((f32[2,3]), f32[2,3], u32[]) custom-call-start(p0), custom_call_target="foo"
async-update = ((f32[2,3]), f32[2,3], u32[]) custom-call-update(async-start)
ROOT async-done = f32[2,3] custom-call-done(async-update)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
HloInstruction* tuple = FindInstruction(module.get(), "tuple");
HloInstruction* async_update = FindInstruction(module.get(), "async-update");
TF_ASSERT_OK(async_update->ReplaceOperandWith(0, tuple));
HloInstruction* async_done = FindInstruction(module.get(), "async-done");
TF_ASSERT_OK(async_done->ReplaceOperandWith(0, tuple));
HloInstruction* async_start = FindInstruction(module.get(), "async-start");
HloComputation* computation =
FindComputation(module.get(), "AsyncUpdateAndAsyncDoneNoAsyncStart");
TF_ASSERT_OK(computation->RemoveInstruction(async_start));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("The operand of a async-update instruction needs to be "
"async-start or async-update, found tuple"));
}
TEST_F(HloVerifierTest, AsyncOpComputationParamWrongType) {
const char* const hlo_string = R"(
HloModule Module
async_computation {
p0 = f32[2,3] parameter(0)
ROOT p1 = f32[3,2] parameter(1)
}
ENTRY AsyncStartAndAsyncDone {
p0 = f32[2,3] parameter(0)
p1 = f32[3,2] parameter(1)
async-start = ((f32[3,2], f32[3,2]), f32[3,2], u32[]) async-start(p0, p1), calls=async_computation
ROOT async-done = f32[3,2] async-done(async-start), calls=async_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("async-start expects the async shape at index {0} to "
"match async computation parameter shape"));
}
TEST_F(HloVerifierTest, AsyncOpComputationRootWrongType) {
const char* const hlo_string = R"(
HloModule Module
async_computation {
p0 = f32[2,3] parameter(0)
ROOT p1 = f32[3,2] parameter(1)
}
ENTRY AsyncStartAndAsyncDone {
p0 = f32[2,3] parameter(0)
p1 = f32[3,2] parameter(1)
async-start = ((f32[2,3], f32[3,2]), f32[2,3], u32[]) async-start(p0, p1), calls=async_computation
ROOT async-done = f32[3,2] async-done(async-start), calls=async_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("async-start expects the async shape at index {1} to "
"match the async computation root shape"));
}
TEST_F(HloVerifierTest, AsyncOpTupleWrongType) {
const char* const hlo_string = R"(
HloModule Module
async_computation {
p = f32[2,3] parameter(0)
ROOT custom-call = f32[3,2] custom-call(p), custom_call_target="foo"
}
ENTRY AsyncStartAndAsyncDone {
p0 = f32[2,3] parameter(0)
async-start = ((f32[2,3]), f32[3,2], s32[]) async-start(p0), calls=async_computation
ROOT async-done = f32[3,2] async-done(async-start), calls=async_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
HloInstruction* async_start = FindInstruction(module.get(), "async-start");
async_start->mutable_shape()->clear_tuple_shapes();
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("async-start expects the async shape to be a tuple of "
"at least two elements"));
}
TEST_F(HloVerifierTest, AsyncStartOperandWrongType) {
const char* const hlo_string = R"(
HloModule Module
async_computation {
p = f32[2,3] parameter(0)
ROOT custom-call = f32[3,2] custom-call(p), custom_call_target="foo"
}
ENTRY AsyncStartAndAsyncDone {
p0 = f32[3,2] parameter(0)
async-start = ((f32[2,3]), f32[3,2], u32[]) async-start(p0), calls=async_computation
ROOT async-done = f32[3,2] async-done(async-start), calls=async_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("async-start expects the shape of operand 0 to match "
"the async shape at index {0}"));
}
TEST_F(HloVerifierTest, AsyncDoneOutputWrongType) {
const char* const hlo_string = R"(
HloModule Module
async_computation {
p = f32[2,3] parameter(0)
ROOT custom-call = f32[3,2] custom-call(p), custom_call_target="foo"
}
ENTRY AsyncStartAndAsyncDone {
p0 = f32[2,3] parameter(0)
async-start = ((f32[2,3]), f32[3,2], u32[]) async-start(p0), calls=async_computation
ROOT async-done = f32[2,3] async-done(async-start), calls=async_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("async-done expects the shape of output to match the "
"async shape at index {1}"));
}
TEST_F(HloVerifierTest, AsyncUpdateWrongType) {
const char* const hlo_string = R"(
HloModule Module
async_computation {
p = f32[2,3] parameter(0)
ROOT custom-call = f32[3,2] custom-call(p), custom_call_target="foo"
}
ENTRY AsyncStartAndAsyncDone {
p0 = f32[2,3] parameter(0)
async-start = ((f32[2,3]), f32[3,2], u32[]) async-start(p0), calls=async_computation
async-update = ((f32[3,2]), f32[3,2], u32[]) async-update(async-start), calls=async_computation
ROOT async-done = f32[3,2] async-done(async-update), calls=async_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr(
"async-update expects the shape of operand and output to match"));
}
TEST_F(HloVerifierTest, AsyncOpComputationNotTrivial) {
const char* const hlo_string = R"(
HloModule Module
async_computation {
p = f32[2,3] parameter(0)
copy = f32[2,3] copy(p)
ROOT custom-call = f32[3,2] custom-call(copy), custom_call_target="foo"
}
ENTRY AsyncStartAndAsyncDone {
p0 = f32[2,3] parameter(0)
async-start = ((f32[2,3]), f32[3,2], u32[]) async-start(p0), calls=async_computation
ROOT async-done = f32[3,2] async-done(async-start), calls=async_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr(
"expected to contain only the root and parameter instructions"));
}
TEST_F(HloVerifierTest, IotaNonArrayResult) {
const char* const hlo_string = R"(
HloModule IotaTupleResult
ENTRY kernelEntry {
ROOT iota = () iota(), iota_dimension=24
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("does not support non-array result"));
}
TEST_F(HloVerifierTest, IotaNegativeDimension) {
const char* const hlo_string = R"(
HloModule IotaNegativeDimension
ENTRY kernelEntry {
ROOT iota = s32[128,1001]{1,0} iota(), iota_dimension=-1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("negative"));
}
TEST_F(HloVerifierTest, IotaPredResultNotAllowed) {
const char* const hlo_string = R"(
HloModule IotaPredResult
ENTRY kernelEntry {
ROOT iota = pred[128] iota(), iota_dimension=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("got PRED"));
}
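// Map whose f64 operand does not match the f32 computation parameter: the
// strict verifier rejects it, while the mixed-precision verifier accepts it
// (see the two MapOperandComputationMismatch tests below).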
static const char* const kMapOperandComputationMismatchHlo = R"(
HloModule MapOperandComputationMismatch
Computation {
param0 = f32[] parameter(0)
constant = f32[] constant(1)
ROOT add = f32[] add(param0, constant)
}
ENTRY kernelEntry {
param = f64[] parameter(0)
ROOT map = f32[] map(param), dimensions={}, to_apply=Computation
})";
TEST_F(HloVerifierTest, MapOperandComputationMismatch) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(
kMapOperandComputationMismatchHlo));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr(
"Shape mismatch between to_apply computation parameter and operand"));
}
TEST_F(HloVerifierTestAllowMixedPrecision, MapOperandComputationMismatch) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
kMapOperandComputationMismatchHlo));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
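// Reduce whose f16 operands do not match the f32 reduction computation:
// rejected by the strict verifier, accepted when mixed precision is allowed.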
static const char* const kReduceOperandComputationMismatchHlo = R"(
HloModule ReduceOperandComputationMismatch
computation {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY kernelEntry {
arg0 = f16[64,64,224,224]{3,2,1,0} parameter(0)
constant = f16[] constant(0)
reduce = f16[64]{0} reduce(arg0, constant), dimensions={0,2,3}, to_apply=computation
})";
TEST_F(HloVerifierTest, ReduceOperandComputationMismatch) {
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnUnverifiedModule(kReduceOperandComputationMismatchHlo));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Expected instruction to have shape equal to f32[64]"));
}
TEST_F(HloVerifierTestAllowMixedPrecision, ReduceOperandComputationMismatch) {
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndReturnVerifiedModule(kReduceOperandComputationMismatchHlo));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
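// Renders replica groups as an HLO attribute string, e.g. "{{0,1}, {2,3}}".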
std::string ReplicaGroupsStr(std::vector<std::vector<int64_t>> replica_groups) {
std::vector<std::string> replica_group_strs;
replica_group_strs.reserve(replica_groups.size());
for (const auto& g : replica_groups) {
replica_group_strs.push_back(
absl::StrFormat("{%s}", absl::StrJoin(g, ",")));
}
return absl::StrFormat("{%s}", absl::StrJoin(replica_group_strs, ", "));
}
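// Returns the total number of replica ids named across all groups.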
int64_t ReplicaCount(const std::vector<std::vector<int64_t>>& replica_groups) {
int64_t replica_count = 0;
for (const auto& group : replica_groups) {
replica_count += group.size();
}
return replica_count;
}
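// Builds a module from `template_str`, substituting the REPLICA_GROUPS and
// OTHER_ATTRIBUTES placeholders and setting the replica/partition counts in
// the module config (the replica count defaults to the ids named in the
// groups; the partition count defaults to 1).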
absl::StatusOr<std::unique_ptr<HloModule>> MakeCollectiveCommOpComputation(
std::vector<std::vector<int64_t>> replica_groups,
std::optional<int64_t> replica_count, std::optional<int64_t> num_partitions,
absl::string_view other_attributes, absl::string_view template_str) {
HloModuleConfig config;
config.set_replica_count(
replica_count.value_or(ReplicaCount(replica_groups)));
config.set_num_partitions(num_partitions.value_or(1));
return ParseAndReturnUnverifiedModule(
absl::StrReplaceAll(
template_str,
{{"REPLICA_GROUPS", ReplicaGroupsStr(replica_groups)},
{"OTHER_ATTRIBUTES", other_attributes.empty()
? ""
: absl::StrCat(",", other_attributes)}}),
config);
}
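// Convenience wrapper producing an all-reduce module with the given replica
// groups and optional replica/partition counts and extra attributes.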
absl::StatusOr<std::unique_ptr<HloModule>> MakeAllReduceComputation(
std::vector<std::vector<int64_t>> replica_groups,
std::optional<int64_t> replica_count = std::nullopt,
std::optional<int64_t> num_partitions = std::nullopt,
absl::string_view other_attributes = "") {
const char* kTemplate = R"(
HloModule test
add {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry {
p = f32[128]{0} parameter(0)
crs = f32[128]{0} all-reduce(p), to_apply=add, replica_groups=REPLICA_GROUPS
OTHER_ATTRIBUTES
})";
return MakeCollectiveCommOpComputation(replica_groups, replica_count,
num_partitions, other_attributes,
kTemplate);
}
TEST_F(HloVerifierTest, AllReduce_NoReplicaGroupsOK) {
TF_ASSERT_OK_AND_ASSIGN(auto module, MakeAllReduceComputation({}));
TF_ASSERT_OK(verifier().Run(module.get()).status());
}
TEST_F(HloVerifierTest, AllReduce_DifferentGroupSizesOk) {
TF_ASSERT_OK_AND_ASSIGN(auto module,
MakeAllReduceComputation({{0}, {1, 3}, {2}}));
TF_ASSERT_OK(verifier().Run(module.get()).status());
}
TEST_F(HloVerifierTest, AllReduce_EmptyReplicaGroup) {
TF_ASSERT_OK_AND_ASSIGN(auto module, MakeAllReduceComputation({{0}, {}}));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("empty replica group"));
}
TEST_F(HloVerifierTest, AllReduce_RepeatedReplicaId) {
TF_ASSERT_OK_AND_ASSIGN(auto module,
MakeAllReduceComputation({{0, 1}, {2, 3}, {4, 0}}));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("Replica 0 is repeated"));
}
TEST_F(HloVerifierTest, AllReduce_MissingReplicaId) {
TF_ASSERT_OK_AND_ASSIGN(auto module,
MakeAllReduceComputation({{0, 1}, {2, 3}, {5, 6}}));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("Replica 4 is not named"));
}
TEST_F(HloVerifierTest, AllReduce_NotEnoughReplicasInGroupConfig) {
TF_ASSERT_OK_AND_ASSIGN(auto module, MakeAllReduceComputation({{0, 1}}, 8));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("In kCrossReplica mode, replica groups should contain "
"8 replicas, but found 2"));
}
TEST_F(HloVerifierTest, AllReduce_TooManyReplicasInGroupConfig) {
TF_ASSERT_OK_AND_ASSIGN(auto module,
MakeAllReduceComputation({{0, 1}, {2, 3}}, 2));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("In kCrossReplica mode, replica groups should contain "
"2 replicas, but found 4"));
}
TEST_F(HloVerifierTest, AllReduce_CrossReplicaAndPartition_Invalid) {
TF_ASSERT_OK_AND_ASSIGN(
auto module,
MakeAllReduceComputation({{0, 1}, {2, 3}}, 2, 1, "channel_id=1"));
EXPECT_THAT(
verifier().Run(module.get()).status().message(),
HasSubstr(
"In kCrossReplicaAndPartition mode, replica groups should contain "
"2 replicas, but found 4"));
}
TEST_F(HloVerifierTest, AllReduce_CrossReplicaAndPartition_Valid) {
TF_ASSERT_OK_AND_ASSIGN(
auto module,
MakeAllReduceComputation({{0, 1}, {2, 3}}, 4, 1, "channel_id=1"));
TF_ASSERT_OK(verifier().Run(module.get()).status());
}
TEST_F(HloVerifierTest, AllReduce_FlattenedID_Invalid) {
TF_ASSERT_OK_AND_ASSIGN(
auto module,
MakeAllReduceComputation({{0, 1}, {2, 3}}, 1, 2,
"channel_id=1, use_global_device_ids=true"));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("In kFlattenedID mode, replica groups should contain "
"2 flattened IDs, but found 4"));
}
TEST_F(HloVerifierTest, AllReduce_FlattenedID_Valid) {
TF_ASSERT_OK_AND_ASSIGN(
auto module,
MakeAllReduceComputation({{0, 1}, {2, 3}}, 2, 2,
"channel_id=1, use_global_device_ids=true"));
TF_ASSERT_OK(verifier().Run(module.get()).status());
}
TEST_F(HloVerifierTest, AllReduceStartAndDone) {
const char* const kModuleStr = R"(
HloModule test
add {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry {
p0 = f32[2,3] parameter(0)
start = f32[2,3] all-reduce-start(p0), to_apply=add
ROOT done = f32[2,3] all-reduce-done(start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kModuleStr));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTest, AllReduceStartAndDoneWrongType) {
const char* const kModuleStr = R"(
HloModule test
add {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry {
p0 = f32[2,3] parameter(0)
start = (f32[2,3], f32[2,3]) all-reduce-start(p0), to_apply=add
ROOT done = f32[2,3] all-reduce-done(start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kModuleStr));
auto status = verifier().Run(module.get()).status();
EXPECT_THAT(status.message(),
HasSubstr("Expected instruction to have shape equal to "
"f32[2,3]"));
}
TEST_F(HloVerifierTest, AllReduceStartAndMultipleDone) {
const char* const kModuleStr = R"(
HloModule test
add {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry {
p0 = f32[2,3] parameter(0)
start = (f32[2,3], f32[2,3]) all-reduce-start(p0), to_apply=add
done1 = f32[2,3] all-reduce-done(start)
ROOT done2 = f32[2,3] all-reduce-done(start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kModuleStr));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr("all-reduce-start instruction requires one consumer, found 2"));
}
TEST_F(HloVerifierTest, AllReduceDoneWithoutStart) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
p0 = f32[2,3] parameter(0)
p1 = u32[] parameter(1)
tuple = (f32[2,3], f32[2,3], u32[], u32[]) tuple(p0, p0, p1, p1)
ROOT done = f32[2,3] all-reduce-done(tuple)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kModuleStr));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("The operand of a all-reduce-done instruction "
"needs to be all-reduce-start, found tuple"));
}
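// Convenience wrapper producing a two-operand all-to-all module with the
// given replica groups and optional replica/partition counts and attributes.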
absl::StatusOr<std::unique_ptr<HloModule>> MakeAllToAllComputation(
std::vector<std::vector<int64_t>> replica_groups,
std::optional<int64_t> replica_count = std::nullopt,
std::optional<int64_t> num_partitions = std::nullopt,
absl::string_view other_attributes = "") {
const char* kTemplate = R"(
HloModule test
ENTRY entry {
p0 = f32[128]{0} parameter(0)
p1 = f32[128]{0} parameter(1)
a2a = (f32[128], f32[128]) all-to-all(p0, p1), replica_groups=REPLICA_GROUPS
OTHER_ATTRIBUTES
})";
return MakeCollectiveCommOpComputation(replica_groups, replica_count,
num_partitions, other_attributes,
kTemplate);
}
TEST_F(HloVerifierTest, AllToAll_NoReplicaGroupsOK) {
TF_ASSERT_OK_AND_ASSIGN(auto module, MakeAllToAllComputation({}, 2));
TF_ASSERT_OK(verifier().Run(module.get()).status());
}
TEST_F(HloVerifierTest, AllToAll_EmptyReplicaGroup) {
TF_ASSERT_OK_AND_ASSIGN(auto module, MakeAllToAllComputation({{0, 1}, {}}));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("cannot have an empty replica group"));
}
TEST_F(HloVerifierTest, AllToAll_RepeatedReplicaId) {
TF_ASSERT_OK_AND_ASSIGN(auto module,
MakeAllToAllComputation({{0, 1}, {2, 3}, {4, 0}}));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("Replica 0 is repeated"));
}
TEST_F(HloVerifierTest, AllToAll_MissingReplicaId) {
TF_ASSERT_OK_AND_ASSIGN(auto module,
MakeAllToAllComputation({{0, 1}, {2, 3}, {5, 6}}));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("Replica 4 is not named"));
}
TEST_F(HloVerifierTest, AllToAll_UniformSizeOfReplicasInGroup) {
TF_ASSERT_OK_AND_ASSIGN(auto module,
MakeAllToAllComputation({{0, 1}, {2}, {3, 4}}));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("Replica groups expected to be of uniform size"));
}
TEST_F(HloVerifierTest, AllToAll_CrossPartition_Invalid) {
TF_ASSERT_OK_AND_ASSIGN(
auto module,
MakeAllToAllComputation({{0, 1}, {2, 3}}, 1, 2, "channel_id=1"));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("In kCrossPartition mode, replica groups should "
"contain 2 partitions, but found 4"));
}
TEST_F(HloVerifierTest, AllToAll_CrossPartition_Valid) {
TF_ASSERT_OK_AND_ASSIGN(
auto module,
MakeAllToAllComputation({{0, 1}, {2, 3}}, 1, 4, "channel_id=1"));
TF_ASSERT_OK(verifier().Run(module.get()).status());
}
TEST_F(HloVerifierTest, AllToAll_LayoutConstrained) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
p0 = f32[128,4]{0,1} parameter(0)
p1 = f32[128,4]{1,0} parameter(1)
ROOT a2a = (f32[128,4]{0,1}, f32[128,4]{1,0}) all-to-all(p0, p1),
replica_groups={{0,1}}
}
)";
HloModuleConfig config;
config.set_replica_count(2);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr, config));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("HLO all-to-all has operands with different shapes"));
}
TEST_F(HloVerifierTest, AllToAll_OperandCountMismatchWithReplicaGroupSize) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
p0 = f32[128,4] parameter(0)
p1 = f32[128,4] parameter(1)
ROOT a2a = (f32[128,4], f32[128,4], f32[128,4]) all-to-all(p0, p1, p1),
replica_groups={{0,1}}
}
)";
HloModuleConfig config;
config.set_replica_count(2);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr, config));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("hlo->operand_count() == split_count"));
}
TEST_F(HloVerifierTest, CollectivePermuteSameSourceTwice) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
p0 = f32[128] parameter(0)
ROOT permute = f32[128] collective-permute(p0),
source_target_pairs={{0,1}, {0,2}, {1,0}}
}
)";
HloModuleConfig config;
config.set_replica_count(3);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr, config));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("Source 0 appears more than once"));
}
TEST_F(HloVerifierTest, CollectivePermuteSameTargetTwice) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
p0 = f32[128] parameter(0)
ROOT permute = f32[128] collective-permute(p0),
source_target_pairs={{0,2}, {1,2}, {2,0}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("Target 2 appears more than once"));
}
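// The collective-permute tests below use the combined-buffer form
// (input, output, input offsets, output offsets) with slice_sizes; the
// verifier checks repeated sources/targets and that the buffer and offset
// operands match up.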
TEST_F(HloVerifierTest, CollectivePermuteSameSourceTooManyTimes) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
replica_id = u32[] replica-id()
broadcast.0 = u32[2,8,128]{2,1,0:T(2,128)} broadcast(u32[] replica_id), dimensions={}
constant.1 = u32[] constant(1000)
broadcast.1 = u32[2,8,128]{2,1,0:T(2,128)} broadcast(u32[] constant.1), dimensions={}
constant.2 = s32[] constant(0)
constant.3 = s32[] constant(1)
tuple.2 = (s32[],s32[],s32[]) tuple(constant.2, constant.2, constant.2)
tuple.3 = (s32[],s32[],s32[]) tuple(constant.3, constant.2, constant.2)
tuple.4 = ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple((s32[],s32[],s32[]) tuple.2, (s32[],s32[],s32[]) tuple.3)
ROOT collective-permute = u32[2,8,128]{2,1,0:T(2,128)} collective-permute(u32[2,8,128] broadcast.0, u32[2,8,128] broadcast.1, ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.4, ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.4), source_target_pairs={{0,1},{1,2},{2,3},{3,0},{0,3},{0,2},{2,1},{1,0}}, slice_sizes={{1,8,128},{1,8,128}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("Source 0 appears more than 2 times in instruction's "
"source-target pairs:"));
}
TEST_F(HloVerifierTest, CollectivePermuteSameTargetTooManyTimes) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
replica_id = u32[] replica-id()
broadcast.0 = u32[2,8,128]{2,1,0:T(2,128)} broadcast(u32[] replica_id), dimensions={}
constant.1 = u32[] constant(1000)
broadcast.1 = u32[2,8,128]{2,1,0:T(2,128)} broadcast(u32[] constant.1), dimensions={}
constant.2 = s32[] constant(0)
constant.3 = s32[] constant(1)
tuple.2 = (s32[],s32[],s32[]) tuple(constant.2, constant.2, constant.2)
tuple.3 = (s32[],s32[],s32[]) tuple(constant.3, constant.2, constant.2)
tuple.4 = ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple((s32[],s32[],s32[]) tuple.2, (s32[],s32[],s32[]) tuple.3)
ROOT collective-permute = u32[2,8,128]{2,1,0:T(2,128)} collective-permute(u32[2,8,128] broadcast.0, u32[2,8,128] broadcast.1, ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.4, ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.4), source_target_pairs={{0,1},{1,2},{2,3},{3,0},{0,3},{3,2},{2,3},{1,0}}, slice_sizes={{1,8,128},{1,8,128}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("Target 3 appears more than 2 times in instruction's "
"source-target pairs:"));
}
TEST_F(HloVerifierTest, CollectivePermuteUnmatchingSourceTarget) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
replica_id = u32[] replica-id()
broadcast.0 = u32[2,8,128]{2,1,0:T(2,128)} broadcast(u32[] replica_id), dimensions={}
constant.1 = u32[] constant(1000)
broadcast.1 = u32[2,8,128]{2,1,0:T(2,128)} broadcast(u32[] constant.1), dimensions={}
broadcast.2 = u32[4,8,128]{2,1,0:T(2,128)} broadcast(u32[] constant.1), dimensions={}
constant.2 = s32[] constant(0)
constant.3 = s32[] constant(1)
tuple.output = (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) tuple(u32[2,8,128]{2,1,0:T(2,128)} broadcast.1, u32[4,8,128]{2,1,0:T(2,128)} broadcast.2)
tuple.2 = (s32[],s32[],s32[]) tuple(constant.2, constant.2, constant.2)
tuple.3 = (s32[],s32[],s32[]) tuple(constant.3, constant.2, constant.2)
tuple.4 = ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple((s32[],s32[],s32[]) tuple.2, (s32[],s32[],s32[]) tuple.3)
constant.4 = s32[] constant(2)
tuple.5 = (s32[],s32[],s32[]) tuple(constant.4, constant.2, constant.2)
tuple.6 = ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple((s32[],s32[],s32[]) tuple.2, (s32[],s32[],s32[]) tuple.5)
tuple.9 = (((s32[],s32[],s32[]), (s32[],s32[],s32[])), ((s32[],s32[],s32[]), (s32[],s32[],s32[]))) tuple(((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.4, ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.6)
ROOT collective-permute.53 = (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) collective-permute(u32[2,8,128]{2,1,0:T(2,128)} broadcast.0, (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) tuple.output, ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.4, (((s32[],s32[],s32[]), (s32[],s32[],s32[])), ((s32[],s32[],s32[]), (s32[],s32[],s32[]))) tuple.9), source_target_pairs={{0,1},{1,2},{2,3},{3,0},{0,3},{3,2},{2,1},{1,0}}, slice_sizes={{1,8,128},{1,8,128},{2,8,128},{2,8,128}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("Unmatching input buffers and output buffers"));
}
TEST_F(HloVerifierTest, CollectivePermuteUnmatchingInputAndInputOffset) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
replica_id = u32[] replica-id()
broadcast.0 = u32[2,8,128]{2,1,0:T(2,128)} broadcast(u32[] replica_id), dimensions={}
constant.1 = u32[] constant(1000)
broadcast.1 = u32[2,8,128]{2,1,0:T(2,128)} broadcast(u32[] constant.1), dimensions={}
broadcast.2 = u32[4,8,128]{2,1,0:T(2,128)} broadcast(u32[] constant.1), dimensions={}
constant.2 = s32[] constant(0)
constant.3 = s32[] constant(1)
tuple.input = (u32[2,8,128]{2,1,0:T(2,128)}, u32[2,8,128]{2,1,0:T(2,128)}) tuple(u32[2,8,128]{2,1,0:T(2,128)} broadcast.0, u32[2,8,128]{2,1,0:T(2,128)} broadcast.0)
tuple.output = (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) tuple(u32[2,8,128]{2,1,0:T(2,128)} broadcast.1, u32[4,8,128]{2,1,0:T(2,128)} broadcast.2)
tuple.2 = (s32[],s32[],s32[]) tuple(constant.2, constant.2, constant.2)
tuple.3 = (s32[],s32[],s32[]) tuple(constant.3, constant.2, constant.2)
tuple.4 = ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple((s32[],s32[],s32[]) tuple.2, (s32[],s32[],s32[]) tuple.3)
constant.4 = s32[] constant(2)
tuple.5 = (s32[],s32[],s32[]) tuple(constant.4, constant.2, constant.2)
tuple.6 = ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple((s32[],s32[],s32[]) tuple.2, (s32[],s32[],s32[]) tuple.5)
tuple.9 = (((s32[],s32[],s32[]), (s32[],s32[],s32[])), ((s32[],s32[],s32[]), (s32[],s32[],s32[]))) tuple(((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.4, ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.6)
ROOT collective-permute.53 = (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) collective-permute((u32[2,8,128]{2,1,0:T(2,128)}, u32[2,8,128]{2,1,0:T(2,128)}) tuple.input, (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) tuple.output, (s32[],s32[],s32[]) tuple.3, (((s32[],s32[],s32[]), (s32[],s32[],s32[])), ((s32[],s32[],s32[]), (s32[],s32[],s32[]))) tuple.9), source_target_pairs={{0,1},{1,2},{2,3},{3,0},{0,3},{3,2},{2,1},{1,0}}, slice_sizes={{1,8,128},{1,8,128},{2,8,128},{2,8,128}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("Unmatching input buffers and input offset."));
}
TEST_F(HloVerifierTest, CollectivePermuteUnmatchingOutputAndOutputOffset) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
replica_id = u32[] replica-id()
broadcast.0 = u32[2,8,128]{2,1,0:T(2,128)} broadcast(u32[] replica_id), dimensions={}
constant.1 = u32[] constant(1000)
broadcast.1 = u32[2,8,128]{2,1,0:T(2,128)} broadcast(u32[] constant.1), dimensions={}
broadcast.2 = u32[4,8,128]{2,1,0:T(2,128)} broadcast(u32[] constant.1), dimensions={}
constant.2 = s32[] constant(0)
constant.3 = s32[] constant(1)
tuple.input = (u32[2,8,128]{2,1,0:T(2,128)}, u32[2,8,128]{2,1,0:T(2,128)}) tuple(u32[2,8,128]{2,1,0:T(2,128)} broadcast.0, u32[2,8,128]{2,1,0:T(2,128)} broadcast.0)
tuple.output = (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) tuple(u32[2,8,128]{2,1,0:T(2,128)} broadcast.1, u32[4,8,128]{2,1,0:T(2,128)} broadcast.2)
tuple.2 = (s32[],s32[],s32[]) tuple(constant.2, constant.2, constant.2)
tuple.3 = (s32[],s32[],s32[]) tuple(constant.3, constant.2, constant.2)
tuple.4 = ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple((s32[],s32[],s32[]) tuple.2, (s32[],s32[],s32[]) tuple.3)
constant.4 = s32[] constant(2)
tuple.5 = (s32[],s32[],s32[]) tuple(constant.4, constant.2, constant.2)
tuple.7 = ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple((s32[],s32[],s32[]) tuple.2, (s32[],s32[],s32[]) tuple.2)
tuple.8 = (((s32[],s32[],s32[]), (s32[],s32[],s32[])), ((s32[],s32[],s32[]), (s32[],s32[],s32[]))) tuple(((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.4, ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.7)
ROOT collective-permute.53 = (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) collective-permute((u32[2,8,128]{2,1,0:T(2,128)}, u32[2,8,128]{2,1,0:T(2,128)}) tuple.input, (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) tuple.output, (((s32[],s32[],s32[]), (s32[],s32[],s32[])), ((s32[],s32[],s32[]), (s32[],s32[],s32[]))) tuple.8, (s32[],s32[],s32[]) tuple.2), source_target_pairs={{0,1},{1,2},{2,3},{3,0},{0,3},{3,2},{2,1},{1,0}}, slice_sizes={{1,8,128},{1,8,128},{2,8,128},{2,8,128}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("Unmatching output buffers and output offset."));
}
TEST_F(HloVerifierTest, CollectivePermuteCrossReplicaSourceOOR) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
p0 = f32[128] parameter(0)
ROOT permute = f32[128] collective-permute(p0),
source_target_pairs={{5,2}, {1,2}, {2,0}}
}
)";
HloModuleConfig config;
config.set_replica_count(3);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr, config));
const std::string error_message(
verifier().Run(module.get()).status().message());
EXPECT_THAT(error_message, HasSubstr("Source 5"));
EXPECT_THAT(error_message, HasSubstr("must be < 3"));
}
TEST_F(HloVerifierTest, CollectivePermuteCrossReplicaTargetOOR) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
p0 = f32[128] parameter(0)
ROOT permute = f32[128] collective-permute(p0),
source_target_pairs={{0,1}, {1,2}, {2,7}}
}
)";
HloModuleConfig config;
config.set_replica_count(3);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr, config));
const std::string error_message(
verifier().Run(module.get()).status().message());
EXPECT_THAT(error_message, HasSubstr("Target 7"));
EXPECT_THAT(error_message, HasSubstr("must be < 3"));
}
TEST_F(HloVerifierTest, CollectivePermuteCrossPartitionSourceOOR) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
p0 = f32[128] parameter(0)
ROOT permute = f32[128] collective-permute(p0),
source_target_pairs={{5,2}, {1,2}, {2,0}}, channel_id=1
}
)";
HloModuleConfig config;
config.set_num_partitions(3);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr, config));
const std::string error_message(
verifier().Run(module.get()).status().message());
EXPECT_THAT(error_message, HasSubstr("Source 5"));
EXPECT_THAT(error_message, HasSubstr("must be < 3"));
}
TEST_F(HloVerifierTest, CollectivePermuteCrossPartitionTargetOOR) {
const char* const kModuleStr = R"(
HloModule test
ENTRY entry {
p0 = f32[128] parameter(0)
ROOT permute = f32[128] collective-permute(p0),
source_target_pairs={{0,2}, {1,7}, {2,0}}, channel_id=1
}
)";
HloModuleConfig config;
config.set_num_partitions(3);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr, config));
const std::string error_message(
verifier().Run(module.get()).status().message());
EXPECT_THAT(error_message, HasSubstr("Target 7"));
EXPECT_THAT(error_message, HasSubstr("must be < 3"));
}
TEST_F(HloVerifierTest, FusionMoreOperandsThanParameters) {
const char* const kModuleStr = R"(
HloModule test
fused_computation {
ROOT p0 = f32[10] parameter(0)
}
ENTRY entry {
p0 = f32[10] parameter(0)
p1 = f32[10] parameter(1)
ROOT out = f32[10] fusion(p0, p1), kind=kInput, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTest, FusionLessOperandsThanParameters) {
const char* const kModuleStr = R"(
HloModule test
fused_computation {
p0 = f32[10] parameter(0)
p1 = f32[10] parameter(1)
ROOT out = f32[10] add(p0, p1)
}
ENTRY entry {
p0 = f32[10] parameter(0)
ROOT out = f32[10] fusion(p0), kind=kInput, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("greater than the number of operands"));
}
TEST_F(HloVerifierTest, FusionShapeVerifier) {
const char* const kModuleStr = R"(
HloModule test
fused_computation {
ROOT p0 = f32[10,10] parameter(0)
}
ENTRY entry {
p0 = f32[10,10] parameter(0)
ROOT out = f32[10] fusion(p0), kind=kInput, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("Fused computation shape"));
}
TEST_F(HloVerifierTest, FusionThreadVerifier) {
const char* const kModuleStr = R"(
HloModule test
fused_computation {
ROOT p0 = f32[8,12] parameter(0)
}, execution_thread="parallel_thread"
ENTRY entry {
p0 = f32[8,12] parameter(0)
ROOT out = f32[8,12] fusion(p0), kind=kInput, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("expects parent computation thread name same as called "
"computation's thread name"));
}
TEST_F(HloVerifierTest, FusionNestedComputationThreadVerifier) {
const char* const kModuleStr = R"(
HloModule test
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}, execution_thread="parallel_thread"
fused_computation {
p0 = f32[8,12] parameter(0)
p1 = f32[8,12] parameter(1)
crs0 = f32[8,12] all-reduce(p1), replica_groups={}, to_apply=add
ROOT result = add(p0, crs0)
}
ENTRY entry {
p0 = f32[8,12] parameter(0)
p1 = f32[8,12] parameter(1)
ROOT out = f32[8,12] fusion(p0, p1), kind=kInput, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(
verifier().Run(module.get()).status().message(),
HasSubstr("crs0 top_apply computation execution thread does not match "
"(parallel_thread vs main)"));
}
TEST_F(HloVerifierTest, AllReduceVerifier) {
const char* const kModuleStr = R"(
HloModule test
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY entry {
input = f32[8,12]{0,1} parameter(0)
crs0 = f32[8,12]{0,1} all-reduce(input), replica_groups={}, to_apply=add
crs1 = f32[8,12]{0,1} all-reduce(input), replica_groups={}, to_apply=add,
constrain_layout=true
ROOT result = (f32[8,12]{0,1}, f32[8,12]{0,1}) tuple(crs0, crs1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(
verifier().Run(module.get()).status().message(),
HasSubstr("mix of layout constrained and unconstrained AllReduce"));
}
TEST_F(HloVerifierTest, ChannelVerifier) {
const char* const kModuleStr = R"(
HloModule test
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[8,12] parameter(0)
%token0 = token[] after-all()
%send = (f32[8,12], u32[], token[]) send(%input, %token0), channel_id=1
%send-done = token[] send-done(%send), channel_id=1
%crs = f32[8,12] all-reduce(%input), replica_groups={}, to_apply=add,
channel_id=1
ROOT result = (f32[8,12]{0,1}, f32[8,12]{0,1}) tuple(%input, %crs)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("used for different types of channel instructions"));
}
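// Channel verification must accept send/recv whose matching *-done op is
// rotated through a while loop (partially pipelined collectives): the start
// is threaded into the loop and the final done is emitted after the loop.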
TEST_F(HloVerifierTest, ChannelVerifierPartiallyPipelinedAsyncRecv) {
const char* const kModuleStr = R"(
HloModule test
while_body {
param = ((f32[16], u32[], token[])) parameter(0)
prev_recv = (f32[16], u32[], token[]) get-tuple-element(param), index=0
recv_done = (f32[16], token[]) recv-done(prev_recv), channel_id=1
after_all = token[] after-all()
recv = (f32[16], u32[], token[]) recv(after_all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3}}}
ROOT tuple = ((f32[16], u32[], token[])) tuple(recv)
}
while_condition {
param = ((f32[16], u32[], token[])) parameter(0)
ROOT infinite_loop = pred[] constant(true)
}
ENTRY main_spmd {
after_all = token[] after-all()
recv = (f32[16], u32[], token[]) recv(after_all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3}}}
init = ((f32[16], u32[], token[])) tuple(recv)
while = ((f32[16], u32[], token[])) while(init),
condition=while_condition, body=while_body
recv_ctx = (f32[16], u32[], token[]) get-tuple-element(while), index=0
recv_done = (f32[16], token[]) recv-done(recv_ctx), channel_id=1
ROOT result = f32[16] get-tuple-element(recv_done), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
TF_ASSERT_OK(verifier().Run(module.get()));
}
TEST_F(HloVerifierTest, ChannelVerifierPartiallyPipelinedAsyncSend) {
const char* const kModuleStr = R"(
HloModule test
while_body {
param = ((f32[16], u32[], token[]), f32[16]) parameter(0)
prev_send = (f32[16], u32[], token[]) get-tuple-element(param), index=0
data = f32[16] get-tuple-element(param), index=1
send_done = (f32[16], token[]) send-done(prev_send), channel_id=1
after_all = token[] after-all()
send = (f32[16], u32[], token[]) send(data, after_all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3}}}
ROOT tuple = ((f32[16], u32[], token[]), f32[16]) tuple(send, data)
}
while_condition {
param = ((f32[16], u32[], token[]), f32[16]) parameter(0)
ROOT infinite_loop = pred[] constant(true)
}
ENTRY main_spmd {
data = f32[16] parameter(0)
after_all = token[] after-all()
send = (f32[16], u32[], token[]) send(data, after_all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3}}}
init = ((f32[16], u32[], token[]), f32[16]) tuple(send, data)
while = ((f32[16], u32[], token[]), f32[16]) while(init),
condition=while_condition, body=while_body
send_ctx = (f32[16], u32[], token[]) get-tuple-element(while), index=0
ROOT send_done = (f32[16], token[]) send-done(send_ctx), channel_id=1
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
TF_ASSERT_OK(verifier().Run(module.get()));
}
TEST_F(HloVerifierTest, ChannelVerifierAsyncSend) {
const char* const kModuleStr = R"(
HloModule test
ENTRY main_spmd {
data = f32[16] parameter(0)
after_all = token[] after-all()
send = (f32[16], u32[], token[]) send(data, after_all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3}}}
ROOT send_done = (f32[16], token[]) send-done(send), channel_id=1
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
TF_ASSERT_OK(verifier().Run(module.get()));
}
TEST_F(HloVerifierTest, ChannelVerifierAsyncRecv) {
const char* const kModuleStr = R"(
HloModule test
ENTRY main_spmd {
after_all = token[] after-all()
recv = (f32[16], u32[], token[]) recv(after_all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3}}}
recv_done = (f32[16], token[]) recv-done(recv), channel_id=1
ROOT result = f32[16] get-tuple-element(recv_done), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
TF_ASSERT_OK(verifier().Run(module.get()));
}
TEST_F(HloVerifierTest, ChannelVerifierMultipleSendUsers) {
const char* const kModuleStr = R"(
HloModule test
ENTRY main_spmd {
data = f32[16] parameter(0)
after_all = token[] after-all()
send = (f32[16], u32[], token[]) send(data, after_all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3}}}
send_done = (f32[16], token[]) send-done(send), channel_id=1
ROOT result = ((f32[16], u32[], token[]), f32[16]) tuple(send, send_done)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("send instruction requires one consumer, found 2"));
}
TEST_F(HloVerifierTest, ChannelVerifierMultipleRecvUsers) {
const char* const kModuleStr = R"(
HloModule test
ENTRY main_spmd {
after_all = token[] after-all()
recv = (f32[16], u32[], token[]) recv(after_all), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs={{0,1},{1,2},{2,3}}}
recv_done = (f32[16], token[]) recv-done(recv), channel_id=1
ROOT result = (((f32[16], u32[], token[])), f32[16])
tuple(recv, recv_done)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("recv instruction requires one consumer, found 2"));
}
TEST_F(HloVerifierTest, CollectiveChannelVerifier) {
const char* const kModuleStr = R"(
HloModule test
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[8,12] parameter(0)
%permute = f32[8,12] collective-permute(%input),
source_target_pairs={{0,1},{1,0}}, channel_id=1
%crs = f32[8,12] all-reduce(%input), replica_groups={}, to_apply=add,
channel_id=1
ROOT result = (f32[8,12]{0,1}, f32[8,12]{0,1}) tuple(%permute, %crs)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
EXPECT_THAT(verifier().Run(module.get()).status().message(),
HasSubstr("used for different types of channel instructions"));
}
TEST_F(HloVerifierTestLayoutSensitive, CollectivePermuteStartAndDone) {
const char* const kModuleStr = R"(
HloModule Module
ENTRY CollectivePermuteStartAndDone {
p0 = f32[2,3]{1,0:S(1)} parameter(0)
collective-permute-start.1 = (f32[2,3]{1,0:S(1)}, f32[2,3]{1,0:S(1)}, u32[], u32[]) collective-permute-start(p0), source_target_pairs={{0,1},{1,0}}, channel_id=1
ROOT collective-permute-done.1 = f32[2,3]{1,0:S(1)} collective-permute-done(collective-permute-start.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTest, CollectivePermuteStartAndDoneWrongType) {
const char* const kModuleStr = R"(
HloModule Module
ENTRY CollectivePermuteStartAndDoneWrongType {
p0 = f32[2,3]{1,0:S(1)} parameter(0)
collective-permute-start.1 = f32[2,3]{1,0:S(1)} collective-permute-start(p0), source_target_pairs={{0,1},{1,0}}, channel_id=1
ROOT collective-permute-done.1 = f32[2,3]{1,0:S(1)} collective-permute-done(collective-permute-start.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Expected instruction to have shape equal to "
"(f32[2,3], f32[2,3])"));
}
TEST_F(HloVerifierTest, CollectivePermuteStartAndMultipleDone) {
const char* const kModuleStr = R"(
HloModule Module
ENTRY CollectivePermuteStartAndMultipleDone {
p0 = f32[2,3]{1,0:S(1)} parameter(0)
collective-permute-start.1 = (f32[2,3]{1,0:S(1)}, f32[2,3]{1,0:S(1)}, u32[], u32[]) collective-permute-start(p0), source_target_pairs={{0,1},{1,0}}, channel_id=1
collective-permute-done.1 = f32[2,3]{1,0:S(1)} collective-permute-done(collective-permute-start.1)
ROOT collective-permute-done.2 = f32[2,3]{1,0:S(1)} collective-permute-done(collective-permute-start.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr("collective-permute-start instruction requires one consumer, "
"found 2"));
}
TEST_F(HloVerifierTest, CollectivePermuteDoneNoCollectivePermuteStart) {
const char* const kModuleStr = R"(
HloModule Module
ENTRY CollectivePermuteDoneNoCollectivePermuteStart {
p0 = f32[2,3]{1,0:S(1)} parameter(0)
p1 = f32[2,3]{1,0:S(1)} parameter(1)
p2 = u32[] parameter(2)
p3 = u32[] parameter(3)
tuple.1 = (f32[2,3], f32[2,3], u32[], u32[]) tuple(p0, p1, p2, p3)
ROOT collective-permute-done.1 = f32[2,3]{1,0:S(1)} collective-permute-done(tuple.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("The operand of a collective-permute-done instruction "
"needs to be collective-permute-start, found tuple"));
}
TEST_F(HloVerifierTest, ComparisonTypeFloat) {
const char* const hlo_string = R"(
HloModule Module
ENTRY RngOperandElementTypesNotMatch {
p0 = f32[] parameter(0)
ROOT cmp = pred[] compare(f32[] p0, f32[] p0), direction=LT, type=UNSIGNED
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Expected comparison type FLOAT or TOTALORDER"));
}
TEST_F(HloVerifierTest, ComparisonTypeSigned) {
const char* const hlo_string = R"(
HloModule Module
ENTRY RngOperandElementTypesNotMatch {
p0 = s32[] parameter(0)
ROOT cmp = pred[] compare(s32[] p0, s32[] p0), direction=LT, type=UNSIGNED
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("Expected comparison type SIGNED"));
}
TEST_F(HloVerifierTest, ComparisonTypeUnsigned) {
const char* const hlo_string = R"(
HloModule Module
ENTRY RngOperandElementTypesNotMatch {
p0 = u32[] parameter(0)
ROOT cmp = pred[] compare(u32[] p0, u32[] p0), direction=LT, type=SIGNED
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("Expected comparison type UNSIGNED"));
}
TEST_F(HloVerifierTest, ComparisonTypePred) {
const char* const hlo_string = R"(
HloModule Module
ENTRY RngOperandElementTypesNotMatch {
p0 = pred[] parameter(0)
ROOT cmp = pred[] compare(pred[] p0, pred[] p0), direction=LT, type=SIGNED
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("Expected comparison type UNSIGNED"));
}
TEST_F(HloVerifierTest, UseGlobalDeviceIdsEmptyReplicaGroup) {
const char* const hlo_string = R"(
HloModule Module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY CRS {
input = f32[8]{0} parameter(0)
ROOT crs = f32[8]{0} all-reduce(input), replica_groups={}, channel_id=1,
use_global_device_ids=true, to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr("Replica groups must be specified in flattened-id mode"));
}
TEST_F(HloVerifierTest, InvalidChannelIDandUseGlobalDeviceIDs) {
const char* const hlo_string = R"(
HloModule Module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY CRS {
input = f32[8]{0} parameter(0)
ROOT crs = f32[8]{0} all-reduce(input), replica_groups={},
use_global_device_ids=true, to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr(
"Invalid combination of has_channel_id and use_global_device_ids"));
}
TEST_F(HloVerifierTest, ReduceScatterInvalidOutputSize0) {
const char* const hlo_string = R"(
HloModule Module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY CRS {
input = f32[8]{0} parameter(0)
ROOT crs = f32[8]{0} reduce-scatter(input), replica_groups={{0,1}},
to_apply=add, dimensions={0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("shard_count = 1, subgroup_size = 2"));
}
TEST_F(HloVerifierTest, ReduceScatterInvalidScatterDim) {
const char* const hlo_string = R"(
HloModule Module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY CRS {
input = f32[8]{0} parameter(0)
ROOT crs = f32[4]{0} reduce-scatter(input), replica_groups={{0,1}},
to_apply=add, dimensions={1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr("ars->scatter_dimension() < ars->operand(i)->shape().rank()"));
}
TEST_F(HloVerifierTest, ReduceScatterNonUniformGroups) {
const char* const hlo_string = R"(
HloModule Module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY CRS {
input = f32[8]{0} parameter(0)
ROOT crs = f32[4]{0} reduce-scatter(input), replica_groups={{0,1}, {2,3,4}},
to_apply=add, dimensions={0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Replica groups expected to be of uniform size"));
}
TEST_F(HloVerifierTest, ScatterInvalidScatterDim) {
const char* const hlo_string = R"(
HloModule Module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY CRS {
Arg_0 = s8[11,6]{1,0} parameter(0)
constant = s32[] constant(1)
broadcast = s32[1,7,9,2,16,2]{5,4,3,2,1,0} broadcast(constant), dimensions={}
Arg_1 = s8[1,7,9,2,9,4,16]{6,5,4,3,2,1,0} parameter(1)
scatter = s8[11,6]{1,0} scatter(Arg_0, broadcast, Arg_1), update_window_dims={4,5}, inserted_window_dims={}, scatter_dims_to_operand_dims={1094795585,1}, index_vector_dim=5, to_apply=add
abs = s8[11,6]{1,0} abs(scatter)
ROOT tuple = (s8[11,6]{1,0}) tuple(abs)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Invalid scatter_dims_to_operand_dims mapping"));
}
TEST_F(HloVerifierTest, VerifyBroadcastDimensionsOrder) {
const char* const hlo = R"(
HloModule module
ENTRY computation {
mul = f32[32,32,32]{2,1,0} parameter(0)
ROOT broadcast = f32[32,32,32,32]{3,2,1,0} broadcast(mul), dimensions={3,2,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = HloVerifier{HloVerifierOpts{}.VerifyBroadcastDimensionsOrder()}
.Run(module.get())
.status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Broadcast dimensions should be ordered"));
}
TEST_F(HloVerifierTest, VerifyBroadcastDimensionsOrderOK) {
const char* const hlo = R"(
HloModule module
ENTRY computation {
mul = f32[4,5] parameter(0)
ROOT broadcast = f32[4,3,2,5] broadcast(mul), dimensions={0,3}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
TF_ASSERT_OK(HloVerifier{HloVerifierOpts{}.VerifyBroadcastDimensionsOrder()}
.Run(module.get())
.status());
}
TEST_F(HloVerifierTest, VerifyInstructionNameChanged) {
const char* const hlo = R"(
HloModule module
ENTRY computation {
p0 = f32[32] parameter(0), metadata={scheduling_name="p0"}
p1 = f32[32] parameter(1), metadata={scheduling_name="p1"}
ROOT add0 = f32[32] add(p0,p1), metadata={scheduling_name="add_changed"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = HloVerifier{HloVerifierOpts{}.VerifyInstructionNameUnchanged()}
.Run(module.get())
.status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Expected instruction name to remain the same."));
}
TEST_F(HloVerifierTest, VerifyInstructionNameUnchanged) {
const char* const hlo = R"(
HloModule module
ENTRY computation {
p0 = f32[32] parameter(0), metadata={scheduling_name="p0"}
p1 = f32[32] parameter(1), metadata={scheduling_name="p1"}
ROOT add0 = f32[32] add(p0,p1), metadata={scheduling_name="add0"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
TF_ASSERT_OK(HloVerifier{HloVerifierOpts{}.VerifyInstructionNameUnchanged()}
.Run(module.get())
.status());
}
TEST_F(HloVerifierTest, VerifyInstructionNameSchedulingNameNotPresent) {
const char* const hlo = R"(
HloModule module
ENTRY computation {
p0 = f32[32] parameter(0)
p1 = f32[32] parameter(1)
ROOT add0 = f32[32] add(p0,p1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
TF_ASSERT_OK(HloVerifier{HloVerifierOpts{}.VerifyInstructionNameUnchanged()}
.Run(module.get())
.status());
}
TEST_F(HloVerifierTest, VerifyInstructionNameChangedOkWithRematAndClones) {
const char* const hlo = R"(
HloModule module
ENTRY computation {
p0 = f32[32] parameter(0), metadata={scheduling_name="p0"}
p1 = f32[32] parameter(1), metadata={scheduling_name="p1"}
add0.remat = f32[32] add(p0,p1), metadata={scheduling_name="add0"}
ROOT add1.clone = f32[32] add(add0.remat, p0), metadata={scheduling_name="add1"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
TF_ASSERT_OK(HloVerifier{HloVerifierOpts{}.VerifyInstructionNameUnchanged()}
.Run(module.get())
.status());
}
TEST_F(HloVerifierTest, ReshapeIsNotBitcast) {
const char* const hlo = R"(
HloModule Module
ENTRY main {
p = f32[8,3]{1,0} parameter(0)
ROOT r = f32[4,2,3]{0,1,2} reshape(p)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status =
HloVerifier{
HloVerifierOpts{}.MakeLayoutSensitive().VerifyReshapeIsBitcast()}
.Run(module.get())
.status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Reshape should be a physical bitcast"));
}
TEST_F(HloVerifierTest, ReshapeIsBitcast) {
const char* const hlo = R"(
HloModule Module
ENTRY main {
p = f32[8]{0} parameter(0)
ROOT r = f32[4,2]{1,0} reshape(p)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
TF_ASSERT_OK(HloVerifier{
HloVerifierOpts{}.MakeLayoutSensitive().VerifyReshapeIsBitcast()}
.Run(module.get())
.status());
}
TEST_F(HloVerifierTest, VerifyCustomCallThread) {
const char* const hlo = R"(
HloModule module
%call_body (prev.2: s32[]) -> pred[] {
%constant.1 = s32[] constant(5)
%prev.2 = s32[] parameter(0)
ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %prev.2), direction=GT
}, execution_thread="parallel_thread"
ENTRY %WhileWithScalarS32Result.v2 () -> s32[] {
%constant.2 = s32[] constant(0)
ROOT %custom = s32[] custom-call(s32[] %constant.2), custom_call_target="MyCustomCall", to_apply=%call_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status =
HloVerifier{
HloVerifierOpts{}.VerifyCustomCallNestedComputationThreadName()}
.Run(module.get())
.status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("custom top_apply computation execution thread does "
"not match (parallel_thread vs main)"));
}
TEST_F(HloVerifierTest, CheckWhileThread) {
const char* const hlo_string = R"(
HloModule While, entry_computation_layout={()->s32[]}
%body.v3 (prev.1: s32[]) -> s32[] {
%constant = s32[] constant(1)
%prev.1 = s32[] parameter(0)
ROOT %add = s32[] add(s32[] %constant, s32[] %prev.1)
}
%condition.v3 (prev.2: s32[]) -> pred[] {
%constant.1 = s32[] constant(5)
%prev.2 = s32[] parameter(0)
ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %prev.2), direction=GT
}, execution_thread="parallel_thread"
ENTRY %WhileWithScalarS32Result.v2 () -> s32[] {
%constant.2 = s32[] constant(0)
ROOT %while = s32[] while(s32[] %constant.2), condition=%condition.v3, body=%body.v3
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("expects parent computation thread name same as called "
"computation's thread name"));
}
TEST_F(HloVerifierTest, CheckWhileContainsAsyncThread) {
const char* const hlo_string = R"(
HloModule While, entry_computation_layout={()->s32[]}
%async_add (prev.1: s32[]) -> s32[] {
%constant = s32[] constant(1)
%prev.1 = s32[] parameter(0)
ROOT %add = s32[] add(s32[] %constant, s32[] %prev.1)
}, execution_thread="parallel_thread"
%body.v3 (prev.1: s32[]) -> s32[] {
%constant = s32[] constant(1)
%prev.1 = s32[] parameter(0)
ROOT %add = s32[] add(s32[] %constant, s32[] %prev.1)
}
%condition.v3 (prev.2: s32[]) -> pred[] {
%constant.1 = s32[] constant(5)
%prev.2 = s32[] parameter(0)
%async-start = ((s32[]), s32[], s32[]) custom-call-start(s32[] %prev.2), async_execution_thread="parallel_thread", custom_call_target="async_add"
%async-done = s32[] custom-call-done(((s32[]), s32[], s32[]) %async-start)
ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %async-done), direction=GT
}
ENTRY %WhileWithScalarS32Result.v2 () -> s32[] {
%constant.2 = s32[] constant(0)
ROOT %while = s32[] while(s32[] %constant.2), condition=%condition.v3, body=%body.v3
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTestLayoutFusion, DynamicUpdateSliceWithMemorySpace) {
const char* const hlo_string = R"(
HloModule fusion, is_scheduled=true
fused_computation {
%parameter.0 = bf16[1,8,1,8,320]{4,0,3,2,1:T(2,128)(2,1)S(3)} parameter(0)
%parameter.1 = bf16[1,8,6,8,320]{4,0,3,2,1:T(2,128)(2,1)S(3)} parameter(1)
%c = bf16[1,8,6,8,320]{4,0,3,2,1:T(2,128)(2,1)} copy(parameter.1)
%constant.1 = s32[] constant(0)
ROOT %dynamic-update-slice.1 = bf16[1,8,6,8,320]{4,0,3,2,1:T(2,128)(2,1)S(3)}
dynamic-update-slice(%c, %parameter.0, %constant.1, %constant.1,
%constant.1, %constant.1, %constant.1)
}
ENTRY entry (parameter.0: bf16[1,8,1,8,320], parameter.1: bf16[1,8,6,8,320]) -> bf16[1,8,6,8,320]{
%p0 = bf16[1,8,1,8,320]{4,0,3,2,1:T(2,128)(2,1)S(3)} parameter(0)
%p1 = bf16[1,8,6,8,320]{4,0,3,2,1:T(2,128)(2,1)S(3)} parameter(1)
ROOT out = bf16[1,8,6,8,320]{4,0,3,2,1:T(2,128)(2,1)S(3)} fusion(p0, p1), kind=kLoop, calls=fused_computation
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTest, InvalidShardingRank) {
const char* const hlo = R"(
HloModule Module
ENTRY main {
p = f32[4,2] parameter(0), sharding={devices=[1,2,2,1]0,1,2,3}
ROOT r = f32[4,2] copy(p)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("tile assignment dimensions (excluding subgroups) is "
"different than the input rank."));
}
TEST_F(HloVerifierTest, InvalidShardingDevices) {
const char* const hlo = R"(
HloModule Module
ENTRY main {
p = f32[4,2] parameter(0), sharding={devices=[2,2]0,1,2,3}
ROOT r = f32[4,2] copy(p)
}
)";
HloModuleConfig config;
config.set_num_partitions(2);
config.set_use_spmd_partitioning(true);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo, config));
ASSERT_TRUE(module->config().use_spmd_partitioning());
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("device 2 > num_devices (2) in tile assignment"));
}
TEST_F(HloVerifierTest, InconsistentWhileSharding) {
const char* const hlo = R"(
HloModule While
%body.v3 (prev.1: s32[]) -> s32[] {
%prev.1 = s32[] parameter(0), sharding={replicated}
%constant = s32[] constant(1)
ROOT %add = s32[] add(s32[] %constant, s32[] %prev.1)
}
%condition.v3 (prev.2: s32[]) -> pred[] {
%prev.2 = s32[] parameter(0), sharding={maximal device=0}
%constant.1 = s32[] constant(5)
ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %prev.2), direction=GT
}
ENTRY %WhileWithScalarS32Result.v2 () -> s32[] {
%constant.2 = s32[] constant(0)
ROOT %while = s32[] while(s32[] %constant.2), condition=%condition.v3, body=%body.v3
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Inconsistent while sharding among instructions"));
}
TEST_F(HloVerifierTest, InconsistentConditionSharding) {
const char* const hlo = R"(
HloModule Module
true_branch {
tparam = (s32[], f32[4]) parameter(0)
ROOT tgte1 = f32[4] get-tuple-element(tparam), index=1
}
false_branch {
fparam = (s32[], f32[4]) parameter(0)
ROOT fgte1 = f32[4] get-tuple-element(fparam), index=1, sharding={replicated}
}
ENTRY entry {
p0 = (s32[], f32[4]) parameter(0)
constant = pred[] constant(true)
ROOT conditional = f32[4] conditional(constant, p0, p0),
true_computation=true_branch, false_computation=false_branch,
sharding={maximal device=0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr("Inconsistent conditional sharding among instructions"));
}
TEST_F(HloVerifierTest, DisableS4Verification) {
const char* const hlo = R"(
HloModule Module
ENTRY entry {
param0 = s32[] parameter(0)
x = s4[] convert(param0)
ROOT add = s4[] add(x, x)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
HloVerifier verifier{HloVerifierOpts{}.WithVerifyS4U4Usage(false)};
auto status = verifier.Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST(MetadataTrackerTest, MetadataTrackerLogsInfo) {
if (tsl::kIsOpenSource) {
return;
}
constexpr absl::string_view hlo = R"(
HloModule Module
ENTRY entry {
p0 = s32[] parameter(0)
p1 = s32[] parameter(1)
ROOT sum = s32[] add(p0, p1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
::absl::ScopedMockLog log(::absl::MockLogDefault::kIgnoreUnexpected);
EXPECT_CALL(
log,
Log(absl::LogSeverity::kInfo, ::testing::EndsWith("/hlo_verifier.cc"),
::testing::StartsWith("TEST PREFIX creation_pass_coverage=0")))
.Times(1);
log.StartCapturingLogs();
{
MetadataTracker tracker("TEST PREFIX");
for (const auto* c : module->computations()) {
TF_ASSERT_OK(c->Accept(&tracker));
}
}
}
TEST_F(HloVerifierTest, TopKOK) {
const char* const hlo = R"(
HloModule topk, entry_computation_layout={(f32[10,10]{0,1})->(f32[10,2]{0,1}, s32[10,2]{0,1})}
ENTRY TopK {
x = f32[10,10]{0,1} parameter(0)
ROOT topk = (f32[10,2]{0,1}, s32[10,2]{0,1}) topk(x), k=2, largest=true
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTest, InputLayoutMismatchIgnored) {
constexpr absl::string_view kHlo = R"(
HloModule module, entry_computation_layout={(f32[10,10]{1,0},f32[10,10]{1,0})->f32[10,10]{1,0}}
ENTRY entry {
x = f32[10,10]{1,0} parameter(0)
y = f32[10,10]{0,1} parameter(1)
ROOT z = f32[10,10]{1,0} dot(x, y),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kHlo));
absl::Status status = verifier().Run(module.get()).status();
TF_ASSERT_OK(status);
}
TEST_F(HloVerifierTestLayoutSensitive, InputLayoutMismatchReported) {
constexpr absl::string_view kHlo = R"(
HloModule module, entry_computation_layout={(f32[10,10]{1,0},f32[10,10]{1,0})->f32[10,10]{1,0}}
ENTRY entry {
x = f32[10,10]{1,0} parameter(0)
y = f32[10,10]{0,1} parameter(1)
ROOT z = f32[10,10]{1,0} dot(x, y),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kHlo));
absl::Status status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("should be compatible"));
}
TEST_F(HloVerifierTest, OutputLayoutMismatchIgnored) {
constexpr absl::string_view kHlo = R"(
HloModule module, entry_computation_layout={(f32[10,10]{1,0},f32[10,10]{1,0})->f32[10,10]{1,0}}
ENTRY entry {
x = f32[10,10]{1,0} parameter(0)
y = f32[10,10]{1,0} parameter(1)
ROOT z = f32[10,10]{0,1} dot(x, y),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kHlo));
absl::Status status = verifier().Run(module.get()).status();
TF_ASSERT_OK(status);
}
TEST_F(HloVerifierTestLayoutSensitive, OutputLayoutMismatchReported) {
constexpr absl::string_view kHlo = R"(
HloModule module, entry_computation_layout={(f32[10,10]{1,0},f32[10,10]{1,0})->f32[10,10]{1,0}}
ENTRY entry {
x = f32[10,10]{1,0} parameter(0)
y = f32[10,10]{1,0} parameter(1)
ROOT z = f32[10,10]{0,1} dot(x, y),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kHlo));
absl::Status status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("should be compatible"));
}
TEST_F(HloVerifierTestLayoutSensitive, AliasedMemorySpaceMismatchReported) {
constexpr absl::string_view kHlo = R"(
HloModule module, input_output_alias={{}: (0, {}, must-alias)},
entry_computation_layout={(f32[10]{0:S(5)})->f32[10]{0}}
ENTRY entry {
x = f32[10]{0} parameter(0)
ROOT add = f32[10]{0} add(x, x)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kHlo));
absl::Status status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Shape and memory space of the result"));
}
TEST_F(HloVerifierTestLayoutSensitive, LayoutOK) {
constexpr absl::string_view kHlo = R"(
HloModule module, entry_computation_layout={(f32[10,10]{1,0},f32[10,10]{1,0})->f32[10,10]{1,0}}
ENTRY entry {
x = f32[10,10]{1,0} parameter(0)
y = f32[10,10]{1,0} parameter(1)
ROOT z = f32[10,10]{1,0} dot(x, y),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kHlo));
absl::Status status = verifier().Run(module.get()).status();
TF_ASSERT_OK(status);
}
TEST_F(HloVerifierTest, MixedTypeForAllGatherAllowed) {
constexpr absl::string_view kHlo = R"(
HloModule module
ENTRY entry {
p0 = f32[10] parameter(0)
p1 = bf16[10] parameter(1)
ROOT ag = (f32[20], bf16[20]) all-gather(p0, p1), dimensions={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kHlo));
absl::Status status = verifier().Run(module.get()).status();
TF_ASSERT_OK(status);
}
TEST_F(HloVerifierTest, UnboundedDynamism) {
const char* const hlo = R"(
HloModule Module
ENTRY entry {
ROOT param0 = f32[?,784] parameter(0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("Unbounded dynamism is disabled"));
}
TEST_F(HloVerifierTest, EnableUnboundedDynamism) {
const char* const hlo = R"(
HloModule Module
ENTRY entry {
ROOT param0 = f32[?,784] parameter(0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
HloVerifier verifier{HloVerifierOpts{}.WithAllowUnboundedDynamism(true)};
auto status = verifier.Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTest, SparseDotMetadataShape) {
const char* const kHlo = R"(
HloModule test
ENTRY entry {
%lhs = f32[10,16] parameter(0)
%rhs = f32[32,20] parameter(1)
%meta = u16[10,4] parameter(2)
ROOT %dot = f32[10,20] dot(%lhs, %rhs, %meta),
lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(kHlo));
HloVerifier verifier{HloVerifierOpts{}.WithAllowUnboundedDynamism(true)};
auto status = verifier.Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("Expected sparse dot metadata"));
}
TEST_F(HloVerifierTestLayoutSensitive,
HostOffloadingDUSAndDSAreVerifiedWhenChangingLayout) {
const char* const hlo_string = R"(
HloModule m
ENTRY main {
constant_f32_0 = f32[] constant(0)
custom-call = f32[2,2048,2048]{2,1,0:S(5)} custom-call(), custom_call_target="AllocateBuffer"
data_param = f32[1,2048,2048]{2,1,0} parameter(0)
index_param = s32[] parameter(1)
constant_s32_0 = s32[] constant(0)
dynamic_update_slice = f32[2,2048,2048]{2,1,0:S(5)} dynamic-update-slice(custom-call, data_param, index_param, constant_s32_0, constant_s32_0)
ROOT dynamic_slice = f32[1,2048,2048]{2,1,0} dynamic-slice(f32[2,2048,2048]{2,1,0:S(5)} dynamic_update_slice, index_param, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,2048,2048}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTestLayoutSensitive,
HostOffloadingCopyIsVerifiedWhenChangingLayout) {
const char* const hlo_string = R"(
HloModule m
ENTRY main {
data_param = f32[2048]{0} parameter(0)
copy_0 = f32[2048]{0:S(5)} copy(f32[2048]{0} data_param)
ROOT copy_1 = f32[2048]{0} copy(f32[2048]{0:S(5)} copy_0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_TRUE(status.ok());
}
TEST_F(HloVerifierTestLayoutSensitive,
HostOffloadingDSCannotChangeLayoutFromDeviceToHost) {
const char* const hlo_string = R"(
HloModule m
ENTRY main {
constant_f32_0 = f32[] constant(0)
custom-call = f32[2,2048,2048]{2,1,0} custom-call(), custom_call_target="AllocateBuffer"
data_param = f32[1,2048,2048]{2,1,0} parameter(0)
index_param = s32[] parameter(1)
constant_s32_0 = s32[] constant(0)
dynamic_update_slice = f32[2,2048,2048]{2,1,0} dynamic-update-slice(custom-call, data_param, index_param, constant_s32_0, constant_s32_0)
ROOT dynamic_slice = f32[1,2048,2048]{2,1,0:S(5)} dynamic-slice(f32[2,2048,2048]{2,1,0} dynamic_update_slice, index_param, constant_s32_0, constant_s32_0), dynamic_slice_sizes={1,2048,2048}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("DynamicSlice instruction shouldn't change layout "
"memory space from device to host"));
}
TEST_F(HloVerifierTestLayoutSensitiveAndAllowMixedPrecision,
HostOffloadingCopyCannotChangeType) {
const char* const hlo_string = R"(
HloModule m
ENTRY main {
param = f32[1024,1024]{1,0:T(8,128)S(5)} parameter(0)
copy = bf16[1024,1024]{1,0:T(8,128)} copy(param)
ROOT dot = f32[1024,1024]{1,0:T(8,128)} dot(copy, copy), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Expected instruction to have shape equal to "
"f32[1024,1024]{1,0:T(8,128)S(5)}, actual shape is "
"bf16[1024,1024]{1,0:T(8,128)}"));
}
TEST_F(HloVerifierTestLayoutSensitiveAndAllowMixedPrecision,
HostOffloadingCopyCannotChangeLayout) {
const char* const hlo_string = R"(
HloModule m
ENTRY main {
param = f32[1024,1024]{1,0:T(8,128)S(5)} parameter(0)
ROOT copy = f32[1024,1024]{0,1:T(8,128)} copy(param)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Expected instruction to have shape equal to "
"f32[1024,1024]{1,0:T(8,128)S(5)}, actual shape is "
"f32[1024,1024]{0,1:T(8,128)}"));
}
TEST_F(HloVerifierTestLayoutSensitive,
MismatchedMinorToMajorSizeAndDimensionSize) {
const char* const hlo_string = R"(
HloModule m
ENTRY main {
data_param = f32[2048,2048]{1,0} parameter(0)
add = f32[2048,2048]{1,0} add(data_param, data_param)
ROOT const = f32[] constant(0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
HloInstruction* instruction =
module->entry_computation()->parameter_instruction(0)->users().at(0);
Layout* layout = instruction->mutable_shape()->mutable_layout();
layout->add_minor_to_major(2);
auto status = verifier().Run(module.get()).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Instruction has mismatched minor-to-major size and "
"dimension size: "));
}
TEST_F(HloVerifierTest, NoErrorOnDuplicateChannelId) {
const char* const hlo_string = R"(
HloModule m
ENTRY main {
data_param = f32[2048,2048]{1,0} parameter(0)
cp1 = f32[2048,2048]{1,0} collective-permute(data_param), source_target_pairs={{0,1},{1,2},{2,3}}, channel_id=1
cp2 = f32[2048,2048]{1,0} collective-permute(data_param), source_target_pairs={{0,1}}, channel_id=1
ROOT tuple = (f32[2048,2048]{1,0}, f32[2048,2048]{1,0}) tuple(cp1, cp2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
HloVerifierOpts opts{};
opts.verify_unique_channel_ids = false;
HloVerifier verifier(std::move(opts));
ASSERT_IS_OK(verifier.Run(module.get()).status());
}
TEST_F(HloVerifierTestLayoutSensitive, Int4CompareSelect) {
const char* const kModuleStr = R"(
HloModule test
ENTRY main {
a = s4[10]{0:E(4)} parameter(0)
b = s4[10]{0:E(4)} parameter(1)
less = pred[10] compare(a, b), direction=LT
ROOT result = select(less, a, b)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(kModuleStr));
TF_ASSERT_OK(verifier().Run(module.get()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_verifier.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_verifier_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1537adbd-f895-48a3-a962-980f34a94b42 | cpp | tensorflow/tensorflow | cudnn_norm_rewriter | third_party/xla/xla/service/gpu/transforms/cudnn_norm_rewriter.cc | third_party/xla/xla/service/gpu/transforms/cudnn_norm_rewriter_test.cc | #include "xla/service/gpu/transforms/cudnn_norm_rewriter.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <optional>
#include <utility>
#include <vector>
#include "google/protobuf/wrappers.pb.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tsl/protobuf/dnn.pb.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/include/cuda.h"
#include "third_party/gpus/cudnn/cudnn.h"
#include "third_party/gpus/cudnn/cudnn_version.h"
#endif
namespace xla {
namespace gpu {
namespace {
namespace m = match;
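// Traverses the graph upward starting at instr and returns the first
// instruction that is not a convert, bitcast or reshape.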
const HloInstruction* SkipUnaryOps(const HloInstruction* instr) {
while (instr->opcode() == HloOpcode::kConvert ||
instr->opcode() == HloOpcode::kBitcast ||
instr->opcode() == HloOpcode::kReshape) {
instr = instr->operand(0);
}
return instr;
}
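// Recursively traverses the graph downward starting at instr and collects in
// instrs the first instructions reached that are not a convert, bitcast or
// reshape.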
void SkipUnaryOpsTopDownRecursive(HloInstruction* instr,
std::vector<HloInstruction*>& instrs) {
if (instr->opcode() == HloOpcode::kConvert ||
instr->opcode() == HloOpcode::kBitcast ||
instr->opcode() == HloOpcode::kReshape) {
for (HloInstruction* user : instr->users()) {
SkipUnaryOpsTopDownRecursive(user, instrs);
}
} else {
instrs.emplace_back(instr);
}
}
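// Metadata associated with a layer norm Custom Call: the optional transposes
// applied to its input (x) and output (y), and the normalization and
// non-normalization dimensions adjusted for degenerate dimensions.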
struct NormMetadata {
HloInstruction *x_transpose, *y_transpose;
std::vector<int64_t> norm_dims_adjusted, non_norm_dims_adjusted;
};
using NormMetadataMap = absl::flat_hash_map<HloInstruction*, NormMetadata>;
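// Captures an HloInstruction on first use and verifies that subsequent
// captures refer to the same instruction; Instr() becomes nullptr if the
// captures disagree. GetCaptureOrVerifyFn() adapts this as a pattern-matcher
// predicate.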
class UniqueHloInstruction {
public:
UniqueHloInstruction()
: is_set_(false), instr_(nullptr), capture_or_verify_() {}
HloInstruction* Instr() const { return instr_; }
void SetInstr(HloInstruction* instr) {
is_set_ = true;
instr_ = instr;
}
bool CaptureOrVerify(HloInstruction* instr) {
if (is_set_ && instr != instr_) {
instr_ = nullptr;
}
if (!is_set_) {
is_set_ = true;
instr_ = instr;
}
return instr_;
}
std::function<bool(const HloInstruction*)> GetCaptureOrVerifyFn() {
if (!capture_or_verify_) {
capture_or_verify_ = [this](const HloInstruction* instr) -> bool {
return CaptureOrVerify(const_cast<HloInstruction*>(instr));
};
}
return capture_or_verify_;
}
private:
bool is_set_;
HloInstruction* instr_;
std::function<bool(const HloInstruction*)> capture_or_verify_;
};
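// Returns an architecture-specific constant used to bound the problem sizes
// supported by the norm kernels, which require Ampere or Hopper GPUs.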
absl::StatusOr<int64_t> CConstant(
se::CudaComputeCapability cuda_compute_capability) {
if (cuda_compute_capability.major == se::CudaComputeCapability::AMPERE) {
return 32 * 128;
} else if (cuda_compute_capability.major ==
se::CudaComputeCapability::HOPPER) {
return 32 * 144;
}
return xla::Internal("Norm kernels require Ampere or Hopper architecture.");
}
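// Returns whether the element type of instr is supported by the norm kernels
// (BF16, F16 or F32).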
bool CompatibleElementType(const HloInstruction* instr) {
PrimitiveType element_type = instr->shape().element_type();
return element_type == BF16 || element_type == F16 || element_type == F32;
}
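// Maps each dimension in dimensions to its index among the non-degenerate
// (size > 1) dimensions of shape; degenerate dimensions are dropped.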
std::vector<int64_t> AdjustedDimensions(const Shape& shape,
absl::Span<const int64_t> dimensions) {
absl::flat_hash_map<int64_t, int64_t> dimension_map;
for (int64_t dimension = 0, non_degen_dimension = 0; dimension < shape.rank();
++dimension) {
if (shape.dimensions(dimension) > 1) {
dimension_map.insert({dimension, non_degen_dimension});
non_degen_dimension++;
}
}
std::vector<int64_t> adjusted_dimensions;
for (int64_t dimension : dimensions) {
auto non_degenerate_dimension = dimension_map.find(dimension);
if (non_degenerate_dimension != dimension_map.end()) {
adjusted_dimensions.emplace_back(non_degenerate_dimension->second);
}
}
return adjusted_dimensions;
}
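// Returns the dimensions of a broadcast or reduce instruction, adjusted for
// the degenerate dimensions of the broadcast shape or the reduce operand
// shape; returns an empty vector for other opcodes.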
std::vector<int64_t> AdjustedDimensions(const HloInstruction* instr) {
Shape shape;
if (instr->opcode() == HloOpcode::kBroadcast) {
shape = instr->shape();
} else if (instr->opcode() == HloOpcode::kReduce) {
shape = instr->operand(0)->shape();
} else {
return {};
}
return AdjustedDimensions(shape, instr->dimensions());
}
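// Returns whether instr is a reduction with a scalar zero initial value whose
// reduce computation adds its two parameters, optionally verifying that the
// adjusted reduction dimensions equal reduce_dims.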
bool AppliesAddReduce(const HloInstruction* instr,
absl::Span<const int64_t> reduce_dims = {}) {
if (instr->opcode() != HloOpcode::kReduce) {
return false;
}
if (!reduce_dims.empty() && AdjustedDimensions(instr) != reduce_dims) {
return false;
}
HloComputation* reduce_comp = instr->to_apply();
HloInstruction* reduce_comp_root = reduce_comp->root_instruction();
return instr->operand_count() == 2 &&
instr->operand(1)->opcode() == HloOpcode::kConstant &&
ShapeUtil::IsScalar(instr->operand(1)->shape()) &&
instr->operand(1)->literal().GetAsDouble({}) == 0. &&
reduce_comp_root->opcode() == HloOpcode::kAdd &&
reduce_comp_root->operand(0)->opcode() == HloOpcode::kParameter &&
reduce_comp_root->operand(1)->opcode() == HloOpcode::kParameter;
}
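// Returns whether instr (after skipping converts, bitcasts and reshapes)
// multiplies a reduction by the reciprocal of the number of reduced elements,
// i.e. calculates a mean.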
bool CalculatesExpectation(const HloInstruction* instr) {
instr = SkipUnaryOps(instr);
if (instr->opcode() != HloOpcode::kMultiply) {
return false;
}
bool bcast_operand = instr->operand(0)->opcode() != HloOpcode::kBroadcast;
const HloInstruction *broadcast = instr->operand(bcast_operand),
*reduce = SkipUnaryOps(instr->operand(!bcast_operand));
if (reduce->opcode() != HloOpcode::kReduce ||
broadcast->opcode() != HloOpcode::kBroadcast ||
broadcast->operand(0)->opcode() != HloOpcode::kConstant) {
return false;
}
float actual_r_nelems =
broadcast->operand(0)->literal().GetAsDouble({}).value();
int64_t nelems = 1;
for (int64_t norm_dim : reduce->dimensions()) {
nelems *= reduce->operand(0)->shape().dimensions()[norm_dim];
}
float r_nelems = 1. / static_cast<float>(nelems);
float numerical_epsilon = std::numeric_limits<bfloat16>::epsilon();
return std::abs(actual_r_nelems - r_nelems) <
((actual_r_nelems + r_nelems) * numerical_epsilon);
}
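// Returns whether target can be reached from instr by stepping across
// converts, bitcasts and reshapes (and the transpose associated with the
// Custom Call), following users downward and operands upward.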
bool FindTargetRecursive(
const HloInstruction* instr, const HloInstruction* target,
absl::flat_hash_set<const HloInstruction*>& visited_instrs,
const HloInstruction* transpose) {
visited_instrs.emplace(instr);
const absl::flat_hash_set<HloOpcode> supported_ops = {
HloOpcode::kConvert, HloOpcode::kBitcast, HloOpcode::kReshape};
if (instr == target) {
return true;
}
for (HloInstruction* user : instr->users()) {
if ((supported_ops.contains(user->opcode()) || user == transpose) &&
!visited_instrs.contains(user)) {
return FindTargetRecursive(user, target, visited_instrs, transpose);
}
}
if (supported_ops.contains(instr->opcode())) {
return FindTargetRecursive(instr->operand(0), target, visited_instrs,
transpose);
}
return false;
}
bool FindTarget(const HloInstruction* custom_call, const HloInstruction* instr,
const HloInstruction* target,
const NormMetadataMap& norm_metadata) {
absl::flat_hash_set<const HloInstruction*> visited_instrs;
auto custom_call_metadata = norm_metadata.find(custom_call);
if (custom_call_metadata == norm_metadata.end()) {
return false;
}
return FindTargetRecursive(instr, target, visited_instrs,
custom_call_metadata->second.x_transpose);
}
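// Maps dimensions of original_shape onto reshaped_shape by matching groups of
// dimensions with equal element counts. Returns the reshaped dimensions
// corresponding to dimensions, or an empty vector if the mapping is ambiguous
// or impossible.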
std::vector<int64_t> MapDimensions(const Shape& original_shape,
const Shape& reshaped_shape,
const absl::Span<const int64_t> dimensions) {
auto dimension_product =
[](const Shape& shape,
absl::Span<const int64_t> product_dimensions) -> int64_t {
int64_t product = 1;
for (int64_t product_dimension : product_dimensions) {
product *= shape.dimensions(product_dimension);
}
return product;
};
absl::flat_hash_map<int64_t, std::vector<int64_t>> dimensions_map;
std::vector<int64_t> original_dimensions, reshaped_dimensions;
for (int64_t original_dimension = 0, reshaped_dimension = 0;
original_dimension < original_shape.rank(); ++original_dimension) {
original_dimensions.emplace_back(original_dimension);
while ((reshaped_dimensions.empty() ||
dimension_product(reshaped_shape, reshaped_dimensions) <
dimension_product(original_shape, original_dimensions)) &&
reshaped_dimension < reshaped_shape.rank()) {
reshaped_dimensions.emplace_back(reshaped_dimension++);
}
if (original_dimensions.size() > 1 && reshaped_dimensions.size() > 1) {
return {};
}
if (dimension_product(original_shape, original_dimensions) ==
dimension_product(reshaped_shape, reshaped_dimensions)) {
std::vector<int64_t> original_dimensions_in_dimensions;
std::set_intersection(
original_dimensions.begin(), original_dimensions.end(),
dimensions.begin(), dimensions.end(),
std::back_inserter(original_dimensions_in_dimensions));
if (!original_dimensions_in_dimensions.empty() &&
original_dimensions_in_dimensions.size() !=
original_dimensions.size()) {
return {};
}
for (int64_t dimension : original_dimensions) {
dimensions_map.insert({dimension, reshaped_dimensions});
}
original_dimensions.clear();
reshaped_dimensions.clear();
}
}
std::vector<int64_t> mapped_dimensions;
for (int64_t dimension : dimensions) {
auto mapped_dimension = dimensions_map.find(dimension);
if (mapped_dimension == dimensions_map.end()) {
return {};
}
mapped_dimensions.insert(mapped_dimensions.end(),
mapped_dimension->second.begin(),
mapped_dimension->second.end());
}
mapped_dimensions.erase(
std::unique(mapped_dimensions.begin(), mapped_dimensions.end()),
mapped_dimensions.end());
return mapped_dimensions;
}
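// Recursively traverses converts, bitcasts and reshapes starting from instr
// and returns the first addition-reduction over reduce_dims mapped onto the
// visited shape, or nullptr if none is found.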
HloInstruction* FindAddReduceRecursive(
HloInstruction* instr, const Shape& orig_instr_shape,
const absl::Span<const int64_t> reduce_dims,
absl::flat_hash_set<HloInstruction*>& visited_instrs) {
visited_instrs.emplace(instr);
const absl::flat_hash_set<HloOpcode> supported_ops = {
HloOpcode::kConvert, HloOpcode::kBitcast, HloOpcode::kReshape};
for (HloInstruction* user : instr->users()) {
if (user->opcode() == HloOpcode::kReduce) {
std::vector<int64_t> mapped_reduce_dims =
MapDimensions(orig_instr_shape, instr->shape(), reduce_dims);
if (!mapped_reduce_dims.empty() &&
AppliesAddReduce(user, mapped_reduce_dims)) {
return user;
}
}
if (supported_ops.contains(user->opcode()) &&
!visited_instrs.contains(user)) {
return FindAddReduceRecursive(user, orig_instr_shape, reduce_dims,
visited_instrs);
}
}
if (supported_ops.contains(instr->opcode())) {
return FindAddReduceRecursive(instr->mutable_operand(0), orig_instr_shape,
reduce_dims, visited_instrs);
}
return nullptr;
}
HloInstruction* FindAddReduce(HloInstruction* instr,
const absl::Span<const int64_t> reduce_dims) {
absl::flat_hash_set<HloInstruction*> visited_instrs;
return FindAddReduceRecursive(instr, instr->shape(), reduce_dims,
visited_instrs);
}
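// Type conversion between any of the supported element types BF16, F16 and
// F32.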
template <typename Pattern>
auto SupportedConvert(Pattern pattern) {
auto supported_convert = [](const HloInstruction* instr) -> bool {
return CompatibleElementType(instr) &&
CompatibleElementType(instr->operand(0));
};
return m::Convert(pattern).WithPredicate(supported_convert);
}
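// Bitcast or reshape that only adds or removes degenerate dimensions.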
template <typename Pattern>
auto SupportedBitcastOrReshape(Pattern pattern) {
auto supported_bitcast_or_reshape = [](const HloInstruction* instr) -> bool {
return ShapeUtil::Equal(
ShapeUtil::DropDegenerateDimensions(instr->shape()),
ShapeUtil::DropDegenerateDimensions(instr->operand(0)->shape()));
};
return m::AnyOf<HloInstruction>(
m::Bitcast(pattern).WithPredicate(supported_bitcast_or_reshape),
m::Reshape(pattern).WithPredicate(supported_bitcast_or_reshape));
}
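// Matches pattern, optionally wrapped in a supported type conversion and/or a
// degenerate-dimension bitcast or reshape.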
template <typename Pattern>
auto OptionalSupportedTransform(Pattern pattern) {
auto shared_subpattern = m::SharedSubpattern(pattern);
return m::AnyOf<HloInstruction>(
SupportedConvert(SupportedBitcastOrReshape(shared_subpattern)),
SupportedBitcastOrReshape(SupportedConvert(shared_subpattern)),
SupportedConvert(shared_subpattern),
SupportedBitcastOrReshape(shared_subpattern), shared_subpattern);
}
template <typename Pattern>
auto BitcastOrReshape(Pattern pattern) {
return OptionalSupportedTransform(
m::AnyOf<HloInstruction>(m::Bitcast(pattern), m::Reshape(pattern)));
}
template <typename Pattern>
auto Transpose(Pattern pattern) {
return OptionalSupportedTransform(m::Transpose(pattern));
}
template <typename Pattern>
auto Rsqrt(HloInstruction** rsqrt, Pattern pattern) {
return OptionalSupportedTransform(m::Rsqrt(rsqrt, pattern));
}
template <typename Pattern0, typename Pattern1>
auto AddAnyOrder(Pattern0 pattern0, Pattern1 pattern1) {
return OptionalSupportedTransform(m::AddAnyOrder(pattern0, pattern1));
}
template <typename Pattern0, typename Pattern1>
auto Subtract(Pattern0 pattern0, Pattern1 pattern1) {
return OptionalSupportedTransform(m::Subtract(pattern0, pattern1));
}
template <typename Pattern0, typename Pattern1>
auto Subtract(HloInstruction** subtract, Pattern0 pattern0, Pattern1 pattern1) {
return OptionalSupportedTransform(m::Subtract(subtract, pattern0, pattern1));
}
template <typename Pattern0, typename Pattern1>
auto MultiplyAnyOrder(Pattern0 pattern0, Pattern1 pattern1) {
return OptionalSupportedTransform(m::MultiplyAnyOrder(pattern0, pattern1));
}
template <typename Pattern0, typename Pattern1>
auto MultiplyAnyOrder(HloInstruction** multiply, Pattern0 pattern0,
Pattern1 pattern1) {
return OptionalSupportedTransform(
m::MultiplyAnyOrder(multiply, pattern0, pattern1));
}
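// Multiplication of pattern by itself, requiring a single unique operand.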
template <typename Pattern>
auto Square(Pattern pattern) {
return MultiplyAnyOrder(pattern, pattern)
.WithPredicate([](const HloInstruction* instr) {
return instr->unique_operands().size() == 1;
});
}
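// Multiplication of Square(pattern) by pattern, i.e. the third power of
// pattern.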
template <typename Pattern>
auto Cube(Pattern pattern) {
auto unique_cube = [](const HloInstruction* instr) -> bool {
bool square_operand = instr->operand(0)->opcode() != HloOpcode::kMultiply;
return instr->operand(!square_operand)->opcode() != HloOpcode::kMultiply &&
instr->operand(square_operand)->operand(0) ==
instr->operand(!square_operand);
};
return MultiplyAnyOrder(Square(pattern), pattern).WithPredicate(unique_cube);
}
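// Addition-reduction of pattern.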
template <typename Pattern>
auto AddReduce(Pattern pattern) {
return OptionalSupportedTransform(
m::Reduce(pattern, m::Op())
.WithPredicate([](const HloInstruction* instr) {
return AppliesAddReduce(instr);
}));
}
template <typename Pattern>
auto AddReduce(HloInstruction** reduction, Pattern pattern) {
return OptionalSupportedTransform(
m::Reduce(reduction, pattern, m::Op())
.WithPredicate([](const HloInstruction* instr) {
return AppliesAddReduce(instr);
}));
}
template <typename Pattern>
auto NegateAddReduce(HloInstruction** reduction, Pattern pattern) {
return m::AnyOf<HloInstruction>(AddReduce(reduction, m::Negate(pattern)),
m::Negate(AddReduce(reduction, pattern)));
}
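// Expected value (mean) of pattern: a broadcasted constant multiplied by an
// addition-reduction of pattern, verified by CalculatesExpectation and
// optionally broadcast.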
template <typename Pattern>
auto Expectation(Pattern pattern) {
auto shared_subpattern =
MultiplyAnyOrder(m::Broadcast(m::ConstantScalar()), AddReduce(pattern))
.WithPredicate([](const HloInstruction* instr) {
return CalculatesExpectation(instr);
});
return m::AnyOf<HloInstruction>(m::Broadcast(shared_subpattern),
shared_subpattern);
}
template <typename Pattern>
auto Expectation(UniqueHloInstruction* expectation, Pattern pattern) {
auto shared_subpattern = OptionalSupportedTransform(
m::MultiplyAnyOrder(m::Broadcast(m::ConstantScalar()), AddReduce(pattern))
.WithPredicate([](const HloInstruction* instr) {
return CalculatesExpectation(instr);
})
.WithPredicate(expectation->GetCaptureOrVerifyFn()));
return m::AnyOf<HloInstruction>(m::Broadcast(shared_subpattern),
shared_subpattern);
}
template <typename Pattern>
auto Expectation(UniqueHloInstruction* expectation, HloInstruction** reduce,
Pattern pattern) {
auto shared_subpattern = OptionalSupportedTransform(
m::MultiplyAnyOrder(m::Broadcast(m::ConstantScalar()),
AddReduce(reduce, pattern))
.WithPredicate([](const HloInstruction* instr) {
return CalculatesExpectation(instr);
})
.WithPredicate(expectation->GetCaptureOrVerifyFn()));
return m::AnyOf<HloInstruction>(m::Broadcast(shared_subpattern),
shared_subpattern);
}
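// Variance of x, matched either as E[x^2] - E[x]^2 or as E[(x - E[x])^2].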
auto Variance(UniqueHloInstruction* variance, UniqueHloInstruction* expectation,
UniqueHloInstruction* x) {
return m::AnyOf<HloInstruction>(
Subtract(
Expectation(Square(OptionalSupportedTransform(
m::Op().WithPredicate(x->GetCaptureOrVerifyFn())))),
Square(Expectation(expectation,
OptionalSupportedTransform(m::Op().WithPredicate(
x->GetCaptureOrVerifyFn())))))
.WithPredicate(variance->GetCaptureOrVerifyFn()),
Expectation(
Square(Subtract(
OptionalSupportedTransform(
m::Op().WithPredicate(x->GetCaptureOrVerifyFn())),
Expectation(expectation,
OptionalSupportedTransform(m::Op().WithPredicate(
x->GetCaptureOrVerifyFn()))))))
.WithPredicate(variance->GetCaptureOrVerifyFn()));
}
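// Norm factor: the reciprocal square root of the variance plus a broadcasted
// scalar epsilon, optionally broadcast.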
auto NormFactor(HloInstruction** norm_factor, UniqueHloInstruction* x,
UniqueHloInstruction* variance,
UniqueHloInstruction* expectation,
UniqueHloInstruction* epsilon) {
auto shared_subpattern = m::SharedSubpattern(Rsqrt(
norm_factor, AddAnyOrder(Variance(variance, expectation, x),
m::Broadcast(m::ConstantScalar().WithPredicate(
epsilon->GetCaptureOrVerifyFn())))));
return m::AnyOf<HloInstruction>(m::Broadcast(shared_subpattern),
shared_subpattern);
}
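// p0 * p1 * p2 in any association and order.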
template <typename P0, typename P1, typename P2>
auto MultiplyMultiplyAnyOrder(P0 p0, P1 p1, P2 p2) {
return m::AnyOf<HloInstruction>(
MultiplyAnyOrder(p0, MultiplyAnyOrder(p1, p2)),
MultiplyAnyOrder(p1, MultiplyAnyOrder(p0, p2)),
MultiplyAnyOrder(p2, MultiplyAnyOrder(p0, p1)));
}
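// p0 + p1 + p2 in any association and order.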
template <typename P0, typename P1, typename P2>
auto AddAddAnyOrder(P0 p0, P1 p1, P2 p2) {
return m::AnyOf<HloInstruction>(AddAnyOrder(p0, AddAnyOrder(p1, p2)),
AddAnyOrder(p1, AddAnyOrder(p0, p2)),
AddAnyOrder(p2, AddAnyOrder(p0, p1)));
}
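// p0 * (p1 + p2) in any order, including the distributed form
// p0 * p1 + p0 * p2.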
template <typename P0, typename P1, typename P2>
auto MultiplyAddAnyOrder(P0 p0, P1 p1, P2 p2) {
return m::AnyOf<HloInstruction>(
MultiplyAnyOrder(p0, AddAnyOrder(p1, p2)),
AddAnyOrder(MultiplyAnyOrder(p0, p1), MultiplyAnyOrder(p0, p2)));
}
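// p0 - p1 + p2 in any association and order.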
template <typename P0, typename P1, typename P2>
auto SubtractAddAnyOrder(P0 p0, P1 p1, P2 p2) {
return m::AnyOf<HloInstruction>(AddAnyOrder(Subtract(p0, p1), p2),
AddAnyOrder(Subtract(p2, p1), p0),
Subtract(AddAnyOrder(p0, p2), p1));
}
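// (p0 - p1) * p2 * p3 + p4 in any order, including the distributed form.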
template <typename P0, typename P1, typename P2, typename P3, typename P4>
auto SubtractMultiplyAddAnyOrder(P0 p0, P1 p1, P2 p2, P3 p3, P4 p4) {
return m::AnyOf<HloInstruction>(
SubtractAddAnyOrder(MultiplyMultiplyAnyOrder(p0, p2, p3),
MultiplyMultiplyAnyOrder(p1, p2, p3), p4),
AddAnyOrder(MultiplyMultiplyAnyOrder(Subtract(p0, p1), p2, p3), p4));
}
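// Expectation taken from tuple element 1 of an existing layer norm Custom
// Call, optionally bitcast or reshaped.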
auto FusedExpectation(UniqueHloInstruction* custom_call) {
auto shared_subpattern = m::SharedSubpattern(m::GetTupleElement(
m::CustomCall({kCudnnNormCallTarget})
.WithPredicate(custom_call->GetCaptureOrVerifyFn()),
1));
return m::AnyOf<HloInstruction>(shared_subpattern,
BitcastOrReshape(shared_subpattern));
}
auto FusedExpectation(UniqueHloInstruction* fused_expectation,
UniqueHloInstruction* custom_call) {
auto shared_subpattern = m::SharedSubpattern(
m::GetTupleElement(
m::CustomCall({kCudnnNormCallTarget})
.WithPredicate(custom_call->GetCaptureOrVerifyFn()),
1)
.WithPredicate(fused_expectation->GetCaptureOrVerifyFn()));
return m::AnyOf<HloInstruction>(shared_subpattern,
BitcastOrReshape(shared_subpattern));
}
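// Norm factor taken from tuple element 2 of an existing layer norm Custom
// Call, optionally bitcast or reshaped.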
auto FusedNormFactor(UniqueHloInstruction* custom_call) {
auto shared_subpattern = m::SharedSubpattern(m::GetTupleElement(
m::CustomCall({kCudnnNormCallTarget})
.WithPredicate(custom_call->GetCaptureOrVerifyFn()),
2));
return m::AnyOf<HloInstruction>(shared_subpattern,
BitcastOrReshape(shared_subpattern));
}
auto FusedNormFactor(UniqueHloInstruction* fused_norm_factor,
UniqueHloInstruction* custom_call) {
auto shared_subpattern = m::SharedSubpattern(
m::GetTupleElement(
m::CustomCall({kCudnnNormCallTarget})
.WithPredicate(custom_call->GetCaptureOrVerifyFn()),
2)
.WithPredicate(fused_norm_factor->GetCaptureOrVerifyFn()));
return m::AnyOf<HloInstruction>(shared_subpattern,
BitcastOrReshape(shared_subpattern));
}
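// -1/2 * norm_factor^3, the derivative of the norm factor with respect to
// variance + epsilon.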
auto DNormFactor(UniqueHloInstruction* custom_call) {
return MultiplyAnyOrder(m::Broadcast(m::ConstantScalar(-0.5)),
Cube(FusedNormFactor(custom_call)));
}
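// Zero-centered input of the layer norm, X - E[X], with the expectation taken
// from the forward Custom Call. Captures X and verifies that it is the first
// operand of the Custom Call.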
auto XCenter(UniqueHloInstruction* x, UniqueHloInstruction* custom_call,
const NormMetadataMap& norm_metadata) {
auto capture_or_verify_x =
[x, custom_call, &norm_metadata](const HloInstruction* instr) -> bool {
return x->CaptureOrVerify(
FindTarget(custom_call->Instr(), instr->operand(0),
custom_call->Instr()->operand(0), norm_metadata)
? custom_call->Instr()->mutable_operand(0)
: nullptr);
};
return Subtract(m::Op(), m::Broadcast(FusedExpectation(custom_call)))
.WithPredicate(capture_or_verify_x);
}
auto XCenter(UniqueHloInstruction* x_center, UniqueHloInstruction* x,
UniqueHloInstruction* fused_expectation,
UniqueHloInstruction* custom_call,
const NormMetadataMap& norm_metadata) {
auto capture_or_verify_x =
[x, custom_call, &norm_metadata](const HloInstruction* instr) -> bool {
return x->CaptureOrVerify(
FindTarget(custom_call->Instr(), instr->operand(0),
custom_call->Instr()->operand(0), norm_metadata)
? custom_call->Instr()->mutable_operand(0)
: nullptr);
};
return Subtract(m::Op(), m::Broadcast(FusedExpectation(fused_expectation,
custom_call)))
.WithPredicate(x_center->GetCaptureOrVerifyFn())
.WithPredicate(capture_or_verify_x);
}
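// Addition-reduction of XCenter * scale * DY, capturing scale and DY and
// verifying that scale matches operand 1 of the forward Custom Call.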
auto F0(UniqueHloInstruction* custom_call, UniqueHloInstruction* scale,
UniqueHloInstruction* dy, UniqueHloInstruction* x,
HloInstruction** reduce, const NormMetadataMap& norm_metadata) {
auto capture_or_verify_scale = [scale, custom_call, &norm_metadata](
const HloInstruction* instr) -> bool {
return scale->CaptureOrVerify(FindTarget(custom_call->Instr(), instr,
custom_call->Instr()->operand(1),
norm_metadata)
? custom_call->Instr()->mutable_operand(1)
: nullptr);
};
return AddReduce(
reduce, MultiplyMultiplyAnyOrder(
XCenter(x, custom_call, norm_metadata),
m::Broadcast(m::Op().WithPredicate(capture_or_verify_scale)),
m::Op().WithPredicate(dy->GetCaptureOrVerifyFn())));
}
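// F1 = XCenter * DNormFactor * F0 * 2 / nelems, where nelems is the number of
// elements in the normalization dimensions.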
auto F1(UniqueHloInstruction* x, UniqueHloInstruction* x_center,
UniqueHloInstruction* fused_expectation,
UniqueHloInstruction* custom_call, UniqueHloInstruction* scale,
UniqueHloInstruction* dy, HloInstruction** reduce,
const NormMetadataMap& norm_metadata) {
auto broadcasts_two_over_nelems = [](const HloInstruction* instr) -> bool {
const HloInstruction* multiply = SkipUnaryOps(instr->operand(0));
bool bcast_operand =
multiply->operand(0)->opcode() != HloOpcode::kBroadcast;
float actual_two_over_nelems = multiply->operand(bcast_operand)
->operand(0)
->literal()
.GetAsDouble({})
.value();
int64_t nelems = 1;
for (int i = 0; i < instr->shape().dimensions_size(); ++i) {
if (!absl::c_linear_search(instr->dimensions(), i)) {
nelems *= instr->shape().dimensions()[i];
}
}
float two_over_nelems = 2. / static_cast<float>(nelems);
float numerical_epsilon = std::numeric_limits<bfloat16>::epsilon();
return std::abs(actual_two_over_nelems - two_over_nelems) <
((actual_two_over_nelems + two_over_nelems) * numerical_epsilon);
};
return MultiplyAnyOrder(
XCenter(x_center, x, fused_expectation, custom_call, norm_metadata),
m::Broadcast(
MultiplyAnyOrder(m::Broadcast(m::ConstantScalar()),
MultiplyAnyOrder(DNormFactor(custom_call),
F0(custom_call, scale, dy, x,
reduce, norm_metadata))))
.WithPredicate(broadcasts_two_over_nelems));
}
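// F2 = norm_factor * scale * DY, with the norm factor taken from the forward
// Custom Call and scale verified against its operand 1.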
auto F2(UniqueHloInstruction* fused_norm_factor, UniqueHloInstruction* scale,
UniqueHloInstruction* dy, UniqueHloInstruction* custom_call,
const NormMetadataMap& norm_metadata) {
auto capture_or_verify_scale = [scale, custom_call, &norm_metadata](
const HloInstruction* instr) -> bool {
return scale->CaptureOrVerify(
FindTarget(custom_call->Instr(), instr->operand(0),
custom_call->Instr()->operand(1), norm_metadata)
? custom_call->Instr()->mutable_operand(1)
: nullptr);
};
return MultiplyAnyOrder(
m::Broadcast(
BitcastOrReshape(FusedNormFactor(fused_norm_factor, custom_call))),
MultiplyAnyOrder(m::Broadcast().WithPredicate(capture_or_verify_scale),
m::Op().WithPredicate(dy->GetCaptureOrVerifyFn())));
}
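// Rewrites layer norm forward and gradient (backward) patterns into cuDNN
// norm Custom Calls.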
class CudnnNormRewriterVisitor : public DfsHloRewriteVisitor {
public:
explicit CudnnNormRewriterVisitor(
const se::CudaComputeCapability cuda_compute_capability)
: cuda_compute_capability_(cuda_compute_capability) {}
absl::Status HandleAdd(HloInstruction* instr) override {
TF_RETURN_IF_ERROR(MatchLayerNorm(instr));
TF_RETURN_IF_ERROR(MatchLayerNormGradient(instr));
return absl::OkStatus();
}
absl::Status HandleSubtract(HloInstruction* instr) override {
return MatchLayerNorm(instr);
}
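// Matches the forward layer norm pattern,
// Y = (X - E[X]) * (Var[X] + epsilon)^-1/2 * scale + bias,
// and rewrites it into a cuDNN norm Custom Call.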
absl::Status MatchLayerNorm(HloInstruction* instr) {
UniqueHloInstruction x, expectation, variance, epsilon;
HloInstruction *scale, *bias, *reduce, *norm_factor, *broadcast_scale,
*broadcast_bias;
if (Match(
instr,
SubtractMultiplyAddAnyOrder(
OptionalSupportedTransform(
m::Op().WithPredicate(x.GetCaptureOrVerifyFn())),
Expectation(&expectation, &reduce,
OptionalSupportedTransform(m::Op().WithPredicate(
x.GetCaptureOrVerifyFn()))),
NormFactor(&norm_factor, &x, &variance, &expectation, &epsilon),
m::Broadcast(&broadcast_scale, m::Op(&scale)),
m::Broadcast(&broadcast_bias, m::Op(&bias))))) {
#if CUDNN_VERSION < 8905
VLOG(1) << "Layer norm Custom Calls require cuDNN 8.9.5.";
return absl::OkStatus();
#endif
if (!instr->GetModule()
->config()
.debug_options()
.xla_gpu_enable_cudnn_layer_norm()) {
VLOG(1) << "Layer norm Custom Calls disabled.";
return absl::OkStatus();
}
if (cuda_compute_capability_.major != se::CudaComputeCapability::AMPERE &&
cuda_compute_capability_.major != se::CudaComputeCapability::HOPPER) {
VLOG(1) << "Layer norm Custom Calls require Ampere or Hopper "
"architectures.";
return absl::OkStatus();
}
if (!x.Instr() || !expectation.Instr() || !variance.Instr() ||
!epsilon.Instr()) {
VLOG(1) << "Layer norm operands not unique.";
return absl::OkStatus();
}
if (!LayoutUtil::IsMonotonicWithDim0Major(x.Instr()->shape().layout()) ||
!LayoutUtil::IsMonotonicWithDim0Major(scale->shape().layout()) ||
!LayoutUtil::IsMonotonicWithDim0Major(bias->shape().layout()) ||
!LayoutUtil::IsMonotonicWithDim0Major(instr->shape().layout())) {
VLOG(1) << "Layer norm input and/or output layouts nor supported.";
return absl::OkStatus();
}
if (!CompatibleElementType(instr) || !CompatibleElementType(scale) ||
!CompatibleElementType(bias) ||
!ShapeUtil::SameElementType(instr->shape(), x.Instr()->shape()) ||
!ShapeUtil::Equal(scale->shape(), bias->shape())) {
VLOG(1) << "Layer norm input types or shapes not supported.";
return absl::OkStatus();
}
std::vector<int64_t> norm_dims(reduce->dimensions().begin(),
reduce->dimensions().end());
std::vector<int64_t> norm_dims_adjusted = AdjustedDimensions(reduce);
if (norm_dims_adjusted.size() !=
ShapeUtil::DropDegenerateDimensions(scale->shape())
.dimensions_size()) {
VLOG(1) << "Layer norm input dimensions not supported.";
return absl::OkStatus();
}
if (!ShapeUtil::EqualIgnoringElementType(
ShapeUtil::DropDegenerateDimensions(reduce->operand(0)->shape()),
ShapeUtil::DropDegenerateDimensions(broadcast_scale->shape())) ||
!ShapeUtil::EqualIgnoringElementType(
ShapeUtil::DropDegenerateDimensions(reduce->operand(0)->shape()),
ShapeUtil::DropDegenerateDimensions(broadcast_bias->shape())) ||
norm_dims_adjusted != AdjustedDimensions(broadcast_scale) ||
norm_dims_adjusted != AdjustedDimensions(broadcast_bias)) {
VLOG(1) << "Layer norm operand broadcast not supported.";
return absl::OkStatus();
}
std::vector<int64_t> non_norm_dims;
for (int64_t x_dim = 0; x_dim < x.Instr()->shape().rank(); ++x_dim) {
if (std::find(norm_dims.begin(), norm_dims.end(), x_dim) ==
norm_dims.end()) {
non_norm_dims.emplace_back(x_dim);
}
}
std::vector<int64_t> non_norm_dims_adjusted =
AdjustedDimensions(x.Instr()->shape(), non_norm_dims);
std::vector<int64_t> x_transpose_order = non_norm_dims;
x_transpose_order.insert(x_transpose_order.end(), norm_dims.begin(),
norm_dims.end());
bool apply_transpose = false;
for (int i = 0; i < x_transpose_order.size(); ++i) {
if (x_transpose_order[i] != i) {
apply_transpose = true;
break;
}
}
std::optional<HloInstruction*> x_transpose;
std::vector<int64_t> y_transpose_order(x_transpose_order.size());
if (apply_transpose) {
for (int k = 0; k < x_transpose_order.size(); ++k) {
y_transpose_order[x_transpose_order[k]] = k;
}
TF_ASSIGN_OR_RETURN(x_transpose,
MakeTransposeHlo(x.Instr(), x_transpose_order));
}
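      // Collapse the non-normalization dimensions into the leading dimension
      // and pad with trailing unit dimensions so that the custom call always
      // sees a rank-4 shape of the form [non_norm, norm..., 1, 1].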
std::vector<int64_t> reshaped_dims = {1};
for (auto non_norm_dim : non_norm_dims) {
reshaped_dims[0] *= x.Instr()->shape().dimensions(non_norm_dim);
}
for (auto norm_dim : norm_dims) {
reshaped_dims.emplace_back(x.Instr()->shape().dimensions(norm_dim));
}
while (reshaped_dims.size() < 4) {
reshaped_dims.emplace_back(1);
}
Shape reshaped_shape = ShapeUtil::MakeShape(
x.Instr()->shape().element_type(), reshaped_dims);
TF_ASSIGN_OR_RETURN(
HloInstruction * x_reshape,
MakeReshapeHlo(reshaped_shape, x_transpose.value_or(x.Instr())));
std::vector<int64_t> reshaped_scale_dims = reshaped_dims;
reshaped_scale_dims[0] = 1;
Shape scale_bias_shape = ShapeUtil::MakeShape(
scale->shape().element_type(), reshaped_scale_dims);
TF_ASSIGN_OR_RETURN(HloInstruction * scale_reshape,
MakeReshapeHlo(scale_bias_shape, scale));
TF_ASSIGN_OR_RETURN(HloInstruction * bias_reshape,
MakeReshapeHlo(scale_bias_shape, bias));
GpuBackendConfig gpu_backend_config;
CudnnNormBackendConfig& backend_config =
*gpu_backend_config.mutable_cudnn_norm_backend_config();
backend_config.set_epsilon(
epsilon.Instr()->literal().GetAsDouble({}).value());
backend_config.set_kind(CudnnNormBackendConfig::LAYER_FWD_INFER);
auto* algorithm = backend_config.mutable_algorithm();
algorithm->set_algo_id(0);
algorithm->set_math_type(se::dnn::AlgorithmProto::TENSOR_OP_MATH);
algorithm->set_is_cudnn_frontend(true);
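      // Reserve scratch space for the custom call; the size is an empirical
      // estimate derived from the device constant CConstant and the number of
      // normalized rows.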
TF_ASSIGN_OR_RETURN(const int64_t c_constant,
CConstant(cuda_compute_capability_));
const int64_t workspace_size =
(2 * c_constant * (4 + 256)) + (2 * reshaped_dims[0] * 4) + 64;
algorithm->mutable_workspace_size()->set_value(workspace_size);
Shape custom_call_shape = ShapeUtil::MakeTupleShape(
{x_reshape->shape(), ShapeUtil::MakeShape(U8, {workspace_size})});
HloInstruction* custom_call =
instr->AddInstruction(HloInstruction::CreateCustomCall(
custom_call_shape, {x_reshape, scale_reshape, bias_reshape},
kCudnnNormCallTarget));
TF_RETURN_IF_ERROR(custom_call->set_backend_config(gpu_backend_config));
TF_ASSIGN_OR_RETURN(HloInstruction * gte,
MakeGetTupleElementHlo(custom_call, 0));
TF_ASSIGN_OR_RETURN(
HloInstruction * y_reshape,
MakeReshapeHlo(x_transpose.value_or(instr)->shape(), gte));
std::optional<HloInstruction*> y_transpose;
if (apply_transpose) {
TF_ASSIGN_OR_RETURN(y_transpose,
MakeTransposeHlo(y_reshape, y_transpose_order));
}
TF_RETURN_IF_ERROR(
ReplaceInstruction(instr, y_transpose.value_or(y_reshape)));
norm_metadata_.insert(
{custom_call,
NormMetadata({x_transpose.value_or(nullptr),
y_transpose.value_or(nullptr), norm_dims_adjusted,
non_norm_dims_adjusted})});
VLOG(1) << "Layer norm rewritten into Custom Call.";
for (HloInstruction* user : norm_factor->users()) {
if (user->opcode() == HloOpcode::kDivide &&
user->operand_index(norm_factor) == 0) {
TF_ASSIGN_OR_RETURN(bool changed,
MatchNormFactor(user, custom_call, variance,
expectation, epsilon));
if (changed) {
break;
}
}
}
}
return absl::OkStatus();
}
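  // Matches the cube of the norm factor, norm_factor / (variance + epsilon),
  // that appears in training graphs and upgrades the forward custom call to
  // LAYER_FWD_TRAIN so that it also emits the expectation and norm factor for
  // reuse in the backward pass.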
absl::StatusOr<bool> MatchNormFactor(HloInstruction* instr,
HloInstruction* custom_call,
UniqueHloInstruction& variance,
UniqueHloInstruction& expectation,
UniqueHloInstruction& epsilon) {
HloInstruction* gte = custom_call->users()[0];
if (Match(instr,
m::Divide(
m::Op(),
AddAnyOrder(
m::Op().WithPredicate(variance.GetCaptureOrVerifyFn()),
m::Broadcast(m::ConstantScalar().WithPredicate(
epsilon.GetCaptureOrVerifyFn())))))) {
if (!variance.Instr() || !epsilon.Instr()) {
VLOG(1) << "Layer norm operands not unique.";
return false;
}
if (!CompatibleElementType(instr) ||
!CompatibleElementType(expectation.Instr())) {
VLOG(1) << "Layer norm input types not compatible.";
return false;
}
auto norm_metadata = norm_metadata_.extract(custom_call);
if (!norm_metadata) {
VLOG(1) << "Unable to retrieve norm metadata of forward Custom Call.";
return false;
}
auto make_compatible_shape = [](Shape shape) -> Shape {
return ShapeUtil::MakeShape(shape.element_type(),
{ShapeUtil::ElementsIn(shape), 1, 1, 1});
};
Shape expectation_shape =
make_compatible_shape(expectation.Instr()->shape());
Shape norm_factor_shape = make_compatible_shape(instr->shape());
std::vector<Shape> tuple_shapes = custom_call->shape().tuple_shapes();
tuple_shapes.insert(tuple_shapes.begin() + 1,
{expectation_shape, norm_factor_shape});
Shape custom_call_shape = ShapeUtil::MakeTupleShape(tuple_shapes);
HloInstruction* new_custom_call = instr->AddInstruction(
custom_call->CloneWithNewShape(custom_call_shape));
TF_ASSIGN_OR_RETURN(
GpuBackendConfig gpu_backend_config,
custom_call->backend_config<xla::gpu::GpuBackendConfig>());
CudnnNormBackendConfig& backend_config =
*gpu_backend_config.mutable_cudnn_norm_backend_config();
backend_config.set_kind(CudnnNormBackendConfig::LAYER_FWD_TRAIN);
TF_ASSIGN_OR_RETURN(const int64_t c_constant,
CConstant(cuda_compute_capability_));
const int64_t workspace_size = (2 * c_constant * (4 + 256)) + 32;
backend_config.mutable_algorithm()->mutable_workspace_size()->set_value(
workspace_size);
TF_RETURN_IF_ERROR(
new_custom_call->set_backend_config(gpu_backend_config));
auto replace_with_new_cc = [new_custom_call, this](
HloInstruction* old_instr,
int tuple_index) -> absl::Status {
TF_ASSIGN_OR_RETURN(
HloInstruction * new_gte,
MakeGetTupleElementHlo(new_custom_call, tuple_index));
HloInstruction* new_instr = new_gte;
if (!ShapeUtil::Equal(new_gte->shape(), old_instr->shape())) {
TF_ASSIGN_OR_RETURN(new_instr,
MakeReshapeHlo(old_instr->shape(), new_gte));
}
if (old_instr->opcode() != HloOpcode::kDivide) {
TF_RETURN_IF_ERROR(ReplaceInstruction(old_instr, new_instr));
} else {
TF_RETURN_IF_ERROR(
ReplaceInstruction(old_instr->mutable_operand(0), new_instr));
TF_ASSIGN_OR_RETURN(
HloInstruction * new_multiply0,
MakeBinaryHlo(HloOpcode::kMultiply, new_instr, new_instr));
TF_ASSIGN_OR_RETURN(
HloInstruction * new_multiply1,
MakeBinaryHlo(HloOpcode::kMultiply, new_multiply0, new_instr));
TF_RETURN_IF_ERROR(ReplaceInstruction(old_instr, new_multiply1));
}
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(replace_with_new_cc(gte, 0));
TF_RETURN_IF_ERROR(replace_with_new_cc(expectation.Instr(), 1));
TF_RETURN_IF_ERROR(replace_with_new_cc(instr, 2));
norm_metadata.key() = new_custom_call;
norm_metadata_.insert(std::move(norm_metadata));
VLOG(1)
<< "Expectation and norm factor fused into layer norm Custom Call.";
}
return true;
}
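  // Matches the backward (gradient) graph of a previously rewritten layer
  // norm and replaces it with a LAYER_BWD __cudnn$norm custom call producing
  // DX, Dscale and, when present, Dbias.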
absl::Status MatchLayerNormGradient(HloInstruction* instr) {
UniqueHloInstruction fwd_custom_call, x, x_center, scale, dy,
fused_expectation, fused_norm_factor;
HloInstruction *broadcast, *scalar, *dscale, *dbias, *reduce0, *reduce1,
*reduce2, *reduce3;
if (Match(instr,
AddAddAnyOrder(
m::Broadcast(
&broadcast,
MultiplyAddAnyOrder(
m::Broadcast(m::ConstantScalar(&scalar)),
NegateAddReduce(&reduce0,
F1(&x, &x_center, &fused_expectation,
&fwd_custom_call, &scale, &dy,
&reduce2, norm_metadata_)),
NegateAddReduce(
&reduce1, F2(&fused_norm_factor, &scale, &dy,
&fwd_custom_call, norm_metadata_)))),
F2(&fused_norm_factor, &scale, &dy, &fwd_custom_call,
norm_metadata_),
F1(&x, &x_center, &fused_expectation, &fwd_custom_call,
&scale, &dy, &reduce3, norm_metadata_)))) {
if (instr->user_count() == 1 &&
instr->users()[0]->opcode() == HloOpcode::kConvert &&
CompatibleElementType(instr->users()[0])) {
instr = instr->users()[0];
}
if (!fwd_custom_call.Instr() || !x.Instr() || !dy.Instr() ||
!x_center.Instr() || !scale.Instr() || !fused_expectation.Instr() ||
!fused_norm_factor.Instr()) {
VLOG(1) << "Layer norm gradient inputs not unique.";
return absl::OkStatus();
}
auto norm_metadata = norm_metadata_.find(fwd_custom_call.Instr());
if (norm_metadata == norm_metadata_.end()) {
VLOG(1) << "Unable to retrieve norm metadata of forward Custom Call.";
return absl::OkStatus();
}
if (AdjustedDimensions(reduce0) !=
norm_metadata->second.norm_dims_adjusted ||
AdjustedDimensions(reduce1) !=
norm_metadata->second.norm_dims_adjusted ||
AdjustedDimensions(reduce2) !=
norm_metadata->second.norm_dims_adjusted ||
AdjustedDimensions(reduce3) !=
norm_metadata->second.norm_dims_adjusted) {
VLOG(1) << "Unexpected reductions dimensions in layer norm gradient.";
return absl::OkStatus();
}
float actual_r_nelems = scalar->literal().GetAsDouble({}).value();
int64_t nelems = 1;
for (int i = 0; i < broadcast->shape().dimensions_size(); ++i) {
if (!absl::c_linear_search(broadcast->dimensions(), i)) {
nelems *= broadcast->shape().dimensions()[i];
}
}
float r_nelems = 1. / static_cast<float>(nelems);
float numerical_epsilon = std::numeric_limits<bfloat16>::epsilon();
      if (!(std::abs(actual_r_nelems - r_nelems) <
            ((actual_r_nelems + r_nelems) * numerical_epsilon))) {
VLOG(1)
<< "Layer norm backward broadcast operand outside expected range.";
return absl::OkStatus();
}
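      // Dscale is the reduction over the non-normalization dimensions of
      // DY * (X - E[X]) * norm_factor; look for this pattern among the users
      // of either factor, trying both operand orders.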
auto find_dscale =
[&fused_norm_factor, &norm_metadata](
const UniqueHloInstruction& factor0,
const UniqueHloInstruction& factor1) -> HloInstruction* {
for (HloInstruction* factor0_user : factor0.Instr()->users()) {
std::vector<HloInstruction*> users;
SkipUnaryOpsTopDownRecursive(factor0_user, users);
for (HloInstruction* user : users) {
if (Match(user,
MultiplyAnyOrder(
m::Op(), MultiplyAnyOrder(
m::Broadcast(BitcastOrReshape(m::Op().Is(
fused_norm_factor.Instr()))),
m::Op().Is(factor1.Instr()))))) {
for (HloInstruction* multiply_user : user->users()) {
if (AppliesAddReduce(
multiply_user,
norm_metadata->second.non_norm_dims_adjusted)) {
return multiply_user;
}
}
}
}
}
return nullptr;
};
if (!(dscale = find_dscale(x_center, dy)) &&
!(dscale = find_dscale(dy, x_center))) {
VLOG(1) << "Unable to identify Dscale in graph.";
return absl::OkStatus();
}
dbias = FindAddReduce(dy.Instr(),
norm_metadata->second.non_norm_dims_adjusted);
if (!LayoutUtil::IsMonotonicWithDim0Major(dy.Instr()->shape().layout()) ||
!LayoutUtil::IsMonotonicWithDim0Major(instr->shape().layout()) ||
!LayoutUtil::IsMonotonicWithDim0Major(dscale->shape().layout()) ||
(dbias &&
!LayoutUtil::IsMonotonicWithDim0Major(dbias->shape().layout()))) {
VLOG(1) << "Layer norm input and/or output layouts nor supported.";
return absl::OkStatus();
}
if (x.Instr()->shape().element_type() != instr->shape().element_type()) {
VLOG(1) << "The types of X and DX must match.";
return absl::OkStatus();
}
if (!ShapeUtil::Equal(
ShapeUtil::DropDegenerateDimensions(scale.Instr()->shape()),
ShapeUtil::DropDegenerateDimensions(dscale->shape())) ||
(dbias &&
!ShapeUtil::Equal(
ShapeUtil::DropDegenerateDimensions(scale.Instr()->shape()),
ShapeUtil::DropDegenerateDimensions(dbias->shape())))) {
VLOG(1) << "Backward layer norm types not supported.";
return absl::OkStatus();
}
if (!CompatibleElementType(dy.Instr())) {
VLOG(1) << "Backward layer norm types not supported.";
return absl::OkStatus();
}
if (ShapeUtil::ByteSizeOfPrimitiveType(
x.Instr()->shape().element_type()) <
ShapeUtil::ByteSizeOfPrimitiveType(
dy.Instr()->shape().element_type()) ||
ShapeUtil::ByteSizeOfPrimitiveType(
x.Instr()->shape().element_type()) <
ShapeUtil::ByteSizeOfPrimitiveType(
scale.Instr()->shape().element_type())) {
VLOG(1) << "Backward layer norm types not supported.";
return absl::OkStatus();
}
HloInstruction* transposed_dy = dy.Instr();
if (norm_metadata->second.x_transpose) {
TF_ASSIGN_OR_RETURN(
transposed_dy,
MakeTransposeHlo(dy.Instr(),
norm_metadata->second.x_transpose->dimensions()));
}
TF_ASSIGN_OR_RETURN(HloInstruction * reshaped_dy,
MakeReshapeHlo(x.Instr()->shape(), transposed_dy));
Shape dx_shape = ShapeUtil::MakeShape(instr->shape().element_type(),
x.Instr()->shape().dimensions());
Shape dscale_dbias_shape = ShapeUtil::MakeShape(
dscale->shape().element_type(), scale.Instr()->shape().dimensions());
GpuBackendConfig gpu_backend_config;
CudnnNormBackendConfig& backend_config =
*gpu_backend_config.mutable_cudnn_norm_backend_config();
backend_config.set_kind(CudnnNormBackendConfig::LAYER_BWD);
auto* algorithm = backend_config.mutable_algorithm();
algorithm->set_algo_id(0);
algorithm->set_math_type(se::dnn::AlgorithmProto::TENSOR_OP_MATH);
algorithm->set_is_cudnn_frontend(true);
TF_ASSIGN_OR_RETURN(const int64_t c_constant,
CConstant(cuda_compute_capability_));
const int64_t workspace_size =
(2 * c_constant * (4 + 256)) +
(2 * x.Instr()->shape().dimensions(0) * 4) + 64;
algorithm->mutable_workspace_size()->set_value(workspace_size);
Shape custom_call_shape = ShapeUtil::MakeTupleShape(
{dx_shape, dscale_dbias_shape, dscale_dbias_shape,
ShapeUtil::MakeShape(U8, {workspace_size})});
HloInstruction* custom_call =
instr->AddInstruction(HloInstruction::CreateCustomCall(
custom_call_shape,
{x.Instr(), scale.Instr(), reshaped_dy, fused_expectation.Instr(),
fused_norm_factor.Instr()},
kCudnnNormCallTarget));
TF_RETURN_IF_ERROR(custom_call->set_backend_config(gpu_backend_config));
auto replace_with_cc = [custom_call, norm_metadata, transposed_dy, this](
HloInstruction* old_instr,
int tuple_index) -> absl::Status {
TF_ASSIGN_OR_RETURN(HloInstruction * gte,
MakeGetTupleElementHlo(custom_call, tuple_index));
HloInstruction* new_instr;
if (tuple_index == 0 && norm_metadata->second.y_transpose) {
TF_ASSIGN_OR_RETURN(new_instr,
MakeReshapeHlo(transposed_dy->shape(), gte));
TF_ASSIGN_OR_RETURN(
new_instr,
MakeTransposeHlo(
new_instr, norm_metadata->second.y_transpose->dimensions()));
} else {
TF_ASSIGN_OR_RETURN(new_instr,
MakeReshapeHlo(old_instr->shape(), gte));
}
TF_RETURN_IF_ERROR(ReplaceInstruction(old_instr, new_instr));
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(replace_with_cc(instr, 0));
TF_RETURN_IF_ERROR(replace_with_cc(dscale, 1));
if (dbias) {
TF_RETURN_IF_ERROR(replace_with_cc(dbias, 2));
}
VLOG(1) << "Gradients w.r.t. x"
<< (dbias ? ", scale and bias" : " and scale")
<< " rewritten into layer norm backward Custom Call.";
}
return absl::OkStatus();
}
private:
se::CudaComputeCapability cuda_compute_capability_;
NormMetadataMap norm_metadata_;
};
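// Applies the rewriter to a single computation and reports whether any
// instruction was changed.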
absl::StatusOr<bool> RunOnComputation(
HloComputation* computation,
se::CudaComputeCapability cuda_compute_capability) {
CudnnNormRewriterVisitor visitor(cuda_compute_capability);
TF_RETURN_IF_ERROR(computation->Accept(&visitor));
return visitor.changed();
}
}
CudnnNormRewriter::CudnnNormRewriter(
se::CudaComputeCapability cuda_compute_capability)
: cuda_compute_capability_(cuda_compute_capability) {}
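// Applies the rewrite to all non-fusion computations of the module.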
absl::StatusOr<bool> CudnnNormRewriter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
TF_ASSIGN_OR_RETURN(
bool result, RunOnComputation(computation, cuda_compute_capability_));
changed |= result;
}
return changed;
}
}
}
#include <string>
#include <gtest/gtest.h>
#include "xla/error_spec.h"
#include "xla/stream_executor/device_description.h"
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/include/cuda.h"
#include "third_party/gpus/cudnn/cudnn.h"
#include "third_party/gpus/cudnn/cudnn_version.h"
#endif
#include "xla/service/gpu/tests/gpu_codegen_test.h"
namespace xla {
namespace gpu {
namespace {
class CudnnNormRewriterTest : public GpuCodegenTest {
public:
se::CudaComputeCapability GetCudaComputeCapability() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
}
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = GpuCodegenTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_cudnn_layer_norm(true);
return debug_options;
}
protected:
void SetUp() override {
#if (CUDA_VERSION < 12000 || CUDNN_VERSION < 8905)
GTEST_SKIP() << "Layer norm kernels require CUDA 12 and cuDNN 8.9.5.";
#endif
if (!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::AMPERE) &&
!(GetCudaComputeCapability().major ==
se::CudaComputeCapability::HOPPER)) {
GTEST_SKIP()
<< "Layer norm kernels require Ampere or Hopper architectures.";
}
}
void TestNorm(std::string hlo_text, std::string optimized_hlo) {
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3}));
MatchOptimizedHlo(hlo_text, optimized_hlo);
}
};
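// Each test runs the module end to end, compares the numerical results
// against the reference backend, and FileCheck-matches the optimized HLO for
// the expected __cudnn$norm custom call (or its absence).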
TEST_F(CudnnNormRewriterTest, LayerNorm2D1) {
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4] parameter(0)
input_square = f32[2,4] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2] reduce(input_square, c0), dimensions={1}, to_apply=apply
r_nelems = f32[] constant(0.25)
r_nelems_bcast = f32[2] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2] multiply(input_square_sum, r_nelems_bcast)
input_sum = f32[2] reduce(input, c0),dimensions={1}, to_apply=apply
input_mean = f32[2] multiply(input_sum, r_nelems_bcast)
input_mean_square = f32[2] multiply(input_mean, input_mean)
variance = f32[2] subtract(input_square_mean, input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2] add(variance, epsilon_bcast)
norm_factor = f32[2] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,4] broadcast(norm_factor), dimensions={0}
input_mean_bcast = f32[2,4] broadcast(input_mean), dimensions={0}
input_center = f32[2,4] subtract(input, input_mean_bcast)
norm = f32[2,4] multiply(norm_factor_bcast, input_center)
scale = f32[4] parameter(1)
scale_bcast = f32[2,4] broadcast(scale), dimensions={1}
norm_scale = f32[2,4] multiply(norm, scale_bcast)
bias = f32[4] parameter(2)
bias_broadcast = f32[2,4] broadcast(bias), dimensions={1}
ROOT out = f32[2,4] add(norm_scale, bias_broadcast)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4], {{.*}}: f32[4], {{.*}}: f32[4]) -> f32[2,4] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4]{1,0} parameter(0)
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[2,4,1,1]{3,2,1,0} bitcast([[P0]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4]{0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4]{0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[2,4,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK: }
; CHECK-NEXT: [[GTE:%[^ ]+]] = f32[2,4,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0
; CHECK-NEXT: ROOT [[GTE_BITCAST:%[^ ]+]] = f32[2,4]{1,0} bitcast([[GTE]])
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNorm4D3) {
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4,6,8] parameter(0)
input_square = f32[2,4,6,8] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2,4,6] reduce(input_square, c0), dimensions={3}, to_apply=apply
r_nelems = f32[] constant(0.125)
r_nelems_bcast = f32[2,4,6] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2,4,6] multiply(input_square_sum, r_nelems_bcast)
input_sum = f32[2,4,6] reduce(input, c0), dimensions={3}, to_apply=apply
input_mean = f32[2,4,6] multiply(input_sum, r_nelems_bcast)
input_mean_square = f32[2,4,6] multiply(input_mean, input_mean)
variance = f32[2,4,6] subtract(input_square_mean, input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2,4,6] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2,4,6] add(variance, epsilon_bcast)
norm_factor = f32[2,4,6] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,4,6,8] broadcast(norm_factor), dimensions={0,1,2}
input_mean_bcast = f32[2,4,6,8] broadcast(input_mean), dimensions={0,1,2}
input_center = f32[2,4,6,8] subtract(input, input_mean_bcast)
norm = f32[2,4,6,8] multiply(norm_factor_bcast, input_center)
scale = f32[8] parameter(1)
scale_bcast = f32[2,4,6,8] broadcast(scale), dimensions={3}
norm_scale = f32[2,4,6,8] multiply(norm, scale_bcast)
bias = f32[8] parameter(2)
bias_bcast = f32[2,4,6,8] broadcast(bias), dimensions={3}
ROOT out = f32[2,4,6,8] add(norm_scale, bias_bcast)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,6,8], {{.*}}: f32[8], {{.*}}: f32[8]) -> f32[2,4,6,8] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(0)
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[48,8,1,1]{3,2,1,0} bitcast([[P0]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[8]{0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,8,1,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[8]{0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,8,1,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[48,8,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK: }
; CHECK-NEXT: [[GTE:%[^ ]+]] = f32[48,8,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0
; CHECK-NEXT: ROOT [[GTE_BITCAST:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} bitcast([[GTE]])
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNorm4D3Degenerate0) {
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[1,4,6,8] parameter(0)
input_square = f32[1,4,6,8] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[1,4,6] reduce(input_square, c0), dimensions={3}, to_apply=apply
r_nelems = f32[] constant(0.125)
r_nelems_bcast = f32[1,4,6] broadcast(r_nelems), dimensions={}
input_square_mean = f32[1,4,6] multiply(input_square_sum, r_nelems_bcast)
input_sum = f32[1,4,6] reduce(input, c0), dimensions={3}, to_apply=apply
input_mean = f32[1,4,6] multiply(input_sum, r_nelems_bcast)
input_mean_square = f32[1,4,6] multiply(input_mean, input_mean)
variance = f32[1,4,6] subtract(input_square_mean, input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[1,4,6] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[1,4,6] add(variance, epsilon_bcast)
norm_factor = f32[1,4,6] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[1,4,6,8] broadcast(norm_factor), dimensions={0,1,2}
input_mean_bcast = f32[1,4,6,8] broadcast(input_mean), dimensions={0,1,2}
input_center = f32[1,4,6,8] subtract(input, input_mean_bcast)
norm = f32[1,4,6,8] multiply(norm_factor_bcast, input_center)
scale = f32[8] parameter(1)
scale_bcast = f32[1,4,6,8] broadcast(scale), dimensions={3}
norm_scale = f32[1,4,6,8] multiply(norm, scale_bcast)
bias = f32[8] parameter(2)
bias_bcast = f32[1,4,6,8] broadcast(bias), dimensions={3}
ROOT out = f32[1,4,6,8] add(norm_scale, bias_bcast)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[1,4,6,8], {{.*}}: f32[8], {{.*}}: f32[8]) -> f32[1,4,6,8] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[1,4,6,8]{3,2,1,0} parameter(0)
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[24,8,1,1]{3,2,1,0} bitcast([[P0]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[8]{0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,8,1,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[8]{0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,8,1,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[24,8,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK: }
; CHECK-NEXT: [[GTE:%[^ ]+]] = f32[24,8,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0
; CHECK-NEXT: ROOT [[GTE_BITCAST:%[^ ]+]] = f32[1,4,6,8]{3,2,1,0} bitcast([[GTE]])
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNorm4D2) {
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4,6,8] parameter(0)
input_square = f32[2,4,6,8] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2,4,8] reduce(input_square, c0), dimensions={2}, to_apply=apply
r_nelems = f32[] constant(0.166667)
r_nelems_bcast = f32[2,4,8] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2,4,8] multiply(input_square_sum, r_nelems_bcast)
reduce = f32[2,4,8] reduce(input, c0), dimensions={2}, to_apply=apply
input_mean = f32[2,4,8] multiply(reduce, r_nelems_bcast)
input_mean_square = f32[2,4,8] multiply(input_mean, input_mean)
variance = f32[2,4,8] subtract(input_square_mean, input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2,4,8] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2,4,8] add(variance, epsilon_bcast)
norm_factor = f32[2,4,8] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,4,6,8] broadcast(norm_factor), dimensions={0,1,3}
input_mean_bcast = f32[2,4,6,8] broadcast(input_mean), dimensions={0,1,3}
input_center = f32[2,4,6,8] subtract(input, input_mean_bcast)
norm = f32[2,4,6,8] multiply(norm_factor_bcast, input_center)
scale = f32[6] parameter(1)
scale_bcast = f32[2,4,6,8] broadcast(scale), dimensions={2}
norm_scale = f32[2,4,6,8] multiply(norm, scale_bcast)
bias = f32[6] parameter(2)
bias_broadcast = f32[2,4,6,8] broadcast(bias), dimensions={2}
ROOT out = f32[2,4,6,8] add(norm_scale, bias_broadcast)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,6,8], {{.*}}: f32[6], {{.*}}: f32[6]) -> f32[2,4,6,8] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(0)
; CHECK-NEXT: [[TRANSPOSE:%[^ ]+]] = f32[8,8,6]{2,1,0} fusion([[P0]]), kind=kLoop, calls={{.*}}
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[64,6,1,1]{3,2,1,0} bitcast([[TRANSPOSE]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[6]{0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,6,1,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[6]{0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,6,1,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[64,6,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK: }
; CHECK-NEXT: [[GTE:%[^ ]+]] = f32[64,6,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0
; CHECK-NEXT: [[FUSION:%[^ ]+]] = f32[8,6,8]{2,1,0} fusion([[GTE]]), kind=kLoop, calls=[[FUSED_COMPUTATION:%[^ ]+]]
; CHECK-NEXT: ROOT {{.*}} = f32[2,4,6,8]{3,2,1,0} bitcast([[FUSION]])
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNorm4D2Degenerate1) {
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,1,6,8] parameter(0)
input_square = f32[2,1,6,8] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2,1,8] reduce(input_square, c0), dimensions={2}, to_apply=apply
r_nelems = f32[] constant(0.166667)
r_nelems_bcast = f32[2,1,8] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2,1,8] multiply(input_square_sum, r_nelems_bcast)
reduce = f32[2,1,8] reduce(input, c0), dimensions={2}, to_apply=apply
input_mean = f32[2,1,8] multiply(reduce, r_nelems_bcast)
input_mean_square = f32[2,1,8] multiply(input_mean, input_mean)
variance = f32[2,1,8] subtract(input_square_mean, input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2,1,8] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2,1,8] add(variance, epsilon_bcast)
norm_factor = f32[2,1,8] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,1,6,8] broadcast(norm_factor), dimensions={0,1,3}
input_mean_bcast = f32[2,1,6,8] broadcast(input_mean), dimensions={0,1,3}
input_center = f32[2,1,6,8] subtract(input, input_mean_bcast)
norm = f32[2,1,6,8] multiply(norm_factor_bcast, input_center)
scale = f32[6] parameter(1)
scale_bcast = f32[2,1,6,8] broadcast(scale), dimensions={2}
norm_scale = f32[2,1,6,8] multiply(norm, scale_bcast)
bias = f32[6] parameter(2)
bias_broadcast = f32[2,1,6,8] broadcast(bias), dimensions={2}
ROOT out = f32[2,1,6,8] add(norm_scale, bias_broadcast)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,1,6,8], {{.*}}: f32[6], {{.*}}: f32[6]) -> f32[2,1,6,8] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,1,6,8]{3,2,1,0} parameter(0)
; CHECK-NEXT: [[TRANSPOSE:%[^ ]+]] = f32[2,8,6]{2,1,0} fusion([[P0]]), kind=kLoop, calls={{.*}}
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[16,6,1,1]{3,2,1,0} bitcast([[TRANSPOSE]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[6]{0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,6,1,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[6]{0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,6,1,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[16,6,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK: }
; CHECK-NEXT: [[GTE:%[^ ]+]] = f32[16,6,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0
; CHECK-NEXT: [[FUSION:%[^ ]+]] = f32[2,6,8]{2,1,0} fusion([[GTE]]), kind=kLoop, calls=[[FUSED_COMPUTATION:%[^ ]+]]
; CHECK-NEXT: ROOT {{.*}} = f32[2,1,6,8]{3,2,1,0} bitcast([[FUSION]])
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNorm4D12) {
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4,6,8] parameter(0)
input_square = f32[2,4,6,8] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2,8] reduce(input_square, c0), dimensions={1,2}, to_apply=apply
r_nelems = f32[] constant(0.041667)
r_nelems_bcast = f32[2,8] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2,8] multiply(input_square_sum, r_nelems_bcast)
reduce = f32[2,8] reduce(input, c0), dimensions={1,2}, to_apply=apply
input_mean = f32[2,8] multiply(reduce, r_nelems_bcast)
input_mean_square = f32[2,8] multiply(input_mean, input_mean)
variance = f32[2,8] subtract(input_square_mean, input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2,8] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2,8] add(variance, epsilon_bcast)
norm_factor = f32[2,8] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,4,6,8] broadcast(norm_factor), dimensions={0,3}
input_mean_bcast = f32[2,4,6,8] broadcast(input_mean), dimensions={0,3}
input_center = f32[2,4,6,8] subtract(input, input_mean_bcast)
norm = f32[2,4,6,8] multiply(norm_factor_bcast, input_center)
scale = f32[4,6] parameter(1)
scale_bcast = f32[2,4,6,8] broadcast(scale), dimensions={1,2}
norm_scale = f32[2,4,6,8] multiply(norm, scale_bcast)
bias = f32[4,6] parameter(2)
bias_broadcast = f32[2,4,6,8] broadcast(bias), dimensions={1,2}
ROOT out = f32[2,4,6,8] add(norm_scale, bias_broadcast)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,6,8], {{.*}}: f32[4,6], {{.*}}: f32[4,6]) -> f32[2,4,6,8] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(0)
; CHECK-NEXT: [[TRANSPOSE:%[^ ]+]] = f32[2,8,24]{2,1,0} fusion([[P0]]), kind=kLoop, calls={{.*}}
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[16,4,6,1]{3,2,1,0} bitcast([[TRANSPOSE]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4,6]{1,0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,6,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4,6]{1,0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,6,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[16,4,6,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK: }
; CHECK-NEXT: [[GTE:%[^ ]+]] = f32[16,4,6,1]{3,2,1,0} get-tuple-element([[CC]]), index=0
; CHECK-NEXT: [[FUSION:%[^ ]+]] = f32[2,24,8]{2,1,0} fusion([[GTE]]), kind=kLoop, calls=[[FUSED_COMPUTATION:%[^ ]+]]
; CHECK-NEXT: ROOT {{.*}} = f32[2,4,6,8]{3,2,1,0} bitcast([[FUSION]])
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNorm4D12Degenerate2) {
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4,1,8] parameter(0)
input_square = f32[2,4,1,8] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2,8] reduce(input_square, c0), dimensions={1,2}, to_apply=apply
r_nelems = f32[] constant(0.25)
r_nelems_bcast = f32[2,8] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2,8] multiply(input_square_sum, r_nelems_bcast)
reduce = f32[2,8] reduce(input, c0), dimensions={1,2}, to_apply=apply
input_mean = f32[2,8] multiply(reduce, r_nelems_bcast)
input_mean_square = f32[2,8] multiply(input_mean, input_mean)
variance = f32[2,8] subtract(input_square_mean, input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2,8] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2,8] add(variance, epsilon_bcast)
norm_factor = f32[2,8] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,4,1,8] broadcast(norm_factor), dimensions={0,3}
input_mean_bcast = f32[2,4,1,8] broadcast(input_mean), dimensions={0,3}
input_center = f32[2,4,1,8] subtract(input, input_mean_bcast)
norm = f32[2,4,1,8] multiply(norm_factor_bcast, input_center)
scale = f32[4,1] parameter(1)
scale_bcast = f32[2,4,1,8] broadcast(scale), dimensions={1,2}
norm_scale = f32[2,4,1,8] multiply(norm, scale_bcast)
bias = f32[4,1] parameter(2)
bias_broadcast = f32[2,4,1,8] broadcast(bias), dimensions={1,2}
ROOT out = f32[2,4,1,8] add(norm_scale, bias_broadcast)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,1,8], {{.*}}: f32[4,1], {{.*}}: f32[4,1]) -> f32[2,4,1,8] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,1,8]{3,2,1,0} parameter(0)
; CHECK-NEXT: [[TRANSPOSE:%[^ ]+]] = f32[2,8,4]{2,1,0} fusion([[P0]]), kind=kLoop, calls={{.*}}
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[16,4,1,1]{3,2,1,0} bitcast([[TRANSPOSE]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4,1]{1,0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4,1]{1,0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[16,4,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK: }
; CHECK-NEXT: [[GTE:%[^ ]+]] = f32[16,4,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0
; CHECK-NEXT: [[FUSION:%[^ ]+]] = f32[2,4,8]{2,1,0} fusion([[GTE]]), kind=kLoop, calls=[[FUSED_COMPUTATION:%[^ ]+]]
; CHECK-NEXT: ROOT {{.*}} = f32[2,4,1,8]{3,2,1,0} bitcast([[FUSION]])
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNorm4D3IncorrectScaleBroadcast) {
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,2,2,2] parameter(0)
input_square = f32[2,2,2,2] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2,2,2] reduce(input_square, c0), dimensions={3}, to_apply=apply
r_nelems = f32[] constant(0.5)
r_nelems_bcast = f32[2,2,2] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2,2,2] multiply(input_square_sum, r_nelems_bcast)
input_sum = f32[2,2,2] reduce(input, c0), dimensions={3}, to_apply=apply
input_mean = f32[2,2,2] multiply(input_sum, r_nelems_bcast)
input_mean_square = f32[2,2,2] multiply(input_mean, input_mean)
variance = f32[2,2,2] subtract(input_square_mean, input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2,2,2] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2,2,2] add(variance, epsilon_bcast)
norm_factor = f32[2,2,2] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,2,2,2] broadcast(norm_factor), dimensions={0,1,2}
input_mean_bcast = f32[2,2,2,2] broadcast(input_mean), dimensions={0,1,2}
input_center = f32[2,2,2,2] subtract(input, input_mean_bcast)
norm = f32[2,2,2,2] multiply(norm_factor_bcast, input_center)
scale = f32[2] parameter(1)
scale_bcast = f32[2,2,2,2] broadcast(scale), dimensions={2}
norm_scale = f32[2,2,2,2] multiply(norm, scale_bcast)
bias = f32[2] parameter(2)
bias_bcast = f32[2,2,2,2] broadcast(bias), dimensions={3}
ROOT out = f32[2,2,2,2] add(norm_scale, bias_bcast)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,2,2,2], {{.*}}: f32[2], {{.*}}: f32[2]) -> f32[2,2,2,2] {
; CHECK-NOT: custom_call_target="__cudnn$norm"
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNorm4D3InputOutputTypeMismatch) {
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f16[2,4,6,8] parameter(0)
input_f32 = f32[2,4,6,8] convert(input)
input_square = f32[2,4,6,8] multiply(input_f32, input_f32)
c0 = f32[] constant(0)
input_square_sum = f32[2,4,6] reduce(input_square, c0), dimensions={3}, to_apply=apply
r_nelems = f32[] constant(0.125)
r_nelems_bcast = f32[2,4,6] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2,4,6] multiply(input_square_sum, r_nelems_bcast)
input_sum = f32[2,4,6] reduce(input_f32, c0), dimensions={3}, to_apply=apply
input_mean = f32[2,4,6] multiply(input_sum, r_nelems_bcast)
input_mean_square = f32[2,4,6] multiply(input_mean, input_mean)
variance = f32[2,4,6] subtract(input_square_mean, input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2,4,6] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2,4,6] add(variance, epsilon_bcast)
norm_factor = f32[2,4,6] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,4,6,8] broadcast(norm_factor), dimensions={0,1,2}
input_mean_bcast = f32[2,4,6,8] broadcast(input_mean), dimensions={0,1,2}
input_center = f32[2,4,6,8] subtract(input_f32, input_mean_bcast)
norm = f32[2,4,6,8] multiply(norm_factor_bcast, input_center)
scale = f32[8] parameter(1)
scale_bcast = f32[2,4,6,8] broadcast(scale), dimensions={3}
norm_scale = f32[2,4,6,8] multiply(norm, scale_bcast)
bias = f32[8] parameter(2)
bias_bcast = f32[2,4,6,8] broadcast(bias), dimensions={3}
ROOT out = f32[2,4,6,8] add(norm_scale, bias_bcast)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f16[2,4,6,8], {{.*}}: f32[8], {{.*}}: f32[8]) -> f32[2,4,6,8] {
; CHECK-NOT: custom_call_target="__cudnn$norm"
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNormTrain2D1) {
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4] parameter(0)
input_square = f32[2,4] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2] reduce(input_square, c0), dimensions={1}, to_apply=apply
r_nelems = f32[] constant(0.25)
r_nelems_bcast = f32[2] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2] multiply(input_square_sum,r_nelems_bcast)
reduce = f32[2] reduce(input, c0), dimensions={1}, to_apply=apply
input_mean = f32[2] multiply(reduce,r_nelems_bcast)
input_mean_square = f32[2] multiply(input_mean,input_mean)
variance = f32[2] subtract(input_square_mean,input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2] add(variance, epsilon_bcast)
norm_factor = f32[2] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,4] broadcast(norm_factor), dimensions={0}
input_mean_bcast = f32[2,4] broadcast(input_mean), dimensions={0}
input_center = f32[2,4] subtract(input,input_mean_bcast)
norm = f32[2,4] multiply(norm_factor_bcast,input_center)
scale = f32[4] parameter(1)
scale_bcast = f32[2,4] broadcast(scale), dimensions={1}
norm_scale = f32[2,4] multiply(norm,scale_bcast)
bias = f32[4] parameter(2)
bias_broadcast = f32[2,4] broadcast(bias), dimensions={1}
norm_scale_bias = f32[2,4] add(norm_scale, bias_broadcast)
norm_factor_cube = f32[2] divide(norm_factor, variance_plus_epsilon)
ROOT out = (f32[2,4], f32[2], f32[2], f32[2]) tuple(norm_scale_bias, input_mean, norm_factor, norm_factor_cube)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4], {{.*}}: f32[4], {{.*}}: f32[4]) -> (f32[2,4], f32[2], f32[2], f32[2]) {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4]{1,0} parameter(0)
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[2,4,1,1]{3,2,1,0} bitcast([[P0]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4]{0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4]{0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[2,4,1,1]{3,2,1,0}, f32[2,1,1,1]{3,2,1,0}, f32[2,1,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK: }
; CHECK-NEXT: [[GTE0:%[^ ]+]] = f32[2,4,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0
; CHECK-NEXT: [[GTE0_BITCAST:%[^ ]+]] = f32[2,4]{1,0} bitcast([[GTE0]])
; CHECK-NEXT: [[GTE1:%[^ ]+]] = f32[2,1,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=1
; CHECK-NEXT: [[GTE1_BITCAST:%[^ ]+]] = f32[2]{0} bitcast([[GTE1]])
; CHECK-NEXT: [[GTE2:%[^ ]+]] = f32[2,1,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=2
; CHECK-NEXT: [[GTE2_BITCAST:%[^ ]+]] = f32[2]{0} bitcast([[GTE2]])
; CHECK-NEXT: [[FUSION:%[^ ]+]] = f32[2]{0} fusion([[GTE2]]), kind=kLoop, calls=[[FUSED_COMPUTATION:%[^ ]+]]
; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, f32[2]{0}, f32[2]{0}, f32[2]{0}) tuple([[GTE0_BITCAST]], [[GTE1_BITCAST]], [[GTE2_BITCAST]], [[FUSION]])
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNormTrain4D3) {
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4,6,8] parameter(0)
input_square = f32[2,4,6,8] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2,4,6] reduce(input_square, c0), dimensions={3}, to_apply=apply
r_nelems = f32[] constant(0.125)
r_nelems_bcast = f32[2,4,6] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2,4,6] multiply(input_square_sum, r_nelems_bcast)
reduce = f32[2,4,6] reduce(input, c0), dimensions={3}, to_apply=apply
input_mean = f32[2,4,6] multiply(reduce, r_nelems_bcast)
input_mean_square = f32[2,4,6] multiply(input_mean, input_mean)
variance = f32[2,4,6] subtract(input_square_mean, input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2,4,6] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2,4,6] add(variance, epsilon_bcast)
norm_factor = f32[2,4,6] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,4,6,8] broadcast(norm_factor), dimensions={0,1,2}
input_mean_bcast = f32[2,4,6,8] broadcast(input_mean), dimensions={0,1,2}
input_center = f32[2,4,6,8] subtract(input, input_mean_bcast)
norm = f32[2,4,6,8] multiply(norm_factor_bcast, input_center)
scale = f32[8] parameter(1)
scale_bcast = f32[2,4,6,8] broadcast(scale), dimensions={3}
norm_scale = f32[2,4,6,8] multiply(norm,scale_bcast)
bias = f32[8] parameter(2)
bias_broadcast = f32[2,4,6,8] broadcast(bias), dimensions={3}
norm_scale_bias = f32[2,4,6,8] add(norm_scale, bias_broadcast)
norm_factor_cube = f32[2,4,6] divide(norm_factor, variance_plus_epsilon)
ROOT out = (f32[2,4,6,8], f32[2,4,6], f32[2,4,6], f32[2,4,6]) tuple(norm_scale_bias, input_mean, norm_factor, norm_factor_cube)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,6,8], {{.*}}: f32[8], {{.*}}: f32[8]) -> (f32[2,4,6,8], f32[2,4,6], f32[2,4,6], f32[2,4,6]) {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(0)
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[48,8,1,1]{3,2,1,0} bitcast([[P0]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[8]{0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,8,1,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[8]{0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,8,1,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[48,8,1,1]{3,2,1,0}, f32[48,1,1,1]{3,2,1,0}, f32[48,1,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK: }
; CHECK-NEXT: [[GTE0:%[^ ]+]] = f32[48,8,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0
; CHECK-NEXT: [[GTE0_BITCAST:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} bitcast([[GTE0]])
; CHECK-NEXT: [[GTE1:%[^ ]+]] = f32[48,1,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=1
; CHECK-NEXT: [[GTE1_BITCAST:%[^ ]+]] = f32[2,4,6]{2,1,0} bitcast([[GTE1]])
; CHECK-NEXT: [[GTE2:%[^ ]+]] = f32[48,1,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=2
; CHECK-NEXT: [[GTE2_BITCAST:%[^ ]+]] = f32[2,4,6]{2,1,0} bitcast([[GTE2]])
; CHECK-NEXT: [[FUSION:%[^ ]+]] = f32[2,4,6]{2,1,0} fusion([[GTE2]]), kind=kLoop, calls=[[FUSED_COMPUTATION:%[^ ]+]]
; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = (f32[2,4,6,8]{3,2,1,0}, f32[2,4,6]{2,1,0}, f32[2,4,6]{2,1,0}, f32[2,4,6]{2,1,0}) tuple([[GTE0_BITCAST]], [[GTE1_BITCAST]], [[GTE2_BITCAST]], [[FUSION]])
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNormTrain4D12) {
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4,6,8] parameter(0)
input_square = f32[2,4,6,8] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2,8] reduce(input_square, c0), dimensions={1,2}, to_apply=apply
r_nelems = f32[] constant(0.041667)
r_nelems_bcast = f32[2,8] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2,8] multiply(input_square_sum, r_nelems_bcast)
reduce = f32[2,8] reduce(input, c0), dimensions={1,2}, to_apply=apply
input_mean = f32[2,8] multiply(reduce, r_nelems_bcast)
input_mean_square = f32[2,8] multiply(input_mean, input_mean)
variance = f32[2,8] subtract(input_square_mean, input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2,8] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2,8] add(variance, epsilon_bcast)
norm_factor = f32[2,8] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,4,6,8] broadcast(norm_factor), dimensions={0,3}
input_mean_bcast = f32[2,4,6,8] broadcast(input_mean), dimensions={0,3}
input_center = f32[2,4,6,8] subtract(input, input_mean_bcast)
norm = f32[2,4,6,8] multiply(norm_factor_bcast, input_center)
scale = f32[4,6] parameter(1)
scale_bcast = f32[2,4,6,8] broadcast(scale), dimensions={1,2}
norm_scale = f32[2,4,6,8] multiply(norm, scale_bcast)
bias = f32[4,6] parameter(2)
bias_broadcast = f32[2,4,6,8] broadcast(bias), dimensions={1,2}
norm_scale_bias = f32[2,4,6,8] add(norm_scale, bias_broadcast)
norm_factor_cube = f32[2,8] divide(norm_factor, variance_plus_epsilon)
ROOT out = (f32[2,4,6,8], f32[2,8], f32[2,8], f32[2,8]) tuple(norm_scale_bias, input_mean, norm_factor, norm_factor_cube)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,6,8], {{.*}}: f32[4,6], {{.*}}: f32[4,6]) -> (f32[2,4,6,8], f32[2,8], f32[2,8], f32[2,8]) {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(0)
; CHECK-NEXT: [[TRANSPOSE:%[^ ]+]] = f32[2,8,24]{2,1,0} fusion([[P0]]), kind=kLoop, calls={{.*}}
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[16,4,6,1]{3,2,1,0} bitcast([[TRANSPOSE]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4,6]{1,0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,6,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4,6]{1,0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,6,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[16,4,6,1]{3,2,1,0}, f32[16,1,1,1]{3,2,1,0}, f32[16,1,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK: }
; CHECK-NEXT: [[GTE0:%[^ ]+]] = f32[16,4,6,1]{3,2,1,0} get-tuple-element([[CC]]), index=0
; CHECK-NEXT: [[FUSION0:%[^ ]+]] = f32[2,24,8]{2,1,0} fusion([[GTE0]]), kind=kLoop, calls=[[FUSED_COMPUTATION0:%[^ ]+]]
; CHECK-NEXT: [[BITCAST:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} bitcast([[FUSION0]])
; CHECK-NEXT: [[GTE1:%[^ ]+]] = f32[16,1,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=1
; CHECK-NEXT: [[GTE1_BITCAST:%[^ ]+]] = f32[2,8]{1,0} bitcast([[GTE1]])
; CHECK-NEXT: [[GTE2:%[^ ]+]] = f32[16,1,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=2
; CHECK-NEXT: [[GTE2_BITCAST:%[^ ]+]] = f32[2,8]{1,0} bitcast([[GTE2]])
; CHECK-NEXT: [[FUSION1:%[^ ]+]] = f32[2,8]{1,0} fusion([[GTE2]]), kind=kLoop, calls=[[FUSED_COMPUTATION1:%[^ ]+]]
; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = (f32[2,4,6,8]{3,2,1,0}, f32[2,8]{1,0}, f32[2,8]{1,0}, f32[2,8]{1,0}) tuple([[BITCAST]], [[GTE1_BITCAST]], [[GTE2_BITCAST]], [[FUSION1]])
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNormTrain4D12Degenerate2) {
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4,1,8] parameter(0)
input_square = f32[2,4,1,8] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2,8] reduce(input_square, c0), dimensions={1,2}, to_apply=apply
r_nelems = f32[] constant(0.25)
r_nelems_bcast = f32[2,8] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2,8] multiply(input_square_sum, r_nelems_bcast)
reduce = f32[2,8] reduce(input, c0), dimensions={1,2}, to_apply=apply
input_mean = f32[2,8] multiply(reduce, r_nelems_bcast)
input_mean_square = f32[2,8] multiply(input_mean, input_mean)
variance = f32[2,8] subtract(input_square_mean, input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2,8] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2,8] add(variance, epsilon_bcast)
norm_factor = f32[2,8] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,4,1,8] broadcast(norm_factor), dimensions={0,3}
input_mean_bcast = f32[2,4,1,8] broadcast(input_mean), dimensions={0,3}
input_center = f32[2,4,1,8] subtract(input, input_mean_bcast)
norm = f32[2,4,1,8] multiply(norm_factor_bcast, input_center)
scale = f32[4,1] parameter(1)
scale_bcast = f32[2,4,1,8] broadcast(scale), dimensions={1,2}
norm_scale = f32[2,4,1,8] multiply(norm, scale_bcast)
bias = f32[4,1] parameter(2)
bias_broadcast = f32[2,4,1,8] broadcast(bias), dimensions={1,2}
norm_scale_bias = f32[2,4,1,8] add(norm_scale, bias_broadcast)
norm_factor_cube = f32[2,8] divide(norm_factor, variance_plus_epsilon)
ROOT out = (f32[2,4,1,8], f32[2,8], f32[2,8], f32[2,8]) tuple(norm_scale_bias, input_mean, norm_factor, norm_factor_cube)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,1,8], {{.*}}: f32[4,1], {{.*}}: f32[4,1]) -> (f32[2,4,1,8], f32[2,8], f32[2,8], f32[2,8]) {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,1,8]{3,2,1,0} parameter(0)
; CHECK-NEXT: [[TRANSPOSE:%[^ ]+]] = f32[2,8,4]{2,1,0} fusion([[P0]]), kind=kLoop, calls={{.*}}
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[16,4,1,1]{3,2,1,0} bitcast([[TRANSPOSE]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4,1]{1,0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4,1]{1,0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[16,4,1,1]{3,2,1,0}, f32[16,1,1,1]{3,2,1,0}, f32[16,1,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK: }
; CHECK-NEXT: [[GTE0:%[^ ]+]] = f32[16,4,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0
; CHECK-NEXT: [[FUSION0:%[^ ]+]] = f32[2,4,8]{2,1,0} fusion([[GTE0]]), kind=kLoop, calls=[[FUSED_COMPUTATION0:%[^ ]+]]
; CHECK-NEXT: [[BITCAST:%[^ ]+]] = f32[2,4,1,8]{3,2,1,0} bitcast([[FUSION0]])
; CHECK-NEXT: [[GTE1:%[^ ]+]] = f32[16,1,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=1
; CHECK-NEXT: [[GTE1_BITCAST:%[^ ]+]] = f32[2,8]{1,0} bitcast([[GTE1]])
; CHECK-NEXT: [[GTE2:%[^ ]+]] = f32[16,1,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=2
; CHECK-NEXT: [[GTE2_BITCAST:%[^ ]+]] = f32[2,8]{1,0} bitcast([[GTE2]])
; CHECK-NEXT: [[FUSION1:%[^ ]+]] = f32[2,8]{1,0} fusion([[GTE2]]), kind=kLoop, calls=[[FUSED_COMPUTATION1:%[^ ]+]]
; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = (f32[2,4,1,8]{3,2,1,0}, f32[2,8]{1,0}, f32[2,8]{1,0}, f32[2,8]{1,0}) tuple([[BITCAST]], [[GTE1_BITCAST]], [[GTE2_BITCAST]], [[FUSION1]])
)";
TestNorm(hlo_text, optimized_hlo);
}
TEST_F(CudnnNormRewriterTest, LayerNormTrainBackward2D1) {
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4] parameter(0)
input_square = f32[2,4] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2] reduce(input_square, c0), dimensions={1}, to_apply=apply
reduce = f32[2] reduce(input, c0), dimensions={1}, to_apply=apply
r_nelems = f32[] constant(0.25)
r_nelems_bcast = f32[2] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2] multiply(input_square_sum,r_nelems_bcast)
input_mean = f32[2] multiply(reduce, r_nelems_bcast)
input_mean_square = f32[2] multiply(input_mean,input_mean)
variance = f32[2] subtract(input_square_mean,input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2] add(variance, epsilon_bcast)
norm_factor = f32[2] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,4] broadcast(norm_factor), dimensions={0}
input_mean_bcast = f32[2,4] broadcast(input_mean), dimensions={0}
input_center = f32[2,4] subtract(input, input_mean_bcast)
norm = f32[2,4] multiply(input_center, norm_factor_bcast)
scale = f32[4] parameter(1)
scale_bcast = f32[2,4] broadcast(scale), dimensions={1}
norm_scale = f32[2,4] multiply(norm, scale_bcast)
bias = f32[4] parameter(2)
bias_bcast = f32[2,4] broadcast(bias), dimensions={1}
norm_scale_bias = f32[2,4] add(norm_scale, bias_bcast)
doutput = f32[2,4] parameter(3)
dbias = f32[4] reduce(doutput, c0), dimensions={0}, to_apply=apply
norm_doutput = f32[2,4] multiply(norm, doutput)
dscale = f32[4] reduce(norm_doutput, c0), dimensions={0}, to_apply=apply
scale_doutput = f32[2,4] multiply(scale_bcast, doutput)
input_center_scale_doutput = f32[2,4] multiply(input_center, scale_doutput)
f0 = f32[2] reduce(input_center_scale_doutput, c0), dimensions={1}, to_apply=apply
norm_factor_cube = f32[2] divide(norm_factor, variance_plus_epsilon)
c1 = f32[] constant(-0.5)
c1_bcast = f32[2] broadcast(c1), dimensions={}
dnorm_factor = f32[2] multiply(norm_factor_cube, c1_bcast)
f0_dnorm_factor = f32[2] multiply(f0, dnorm_factor)
c2 = f32[] constant(0.5)
c2_bcast = f32[2] broadcast(c2), dimensions={}
f0_dnorm_factor_scaled = f32[2] multiply(f0_dnorm_factor, c2_bcast)
f0_dnorm_factor_scaled_bcast = f32[2,4] broadcast(f0_dnorm_factor_scaled), dimensions={0}
f1 = f32[2,4] multiply(input_center, f0_dnorm_factor_scaled_bcast)
minus_f1 = f32[2,4] negate(f1)
minus_f1_sum = f32[2] reduce(minus_f1, c0), dimensions={1}, to_apply=apply
f2 = f32[2,4] multiply(norm_factor_bcast, scale_doutput)
minus_f2 = f32[2,4] negate(f2)
minus_f2_sum = f32[2] reduce(minus_f2, c0), dimensions={1}, to_apply=apply
minus_f1_f2_sum = f32[2] add(minus_f1_sum, minus_f2_sum)
minus_f1_f2_sum_scaled = f32[2] multiply(minus_f1_f2_sum, r_nelems_bcast)
minus_f1_f2_sum_scaled_bcast = f32[2,4] broadcast(minus_f1_f2_sum_scaled), dimensions={0}
f1_f2 = f32[2,4] add(f1, f2)
dinput = f32[2,4] add(f1_f2, minus_f1_f2_sum_scaled_bcast)
ROOT out = (f32[2,4], f32[2,4], f32[4], f32[4]) tuple(norm_scale_bias, dinput, dscale, dbias)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4], {{.*}}: f32[4], {{.*}}: f32[4], {{.*}}: f32[2,4]) -> (f32[2,4], f32[2,4], f32[4], f32[4]) {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4]{1,0} parameter(0)
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[2,4,1,1]{3,2,1,0} bitcast([[P0]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4]{0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4]{0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC0:%[^ ]+]] = (f32[2,4,1,1]{3,2,1,0}, f32[2,1,1,1]{3,2,1,0}, f32[2,1,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK-DAG: "kind":"LAYER_FWD_TRAIN"
; CHECK: }
; CHECK-DAG: [[GTE0:%[^ ]+]] = f32[2,4,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=0
; CHECK-DAG: [[GTE0_BITCAST:%[^ ]+]] = f32[2,4]{1,0} bitcast([[GTE0]])
; CHECK-DAG: [[P3:%[^ ]+]] = f32[2,4]{1,0} parameter(3)
; CHECK-DAG: [[P3_BITCAST:%[^ ]+]] = f32[2,4,1,1]{3,2,1,0} bitcast([[P3]])
; CHECK-DAG: [[GTE1:%[^ ]+]] = f32[2,1,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=1
; CHECK-DAG: [[GTE2:%[^ ]+]] = f32[2,1,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=2
; CHECK-NEXT: [[CC1:%[^ ]+]] = (f32[2,4,1,1]{3,2,1,0}, f32[1,4,1,1]{3,2,1,0}, f32[1,4,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P3_BITCAST]], [[GTE1]], [[GTE2]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0
; CHECK-DAG: "kind":"LAYER_BWD"
; CHECK: }
; CHECK-DAG: [[GTE3:%[^ ]+]] = f32[2,4,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=0
; CHECK-DAG: [[GTE3_BITCAST:%[^ ]+]] = f32[2,4]{1,0} bitcast([[GTE3]])
; CHECK-DAG: [[GTE4:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=1
; CHECK-DAG: [[GTE4_BITCAST:%[^ ]+]] = f32[4]{0} bitcast([[GTE4]])
; CHECK-DAG: [[GTE5:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=2
; CHECK-DAG: [[GTE5_BITCAST:%[^ ]+]] = f32[4]{0} bitcast([[GTE5]])
; CHECK-DAG: ROOT [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, f32[2,4]{1,0}, f32[4]{0}, f32[4]{0}) tuple([[GTE0_BITCAST]], [[GTE3_BITCAST]], [[GTE4_BITCAST]], [[GTE5_BITCAST]])
)";
TestNorm(hlo_text, optimized_hlo);
}
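// Same forward-training plus backward pattern, applied to a 4D input
// normalized over its last dimension (dimension 3) with 1D scale and bias.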
TEST_F(CudnnNormRewriterTest, LayerNormTrainBackward4D3) {
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4,6,8] parameter(0)
input_square = f32[2,4,6,8] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2,4,6] reduce(input_square, c0), dimensions={3}, to_apply=apply
reduce = f32[2,4,6] reduce(input, c0), dimensions={3}, to_apply=apply
r_nelems = f32[] constant(0.125)
r_nelems_bcast = f32[2,4,6] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2,4,6] multiply(input_square_sum,r_nelems_bcast)
input_mean = f32[2,4,6] multiply(reduce, r_nelems_bcast)
input_mean_square = f32[2,4,6] multiply(input_mean,input_mean)
variance = f32[2,4,6] subtract(input_square_mean,input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2,4,6] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2,4,6] add(variance, epsilon_bcast)
norm_factor = f32[2,4,6] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,4,6,8] broadcast(norm_factor), dimensions={0,1,2}
input_mean_bcast = f32[2,4,6,8] broadcast(input_mean), dimensions={0,1,2}
input_center = f32[2,4,6,8] subtract(input, input_mean_bcast)
norm = f32[2,4,6,8] multiply(input_center, norm_factor_bcast)
scale = f32[8] parameter(1)
scale_bcast = f32[2,4,6,8] broadcast(scale), dimensions={3}
norm_scale = f32[2,4,6,8] multiply(norm, scale_bcast)
bias = f32[8] parameter(2)
bias_bcast = f32[2,4,6,8] broadcast(bias), dimensions={3}
norm_scale_bias = f32[2,4,6,8] add(norm_scale, bias_bcast)
doutput = f32[2,4,6,8] parameter(3)
dbias = f32[8] reduce(doutput, c0), dimensions={0,1,2}, to_apply=apply
norm_doutput = f32[2,4,6,8] multiply(norm, doutput)
dscale = f32[8] reduce(norm_doutput, c0), dimensions={0,1,2}, to_apply=apply
scale_doutput = f32[2,4,6,8] multiply(scale_bcast, doutput)
input_center_scale_doutput = f32[2,4,6,8] multiply(input_center, scale_doutput)
f0 = f32[2,4,6] reduce(input_center_scale_doutput, c0), dimensions={3}, to_apply=apply
norm_factor_cube = f32[2,4,6] divide(norm_factor, variance_plus_epsilon)
c1 = f32[] constant(-0.5)
c1_bcast = f32[2,4,6] broadcast(c1), dimensions={}
dnorm_factor = f32[2,4,6] multiply(norm_factor_cube, c1_bcast)
f0_dnorm_factor = f32[2,4,6] multiply(f0, dnorm_factor)
c2 = f32[] constant(0.25)
c2_bcast = f32[2,4,6] broadcast(c2), dimensions={}
f0_dnorm_factor_scaled = f32[2,4,6] multiply(f0_dnorm_factor, c2_bcast)
f0_dnorm_factor_scaled_bcast = f32[2,4,6,8] broadcast(f0_dnorm_factor_scaled), dimensions={0,1,2}
f1 = f32[2,4,6,8] multiply(input_center, f0_dnorm_factor_scaled_bcast)
minus_f1 = f32[2,4,6,8] negate(f1)
minus_f1_sum = f32[2,4,6] reduce(minus_f1, c0), dimensions={3}, to_apply=apply
f2 = f32[2,4,6,8] multiply(norm_factor_bcast, scale_doutput)
minus_f2 = f32[2,4,6,8] negate(f2)
minus_f2_sum = f32[2,4,6] reduce(minus_f2, c0), dimensions={3}, to_apply=apply
minus_f1_f2_sum = f32[2,4,6] add(minus_f1_sum, minus_f2_sum)
minus_f1_f2_sum_scaled = f32[2,4,6] multiply(minus_f1_f2_sum, r_nelems_bcast)
minus_f1_f2_sum_scaled_bcast = f32[2,4,6,8] broadcast(minus_f1_f2_sum_scaled), dimensions={0,1,2}
f1_f2 = f32[2,4,6,8] add(f1, f2)
dinput = f32[2,4,6,8] add(f1_f2, minus_f1_f2_sum_scaled_bcast)
ROOT out = (f32[2,4,6,8], f32[2,4,6,8], f32[8], f32[8]) tuple(norm_scale_bias, dinput, dscale, dbias)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,6,8], {{.*}}: f32[8], {{.*}}: f32[8], {{.*}}: f32[2,4,6,8]) -> (f32[2,4,6,8], f32[2,4,6,8], f32[8], f32[8]) {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(0)
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[48,8,1,1]{3,2,1,0} bitcast([[P0]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[8]{0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,8,1,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[8]{0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,8,1,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC0:%[^ ]+]] = (f32[48,8,1,1]{3,2,1,0}, f32[48,1,1,1]{3,2,1,0}, f32[48,1,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK-DAG: "kind":"LAYER_FWD_TRAIN"
; CHECK: }
; CHECK-DAG: [[GTE0:%[^ ]+]] = f32[48,8,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=0
; CHECK-DAG: [[GTE0_BITCAST:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} bitcast([[GTE0]])
; CHECK-DAG: [[P3:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(3)
; CHECK-DAG: [[P3_BITCAST:%[^ ]+]] = f32[48,8,1,1]{3,2,1,0} bitcast([[P3]])
; CHECK-DAG: [[GTE1:%[^ ]+]] = f32[48,1,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=1
; CHECK-DAG: [[GTE2:%[^ ]+]] = f32[48,1,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=2
; CHECK-NEXT: [[CC1:%[^ ]+]] = (f32[48,8,1,1]{3,2,1,0}, f32[1,8,1,1]{3,2,1,0}, f32[1,8,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P3_BITCAST]], [[GTE1]], [[GTE2]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0
; CHECK-DAG: "kind":"LAYER_BWD"
; CHECK: }
; CHECK-DAG: [[GTE3:%[^ ]+]] = f32[48,8,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=0
; CHECK-DAG: [[GTE3_BITCAST:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} bitcast([[GTE3]])
; CHECK-DAG: [[GTE4:%[^ ]+]] = f32[1,8,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=1
; CHECK-DAG: [[GTE4_BITCAST:%[^ ]+]] = f32[8]{0} bitcast([[GTE4]])
; CHECK-DAG: [[GTE5:%[^ ]+]] = f32[1,8,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=2
; CHECK-DAG: [[GTE5_BITCAST:%[^ ]+]] = f32[8]{0} bitcast([[GTE5]])
; CHECK-DAG: ROOT [[OUT:%[^ ]+]] = (f32[2,4,6,8]{3,2,1,0}, f32[2,4,6,8]{3,2,1,0}, f32[8]{0}, f32[8]{0}) tuple([[GTE0_BITCAST]], [[GTE3_BITCAST]], [[GTE4_BITCAST]], [[GTE5_BITCAST]])
)";
TestNorm(hlo_text, optimized_hlo);
}
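// Backward pass where the normalized dimension (2) is not innermost; the
// expected HLO wraps the cuDNN custom calls in transpose fusions so the
// reduced dimension becomes contiguous before and after the calls.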
TEST_F(CudnnNormRewriterTest, LayerNormTrainBackward4D2) {
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4,6,8] parameter(0)
input_square = f32[2,4,6,8] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2,4,8] reduce(input_square, c0), dimensions={2}, to_apply=apply
reduce = f32[2,4,8] reduce(input, c0), dimensions={2}, to_apply=apply
r_nelems = f32[] constant(0.166667)
r_nelems_bcast = f32[2,4,8] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2,4,8] multiply(input_square_sum,r_nelems_bcast)
input_mean = f32[2,4,8] multiply(reduce, r_nelems_bcast)
input_mean_square = f32[2,4,8] multiply(input_mean,input_mean)
variance = f32[2,4,8] subtract(input_square_mean,input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2,4,8] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2,4,8] add(variance, epsilon_bcast)
norm_factor = f32[2,4,8] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,4,6,8] broadcast(norm_factor), dimensions={0,1,3}
input_mean_bcast = f32[2,4,6,8] broadcast(input_mean), dimensions={0,1,3}
input_center = f32[2,4,6,8] subtract(input, input_mean_bcast)
norm = f32[2,4,6,8] multiply(input_center, norm_factor_bcast)
scale = f32[6] parameter(1)
scale_bcast = f32[2,4,6,8] broadcast(scale), dimensions={2}
norm_scale = f32[2,4,6,8] multiply(norm, scale_bcast)
bias = f32[6] parameter(2)
bias_bcast = f32[2,4,6,8] broadcast(bias), dimensions={2}
norm_scale_bias = f32[2,4,6,8] add(norm_scale, bias_bcast)
doutput = f32[2,4,6,8] parameter(3)
dbias = f32[6] reduce(doutput, c0), dimensions={0,1,3}, to_apply=apply
norm_doutput = f32[2,4,6,8] multiply(norm, doutput)
dscale = f32[6] reduce(norm_doutput, c0), dimensions={0,1,3}, to_apply=apply
scale_doutput = f32[2,4,6,8] multiply(scale_bcast, doutput)
input_center_scale_doutput = f32[2,4,6,8] multiply(input_center, scale_doutput)
f0 = f32[2,4,8] reduce(input_center_scale_doutput, c0), dimensions={2}, to_apply=apply
norm_factor_cube = f32[2,4,8] divide(norm_factor, variance_plus_epsilon)
c1 = f32[] constant(-0.5)
c1_bcast = f32[2,4,8] broadcast(c1), dimensions={}
dnorm_factor = f32[2,4,8] multiply(norm_factor_cube, c1_bcast)
f0_dnorm_factor = f32[2,4,8] multiply(f0, dnorm_factor)
c2 = f32[] constant(0.333333)
c2_bcast = f32[2,4,8] broadcast(c2), dimensions={}
f0_dnorm_factor_scaled = f32[2,4,8] multiply(f0_dnorm_factor, c2_bcast)
f0_dnorm_factor_scaled_bcast = f32[2,4,6,8] broadcast(f0_dnorm_factor_scaled), dimensions={0,1,3}
f1 = f32[2,4,6,8] multiply(input_center, f0_dnorm_factor_scaled_bcast)
minus_f1 = f32[2,4,6,8] negate(f1)
minus_f1_sum = f32[2,4,8] reduce(minus_f1, c0), dimensions={2}, to_apply=apply
f2 = f32[2,4,6,8] multiply(norm_factor_bcast, scale_doutput)
minus_f2 = f32[2,4,6,8] negate(f2)
minus_f2_sum = f32[2,4,8] reduce(minus_f2, c0), dimensions={2}, to_apply=apply
minus_f1_f2_sum = f32[2,4,8] add(minus_f1_sum, minus_f2_sum)
minus_f1_f2_sum_scaled = f32[2,4,8] multiply(minus_f1_f2_sum, r_nelems_bcast)
minus_f1_f2_sum_scaled_bcast = f32[2,4,6,8] broadcast(minus_f1_f2_sum_scaled), dimensions={0,1,3}
f1_f2 = f32[2,4,6,8] add(f1, f2)
dinput = f32[2,4,6,8] add(f1_f2, minus_f1_f2_sum_scaled_bcast)
ROOT out = (f32[2,4,6,8], f32[2,4,6,8], f32[6], f32[6]) tuple(norm_scale_bias, dinput, dscale, dbias)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,6,8], {{.*}}: f32[6], {{.*}}: f32[6], {{.*}}: f32[2,4,6,8]) -> (f32[2,4,6,8], f32[2,4,6,8], f32[6], f32[6]) {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(0)
; CHECK-NEXT: [[TRANSPOSE0:%[^ ]+]] = f32[8,8,6]{2,1,0} fusion([[P0]]), kind=kLoop, calls={{.*}}
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[64,6,1,1]{3,2,1,0} bitcast([[TRANSPOSE0]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[6]{0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,6,1,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[6]{0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,6,1,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC0:%[^ ]+]] = (f32[64,6,1,1]{3,2,1,0}, f32[64,1,1,1]{3,2,1,0}, f32[64,1,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK-DAG: "kind":"LAYER_FWD_TRAIN"
; CHECK: }
; CHECK-DAG: [[GTE0:%[^ ]+]] = f32[64,6,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=0
; CHECK-DAG: [[TRANSPOSE1:%[^ ]+]] = f32[8,6,8]{2,1,0} fusion([[GTE0]]), kind=kLoop, calls={{.*}}
; CHECK-DAG: [[BITCAST:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} bitcast([[TRANSPOSE1]])
; CHECK-DAG: [[P3:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(3)
; CHECK-NEXT: [[TRANSPOSE2:%[^ ]+]] = f32[8,8,6]{2,1,0} fusion([[P3]]), kind=kLoop, calls={{.*}}
; CHECK-DAG: [[P3_BITCAST:%[^ ]+]] = f32[64,6,1,1]{3,2,1,0} bitcast([[TRANSPOSE2]])
; CHECK-DAG: [[GTE1:%[^ ]+]] = f32[64,1,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=1
; CHECK-DAG: [[GTE2:%[^ ]+]] = f32[64,1,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=2
; CHECK-NEXT: [[CC1:%[^ ]+]] = (f32[64,6,1,1]{3,2,1,0}, f32[1,6,1,1]{3,2,1,0}, f32[1,6,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P3_BITCAST]], [[GTE1]], [[GTE2]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0
; CHECK-DAG: "kind":"LAYER_BWD"
; CHECK: }
; CHECK-DAG: [[GTE3:%[^ ]+]] = f32[64,6,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=0
; CHECK-DAG: [[FUSION:%[^ ]+]] = f32[8,6,8]{2,1,0} fusion([[GTE3]]), kind=kLoop, calls=[[FUSED_COMPUTATION:%[^ ]+]]
; CHECK-DAG: [[BITCAST2:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} bitcast([[FUSION]])
; CHECK-DAG: [[GTE4:%[^ ]+]] = f32[1,6,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=1
; CHECK-DAG: [[GTE4_BITCAST:%[^ ]+]] = f32[6]{0} bitcast([[GTE4]])
; CHECK-DAG: [[GTE5:%[^ ]+]] = f32[1,6,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=2
; CHECK-DAG: [[GTE5_BITCAST:%[^ ]+]] = f32[6]{0} bitcast([[GTE5]])
; CHECK-DAG: ROOT [[OUT:%[^ ]+]] = (f32[2,4,6,8]{3,2,1,0}, f32[2,4,6,8]{3,2,1,0}, f32[6]{0}, f32[6]{0}) tuple([[BITCAST]], [[BITCAST2]], [[GTE4_BITCAST]], [[GTE5_BITCAST]])
)";
TestNorm(hlo_text, optimized_hlo);
}
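// Normalization over the two middle dimensions ({1,2}) with 2D scale and
// bias; transpose fusions collapse the normalized dimensions into the
// innermost position before the custom calls.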
TEST_F(CudnnNormRewriterTest, LayerNormTrainBackward4D12) {
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4,6,8] parameter(0)
input_square = f32[2,4,6,8] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2,8] reduce(input_square, c0), dimensions={1,2}, to_apply=apply
reduce = f32[2,8] reduce(input, c0), dimensions={1,2}, to_apply=apply
r_nelems = f32[] constant(0.041667)
r_nelems_bcast = f32[2,8] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2,8] multiply(input_square_sum,r_nelems_bcast)
input_mean = f32[2,8] multiply(reduce, r_nelems_bcast)
input_mean_square = f32[2,8] multiply(input_mean,input_mean)
variance = f32[2,8] subtract(input_square_mean,input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2,8] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2,8] add(variance, epsilon_bcast)
norm_factor = f32[2,8] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,4,6,8] broadcast(norm_factor), dimensions={0,3}
input_mean_bcast = f32[2,4,6,8] broadcast(input_mean), dimensions={0,3}
input_center = f32[2,4,6,8] subtract(input, input_mean_bcast)
norm = f32[2,4,6,8] multiply(input_center, norm_factor_bcast)
scale = f32[4,6] parameter(1)
scale_bcast = f32[2,4,6,8] broadcast(scale), dimensions={1,2}
norm_scale = f32[2,4,6,8] multiply(norm, scale_bcast)
bias = f32[4,6] parameter(2)
bias_bcast = f32[2,4,6,8] broadcast(bias), dimensions={1,2}
norm_scale_bias = f32[2,4,6,8] add(norm_scale, bias_bcast)
doutput = f32[2,4,6,8] parameter(3)
dbias = f32[4,6] reduce(doutput, c0), dimensions={0,3}, to_apply=apply
norm_doutput = f32[2,4,6,8] multiply(norm, doutput)
dscale = f32[4,6] reduce(norm_doutput, c0), dimensions={0,3}, to_apply=apply
scale_doutput = f32[2,4,6,8] multiply(scale_bcast, doutput)
input_center_scale_doutput = f32[2,4,6,8] multiply(input_center, scale_doutput)
f0 = f32[2,8] reduce(input_center_scale_doutput, c0), dimensions={1,2}, to_apply=apply
norm_factor_cube = f32[2,8] divide(norm_factor, variance_plus_epsilon)
c1 = f32[] constant(-0.5)
c1_bcast = f32[2,8] broadcast(c1), dimensions={}
dnorm_factor = f32[2,8] multiply(norm_factor_cube, c1_bcast)
f0_dnorm_factor = f32[2,8] multiply(f0, dnorm_factor)
c2 = f32[] constant(0.083333)
c2_bcast = f32[2,8] broadcast(c2), dimensions={}
f0_dnorm_factor_scaled = f32[2,8] multiply(f0_dnorm_factor, c2_bcast)
f0_dnorm_factor_scaled_bcast = f32[2,4,6,8] broadcast(f0_dnorm_factor_scaled), dimensions={0,3}
f1 = f32[2,4,6,8] multiply(input_center, f0_dnorm_factor_scaled_bcast)
minus_f1 = f32[2,4,6,8] negate(f1)
minus_f1_sum = f32[2,8] reduce(minus_f1, c0), dimensions={1,2}, to_apply=apply
f2 = f32[2,4,6,8] multiply(norm_factor_bcast, scale_doutput)
minus_f2 = f32[2,4,6,8] negate(f2)
minus_f2_sum = f32[2,8] reduce(minus_f2, c0), dimensions={1,2}, to_apply=apply
minus_f1_f2_sum = f32[2,8] add(minus_f1_sum, minus_f2_sum)
minus_f1_f2_sum_scaled = f32[2,8] multiply(minus_f1_f2_sum, r_nelems_bcast)
minus_f1_f2_sum_scaled_bcast = f32[2,4,6,8] broadcast(minus_f1_f2_sum_scaled), dimensions={0,3}
f1_f2 = f32[2,4,6,8] add(f1, f2)
dinput = f32[2,4,6,8] add(f1_f2, minus_f1_f2_sum_scaled_bcast)
ROOT out = (f32[2,4,6,8], f32[2,4,6,8], f32[4,6], f32[4,6]) tuple(norm_scale_bias, dinput, dscale, dbias)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,6,8], {{.*}}: f32[4,6], {{.*}}: f32[4,6], {{.*}}: f32[2,4,6,8]) -> (f32[2,4,6,8], f32[2,4,6,8], f32[4,6], f32[4,6]) {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(0)
; CHECK-NEXT: [[TRANSPOSE0:%[^ ]+]] = f32[2,8,24]{2,1,0} fusion([[P0]]), kind=kLoop, calls={{.*}}
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[16,4,6,1]{3,2,1,0} bitcast([[TRANSPOSE0]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4,6]{1,0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,6,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4,6]{1,0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,6,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC0:%[^ ]+]] = (f32[16,4,6,1]{3,2,1,0}, f32[16,1,1,1]{3,2,1,0}, f32[16,1,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK-DAG: "kind":"LAYER_FWD_TRAIN"
; CHECK: }
; CHECK-DAG: [[GTE0:%[^ ]+]] = f32[16,4,6,1]{3,2,1,0} get-tuple-element([[CC0]]), index=0
; CHECK-DAG: [[TRANSPOSE1:%[^ ]+]] = f32[2,24,8]{2,1,0} fusion([[GTE0]]), kind=kLoop, calls={{.*}}
; CHECK-DAG: [[BITCAST:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} bitcast([[TRANSPOSE1]])
; CHECK-DAG: [[P3:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(3)
; CHECK-NEXT: [[TRANSPOSE2:%[^ ]+]] = f32[2,8,24]{2,1,0} fusion([[P3]]), kind=kLoop, calls={{.*}}
; CHECK-DAG: [[P3_BITCAST:%[^ ]+]] = f32[16,4,6,1]{3,2,1,0} bitcast([[TRANSPOSE2]])
; CHECK-DAG: [[GTE1:%[^ ]+]] = f32[16,1,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=1
; CHECK-DAG: [[GTE2:%[^ ]+]] = f32[16,1,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=2
; CHECK-NEXT: [[CC1:%[^ ]+]] = (f32[16,4,6,1]{3,2,1,0}, f32[1,4,6,1]{3,2,1,0}, f32[1,4,6,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P3_BITCAST]], [[GTE1]], [[GTE2]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0
; CHECK-DAG: "kind":"LAYER_BWD"
; CHECK: }
; CHECK-DAG: [[GTE3:%[^ ]+]] = f32[16,4,6,1]{3,2,1,0} get-tuple-element([[CC1]]), index=0
; CHECK-DAG: [[FUSION:%[^ ]+]] = f32[2,24,8]{2,1,0} fusion([[GTE3]]), kind=kLoop, calls=[[FUSED_COMPUTATION:%[^ ]+]]
; CHECK-DAG: [[BITCAST2:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} bitcast([[FUSION]])
; CHECK-DAG: [[GTE4:%[^ ]+]] = f32[1,4,6,1]{3,2,1,0} get-tuple-element([[CC1]]), index=1
; CHECK-DAG: [[GTE4_BITCAST:%[^ ]+]] = f32[4,6]{1,0} bitcast([[GTE4]])
; CHECK-DAG: [[GTE5:%[^ ]+]] = f32[1,4,6,1]{3,2,1,0} get-tuple-element([[CC1]]), index=2
; CHECK-DAG: [[GTE5_BITCAST:%[^ ]+]] = f32[4,6]{1,0} bitcast([[GTE5]])
; CHECK-DAG: ROOT [[OUT:%[^ ]+]] = (f32[2,4,6,8]{3,2,1,0}, f32[2,4,6,8]{3,2,1,0}, f32[4,6]{1,0}, f32[4,6]{1,0}) tuple([[BITCAST]], [[BITCAST2]], [[GTE4_BITCAST]], [[GTE5_BITCAST]])
)";
TestNorm(hlo_text, optimized_hlo);
}
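// Variant of the {1,2} case in which dimension 2 is degenerate (size 1).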
TEST_F(CudnnNormRewriterTest, LayerNormTrainBackward4D12Degenerate2) {
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4,1,8] parameter(0)
input_square = f32[2,4,1,8] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2,8] reduce(input_square, c0), dimensions={1,2}, to_apply=apply
reduce = f32[2,8] reduce(input, c0), dimensions={1,2}, to_apply=apply
r_nelems = f32[] constant(0.25)
r_nelems_bcast = f32[2,8] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2,8] multiply(input_square_sum,r_nelems_bcast)
input_mean = f32[2,8] multiply(reduce, r_nelems_bcast)
input_mean_square = f32[2,8] multiply(input_mean,input_mean)
variance = f32[2,8] subtract(input_square_mean,input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2,8] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2,8] add(variance, epsilon_bcast)
norm_factor = f32[2,8] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,4,1,8] broadcast(norm_factor), dimensions={0,3}
input_mean_bcast = f32[2,4,1,8] broadcast(input_mean), dimensions={0,3}
input_center = f32[2,4,1,8] subtract(input, input_mean_bcast)
norm = f32[2,4,1,8] multiply(input_center, norm_factor_bcast)
scale = f32[4,1] parameter(1)
scale_bcast = f32[2,4,1,8] broadcast(scale), dimensions={1,2}
norm_scale = f32[2,4,1,8] multiply(norm, scale_bcast)
bias = f32[4,1] parameter(2)
bias_bcast = f32[2,4,1,8] broadcast(bias), dimensions={1,2}
norm_scale_bias = f32[2,4,1,8] add(norm_scale, bias_bcast)
doutput = f32[2,4,1,8] parameter(3)
dbias = f32[4,1] reduce(doutput, c0), dimensions={0,3}, to_apply=apply
norm_doutput = f32[2,4,1,8] multiply(norm, doutput)
dscale = f32[4,1] reduce(norm_doutput, c0), dimensions={0,3}, to_apply=apply
scale_doutput = f32[2,4,1,8] multiply(scale_bcast, doutput)
input_center_scale_doutput = f32[2,4,1,8] multiply(input_center, scale_doutput)
f0 = f32[2,8] reduce(input_center_scale_doutput, c0), dimensions={1,2}, to_apply=apply
norm_factor_cube = f32[2,8] divide(norm_factor, variance_plus_epsilon)
c1 = f32[] constant(-0.5)
c1_bcast = f32[2,8] broadcast(c1), dimensions={}
dnorm_factor = f32[2,8] multiply(norm_factor_cube, c1_bcast)
f0_dnorm_factor = f32[2,8] multiply(f0, dnorm_factor)
c2 = f32[] constant(0.5)
c2_bcast = f32[2,8] broadcast(c2), dimensions={}
f0_dnorm_factor_scaled = f32[2,8] multiply(f0_dnorm_factor, c2_bcast)
f0_dnorm_factor_scaled_bcast = f32[2,4,1,8] broadcast(f0_dnorm_factor_scaled), dimensions={0,3}
f1 = f32[2,4,1,8] multiply(input_center, f0_dnorm_factor_scaled_bcast)
minus_f1 = f32[2,4,1,8] negate(f1)
minus_f1_sum = f32[2,8] reduce(minus_f1, c0), dimensions={1,2}, to_apply=apply
f2 = f32[2,4,1,8] multiply(norm_factor_bcast, scale_doutput)
minus_f2 = f32[2,4,1,8] negate(f2)
minus_f2_sum = f32[2,8] reduce(minus_f2, c0), dimensions={1,2}, to_apply=apply
minus_f1_f2_sum = f32[2,8] add(minus_f1_sum, minus_f2_sum)
minus_f1_f2_sum_scaled = f32[2,8] multiply(minus_f1_f2_sum, r_nelems_bcast)
minus_f1_f2_sum_scaled_bcast = f32[2,4,1,8] broadcast(minus_f1_f2_sum_scaled), dimensions={0,3}
f1_f2 = f32[2,4,1,8] add(f1, f2)
dinput = f32[2,4,1,8] add(f1_f2, minus_f1_f2_sum_scaled_bcast)
ROOT out = (f32[2,4,1,8], f32[2,4,1,8], f32[4,1], f32[4,1]) tuple(norm_scale_bias, dinput, dscale, dbias)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,1,8], {{.*}}: f32[4,1], {{.*}}: f32[4,1], {{.*}}: f32[2,4,1,8]) -> (f32[2,4,1,8], f32[2,4,1,8], f32[4,1], f32[4,1]) {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,1,8]{3,2,1,0} parameter(0)
; CHECK-NEXT: [[TRANSPOSE0:%[^ ]+]] = f32[2,8,4]{2,1,0} fusion([[P0]]), kind=kLoop, calls={{.*}}
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[16,4,1,1]{3,2,1,0} bitcast([[TRANSPOSE0]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4,1]{1,0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4,1]{1,0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC0:%[^ ]+]] = (f32[16,4,1,1]{3,2,1,0}, f32[16,1,1,1]{3,2,1,0}, f32[16,1,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK-DAG: "kind":"LAYER_FWD_TRAIN"
; CHECK: }
; CHECK-DAG: [[GTE0:%[^ ]+]] = f32[16,4,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=0
; CHECK-DAG: [[TRANSPOSE1:%[^ ]+]] = f32[2,4,8]{2,1,0} fusion([[GTE0]]), kind=kLoop, calls={{.*}}
; CHECK-DAG: [[BITCAST:%[^ ]+]] = f32[2,4,1,8]{3,2,1,0} bitcast([[TRANSPOSE1]])
; CHECK-DAG: [[P3:%[^ ]+]] = f32[2,4,1,8]{3,2,1,0} parameter(3)
; CHECK-NEXT: [[TRANSPOSE2:%[^ ]+]] = f32[2,8,4]{2,1,0} fusion([[P3]]), kind=kLoop, calls={{.*}}
; CHECK-DAG: [[P3_BITCAST:%[^ ]+]] = f32[16,4,1,1]{3,2,1,0} bitcast([[TRANSPOSE2]])
; CHECK-DAG: [[GTE1:%[^ ]+]] = f32[16,1,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=1
; CHECK-DAG: [[GTE2:%[^ ]+]] = f32[16,1,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=2
; CHECK-NEXT: [[CC1:%[^ ]+]] = (f32[16,4,1,1]{3,2,1,0}, f32[1,4,1,1]{3,2,1,0}, f32[1,4,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P3_BITCAST]], [[GTE1]], [[GTE2]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0
; CHECK-DAG: "kind":"LAYER_BWD"
; CHECK: }
; CHECK-DAG: [[GTE3:%[^ ]+]] = f32[16,4,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=0
; CHECK-DAG: [[FUSION0:%[^ ]+]] = f32[2,4,8]{2,1,0} fusion([[GTE3]]), kind=kLoop, calls=[[FUSED_COMPUTATION0:%[^ ]+]]
; CHECK-DAG: [[BITCAST2:%[^ ]+]] = f32[2,4,1,8]{3,2,1,0} bitcast([[FUSION0]])
; CHECK-DAG: [[GTE4:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=1
; CHECK-DAG: [[GTE4_BITCAST:%[^ ]+]] = f32[4,1]{1,0} bitcast([[GTE4]])
; CHECK-DAG: [[GTE5:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=2
; CHECK-DAG: [[GTE5_BITCAST:%[^ ]+]] = f32[4,1]{1,0} bitcast([[GTE5]])
; CHECK-DAG: ROOT [[OUT:%[^ ]+]] = (f32[2,4,1,8]{3,2,1,0}, f32[2,4,1,8]{3,2,1,0}, f32[4,1]{1,0}, f32[4,1]{1,0}) tuple([[BITCAST]], [[BITCAST2]], [[GTE4_BITCAST]], [[GTE5_BITCAST]])
)";
TestNorm(hlo_text, optimized_hlo);
}
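// Currently disabled: the incoming gradient is supplied reshaped to
// f32[2,4,48], so the expected rewrite reshapes and transposes it in a single
// fusion into the f32[2,6,8,4] operand of the backward custom call.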
TEST_F(CudnnNormRewriterTest,
DISABLED_LayerNormTrainBackward4D1DoutputReshapeSplit) {
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4,6,8] parameter(0)
input_square = f32[2,4,6,8] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2,6,8] reduce(input_square, c0), dimensions={1}, to_apply=apply
reduce = f32[2,6,8] reduce(input, c0), dimensions={1}, to_apply=apply
r_nelems = f32[] constant(0.25)
r_nelems_bcast = f32[2,6,8] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2,6,8] multiply(input_square_sum,r_nelems_bcast)
input_mean = f32[2,6,8] multiply(reduce, r_nelems_bcast)
input_mean_square = f32[2,6,8] multiply(input_mean,input_mean)
variance = f32[2,6,8] subtract(input_square_mean,input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2,6,8] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2,6,8] add(variance, epsilon_bcast)
norm_factor = f32[2,6,8] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,4,6,8] broadcast(norm_factor), dimensions={0,2,3}
input_mean_bcast = f32[2,4,6,8] broadcast(input_mean), dimensions={0,2,3}
input_center = f32[2,4,6,8] subtract(input, input_mean_bcast)
norm = f32[2,4,6,8] multiply(input_center, norm_factor_bcast)
scale = f32[4] parameter(1)
scale_bcast = f32[2,4,6,8] broadcast(scale), dimensions={1}
norm_scale = f32[2,4,6,8] multiply(norm, scale_bcast)
bias = f32[4] parameter(2)
bias_bcast = f32[2,4,6,8] broadcast(bias), dimensions={1}
norm_scale_bias = f32[2,4,6,8] add(norm_scale, bias_bcast)
doutput = f32[2,4,48] parameter(3)
dbias = f32[4] reduce(doutput, c0), dimensions={0,2}, to_apply=apply
doutput_bitcast = f32[2,4,6,8] reshape(doutput)
norm_doutput = f32[2,4,6,8] multiply(norm, doutput_bitcast)
dscale = f32[4] reduce(norm_doutput, c0), dimensions={0,2,3}, to_apply=apply
scale_doutput = f32[2,4,6,8] multiply(scale_bcast, doutput_bitcast)
input_center_scale_doutput = f32[2,4,6,8] multiply(input_center, scale_doutput)
f0 = f32[2,6,8] reduce(input_center_scale_doutput, c0), dimensions={1}, to_apply=apply
norm_factor_cube = f32[2,6,8] divide(norm_factor, variance_plus_epsilon)
c1 = f32[] constant(-0.5)
c1_bcast = f32[2,6,8] broadcast(c1), dimensions={}
dnorm_factor = f32[2,6,8] multiply(norm_factor_cube, c1_bcast)
f0_dnorm_factor = f32[2,6,8] multiply(f0, dnorm_factor)
c2 = f32[] constant(0.5)
c2_bcast = f32[2,6,8] broadcast(c2), dimensions={}
f0_dnorm_factor_scaled = f32[2,6,8] multiply(f0_dnorm_factor, c2_bcast)
f0_dnorm_factor_scaled_bcast = f32[2,4,6,8] broadcast(f0_dnorm_factor_scaled), dimensions={0,2,3}
f1 = f32[2,4,6,8] multiply(input_center, f0_dnorm_factor_scaled_bcast)
minus_f1 = f32[2,4,6,8] negate(f1)
minus_f1_sum = f32[2,6,8] reduce(minus_f1, c0), dimensions={1}, to_apply=apply
f2 = f32[2,4,6,8] multiply(norm_factor_bcast, scale_doutput)
minus_f2 = f32[2,4,6,8] negate(f2)
minus_f2_sum = f32[2,6,8] reduce(minus_f2, c0), dimensions={1}, to_apply=apply
minus_f1_f2_sum = f32[2,6,8] add(minus_f1_sum, minus_f2_sum)
minus_f1_f2_sum_scaled = f32[2,6,8] multiply(minus_f1_f2_sum, r_nelems_bcast)
minus_f1_f2_sum_scaled_bcast = f32[2,4,6,8] broadcast(minus_f1_f2_sum_scaled), dimensions={0,2,3}
f1_f2 = f32[2,4,6,8] add(f1, f2)
dinput = f32[2,4,6,8] add(f1_f2, minus_f1_f2_sum_scaled_bcast)
ROOT out = (f32[2,4,6,8], f32[2,4,6,8], f32[4], f32[4]) tuple(norm_scale_bias, dinput, dscale, dbias)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,6,8], {{.*}}: f32[4], {{.*}}: f32[4], {{.*}}: f32[2,4,48]) -> (f32[2,4,6,8], f32[2,4,6,8], f32[4], f32[4]) {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(0)
; CHECK-NEXT: [[TRANSPOSE0:%[^ ]+]] = f32[2,6,8,4]{3,2,1,0} transpose([[P0]]), dimensions={0,2,3,1}
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[96,4,1,1]{3,2,1,0} bitcast([[TRANSPOSE0]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4]{0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4]{0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC0:%[^ ]+]] = (f32[96,4,1,1]{3,2,1,0}, f32[96,1,1,1]{3,2,1,0}, f32[96,1,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK-DAG: "kind":"LAYER_FWD_TRAIN"
; CHECK: }
; CHECK-DAG: [[GTE0:%[^ ]+]] = f32[96,4,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=0
; CHECK-DAG: [[P3:%[^ ]+]] = f32[2,4,48]{2,1,0} parameter(3)
; CHECK-DAG: [[FUSION0:%[^ ]+]] = f32[2,6,8,4]{3,2,1,0} fusion([[P3]]), kind=kLoop, calls=[[FUSED_COMPUTATION0:%[^ ]+]]
; CHECK-DAG: [[FUSION0_BITCAST:%[^ ]+]] = f32[96,4,1,1]{3,2,1,0} bitcast([[FUSION0]])
; CHECK-DAG: [[GTE1:%[^ ]+]] = f32[96,1,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=1
; CHECK-DAG: [[GTE2:%[^ ]+]] = f32[96,1,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=2
; CHECK-NEXT: [[CC1:%[^ ]+]] = (f32[96,4,1,1]{3,2,1,0}, f32[1,4,1,1]{3,2,1,0}, f32[1,4,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[FUSION0_BITCAST]], [[GTE1]], [[GTE2]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0
; CHECK-DAG: "kind":"LAYER_BWD"
; CHECK: }
; CHECK-DAG: [[GTE3:%[^ ]+]] = f32[96,4,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=0
; CHECK-DAG: [[FUSION1:%[^ ]+]] = (f32[2,4,6,8]{3,2,1,0}, f32[2,4,6,8]{3,2,1,0}) fusion([[GTE0]], [[GTE3]]), kind=kLoop, calls=[[FUSED_COMPUTATION1:%[^ ]+]]
; CHECK-DAG: [[GTEF1:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} get-tuple-element([[FUSION1]]), index=0
; CHECK-DAG: [[GTEF2:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} get-tuple-element([[FUSION1]]), index=1
; CHECK-DAG: [[GTE4:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=1
; CHECK-DAG: [[GTE4_BITCAST:%[^ ]+]] = f32[4]{0} bitcast([[GTE4]])
; CHECK-DAG: [[GTE5:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=2
; CHECK-DAG: [[GTE5_BITCAST:%[^ ]+]] = f32[4]{0} bitcast([[GTE5]])
; CHECK-DAG: ROOT [[OUT:%[^ ]+]] = (f32[2,4,6,8]{3,2,1,0}, f32[2,4,6,8]{3,2,1,0}, f32[4]{0}, f32[4]{0}) tuple([[GTEF1]], [[GTEF2]], [[GTE4_BITCAST]], [[GTE5_BITCAST]])
)";
TestNorm(hlo_text, optimized_hlo);
}
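// Currently disabled: the incoming gradient is supplied as f32[2,4,6,2,2,2];
// its trailing dimensions must be combined and the result transposed in a
// single fusion into the f32[2,6,8,4] operand of the backward custom call.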
TEST_F(CudnnNormRewriterTest,
DISABLED_LayerNormTrainBackward4D1DoutputReshapeCombine) {
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] add(a,b)
}
ENTRY test {
input = f32[2,4,6,8] parameter(0)
input_square = f32[2,4,6,8] multiply(input, input)
c0 = f32[] constant(0)
input_square_sum = f32[2,6,8] reduce(input_square, c0), dimensions={1}, to_apply=apply
reduce = f32[2,6,8] reduce(input, c0), dimensions={1}, to_apply=apply
r_nelems = f32[] constant(0.25)
r_nelems_bcast = f32[2,6,8] broadcast(r_nelems), dimensions={}
input_square_mean = f32[2,6,8] multiply(input_square_sum,r_nelems_bcast)
input_mean = f32[2,6,8] multiply(reduce, r_nelems_bcast)
input_mean_square = f32[2,6,8] multiply(input_mean,input_mean)
variance = f32[2,6,8] subtract(input_square_mean,input_mean_square)
epsilon = f32[] constant(0.001)
epsilon_bcast = f32[2,6,8] broadcast(epsilon), dimensions={}
variance_plus_epsilon = f32[2,6,8] add(variance, epsilon_bcast)
norm_factor = f32[2,6,8] rsqrt(variance_plus_epsilon)
norm_factor_bcast = f32[2,4,6,8] broadcast(norm_factor), dimensions={0,2,3}
input_mean_bcast = f32[2,4,6,8] broadcast(input_mean), dimensions={0,2,3}
input_center = f32[2,4,6,8] subtract(input, input_mean_bcast)
norm = f32[2,4,6,8] multiply(input_center, norm_factor_bcast)
scale = f32[4] parameter(1)
scale_bcast = f32[2,4,6,8] broadcast(scale), dimensions={1}
norm_scale = f32[2,4,6,8] multiply(norm, scale_bcast)
bias = f32[4] parameter(2)
bias_bcast = f32[2,4,6,8] broadcast(bias), dimensions={1}
norm_scale_bias = f32[2,4,6,8] add(norm_scale, bias_bcast)
doutput = f32[2,4,6,2,2,2] parameter(3)
dbias = f32[4] reduce(doutput, c0), dimensions={0,2,3,4,5}, to_apply=apply
doutput_bitcast = f32[2,4,6,8] reshape(doutput)
norm_doutput = f32[2,4,6,8] multiply(norm, doutput_bitcast)
dscale = f32[4] reduce(norm_doutput, c0), dimensions={0,2,3}, to_apply=apply
scale_doutput = f32[2,4,6,8] multiply(scale_bcast, doutput_bitcast)
input_center_scale_doutput = f32[2,4,6,8] multiply(input_center, scale_doutput)
f0 = f32[2,6,8] reduce(input_center_scale_doutput, c0), dimensions={1}, to_apply=apply
norm_factor_cube = f32[2,6,8] divide(norm_factor, variance_plus_epsilon)
c1 = f32[] constant(-0.5)
c1_bcast = f32[2,6,8] broadcast(c1), dimensions={}
dnorm_factor = f32[2,6,8] multiply(norm_factor_cube, c1_bcast)
f0_dnorm_factor = f32[2,6,8] multiply(f0, dnorm_factor)
c2 = f32[] constant(0.5)
c2_bcast = f32[2,6,8] broadcast(c2), dimensions={}
f0_dnorm_factor_scaled = f32[2,6,8] multiply(f0_dnorm_factor, c2_bcast)
f0_dnorm_factor_scaled_bcast = f32[2,4,6,8] broadcast(f0_dnorm_factor_scaled), dimensions={0,2,3}
f1 = f32[2,4,6,8] multiply(input_center, f0_dnorm_factor_scaled_bcast)
minus_f1 = f32[2,4,6,8] negate(f1)
minus_f1_sum = f32[2,6,8] reduce(minus_f1, c0), dimensions={1}, to_apply=apply
f2 = f32[2,4,6,8] multiply(norm_factor_bcast, scale_doutput)
minus_f2 = f32[2,4,6,8] negate(f2)
minus_f2_sum = f32[2,6,8] reduce(minus_f2, c0), dimensions={1}, to_apply=apply
minus_f1_f2_sum = f32[2,6,8] add(minus_f1_sum, minus_f2_sum)
minus_f1_f2_sum_scaled = f32[2,6,8] multiply(minus_f1_f2_sum, r_nelems_bcast)
minus_f1_f2_sum_scaled_bcast = f32[2,4,6,8] broadcast(minus_f1_f2_sum_scaled), dimensions={0,2,3}
f1_f2 = f32[2,4,6,8] add(f1, f2)
dinput = f32[2,4,6,8] add(f1_f2, minus_f1_f2_sum_scaled_bcast)
ROOT out = (f32[2,4,6,8], f32[2,4,6,8], f32[4], f32[4]) tuple(norm_scale_bias, dinput, dscale, dbias)
})";
const char* optimized_hlo = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,6,8], {{.*}}: f32[4], {{.*}}: f32[4], {{.*}}: f32[2,4,6,2,2,2]) -> (f32[2,4,6,8], f32[2,4,6,8], f32[4], f32[4]) {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(0)
; CHECK-NEXT: [[TRANSPOSE0:%[^ ]+]] = f32[2,6,8,4]{3,2,1,0} transpose([[P0]]), dimensions={0,2,3,1}
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[96,4,1,1]{3,2,1,0} bitcast([[TRANSPOSE0]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4]{0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4]{0} parameter(2)
; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P2]])
; CHECK-NEXT: [[CC0:%[^ ]+]] = (f32[96,4,1,1]{3,2,1,0}, f32[96,1,1,1]{3,2,1,0}, f32[96,1,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0.001
; CHECK-DAG: "kind":"LAYER_FWD_TRAIN"
; CHECK: }
; CHECK-DAG: [[GTE0:%[^ ]+]] = f32[96,4,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=0
; CHECK-DAG: [[P3:%[^ ]+]] = f32[2,4,6,2,2,2]{5,4,3,2,1,0} parameter(3)
; CHECK-DAG: [[FUSION0:%[^ ]+]] = f32[2,6,8,4]{3,2,1,0} fusion([[P3]]), kind=kLoop, calls=[[FUSED_COMPUTATION0:%[^ ]+]]
; CHECK-DAG: [[FUSION0_BITCAST:%[^ ]+]] = f32[96,4,1,1]{3,2,1,0} bitcast([[FUSION0]])
; CHECK-DAG: [[GTE1:%[^ ]+]] = f32[96,1,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=1
; CHECK-DAG: [[GTE2:%[^ ]+]] = f32[96,1,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=2
; CHECK-NEXT: [[CC1:%[^ ]+]] = (f32[96,4,1,1]{3,2,1,0}, f32[1,4,1,1]{3,2,1,0}, f32[1,4,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[FUSION0_BITCAST]], [[GTE1]], [[GTE2]]),
; CHECK: custom_call_target="__cudnn$norm",
; CHECK: backend_config={
; CHECK-DAG: "epsilon":0
; CHECK-DAG: "kind":"LAYER_BWD"
; CHECK: }
; CHECK-DAG: [[GTE3:%[^ ]+]] = f32[96,4,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=0
; CHECK-DAG: [[FUSION1:%[^ ]+]] = (f32[2,4,6,8]{3,2,1,0}, f32[2,4,6,8]{3,2,1,0}) fusion([[GTE0]], [[GTE3]]), kind=kLoop, calls=[[FUSED_COMPUTATION1:%[^ ]+]]
; CHECK-DAG: [[GTEF1:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} get-tuple-element([[FUSION1]]), index=0
; CHECK-DAG: [[GTEF2:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} get-tuple-element([[FUSION1]]), index=1
; CHECK-DAG: [[GTE4:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=1
; CHECK-DAG: [[GTE4_BITCAST:%[^ ]+]] = f32[4]{0} bitcast([[GTE4]])
; CHECK-DAG: [[GTE5:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=2
; CHECK-DAG: [[GTE5_BITCAST:%[^ ]+]] = f32[4]{0} bitcast([[GTE5]])
; CHECK-DAG: ROOT [[OUT:%[^ ]+]] = (f32[2,4,6,8]{3,2,1,0}, f32[2,4,6,8]{3,2,1,0}, f32[4]{0}, f32[4]{0}) tuple([[GTEF1]], [[GTEF2]], [[GTE4_BITCAST]], [[GTE5_BITCAST]])
)";
TestNorm(hlo_text, optimized_hlo);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cudnn_norm_rewriter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cudnn_norm_rewriter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0c6af5f6-a3fb-4656-a51c-7ca0c76e6dbc | cpp | google/arolla | factory_operators | arolla/expr/operators/factory_operators.cc | arolla/expr/operators/factory_operators_test.cc | #include "arolla/expr/operators/factory_operators.h"
#include <cstdint>
#include <memory>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/standard_type_properties/properties.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr_operators {
namespace {
using ::arolla::expr::CallOp;
using ::arolla::expr::ExprNodePtr;
using ::arolla::expr::ExprOperatorPtr;
using ::arolla::expr::ExprOperatorSignature;
using ::arolla::expr::Literal;
using NarrowestNumericType = int32_t;
class EmptyLikeOp final : public expr::BasicExprOperator {
public:
EmptyLikeOp()
: BasicExprOperator(
"core.empty_like", ExprOperatorSignature{{"target"}},
"Creates an empty value with shape and (optional) type like "
"target.",
FingerprintHasher("arolla::expr_operators::EmptyLikeOp").Finish()) {
}
absl::StatusOr<expr::ExprNodePtr> ToLowerLevel(
const expr::ExprNodePtr& node) const final {
RETURN_IF_ERROR(ValidateNodeDepsCount(*node));
auto target_qtype = node->node_deps()[0]->qtype();
ASSIGN_OR_RETURN(auto scalar_qtype, GetScalarQType(target_qtype));
ASSIGN_OR_RETURN(auto optional_scalar_qtype, ToOptionalQType(scalar_qtype));
ASSIGN_OR_RETURN(auto missing, CreateMissingValue(optional_scalar_qtype));
return CallOp("core.const_like", {node->node_deps()[0], Literal(missing)});
}
absl::StatusOr<QTypePtr> GetOutputQType(
absl::Span<const QTypePtr> input_qtypes) const final {
return ToOptionalLikeQType(input_qtypes[0]);
}
};
}
absl::StatusOr<ExprOperatorPtr> MakeEmptyLikeOp() {
return std::make_shared<EmptyLikeOp>();
}
} | #include <optional>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status_matchers.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype_traits.h"
namespace arolla::expr_operators {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::arolla::expr::CallOp;
using ::arolla::expr::ExprNodePtr;
using ::arolla::expr::Leaf;
using ::arolla::expr::Literal;
using ::arolla::testing::EqualsExpr;
using ::arolla::testing::WithQTypeAnnotation;
using ::testing::Eq;
TEST(FactoryTest, EmptyLike) {
ASSERT_OK_AND_ASSIGN(auto scalar_leaf,
WithQTypeAnnotation(Leaf("scalar"), GetQType<float>()));
ASSERT_OK_AND_ASSIGN(auto empty_like_scalar,
CallOp("core.empty_like", {scalar_leaf}));
EXPECT_THAT(empty_like_scalar->qtype(), Eq(GetOptionalQType<float>()));
EXPECT_THAT(
ToLowerNode(empty_like_scalar),
IsOkAndHolds(EqualsExpr(
CallOp("core.const_like",
{scalar_leaf, Literal<OptionalValue<float>>(std::nullopt)}))));
ASSERT_OK_AND_ASSIGN(
auto array_leaf,
WithQTypeAnnotation(Leaf("array"), GetDenseArrayQType<float>()));
ASSERT_OK_AND_ASSIGN(auto empty_like_array,
CallOp("core.empty_like", {array_leaf}));
EXPECT_THAT(empty_like_array->qtype(), Eq(GetDenseArrayQType<float>()));
EXPECT_THAT(ToLowerNode(empty_like_array),
IsOkAndHolds(EqualsExpr(CallOp(
"core.const_like",
{array_leaf, Literal<OptionalValue<float>>(std::nullopt)}))));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operators/factory_operators.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operators/factory_operators_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
fb20ada8-b714-43c8-8b83-15f3843d3fde | cpp | google/tsl | gcs_dns_cache | tsl/platform/cloud/gcs_dns_cache.cc | tsl/platform/cloud/gcs_dns_cache_test.cc | #include "tsl/platform/cloud/gcs_dns_cache.h"
#include <cstring>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/retrying_utils.h"
#include "tsl/platform/status.h"
#ifndef _WIN32
#include <arpa/inet.h>
#include <netdb.h>
#include <netinet/in.h>
#include <sys/socket.h>
#else
#include <Windows.h>
#include <winsock2.h>
#include <ws2tcpip.h>
#endif
#include <sys/types.h>
namespace tsl {
namespace {
const std::vector<string>& kCachedDomainNames =
*new std::vector<string>{"www.googleapis.com", "storage.googleapis.com"};
inline void print_getaddrinfo_error(const string& name,
absl::Status return_status) {
LOG(ERROR) << "Error resolving " << name << ": " << return_status;
}
template <typename T>
const T& SelectRandomItemUniform(std::default_random_engine* random,
const std::vector<T>& items) {
CHECK_GT(items.size(), 0);
std::uniform_int_distribution<size_t> distribution(0u, items.size() - 1u);
size_t choice_index = distribution(*random);
return items[choice_index];
}
}
GcsDnsCache::GcsDnsCache(Env* env, int64_t refresh_rate_secs)
: env_(env), refresh_rate_secs_(refresh_rate_secs) {}
void GcsDnsCache::AnnotateRequest(HttpRequest* request) {
mutex_lock l(mu_);
if (!started_) {
VLOG(1) << "Starting GCS DNS cache.";
DCHECK(!worker_) << "Worker thread already exists!";
addresses_ = ResolveNames(kCachedDomainNames);
worker_.reset(env_->StartThread({}, "gcs_dns_worker",
[this]() { return WorkerThread(); }));
started_ = true;
}
CHECK_EQ(kCachedDomainNames.size(), addresses_.size());
for (size_t i = 0; i < kCachedDomainNames.size(); ++i) {
const string& name = kCachedDomainNames[i];
const std::vector<string>& addresses = addresses_[i];
if (!addresses.empty()) {
const string& chosen_address =
SelectRandomItemUniform(&random_, addresses);
request->AddResolveOverride(name, 443, chosen_address);
VLOG(1) << "Annotated DNS mapping: " << name << " --> " << chosen_address;
} else {
LOG(WARNING) << "No IP addresses available for " << name;
}
}
}
std::vector<string> GcsDnsCache::ResolveName(const string& name) {
VLOG(1) << "Resolving DNS name: " << name;
addrinfo hints;
memset(&hints, 0, sizeof(hints));
hints.ai_family = AF_INET;
hints.ai_socktype = SOCK_STREAM;
addrinfo* result = nullptr;
RetryConfig retryConfig(
5000,
50 * 1000 * 5000,
5);
const absl::Status getaddrinfo_status = RetryingUtils::CallWithRetries(
[&name, &hints, &result]() {
int return_code = getaddrinfo(name.c_str(), nullptr, &hints, &result);
absl::Status return_status;
switch (return_code) {
case 0:
return_status = absl::OkStatus();
break;
#ifndef _WIN32
case EAI_ADDRFAMILY:
case EAI_SERVICE:
case EAI_SOCKTYPE:
case EAI_NONAME:
return_status = absl::FailedPreconditionError(
absl::StrCat("System in invalid state for getaddrinfo call: ",
gai_strerror(return_code)));
break;
case EAI_AGAIN:
case EAI_NODATA:
return_status = absl::UnavailableError(absl::StrCat(
"Resolving ", name, " is temporarily unavailable"));
break;
case EAI_BADFLAGS:
case EAI_FAMILY:
return_status = absl::InvalidArgumentError(absl::StrCat(
"Bad arguments for getaddrinfo: ", gai_strerror(return_code)));
break;
case EAI_FAIL:
return_status = absl::NotFoundError(
absl::StrCat("Permanent failure resolving ", name, ": ",
gai_strerror(return_code)));
break;
case EAI_MEMORY:
return_status = absl::ResourceExhaustedError("Out of memory");
break;
case EAI_SYSTEM:
default:
return_status = absl::UnknownError(strerror(return_code));
#else
case WSATYPE_NOT_FOUND:
case WSAESOCKTNOSUPPORT:
case WSAHOST_NOT_FOUND:
return_status = absl::FailedPreconditionError(
absl::StrCat("System in invalid state for getaddrinfo call: ",
gai_strerror(return_code)));
break;
case WSATRY_AGAIN:
return_status = absl::UnavailableError(absl::StrCat(
"Resolving ", name, " is temporarily unavailable"));
break;
case WSAEINVAL:
case WSAEAFNOSUPPORT:
return_status = absl::InvalidArgumentError(absl::StrCat(
"Bad arguments for getaddrinfo: ", gai_strerror(return_code)));
break;
case WSANO_RECOVERY:
return_status = absl::NotFoundError(
absl::StrCat("Permanent failure resolving ", name, ": ",
gai_strerror(return_code)));
break;
case WSA_NOT_ENOUGH_MEMORY:
return_status = absl::ResourceExhaustedError("Out of memory");
break;
default:
return_status = absl::UnknownError(strerror(return_code));
#endif
}
return absl::Status(return_status);
},
retryConfig);
std::vector<string> output;
if (getaddrinfo_status.ok()) {
for (const addrinfo* i = result; i != nullptr; i = i->ai_next) {
if (i->ai_family != AF_INET || i->ai_addr->sa_family != AF_INET) {
LOG(WARNING) << "Non-IPv4 address returned. ai_family: " << i->ai_family
<< ". sa_family: " << i->ai_addr->sa_family << ".";
continue;
}
char buf[INET_ADDRSTRLEN];
void* address_ptr =
&(reinterpret_cast<sockaddr_in*>(i->ai_addr)->sin_addr);
const char* formatted = nullptr;
if ((formatted = inet_ntop(i->ai_addr->sa_family, address_ptr, buf,
INET_ADDRSTRLEN)) == nullptr) {
LOG(ERROR) << "Error converting response to IP address for " << name
<< ": " << strerror(errno);
} else {
output.emplace_back(buf);
VLOG(1) << "... address: " << buf;
}
}
} else {
print_getaddrinfo_error(name, getaddrinfo_status);
}
if (result != nullptr) {
freeaddrinfo(result);
}
return output;
}
std::vector<std::vector<string>> GcsDnsCache::ResolveNames(
const std::vector<string>& names) {
std::vector<std::vector<string>> all_addresses;
all_addresses.reserve(names.size());
for (const string& name : names) {
all_addresses.push_back(ResolveName(name));
}
return all_addresses;
}
void GcsDnsCache::WorkerThread() {
while (true) {
{
mutex_lock l(mu_);
if (cancelled_) return;
cond_var_.wait_for(l, std::chrono::seconds(refresh_rate_secs_));
if (cancelled_) return;
}
auto new_addresses = ResolveNames(kCachedDomainNames);
{
mutex_lock l(mu_);
addresses_.swap(new_addresses);
}
}
}
} | #include "tsl/platform/cloud/gcs_dns_cache.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/test.h"
namespace tsl {
class TestHttpRequest : public HttpRequest {
public:
void SetUri(const string& uri) override {}
void SetRange(uint64 start, uint64 end) override {}
void AddHeader(const string& name, const string& value) override {}
void AddResolveOverride(const string& hostname, int64_t port,
const string& ip_addr) override {
EXPECT_EQ(port, 443) << "Unexpected port set for hostname: " << hostname;
auto itr = resolve_overrides_.find(hostname);
EXPECT_EQ(itr, resolve_overrides_.end())
<< "Hostname " << hostname << "already in map: " << itr->second;
resolve_overrides_.insert(
std::map<string, string>::value_type(hostname, ip_addr));
}
void AddAuthBearerHeader(const string& auth_token) override {}
void SetRequestStats(HttpRequest::RequestStats* stats) override {}
void SetDeleteRequest() override {}
absl::Status SetPutFromFile(const string& body_filepath,
size_t offset) override {
return absl::OkStatus();
}
void SetPutEmptyBody() override {}
void SetPostFromBuffer(const char* buffer, size_t size) override {}
void SetPostEmptyBody() override {}
void SetResultBuffer(std::vector<char>* out_buffer) override {}
void SetResultBufferDirect(char* buffer, size_t size) override {}
size_t GetResultBufferDirectBytesTransferred() override { return 0; }
string GetResponseHeader(const string& name) const override { return ""; }
uint64 GetResponseCode() const override { return 0; }
absl::Status Send() override { return absl::OkStatus(); }
string EscapeString(const string& str) override { return ""; }
void SetTimeouts(uint32 connection, uint32 inactivity,
uint32 total) override {}
std::map<string, string> resolve_overrides_;
};
class GcsDnsCacheTest : public ::testing::Test {
protected:
void ResolveNameTest() {
auto response = GcsDnsCache::ResolveName("www.googleapis.com");
EXPECT_LT(1, response.size()) << absl::StrJoin(response, ", ");
}
void AnnotateRequestTest() {
GcsDnsCache d;
{
mutex_lock l(d.mu_);
d.started_ = true;
d.addresses_ = {{"192.168.1.1"}, {"172.134.1.1"}};
}
TestHttpRequest req;
d.AnnotateRequest(&req);
EXPECT_EQ("192.168.1.1", req.resolve_overrides_["www.googleapis.com"]);
EXPECT_EQ("172.134.1.1", req.resolve_overrides_["storage.googleapis.com"]);
}
void SuccessfulCleanupTest() {
GcsDnsCache d;
TestHttpRequest req;
d.AnnotateRequest(&req);
}
};
TEST_F(GcsDnsCacheTest, AnnotateRequest) { AnnotateRequestTest(); }
TEST_F(GcsDnsCacheTest, SuccessfulCleanup) { SuccessfulCleanupTest(); }
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/cloud/gcs_dns_cache.cc | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/cloud/gcs_dns_cache_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
b7d6da6e-3bdf-40bb-8cec-56ffbc861a17 | cpp | tensorflow/tensorflow | partially_decluster_pass | tensorflow/compiler/jit/partially_decluster_pass.cc | tensorflow/compiler/jit/partially_decluster_pass_test.cc | #include "tensorflow/compiler/jit/partially_decluster_pass.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/jit/device_util.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "tensorflow/compiler/tf2xla/const_analysis.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/memory_types.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
bool NotBackedge(const Edge& edge) { return !edge.src()->IsNextIteration(); }
namespace reduce_device_to_host_copies {
Status FindNodesToDecluster(const Graph& graph,
absl::flat_hash_set<Node*>* result,
absl::Span<Node* const> post_order) {
MemoryTypeVector input_mtypes, output_mtypes;
for (Node* n : post_order) {
std::optional<absl::string_view> from_cluster = GetXlaClusterForNode(*n);
if (!from_cluster) {
continue;
}
if (IsShapeConsumerOp(*n)) {
continue;
}
if (HasResourceInputOrOutput(*n)) {
continue;
}
DeviceType device_type("");
TF_RETURN_IF_ERROR(
DeviceNameToDeviceType(n->assigned_device_name(), &device_type));
TF_RETURN_IF_ERROR(MemoryTypesForNode(graph.op_registry(), device_type,
n->def(), &input_mtypes,
&output_mtypes));
for (const Edge* e : n->out_edges()) {
Node* dst = e->dst();
if (e->IsControlEdge()) {
continue;
}
bool edge_incurs_extra_device_to_host_copy;
if (output_mtypes[e->src_output()] == DEVICE_MEMORY) {
edge_incurs_extra_device_to_host_copy = false;
} else {
MemoryTypeVector dst_input_mtypes, dst_output_mtypes;
DeviceType dst_device_type("");
TF_RETURN_IF_ERROR(DeviceNameToDeviceType(dst->assigned_device_name(),
&dst_device_type));
TF_RETURN_IF_ERROR(MemoryTypesForNode(graph.op_registry(), device_type,
dst->def(), &dst_input_mtypes,
&dst_output_mtypes));
edge_incurs_extra_device_to_host_copy =
dst_input_mtypes[e->dst_input()] == HOST_MEMORY;
}
if (!edge_incurs_extra_device_to_host_copy) {
continue;
}
std::optional<absl::string_view> dst_cluster =
result->count(dst) ? std::nullopt : GetXlaClusterForNode(*dst);
if (from_cluster != dst_cluster) {
CHECK(result->insert(n).second);
break;
}
}
}
return absl::OkStatus();
}
Status PartiallyDeclusterNode(Graph* graph, Node* n) {
absl::string_view cluster_name = *GetXlaClusterForNode(*n);
absl::InlinedVector<const Edge*, 6> out_edges_to_clone;
for (const Edge* out_edge : n->out_edges()) {
if (out_edge->IsControlEdge()) {
continue;
}
Node* dst = out_edge->dst();
std::optional<absl::string_view> dst_cluster_name =
GetXlaClusterForNode(*dst);
if (dst_cluster_name != cluster_name) {
out_edges_to_clone.push_back(out_edge);
}
}
CHECK(!out_edges_to_clone.empty()) << n->DebugString();
NodeDef ndef = n->def();
ndef.set_name(absl::StrCat(n->name(), "/declustered"));
MergeDebugInfo(NodeDebugInfo(n->def()), &ndef);
RemoveFromXlaCluster(&ndef);
TF_ASSIGN_OR_RETURN(Node * cloned_node, graph->AddNode(ndef));
cloned_node->set_assigned_device_name(n->assigned_device_name());
for (const Edge* in_edge : n->in_edges()) {
graph->AddEdge(in_edge->src(), in_edge->src_output(), cloned_node,
in_edge->dst_input());
}
for (const Edge* out_edge_to_clone : out_edges_to_clone) {
graph->AddEdge(cloned_node, out_edge_to_clone->src_output(),
out_edge_to_clone->dst(), out_edge_to_clone->dst_input());
graph->RemoveEdge(out_edge_to_clone);
}
if (n->out_edges().empty()) {
graph->RemoveNode(n);
}
return absl::OkStatus();
}
Status PartiallyDeclusterGraph(Graph* graph) {
std::vector<Node*> post_order;
GetPostOrder(*graph, &post_order, NodeComparatorName(),
NotBackedge);
absl::flat_hash_set<Node*> nodes_to_partially_decluster;
TF_RETURN_IF_ERROR(
FindNodesToDecluster(*graph, &nodes_to_partially_decluster, post_order));
if (VLOG_IS_ON(3)) {
for (Node* n : post_order) {
if (nodes_to_partially_decluster.count(n)) {
VLOG(3) << n->DebugString();
}
}
}
for (Node* n : post_order) {
if (nodes_to_partially_decluster.count(n)) {
TF_RETURN_IF_ERROR(PartiallyDeclusterNode(graph, n));
}
}
post_order.clear();
GetPostOrder(*graph, &post_order, NodeComparatorName(),
NotBackedge);
nodes_to_partially_decluster.clear();
TF_RETURN_IF_ERROR(
FindNodesToDecluster(*graph, &nodes_to_partially_decluster, post_order));
CHECK(nodes_to_partially_decluster.empty());
return absl::OkStatus();
}
}
namespace reduce_recompilation {
bool IsIntraClusterEdge(const Edge& edge) {
std::optional<absl::string_view> src_cluster_name =
GetXlaClusterForNode(*edge.src());
std::optional<absl::string_view> dst_cluster_name =
GetXlaClusterForNode(*edge.dst());
return src_cluster_name.has_value() && src_cluster_name == dst_cluster_name;
}
bool IsMustCompileDevice(const DeviceType& device_type) {
const XlaOpRegistry::DeviceRegistration* registration;
  if (XlaOpRegistry::GetCompilationDevice(device_type.type(), &registration)) {
return registration->autoclustering_policy ==
XlaOpRegistry::AutoclusteringPolicy::kAlways;
}
return false;
}
Status MustCompileNode(const Node* n, bool* must_compile) {
DeviceType device_type("");
TF_RETURN_IF_ERROR(
DeviceNameToDeviceType(n->assigned_device_name(), &device_type));
if (IsMustCompileDevice(device_type)) {
*must_compile = true;
return absl::OkStatus();
}
*must_compile = !FindKernelDef(device_type, n->def(), nullptr, nullptr).ok();
return absl::OkStatus();
}
Status PartiallyDeclusterGraph(Graph* graph,
const FunctionLibraryDefinition* flib_def,
Env* env) {
std::vector<bool> compile_time_const_nodes(graph->num_node_ids());
OptimizerOptions opts;
auto pflr = std::make_unique<ProcessFunctionLibraryRuntime>(
nullptr, env, nullptr, TF_GRAPH_DEF_VERSION, flib_def, opts);
FunctionLibraryRuntime* lib_runtime =
pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice);
TF_RETURN_IF_ERROR(BackwardsConstAnalysis(*graph, nullptr,
&compile_time_const_nodes,
lib_runtime, IsIntraClusterEdge));
std::vector<Node*> rpo;
GetReversePostOrder(*graph, &rpo, NodeComparatorName(),
NotBackedge);
for (Node* n : rpo) {
if (!compile_time_const_nodes[n->id()]) {
continue;
}
absl::string_view cluster_name = *GetXlaClusterForNode(*n);
bool node_on_cluster_edge =
absl::c_all_of(n->in_edges(), [&](const Edge* e) {
std::optional<absl::string_view> incoming_cluster =
GetXlaClusterForNode(*e->src());
return !incoming_cluster || *incoming_cluster != cluster_name;
});
if (node_on_cluster_edge) {
bool must_compile_node;
TF_RETURN_IF_ERROR(MustCompileNode(n, &must_compile_node));
if (!must_compile_node) {
if (n->IsConstant()) {
for (auto it : n->in_edges()) {
if (!it->src()->assigned_device_name().empty() &&
it->src()->assigned_device_name() !=
n->assigned_device_name()) {
VLOG(3) << "Declustering Const with cross-device control input "
<< n->name();
RemoveFromXlaCluster(n);
break;
}
}
} else {
VLOG(3) << "Declustering must-be-constant node " << n->name();
RemoveFromXlaCluster(n);
}
}
}
}
return absl::OkStatus();
}
}
namespace decluster_root_shape_consumers {
Status PartiallyDeclusterGraph(Graph* graph) {
std::vector<Node*> reverse_post_order;
GetReversePostOrder(*graph, &reverse_post_order,
NodeComparatorName(),
NotBackedge);
for (Node* n : reverse_post_order) {
if (!IsShapeConsumerOp(*n)) {
continue;
}
std::optional<absl::string_view> cluster = GetXlaClusterForNode(*n);
if (!cluster.has_value()) {
continue;
}
auto input_belongs_to_same_cluster = [&](const Edge* e) {
return cluster == GetXlaClusterForNode(*e->src());
};
if (absl::c_any_of(n->in_edges(), input_belongs_to_same_cluster)) {
continue;
}
VLOG(2) << "Declustering " << n->name()
<< " because it is a root shape consumer";
RemoveFromXlaCluster(n);
}
return absl::OkStatus();
}
}
}
Status PartiallyDeclusterPass::Run(
const GraphOptimizationPassOptions& options) {
Graph* graph = options.graph->get();
TF_RETURN_IF_ERROR(
reduce_device_to_host_copies::PartiallyDeclusterGraph(graph));
if (options.flib_def == nullptr) {
return errors::InvalidArgument(
"GraphOptimizationPassOptions::flib_def must be set for "
"PartiallyDeclusterPass.");
}
if (options.session_options == nullptr ||
options.session_options->env == nullptr) {
return errors::InvalidArgument(
"GraphOptimizationPassOptions::session_options::env must be set for "
"PartiallyDeclusterPass.");
}
TF_RETURN_IF_ERROR(reduce_recompilation::PartiallyDeclusterGraph(
graph, options.flib_def, options.session_options->env));
TF_RETURN_IF_ERROR(
decluster_root_shape_consumers::PartiallyDeclusterGraph(graph));
return absl::OkStatus();
}
} | #include "tensorflow/compiler/jit/partially_decluster_pass.h"
#include "absl/memory/memory.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/jit/test_util.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "tensorflow/compiler/tf2xla/cc/ops/xla_ops.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_def_builder_util.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
REGISTER_OP("FakeNullary").Output("out: int32");
REGISTER_OP("FakeBinary")
.Input("host_in: int32")
.Input("device_in: int32")
.Output("host_out: int32")
.Output("device_out: int32");
REGISTER_OP("FakeResourceVar").Output("out: resource");
REGISTER_OP("FakeResourceUpdate")
.Input("in: resource")
.Output("out: resource")
.Output("something_else: int32");
class FakeBinaryOp : public OpKernel {
public:
explicit FakeBinaryOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* ctx) override { CHECK(false); }
};
class FakeResourceUpdateOp : public OpKernel {
public:
explicit FakeResourceUpdateOp(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* ctx) override { CHECK(false); }
};
REGISTER_KERNEL_BUILDER(Name("FakeBinary")
.Device(DEVICE_CPU)
.HostMemory("host_in")
.HostMemory("host_out"),
FakeBinaryOp);
REGISTER_KERNEL_BUILDER(
Name("FakeResourceUpdate").Device(DEVICE_CPU).HostMemory("something_else"),
FakeResourceUpdateOp);
Status PartiallyDecluster(std::unique_ptr<Graph>* graph) {
FixupSourceAndSinkEdges(graph->get());
static const char* kCpuDevice = "/job:localhost/replica:0/task:0/cpu:0";
for (Node* n : (*graph)->nodes()) {
if (n->assigned_device_name().empty()) {
n->set_assigned_device_name(kCpuDevice);
}
}
GraphOptimizationPassWrapper wrapper;
GraphOptimizationPassOptions opt_options =
wrapper.CreateGraphOptimizationPassOptions(graph);
PartiallyDeclusterPass pass;
return pass.Run(opt_options);
}
Node* FindNodeByName(const Graph& graph, const string& name) {
for (Node* node : graph.nodes()) {
if (node->name() == name) {
return node;
}
}
return nullptr;
}
bool GetInputsForNode(const Graph& graph, const string& node_name,
std::vector<Node*>* inputs) {
const Node* node = FindNodeByName(graph, node_name);
if (node == nullptr) {
return false;
}
for (const Edge* e : node->in_edges()) {
inputs->push_back(e->src());
}
std::sort(inputs->begin(), inputs->end(), NodeComparatorName());
return true;
}
TEST(PartiallyDeclusterPassTest, ClusteredAndUnclustered) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* input =
ops::SourceOp("FakeNullary", builder.opts().WithName("Input"));
Node* clustered_producer =
ops::BinaryOp("FakeBinary", input, input,
builder.opts().WithName("ClusteredProducer"));
ops::BinaryOp("FakeBinary", clustered_producer, input,
builder.opts().WithName("UnclusteredConsumer"));
Node* clustered_consumer =
ops::BinaryOp("FakeBinary", {clustered_producer, 1}, input,
builder.opts().WithName("ClusteredConsumer"));
clustered_producer->AddAttr(kXlaClusterAttr, "cluster_0");
clustered_consumer->AddAttr(kXlaClusterAttr, "cluster_0");
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(PartiallyDecluster(&graph));
std::vector<Node*> unclustered_consumer_inputs;
ASSERT_TRUE(GetInputsForNode(*graph, "UnclusteredConsumer",
&unclustered_consumer_inputs));
ASSERT_EQ(unclustered_consumer_inputs.size(), 2);
EXPECT_EQ(unclustered_consumer_inputs[0]->name(),
"ClusteredProducer/declustered");
EXPECT_EQ(unclustered_consumer_inputs[1]->name(), "Input");
std::vector<Node*> clustered_consumer_inputs;
ASSERT_TRUE(GetInputsForNode(*graph, "ClusteredConsumer",
&clustered_consumer_inputs));
ASSERT_EQ(clustered_consumer_inputs.size(), 2);
EXPECT_EQ(clustered_consumer_inputs[0]->name(), "ClusteredProducer");
EXPECT_EQ(clustered_consumer_inputs[1]->name(), "Input");
}
TEST(PartiallyDeclusterPassTest, DifferentClusters) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* input =
ops::SourceOp("FakeNullary", builder.opts().WithName("Input"));
Node* clustered_producer =
ops::BinaryOp("FakeBinary", input, input,
builder.opts().WithName("ClusteredProducer"));
Node* consumer_in_different_cluster =
ops::BinaryOp("FakeBinary", clustered_producer, input,
builder.opts().WithName("ConsumerInDifferentCluster"));
Node* clustered_consumer =
ops::BinaryOp("FakeBinary", input, {clustered_producer, 1},
builder.opts().WithName("ClusteredConsumer"));
clustered_producer->AddAttr(kXlaClusterAttr, "cluster_0");
clustered_consumer->AddAttr(kXlaClusterAttr, "cluster_0");
consumer_in_different_cluster->AddAttr(kXlaClusterAttr, "cluster_1");
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(PartiallyDecluster(&graph));
std::vector<Node*> inputs;
ASSERT_TRUE(GetInputsForNode(*graph, "ConsumerInDifferentCluster", &inputs));
ASSERT_EQ(inputs.size(), 2);
EXPECT_EQ(inputs[0]->name(), "ClusteredProducer/declustered");
EXPECT_EQ(inputs[1]->name(), "Input");
}
TEST(PartiallyDeclusterPassTest, DontDeclusterIfUserIsDeviceMem) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* input =
ops::SourceOp("FakeNullary", builder.opts().WithName("Input"));
Node* clustered_producer =
ops::BinaryOp("FakeBinary", input, input,
builder.opts().WithName("ClusteredProducer"));
Node* consumer_in_different_cluster =
ops::BinaryOp("FakeBinary", input, clustered_producer,
builder.opts().WithName("ConsumerInDifferentCluster"));
Node* clustered_consumer =
ops::BinaryOp("FakeBinary", input, {clustered_producer, 1},
builder.opts().WithName("ClusteredConsumer"));
clustered_producer->AddAttr(kXlaClusterAttr, "cluster_0");
clustered_consumer->AddAttr(kXlaClusterAttr, "cluster_0");
consumer_in_different_cluster->AddAttr(kXlaClusterAttr, "cluster_1");
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(PartiallyDecluster(&graph));
std::vector<Node*> inputs;
ASSERT_TRUE(GetInputsForNode(*graph, "ConsumerInDifferentCluster", &inputs));
ASSERT_EQ(inputs.size(), 2);
EXPECT_EQ(inputs[0]->name(), "ClusteredProducer");
EXPECT_EQ(inputs[1]->name(), "Input");
}
TEST(PartiallyDeclusterPassTest, DontDuplicateResourceVarOps) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* input =
ops::SourceOp("FakeNullary", builder.opts().WithName("Input"));
Node* resource_var = ops::SourceOp("FakeResourceVar",
builder.opts().WithName("ResourceVar"));
Node* clustered_producer =
ops::UnaryOp("FakeResourceUpdate", resource_var,
builder.opts().WithName("ClusteredProducer"));
Node* consumer_in_different_cluster =
ops::BinaryOp("FakeBinary", {clustered_producer, 1}, input,
builder.opts().WithName("ConsumerInDifferentCluster"));
Node* clustered_consumer =
ops::BinaryOp("FakeBinary", input, {clustered_producer, 1},
builder.opts().WithName("ClusteredConsumer"));
clustered_producer->AddAttr(kXlaClusterAttr, "cluster_0");
clustered_consumer->AddAttr(kXlaClusterAttr, "cluster_0");
consumer_in_different_cluster->AddAttr(kXlaClusterAttr, "cluster_1");
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(PartiallyDecluster(&graph));
std::vector<Node*> inputs;
ASSERT_TRUE(GetInputsForNode(*graph, "ConsumerInDifferentCluster", &inputs));
ASSERT_EQ(inputs.size(), 2);
EXPECT_EQ(inputs[0]->name(), "ClusteredProducer");
EXPECT_EQ(inputs[1]->name(), "Input");
}
TEST(PartiallyDeclusterPassTest, DeclusterDependentNodes) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* input =
ops::SourceOp("FakeNullary", builder.opts().WithName("Input"));
Node* clustered_producer_0 =
ops::BinaryOp("FakeBinary", input, input,
builder.opts().WithName("ClusteredProducer0"));
Node* clustered_producer_1 =
ops::BinaryOp("FakeBinary", clustered_producer_0, input,
builder.opts().WithName("ClusteredProducer1"));
ops::BinaryOp("FakeBinary", clustered_producer_1, input,
builder.opts().WithName("UnclusteredConsumer"));
Node* clustered_consumer =
ops::BinaryOp("FakeBinary", {clustered_producer_1, 1}, input,
builder.opts().WithName("ClusteredConsumer"));
clustered_producer_0->AddAttr(kXlaClusterAttr, "cluster_0");
clustered_producer_1->AddAttr(kXlaClusterAttr, "cluster_0");
clustered_consumer->AddAttr(kXlaClusterAttr, "cluster_0");
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(PartiallyDecluster(&graph));
std::vector<Node*> unclustered_consumer_inputs, declustered_producer_1_inputs;
ASSERT_TRUE(GetInputsForNode(*graph, "UnclusteredConsumer",
&unclustered_consumer_inputs));
ASSERT_EQ(unclustered_consumer_inputs.size(), 2);
EXPECT_EQ(unclustered_consumer_inputs[0]->name(),
"ClusteredProducer1/declustered");
EXPECT_EQ(unclustered_consumer_inputs[1]->name(), "Input");
ASSERT_TRUE(GetInputsForNode(*graph, "ClusteredProducer1/declustered",
&declustered_producer_1_inputs));
ASSERT_EQ(declustered_producer_1_inputs.size(), 2);
EXPECT_EQ(declustered_producer_1_inputs[0]->name(),
"ClusteredProducer0/declustered");
EXPECT_EQ(declustered_producer_1_inputs[1]->name(), "Input");
}
void AddToCluster(absl::Span<Node* const> nodes,
absl::string_view cluster_name) {
for (Node* n : nodes) {
n->AddAttr(kXlaClusterAttr, string(cluster_name));
}
}
TEST(PartiallyDeclusterPassTest, DeclusterMustBeConstantNodes) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output shape_a = ops::Placeholder(s.WithOpName("shape_a"), DT_INT32,
ops::Placeholder::Attrs{});
Output shape_b = ops::Placeholder(s.WithOpName("shape_b"), DT_INT32,
ops::Placeholder::Attrs{});
Output shape = ops::Add(s.WithOpName("shape"), shape_a, shape_b);
Output reshape_input = ops::Placeholder(s.WithOpName("reshape_input"),
DT_FLOAT, ops::Placeholder::Attrs{});
Output reshape = ops::Reshape(s.WithOpName("reshape"), reshape_input, shape);
AddToCluster({shape.node(), reshape.node()}, "cluster_0");
auto graph = std::make_unique<Graph>(OpRegistry::Global());
TF_ASSERT_OK(s.ToGraph(graph.get()));
TF_ASSERT_OK(PartiallyDecluster(&graph));
const Node* n = FindNodeByName(*graph, "shape");
ASSERT_NE(n, nullptr);
EXPECT_EQ(GetXlaClusterForNode(*n), std::nullopt);
}
TEST(PartiallyDeclusterPassTest, DeclusteringStopsAtMetadataOps) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input_a = ops::Placeholder(s.WithOpName("input_a"), DT_INT32,
ops::Placeholder::Attrs{});
Output input_b = ops::Placeholder(s.WithOpName("shape_b"), DT_FLOAT,
ops::Placeholder::Attrs{});
Output mul = ops::Mul(s.WithOpName("mul"), input_b, input_b);
Output shape_of_mul = ops::Shape(s.WithOpName("shape_of_mul"), mul);
Output shape = ops::Add(s.WithOpName("shape"), shape_of_mul, input_a);
Output reshape_input = ops::Placeholder(s.WithOpName("reshape_input"),
DT_FLOAT, ops::Placeholder::Attrs{});
Output reshape = ops::Reshape(s.WithOpName("reshape"), reshape_input, shape);
AddToCluster({mul.node(), shape_of_mul.node(), shape.node(), reshape.node()},
"cluster_0");
auto graph = std::make_unique<Graph>(OpRegistry::Global());
TF_ASSERT_OK(s.ToGraph(graph.get()));
TF_ASSERT_OK(PartiallyDecluster(&graph));
const Node* n = FindNodeByName(*graph, "shape");
ASSERT_NE(n, nullptr);
EXPECT_EQ(GetXlaClusterForNode(*n), "cluster_0");
}
TEST(PartiallyDeclusterPassTest, EdgeAcrossDifferentClusters) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output shape_a = ops::Placeholder(s.WithOpName("shape_a"), DT_INT32,
ops::Placeholder::Attrs{});
Output shape_b = ops::Placeholder(s.WithOpName("shape_b"), DT_INT32,
ops::Placeholder::Attrs{});
Output shape = ops::Add(s.WithOpName("shape"), shape_a, shape_b);
Output reshape_input = ops::Placeholder(s.WithOpName("reshape_input"),
DT_FLOAT, ops::Placeholder::Attrs{});
Output reshape = ops::Reshape(s.WithOpName("reshape"), reshape_input, shape);
AddToCluster({reshape.node()}, "cluster_0");
AddToCluster({shape.node()}, "cluster_1");
auto graph = std::make_unique<Graph>(OpRegistry::Global());
TF_ASSERT_OK(s.ToGraph(graph.get()));
TF_ASSERT_OK(PartiallyDecluster(&graph));
const Node* n = FindNodeByName(*graph, "shape");
ASSERT_NE(n, nullptr);
EXPECT_EQ(GetXlaClusterForNode(*n), "cluster_1");
}
TEST(PartiallyDeclusterPassTest, DontDeclusterXlaDeviceOps) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output shape_a = ops::Placeholder(s.WithOpName("shape_a"), DT_INT32,
ops::Placeholder::Attrs{});
Output shape_b = ops::Placeholder(s.WithOpName("shape_b"), DT_INT32,
ops::Placeholder::Attrs{});
Output shape = ops::Add(s.WithOpName("shape"), shape_a, shape_b);
Output reshape_input = ops::Placeholder(s.WithOpName("reshape_input"),
DT_FLOAT, ops::Placeholder::Attrs{});
Output reshape = ops::Reshape(s.WithOpName("reshape"), reshape_input, shape);
AddToCluster({shape.node(), reshape.node()}, "cluster_0");
auto graph = std::make_unique<Graph>(OpRegistry::Global());
TF_ASSERT_OK(s.ToGraph(graph.get()));
std::vector<std::unique_ptr<Device>> devices;
TF_ASSERT_OK(DeviceFactory::AddDevices(
SessionOptions(), "/job:localhost/replica:0/task:0", &devices));
Node* n = FindNodeByName(*graph, "shape");
ASSERT_NE(n, nullptr);
n->set_assigned_device_name(
"/job:localhost/replica:0/task:0/device:XLA_GPU:0");
TF_ASSERT_OK(PartiallyDecluster(&graph));
EXPECT_EQ(GetXlaClusterForNode(*n), "cluster_0");
}
TEST(PartiallyDeclusterPassTest, EliminatedUnusedNodes) {
const char* const kClusteredProducer0Name = "ClusteredProducer0";
const char* const kClusteredProducer1Name = "ClusteredProducer1";
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* input =
ops::SourceOp("FakeNullary", builder.opts().WithName("Input"));
Node* clustered_producer_0 =
ops::BinaryOp("FakeBinary", input, input,
builder.opts().WithName(kClusteredProducer0Name));
Node* clustered_producer_1 =
ops::BinaryOp("FakeBinary", clustered_producer_0, input,
builder.opts().WithName(kClusteredProducer1Name));
ops::BinaryOp("FakeBinary", clustered_producer_1, input,
builder.opts().WithName("UnclusteredConsumer"));
clustered_producer_0->AddAttr(kXlaClusterAttr, "cluster_0");
clustered_producer_1->AddAttr(kXlaClusterAttr, "cluster_0");
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(PartiallyDecluster(&graph));
EXPECT_EQ(FindNodeByName(*graph, kClusteredProducer0Name), nullptr);
EXPECT_EQ(FindNodeByName(*graph, kClusteredProducer1Name), nullptr);
}
TEST(PartiallyDeclusterPassTest, MetadataOpsDontStartClusters) {
tensorflow::Scope root = tensorflow::Scope::NewRootScope();
tensorflow::Scope in_cluster_and = root.WithXlaCluster("cluster_0");
Output a = ops::Placeholder(root.WithOpName("a"), DT_FLOAT);
Output b = ops::Shape(in_cluster_and.WithOpName("b"), a);
Output c = ops::Rank(in_cluster_and.WithOpName("c"), b);
Output d = ops::Size(in_cluster_and.WithOpName("d"), c);
(void)ops::Shape(in_cluster_and.WithOpName("e"), d);
auto graph = std::make_unique<Graph>(OpRegistry::Global());
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(PartiallyDecluster(&graph));
Node* n_b = FindNodeByName(*graph, "b");
ASSERT_NE(n_b, nullptr);
EXPECT_EQ(GetXlaClusterForNode(*n_b), std::nullopt);
Node* n_c = FindNodeByName(*graph, "c");
ASSERT_NE(n_c, nullptr);
EXPECT_EQ(GetXlaClusterForNode(*n_c), std::nullopt);
Node* n_d = FindNodeByName(*graph, "d");
ASSERT_NE(n_d, nullptr);
EXPECT_EQ(GetXlaClusterForNode(*n_d), std::nullopt);
Node* n_e = FindNodeByName(*graph, "e");
ASSERT_NE(n_e, nullptr);
EXPECT_EQ(GetXlaClusterForNode(*n_e), std::nullopt);
}
TEST(PartiallyDeclusterPassTest, MetaConsumersArentDeclustered) {
tensorflow::Scope root = tensorflow::Scope::NewRootScope();
tensorflow::Scope in_cluster_and = root.WithXlaCluster("cluster_0");
auto graph = std::make_unique<Graph>(OpRegistry::Global());
Output a = ops::Placeholder(root.WithOpName("a"), DT_FLOAT);
Output b = ops::Add(in_cluster_and.WithOpName("b"), a, a);
Output c = ops::Rank(in_cluster_and.WithOpName("c"), b);
Output e;
TF_ASSERT_OK(
CreateOutputWithScope("FakeBinary", {c, c}, root.WithOpName("e"), &e));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(PartiallyDecluster(&graph));
Node* n_b = FindNodeByName(*graph, "b");
ASSERT_NE(n_b, nullptr);
EXPECT_EQ(GetXlaClusterForNode(*n_b), "cluster_0");
Node* n_c = FindNodeByName(*graph, "c");
ASSERT_NE(n_c, nullptr);
EXPECT_EQ(GetXlaClusterForNode(*n_c), "cluster_0");
}
TEST(PartiallyDeclusterPassTest, ConstInputsToSliceArentDeclustered) {
tensorflow::Scope root = tensorflow::Scope::NewRootScope();
tensorflow::Scope in_cluster_and = root.WithXlaCluster("cluster_0");
auto graph = std::make_unique<Graph>(OpRegistry::Global());
Output a = ops::Placeholder(root.WithOpName("a"), DT_FLOAT,
ops::Placeholder::Attrs{{4}});
Output b = ops::Const(in_cluster_and.WithOpName("b"), {1});
Output c = ops::Const(in_cluster_and.WithOpName("c"), {2});
Output d = ops::Slice(in_cluster_and.WithOpName("d"), a, b, c);
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(PartiallyDecluster(&graph));
Node* n_b = FindNodeByName(*graph, "b");
ASSERT_NE(n_b, nullptr);
EXPECT_EQ(GetXlaClusterForNode(*n_b), "cluster_0");
Node* n_c = FindNodeByName(*graph, "c");
ASSERT_NE(n_c, nullptr);
EXPECT_EQ(GetXlaClusterForNode(*n_c), "cluster_0");
}
TEST(PartiallyDeclusterPassTest,
ConstInLoopWithCrossDeviceControlInputsAreDeclustered) {
tensorflow::Scope root = tensorflow::Scope::NewRootScope();
tensorflow::Scope in_cluster_and = root.WithXlaCluster("cluster_0");
auto graph = std::make_unique<Graph>(OpRegistry::Global());
Output a = ops::Placeholder(root.WithOpName("a"), DT_FLOAT,
ops::Placeholder::Attrs{{4}});
Output b = ops::Const(in_cluster_and.WithOpName("b"), {1});
Output c = ops::Const(in_cluster_and.WithOpName("c"), {2});
Output slice = ops::Slice(in_cluster_and.WithOpName("slice"), a, b, c);
Output cond = ops::Placeholder(root.WithOpName("cond"), DT_BOOL);
Output value = ops::Placeholder(root.WithOpName("value"), DT_FLOAT);
Output loop_cond = ops::LoopCond(root.WithOpName("loop_cond"), cond);
ops::Switch switch_node(root.WithOpName("switch"), value, loop_cond);
Output identity =
ops::Identity(root.WithOpName("identity"), switch_node.output_true);
root.graph()->AddControlEdge(identity.node(), b.node());
TF_ASSERT_OK(root.ToGraph(graph.get()));
std::vector<std::unique_ptr<Device>> devices;
TF_ASSERT_OK(DeviceFactory::AddDevices(
SessionOptions(), "/job:localhost/replica:0/task:0", &devices));
Node* identity_node = FindNodeByName(*graph, "identity");
ASSERT_NE(identity_node, nullptr);
identity_node->set_assigned_device_name(
"/job:localhost/replica:0/task:0/device:XLA_GPU:0");
TF_ASSERT_OK(PartiallyDecluster(&graph));
Node* n_b = FindNodeByName(*graph, "b");
ASSERT_NE(n_b, nullptr);
EXPECT_EQ(GetXlaClusterForNode(*n_b), std::nullopt);
Node* n_c = FindNodeByName(*graph, "c");
ASSERT_NE(n_c, nullptr);
EXPECT_EQ(GetXlaClusterForNode(*n_c), "cluster_0");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/partially_decluster_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/partially_decluster_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2a892248-e2f2-4e15-b37d-ddc783b8833a | cpp | abseil/abseil-cpp | nullability | absl/base/nullability.h | absl/base/nullability_test.cc | #ifndef ABSL_BASE_NULLABILITY_H_
#define ABSL_BASE_NULLABILITY_H_
#include "absl/base/config.h"
#include "absl/base/internal/nullability_impl.h"
#define ABSL_POINTERS_DEFAULT_NONNULL
namespace absl {
ABSL_NAMESPACE_BEGIN
template <typename T>
using Nonnull = nullability_internal::NonnullImpl<T>;
template <typename T>
using Nullable = nullability_internal::NullableImpl<T>;
template <typename T>
using NullabilityUnknown = nullability_internal::NullabilityUnknownImpl<T>;
ABSL_NAMESPACE_END
}
#if ABSL_HAVE_FEATURE(nullability_on_classes)
#define ABSL_NULLABILITY_COMPATIBLE _Nullable
#else
#define ABSL_NULLABILITY_COMPATIBLE
#endif
#endif | #include "absl/base/nullability.h"
#include <cassert>
#include <memory>
#include <utility>
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
namespace {
using ::absl::Nonnull;
using ::absl::NullabilityUnknown;
using ::absl::Nullable;
void funcWithNonnullArg(Nonnull<int*> ) {}
template <typename T>
void funcWithDeducedNonnullArg(Nonnull<T*> ) {}
TEST(NonnullTest, NonnullArgument) {
int var = 0;
funcWithNonnullArg(&var);
funcWithDeducedNonnullArg(&var);
}
Nonnull<int*> funcWithNonnullReturn() {
static int var = 0;
return &var;
}
TEST(NonnullTest, NonnullReturn) {
auto var = funcWithNonnullReturn();
(void)var;
}
TEST(PassThroughTest, PassesThroughRawPointerToInt) {
EXPECT_TRUE((std::is_same<Nonnull<int*>, int*>::value));
EXPECT_TRUE((std::is_same<Nullable<int*>, int*>::value));
EXPECT_TRUE((std::is_same<NullabilityUnknown<int*>, int*>::value));
}
TEST(PassThroughTest, PassesThroughRawPointerToVoid) {
EXPECT_TRUE((std::is_same<Nonnull<void*>, void*>::value));
EXPECT_TRUE((std::is_same<Nullable<void*>, void*>::value));
EXPECT_TRUE((std::is_same<NullabilityUnknown<void*>, void*>::value));
}
TEST(PassThroughTest, PassesThroughUniquePointerToInt) {
using T = std::unique_ptr<int>;
EXPECT_TRUE((std::is_same<Nonnull<T>, T>::value));
EXPECT_TRUE((std::is_same<Nullable<T>, T>::value));
EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value));
}
TEST(PassThroughTest, PassesThroughSharedPointerToInt) {
using T = std::shared_ptr<int>;
EXPECT_TRUE((std::is_same<Nonnull<T>, T>::value));
EXPECT_TRUE((std::is_same<Nullable<T>, T>::value));
EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value));
}
TEST(PassThroughTest, PassesThroughSharedPointerToVoid) {
using T = std::shared_ptr<void>;
EXPECT_TRUE((std::is_same<Nonnull<T>, T>::value));
EXPECT_TRUE((std::is_same<Nullable<T>, T>::value));
EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value));
}
TEST(PassThroughTest, PassesThroughPointerToMemberObject) {
using T = decltype(&std::pair<int, int>::first);
EXPECT_TRUE((std::is_same<Nonnull<T>, T>::value));
EXPECT_TRUE((std::is_same<Nullable<T>, T>::value));
EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value));
}
TEST(PassThroughTest, PassesThroughPointerToMemberFunction) {
using T = decltype(&std::unique_ptr<int>::reset);
EXPECT_TRUE((std::is_same<Nonnull<T>, T>::value));
EXPECT_TRUE((std::is_same<Nullable<T>, T>::value));
EXPECT_TRUE((std::is_same<NullabilityUnknown<T>, T>::value));
}
}
namespace util {
template <typename T>
bool DidAdlWin(T*) {
return true;
}
struct MakeAdlWin {};
}
namespace {
bool DidAdlWin(...) { return false; }
TEST(NullableAdlTest, NullableAddsNothingToArgumentDependentLookup) {
EXPECT_FALSE(DidAdlWin((int*)nullptr));
EXPECT_FALSE(DidAdlWin((Nullable<int*>)nullptr));
EXPECT_TRUE(DidAdlWin((util::MakeAdlWin*)nullptr));
EXPECT_TRUE(DidAdlWin((Nullable<util::MakeAdlWin*>)nullptr));
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/nullability.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/nullability_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
080bb713-9ec6-4917-8c00-a8802f2a3a3c | cpp | tensorflow/tensorflow | attribute_map | tensorflow/lite/core/async/interop/c/attribute_map.cc | tensorflow/lite/core/async/interop/c/attribute_map_test.cc | #include "tensorflow/lite/core/async/interop/c/attribute_map.h"
#include <cstddef>
#include <cstdint>
#include "tensorflow/lite/core/async/interop/attribute_map_internal.h"
#include "tensorflow/lite/core/async/interop/c/types.h"
extern "C" {
TfLiteAttributeMap* TfLiteAttributeMapCreate(TfLiteAttrMapType type) {
return new TfLiteAttributeMap(type);
}
void TfLiteAttributeMapDelete(TfLiteAttributeMap* attrs) { delete attrs; }
bool TfLiteAttributeMapIsBufferAttributeMap(const TfLiteAttributeMap* attrs) {
if (attrs) return attrs->impl.IsBufferAttributeMap();
return false;
}
bool TfLiteAttributeMapIsSyncAttributeMap(const TfLiteAttributeMap* attrs) {
if (attrs) return attrs->impl.IsSyncAttributeMap();
return false;
}
void TfLiteAttributeMapCopy(const TfLiteAttributeMap* src,
TfLiteAttributeMap* dst) {
if (src && dst) {
dst->impl = src->impl;
}
}
bool TfLiteAttributeMapGetSizeTBufferAttr(const TfLiteAttributeMap* attrs,
TfLiteBufferAttrKey key,
size_t* val) {
return attrs && attrs->impl.IsBufferAttributeMap() &&
attrs->impl.GetAttr(key, val);
}
bool TfLiteAttributeMapSetSizeTBufferAttr(TfLiteAttributeMap* attrs,
TfLiteBufferAttrKey key, size_t val) {
if (attrs && attrs->impl.IsBufferAttributeMap()) {
attrs->impl.SetAttr(key, val);
return true;
}
return false;
}
bool TfLiteAttributeMapGetStringBufferAttr(const TfLiteAttributeMap* attrs,
TfLiteBufferAttrKey key,
const char** val) {
return attrs && attrs->impl.IsBufferAttributeMap() &&
attrs->impl.GetAttr(key, val);
}
bool TfLiteAttributeMapSetStringBufferAttr(TfLiteAttributeMap* attrs,
TfLiteBufferAttrKey key,
const char* val) {
if (attrs && attrs->impl.IsBufferAttributeMap()) {
attrs->impl.SetAttr(key, val);
return true;
}
return false;
}
bool TfLiteAttributeMapGetBoolBufferAttr(const TfLiteAttributeMap* attrs,
TfLiteBufferAttrKey key, bool* val) {
return attrs && attrs->impl.IsBufferAttributeMap() &&
attrs->impl.GetAttr(key, val);
}
bool TfLiteAttributeMapSetBoolBufferAttr(TfLiteAttributeMap* attrs,
TfLiteBufferAttrKey key, bool val) {
if (attrs && attrs->impl.IsBufferAttributeMap()) {
attrs->impl.SetAttr(key, val);
return true;
}
return false;
}
bool TfLiteAttributeMapGetStringSyncAttr(const TfLiteAttributeMap* attrs,
TfLiteSynchronizationAttrKey key,
const char** val) {
return attrs && attrs->impl.IsSyncAttributeMap() &&
attrs->impl.GetAttr(key, val);
}
bool TfLiteAttributeMapSetStringSyncAttr(TfLiteAttributeMap* attrs,
TfLiteSynchronizationAttrKey key,
const char* val) {
if (attrs && attrs->impl.IsSyncAttributeMap()) {
attrs->impl.SetAttr(key, val);
return true;
}
return false;
}
#define DEFINE_ATTR_MAP_ACCESSOR(type, type_name) \
bool TfLiteAttributeMapGet##type_name##Attr(const TfLiteAttributeMap* attrs, \
uint32_t key, type* val) { \
return attrs ? attrs->impl.GetAttr(static_cast<TfLiteBufferAttrKey>(key), \
val) \
: false; \
} \
void TfLiteAttributeMapSet##type_name##Attr(TfLiteAttributeMap* attrs, \
uint32_t key, type val) { \
if (attrs) { \
attrs->impl.SetAttr(static_cast<TfLiteBufferAttrKey>(key), val); \
} \
} \
bool TfLiteAttributeMapGetCustom##type_name##Attr( \
const TfLiteAttributeMap* attrs, const char* key, type* val) { \
return attrs ? attrs->impl.GetCustomAttr(key, val) : false; \
} \
void TfLiteAttributeMapSetCustom##type_name##Attr( \
TfLiteAttributeMap* attrs, const char* key, type val) { \
if (attrs) { \
attrs->impl.SetCustomAttr(key, val); \
} \
}
DEFINE_ATTR_MAP_ACCESSOR(int, Int);
DEFINE_ATTR_MAP_ACCESSOR(size_t, SizeT);
DEFINE_ATTR_MAP_ACCESSOR(const char*, String);
DEFINE_ATTR_MAP_ACCESSOR(bool, Bool);
#undef DEFINE_ATTR_MAP_ACCESSOR
} | #include "tensorflow/lite/core/async/interop/c/attribute_map.h"
#include <cstddef>
#include <cstdint>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/async/interop/c/types.h"
namespace {
TEST(AttributeMapTest, AttributeMapCreateTypeCheckTest) {
{
auto* attr = TfLiteAttributeMapCreate(kTfLiteAttrMapTypeBuffer);
EXPECT_TRUE(TfLiteAttributeMapIsBufferAttributeMap(attr));
EXPECT_FALSE(TfLiteAttributeMapIsSyncAttributeMap(attr));
TfLiteAttributeMapDelete(attr);
}
{
auto* attr = TfLiteAttributeMapCreate(kTfLiteAttrMapTypeSync);
EXPECT_FALSE(TfLiteAttributeMapIsBufferAttributeMap(attr));
EXPECT_TRUE(TfLiteAttributeMapIsSyncAttributeMap(attr));
TfLiteAttributeMapDelete(attr);
}
}
TEST(AttributeMapTest, AttributeMapAccessor) {
auto* attr = TfLiteAttributeMapCreate(kTfLiteAttrMapTypeBuffer);
{
TfLiteAttributeMapSetSizeTBufferAttr(attr, kTfLiteBufferAttrKeyAlignment,
42);
size_t result = 0;
EXPECT_TRUE(TfLiteAttributeMapGetSizeTBufferAttr(
attr, kTfLiteBufferAttrKeyAlignment, &result));
EXPECT_EQ(42, result);
EXPECT_FALSE(TfLiteAttributeMapGetSizeTBufferAttr(
attr, kTfLiteBufferAttrKeyOffset, &result));
}
{
const char str[] = "some string";
TfLiteAttributeMapSetStringBufferAttr(
attr, kTfLiteBufferAttrKeyResourceTypeName, str);
const char* result = nullptr;
EXPECT_TRUE(TfLiteAttributeMapGetStringBufferAttr(
attr, kTfLiteBufferAttrKeyResourceTypeName, &result));
EXPECT_EQ(str, result);
EXPECT_FALSE(TfLiteAttributeMapGetStringBufferAttr(
attr, kTfLiteBufferAttrKeyAlignment, &result));
EXPECT_FALSE(TfLiteAttributeMapSetStringSyncAttr(
attr, kTfLiteSynchronizationAttrKeyObjectTypeName, str));
EXPECT_FALSE(TfLiteAttributeMapGetStringSyncAttr(
attr, kTfLiteSynchronizationAttrKeyObjectTypeName, &result));
}
TfLiteAttributeMapDelete(attr);
}
TEST(AttributeMapTest, UnCheckedAttributeMapAccessor) {
auto* attr = TfLiteAttributeMapCreate(kTfLiteAttrMapTypeBuffer);
{
TfLiteAttributeMapSetSizeTAttr(attr, 1, 42);
size_t result = 0;
EXPECT_TRUE(TfLiteAttributeMapGetSizeTAttr(attr, 1, &result));
EXPECT_EQ(42, result);
EXPECT_FALSE(TfLiteAttributeMapGetSizeTAttr(attr, 2, &result));
}
{
TfLiteAttributeMapSetIntAttr(attr, 3, 21);
int result = 0;
EXPECT_TRUE(TfLiteAttributeMapGetIntAttr(attr, 3, &result));
EXPECT_EQ(21, result);
EXPECT_FALSE(TfLiteAttributeMapGetIntAttr(attr, 4, &result));
}
{
const char str[] = "some string";
TfLiteAttributeMapSetStringAttr(attr, 1, str);
const char* result = nullptr;
EXPECT_TRUE(TfLiteAttributeMapGetStringAttr(attr, 1, &result));
EXPECT_EQ(str, result);
EXPECT_FALSE(TfLiteAttributeMapGetStringAttr(attr, 2, &result));
}
{
TfLiteAttributeMapSetBoolAttr(
attr, kTfLiteBufferAttrKeyCurrentHostCoherencyState, true);
bool result = false;
EXPECT_TRUE(TfLiteAttributeMapGetBoolAttr(
attr, kTfLiteBufferAttrKeyCurrentHostCoherencyState, &result));
EXPECT_TRUE(result);
EXPECT_FALSE(TfLiteAttributeMapGetBoolAttr(
attr, kTfLiteBufferAttrKeyPreferredHostCoherencyState, &result));
}
TfLiteAttributeMapDelete(attr);
}
TEST(AttributeMapTest, UnCheckedAttributeMapCustomAccessor) {
auto* attr = TfLiteAttributeMapCreate(kTfLiteAttrMapTypeBuffer);
{
TfLiteAttributeMapSetCustomSizeTAttr(attr, "foo", 42);
size_t result = 0;
EXPECT_TRUE(TfLiteAttributeMapGetCustomSizeTAttr(attr, "foo", &result));
EXPECT_EQ(42, result);
EXPECT_FALSE(TfLiteAttributeMapGetCustomSizeTAttr(attr, "bar", &result));
}
{
TfLiteAttributeMapSetCustomIntAttr(attr, "baz", 21);
int result = 0;
EXPECT_TRUE(TfLiteAttributeMapGetCustomIntAttr(attr, "baz", &result));
EXPECT_EQ(21, result);
EXPECT_FALSE(TfLiteAttributeMapGetCustomIntAttr(attr, "quux", &result));
}
{
const char str[] = "some string";
TfLiteAttributeMapSetCustomStringAttr(attr, "foo", str);
const char* result = nullptr;
EXPECT_TRUE(TfLiteAttributeMapGetCustomStringAttr(attr, "foo", &result));
EXPECT_EQ(str, result);
EXPECT_FALSE(TfLiteAttributeMapGetCustomStringAttr(attr, "bar", &result));
}
{
TfLiteAttributeMapSetCustomBoolAttr(attr, "foo", true);
bool result = false;
EXPECT_TRUE(TfLiteAttributeMapGetCustomBoolAttr(attr, "foo", &result));
EXPECT_TRUE(result);
EXPECT_FALSE(TfLiteAttributeMapGetCustomBoolAttr(attr, "bar", &result));
}
TfLiteAttributeMapDelete(attr);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/async/interop/c/attribute_map.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/async/interop/c/attribute_map_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8eece88e-b26c-48bd-9e50-14e9a4efb88c | cpp | google/tensorstore | unique_with_intrusive_allocator | tensorstore/internal/unique_with_intrusive_allocator.h | tensorstore/internal/unique_with_intrusive_allocator_test.cc | #ifndef TENSORSTORE_INTERNAL_UNIQUE_WITH_INTRUSIVE_ALLOCATOR_H_
#define TENSORSTORE_INTERNAL_UNIQUE_WITH_INTRUSIVE_ALLOCATOR_H_
#include <memory>
#include <new>
#include <utility>
namespace tensorstore {
namespace internal {
template <typename T>
struct IntrusiveAllocatorDeleter {
void operator()(T* p) {
auto allocator = p->get_allocator();
typename std::allocator_traits<decltype(
allocator)>::template rebind_alloc<T>
rebound_allocator(std::move(allocator));
std::allocator_traits<decltype(rebound_allocator)>::destroy(
rebound_allocator, p);
std::allocator_traits<decltype(rebound_allocator)>::deallocate(
rebound_allocator, p, 1);
}
};
template <typename T, typename Allocator, typename... Arg>
std::unique_ptr<T, IntrusiveAllocatorDeleter<T>>
MakeUniqueWithIntrusiveAllocator(Allocator allocator, Arg&&... arg) {
using ReboundAllocator =
typename std::allocator_traits<Allocator>::template rebind_alloc<T>;
ReboundAllocator rebound_allocator(std::move(allocator));
auto temp_deleter = [&rebound_allocator](T* p) {
std::allocator_traits<ReboundAllocator>::deallocate(rebound_allocator, p,
1);
};
std::unique_ptr<T, decltype(temp_deleter)> temp_ptr(
std::allocator_traits<ReboundAllocator>::allocate(rebound_allocator, 1),
temp_deleter);
new (temp_ptr.get())
T(std::forward<Arg>(arg)..., std::move(rebound_allocator));
return std::unique_ptr<T, IntrusiveAllocatorDeleter<T>>(temp_ptr.release());
}
struct VirtualDestroyDeleter {
template <typename T>
void operator()(T* p) const {
p->Destroy();
}
};
template <typename Derived, typename IntrusiveBase>
class IntrusiveAllocatorBase : public IntrusiveBase {
public:
using IntrusiveBase::IntrusiveBase;
void Destroy() override {
IntrusiveAllocatorDeleter<Derived>()(static_cast<Derived*>(this));
}
};
template <typename T, typename Allocator, typename... Arg>
std::unique_ptr<T, VirtualDestroyDeleter>
MakeUniqueWithVirtualIntrusiveAllocator(Allocator allocator, Arg&&... arg) {
return std::unique_ptr<T, VirtualDestroyDeleter>(
MakeUniqueWithIntrusiveAllocator<T>(std::move(allocator),
std::forward<Arg>(arg)...)
.release());
}
}
}
#endif | #include "tensorstore/internal/unique_with_intrusive_allocator.h"
#include <cstddef>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "tensorstore/internal/arena.h"
namespace {
using ::tensorstore::internal::Arena;
using ::tensorstore::internal::ArenaAllocator;
using ::tensorstore::internal::IntrusiveAllocatorBase;
using ::tensorstore::internal::MakeUniqueWithIntrusiveAllocator;
using ::tensorstore::internal::MakeUniqueWithVirtualIntrusiveAllocator;
class Base {
public:
virtual void Destroy() = 0;
virtual ~Base() = default;
};
class Derived : public IntrusiveAllocatorBase<Derived, Base> {
public:
  Derived(ArenaAllocator<> allocator)
      : vec(100, allocator) {}
ArenaAllocator<> get_allocator() const { return vec.get_allocator(); }
std::vector<double, ArenaAllocator<double>> vec;
};
TEST(UniqueWithVirtualIntrusiveAllocatorTest, Basic) {
Arena arena;
std::unique_ptr<Base, tensorstore::internal::VirtualDestroyDeleter> ptr =
MakeUniqueWithVirtualIntrusiveAllocator<Derived>(
ArenaAllocator<>(&arena));
}
class Foo {
public:
using allocator_type = ArenaAllocator<int>;
Foo(size_t n, ArenaAllocator<int> allocator) : vec_(n, allocator) {}
allocator_type get_allocator() const { return vec_.get_allocator(); }
int operator()(int x) const { return vec_[x]; }
void operator()(int x, int y) { vec_[x] = y; }
private:
std::vector<int, allocator_type> vec_;
};
TEST(UniqueWithIntrusiveAllocatorTest, Basic) {
unsigned char buffer[200];
Arena arena(buffer);
auto ptr =
MakeUniqueWithIntrusiveAllocator<Foo>(ArenaAllocator<>(&arena), 10);
(*ptr)(2, 3);
EXPECT_EQ(3, (*ptr)(2));
EXPECT_EQ(3, (static_cast<const Foo&>(*ptr)(2)));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/unique_with_intrusive_allocator.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/unique_with_intrusive_allocator_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
4498a8c2-e52a-4d92-ab97-db20211d4a22 | cpp | tensorflow/tensorflow | custom_call_status | third_party/xla/xla/service/custom_call_status.cc | third_party/xla/xla/service/custom_call_status_test.cc | #include "xla/service/custom_call_status_internal.h"
namespace xla {
std::optional<absl::string_view> CustomCallStatusGetMessage(
const XlaCustomCallStatus* status) {
return status->message;
}
}
void XlaCustomCallStatusSetSuccess(XlaCustomCallStatus* status) {
status->message = std::nullopt;
}
void XlaCustomCallStatusSetFailure(XlaCustomCallStatus* status,
const char* message, size_t message_len) {
status->message = std::string(message, 0, message_len);
} | #include "xla/service/custom_call_status_internal.h"
#include "xla/service/custom_call_status_test_c_caller.h"
#include "tsl/platform/test.h"
TEST(XlaCustomCallStatusTest, DefaultIsSuccess) {
XlaCustomCallStatus status;
ASSERT_EQ(xla::CustomCallStatusGetMessage(&status), std::nullopt);
}
TEST(XlaCustomCallStatusTest, SetSuccess) {
XlaCustomCallStatus status;
XlaCustomCallStatusSetSuccess(&status);
ASSERT_EQ(xla::CustomCallStatusGetMessage(&status), std::nullopt);
}
TEST(XlaCustomCallStatusTest, SetSuccessAfterFailure) {
XlaCustomCallStatus status;
XlaCustomCallStatusSetFailure(&status, "error", 5);
XlaCustomCallStatusSetSuccess(&status);
ASSERT_EQ(xla::CustomCallStatusGetMessage(&status), std::nullopt);
}
TEST(XlaCustomCallStatusTest, SetFailure) {
XlaCustomCallStatus status;
XlaCustomCallStatusSetFailure(&status, "error", 5);
ASSERT_EQ(xla::CustomCallStatusGetMessage(&status), "error");
}
TEST(XlaCustomCallStatusTest, SetFailureAfterSuccess) {
XlaCustomCallStatus status;
XlaCustomCallStatusSetSuccess(&status);
XlaCustomCallStatusSetFailure(&status, "error", 5);
ASSERT_EQ(xla::CustomCallStatusGetMessage(&status), "error");
}
TEST(XlaCustomCallStatusTest, SetFailureTruncatesErrorAtGivenLength) {
XlaCustomCallStatus status;
XlaCustomCallStatusSetFailure(&status, "error", 4);
ASSERT_EQ(xla::CustomCallStatusGetMessage(&status), "erro");
}
TEST(XlaCustomCallStatusTest, SetFailureTruncatesErrorAtNullTerminator) {
XlaCustomCallStatus status;
XlaCustomCallStatusSetFailure(&status, "error", 100);
ASSERT_EQ(xla::CustomCallStatusGetMessage(&status), "error");
}
TEST(XlaCustomCallStatusTest, CSetSuccess) {
XlaCustomCallStatus status;
CSetSuccess(&status);
ASSERT_EQ(xla::CustomCallStatusGetMessage(&status), std::nullopt);
}
TEST(XlaCustomCallStatusTest, CSetFailure) {
XlaCustomCallStatus status;
CSetFailure(&status, "error", 5);
ASSERT_EQ(xla::CustomCallStatusGetMessage(&status), "error");
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/custom_call_status.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/custom_call_status_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b429e49a-77aa-4098-91af-c00083cc61fb | cpp | tensorflow/tensorflow | debug_stripper | tensorflow/core/grappler/optimizers/debug_stripper.cc | tensorflow/core/grappler/optimizers/debug_stripper_test.cc | #include "tensorflow/core/grappler/optimizers/debug_stripper.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
Status DebugStripper::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* output) {
bool can_optimize = false;
for (const NodeDef& node : item.graph.node()) {
if (IsAssert(node) || IsCheckNumerics(node) || IsPrint(node)) {
can_optimize = true;
break;
}
}
if (!can_optimize) {
return errors::Aborted("Nothing to do.");
}
*output = item.graph;
for (NodeDef& node : *output->mutable_node()) {
if (IsAssert(node) || node.op() == "PrintV2") {
node.set_op("NoOp");
EraseRegularNodeAttributes(&node);
for (string& inp : *node.mutable_input()) {
if (!IsControlInput(inp)) {
inp = AsControlDependency(NodeName(inp));
}
}
} else if (IsCheckNumerics(node) || node.op() == "Print") {
node.set_op("Identity");
protobuf::Map<string, AttrValue> new_attr;
if (node.attr().find("T") != node.attr().end()) {
new_attr.insert({"T", node.attr().at("T")});
}
node.mutable_attr()->swap(new_attr);
for (int i = 1, end = node.input_size(); i < end; ++i) {
if (!IsControlInput(node.input(i))) {
*node.mutable_input(i) = AsControlDependency(NodeName(node.input(i)));
}
}
}
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/optimizers/debug_stripper.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class DebugStripperTest : public GrapplerTest {};
TEST_F(DebugStripperTest, OutputEqualToInput) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({}));
Output y = ops::Placeholder(s, DT_FLOAT, ops::Placeholder::Shape({}));
Output add = ops::Add(s, x, y);
Output result = ops::Identity(s, add);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
DebugStripper optimizer;
GraphDef output;
EXPECT_EQ(optimizer.Optimize(nullptr, item, &output),
errors::Aborted("Nothing to do."));
}
TEST_F(DebugStripperTest, StripAssertOnTwoOutputs) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT,
ops::Placeholder::Shape({6}));
auto split =
ops::Split(s.WithOpName("split"), 0, input, 2);
Output x = split[0];
Output y = split[1];
Output ge = ops::GreaterEqual(s.WithOpName("GreaterEqual"), x, y);
auto assert = ops::Assert(s.WithOpName("Assert"), ge, {x, y});
Output add = ops::Add(
s.WithOpName("add").WithControlDependencies({assert.operation}), x, y);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
DebugStripper optimizer;
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
for (const string& input : node.input()) {
if (IsControlInput(input)) {
EXPECT_EQ(input.find(':'), -1);
}
}
}
}
TEST_F(DebugStripperTest, StripAssertFromGraph) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::Placeholder(s.WithOpName("x"), DT_FLOAT,
ops::Placeholder::Shape({}));
Output y = ops::Placeholder(s.WithOpName("y"), DT_FLOAT,
ops::Placeholder::Shape({}));
auto greaterequal = ops::GreaterEqual(s.WithOpName("GreaterEqual"), x, y);
auto assert = ops::Assert(s.WithOpName("Assert"), greaterequal, {x, y});
Output add = ops::Add(
s.WithOpName("z").WithControlDependencies({assert.operation}), x, y);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
DebugStripper optimizer;
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
int count = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "x") {
count++;
EXPECT_EQ("Placeholder", node.op());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "y") {
count++;
EXPECT_EQ("Placeholder", node.op());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "GreaterEqual") {
count++;
EXPECT_EQ("GreaterEqual", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("y", node.input(1));
} else if (node.name() == "Assert") {
count++;
EXPECT_EQ("NoOp", node.op());
EXPECT_EQ(3, node.input_size());
EXPECT_EQ("^GreaterEqual", node.input(0));
EXPECT_EQ("^x", node.input(1));
EXPECT_EQ("^y", node.input(2));
} else if (node.name() == "z") {
count++;
EXPECT_EQ("Add", node.op());
EXPECT_EQ(3, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("y", node.input(1));
EXPECT_EQ("^Assert", node.input(2));
}
}
EXPECT_EQ(5, count);
Tensor x_t(DT_FLOAT, TensorShape({}));
Tensor y_t(DT_FLOAT, TensorShape({}));
x_t.flat<float>()(0) = 1.0f;
y_t.flat<float>()(0) = 0.5f;
std::vector<Tensor> expected =
EvaluateNodes(item.graph, {"z"}, {{"x", x_t}, {"y", y_t}});
std::vector<Tensor> optimized =
EvaluateNodes(output, {"z"}, {{"x", x_t}, {"y", y_t}});
test::ExpectTensorEqual<float>(expected[0], optimized[0]);
}
TEST_F(DebugStripperTest, StripCheckNumericsFromGraph) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::Placeholder(s.WithOpName("x"), DT_FLOAT,
ops::Placeholder::Shape({}));
Output y = ops::Placeholder(s.WithOpName("y"), DT_FLOAT,
ops::Placeholder::Shape({}));
auto check1 = ops::CheckNumerics(s.WithOpName("CheckNumerics1"), x, "foo");
auto check2 = ops::CheckNumerics(s.WithOpName("CheckNumerics2"), y, "foo");
Output add = ops::Add(s.WithOpName("z"), check1, check2);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
DebugStripper optimizer;
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
int count = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "x") {
count++;
EXPECT_EQ("Placeholder", node.op());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "y") {
count++;
EXPECT_EQ("Placeholder", node.op());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "CheckNumerics1") {
count++;
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ(1, node.attr_size());
} else if (node.name() == "CheckNumerics2") {
count++;
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("y", node.input(0));
EXPECT_EQ(1, node.attr_size());
} else if (node.name() == "z") {
count++;
EXPECT_EQ("Add", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("CheckNumerics1", node.input(0));
EXPECT_EQ("CheckNumerics2", node.input(1));
}
}
EXPECT_EQ(5, count);
Tensor x_t(DT_FLOAT, TensorShape({}));
Tensor y_t(DT_FLOAT, TensorShape({}));
x_t.flat<float>()(0) = 1.0f;
y_t.flat<float>()(0) = 0.5f;
std::vector<Tensor> expected =
EvaluateNodes(item.graph, {"z"}, {{"x", x_t}, {"y", y_t}});
std::vector<Tensor> optimized =
EvaluateNodes(output, {"z"}, {{"x", x_t}, {"y", y_t}});
test::ExpectTensorEqual<float>(expected[0], optimized[0]);
}
TEST_F(DebugStripperTest, StripPrintFromGraph) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::Placeholder(s.WithOpName("x"), DT_FLOAT,
ops::Placeholder::Shape({}));
Output print = ops::Print(s.WithOpName("Print"), x, {x});
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
DebugStripper optimizer;
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
if (node.name() == "x") {
EXPECT_EQ("Placeholder", node.op());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "Print") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("^x", node.input(1));
EXPECT_EQ(1, node.attr_size());
}
}
EXPECT_EQ(2, output.node_size());
Tensor x_t(DT_FLOAT, TensorShape({}));
x_t.flat<float>()(0) = 1.0f;
std::vector<Tensor> expected =
EvaluateNodes(item.graph, {"Print"}, {{"x", x_t}});
std::vector<Tensor> optimized =
EvaluateNodes(output, {"Print"}, {{"x", x_t}});
test::ExpectTensorEqual<float>(expected[0], optimized[0]);
}
TEST_F(DebugStripperTest, StripPrintV2FromGraph) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output x = ops::Const(s.WithOpName("x"), string("Hello"), {});
Operation print = ops::PrintV2(s.WithOpName("PrintV2"), x);
Output y =
ops::Identity(s.WithOpName("y").WithControlDependencies({print}), x);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
DebugStripper optimizer;
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
if (node.name() == "x") {
EXPECT_EQ("Const", node.op());
EXPECT_EQ(0, node.input_size());
} else if (node.name() == "PrintV2") {
EXPECT_EQ("NoOp", node.op());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("^x", node.input(0));
EXPECT_EQ(0, node.attr_size());
} else if (node.name() == "y") {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("^PrintV2", node.input(1));
}
}
EXPECT_EQ(3, output.node_size());
Tensor expected = EvaluateNodes(item.graph, {"y"}, {})[0];
Tensor optimized = EvaluateNodes(output, {"y"}, {})[0];
EXPECT_EQ(expected.scalar<tstring>()(), optimized.scalar<tstring>()());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/debug_stripper.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/debug_stripper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d89b3608-8520-4260-8f56-7949b38f8dce | cpp | tensorflow/tensorflow | call | tensorflow/lite/experimental/acceleration/mini_benchmark/call.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/call_test.cc | #include <stddef.h>
#include <cstring>
#include <sstream>
#include <string>
#include <vector>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/call_register.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/op_macros.h"
namespace tflite {
namespace acceleration {
namespace ops {
namespace call_kernel {
namespace {
bool MatchDimensionsExceptBatchSize(TfLiteTensor* a, TfLiteTensor* b) {
if (a->dims->size != b->dims->size) {
return false;
}
for (int i = 1; i < a->dims->size; ++i) {
if (a->dims->data[i] != b->dims->data[i]) {
return false;
}
}
return true;
}
TfLiteStatus ValidateAndResizeInputsIfNeeded(TfLiteContext* context,
TfLiteNode* node,
Subgraph* subgraph,
int loop_count) {
TF_LITE_ENSURE_EQ(context, subgraph->inputs().size(), node->inputs->size);
for (int i = 0; i < node->inputs->size; ++i) {
TfLiteTensor* node_input = context->tensors + node->inputs->data[i];
TfLiteTensor* subgraph_input = subgraph->tensor(subgraph->inputs()[i]);
TF_LITE_ENSURE_TYPES_EQ(context, node_input->type, subgraph_input->type);
TF_LITE_ENSURE_MSG(
context, node_input->dims->size > 0,
"Dimensions of all of call node's inputs should be non-zero.");
TF_LITE_ENSURE_EQ(context, node_input->dims->data[0], loop_count);
if (!subgraph_input->dims->size) {
std::vector<int> new_dims;
new_dims.reserve(node_input->dims->size);
new_dims.push_back(1);
new_dims.insert(new_dims.end(), node_input->dims->data + 1,
node_input->dims->data + node_input->dims->size);
subgraph->ResizeInputTensor(subgraph->inputs()[i], new_dims);
} else {
if (!MatchDimensionsExceptBatchSize(node_input, subgraph_input)) {
std::stringstream node_input_dims, subgraph_input_dims;
for (int i = 0; i < node_input->dims->size; i++) {
node_input_dims << node_input->dims->data[i] << " ";
subgraph_input_dims << subgraph_input->dims->data[i] << " ";
}
TF_LITE_KERNEL_LOG(
context,
"%s:%d: All dimensions except the batch size should match for call "
"node and the subgraph to invoke (input tensor %s[ %s], subgraph "
"tensor %s[ %s])",
__FILE__, __LINE__, node_input->name, node_input_dims.str().c_str(),
subgraph_input->name, subgraph_input_dims.str().c_str());
return kTfLiteError;
}
TF_LITE_ENSURE_EQ(context, subgraph_input->dims->data[0], 1);
}
}
return kTfLiteOk;
}
TfLiteStatus ValidateAndResizeOutputs(TfLiteContext* context, TfLiteNode* node,
Subgraph* subgraph, int loop_count) {
TF_LITE_ENSURE_EQ(context, subgraph->outputs().size(), node->outputs->size);
for (int i = 0; i < node->outputs->size; ++i) {
const TfLiteTensor* subgraph_output =
subgraph->tensor(subgraph->outputs()[i]);
TfLiteTensor* node_output = context->tensors + node->outputs->data[i];
TF_LITE_ASSERT(subgraph_output->dims->size > 0);
TfLiteIntArray* new_dims_array = TfLiteIntArrayCopy(subgraph_output->dims);
new_dims_array->data[0] = loop_count;
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, node_output, new_dims_array));
node_output->type = subgraph_output->type;
}
return kTfLiteOk;
}
TfLiteStatus CopyInputTensorsData(TfLiteContext* context, TfLiteNode* node,
Subgraph* dst_subgraph, int loop_index,
int loop_count) {
const std::vector<int>& dst_tensor_indices = dst_subgraph->inputs();
TF_LITE_ENSURE_EQ(context, node->inputs->size, dst_tensor_indices.size());
for (int i = 0; i < dst_tensor_indices.size(); ++i) {
TfLiteTensor* src_tensor = context->tensors + node->inputs->data[i];
TfLiteTensor* dst_tensor = dst_subgraph->tensor(dst_tensor_indices[i]);
size_t offset = src_tensor->bytes / loop_count * loop_index;
TF_LITE_ENSURE_EQ(context, src_tensor->bytes / loop_count,
dst_tensor->bytes);
memcpy(dst_tensor->data.raw, src_tensor->data.raw + offset,
src_tensor->bytes / loop_count);
}
return kTfLiteOk;
}
TfLiteStatus CopyOutputTensorsData(TfLiteContext* context,
Subgraph* src_subgraph, TfLiteNode* node,
int loop_index, int loop_count) {
const std::vector<int>& src_tensor_indices = src_subgraph->outputs();
TF_LITE_ENSURE_EQ(context, src_tensor_indices.size(), node->outputs->size);
for (int i = 0; i < src_tensor_indices.size(); ++i) {
const TfLiteTensor* src_tensor =
src_subgraph->tensor(src_tensor_indices[i]);
TfLiteTensor* dst_tensor = context->tensors + node->outputs->data[i];
size_t offset = dst_tensor->bytes / loop_count * loop_index;
TF_LITE_ENSURE_EQ(context, src_tensor->bytes,
dst_tensor->bytes / loop_count);
memcpy(dst_tensor->data.raw + offset, src_tensor->data.raw,
src_tensor->bytes);
}
return kTfLiteOk;
}
}
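// Options for the CALL custom op, parsed in Init() from the flexbuffer map
// attached to the node: which subgraph to invoke and how many times.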
struct OpData {
int subgraph_index;
int loop_count;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
if (!buffer) {
return nullptr;
}
auto* op_data = new OpData;
const uint8_t* buffer_fixed_width = reinterpret_cast<const uint8_t*>(buffer);
const flexbuffers::Map& map =
flexbuffers::GetRoot(buffer_fixed_width, length).AsMap();
op_data->subgraph_index = map["subgraph_index"].AsInt32();
op_data->loop_count = map["loop_count"].AsInt32();
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
TF_LITE_ENSURE(context, op_data);
Subgraph* this_subgraph = reinterpret_cast<Subgraph*>(context->impl_);
auto* subgraphs = this_subgraph->GetSubgraphs();
TF_LITE_ENSURE_MSG(context,
(op_data->subgraph_index < subgraphs->size()) &&
(op_data->subgraph_index >= 0),
"Index of subgraph to be invoked is invalid.");
Subgraph* subgraph = (*subgraphs)[op_data->subgraph_index].get();
TF_LITE_ENSURE_MSG(
context, subgraph != this_subgraph,
"Subgraph to invoke must be different from the invoking graph.");
int loop_count = op_data->loop_count;
TF_LITE_ENSURE_MSG(context, loop_count >= 0, "Loop count must be positive. ");
TF_LITE_ENSURE_OK(context, ValidateAndResizeInputsIfNeeded(
context, node, subgraph, loop_count));
TF_LITE_ENSURE_OK(context, subgraph->AllocateTensors());
TF_LITE_ENSURE_OK(
context, ValidateAndResizeOutputs(context, node, subgraph, loop_count));
TF_LITE_ENSURE(context, !subgraph->HasDynamicTensors());
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
Subgraph* this_subgraph = reinterpret_cast<Subgraph*>(context->impl_);
auto* subgraphs = this_subgraph->GetSubgraphs();
Subgraph* subgraph = (*subgraphs)[op_data->subgraph_index].get();
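  // Invoke the callee once per batch element: copy slice `loop_index` of every
  // node input into the subgraph, run it, then copy the subgraph outputs back
  // into slice `loop_index` of the node outputs.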
for (int loop_index = 0; loop_index < op_data->loop_count; loop_index++) {
TF_LITE_ENSURE_OK(context,
CopyInputTensorsData(context, node, subgraph, loop_index,
op_data->loop_count));
TF_LITE_ENSURE_OK(context, subgraph->Invoke());
for (int tensor_index : subgraph->outputs()) {
subgraph->EnsureTensorDataIsReadable(tensor_index);
}
TF_LITE_ENSURE_OK(context,
CopyOutputTensorsData(context, subgraph, node, loop_index,
op_data->loop_count));
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_CALL() {
static TfLiteRegistration r = {call_kernel::Init, call_kernel::Free,
call_kernel::Prepare, call_kernel::Eval};
return &r;
}
}
}
} | #include <cstddef>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/kernels/builtin_op_kernels.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/call_register.h"
#include "tensorflow/lite/interpreter_test_util.h"
#include "tensorflow/lite/kernels/subgraph_test_util.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace {
class CallTest : public subgraph_test_util::ControlFlowOpTest {
public:
CallTest() { interpreter_ = std::make_unique<Interpreter>(&error_reporter_); }
~CallTest() override = default;
void SetupTensor(Subgraph* subgraph, int tensor_index, TfLiteType type) {
ASSERT_EQ(subgraph->SetTensorParametersReadWrite(tensor_index, type, "", 0,
nullptr, {}, false),
kTfLiteOk);
}
void BuildCallSubgraph(Subgraph* subgraph, std::vector<uint8_t> params_buffer,
std::vector<int> inputs, std::vector<int> outputs,
int expected_node_index, bool single_node_subgraph) {
if (single_node_subgraph) {
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(inputs.size() + outputs.size(),
&first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs(inputs), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs(outputs), kTfLiteOk);
}
for (const int& idx : inputs) {
SetupTensor(subgraph, idx, kTfLiteInt32);
}
for (const int& idx : outputs) {
SetupTensor(subgraph, idx, kTfLiteInt32);
}
int node_index;
subgraph->AddNodeWithParameters(
inputs, outputs, {},
reinterpret_cast<const char*>(params_buffer.data()),
params_buffer.size(), nullptr, acceleration::ops::Register_CALL(),
&node_index);
ASSERT_EQ(node_index, expected_node_index);
}
void BuildCallSubgraph(Subgraph* subgraph, int index, int loop_count,
std::vector<int> inputs, std::vector<int> outputs,
int expected_node_index = 0,
bool single_node_subgraph = true) {
flexbuffers::Builder fbb;
fbb.Map([&] {
fbb.Int("subgraph_index", index);
fbb.Int("loop_count", loop_count);
});
fbb.Finish();
BuildCallSubgraph(subgraph, fbb.GetBuffer(), inputs, outputs,
expected_node_index, single_node_subgraph);
}
void BuildGraphWithMultipleOutputs(Subgraph* subgraph) {
const int kInput1 = 0;
const int kInput2 = 1;
const int kMulOutput = 2;
const int kAddOutput = 3;
const int kTensorCount = 4;
int first_new_tensor_index;
ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
ASSERT_EQ(subgraph->SetInputs({kInput1, kInput2}), kTfLiteOk);
ASSERT_EQ(subgraph->SetOutputs({kMulOutput, kAddOutput}), kTfLiteOk);
SetupTensor(subgraph, kInput1, kTfLiteInt32);
SetupTensor(subgraph, kInput2, kTfLiteInt32);
SetupTensor(subgraph, kMulOutput, kTfLiteInt32);
SetupTensor(subgraph, kAddOutput, kTfLiteInt32);
TfLiteMulParams* params_mul =
reinterpret_cast<TfLiteMulParams*>(malloc(sizeof(TfLiteMulParams)));
params_mul->activation = kTfLiteActNone;
int node_index;
subgraph->AddNodeWithParameters(
{kInput1, kInput2}, {kMulOutput}, {}, nullptr, 0, params_mul,
::tflite::ops::builtin::Register_MUL(), &node_index);
TfLiteAddParams* params_add =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
params_add->activation = kTfLiteActNone;
subgraph->AddNodeWithParameters(
{kInput1, kInput2}, {kAddOutput}, {}, nullptr, 0, params_add,
::tflite::ops::builtin::Register_ADD(), &node_index);
}
void BuildMultiNodeGraph(Subgraph* this_subgraph) {
const int kInput1 = 0, kInput2 = 1, kInput3 = 2, kInput4 = 3;
const int kOutput1 = 4, kOutput2 = 5, kOutput3 = 6;
const int kTensorCount = 7;
int first_new_tensor_index;
ASSERT_EQ(this_subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
kTfLiteOk);
ASSERT_EQ(first_new_tensor_index, 0);
std::vector<int> inputs = {kInput1, kInput2, kInput3, kInput4};
std::vector<int> outputs = {kOutput3};
ASSERT_EQ(this_subgraph->SetInputs(inputs), kTfLiteOk);
ASSERT_EQ(this_subgraph->SetOutputs({kOutput3}), kTfLiteOk);
for (int idx = 0; idx < kTensorCount; ++idx) {
SetupTensor(this_subgraph, idx, kTfLiteInt32);
}
int expected_node_index = 0, node_index;
auto* pad_reg = ops::builtin::Register_PAD();
pad_reg->builtin_code = kTfLiteBuiltinPad;
this_subgraph->AddNodeWithParameters(
{kInput2, kInput3}, {kOutput1}, {}, nullptr, 0,
reinterpret_cast<TfLitePadParams*>(malloc(sizeof(TfLitePadParams))),
pad_reg, &node_index);
ASSERT_EQ(node_index, expected_node_index++);
AddSubgraphs(1);
const int kLoopCount = 1;
const int kSubgraphIndex = 1;
builder_->BuildAddSubgraph(interpreter_->subgraph(1));
CallTest::BuildCallSubgraph(this_subgraph, kSubgraphIndex, kLoopCount,
{kInput1, kOutput1}, {kOutput2},
expected_node_index++, false);
TfLiteMulParams* mul_params =
reinterpret_cast<TfLiteMulParams*>(malloc(sizeof(TfLiteMulParams)));
mul_params->activation = kTfLiteActNone;
auto* mul_reg = ops::builtin::Register_MUL();
mul_reg->builtin_code = kTfLiteBuiltinMul;
this_subgraph->AddNodeWithParameters({kInput4, kOutput2}, {kOutput3}, {},
nullptr, 0, mul_params, mul_reg,
&node_index);
ASSERT_EQ(node_index, expected_node_index++);
}
TestErrorReporter error_reporter_;
};
TEST_F(CallTest, SubgraphMultipleInputsSingleOutput) {
std::vector<std::vector<int>> test_shapes = {
{3, 2}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (size_t i = 0; i < test_shapes.size(); ++i) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(1);
int loop_count = test_shapes[i][0];
builder_->BuildMulSubgraph(interpreter_->subgraph(1));
CallTest::BuildCallSubgraph(&interpreter_->primary_subgraph(), 1,
loop_count, {0, 1}, {2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], test_shapes[i]);
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], test_shapes[i]);
ASSERT_EQ(interpreter_->subgraph(1)->AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[0]), {-1, 2, -3, 4, -5, 6});
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[1]), {-1, 2, -3, 4, -5, 6});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
subgraph_test_util::CheckIntTensor(output, test_shapes[i],
{1, 4, 9, 16, 25, 36});
}
}
TEST_F(CallTest, ShouldBeANoOpWhenLoopCountIsZero) {
AddSubgraphs(1);
int loop_count = 0;
builder_->BuildMulSubgraph(interpreter_->subgraph(1));
CallTest::BuildCallSubgraph(&interpreter_->primary_subgraph(), 1, loop_count,
{0, 1}, {2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {0, 3});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {0, 3});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
subgraph_test_util::CheckIntTensor(output, {0, 3}, {});
}
TEST_F(CallTest, SubgraphWithFixedInputShapes) {
AddSubgraphs(1);
const int kLoopCount = 2;
const int kBatchSizeSubgraph = 1;
const int kFixedInputLen = 3;
const std::vector<int> kCallOpInputShape = {kLoopCount, kFixedInputLen};
const std::vector<int> kSubgraphInputShape = {kBatchSizeSubgraph,
kFixedInputLen};
Subgraph* subgraph = interpreter_->subgraph(1);
builder_->BuildMulSubgraph(subgraph);
CallTest::BuildCallSubgraph(&interpreter_->primary_subgraph(), 1, kLoopCount,
{0, 1}, {2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], kCallOpInputShape);
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], kCallOpInputShape);
subgraph->ResizeInputTensor(subgraph->inputs()[0], kSubgraphInputShape);
subgraph->ResizeInputTensor(subgraph->inputs()[1], kSubgraphInputShape);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[0]), {-1, 2, -3, 4, -5, 6});
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[1]), {-1, 2, -3, 4, -5, 6});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
subgraph_test_util::CheckIntTensor(output, kCallOpInputShape,
{1, 4, 9, 16, 25, 36});
}
TEST_F(CallTest, SubgraphWithMultipleInputsAndOutputs) {
std::vector<std::vector<int>> test_shapes = {
{3, 2, 1}, {1, 2, 3}, {2, 1, 3}, {2, 3, 1, 1}, {2, 3}};
for (size_t i = 0; i < test_shapes.size(); ++i) {
interpreter_ = std::make_unique<Interpreter>();
AddSubgraphs(1);
int loop_count = test_shapes[i][0];
CallTest::BuildGraphWithMultipleOutputs(interpreter_->subgraph(1));
CallTest::BuildCallSubgraph(&interpreter_->primary_subgraph(), 1,
loop_count, {0, 1}, {2, 3});
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], test_shapes[i]);
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], test_shapes[i]);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[0]), {-1, 2, -3, 4, -5, 6});
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[1]), {-1, 2, -3, 4, -5, 6});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output_mul = interpreter_->tensor(interpreter_->outputs()[0]);
subgraph_test_util::CheckIntTensor(output_mul, test_shapes[i],
{1, 4, 9, 16, 25, 36});
TfLiteTensor* output_add = interpreter_->tensor(interpreter_->outputs()[1]);
subgraph_test_util::CheckIntTensor(output_add, test_shapes[i],
{-2, 4, -6, 8, -10, 12});
}
}
TEST_F(CallTest, ShouldHandleInvalidParamsAndSetToDefault) {
flexbuffers::Builder fbb;
fbb.Vector([&]() {
fbb.String("hi");
fbb.String("hello");
});
fbb.Finish();
AddSubgraphs(1);
CallTest::BuildCallSubgraph(&interpreter_->primary_subgraph(),
fbb.GetBuffer(), {0}, {1}, 0, true);
const int kNodeIndex = 0;
const TfLiteNode* call_node = &interpreter_->primary_subgraph()
.nodes_and_registration()[kNodeIndex]
.first;
tflite::acceleration::ops::TfLiteCallParams* op_data =
reinterpret_cast<tflite::acceleration::ops::TfLiteCallParams*>(
call_node->user_data);
EXPECT_EQ(op_data->subgraph_index, 0);
EXPECT_EQ(op_data->loop_count, 0);
}
TEST_F(CallTest, MultiNodeGraph) {
CallTest::BuildMultiNodeGraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1, 4, 4, 1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1, 2, 2, 1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {4, 2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[3], {1, 4, 4, 1});
ASSERT_EQ(interpreter_->subgraph(1)->AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[0]), std::vector<int>(16, 1));
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[1]), {1, 2, 3, 4});
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[2]),
{0, 0, 1, 1, 1, 1, 0, 0});
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[3]), std::vector<int>(16, 2));
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
subgraph_test_util::CheckIntTensor(
output, {1, 4, 4, 1}, {2, 2, 2, 2, 2, 4, 6, 2, 2, 8, 10, 2, 2, 2, 2, 2});
}
TEST_F(CallTest, ShouldFailWith0DInputs) {
AddSubgraphs(1);
int loop_count = 5;
builder_->BuildMulSubgraph(interpreter_->subgraph(1));
interpreter_->subgraph(1)->ResizeInputTensor(0, {});
interpreter_->subgraph(1)->ResizeInputTensor(1, {});
CallTest::BuildCallSubgraph(&interpreter_->primary_subgraph(), 1, loop_count,
{0, 1}, {2});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteError);
EXPECT_THAT(
error_reporter_.error_messages(),
testing::HasSubstr(
"Dimensions of all of call node's inputs should be non-zero."));
}
TEST_F(CallTest, ShouldFailWhenLoopCountDoesNotMatchBatchSize) {
AddSubgraphs(1);
int loop_count = 7;
builder_->BuildMulSubgraph(interpreter_->subgraph(1));
CallTest::BuildCallSubgraph(&interpreter_->primary_subgraph(), 1, loop_count,
{0, 1}, {2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {5, 3});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {5, 3});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteError);
EXPECT_THAT(
error_reporter_.error_messages(),
testing::HasSubstr("node_input->dims->data[0] != loop_count (5 != 7)"));
}
TEST_F(CallTest, ShouldFailForSubgraphWithIncompatibleInputShapes) {
AddSubgraphs(1);
const int kLoopCount = 5;
const int kBatchSizeSubgraph = 1;
std::vector<int> call_op_input = {kLoopCount, 3};
std::vector<int> subgraph_input = {kBatchSizeSubgraph, 7};
Subgraph* subgraph = interpreter_->subgraph(1);
builder_->BuildMulSubgraph(subgraph);
CallTest::BuildCallSubgraph(&interpreter_->primary_subgraph(), 1, kLoopCount,
{0, 1}, {2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], call_op_input);
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], call_op_input);
subgraph->ResizeInputTensor(subgraph->inputs()[0], subgraph_input);
subgraph->ResizeInputTensor(subgraph->inputs()[1], subgraph_input);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteError);
EXPECT_THAT(
error_reporter_.error_messages(),
testing::HasSubstr("All dimensions except the batch size should match "
"for call node and the subgraph to invoke"));
}
TEST_F(CallTest, ShouldFailWhenSubgraphIndexMatchesInvokedSubgraph) {
const int kPrimarySubgraphIndex = 0;
CallTest::BuildCallSubgraph(&interpreter_->primary_subgraph(),
kPrimarySubgraphIndex, 1, {0}, {1});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteError);
EXPECT_THAT(
error_reporter_.error_messages(),
testing::HasSubstr(
"Subgraph to invoke must be different from the invoking graph."));
}
TEST_F(CallTest, ShouldFailWithNegativeLoopCount) {
AddSubgraphs(1);
CallTest::BuildCallSubgraph(&interpreter_->primary_subgraph(), 1, -1, {0},
{1});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteError);
EXPECT_THAT(error_reporter_.error_messages(),
testing::HasSubstr("Loop count must be positive."));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/call.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/call_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
28040aa5-8181-4e52-8669-91ca1a3372ac | cpp | tensorflow/tensorflow | skip_gram | tensorflow/lite/kernels/skip_gram.cc | tensorflow/lite/kernels/skip_gram_test.cc | #include <ctype.h>
#include <vector>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace {
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input_tensor;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input_tensor));
TF_LITE_ENSURE_TYPES_EQ(context, input_tensor->type, kTfLiteString);
TfLiteTensor* output_tensor;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output_tensor));
TF_LITE_ENSURE_TYPES_EQ(context, output_tensor->type, kTfLiteString);
return kTfLiteOk;
}
bool ShouldIncludeCurrentNgram(const TfLiteSkipGramParams* params, int size) {
if (size <= 0) {
return false;
}
if (params->include_all_ngrams) {
return size <= params->ngram_size;
} else {
return size == params->ngram_size;
}
}
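// Returns true if the combination search may advance the word index at
// `stack_idx`: the next index must still be inside the word list and, past the
// first position, the current index must not lie more than max_skip_size
// words beyond the previously selected one.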
bool ShouldStepInRecursion(const TfLiteSkipGramParams* params,
const std::vector<int>& stack, int stack_idx,
int num_words) {
if (stack_idx < params->ngram_size && stack[stack_idx] + 1 < num_words) {
if (stack_idx == 0) {
return true;
}
if (stack[stack_idx] - stack[stack_idx - 1] <= params->max_skip_size) {
return true;
}
}
return false;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
auto* params = reinterpret_cast<TfLiteSkipGramParams*>(node->builtin_data);
std::vector<StringRef> words;
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
tflite::StringRef strref = tflite::GetString(input, 0);
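  // Split the input string into whitespace-delimited words, keeping each word
  // as a StringRef into the original string (no copies).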
int prev_idx = 0;
  for (size_t i = 0; i < strref.len; i++) {
if (isspace(*(strref.str + i))) {
if (i > prev_idx && !isspace(*(strref.str + prev_idx))) {
words.push_back({strref.str + prev_idx, i - prev_idx});
}
prev_idx = i + 1;
}
}
if (strref.len > prev_idx) {
words.push_back({strref.str + prev_idx, strref.len - prev_idx});
}
tflite::DynamicBuffer buf;
if (words.size() < params->ngram_size) {
buf.WriteToTensorAsVector(GetOutput(context, node, 0));
return kTfLiteOk;
}
std::vector<int> stack(params->ngram_size, 0);
int stack_idx = 1;
int num_words = words.size();
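  // Enumerate all qualifying combinations of word indices with an explicit
  // stack instead of recursion. Each completed combination of the requested
  // size (or any shorter prefix when include_all_ngrams is set) is joined
  // with spaces and appended to the output buffer.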
while (stack_idx >= 0) {
if (ShouldStepInRecursion(params, stack, stack_idx, num_words)) {
stack[stack_idx]++;
stack_idx++;
if (stack_idx < params->ngram_size) {
stack[stack_idx] = stack[stack_idx - 1];
}
} else {
if (ShouldIncludeCurrentNgram(params, stack_idx)) {
std::vector<StringRef> gram(stack_idx);
for (int i = 0; i < stack_idx; i++) {
gram[i] = words[stack[i]];
}
buf.AddJoinedString(gram, ' ');
}
stack_idx--;
}
}
buf.WriteToTensorAsVector(GetOutput(context, node, 0));
return kTfLiteOk;
}
}
TfLiteRegistration* Register_SKIP_GRAM() {
static TfLiteRegistration r = {nullptr, nullptr, Prepare, Eval};
return &r;
}
}
}
} | #include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
static char kSentence[] = "The quick\t brown fox\n jumps over\n the lazy dog!";
class SkipGramOp : public SingleOpModel {
public:
SkipGramOp(int ngram_size, int max_skip_size, bool include_all_ngrams) {
input_ = AddInput(TensorType_STRING);
output_ = AddOutput(TensorType_STRING);
SetBuiltinOp(BuiltinOperator_SKIP_GRAM, BuiltinOptions_SkipGramOptions,
CreateSkipGramOptions(builder_, ngram_size, max_skip_size,
include_all_ngrams)
.Union());
BuildInterpreter({{1}});
}
void SetInput(const string& content) {
PopulateStringTensor(input_, {content});
}
std::vector<string> GetOutput() {
std::vector<string> ans;
TfLiteTensor* tensor = interpreter_->tensor(output_);
int num = GetStringCount(tensor);
for (int i = 0; i < num; i++) {
StringRef strref = GetString(tensor, i);
ans.push_back(string(strref.str, strref.len));
}
return ans;
}
private:
int input_;
int output_;
};
TEST(SkipGramTest, TestUnigram) {
SkipGramOp m(1, 0, false);
m.SetInput(kSentence);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), testing::UnorderedElementsAreArray(
{"The", "quick", "brown", "fox", "jumps",
"over", "the", "lazy", "dog!"}));
}
TEST(SkipGramTest, TestBigram) {
SkipGramOp m(2, 0, false);
m.SetInput(kSentence);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
testing::UnorderedElementsAreArray(
{"The quick", "quick brown", "brown fox", "fox jumps",
"jumps over", "over the", "the lazy", "lazy dog!"}));
}
TEST(SkipGramTest, TestAllBigram) {
SkipGramOp m(2, 0, true);
m.SetInput(kSentence);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
testing::UnorderedElementsAreArray(
{
"The", "quick", "brown", "fox", "jumps", "over", "the",
"lazy", "dog!",
"The quick", "quick brown", "brown fox", "fox jumps",
"jumps over", "over the", "the lazy", "lazy dog!"}));
}
TEST(SkipGramTest, TestAllTrigram) {
SkipGramOp m(3, 0, true);
m.SetInput(kSentence);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
testing::UnorderedElementsAreArray(
{
"The", "quick", "brown", "fox", "jumps", "over", "the",
"lazy", "dog!",
"The quick", "quick brown", "brown fox", "fox jumps",
"jumps over", "over the", "the lazy", "lazy dog!",
"The quick brown", "quick brown fox", "brown fox jumps",
"fox jumps over", "jumps over the", "over the lazy",
"the lazy dog!"}));
}
TEST(SkipGramTest, TestSkip1Bigram) {
SkipGramOp m(2, 1, false);
m.SetInput(kSentence);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
testing::UnorderedElementsAreArray(
{"The quick", "The brown", "quick brown", "quick fox", "brown fox",
"brown jumps", "fox jumps", "fox over", "jumps over", "jumps the",
"over the", "over lazy", "the lazy", "the dog!", "lazy dog!"}));
}
TEST(SkipGramTest, TestSkip2Bigram) {
SkipGramOp m(2, 2, false);
m.SetInput(kSentence);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
testing::UnorderedElementsAreArray(
{"The quick", "The brown", "The fox", "quick brown",
"quick fox", "quick jumps", "brown fox", "brown jumps",
"brown over", "fox jumps", "fox over", "fox the",
"jumps over", "jumps the", "jumps lazy", "over the",
"over lazy", "over dog!", "the lazy", "the dog!",
"lazy dog!"}));
}
TEST(SkipGramTest, TestSkip1Trigram) {
SkipGramOp m(3, 1, false);
m.SetInput(kSentence);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
testing::UnorderedElementsAreArray(
{"The quick brown", "The quick fox", "The brown fox",
"The brown jumps", "quick brown fox", "quick brown jumps",
"quick fox jumps", "quick fox over", "brown fox jumps",
"brown fox over", "brown jumps over", "brown jumps the",
"fox jumps over", "fox jumps the", "fox over the",
"fox over lazy", "jumps over the", "jumps over lazy",
"jumps the lazy", "jumps the dog!", "over the lazy",
"over the dog!", "over lazy dog!", "the lazy dog!"}));
}
TEST(SkipGramTest, TestSkip2Trigram) {
SkipGramOp m(3, 2, false);
m.SetInput(kSentence);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
testing::UnorderedElementsAreArray(
{"The quick brown", "The quick fox", "The quick jumps",
"The brown fox", "The brown jumps", "The brown over",
"The fox jumps", "The fox over", "The fox the",
"quick brown fox", "quick brown jumps", "quick brown over",
"quick fox jumps", "quick fox over", "quick fox the",
"quick jumps over", "quick jumps the", "quick jumps lazy",
"brown fox jumps", "brown fox over", "brown fox the",
"brown jumps over", "brown jumps the", "brown jumps lazy",
"brown over the", "brown over lazy", "brown over dog!",
"fox jumps over", "fox jumps the", "fox jumps lazy",
"fox over the", "fox over lazy", "fox over dog!",
"fox the lazy", "fox the dog!", "jumps over the",
"jumps over lazy", "jumps over dog!", "jumps the lazy",
"jumps the dog!", "jumps lazy dog!", "over the lazy",
"over the dog!", "over lazy dog!", "the lazy dog!"}));
}
TEST(SkipGramTest, TestAllSkip2Trigram) {
SkipGramOp m(3, 2, true);
m.SetInput(kSentence);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
testing::UnorderedElementsAreArray(
{
"The", "quick", "brown", "fox", "jumps", "over", "the", "lazy",
"dog!",
"The quick", "The brown", "The fox", "quick brown", "quick fox",
"quick jumps", "brown fox", "brown jumps", "brown over", "fox jumps",
"fox over", "fox the", "jumps over", "jumps the", "jumps lazy",
"over the", "over lazy", "over dog!", "the lazy", "the dog!",
"lazy dog!",
"The quick brown", "The quick fox", "The quick jumps",
"The brown fox", "The brown jumps", "The brown over",
"The fox jumps", "The fox over", "The fox the", "quick brown fox",
"quick brown jumps", "quick brown over", "quick fox jumps",
"quick fox over", "quick fox the", "quick jumps over",
"quick jumps the", "quick jumps lazy", "brown fox jumps",
"brown fox over", "brown fox the", "brown jumps over",
"brown jumps the", "brown jumps lazy", "brown over the",
"brown over lazy", "brown over dog!", "fox jumps over",
"fox jumps the", "fox jumps lazy", "fox over the", "fox over lazy",
"fox over dog!", "fox the lazy", "fox the dog!", "jumps over the",
"jumps over lazy", "jumps over dog!", "jumps the lazy",
"jumps the dog!", "jumps lazy dog!", "over the lazy",
"over the dog!", "over lazy dog!", "the lazy dog!"}));
}
TEST(SkipGramTest, TestSingleWord) {
SkipGramOp m(1, 1, false);
m.SetInput("Hi");
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAre("Hi"));
}
TEST(SkipGramTest, TestWordsLessThanGram) {
SkipGramOp m(3, 1, false);
m.SetInput("Hi hi");
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), std::vector<string>());
}
TEST(SkipGramTest, TestEmptyInput) {
SkipGramOp m(1, 1, false);
m.SetInput("");
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAre());
}
TEST(SkipGramTest, TestWhitespaceInput) {
SkipGramOp m(1, 1, false);
m.SetInput(" ");
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAre());
}
TEST(SkipGramTest, TestInputWithExtraSpace) {
SkipGramOp m(1, 1, false);
m.SetInput(" Hello world ! ");
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAre("Hello", "world", "!"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/skip_gram.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/skip_gram_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
132383d5-a84f-45ff-9829-fb63d2ae5943 | cpp | google/cel-cpp | utf8 | internal/utf8.cc | internal/utf8_test.cc | #include "internal/utf8.h"
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <string>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/macros.h"
#include "absl/base/optimization.h"
#include "absl/log/absl_check.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "internal/unicode.h"
namespace cel::internal {
namespace {
constexpr uint8_t kUtf8RuneSelf = 0x80;
constexpr size_t kUtf8Max = 4;
constexpr uint8_t kLow = 0x80;
constexpr uint8_t kHigh = 0xbf;
constexpr uint8_t kMaskX = 0x3f;
constexpr uint8_t kMask2 = 0x1f;
constexpr uint8_t kMask3 = 0xf;
constexpr uint8_t kMask4 = 0x7;
constexpr uint8_t kTX = 0x80;
constexpr uint8_t kT2 = 0xc0;
constexpr uint8_t kT3 = 0xe0;
constexpr uint8_t kT4 = 0xf0;
constexpr uint8_t kXX = 0xf1;
constexpr uint8_t kAS = 0xf0;
constexpr uint8_t kS1 = 0x02;
constexpr uint8_t kS2 = 0x13;
constexpr uint8_t kS3 = 0x03;
constexpr uint8_t kS4 = 0x23;
constexpr uint8_t kS5 = 0x34;
constexpr uint8_t kS6 = 0x04;
constexpr uint8_t kS7 = 0x44;
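// Per-leading-byte descriptor table: the low bits encode the total length in
// bytes of the UTF-8 sequence that the byte starts, and the high nibble picks
// the kAccept range that the first continuation byte must fall into. kAS marks
// plain ASCII (handled before this table is consulted) and kXX marks bytes
// that can never start a valid sequence.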
constexpr uint8_t kLeading[256] = {
kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS,
kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS,
kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS,
kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS,
kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS,
kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS,
kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS,
kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS, kAS,
kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX,
kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX,
kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX,
kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX,
kXX, kXX, kS1, kS1, kS1, kS1, kS1, kS1, kS1, kS1, kS1, kS1, kS1, kS1, kS1, kS1,
kS1, kS1, kS1, kS1, kS1, kS1, kS1, kS1, kS1, kS1, kS1, kS1, kS1, kS1, kS1, kS1,
kS2, kS3, kS3, kS3, kS3, kS3, kS3, kS3, kS3, kS3, kS3, kS3, kS3, kS4, kS3, kS3,
kS5, kS6, kS6, kS6, kS7, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX, kXX,
};
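// Allowed ranges for the first continuation byte, indexed by the high nibble
// of the kLeading descriptor. The narrowed ranges rule out overlong encodings,
// UTF-16 surrogate code points, and values above U+10FFFF.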
constexpr std::pair<const uint8_t, const uint8_t> kAccept[16] = {
{kLow, kHigh}, {0xa0, kHigh}, {kLow, 0x9f}, {0x90, kHigh},
{kLow, 0x8f}, {0x0, 0x0}, {0x0, 0x0}, {0x0, 0x0},
{0x0, 0x0}, {0x0, 0x0}, {0x0, 0x0}, {0x0, 0x0},
{0x0, 0x0}, {0x0, 0x0}, {0x0, 0x0}, {0x0, 0x0},
};
class StringReader final {
public:
constexpr explicit StringReader(absl::string_view input) : input_(input) {}
size_t Remaining() const { return input_.size(); }
bool HasRemaining() const { return !input_.empty(); }
absl::string_view Peek(size_t n) {
ABSL_ASSERT(n <= Remaining());
return input_.substr(0, n);
}
char Read() {
ABSL_ASSERT(HasRemaining());
char value = input_.front();
input_.remove_prefix(1);
return value;
}
void Advance(size_t n) {
ABSL_ASSERT(n <= Remaining());
input_.remove_prefix(n);
}
void Reset(absl::string_view input) { input_ = input; }
private:
absl::string_view input_;
};
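// Byte reader over an absl::Cord with the same interface as StringReader.
// Peek() copies the requested bytes into a small local buffer so that a
// multi-byte UTF-8 sequence split across cord chunks can be inspected
// contiguously.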
class CordReader final {
public:
explicit CordReader(const absl::Cord& input)
: input_(input), size_(input_.size()), buffer_(), index_(0) {}
size_t Remaining() const { return size_; }
bool HasRemaining() const { return size_ != 0; }
absl::string_view Peek(size_t n) {
ABSL_ASSERT(n <= Remaining());
if (n == 0) {
return absl::string_view();
}
if (n <= buffer_.size() - index_) {
return absl::string_view(buffer_.data() + index_, n);
}
if (buffer_.capacity() >= n) {
if (buffer_.capacity() - buffer_.size() < n && index_ != 0) {
buffer_.erase(buffer_.begin(), buffer_.begin() + index_);
index_ = 0;
}
}
buffer_.reserve(std::max(buffer_.size() + n, kUtf8Max));
size_t to_copy = n - (buffer_.size() - index_);
absl::CopyCordToString(input_.Subcord(0, to_copy), &buffer_);
input_.RemovePrefix(to_copy);
return absl::string_view(buffer_.data() + index_, n);
}
char Read() {
char value = Peek(1).front();
Advance(1);
return value;
}
void Advance(size_t n) {
ABSL_ASSERT(n <= Remaining());
if (n == 0) {
return;
}
if (index_ < buffer_.size()) {
size_t count = std::min(n, buffer_.size() - index_);
index_ += count;
n -= count;
size_ -= count;
if (index_ < buffer_.size()) {
return;
}
buffer_.clear();
index_ = 0;
}
input_.RemovePrefix(n);
size_ -= n;
}
void Reset(const absl::Cord& input) {
input_ = input;
size_ = input_.size();
buffer_.clear();
index_ = 0;
}
private:
absl::Cord input_;
size_t size_;
std::string buffer_;
size_t index_;
};
template <typename BufferedByteReader>
bool Utf8IsValidImpl(BufferedByteReader* reader) {
while (reader->HasRemaining()) {
const auto b = static_cast<uint8_t>(reader->Read());
if (b < kUtf8RuneSelf) {
continue;
}
const auto leading = kLeading[b];
if (leading == kXX) {
return false;
}
const auto size = static_cast<size_t>(leading & 7) - 1;
if (size > reader->Remaining()) {
return false;
}
const absl::string_view segment = reader->Peek(size);
const auto& accept = kAccept[leading >> 4];
if (static_cast<uint8_t>(segment[0]) < accept.first ||
static_cast<uint8_t>(segment[0]) > accept.second) {
return false;
} else if (size == 1) {
} else if (static_cast<uint8_t>(segment[1]) < kLow ||
static_cast<uint8_t>(segment[1]) > kHigh) {
return false;
} else if (size == 2) {
} else if (static_cast<uint8_t>(segment[2]) < kLow ||
static_cast<uint8_t>(segment[2]) > kHigh) {
return false;
}
reader->Advance(size);
}
return true;
}
template <typename BufferedByteReader>
size_t Utf8CodePointCountImpl(BufferedByteReader* reader) {
size_t count = 0;
while (reader->HasRemaining()) {
count++;
const auto b = static_cast<uint8_t>(reader->Read());
if (b < kUtf8RuneSelf) {
continue;
}
const auto leading = kLeading[b];
if (leading == kXX) {
continue;
}
auto size = static_cast<size_t>(leading & 7) - 1;
if (size > reader->Remaining()) {
continue;
}
const absl::string_view segment = reader->Peek(size);
const auto& accept = kAccept[leading >> 4];
if (static_cast<uint8_t>(segment[0]) < accept.first ||
static_cast<uint8_t>(segment[0]) > accept.second) {
size = 0;
} else if (size == 1) {
} else if (static_cast<uint8_t>(segment[1]) < kLow ||
static_cast<uint8_t>(segment[1]) > kHigh) {
size = 0;
} else if (size == 2) {
} else if (static_cast<uint8_t>(segment[2]) < kLow ||
static_cast<uint8_t>(segment[2]) > kHigh) {
size = 0;
}
reader->Advance(size);
}
return count;
}
template <typename BufferedByteReader>
std::pair<size_t, bool> Utf8ValidateImpl(BufferedByteReader* reader) {
size_t count = 0;
while (reader->HasRemaining()) {
const auto b = static_cast<uint8_t>(reader->Read());
if (b < kUtf8RuneSelf) {
count++;
continue;
}
const auto leading = kLeading[b];
if (leading == kXX) {
return {count, false};
}
const auto size = static_cast<size_t>(leading & 7) - 1;
if (size > reader->Remaining()) {
return {count, false};
}
const absl::string_view segment = reader->Peek(size);
const auto& accept = kAccept[leading >> 4];
if (static_cast<uint8_t>(segment[0]) < accept.first ||
static_cast<uint8_t>(segment[0]) > accept.second) {
return {count, false};
} else if (size == 1) {
count++;
} else if (static_cast<uint8_t>(segment[1]) < kLow ||
static_cast<uint8_t>(segment[1]) > kHigh) {
return {count, false};
} else if (size == 2) {
count++;
} else if (static_cast<uint8_t>(segment[2]) < kLow ||
static_cast<uint8_t>(segment[2]) > kHigh) {
return {count, false};
} else {
count++;
}
reader->Advance(size);
}
return {count, true};
}
}
bool Utf8IsValid(absl::string_view str) {
StringReader reader(str);
bool valid = Utf8IsValidImpl(&reader);
ABSL_ASSERT((reader.Reset(str), valid == Utf8ValidateImpl(&reader).second));
return valid;
}
bool Utf8IsValid(const absl::Cord& str) {
CordReader reader(str);
bool valid = Utf8IsValidImpl(&reader);
ABSL_ASSERT((reader.Reset(str), valid == Utf8ValidateImpl(&reader).second));
return valid;
}
size_t Utf8CodePointCount(absl::string_view str) {
StringReader reader(str);
return Utf8CodePointCountImpl(&reader);
}
size_t Utf8CodePointCount(const absl::Cord& str) {
CordReader reader(str);
return Utf8CodePointCountImpl(&reader);
}
std::pair<size_t, bool> Utf8Validate(absl::string_view str) {
StringReader reader(str);
auto result = Utf8ValidateImpl(&reader);
ABSL_ASSERT((reader.Reset(str), result.second == Utf8IsValidImpl(&reader)));
return result;
}
std::pair<size_t, bool> Utf8Validate(const absl::Cord& str) {
CordReader reader(str);
auto result = Utf8ValidateImpl(&reader);
ABSL_ASSERT((reader.Reset(str), result.second == Utf8IsValidImpl(&reader)));
return result;
}
namespace {
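// Decodes the continuation bytes that follow an already screened leading byte
// `b`. `size` is the number of continuation bytes and `str` starts at the
// first of them. Returns the code point and the full sequence length, or
// (U+FFFD, 1) when a continuation byte is outside its allowed range.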
std::pair<char32_t, size_t> Utf8DecodeImpl(uint8_t b, uint8_t leading,
size_t size, absl::string_view str) {
const auto& accept = kAccept[leading >> 4];
const auto b1 = static_cast<uint8_t>(str.front());
if (ABSL_PREDICT_FALSE(b1 < accept.first || b1 > accept.second)) {
return {kUnicodeReplacementCharacter, 1};
}
if (size <= 1) {
return {(static_cast<char32_t>(b & kMask2) << 6) |
static_cast<char32_t>(b1 & kMaskX),
2};
}
str.remove_prefix(1);
const auto b2 = static_cast<uint8_t>(str.front());
if (ABSL_PREDICT_FALSE(b2 < kLow || b2 > kHigh)) {
return {kUnicodeReplacementCharacter, 1};
}
if (size <= 2) {
return {(static_cast<char32_t>(b & kMask3) << 12) |
(static_cast<char32_t>(b1 & kMaskX) << 6) |
static_cast<char32_t>(b2 & kMaskX),
3};
}
str.remove_prefix(1);
const auto b3 = static_cast<uint8_t>(str.front());
if (ABSL_PREDICT_FALSE(b3 < kLow || b3 > kHigh)) {
return {kUnicodeReplacementCharacter, 1};
}
return {(static_cast<char32_t>(b & kMask4) << 18) |
(static_cast<char32_t>(b1 & kMaskX) << 12) |
(static_cast<char32_t>(b2 & kMaskX) << 6) |
static_cast<char32_t>(b3 & kMaskX),
4};
}
}
std::pair<char32_t, size_t> Utf8Decode(absl::string_view str) {
ABSL_DCHECK(!str.empty());
const auto b = static_cast<uint8_t>(str.front());
if (b < kUtf8RuneSelf) {
return {static_cast<char32_t>(b), 1};
}
const auto leading = kLeading[b];
if (ABSL_PREDICT_FALSE(leading == kXX)) {
return {kUnicodeReplacementCharacter, 1};
}
auto size = static_cast<size_t>(leading & 7) - 1;
str.remove_prefix(1);
if (ABSL_PREDICT_FALSE(size > str.size())) {
return {kUnicodeReplacementCharacter, 1};
}
return Utf8DecodeImpl(b, leading, size, str);
}
std::pair<char32_t, size_t> Utf8Decode(const absl::Cord::CharIterator& it) {
absl::string_view str = absl::Cord::ChunkRemaining(it);
ABSL_DCHECK(!str.empty());
const auto b = static_cast<uint8_t>(str.front());
if (b < kUtf8RuneSelf) {
return {static_cast<char32_t>(b), 1};
}
const auto leading = kLeading[b];
if (ABSL_PREDICT_FALSE(leading == kXX)) {
return {kUnicodeReplacementCharacter, 1};
}
auto size = static_cast<size_t>(leading & 7) - 1;
str.remove_prefix(1);
if (ABSL_PREDICT_TRUE(size <= str.size())) {
return Utf8DecodeImpl(b, leading, size, str);
}
absl::Cord::CharIterator current = it;
absl::Cord::Advance(¤t, 1);
char buffer[3];
size_t buffer_len = 0;
while (buffer_len < size) {
str = absl::Cord::ChunkRemaining(current);
if (ABSL_PREDICT_FALSE(str.empty())) {
return {kUnicodeReplacementCharacter, 1};
}
size_t to_copy = std::min(size_t{3} - buffer_len, str.size());
std::memcpy(buffer + buffer_len, str.data(), to_copy);
buffer_len += to_copy;
absl::Cord::Advance(¤t, to_copy);
}
return Utf8DecodeImpl(b, leading, size,
absl::string_view(buffer, buffer_len));
}
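// Appends the UTF-8 encoding of `code_point` to `buffer` and returns the
// number of bytes written (1 to 4). Code points rejected by UnicodeIsValid()
// are replaced with U+FFFD (the replacement character) before encoding.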
size_t Utf8Encode(std::string& buffer, char32_t code_point) {
if (ABSL_PREDICT_FALSE(!UnicodeIsValid(code_point))) {
code_point = kUnicodeReplacementCharacter;
}
char storage[4];
size_t storage_len = 0;
if (code_point <= 0x7f) {
storage[storage_len++] =
static_cast<char>(static_cast<uint8_t>(code_point));
} else if (code_point <= 0x7ff) {
storage[storage_len++] =
static_cast<char>(kT2 | static_cast<uint8_t>(code_point >> 6));
storage[storage_len++] =
static_cast<char>(kTX | (static_cast<uint8_t>(code_point) & kMaskX));
} else if (code_point <= 0xffff) {
storage[storage_len++] =
static_cast<char>(kT3 | static_cast<uint8_t>(code_point >> 12));
storage[storage_len++] = static_cast<char>(
kTX | (static_cast<uint8_t>(code_point >> 6) & kMaskX));
storage[storage_len++] =
static_cast<char>(kTX | (static_cast<uint8_t>(code_point) & kMaskX));
} else {
storage[storage_len++] =
static_cast<char>(kT4 | static_cast<uint8_t>(code_point >> 18));
storage[storage_len++] = static_cast<char>(
kTX | (static_cast<uint8_t>(code_point >> 12) & kMaskX));
storage[storage_len++] = static_cast<char>(
kTX | (static_cast<uint8_t>(code_point >> 6) & kMaskX));
storage[storage_len++] =
static_cast<char>(kTX | (static_cast<uint8_t>(code_point) & kMaskX));
}
buffer.append(storage, storage_len);
return storage_len;
}
} | #include "internal/utf8.h"
#include <string>
#include <vector>
#include "absl/strings/cord.h"
#include "absl/strings/cord_test_helpers.h"
#include "absl/strings/escaping.h"
#include "absl/strings/string_view.h"
#include "internal/benchmark.h"
#include "internal/testing.h"
namespace cel::internal {
namespace {
TEST(Utf8IsValid, String) {
EXPECT_TRUE(Utf8IsValid(""));
EXPECT_TRUE(Utf8IsValid("a"));
EXPECT_TRUE(Utf8IsValid("abc"));
EXPECT_TRUE(Utf8IsValid("\xd0\x96"));
EXPECT_TRUE(Utf8IsValid("\xd0\x96\xd0\x96"));
EXPECT_TRUE(Utf8IsValid(
"\xd0\xb1\xd1\x80\xd1\x8d\xd0\xb4-\xd0\x9b\xd0\x93\xd0\xa2\xd0\x9c"));
EXPECT_TRUE(Utf8IsValid("\xe2\x98\xba\xe2\x98\xbb\xe2\x98\xb9"));
EXPECT_TRUE(Utf8IsValid("a\ufffdb"));
EXPECT_TRUE(Utf8IsValid("\xf4\x8f\xbf\xbf"));
EXPECT_FALSE(Utf8IsValid("\x42\xfa"));
EXPECT_FALSE(Utf8IsValid("\x42\xfa\x43"));
EXPECT_FALSE(Utf8IsValid("\xf4\x90\x80\x80"));
EXPECT_FALSE(Utf8IsValid("\xf7\xbf\xbf\xbf"));
EXPECT_FALSE(Utf8IsValid("\xfb\xbf\xbf\xbf\xbf"));
EXPECT_FALSE(Utf8IsValid("\xc0\x80"));
EXPECT_FALSE(Utf8IsValid("\xed\xa0\x80"));
EXPECT_FALSE(Utf8IsValid("\xed\xbf\xbf"));
}
TEST(Utf8IsValid, Cord) {
EXPECT_TRUE(Utf8IsValid(absl::Cord("")));
EXPECT_TRUE(Utf8IsValid(absl::Cord("a")));
EXPECT_TRUE(Utf8IsValid(absl::Cord("abc")));
EXPECT_TRUE(Utf8IsValid(absl::Cord("\xd0\x96")));
EXPECT_TRUE(Utf8IsValid(absl::Cord("\xd0\x96\xd0\x96")));
EXPECT_TRUE(Utf8IsValid(absl::Cord(
"\xd0\xb1\xd1\x80\xd1\x8d\xd0\xb4-\xd0\x9b\xd0\x93\xd0\xa2\xd0\x9c")));
EXPECT_TRUE(Utf8IsValid(absl::Cord("\xe2\x98\xba\xe2\x98\xbb\xe2\x98\xb9")));
EXPECT_TRUE(Utf8IsValid(absl::Cord("a\ufffdb")));
EXPECT_TRUE(Utf8IsValid(absl::Cord("\xf4\x8f\xbf\xbf")));
EXPECT_FALSE(Utf8IsValid(absl::Cord("\x42\xfa")));
EXPECT_FALSE(Utf8IsValid(absl::Cord("\x42\xfa\x43")));
EXPECT_FALSE(Utf8IsValid(absl::Cord("\xf4\x90\x80\x80")));
EXPECT_FALSE(Utf8IsValid(absl::Cord("\xf7\xbf\xbf\xbf")));
EXPECT_FALSE(Utf8IsValid(absl::Cord("\xfb\xbf\xbf\xbf\xbf")));
EXPECT_FALSE(Utf8IsValid(absl::Cord("\xc0\x80")));
EXPECT_FALSE(Utf8IsValid(absl::Cord("\xed\xa0\x80")));
EXPECT_FALSE(Utf8IsValid(absl::Cord("\xed\xbf\xbf")));
}
TEST(Utf8CodePointCount, String) {
EXPECT_EQ(Utf8CodePointCount("abcd"), 4);
EXPECT_EQ(Utf8CodePointCount("1,2,3,4"), 7);
EXPECT_EQ(Utf8CodePointCount("\xe2\x98\xba\xe2\x98\xbb\xe2\x98\xb9"), 3);
EXPECT_EQ(Utf8CodePointCount(absl::string_view("\xe2\x00", 2)), 2);
EXPECT_EQ(Utf8CodePointCount("\xe2\x80"), 2);
EXPECT_EQ(Utf8CodePointCount("a\xe2\x80"), 3);
}
TEST(Utf8CodePointCount, Cord) {
EXPECT_EQ(Utf8CodePointCount(absl::Cord("abcd")), 4);
EXPECT_EQ(Utf8CodePointCount(absl::Cord("1,2,3,4")), 7);
EXPECT_EQ(
Utf8CodePointCount(absl::Cord("\xe2\x98\xba\xe2\x98\xbb\xe2\x98\xb9")),
3);
EXPECT_EQ(Utf8CodePointCount(absl::Cord(absl::string_view("\xe2\x00", 2))),
2);
EXPECT_EQ(Utf8CodePointCount(absl::Cord("\xe2\x80")), 2);
EXPECT_EQ(Utf8CodePointCount(absl::Cord("a\xe2\x80")), 3);
}
TEST(Utf8Validate, String) {
EXPECT_TRUE(Utf8Validate("").second);
EXPECT_TRUE(Utf8Validate("a").second);
EXPECT_TRUE(Utf8Validate("abc").second);
EXPECT_TRUE(Utf8Validate("\xd0\x96").second);
EXPECT_TRUE(Utf8Validate("\xd0\x96\xd0\x96").second);
EXPECT_TRUE(
Utf8Validate(
"\xd0\xb1\xd1\x80\xd1\x8d\xd0\xb4-\xd0\x9b\xd0\x93\xd0\xa2\xd0\x9c")
.second);
EXPECT_TRUE(Utf8Validate("\xe2\x98\xba\xe2\x98\xbb\xe2\x98\xb9").second);
EXPECT_TRUE(Utf8Validate("a\ufffdb").second);
EXPECT_TRUE(Utf8Validate("\xf4\x8f\xbf\xbf").second);
EXPECT_FALSE(Utf8Validate("\x42\xfa").second);
EXPECT_FALSE(Utf8Validate("\x42\xfa\x43").second);
EXPECT_FALSE(Utf8Validate("\xf4\x90\x80\x80").second);
EXPECT_FALSE(Utf8Validate("\xf7\xbf\xbf\xbf").second);
EXPECT_FALSE(Utf8Validate("\xfb\xbf\xbf\xbf\xbf").second);
EXPECT_FALSE(Utf8Validate("\xc0\x80").second);
EXPECT_FALSE(Utf8Validate("\xed\xa0\x80").second);
EXPECT_FALSE(Utf8Validate("\xed\xbf\xbf").second);
EXPECT_EQ(Utf8Validate("abcd").first, 4);
EXPECT_EQ(Utf8Validate("1,2,3,4").first, 7);
EXPECT_EQ(Utf8Validate("\xe2\x98\xba\xe2\x98\xbb\xe2\x98\xb9").first, 3);
EXPECT_EQ(Utf8Validate(absl::string_view("\xe2\x00", 2)).first, 0);
EXPECT_EQ(Utf8Validate("\xe2\x80").first, 0);
EXPECT_EQ(Utf8Validate("a\xe2\x80").first, 1);
}
TEST(Utf8Validate, Cord) {
EXPECT_TRUE(Utf8Validate(absl::Cord("")).second);
EXPECT_TRUE(Utf8Validate(absl::Cord("a")).second);
EXPECT_TRUE(Utf8Validate(absl::Cord("abc")).second);
EXPECT_TRUE(Utf8Validate(absl::Cord("\xd0\x96")).second);
EXPECT_TRUE(Utf8Validate(absl::Cord("\xd0\x96\xd0\x96")).second);
EXPECT_TRUE(Utf8Validate(absl::Cord("\xd0\xb1\xd1\x80\xd1\x8d\xd0\xb4-"
"\xd0\x9b\xd0\x93\xd0\xa2\xd0\x9c"))
.second);
EXPECT_TRUE(
Utf8Validate(absl::Cord("\xe2\x98\xba\xe2\x98\xbb\xe2\x98\xb9")).second);
EXPECT_TRUE(Utf8Validate(absl::Cord("a\ufffdb")).second);
EXPECT_TRUE(Utf8Validate(absl::Cord("\xf4\x8f\xbf\xbf")).second);
EXPECT_FALSE(Utf8Validate(absl::Cord("\x42\xfa")).second);
EXPECT_FALSE(Utf8Validate(absl::Cord("\x42\xfa\x43")).second);
EXPECT_FALSE(Utf8Validate(absl::Cord("\xf4\x90\x80\x80")).second);
EXPECT_FALSE(Utf8Validate(absl::Cord("\xf7\xbf\xbf\xbf")).second);
EXPECT_FALSE(Utf8Validate(absl::Cord("\xfb\xbf\xbf\xbf\xbf")).second);
EXPECT_FALSE(Utf8Validate(absl::Cord("\xc0\x80")).second);
EXPECT_FALSE(Utf8Validate(absl::Cord("\xed\xa0\x80")).second);
EXPECT_FALSE(Utf8Validate(absl::Cord("\xed\xbf\xbf")).second);
EXPECT_EQ(Utf8Validate(absl::Cord("abcd")).first, 4);
EXPECT_EQ(Utf8Validate(absl::Cord("1,2,3,4")).first, 7);
EXPECT_EQ(
Utf8Validate(absl::Cord("\xe2\x98\xba\xe2\x98\xbb\xe2\x98\xb9")).first,
3);
EXPECT_EQ(Utf8Validate(absl::Cord(absl::string_view("\xe2\x00", 2))).first,
0);
EXPECT_EQ(Utf8Validate(absl::Cord("\xe2\x80")).first, 0);
EXPECT_EQ(Utf8Validate(absl::Cord("a\xe2\x80")).first, 1);
}
struct Utf8EncodeTestCase final {
char32_t code_point;
absl::string_view code_units;
};
using Utf8EncodeTest = testing::TestWithParam<Utf8EncodeTestCase>;
TEST_P(Utf8EncodeTest, Compliance) {
const Utf8EncodeTestCase& test_case = GetParam();
std::string result;
EXPECT_EQ(Utf8Encode(result, test_case.code_point),
test_case.code_units.size());
EXPECT_EQ(result, test_case.code_units);
}
INSTANTIATE_TEST_SUITE_P(Utf8EncodeTest, Utf8EncodeTest,
testing::ValuesIn<Utf8EncodeTestCase>({
{0x0000, absl::string_view("\x00", 1)},
{0x0001, "\x01"},
{0x007e, "\x7e"},
{0x007f, "\x7f"},
{0x0080, "\xc2\x80"},
{0x0081, "\xc2\x81"},
{0x00bf, "\xc2\xbf"},
{0x00c0, "\xc3\x80"},
{0x00c1, "\xc3\x81"},
{0x00c8, "\xc3\x88"},
{0x00d0, "\xc3\x90"},
{0x00e0, "\xc3\xa0"},
{0x00f0, "\xc3\xb0"},
{0x00f8, "\xc3\xb8"},
{0x00ff, "\xc3\xbf"},
{0x0100, "\xc4\x80"},
{0x07ff, "\xdf\xbf"},
{0x0400, "\xd0\x80"},
{0x0800, "\xe0\xa0\x80"},
{0x0801, "\xe0\xa0\x81"},
{0x1000, "\xe1\x80\x80"},
{0xd000, "\xed\x80\x80"},
{0xd7ff, "\xed\x9f\xbf"},
{0xe000, "\xee\x80\x80"},
{0xfffe, "\xef\xbf\xbe"},
{0xffff, "\xef\xbf\xbf"},
{0x10000, "\xf0\x90\x80\x80"},
{0x10001, "\xf0\x90\x80\x81"},
{0x40000, "\xf1\x80\x80\x80"},
{0x10fffe, "\xf4\x8f\xbf\xbe"},
{0x10ffff, "\xf4\x8f\xbf\xbf"},
{0xFFFD, "\xef\xbf\xbd"},
}));
struct Utf8DecodeTestCase final {
char32_t code_point;
absl::string_view code_units;
};
using Utf8DecodeTest = testing::TestWithParam<Utf8DecodeTestCase>;
TEST_P(Utf8DecodeTest, StringView) {
const Utf8DecodeTestCase& test_case = GetParam();
auto [code_point, code_units] = Utf8Decode(test_case.code_units);
EXPECT_EQ(code_units, test_case.code_units.size())
<< absl::CHexEscape(test_case.code_units);
EXPECT_EQ(code_point, test_case.code_point)
<< absl::CHexEscape(test_case.code_units);
}
TEST_P(Utf8DecodeTest, Cord) {
const Utf8DecodeTestCase& test_case = GetParam();
auto cord = absl::Cord(test_case.code_units);
auto it = cord.char_begin();
auto [code_point, code_units] = Utf8Decode(it);
absl::Cord::Advance(&it, code_units);
EXPECT_EQ(it, cord.char_end());
EXPECT_EQ(code_units, test_case.code_units.size())
<< absl::CHexEscape(test_case.code_units);
EXPECT_EQ(code_point, test_case.code_point)
<< absl::CHexEscape(test_case.code_units);
}
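// Splits `text` into single-byte pieces so tests can build a maximally
// fragmented Cord and exercise decoding across chunk boundaries.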
std::vector<std::string> FragmentString(absl::string_view text) {
std::vector<std::string> fragments;
fragments.reserve(text.size());
for (const auto& c : text) {
fragments.emplace_back().push_back(c);
}
return fragments;
}
TEST_P(Utf8DecodeTest, CordFragmented) {
const Utf8DecodeTestCase& test_case = GetParam();
auto cord = absl::MakeFragmentedCord(FragmentString(test_case.code_units));
auto it = cord.char_begin();
auto [code_point, code_units] = Utf8Decode(it);
absl::Cord::Advance(&it, code_units);
EXPECT_EQ(it, cord.char_end());
EXPECT_EQ(code_units, test_case.code_units.size())
<< absl::CHexEscape(test_case.code_units);
EXPECT_EQ(code_point, test_case.code_point)
<< absl::CHexEscape(test_case.code_units);
}
INSTANTIATE_TEST_SUITE_P(Utf8DecodeTest, Utf8DecodeTest,
testing::ValuesIn<Utf8DecodeTestCase>({
{0x0000, absl::string_view("\x00", 1)},
{0x0001, "\x01"},
{0x007e, "\x7e"},
{0x007f, "\x7f"},
{0x0080, "\xc2\x80"},
{0x0081, "\xc2\x81"},
{0x00bf, "\xc2\xbf"},
{0x00c0, "\xc3\x80"},
{0x00c1, "\xc3\x81"},
{0x00c8, "\xc3\x88"},
{0x00d0, "\xc3\x90"},
{0x00e0, "\xc3\xa0"},
{0x00f0, "\xc3\xb0"},
{0x00f8, "\xc3\xb8"},
{0x00ff, "\xc3\xbf"},
{0x0100, "\xc4\x80"},
{0x07ff, "\xdf\xbf"},
{0x0400, "\xd0\x80"},
{0x0800, "\xe0\xa0\x80"},
{0x0801, "\xe0\xa0\x81"},
{0x1000, "\xe1\x80\x80"},
{0xd000, "\xed\x80\x80"},
{0xd7ff, "\xed\x9f\xbf"},
{0xe000, "\xee\x80\x80"},
{0xfffe, "\xef\xbf\xbe"},
{0xffff, "\xef\xbf\xbf"},
{0x10000, "\xf0\x90\x80\x80"},
{0x10001, "\xf0\x90\x80\x81"},
{0x40000, "\xf1\x80\x80\x80"},
{0x10fffe, "\xf4\x8f\xbf\xbe"},
{0x10ffff, "\xf4\x8f\xbf\xbf"},
{0xFFFD, "\xef\xbf\xbd"},
}));
void BM_Utf8CodePointCount_String_AsciiTen(benchmark::State& state) {
for (auto s : state) {
benchmark::DoNotOptimize(Utf8CodePointCount("0123456789"));
}
}
BENCHMARK(BM_Utf8CodePointCount_String_AsciiTen);
void BM_Utf8CodePointCount_Cord_AsciiTen(benchmark::State& state) {
absl::Cord value("0123456789");
for (auto s : state) {
benchmark::DoNotOptimize(Utf8CodePointCount(value));
}
}
BENCHMARK(BM_Utf8CodePointCount_Cord_AsciiTen);
void BM_Utf8CodePointCount_String_JapaneseTen(benchmark::State& state) {
for (auto s : state) {
benchmark::DoNotOptimize(Utf8CodePointCount(
"\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa\x9e\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa"
"\x9e\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa\x9e\xe6\x97\xa5"));
}
}
BENCHMARK(BM_Utf8CodePointCount_String_JapaneseTen);
void BM_Utf8CodePointCount_Cord_JapaneseTen(benchmark::State& state) {
absl::Cord value(
"\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa\x9e\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa"
"\x9e\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa\x9e\xe6\x97\xa5");
for (auto s : state) {
benchmark::DoNotOptimize(Utf8CodePointCount(value));
}
}
BENCHMARK(BM_Utf8CodePointCount_Cord_JapaneseTen);
void BM_Utf8IsValid_String_AsciiTen(benchmark::State& state) {
for (auto s : state) {
benchmark::DoNotOptimize(Utf8IsValid("0123456789"));
}
}
BENCHMARK(BM_Utf8IsValid_String_AsciiTen);
void BM_Utf8IsValid_Cord_AsciiTen(benchmark::State& state) {
absl::Cord value("0123456789");
for (auto s : state) {
benchmark::DoNotOptimize(Utf8IsValid(value));
}
}
BENCHMARK(BM_Utf8IsValid_Cord_AsciiTen);
void BM_Utf8IsValid_String_JapaneseTen(benchmark::State& state) {
for (auto s : state) {
benchmark::DoNotOptimize(Utf8IsValid(
"\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa\x9e\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa"
"\x9e\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa\x9e\xe6\x97\xa5"));
}
}
BENCHMARK(BM_Utf8IsValid_String_JapaneseTen);
void BM_Utf8IsValid_Cord_JapaneseTen(benchmark::State& state) {
absl::Cord value(
"\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa\x9e\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa"
"\x9e\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa\x9e\xe6\x97\xa5");
for (auto s : state) {
benchmark::DoNotOptimize(Utf8IsValid(value));
}
}
BENCHMARK(BM_Utf8IsValid_Cord_JapaneseTen);
void BM_Utf8Validate_String_AsciiTen(benchmark::State& state) {
for (auto s : state) {
benchmark::DoNotOptimize(Utf8Validate("0123456789"));
}
}
BENCHMARK(BM_Utf8Validate_String_AsciiTen);
void BM_Utf8Validate_Cord_AsciiTen(benchmark::State& state) {
absl::Cord value("0123456789");
for (auto s : state) {
benchmark::DoNotOptimize(Utf8Validate(value));
}
}
BENCHMARK(BM_Utf8Validate_Cord_AsciiTen);
void BM_Utf8Validate_String_JapaneseTen(benchmark::State& state) {
for (auto s : state) {
benchmark::DoNotOptimize(Utf8Validate(
"\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa\x9e\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa"
"\x9e\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa\x9e\xe6\x97\xa5"));
}
}
BENCHMARK(BM_Utf8Validate_String_JapaneseTen);
void BM_Utf8Validate_Cord_JapaneseTen(benchmark::State& state) {
absl::Cord value(
"\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa\x9e\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa"
"\x9e\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa\x9e\xe6\x97\xa5");
for (auto s : state) {
benchmark::DoNotOptimize(Utf8Validate(value));
}
}
BENCHMARK(BM_Utf8Validate_Cord_JapaneseTen);
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/utf8.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/utf8_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
846b0ef6-bee7-4e12-ac28-585dca10c72c | cpp | google/arolla | operators | arolla/qexpr/operators.cc | arolla/qexpr/operators_test.cc | #include "arolla/qexpr/operators.h"
#include <algorithm>
#include <bitset>
#include <cstddef>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/no_destructor.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "arolla/memory/frame.h"
#include "arolla/qexpr/casting.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qexpr/operator_errors.h"
#include "arolla/qexpr/qexpr_operator_signature.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/operator_name.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
class CombinedOperatorFamily final : public OperatorFamily {
public:
explicit CombinedOperatorFamily(std::string name) : name_(std::move(name)) {}
absl::StatusOr<OperatorPtr> DoGetOperator(
absl::Span<const QTypePtr> input_types,
QTypePtr output_type) const final {
auto it = operators_.find(input_types);
if (it != operators_.end() &&
it->second.op->signature()->output_type() == output_type) {
return it->second.op;
}
ASSIGN_OR_RETURN(const QExprOperatorSignature* matching_signature,
FindMatchingSignature(input_types, output_type,
supported_signatures_, name_));
return operators_.at(matching_signature->input_types()).op;
}
absl::Status Insert(OperatorPtr op, size_t overwrite_priority) {
DCHECK_NE(op, nullptr);
auto* signature = op->signature();
auto& record = operators_[signature->input_types()];
if (overwrite_priority >= record.overwrite_priority_mask.size()) {
return absl::InvalidArgumentError(
absl::StrFormat("unable to register QExpr operator %s%s:"
" overwrite_priority=%d is out of range",
name_, FormatTypeVector(signature->input_types()),
overwrite_priority));
}
if (record.overwrite_priority_mask.test(overwrite_priority)) {
return absl::AlreadyExistsError(
absl::StrFormat("trying to register QExpr operator %s%s twice", name_,
FormatTypeVector(signature->input_types())));
}
record.overwrite_priority_mask.set(overwrite_priority);
if ((record.overwrite_priority_mask >> (overwrite_priority + 1)).any()) {
return absl::OkStatus();
}
if (record.op != nullptr) {
auto it = std::find(supported_signatures_.begin(),
supported_signatures_.end(), record.op->signature());
DCHECK(it != supported_signatures_.end());
*it = signature;
} else {
supported_signatures_.push_back(signature);
}
record.op = std::move(op);
return absl::OkStatus();
}
private:
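  // For each input-type signature, `op` is the operator registered with the
  // highest overwrite priority so far; the bitset records which priorities
  // have already been used.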
struct Record {
OperatorPtr op;
std::bitset<2> overwrite_priority_mask;
};
std::string name_;
absl::flat_hash_map<absl::Span<const QTypePtr>, Record> operators_;
std::vector<const QExprOperatorSignature*> supported_signatures_;
};
}
absl::Status OperatorRegistry::RegisterOperatorFamily(
absl::string_view name, std::unique_ptr<OperatorFamily> operation) {
absl::WriterMutexLock lock(&mutex_);
if (!IsOperatorName(name)) {
return absl::InvalidArgumentError(
absl::StrFormat("incorrect operator name \"%s\"", name));
}
auto inserted = families_.emplace(name, std::move(operation));
if (!inserted.second) {
return absl::Status(
absl::StatusCode::kAlreadyExists,
absl::StrFormat(
"trying to register non-static QExpr operator family %s twice",
name));
}
return absl::OkStatus();
}
absl::Status OperatorRegistry::RegisterOperator(absl::string_view name,
OperatorPtr op,
size_t overwrite_priority) {
if (!IsOperatorName(name)) {
return absl::InvalidArgumentError(
absl::StrFormat("incorrect operator name \"%s\"", name));
}
absl::WriterMutexLock lock(&mutex_);
auto& family = families_[name];
if (family == nullptr) {
family = std::make_unique<CombinedOperatorFamily>(std::string(name));
}
auto* combined_family = dynamic_cast<CombinedOperatorFamily*>(family.get());
if (combined_family == nullptr) {
return absl::AlreadyExistsError(
absl::StrFormat("trying to register a single QExpr operator and an "
"operator family under the same name %s",
name));
}
return combined_family->Insert(std::move(op), overwrite_priority);
}
std::vector<std::string> OperatorRegistry::ListRegisteredOperators() {
absl::ReaderMutexLock lock(&mutex_);
std::vector<std::string> names;
names.reserve(families_.size());
for (const auto& [name, family] : families_) {
names.push_back(name);
}
return names;
}
absl::StatusOr<const OperatorFamily*> OperatorRegistry::LookupOperatorFamily(
absl::string_view name) const {
absl::ReaderMutexLock lock(&mutex_);
auto iter = families_.find(name);
if (iter == families_.end()) {
return absl::Status(absl::StatusCode::kNotFound,
absl::StrFormat("QExpr operator %s not found; %s", name,
SuggestMissingDependency()));
}
return iter->second.get();
}
absl::StatusOr<OperatorPtr> OperatorRegistry::DoLookupOperator(
absl::string_view name, absl::Span<const QTypePtr> input_types,
QTypePtr output_type) const {
ASSIGN_OR_RETURN(auto family, LookupOperatorFamily(name));
return family->GetOperator(input_types, output_type);
}
OperatorRegistry* OperatorRegistry::GetInstance() {
static absl::NoDestructor<OperatorRegistry> instance;
return instance.get();
}
namespace {
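// Bundles an operator bound to a freshly built frame layout together with the
// slots it reads and writes (see BindToNewLayout below).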
struct BoundOperatorState {
std::unique_ptr<BoundOperator> op;
std::vector<TypedSlot> input_slots;
TypedSlot output_slot;
FrameLayout layout;
};
absl::StatusOr<BoundOperatorState> BindToNewLayout(const QExprOperator& op) {
FrameLayout::Builder layout_builder;
auto input_slots = AddSlots(op.signature()->input_types(), &layout_builder);
auto output_slot = AddSlot(op.signature()->output_type(), &layout_builder);
ASSIGN_OR_RETURN(auto bound_op, op.Bind(input_slots, output_slot));
return BoundOperatorState{std::move(bound_op), input_slots, output_slot,
std::move(layout_builder).Build()};
}
absl::Status VerifyOperatorSlots(const QExprOperator& op,
absl::Span<const TypedSlot> input_slots,
TypedSlot output_slot) {
auto signature = op.signature();
RETURN_IF_ERROR(VerifyInputSlotTypes(input_slots, signature->input_types()));
return VerifyOutputSlotType(output_slot, signature->output_type());
}
}
absl::StatusOr<OperatorPtr> EnsureOutputQTypeMatches(
absl::StatusOr<OperatorPtr> op_or, absl::Span<const QTypePtr> input_types,
QTypePtr output_type) {
ASSIGN_OR_RETURN(auto op, op_or);
if (op->signature()->output_type() != output_type) {
return absl::Status(
absl::StatusCode::kNotFound,
absl::StrFormat("unexpected output type for arguments %s: requested "
"%s, available %s",
FormatTypeVector(input_types), output_type->name(),
op->signature()->output_type()->name()));
}
return op;
}
absl::StatusOr<TypedValue> InvokeOperator(const QExprOperator& op,
absl::Span<const TypedValue> args) {
RETURN_IF_ERROR(VerifyInputValueTypes(args, op.signature()->input_types()));
ASSIGN_OR_RETURN(auto bound, BindToNewLayout(op));
RootEvaluationContext root_ctx(&bound.layout);
for (size_t i = 0; i < args.size(); ++i) {
RETURN_IF_ERROR(args[i].CopyToSlot(bound.input_slots[i], root_ctx.frame()));
}
EvaluationContext ctx(root_ctx);
bound.op->Run(&ctx, root_ctx.frame());
if (!ctx.status().ok()) {
return std::move(ctx).status();
}
return TypedValue::FromSlot(bound.output_slot, root_ctx.frame());
}
absl::StatusOr<TypedValue> InvokeOperator(absl::string_view op_name,
absl::Span<const TypedValue> args,
QTypePtr output_qtype) {
std::vector<QTypePtr> arg_types;
arg_types.reserve(args.size());
for (const auto& arg : args) {
arg_types.push_back(arg.GetType());
}
ASSIGN_OR_RETURN(auto op, OperatorRegistry::GetInstance()->LookupOperator(
op_name, arg_types, output_qtype));
return InvokeOperator(*op, args);
}
absl::StatusOr<std::unique_ptr<BoundOperator>> QExprOperator::Bind(
absl::Span<const TypedSlot> input_slots, TypedSlot output_slot) const {
RETURN_IF_ERROR(VerifyOperatorSlots(*this, input_slots, output_slot));
return DoBind(input_slots, output_slot);
}
} | #include "arolla/qexpr/operators.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "arolla/codegen/qexpr/testing/test_operators.h"
#include "arolla/memory/frame.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qexpr/qexpr_operator_signature.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/qtype/typed_value.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::ContainsRegex;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::Property;
TEST(OperatorsTest, LookupTestOperator) {
QTypePtr f32_type = GetQType<float>();
auto op = OperatorRegistry::GetInstance()
->LookupOperator("test.add", {f32_type, f32_type}, f32_type)
.value();
EXPECT_EQ(op->signature(),
QExprOperatorSignature::Get({f32_type, f32_type}, f32_type));
FrameLayout::Builder layout_builder;
auto arg1_slot = layout_builder.AddSlot<float>();
auto arg2_slot = layout_builder.AddSlot<float>();
auto result_slot = layout_builder.AddSlot<float>();
auto bound_op = op->Bind(ToTypedSlots(arg1_slot, arg2_slot),
TypedSlot::FromSlot(result_slot))
.value();
FrameLayout memory_layout = std::move(layout_builder).Build();
RootEvaluationContext root_ctx(&memory_layout);
EvaluationContext ctx(root_ctx);
root_ctx.Set(arg1_slot, 2.0f);
root_ctx.Set(arg2_slot, 3.0f);
bound_op->Run(&ctx, root_ctx.frame());
EXPECT_OK(ctx.status());
float result = root_ctx.Get(result_slot);
EXPECT_THAT(result, Eq(5.0f));
}
TEST(OperatorsTest, LookupOperator_WithOutputType) {
QTypePtr f32_type = GetQType<float>();
auto op_float =
OperatorRegistry::GetInstance()
->LookupOperator("test.add", {f32_type, f32_type}, f32_type)
.value();
EXPECT_EQ(op_float->signature(),
QExprOperatorSignature::Get({f32_type, f32_type}, f32_type));
QTypePtr f64_type = GetQType<float>();
auto op_double =
OperatorRegistry::GetInstance()
->LookupOperator("test.add", {f32_type, f32_type}, f64_type)
.value();
EXPECT_EQ(op_double->signature(),
QExprOperatorSignature::Get({f64_type, f64_type}, f64_type));
EXPECT_THAT(
OperatorRegistry::GetInstance()->LookupOperator(
"test.add", {f32_type, f32_type}, GetQType<int32_t>()),
StatusIs(
absl::StatusCode::kNotFound,
HasSubstr(
"QExpr operator test.add(FLOAT32,FLOAT32)->INT32 not found")));
}
TEST(OperatorsTest, Bind) {
QTypePtr float_type = GetQType<float>();
auto op =
OperatorRegistry::GetInstance()
->LookupOperator("test.add", {float_type, float_type}, float_type)
.value();
EXPECT_EQ(op->signature(),
QExprOperatorSignature::Get({float_type, float_type}, float_type));
FrameLayout::Builder layout_builder;
auto arg1_slot = layout_builder.AddSlot<float>();
auto arg2_slot = layout_builder.AddSlot<float>();
auto double_slot = layout_builder.AddSlot<double>();
auto result_slot = layout_builder.AddSlot<float>();
auto bound_op = op->Bind(ToTypedSlots(arg1_slot, arg2_slot),
TypedSlot::FromSlot(result_slot))
.value();
EXPECT_THAT(
op->Bind(ToTypedSlots(arg1_slot), TypedSlot::FromSlot(result_slot)),
StatusIs(
absl::StatusCode::kFailedPrecondition,
"incorrect input types: expected (FLOAT32,FLOAT32), got (FLOAT32)"));
EXPECT_THAT(op->Bind(ToTypedSlots(arg1_slot, double_slot),
TypedSlot::FromSlot(result_slot)),
StatusIs(absl::StatusCode::kFailedPrecondition,
"incorrect input types: expected (FLOAT32,FLOAT32), got "
"(FLOAT32,FLOAT64)"));
EXPECT_THAT(
op->Bind(ToTypedSlots(arg1_slot, arg2_slot),
TypedSlot::FromSlot(double_slot)),
StatusIs(absl::StatusCode::kFailedPrecondition,
"incorrect output types: expected (FLOAT32), got (FLOAT64)"));
FrameLayout memory_layout = std::move(layout_builder).Build();
RootEvaluationContext root_ctx(&memory_layout);
EvaluationContext ctx(root_ctx);
root_ctx.Set(arg1_slot, 2.0f);
root_ctx.Set(arg2_slot, 3.0f);
bound_op->Run(&ctx, root_ctx.frame());
EXPECT_OK(ctx.status());
float result = root_ctx.Get(result_slot);
EXPECT_THAT(result, Eq(5.0f));
}
TEST(OperatorsTest, TestUserDefinedDataType) {
QTypePtr f64_type = GetQType<double>();
QTypePtr v3_type = GetQType<testing::Vector3<double>>();
auto op1 = OperatorRegistry::GetInstance()
->LookupOperator("test.vector3",
{f64_type, f64_type, f64_type}, v3_type)
.value();
EXPECT_EQ(op1->signature(), QExprOperatorSignature::Get(
{f64_type, f64_type, f64_type}, v3_type));
auto op2 = OperatorRegistry::GetInstance()
->LookupOperator("test.dot_prod", {v3_type, v3_type}, f64_type)
.value();
EXPECT_EQ(op2->signature(),
QExprOperatorSignature::Get({v3_type, v3_type}, f64_type));
FrameLayout::Builder layout_builder;
auto x_slot = layout_builder.AddSlot<double>();
auto y_slot = layout_builder.AddSlot<double>();
auto z_slot = layout_builder.AddSlot<double>();
auto v_slot = layout_builder.AddSlot<testing::Vector3<double>>();
auto v_typed_slot = TypedSlot::FromSlot(v_slot, v3_type);
auto result_slot = layout_builder.AddSlot<double>();
auto bound_op1 =
op1->Bind(ToTypedSlots(x_slot, y_slot, z_slot), v_typed_slot).value();
auto bound_op2 =
op2->Bind({v_typed_slot, v_typed_slot}, TypedSlot::FromSlot(result_slot))
.value();
FrameLayout memory_layout = std::move(layout_builder).Build();
RootEvaluationContext root_ctx(&memory_layout);
EvaluationContext ctx(root_ctx);
root_ctx.Set(x_slot, 3.0);
root_ctx.Set(y_slot, 4.0);
root_ctx.Set(z_slot, 5.0);
bound_op1->Run(&ctx, root_ctx.frame());
EXPECT_OK(ctx.status());
bound_op2->Run(&ctx, root_ctx.frame());
EXPECT_OK(ctx.status());
double result = root_ctx.Get(result_slot);
EXPECT_THAT(result, Eq(50.0));
}
TEST(OperatorsTest, OperatorNotFound) {
auto error = OperatorRegistry::GetInstance()->LookupOperator(
"test.halts", {}, GetQType<int64_t>());
EXPECT_THAT(error, StatusIs(absl::StatusCode::kNotFound,
ContainsRegex(
"QExpr operator test.halts not found; adding "
"\".*\" build dependency may help")));
}
TEST(OperatorsTest, OperatorOverloadNotFound) {
QTypePtr bool_type = GetQType<bool>();
QTypePtr float_type = GetQType<float>();
EXPECT_THAT(
OperatorRegistry::GetInstance()->LookupOperator(
"test.add", {bool_type, float_type}, float_type),
StatusIs(
absl::StatusCode::kNotFound,
ContainsRegex("QExpr operator test.add\\(BOOLEAN,FLOAT32\\)->FLOAT32 "
"not found; adding \".*\" build dependency may help")));
}
TEST(OperatorsTest, InvokeOperator) {
ASSERT_OK_AND_ASSIGN(
auto mul_op, OperatorRegistry::GetInstance()->LookupOperator(
"test.mul", {GetQType<int64_t>(), GetQType<int64_t>()},
GetQType<int64_t>()));
EXPECT_THAT(
InvokeOperator(*mul_op, {TypedValue::FromValue(int64_t{3}),
TypedValue::FromValue(int64_t{19})}),
IsOkAndHolds(Property(&TypedValue::As<int64_t>, IsOkAndHolds(Eq(57)))));
EXPECT_THAT(InvokeOperator(*mul_op, {TypedValue::FromValue(3.0),
TypedValue::FromValue(int64_t{19})}),
StatusIs(absl::StatusCode::kFailedPrecondition,
"incorrect input types: expected (INT64,INT64), got "
"(FLOAT64,INT64)"));
EXPECT_THAT(InvokeOperator<int64_t>(*mul_op, int64_t{3}, int64_t{19}),
IsOkAndHolds(Eq(57)));
EXPECT_THAT(InvokeOperator<int32_t>(*mul_op, int64_t{3}, int64_t{19}),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("type mismatch")));
}
TEST(OperatorsTest, InvokeOperatorWithLookup) {
EXPECT_THAT(
InvokeOperator("test.mul",
{TypedValue::FromValue(int64_t{3}),
TypedValue::FromValue(int64_t{19})},
GetQType<int64_t>()),
IsOkAndHolds(Property(&TypedValue::As<int64_t>, IsOkAndHolds(Eq(57)))));
EXPECT_THAT(
InvokeOperator(
"test.mul",
{TypedValue::FromValue(3.0), TypedValue::FromValue(int64_t{19})},
GetQType<int64_t>()),
StatusIs(absl::StatusCode::kNotFound,
HasSubstr(
"QExpr operator test.mul(FLOAT64,INT64)->INT64 not found")));
EXPECT_THAT(InvokeOperator<int64_t>("test.mul", int64_t{3}, int64_t{19}),
IsOkAndHolds(Eq(57)));
}
TEST(OperatorsTest, QExprOperatorSignatureTypeAndName) {
auto i32 = GetQType<int32_t>();
auto f64 = GetQType<double>();
auto fn = QExprOperatorSignature::Get({i32}, f64);
EXPECT_EQ(absl::StrCat(fn), "(INT32)->FLOAT64");
}
TEST(OperatorsTest, GetQExprOperatorSignature) {
auto i32 = GetQType<int32_t>();
auto f64 = GetQType<double>();
const QExprOperatorSignature* fn = QExprOperatorSignature::Get({i32}, f64);
EXPECT_THAT(fn->input_types(), ElementsAre(i32));
EXPECT_THAT(fn->output_type(), Eq(f64));
}
TEST(OperatorsTest, QExprOperatorSignatureInputsAreStored) {
auto i32 = GetQType<int32_t>();
std::vector<QTypePtr> types(100, i32);
auto fn_type = QExprOperatorSignature::Get(types, i32);
auto f64 = GetQType<double>();
std::fill(types.begin(), types.end(), f64);
std::vector<QTypePtr> types2(100, i32);
auto fn2_type = QExprOperatorSignature::Get(types2, i32);
ASSERT_EQ(fn_type, fn2_type);
}
TEST(OperatorsTest, QExprOperatorSignatureSingleton) {
auto i32 = GetQType<int32_t>();
auto f64 = GetQType<double>();
EXPECT_TRUE(QExprOperatorSignature::Get({f64}, i32) ==
QExprOperatorSignature::Get({f64}, i32));
auto get_complex_fn = [&]() {
return QExprOperatorSignature::Get({f64, i32, MakeTupleQType({f64, i32})},
MakeTupleQType({f64, i32, f64}));
};
EXPECT_TRUE(get_complex_fn() == get_complex_fn());
}
class DummyQExprOperator final : public QExprOperator {
public:
using QExprOperator::QExprOperator;
private:
absl::StatusOr<std::unique_ptr<BoundOperator>> DoBind(
absl::Span<const TypedSlot> input_slots,
TypedSlot output_slot) const final {
return absl::UnimplementedError("unimplemented");
}
};
TEST(OperatorsTest, RegisterOperatorWithHigherPriority) {
const std::string op_name = "test_register_operator_with_higher_priority.op";
const auto f32 = GetQType<float>();
const auto f64 = GetQType<double>();
auto op1 = std::make_shared<DummyQExprOperator>(
QExprOperatorSignature::Get({}, f32));
auto op2 = std::make_shared<DummyQExprOperator>(
QExprOperatorSignature::Get({}, f64));
auto& registry = *OperatorRegistry::GetInstance();
ASSERT_OK(registry.RegisterOperator(op_name, op1, 0));
ASSERT_THAT(registry.LookupOperator(op_name, {}, f32), IsOkAndHolds(op1));
ASSERT_THAT(registry.LookupOperator(op_name, {}, f64),
StatusIs(absl::StatusCode::kNotFound));
ASSERT_OK(registry.RegisterOperator(op_name, op2, 1));
ASSERT_THAT(registry.LookupOperator(op_name, {}, f32),
StatusIs(absl::StatusCode::kNotFound));
ASSERT_THAT(registry.LookupOperator(op_name, {}, f64), IsOkAndHolds(op2));
}
TEST(OperatorsTest, RegisterOperatorWithLowerPriority) {
const std::string op_name = "test_register_operator_with_lower_priority.op";
const auto f32 = GetQType<float>();
const auto f64 = GetQType<double>();
auto op1 = std::make_shared<DummyQExprOperator>(
QExprOperatorSignature::Get({}, f32));
auto op2 = std::make_shared<DummyQExprOperator>(
QExprOperatorSignature::Get({}, f64));
auto& registry = *OperatorRegistry::GetInstance();
ASSERT_OK(registry.RegisterOperator(op_name, op1, 1));
ASSERT_THAT(registry.LookupOperator(op_name, {}, f32), IsOkAndHolds(op1));
ASSERT_THAT(registry.LookupOperator(op_name, {}, f64),
StatusIs(absl::StatusCode::kNotFound));
ASSERT_OK(registry.RegisterOperator(op_name, op2, 0));
ASSERT_THAT(registry.LookupOperator(op_name, {}, f32), IsOkAndHolds(op1));
ASSERT_THAT(registry.LookupOperator(op_name, {}, f64),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(OperatorsTest, RegisterOperatorAlreadyExists) {
const std::string op_name = "test_register_operator_already_exisits.op";
const auto f32 = GetQType<float>();
auto op = std::make_shared<DummyQExprOperator>(
QExprOperatorSignature::Get({}, f32));
auto& registry = *OperatorRegistry::GetInstance();
ASSERT_OK(registry.RegisterOperator(op_name, op, 1));
ASSERT_THAT(registry.RegisterOperator(op_name, op, 1),
StatusIs(absl::StatusCode::kAlreadyExists));
ASSERT_OK(registry.RegisterOperator(op_name, op, 0));
ASSERT_THAT(registry.RegisterOperator(op_name, op, 0),
StatusIs(absl::StatusCode::kAlreadyExists));
}
TEST(OperatorsTest, RegisterOperatorBadName) {
const std::string op_name = "123name";
const auto f32 = GetQType<float>();
auto op = std::make_shared<DummyQExprOperator>(
QExprOperatorSignature::Get({}, f32));
auto& registry = *OperatorRegistry::GetInstance();
EXPECT_THAT(registry.RegisterOperator(op_name, op, 0),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("incorrect operator name")));
}
TEST(OperatorsTest, RegisterOperatorPriorityOutOfRange) {
const std::string op_name = "test_register_operator_priority_out_or_range.op";
const auto f32 = GetQType<float>();
auto op = std::make_shared<DummyQExprOperator>(
QExprOperatorSignature::Get({}, f32));
auto& registry = *OperatorRegistry::GetInstance();
ASSERT_THAT(registry.RegisterOperator(op_name, op, 2),
StatusIs(absl::StatusCode::kInvalidArgument));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/operators.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/operators_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
b29a0a1c-13ca-4fde-bdbc-0fdc345e1ae1 | cpp | tensorflow/tensorflow | tensor_utils | tensorflow/lite/core/api/tensor_utils.cc | tensorflow/lite/kernels/internal/tensor_utils_test.cc | #include "tensorflow/lite/core/api/tensor_utils.h"
#include <string.h>
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor) {
if (!tensor->is_variable) {
return kTfLiteOk;
}
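  // Reset to the representation of zero: plain 0 for most types, the
  // quantization zero point for int8 tensors.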
int value = 0;
if (tensor->type == kTfLiteInt8) {
value = tensor->params.zero_point;
}
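  // Use memset on the platforms listed below; elsewhere fall back to an
  // explicit byte loop.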
#if __ANDROID__ || defined(__x86_64__) || defined(__i386__) || \
defined(__i386) || defined(__x86__) || defined(__X86__) || \
defined(_X86_) || defined(_M_IX86) || defined(_M_X64)
memset(tensor->data.raw, value, tensor->bytes);
#else
char* raw_ptr = tensor->data.raw;
for (size_t i = 0; i < tensor->bytes; ++i) {
*raw_ptr = value;
raw_ptr++;
}
#endif
return kTfLiteOk;
}
} | #include "tensorflow/lite/kernels/internal/tensor_utils.h"
#include <math.h>
#include <algorithm>
#include <gmock/gmock.h>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/test_util.h"
#ifdef DOTPROD_BENCHMARKS
#include "testing/base/public/benchmark.h"
#endif
namespace tflite {
namespace tensor_utils {
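// Checks that the per-element and accumulated differences between the expected
// and actual results stay within the given tolerances.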
template <typename T>
void CompareRoundingResults(int flat_size, const T* expected_result,
const T* real_result, int max_element_tolerance = 1,
int max_total_tolerance = 5) {
int max_diff = 0;
int64_t total_diff = 0;
for (int i = 0; i < flat_size; i++) {
int diff = static_cast<int>(std::abs(expected_result[i] - real_result[i]));
total_diff += diff;
max_diff = std::max(max_diff, diff);
}
EXPECT_LE(max_diff, max_element_tolerance);
EXPECT_LE(total_diff, max_total_tolerance);
}
TEST(uKernels, FloorLog2Test) {
for (int i = 1; i < 257; ++i) {
EXPECT_EQ(::tflite::FloorLog2(i),
static_cast<int>(std::floor(std::log2(i))));
}
}
TEST(uKernels, VectorScalarMultiply) {
constexpr int kVectorSize = 29;
static int8_t input[kVectorSize];
for (int i = 0; i < 29; ++i) {
input[i] = static_cast<int8_t>(i - 14);
}
const float scale = 0.1f;
std::vector<float> output(kVectorSize, 0.0f);
VectorScalarMultiply(input, kVectorSize, scale, output.data());
EXPECT_THAT(output,
ElementsAreArray(ArrayFloatNear(
{-1.4, -1.3, -1.2, -1.1, -1.0, -0.9, -0.8, -0.7, -0.6, -0.5,
-0.4, -0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3, 0.4, 0.5,
0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4})));
}
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
TEST(uKernels, IsZeroFloatTest) {
{
const float four_zeros[4] = {0, 0, 0, 0};
EXPECT_TRUE(IsZeroVector(four_zeros, ARRAY_SIZE(four_zeros)));
}
{
const float four_nonzeros[4] = {1, 2, 3, 4};
EXPECT_FALSE(IsZeroVector(four_nonzeros, ARRAY_SIZE(four_nonzeros)));
}
{
const float eight_zeros[8] = {0, 0, 0, 0, 0, 0, 0, 0};
EXPECT_TRUE(IsZeroVector(eight_zeros, ARRAY_SIZE(eight_zeros)));
}
{
const float eight_nonzeros[8] = {1, 2, 3, 4, 5, 6, 7, 8};
EXPECT_FALSE(IsZeroVector(eight_nonzeros, ARRAY_SIZE(eight_nonzeros)));
}
{
const float multiple_four_mixed1[8] = {0, 0, 0, 0, 5, 6, 7, 8};
EXPECT_FALSE(
IsZeroVector(multiple_four_mixed1, ARRAY_SIZE(multiple_four_mixed1)));
}
{
const float multiple_four_mixed2[8] = {1, 2, 3, 4, 0, 0, 0, 0};
EXPECT_FALSE(
IsZeroVector(multiple_four_mixed2, ARRAY_SIZE(multiple_four_mixed2)));
}
{
const float three_zeros[3] = {0, 0, 0};
EXPECT_TRUE(IsZeroVector(three_zeros, ARRAY_SIZE(three_zeros)));
}
{
const float three_nonzeros[3] = {1, 2, 3};
EXPECT_FALSE(IsZeroVector(three_nonzeros, ARRAY_SIZE(three_nonzeros)));
}
{
const float three_mixed[3] = {1, 0, 3};
EXPECT_FALSE(IsZeroVector(three_mixed, ARRAY_SIZE(three_mixed)));
}
{
const float seven_zeros[7] = {0, 0, 0, 0, 0, 0, 0};
EXPECT_TRUE(IsZeroVector(seven_zeros, ARRAY_SIZE(seven_zeros)));
}
{
const float seven_nonzeros[7] = {1, 2, 3, 4, 5, 6, 7};
EXPECT_FALSE(IsZeroVector(seven_nonzeros, ARRAY_SIZE(seven_nonzeros)));
}
{
const float nonzeros_after_zeros[7] = {0, 0, 0, 0, 5, 6, 7};
EXPECT_FALSE(
IsZeroVector(nonzeros_after_zeros, ARRAY_SIZE(nonzeros_after_zeros)));
}
}
TEST(uKernels, IsZeroInt8Test) {
{
const int8_t sixteen_zeros[16] = {0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0};
EXPECT_TRUE(IsZeroVector(sixteen_zeros, ARRAY_SIZE(sixteen_zeros)));
}
{
const int8_t sixteen_nonzeros[16] = {1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16};
EXPECT_FALSE(IsZeroVector(sixteen_nonzeros, ARRAY_SIZE(sixteen_nonzeros)));
}
{
    const int8_t thirtytwo_zeros[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    EXPECT_TRUE(IsZeroVector(thirtytwo_zeros, ARRAY_SIZE(thirtytwo_zeros)));
}
{
    const int8_t thirtytwo_nonzeros[32] = {
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
EXPECT_FALSE(
        IsZeroVector(thirtytwo_nonzeros, ARRAY_SIZE(thirtytwo_nonzeros)));
}
{
    const int8_t thirtytwo_mixed1[32] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    EXPECT_FALSE(IsZeroVector(thirtytwo_mixed1, ARRAY_SIZE(thirtytwo_mixed1)));
}
{
    const int8_t thirtytwo_mixed2[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
    EXPECT_FALSE(IsZeroVector(thirtytwo_mixed2, ARRAY_SIZE(thirtytwo_mixed2)));
}
{
const int8_t fifteen_zeros[15] = {0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0};
EXPECT_TRUE(IsZeroVector(fifteen_zeros, ARRAY_SIZE(fifteen_zeros)));
}
{
const int8_t fifteen_nonzeros[15] = {1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15};
EXPECT_FALSE(IsZeroVector(fifteen_nonzeros, ARRAY_SIZE(fifteen_nonzeros)));
}
{
const int8_t fifteen_mixed[15] = {1, 0, 3, 0, 5, 0, 7, 0,
9, 0, 11, 0, 13, 0, 15};
EXPECT_FALSE(IsZeroVector(fifteen_mixed, ARRAY_SIZE(fifteen_mixed)));
}
{
const int8_t seventeen_zeros[17] = {0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0};
EXPECT_TRUE(IsZeroVector(seventeen_zeros, ARRAY_SIZE(seventeen_zeros)));
}
{
const int8_t seventeen_nonzeros[17] = {1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17};
EXPECT_FALSE(
IsZeroVector(seventeen_nonzeros, ARRAY_SIZE(seventeen_nonzeros)));
}
{
const int8_t nonzeros_after_zeros[17] = {0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 17};
EXPECT_FALSE(
IsZeroVector(nonzeros_after_zeros, ARRAY_SIZE(nonzeros_after_zeros)));
}
}
#undef ARRAY_SIZE
TEST(uKernels, SymmetricQuantizeFloatsTest) {
constexpr int kVectorSize = 9;
static float input[kVectorSize] = {-640, -635.0, -630, 10.0, 2.0,
-5.0, -10.0, 0.0, 1000.0};
int8_t output[kVectorSize];
float min, max, scaling_factor;
SymmetricQuantizeFloats(input, kVectorSize, output, &min, &max,
&scaling_factor);
EXPECT_EQ(min, -640);
EXPECT_EQ(max, 1000);
EXPECT_NEAR(scaling_factor, 1000 / 127.0, 1e-6);
EXPECT_THAT(output,
testing::ElementsAreArray({-81, -81, -80, 1, 0, -1, -1, 0, 127}));
}
TEST(uKernels, SymmetricQuantizeFloatsAllZerosTest) {
constexpr int kVectorSize = 9;
static float input[kVectorSize] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
int8_t output[kVectorSize];
float min, max, scaling_factor;
SymmetricQuantizeFloats(input, kVectorSize, output, &min, &max,
&scaling_factor);
EXPECT_EQ(min, 0);
EXPECT_EQ(max, 0);
EXPECT_EQ(scaling_factor, 1);
EXPECT_THAT(output, testing::ElementsAreArray({0, 0, 0, 0, 0, 0, 0, 0, 0}));
}
TEST(uKernels, SymmetricQuantizeFloatsAllAlmostZeroTest) {
constexpr int kVectorSize = 9;
static float input[kVectorSize] = {-1e-5, 3e-5, -7e-6, -9e-5, 1e-6,
4e-5, 9e-6, 2e-4, 0};
int8_t output[kVectorSize];
float min, max, scaling_factor;
SymmetricQuantizeFloats(input, kVectorSize, output, &min, &max,
&scaling_factor);
EXPECT_NEAR(min, -9e-05, 1e-6);
EXPECT_NEAR(max, 0.0002, 1e-6);
EXPECT_NEAR(scaling_factor, 1.57e-6, 1e-6);
EXPECT_THAT(output,
testing::ElementsAreArray({-6, 19, -4, -57, 1, 25, 6, 127, 0}));
}
TEST(uKernels, AsymmetricQuantizeFloatsTest) {
constexpr int kVectorSize = 9;
static float input[kVectorSize] = {-640, -635.0, -630, 10.0, 2.0,
-5.0, -10.0, 0.0, 1000.0};
int8_t output[kVectorSize];
double min = -640.0;
double max = 1000.0;
QuantizationParams quantization_params =
ChooseQuantizationParams<int8_t>(min, max);
float scale = quantization_params.scale;
int32_t offset = quantization_params.zero_point;
float test_scale;
int32_t test_offset;
AsymmetricQuantizeFloats(input, kVectorSize, output, &test_scale,
&test_offset);
EXPECT_NEAR(test_scale, scale, 1e-6);
EXPECT_EQ(test_offset, offset);
EXPECT_THAT(output, testing::ElementsAreArray(
{-128, -127, -126, -26, -28, -29, -30, -28, 127}));
}
TEST(uKernels, AsymmetricQuantizeFloatsAllZerosTest) {
constexpr int kVectorSize = 9;
static float input[kVectorSize] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
int8_t output[kVectorSize];
float test_scale;
int32_t test_offset;
AsymmetricQuantizeFloats(input, kVectorSize, output, &test_scale,
&test_offset);
EXPECT_EQ(test_scale, 1);
EXPECT_EQ(test_offset, 0);
EXPECT_THAT(output, testing::ElementsAreArray({0, 0, 0, 0, 0, 0, 0, 0, 0}));
}
TEST(uKernels, AsymmetricQuantizeFloatsZeroRangeTest) {
constexpr int kVectorSize = 9;
static float input[kVectorSize] = {2000, 2000, 2000, 2000, 2000,
2000, 2000, 2000, 2000};
int8_t output[kVectorSize];
double min = 0;
double max = 2000;
QuantizationParams quantization_params =
ChooseQuantizationParams<int8_t>(min, max);
int32_t offset = quantization_params.zero_point;
float scale = quantization_params.scale;
float test_scale;
int32_t test_offset;
AsymmetricQuantizeFloats(input, kVectorSize, output, &test_scale,
&test_offset);
EXPECT_NEAR(test_scale, scale, 1e-6);
EXPECT_EQ(test_offset, offset);
EXPECT_THAT(output, testing::ElementsAreArray(
{127, 127, 127, 127, 127, 127, 127, 127, 127}));
}
TEST(uKernels, AsymmetricQuantizeFloatsAllAlmostZeroTest) {
constexpr int kVectorSize = 9;
static float input[kVectorSize] = {-1e-5, 3e-5, -7e-6, -9e-5, 1e-6,
4e-5, 9e-6, 2e-4, 0};
int8_t output[kVectorSize];
double min = -9e-05;
double max = 0.0002;
QuantizationParams quantization_params =
ChooseQuantizationParams<int8_t>(min, max);
int32_t offset = quantization_params.zero_point;
float scale = quantization_params.scale;
float test_scale;
int32_t test_offset;
AsymmetricQuantizeFloats(input, kVectorSize, output, &test_scale,
&test_offset);
EXPECT_NEAR(test_scale, scale, 1e-6);
EXPECT_EQ(test_offset, offset);
EXPECT_THAT(output, testing::ElementsAreArray(
{-58, -23, -55, -128, -48, -14, -41, 127, -49}));
}
TEST(uKernels, MatrixBatchVectorMultiplyAccumulateTest) {
constexpr int kRow = 3;
constexpr int kCol = 4;
constexpr int kBatch = 2;
static float matrix[kRow * kCol] = {1.0, 2.0, 3.0, 4.0,
-1.0, -2.0, -3.0, -4.0,
1.0, -2.0, 3.0, -4.0};
static float vector[kCol * kBatch] = {1.0, -1.0, 1.0, -1.0,
2.0, -2.0, 2.0, -2.0};
std::vector<float> output(kRow * kBatch);
std::fill(output.begin(), output.end(), 3.0);
MatrixBatchVectorMultiplyAccumulate(matrix, kRow, kCol, vector, kBatch,
output.data());
EXPECT_THAT(output, ElementsAreArray(ArrayFloatNear({1., 5., 13.,
-1., 7., 23.})));
}
TEST(uKernels, QuantMatrixBatchVectorMultiplyAccumulate8x8_16Test) {
CpuBackendContext context;
const std::vector<int8_t> input = {
4, -41, 5, -41, 22, 17, -30, 24, 13, -47, 18, 9, -11, -30, 16,
-47, 12, 36, -20, 27, -3, 0, -51, -31, 3, -8, -38, 43, 23, 12,
11, -23, -26, 23, 14, -9, -44, 22, 21, -30, 3, -47, -26, -21, -24,
-44, 34, -11, -23, -28, 26, -38, 19, 35, 9, 23, 6, -42, -25, 28,
};
const std::vector<int32_t> input_zeropoint_times_weights = {
-620, -170, -395, 715, -1220, -1080, 1130, -260, -470,
};
const std::vector<int8_t> input_to_gate_weights = {
-10, -4, -8, 16, 4, -16, -1, 11, 1, 2, -25, 19, 7, 9, 2,
-24, -2, 10, -7, 7, -5, -2, 3, 4, 3, -4, -7, -11, -13, -18,
11, 10, 12, -9, 17, -15, -5, 20, -6, -11, 2, -6, -18, 15, 4,
4, -9, -2, -3, -9, -13, 17, -21, 5, 3, -12, 0, -4, 9, -5,
10, -2, 8, 1, -10, -6, 1, -9, 10, 11, -1, -5, 4, -7, -4,
-4, 4, 12, -7, -5, -9, -19, 6, -4, 12, -17, -22, 0, 9, -4,
-5, 5, -8, 8, 3, 15, -18, -18, 5, 3, -12, 5, -10, 7, 7,
-9, 17, 2, -11, -25, 3, 19, -6, 7, 1, 7, 5, -3, 11, 3,
0, -8, 8, -2, -2, -12, 14, -5, 7, 8, 16, 20, -16, -5, -5,
1, -10, -6, 14, 10, -12, 10, -6, 5, 0, 3, 8, -9, -13, -2,
4, 4, -16, -17, -9, 16, -5, 14, -9, -5, -12, 0, 17, 6, -1,
16, -20, 1, -11, -1, -10, -21, 13, 4, -12, -7, 0, -14, -6, 3,
-4, 6, -18, -3, -1, 14, -8, -6, -15, 5, 12, -3, -10, 4, 6,
-5, -20, 0, 3, -3, -7, 1, 2, -10, 7, -3, 6, 1, -12, 6,
4, -12, 2, 6, -20, 0, 5, 23, 15, 14, 9, 8, 20, -2, 9,
-8, -8, -7, -4, -8, -9, 7, -12, -2, 2, 1, -14, 31, 4, -14,
3, 10, -18, -17, -1, 18, 1, 12, 0, 7, -3, -5, 8, -9, 18,
17, 7, -15, 3, 20, 4, -8, 16, 6, -3, -3, 9, -4, -6, 4,
};
const int32_t multiplier = 2080364544;
const int32_t shift = -2;
std::vector<int32_t> scratch(2 * 9, 0);
std::vector<int16_t> output = {10, 2, 33, 4, 5, 6, 65, 4, 3,
52, 1, 2, 8, -1, -2, 11, 17, -18};
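  // Two batches of 30 inputs against a 9x30 weight matrix -> 9 outputs per
  // batch.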
MatrixBatchVectorMultiplyAccumulate(
input.data(), input_zeropoint_times_weights.data(),
input_to_gate_weights.data(), multiplier, shift,
2, 30, 9, 0,
scratch.data(), output.data(), &context);
const std::vector<int16_t> expected_output = {
-210, 331, 153, 139, -570, -657, 258, 515, -495,
91, -243, -73, 603, -744, -269, 169, -748, -174,
};
EXPECT_THAT(output, testing::ElementsAreArray(expected_output));
}
TEST(uKernels, HybridMatrixBatchVectorMultiplyAccumulate8x8_16Test) {
CpuBackendContext context;
const std::vector<int8_t> input = {
4, -41, 5, -41, 22, 17, -30, 24, 13, -47, 18, 9, -11, -30, 16,
1, -47, 12, 36, -20, 27, -3, 0, -51, -31, 3, -8, -38, 43, 23,
12, 1, 11, -23, -26, 23, 14, -9, -44, 22, 21, -30, 3, -47, -26,
-21, -24, 1, -44, 34, -11, -23, -28, 26, -38, 19, 35, 9, 23, 6,
-42, -25, 28, 1, 4, -41, 5, -41, 22, 17, -30, 24, 13, -47, 18,
9, -11, -30, 16, 1, -47, 12, 36, -20, 27, -3, 0, -51, -31, 3,
-8, -38, 43, 23, 12, 1, 11, -23, -26, 23, 14, -9, -44, 22, 21,
-30, 3, -47, -26, -21, -24, 1, -44, 34, -11, -23, -28, 26, -38, 19,
35, 9, 23, 6, -42, -25, 28, 1,
};
const std::vector<int32_t> input_offsets = {1, 1, 1, 1};
const std::vector<float> scaling_factors = {
1.0,
1.0,
1.0,
1.0,
};
const std::vector<int8_t> input_to_gate_weights = {
-10, -4, -8, 16, 4, -16, -1, 11, 1, 2, -25, 19, 7, 9, 2,
1, -24, -2, 10, -7, 7, -5, -2, 3, 4, 3, -4, -7, -11, -13,
-18, 2, 11, 10, 12, -9, 17, -15, -5, 20, -6, -11, 2, -6, -18,
15, 4, 3, 4, -9, -2, -3, -9, -13, 17, -21, 5, 3, -12, 0,
-4, 9, -5, 4, 10, -2, 8, 1, -10, -6, 1, -9, 10, 11, -1,
-5, 4, -7, -4, 5, -4, 4, 12, -7, -5, -9, -19, 6, -4, 12,
-17, -22, 0, 9, -4, 6, -5, 5, -8, 8, 3, 15, -18, -18, 5,
3, -12, 5, -10, 7, 7, 7, -9, 17, 2, -11, -25, 3, 19, -6,
7, 1, 7, 5, -3, 11, 3, 8, 0, -8, 8, -2, -2, -12, 14,
-5, 7, 8, 16, 20, -16, -5, -5, 9, 1, -10, -6, 14, 10, -12,
10, -6, 5, 0, 3, 8, -9, -13, -2, 10, 4, 4, -16, -17, -9,
16, -5, 14, -9, -5, -12, 0, 17, 6, -1, 11, 16, -20, 1, -11,
-1, -10, -21, 13, 4, -12, -7, 0, -14, -6, 3, 12, -4, 6, -18,
-3, -1, 14, -8, -6, -15, 5, 12, -3, -10, 4, 6, 13, -5, -20,
0, 3, -3, -7, 1, 2, -10, 7, -3, 6, 1, -12, 6, 14, -5,
-20, 0, 3, -3, -7, 1, 2, -10, 7, -3, 6, 1, -12, 6, 15,
-5, -20, 0, 3, -3, -7, 1, 2, -10, 7, -3, 6, 1, -12, 6,
16,
};
std::vector<int32_t> scratch(5 * 8, 0);
std::vector<float> output(4 * 8, 0);
int32_t* row_sums = scratch.data() + 8 * 4;
bool compute_row_sums = true;
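  // The first call computes per-row weight sums into row_sums and clears
  // compute_row_sums; subsequent calls reuse the cached sums.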
MatrixBatchVectorMultiplyAccumulate(
input_to_gate_weights.data(), 8, 32, input.data(),
scaling_factors.data(), 4, output.data(), nullptr,
input_offsets.data(), scratch.data(), row_sums, &compute_row_sums,
&context);
const std::vector<float> expected_output = {
-228, 1548, 937, -166, -1164, -1578, -278, 303, 839, -820, 132,
1733, -1858, 58, -425, -587, -228, 1548, 937, -166, -1164, -1578,
-278, 303, 839, -820, 132, 1733, -1858, 58, -425, -587,
};
EXPECT_THAT(output, testing::ElementsAreArray(expected_output));
EXPECT_THAT(compute_row_sums, false);
std::vector<float> output2(4 * 8, 0);
MatrixBatchVectorMultiplyAccumulate(
input_to_gate_weights.data(), 8, 32, input.data(),
scaling_factors.data(), 4, output2.data(), nullptr,
input_offsets.data(), scratch.data(), row_sums, &compute_row_sums,
&context);
EXPECT_THAT(output2, testing::ElementsAreArray(expected_output));
constexpr int kBatchMultiplier = 8;
std::vector<int8_t> input_big_batch(input.size() * kBatchMultiplier);
std::vector<float> scaling_factors_big_batch(scaling_factors.size() *
kBatchMultiplier);
std::vector<int32_t> scratch_big_batch(scratch.size() * kBatchMultiplier);
std::vector<int32_t> input_offsets_big_batch(input_offsets.size() *
kBatchMultiplier);
for (int i = 0; i < kBatchMultiplier; i++) {
std::copy(input.begin(), input.end(),
input_big_batch.begin() + i * input.size());
std::copy(scaling_factors.begin(), scaling_factors.end(),
scaling_factors_big_batch.begin() + i * scaling_factors.size());
std::copy(input_offsets.begin(), input_offsets.end(),
input_offsets_big_batch.begin() + i * input_offsets.size());
}
std::vector<float> output_big_batch(output.size() * kBatchMultiplier, 0);
MatrixBatchVectorMultiplyAccumulate(
input_to_gate_weights.data(), 8, 32,
input_big_batch.data(), scaling_factors_big_batch.data(),
4 * kBatchMultiplier, output_big_batch.data(), nullptr,
input_offsets_big_batch.data(), scratch_big_batch.data(), row_sums,
&compute_row_sums, &context);
for (int i = 0; i < kBatchMultiplier; i++) {
std::vector<float> output_per_batch(
output_big_batch.begin() + i * output.size(),
output_big_batch.begin() + (i + 1) * output.size());
EXPECT_THAT(output_per_batch, testing::ElementsAreArray(expected_output));
}
}
TEST(uKernels, QuantMatrixBatchVectorMultiplyAccumulate8x8_8Test) {
CpuBackendContext context;
const std::vector<int8_t> input = {
4, -41, 5, -41, 22, 17, -30, 24, 13, -47, 18, 9, -11, -30, 16,
-47, 12, 36, -20, 27, -3, 0, -51, -31, 3, -8, -38, 43, 23, 12,
11, -23, -26, 23, 14, -9, -44, 22, 21, -30, 3, -47, -26, -21, -24,
-44, 34, -11, -23, -28, 26, -38, 19, 35, 9, 23, 6, -42, -25, 28,
};
const std::vector<int32_t> input_zeropoint_times_weights = {
0, 0, 0, 0, 0, 0, 0, 0, 0,
};
const std::vector<int8_t> input_to_gate_weights = {
13, -7, -20, -22, 8, -46, 9, -2, -18, -42, 40, 28, -7, 24, 34,
-7, -24, -24, 19, 14, -19, -6, -2, -3, 5, -36, -13, 6, -27, 36,
-23, 0, 20, -37, -23, 9, 17, -41, 33, -15, -18, -42, -41, -34, -16,
-6, 12, -14, -15, -20, -14, 21, -3, -1, -26, 54, 51, 35, -14, 9,
-2, 13, -6, 39, 34, -21, 39, -51, 19, -44, 52, 0, -2, -38, -35,
-33, 4, -22, -37, 27, -23, 3, -10, 5, 32, 6, 1, -35, 24, -19,
46, 43, -55, 5, 38, -14, 32, -43, -44, -17, -13, -28, 56, 28, -42,
4, 10, -7, 25, -15, -9, -25, -14, -15, 6, -10, -22, 40, -72, 18,
-6, -18, -2, 37, -13, -10, 11, -9, 32, -28, 19, -2, 4, -31, 50,
-15, 23, -34, -9, 41, -6, -34, 17, 2, 24, -15, 21, -17, -8, -20,
1, -63, 19, -40, 12, -5, 5, -6, 1, 19, -9, -23, 5, -34, 11,
26, 21, 54, 34, -43, -29, 1, 16, 31, -56, -28, 57, -15, -23, 37,
-17, -3, -6, 29, 18, 77, 17, -20, -14, -19, 8, -24, -7, -45, -3,
0, -25, -8, 6, 9, 3, -15, 51, 4, -15, -19, -16, -14, -47, -52,
25, 9, 58, 26, -9, -27, 49, -6, -21, 21, 18, 12, -9, -9, 14,
31, -26, -19, -50, 17, 35, 11, -10, 22, -16, -43, -2, 26, 55, -20,
-7, 21, 33, -20, 26, -15, -22, 30, 27, 3, -34, 26, 12, -1, 19,
26, -25, 10, 30, 30, -14, -23, -23, -35, -16, 26, -41, 11, 1, 21,
};
const int32_t multiplier = 1347771520;
const int32_t shift = -7;
const int32_t output_zp = -11;
std::vector<int8_t> output = {1, 2, 3, 4, 5, 6, 5, 4, 3,
2, 1, 2, 8, -1, -2, 11, 17, 18};
std::vector<int32_t> scratch(2 * 9, 0);
MatrixBatchVectorMultiplyAccumulate(
input.data(), input_zeropoint_times_weights.data(),
input_to_gate_weights.data(), multiplier, shift,
2, 30, 9, output_zp, scratch.data(),
output.data(), &context);
const std::vector<int8_t> expected_output = {
5, -9, -2, -30, -5, -11, -22, -18, 18,
-19, 2, 11, -5, 9, -2, 10, -38, -22,
};
EXPECT_THAT(output, testing::ElementsAreArray(expected_output));
}
TEST(uKernels, QuantMatrixBatchVectorMultiply8x8_8WithZPTest) {
const int32_t input_zp = 3;
const std::vector<int8_t> input = {
4, -41, 5, -41, 22, 17, -30, 24, 13, -47, 18, 9, -11, -30, 16,
-47, 12, 36, -20, 27, -3, 0, -51, -31, 3, -8, -38, 43, 23, 12,
11, -23, -26, 23, 14, -9, -44, 22, 21, -30, 3, -47, -26, -21, -24,
-44, 34, -11, -23, -28, 26, -38, 19, 35, 9, 23, 6, -42, -25, 28,
};
const std::vector<int8_t> input_to_gate_weights = {
13, -7, -20, -22, 8, -46, 9, -2, -18, -42, 40, 28, -7, 24, 34,
-7, -24, -24, 19, 14, -19, -6, -2, -3, 5, -36, -13, 6, -27, 36,
-23, 0, 20, -37, -23, 9, 17, -41, 33, -15, -18, -42, -41, -34, -16,
-6, 12, -14, -15, -20, -14, 21, -3, -1, -26, 54, 51, 35, -14, 9,
-2, 13, -6, 39, 34, -21, 39, -51, 19, -44, 52, 0, -2, -38, -35,
-33, 4, -22, -37, 27, -23, 3, -10, 5, 32, 6, 1, -35, 24, -19,
46, 43, -55, 5, 38, -14, 32, -43, -44, -17, -13, -28, 56, 28, -42,
4, 10, -7, 25, -15, -9, -25, -14, -15, 6, -10, -22, 40, -72, 18,
-6, -18, -2, 37, -13, -10, 11, -9, 32, -28, 19, -2, 4, -31, 50,
-15, 23, -34, -9, 41, -6, -34, 17, 2, 24, -15, 21, -17, -8, -20,
1, -63, 19, -40, 12, -5, 5, -6, 1, 19, -9, -23, 5, -34, 11,
26, 21, 54, 34, -43, -29, 1, 16, 31, -56, -28, 57, -15, -23, 37,
-17, -3, -6, 29, 18, 77, 17, -20, -14, -19, 8, -24, -7, -45, -3,
0, -25, -8, 6, 9, 3, -15, 51, 4, -15, -19, -16, -14, -47, -52,
25, 9, 58, 26, -9, -27, 49, -6, -21, 21, 18, 12, -9, -9, 14,
31, -26, -19, -50, 17, 35, 11, -10, 22, -16, -43, -2, 26, 55, -20,
-7, 21, 33, -20, 26, -15, -22, 30, 27, 3, -34, 26, 12, -1, 19,
26, -25, 10, 30, 30, -14, -23, -23, -35, -16, 26, -41, 11, 1, 21,
};
const int32_t multiplier = 1347771520;
const int32_t shift = -7;
const int32_t output_zp = -11;
std::vector<int8_t> output = {1, 2, 3, 4, 5, 6, 5, 4, 3,
2, 1, 2, 8, -1, -2, 11, 17, 18};
MatrixBatchVectorMultiply(
input.data(), input_zp, input_to_gate_weights.data(), multiplier, shift,
2, 30, 9, output.data(), output_zp);
const std::vector<int8_t> expected_output = {6, -9, -4, -32, -10, -17,
-25, -25, 14, -19, 3, 10,
-12, 10, 0, 1, -57, -41};
EXPECT_THAT(output, testing::ElementsAreArray(expected_output));
}
TEST(uKernels, QuantMatrixBatchVectorMultiply16x8_8WithZPTest) {
const std::vector<int16_t> input = {
400, -41, 5, -41, 22, 17, -30, 24, 130, -47, 18, 9, -11, -30, 16,
-47, 12, 36, -20, 27, -3, 0, -51, -31, 3, -8, -38, 43, 23, 12,
11, -23, -26, 23, 14, -9, -44, 22, 21, -30, 3, -47, -26, -21, -24,
-44, 34, -11, -23, -28, 26, -38, 19, 35, 9, 23, 6, -42, -25, 28,
};
const std::vector<int8_t> input_to_gate_weights = {
13, -7, -20, -22, 8, -46, 9, -2, -18, -42, 40, 28, -7, 24, 34,
-7, -24, -24, 19, 14, -19, -6, -2, -3, 5, -36, -13, 6, -27, 36,
-23, 0, 20, -37, -23, 9, 17, -41, 33, -15, -18, -42, -41, -34, -16,
-6, 12, -14, -15, -20, -14, 21, -3, -1, -26, 54, 51, 35, -14, 9,
-2, 13, -6, 39, 34, -21, 39, -51, 19, -44, 52, 0, -2, -38, -35,
-33, 4, -22, -37, 27, -23, 3, -10, 5, 32, 6, 1, -35, 24, -19,
46, 43, -55, 5, 38, -14, 32, -43, -44, -17, -13, -28, 56, 28, -42,
4, 10, -7, 25, -15, -9, -25, -14, -15, 6, -10, -22, 40, -72, 18,
-6, -18, -2, 37, -13, -10, 11, -9, 32, -28, 19, -2, 4, -31, 50,
-15, 23, -34, -9, 41, -6, -34, 17, 2, 24, -15, 21, -17, -8, -20,
1, -63, 19, -40, 12, -5, 5, -6, 1, 19, -9, -23, 5, -34, 11,
26, 21, 54, 34, -43, -29, 1, 16, 31, -56, -28, 57, -15, -23, 37,
-17, -3, -6, 29, 18, 77, 17, -20, -14, -19, 8, -24, -7, -45, -3,
0, -25, -8, 6, 9, 3, -15, 51, 4, -15, -19, -16, -14, -47, -52,
25, 9, 58, 26, -9, -27, 49, -6, -21, 21, 18, 12, -9, -9, 14,
31, -26, -19, -50, 17, 35, 11, -10, 22, -16, -43, -2, 26, 55, -20,
-7, 21, 33, -20, 26, -15, -22, 30, 27, 3, -34, 26, 12, -1, 19,
26, -25, 10, 30, 30, -14, -23, -23, -35, -16, 26, -41, 11, 1, 21,
};
const std::vector<int32_t> input_zeropoint_times_weights = {
0, 2, 3, 4, 5, 4, 3, 2, 10,
};
const int32_t multiplier = 1347771520;
const int32_t shift = -8;
const int32_t output_zp = -11;
std::vector<int8_t> output = {1, 2, 3, 4, 5, 6, 5, 4, 3,
2, 1, 2, 8, -1, -2, 11, 17, 18};
MatrixBatchVectorMultiply(
input.data(), input_to_gate_weights.data(), multiplier, shift,
input_zeropoint_times_weights.data(),
2, 30, 9, output_zp, output.data());
const std::vector<int8_t> expected_output = {4, -24, -5, 10, -7, -13,
-39, 2, 3, -16, -5, -1,
-12, -1, -6, -6, -33, -25};
EXPECT_THAT(output, testing::ElementsAreArray(expected_output));
}
TEST(uKernels, MatrixScalarMultiplyAccumulateTest) {
std::vector<int32_t> output = {
-620, -170, -395, 715, -1220, -1080, 1130, -260, -470,
};
const std::vector<int8_t> weight = {
-10, -4, -8, 16, 4, -16, -1, 11, 1, 2, -25, 19, 7, 9, 2,
-24, -2, 10, -7, 7, -5, -2, 3, 4, 3, -4, -7, -11, -13, -18,
11, 10, 12, -9, 17, -15, -5, 20, -6, -11, 2, -6, -18, 15, 4,
4, -9, -2, -3, -9, -13, 17, -21, 5, 3, -12, 0, -4, 9, -5,
10, -2, 8, 1, -10, -6, 1, -9, 10, 11, -1, -5, 4, -7, -4,
-4, 4, 12, -7, -5, -9, -19, 6, -4, 12, -17, -22, 0, 9, -4,
-5, 5, -8, 8, 3, 15, -18, -18, 5, 3, -12, 5, -10, 7, 7,
-9, 17, 2, -11, -25, 3, 19, -6, 7, 1, 7, 5, -3, 11, 3,
0, -8, 8, -2, -2, -12, 14, -5, 7, 8, 16, 20, -16, -5, -5,
1, -10, -6, 14, 10, -12, 10, -6, 5, 0, 3, 8, -9, -13, -2,
4, 4, -16, -17, -9, 16, -5, 14, -9, -5, -12, 0, 17, 6, -1,
16, -20, 1, -11, -1, -10, -21, 13, 4, -12, -7, 0, -14, -6, 3,
-4, 6, -18, -3, -1, 14, -8, -6, -15, 5, 12, -3, -10, 4, 6,
-5, -20, 0, 3, -3, -7, 1, 2, -10, 7, -3, 6, 1, -12, 6,
4, -12, 2, 6, -20, 0, 5, 23, 15, 14, 9, 8, 20, -2, 9,
-8, -8, -7, -4, -8, -9, 7, -12, -2, 2, 1, -14, 31, 4, -14,
3, 10, -18, -17, -1, 18, 1, 12, 0, 7, -3, -5, 8, -9, 18,
17, 7, -15, 3, 20, 4, -8, 16, 6, -3, -3, 9, -4, -6, 4,
};
MatrixScalarMultiplyAccumulate(weight.data(), 3, 9, 30, output.data());
const std::vector<int32_t> expected_output = {
-797, -227, -536, 739, -1187, -1314, 965, -140, -257,
};
EXPECT_THAT(output, testing::ElementsAreArray(expected_output));
}
TEST(uKernels, QuantApplyLayerNormTest) {
const std::vector<int16_t> input = {
-310, 596, 34, -68, 475, 92, 672, -54, -913, -200,
-1194, -836, -620, -237, 991, 533, 721, -736, -8, -941,
-372, -1084, 591, 2557, -779, 175, 582, 956, -287, 944,
};
const std::vector<int16_t> layer_norm_weights = {
21849, 22882, 20626, 23854, 24779, 26354, 12980, 26231,
23716, 27271, 24937, 22647, 24715, 22854, 19646,
};
const std::vector<int32_t> bias_weight = {
-14175520, -13805465, -16027609, -13786809, -13321033,
-14399810, -15055368, -14536623, -14508746, -13784007,
-15206609, -15125830, -14996304, -14847597, -12814379,
};
const int32_t multiplier = 1895840000;
const int32_t shift = -13;
const int32_t limit = 1;
std::vector<int16_t> output(2 * 15, 0);
ApplyLayerNorm(input.data(), layer_norm_weights.data(), bias_weight.data(),
multiplier, shift, limit, 2, 15, output.data());
const std::vector<int16_t> expected_output = {
-9407, 5846, -4802, -5295, 4822, -2390, 930, -5283,
-20352, -7846, -26539, -18704, -15829, -8627, 10313, -2522,
-132, -16058, -8206, -19158, -13296, -14407, -1235, 20612,
-18591, -6738, -2274, 2602, -11622, 1565,
};
EXPECT_THAT(output, testing::ElementsAreArray(expected_output));
}
TEST(uKernels, QuantApplyLayerNormFloatTest) {
const std::vector<int16_t> input = {
-310, 596, 34, -68, 475, 92, 672, -54, -913, -200,
-1194, -836, -620, -237, 991, 533, 721, -736, -8, -941,
-372, -1084, 591, 2557, -779, 175, 582, 956, -287, 944,
};
const std::vector<int16_t> layer_norm_weights = {
21849, 22882, 20626, 23854, 24779, 26354, 12980, 26231,
23716, 27271, 24937, 22647, 24715, 22854, 19646,
};
const std::vector<int32_t> bias_weight = {
-14175520, -13805465, -16027609, -13786809, -13321033,
-14399810, -15055368, -14536623, -14508746, -13784007,
-15206609, -15125830, -14996304, -14847597, -12814379,
};
const int32_t multiplier = 1895840000;
const int32_t shift = -13;
std::vector<int16_t> output(2 * 15, 0);
ApplyLayerNormFloat(input.data(), layer_norm_weights.data(), multiplier,
shift, bias_weight.data(), 2, 15, output.data());
const std::vector<int16_t> expected_output = {
-9408, 5844, -4803, -5297, 4826, -2392, 927, -5286,
-20353, -7851, -26534, -18701, -15830, -8623, 10312, -2524,
-136, -16053, -8206, -19160, -13299, -14407, -1233, 20617,
-18594, -6736, -2272, 2597, -11620, 1566};
EXPECT_THAT(output, testing::ElementsAreArray(expected_output));
}
TEST(uKernels, QuantTanh0Test) {
const std::vector<int16_t> input = {
-145, 899, -176, -35, 264, 289, 8, 27, -37, -1310,
-120, 127, -16, 106, 370, -583, -299, 93, -548, 548,
653, -29, -53, 1058, -52, -164, -149, -635, 201, -1297,
-145, 899, -176, -35, 264, 289, 8, 27, -37, -1310,
-120, 127, -16, 106, 370, -583, -299, 93, -548, 548,
653, -29, -53, 1058, -52, -164, -149, -635, 201, -1297,
};
std::vector<int16_t> output(4 * 15, 0);
ApplyTanh(0, input.data(), 4, 15, output.data());
const std::vector<int16_t> expected_output = {
-136, 904, -176, -40, 260, 292, 8, 28, -44, -1304,
-120, 120, -24, 112, 376, -576, -308, 88, -544, 544,
652, -32, -60, 1056, -56, -156, -144, -636, 192, -1300,
-136, 904, -176, -40, 260, 292, 8, 28, -44, -1304,
-120, 120, -24, 112, 376, -576, -308, 88, -544, 544,
652, -32, -60, 1056, -56, -156, -144, -636, 192, -1300,
};
EXPECT_THAT(output, testing::ElementsAreArray(expected_output));
}
TEST(uKernels, QuantTanh3Test) {
const std::vector<int16_t> input = {
-145, 899, -176, -35, 264, 289, 8, 27, -37, -1310,
-120, 127, -16, 106, 370, -583, -299, 93, -548, 548,
653, -29, -53, 1058, -52, -164, -149, -635, 201, -1297,
-145, 899, -176, -35, 264, 289, 8, 27, -37, -1310,
-120, 127, -16, 106, 370, -583, -299, 93, -548, 548,
653, -29, -53, 1058, -52, -164, -149, -635, 201, -1297,
};
std::vector<int16_t> output(4 * 15, 0);
ApplyTanh(3, input.data(), 4, 15, output.data());
const std::vector<int16_t> expected_output = {
-1156, 7076, -1412, -276, 2104, 2308, 64, 220, -288, -10132,
-964, 1016, -120, 844, 2944, -4640, -2392, 736, -4352, 4352,
5180, -232, -428, 8276, -412, -1308, -1196, -5044, 1612, -10044,
-1156, 7076, -1412, -276, 2104, 2308, 64, 220, -288, -10132,
-964, 1016, -120, 844, 2944, -4640, -2392, 736, -4352, 4352,
5180, -232, -428, 8276, -412, -1308, -1196, -5044, 1612, -10044,
};
EXPECT_THAT(output, testing::ElementsAreArray(expected_output));
}
TEST(uKernels, QuantTanhFloatTest) {
const std::vector<int16_t> input = {
-1, 0, 1, -35, 264, 289, 8, 27, -37, -1310,
-120, 127, -16, 106, 370, -583, -299, 93, -548, 548,
653, -29, -53, 1058, -52, -164, -149, -635, 201, -1297,
-145, 899, -176, -35, 264, 289, 8, 27, -37, -1310,
-120, 127, -16, 106, 370, -583, -299, 93, -548, 548,
653, -29, -53, 1058, -52, -164, -149, -635, 201, -1297,
};
std::vector<int16_t> output(4 * 15, 0);
ApplyTanhFloat(input.data(), 4, 15, -12, output.data());
const std::vector<int16_t> expected_output = {
-8, 0, 8, -279, 2109, 2308, 63, 215, -295, -10136,
-959, 1015, -127, 847, 2951, -4632, -2387, 743, -4358, 4358,
5180, -231, -423, 8280, -415, -1311, -1191, -5039, 1606, -10042,
-1159, 7078, -1407, -279, 2109, 2308, 63, 215, -295, -10136,
-959, 1015, -127, 847, 2951, -4632, -2387, 743, -4358, 4358,
5180, -231, -423, 8280, -415, -1311, -1191, -5039, 1606, -10042};
EXPECT_THAT(output, testing::ElementsAreArray(expected_output));
}
TEST(uKernels, QuantTanh4Test) {
const std::vector<int16_t> input = {
-5, 163, -31, -5, 54, 90, 1, 2, -4, -42, -8, 29, 0, 47, 150,
-26, -36, 9, -73, 25, 14, -2, -1, 29, -10, -12, -18, -29, 51, -92,
-5, 163, -31, -5, 54, 90, 1, 2, -4, -42, -8, 29, 0, 47, 150,
-26, -36, 9, -73, 25, 14, -2, -1, 29, -10, -12, -18, -29, 51, -92,
};
std::vector<int16_t> output(4 * 15, 0);
ApplyTanh(4, input.data(), 4, 15, output.data());
const std::vector<int16_t> expected_output = {
-76, 2596, -496, -76, 856, 1436, 24, 36, -64, -672,
-120, 456, 0, 752, 2400, -412, -576, 148, -1168, 400,
216, -36, -24, 456, -164, -192, -292, -456, 820, -1476,
-76, 2596, -496, -76, 856, 1436, 24, 36, -64, -672,
-120, 456, 0, 752, 2400, -412, -576, 148, -1168, 400,
216, -36, -24, 456, -164, -192, -292, -456, 820, -1476,
};
EXPECT_THAT(output, testing::ElementsAreArray(expected_output));
}
TEST(uKernels, QuantSigmoidTest) {
const std::vector<int16_t> input = {
-10500, 1398, -6963, -7404, 485, -5401, -1757, -7668, -19248,
-9692, -24249, -17923, -15840, -10026, 5249, -89, 1787, -16178,
-6691, -19524, -13439, -24048, -1123, 32767, -17267, -3378, 823,
11482, -11139, 7508, -10500, 1398, -6963, -7404, 485, -5401,
-1757, -7668, -19248, -9692, -24249, -17923, -15840, -10026, 5249,
-89, 1787, -16178, -6691, -19524, -13439, -24048, -1123, 32767,
-17267, -3378, 823, 11482, -11139, 7508,
};
std::vector<int16_t> output(4 * 15, 0);
ApplySigmoid(input.data(), 4, 15, output.data());
const std::vector<int16_t> expected_output = {
2339, 19152, 5063, 4617, 17350, 6917, 12921, 4371, 299, 2813,
89, 409, 673, 2605, 25646, 16207, 19904, 615, 5353, 273,
1187, 91, 14153, 32756, 475, 9983, 18026, 30898, 2023, 28246,
2339, 19152, 5063, 4617, 17350, 6917, 12921, 4371, 299, 2813,
89, 409, 673, 2605, 25646, 16207, 19904, 615, 5353, 273,
1187, 91, 14153, 32756, 475, 9983, 18026, 30898, 2023, 28246,
};
EXPECT_THAT(output, testing::ElementsAreArray(expected_output));
}
TEST(uKernels, QuantSigmoidFloatTest) {
const std::vector<int16_t> input = {
-10500, 1398, -6963, -7404, 485, -5401, -1757, -7668, -19248,
-9692, -24249, -17923, -15840, -10026, 5249, -89, 1787, -16178,
-6691, -19524, -13439, -24048, -1123, 32767, -17267, -3378, 823,
11482, -11139, 7508, -10500, 1398, -6963, -7404, 485, -5401,
-1757, -7668, -19248, -9692, -24249, -17923, -15840, -10026, 5249,
-89, 1787, -16178, -6691, -19524, -13439, -24048, -1123, 32767,
-17267, -3378, 823, 11482, -11139, 7508,
};
std::vector<int16_t> output(4 * 15, 0);
ApplySigmoidFloat(input.data(), 4, 15, output.data());
const std::vector<int16_t> expected_output = {
2343, 19153, 5061, 4617, 17352, 6915, 12922, 4368, 295, 2811,
87, 407, 671, 2608, 25647, 16206, 19902, 619, 5352, 276,
1187, 92, 14151, 32757, 476, 9986, 18024, 30895, 2026, 28249,
2343, 19153, 5061, 4617, 17352, 6915, 12922, 4368, 295, 2811,
87, 407, 671, 2608, 25647, 16206, 19902, 619, 5352, 276,
1187, 92, 14151, 32757, 476, 9986, 18024, 30895, 2026, 28249};
EXPECT_THAT(output, testing::ElementsAreArray(expected_output));
}
TEST(uKernels, QuantMul16bitOut15ShiftTest) {
const std::vector<int16_t> input1 = {
2491, 32767, -32768, 32767, -32768, 32767, 32767, -32768, -32768, 2157,
4545, 14835, 1285, 29498, 26788, 2907, 7877, 6331, 8775, 3001,
1399, 4683, 1437, 1853, 12163, 4927, 7977, 3001, 16612, 4791,
};
const std::vector<int16_t> input2 = {
-1156, 32767, -32768, -32768, 32767, 2308, 64, 220, -288, -10132,
-964, 1016, -120, 844, 2944, -4640, -2392, 736, -4352, 4352,
5180, -232, -428, 8276, -412, -1308, -1196, -5044, 1612, -10044,
};
std::vector<int16_t> output(2 * 15, 0);
CwiseMul(input1.data(), input2.data(), 2, 15, 15, output.data());
const std::vector<int16_t> expected_output = {
-88, 32766, -32768, -32767, -32767, 2308, 64, -220, 288, -667,
-134, 460, -5, 760, 2407, -412, -575, 142, -1165, 399,
221, -33, -19, 468, -153, -197, -291, -462, 817, -1469,
};
EXPECT_THAT(output, testing::ElementsAreArray(expected_output));
}
TEST(uKernels, QuantMul16bitOut19ShiftTest) {
const std::vector<int16_t> input1 = {
2491, 32767, -32768, 32767, -32768, 32767, 32767, -32768, -32768, 2157,
4545, 14835, 1285, 29498, 26788, 2907, 7877, 6331, 8775, 3001,
1399, 4683, 1437, 1853, 12163, 4927, 7977, 3001, 16612, 4791,
};
const std::vector<int16_t> input2 = {
-1156, 32767, -32768, -32768, 32767, 2308, 64, 220, -288, -10132,
-964, 1016, -120, 844, 2944, -4640, -2392, 736, -4352, 4352,
5180, -232, -428, 8276, -412, -1308, -1196, -5044, 1612, -10044,
};
std::vector<int16_t> output(2 * 15, 0);
CwiseMul(input1.data(), input2.data(), 2, 15, 19, output.data());
const std::vector<int16_t> expected_output = {
-5, 2048, 2048, -2048, -2048, 144, 4, -14, 18, -42,
-8, 29, 0, 47, 150, -26, -36, 9, -73, 25,
14, -2, -1, 29, -10, -12, -18, -29, 51, -92,
};
EXPECT_THAT(output, testing::ElementsAreArray(expected_output));
}
TEST(uKernels, QuantMul8bitArbitraryScaleTest) {
int multiplier = 1970324837;
int shift = -15;
const std::vector<int16_t> input1 = {
2491, 32767, -32768, 32767, -32768, 32767, 32767, -32768, -32768, 2157,
4545, 14835, 1285, 29498, 26788, 2907, 7877, 6331, 8775, 3001,
1399, 4683, 1437, 1853, 12163, 4927, 7977, 3001, 16612, 4791,
};
const std::vector<int16_t> input2 = {
-1156, 32767, -32768, -32768, 32767, 2308, 64, 220, -288, -10132,
-964, 1016, -120, 844, 2944, -4640, -2392, 736, -4352, 4352,
5180, -232, -428, 8276, -412, -1308, -1196, -5044, 1612, -10044,
};
std::vector<int8_t> output(2 * 15, 0);
CwiseMul(input1.data(), input2.data(), multiplier, shift, 2, 15, 3,
output.data());
const std::vector<int8_t> expected_output = {
-78, 127, 127, -128, -128, 127, 62, -128, 127, -128,
-120, 127, -1, 127, 127, -128, -128, 127, -128, 127,
127, -27, -14, 127, -128, -128, -128, -128, 127, -128,
};
EXPECT_THAT(output, testing::ElementsAreArray(expected_output));
}
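// CwiseAdd saturates to the int16 range: see the 32767 + 32767 and
// 20000 + 20000 entries below.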
TEST(uKernels, QuantAddTest) {
const std::vector<int16_t> input1 = {
2491, 32767, -32768, 32767, -32768, 32767, 32767, -32768, -32768, 20000,
-20000, 14835, 1285, 29498, 26788, 2907, 7877, 6331, 8775, 3001,
1399, 4683, 1437, 1853, 12163, 4927, 7977, 3001, 16612, 4791,
};
const std::vector<int16_t> input2 = {
-1156, 32767, -32768, -32768, 32767, 2308, 64, 220, -288, 20000,
-20000, 1016, -120, 844, 2944, -4640, -2392, 736, -4352, 4352,
5180, -232, -428, 8276, -412, -1308, -1196, -5044, 1612, -10044,
};
std::vector<int16_t> output(2 * 15, 0);
CwiseAdd(input1.data(), input2.data(), 2, 15, output.data());
const std::vector<int16_t> expected_output = {
1335, 32767, -32768, -1, -1, 32767, 32767, -32548, -32768, 32767,
-32768, 15851, 1165, 30342, 29732, -1733, 5485, 7067, 4423, 7353,
6579, 4451, 1009, 10129, 11751, 3619, 6781, -2043, 18224, -5253,
};
EXPECT_THAT(output, testing::ElementsAreArray(expected_output));
}
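// CwiseClipping clamps each element to [-kAbsLimit, kAbsLimit] in place; the
// three tests below cover the float, int16, and int8 variants.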
TEST(uKernels, ClipTest) {
constexpr int kVectorSize = 10;
constexpr float kAbsLimit = 2.0;
std::vector<float> input = {0.0, -0.5, 1.0, -1.5, 2.0,
-2.5, 3.0, -3.5, 4.0, -4.5};
CwiseClipping(input.data(), kVectorSize, kAbsLimit);
const std::vector<float> expected_output = {0.0, -0.5, 1.0, -1.5, 2.0,
-2.0, 2.0, -2.0, 2.0, -2.0};
EXPECT_THAT(input, testing::ElementsAreArray(expected_output));
}
TEST(uKernels, QuantClip16Test) {
constexpr int kVectorSize = 30;
constexpr int16_t kAbsLimit = 300;
std::vector<int16_t> input = {
-10500, 1, -2, -7404, 200, -5401, -1757, -7668,
-19248, -9692, -24249, -17923, -15840, -10026, 5249, -89,
1787, -200, -6691, -19524, -13439, -24048, -1123, 32767,
-17267, -3378, 823, 11482, -11139, 7508,
};
CwiseClipping(input.data(), kVectorSize, kAbsLimit);
const std::vector<int16_t> expected_output = {
-300, 1, -2, -300, 200, -300, -300, -300, -300, -300,
-300, -300, -300, -300, 300, -89, 300, -200, -300, -300,
-300, -300, -300, 300, -300, -300, 300, 300, -300, 300,
};
EXPECT_THAT(input, testing::ElementsAreArray(expected_output));
}
TEST(uKernels, QuantClip8Test) {
constexpr int kVectorSize = 30;
constexpr int8_t kAbsLimit = 32;
std::vector<int8_t> input = {
4, -11, -5, -34, -10, -17, -27, -22, 15, 127, -128, 1, 3, 56, 3,
-21, 1, 9, -13, 10, 0, -1, -55, -40, 127, -128, 11, 4, 6, 32,
};
CwiseClipping(input.data(), kVectorSize, kAbsLimit);
const std::vector<int8_t> expected_output = {
4, -11, -5, -32, -10, -17, -27, -22, 15, 32, -32, 1, 3, 32, 3,
-21, 1, 9, -13, 10, 0, -1, -32, -32, 32, -32, 11, 4, 6, 32,
};
EXPECT_THAT(input, testing::ElementsAreArray(expected_output));
}
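// Shared inputs and outputs for the dense, sparse, and per-channel
// matrix-batch-vector multiply tests and benchmarks below.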
struct MatrixVectorData {
std::vector<int8_t> matrix;
std::vector<int8_t> zeroed_matrix;
std::vector<int8_t> sparse_matrix;
std::vector<uint8_t> ledger;
std::vector<int8_t> vectors;
std::vector<float> scale_factors;
std::vector<float> results;
std::vector<float> per_channel_scales;
std::vector<int32_t> input_offsets;
int rows;
int cols;
int batch;
};
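// Builds deterministic test data: a dense int8 matrix, its 16-wide-chunk
// sparse representation plus ledger, batch input vectors, and per-batch scale
// factors; optionally per-channel scales and per-batch input offsets.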
MatrixVectorData SetupMatrixVectorData(int rows, int cols, int batch,
bool negative = false,
bool is_per_channel = false,
bool init_to_one = false) {
MatrixVectorData data;
data.rows = rows;
data.cols = cols;
data.batch = batch;
for (int i = 0; i < rows * cols; i++) {
int sign = 1;
if ((i % 3) == 0 && negative) sign = -1;
data.matrix.push_back(sign * (i % 70));
}
for (int i = 0; i < cols * batch; i++) {
int sign = 1;
if ((i % 5) == 0 && negative) sign = -1;
data.vectors.push_back(sign * (i % 50));
}
for (int i = 0; i < batch; i++) {
data.scale_factors.insert(data.scale_factors.end(),
{1, 2, 3, 4, 5, 6, 7, 8});
}
data.results.resize(rows * batch, init_to_one ? 1 : 0);
data.zeroed_matrix = data.matrix;
for (int i = 0; i < rows; i++) {
int max_chunks = cols / 16;
int selected_chunks = (max_chunks / 2);
bool row_is_odd = (i % 2) > 0;
bool max_chunks_is_odd = (max_chunks % 2) > 0;
data.ledger.push_back(selected_chunks);
if (max_chunks_is_odd && row_is_odd) {
selected_chunks++;
}
for (int j = 0; j < max_chunks; j++) {
const int chunk_start = i * cols + (j * 16);
const int chunk_end = i * cols + (j * 16) + 16;
if ((j % 2) == (i % 2)) {
data.ledger.push_back(j);
for (int k = chunk_start; k < chunk_end; k++) {
data.sparse_matrix.push_back(data.matrix[k]);
}
} else {
for (int k = chunk_start; k < chunk_end; k++) {
data.zeroed_matrix[k] = 0;
}
}
}
}
if (is_per_channel) {
for (int i = 0; i < rows; i++) {
if (i % 2 == 0) {
data.per_channel_scales.push_back(0.5);
} else {
data.per_channel_scales.push_back(1.0);
}
}
for (int i = 0; i < batch; i++) {
for (int j = 0; j < cols; j++) {
data.vectors[i * cols + j] += i;
}
data.input_offsets.push_back(i);
}
}
return data;
}
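// Runs the dense int8 kernel with per-batch scale factors and returns the
// float results.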
std::vector<float> TestDotprodMatrixBatchVectorMultiply(
int rows, int cols, int batch, bool negative = false,
bool init_to_one = false) {
MatrixVectorData data =
SetupMatrixVectorData(rows, cols, batch, negative, false, init_to_one);
MatrixBatchVectorMultiplyAccumulate(
data.matrix.data(), rows, cols, data.vectors.data(),
data.scale_factors.data(), batch, &data.results[0]);
return data.results;
}
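// Same as above, but through the ledger-based sparse kernel.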
std::vector<float> TestSparseDotprodMatrixBatchVectorMultiply(
int rows, int cols, int batch, bool negative = false) {
MatrixVectorData data = SetupMatrixVectorData(rows, cols, batch, negative);
SparseMatrixBatchVectorMultiplyAccumulate(
data.sparse_matrix.data(), data.ledger.data(), rows, cols,
data.vectors.data(), data.scale_factors.data(), batch, &data.results[0]);
return data.results;
}
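// Exercises the per-channel-scale overload, which additionally takes input
// offsets, scratch storage, and cached row sums.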
std::vector<float> TestPerChannelDotprodMatrixBatchVectorMultiply(
int rows, int cols, int batch, bool negative = false,
bool is_per_channel = true) {
MatrixVectorData data =
SetupMatrixVectorData(rows, cols, batch, negative, is_per_channel);
std::vector<int32_t> scratch(rows * batch);
std::vector<int32_t> row_sums(rows);
bool compute_row_sums = true;
CpuBackendContext context;
MatrixBatchVectorMultiplyAccumulate(
data.matrix.data(), rows, cols, data.vectors.data(),
data.scale_factors.data(), batch, &data.results[0],
data.per_channel_scales.data(), data.input_offsets.data(), scratch.data(),
row_sums.data(), &compute_row_sums, &context);
return data.results;
}
TEST(uKernels, DotprodMatrixBatchVectorMultiplyAccumulateTest) {
ASSERT_THAT(TestDotprodMatrixBatchVectorMultiply(4, 16, 1),
testing::ElementsAre(1240, 3160, 5080, 7000));
ASSERT_THAT(TestDotprodMatrixBatchVectorMultiply(4, 32, 2),
testing::ElementsAre(10416, 26288, 8490, 23312, 18276, 70756,
37416, 60916));
std::vector<float> results = TestDotprodMatrixBatchVectorMultiply(32, 512, 5);
EXPECT_NEAR(415566, results[0], 0.0001);
EXPECT_NEAR(880736, results[50], 0.0001);
EXPECT_NEAR(1312062, results[72], 0.0001);
EXPECT_NEAR(1750384, results[100], 0.0001);
EXPECT_NEAR(1776224, results[120], 0.0001);
EXPECT_NEAR(2101860, results[150], 0.0001);
const bool kNegative = true;
ASSERT_THAT(TestDotprodMatrixBatchVectorMultiply(4, 64, 1, kNegative),
testing::ElementsAre(13696, 6904, 7764, 11806));
ASSERT_THAT(
TestDotprodMatrixBatchVectorMultiply(4, 32, 2, kNegative),
testing::ElementsAre(3436, 3522, 1590, 6972, 2516, 20520, 456, 10628));
const bool kInitToOne = true;
ASSERT_THAT(
TestDotprodMatrixBatchVectorMultiply(4, 32, 2, kNegative, kInitToOne),
testing::ElementsAre(3437, 3523, 1591, 6973, 2517, 20521, 457, 10629));
}
TEST(uKernels, PerChannelDotprodMatrixBatchVectorMultiplyAccumulateTest) {
ASSERT_THAT(TestPerChannelDotprodMatrixBatchVectorMultiply(4, 16, 1),
testing::ElementsAre(1240 / 2, 3160, 5080 / 2, 7000));
ASSERT_THAT(TestPerChannelDotprodMatrixBatchVectorMultiply(4, 32, 2),
testing::ElementsAre(10416 / 2, 26288, 8490 / 2, 23312, 18276 / 2,
70756, 37416 / 2, 60916));
std::vector<float> results =
TestPerChannelDotprodMatrixBatchVectorMultiply(32, 512, 5);
EXPECT_NEAR(207783, results[0], 0.0001);
EXPECT_NEAR(411552, results[13], 0.0001);
EXPECT_NEAR(835936, results[39], 0.0001);
EXPECT_NEAR(440368, results[50], 0.0001);
EXPECT_NEAR(875192, results[100], 0.0001);
EXPECT_NEAR(1775536, results[123], 0.0001);
EXPECT_NEAR(1050930, results[150], 0.0001);
}
TEST(uKernels, DotprodMatrixBatchFourVectorMultiplyAccumulateDotprodTest) {
ASSERT_THAT(TestDotprodMatrixBatchVectorMultiply(2, 16, 4),
testing::ElementsAreArray(
{1240, 3160, 6320, 18352, 15240, 45576, 4200, 16232}));
ASSERT_THAT(TestDotprodMatrixBatchVectorMultiply(2, 64, 4),
testing::ElementsAreArray({45794, 38948, 88536, 84252, 157626,
165312, 209864, 246128}));
ASSERT_THAT(
TestDotprodMatrixBatchVectorMultiply(2, 64, 8),
testing::ElementsAreArray({45794, 38948, 88536, 84252, 157626, 165312,
209864, 246128, 219700, 195550, 279684, 278928,
413616, 445662, 374896, 365952}));
ASSERT_THAT(
TestDotprodMatrixBatchVectorMultiply(4, 64, 8),
testing::ElementsAreArray(
{45794, 38948, 34622, 32816, 88536, 84252, 85008, 90804,
157626, 165312, 180558, 203364, 209864, 246128, 236472, 208896,
219700, 195550, 184000, 185050, 279684, 278928, 293292, 322776,
413616, 445662, 495348, 513674, 374896, 365952, 321168, 296544}));
ASSERT_THAT(
TestDotprodMatrixBatchVectorMultiply(16, 1024, 4),
testing::ElementsAreArray(
{841094, 853168, 866642, 840286, 860760, 862754, 843678,
872552, 837586, 851270, 877414, 834188, 863062, 857846,
841780, 879054, 1724476, 1769072, 1747588, 1738844, 1758240,
1742916, 1761612, 1755808, 1737684, 1750780, 1747356, 1754152,
1748348, 1753324, 1743320, 1754316, 2506896, 2564262, 2629188,
2515824, 2598390, 2569236, 2537352, 2645118, 2508444, 2571480,
2610576, 2510442, 2618208, 2566584, 2544570, 2614536, 3458904,
3502688, 3474792, 3505976, 3499360, 3488264, 3485848, 3512832,
3500616, 3482520, 3489624, 3469008, 3495992, 3524376, 3465680,
3526264}));
ASSERT_THAT(
TestDotprodMatrixBatchVectorMultiply(4, 128, 4),
testing::ElementsAreArray({87920, 80024, 92288, 103712, 228148, 224820,
233812, 213124, 271284, 271788, 332772, 328236,
419328, 431328, 411968, 417248}));
ASSERT_THAT(
TestDotprodMatrixBatchVectorMultiply(4, 128, 8),
testing::ElementsAreArray(
{87920, 80024, 92288, 103712, 228148, 224820, 233812, 213124,
271284, 271788, 332772, 328236, 419328, 431328, 411968, 417248,
482680, 523840, 560800, 593560, 563940, 609924, 566868, 644772,
743708, 857780, 818972, 823284, 708384, 695008, 730912, 872096}));
const bool kNegative = true;
EXPECT_THAT(TestDotprodMatrixBatchVectorMultiply(1, 16, 1, kNegative),
testing::ElementsAre(450));
EXPECT_THAT(TestDotprodMatrixBatchVectorMultiply(2, 64, 8, kNegative),
testing::ElementsAreArray({13696, 6904, 9952, 12368, 22848, 61632,
40424, 46776, 57630, 38670, 62976,
49824, 39032, 71988, 60128, 148992}));
std::vector<float> results =
TestDotprodMatrixBatchVectorMultiply(256, 1024, 8);
int64_t sum = 0;
for (int i = 0; i < results.size(); i++) {
sum += static_cast<int64_t>(results[i]);
}
EXPECT_EQ(7980076336, sum);
}
TEST(uKernels,
PerChannelDotprodMatrixBatchFourVectorMultiplyAccumulateDotprodTest) {
ASSERT_THAT(
TestPerChannelDotprodMatrixBatchVectorMultiply(16, 1024, 4),
testing::ElementsAreArray(
{841094 / 2, 853168, 866642 / 2, 840286, 860760 / 2, 862754,
843678 / 2, 872552, 837586 / 2, 851270, 877414 / 2, 834188,
863062 / 2, 857846, 841780 / 2, 879054, 1724476 / 2, 1769072,
1747588 / 2, 1738844, 1758240 / 2, 1742916, 1761612 / 2, 1755808,
1737684 / 2, 1750780, 1747356 / 2, 1754152, 1748348 / 2, 1753324,
1743320 / 2, 1754316, 2506896 / 2, 2564262, 2629188 / 2, 2515824,
2598390 / 2, 2569236, 2537352 / 2, 2645118, 2508444 / 2, 2571480,
2610576 / 2, 2510442, 2618208 / 2, 2566584, 2544570 / 2, 2614536,
3458904 / 2, 3502688, 3474792 / 2, 3505976, 3499360 / 2, 3488264,
3485848 / 2, 3512832, 3500616 / 2, 3482520, 3489624 / 2, 3469008,
3495992 / 2, 3524376, 3465680 / 2, 3526264}));
ASSERT_THAT(TestPerChannelDotprodMatrixBatchVectorMultiply(4, 128, 4),
testing::ElementsAreArray(
{87920 / 2, 80024, 92288 / 2, 103712, 228148 / 2, 224820,
233812 / 2, 213124, 271284 / 2, 271788, 332772 / 2, 328236,
419328 / 2, 431328, 411968 / 2, 417248}));
ASSERT_THAT(TestPerChannelDotprodMatrixBatchVectorMultiply(4, 128, 8),
testing::ElementsAreArray(
{87920 / 2, 80024, 92288 / 2, 103712, 228148 / 2, 224820,
233812 / 2, 213124, 271284 / 2, 271788, 332772 / 2, 328236,
419328 / 2, 431328, 411968 / 2, 417248, 482680 / 2, 523840,
560800 / 2, 593560, 563940 / 2, 609924, 566868 / 2, 644772,
743708 / 2, 857780, 818972 / 2, 823284, 708384 / 2, 695008,
730912 / 2, 872096}));
}
TEST(uKernels, DotprodSparseMatrixBatchVectorMultiplyAccumulate) {
EXPECT_THAT(TestSparseDotprodMatrixBatchVectorMultiply(1, 16, 1),
testing::ElementsAre(0));
EXPECT_THAT(TestSparseDotprodMatrixBatchVectorMultiply(1, 32, 1),
testing::ElementsAre(1240));
EXPECT_THAT(TestSparseDotprodMatrixBatchVectorMultiply(1, 64, 1),
testing::ElementsAre(26544));
EXPECT_THAT(TestSparseDotprodMatrixBatchVectorMultiply(1, 64, 2),
testing::ElementsAre(26544, 24344));
EXPECT_THAT(TestSparseDotprodMatrixBatchVectorMultiply(4, 64, 4),
testing::ElementsAreArray(
{26544, 15866, 22140, 11408, 24344, 53248, 42704, 39900,
48000, 94146, 101892, 81876, 87712, 105160, 148304, 75936}));
const bool kNegative = true;
EXPECT_THAT(TestSparseDotprodMatrixBatchVectorMultiply(1, 64, 1, kNegative),
testing::ElementsAre(8764));
EXPECT_THAT(TestSparseDotprodMatrixBatchVectorMultiply(2, 64, 2, kNegative),
testing::ElementsAre(8764, 5196, 7204, 11148));
}
#ifdef __ANDROID__
TEST(uKernels, MatrixBatchVectorMultiplyAccumulateSymmetricQuantizedTest) {
const int a_rows = 4, a_cols = 29;
const int kWeightsPerUint32 = 4;
const float a_float_data[] = {
1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.1, 11.11, 12.12, 13.13,
14.14, 15.15, 16.16, 17.17, 18.18, 19.19, 20.2, 21.21, 22.22, 23.23,
24.24, 25.25, 26.26, 27.27, 28.28, 0,
-1.1, -2.2, -3.3, -4.4, -5.5, -6.6, -7.7, -8.8, -9.9, -10.1, -11.11,
-12.12, -13.13, -14.14, -15.15, -16.16, -17.17, -18.18, -19.19, -20.2,
-21.21, -22.22, -23.23, -24.24, -25.25, -26.26, -27.27, -28.28, 0,
1.1, -2.2, 3.3, -4.4, 5.5, -6.6, 7.7, -8.8, 9.9, -10.1, 11.11, -12.12,
13.13, -14.14, 15.15, -16.16, 17.17, -18.18, 19.19, -20.2, 21.21, -22.22,
23.23, -24.24, 25.25, -26.26, 27.27, -28.28, 0,
-1.1, 2.2, -3.3, 4.4, -5.5, 6.6, -7.7, 8.8, -9.9, 10.1, -11.11, 12.12,
-13.13, 14.14, -15.15, 16.16, -17.17, 18.18, -19.19, 20.2, -21.21, 22.22,
-23.23, 24.24, -25.25, 26.26, -27.27, 28.28, 0};
int8_t* a_int8_data = reinterpret_cast<int8_t*>(
aligned_malloc(a_rows * a_cols, kWeightsPerUint32));
float a_min, a_max;
float scaling_factor_a;
SymmetricQuantizeFloats(a_float_data, a_rows * a_cols, a_int8_data, &a_min,
&a_max, &scaling_factor_a);
const int8_t expected_a_int8_data[] = {
5, 10, 15, 20, 25, 30, 35, 40, 44, 45, 50, 54, 59, 64, 68, 73, 77, 82, 86,
91, 95, 100, 104, 109, 113, 118, 122, 127, 0,
-5, -10, -15, -20, -25, -30, -35, -40, -44, -45, -50, -54, -59, -64, -68,
-73, -77, -82, -86, -91, -95, -100, -104, -109, -113, -118, -122, -127, 0,
5, -10, 15, -20, 25, -30, 35, -40, 44, -45, 50, -54, 59, -64, 68, -73, 77,
-82, 86, -91, 95, -100, 104, -109, 113, -118, 122, -127, 0,
-5, 10, -15, 20, -25, 30, -35, 40, -44, 45, -50, 54, -59, 64, -68, 73, -77,
82, -86, 91, -95, 100, -104, 109, -113, 118, -122, 127, 0,
};
for (int i = 0; i < a_rows * a_cols; ++i) {
EXPECT_EQ(expected_a_int8_data[i], a_int8_data[i]);
}
const int b_rows = 29, b_cols = 1, batches = 2;
const float b_float_data[] = {
1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0,
1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0,
1.0,
2.5, -2.1, 3.0, -1.3, 1.3, -1.1, 2.0, -1.7, 1.9, -1.5, 0.5, -0.7, 0.8, -0.3,
2.8, -2.8, 1.1, -2.3, 1.9, -1.9, 2.1, -0.5, 2.4, -0.1, 1.0, -2.5, 0.7, -1.9,
0.2,
};
int8_t b_int8_data[b_rows * b_cols * batches];
float b_min, b_max;
float scaling_factor_b[batches];
SymmetricQuantizeFloats(b_float_data, b_rows * b_cols, b_int8_data, &b_min,
&b_max, &scaling_factor_b[0]);
SymmetricQuantizeFloats(&b_float_data[b_rows * b_cols], b_rows * b_cols,
&b_int8_data[b_rows * b_cols], &b_min, &b_max,
&scaling_factor_b[1]);
const int8_t expected_b_int8_data[] = {
127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127,
127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127,
127,
106, -89, 127, -55, 55, -47, 85, -72, 80, -64, 21, -30, 34, -13, 119, -119,
47, -97, 80, -80, 89, -21, 102, -4, 42, -106, 30, -80, 8,
};
for (int i = 0; i < b_rows * b_cols * batches; ++i) {
EXPECT_EQ(expected_b_int8_data[i], b_int8_data[i]);
}
float c_float_data[a_rows * b_cols * batches];
for (int i = 0; i < a_rows * b_cols * batches; ++i) {
c_float_data[i] = 0.0;
}
const float scaling_factor_c[2] = {
scaling_factor_a * scaling_factor_b[0],
scaling_factor_a * scaling_factor_b[1],
};
MatrixBatchVectorMultiplyAccumulate(a_int8_data, a_rows, a_cols, b_int8_data,
scaling_factor_c, batches, c_float_data);
const float expected_c_float_data[] = {
-14.474, 14.474, 414.402, -414.402, -6.92228, 6.92228, 632.042, -632.042,
};
for (int i = 0; i < a_rows * b_cols * batches; ++i) {
EXPECT_NEAR(expected_c_float_data[i], c_float_data[i], 0.001);
}
std::vector<int32_t> accum_scratch(a_rows * batches);
std::vector<float> c_float_data_2(a_rows * batches, 0.0);
CpuBackendContext context;
MatrixBatchVectorMultiplyAccumulate(
a_int8_data, a_rows, a_cols, b_int8_data, scaling_factor_c, batches,
accum_scratch.data(), c_float_data_2.data(), &context);
for (int i = 0; i < a_rows * b_cols * batches; ++i) {
EXPECT_NEAR(expected_c_float_data[i], c_float_data_2[i], 0.001);
}
aligned_free(a_int8_data);
}
#endif
TEST(uKernels, SparseMatrixBatchVectorMultiplyAccumulateTest) {
const int kRow = 4;
const int kCol = 48;
const int kBatch = 2;
float matrix[kRow * kCol] = {
1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.1, 11.11, 12.12, 13.13,
14.14, 15.15, 16.16, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 33.33, 34.34, 35.35, 36.36, 37.37, 38.38,
39.39, 40.40, 41.41, 42.42, 43.43, 44.44, 0, 0, 0, 0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, -17.17, -18.18, -19.19, -20.2, -21.21, -22.22, -23.23, -24.24,
-25.25, -26.26, -27.27, -28.28, 0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 17.17, -18.18, 19.19, -20.2, 21.21, -22.22, 23.23, -24.24, 25.25,
-26.26, 27.27, -28.28, 0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0,
-1.1, 2.2, -3.3, 4.4, -5.5, 6.6, -7.7, 8.8, -9.9, 10.1, -11.11, 12.12,
-13.13, 14.14, -15.15, 16.16, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -33.33, 34.34, -35.35, 36.36, -37.37,
38.38, -39.39, 40.40, -41.41, 42.42, -43.43, 44.44, 0, 0, 0, 0};
float matrix_values[] = {
1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.1, 11.11, 12.12, 13.13,
14.14, 15.15, 16.16, 33.33, 34.34, 35.35, 36.36, 37.37, 38.38, 39.39,
40.40, 41.41, 42.42, 43.43, 44.44, 0, 0, 0, 0,
-17.17, -18.18, -19.19, -20.2, -21.21, -22.22, -23.23, -24.24, -25.25,
-26.26, -27.27, -28.28, 0, 0.0, 0.0, 0.0,
17.17, -18.18, 19.19, -20.2, 21.21, -22.22, 23.23, -24.24, 25.25, -26.26,
27.27, -28.28, 0, 0.0, 0.0, 0.0,
-1.1, 2.2, -3.3, 4.4, -5.5, 6.6, -7.7, 8.8, -9.9, 10.1, -11.11, 12.12,
-13.13, 14.14, -15.15, 16.16, -33.33, 34.34, -35.35, 36.36, -37.37, 38.38,
-39.39, 40.40, -41.41, 42.42, -43.43, 44.44, 0, 0, 0, 0};
uint8_t ledger[] = {
2, 0, 2,
1, 1,
1, 1,
2, 0, 2
};
float vector[kBatch * kCol] = {
1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0,
1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0,
1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0,
1.0, -1.0, 1.0, -1.0, 1.0, -1.0,
2.5, 0.0, -2.1, 0.0, 3.0, 0.0, -1.3, 0.0, 1.3, 0.0, -1.1, 0.0, 2.0, 0.0,
-1.7, 0.0, 1.9, 0.0, -1.5, 0.0, 0.5, 0.0, -0.7, 0.0, 0.8, 0.0, -0.3, 0.0,
2.8, 0.0, -2.8, 0.0, 1.1, -2.3, 1.9, -1.9, 2.1, -0.5, 2.4, -0.1, 1.0, -2.5,
0.7, -1.9, 0.2, 0.0, 0.1, 0.2,
};
std::vector<float> dense_output(kRow * kBatch, 0.0);
MatrixBatchVectorMultiplyAccumulate(matrix, kRow, kCol, vector, kBatch,
dense_output.data());
EXPECT_THAT(dense_output, ElementsAreArray(ArrayFloatNear(
{-13.69, 6.06001, 272.7, -608.03, -9.66602,
-10.201, 10.201, -713.897949},
1e-4)));
std::vector<float> sparse_output(kRow * kBatch, 0.0);
SparseMatrixBatchVectorMultiplyAccumulate(
matrix_values, ledger, kRow, kCol, vector, kBatch, sparse_output.data());
EXPECT_THAT(sparse_output,
ElementsAreArray(ArrayFloatNear(dense_output, 1e-4)));
}
#ifdef __ANDROID__
TEST(uKernels,
SparseMatrixBatchVectorMultiplyAccumulateSymmetricQuantizedTest) {
const int kRow = 4;
const int kCol = 48;
const int kBatch = 2;
const int8_t quantized_matrix[] = {
3, 6, 9, 13, 16, 19, 22, 25, 28, 29, 32, 35, 38, 40, 43, 46, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 95, 98, 101, 104, 107, 110, 113, 115,
118, 121, 124, 127, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -49, -52, -55, -58, -61,
-64, -66, -69, -72, -75, -78, -81, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 49, -52, 55, -58, 61, -64,
66, -69, 72, -75, 78, -81, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
-3, 6, -9, 13, -16, 19, -22, 25, -28, 29, -32, 35, -38, 40, -43, 46, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -95, 98, -101, 104, -107, 110,
-113, 115, -118, 121, -124, 127, 0, 0, 0, 0,
};
const int8_t quantized_matrix_values[] = {
3, 6, 9, 13, 16, 19, 22, 25, 28, 29, 32, 35, 38, 40, 43, 46, 95, 98, 101,
104, 107, 110, 113, 115, 118, 121, 124, 127, 0, 0, 0, 0,
-49, -52, -55, -58, -61, -64, -66, -69, -72, -75, -78, -81, 0, 0, 0, 0,
49, -52, 55, -58, 61, -64, 66, -69, 72, -75, 78, -81, 0, 0, 0, 0,
-3, 6, -9, 13, -16, 19, -22, 25, -28, 29, -32, 35, -38, 40, -43, 46, -95,
98, -101, 104, -107, 110, -113, 115, -118, 121, -124, 127, 0, 0, 0, 0,
};
uint8_t ledger[] = {
2, 0, 2,
1, 1,
1, 1,
2, 0, 2
};
float matrix_scaling_factor = 0.349921;
const int8_t quantized_vector[] = {
127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127,
-127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127,
127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127, -127, 127,
-127, 127, -127, 127, -127, 127, -127, 127, -127,
106, 0, -89, 0, 127, 0, -55, 0, 55, 0, -47, 0, 85, 0, -72, 0, 80, 0,
-64, 0, 21, 0, -30, 0, 34, 0, -13, 0, 119, 0, -119, 0, 47, -97, 80, -80,
89, -21, 102, -4, 42, -106, 30, -80, 8, 1, 2, 3,
};
float vector_scaling_factor[2] = {0.00787402, 0.023622};
float result_scaling_factor[2] = {
matrix_scaling_factor * vector_scaling_factor[0],
matrix_scaling_factor * vector_scaling_factor[1],
};
std::vector<float> dense_output(kRow * kBatch, 0.0);
MatrixBatchVectorMultiplyAccumulate(quantized_matrix, kRow, kCol,
quantized_vector, result_scaling_factor,
kBatch, dense_output.data());
EXPECT_THAT(dense_output,
ElementsAreArray(ArrayFloatNear(
{-13.646927, 6.298582, 272.938538, -607.813110, -6.637464,
-9.381721, 9.381721, -713.845642})));
std::vector<float> sparse_output(kRow * kBatch, 0.0);
SparseMatrixBatchVectorMultiplyAccumulate(
quantized_matrix_values, ledger, kRow, kCol, quantized_vector,
result_scaling_factor, kBatch, sparse_output.data());
EXPECT_THAT(sparse_output,
ElementsAreArray(ArrayFloatNear(
{-13.646927, 6.298582, 272.938538, -607.813110, -6.637464,
-9.381721, 9.381721, -713.845642})));
}
#endif
TEST(uKernels, VectorVectorCwiseProductTest) {
constexpr int kVectorSize = 10;
static float input1[kVectorSize] = {0.0, -0.5, 1.0, -1.5, 2.0,
-2.5, 3.0, -3.5, 4.0, -4.5};
static float input2[kVectorSize] = {0.1, -0.1, 0.1, -0.1, 0.1,
-0.1, 0.1, -0.1, 0.1, -0.1};
std::vector<float> output(kVectorSize);
VectorVectorCwiseProduct(input1, input2, kVectorSize, output.data());
EXPECT_THAT(output,
ElementsAreArray(ArrayFloatNear(
{0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45})));
}
TEST(uKernels, VectorVectorCwiseProductAccumulateTest) {
constexpr int kVectorSize = 10;
static float input1[kVectorSize] = {0.0, -0.5, 1.0, -1.5, 2.0,
-2.5, 3.0, -3.5, 4.0, -4.5};
static float input2[kVectorSize] = {0.1, -0.1, 0.1, -0.1, 0.1,
-0.1, 0.1, -0.1, 0.1, -0.1};
std::vector<float> output(kVectorSize);
std::fill(output.begin(), output.end(), 1.0);
VectorVectorCwiseProductAccumulate(input1, input2, kVectorSize,
output.data());
EXPECT_THAT(output,
ElementsAreArray(ArrayFloatNear(
{1.0, 1.05, 1.1, 1.15, 1.2, 1.25, 1.3, 1.35, 1.4, 1.45})));
}
TEST(uKernels, VectorBatchVectorAddTest) {
constexpr int kVectorSize = 3;
constexpr int kBatchSize = 2;
static float input[kVectorSize] = {0.0, -0.5, 1.0};
std::vector<float> output = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0};
VectorBatchVectorAdd(input, kVectorSize, kBatchSize, output.data());
EXPECT_THAT(output,
testing::ElementsAreArray({1.0, 1.5, 4.0, 4.0, 4.5, 7.0}));
}
TEST(uKernels, VectorBatchVectorAssignTest) {
constexpr int kVectorSize = 5;
constexpr int kBatchSize = 3;
static float input[kVectorSize] = {0.0, -0.5, 1.0, -1.5, 2.0};
std::vector<float> output(kVectorSize * kBatchSize);
VectorBatchVectorAssign(input, kVectorSize, kBatchSize, output.data());
EXPECT_THAT(output, ElementsAreArray(ArrayFloatNear(
{0.0, -0.5, 1.0, -1.5, 2.0, 0.0, -0.5, 1.0, -1.5, 2.0,
0.0, -0.5, 1.0, -1.5, 2.0})));
}
TEST(uKernels, ApplySigmoidToVectorTest) {
constexpr int kVectorSize = 5;
static float input[kVectorSize] = {0.0, -0.5, 1.0, -1.5, 2.0};
std::vector<float> output(kVectorSize);
ApplySigmoidToVector(input, kVectorSize, output.data());
EXPECT_THAT(output, ElementsAreArray(ArrayFloatNear(
{0.5, 0.377541, 0.731059, 0.182426, 0.880797})));
}
TEST(uKernels, ApplyActivationToVectorTest) {
constexpr int kVectorSize = 5;
static float input[kVectorSize] = {0.0, -0.5, 1.0, -1.5, 2.0};
std::vector<float> output(kVectorSize);
ApplyActivationToVector(input, kVectorSize, kTfLiteActRelu, output.data());
EXPECT_THAT(output,
ElementsAreArray(ArrayFloatNear({0.0, 0.0, 1.0, 0.0, 2.0})));
ApplyActivationToVector(input, kVectorSize, kTfLiteActTanh, output.data());
EXPECT_THAT(output, ElementsAreArray(ArrayFloatNear(
{0.0, -0.462117, 0.761594, -0.905148, 0.964028})));
}
TEST(uKernels, Sub1VectorTest) {
constexpr int kVectorSize = 5;
static float input[kVectorSize] = {0.0, -0.5, 1.0, -1.5, 2.0};
std::vector<float> output(kVectorSize);
Sub1Vector(input, kVectorSize, output.data());
EXPECT_THAT(output,
ElementsAreArray(ArrayFloatNear({1.0, 1.5, 0.0, 2.5, -1.0})));
}
TEST(uKernels, Sub1VectorInt16Test) {
constexpr int kVectorSize = 30;
static int16_t input[kVectorSize] = {
32760, 300, 1, 2, 3, 4, 5, 6, 300, 1000,
32767, 32000, 300, 1, 2, 3, 4, 5, 56, 300,
1000, 32767, 32761, 1300, 1, 2, 3, 4, 5, 6,
};
std::vector<int16_t> output(kVectorSize);
Sub1Vector(input, kVectorSize, output.data());
EXPECT_THAT(
output,
testing::ElementsAreArray({
7, 32467, 32766, 32765, 32764, 32763, 32762, 32761, 32467, 31767,
0, 767, 32467, 32766, 32765, 32764, 32763, 32762, 32711, 32467,
31767, 0, 6, 31467, 32766, 32765, 32764, 32763, 32762, 32761,
}));
}
TEST(uKernels, VectorBatchVectorCwiseProductAccumulateInteger) {
constexpr int kVectorSize = 29;
constexpr int kBatchSize = 4;
static int16_t vector[kVectorSize] = {-10, 9, 8, 7, 6, 5, 4, 3, 2, 1,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18};
const std::vector<int16_t> batch_vector = {
10, 11, 12, 13, 14, 15, 16, 17, 18, -10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 1,
2, 3, 4, 5, 6, 7, 8, 9,
-10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 0, 1,
2, 3, 4, 5, 6, 7, 8, 9,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, 11, 12,
13, 14, 15, 16, 17, 18,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, 11, 12,
13, 14, 15, 16, 17, 18};
std::vector<int16_t> batch_output = {
-10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 0, 1,
2, 3, 4, 5, 6, 7, 8, 9,
2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -10, 9, 8, 7, 6, 5,
4, 3, 2, 1, 10, 11, 12,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, 11, 12,
13, 14, 15, 16, 17, 18,
10, 11, 12, 13, 14, 15, 16, 17, 18, -10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 1,
13, 14, 15, 16, 17, 18};
VectorBatchVectorCwiseProductAccumulate(vector, kVectorSize,
batch_vector.data(), kBatchSize,
1073741824, -1, batch_output.data());
const std::vector<int16_t> expected_output = {
-35, 34, 32, 30, 27, 24, 20, 16, 11, -1, 10, 13, 16, 18, 19, 20, 21, 21,
20, 0, 4, 8, 12, 17, 23, 29, 35, 42, 50,
27, 24, 20, 18, 15, 14, 12, 12, 1, 2, 2, 6, 10, 15, 20, 26, 32, 39, 26, 9,
11, 13, 15, 18, 22, 26, 30, 35, 51,
11, 15, 4, 7, 8, 10, 10, 11, 10, 10, 8, 12, -6, 15, 14, 14, 12, 11, 8, 6,
27, 32, 46, 54, 61, 70, 78, 88, 97,
17, 21, 14, 17, 18, 20, 20, 21, 20, 20, 18, -7, 13, 14, 13, 13, 11, 10, 7,
5, 26, 31, 37, 56, 63, 72, 80, 90, 99};
CompareRoundingResults<int16_t>(4 * 29, expected_output.data(),
batch_output.data(), 1, 1);
}
TEST(uKernels, VectorBatchVectorCwiseProductAccumulateFloat) {
constexpr int kVectorSize = 29;
constexpr int kBatchSize = 4;
static float input[kVectorSize] = {
1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f,
9.9f, 10.10f, 11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f,
17.17f, 18.18f, 19.19f, 20.20f, 21.21f, 22.22f, 23.23f, 24.24f,
25.25f, 26.26f, 27.27f, 28.28f, 0.0f};
std::vector<float> output = {
1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f, 9.9f, 10.10f, 11.11f,
12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.20f,
21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f, 0.0f,
-1.1f, -2.2f, -3.3f, -4.4f, -5.5f, -6.6f, -7.7f, -8.8f, -9.9f, -10.10f,
-11.11f, -12.12f, -13.13f, -14.14f, -15.15f, -16.16f, -17.17f, -18.18f,
-19.19f, -20.20f, -21.21f, -22.22f, -23.23f, -24.24f, -25.25f, -26.26f,
-27.27f, -28.28f, 0.0f,
1.1f, -2.2f, 3.3f, -4.4f, 5.5f, -6.6f, 7.7f, -8.8f, 9.9f, -10.10f, 11.11f,
-12.12f, 13.13f, -14.14f, 15.15f, -16.16f, 17.17f, -18.18f, 19.19f,
-20.20f, 21.21f, -22.22f, 23.23f, -24.24f, 25.25f, -26.26f, 27.27f,
-28.28f, 0.0f,
-1.1f, 2.2f, -3.3f, 4.4f, -5.5f, 6.6f, -7.7f, 8.8f, -9.9f, 10.10f,
-11.11f, 12.12f, -13.13f, 14.14f, -15.15f, 16.16f, -17.17f, 18.18f,
-19.19f, 20.20f, -21.21f, 22.22f, -23.23f, 24.24f, -25.25f, 26.26f,
-27.27f, 28.28f, 0.0f};
VectorBatchVectorCwiseProductAccumulate(input, kVectorSize, output.data(),
kBatchSize, output.data());
const std::vector<float> expected_output = {
2.31f, 7.04f, 14.19f, 23.76f, 35.75f, 50.16f, 66.99f, 86.24f, 107.91f,
112.11f, 134.5421f, 159.0144f, 185.5269f, 214.0796f, 244.6725f, 277.3056f,
311.9789f, 348.6924f, 387.4461f, 428.24f, 471.0741f, 515.9484f, 562.8629f,
611.8176f, 662.8125f, 715.8476f, 770.9229f, 828.0384f, 0.0f,
-2.31f, -7.04f, -14.19f, -23.76f, -35.75f, -50.16f, -66.99f, -86.24f,
-107.91f, -112.11f, -134.5421f, -159.0144f, -185.5269f, -214.0796f,
-244.6725f, -277.3056f, -311.9789f, -348.6924f, -387.4461f, -428.24f,
-471.0741f, -515.9484f, -562.8629f, -611.8176f, -662.8125f, -715.8476f,
-770.9229f, -828.0384f, 0.0f,
2.31f, -7.04f, 14.19f, -23.76f, 35.75f, -50.16f, 66.99f, -86.24f, 107.91f,
-112.11f, 134.5421f, -159.0144f, 185.5269f, -214.0796f, 244.6725f,
-277.3056f, 311.9789f, -348.6924f, 387.4461f, -428.24f, 471.0741f,
-515.9484f, 562.8629f, -611.8176f, 662.8125f, -715.8476f, 770.9229f,
-828.0384f, 0.0f,
-2.31f, 7.04f, -14.19f, 23.76f, -35.75f, 50.16f, -66.99f, 86.24f,
-107.91f, 112.11f, -134.5421f, 159.0144f, -185.5269f, 214.0796f,
-244.6725f, 277.3056f, -311.9789f, 348.6924f, -387.4461f, 428.24f,
-471.0741f, 515.9484f, -562.8629f, 611.8176f, -662.8125f, 715.8476f,
-770.9229f, 828.0384f, 0.0f};
EXPECT_THAT(output, testing::ElementsAreArray(
ArrayFloatNear(expected_output, 6.5e-5f)));
}
TEST(uKernels, VectorBatchVectorCwiseProductNoAccumulate) {
constexpr int kVectorSize = 29;
constexpr int kBatchSize = 4;
static float input[kVectorSize] = {
1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.1,
11.11, 12.12, 13.13, 14.14, 15.15, 16.16, 17.17, 18.18, 19.19, 20.2,
21.21, 22.22, 23.23, 24.24, 25.25, 26.26, 27.27, 28.28, 0};
std::vector<float> output = {
1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.1, 11.11, 12.12, 13.13,
14.14, 15.15, 16.16, 17.17, 18.18, 19.19, 20.2, 21.21, 22.22, 23.23,
24.24, 25.25, 26.26, 27.27, 28.28, 0,
-1.1, -2.2, -3.3, -4.4, -5.5, -6.6, -7.7, -8.8, -9.9, -10.1, -11.11,
-12.12, -13.13, -14.14, -15.15, -16.16, -17.17, -18.18, -19.19, -20.2,
-21.21, -22.22, -23.23, -24.24, -25.25, -26.26, -27.27, -28.28, 0,
1.1, -2.2, 3.3, -4.4, 5.5, -6.6, 7.7, -8.8, 9.9, -10.1, 11.11, -12.12,
13.13, -14.14, 15.15, -16.16, 17.17, -18.18, 19.19, -20.2, 21.21, -22.22,
23.23, -24.24, 25.25, -26.26, 27.27, -28.28, 0,
-1.1, 2.2, -3.3, 4.4, -5.5, 6.6, -7.7, 8.8, -9.9, 10.1, -11.11, 12.12,
-13.13, 14.14, -15.15, 16.16, -17.17, 18.18, -19.19, 20.2, -21.21, 22.22,
-23.23, 24.24, -25.25, 26.26, -27.27, 28.28, 0};
VectorBatchVectorCwiseProduct(input, kVectorSize, output.data(), kBatchSize,
output.data());
const std::vector<float> expected_output = {
1.210000, 4.840000, 10.889999, 19.360001, 30.250000, 43.559998, 59.289997,
77.440002, 98.009995, 102.010010, 123.432091, 146.894394, 172.396896,
199.939606, 229.522491, 261.145599, 294.808899, 330.512421, 368.256134,
408.040039, 449.864075, 493.728363, 539.632874, 587.577576, 637.562500,
689.587585, 743.652954, 799.758423, 0.000000,
-1.210000, -4.840000, -10.889999, -19.360001, -30.250000, -43.559998,
-59.289997, -77.440002, -98.009995, -102.010010, -123.432091, -146.894394,
-172.396896, -199.939606, -229.522491, -261.145599, -294.808899,
-330.512421, -368.256134, -408.040039, -449.864075, -493.728363,
-539.632874, -587.577576, -637.562500, -689.587585, -743.652954,
-799.758423, 0.000000,
1.210000, -4.840000, 10.889999, -19.360001, 30.250000, -43.559998,
59.289997, -77.440002, 98.009995, -102.010010, 123.432091, -146.894394,
172.396896, -199.939606, 229.522491, -261.145599, 294.808899, -330.512421,
368.256134, -408.040039, 449.864075, -493.728363, 539.632874, -587.577576,
637.562500, -689.587585, 743.652954, -799.758423, 0.000000,
-1.210000, 4.840000, -10.889999, 19.360001, -30.250000, 43.559998,
-59.289997, 77.440002, -98.009995, 102.010010, -123.432091, 146.894394,
-172.396896, 199.939606, -229.522491, 261.145599, -294.808899, 330.512421,
-368.256134, 408.040039, -449.864075, 493.728363, -539.632874, 587.577576,
-637.562500, 689.587585, -743.652954, 799.758423, 0.000000};
EXPECT_THAT(output, testing::ElementsAreArray(expected_output));
}
TEST(uKernels, BatchVectorBatchVectorDotProductTest) {
constexpr int kVectorSize = 5;
constexpr int kBatch = 2;
static float input1[kVectorSize * kBatch] = {0.0, -0.5, 1.0, -1.5, 2.0,
-2.5, 3.0, -3.5, 4.0, -4.5};
static float input2[kVectorSize * kBatch] = {0.1, -0.1, 0.1, -0.1, 0.1,
-0.1, 0.1, -0.1, 0.1, -0.1};
std::vector<float> output(kBatch);
BatchVectorBatchVectorDotProduct(input1, input2, kVectorSize, kBatch,
output.data());
EXPECT_THAT(output, ElementsAreArray(ArrayFloatNear({0.5, 1.75})));
}
TEST(uKernels, BatchVectorBatchVectorDotProductIntegerTest) {
constexpr int kVectorSize = 5;
constexpr int kBatch = 2;
static int16_t input1[kVectorSize * kBatch] = {0, 5, 10, -15, 20,
-25, 30, -35, 40, -45};
static int16_t input2[kVectorSize * kBatch] = {1, -1, 1, -1, 1,
-1, 1, -1, 1, 1};
std::vector<int32_t> output(kBatch);
BatchVectorBatchVectorDotProduct(input1, input2, kVectorSize, kBatch,
output.data());
EXPECT_THAT(output, ElementsAreArray(ArrayFloatNear({40, 85})));
}
TEST(uKernels, ReductionSumVectorTest) {
constexpr int kInputVectorSize = 10;
constexpr int kOutputVectorSize1 = 5;
constexpr int kReductionSize1 = 2;
static float input[kInputVectorSize] = {0.0, -0.5, 1.0, -1.5, 2.0,
0.0, -0.5, 1.0, 1.0, 2.0};
std::vector<float> result1(kOutputVectorSize1);
ReductionSumVector(input, result1.data(), kOutputVectorSize1,
kReductionSize1);
EXPECT_THAT(result1,
ElementsAreArray(ArrayFloatNear({-0.5, -0.5, 2.0, 0.5, 3.0})));
constexpr int kOutputVectorSize2 = 2;
constexpr int kReductionSize2 = 5;
std::vector<float> result2(kOutputVectorSize2);
ReductionSumVector(input, result2.data(), kOutputVectorSize2,
kReductionSize2);
EXPECT_THAT(result2, ElementsAreArray(ArrayFloatNear({1.0, 3.5})));
}
TEST(uKernels, ReductionSumVectorIntegerTest) {
constexpr int kInputVectorSize = 10;
constexpr int kOutputVectorSize1 = 5;
constexpr int kReductionSize1 = 2;
static int32_t input[kInputVectorSize] = {1, 2, 1, 5, -3, 2, 1, 2, 5, 10};
std::vector<int32_t> result1(kOutputVectorSize1);
ReductionSumVector(input, result1.data(), kOutputVectorSize1,
kReductionSize1);
EXPECT_THAT(result1, testing::ElementsAreArray({3, 6, -1, 3, 15}));
}
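// Forward declaration so the test below can call this helper directly.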
void TwoGateSaturatingAdd(const int8_t* input, int8_t input_zp,
const int8_t* recurrent, int8_t recurrent_zp,
int32_t input_effective_scale_a,
int32_t input_effective_scale_b,
int32_t recurrent_effective_scale_a,
int32_t recurrent_effective_scale_b, int32_t n_batch,
int32_t n_cell, int16_t* output);
TEST(uKernels, TwoGateSaturateAddTest) {
const std::vector<int8_t> input1 = {1, 2, 3, 4, 55, 66, 77};
const std::vector<int8_t> input2 = {100, 2, 3, 4, 55, 66, 77};
const int32_t input1_zp = 10;
const int32_t input2_zp = -5;
const int32_t multiplier1 = 1347771520;
const int32_t shift1 = -7;
const int32_t multiplier2 = 1047577121;
const int32_t shift2 = -6;
std::vector<int16_t> output(7);
TwoGateSaturatingAdd(input1.data(), input1_zp, input2.data(), input2_zp,
multiplier1, shift1, multiplier2, shift2, 1, 7,
output.data());
const std::vector<int16_t> expected_output = {1, 0, 0, 0, 0, 1, 1};
EXPECT_THAT(output, testing::ElementsAreArray(expected_output));
}
namespace {
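// Parameterized over (mean, spread, tolerance) so normalization is checked
// across widely different magnitudes.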
class MeanStddevNormalizationTest
: public testing::TestWithParam<std::tuple<float, float, float>> {};
}
TEST_P(MeanStddevNormalizationTest, SeparateBatches) {
const float mean = std::get<0>(GetParam());
const float diff = std::get<1>(GetParam());
const float tolerance = std::get<2>(GetParam());
constexpr int kVectorSize = 4;
const float input[kVectorSize] = {mean - 2 * diff, mean - diff, mean + diff,
mean + 2 * diff};
float output[kVectorSize];
MeanStddevNormalization(input, output, kVectorSize, 1);
std::vector<float> expected_output;
if (diff == 0.0f) {
expected_output.assign({0.0f, 0.0f, 0.0f, 0.0f});
} else {
const float ksqrt16 = std::sqrt(1.6f);
const float ksqrt04 = std::sqrt(0.4f);
expected_output.assign({-ksqrt16, -ksqrt04, ksqrt04, ksqrt16});
}
EXPECT_THAT(output, testing::ElementsAreArray(
ArrayFloatNear(expected_output, tolerance)));
}
INSTANTIATE_TEST_SUITE_P(
uKernels, MeanStddevNormalizationTest,
testing::Values(
std::make_tuple(0.0f, 0.0f, 0.0f),
std::make_tuple(0.0f, 0.01f, 2.53e-5f),
std::make_tuple(0.0f, 100.0f, 1.20e-7f),
std::make_tuple(0.01f, 0.0f, 0.0f),
std::make_tuple(0.01f, 0.01f, 2.53e-5f),
std::make_tuple(0.01f, 100.0f, 1.20e-7f),
std::make_tuple(100.0f, 0.0f, 0.0f),
std::make_tuple(100.0f, 0.01f, 1.81e-4f),
std::make_tuple(100.0f, 100.0f, 1.20e-7f)
));
TEST(uKernels, MeanStddevNormalizationAllBatches) {
constexpr int kVectorSize = 4;
constexpr int kBatchSize = 9;
static float input[kVectorSize * kBatchSize] = {
0.0f, 0.0f, 0.0f, 0.0f,
-0.02f, -0.01f, 0.01f, 0.02f,
-200.0f, -100.0f, 100.0f, 200.0f,
0.01f, 0.01f, 0.01f, 0.01f,
-0.01f, 0.0f, 0.02f, 0.03f,
-199.99f, -99.99f, 100.01f, 200.01f,
100.0f, 100.0f, 100.0f, 100.0f,
99.98f, 99.99f, 100.01f, 100.02f,
-100.0f, 0.0f, 200.0f, 300.0f,
};
float output[kVectorSize * kBatchSize];
MeanStddevNormalization(input, output, kVectorSize, kBatchSize);
const float ksqrt16 = std::sqrt(1.6f);
const float ksqrt04 = std::sqrt(0.4f);
const std::vector<float> expected_output = {
0.0f, 0.0f, 0.0f, 0.0f,
-ksqrt16, -ksqrt04, ksqrt04, ksqrt16,
-ksqrt16, -ksqrt04, ksqrt04, ksqrt16,
0.0f, 0.0f, 0.0f, 0.0f,
-ksqrt16, -ksqrt04, ksqrt04, ksqrt16,
-ksqrt16, -ksqrt04, ksqrt04, ksqrt16,
0.0f, 0.0f, 0.0f, 0.0f,
-ksqrt16, -ksqrt04, ksqrt04, ksqrt16,
-ksqrt16, -ksqrt04, ksqrt04, ksqrt16,
};
EXPECT_THAT(output, testing::ElementsAreArray(
ArrayFloatNear(expected_output, 1.81e-4f)));
}
TEST(uKernels, MeanStddevNormalizationLargeVector) {
const float mean = 100.0f;
const float diff = 1.0f;
constexpr int kVectorSize = 16 * 16 + 16 + 1;
float input[kVectorSize];
input[0] = mean;
for (int i = 1; i < kVectorSize - 1; i += 2) {
input[i + 0] = mean + diff;
input[i + 1] = mean - diff;
}
float output[kVectorSize];
MeanStddevNormalization(input, output, kVectorSize, 1);
float expected_output[kVectorSize];
expected_output[0] = 0.0;
const float expected_elem = std::sqrt(static_cast<double>(kVectorSize) /
static_cast<double>(kVectorSize - 1));
for (int i = 1; i < kVectorSize - 1; i += 2) {
expected_output[i + 0] = +expected_elem;
expected_output[i + 1] = -expected_elem;
}
EXPECT_THAT(output, testing::Pointwise(testing::FloatEq(), expected_output));
}
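// UnpackDenseInt4IntoInt8 expands two signed 4-bit values per byte, low
// nibble first (0x38 -> -8, 3).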
TEST(uKernels, UnpackInt4Basic) {
const int8_t input[2] = {0x38, static_cast<int8_t>(0xBE)};
const int8_t expected_output[4] = {-8, 3, -2, -5};
int8_t actual_output[4];
UnpackDenseInt4IntoInt8(input, 4, actual_output);
EXPECT_THAT(actual_output,
testing::Pointwise(testing::Eq(), expected_output));
}
TEST(uKernels, UnpackInt4OddLength) {
const int8_t input[2] = {0x21, 0x43};
const int8_t expected_output[3] = {1, 2, 3};
int8_t actual_output[3];
UnpackDenseInt4IntoInt8(input, 3, actual_output);
EXPECT_THAT(actual_output,
testing::Pointwise(testing::Eq(), expected_output));
}
}
}
#ifdef DOTPROD_BENCHMARKS
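// Args are {rows, cols, batch, copies}; `copies` selects between
// independently allocated data sets, rotated every iteration. This variant
// issues one kernel call per batch element.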
void BM_DotprodBatchOneMultiply(benchmark::State& state) {
const int rows = state.range(0);
const int cols = state.range(1);
const int batch = state.range(2);
const int copies = state.range(3);
std::vector<tflite::tensor_utils::MatrixVectorData> datas;
for (int i = 0; i < copies; i++) {
datas.push_back(
tflite::tensor_utils::SetupMatrixVectorData(rows, cols, batch));
}
int copy = 0;
for (auto _ : state) {
copy = (copy + 1) % datas.size();
auto& data = datas[copy];
for (int i = 0; i < batch; i++) {
tflite::tensor_utils::MatrixBatchVectorMultiplyAccumulate(
data.matrix.data(), data.rows, data.cols,
data.vectors.data() + (data.cols * i), data.scale_factors.data(), 1,
&data.results[0]);
testing::DoNotOptimize(data.results[2]);
}
}
}
BENCHMARK(BM_DotprodBatchOneMultiply)
->Args({16, 16, 1, 1})
->Args({16, 16, 4, 1})
->Args({32, 32, 1, 1})
->Args({32, 32, 4, 1})
->Args({64, 64, 1, 1})
->Args({64, 64, 4, 1})
->Args({128, 128, 1, 1})
->Args({128, 128, 4, 1})
->Args({992, 992, 1, 1})
->Args({992, 992, 8, 1})
->Args({1024, 1024, 1, 1})
->Args({1024, 1024, 1, 8})
->Args({1024, 1024, 4, 1})
->Args({1024, 1024, 4, 8})
->Args({1024, 1024, 8, 1})
->Args({640, 2048, 1, 1})
->Args({640, 2048, 4, 1})
->Args({640, 2048, 8, 1})
->Args({640, 2048, 8, 8})
->Args({2048, 2048, 1, 1})
->Args({2048, 2048, 1, 8})
->Args({2048, 2048, 8, 1})
->Args({4096, 4096, 8, 1})
->Args({4096, 4096, 1, 8})
->Args({8192, 8192, 8, 1})
->Args({8192, 8192, 1, 8})
->Args({16384, 16384, 8, 1})
->Args({16384, 16384, 1, 8});
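// Same benchmark, but the whole batch is passed to a single kernel call.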
void BM_DotprodBatchFourMultiply(benchmark::State& state) {
const int rows = state.range(0);
const int cols = state.range(1);
const int batch = state.range(2);
const int copies = state.range(3);
std::vector<tflite::tensor_utils::MatrixVectorData> datas;
for (int i = 0; i < copies; i++) {
datas.push_back(
tflite::tensor_utils::SetupMatrixVectorData(rows, cols, batch));
}
int copy = 0;
for (auto _ : state) {
copy = (copy + 1) % datas.size();
auto& data = datas[copy];
tflite::tensor_utils::MatrixBatchVectorMultiplyAccumulate(
data.matrix.data(), data.rows, data.cols, data.vectors.data(),
data.scale_factors.data(), data.batch, &data.results[0]);
testing::DoNotOptimize(data.results[2]);
}
}
BENCHMARK(BM_DotprodBatchFourMultiply)
->Args({16, 16, 4, 1})
->Args({32, 32, 4, 1})
->Args({64, 64, 4, 1})
->Args({64, 256, 64, 1})
->Args({64, 256, 256, 1})
->Args({64, 256, 1024, 1})
->Args({64, 256, 12544, 1})
->Args({128, 128, 2, 1})
->Args({128, 128, 3, 1})
->Args({128, 128, 4, 1})
->Args({128, 128, 5, 1})
->Args({640, 640, 4, 1})
->Args({992, 992, 8, 1})
->Args({1024, 1024, 2, 1})
->Args({1024, 1024, 3, 1})
->Args({1024, 1024, 4, 1})
->Args({1024, 1024, 5, 1})
->Args({1024, 1024, 8, 1})
->Args({1024, 1024, 8, 8})
->Args({1024, 1024, 256, 1})
->Args({640, 2048, 2, 1})
->Args({640, 2048, 3, 1})
->Args({640, 2048, 4, 1})
->Args({640, 2048, 4, 8})
->Args({640, 2048, 8, 1})
->Args({2048, 2048, 3, 1})
->Args({2048, 2048, 4, 1})
->Args({2048, 2048, 4, 8})
->Args({2048, 2048, 5, 1})
->Args({2048, 2048, 8, 1})
->Args({2048, 2048, 64, 1})
->Args({2048, 2048, 1024, 1})
->Args({4096, 4096, 1024, 1})
->Args({8192, 8192, 1024, 1})
->Args({8192, 8192, 1024, 8})
->Args({16384, 16384, 1024, 1})
->Args({16384, 8192, 1024, 1});
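// Benchmarks the ledger-based sparse kernel.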
void BM_DotprodSparseMultiply(benchmark::State& state) {
const int rows = state.range(0);
const int cols = state.range(1);
const int batch = state.range(2);
const int copies = state.range(3);
std::vector<tflite::tensor_utils::MatrixVectorData> datas;
for (int i = 0; i < copies; i++) {
datas.push_back(
tflite::tensor_utils::SetupMatrixVectorData(rows, cols, batch));
}
int copy = 0;
for (auto _ : state) {
copy = (copy + 1) % datas.size();
auto& data = datas[copy];
tflite::tensor_utils::SparseMatrixBatchVectorMultiplyAccumulate(
data.sparse_matrix.data(), data.ledger.data(), data.rows, data.cols,
data.vectors.data(), data.scale_factors.data(), data.batch,
&data.results[0]);
testing::DoNotOptimize(data.results[2]);
}
}
BENCHMARK(BM_DotprodSparseMultiply)
->Args({128, 128, 1, 1})
->Args({128, 128, 4, 1})
->Args({640, 640, 4, 1})
->Args({992, 992, 8, 1})
->Args({1024, 1024, 1, 1})
->Args({1024, 1024, 4, 1})
->Args({1024, 1024, 8, 1})
->Args({640, 2048, 1, 1})
->Args({640, 2048, 4, 1})
->Args({640, 2048, 8, 1})
->Args({2048, 2048, 1, 1})
->Args({2048, 2048, 8, 1});
void BM_DotprodFloatMultiply(benchmark::State& state) {
const int rows = state.range(0);
const int cols = state.range(1);
const int batch = state.range(2);
std::vector<float> matrix(rows * cols);
std::fill(matrix.begin(), matrix.end(), 1.0);
std::vector<float> vector(cols * batch);
std::fill(vector.begin(), vector.end(), 0.3);
std::vector<float> output(rows * batch);
for (auto _ : state) {
std::fill(output.begin(), output.end(), 0.0);
tflite::tensor_utils::MatrixBatchVectorMultiplyAccumulate(
matrix.data(), rows, cols, vector.data(), batch, output.data());
}
}
BENCHMARK(BM_DotprodFloatMultiply)
->Args({16, 16, 4})
->Args({32, 32, 4})
->Args({64, 64, 4})
->Args({64, 256, 64})
->Args({64, 256, 256})
->Args({64, 256, 1024})
->Args({64, 256, 12544})
->Args({128, 128, 2})
->Args({128, 128, 3})
->Args({128, 128, 4})
->Args({128, 128, 5})
->Args({640, 640, 4})
->Args({992, 992, 8})
->Args({1024, 1024, 2})
->Args({1024, 1024, 3})
->Args({1024, 1024, 4})
->Args({1024, 1024, 5})
->Args({1024, 1024, 8})
->Args({1024, 1024, 8})
->Args({1024, 1024, 256})
->Args({640, 2048, 2})
->Args({640, 2048, 3})
->Args({640, 2048, 4})
->Args({640, 2048, 4})
->Args({640, 2048, 8})
->Args({2048, 2048, 3})
->Args({2048, 2048, 4})
->Args({2048, 2048, 4})
->Args({2048, 2048, 5})
->Args({2048, 2048, 8});
#endif
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/api/tensor_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/tensor_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ca410313-e701-44a2-b7c7-7d99c9f9e40f | cpp | tensorflow/tensorflow | mkl_swish_op | tensorflow/core/kernels/mkl/mkl_swish_op.cc | tensorflow/core/kernels/mkl/mkl_swish_op_test.cc
#ifdef INTEL_MKL
#include "tensorflow/core/kernels/mkl/mkl_eltwise_activation_base_op.h"
namespace tensorflow {
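// oneDNN-backed Swish (x * sigmoid(x)) forward kernel, built on the shared
// eltwise activation base class with the eltwise_swish algorithm.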
template <typename Device, typename T>
class MklSwishOp
: public MklEltwiseFwdActivationOpBase<Device, T,
dnnl::algorithm::eltwise_swish> {
public:
~MklSwishOp() {}
explicit MklSwishOp(OpKernelConstruction* context)
: MklEltwiseFwdActivationOpBase<Device, T,
dnnl::algorithm::eltwise_swish>(
context, 1.0f, 0.0f) {}
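  // Computes x / (1 + exp(-x)) directly for the scalar case instead of
  // dispatching to oneDNN (presumably invoked by the base class when the
  // input is 0-D).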
virtual void Compute_Scalar(OpKernelContext* context) {
const Tensor& src_tensor = context->input(0);
TensorShape src_shape = src_tensor.shape();
Tensor* dst_tensor = nullptr;
void* user_i =
static_cast<void*>(const_cast<T*>(src_tensor.flat<T>().data()));
TensorShape dst_shape = src_shape;
OP_REQUIRES_OK(context, context->allocate_output(
GetTensorDataIndex(0, context->num_outputs()),
dst_shape, &dst_tensor));
void* out_o = static_cast<void*>(dst_tensor->flat<T>().data());
T feature = (static_cast<T*>(user_i))[0];
T e1 = Eigen::numext::exp(-feature);
(static_cast<T*>(out_o))[0] = feature / (static_cast<T>(1) + e1);
return;
}
};
#define REGISTER_SWISH_MKL_SUPPORTED_KERNELS_TYPES(type) \
REGISTER_KERNEL_BUILDER( \
Name("_MklSwish").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
MklSwishOp<CPUDevice, type>);
TF_CALL_float(REGISTER_SWISH_MKL_SUPPORTED_KERNELS_TYPES);
TF_CALL_bfloat16(REGISTER_SWISH_MKL_SUPPORTED_KERNELS_TYPES);
TF_CALL_half(REGISTER_SWISH_MKL_SUPPORTED_KERNELS_TYPES);
}
#endif
#ifdef INTEL_MKL
#include "absl/strings/match.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/mkl/mkl_eltwise_activation_base_op.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/stacktrace_handler.h"
#include "tensorflow/core/platform/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/mkl_util.h"
namespace tensorflow {
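// Builds either the default Sigmoid+Mul subgraph or the fused _MklSwish op so
// the benchmark below can compare the two.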
template <typename T>
static Graph* SwishGraph(const string& kind, const TensorShape& shape) {
auto* graph = new Graph(OpRegistry::Global());
DataType dtype = DataTypeToEnum<T>::v();
Tensor input_t(dtype, shape);
input_t.flat<T>().setRandom();
Node* input = test::graph::Constant(graph, input_t, "input");
const bool isDefault = (kind == "Default");
Node* sigmoid;
Node* mul;
Node* swish;
if (isDefault) {
TF_CHECK_OK(NodeBuilder(graph->NewName("Default_sigmoid"), "Sigmoid")
.Input(input)
.Attr("T", dtype)
.Finalize(graph, &sigmoid));
TF_CHECK_OK(NodeBuilder(graph->NewName("Default_mul"), "Mul")
.Input(input)
.Input(sigmoid)
.Attr("T", dtype)
.Finalize(graph, &mul));
return graph;
}
TF_CHECK_OK(NodeBuilder(graph->NewName("Mkl_swish"), "_MklSwish")
.Input(input)
.Attr("T", dtype)
.Finalize(graph, &swish));
return graph;
}
#define BM_SWISH(kind, A, B, C, D, type, T) \
static void BM_SWISH_##kind##_##type##_##A##_##B##_##C##_##D##_##T( \
::testing::benchmark::State& state) { \
int64 num_computed_elements = (A) * (B) * (C) * (D); \
int64 flops_per_iter = num_computed_elements; \
\
test::Benchmark(#type, SwishGraph<T>(#kind, {A, B, C, D})).Run(state); \
state.SetItemsProcessed(state.iterations() * flops_per_iter); \
} \
BENCHMARK(BM_SWISH_##kind##_##type##_##A##_##B##_##C##_##D##_##T)
#define BENCHMARK_SWISH(A, B, C, D, type, T) \
BM_SWISH(Default, A, B, C, D, type, T); \
BM_SWISH(Mkl, A, B, C, D, type, T);
#define BENCHMARK_DTYPE(T) \
BENCHMARK_SWISH(1, 16, 16, 3, cpu, T); \
BENCHMARK_SWISH(16, 32, 32, 1, cpu, T); \
BENCHMARK_SWISH(16, 64, 64, 128, cpu, T); \
BENCHMARK_SWISH(32, 64, 64, 128, cpu, T); \
BENCHMARK_SWISH(32, 256, 256, 128, cpu, T); \
BENCHMARK_SWISH(32, 512, 512, 128, cpu, T);
BENCHMARK_DTYPE(float)
BENCHMARK_DTYPE(bfloat16)
}
#endif
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_swish_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_swish_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ee0b0fcd-8545-49e5-8b69-2c9a4f14e780 | cpp | tensorflow/tensorflow | conv_constants | tensorflow/lite/delegates/gpu/common/tasks/conv_constants.cc | tensorflow/lite/delegates/gpu/cl/kernels/conv_constants_test.cc
#include "tensorflow/lite/delegates/gpu/common/tasks/conv_constants.h"
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
namespace tflite {
namespace gpu {
namespace {
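// Per-GPU cap on the weight buffer size (in bytes) allowed for the
// constant-weights path.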
int GetAdrenoOptimalMaxConstantSize(const AdrenoInfo& adreno_info) {
if (adreno_info.IsAdreno3xx() || adreno_info.IsAdreno4xx() ||
adreno_info.IsAdreno5xx()) {
return 256 * 10;
} else {
return 256 * 14;
}
}
int GetOptimalMaxConstantSize(const GpuInfo& gpu_info) {
if (gpu_info.IsAdreno()) {
return GetAdrenoOptimalMaxConstantSize(gpu_info.adreno_info);
} else if (gpu_info.IsAMD()) {
return 4096;
} else {
return 1024;
}
}
void AppendConditionally(const std::string& value, const std::string& delimiter,
                         std::string* result) {
  if (!result->empty()) {
    *result += delimiter;
}
*result += value;
}
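// Emits the accumulation of one source slice into one destination slice:
// either a dot() per destination channel or a multiply-add per source channel.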
std::string GenerateConv(int src_size, int dst_size, bool use_dot_conv,
int const_mem_offset, CalculationsPrecision precision,
const std::string& dst, const std::string& src) {
std::string result;
const std::string postfixes[] = {".x", ".y", ".z", ".w"};
if (use_dot_conv) {
const std::string src_postfixes[] = {".x", ".xy", ".xyz", ""};
const std::string src_postfix = src_postfixes[src_size - 1];
for (int i = 0; i < dst_size; ++i) {
result += " " + dst + postfixes[i] + " += dot(" + src +
", args.weights.Read(" + std::to_string(const_mem_offset + i) +
")" + src_postfix + ");\n";
}
} else {
const std::string dst_postfixes[] = {".x", ".xy", ".xyz", ""};
const std::string dst_postfix = dst_postfixes[dst_size - 1];
if (precision == CalculationsPrecision::F32_F16) {
for (int i = 0; i < src_size; ++i) {
if (i != 0) {
result += " + ";
}
std::string src_name = src;
if (src_size != 1) {
src_name += postfixes[i];
}
result += src_name + " * args.weights.Read(" +
std::to_string(const_mem_offset + i) + ")" + dst_postfix;
}
std::string size = dst_size == 1 ? "" : std::to_string(dst_size);
result = " " + dst + dst_postfix + " += TO_ACCUM_FLT" + size + "(" +
result + ");\n";
} else {
for (int i = 0; i < src_size; ++i) {
std::string src_name = src;
if (src_size != 1) {
src_name += postfixes[i];
}
result += " " + dst + dst_postfix + " += " + src_name +
" * args.weights.Read(" +
std::to_string(const_mem_offset + i) + ")" + dst_postfix +
";\n";
}
}
}
return result;
}
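// Generates the shader body: each work item computes one (X, Y) output
// position for every destination slice, fully unrolling the kernel window and
// source slices and reading weights through args.weights.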
std::string GenerateConvolutionConstantCode(const GpuInfo& gpu_info,
const OperationDef& op_def,
const OHWI& weights_shape,
bool x_oob_reads, bool y_oob_reads,
bool use_dot_conv,
GPUOperation* op) {
auto src_desc = op_def.src_tensors[0];
op->AddSrcTensor("src_tensor", src_desc);
op->AddDstTensor("dst_tensor", op_def.dst_tensors[0]);
const int out_z = DivideRoundUp(weights_shape.o, 4);
const std::string kOutZ = std::to_string(out_z);
const int src_depth = DivideRoundUp(weights_shape.i, 4);
const std::string postfixes[] = {".x", ".xy", ".xyz", ""};
std::string c;
c += "MAIN_FUNCTION($0) {\n";
if (src_desc.HasAxis(Axis::BATCH)) {
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int X = linear_id / args.dst_tensor.Batch();\n";
c += " int B = linear_id % args.dst_tensor.Batch();\n";
c += " args.src_tensor.SetBatchRef(B);\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
} else {
c += " int X = GLOBAL_ID_0;\n";
}
c += " int Y = GLOBAL_ID_1;\n";
c += " if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height()) "
"return;\n";
c += " int start_x = X * args.stride_x + args.padding_x;\n";
c += " int start_y = Y * args.stride_y + args.padding_y;\n";
for (int i = 0; i < out_z; ++i) {
c += " ACCUM_FLT4 r" + std::to_string(i) + " = INIT_ACCUM_FLT4(0.0f);\n";
}
std::string check;
if (y_oob_reads && !src_desc.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) {
AppendConditionally("inside_y", " && ", &check);
}
if (x_oob_reads && !src_desc.SupportsZeroClamp(Axis::WIDTH, gpu_info)) {
AppendConditionally("inside_x", " && ", &check);
}
int filters_counter = 0;
for (int s = 0; s < src_depth; ++s) {
const int src_ch_count = std::min(4, weights_shape.i - s * 4);
const std::string s_count =
src_ch_count == 1 ? "" : std::to_string(src_ch_count);
const std::string s_type = absl::StrCat("FLT", s_count);
const std::string s_postfix = postfixes[src_ch_count - 1];
for (int ky = 0; ky < weights_shape.h; ++ky) {
std::string s_y = absl::StrCat("(start_y + ", ky, " * args.dilation_y)");
c += " {\n";
c += " int y_c = start_y + " + std::to_string(ky) +
" * args.dilation_y;\n";
if (y_oob_reads && !src_desc.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) {
c +=
" bool inside_y = y_c >= 0 && y_c < args.src_tensor.Height();\n";
c += " y_c = clamp(y_c, 0, args.src_tensor.Height() - 1);\n";
}
for (int kx = 0; kx < weights_shape.w; ++kx) {
c += " {\n";
c += " int x_c = start_x + " + std::to_string(kx) +
" * args.dilation_x;\n";
if (x_oob_reads && !src_desc.SupportsZeroClamp(Axis::WIDTH, gpu_info)) {
c += " bool inside_x = x_c >= 0 && x_c < "
"args.src_tensor.Width();\n";
c += " x_c = clamp(x_c, 0, args.src_tensor.Width() - 1);\n";
}
c += " " + s_type + " src = args.src_tensor.Read(x_c, y_c, " +
std::to_string(s) + ")" + s_postfix + ";\n";
if (!check.empty()) {
c += " src *= INIT_FLT(" + check + ");\n";
}
for (int d = 0; d < out_z; ++d) {
const int dst_ch_count = std::min(4, weights_shape.o - d * 4);
c += GenerateConv(src_ch_count, dst_ch_count, use_dot_conv,
filters_counter, op_def.precision,
"r" + std::to_string(d), "src");
filters_counter += use_dot_conv ? dst_ch_count : src_ch_count;
}
c += " }\n";
}
c += " }\n";
}
}
for (int i = 0; i < out_z; ++i) {
std::string s_i = std::to_string(i);
c += " {\n";
c += " FLT4 res = TO_FLT4(r" + s_i + ") + args.biases.Read(" + s_i +
");\n";
c += " args.dst_tensor.Write(res, X, Y, " + s_i + ");\n";
c += " }\n";
}
c += "}\n";
return c;
}
bool IsDotConvBetter(int src_channels, int dst_channels) {
if (dst_channels % 4 == 0) {
return false;
}
if (src_channels % 4 == 0) {
return true;
}
const int src_depth = DivideRoundUp(src_channels, 4);
const int dst_depth = DivideRoundUp(dst_channels, 4);
return dst_channels * src_depth < src_channels * dst_depth;
}
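// A quick worked sketch of the heuristic above (values chosen only for
// illustration): IsDotConvBetter(3, 5) compares 5 * ceil(3/4) = 5 against
// 3 * ceil(5/4) = 6, so the dot-conv layout wins; IsDotConvBetter(8, 5)
// returns true immediately because the source channels are 4-aligned; and a
// 4-aligned destination channel count, e.g. IsDotConvBetter(3, 8), always
// rejects it.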
}
bool IsConvConstantsSupported(const GpuInfo& gpu_info,
const OperationDef& definition,
const Convolution2DAttributes& attr) {
if (gpu_info.IsApiOpenCl() && gpu_info.IsAdreno()) {
const std::string kBadDriver =
"OpenCL 2.0 QUALCOMM build: commit #7ff4f54 changeid #I4460aa6217 "
"Date: 12/30/18";
if (absl::StrContains(gpu_info.opencl_info.platform_version, kBadDriver)) {
return false;
}
}
if (attr.groups != 1) {
return false;
}
const bool use_dot_conv =
IsDotConvBetter(attr.weights.shape.i, attr.weights.shape.o);
const auto& w_shape = attr.weights.shape;
const int src_depth = DivideRoundUp(w_shape.i, 4);
const int dst_depth = DivideRoundUp(w_shape.o, 4);
const int aligned_ch_count =
use_dot_conv ? w_shape.o * src_depth * 4 : w_shape.i * dst_depth * 4;
const int filters_count = aligned_ch_count * w_shape.h * w_shape.w;
const int float_size = definition.precision == CalculationsPrecision::F32
? sizeof(float)
: sizeof(half);
const int filters_buffer_size = filters_count * float_size;
const int kConstantMaxSize = GetOptimalMaxConstantSize(gpu_info);
const int flt4_registers = DivideRoundUp(w_shape.o, 4);
return filters_buffer_size <= kConstantMaxSize && flt4_registers <= 8;
}
GPUOperation CreateConvConstants(const GpuInfo& gpu_info,
const OperationDef& definition,
const Convolution2DAttributes& attr) {
const bool use_dot_conv =
IsDotConvBetter(attr.weights.shape.i, attr.weights.shape.o);
GPUOperation op(definition);
UploadWeightsForConvConstants(attr.weights, gpu_info, definition.precision,
use_dot_conv, &op);
op.args_.AddInt("stride_x", attr.strides.w);
op.args_.AddInt("stride_y", attr.strides.h);
op.args_.AddInt("padding_x", -attr.padding.prepended.w);
op.args_.AddInt("padding_y", -attr.padding.prepended.h);
op.args_.AddInt("dilation_x", attr.dilations.w);
op.args_.AddInt("dilation_y", attr.dilations.h);
op.tensor_to_grid_ = TensorToGrid::kWBToX_HDToY_ZIs1;
bool x_oob_reads =
attr.padding.appended.w != 0 || attr.padding.prepended.w != 0;
bool y_oob_reads =
attr.padding.appended.h != 0 || attr.padding.prepended.h != 0;
op.code_ = GenerateConvolutionConstantCode(gpu_info, definition,
attr.weights.shape, x_oob_reads,
y_oob_reads, use_dot_conv, &op);
if (definition.precision == CalculationsPrecision::F16 &&
gpu_info.IsAdreno() && gpu_info.adreno_info.IsAdreno3xx()) {
op.compiler_options_.push_back(CompilerOptions::kAdrenoFullSimd);
}
if (definition.precision != CalculationsPrecision::F32 &&
gpu_info.IsPowerVR()) {
op.compiler_options_.push_back(CompilerOptions::kClDisableOptimizations);
}
TensorDescriptor bias_tensor_desc = CreateConstantLinearTensorDescriptor(
gpu_info, definition.src_tensors[0].GetDataType(), attr.bias);
op.args_.AddObject("biases", std::make_unique<TensorDescriptor>(
std::move(bias_tensor_desc)));
return op;
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/conv_constants_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
TEST_F(OpenCLOperationTest, ConvConstantsSimpleWeights) {
const auto status = ConvConstantsSimpleWeightsTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, ConvConstants) {
const auto status = ConvConstantsTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/conv_constants.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/conv_constants_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5b8f066c-2a97-4e81-9265-a2cf2cc649cd | cpp | tensorflow/tensorflow | sparse_core_layout | tensorflow/core/tpu/kernels/sparse_core_layout.cc | tensorflow/core/tpu/kernels/sparse_core_layout_test.cc | #include "tensorflow/core/tpu/kernels/sparse_core_layout.h"
#include <cstdint>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/substitute.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/core/tpu/kernels/sparse_core_layout.pb.h"
#include "tsl/platform/stringpiece.h"
namespace tensorflow {
ABSL_ATTRIBUTE_WEAK bool GetDisableTableStacking(bool disable_table_stacking) {
bool should_disable_stacking = false;
XlaSparseCoreFlags *sparse_core_flags = GetXlaSparseCoreFlags();
should_disable_stacking =
sparse_core_flags->tf_xla_sparse_core_disable_table_stacking;
return should_disable_stacking || disable_table_stacking;
}
ABSL_ATTRIBUTE_WEAK int64_t GetXlaSparseCoreStackingMemLimit() {
XlaSparseCoreFlags *sparse_core_flags = GetXlaSparseCoreFlags();
return sparse_core_flags->tf_xla_sparse_core_stacking_mem_limit_bytes;
}
ABSL_ATTRIBUTE_WEAK int64_t GetXlaSparseCoreStackingTableShardLimit() {
XlaSparseCoreFlags *sparse_core_flags = GetXlaSparseCoreFlags();
return sparse_core_flags->tf_xla_sparse_core_stacking_table_shard_limit_bytes;
}
namespace tpu {
static int64_t NextLargestMultiple(int64_t n, int64_t factor) {
int64_t extra = n % factor;
if (extra == 0) return n;
return n + factor - extra;
}
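// For example (illustration only): NextLargestMultiple(100, 8) == 104 and
// NextLargestMultiple(100, 64) == 128, which is how table widths are padded to
// multiples of 8 and table heights to multiples of 8 * the number of sparse
// cores below.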
SparseCoreLayoutStacker::SparseCoreLayoutStacker(int num_partitions,
bool disable_table_stacking,
int sparse_cores_per_partition)
: num_partitions_(num_partitions),
sparse_cores_per_partition_(sparse_cores_per_partition),
num_sparse_cores_(num_partitions_ * sparse_cores_per_partition_),
stacking_enabled_(!GetDisableTableStacking(disable_table_stacking)),
activation_mem_bytes_limit_(GetXlaSparseCoreStackingMemLimit()),
variable_shard_bytes_limit_(GetXlaSparseCoreStackingTableShardLimit()) {}
absl::Status SparseCoreLayoutStacker::AddTable(absl::string_view table_name,
int64_t table_height,
int64_t table_width,
absl::string_view group,
int64_t output_samples) {
if (stacks_by_group_.empty()) {
VLOG(1) << "Stacking parameters: stacking_enabled_ = " << stacking_enabled_
<< ", activation_mem_bytes_limit_ = " << activation_mem_bytes_limit_
<< ", variable_shard_bytes_limit_ = " << variable_shard_bytes_limit_
<< ", row_limit_ = " << row_limit_
<< ", table_limit_ = " << table_limit_;
}
VLOG(2) << "Table " << table_name << ":";
int64_t samples_per_sparse_core =
output_samples / sparse_cores_per_partition_;
int64_t padded_width = NextLargestMultiple(table_width, 8);
int64_t padded_height =
NextLargestMultiple(table_height, num_sparse_cores_ * 8);
VLOG(2) << " Original size: " << table_height << "x" << table_width
<< " padded size: " << padded_height << "x" << padded_width;
int64_t activation_mem_bytes =
sizeof(float) * padded_width * samples_per_sparse_core;
int64_t variable_shard_bytes =
sizeof(float) * padded_width * padded_height / num_partitions_;
VLOG(2) << " activation mem = " << activation_mem_bytes
<< ", variable shard bytes = " << variable_shard_bytes;
std::vector<TableStack> &candidate_stacks =
stacks_by_group_[std::make_pair(padded_width, std::string(group))];
TableStack *stack = nullptr;
if (stacking_enabled_) {
for (TableStack &ts : candidate_stacks) {
if (ts.incomplete_tables.size() >= table_limit_) continue;
if (activation_mem_bytes_limit_ != 0 &&
ts.total_activation_mem_bytes + activation_mem_bytes >=
activation_mem_bytes_limit_) {
continue;
}
if (variable_shard_bytes_limit_ != 0 &&
ts.total_variable_shard_bytes + variable_shard_bytes >=
variable_shard_bytes_limit_) {
continue;
}
if (row_limit_ != 0 &&
ts.unsharded_height + padded_height >= row_limit_) {
continue;
}
stack = &ts;
break;
}
}
if (stack == nullptr) {
candidate_stacks.emplace_back();
stack = &candidate_stacks.back();
stack->padded_width = padded_width;
stack->temporary_name = absl::Substitute("w$0_i$1_$2", padded_width,
candidate_stacks.size(), group);
}
stack->incomplete_tables.emplace_back();
SparseCoreTableLayout &layout = stack->incomplete_tables.back();
layout.set_table_name(std::string(table_name));
layout.set_num_sparse_cores(num_sparse_cores_);
layout.set_num_partitions(num_partitions_);
layout.add_unsharded_shape(table_height);
layout.add_unsharded_shape(table_width);
layout.add_unsharded_padded_shape(padded_height);
layout.add_unsharded_padded_shape(padded_width);
  // Row offset of this table within each sparse-core shard: the rows already
  // consumed by the tables stacked before it.
  layout.set_sparse_core_shard_row_offset(stack->unsharded_height /
                                          num_sparse_cores_);
  // Each successive table in a stack is rotated by one partition's worth of
  // sparse cores so that the tables' first shards land on different cores.
  layout.set_sparse_core_shard_rotation(((stack->incomplete_tables.size() - 1) *
                                         num_sparse_cores_ / num_partitions_) %
                                         num_sparse_cores_);
stack->unsharded_height += padded_height;
stack->total_variable_shard_bytes += variable_shard_bytes;
stack->total_activation_mem_bytes += activation_mem_bytes;
return absl::OkStatus();
}
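// Worked example for AddTable (illustrative numbers): with 2 partitions and 4
// sparse cores per partition (8 sparse cores total), a 100x6 table with 10
// output samples is padded to 128x8 (width to a multiple of 8, height to a
// multiple of 8 * 8 = 64). With float weights this gives
// 4 * 8 * (10 / 4) = 64 bytes of activation memory per sparse core and
// 4 * 8 * 128 / 2 = 2048 bytes of variable shard per partition.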
absl::StatusOr<SparseCoreTableLayouts> SparseCoreLayoutStacker::GetLayouts() {
SparseCoreTableLayouts layouts;
for (const auto &[key, stacks] : stacks_by_group_) {
VLOG(1) << "Stack group: padded width " << key.first
<< ", name = " << key.second;
for (const TableStack &stack : stacks) {
VLOG(1) << " Stack " << stack.temporary_name
<< ": unsharded_height = " << stack.unsharded_height
<< ", total_activation_mem_bytes = "
<< stack.total_activation_mem_bytes
<< ", total_variable_shard_bytes = "
<< stack.total_variable_shard_bytes;
std::string stacked_table_name;
for (const SparseCoreTableLayout &incomplete_layout :
stack.incomplete_tables) {
if (!stacked_table_name.empty()) stacked_table_name += "_";
absl::StrAppend(&stacked_table_name, incomplete_layout.table_name());
}
for (const SparseCoreTableLayout &incomplete_layout :
stack.incomplete_tables) {
SparseCoreTableLayout *out_layout = layouts.add_tables();
*out_layout = incomplete_layout;
out_layout->set_stacked_table_name(stacked_table_name);
VLOG(1) << " Contains " << out_layout->table_name();
out_layout->set_total_rows_per_sparse_core_shard(
stack.unsharded_height / num_sparse_cores_);
}
}
}
return layouts;
}
}
} | #include "tensorflow/core/tpu/kernels/sparse_core_layout.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/tpu/kernels/sparse_core_layout.pb.h"
namespace tensorflow {
namespace tpu {
namespace {
using ::testing::EqualsProto;
using ::testing::proto::Partially;
using ::testing::status::IsOkAndHolds;
TEST(SparseCoreLayoutStacker, StacksTwoTablesAndPads) {
SparseCoreLayoutStacker stacker(2);
ASSERT_OK(stacker.AddTable("table1", 100, 6, "stack1", 10));
ASSERT_OK(stacker.AddTable("table2", 50, 5, "stack1", 10));
EXPECT_THAT(stacker.GetLayouts(), IsOkAndHolds(EqualsProto(R"pb(
tables {
table_name: 'table1'
stacked_table_name: 'table1_table2'
num_sparse_cores: 8
num_partitions: 2
total_rows_per_sparse_core_shard: 24 # = (128 + 64) / 8
unsharded_shape: [ 100, 6 ]
unsharded_padded_shape: [ 128, 8 ]
sparse_core_shard_row_offset: 0
sparse_core_shard_rotation: 0
}
tables {
table_name: 'table2'
stacked_table_name: 'table1_table2'
num_sparse_cores: 8
num_partitions: 2
total_rows_per_sparse_core_shard: 24
unsharded_shape: [ 50, 5 ]
unsharded_padded_shape: [ 64, 8 ]
sparse_core_shard_row_offset: 16 # = 128/8
sparse_core_shard_rotation: 4
}
)pb")));
}
TEST(SparseCoreLayoutStacker, RespectsDisableStacking) {
SparseCoreLayoutStacker stacker(2);
stacker.SetStackingEnabled(false);
ASSERT_OK(stacker.AddTable("table1", 100, 6, "stack1", 10));
ASSERT_OK(stacker.AddTable("table2", 50, 5, "stack1", 10));
EXPECT_THAT(stacker.GetLayouts(), IsOkAndHolds(EqualsProto(R"pb(
tables {
table_name: 'table1'
stacked_table_name: 'table1'
num_sparse_cores: 8
num_partitions: 2
total_rows_per_sparse_core_shard: 16 # = 128 / 8
unsharded_shape: [ 100, 6 ]
unsharded_padded_shape: [ 128, 8 ]
sparse_core_shard_row_offset: 0
sparse_core_shard_rotation: 0
}
tables {
table_name: 'table2'
stacked_table_name: 'table2'
num_sparse_cores: 8
num_partitions: 2
total_rows_per_sparse_core_shard: 8 # = 64/8
unsharded_shape: [ 50, 5 ]
unsharded_padded_shape: [ 64, 8 ]
sparse_core_shard_row_offset: 0
sparse_core_shard_rotation: 0
}
)pb")));
}
TEST(SparseCoreLayoutStacker, RespectsActivationMemLimit) {
SparseCoreLayoutStacker stacker(2);
stacker.SetActivationMemoryBytesLimit(16384 + 1);
ASSERT_OK(stacker.AddTable("table1", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table2", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table3", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table4", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table5", 128, 8, "stack1", 1024));
EXPECT_THAT(
stacker.GetLayouts(), IsOkAndHolds(Partially(EqualsProto(R"pb(
tables { table_name: 'table1' stacked_table_name: 'table1_table2' }
tables { table_name: 'table2' stacked_table_name: 'table1_table2' }
tables { table_name: 'table3' stacked_table_name: 'table3_table4' }
tables { table_name: 'table4' stacked_table_name: 'table3_table4' }
tables { table_name: 'table5' stacked_table_name: 'table5' }
)pb"))));
}
TEST(SparseCoreLayoutStacker, RespectsVariableShardLimit) {
SparseCoreLayoutStacker stacker(2);
stacker.SetVariableShardBytesLimit(4096 + 1);
ASSERT_OK(stacker.AddTable("table1", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table2", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table3", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table4", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table5", 128, 8, "stack1", 1024));
EXPECT_THAT(
stacker.GetLayouts(), IsOkAndHolds(Partially(EqualsProto(R"pb(
tables { table_name: 'table1' stacked_table_name: 'table1_table2' }
tables { table_name: 'table2' stacked_table_name: 'table1_table2' }
tables { table_name: 'table3' stacked_table_name: 'table3_table4' }
tables { table_name: 'table4' stacked_table_name: 'table3_table4' }
tables { table_name: 'table5' stacked_table_name: 'table5' }
)pb"))));
}
TEST(SparseCoreLayoutStacker, RespectsRowLimit) {
SparseCoreLayoutStacker stacker(2);
stacker.SetActivationMemoryBytesLimit(0);
stacker.SetVariableShardBytesLimit(0);
ASSERT_OK(stacker.AddTable("table1", 1 << 29, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table2", 1 << 29, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table3", 1 << 29, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table4", 1 << 29, 8, "stack1", 1024));
EXPECT_THAT(stacker.GetLayouts(), IsOkAndHolds(Partially(EqualsProto(R"pb(
tables {
table_name: 'table1'
stacked_table_name: 'table1_table2_table3'
}
tables {
table_name: 'table2'
stacked_table_name: 'table1_table2_table3'
}
tables {
table_name: 'table3'
stacked_table_name: 'table1_table2_table3'
}
tables { table_name: 'table4' stacked_table_name: 'table4' }
)pb"))));
}
TEST(SparseCoreLayoutStacker, RespectsTableLimit) {
SparseCoreLayoutStacker stacker(2);
stacker.SetActivationMemoryBytesLimit(0);
stacker.SetVariableShardBytesLimit(0);
stacker.SetStackingTableLimit(2);
ASSERT_OK(stacker.AddTable("table1", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table2", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table3", 128, 8, "stack1", 1024));
ASSERT_OK(stacker.AddTable("table4", 128, 8, "stack1", 1024));
EXPECT_THAT(
stacker.GetLayouts(), IsOkAndHolds(Partially(EqualsProto(R"pb(
tables { table_name: 'table1' stacked_table_name: 'table1_table2' }
tables { table_name: 'table2' stacked_table_name: 'table1_table2' }
tables { table_name: 'table3' stacked_table_name: 'table3_table4' }
tables { table_name: 'table4' stacked_table_name: 'table3_table4' }
)pb"))));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tpu/kernels/sparse_core_layout.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tpu/kernels/sparse_core_layout_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c5455352-1ad0-4ff2-9c83-e89ac052dc9c | cpp | google/tensorstore | value_as | tensorstore/internal/json/value_as.cc | tensorstore/internal/json/value_as_test.cc | #include "tensorstore/internal/json/value_as.h"
#include <stdint.h>
#include <cmath>
#include <cstddef>
#include <limits>
#include <optional>
#include <string>
#include <string_view>
#include <type_traits>
#include "absl/status/status.h"
#include "absl/strings/numbers.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_json {
absl::Status ExpectedError(const ::nlohmann::json& j,
std::string_view type_name) {
if (j.is_discarded()) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Expected ", type_name, ", but member is missing"));
}
return absl::InvalidArgumentError(tensorstore::StrCat(
"Expected ", type_name, ", but received: ", j.dump()));
}
absl::Status ValidationError(const ::nlohmann::json& j,
std::string_view type_name) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Validation of ", type_name, " failed, received: ", j.dump()));
}
template <typename T>
absl::Status JsonRequireIntegerImpl<T>::Execute(const ::nlohmann::json& json,
T* result, bool strict,
T min_value, T max_value) {
if (auto x = JsonValueAs<T>(json, strict)) {
if (*x >= min_value && *x <= max_value) {
*result = *x;
return absl::OkStatus();
}
}
constexpr const char* kTypeName = []() {
if constexpr (sizeof(T) == 4 && std::is_signed_v<T>)
return "32-bit signed integer";
if constexpr (sizeof(T) == 4 && std::is_unsigned_v<T>)
return "32-bit unsigned integer";
if constexpr (sizeof(T) == 8 && std::is_signed_v<T>)
return "64-bit signed integer";
if constexpr (sizeof(T) == 8 && std::is_unsigned_v<T>)
return "64-bit unsigned integer";
return GetTypeName(internal::type_identity_t<T>{});
}();
if constexpr (kTypeName != nullptr) {
if (min_value == std::numeric_limits<T>::min() &&
max_value == std::numeric_limits<T>::max()) {
return internal_json::ExpectedError(json, kTypeName);
}
}
return absl::InvalidArgumentError(
tensorstore::StrCat("Expected integer in the range [", min_value, ", ",
max_value, "], but received: ", json.dump()));
}
template struct JsonRequireIntegerImpl<int64_t>;
template struct JsonRequireIntegerImpl<uint64_t>;
template <>
std::optional<std::nullptr_t> JsonValueAs<std::nullptr_t>(
const ::nlohmann::json& j, bool strict) {
if (j.is_null()) {
return nullptr;
}
return std::nullopt;
}
template <>
std::optional<bool> JsonValueAs<bool>(const ::nlohmann::json& j, bool strict) {
if (j.is_boolean()) {
return j.get<bool>();
}
if (!strict && j.is_string()) {
const auto& str = j.get_ref<std::string const&>();
if (str == "true") return true;
if (str == "false") return false;
}
return std::nullopt;
}
template <>
std::optional<int64_t> JsonValueAs<int64_t>(const ::nlohmann::json& j,
bool strict) {
if (j.is_number_unsigned()) {
auto x = j.get<uint64_t>();
if (x <= static_cast<uint64_t>(std::numeric_limits<int64_t>::max())) {
return static_cast<int64_t>(x);
}
} else if (j.is_number_integer()) {
return j.get<int64_t>();
  } else if (j.is_number_float()) {
    auto x = j.get<double>();
    // Accept a floating-point value only if it is integral and within the
    // representable range [-2^63, 2^63).
    if (x >= -9223372036854775808.0 &&
        x < 9223372036854775808.0 && x == std::floor(x)) {
return static_cast<int64_t>(x);
}
} else if (!strict) {
if (j.is_string()) {
int64_t result = 0;
if (absl::SimpleAtoi(j.get_ref<std::string const&>(), &result)) {
return result;
}
}
}
return std::nullopt;
}
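// Usage sketch (hypothetical inputs): with strict == false (the default),
// JsonValueAs<int64_t>(::nlohmann::json("42")) yields 42,
// JsonValueAs<int64_t>(::nlohmann::json(3.5)) yields std::nullopt (not
// integral), and JsonValueAs<int64_t>(::nlohmann::json(1e19)) yields
// std::nullopt (out of range); with strict == true the string form is
// rejected as well.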
template <>
std::optional<uint64_t> JsonValueAs<uint64_t>(const ::nlohmann::json& j,
bool strict) {
if (j.is_number_unsigned()) {
return j.get<uint64_t>();
} else if (j.is_number_integer()) {
int64_t x = j.get<int64_t>();
if (x >= 0) {
return static_cast<uint64_t>(x);
}
  } else if (j.is_number_float()) {
    double x = j.get<double>();
    // Accept a floating-point value only if it is integral and within the
    // representable range [0, 2^64).
    if (x >= 0.0 && x < 18446744073709551616.0 &&
        x == std::floor(x)) {
return static_cast<uint64_t>(x);
}
} else if (!strict) {
if (j.is_string()) {
uint64_t result = 0;
if (absl::SimpleAtoi(j.get_ref<std::string const&>(), &result)) {
return result;
}
}
}
return std::nullopt;
}
template <>
std::optional<double> JsonValueAs<double>(const ::nlohmann::json& j,
bool strict) {
if (j.is_number()) {
return j.get<double>();
}
if (!strict && j.is_string()) {
double result = 0;
if (absl::SimpleAtod(j.get_ref<std::string const&>(), &result)) {
return result;
}
}
return std::nullopt;
}
template <>
std::optional<std::string> JsonValueAs<std::string>(const ::nlohmann::json& j,
bool strict) {
if (j.is_string()) {
return j.get<std::string>();
}
return std::nullopt;
}
}
} | #include "tensorstore/internal/json/value_as.h"
#include <stdint.h>
#include <map>
#include <optional>
#include <set>
#include <string>
#include <type_traits>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_json::JsonRequireInteger;
using ::tensorstore::internal_json::JsonRequireValueAs;
using ::tensorstore::internal_json::JsonValueAs;
template <typename T, bool kStrict = true>
std::optional<T> JsonMemberT(const ::nlohmann::json::object_t& j,
const char* member) {
auto it = j.find(member);
if (it == j.end()) {
return std::nullopt;
}
return JsonValueAs<T>(it->second, kStrict);
}
template <typename T, bool kStrict = true>
std::optional<T> JsonMemberT(const ::nlohmann::json& j, const char* member) {
if (const auto* obj = j.get_ptr<const ::nlohmann::json::object_t*>()) {
return JsonMemberT<T, kStrict>(*obj, member);
}
return std::nullopt;
}
TEST(JsonTest, Meta) {
auto JsonRequireString = [](const ::nlohmann::json& json,
const char* member) -> bool {
auto v = JsonMemberT<std::string>(json, member);
return v.has_value() && !v->empty();
};
auto JsonRequireInt = [](const ::nlohmann::json& json,
const char* member) -> bool {
auto v = JsonMemberT<int64_t, false>(json, member);
return v.has_value();
};
auto meta = ::nlohmann::json::meta();
EXPECT_TRUE(JsonRequireString(meta, "copyright"));
EXPECT_TRUE(JsonRequireString(meta, "name"));
EXPECT_TRUE(JsonRequireString(meta, "url"));
EXPECT_TRUE(JsonRequireString(meta, "platform"));
EXPECT_TRUE(JsonRequireString(meta, "copyright"));
EXPECT_TRUE(meta.find("compiler") != meta.end());
auto compiler = meta["compiler"];
EXPECT_TRUE(JsonRequireString(compiler, "c++"));
EXPECT_FALSE(JsonRequireString(meta, "version"));
auto version = meta["version"];
EXPECT_TRUE(JsonRequireInt(version, "major"));
}
::nlohmann::json GetDefaultJSON() {
return ::nlohmann::json{
{"bool_true", true}, {"bool_false", false}, {"str_bool", "true"},
{"signed", 456}, {"neg_signed", -567}, {"unsigned", 565u},
{"float", 456.789}, {"neg_float", -678.91}, {"int_float", 122.0},
{"str", "abc"}, {"str_number", "789"}, {"str_float", "123.40"},
{"nil", nullptr}, {"empty_obj", {}}, {"obj", {"a", 1}},
};
}
std::set<std::string> GetKeys() {
return std::set<std::string>{{
"bool_true",
"bool_false",
"str_bool",
"signed",
"neg_signed",
"unsigned",
"float",
"neg_float",
"int_float",
"str",
"abc",
"str_number",
"str_float",
"nil",
"empty_obj",
"obj",
"missing",
}};
}
TEST(JsonTest, JsonParseBool) {
auto keys = GetKeys();
auto JsonParseBool = [&keys](const ::nlohmann::json& json,
const char* member) {
keys.erase(member);
return JsonMemberT<bool, false>(json, member);
};
auto result = GetDefaultJSON();
EXPECT_FALSE(result.is_discarded());
ASSERT_TRUE(JsonParseBool(result, "bool_true"));
EXPECT_EQ(true, *JsonParseBool(result, "bool_true"));
ASSERT_TRUE(JsonParseBool(result, "bool_false"));
EXPECT_EQ(false, *JsonParseBool(result, "bool_false"));
ASSERT_TRUE(JsonParseBool(result, "str_bool"));
EXPECT_EQ(true, *JsonParseBool(result, "str_bool"));
std::set<std::string> remaining = keys;
for (const std::string& x : remaining) {
EXPECT_FALSE(JsonParseBool(result, x.c_str())) << x;
}
EXPECT_EQ(std::nullopt, JsonValueAs<bool>(::nlohmann::json("a")));
EXPECT_EQ(false, JsonValueAs<bool>(::nlohmann::json("false")));
EXPECT_EQ(true, JsonValueAs<bool>(::nlohmann::json("true")));
const bool kStrict = true;
EXPECT_EQ(std::nullopt, JsonValueAs<bool>(::nlohmann::json("true"), kStrict));
EXPECT_EQ(true, JsonValueAs<bool>(::nlohmann::json(true), kStrict));
EXPECT_EQ(false, JsonValueAs<bool>(::nlohmann::json(false), kStrict));
}
TEST(JsonValueAsTest, Int64FromUint64) {
EXPECT_EQ(std::nullopt,
JsonValueAs<int64_t>(::nlohmann::json(0x8fffffffffffffffu)));
EXPECT_EQ(std::nullopt,
JsonValueAs<int64_t>(::nlohmann::json(0xffffffffffffffffu)));
EXPECT_EQ(0x7fffffffffffffff,
JsonValueAs<int64_t>(::nlohmann::json(0x7fffffffffffffffu)));
const bool kStrict = true;
EXPECT_EQ(
0x7fffffffffffffff,
JsonValueAs<int64_t>(::nlohmann::json(0x7fffffffffffffffu), kStrict));
}
TEST(JsonValueAsTest, Int64FromDouble) {
EXPECT_EQ(std::nullopt, JsonValueAs<int64_t>(::nlohmann::json(0.5)));
EXPECT_EQ(1, JsonValueAs<int64_t>(::nlohmann::json(1.0)));
EXPECT_EQ(
std::nullopt,
JsonValueAs<int64_t>(::nlohmann::json(9223372036854775808.0 )));
EXPECT_EQ(std::nullopt,
JsonValueAs<int64_t>(::nlohmann::json(-9223372036854777856.0)));
EXPECT_EQ(9223372036854774784,
JsonValueAs<int64_t>(::nlohmann::json(9223372036854774784.0)));
EXPECT_EQ(
-0x8000000000000000,
JsonValueAs<int64_t>(::nlohmann::json(-9223372036854775808.0 )));
}
TEST(JsonValueAsTest, Int64FromString) {
EXPECT_EQ(-1, JsonValueAs<int64_t>(::nlohmann::json("-1")));
EXPECT_EQ(-0x8000000000000000,
JsonValueAs<int64_t>(::nlohmann::json("-9223372036854775808")));
EXPECT_EQ(0x7fffffffffffffff,
JsonValueAs<int64_t>(::nlohmann::json("9223372036854775807")));
EXPECT_FALSE(JsonValueAs<int64_t>(::nlohmann::json("0.0")));
EXPECT_FALSE(JsonValueAs<int64_t>(::nlohmann::json("0a")));
EXPECT_FALSE(JsonValueAs<int64_t>(::nlohmann::json("0x0")));
EXPECT_FALSE(JsonValueAs<int64_t>(::nlohmann::json("0xf")));
EXPECT_FALSE(JsonValueAs<int64_t>(::nlohmann::json("9223372036854775808")));
EXPECT_FALSE(JsonValueAs<int64_t>(::nlohmann::json("-9223372036854775809")));
const bool kStrict = true;
EXPECT_EQ(std::nullopt,
JsonValueAs<int64_t>(::nlohmann::json("-1"), kStrict));
}
TEST(JsonValueAsTest, Uint64FromDouble) {
EXPECT_EQ(std::nullopt, JsonValueAs<uint64_t>(::nlohmann::json(0.5)));
EXPECT_EQ(1, JsonValueAs<uint64_t>(::nlohmann::json(1.0)));
EXPECT_EQ(std::nullopt, JsonValueAs<uint64_t>(::nlohmann::json(
18446744073709551616.0 )));
EXPECT_EQ(std::nullopt, JsonValueAs<uint64_t>(::nlohmann::json(-1.0)));
EXPECT_EQ(18446744073709549568u,
JsonValueAs<uint64_t>(::nlohmann::json(18446744073709549568.0)));
}
TEST(JsonValueAsTest, Uint64FromString) {
EXPECT_EQ(0xffffffffffffffffu,
JsonValueAs<uint64_t>(::nlohmann::json("18446744073709551615")));
EXPECT_FALSE(JsonValueAs<uint64_t>(::nlohmann::json("0.0")));
EXPECT_FALSE(JsonValueAs<uint64_t>(::nlohmann::json("0a")));
EXPECT_FALSE(JsonValueAs<uint64_t>(::nlohmann::json("0x0")));
EXPECT_FALSE(JsonValueAs<uint64_t>(::nlohmann::json("0xf")));
EXPECT_FALSE(JsonValueAs<uint64_t>(::nlohmann::json("-1")));
const bool kStrict = true;
EXPECT_EQ(std::nullopt,
JsonValueAs<uint64_t>(::nlohmann::json("1"), kStrict));
}
TEST(JsonTest, JsonParseInt) {
auto keys = GetKeys();
auto JsonParseInt = [&keys](const ::nlohmann::json& json,
const char* member) {
keys.erase(member);
return JsonMemberT<int64_t, false>(json, member);
};
auto result = GetDefaultJSON();
EXPECT_FALSE(result.is_discarded());
ASSERT_TRUE(JsonParseInt(result, "signed"));
EXPECT_EQ(456, *JsonParseInt(result, "signed"));
ASSERT_TRUE(JsonParseInt(result, "neg_signed"));
EXPECT_EQ(-567, *JsonParseInt(result, "neg_signed"));
ASSERT_TRUE(JsonParseInt(result, "unsigned"));
EXPECT_EQ(565, *JsonParseInt(result, "unsigned"));
ASSERT_TRUE(JsonParseInt(result, "int_float"));
EXPECT_EQ(122, *JsonParseInt(result, "int_float"));
ASSERT_TRUE(JsonParseInt(result, "str_number"));
EXPECT_EQ(789, *JsonParseInt(result, "str_number"));
std::set<std::string> remaining = keys;
for (const std::string& x : remaining) {
EXPECT_FALSE(JsonParseInt(result, x.c_str())) << x;
}
}
TEST(JsonTest, JsonParseUnsigned) {
auto keys = GetKeys();
auto JsonParseUnsigned = [&keys](const ::nlohmann::json& json,
const char* member) {
keys.erase(member);
return JsonMemberT<uint64_t, false>(json, member);
};
auto result = GetDefaultJSON();
EXPECT_FALSE(result.is_discarded());
ASSERT_TRUE(JsonParseUnsigned(result, "signed"));
EXPECT_EQ(456, *JsonParseUnsigned(result, "signed"));
ASSERT_TRUE(JsonParseUnsigned(result, "unsigned"));
EXPECT_EQ(565, *JsonParseUnsigned(result, "unsigned"));
ASSERT_TRUE(JsonParseUnsigned(result, "int_float"));
EXPECT_EQ(122, *JsonParseUnsigned(result, "int_float"));
ASSERT_TRUE(JsonParseUnsigned(result, "str_number"));
EXPECT_EQ(789, *JsonParseUnsigned(result, "str_number"));
std::set<std::string> remaining = keys;
for (const std::string& x : remaining) {
EXPECT_FALSE(JsonParseUnsigned(result, x.c_str())) << x;
}
}
TEST(JsonTest, JsonParseDouble) {
auto keys = GetKeys();
auto JsonParseDouble = [&keys](const ::nlohmann::json& json,
const char* member) {
keys.erase(member);
return JsonMemberT<double, false>(json, member);
};
auto result = GetDefaultJSON();
EXPECT_FALSE(result.is_discarded());
ASSERT_TRUE(JsonParseDouble(result, "signed"));
EXPECT_EQ(456, *JsonParseDouble(result, "signed"));
ASSERT_TRUE(JsonParseDouble(result, "neg_signed"));
EXPECT_EQ(-567, *JsonParseDouble(result, "neg_signed"));
ASSERT_TRUE(JsonParseDouble(result, "unsigned"));
EXPECT_EQ(565, *JsonParseDouble(result, "unsigned"));
ASSERT_TRUE(JsonParseDouble(result, "float"));
EXPECT_EQ(456.789, *JsonParseDouble(result, "float"));
ASSERT_TRUE(JsonParseDouble(result, "neg_float"));
EXPECT_EQ(-678.91, *JsonParseDouble(result, "neg_float"));
ASSERT_TRUE(JsonParseDouble(result, "int_float"));
EXPECT_EQ(122, *JsonParseDouble(result, "int_float"));
ASSERT_TRUE(JsonParseDouble(result, "str_number"));
EXPECT_EQ(789, *JsonParseDouble(result, "str_number"));
ASSERT_TRUE(JsonParseDouble(result, "str_float"));
EXPECT_EQ(123.4, *JsonParseDouble(result, "str_float"));
std::set<std::string> remaining = keys;
for (const std::string& x : remaining) {
EXPECT_FALSE(JsonParseDouble(result, x.c_str())) << x;
}
}
TEST(JsonTest, JsonParseString) {
auto keys = GetKeys();
auto JsonParseString = [&keys](const ::nlohmann::json& json,
const char* member) {
keys.erase(member);
return JsonMemberT<std::string>(json, member);
};
auto result = GetDefaultJSON();
EXPECT_FALSE(result.is_discarded());
ASSERT_TRUE(JsonParseString(result, "str_bool"));
EXPECT_EQ("true", *JsonParseString(result, "str_bool"));
ASSERT_TRUE(JsonParseString(result, "str"));
EXPECT_EQ("abc", *JsonParseString(result, "str"));
ASSERT_TRUE(JsonParseString(result, "str_number"));
EXPECT_EQ("789", *JsonParseString(result, "str_number"));
ASSERT_TRUE(JsonParseString(result, "str_float"));
EXPECT_EQ("123.40", *JsonParseString(result, "str_float"));
std::set<std::string> remaining = keys;
for (const std::string& x : remaining) {
EXPECT_FALSE(JsonParseString(result, x.c_str())) << x;
}
}
TEST(JsonRequireValueAs, Success) {
{
bool v;
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json(true), &v, true).ok());
EXPECT_TRUE(v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("true"), &v, false).ok());
EXPECT_TRUE(v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("true"), &v, [](bool) {
return true;
}).ok());
EXPECT_TRUE(v);
EXPECT_TRUE(
JsonRequireValueAs<bool>(::nlohmann::json(true), nullptr, true).ok());
}
{
int64_t v;
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json(-3), &v, true).ok());
EXPECT_EQ(-3, v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json(-4.0), &v, false).ok());
EXPECT_EQ(-4, v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("-5"), &v, false).ok());
EXPECT_EQ(-5, v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("-5"), &v, [](int64_t) {
return true;
}).ok());
EXPECT_EQ(-5, v);
EXPECT_TRUE(
JsonRequireValueAs<int64_t>(::nlohmann::json(-3), nullptr, true).ok());
}
{
uint64_t v;
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json(6), &v, true).ok());
EXPECT_EQ(6, v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json(7.0), &v, false).ok());
EXPECT_EQ(7, v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("8"), &v, false).ok());
EXPECT_EQ(8, v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("8"), &v, [](uint64_t) {
return true;
}).ok());
EXPECT_EQ(8, v);
EXPECT_TRUE(
JsonRequireValueAs<uint64_t>(::nlohmann::json(3), nullptr, true).ok());
}
{
double v;
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json(0.5), &v, true).ok());
EXPECT_EQ(0.5, v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("2.0"), &v, false).ok());
EXPECT_EQ(2.0, v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("2.0"), &v, [](double) {
return true;
}).ok());
EXPECT_EQ(2.0, v);
EXPECT_TRUE(
JsonRequireValueAs<double>(::nlohmann::json(3.0), nullptr, true).ok());
}
{
std::string v;
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("x"), &v, false).ok());
EXPECT_EQ("x", v);
EXPECT_TRUE(JsonRequireValueAs(::nlohmann::json("y"), &v, [](std::string) {
return true;
}).ok());
EXPECT_EQ("y", v);
EXPECT_TRUE(
JsonRequireValueAs<std::string>(::nlohmann::json("z"), nullptr, true)
.ok());
}
}
TEST(JsonRequireValueAs, Failure) {
{
bool v;
EXPECT_THAT(JsonRequireValueAs(::nlohmann::json("true"), &v, true),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected boolean, but received: \"true\""));
}
EXPECT_THAT(JsonRequireValueAs<bool>(::nlohmann::json("true"), nullptr, true),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected boolean, but received: \"true\""));
EXPECT_THAT(JsonRequireValueAs<bool>(::nlohmann::json(true), nullptr,
[](bool) { return false; }),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Validation of boolean failed, received: true"));
EXPECT_THAT(
JsonRequireValueAs<int64_t>(::nlohmann::json("true"), nullptr, true),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected 64-bit signed integer, but received: \"true\""));
EXPECT_THAT(
JsonRequireValueAs<uint64_t>(::nlohmann::json(3.5), nullptr, true),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected 64-bit unsigned integer, but received: 3.5"));
EXPECT_THAT(
JsonRequireValueAs<std::string>(::nlohmann::json(true), nullptr, true),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected string, but received: true"));
}
TEST(JsonRequireIntegerTest, Success) {
{
std::int32_t result_int32 = 42;
EXPECT_EQ(absl::OkStatus(), JsonRequireInteger<std::int32_t>(
::nlohmann::json(-5), &result_int32,
true, -7, -3));
EXPECT_EQ(-5, result_int32);
}
{
std::int32_t result_int32 = 42;
EXPECT_EQ(absl::OkStatus(), JsonRequireInteger<std::int32_t>(
::nlohmann::json(-7), &result_int32,
true, -7, -3));
EXPECT_EQ(-7, result_int32);
}
{
std::int32_t result_int32 = 42;
EXPECT_EQ(absl::OkStatus(), JsonRequireInteger<std::int32_t>(
::nlohmann::json("-7"), &result_int32,
false, -7, -3));
EXPECT_EQ(-7, result_int32);
}
{
std::int32_t result_int32 = 42;
EXPECT_EQ(absl::OkStatus(), JsonRequireInteger<std::int32_t>(
::nlohmann::json(-3), &result_int32,
true, -7, -3));
EXPECT_EQ(-3, result_int32);
}
{
uint32_t result_uint32 = 42;
EXPECT_EQ(absl::OkStatus(),
JsonRequireInteger(::nlohmann::json(5), &result_uint32,
true, 2, 7));
EXPECT_EQ(5u, result_uint32);
}
{
std::int16_t result_int16 = 42;
EXPECT_EQ(absl::OkStatus(),
JsonRequireInteger(::nlohmann::json(5), &result_int16,
true, 2, 7));
EXPECT_EQ(5, result_int16);
}
}
TEST(JsonRequireIntegerTest, Failure) {
{
std::int32_t result_int32 = 42;
EXPECT_THAT(
JsonRequireInteger(::nlohmann::json(-2), &result_int32, true,
-7, -3),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Expected integer in the range \\[-7, -3\\], but received: -2"));
EXPECT_EQ(42, result_int32);
}
{
std::int32_t result_int32 = 42;
EXPECT_THAT(JsonRequireInteger(::nlohmann::json(true), &result_int32,
true, -7, -3),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected integer in the range \\[-7, -3\\], but "
"received: true"));
EXPECT_EQ(42, result_int32);
}
{
uint32_t result_uint32 = 42;
EXPECT_THAT(
JsonRequireInteger(::nlohmann::json(11), &result_uint32,
true, 5, 10),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Expected integer in the range \\[5, 10\\], but received: 11"));
EXPECT_EQ(42u, result_uint32);
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json/value_as.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json/value_as_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
34c8dafc-d129-4f28-a135-dae212b8739a | cpp | abseil/abseil-cpp | any | absl/types/any.h | absl/types/any_test.cc | #ifndef ABSL_TYPES_ANY_H_
#define ABSL_TYPES_ANY_H_
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/utility/utility.h"
#ifdef ABSL_USES_STD_ANY
#include <any>
namespace absl {
ABSL_NAMESPACE_BEGIN
using std::any;
using std::any_cast;
using std::bad_any_cast;
using std::make_any;
ABSL_NAMESPACE_END
}
#else
#include <algorithm>
#include <cstddef>
#include <initializer_list>
#include <memory>
#include <stdexcept>
#include <type_traits>
#include <typeinfo>
#include <utility>
#include "absl/base/internal/fast_type_id.h"
#include "absl/meta/type_traits.h"
#include "absl/types/bad_any_cast.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
class any;
void swap(any& x, any& y) noexcept;
template <typename T, typename... Args>
any make_any(Args&&... args);
template <typename T, typename U, typename... Args>
any make_any(std::initializer_list<U> il, Args&&... args);
template <typename ValueType>
ValueType any_cast(const any& operand);
template <typename ValueType>
ValueType any_cast(any& operand);
template <typename ValueType>
ValueType any_cast(any&& operand);
template <typename ValueType>
const ValueType* any_cast(const any* operand) noexcept;
template <typename ValueType>
ValueType* any_cast(any* operand) noexcept;
class any {
private:
template <typename T>
struct IsInPlaceType;
public:
constexpr any() noexcept;
any(const any& other)
: obj_(other.has_value() ? other.obj_->Clone()
: std::unique_ptr<ObjInterface>()) {}
any(any&& other) noexcept = default;
template <
typename T, typename VT = absl::decay_t<T>,
absl::enable_if_t<!absl::disjunction<
std::is_same<any, VT>, IsInPlaceType<VT>,
absl::negation<std::is_copy_constructible<VT> > >::value>* = nullptr>
any(T&& value) : obj_(new Obj<VT>(in_place, std::forward<T>(value))) {}
template <typename T, typename... Args, typename VT = absl::decay_t<T>,
absl::enable_if_t<absl::conjunction<
std::is_copy_constructible<VT>,
std::is_constructible<VT, Args...>>::value>* = nullptr>
explicit any(in_place_type_t<T> , Args&&... args)
: obj_(new Obj<VT>(in_place, std::forward<Args>(args)...)) {}
template <
typename T, typename U, typename... Args, typename VT = absl::decay_t<T>,
absl::enable_if_t<
absl::conjunction<std::is_copy_constructible<VT>,
std::is_constructible<VT, std::initializer_list<U>&,
Args...>>::value>* = nullptr>
explicit any(in_place_type_t<T> , std::initializer_list<U> ilist,
Args&&... args)
: obj_(new Obj<VT>(in_place, ilist, std::forward<Args>(args)...)) {}
any& operator=(const any& rhs) {
any(rhs).swap(*this);
return *this;
}
any& operator=(any&& rhs) noexcept {
any(std::move(rhs)).swap(*this);
return *this;
}
template <typename T, typename VT = absl::decay_t<T>,
absl::enable_if_t<absl::conjunction<
absl::negation<std::is_same<VT, any>>,
std::is_copy_constructible<VT>>::value>* = nullptr>
any& operator=(T&& rhs) {
any tmp(in_place_type_t<VT>(), std::forward<T>(rhs));
tmp.swap(*this);
return *this;
}
template <
typename T, typename... Args, typename VT = absl::decay_t<T>,
absl::enable_if_t<std::is_copy_constructible<VT>::value &&
std::is_constructible<VT, Args...>::value>* = nullptr>
VT& emplace(Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
reset();
Obj<VT>* const object_ptr =
new Obj<VT>(in_place, std::forward<Args>(args)...);
obj_ = std::unique_ptr<ObjInterface>(object_ptr);
return object_ptr->value;
}
template <
typename T, typename U, typename... Args, typename VT = absl::decay_t<T>,
absl::enable_if_t<std::is_copy_constructible<VT>::value &&
std::is_constructible<VT, std::initializer_list<U>&,
Args...>::value>* = nullptr>
VT& emplace(std::initializer_list<U> ilist,
Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
reset();
Obj<VT>* const object_ptr =
new Obj<VT>(in_place, ilist, std::forward<Args>(args)...);
obj_ = std::unique_ptr<ObjInterface>(object_ptr);
return object_ptr->value;
}
void reset() noexcept { obj_ = nullptr; }
void swap(any& other) noexcept { obj_.swap(other.obj_); }
bool has_value() const noexcept { return obj_ != nullptr; }
#ifdef ABSL_INTERNAL_HAS_RTTI
const std::type_info& type() const noexcept {
if (has_value()) {
return obj_->Type();
}
return typeid(void);
}
#endif
private:
class ObjInterface {
public:
virtual ~ObjInterface() = default;
virtual std::unique_ptr<ObjInterface> Clone() const = 0;
virtual const void* ObjTypeId() const noexcept = 0;
#ifdef ABSL_INTERNAL_HAS_RTTI
virtual const std::type_info& Type() const noexcept = 0;
#endif
};
template <typename T>
class Obj : public ObjInterface {
public:
template <typename... Args>
explicit Obj(in_place_t , Args&&... args)
: value(std::forward<Args>(args)...) {}
std::unique_ptr<ObjInterface> Clone() const final {
return std::unique_ptr<ObjInterface>(new Obj(in_place, value));
}
const void* ObjTypeId() const noexcept final { return IdForType<T>(); }
#ifdef ABSL_INTERNAL_HAS_RTTI
const std::type_info& Type() const noexcept final { return typeid(T); }
#endif
T value;
};
std::unique_ptr<ObjInterface> CloneObj() const {
if (!obj_) return nullptr;
return obj_->Clone();
}
template <typename T>
constexpr static const void* IdForType() {
using NormalizedType =
typename std::remove_cv<typename std::remove_reference<T>::type>::type;
return base_internal::FastTypeId<NormalizedType>();
}
const void* GetObjTypeId() const {
return obj_ ? obj_->ObjTypeId() : base_internal::FastTypeId<void>();
}
template <typename ValueType>
friend ValueType any_cast(const any& operand);
template <typename ValueType>
friend ValueType any_cast(any& operand);
template <typename T>
friend const T* any_cast(const any* operand) noexcept;
template <typename T>
friend T* any_cast(any* operand) noexcept;
std::unique_ptr<ObjInterface> obj_;
};
constexpr any::any() noexcept = default;
template <typename T>
struct any::IsInPlaceType : std::false_type {};
template <typename T>
struct any::IsInPlaceType<in_place_type_t<T>> : std::true_type {};
inline void swap(any& x, any& y) noexcept { x.swap(y); }
template <typename T, typename... Args>
any make_any(Args&&... args) {
return any(in_place_type_t<T>(), std::forward<Args>(args)...);
}
template <typename T, typename U, typename... Args>
any make_any(std::initializer_list<U> il, Args&&... args) {
return any(in_place_type_t<T>(), il, std::forward<Args>(args)...);
}
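// Usage sketch (values are illustrative):
//   absl::any a = absl::make_any<std::vector<int>>({1, 2, 3});
//   auto& v = absl::any_cast<std::vector<int>&>(a);   // reference form; throws
//                                                     // bad_any_cast on mismatch
//   auto* p = absl::any_cast<std::vector<int>>(&a);   // pointer form; returns
//                                                     // nullptr on mismatch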
template <typename ValueType>
ValueType any_cast(const any& operand) {
using U = typename std::remove_cv<
typename std::remove_reference<ValueType>::type>::type;
static_assert(std::is_constructible<ValueType, const U&>::value,
"Invalid ValueType");
auto* const result = (any_cast<U>)(&operand);
if (result == nullptr) {
any_internal::ThrowBadAnyCast();
}
return static_cast<ValueType>(*result);
}
template <typename ValueType>
ValueType any_cast(any& operand) {
using U = typename std::remove_cv<
typename std::remove_reference<ValueType>::type>::type;
static_assert(std::is_constructible<ValueType, U&>::value,
"Invalid ValueType");
auto* result = (any_cast<U>)(&operand);
if (result == nullptr) {
any_internal::ThrowBadAnyCast();
}
return static_cast<ValueType>(*result);
}
template <typename ValueType>
ValueType any_cast(any&& operand) {
using U = typename std::remove_cv<
typename std::remove_reference<ValueType>::type>::type;
static_assert(std::is_constructible<ValueType, U>::value,
"Invalid ValueType");
return static_cast<ValueType>(std::move((any_cast<U&>)(operand)));
}
template <typename T>
const T* any_cast(const any* operand) noexcept {
using U =
typename std::remove_cv<typename std::remove_reference<T>::type>::type;
return operand && operand->GetObjTypeId() == any::IdForType<U>()
? std::addressof(
static_cast<const any::Obj<U>*>(operand->obj_.get())->value)
: nullptr;
}
template <typename T>
T* any_cast(any* operand) noexcept {
using U =
typename std::remove_cv<typename std::remove_reference<T>::type>::type;
return operand && operand->GetObjTypeId() == any::IdForType<U>()
? std::addressof(
static_cast<any::Obj<U>*>(operand->obj_.get())->value)
: nullptr;
}
ABSL_NAMESPACE_END
}
#endif
#endif | #include "absl/types/any.h"
#if !defined(ABSL_USES_STD_ANY)
#include <initializer_list>
#include <type_traits>
#include <utility>
#include <vector>
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/base/internal/exception_testing.h"
#include "absl/container/internal/test_instance_tracker.h"
#include "absl/log/log.h"
namespace {
using absl::test_internal::CopyableOnlyInstance;
using absl::test_internal::InstanceTracker;
template <typename T>
const T& AsConst(const T& t) {
return t;
}
struct MoveOnly {
MoveOnly() = default;
explicit MoveOnly(int value) : value(value) {}
MoveOnly(MoveOnly&&) = default;
MoveOnly& operator=(MoveOnly&&) = default;
int value = 0;
};
struct CopyOnly {
CopyOnly() = default;
explicit CopyOnly(int value) : value(value) {}
CopyOnly(CopyOnly&&) = delete;
CopyOnly& operator=(CopyOnly&&) = delete;
CopyOnly(const CopyOnly&) = default;
CopyOnly& operator=(const CopyOnly&) = default;
int value = 0;
};
struct MoveOnlyWithListConstructor {
MoveOnlyWithListConstructor() = default;
explicit MoveOnlyWithListConstructor(std::initializer_list<int> ,
int value)
: value(value) {}
MoveOnlyWithListConstructor(MoveOnlyWithListConstructor&&) = default;
MoveOnlyWithListConstructor& operator=(MoveOnlyWithListConstructor&&) =
default;
int value = 0;
};
struct IntMoveOnlyCopyOnly {
IntMoveOnlyCopyOnly(int value, MoveOnly , CopyOnly )
: value(value) {}
int value;
};
struct ListMoveOnlyCopyOnly {
ListMoveOnlyCopyOnly(std::initializer_list<int> ilist, MoveOnly ,
CopyOnly )
: values(ilist) {}
std::vector<int> values;
};
using FunctionType = void();
void FunctionToEmplace() {}
using ArrayType = int[2];
using DecayedArray = absl::decay_t<ArrayType>;
TEST(AnyTest, Noexcept) {
static_assert(std::is_nothrow_default_constructible<absl::any>(), "");
static_assert(std::is_nothrow_move_constructible<absl::any>(), "");
static_assert(std::is_nothrow_move_assignable<absl::any>(), "");
static_assert(noexcept(std::declval<absl::any&>().has_value()), "");
static_assert(noexcept(std::declval<absl::any&>().type()), "");
static_assert(noexcept(absl::any_cast<int>(std::declval<absl::any*>())), "");
static_assert(
noexcept(std::declval<absl::any&>().swap(std::declval<absl::any&>())),
"");
using std::swap;
static_assert(
noexcept(swap(std::declval<absl::any&>(), std::declval<absl::any&>())),
"");
}
TEST(AnyTest, HasValue) {
absl::any o;
EXPECT_FALSE(o.has_value());
o.emplace<int>();
EXPECT_TRUE(o.has_value());
o.reset();
EXPECT_FALSE(o.has_value());
}
TEST(AnyTest, Type) {
absl::any o;
EXPECT_EQ(typeid(void), o.type());
o.emplace<int>(5);
EXPECT_EQ(typeid(int), o.type());
o.emplace<float>(5.f);
EXPECT_EQ(typeid(float), o.type());
o.reset();
EXPECT_EQ(typeid(void), o.type());
}
TEST(AnyTest, EmptyPointerCast) {
{
absl::any o;
EXPECT_EQ(nullptr, absl::any_cast<int>(&o));
o.emplace<int>();
EXPECT_NE(nullptr, absl::any_cast<int>(&o));
o.reset();
EXPECT_EQ(nullptr, absl::any_cast<int>(&o));
}
{
absl::any o;
EXPECT_EQ(nullptr, absl::any_cast<int>(&AsConst(o)));
o.emplace<int>();
EXPECT_NE(nullptr, absl::any_cast<int>(&AsConst(o)));
o.reset();
EXPECT_EQ(nullptr, absl::any_cast<int>(&AsConst(o)));
}
}
TEST(AnyTest, InPlaceConstruction) {
const CopyOnly copy_only{};
absl::any o(absl::in_place_type_t<IntMoveOnlyCopyOnly>(), 5, MoveOnly(),
copy_only);
IntMoveOnlyCopyOnly& v = absl::any_cast<IntMoveOnlyCopyOnly&>(o);
EXPECT_EQ(5, v.value);
}
TEST(AnyTest, InPlaceConstructionVariableTemplate) {
const CopyOnly copy_only{};
absl::any o(absl::in_place_type<IntMoveOnlyCopyOnly>, 5, MoveOnly(),
copy_only);
auto& v = absl::any_cast<IntMoveOnlyCopyOnly&>(o);
EXPECT_EQ(5, v.value);
}
TEST(AnyTest, InPlaceConstructionWithCV) {
const CopyOnly copy_only{};
absl::any o(absl::in_place_type_t<const volatile IntMoveOnlyCopyOnly>(), 5,
MoveOnly(), copy_only);
IntMoveOnlyCopyOnly& v = absl::any_cast<IntMoveOnlyCopyOnly&>(o);
EXPECT_EQ(5, v.value);
}
TEST(AnyTest, InPlaceConstructionWithCVVariableTemplate) {
const CopyOnly copy_only{};
absl::any o(absl::in_place_type<const volatile IntMoveOnlyCopyOnly>, 5,
MoveOnly(), copy_only);
auto& v = absl::any_cast<IntMoveOnlyCopyOnly&>(o);
EXPECT_EQ(5, v.value);
}
TEST(AnyTest, InPlaceConstructionWithFunction) {
absl::any o(absl::in_place_type_t<FunctionType>(), FunctionToEmplace);
FunctionType*& construction_result = absl::any_cast<FunctionType*&>(o);
EXPECT_EQ(&FunctionToEmplace, construction_result);
}
TEST(AnyTest, InPlaceConstructionWithFunctionVariableTemplate) {
absl::any o(absl::in_place_type<FunctionType>, FunctionToEmplace);
auto& construction_result = absl::any_cast<FunctionType*&>(o);
EXPECT_EQ(&FunctionToEmplace, construction_result);
}
TEST(AnyTest, InPlaceConstructionWithArray) {
ArrayType ar = {5, 42};
absl::any o(absl::in_place_type_t<ArrayType>(), ar);
DecayedArray& construction_result = absl::any_cast<DecayedArray&>(o);
EXPECT_EQ(&ar[0], construction_result);
}
TEST(AnyTest, InPlaceConstructionWithArrayVariableTemplate) {
ArrayType ar = {5, 42};
absl::any o(absl::in_place_type<ArrayType>, ar);
auto& construction_result = absl::any_cast<DecayedArray&>(o);
EXPECT_EQ(&ar[0], construction_result);
}
TEST(AnyTest, InPlaceConstructionIlist) {
const CopyOnly copy_only{};
absl::any o(absl::in_place_type_t<ListMoveOnlyCopyOnly>(), {1, 2, 3, 4},
MoveOnly(), copy_only);
ListMoveOnlyCopyOnly& v = absl::any_cast<ListMoveOnlyCopyOnly&>(o);
std::vector<int> expected_values = {1, 2, 3, 4};
EXPECT_EQ(expected_values, v.values);
}
TEST(AnyTest, InPlaceConstructionIlistVariableTemplate) {
const CopyOnly copy_only{};
absl::any o(absl::in_place_type<ListMoveOnlyCopyOnly>, {1, 2, 3, 4},
MoveOnly(), copy_only);
auto& v = absl::any_cast<ListMoveOnlyCopyOnly&>(o);
std::vector<int> expected_values = {1, 2, 3, 4};
EXPECT_EQ(expected_values, v.values);
}
TEST(AnyTest, InPlaceConstructionIlistWithCV) {
const CopyOnly copy_only{};
absl::any o(absl::in_place_type_t<const volatile ListMoveOnlyCopyOnly>(),
{1, 2, 3, 4}, MoveOnly(), copy_only);
ListMoveOnlyCopyOnly& v = absl::any_cast<ListMoveOnlyCopyOnly&>(o);
std::vector<int> expected_values = {1, 2, 3, 4};
EXPECT_EQ(expected_values, v.values);
}
TEST(AnyTest, InPlaceConstructionIlistWithCVVariableTemplate) {
const CopyOnly copy_only{};
absl::any o(absl::in_place_type<const volatile ListMoveOnlyCopyOnly>,
{1, 2, 3, 4}, MoveOnly(), copy_only);
auto& v = absl::any_cast<ListMoveOnlyCopyOnly&>(o);
std::vector<int> expected_values = {1, 2, 3, 4};
EXPECT_EQ(expected_values, v.values);
}
TEST(AnyTest, InPlaceNoArgs) {
absl::any o(absl::in_place_type_t<int>{});
EXPECT_EQ(0, absl::any_cast<int&>(o));
}
TEST(AnyTest, InPlaceNoArgsVariableTemplate) {
absl::any o(absl::in_place_type<int>);
EXPECT_EQ(0, absl::any_cast<int&>(o));
}
template <typename Enabler, typename T, typename... Args>
struct CanEmplaceAnyImpl : std::false_type {};
template <typename T, typename... Args>
struct CanEmplaceAnyImpl<
absl::void_t<decltype(
std::declval<absl::any&>().emplace<T>(std::declval<Args>()...))>,
T, Args...> : std::true_type {};
template <typename T, typename... Args>
using CanEmplaceAny = CanEmplaceAnyImpl<void, T, Args...>;
TEST(AnyTest, Emplace) {
const CopyOnly copy_only{};
absl::any o;
EXPECT_TRUE((std::is_same<decltype(o.emplace<IntMoveOnlyCopyOnly>(
5, MoveOnly(), copy_only)),
IntMoveOnlyCopyOnly&>::value));
IntMoveOnlyCopyOnly& emplace_result =
o.emplace<IntMoveOnlyCopyOnly>(5, MoveOnly(), copy_only);
EXPECT_EQ(5, emplace_result.value);
IntMoveOnlyCopyOnly& v = absl::any_cast<IntMoveOnlyCopyOnly&>(o);
EXPECT_EQ(5, v.value);
EXPECT_EQ(&emplace_result, &v);
static_assert(!CanEmplaceAny<int, int, int>::value, "");
static_assert(!CanEmplaceAny<MoveOnly, MoveOnly>::value, "");
}
TEST(AnyTest, EmplaceWithCV) {
const CopyOnly copy_only{};
absl::any o;
EXPECT_TRUE(
(std::is_same<decltype(o.emplace<const volatile IntMoveOnlyCopyOnly>(
5, MoveOnly(), copy_only)),
IntMoveOnlyCopyOnly&>::value));
IntMoveOnlyCopyOnly& emplace_result =
o.emplace<const volatile IntMoveOnlyCopyOnly>(5, MoveOnly(), copy_only);
EXPECT_EQ(5, emplace_result.value);
IntMoveOnlyCopyOnly& v = absl::any_cast<IntMoveOnlyCopyOnly&>(o);
EXPECT_EQ(5, v.value);
EXPECT_EQ(&emplace_result, &v);
}
TEST(AnyTest, EmplaceWithFunction) {
absl::any o;
EXPECT_TRUE(
(std::is_same<decltype(o.emplace<FunctionType>(FunctionToEmplace)),
FunctionType*&>::value));
FunctionType*& emplace_result = o.emplace<FunctionType>(FunctionToEmplace);
EXPECT_EQ(&FunctionToEmplace, emplace_result);
}
TEST(AnyTest, EmplaceWithArray) {
absl::any o;
ArrayType ar = {5, 42};
EXPECT_TRUE(
(std::is_same<decltype(o.emplace<ArrayType>(ar)), DecayedArray&>::value));
DecayedArray& emplace_result = o.emplace<ArrayType>(ar);
EXPECT_EQ(&ar[0], emplace_result);
}
TEST(AnyTest, EmplaceIlist) {
const CopyOnly copy_only{};
absl::any o;
EXPECT_TRUE((std::is_same<decltype(o.emplace<ListMoveOnlyCopyOnly>(
{1, 2, 3, 4}, MoveOnly(), copy_only)),
ListMoveOnlyCopyOnly&>::value));
ListMoveOnlyCopyOnly& emplace_result =
o.emplace<ListMoveOnlyCopyOnly>({1, 2, 3, 4}, MoveOnly(), copy_only);
ListMoveOnlyCopyOnly& v = absl::any_cast<ListMoveOnlyCopyOnly&>(o);
EXPECT_EQ(&v, &emplace_result);
std::vector<int> expected_values = {1, 2, 3, 4};
EXPECT_EQ(expected_values, v.values);
static_assert(!CanEmplaceAny<int, std::initializer_list<int>>::value, "");
static_assert(!CanEmplaceAny<MoveOnlyWithListConstructor,
std::initializer_list<int>, int>::value,
"");
}
TEST(AnyTest, EmplaceIlistWithCV) {
const CopyOnly copy_only{};
absl::any o;
EXPECT_TRUE(
(std::is_same<decltype(o.emplace<const volatile ListMoveOnlyCopyOnly>(
{1, 2, 3, 4}, MoveOnly(), copy_only)),
ListMoveOnlyCopyOnly&>::value));
ListMoveOnlyCopyOnly& emplace_result =
o.emplace<const volatile ListMoveOnlyCopyOnly>({1, 2, 3, 4}, MoveOnly(),
copy_only);
ListMoveOnlyCopyOnly& v = absl::any_cast<ListMoveOnlyCopyOnly&>(o);
EXPECT_EQ(&v, &emplace_result);
std::vector<int> expected_values = {1, 2, 3, 4};
EXPECT_EQ(expected_values, v.values);
}
TEST(AnyTest, EmplaceNoArgs) {
absl::any o;
o.emplace<int>();
EXPECT_EQ(0, absl::any_cast<int>(o));
}
TEST(AnyTest, ConversionConstruction) {
{
absl::any o = 5;
EXPECT_EQ(5, absl::any_cast<int>(o));
}
{
const CopyOnly copy_only(5);
absl::any o = copy_only;
EXPECT_EQ(5, absl::any_cast<CopyOnly&>(o).value);
}
static_assert(!std::is_convertible<MoveOnly, absl::any>::value, "");
}
TEST(AnyTest, ConversionAssignment) {
{
absl::any o;
o = 5;
EXPECT_EQ(5, absl::any_cast<int>(o));
}
{
const CopyOnly copy_only(5);
absl::any o;
o = copy_only;
EXPECT_EQ(5, absl::any_cast<CopyOnly&>(o).value);
}
static_assert(!std::is_assignable<MoveOnly, absl::any>::value, "");
}
#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable : 4521)
#endif
struct WeirdConstructor42 {
explicit WeirdConstructor42(int value) : value(value) {}
WeirdConstructor42(const WeirdConstructor42& other) : value(other.value) {}
  WeirdConstructor42(WeirdConstructor42&) : value(42) {}
int value;
};
#ifdef _MSC_VER
#pragma warning( pop )
#endif
TEST(AnyTest, WeirdConversionConstruction) {
{
const WeirdConstructor42 source(5);
absl::any o = source;
EXPECT_EQ(5, absl::any_cast<WeirdConstructor42&>(o).value);
}
{
WeirdConstructor42 source(5);
absl::any o = source;
EXPECT_EQ(42, absl::any_cast<WeirdConstructor42&>(o).value);
}
}
TEST(AnyTest, WeirdConversionAssignment) {
{
const WeirdConstructor42 source(5);
absl::any o;
o = source;
EXPECT_EQ(5, absl::any_cast<WeirdConstructor42&>(o).value);
}
{
WeirdConstructor42 source(5);
absl::any o;
o = source;
EXPECT_EQ(42, absl::any_cast<WeirdConstructor42&>(o).value);
}
}
struct Value {};
TEST(AnyTest, AnyCastValue) {
{
absl::any o;
o.emplace<int>(5);
EXPECT_EQ(5, absl::any_cast<int>(o));
EXPECT_EQ(5, absl::any_cast<int>(AsConst(o)));
static_assert(
std::is_same<decltype(absl::any_cast<Value>(o)), Value>::value, "");
}
{
absl::any o;
o.emplace<int>(5);
EXPECT_EQ(5, absl::any_cast<const int>(o));
EXPECT_EQ(5, absl::any_cast<const int>(AsConst(o)));
static_assert(std::is_same<decltype(absl::any_cast<const Value>(o)),
const Value>::value,
"");
}
}
TEST(AnyTest, AnyCastReference) {
{
absl::any o;
o.emplace<int>(5);
EXPECT_EQ(5, absl::any_cast<int&>(o));
EXPECT_EQ(5, absl::any_cast<const int&>(AsConst(o)));
static_assert(
std::is_same<decltype(absl::any_cast<Value&>(o)), Value&>::value, "");
}
{
absl::any o;
o.emplace<int>(5);
EXPECT_EQ(5, absl::any_cast<const int>(o));
EXPECT_EQ(5, absl::any_cast<const int>(AsConst(o)));
static_assert(std::is_same<decltype(absl::any_cast<const Value&>(o)),
const Value&>::value,
"");
}
{
absl::any o;
o.emplace<int>(5);
EXPECT_EQ(5, absl::any_cast<int&&>(std::move(o)));
static_assert(std::is_same<decltype(absl::any_cast<Value&&>(std::move(o))),
Value&&>::value,
"");
}
{
absl::any o;
o.emplace<int>(5);
EXPECT_EQ(5, absl::any_cast<const int>(std::move(o)));
static_assert(
std::is_same<decltype(absl::any_cast<const Value&&>(std::move(o))),
const Value&&>::value,
"");
}
}
TEST(AnyTest, AnyCastPointer) {
{
absl::any o;
EXPECT_EQ(nullptr, absl::any_cast<char>(&o));
o.emplace<int>(5);
EXPECT_EQ(nullptr, absl::any_cast<char>(&o));
o.emplace<char>('a');
EXPECT_EQ('a', *absl::any_cast<char>(&o));
static_assert(
std::is_same<decltype(absl::any_cast<Value>(&o)), Value*>::value, "");
}
{
absl::any o;
EXPECT_EQ(nullptr, absl::any_cast<const char>(&o));
o.emplace<int>(5);
EXPECT_EQ(nullptr, absl::any_cast<const char>(&o));
o.emplace<char>('a');
EXPECT_EQ('a', *absl::any_cast<const char>(&o));
static_assert(std::is_same<decltype(absl::any_cast<const Value>(&o)),
const Value*>::value,
"");
}
}
TEST(AnyTest, MakeAny) {
const CopyOnly copy_only{};
auto o = absl::make_any<IntMoveOnlyCopyOnly>(5, MoveOnly(), copy_only);
static_assert(std::is_same<decltype(o), absl::any>::value, "");
EXPECT_EQ(5, absl::any_cast<IntMoveOnlyCopyOnly&>(o).value);
}
TEST(AnyTest, MakeAnyIList) {
const CopyOnly copy_only{};
auto o =
absl::make_any<ListMoveOnlyCopyOnly>({1, 2, 3}, MoveOnly(), copy_only);
static_assert(std::is_same<decltype(o), absl::any>::value, "");
ListMoveOnlyCopyOnly& v = absl::any_cast<ListMoveOnlyCopyOnly&>(o);
std::vector<int> expected_values = {1, 2, 3};
EXPECT_EQ(expected_values, v.values);
}
TEST(AnyTest, Copy) {
InstanceTracker tracker_raii;
{
absl::any o(absl::in_place_type<CopyableOnlyInstance>, 123);
CopyableOnlyInstance* f1 = absl::any_cast<CopyableOnlyInstance>(&o);
absl::any o2(o);
const CopyableOnlyInstance* f2 = absl::any_cast<CopyableOnlyInstance>(&o2);
EXPECT_EQ(123, f2->value());
EXPECT_NE(f1, f2);
absl::any o3;
o3 = o2;
const CopyableOnlyInstance* f3 = absl::any_cast<CopyableOnlyInstance>(&o3);
EXPECT_EQ(123, f3->value());
EXPECT_NE(f2, f3);
const absl::any o4(4);
absl::any o5 = o4;
EXPECT_EQ(4, absl::any_cast<int>(o4));
EXPECT_EQ(4, absl::any_cast<int>(o5));
absl::any o6 = std::move(o4);
EXPECT_EQ(4, absl::any_cast<int>(o4));
EXPECT_EQ(4, absl::any_cast<int>(o6));
}
}
TEST(AnyTest, Move) {
InstanceTracker tracker_raii;
absl::any any1;
any1.emplace<CopyableOnlyInstance>(5);
absl::any any2 = any1;
EXPECT_EQ(5, absl::any_cast<CopyableOnlyInstance&>(any1).value());
EXPECT_EQ(5, absl::any_cast<CopyableOnlyInstance&>(any2).value());
EXPECT_EQ(1, tracker_raii.copies());
absl::any any3 = std::move(any2);
EXPECT_EQ(5, absl::any_cast<CopyableOnlyInstance&>(any3).value());
EXPECT_EQ(1, tracker_raii.copies());
absl::any any4;
any4 = std::move(any3);
EXPECT_EQ(5, absl::any_cast<CopyableOnlyInstance&>(any4).value());
EXPECT_EQ(1, tracker_raii.copies());
absl::any tmp4(4);
absl::any o4(std::move(tmp4));
EXPECT_EQ(4, absl::any_cast<int>(o4));
o4 = *&o4;
EXPECT_EQ(4, absl::any_cast<int>(o4));
EXPECT_TRUE(o4.has_value());
absl::any o5;
absl::any tmp5(5);
o5 = std::move(tmp5);
EXPECT_EQ(5, absl::any_cast<int>(o5));
}
TEST(AnyTest, Reset) {
absl::any o;
o.emplace<int>();
o.reset();
EXPECT_FALSE(o.has_value());
o.emplace<char>();
EXPECT_TRUE(o.has_value());
}
TEST(AnyTest, ConversionConstructionCausesOneCopy) {
InstanceTracker tracker_raii;
CopyableOnlyInstance counter(5);
absl::any o(counter);
EXPECT_EQ(5, absl::any_cast<CopyableOnlyInstance&>(o).value());
EXPECT_EQ(1, tracker_raii.copies());
}
#if defined(ABSL_USES_STD_ANY)
#define ABSL_ANY_TEST_EXPECT_BAD_ANY_CAST(...) \
ABSL_BASE_INTERNAL_EXPECT_FAIL((__VA_ARGS__), absl::bad_any_cast, \
"")
#else
#define ABSL_ANY_TEST_EXPECT_BAD_ANY_CAST(...) \
ABSL_BASE_INTERNAL_EXPECT_FAIL((__VA_ARGS__), absl::bad_any_cast, \
"Bad any cast")
#endif
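// Note: despite its name, this test exercises absl::bad_any_cast. Every cast
// below targets either an empty any or a mismatched contained type, so each
// one is expected to fail via ABSL_ANY_TEST_EXPECT_BAD_ANY_CAST.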
TEST(AnyTest, ThrowBadAlloc) {
{
absl::any a;
ABSL_ANY_TEST_EXPECT_BAD_ANY_CAST(absl::any_cast<int&>(a));
ABSL_ANY_TEST_EXPECT_BAD_ANY_CAST(absl::any_cast<const int&>(a));
ABSL_ANY_TEST_EXPECT_BAD_ANY_CAST(absl::any_cast<int&&>(absl::any{}));
ABSL_ANY_TEST_EXPECT_BAD_ANY_CAST(absl::any_cast<const int&&>(absl::any{}));
ABSL_ANY_TEST_EXPECT_BAD_ANY_CAST(absl::any_cast<int>(a));
ABSL_ANY_TEST_EXPECT_BAD_ANY_CAST(absl::any_cast<const int>(a));
ABSL_ANY_TEST_EXPECT_BAD_ANY_CAST(absl::any_cast<int>(absl::any{}));
ABSL_ANY_TEST_EXPECT_BAD_ANY_CAST(absl::any_cast<const int>(absl::any{}));
ABSL_ANY_TEST_EXPECT_BAD_ANY_CAST(absl::any_cast<const int&>(AsConst(a)));
ABSL_ANY_TEST_EXPECT_BAD_ANY_CAST(absl::any_cast<int>(AsConst(a)));
ABSL_ANY_TEST_EXPECT_BAD_ANY_CAST(absl::any_cast<const int>(AsConst(a)));
}
{
absl::any a(absl::in_place_type<int>);
ABSL_ANY_TEST_EXPECT_BAD_ANY_CAST(absl::any_cast<float&>(a));
ABSL_ANY_TEST_EXPECT_BAD_ANY_CAST(absl::any_cast<const float&>(a));
ABSL_ANY_TEST_EXPECT_BAD_ANY_CAST(absl::any_cast<float&&>(absl::any{}));
ABSL_ANY_TEST_EXPECT_BAD_ANY_CAST(
absl::any_cast<const float&&>(absl::any{}));
ABSL_ANY_TEST_EXPECT_BAD_ANY_CAST(absl::any_cast<float>(a));
ABSL_ANY_TEST_EXPECT_BAD_ANY_CAST(absl::any_cast<const float>(a));
ABSL_ANY_TEST_EXPECT_BAD_ANY_CAST(absl::any_cast<float>(absl::any{}));
ABSL_ANY_TEST_EXPECT_BAD_ANY_CAST(absl::any_cast<const float>(absl::any{}));
ABSL_ANY_TEST_EXPECT_BAD_ANY_CAST(absl::any_cast<const float&>(AsConst(a)));
ABSL_ANY_TEST_EXPECT_BAD_ANY_CAST(absl::any_cast<float>(AsConst(a)));
ABSL_ANY_TEST_EXPECT_BAD_ANY_CAST(absl::any_cast<const float>(AsConst(a)));
}
}
class BadCopy {};
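// BadCopyable is movable, but copying it always fails (throws BadCopy when
// exceptions are enabled, otherwise logs fatally). The FailedCopy/FailedEmplace
// tests below use it to check what state an absl::any is left in after a copy
// that fails partway through.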
struct BadCopyable {
BadCopyable() = default;
BadCopyable(BadCopyable&&) = default;
BadCopyable(const BadCopyable&) {
#ifdef ABSL_HAVE_EXCEPTIONS
throw BadCopy();
#else
LOG(FATAL) << "Bad copy";
#endif
}
};
#define ABSL_ANY_TEST_EXPECT_BAD_COPY(...) \
ABSL_BASE_INTERNAL_EXPECT_FAIL((__VA_ARGS__), BadCopy, "Bad copy")
TEST(AnyTest, FailedCopy) {
{
const BadCopyable bad{};
ABSL_ANY_TEST_EXPECT_BAD_COPY(absl::any{bad});
}
{
absl::any src(absl::in_place_type<BadCopyable>);
ABSL_ANY_TEST_EXPECT_BAD_COPY(absl::any{src});
}
{
BadCopyable bad;
absl::any target;
ABSL_ANY_TEST_EXPECT_BAD_COPY(target = bad);
}
{
BadCopyable bad;
absl::any target(absl::in_place_type<BadCopyable>);
ABSL_ANY_TEST_EXPECT_BAD_COPY(target = bad);
EXPECT_TRUE(target.has_value());
}
{
absl::any src(absl::in_place_type<BadCopyable>);
absl::any target;
ABSL_ANY_TEST_EXPECT_BAD_COPY(target = src);
EXPECT_FALSE(target.has_value());
}
{
absl::any src(absl::in_place_type<BadCopyable>);
absl::any target(absl::in_place_type<BadCopyable>);
ABSL_ANY_TEST_EXPECT_BAD_COPY(target = src);
EXPECT_TRUE(target.has_value());
}
}
TEST(AnyTest, FailedEmplace) {
BadCopyable bad;
absl::any target;
ABSL_ANY_TEST_EXPECT_BAD_COPY(target.emplace<BadCopyable>(bad));
}
#ifdef __GNUC__
TEST(AnyTest, DISABLED_FailedEmplaceInPlace) {
#else
TEST(AnyTest, FailedEmplaceInPlace) {
#endif
BadCopyable bad;
absl::any target(absl::in_place_type<int>);
ABSL_ANY_TEST_EXPECT_BAD_COPY(target.emplace<BadCopyable>(bad));
EXPECT_FALSE(target.has_value());
}
}
#endif | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/types/any.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/types/any_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
6e49f4a9-c87d-4ea6-9c68-73d8c7379601 | cpp | tensorflow/tensorflow | pjrt_state | tensorflow/core/tfrt/common/pjrt_state.cc | tensorflow/core/tfrt/common/pjrt_state_test.cc | #include "tensorflow/core/tfrt/common/pjrt_state.h"
#include <memory>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/tf_pjrt_client.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/tfrt/common/pjrt_client_factory_options.h"
#include "tensorflow/core/tfrt/common/pjrt_client_factory_registry.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
PjRtState* PjRtState::Create() { return new PjRtState(); }
absl::StatusOr<xla::PjRtClient*> PjRtState::GetPjRtClient(
const DeviceType& device_type) {
absl::MutexLock lock(&mu_);
if (auto it = clients_.find(device_type); it != clients_.end()) {
return it->second.get();
}
return errors::NotFound("PjRt client not found for device type ",
device_type);
}
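// Returns the cached client for this device type if one exists; otherwise
// builds one through the registered PjRt client factory, wraps it in a
// TfPjRtClient, and caches it. All accesses are serialized by mu_.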
absl::StatusOr<xla::PjRtClient*> PjRtState::GetOrCreatePjRtClient(
const DeviceType& device_type) {
absl::MutexLock lock(&mu_);
if (auto it = clients_.find(device_type); it != clients_.end()) {
return it->second.get();
}
std::unique_ptr<xla::PjRtClient> pjrt_client;
xla::PjrtClientFactoryOptions options = xla::PjrtClientFactoryOptions();
TF_ASSIGN_OR_RETURN(std::unique_ptr<xla::PjRtClient> client,
xla::PjrtClientFactoryRegistry::Get().GetPjrtClient(
device_type, options));
pjrt_client = xla::TfPjRtClient::CreateTfPjRtClient(std::move(client));
clients_[device_type] = std::move(pjrt_client);
return clients_[device_type].get();
}
Status PjRtState::SetPjRtClient(const DeviceType& device_type,
std::unique_ptr<xla::PjRtClient> client) {
absl::MutexLock lock(&mu_);
if (auto it = clients_.find(device_type); it != clients_.end()) {
unused_.push_back(std::move(it->second));
}
clients_[device_type] = std::move(client);
return absl::OkStatus();
}
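// Rather than destroying the client, this parks it in unused_ so the object
// stays alive (presumably because existing buffers or executables may still
// reference it) while subsequent lookups for the device type report NotFound.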
Status PjRtState::MovePjRtClientToUnused(const DeviceType& device_type) {
absl::MutexLock lock(&mu_);
if (auto it = clients_.find(device_type); it != clients_.end()) {
unused_.push_back(std::move(it->second));
clients_.erase(it);
return absl::OkStatus();
}
return errors::NotFound("PjRt client not found for device type ",
device_type);
}
Status PjRtState::SetPjRtGpuClientCreationInfo(
std::unique_ptr<PjRtGpuClientCreationInfo> info) {
absl::MutexLock lock(&mu_);
pjrt_gpu_client_creation_info_ = std::move(info);
return absl::OkStatus();
}
PjRtGpuClientCreationInfo* PjRtState::GetPjRtGpuClientCreationInfo() {
absl::MutexLock lock(&mu_);
return pjrt_gpu_client_creation_info_.get();
}
string PjRtState::DebugString() const { return "PjRtState"; }
} | #include "tensorflow/core/tfrt/common/pjrt_state.h"
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/pjrt/cpu/cpu_client.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace {
using tensorflow::PjRtState;
using ::testing::HasSubstr;
using ::tsl::testing::StatusIs;
class PjRtStateTestFixture : public testing::Test {
protected:
PjRtStateTestFixture() { pjrt_state_ = PjRtState::Create(); }
~PjRtStateTestFixture() override {
tensorflow::core::ScopedUnref pjrt_state_ref(pjrt_state_);
}
PjRtState* pjrt_state_;
};
TEST_F(PjRtStateTestFixture, SetAndGetPjRtClient) {
TF_ASSERT_OK(pjrt_state_->SetPjRtClient(
tensorflow::DEVICE_CPU,
xla::GetTfrtCpuClient(true, 1)
.value()));
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client,
pjrt_state_->GetPjRtClient(tensorflow::DEVICE_CPU));
EXPECT_THAT(pjrt_client, testing::NotNull());
}
TEST_F(PjRtStateTestFixture, AddAlreadyExistsPjRtClient) {
TF_ASSERT_OK(pjrt_state_->SetPjRtClient(
tensorflow::DEVICE_CPU,
xla::GetTfrtCpuClient(true, 1)
.value()));
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client_1,
pjrt_state_->GetPjRtClient(tensorflow::DEVICE_CPU));
TF_ASSERT_OK(pjrt_state_->SetPjRtClient(
      tensorflow::DEVICE_CPU,
      xla::GetTfrtCpuClient(true, 1).value()));
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client_2,
pjrt_state_->GetPjRtClient(tensorflow::DEVICE_CPU));
EXPECT_NE(pjrt_client_1, pjrt_client_2);
}
TEST_F(PjRtStateTestFixture, GetNotExistPjRtClient) {
EXPECT_THAT(pjrt_state_->GetPjRtClient(tensorflow::DEVICE_CPU),
StatusIs(tensorflow::error::NOT_FOUND,
HasSubstr("PjRt client not found for device type")));
}
TEST_F(PjRtStateTestFixture, DeletePjRtClient) {
TF_ASSERT_OK_AND_ASSIGN(
auto pjrt_client,
xla::GetTfrtCpuClient(true, 1));
xla::PjRtClient* pjrt_client_ptr = pjrt_client.get();
TF_ASSERT_OK(pjrt_state_->SetPjRtClient(tensorflow::DEVICE_CPU,
std::move(pjrt_client)));
TF_ASSERT_OK(pjrt_state_->MovePjRtClientToUnused(tensorflow::DEVICE_CPU));
EXPECT_THAT(pjrt_state_->GetPjRtClient(tensorflow::DEVICE_CPU),
StatusIs(tensorflow::error::NOT_FOUND,
HasSubstr("PjRt client not found for device type")));
EXPECT_EQ(pjrt_client_ptr->platform_name(), "cpu");
}
TEST_F(PjRtStateTestFixture, DeleteNotExistPjRtClient) {
EXPECT_THAT(pjrt_state_->MovePjRtClientToUnused(tensorflow::DEVICE_CPU),
StatusIs(tensorflow::error::NOT_FOUND,
HasSubstr("PjRt client not found for device type")));
}
TEST_F(PjRtStateTestFixture, GetOrCreatePjRtClientExist) {
TF_ASSERT_OK_AND_ASSIGN(
auto pjrt_client,
xla::GetTfrtCpuClient(true, 1));
auto pjrt_client_ptr = pjrt_client.get();
TF_ASSERT_OK(pjrt_state_->SetPjRtClient(tensorflow::DEVICE_CPU,
std::move(pjrt_client)));
TF_ASSERT_OK_AND_ASSIGN(
auto pjrt_client_get,
pjrt_state_->GetOrCreatePjRtClient(tensorflow::DEVICE_CPU));
EXPECT_THAT(pjrt_client_get, pjrt_client_ptr);
}
TEST_F(PjRtStateTestFixture, GetOrCreatePjRtClientNotExist) {
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client, pjrt_state_->GetOrCreatePjRtClient(
tensorflow::DEVICE_CPU));
EXPECT_THAT(pjrt_client, testing::NotNull());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/common/pjrt_state.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/common/pjrt_state_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
170f268f-ba87-4ee9-b093-b6449fd5a01a | cpp | tensorflow/tensorflow | gpu_performance_model_base | third_party/xla/xla/service/gpu/model/gpu_performance_model_base.cc | third_party/xla/xla/service/gpu/model/gpu_performance_model_base_test.cc | #include "xla/service/gpu/model/gpu_performance_model_base.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/fusions/fusions.h"
#include "xla/service/gpu/fusions/triton.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
namespace {
bool FusionUsesParameterElementwiseFromRoot(
const HloInstruction* fusion, int parameter_index,
const GpuHloCostAnalysis* cost_analysis) {
return cost_analysis->CommonElementwiseUtilization(
fusion->fused_parameter(parameter_index),
fusion->fused_expression_root()) == 1.f;
}
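// Approximate ratio of bytes transferred from DRAM to bytes actually used when
// accesses are uncoalesced: the model assumes one DRAM-to-L2 transaction per
// element, so the waste factor is the transaction size divided by the element
// size (TUPLE and TOKEN are modeled as 4-byte elements).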
int GetCoalescingWasteFactor(PrimitiveType element_type,
const se::DeviceDescription& gpu_device_info) {
int64_t element_size_bytes =
element_type == PrimitiveType::TUPLE ||
element_type == PrimitiveType::TOKEN
? 4
: ShapeUtil::ByteSizeOfPrimitiveType(element_type);
return gpu_device_info.dram_to_l2_transaction_size_bytes() /
element_size_bytes;
}
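// Caps the requested bandwidth at num_blocks times a per-block limit derived
// from the clock rate and the number of memory transactions per clock, so a
// small grid cannot be credited with the device's full memory bandwidth.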
float AdjustBandwidth(const se::DeviceDescription& gpu_device_info,
float bandwidth, int64_t num_blocks) {
float per_block_bandwidth = gpu_device_info.clock_rate_ghz() * 1.0e9f *
gpu_device_info.memory_transactions_per_clock();
float max_bandwidth = num_blocks * per_block_bandwidth;
return std::min(bandwidth, max_bandwidth);
}
}
std::optional<EstimateRunTimeData> GpuPerformanceModelCache::Get(
const HloInstruction& instruction) {
auto it = instruction_runtime_data_.find(&instruction);
if (it != instruction_runtime_data_.end()) {
return it->second;
}
return std::nullopt;
}
std::optional<absl::Duration> GpuPerformanceModelCache::Get(
const HloInstruction& producer, const HloInstruction& consumer) {
absl::MutexLock lock(&mutex_);
auto it = fusion_runtime_data_.find(&producer);
if (it != fusion_runtime_data_.end()) {
auto jt = it->second.find(&consumer);
if (jt != it->second.end()) {
return jt->second;
}
}
return std::nullopt;
}
const absl::flat_hash_map<const HloInstruction*, absl::Duration>&
GpuPerformanceModelCache::GetAllConsumers(const HloInstruction& producer) {
return fusion_runtime_data_[&producer];
}
bool GpuPerformanceModelCache::ContainsConsumers(
const HloInstruction& producer) {
return fusion_runtime_data_.contains(&producer);
}
void GpuPerformanceModelCache::Set(const HloInstruction& instruction,
const EstimateRunTimeData& runtime_data) {
instruction_runtime_data_[&instruction] = runtime_data;
}
void GpuPerformanceModelCache::Set(const HloInstruction& producer,
const HloInstruction& consumer,
absl::Duration runtime) {
absl::MutexLock lock(&mutex_);
fusion_runtime_data_[&producer][&consumer] = runtime;
}
void GpuPerformanceModelCache::Invalidate(const HloInstruction& instruction) {
instruction_runtime_data_.erase(&instruction);
fusion_runtime_data_.erase(&instruction);
for (auto* operand : instruction.operands()) {
if (operand->opcode() == HloOpcode::kGetTupleElement) {
operand = operand->mutable_operand(0);
}
auto it = fusion_runtime_data_.find(operand);
if (it != fusion_runtime_data_.end()) {
it->second.erase(&instruction);
}
}
}
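// Prefers the launch dimensions reported by the fusion emitter (kernel or
// Triton); if neither is available, falls back to a rough estimate of one
// thread per output element with 128 threads per block.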
LaunchDimensions GpuPerformanceModelBase::EstimateFusionLaunchDimensions(
const HloFusionAnalysis& fusion_analysis) {
auto emitter =
GetFusionEmitter(PreBufferAssignmentFusionInfo{fusion_analysis});
if (const auto* kernel_emitter =
dynamic_cast<const KernelFusionInterface*>(emitter.get())) {
return kernel_emitter->launch_dimensions();
}
if (const auto* triton_emitter =
dynamic_cast<const TritonFusion*>(emitter.get())) {
if (auto launch_config = triton_emitter->launch_config()) {
return launch_config->launch_dimensions;
}
}
VLOG(5) << "Using fallback launch dimensions estimate for "
<< fusion_analysis.fusion().ToString();
int64_t num_threads_per_block = 128;
int64_t estimated_num_threads =
ShapeUtil::ElementsInRecursive(fusion_analysis.fusion_root(0).shape());
int64_t num_blocks =
CeilOfRatio(estimated_num_threads, num_threads_per_block);
return LaunchDimensions(num_blocks, num_threads_per_block);
}
int64_t GpuPerformanceModelBase::GetOperandBytesAccessed(
const GpuHloCostAnalysis* cost_analysis, const HloInstruction* instr,
const HloInstruction* operand) {
if (!instr->IsUserOf(operand)) {
return 0;
}
return cost_analysis->operand_bytes_accessed(*instr,
instr->operand_index(operand));
}
float GpuPerformanceModelBase::GetOperandUtilization(
const GpuHloCostAnalysis* cost_analysis, const HloInstruction* instr,
const HloInstruction* operand) {
if (operand->IsMultiOutputFusion()) {
float res = 0.f;
for (int64_t i = 0; i < instr->operand_count(); ++i) {
if (instr->operand(i)->opcode() == HloOpcode::kGetTupleElement &&
instr->operand(i)->operand(0) == operand) {
res += cost_analysis->operand_utilization(*instr, i);
}
}
return res;
}
if (!instr->IsUserOf(operand)) {
return 0.f;
}
return cost_analysis->operand_utilization(*instr,
instr->operand_index(operand));
}
float GpuPerformanceModelBase::GetCommonUtilization(
const GpuHloCostAnalysis* cost_analysis, const HloInstruction* producer,
int64_t producer_idx_of_operand, const HloInstruction* consumer) {
const auto* operand = producer->operand(producer_idx_of_operand);
if (!consumer || !consumer->IsUserOf(operand)) {
return 0.f;
}
if (producer->IsElementwise() ||
(producer->opcode() == HloOpcode::kFusion &&
FusionUsesParameterElementwiseFromRoot(producer, producer_idx_of_operand,
cost_analysis))) {
if (consumer->opcode() == HloOpcode::kFusion) {
int64_t consumer_idx_of_common_operand = consumer->operand_index(operand);
float res = 0.f;
std::vector<int64_t> consumer_indices_of_producer;
if (producer->IsMultiOutputFusion()) {
for (int64_t i = 0; i < consumer->operand_count(); ++i) {
if (consumer->operand(i)->opcode() == HloOpcode::kGetTupleElement &&
consumer->operand(i)->operand(0) == producer) {
consumer_indices_of_producer.push_back(i);
}
}
} else {
consumer_indices_of_producer.push_back(
consumer->operand_index(producer));
}
for (int64_t consumer_idx_of_producer : consumer_indices_of_producer) {
res += cost_analysis->CommonElementwiseUtilization(
consumer->fused_parameter(consumer_idx_of_common_operand),
consumer->fused_parameter(consumer_idx_of_producer));
}
return res;
} else if (consumer->IsElementwise()) {
return 1.f;
}
}
return 0.f;
}
int64_t GpuPerformanceModelBase::GetSharedOperandBytesAccessed(
const GpuHloCostAnalysis* cost_analysis, const HloInstruction* producer,
const HloInstruction* consumer, const HloInstruction* operand) {
float producer_utilization_by_consumer =
GetOperandUtilization(cost_analysis, consumer, producer);
int64_t bytes_accessed_by_producer =
GetOperandBytesAccessed(cost_analysis, producer, operand);
int64_t bytes_accessed_by_consumer =
GetOperandBytesAccessed(cost_analysis, consumer, operand);
float common_utilization =
producer->IsUserOf(operand)
? GetCommonUtilization(cost_analysis, producer,
producer->operand_index(operand), consumer)
: 0.f;
int64_t operand_size = cost_analysis->GetShapeSize(operand->shape());
int64_t common_bytes_accessed =
std::llround(operand_size * common_utilization);
return std::llround(bytes_accessed_by_producer *
producer_utilization_by_consumer) +
bytes_accessed_by_consumer - common_bytes_accessed;
}
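// Models read time as n_bytes_total divided by an effective bandwidth that is
// boosted by kL2CacheSpeedup (and additionally kL1CacheSpeedup) when the net
// working set fits in the corresponding cache, then capped by AdjustBandwidth.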
absl::Duration GpuPerformanceModelBase::ReadTime(
const se::DeviceDescription& gpu_device_info, int64_t num_blocks,
int64_t n_bytes_net, int64_t n_bytes_total) {
float bandwidth = gpu_device_info.memory_bandwidth();
if (n_bytes_net < gpu_device_info.l2_cache_size()) {
bandwidth *= kL2CacheSpeedup;
if (n_bytes_net <
gpu_device_info.l1_cache_size_per_SM() * gpu_device_info.core_count()) {
bandwidth *= kL1CacheSpeedup;
}
}
bandwidth = AdjustBandwidth(gpu_device_info, bandwidth, num_blocks);
return absl::Seconds(n_bytes_total / bandwidth);
}
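// Like ReadTime, but the first n_bytes_net bytes are charged at DRAM bandwidth
// reduced by the coalescing waste factor, while the re-read remainder gets the
// L2/L1 speedups when n_bytes_net fits in cache (and the waste penalty when it
// does not).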
absl::Duration GpuPerformanceModelBase::ReadTimeWithDRAMHeuristic(
const se::DeviceDescription& gpu_device_info, int64_t num_blocks,
int64_t n_bytes_net, int64_t n_bytes_total, PrimitiveType element_type,
bool coalesced) {
int waste_factor =
coalesced ? 1 : GetCoalescingWasteFactor(element_type, gpu_device_info);
float dram_bandwidth = gpu_device_info.memory_bandwidth() / waste_factor;
float rest_bandwidth = gpu_device_info.memory_bandwidth();
if (n_bytes_net < gpu_device_info.l2_cache_size()) {
rest_bandwidth *= kL2CacheSpeedup;
if (n_bytes_net <
gpu_device_info.l1_cache_size_per_SM() * gpu_device_info.core_count()) {
rest_bandwidth *= kL1CacheSpeedup;
}
} else {
rest_bandwidth /= waste_factor;
}
dram_bandwidth = AdjustBandwidth(gpu_device_info, dram_bandwidth, num_blocks);
rest_bandwidth = AdjustBandwidth(gpu_device_info, rest_bandwidth, num_blocks);
int64_t n_bytes_read_dram = std::min(n_bytes_net, n_bytes_total);
int64_t n_bytes_read_cache = n_bytes_total - n_bytes_read_dram;
return absl::Seconds(n_bytes_read_dram / dram_bandwidth) +
absl::Seconds(n_bytes_read_cache / rest_bandwidth);
}
absl::Duration GpuPerformanceModelBase::ProducerInputAccessTime(
const GpuHloCostAnalysis* cost_analysis,
const se::DeviceDescription& gpu_device_info, int64_t num_blocks,
const HloInstruction* producer, const HloFusionAnalysis& fusion_analysis,
const GpuPerformanceModelOptions& config,
const HloInstruction* fused_consumer) {
absl::Duration ret = absl::ZeroDuration();
float producer_output_utilization =
fused_consumer
? GetOperandUtilization(cost_analysis, fused_consumer, producer)
: 1.f;
for (int i = 0; i < producer->operand_count(); ++i) {
int64_t operand_bytes_accessed =
cost_analysis->operand_bytes_accessed(*producer, i);
float operand_utilization =
cost_analysis->operand_utilization(*producer, i);
int64_t n_bytes_net = std::llround(operand_bytes_accessed /
std::max(operand_utilization, 1.0f));
float common_utilization = GetCommonUtilization(
cost_analysis, producer, i, fused_consumer);
CHECK_LE(common_utilization, producer_output_utilization);
float n_bytes_total = operand_bytes_accessed *
(producer_output_utilization - common_utilization);
ret += ReadTime(gpu_device_info, num_blocks, n_bytes_net, n_bytes_total);
}
return ret;
}
absl::Duration GpuPerformanceModelBase::WriteTime(
const se::DeviceDescription& gpu_device_info, int64_t bytes_written) {
return absl::Seconds(1.0f * bytes_written /
gpu_device_info.memory_bandwidth());
}
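// Compute time is flops divided by the effective FLOP rate: active cores times
// active FPUs per core times the clock rate times 2 flops per cycle (the
// factor of 2 presumably accounts for fused multiply-add).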
absl::Duration GpuPerformanceModelBase::ComputeTime(
const se::DeviceDescription& gpu_device_info, int64_t flops,
int64_t num_blocks, int64_t num_threads_per_block) {
int64_t n_active_fpus_per_core =
std::min<int64_t>(num_threads_per_block, gpu_device_info.fpus_per_core());
int64_t n_active_core =
std::min<int64_t>(num_blocks, gpu_device_info.core_count());
int64_t fpu_count = n_active_core * n_active_fpus_per_core;
int64_t flop_per_ns_per_fpu = gpu_device_info.clock_rate_ghz() * 2;
int64_t flop_per_ns_effective = flop_per_ns_per_fpu * fpu_count;
return absl::Nanoseconds(1.0f * flops / flop_per_ns_effective);
}
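// Overlap model: the smaller of compute and memory time is hidden behind the
// larger one in proportion to config.memory_compute_parallelism (1.0 means
// fully overlapped, 0.0 means fully serialized).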
absl::Duration GpuPerformanceModelBase::CombineComputeAndMemoryAccessTime(
absl::Duration compute_time, absl::Duration memory_access_time,
const GpuPerformanceModelOptions& config) {
return compute_time + memory_access_time -
std::min(compute_time, memory_access_time) *
config.memory_compute_parallelism;
}
void GpuPerformanceModelBase::VLogOperandRead(const HloInstruction* operand,
int64_t n_bytes_total,
int64_t n_bytes_net,
bool coalesced) {
VLOG(8) << "operand " << operand->name()
<< ", n_bytes_total: " << n_bytes_total
<< ", n_bytes_net: " << n_bytes_net << ", coalesced: " << coalesced;
}
}
} | #include "xla/service/gpu/model/gpu_performance_model_base.h"
#include <cstdint>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
class GpuPerformanceModelBaseTest : public HloTestBase {
public:
GpuHloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const {
return [&](const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
};
}
  GpuHloCostAnalysis::Options options_{ShapeSizeBytesFunction(), {}, {}, true};
se::DeviceDescription device_info_{TestGpuDeviceInfo::RTXA6000DeviceInfo()};
GpuHloCostAnalysis analysis_{options_, device_info_};
GpuPerformanceModelBaseTest() : HloTestBase() {}
};
TEST_F(GpuPerformanceModelBaseTest, SharedOperandBytesAccessed_InPlaceDUS) {
absl::string_view hlo_string = R"(
HloModule m
ENTRY entry_computation {
param_0 = f32[8,16] parameter(0)
param_1 = f32[4,4] parameter(1)
c_0 = s32[] constant(0)
log = f32[4,4] log(param_1)
ROOT dynamic-update-slice = f32[8,16] dynamic-update-slice(param_0, log, c_0, c_0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
ASSERT_IS_OK(computation->Accept(&analysis_));
auto dus_consumer = computation->root_instruction();
auto log_producer = dus_consumer->mutable_operand(1);
auto get_shared_operand_bytes_accessed = [&](const HloInstruction* operand) {
return GpuPerformanceModelBase::GetSharedOperandBytesAccessed(
&analysis_, log_producer, dus_consumer, operand);
};
EXPECT_EQ(get_shared_operand_bytes_accessed(dus_consumer->operand(0)), 0);
EXPECT_EQ(get_shared_operand_bytes_accessed(log_producer->operand(0)), 64);
}
TEST_F(GpuPerformanceModelBaseTest, SharedOperandBytesAccessed_DUS) {
absl::string_view hlo_string = R"(
HloModule m
ENTRY entry_computation {
param_0 = f32[8,16] parameter(0)
param_1 = f32[4,4] parameter(1)
c_0 = s32[] constant(0)
log = f32[8,16] log(param_0)
ROOT dynamic-update-slice = f32[8,16] dynamic-update-slice(log, param_1, c_0, c_0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
ASSERT_IS_OK(computation->Accept(&analysis_));
auto dus_consumer = computation->root_instruction();
auto log_producer = dus_consumer->mutable_operand(0);
auto get_shared_operand_bytes_accessed = [&](const HloInstruction* operand) {
return GpuPerformanceModelBase::GetSharedOperandBytesAccessed(
&analysis_, log_producer, dus_consumer, operand);
};
EXPECT_EQ(get_shared_operand_bytes_accessed(dus_consumer->operand(1)), 64);
EXPECT_EQ(get_shared_operand_bytes_accessed(log_producer->operand(0)), 448);
}
TEST_F(GpuPerformanceModelBaseTest,
ReduceBroadcastedDim_IncorrectBytesAccessed) {
absl::string_view hlo_string = R"(
HloModule m
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
f1 {
p0 = f32[128] parameter(0)
c0 = f32[] constant(0)
broadcast = f32[128,256] broadcast(p0), dimensions={0}
ROOT reduce = f32[128] reduce(broadcast, c0), dimensions={1}, to_apply=add
}
ENTRY entry_computation {
param_0 = f32[128] parameter(0)
param_1 = f32[4,4] parameter(1)
ROOT fusion = f32[128] fusion(param_0), kind=kLoop, calls=f1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
ASSERT_IS_OK(computation->Accept(&analysis_));
auto root = computation->root_instruction();
EXPECT_EQ(GpuPerformanceModelBase::GetOperandBytesAccessed(&analysis_, root,
root->operand(0)),
131072);
}
TEST_F(GpuPerformanceModelBaseTest, ElementwiseBitcast_IncorrectBytesAccessed) {
absl::string_view hlo_string = R"(
HloModule m
f1 {
p0 = f32[128] parameter(0)
bitcast.1 = f32[8,16] bitcast(p0)
log = f32[128] log(p0)
bitcast.2 = f32[8,16] bitcast(log)
ROOT add = f32[8,16] add(bitcast.1, bitcast.2)
}
ENTRY entry_computation {
param_0 = f32[128] parameter(0)
ROOT fusion = f32[8,16] fusion(param_0), kind=kLoop, calls=f1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
ASSERT_IS_OK(computation->Accept(&analysis_));
auto root = computation->root_instruction();
EXPECT_EQ(GpuPerformanceModelBase::GetOperandBytesAccessed(&analysis_, root,
root->operand(0)),
1024);
}
TEST_F(GpuPerformanceModelBaseTest, EstimateFusionLaunchDimensions_LoopFusion) {
absl::string_view hlo_string = R"(
HloModule m
f1 {
p0 = f32[8,16,128] parameter(0)
log = f32[8,16,128] log(p0)
ROOT add = f32[8,16,128] add(p0, log)
}
ENTRY entry_computation {
param_0 = f32[8,16,128] parameter(0)
ROOT fusion = f32[8,16,128] fusion(param_0), kind=kLoop, calls=f1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto fusion_analysis = HloFusionAnalysis::Create(
*module->entry_computation()->root_instruction(), device_info_);
auto launch_dimensions =
GpuPerformanceModelBase::EstimateFusionLaunchDimensions(fusion_analysis);
EXPECT_EQ(launch_dimensions.num_blocks(), 128);
EXPECT_EQ(launch_dimensions.num_threads_per_block(), 128);
}
TEST_F(GpuPerformanceModelBaseTest,
EstimateFusionLaunchDimensions_TritonSoftMaxFusion) {
absl::string_view hlo_string = R"(
max {
p1 = f32[] parameter(1)
p0 = f32[] parameter(0)
ROOT m = f32[] maximum(p0, p1)
}
triton_softmax_computation {
p0 = f32[16,970] parameter(0)
constant = f32[] constant(-inf)
reduce = f32[16] reduce(p0, constant), dimensions={1}, to_apply=max
broadcast = f32[16,970] broadcast(reduce), dimensions={0}
ROOT subtract = f32[16,970] subtract(p0, broadcast)
}
ENTRY e {
p0 = f32[16,970]{1,0} parameter(0)
ROOT r = f32[16,970]{1,0} fusion(p0), kind=kCustom,
calls=triton_softmax_computation,
backend_config={"fusion_backend_config": {kind: "__triton","block_level_fusion_config":{"output_tile_sizes":["1","970"],"num_warps":"2"}}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto fusion_analysis = HloFusionAnalysis::Create(
*module->entry_computation()->root_instruction(), device_info_);
auto launch_dimensions =
GpuPerformanceModelBase::EstimateFusionLaunchDimensions(fusion_analysis);
EXPECT_EQ(launch_dimensions.num_blocks(), 16);
EXPECT_EQ(launch_dimensions.num_threads_per_block(), 64);
}
TEST_F(GpuPerformanceModelBaseTest,
EstimateFusionLaunchDimensions_CudnnFusion) {
absl::string_view hlo_string = R"(
fusion1 {
p0 = f32[32,96] parameter(0)
p1 = f32[96,256] parameter(1)
ROOT r = f32[32,256] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f32[32,96] parameter(0)
p1 = f32[96,256] parameter(1)
ROOT _ = f32[32,256] fusion(p0, p1), kind=kCustom, calls=fusion1,
backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto fusion_analysis = HloFusionAnalysis::Create(
*module->entry_computation()->root_instruction(), device_info_);
auto launch_dimensions =
GpuPerformanceModelBase::EstimateFusionLaunchDimensions(fusion_analysis);
EXPECT_EQ(launch_dimensions.num_blocks(), 64);
EXPECT_EQ(launch_dimensions.num_threads_per_block(), 128);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/gpu_performance_model_base.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/gpu_performance_model_base_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
79f89674-bf24-4505-9db9-114f61502a06 | cpp | google/tensorstore | grid_chunk_key_ranges | tensorstore/internal/grid_chunk_key_ranges.cc | tensorstore/internal/grid_chunk_key_ranges_test.cc | #include "tensorstore/internal/grid_chunk_key_ranges.h"
#include <cassert>
#include <string>
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/internal/grid_partition.h"
#include "tensorstore/internal/grid_partition_impl.h"
#include "tensorstore/internal/lexicographical_grid_index_key.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
absl::Status GetChunkKeyRangesForRegularGridWithSemiLexicographicalKeys(
const internal_grid_partition::IndexTransformGridPartition& grid_partition,
IndexTransformView<> transform,
tensorstore::span<const DimensionIndex> grid_output_dimensions,
internal::OutputToGridCellFn output_to_grid_cell, BoxView<> grid_bounds,
const LexicographicalGridIndexKeyFormatter& key_formatter,
absl::FunctionRef<absl::Status(std::string key,
tensorstore::span<const Index> grid_indices)>
handle_key,
absl::FunctionRef<absl::Status(KeyRange key_range, BoxView<> grid_bounds)>
handle_key_range) {
Box<dynamic_rank(kMaxRank)> grid_bounds_copy(grid_bounds);
assert(grid_output_dimensions.size() == grid_bounds.rank());
DimensionIndex cached_min_grid_index_for_lexicographical_order_dim = -1;
Index cached_min_grid_index_for_lexicographical_order;
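  // Single-entry memoization of MinGridIndexForLexicographicalOrder, keyed by
  // the most recently queried dimension, since handle_interval may query the
  // same dimension repeatedly.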
const auto get_min_grid_index_for_lexicographical_order =
[&](DimensionIndex dim) {
if (dim == cached_min_grid_index_for_lexicographical_order_dim) {
return cached_min_grid_index_for_lexicographical_order;
}
cached_min_grid_index_for_lexicographical_order_dim = dim;
return cached_min_grid_index_for_lexicographical_order =
key_formatter.MinGridIndexForLexicographicalOrder(
dim, grid_bounds[dim]);
};
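  // Emits one result for `bounds`: a single key when it covers exactly one
  // grid cell, a key-prefix range when dimension `outer_prefix_rank` spans its
  // full grid extent, or an explicit [min, prefix-exclusive-max) range over
  // the first `outer_prefix_rank` + 1 dimensions otherwise.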
const auto forward_bounds =
[&](BoxView<> bounds, DimensionIndex outer_prefix_rank) -> absl::Status {
if (bounds.num_elements() == 1) {
return handle_key(key_formatter.FormatKey(bounds.origin()),
bounds.origin());
}
assert(outer_prefix_rank < bounds.rank());
if (bounds[outer_prefix_rank] == grid_bounds[outer_prefix_rank]) {
return handle_key_range(KeyRange::Prefix(key_formatter.FormatKey(
bounds.origin().first(outer_prefix_rank))),
bounds);
}
DimensionIndex key_dims = outer_prefix_rank + 1;
Index inclusive_max_indices[kMaxRank];
for (DimensionIndex i = 0; i < key_dims; ++i) {
inclusive_max_indices[i] = bounds[i].inclusive_max();
}
return handle_key_range(
KeyRange(key_formatter.FormatKey(bounds.origin().first(key_dims)),
KeyRange::PrefixExclusiveMax(
key_formatter.FormatKey(tensorstore::span<const Index>(
&inclusive_max_indices[0], key_dims)))),
bounds);
};
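  // Called for each grid-cell box produced by GetGridCellRanges. Skips leading
  // singleton dimensions, then peels off the indices of the first
  // non-singleton dimension that lie below the formatter's
  // lexicographical-order threshold (emitting those one box at a time) so that
  // every range handed to forward_bounds is contiguous in key order.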
const auto handle_interval = [&](BoxView<> bounds) -> absl::Status {
DimensionIndex outer_prefix_rank = 0;
while (outer_prefix_rank < bounds.rank() &&
bounds.shape()[outer_prefix_rank] == 1) {
++outer_prefix_rank;
}
if (outer_prefix_rank == bounds.rank() ||
bounds[outer_prefix_rank] == grid_bounds[outer_prefix_rank]) {
return forward_bounds(bounds, outer_prefix_rank);
}
const Index min_index_for_lexicographical_order =
get_min_grid_index_for_lexicographical_order(outer_prefix_rank);
if (min_index_for_lexicographical_order <=
bounds.origin()[outer_prefix_rank]) {
return forward_bounds(bounds, outer_prefix_rank);
}
Box<dynamic_rank(kMaxRank)> new_bounds(bounds);
IndexInterval inner_interval = bounds[outer_prefix_rank];
while (!inner_interval.empty() && inner_interval.inclusive_min() <
min_index_for_lexicographical_order) {
new_bounds[outer_prefix_rank] =
IndexInterval::UncheckedSized(inner_interval.inclusive_min(), 1);
TENSORSTORE_RETURN_IF_ERROR(
forward_bounds(new_bounds, outer_prefix_rank + 1));
inner_interval = IndexInterval::UncheckedClosed(
inner_interval.inclusive_min() + 1, inner_interval.inclusive_max());
}
if (inner_interval.empty()) return absl::OkStatus();
new_bounds[outer_prefix_rank] = inner_interval;
return forward_bounds(new_bounds, inner_interval.size() == 1
? outer_prefix_rank + 1
: outer_prefix_rank);
};
return internal_grid_partition::GetGridCellRanges(
grid_partition, grid_output_dimensions, grid_bounds, output_to_grid_cell,
transform, handle_interval);
}
}
} | #include "tensorstore/internal/grid_chunk_key_ranges.h"
#include <cassert>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/internal/grid_chunk_key_ranges_base10.h"
#include "tensorstore/internal/grid_partition.h"
#include "tensorstore/internal/grid_partition_impl.h"
#include "tensorstore/internal/regular_grid.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/division.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::CeilOfRatio;
using ::tensorstore::DimensionIndex;
using ::tensorstore::dynamic_rank;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::IndexTransformView;
using ::tensorstore::KeyRange;
using ::tensorstore::kMaxRank;
using ::tensorstore::Result;
using ::tensorstore::internal::Base10LexicographicalGridIndexKeyParser;
using ::tensorstore::internal_grid_partition::IndexTransformGridPartition;
using ::tensorstore::internal_grid_partition::
PrePartitionIndexTransformOverGrid;
using ::tensorstore::internal_grid_partition::RegularGridRef;
using ::testing::ElementsAre;
using ::testing::Optional;
using R = std::tuple<KeyRange, Box<>>;
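// Test helper that mirrors the production call path: derives the grid bounds
// from `shape` and `chunk_shape`, pre-partitions the transform over a regular
// grid, and forwards to the function under test with a base-10,
// dimension-separated key formatter.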
absl::Status GetChunkKeyRangesForRegularGridWithBase10Keys(
IndexTransformView<> transform,
tensorstore::span<const DimensionIndex> grid_output_dimensions,
tensorstore::span<const Index> chunk_shape,
tensorstore::span<const Index> shape, char dimension_separator,
absl::FunctionRef<absl::Status(std::string key,
tensorstore::span<const Index> grid_indices)>
handle_key,
absl::FunctionRef<absl::Status(KeyRange key_range, BoxView<> grid_bounds)>
handle_key_range) {
const DimensionIndex rank = grid_output_dimensions.size();
assert(rank == chunk_shape.size());
assert(rank == shape.size());
Box<dynamic_rank(kMaxRank)> grid_bounds(rank);
for (DimensionIndex i = 0; i < shape.size(); ++i) {
const Index grid_size = CeilOfRatio(shape[i], chunk_shape[i]);
grid_bounds[i] = IndexInterval::UncheckedSized(0, grid_size);
}
RegularGridRef grid{chunk_shape};
IndexTransformGridPartition grid_partition;
TENSORSTORE_RETURN_IF_ERROR(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, grid, grid_partition));
return GetChunkKeyRangesForRegularGridWithSemiLexicographicalKeys(
grid_partition, transform, grid_output_dimensions, grid, grid_bounds,
Base10LexicographicalGridIndexKeyParser{rank, dimension_separator},
handle_key, handle_key_range);
}
Result<std::vector<R>> GetRanges(
IndexTransformView<> transform,
tensorstore::span<const DimensionIndex> grid_output_dimensions,
tensorstore::span<const Index> chunk_shape,
tensorstore::span<const Index> shape, char dimension_separator) {
std::vector<R> ranges;
const auto handle_key =
[&](std::string key,
tensorstore::span<const Index> grid_indices) -> absl::Status {
ranges.emplace_back(
KeyRange::Singleton(key),
Box<>(grid_indices, std::vector<Index>(grid_indices.size(), 1)));
return absl::OkStatus();
};
const auto handle_key_range = [&](KeyRange key_range,
BoxView<> grid_bounds) -> absl::Status {
ranges.emplace_back(std::move(key_range), grid_bounds);
return absl::OkStatus();
};
TENSORSTORE_RETURN_IF_ERROR(GetChunkKeyRangesForRegularGridWithBase10Keys(
transform, grid_output_dimensions, chunk_shape, shape,
dimension_separator, handle_key, handle_key_range));
return ranges;
}
TEST(ChunkKeyRangesTest, Rank0) {
EXPECT_THAT(GetRanges(IndexTransformBuilder(0, 0).Finalize().value(),
{}, {},
{}, '/'),
Optional(ElementsAre(R{KeyRange::Singleton("0"), {}})));
}
TEST(ChunkKeyRangesTest, Rank1Unconstrained) {
EXPECT_THAT(GetRanges(IndexTransformBuilder(1, 1)
.input_shape({50})
.output_identity_transform()
.Finalize()
.value(),
{{0}}, {{5}},
{{50}}, '/'),
Optional(ElementsAre(R{KeyRange(), Box<>{{0}, {10}}})));
}
TEST(ChunkKeyRangesTest, Rank1Constrained) {
EXPECT_THAT(
GetRanges(IndexTransformBuilder(1, 1)
.input_origin({7})
.input_shape({30})
.output_identity_transform()
.Finalize()
.value(),
{{0}}, {{5}},
{{50}}, '/'),
Optional(ElementsAre(R{KeyRange("1", KeyRange::PrefixExclusiveMax("7")),
Box<>{{1}, {7}}})));
}
TEST(ChunkKeyRangesTest, Rank1ConstrainedSplit) {
EXPECT_THAT(
GetRanges(IndexTransformBuilder(1, 1)
.input_origin({8})
.input_exclusive_max({13})
.output_identity_transform()
.Finalize()
.value(),
{{0}}, {{1}},
{{20}}, '/'),
Optional(ElementsAre(R{KeyRange::Singleton("8"), Box<>{{8}, {1}}},
R{KeyRange::Singleton("9"), Box<>{{9}, {1}}},
R{KeyRange("10", KeyRange::PrefixExclusiveMax("12")),
Box<>{{10}, {3}}})));
}
TEST(ChunkKeyRangesTest, Rank2ConstrainedBothDims) {
EXPECT_THAT(
GetRanges(IndexTransformBuilder(2, 2)
.input_origin({6, 7})
.input_shape({8, 30})
.output_identity_transform()
.Finalize()
.value(),
{{0, 1}}, {{5, 10}},
{{25, 100}}, '/'),
Optional(
ElementsAre(R{KeyRange("1/0", KeyRange::PrefixExclusiveMax("1/3")),
Box<>{{1, 0}, {1, 4}}},
R{KeyRange("2/0", KeyRange::PrefixExclusiveMax("2/3")),
Box<>{{2, 0}, {1, 4}}})));
}
TEST(ChunkKeyRangesTest, Rank2ConstrainedFirstDimOnly) {
EXPECT_THAT(
GetRanges(IndexTransformBuilder(2, 2)
.input_origin({6, 0})
.input_shape({8, 50})
.output_identity_transform()
.Finalize()
.value(),
{{0, 1}}, {{5, 5}},
{{25, 50}}, '/'),
Optional(ElementsAre(R{KeyRange("1/", KeyRange::PrefixExclusiveMax("2/")),
Box<>{{1, 0}, {2, 10}}})));
}
TEST(ChunkKeyRangesTest, Rank2ConstrainedFirstDimOnlySplit) {
EXPECT_THAT(
GetRanges(IndexTransformBuilder(2, 2)
.input_origin({8, 0})
.input_shape({5, 50})
.output_identity_transform()
.Finalize()
.value(),
{{0, 1}}, {{1, 5}},
{{25, 50}}, '/'),
Optional(
ElementsAre(R{KeyRange::Prefix("8/"), Box<>{{8, 0}, {1, 10}}},
R{KeyRange::Prefix("9/"), Box<>{{9, 0}, {1, 10}}},
R{KeyRange("10/", "120"), Box<>{{10, 0}, {3, 10}}})));
}
TEST(ChunkKeyRangesTest, Rank2ConstrainedSecondDimOnly) {
EXPECT_THAT(
GetRanges(IndexTransformBuilder(2, 2)
.input_origin({0, 7})
.input_shape({25, 30})
.output_identity_transform()
.Finalize()
.value(),
{{0, 1}}, {{5, 5}},
{{25, 50}}, '/'),
Optional(
ElementsAre(R{KeyRange("0/1", KeyRange::PrefixExclusiveMax("0/7")),
Box<>{{0, 1}, {1, 7}}},
R{KeyRange("1/1", KeyRange::PrefixExclusiveMax("1/7")),
Box<>{{1, 1}, {1, 7}}},
R{KeyRange("2/1", KeyRange::PrefixExclusiveMax("2/7")),
Box<>{{2, 1}, {1, 7}}},
R{KeyRange("3/1", KeyRange::PrefixExclusiveMax("3/7")),
Box<>{{3, 1}, {1, 7}}},
R{KeyRange("4/1", KeyRange::PrefixExclusiveMax("4/7")),
Box<>{{4, 1}, {1, 7}}})));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/grid_chunk_key_ranges.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/grid_chunk_key_ranges_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
84dc13a8-40c4-46d9-9c1a-bea4f801ff37 | cpp | tensorflow/tensorflow | grappler_test | tensorflow/c/experimental/grappler/grappler_test.cc | tensorflow/core/grappler/utils/grappler_test_test.cc | #include "tensorflow/c/experimental/grappler/grappler.h"
#include "absl/log/check.h"
#include "tensorflow/c/experimental/grappler/grappler_internal.h"
#include "tensorflow/c/tf_buffer.h"
#include "tensorflow/c/tf_buffer_internal.h"
#include "tensorflow/c/tf_status.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/clusters/single_machine.h"
#include "tensorflow/core/grappler/costs/op_performance_data.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace grappler {
namespace {
void optimize_func(void* optimizer, const TF_Buffer* graph_buf,
const TF_GrapplerItem* item, TF_Buffer* optimized_graph_buf,
TF_Status* tf_status) {}
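// Fills the registration params with the struct sizes the C API expects and a
// no-op optimize_func; individual tests then override just the fields they are
// exercising.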
void PopulateDefaultParam(TP_OptimizerRegistrationParams* params) {
params->struct_size = TP_OPTIMIZER_REGISTRATION_PARAMS_STRUCT_SIZE;
params->optimizer_configs->struct_size = TP_OPTIMIZER_CONFIGS_STRUCT_SIZE;
params->optimizer->struct_size = TP_OPTIMIZER_STRUCT_SIZE;
params->optimizer->create_func = nullptr;
params->optimizer->optimize_func = optimize_func;
params->optimizer->destroy_func = nullptr;
}
TEST(Grappler, SuccessfulRegistration) {
auto plugin_init = [](TP_OptimizerRegistrationParams* const params,
TF_Status* const status) -> void {
TF_SetStatus(status, TF_OK, "");
PopulateDefaultParam(params);
params->device_type = "Success";
params->optimizer_configs->remapping = TF_TriState_Off;
};
TF_ASSERT_OK(InitGraphPlugin(plugin_init));
ASSERT_EQ(PluginGraphOptimizerRegistry::CreateOptimizers(
std::set<string>{"Success"})
.size(),
1);
ConfigList config = PluginGraphOptimizerRegistry::GetPluginConfigs(
true, std::set<string>{"Success"});
ASSERT_EQ(config.toggle_config["remapping"], RewriterConfig::OFF);
}
TEST(Grappler, MultiplePluginRegistration) {
auto plugin_init_0 = [](TP_OptimizerRegistrationParams* const params,
TF_Status* const status) -> void {
TF_SetStatus(status, TF_OK, "");
PopulateDefaultParam(params);
params->device_type = "Device0";
};
auto plugin_init_1 = [](TP_OptimizerRegistrationParams* const params,
TF_Status* const status) -> void {
TF_SetStatus(status, TF_OK, "");
PopulateDefaultParam(params);
params->device_type = "Device1";
};
TF_ASSERT_OK(InitGraphPlugin(plugin_init_0));
TF_ASSERT_OK(InitGraphPlugin(plugin_init_1));
ASSERT_EQ(PluginGraphOptimizerRegistry::CreateOptimizers(
std::set<string>{"Device0", "Device1"})
.size(),
2);
}
TEST(Grappler, DeviceTypeNotSet) {
auto plugin_init = [](TP_OptimizerRegistrationParams* const params,
TF_Status* const status) -> void {
TF_SetStatus(status, TF_OK, "");
PopulateDefaultParam(params);
params->device_type = nullptr;
};
tensorflow::Status status = InitGraphPlugin(plugin_init);
ASSERT_EQ(status.code(), tensorflow::error::FAILED_PRECONDITION);
ASSERT_EQ(
status.message(),
"'device_type' field in TP_OptimizerRegistrationParams must be set.");
}
TEST(Grappler, OptimizeFuncNotSet) {
auto plugin_init = [](TP_OptimizerRegistrationParams* const params,
TF_Status* const status) -> void {
TF_SetStatus(status, TF_OK, "");
PopulateDefaultParam(params);
params->device_type = "FuncNotSet";
params->optimizer->optimize_func = nullptr;
};
tensorflow::Status status = InitGraphPlugin(plugin_init);
ASSERT_EQ(status.code(), tensorflow::error::FAILED_PRECONDITION);
ASSERT_EQ(status.message(),
"'optimize_func' field in TP_Optimizer must be set.");
}
TEST(TF_GrapplerItem, NodesToPreserve) {
GrapplerItem item;
item.fetch = std::vector<string>{"Conv", "BiasAdd"};
std::unordered_set<string> nodes_preserved = item.NodesToPreserve();
TF_GrapplerItem* c_item = reinterpret_cast<TF_GrapplerItem*>(&item);
int list_total_size = 0;
for (const string& s : nodes_preserved) {
list_total_size += s.size();
}
size_t storage_size = 0;
int num_values = 0;
TF_Status* status = TF_NewStatus();
TF_GetNodesToPreserveListSize(c_item, &num_values, &storage_size, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_EQ(nodes_preserved.size(), num_values);
EXPECT_EQ(list_total_size, storage_size);
std::unique_ptr<char*[]> values(new char*[nodes_preserved.size()]);
std::unique_ptr<size_t[]> lens(new size_t[nodes_preserved.size()]);
std::unique_ptr<char[]> storage(new char[storage_size]);
TF_GetNodesToPreserveList(c_item, values.get(), lens.get(),
nodes_preserved.size(), storage.get(), storage_size,
status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
for (size_t i = 0; i < nodes_preserved.size(); ++i) {
EXPECT_EQ(nodes_preserved.find(string(static_cast<const char*>(values[i]),
lens[i])) != nodes_preserved.end(),
true);
}
TF_DeleteStatus(status);
}
TEST(TF_GrapplerItem, FetchNodes) {
GrapplerItem item;
item.fetch = std::vector<string>{"Conv", "BiasAdd"};
TF_GrapplerItem* c_item = reinterpret_cast<TF_GrapplerItem*>(&item);
int list_total_size = 0;
for (const string& s : item.fetch) {
list_total_size += s.size();
}
size_t storage_size = 0;
int num_values = 0;
TF_Status* status = TF_NewStatus();
TF_GetFetchNodesListSize(c_item, &num_values, &storage_size, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_EQ(item.fetch.size(), num_values);
EXPECT_EQ(list_total_size, storage_size);
std::unique_ptr<char*[]> values(new char*[item.fetch.size()]);
std::unique_ptr<size_t[]> lens(new size_t[item.fetch.size()]);
std::unique_ptr<char[]> storage(new char[storage_size]);
TF_GetFetchNodesList(c_item, values.get(), lens.get(), item.fetch.size(),
storage.get(), storage_size, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
for (size_t i = 0; i < item.fetch.size(); ++i) {
EXPECT_EQ(item.fetch[i].size(), lens[i]) << i;
EXPECT_EQ(item.fetch[i],
string(static_cast<const char*>(values[i]), lens[i]))
<< i;
}
TF_DeleteStatus(status);
}
TEST(TF_GraphProperties, InputProperties) {
std::unique_ptr<SingleMachine> cluster(new SingleMachine(5 * 60, 3, 0));
TF_ASSERT_OK(cluster->Provision());
TrivialTestGraphInputYielder fake_input(4, 1, 10, false,
cluster->GetDeviceNames());
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
TF_Status* status = TF_NewStatus();
TF_GraphProperties* graph_properties =
TF_NewGraphProperties(reinterpret_cast<TF_GrapplerItem*>(&item));
TF_InferStatically(graph_properties, true, false, false, false, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
for (const NodeDef& node : item.graph.node()) {
if (node.op() == "AddN") {
int num_values = 0;
TF_GetInputPropertiesListSize(graph_properties, node.name().c_str(),
&num_values, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_EQ(num_values, 1);
std::vector<TF_Buffer*> in_props_buf(num_values, TF_NewBuffer());
TF_GetInputPropertiesList(graph_properties, node.name().c_str(),
in_props_buf.data(), num_values, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
tensorflow::OpInfo::TensorProperties in_props;
Status s = tensorflow::BufferToMessage(in_props_buf[0], &in_props);
TF_ASSERT_OK(s);
EXPECT_EQ(DT_FLOAT, in_props.dtype());
EXPECT_FALSE(in_props.shape().unknown_rank());
EXPECT_EQ(2, in_props.shape().dim_size());
EXPECT_EQ(10, in_props.shape().dim(0).size());
EXPECT_EQ(1, in_props.shape().dim(1).size());
for (int i = 0; i < in_props_buf.size(); i++)
TF_DeleteBuffer(in_props_buf[i]);
}
}
TF_DeleteGraphProperties(graph_properties);
TF_DeleteStatus(status);
TF_ASSERT_OK(cluster->Shutdown());
}
TEST(TF_GraphProperties, OutputProperties) {
std::unique_ptr<SingleMachine> cluster(new SingleMachine(5 * 60, 3, 0));
TF_ASSERT_OK(cluster->Provision());
TrivialTestGraphInputYielder fake_input(4, 1, 10, false,
cluster->GetDeviceNames());
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
TF_Status* status = TF_NewStatus();
TF_GraphProperties* graph_properties =
TF_NewGraphProperties(reinterpret_cast<TF_GrapplerItem*>(&item));
TF_InferStatically(graph_properties, true, false, false, false, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
for (const NodeDef& node : item.graph.node()) {
if (node.op() == "AddN") {
int num_values = 0;
TF_GetOutputPropertiesListSize(graph_properties, node.name().c_str(),
&num_values, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
EXPECT_EQ(num_values, 1);
std::vector<TF_Buffer*> out_props_buf(num_values, TF_NewBuffer());
TF_GetOutputPropertiesList(graph_properties, node.name().c_str(),
out_props_buf.data(), num_values, status);
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
tensorflow::OpInfo::TensorProperties out_props;
Status s = tensorflow::BufferToMessage(out_props_buf[0], &out_props);
TF_ASSERT_OK(s);
EXPECT_EQ(DT_FLOAT, out_props.dtype());
EXPECT_FALSE(out_props.shape().unknown_rank());
EXPECT_EQ(2, out_props.shape().dim_size());
EXPECT_EQ(10, out_props.shape().dim(0).size());
EXPECT_EQ(1, out_props.shape().dim(1).size());
for (int i = 0; i < out_props_buf.size(); i++)
TF_DeleteBuffer(out_props_buf[i]);
}
}
TF_DeleteStatus(status);
TF_DeleteGraphProperties(graph_properties);
TF_ASSERT_OK(cluster->Shutdown());
}
TEST(TF_FunctionLibraryDefinition, LookUpOpDef) {
TF_Buffer* g_buf = TF_NewBuffer();
TF_Buffer* op_buf = TF_NewBuffer();
TF_Status* status = TF_NewStatus();
GraphDef g_def;
Status s = MessageToBuffer(g_def, g_buf);
TF_ASSERT_OK(s);
TF_FunctionLibraryDefinition* func =
TF_NewFunctionLibraryDefinition(g_buf, status);
TF_LookUpOpDef(func, "Add", op_buf, status);
string actual_string(reinterpret_cast<const char*>(op_buf->data),
op_buf->length);
ASSERT_EQ(TF_OK, TF_GetCode(status));
const OpDef* expected_op_def;
TF_ASSERT_OK(OpRegistry::Global()->LookUpOpDef("Add", &expected_op_def));
string expected_serialized;
expected_op_def->SerializeToString(&expected_serialized);
EXPECT_EQ(expected_serialized, actual_string);
TF_DeleteBuffer(g_buf);
TF_DeleteBuffer(op_buf);
TF_DeleteStatus(status);
TF_DeleteFunctionLibraryDefinition(func);
}
}
}
} | #include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class GrapplerTestTest : public GrapplerTest {};
TEST_F(GrapplerTestTest, CompareIdenticalGraphs) {
tensorflow::Scope s1 = tensorflow::Scope::NewRootScope();
auto s1_a = ops::Variable(s1.WithOpName("a"), {2, 2}, DT_FLOAT);
auto s1_b = ops::Variable(s1.WithOpName("b"), {2, 2}, DT_FLOAT);
auto s1_add = ops::Add(s1.WithOpName("Add_1"), s1_a, s1_b);
tensorflow::Scope s2 = tensorflow::Scope::NewRootScope();
auto s2_a = ops::Variable(s2.WithOpName("a"), {2, 2}, DT_FLOAT);
auto s2_b = ops::Variable(s2.WithOpName("b"), {2, 2}, DT_FLOAT);
auto s2_add = ops::Add(s2.WithOpName("Add_1"), s2_a, s2_b);
GraphDef graph1;
TF_ASSERT_OK(s1.ToGraphDef(&graph1));
GraphDef graph2;
TF_ASSERT_OK(s2.ToGraphDef(&graph2));
CompareGraphs(graph1, graph2);
}
TEST_F(GrapplerTestTest, CheckNodesConnectivity) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto a = ops::Variable(s.WithOpName("a"), {2, 2}, DT_FLOAT);
auto b = ops::Variable(s.WithOpName("b"), {2, 2}, DT_FLOAT);
auto add_1 = ops::Add(s.WithOpName("Add_1"), a, b);
auto add_2 = ops::Add(s.WithOpName("Add_2"), add_1, b);
GraphDef graph;
TF_ASSERT_OK(s.ToGraphDef(&graph));
NodeMap node_map(&graph);
EXPECT_TRUE(IsNodesDirectlyConnected(node_map, "a", "Add_1", 0));
EXPECT_TRUE(IsNodesDirectlyConnected(node_map, "b", "Add_1", 1));
EXPECT_FALSE(IsNodesDirectlyConnected(node_map, "a", "Add_2", 0));
EXPECT_TRUE(IsNodesDirectlyConnected(node_map, "b", "Add_2", 1));
}
TEST_F(GrapplerTestTest, CountOpNodes) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto a = ops::Variable(s.WithOpName("a"), {2, 2}, DT_FLOAT);
auto b = ops::Variable(s.WithOpName("b"), {2, 2}, DT_FLOAT);
auto c = ops::Variable(s.WithOpName("c"), {2, 2}, DT_FLOAT);
auto add_ab = ops::Add(s.WithOpName("Add_ab"), a, b);
auto add_bc = ops::Add(s.WithOpName("Add_bc"), b, c);
  auto mul_ab = ops::Mul(s.WithOpName("Mul_ab"), a, b);
  auto mul_bc = ops::Mul(s.WithOpName("Mul_bc"), b, c);
InputList inputs{
Output(add_ab),
Output(add_bc),
Output(mul_ab),
Output(mul_bc),
};
auto add_all = ops::AddN(s.WithOpName("Add_all"), inputs);
GraphDef graph;
TF_ASSERT_OK(s.ToGraphDef(&graph));
EXPECT_EQ(2, CountOpNodes(graph, "Add"));
EXPECT_EQ(2, CountOpNodes(graph, "Mul"));
EXPECT_EQ(1, CountOpNodes(graph, "AddN"));
EXPECT_EQ(0, CountOpNodes(graph, "Transpose"));
}
TEST_F(GrapplerTestTest, EvaluateNodes) {
EnableAllOptimizers();
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("c"), {1.0f, 2.0f}, {1, 2});
Output b = ops::Const(s.WithOpName("d"), {3.0f, 4.0f}, {1, 2});
Output mul = ops::Mul(s.WithOpName("mul"), a, b);
GrapplerItem item;
item.fetch = {"mul"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors.size(), 1);
EXPECT_EQ(tensors[0].flat<float>()(0), 3.0f);
EXPECT_EQ(tensors[0].flat<float>()(1), 8.0f);
}
TEST_F(GrapplerTestTest, EvaluateNodesInvalidFetch) {
EnableAllOptimizers();
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("c"), {1.0f, 2.0f}, {1, 2});
Output b = ops::Const(s.WithOpName("d"), {3.0f, 4.0f}, {1, 2});
Output mul = ops::Mul(s.WithOpName("mul"), a, b);
GrapplerItem item;
item.fetch = {"no_such_node"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
EXPECT_DEATH(EvaluateNodes(item.graph, item.fetch),
"Tensor no_such_node:0, specified in either "
"feed_devices or fetch_devices was not found in the Graph");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/grappler/grappler_test.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/grappler_test_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e93d8c14-5b23-430f-b0da-3ba1cd3e21b0 | cpp | abseil/abseil-cpp | city | absl/hash/internal/city.cc | absl/hash/internal/city_test.cc | #include "absl/hash/internal/city.h"
#include <string.h>
#include <algorithm>
#include "absl/base/config.h"
#include "absl/base/internal/endian.h"
#include "absl/base/internal/unaligned_access.h"
#include "absl/base/optimization.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace hash_internal {
#ifdef ABSL_IS_BIG_ENDIAN
#define uint32_in_expected_order(x) (absl::gbswap_32(x))
#define uint64_in_expected_order(x) (absl::gbswap_64(x))
#else
#define uint32_in_expected_order(x) (x)
#define uint64_in_expected_order(x) (x)
#endif
static uint64_t Fetch64(const char *p) {
return uint64_in_expected_order(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
}
static uint32_t Fetch32(const char *p) {
return uint32_in_expected_order(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
}
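// Magic constants: k0-k2 are the 64-bit constants used throughout CityHash64;
// c1 and c2 are the 32-bit mixing constants shared with MurmurHash3.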
static const uint64_t k0 = 0xc3a5c85c97cb3127ULL;
static const uint64_t k1 = 0xb492b66fbe98f273ULL;
static const uint64_t k2 = 0x9ae16a3b2f90404fULL;
static const uint32_t c1 = 0xcc9e2d51;
static const uint32_t c2 = 0x1b873593;
static uint32_t fmix(uint32_t h) {
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
h *= 0xc2b2ae35;
h ^= h >> 16;
return h;
}
static uint32_t Rotate32(uint32_t val, int shift) {
return shift == 0 ? val : ((val >> shift) | (val << (32 - shift)));
}
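// PERMUTE3 rotates three values one position: (a, b, c) becomes (c, a, b).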
#undef PERMUTE3
#define PERMUTE3(a, b, c) \
do { \
std::swap(a, b); \
std::swap(a, c); \
} while (0)
static uint32_t Mur(uint32_t a, uint32_t h) {
a *= c1;
a = Rotate32(a, 17);
a *= c2;
h ^= a;
h = Rotate32(h, 19);
return h * 5 + 0xe6546b64;
}
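// Length-bucketed helpers for CityHash32 on short inputs (0-4, 5-12, and
// 13-24 bytes).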
static uint32_t Hash32Len13to24(const char *s, size_t len) {
uint32_t a = Fetch32(s - 4 + (len >> 1));
uint32_t b = Fetch32(s + 4);
uint32_t c = Fetch32(s + len - 8);
uint32_t d = Fetch32(s + (len >> 1));
uint32_t e = Fetch32(s);
uint32_t f = Fetch32(s + len - 4);
uint32_t h = static_cast<uint32_t>(len);
return fmix(Mur(f, Mur(e, Mur(d, Mur(c, Mur(b, Mur(a, h)))))));
}
static uint32_t Hash32Len0to4(const char *s, size_t len) {
uint32_t b = 0;
uint32_t c = 9;
for (size_t i = 0; i < len; i++) {
signed char v = static_cast<signed char>(s[i]);
b = b * c1 + static_cast<uint32_t>(v);
c ^= b;
}
return fmix(Mur(b, Mur(static_cast<uint32_t>(len), c)));
}
static uint32_t Hash32Len5to12(const char *s, size_t len) {
uint32_t a = static_cast<uint32_t>(len), b = a * 5, c = 9, d = b;
a += Fetch32(s);
b += Fetch32(s + len - 4);
c += Fetch32(s + ((len >> 1) & 4));
return fmix(Mur(c, Mur(b, Mur(a, d))));
}
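// CityHash32: inputs of at most 24 bytes use the helpers above; longer inputs
// mix five words from the tail first and then consume 20 bytes of input per
// loop iteration.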
uint32_t CityHash32(const char *s, size_t len) {
if (len <= 24) {
return len <= 12
? (len <= 4 ? Hash32Len0to4(s, len) : Hash32Len5to12(s, len))
: Hash32Len13to24(s, len);
}
uint32_t h = static_cast<uint32_t>(len), g = c1 * h, f = g;
uint32_t a0 = Rotate32(Fetch32(s + len - 4) * c1, 17) * c2;
uint32_t a1 = Rotate32(Fetch32(s + len - 8) * c1, 17) * c2;
uint32_t a2 = Rotate32(Fetch32(s + len - 16) * c1, 17) * c2;
uint32_t a3 = Rotate32(Fetch32(s + len - 12) * c1, 17) * c2;
uint32_t a4 = Rotate32(Fetch32(s + len - 20) * c1, 17) * c2;
h ^= a0;
h = Rotate32(h, 19);
h = h * 5 + 0xe6546b64;
h ^= a2;
h = Rotate32(h, 19);
h = h * 5 + 0xe6546b64;
g ^= a1;
g = Rotate32(g, 19);
g = g * 5 + 0xe6546b64;
g ^= a3;
g = Rotate32(g, 19);
g = g * 5 + 0xe6546b64;
f += a4;
f = Rotate32(f, 19);
f = f * 5 + 0xe6546b64;
size_t iters = (len - 1) / 20;
do {
uint32_t b0 = Rotate32(Fetch32(s) * c1, 17) * c2;
uint32_t b1 = Fetch32(s + 4);
uint32_t b2 = Rotate32(Fetch32(s + 8) * c1, 17) * c2;
uint32_t b3 = Rotate32(Fetch32(s + 12) * c1, 17) * c2;
uint32_t b4 = Fetch32(s + 16);
h ^= b0;
h = Rotate32(h, 18);
h = h * 5 + 0xe6546b64;
f += b1;
f = Rotate32(f, 19);
f = f * c1;
g += b2;
g = Rotate32(g, 18);
g = g * 5 + 0xe6546b64;
h ^= b3 + b1;
h = Rotate32(h, 19);
h = h * 5 + 0xe6546b64;
g ^= b4;
g = absl::gbswap_32(g) * 5;
h += b4 * 5;
h = absl::gbswap_32(h);
f += b0;
PERMUTE3(f, h, g);
s += 20;
} while (--iters != 0);
g = Rotate32(g, 11) * c1;
g = Rotate32(g, 17) * c1;
f = Rotate32(f, 11) * c1;
f = Rotate32(f, 17) * c1;
h = Rotate32(h + g, 19);
h = h * 5 + 0xe6546b64;
h = Rotate32(h, 17) * c1;
h = Rotate32(h + f, 19);
h = h * 5 + 0xe6546b64;
h = Rotate32(h, 17) * c1;
return h;
}
static uint64_t Rotate(uint64_t val, int shift) {
return shift == 0 ? val : ((val >> shift) | (val << (64 - shift)));
}
static uint64_t ShiftMix(uint64_t val) { return val ^ (val >> 47); }
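// Murmur-inspired mixing of two 64-bit values down to one.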
static uint64_t HashLen16(uint64_t u, uint64_t v, uint64_t mul) {
uint64_t a = (u ^ v) * mul;
a ^= (a >> 47);
uint64_t b = (v ^ a) * mul;
b ^= (b >> 47);
b *= mul;
return b;
}
static uint64_t HashLen16(uint64_t u, uint64_t v) {
const uint64_t kMul = 0x9ddfea08eb382d69ULL;
return HashLen16(u, v, kMul);
}
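// Handles inputs of 0 to 16 bytes, with separate paths for lengths >= 8,
// >= 4, and shorter strings.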
static uint64_t HashLen0to16(const char *s, size_t len) {
if (len >= 8) {
uint64_t mul = k2 + len * 2;
uint64_t a = Fetch64(s) + k2;
uint64_t b = Fetch64(s + len - 8);
uint64_t c = Rotate(b, 37) * mul + a;
uint64_t d = (Rotate(a, 25) + b) * mul;
return HashLen16(c, d, mul);
}
if (len >= 4) {
uint64_t mul = k2 + len * 2;
uint64_t a = Fetch32(s);
return HashLen16(len + (a << 3), Fetch32(s + len - 4), mul);
}
if (len > 0) {
uint8_t a = static_cast<uint8_t>(s[0]);
uint8_t b = static_cast<uint8_t>(s[len >> 1]);
uint8_t c = static_cast<uint8_t>(s[len - 1]);
uint32_t y = static_cast<uint32_t>(a) + (static_cast<uint32_t>(b) << 8);
uint32_t z = static_cast<uint32_t>(len) + (static_cast<uint32_t>(c) << 2);
return ShiftMix(y * k2 ^ z * k0) * k2;
}
return k2;
}
static uint64_t HashLen17to32(const char *s, size_t len) {
uint64_t mul = k2 + len * 2;
uint64_t a = Fetch64(s) * k1;
uint64_t b = Fetch64(s + 8);
uint64_t c = Fetch64(s + len - 8) * mul;
uint64_t d = Fetch64(s + len - 16) * k2;
return HashLen16(Rotate(a + b, 43) + Rotate(c, 30) + d,
a + Rotate(b + k2, 18) + c, mul);
}
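// Returns a 16-byte hash (as a pair of uint64_t) of 32 bytes of input
// combined with the two seed values.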
static std::pair<uint64_t, uint64_t> WeakHashLen32WithSeeds(
uint64_t w, uint64_t x, uint64_t y, uint64_t z, uint64_t a, uint64_t b) {
a += w;
b = Rotate(b + a + z, 21);
uint64_t c = a;
a += x;
a += y;
b += Rotate(a, 44);
return std::make_pair(a + z, b + c);
}
static std::pair<uint64_t, uint64_t> WeakHashLen32WithSeeds(const char *s,
uint64_t a,
uint64_t b) {
return WeakHashLen32WithSeeds(Fetch64(s), Fetch64(s + 8), Fetch64(s + 16),
Fetch64(s + 24), a, b);
}
static uint64_t HashLen33to64(const char *s, size_t len) {
uint64_t mul = k2 + len * 2;
uint64_t a = Fetch64(s) * k2;
uint64_t b = Fetch64(s + 8);
uint64_t c = Fetch64(s + len - 24);
uint64_t d = Fetch64(s + len - 32);
uint64_t e = Fetch64(s + 16) * k2;
uint64_t f = Fetch64(s + 24) * 9;
uint64_t g = Fetch64(s + len - 8);
uint64_t h = Fetch64(s + len - 16) * mul;
uint64_t u = Rotate(a + g, 43) + (Rotate(b, 30) + c) * 9;
uint64_t v = ((a + g) ^ d) + f + 1;
uint64_t w = absl::gbswap_64((u + v) * mul) + h;
uint64_t x = Rotate(e + f, 42) + c;
uint64_t y = (absl::gbswap_64((v + w) * mul) + g) * mul;
uint64_t z = e + f + c;
a = absl::gbswap_64((x + z) * mul + y) + b;
b = ShiftMix((z + a) * mul + d + h) * mul;
return b + x;
}
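// CityHash64: inputs of at most 64 bytes use the bucketed helpers above;
// longer inputs keep 56 bytes of running state (v, w, x, y, z) and consume
// 64 bytes of input per loop iteration.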
uint64_t CityHash64(const char *s, size_t len) {
if (len <= 32) {
if (len <= 16) {
return HashLen0to16(s, len);
} else {
return HashLen17to32(s, len);
}
} else if (len <= 64) {
return HashLen33to64(s, len);
}
uint64_t x = Fetch64(s + len - 40);
uint64_t y = Fetch64(s + len - 16) + Fetch64(s + len - 56);
uint64_t z = HashLen16(Fetch64(s + len - 48) + len, Fetch64(s + len - 24));
std::pair<uint64_t, uint64_t> v =
WeakHashLen32WithSeeds(s + len - 64, len, z);
std::pair<uint64_t, uint64_t> w =
WeakHashLen32WithSeeds(s + len - 32, y + k1, x);
x = x * k1 + Fetch64(s);
len = (len - 1) & ~static_cast<size_t>(63);
do {
x = Rotate(x + y + v.first + Fetch64(s + 8), 37) * k1;
y = Rotate(y + v.second + Fetch64(s + 48), 42) * k1;
x ^= w.second;
y += v.first + Fetch64(s + 40);
z = Rotate(z + w.first, 33) * k1;
v = WeakHashLen32WithSeeds(s, v.second * k1, x + w.first);
w = WeakHashLen32WithSeeds(s + 32, z + w.second, y + Fetch64(s + 16));
std::swap(z, x);
s += 64;
len -= 64;
} while (len != 0);
return HashLen16(HashLen16(v.first, w.first) + ShiftMix(y) * k1 + z,
HashLen16(v.second, w.second) + x);
}
uint64_t CityHash64WithSeed(const char *s, size_t len, uint64_t seed) {
return CityHash64WithSeeds(s, len, k2, seed);
}
uint64_t CityHash64WithSeeds(const char *s, size_t len, uint64_t seed0,
uint64_t seed1) {
return HashLen16(CityHash64(s, len) - seed0, seed1);
}
}
ABSL_NAMESPACE_END
} | #include "absl/hash/internal/city.h"
#include <string.h>
#include <cstdio>
#include <iostream>
#include "gtest/gtest.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace hash_internal {
namespace {
static const uint64_t k0 = 0xc3a5c85c97cb3127ULL;
static const uint64_t kSeed0 = 1234567;
static const uint64_t kSeed1 = k0;
static const int kDataSize = 1 << 20;
static const int kTestSize = 300;
static char data[kDataSize];
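// Fills the 1 MiB data buffer with deterministic pseudo-random bytes so that
// the expected hash values in testdata below stay stable across runs.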
void setup() {
uint64_t a = 9;
uint64_t b = 777;
for (int i = 0; i < kDataSize; i++) {
a += b;
b += a;
a = (a ^ (a >> 41)) * k0;
b = (b ^ (b >> 41)) * k0 + i;
uint8_t u = b >> 37;
memcpy(data + i, &u, 1);
}
}
#define C(x) 0x##x##ULL
static const uint64_t testdata[kTestSize][4] = {
{C(9ae16a3b2f90404f), C(75106db890237a4a), C(3feac5f636039766),
C(dc56d17a)},
{C(541150e87f415e96), C(1aef0d24b3148a1a), C(bacc300e1e82345a),
C(99929334)},
{C(f3786a4b25827c1), C(34ee1a2bf767bd1c), C(2f15ca2ebfb631f2), C(4252edb7)},
{C(ef923a7a1af78eab), C(79163b1e1e9a9b18), C(df3b2aca6e1e4a30),
C(ebc34f3c)},
{C(11df592596f41d88), C(843ec0bce9042f9c), C(cce2ea1e08b1eb30),
C(26f2b463)},
{C(831f448bdc5600b3), C(62a24be3120a6919), C(1b44098a41e010da),
C(b042c047)},
{C(3eca803e70304894), C(d80de767e4a920a), C(a51cfbb292efd53d), C(e73bb0a8)},
{C(1b5a063fb4c7f9f1), C(318dbc24af66dee9), C(10ef7b32d5c719af),
C(91dfdd75)},
{C(a0f10149a0e538d6), C(69d008c20f87419f), C(41b36376185b3e9e),
C(c87f95de)},
{C(fb8d9c70660b910b), C(a45b0cc3476bff1b), C(b28d1996144f0207),
C(3f5538ef)},
{C(236827beae282a46), C(e43970221139c946), C(4f3ac6faa837a3aa),
C(70eb1a1f)},
{C(c385e435136ecf7c), C(d9d17368ff6c4a08), C(1b31eed4e5251a67),
C(cfd63b83)},
{C(e3f6828b6017086d), C(21b4d1900554b3b0), C(bef38be1809e24f1),
C(894a52ef)},
{C(851fff285561dca0), C(4d1277d73cdf416f), C(28ccffa61010ebe2),
C(9cde6a54)},
{C(61152a63595a96d9), C(d1a3a91ef3a7ba45), C(443b6bb4a493ad0c),
C(6c4898d5)},
{C(44473e03be306c88), C(30097761f872472a), C(9fd1b669bfad82d7),
C(13e1978e)},
{C(3ead5f21d344056), C(fb6420393cfb05c3), C(407932394cbbd303), C(51b4ba8)},
{C(6abbfde37ee03b5b), C(83febf188d2cc113), C(cda7b62d94d5b8ee),
C(b6b06e40)},
{C(943e7ed63b3c080), C(1ef207e9444ef7f8), C(ef4a9f9f8c6f9b4a), C(240a2f2)},
{C(d72ce05171ef8a1a), C(c6bd6bd869203894), C(c760e6396455d23a),
C(5dcefc30)},
{C(4182832b52d63735), C(337097e123eea414), C(b5a72ca0456df910),
C(7a48b105)},
{C(d6cdae892584a2cb), C(58de0fa4eca17dcd), C(43df30b8f5f1cb00),
C(fd55007b)},
{C(5c8e90bc267c5ee4), C(e9ae044075d992d9), C(f234cbfd1f0a1e59),
C(6b95894c)},
{C(bbd7f30ac310a6f3), C(b23b570d2666685f), C(fb13fb08c9814fe7),
C(3360e827)},
{C(36a097aa49519d97), C(8204380a73c4065), C(77c2004bdd9e276a), C(45177e0b)},
{C(dc78cb032c49217), C(112464083f83e03a), C(96ae53e28170c0f5), C(7c6fffe4)},
{C(441593e0da922dfe), C(936ef46061469b32), C(204a1921197ddd87),
C(bbc78da4)},
{C(2ba3883d71cc2133), C(72f2bbb32bed1a3c), C(27e1bd96d4843251),
C(c5c25d39)},
{C(f2b6d2adf8423600), C(7514e2f016a48722), C(43045743a50396ba),
C(b6e5d06e)},
{C(38fffe7f3680d63c), C(d513325255a7a6d1), C(31ed47790f6ca62f),
C(6178504e)},
{C(b7477bf0b9ce37c6), C(63b1c580a7fd02a4), C(f6433b9f10a5dac), C(bd4c3637)},
{C(55bdb0e71e3edebd), C(c7ab562bcf0568bc), C(43166332f9ee684f),
C(6e7ac474)},
{C(782fa1b08b475e7), C(fb7138951c61b23b), C(9829105e234fb11e), C(1fb4b518)},
{C(c5dc19b876d37a80), C(15ffcff666cfd710), C(e8c30c72003103e2),
C(31d13d6d)},
{C(5e1141711d2d6706), C(b537f6dee8de6933), C(3af0a1fbbe027c54),
C(26fa72e3)},
{C(782edf6da001234f), C(f48cbd5c66c48f3), C(808754d1e64e2a32), C(6a7433bf)},
{C(d26285842ff04d44), C(8f38d71341eacca9), C(5ca436f4db7a883c),
C(4e6df758)},
{C(c6ab830865a6bae6), C(6aa8e8dd4b98815c), C(efe3846713c371e5),
C(d57f63ea)},
{C(44b3a1929232892), C(61dca0e914fc217), C(a607cc142096b964), C(52ef73b3)},
{C(4b603d7932a8de4f), C(fae64c464b8a8f45), C(8fafab75661d602a), C(3cb36c3)},
{C(4ec0b54cf1566aff), C(30d2c7269b206bf4), C(77c22e82295e1061),
C(72c39bea)},
{C(ed8b7a4b34954ff7), C(56432de31f4ee757), C(85bd3abaa572b155),
C(a65aa25c)},
{C(5d28b43694176c26), C(714cc8bc12d060ae), C(3437726273a83fe6),
C(74740539)},
{C(6a1ef3639e1d202e), C(919bc1bd145ad928), C(30f3f7e48c28a773),
C(c3ae3c26)},
{C(159f4d9e0307b111), C(3e17914a5675a0c), C(af849bd425047b51), C(f29db8a2)},
{C(cc0a840725a7e25b), C(57c69454396e193a), C(976eaf7eee0b4540),
C(1ef4cbf4)},
{C(a2b27ee22f63c3f1), C(9ebde0ce1b3976b2), C(2fe6a92a257af308),
C(a9be6c41)},
{C(d8f2f234899bcab3), C(b10b037297c3a168), C(debea2c510ceda7f), C(fa31801)},
{C(584f28543864844f), C(d7cee9fc2d46f20d), C(a38dca5657387205),
C(8331c5d8)},
{C(a94be46dd9aa41af), C(a57e5b7723d3f9bd), C(34bf845a52fd2f), C(e9876db8)},
{C(9a87bea227491d20), C(a468657e2b9c43e7), C(af9ba60db8d89ef7),
C(27b0604e)},
{C(27688c24958d1a5c), C(e3b4a1c9429cf253), C(48a95811f70d64bc),
C(dcec07f2)},
{C(5d1d37790a1873ad), C(ed9cd4bcc5fa1090), C(ce51cde05d8cd96a),
C(cff0a82a)},
{C(1f03fd18b711eea9), C(566d89b1946d381a), C(6e96e83fc92563ab),
C(fec83621)},
{C(f0316f286cf527b6), C(f84c29538de1aa5a), C(7612ed3c923d4a71), C(743d8dc)},
{C(297008bcb3e3401d), C(61a8e407f82b0c69), C(a4a35bff0524fa0e),
C(64d41d26)},
{C(43c6252411ee3be), C(b4ca1b8077777168), C(2746dc3f7da1737f), C(acd90c81)},
{C(ce38a9a54fad6599), C(6d6f4a90b9e8755e), C(c3ecc79ff105de3f),
C(7c746a4b)},
{C(270a9305fef70cf), C(600193999d884f3a), C(f4d49eae09ed8a1), C(b1047e99)},
{C(e71be7c28e84d119), C(eb6ace59932736e6), C(70c4397807ba12c5),
C(d1fd1068)},
{C(b5b58c24b53aaa19), C(d2a6ab0773dd897f), C(ef762fe01ecb5b97),
C(56486077)},
{C(44dd59bd301995cf), C(3ccabd76493ada1a), C(540db4c87d55ef23),
C(6069be80)},
{C(b4d4789eb6f2630b), C(bf6973263ce8ef0e), C(d1c75c50844b9d3), C(2078359b)},
{C(12807833c463737c), C(58e927ea3b3776b4), C(72dd20ef1c2f8ad0),
C(9ea21004)},
{C(e88419922b87176f), C(bcf32f41a7ddbf6f), C(d6ebefd8085c1a0f),
C(9c9cfe88)},
{C(105191e0ec8f7f60), C(5918dbfcca971e79), C(6b285c8a944767b9),
C(b70a6ddd)},
{C(a5b88bf7399a9f07), C(fca3ddfd96461cc4), C(ebe738fdc0282fc6),
C(dea37298)},
{C(d08c3f5747d84f50), C(4e708b27d1b6f8ac), C(70f70fd734888606),
C(8f480819)},
{C(2f72d12a40044b4b), C(889689352fec53de), C(f03e6ad87eb2f36), C(30b3b16)},
{C(aa1f61fdc5c2e11e), C(c2c56cd11277ab27), C(a1e73069fdf1f94f),
C(f31bc4e8)},
{C(9489b36fe2246244), C(3355367033be74b8), C(5f57c2277cbce516),
C(419f953b)},
{C(358d7c0476a044cd), C(e0b7b47bcbd8854f), C(ffb42ec696705519),
C(20e9e76d)},
{C(b0c48df14275265a), C(9da4448975905efa), C(d716618e414ceb6d),
C(646f0ff8)},
{C(daa70bb300956588), C(410ea6883a240c6d), C(f5c8239fb5673eb3),
C(eeb7eca8)},
{C(4ec97a20b6c4c7c2), C(5913b1cd454f29fd), C(a9629f9daf06d685), C(8112bb9)},
{C(5c3323628435a2e8), C(1bea45ce9e72a6e3), C(904f0a7027ddb52e),
C(85a6d477)},
{C(c1ef26bea260abdb), C(6ee423f2137f9280), C(df2118b946ed0b43),
C(56f76c84)},
{C(6be7381b115d653a), C(ed046190758ea511), C(de6a45ffc3ed1159),
C(9af45d55)},
{C(ae3eece1711b2105), C(14fd3f4027f81a4a), C(abb7e45177d151db),
C(d1c33760)},
{C(376c28588b8fb389), C(6b045e84d8491ed2), C(4e857effb7d4e7dc),
C(c56bbf69)},
{C(58d943503bb6748f), C(419c6c8e88ac70f6), C(586760cbf3d3d368),
C(abecfb9b)},
{C(dfff5989f5cfd9a1), C(bcee2e7ea3a96f83), C(681c7874adb29017),
C(8de13255)},
{C(7fb19eb1a496e8f5), C(d49e5dfdb5c0833f), C(c0d5d7b2f7c48dc7),
C(a98ee299)},
{C(5dba5b0dadccdbaa), C(4ba8da8ded87fcdc), C(f693fdd25badf2f0),
C(3015f556)},
{C(688bef4b135a6829), C(8d31d82abcd54e8e), C(f95f8a30d55036d7),
C(5a430e29)},
{C(d8323be05433a412), C(8d48fa2b2b76141d), C(3d346f23978336a5),
C(2797add0)},
{C(3b5404278a55a7fc), C(23ca0b327c2d0a81), C(a6d65329571c892c),
C(27d55016)},
{C(2a96a3f96c5e9bbc), C(8caf8566e212dda8), C(904de559ca16e45e),
C(84945a82)},
{C(22bebfdcc26d18ff), C(4b4d8dcb10807ba1), C(40265eee30c6b896),
C(3ef7e224)},
{C(627a2249ec6bbcc2), C(c0578b462a46735a), C(4974b8ee1c2d4f1f),
C(35ed8dc8)},
{C(3abaf1667ba2f3e0), C(ee78476b5eeadc1), C(7e56ac0a6ca4f3f4), C(6a75e43d)},
{C(3931ac68c5f1b2c9), C(efe3892363ab0fb0), C(40b707268337cd36),
C(235d9805)},
{C(b98fb0606f416754), C(46a6e5547ba99c1e), C(c909d82112a8ed2), C(f7d69572)},
{C(7f7729a33e58fcc4), C(2e4bc1e7a023ead4), C(e707008ea7ca6222),
C(bacd0199)},
{C(42a0aa9ce82848b3), C(57232730e6bee175), C(f89bb3f370782031),
C(e428f50e)},
{C(6b2c6d38408a4889), C(de3ef6f68fb25885), C(20754f456c203361),
C(81eaaad3)},
{C(930380a3741e862a), C(348d28638dc71658), C(89dedcfd1654ea0d),
C(addbd3e3)},
{C(94808b5d2aa25f9a), C(cec72968128195e0), C(d9f4da2bdc1e130f),
C(e66dbca0)},
{C(b31abb08ae6e3d38), C(9eb9a95cbd9e8223), C(8019e79b7ee94ea9),
C(afe11fd5)},
{C(dccb5534a893ea1a), C(ce71c398708c6131), C(fe2396315457c164),
C(a71a406f)},
{C(6369163565814de6), C(8feb86fb38d08c2f), C(4976933485cc9a20),
C(9d90eaf5)},
{C(edee4ff253d9f9b3), C(96ef76fb279ef0ad), C(a4d204d179db2460),
C(6665db10)},
{C(941993df6e633214), C(929bc1beca5b72c6), C(141fc52b8d55572d),
C(9c977cbf)},
{C(859838293f64cd4c), C(484403b39d44ad79), C(bf674e64d64b9339),
C(ee83ddd4)},
{C(c19b5648e0d9f555), C(328e47b2b7562993), C(e756b92ba4bd6a51), C(26519cc)},
{C(f963b63b9006c248), C(9e9bf727ffaa00bc), C(c73bacc75b917e3a),
C(a485a53f)},
{C(6a8aa0852a8c1f3b), C(c8f1e5e206a21016), C(2aa554aed1ebb524),
C(f62bc412)},
{C(740428b4d45e5fb8), C(4c95a4ce922cb0a5), C(e99c3ba78feae796),
C(8975a436)},
{C(658b883b3a872b86), C(2f0e303f0f64827a), C(975337e23dc45e1), C(94ff7f41)},
{C(6df0a977da5d27d4), C(891dd0e7cb19508), C(fd65434a0b71e680), C(760aa031)},
{C(a900275464ae07ef), C(11f2cfda34beb4a3), C(9abf91e5a1c38e4), C(3bda76df)},
{C(810bc8aa0c40bcb0), C(448a019568d01441), C(f60ec52f60d3aeae),
C(498e2e65)},
{C(22036327deb59ed7), C(adc05ceb97026a02), C(48bff0654262672b),
C(d38deb48)},
{C(7d14dfa9772b00c8), C(595735efc7eeaed7), C(29872854f94c3507),
C(82b3fb6b)},
{C(2d777cddb912675d), C(278d7b10722a13f9), C(f5c02bfb7cc078af),
C(e500e25f)},
{C(f2ec98824e8aa613), C(5eb7e3fb53fe3bed), C(12c22860466e1dd4),
C(bd2bb07c)},
{C(5e763988e21f487f), C(24189de8065d8dc5), C(d1519d2403b62aa0),
C(3a2b431d)},
{C(48949dc327bb96ad), C(e1fd21636c5c50b4), C(3f6eb7f13a8712b4),
C(7322a83d)},
{C(b7c4209fb24a85c5), C(b35feb319c79ce10), C(f0d3de191833b922),
C(a645ca1c)},
{C(9c9e5be0943d4b05), C(b73dc69e45201cbb), C(aab17180bfe5083d),
C(8909a45a)},
{C(3898bca4dfd6638d), C(f911ff35efef0167), C(24bdf69e5091fc88),
C(bd30074c)},
{C(5b5d2557400e68e7), C(98d610033574cee), C(dfd08772ce385deb), C(c17cf001)},
{C(a927ed8b2bf09bb6), C(606e52f10ae94eca), C(71c2203feb35a9ee),
C(26ffd25a)},
{C(8d25746414aedf28), C(34b1629d28b33d3a), C(4d5394aea5f82d7b),
C(f1d8ce3c)},
{C(b5bbdb73458712f2), C(1ff887b3c2a35137), C(7f7231f702d0ace9),
C(3ee8fb17)},
{C(3d32a26e3ab9d254), C(fc4070574dc30d3a), C(f02629579c2b27c9),
C(a77acc2a)},
{C(9371d3c35fa5e9a5), C(42967cf4d01f30), C(652d1eeae704145c), C(f4556dee)},
{C(cbaa3cb8f64f54e0), C(76c3b48ee5c08417), C(9f7d24e87e61ce9), C(de287a64)},
{C(b2e23e8116c2ba9f), C(7e4d9c0060101151), C(3310da5e5028f367),
C(878e55b9)},
{C(8aa77f52d7868eb9), C(4d55bd587584e6e2), C(d2db37041f495f5), C(7648486)},
{C(858fea922c7fe0c3), C(cfe8326bf733bc6f), C(4e5e2018cf8f7dfc),
C(57ac0fb1)},
{C(46ef25fdec8392b1), C(e48d7b6d42a5cd35), C(56a6fe1c175299ca),
C(d01967ca)},
{C(8d078f726b2df464), C(b50ee71cdcabb299), C(f4af300106f9c7ba),
C(96ecdf74)},
{C(35ea86e6960ca950), C(34fe1fe234fc5c76), C(a00207a3dc2a72b7),
C(779f5506)},
{C(8aee9edbc15dd011), C(51f5839dc8462695), C(b2213e17c37dca2d),
C(3c94c2de)},
{C(c3e142ba98432dda), C(911d060cab126188), C(b753fbfa8365b844),
C(39f98faf)},
{C(123ba6b99c8cd8db), C(448e582672ee07c4), C(cebe379292db9e65),
C(7af31199)},
{C(ba87acef79d14f53), C(b3e0fcae63a11558), C(d5ac313a593a9f45),
C(e341a9d6)},
{C(bcd3957d5717dc3), C(2da746741b03a007), C(873816f4b1ece472), C(ca24aeeb)},
{C(61442ff55609168e), C(6447c5fc76e8c9cf), C(6a846de83ae15728),
C(b2252b57)},
{C(dbe4b1b2d174757f), C(506512da18712656), C(6857f3e0b8dd95f), C(72c81da1)},
{C(531e8e77b363161c), C(eece0b43e2dae030), C(8294b82c78f34ed1),
C(6b9fce95)},
{C(f71e9c926d711e2b), C(d77af2853a4ceaa1), C(9aa0d6d76a36fae7),
C(19399857)},
{C(cb20ac28f52df368), C(e6705ee7880996de), C(9b665cc3ec6972f2),
C(3c57a994)},
{C(e4a794b4acb94b55), C(89795358057b661b), C(9c4cdcec176d7a70),
C(c053e729)},
{C(cb942e91443e7208), C(e335de8125567c2a), C(d4d74d268b86df1f),
C(51cbbba7)},
{C(ecca7563c203f7ba), C(177ae2423ef34bb2), C(f60b7243400c5731),
C(1acde79a)},
{C(1652cb940177c8b5), C(8c4fe7d85d2a6d6d), C(f6216ad097e54e72),
C(2d160d13)},
{C(31fed0fc04c13ce8), C(3d5d03dbf7ff240a), C(727c5c9b51581203),
C(787f5801)},
{C(e7b668947590b9b3), C(baa41ad32938d3fa), C(abcbc8d4ca4b39e4),
C(c9629828)},
{C(1de2119923e8ef3c), C(6ab27c096cf2fe14), C(8c3658edca958891),
C(be139231)},
{C(1269df1e69e14fa7), C(992f9d58ac5041b7), C(e97fcf695a7cbbb4),
C(7df699ef)},
{C(820826d7aba567ff), C(1f73d28e036a52f3), C(41c4c5a73f3b0893),
C(8ce6b96d)},
{C(ffe0547e4923cef9), C(3534ed49b9da5b02), C(548a273700fba03d),
C(6f9ed99c)},
{C(72da8d1b11d8bc8b), C(ba94b56b91b681c6), C(4e8cc51bd9b0fc8c),
C(e0244796)},
{C(d62ab4e3f88fc797), C(ea86c7aeb6283ae4), C(b5b93e09a7fe465), C(4ccf7e75)},
{C(d0f06c28c7b36823), C(1008cb0874de4bb8), C(d6c7ff816c7a737b),
C(915cef86)},
{C(99b7042460d72ec6), C(2a53e5e2b8e795c2), C(53a78132d9e1b3e3),
C(5cb59482)},
{C(4f4dfcfc0ec2bae5), C(841233148268a1b8), C(9248a76ab8be0d3), C(6ca3f532)},
{C(fe86bf9d4422b9ae), C(ebce89c90641ef9c), C(1c84e2292c0b5659),
C(e24f3859)},
{C(a90d81060932dbb0), C(8acfaa88c5fbe92b), C(7c6f3447e90f7f3f),
C(adf5a9c7)},
{C(17938a1b0e7f5952), C(22cadd2f56f8a4be), C(84b0d1183d5ed7c1),
C(32264b75)},
{C(de9e0cb0e16f6e6d), C(238e6283aa4f6594), C(4fb9c914c2f0a13b),
C(a64b3376)},
{C(6d4b876d9b146d1a), C(aab2d64ce8f26739), C(d315f93600e83fe5), C(d33890e)},
{C(e698fa3f54e6ea22), C(bd28e20e7455358c), C(9ace161f6ea76e66),
C(926d4b63)},
{C(7bc0deed4fb349f7), C(1771aff25dc722fa), C(19ff0644d9681917),
C(d51ba539)},
{C(db4b15e88533f622), C(256d6d2419b41ce9), C(9d7c5378396765d5),
C(7f37636d)},
{C(922834735e86ecb2), C(363382685b88328e), C(e9c92960d7144630),
C(b98026c0)},
{C(30f1d72c812f1eb8), C(b567cd4a69cd8989), C(820b6c992a51f0bc),
C(b877767e)},
{C(168884267f3817e9), C(5b376e050f637645), C(1c18314abd34497a), C(aefae77)},
{C(82e78596ee3e56a7), C(25697d9c87f30d98), C(7600a8342834924d), C(f686911)},
{C(aa2d6cf22e3cc252), C(9b4dec4f5e179f16), C(76fb0fba1d99a99a),
C(3deadf12)},
{C(7bf5ffd7f69385c7), C(fc077b1d8bc82879), C(9c04e36f9ed83a24),
C(ccf02a4e)},
{C(e89c8ff9f9c6e34b), C(f54c0f669a49f6c4), C(fc3e46f5d846adef),
C(176c1722)},
{C(a18fbcdccd11e1f4), C(8248216751dfd65e), C(40c089f208d89d7c), C(26f82ad)},
{C(2d54f40cc4088b17), C(59d15633b0cd1399), C(a8cc04bb1bffd15b),
C(b5244f42)},
{C(69276946cb4e87c7), C(62bdbe6183be6fa9), C(3ba9773dac442a1a),
C(49a689e5)},
{C(668174a3f443df1d), C(407299392da1ce86), C(c2a3f7d7f2c5be28), C(59fcdd3)},
{C(5e29be847bd5046), C(b561c7f19c8f80c3), C(5e5abd5021ccaeaf), C(4f4b04e9)},
{C(cd0d79f2164da014), C(4c386bb5c5d6ca0c), C(8e771b03647c3b63),
C(8b00f891)},
{C(e0e6fc0b1628af1d), C(29be5fb4c27a2949), C(1c3f781a604d3630),
C(16e114f3)},
{C(2058927664adfd93), C(6e8f968c7963baa5), C(af3dced6fff7c394),
C(d6b6dadc)},
{C(dc107285fd8e1af7), C(a8641a0609321f3f), C(db06e89ffdc54466),
C(897e20ac)},
{C(fbba1afe2e3280f1), C(755a5f392f07fce), C(9e44a9a15402809a), C(f996e05d)},
{C(bfa10785ddc1011b), C(b6e1c4d2f670f7de), C(517d95604e4fcc1f),
C(c4306af6)},
{C(534cc35f0ee1eb4e), C(b703820f1f3b3dce), C(884aa164cf22363), C(6dcad433)},
{C(7ca6e3933995dac), C(fd118c77daa8188), C(3aceb7b5e7da6545), C(3c07374d)},
{C(f0d6044f6efd7598), C(e044d6ba4369856e), C(91968e4f8c8a1a4c),
C(f0f4602c)},
{C(3d69e52049879d61), C(76610636ea9f74fe), C(e9bf5602f89310c0),
C(3e1ea071)},
{C(79da242a16acae31), C(183c5f438e29d40), C(6d351710ae92f3de), C(67580f0c)},
{C(461c82656a74fb57), C(d84b491b275aa0f7), C(8f262cb29a6eb8b2),
C(4e109454)},
{C(53c1a66d0b13003), C(731f060e6fe797fc), C(daa56811791371e3), C(88a474a7)},
{C(d3a2efec0f047e9), C(1cabce58853e58ea), C(7a17b2eae3256be4), C(5b5bedd)},
{C(43c64d7484f7f9b2), C(5da002b64aafaeb7), C(b576c1e45800a716),
C(1aaddfa7)},
{C(a7dec6ad81cf7fa1), C(180c1ab708683063), C(95e0fd7008d67cff),
C(5be07fd8)},
{C(5408a1df99d4aff), C(b9565e588740f6bd), C(abf241813b08006e), C(cbca8606)},
{C(a8b27a6bcaeeed4b), C(aec1eeded6a87e39), C(9daf246d6fed8326),
C(bde64d01)},
{C(9a952a8246fdc269), C(d0dcfcac74ef278c), C(250f7139836f0f1f),
C(ee90cf33)},
{C(c930841d1d88684f), C(5eb66eb18b7f9672), C(e455d413008a2546),
C(4305c3ce)},
{C(94dc6971e3cf071a), C(994c7003b73b2b34), C(ea16e85978694e5), C(4b3a1d76)},
{C(7fc98006e25cac9), C(77fee0484cda86a7), C(376ec3d447060456), C(a8bb6d80)},
{C(bd781c4454103f6), C(612197322f49c931), C(b9cf17fd7e5462d5), C(1f9fa607)},
{C(da60e6b14479f9df), C(3bdccf69ece16792), C(18ebf45c4fecfdc9),
C(8d0e4ed2)},
{C(4ca56a348b6c4d3), C(60618537c3872514), C(2fbb9f0e65871b09), C(1bf31347)},
{C(ebd22d4b70946401), C(6863602bf7139017), C(c0b1ac4e11b00666),
C(1ae3fc5b)},
{C(3cc4693d6cbcb0c), C(501689ea1c70ffa), C(10a4353e9c89e364), C(459c3930)},
{C(38908e43f7ba5ef0), C(1ab035d4e7781e76), C(41d133e8c0a68ff7),
C(e00c4184)},
{C(34983ccc6aa40205), C(21802cad34e72bc4), C(1943e8fb3c17bb8), C(ffc7a781)},
{C(86215c45dcac9905), C(ea546afe851cae4b), C(d85b6457e489e374),
C(6a125480)},
{C(420fc255c38db175), C(d503cd0f3c1208d1), C(d4684e74c825a0bc),
C(88a1512b)},
{C(1d7a31f5bc8fe2f9), C(4763991092dcf836), C(ed695f55b97416f4),
C(549bbbe5)},
{C(94129a84c376a26e), C(c245e859dc231933), C(1b8f74fecf917453),
C(c133d38c)},
{C(1d3a9809dab05c8d), C(adddeb4f71c93e8), C(ef342eb36631edb), C(fcace348)},
{C(90fa3ccbd60848da), C(dfa6e0595b569e11), C(e585d067a1f5135d),
C(ed7b6f9a)},
{C(2dbb4fc71b554514), C(9650e04b86be0f82), C(60f2304fba9274d3),
C(6d907dda)},
{C(b98bf4274d18374a), C(1b669fd4c7f9a19a), C(b1f5972b88ba2b7a),
C(7a4d48d5)},
{C(d6781d0b5e18eb68), C(b992913cae09b533), C(58f6021caaee3a40),
C(e686f3db)},
{C(226651cf18f4884c), C(595052a874f0f51c), C(c9b75162b23bab42), C(cce7c55)},
{C(a734fb047d3162d6), C(e523170d240ba3a5), C(125a6972809730e8), C(f58b96b)},
{C(c6df6364a24f75a3), C(c294e2c84c4f5df8), C(a88df65c6a89313b),
C(1bbf6f60)},
{C(d8d1364c1fbcd10), C(2d7cc7f54832deaa), C(4e22c876a7c57625), C(ce5e0cc2)},
{C(aae06f9146db885f), C(3598736441e280d9), C(fba339b117083e55),
C(584cfd6f)},
{C(8955ef07631e3bcc), C(7d70965ea3926f83), C(39aed4134f8b2db6),
C(8f9bbc33)},
{C(ad611c609cfbe412), C(d3c00b18bf253877), C(90b2172e1f3d0bfd),
C(d7640d95)},
{C(d5339adc295d5d69), C(b633cc1dcb8b586a), C(ee84184cf5b1aeaf), C(3d12a2b)},
{C(40d0aeff521375a8), C(77ba1ad7ecebd506), C(547c6f1a7d9df427),
C(aaeafed0)},
{C(8b2d54ae1a3df769), C(11e7adaee3216679), C(3483781efc563e03),
C(95b9b814)},
{C(99c175819b4eae28), C(932e8ff9f7a40043), C(ec78dcab07ca9f7c),
C(45fbe66e)},
{C(2a418335779b82fc), C(af0295987849a76b), C(c12bc5ff0213f46e),
C(b4baa7a8)},
{C(3b1fc6a3d279e67d), C(70ea1e49c226396), C(25505adcf104697c), C(83e962fe)},
{C(d97eacdf10f1c3c9), C(b54f4654043a36e0), C(b128f6eb09d1234), C(aac3531c)},
{C(293a5c1c4e203cd4), C(6b3329f1c130cefe), C(f2e32f8ec76aac91),
C(2b1db7cc)},
{C(4290e018ffaedde7), C(a14948545418eb5e), C(72d851b202284636),
C(cf00cd31)},
{C(f919a59cbde8bf2f), C(a56d04203b2dc5a5), C(38b06753ac871e48),
C(7d3c43b8)},
{C(1d70a3f5521d7fa4), C(fb97b3fdc5891965), C(299d49bbbe3535af),
C(cbd5fac6)},
{C(6af98d7b656d0d7c), C(d2e99ae96d6b5c0c), C(f63bd1603ef80627),
C(76d0fec4)},
{C(395b7a8adb96ab75), C(582df7165b20f4a), C(e52bd30e9ff657f9), C(405e3402)},
{C(3822dd82c7df012f), C(b9029b40bd9f122b), C(fd25b988468266c4),
C(c732c481)},
{C(79f7efe4a80b951a), C(dd3a3fddfc6c9c41), C(ab4c812f9e27aa40),
C(a8d123c9)},
{C(ae6e59f5f055921a), C(e9d9b7bf68e82), C(5ce4e4a5b269cc59), C(1e80ad7d)},
{C(8959dbbf07387d36), C(b4658afce48ea35d), C(8f3f82437d8cb8d6),
C(52aeb863)},
{C(4739613234278a49), C(99ea5bcd340bf663), C(258640912e712b12),
C(ef7c0c18)},
{C(420e6c926bc54841), C(96dbbf6f4e7c75cd), C(d8d40fa70c3c67bb),
C(b6ad4b68)},
{C(c8601bab561bc1b7), C(72b26272a0ff869a), C(56fdfc986d6bc3c4),
C(c1e46b17)},
{C(b2d294931a0e20eb), C(284ffd9a0815bc38), C(1f8a103aac9bbe6), C(57b8df25)},
{C(7966f53c37b6c6d7), C(8e6abcfb3aa2b88f), C(7f2e5e0724e5f345),
C(e9fa36d6)},
{C(be9bb0abd03b7368), C(13bca93a3031be55), C(e864f4f52b55b472),
C(8f8daefc)},
{C(a08d128c5f1649be), C(a8166c3dbbe19aad), C(cb9f914f829ec62c), C(6e1bb7e)},
{C(7c386f0ffe0465ac), C(530419c9d843dbf3), C(7450e3a4f72b8d8c),
C(fd0076f0)},
{C(bb362094e7ef4f8), C(ff3c2a48966f9725), C(55152803acd4a7fe), C(899b17b6)},
{C(cd80dea24321eea4), C(52b4fdc8130c2b15), C(f3ea100b154bfb82),
C(e3e84e31)},
{C(d599a04125372c3a), C(313136c56a56f363), C(1e993c3677625832),
C(eef79b6b)},
{C(dbbf541e9dfda0a), C(1479fceb6db4f844), C(31ab576b59062534), C(868e3315)},
{C(c2ee3288be4fe2bf), C(c65d2f5ddf32b92), C(af6ecdf121ba5485), C(4639a426)},
{C(d86603ced1ed4730), C(f9de718aaada7709), C(db8b9755194c6535),
C(f3213646)},
{C(915263c671b28809), C(a815378e7ad762fd), C(abec6dc9b669f559),
C(17f148e9)},
{C(2b67cdd38c307a5e), C(cb1d45bb5c9fe1c), C(800baf2a02ec18ad), C(bfd94880)},
{C(2d107419073b9cd0), C(a96db0740cef8f54), C(ec41ee91b3ecdc1b),
C(bb1fa7f3)},
{C(f3e9487ec0e26dfc), C(1ab1f63224e837fa), C(119983bb5a8125d8), C(88816b1)},
{C(1160987c8fe86f7d), C(879e6db1481eb91b), C(d7dcb802bfe6885d),
C(5c2faeb3)},
{C(eab8112c560b967b), C(97f550b58e89dbae), C(846ed506d304051f),
C(51b5fc6f)},
{C(1addcf0386d35351), C(b5f436561f8f1484), C(85d38e22181c9bb1),
C(33d94752)},
{C(d445ba84bf803e09), C(1216c2497038f804), C(2293216ea2237207),
C(b0c92948)},
{C(37235a096a8be435), C(d9b73130493589c2), C(3b1024f59378d3be),
C(c7171590)},
{C(763ad6ea2fe1c99d), C(cf7af5368ac1e26b), C(4d5e451b3bb8d3d4),
C(240a67fb)},
{C(ea627fc84cd1b857), C(85e372494520071f), C(69ec61800845780b),
C(e1843cd5)},
{C(1f2ffd79f2cdc0c8), C(726a1bc31b337aaa), C(678b7f275ef96434),
C(fda1452b)},
{C(39a9e146ec4b3210), C(f63f75802a78b1ac), C(e2e22539c94741c3),
C(a2cad330)},
{C(74cba303e2dd9d6d), C(692699b83289fad1), C(dfb9aa7874678480),
C(53467e16)},
{C(4cbc2b73a43071e0), C(56c5db4c4ca4e0b7), C(1b275a162f46bd3d),
C(da14a8d0)},
{C(875638b9715d2221), C(d9ba0615c0c58740), C(616d4be2dfe825aa),
C(67333551)},
{C(fb686b2782994a8d), C(edee60693756bb48), C(e6bc3cae0ded2ef5),
C(a0ebd66e)},
{C(ab21d81a911e6723), C(4c31b07354852f59), C(835da384c9384744),
C(4b769593)},
{C(33d013cc0cd46ecf), C(3de726423aea122c), C(116af51117fe21a9),
C(6aa75624)},
{C(8ca92c7cd39fae5d), C(317e620e1bf20f1), C(4f0b33bf2194b97f), C(602a3f96)},
{C(fdde3b03f018f43e), C(38f932946c78660), C(c84084ce946851ee), C(cd183c4d)},
{C(9c8502050e9c9458), C(d6d2a1a69964beb9), C(1675766f480229b5),
C(960a4d07)},
{C(348176ca2fa2fdd2), C(3a89c514cc360c2d), C(9f90b8afb318d6d0),
C(9ae998c4)},
{C(4a3d3dfbbaea130b), C(4e221c920f61ed01), C(553fd6cd1304531f),
C(74e2179d)},
{C(b371f768cdf4edb9), C(bdef2ace6d2de0f0), C(e05b4100f7f1baec),
C(ee9bae25)},
{C(7a1d2e96934f61f), C(eb1760ae6af7d961), C(887eb0da063005df), C(b66edf10)},
{C(8be53d466d4728f2), C(86a5ac8e0d416640), C(984aa464cdb5c8bb),
C(d6209737)},
{C(829677eb03abf042), C(43cad004b6bc2c0), C(f2f224756803971a), C(b994a88)},
{C(754435bae3496fc), C(5707fc006f094dcf), C(8951c86ab19d8e40), C(a05d43c0)},
{C(fda9877ea8e3805f), C(31e868b6ffd521b7), C(b08c90681fb6a0fd),
C(c79f73a8)},
{C(2e36f523ca8f5eb5), C(8b22932f89b27513), C(331cd6ecbfadc1bb),
C(a490aff5)},
{C(21a378ef76828208), C(a5c13037fa841da2), C(506d22a53fbe9812),
C(dfad65b4)},
{C(ccdd5600054b16ca), C(f78846e84204cb7b), C(1f9faec82c24eac9), C(1d07dfb)},
{C(7854468f4e0cabd0), C(3a3f6b4f098d0692), C(ae2423ec7799d30d),
C(416df9a0)},
{C(7f88db5346d8f997), C(88eac9aacc653798), C(68a4d0295f8eefa1),
C(1f8fb9cc)},
{C(bb3fb5fb01d60fcf), C(1b7cc0847a215eb6), C(1246c994437990a1),
C(7abf48e3)},
{C(2e783e1761acd84d), C(39158042bac975a0), C(1cd21c5a8071188d),
C(dea4e3dd)},
{C(392058251cf22acc), C(944ec4475ead4620), C(b330a10b5cb94166),
C(c6064f22)},
{C(adf5c1e5d6419947), C(2a9747bc659d28aa), C(95c5b8cb1f5d62c), C(743bed9c)},
{C(6bc1db2c2bee5aba), C(e63b0ed635307398), C(7b2eca111f30dbbc),
C(fce254d5)},
{C(b00f898229efa508), C(83b7590ad7f6985c), C(2780e70a0592e41d),
C(e47ec9d1)},
{C(b56eb769ce0d9a8c), C(ce196117bfbcaf04), C(b26c3c3797d66165),
C(334a145c)},
{C(70c0637675b94150), C(259e1669305b0a15), C(46e1dd9fd387a58d),
C(adec1e3c)},
{C(74c0b8a6821faafe), C(abac39d7491370e7), C(faf0b2a48a4e6aed),
C(f6a9fbf8)},
{C(5fb5e48ac7b7fa4f), C(a96170f08f5acbc7), C(bbf5c63d4f52a1e5),
C(5398210c)},
};
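// Checks CityHash64, CityHash32, and the seeded variants against the
// precomputed values above for one slice of the data buffer.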
void TestUnchanging(const uint64_t* expected, int offset, int len) {
EXPECT_EQ(expected[0], CityHash64(data + offset, len));
EXPECT_EQ(expected[3], CityHash32(data + offset, len));
EXPECT_EQ(expected[1], CityHash64WithSeed(data + offset, len, kSeed0));
EXPECT_EQ(expected[2],
CityHash64WithSeeds(data + offset, len, kSeed0, kSeed1));
}
TEST(CityHashTest, Unchanging) {
setup();
int i = 0;
for (; i < kTestSize - 1; i++) {
TestUnchanging(testdata[i], i * i, i);
}
TestUnchanging(testdata[i], 0, kDataSize);
}
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/hash/internal/city.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/hash/internal/city_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
e8e70257-c930-4276-aa31-a78ba89ae267 | cpp | tensorflow/tensorflow | profiler_client | third_party/xla/xla/tsl/profiler/rpc/client/profiler_client.cc | third_party/xla/xla/tsl/profiler/rpc/client/profiler_client_test.cc | #include "xla/tsl/profiler/rpc/client/profiler_client.h"
#include <limits>
#include <memory>
#include "absl/memory/memory.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "grpcpp/grpcpp.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/types.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tsl {
namespace profiler {
namespace {
using tensorflow::MonitorRequest;
using tensorflow::MonitorResponse;
using tensorflow::NewProfileSessionRequest;
using tensorflow::NewProfileSessionResponse;
using tensorflow::ProfileRequest;
using tensorflow::ProfileResponse;
inline absl::Status FromGrpcStatus(const ::grpc::Status& s) {
return s.ok() ? absl::OkStatus()
: absl::Status(static_cast<absl::StatusCode>(s.error_code()),
s.error_message());
}
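// Creates a gRPC stub of type T over an insecure channel to service_address,
// raising the maximum receive message size so large profile responses fit.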
template <typename T>
std::unique_ptr<typename T::Stub> CreateStub(
const std::string& service_address) {
::grpc::ChannelArguments channel_args;
channel_args.SetMaxReceiveMessageSize(std::numeric_limits<int32>::max());
auto channel = ::grpc::CreateCustomChannel(
service_address, ::grpc::InsecureChannelCredentials(), channel_args);
if (!channel) {
LOG(ERROR) << "Unable to create channel" << service_address;
return nullptr;
}
return T::NewStub(channel);
}
}
absl::Status ProfileGrpc(const std::string& service_address,
const ProfileRequest& request,
ProfileResponse* response) {
::grpc::ClientContext context;
std::unique_ptr<tensorflow::grpc::ProfilerService::Stub> stub =
CreateStub<tensorflow::grpc::ProfilerService>(service_address);
TF_RETURN_IF_ERROR(
FromGrpcStatus(stub->Profile(&context, request, response)));
return absl::OkStatus();
}
absl::Status NewSessionGrpc(const std::string& service_address,
const NewProfileSessionRequest& request,
NewProfileSessionResponse* response) {
::grpc::ClientContext context;
std::unique_ptr<tensorflow::grpc::ProfileAnalysis::Stub> stub =
CreateStub<tensorflow::grpc::ProfileAnalysis>(service_address);
TF_RETURN_IF_ERROR(
FromGrpcStatus(stub->NewSession(&context, request, response)));
return absl::OkStatus();
}
absl::Status MonitorGrpc(const std::string& service_address,
const MonitorRequest& request,
MonitorResponse* response) {
::grpc::ClientContext context;
std::unique_ptr<tensorflow::grpc::ProfilerService::Stub> stub =
CreateStub<tensorflow::grpc::ProfilerService>(service_address);
TF_RETURN_IF_ERROR(
FromGrpcStatus(stub->Monitor(&context, request, response)));
return absl::OkStatus();
}
std::unique_ptr<RemoteProfilerSession> RemoteProfilerSession::Create(
const std::string& service_address, absl::Time deadline,
const ProfileRequest& profile_request) {
auto instance = absl::WrapUnique(
new RemoteProfilerSession(service_address, deadline, profile_request));
instance->ProfileAsync();
return instance;
}
RemoteProfilerSession::RemoteProfilerSession(
const std::string& service_address, absl::Time deadline,
const ProfileRequest& profile_request)
: response_(absl::make_unique<ProfileResponse>()),
service_address_(service_address),
stub_(CreateStub<tensorflow::grpc::ProfilerService>(service_address_)),
deadline_(deadline),
profile_request_(profile_request) {
response_->set_empty_trace(true);
}
RemoteProfilerSession::~RemoteProfilerSession() {
absl::Status dummy;
WaitForCompletion(dummy);
grpc_context_.TryCancel();
}
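// Issues the Profile RPC asynchronously; completion is reported on cq_ with
// &status_on_completion_ as the tag.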
void RemoteProfilerSession::ProfileAsync() {
LOG(INFO) << "Asynchronous gRPC Profile() to " << service_address_;
grpc_context_.set_deadline(absl::ToChronoTime(deadline_));
VLOG(1) << "Deadline set to " << deadline_;
rpc_ = stub_->AsyncProfile(&grpc_context_, profile_request_, &cq_);
rpc_->Finish(response_.get(), &grpc_status_,
static_cast<void*>(&status_on_completion_));
VLOG(2) << "Asynchronous gRPC Profile() issued." << absl::Now();
}
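// Blocks on the completion queue until the RPC finishes or the deadline
// expires, then hands back the response; may only be called once.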
std::unique_ptr<ProfileResponse> RemoteProfilerSession::WaitForCompletion(
absl::Status& out_status) {
if (!response_) {
out_status = errors::FailedPrecondition(
"WaitForCompletion must only be called once.");
return nullptr;
}
LOG(INFO) << "Waiting for completion.";
void* got_tag = nullptr;
bool ok = false;
bool success = cq_.Next(&got_tag, &ok);
if (!success || !ok || got_tag == nullptr) {
out_status =
errors::Internal("Missing or invalid event from completion queue.");
return nullptr;
}
VLOG(1) << "Writing out status.";
DCHECK_EQ(got_tag, &status_on_completion_);
status_on_completion_.Update(FromGrpcStatus(grpc_status_));
if (status_on_completion_.code() == error::DEADLINE_EXCEEDED) {
LOG(WARNING) << status_on_completion_;
} else if (!status_on_completion_.ok()) {
LOG(ERROR) << status_on_completion_;
}
out_status = status_on_completion_;
return std::move(response_);
}
}
} | #include "xla/tsl/profiler/rpc/client/profiler_client.h"
#include <memory>
#include <string>
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "xla/tsl/profiler/rpc/client/profiler_client_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
#include "tsl/profiler/protobuf/profiler_service.pb.h"
namespace tsl {
namespace profiler {
namespace {
using tensorflow::ProfileRequest;
using ::tsl::profiler::test::DurationApproxLess;
using ::tsl::profiler::test::DurationNear;
using ::tsl::profiler::test::StartServer;
TEST(RemoteProfilerSession, Simple) {
absl::Duration duration = absl::Milliseconds(10);
ProfileRequest request;
std::string service_addr;
auto server = StartServer(duration, &service_addr, &request);
absl::Duration grace = absl::Seconds(1);
absl::Duration max_duration = duration + grace;
absl::Time approx_start = absl::Now();
absl::Time deadline = approx_start + max_duration;
auto remote_session =
RemoteProfilerSession::Create(service_addr, deadline, request);
absl::Status status;
auto response = remote_session->WaitForCompletion(status);
absl::Duration elapsed = absl::Now() - approx_start;
EXPECT_TRUE(status.ok());
EXPECT_TRUE(response->empty_trace());
EXPECT_EQ(response->tool_data_size(), 0);
EXPECT_THAT(elapsed, DurationApproxLess(max_duration));
}
TEST(RemoteProfilerSession, WaitNotCalled) {
absl::Duration duration = absl::Milliseconds(10);
ProfileRequest request;
std::string service_addr;
auto server = StartServer(duration, &service_addr, &request);
absl::Duration grace = absl::Seconds(1);
absl::Duration max_duration = duration + grace;
absl::Time approx_start = absl::Now();
absl::Time deadline = approx_start + max_duration;
auto remote_session =
RemoteProfilerSession::Create(service_addr, deadline, request);
absl::Duration elapsed = absl::Now() - approx_start;
EXPECT_THAT(elapsed, DurationApproxLess(max_duration));
}
TEST(RemoteProfilerSession, Timeout) {
absl::Duration duration = absl::Milliseconds(10);
ProfileRequest request;
std::string service_addr;
auto server = StartServer(duration, &service_addr, &request);
auto remote_session =
RemoteProfilerSession::Create(service_addr, absl::Now(), request);
absl::Status status;
auto response = remote_session->WaitForCompletion(status);
EXPECT_TRUE(errors::IsDeadlineExceeded(status));
EXPECT_TRUE(response->empty_trace());
EXPECT_EQ(response->tool_data_size(), 0);
}
TEST(RemoteProfilerSession, LongDeadline) {
absl::Duration duration = absl::Milliseconds(10);
ProfileRequest request;
std::string service_addr;
auto server = StartServer(duration, &service_addr, &request);
absl::Time approx_start = absl::Now();
absl::Duration grace = absl::Seconds(1000);
absl::Duration max_duration = duration + grace;
const absl::Time deadline = approx_start + max_duration;
auto remote_session =
RemoteProfilerSession::Create(service_addr, deadline, request);
absl::Status status;
auto response = remote_session->WaitForCompletion(status);
absl::Duration elapsed = absl::Now() - approx_start;
EXPECT_TRUE(status.ok());
EXPECT_TRUE(response->empty_trace());
EXPECT_EQ(response->tool_data_size(), 0);
EXPECT_THAT(elapsed, DurationNear(duration));
}
TEST(RemoteProfilerSession, LongDuration) {
absl::Duration duration = absl::Seconds(3);
ProfileRequest request;
std::string service_addr;
auto server = StartServer(duration, &service_addr, &request);
absl::Time approx_start = absl::Now();
absl::Duration grace = absl::Seconds(1);
absl::Duration max_duration = duration + grace;
const absl::Time deadline = approx_start + max_duration;
auto remote_session =
RemoteProfilerSession::Create(service_addr, deadline, request);
absl::Status status;
auto response = remote_session->WaitForCompletion(status);
absl::Duration elapsed = absl::Now() - approx_start;
EXPECT_TRUE(status.ok());
EXPECT_TRUE(response->empty_trace());
EXPECT_EQ(response->tool_data_size(), 0);
EXPECT_THAT(elapsed, DurationApproxLess(max_duration));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/rpc/client/profiler_client.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/rpc/client/profiler_client_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6a135d5f-6744-4874-af6f-dff8063fac3a | cpp | google/quiche | quic_crypto_client_handshaker | quiche/quic/core/quic_crypto_client_handshaker.cc | quiche/quic/core/quic_crypto_client_handshaker_test.cc | #include "quiche/quic/core/quic_crypto_client_handshaker.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "quiche/quic/core/crypto/crypto_protocol.h"
#include "quiche/quic/core/crypto/crypto_utils.h"
#include "quiche/quic/core/quic_session.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/platform/api/quic_client_stats.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace quic {
QuicCryptoClientHandshaker::ProofVerifierCallbackImpl::
ProofVerifierCallbackImpl(QuicCryptoClientHandshaker* parent)
: parent_(parent) {}
QuicCryptoClientHandshaker::ProofVerifierCallbackImpl::
~ProofVerifierCallbackImpl() {}
void QuicCryptoClientHandshaker::ProofVerifierCallbackImpl::Run(
bool ok, const std::string& error_details,
std::unique_ptr<ProofVerifyDetails>* details) {
if (parent_ == nullptr) {
return;
}
parent_->verify_ok_ = ok;
parent_->verify_error_details_ = error_details;
parent_->verify_details_ = std::move(*details);
parent_->proof_verify_callback_ = nullptr;
parent_->DoHandshakeLoop(nullptr);
}
void QuicCryptoClientHandshaker::ProofVerifierCallbackImpl::Cancel() {
parent_ = nullptr;
}
QuicCryptoClientHandshaker::QuicCryptoClientHandshaker(
const QuicServerId& server_id, QuicCryptoClientStream* stream,
QuicSession* session, std::unique_ptr<ProofVerifyContext> verify_context,
QuicCryptoClientConfig* crypto_config,
QuicCryptoClientStream::ProofHandler* proof_handler)
: QuicCryptoHandshaker(stream, session),
stream_(stream),
session_(session),
delegate_(session),
next_state_(STATE_IDLE),
num_client_hellos_(0),
crypto_config_(crypto_config),
server_id_(server_id),
generation_counter_(0),
verify_context_(std::move(verify_context)),
proof_verify_callback_(nullptr),
proof_handler_(proof_handler),
verify_ok_(false),
proof_verify_start_time_(QuicTime::Zero()),
num_scup_messages_received_(0),
encryption_established_(false),
one_rtt_keys_available_(false),
crypto_negotiated_params_(new QuicCryptoNegotiatedParameters) {}
QuicCryptoClientHandshaker::~QuicCryptoClientHandshaker() {
if (proof_verify_callback_) {
proof_verify_callback_->Cancel();
}
}
void QuicCryptoClientHandshaker::OnHandshakeMessage(
const CryptoHandshakeMessage& message) {
QuicCryptoHandshaker::OnHandshakeMessage(message);
if (message.tag() == kSCUP) {
if (!one_rtt_keys_available()) {
stream_->OnUnrecoverableError(
QUIC_CRYPTO_UPDATE_BEFORE_HANDSHAKE_COMPLETE,
"Early SCUP disallowed");
return;
}
HandleServerConfigUpdateMessage(message);
num_scup_messages_received_++;
return;
}
if (one_rtt_keys_available()) {
stream_->OnUnrecoverableError(QUIC_CRYPTO_MESSAGE_AFTER_HANDSHAKE_COMPLETE,
"Unexpected handshake message");
return;
}
DoHandshakeLoop(&message);
}
bool QuicCryptoClientHandshaker::CryptoConnect() {
next_state_ = STATE_INITIALIZE;
DoHandshakeLoop(nullptr);
return session()->connection()->connected();
}
int QuicCryptoClientHandshaker::num_sent_client_hellos() const {
return num_client_hellos_;
}
bool QuicCryptoClientHandshaker::ResumptionAttempted() const {
QUICHE_DCHECK(false);
return false;
}
bool QuicCryptoClientHandshaker::IsResumption() const {
QUIC_BUG_IF(quic_bug_12522_1, !one_rtt_keys_available_);
return false;
}
bool QuicCryptoClientHandshaker::EarlyDataAccepted() const {
QUIC_BUG_IF(quic_bug_12522_2, !one_rtt_keys_available_);
return num_client_hellos_ == 1;
}
ssl_early_data_reason_t QuicCryptoClientHandshaker::EarlyDataReason() const {
return early_data_reason_;
}
bool QuicCryptoClientHandshaker::ReceivedInchoateReject() const {
QUIC_BUG_IF(quic_bug_12522_3, !one_rtt_keys_available_);
return num_client_hellos_ >= 3;
}
int QuicCryptoClientHandshaker::num_scup_messages_received() const {
return num_scup_messages_received_;
}
std::string QuicCryptoClientHandshaker::chlo_hash() const { return chlo_hash_; }
bool QuicCryptoClientHandshaker::encryption_established() const {
return encryption_established_;
}
bool QuicCryptoClientHandshaker::IsCryptoFrameExpectedForEncryptionLevel(
EncryptionLevel ) const {
return true;
}
EncryptionLevel
QuicCryptoClientHandshaker::GetEncryptionLevelToSendCryptoDataOfSpace(
PacketNumberSpace space) const {
if (space == INITIAL_DATA) {
return ENCRYPTION_INITIAL;
}
QUICHE_DCHECK(false);
return NUM_ENCRYPTION_LEVELS;
}
bool QuicCryptoClientHandshaker::one_rtt_keys_available() const {
return one_rtt_keys_available_;
}
const QuicCryptoNegotiatedParameters&
QuicCryptoClientHandshaker::crypto_negotiated_params() const {
return *crypto_negotiated_params_;
}
CryptoMessageParser* QuicCryptoClientHandshaker::crypto_message_parser() {
return QuicCryptoHandshaker::crypto_message_parser();
}
HandshakeState QuicCryptoClientHandshaker::GetHandshakeState() const {
return one_rtt_keys_available() ? HANDSHAKE_COMPLETE : HANDSHAKE_START;
}
void QuicCryptoClientHandshaker::OnHandshakeDoneReceived() {
QUICHE_DCHECK(false);
}
void QuicCryptoClientHandshaker::OnNewTokenReceived(
absl::string_view ) {
QUICHE_DCHECK(false);
}
size_t QuicCryptoClientHandshaker::BufferSizeLimitForLevel(
EncryptionLevel level) const {
return QuicCryptoHandshaker::BufferSizeLimitForLevel(level);
}
std::unique_ptr<QuicDecrypter>
QuicCryptoClientHandshaker::AdvanceKeysAndCreateCurrentOneRttDecrypter() {
QUICHE_DCHECK(false);
return nullptr;
}
std::unique_ptr<QuicEncrypter>
QuicCryptoClientHandshaker::CreateCurrentOneRttEncrypter() {
QUICHE_DCHECK(false);
return nullptr;
}
void QuicCryptoClientHandshaker::OnConnectionClosed(
QuicErrorCode , ConnectionCloseSource ) {
next_state_ = STATE_CONNECTION_CLOSED;
}
void QuicCryptoClientHandshaker::HandleServerConfigUpdateMessage(
const CryptoHandshakeMessage& server_config_update) {
QUICHE_DCHECK(server_config_update.tag() == kSCUP);
std::string error_details;
QuicCryptoClientConfig::CachedState* cached =
crypto_config_->LookupOrCreate(server_id_);
QuicErrorCode error = crypto_config_->ProcessServerConfigUpdate(
server_config_update, session()->connection()->clock()->WallNow(),
session()->transport_version(), chlo_hash_, cached,
crypto_negotiated_params_, &error_details);
if (error != QUIC_NO_ERROR) {
stream_->OnUnrecoverableError(
error, "Server config update invalid: " + error_details);
return;
}
QUICHE_DCHECK(one_rtt_keys_available());
if (proof_verify_callback_) {
proof_verify_callback_->Cancel();
}
next_state_ = STATE_INITIALIZE_SCUP;
DoHandshakeLoop(nullptr);
}
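// Drives the client handshake state machine. |in| is the most recently
// received handshake message, or nullptr when the loop is entered for some
// other event (e.g. connecting or proof verification completing).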
void QuicCryptoClientHandshaker::DoHandshakeLoop(
const CryptoHandshakeMessage* in) {
QuicCryptoClientConfig::CachedState* cached =
crypto_config_->LookupOrCreate(server_id_);
QuicAsyncStatus rv = QUIC_SUCCESS;
do {
QUICHE_CHECK_NE(STATE_NONE, next_state_);
const State state = next_state_;
next_state_ = STATE_IDLE;
rv = QUIC_SUCCESS;
switch (state) {
case STATE_INITIALIZE:
DoInitialize(cached);
break;
case STATE_SEND_CHLO:
DoSendCHLO(cached);
return;
case STATE_RECV_REJ:
DoReceiveREJ(in, cached);
break;
case STATE_VERIFY_PROOF:
rv = DoVerifyProof(cached);
break;
case STATE_VERIFY_PROOF_COMPLETE:
DoVerifyProofComplete(cached);
break;
case STATE_RECV_SHLO:
DoReceiveSHLO(in, cached);
break;
case STATE_IDLE:
stream_->OnUnrecoverableError(QUIC_INVALID_CRYPTO_MESSAGE_TYPE,
"Handshake in idle state");
return;
case STATE_INITIALIZE_SCUP:
DoInitializeServerConfigUpdate(cached);
break;
case STATE_NONE:
QUICHE_NOTREACHED();
return;
case STATE_CONNECTION_CLOSED:
rv = QUIC_FAILURE;
return;
}
} while (rv != QUIC_PENDING && next_state_ != STATE_NONE);
}
void QuicCryptoClientHandshaker::DoInitialize(
QuicCryptoClientConfig::CachedState* cached) {
if (!cached->IsEmpty() && !cached->signature().empty()) {
QUICHE_DCHECK(crypto_config_->proof_verifier());
proof_verify_start_time_ = session()->connection()->clock()->Now();
chlo_hash_ = cached->chlo_hash();
next_state_ = STATE_VERIFY_PROOF;
} else {
next_state_ = STATE_SEND_CHLO;
}
}
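// Sends an inchoate CHLO when no complete server config is cached (or when
// 0-RTT is disabled via the kQNZ2 option on the first attempt); otherwise
// sends a full CHLO and installs the 0-RTT crypters.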
void QuicCryptoClientHandshaker::DoSendCHLO(
QuicCryptoClientConfig::CachedState* cached) {
session()->connection()->SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
encryption_established_ = false;
if (num_client_hellos_ >= QuicCryptoClientStream::kMaxClientHellos) {
stream_->OnUnrecoverableError(
QUIC_CRYPTO_TOO_MANY_REJECTS,
absl::StrCat("More than ", QuicCryptoClientStream::kMaxClientHellos,
" rejects"));
return;
}
num_client_hellos_++;
CryptoHandshakeMessage out;
QUICHE_DCHECK(session() != nullptr);
QUICHE_DCHECK(session()->config() != nullptr);
session()->config()->ToHandshakeMessage(&out, session()->transport_version());
bool fill_inchoate_client_hello = false;
if (!cached->IsComplete(session()->connection()->clock()->WallNow())) {
early_data_reason_ = ssl_early_data_no_session_offered;
fill_inchoate_client_hello = true;
} else if (session()->config()->HasClientRequestedIndependentOption(
kQNZ2, session()->perspective()) &&
num_client_hellos_ == 1) {
early_data_reason_ = ssl_early_data_disabled;
fill_inchoate_client_hello = true;
}
if (fill_inchoate_client_hello) {
crypto_config_->FillInchoateClientHello(
server_id_, session()->supported_versions().front(), cached,
session()->connection()->random_generator(),
true, crypto_negotiated_params_, &out);
const QuicByteCount kFramingOverhead = 50;
const QuicByteCount max_packet_size =
session()->connection()->max_packet_length();
if (max_packet_size <= kFramingOverhead) {
QUIC_DLOG(DFATAL) << "max_packet_length (" << max_packet_size
<< ") has no room for framing overhead.";
stream_->OnUnrecoverableError(QUIC_INTERNAL_ERROR,
"max_packet_size too smalll");
return;
}
if (kClientHelloMinimumSize > max_packet_size - kFramingOverhead) {
QUIC_DLOG(DFATAL) << "Client hello won't fit in a single packet.";
stream_->OnUnrecoverableError(QUIC_INTERNAL_ERROR, "CHLO too large");
return;
}
next_state_ = STATE_RECV_REJ;
chlo_hash_ = CryptoUtils::HashHandshakeMessage(out, Perspective::IS_CLIENT);
session()->connection()->set_fully_pad_crypto_handshake_packets(
crypto_config_->pad_inchoate_hello());
SendHandshakeMessage(out, ENCRYPTION_INITIAL);
return;
}
std::string error_details;
QuicErrorCode error = crypto_config_->FillClientHello(
server_id_, session()->connection()->connection_id(),
session()->supported_versions().front(),
session()->connection()->version(), cached,
session()->connection()->clock()->WallNow(),
session()->connection()->random_generator(), crypto_negotiated_params_,
&out, &error_details);
if (error != QUIC_NO_ERROR) {
cached->InvalidateServerConfig();
stream_->OnUnrecoverableError(error, error_details);
return;
}
chlo_hash_ = CryptoUtils::HashHandshakeMessage(out, Perspective::IS_CLIENT);
if (cached->proof_verify_details()) {
proof_handler_->OnProofVerifyDetailsAvailable(
*cached->proof_verify_details());
}
next_state_ = STATE_RECV_SHLO;
session()->connection()->set_fully_pad_crypto_handshake_packets(
crypto_config_->pad_full_hello());
SendHandshakeMessage(out, ENCRYPTION_INITIAL);
delegate_->OnNewEncryptionKeyAvailable(
ENCRYPTION_ZERO_RTT,
std::move(crypto_negotiated_params_->initial_crypters.encrypter));
delegate_->OnNewDecryptionKeyAvailable(
ENCRYPTION_ZERO_RTT,
std::move(crypto_negotiated_params_->initial_crypters.decrypter),
true,
true);
encryption_established_ = true;
delegate_->SetDefaultEncryptionLevel(ENCRYPTION_ZERO_RTT);
if (early_data_reason_ == ssl_early_data_unknown && num_client_hellos_ > 1) {
early_data_reason_ = ssl_early_data_peer_declined;
}
}
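// Handles a REJ from the server: logs the rejection reasons, folds the
// updated server config into the cache, and either re-verifies the proof or
// retries with a new CHLO.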
void QuicCryptoClientHandshaker::DoReceiveREJ(
const CryptoHandshakeMessage* in,
QuicCryptoClientConfig::CachedState* cached) {
if (in->tag() != kREJ) {
next_state_ = STATE_NONE;
stream_->OnUnrecoverableError(QUIC_INVALID_CRYPTO_MESSAGE_TYPE,
"Expected REJ");
return;
}
QuicTagVector reject_reasons;
static_assert(sizeof(QuicTag) == sizeof(uint32_t), "header out of sync");
if (in->GetTaglist(kRREJ, &reject_reasons) == QUIC_NO_ERROR) {
uint32_t packed_error = 0;
for (size_t i = 0; i < reject_reasons.size(); ++i) {
if (reject_reasons[i] == HANDSHAKE_OK || reject_reasons[i] >= 32) {
continue;
}
HandshakeFailureReason reason =
static_cast<HandshakeFailureReason>(reject_reasons[i]);
packed_error |= 1 << (reason - 1);
}
QUIC_DVLOG(1) << "Reasons for rejection: " << packed_error;
}
delegate_->NeuterUnencryptedData();
std::string error_details;
QuicErrorCode error = crypto_config_->ProcessRejection(
*in, session()->connection()->clock()->WallNow(),
session()->transport_version(), chlo_hash_, cached,
crypto_negotiated_params_, &error_details);
if (error != QUIC_NO_ERROR) {
next_state_ = STATE_NONE;
stream_->OnUnrecoverableError(error, error_details);
return;
}
if (!cached->proof_valid()) {
if (!cached->signature().empty()) {
next_state_ = STATE_VERIFY_PROOF;
return;
}
}
next_state_ = STATE_SEND_CHLO;
}
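// Starts (possibly asynchronous) verification of the cached server proof via
// the configured ProofVerifier; the result is consumed in
// DoVerifyProofComplete().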
QuicAsyncStatus QuicCryptoClientHandshaker::DoVerifyProof(
QuicCryptoClientConfig::CachedState* cached) {
ProofVerifier* verifier = crypto_config_->proof_verifier();
QUICHE_DCHECK(verifier);
next_state_ = STATE_VERIFY_PROOF_COMPLETE;
generation_counter_ = cached->generation_counter();
ProofVerifierCallbackImpl* proof_verify_callback =
new ProofVerifierCallbackImpl(this);
verify_ok_ = false;
QuicAsyncStatus status = verifier->VerifyProof(
server_id_.host(), server_id_.port(), cached->server_config(),
session()->transport_version(), chlo_hash_, cached->certs(),
cached->cert_sct(), cached->signature(), verify_context_.get(),
&verify_error_details_, &verify_details_,
std::unique_ptr<ProofVerifierCallback>(proof_verify_callback));
switch (status) {
case QUIC_PENDING:
proof_verify_callback_ = proof_verify_callback;
QUIC_DVLOG(1) << "Doing VerifyProof";
break;
case QUIC_FAILURE:
break;
case QUIC_SUCCESS:
verify_ok_ = true;
break;
}
return status;
}
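// Consumes the proof verification result: on failure, either clears the cache
// and restarts (if no CHLO has been sent yet) or aborts the handshake; on
// success, and if the cached config has not changed underneath us, marks the
// proof valid and continues.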
void QuicCryptoClientHandshaker::DoVerifyProofComplete(
QuicCryptoClientConfig::CachedState* cached) {
if (proof_verify_start_time_.IsInitialized()) {
QUIC_CLIENT_HISTOGRAM_TIMES(
"QuicSession.VerifyProofTime.CachedServerConfig",
(session()->connection()->clock()->Now() - proof_verify_start_time_),
QuicTime::Delta::FromMilliseconds(1), QuicTime::Delta::FromSeconds(10),
50, "");
}
if (!verify_ok_) {
if (verify_details_) {
proof_handler_->OnProofVerifyDetailsAvailable(*verify_details_);
}
if (num_client_hellos_ == 0) {
cached->Clear();
next_state_ = STATE_INITIALIZE;
return;
}
next_state_ = STATE_NONE;
QUIC_CLIENT_HISTOGRAM_BOOL("QuicVerifyProofFailed.HandshakeConfirmed",
one_rtt_keys_available(), "");
stream_->OnUnrecoverableError(QUIC_PROOF_INVALID,
"Proof invalid: " + verify_error_details_);
return;
}
if (generation_counter_ != cached->generation_counter()) {
next_state_ = STATE_VERIFY_PROOF;
} else {
SetCachedProofValid(cached);
cached->SetProofVerifyDetails(verify_details_.release());
if (!one_rtt_keys_available()) {
next_state_ = STATE_SEND_CHLO;
} else {
next_state_ = STATE_NONE;
}
}
}
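// Processes the server hello: rejects SHLOs that arrive unencrypted (and REJs
// that arrive encrypted), applies the negotiated config, and installs the
// forward-secure keys that complete the handshake. A REJ at the initial
// encryption level routes back to STATE_RECV_REJ.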
void QuicCryptoClientHandshaker::DoReceiveSHLO(
const CryptoHandshakeMessage* in,
QuicCryptoClientConfig::CachedState* cached) {
next_state_ = STATE_NONE;
if (in->tag() == kREJ) {
if (session()->connection()->last_decrypted_level() != ENCRYPTION_INITIAL) {
stream_->OnUnrecoverableError(QUIC_CRYPTO_ENCRYPTION_LEVEL_INCORRECT,
"encrypted REJ message");
return;
}
next_state_ = STATE_RECV_REJ;
return;
}
if (in->tag() != kSHLO) {
stream_->OnUnrecoverableError(
QUIC_INVALID_CRYPTO_MESSAGE_TYPE,
absl::StrCat("Expected SHLO or REJ. Received: ",
QuicTagToString(in->tag())));
return;
}
if (session()->connection()->last_decrypted_level() == ENCRYPTION_INITIAL) {
stream_->OnUnrecoverableError(QUIC_CRYPTO_ENCRYPTION_LEVEL_INCORRECT,
"unencrypted SHLO message");
return;
}
if (num_client_hellos_ == 1) {
early_data_reason_ = ssl_early_data_accepted;
}
std::string error_details;
QuicErrorCode error = crypto_config_->ProcessServerHello(
*in, session()->connection()->connection_id(),
session()->connection()->version(),
session()->connection()->server_supported_versions(), cached,
crypto_negotiated_params_, &error_details);
if (error != QUIC_NO_ERROR) {
stream_->OnUnrecoverableError(error,
"Server hello invalid: " + error_details);
return;
}
error = session()->config()->ProcessPeerHello(*in, SERVER, &error_details);
if (error != QUIC_NO_ERROR) {
stream_->OnUnrecoverableError(error,
"Server hello invalid: " + error_details);
return;
}
session()->OnConfigNegotiated();
CrypterPair* crypters = &crypto_negotiated_params_->forward_secure_crypters;
delegate_->OnNewEncryptionKeyAvailable(ENCRYPTION_FORWARD_SECURE,
std::move(crypters->encrypter));
delegate_->OnNewDecryptionKeyAvailable(ENCRYPTION_FORWARD_SECURE,
std::move(crypters->decrypter),
true,
false);
one_rtt_keys_available_ = true;
delegate_->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
delegate_->DiscardOldEncryptionKey(ENCRYPTION_INITIAL);
delegate_->NeuterHandshakeData();
}
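// Begins handling of a server config update (SCUP) received after the
// handshake: re-verifies the proof if a signed config is cached, otherwise
// ignores the update.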
void QuicCryptoClientHandshaker::DoInitializeServerConfigUpdate(
QuicCryptoClientConfig::CachedState* cached) {
bool update_ignored = false;
if (!cached->IsEmpty() && !cached->signature().empty()) {
QUICHE_DCHECK(crypto_config_->proof_verifier());
next_state_ = STATE_VERIFY_PROOF;
} else {
update_ignored = true;
next_state_ = STATE_NONE;
}
QUIC_CLIENT_HISTOGRAM_COUNTS("QuicNumServerConfig.UpdateMessagesIgnored",
update_ignored, 1, 1000000, 50, "");
}
void QuicCryptoClientHandshaker::SetCachedProofValid(
QuicCryptoClientConfig::CachedState* cached) {
cached->SetProofValid();
proof_handler_->OnProofValid(*cached);
}
} | #include "quiche/quic/core/quic_crypto_client_handshaker.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/string_view.h"
#include "quiche/quic/core/proto/crypto_server_config_proto.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
namespace quic::test {
namespace {
class TestProofHandler : public QuicCryptoClientStream::ProofHandler {
public:
~TestProofHandler() override {}
  void OnProofValid(
      const QuicCryptoClientConfig::CachedState& /*cached*/) override {}
  void OnProofVerifyDetailsAvailable(
      const ProofVerifyDetails& /*verify_details*/) override {}
};
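// A ProofVerifier that accepts every proof synchronously, so these tests
// never block on certificate verification.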
class InsecureProofVerifier : public ProofVerifier {
public:
InsecureProofVerifier() {}
~InsecureProofVerifier() override {}
  QuicAsyncStatus VerifyProof(
      const std::string& /*hostname*/, const uint16_t /*port*/,
      const std::string& /*server_config*/,
      QuicTransportVersion /*transport_version*/,
      absl::string_view /*chlo_hash*/,
      const std::vector<std::string>& /*certs*/,
      const std::string& /*cert_sct*/, const std::string& /*signature*/,
      const ProofVerifyContext* /*context*/, std::string* /*error_details*/,
      std::unique_ptr<ProofVerifyDetails>* /*details*/,
      std::unique_ptr<ProofVerifierCallback> /*callback*/) override {
return QUIC_SUCCESS;
}
  QuicAsyncStatus VerifyCertChain(
      const std::string& /*hostname*/, const uint16_t /*port*/,
      const std::vector<std::string>& /*certs*/,
      const std::string& /*ocsp_response*/, const std::string& /*cert_sct*/,
      const ProofVerifyContext* /*context*/, std::string* /*error_details*/,
      std::unique_ptr<ProofVerifyDetails>* /*details*/, uint8_t* /*out_alert*/,
      std::unique_ptr<ProofVerifierCallback> /*callback*/) override {
return QUIC_SUCCESS;
}
std::unique_ptr<ProofVerifyContext> CreateDefaultContext() override {
return nullptr;
}
};
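// A ProofSource that hands back canned certificates and signatures; enough to
// exercise the client handshake without real crypto material.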
class DummyProofSource : public ProofSource {
public:
DummyProofSource() {}
~DummyProofSource() override {}
void GetProof(const QuicSocketAddress& server_address,
const QuicSocketAddress& client_address,
const std::string& hostname,
                const std::string& /*server_config*/,
                QuicTransportVersion /*transport_version*/,
                absl::string_view /*chlo_hash*/,
std::unique_ptr<Callback> callback) override {
bool cert_matched_sni;
quiche::QuicheReferenceCountedPointer<ProofSource::Chain> chain =
GetCertChain(server_address, client_address, hostname,
&cert_matched_sni);
QuicCryptoProof proof;
proof.signature = "Dummy signature";
proof.leaf_cert_scts = "Dummy timestamp";
proof.cert_matched_sni = cert_matched_sni;
callback->Run(true, chain, proof, nullptr);
}
  quiche::QuicheReferenceCountedPointer<Chain> GetCertChain(
      const QuicSocketAddress& /*server_address*/,
      const QuicSocketAddress& /*client_address*/,
      const std::string& /*hostname*/, bool* /*cert_matched_sni*/) override {
std::vector<std::string> certs;
certs.push_back("Dummy cert");
return quiche::QuicheReferenceCountedPointer<ProofSource::Chain>(
new ProofSource::Chain(certs));
}
  void ComputeTlsSignature(
      const QuicSocketAddress& /*server_address*/,
      const QuicSocketAddress& /*client_address*/,
      const std::string& /*hostname*/, uint16_t /*signature_algorithm*/,
      absl::string_view /*in*/,
std::unique_ptr<SignatureCallback> callback) override {
callback->Run(true, "Dummy signature", nullptr);
}
absl::InlinedVector<uint16_t, 8> SupportedTlsSignatureAlgorithms()
const override {
return {};
}
TicketCrypter* GetTicketCrypter() override { return nullptr; }
};
class Handshaker : public QuicCryptoClientHandshaker {
public:
Handshaker(const QuicServerId& server_id, QuicCryptoClientStream* stream,
QuicSession* session,
std::unique_ptr<ProofVerifyContext> verify_context,
QuicCryptoClientConfig* crypto_config,
QuicCryptoClientStream::ProofHandler* proof_handler)
: QuicCryptoClientHandshaker(server_id, stream, session,
std::move(verify_context), crypto_config,
proof_handler) {}
void DoSendCHLOTest(QuicCryptoClientConfig::CachedState* cached) {
QuicCryptoClientHandshaker::DoSendCHLO(cached);
}
};
class QuicCryptoClientHandshakerTest
: public QuicTestWithParam<ParsedQuicVersion> {
protected:
QuicCryptoClientHandshakerTest()
: version_(GetParam()),
proof_handler_(),
helper_(),
alarm_factory_(),
server_id_("host", 123),
connection_(new test::MockQuicConnection(
&helper_, &alarm_factory_, Perspective::IS_CLIENT, {version_})),
session_(connection_, false),
crypto_client_config_(std::make_unique<InsecureProofVerifier>()),
client_stream_(
new QuicCryptoClientStream(server_id_, &session_, nullptr,
&crypto_client_config_, &proof_handler_,
false)),
handshaker_(server_id_, client_stream_, &session_, nullptr,
&crypto_client_config_, &proof_handler_),
state_() {
session_.SetCryptoStream(client_stream_);
session_.Initialize();
}
void InitializeServerParametersToEnableFullHello() {
QuicCryptoServerConfig::ConfigOptions options;
QuicServerConfigProtobuf config = QuicCryptoServerConfig::GenerateConfig(
helper_.GetRandomGenerator(), helper_.GetClock(), options);
state_.Initialize(
config.config(), "sourcetoken", std::vector<std::string>{"Dummy cert"},
"", "chlo_hash", "signature", helper_.GetClock()->WallNow(),
helper_.GetClock()->WallNow().Add(QuicTime::Delta::FromSeconds(30)));
state_.SetProofValid();
}
ParsedQuicVersion version_;
TestProofHandler proof_handler_;
test::MockQuicConnectionHelper helper_;
test::MockAlarmFactory alarm_factory_;
QuicServerId server_id_;
test::MockQuicConnection* connection_;
test::MockQuicSession session_;
QuicCryptoClientConfig crypto_client_config_;
QuicCryptoClientStream* client_stream_;
Handshaker handshaker_;
QuicCryptoClientConfig::CachedState state_;
};
INSTANTIATE_TEST_SUITE_P(
QuicCryptoClientHandshakerTests, QuicCryptoClientHandshakerTest,
::testing::ValuesIn(AllSupportedVersionsWithQuicCrypto()),
::testing::PrintToStringParamName());
TEST_P(QuicCryptoClientHandshakerTest, TestSendFullPaddingInInchoateHello) {
handshaker_.DoSendCHLOTest(&state_);
EXPECT_TRUE(connection_->fully_pad_during_crypto_handshake());
}
TEST_P(QuicCryptoClientHandshakerTest, TestDisabledPaddingInInchoateHello) {
crypto_client_config_.set_pad_inchoate_hello(false);
handshaker_.DoSendCHLOTest(&state_);
EXPECT_FALSE(connection_->fully_pad_during_crypto_handshake());
}
TEST_P(QuicCryptoClientHandshakerTest,
TestPaddingInFullHelloEvenIfInchoateDisabled) {
crypto_client_config_.set_pad_inchoate_hello(false);
InitializeServerParametersToEnableFullHello();
handshaker_.DoSendCHLOTest(&state_);
EXPECT_TRUE(connection_->fully_pad_during_crypto_handshake());
}
TEST_P(QuicCryptoClientHandshakerTest, TestNoPaddingInFullHelloWhenDisabled) {
crypto_client_config_.set_pad_full_hello(false);
InitializeServerParametersToEnableFullHello();
handshaker_.DoSendCHLOTest(&state_);
EXPECT_FALSE(connection_->fully_pad_during_crypto_handshake());
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_crypto_client_handshaker.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_crypto_client_handshaker_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
149c0591-4625-43a9-9386-2d63b5cf6225 | cpp | tensorflow/tensorflow | edit_distance | tensorflow/core/lib/gtl/edit_distance.h | tensorflow/core/lib/gtl/edit_distance_test.cc | #ifndef TENSORFLOW_CORE_LIB_GTL_EDIT_DISTANCE_H_
#define TENSORFLOW_CORE_LIB_GTL_EDIT_DISTANCE_H_
#include <numeric>
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
namespace tensorflow {
namespace gtl {
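// Computes the Levenshtein (edit) distance between 's' and 't' under the
// element comparator 'cmp', keeping only a single row of the dynamic
// programming table so scratch space is O(min(|s|, |t|)).
//
// Example (illustrative):
//   int64_t d = LevenshteinDistance(std::string("kitten"),
//                                   std::string("sitting"),
//                                   std::equal_to<char>());  // d == 3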
template <typename T, typename Cmp>
inline int64_t LevenshteinDistance(const gtl::ArraySlice<T> s,
const gtl::ArraySlice<T> t, const Cmp& cmp) {
const int64_t s_size = s.size();
const int64_t t_size = t.size();
if (t_size > s_size) return LevenshteinDistance(t, s, cmp);
const T* s_data = s.data();
const T* t_data = t.data();
if (t_size == 0) return s_size;
if (s == t) return 0;
absl::InlinedVector<int64_t, 32UL> scratch_holder(t_size);
int64_t* scratch = scratch_holder.data();
for (size_t j = 1; j < t_size; ++j) scratch[j - 1] = j;
for (size_t i = 1; i <= s_size; ++i) {
int substitution_base_cost = i - 1;
int insertion_cost = i + 1;
for (size_t j = 1; j <= t_size; ++j) {
const int replacement_cost = cmp(s_data[i - 1], t_data[j - 1]) ? 0 : 1;
const int substitution_cost = substitution_base_cost + replacement_cost;
const int deletion_cost = scratch[j - 1] + 1;
const int cheapest =
std::min(deletion_cost, std::min(insertion_cost, substitution_cost));
substitution_base_cost = scratch[j - 1];
scratch[j - 1] = cheapest;
insertion_cost = cheapest + 1;
}
}
return scratch[t_size - 1];
}
template <typename Container1, typename Container2, typename Cmp>
inline int64_t LevenshteinDistance(const Container1& s, const Container2& t,
const Cmp& cmp) {
return LevenshteinDistance(
gtl::ArraySlice<typename Container1::value_type>(s.data(), s.size()),
gtl::ArraySlice<typename Container1::value_type>(t.data(), t.size()),
cmp);
}
}
}
#endif | #include "tensorflow/core/lib/gtl/edit_distance.h"
#include <cctype>
#include <vector>
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace gtl {
namespace {
class LevenshteinDistanceTest : public ::testing::Test {
protected:
std::vector<char> empty_;
std::string s1_;
std::string s1234_;
std::string s567_;
std::string kilo_;
std::string kilogram_;
std::string mother_;
std::string grandmother_;
std::string lower_;
std::string upper_;
std::vector<char> ebab_;
std::vector<char> abcd_;
void SetUp() override {
s1_ = "1";
s1234_ = "1234";
s567_ = "567";
kilo_ = "kilo";
kilogram_ = "kilogram";
mother_ = "mother";
grandmother_ = "grandmother";
lower_ = "lower case";
upper_ = "UPPER case";
ebab_ = {'e', 'b', 'a', 'b'};
abcd_ = {'a', 'b', 'c', 'd'};
}
};
TEST_F(LevenshteinDistanceTest, BothEmpty) {
ASSERT_EQ(LevenshteinDistance(empty_, empty_, std::equal_to<char>()), 0);
}
TEST_F(LevenshteinDistanceTest, Symmetry) {
ASSERT_EQ(LevenshteinDistance(ebab_, abcd_, std::equal_to<char>()), 3);
ASSERT_EQ(LevenshteinDistance(abcd_, ebab_, std::equal_to<char>()), 3);
}
TEST_F(LevenshteinDistanceTest, OneEmpty) {
ASSERT_EQ(LevenshteinDistance(s1234_, empty_, std::equal_to<char>()), 4);
ASSERT_EQ(LevenshteinDistance(empty_, s567_, std::equal_to<char>()), 3);
}
TEST_F(LevenshteinDistanceTest, SingleElement) {
ASSERT_EQ(LevenshteinDistance(s1234_, s1_, std::equal_to<char>()), 3);
ASSERT_EQ(LevenshteinDistance(s1_, s1234_, std::equal_to<char>()), 3);
}
TEST_F(LevenshteinDistanceTest, Prefix) {
ASSERT_EQ(LevenshteinDistance(kilo_, kilogram_, std::equal_to<char>()), 4);
ASSERT_EQ(LevenshteinDistance(kilogram_, kilo_, std::equal_to<char>()), 4);
}
TEST_F(LevenshteinDistanceTest, Suffix) {
ASSERT_EQ(LevenshteinDistance(mother_, grandmother_, std::equal_to<char>()),
5);
ASSERT_EQ(LevenshteinDistance(grandmother_, mother_, std::equal_to<char>()),
5);
}
TEST_F(LevenshteinDistanceTest, DifferentComparisons) {
ASSERT_EQ(LevenshteinDistance(lower_, upper_, std::equal_to<char>()), 5);
ASSERT_EQ(LevenshteinDistance(upper_, lower_, std::equal_to<char>()), 5);
ASSERT_EQ(
LevenshteinDistance(absl::Span<const char>(lower_.data(), lower_.size()),
absl::Span<const char>(upper_.data(), upper_.size()),
std::equal_to<char>()),
5);
auto no_case_cmp = [](char c1, char c2) {
return std::tolower(c1) == std::tolower(c2);
};
ASSERT_EQ(LevenshteinDistance(lower_, upper_, no_case_cmp), 3);
ASSERT_EQ(LevenshteinDistance(upper_, lower_, no_case_cmp), 3);
}
TEST_F(LevenshteinDistanceTest, Vectors) {
ASSERT_EQ(
LevenshteinDistance(std::string("algorithm"), std::string("altruistic"),
std::equal_to<char>()),
6);
}
static void BM_EditDistanceHelper(::testing::benchmark::State& state, int len,
bool completely_different) {
string a =
"The quick brown fox jumped over the lazy dog and on and on and on"
" Every good boy deserves fudge. In fact, this is a very long sentence "
" w/many bytes..";
while (a.size() < static_cast<size_t>(len)) {
a = a + a;
}
string b = a;
if (completely_different) {
for (size_t i = 0; i < b.size(); i++) {
b[i]++;
}
}
for (auto s : state) {
LevenshteinDistance(absl::Span<const char>(a.data(), len),
absl::Span<const char>(b.data(), len),
std::equal_to<char>());
}
}
static void BM_EditDistanceSame(::testing::benchmark::State& state) {
BM_EditDistanceHelper(state, state.range(0), false);
}
static void BM_EditDistanceDiff(::testing::benchmark::State& state) {
BM_EditDistanceHelper(state, state.range(0), true);
}
BENCHMARK(BM_EditDistanceSame)->Arg(5);
BENCHMARK(BM_EditDistanceSame)->Arg(50);
BENCHMARK(BM_EditDistanceSame)->Arg(200);
BENCHMARK(BM_EditDistanceSame)->Arg(1000);
BENCHMARK(BM_EditDistanceDiff)->Arg(5);
BENCHMARK(BM_EditDistanceDiff)->Arg(50);
BENCHMARK(BM_EditDistanceDiff)->Arg(200);
BENCHMARK(BM_EditDistanceDiff)->Arg(1000);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/gtl/edit_distance.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/gtl/edit_distance_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
da9cd0b1-a718-4bc5-a59f-dcc55283c76a | cpp | tensorflow/tensorflow | collective_ops_utils | third_party/xla/xla/service/collective_ops_utils.cc | third_party/xla/xla/service/collective_ops_utils_test.cc | #include "xla/service/collective_ops_utils.h"
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/global_device_id.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/pattern_matcher.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
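// Maps an HLO instruction to the collective reduction it implements, if any:
// add -> SUM, multiply -> PRODUCT, minimum -> MIN, maximum -> MAX, and
// and/or on PRED map to MIN/MAX respectively.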
std::optional<ReductionKind> MatchReductionInstruction(
const HloInstruction* hlo) {
PrimitiveType type = hlo->shape().element_type();
switch (hlo->opcode()) {
case HloOpcode::kAdd:
return ReductionKind::SUM;
case HloOpcode::kMultiply:
return ReductionKind::PRODUCT;
case HloOpcode::kMinimum:
return ReductionKind::MIN;
case HloOpcode::kMaximum:
return ReductionKind::MAX;
case HloOpcode::kAnd:
return type == PRED ? std::optional<ReductionKind>(ReductionKind::MIN)
: std::nullopt;
case HloOpcode::kOr:
return type == PRED ? std::optional<ReductionKind>(ReductionKind::MAX)
: std::nullopt;
default:
return std::nullopt;
}
}
std::optional<ReductionKind> MatchReductionComputation(
const HloComputation* computation) {
namespace m = match;
const HloInstruction* root = computation->root_instruction();
auto kind = MatchReductionInstruction(root);
if (kind && !Match(root, m::Op()
.WithBinaryOperandsAnyOrder(m::Parameter(0),
m::Parameter(1))
.WithShape(m::Shape().IsEffectiveScalar()))) {
kind = std::nullopt;
}
return kind;
}
std::optional<Literal> GetReductionIdentity(ReductionKind kind,
PrimitiveType type) {
switch (kind) {
case ReductionKind::SUM:
return LiteralUtil::Zero(type);
case ReductionKind::PRODUCT:
return LiteralUtil::One(type);
case ReductionKind::MIN:
return LiteralUtil::MaxValue(type);
case ReductionKind::MAX:
return LiteralUtil::MinValue(type);
default:
return std::nullopt;
}
}
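// Returns the IDs that participate in the same group as 'current_id'. An
// empty 'groups' span means all IDs in [0, total_participant_count)
// participate together; otherwise 'current_id' must appear in exactly one of
// the given replica groups.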
absl::StatusOr<std::vector<int>> GetParticipatingIDs(
CollectiveOpGroupMode group_mode, int current_id,
std::optional<int> total_participant_count,
absl::Span<const ReplicaGroup> groups) {
if (groups.empty()) {
TF_RET_CHECK(total_participant_count.has_value());
std::vector<int> all_participants(*total_participant_count);
absl::c_iota(all_participants, 0);
return all_participants;
}
auto group_formatter = [](std::string* out, const ReplicaGroup& group) {
out->append("[");
out->append(absl::StrJoin(group.replica_ids(), ", "));
out->append("]");
};
std::optional<ReplicaGroup> group;
for (const ReplicaGroup& g : groups) {
if (absl::c_linear_search(g.replica_ids(), current_id)) {
TF_RET_CHECK(!group.has_value())
<< "Replica ID " << current_id << " appears twice in replica groups"
<< "; group_mode=" << CollectiveOpGroupModeToString(group_mode)
<< "; groups_size=" << groups.size()
<< "; groups= " << absl::StrJoin(groups, ", ", group_formatter);
group = g;
}
}
TF_RET_CHECK(group.has_value())
<< "Replica ID " << current_id << " doesn't appear in replica groups"
<< "; group_mode=" << CollectiveOpGroupModeToString(group_mode)
<< "; groups_size=" << groups.size()
<< "; groups= " << absl::StrJoin(groups, ", ", group_formatter);
return std::vector<int>(group->replica_ids().begin(),
group->replica_ids().end());
}
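// Derives a collective's group mode from its channel_id and
// use_global_device_ids attributes:
//   no channel_id, use_global_device_ids absent/false -> kCrossReplica
//   channel_id,    use_global_device_ids absent       -> kCrossPartition
//   channel_id,    use_global_device_ids == false     -> kCrossReplicaAndPartition
//   channel_id,    use_global_device_ids == true      -> kFlattenedID
// The remaining combination (no channel_id, use_global_device_ids == true)
// is invalid.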
absl::StatusOr<CollectiveOpGroupMode> GetCollectiveOpGroupMode(
bool has_channel_id, std::optional<bool> use_global_device_ids) {
if (!has_channel_id) {
if (!use_global_device_ids.has_value() || !*use_global_device_ids) {
return CollectiveOpGroupMode::kCrossReplica;
} else {
return InvalidArgument(
"Invalid combination of has_channel_id and use_global_device_ids");
}
} else {
if (!use_global_device_ids.has_value()) {
return CollectiveOpGroupMode::kCrossPartition;
} else if (!*use_global_device_ids) {
return CollectiveOpGroupMode::kCrossReplicaAndPartition;
} else {
return CollectiveOpGroupMode::kFlattenedID;
}
}
}
absl::string_view CollectiveOpGroupModeToString(
CollectiveOpGroupMode group_mode) {
switch (group_mode) {
case CollectiveOpGroupMode::kCrossReplica:
return "kCrossReplica";
case CollectiveOpGroupMode::kCrossPartition:
return "kCrossPartition";
case CollectiveOpGroupMode::kCrossReplicaAndPartition:
return "kCrossReplicaAndPartition";
case CollectiveOpGroupMode::kFlattenedID:
return "kFlattenedID";
}
}
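// Expands the replica groups into concrete groups of GlobalDeviceIds using
// the device assignment, interpreting group members according to the group
// mode (replica IDs, partition IDs, or flattened IDs). An empty group list
// means one group of all replicas (or all partitions for kCrossPartition);
// kFlattenedID requires explicit groups.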
absl::StatusOr<std::vector<std::vector<GlobalDeviceId>>>
GetParticipatingDevicesGroups(const DeviceAssignment& device_assignment,
absl::Span<const ReplicaGroup> replica_groups,
CollectiveOpGroupMode group_mode) {
int replica_count = device_assignment.replica_count();
int partition_count = device_assignment.computation_count();
std::vector<ReplicaGroup> participating_replica_groups =
SpanToVector(replica_groups);
if (replica_groups.empty()) {
if (group_mode == CollectiveOpGroupMode::kFlattenedID) {
TF_RET_CHECK(!replica_groups.empty())
<< "replica groups cannot be empty for kFlattenedID mode";
}
int total_participant_count;
if (group_mode == CollectiveOpGroupMode::kCrossPartition) {
total_participant_count = partition_count;
} else {
total_participant_count = replica_count;
}
ReplicaGroup replica_group = ReplicaGroup();
for (int id = 0; id < total_participant_count; id++) {
replica_group.add_replica_ids(id);
}
participating_replica_groups.push_back(replica_group);
}
std::vector<std::vector<GlobalDeviceId>> groups;
switch (group_mode) {
case CollectiveOpGroupMode::kCrossReplica: {
for (const auto& replica_group : participating_replica_groups) {
for (int partition_id = 0; partition_id < partition_count;
partition_id++) {
std::vector<GlobalDeviceId> participants;
participants.reserve(replica_group.replica_ids().size());
for (int replica_id : replica_group.replica_ids()) {
participants.emplace_back(
device_assignment(replica_id, partition_id));
}
groups.push_back(participants);
}
}
return groups;
}
case CollectiveOpGroupMode::kCrossPartition: {
for (const auto& replica_group : participating_replica_groups) {
for (int replica_id = 0; replica_id < replica_count; replica_id++) {
std::vector<GlobalDeviceId> participants;
participants.reserve(replica_group.replica_ids().size());
for (int partition_id : replica_group.replica_ids()) {
participants.emplace_back(
device_assignment(replica_id, partition_id));
}
groups.push_back(participants);
}
}
return groups;
}
case CollectiveOpGroupMode::kCrossReplicaAndPartition: {
for (const auto& replica_group : participating_replica_groups) {
std::vector<GlobalDeviceId> participants;
participants.reserve(replica_group.replica_ids().size() *
partition_count);
for (int replica_id : replica_group.replica_ids()) {
for (int partition_id = 0; partition_id < partition_count;
partition_id++) {
participants.emplace_back(
device_assignment(replica_id, partition_id));
}
}
groups.push_back(participants);
}
return groups;
}
case CollectiveOpGroupMode::kFlattenedID: {
for (const auto& replica_group : participating_replica_groups) {
std::vector<GlobalDeviceId> participants;
participants.reserve(replica_group.replica_ids().size());
for (int flattened_id : replica_group.replica_ids()) {
int replica_id = flattened_id / partition_count;
int partition_id = flattened_id % partition_count;
participants.emplace_back(
device_assignment(replica_id, partition_id));
}
groups.push_back(participants);
}
return groups;
}
}
}
absl::StatusOr<std::vector<ReplicaGroup>> GetParticipatingFlattenedIdGroups(
const DeviceAssignment& device_assignment,
absl::Span<const ReplicaGroup> replica_groups,
CollectiveOpGroupMode group_mode) {
absl::flat_hash_map<GlobalDeviceId, int64_t> device_id_to_flattened_id;
for (int r = 0; r < device_assignment.replica_count(); ++r) {
for (int c = 0; c < device_assignment.computation_count(); ++c) {
GlobalDeviceId device_id = GlobalDeviceId(device_assignment(r, c));
int64_t flattened_id = r * device_assignment.computation_count() + c;
device_id_to_flattened_id[device_id] = flattened_id;
}
}
std::vector<ReplicaGroup> flattened_id_groups;
TF_ASSIGN_OR_RETURN(std::vector<std::vector<GlobalDeviceId>> device_groups,
GetParticipatingDevicesGroups(
device_assignment, replica_groups, group_mode));
for (const auto& device_group : device_groups) {
ReplicaGroup flattened_id_group;
flattened_id_group.mutable_replica_ids()->Reserve(device_group.size());
for (const GlobalDeviceId& device_id : device_group) {
flattened_id_group.add_replica_ids(device_id_to_flattened_id[device_id]);
}
flattened_id_groups.push_back(flattened_id_group);
}
return flattened_id_groups;
}
absl::StatusOr<std::vector<ReplicaGroup>> GetParticipatingFlattenedIdGroups(
absl::Span<const ReplicaGroup> replica_groups,
CollectiveOpGroupMode replica_group_mode, int replica_count,
int partition_count) {
std::vector<ReplicaGroup> filled_empty_replica_group;
absl::Span<const ReplicaGroup> original_replica_groups = replica_groups;
std::vector<ReplicaGroup> flattened_replica_groups;
if (replica_groups.empty()) {
filled_empty_replica_group.emplace_back();
const int64_t id_count =
replica_group_mode == CollectiveOpGroupMode::kCrossPartition
? partition_count
: replica_count;
for (int i = 0; i < id_count; ++i) {
filled_empty_replica_group.back().add_replica_ids(i);
}
original_replica_groups = filled_empty_replica_group;
}
if (replica_group_mode == CollectiveOpGroupMode::kFlattenedID) {
flattened_replica_groups.insert(flattened_replica_groups.end(),
original_replica_groups.begin(),
original_replica_groups.end());
} else if (replica_group_mode == CollectiveOpGroupMode::kCrossReplica) {
flattened_replica_groups.resize(original_replica_groups.size() *
partition_count);
for (int64_t i = 0, current_group_offset = 0;
i < original_replica_groups.size();
++i, current_group_offset += partition_count) {
for (int64_t replica_id : original_replica_groups.at(i).replica_ids()) {
for (int64_t partition_id = 0; partition_id < partition_count;
++partition_id) {
const int64_t flattened_id =
replica_id * partition_count + partition_id;
flattened_replica_groups[current_group_offset + partition_id]
.add_replica_ids(flattened_id);
}
}
}
} else if (replica_group_mode == CollectiveOpGroupMode::kCrossPartition) {
flattened_replica_groups.resize(original_replica_groups.size() *
replica_count);
for (int64_t i = 0, current_group_offset = 0;
i < original_replica_groups.size();
++i, current_group_offset += replica_count) {
for (int64_t partition_id : original_replica_groups.at(i).replica_ids()) {
for (int64_t replica_id = 0; replica_id < replica_count; ++replica_id) {
const int64_t flattened_id =
replica_id * partition_count + partition_id;
flattened_replica_groups[current_group_offset + replica_id]
.add_replica_ids(flattened_id);
}
}
}
} else {
CHECK(replica_group_mode ==
CollectiveOpGroupMode::kCrossReplicaAndPartition);
flattened_replica_groups.resize(original_replica_groups.size());
for (int64_t i = 0; i < original_replica_groups.size(); ++i) {
for (int64_t replica_id : original_replica_groups.at(i).replica_ids()) {
for (int64_t partition_id = 0; partition_id < partition_count;
++partition_id) {
const int64_t flattened_id =
replica_id * partition_count + partition_id;
flattened_replica_groups[i].add_replica_ids(flattened_id);
}
}
}
}
return flattened_replica_groups;
}
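// Returns the GlobalDeviceIds that participate in the same collective group
// as 'device_id', resolving replica, partition, or flattened IDs through the
// device assignment according to 'group_mode'.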
absl::StatusOr<std::vector<GlobalDeviceId>> GetParticipatingDevices(
GlobalDeviceId device_id, const DeviceAssignment& device_assignment,
absl::Span<const ReplicaGroup> replica_groups,
CollectiveOpGroupMode group_mode) {
int replica_count = device_assignment.replica_count();
int partition_count = device_assignment.computation_count();
TF_ASSIGN_OR_RETURN(const DeviceAssignment::LogicalID logical_id,
device_assignment.LogicalIdForDevice(device_id));
int current_replica_id = logical_id.replica_id;
int current_partition_id = logical_id.computation_id;
TF_RET_CHECK(0 <= current_replica_id && current_replica_id < replica_count)
<< current_replica_id << " " << replica_count;
TF_RET_CHECK(0 <= current_partition_id &&
current_partition_id < partition_count)
<< current_partition_id << " " << partition_count;
std::vector<GlobalDeviceId> participants;
switch (group_mode) {
case CollectiveOpGroupMode::kCrossReplica: {
TF_ASSIGN_OR_RETURN(std::vector<int> participating_replicas,
GetParticipatingIDs(group_mode, current_replica_id,
replica_count, replica_groups));
participants.reserve(participating_replicas.size());
for (int replica_id : participating_replicas) {
TF_RET_CHECK(0 <= replica_id && replica_id < replica_count)
<< replica_id << " " << replica_count;
participants.emplace_back(
device_assignment(replica_id, current_partition_id));
}
return participants;
}
case CollectiveOpGroupMode::kCrossPartition: {
TF_ASSIGN_OR_RETURN(std::vector<int> participating_partitions,
GetParticipatingIDs(group_mode, current_partition_id,
partition_count, replica_groups));
participants.reserve(participating_partitions.size());
for (int partition_id : participating_partitions) {
TF_RET_CHECK(0 <= partition_id && partition_id < partition_count)
<< partition_id << " " << partition_count;
participants.emplace_back(
device_assignment(current_replica_id, partition_id));
}
return participants;
}
case CollectiveOpGroupMode::kCrossReplicaAndPartition: {
TF_ASSIGN_OR_RETURN(std::vector<int> participating_replicas,
GetParticipatingIDs(group_mode, current_replica_id,
replica_count, replica_groups));
participants.reserve(participating_replicas.size() * partition_count);
for (int replica_id : participating_replicas) {
TF_RET_CHECK(0 <= replica_id && replica_id < replica_count)
<< replica_id << " " << replica_count;
for (int partition_id = 0; partition_id < partition_count;
++partition_id) {
participants.emplace_back(
device_assignment(replica_id, partition_id));
}
}
return participants;
}
case CollectiveOpGroupMode::kFlattenedID: {
TF_RET_CHECK(!replica_groups.empty())
<< "replica groups cannot be empty for kFlattenedID mode";
int current_flattened_id =
current_replica_id * partition_count + current_partition_id;
TF_ASSIGN_OR_RETURN(
std::vector<int> participating_flattened_ids,
GetParticipatingIDs(group_mode, current_flattened_id,
                              /*total_participant_count=*/std::nullopt,
replica_groups));
participants.reserve(participating_flattened_ids.size());
for (int flattened_id : participating_flattened_ids) {
int replica_id = flattened_id / partition_count;
TF_RET_CHECK(0 <= replica_id && replica_id < replica_count)
<< replica_id << " " << replica_count;
int partition_id = flattened_id % partition_count;
participants.emplace_back(device_assignment(replica_id, partition_id));
}
return participants;
}
}
}
absl::StatusOr<std::vector<int64_t>> GetPariticipantCountsForReplicaGroups(
int64_t num_replicas, int64_t num_partitions,
absl::Span<const ReplicaGroup> replica_groups,
CollectiveOpGroupMode group_mode) {
std::vector<int64_t> participant_counts;
std::vector<ReplicaGroup> participating_replica_groups =
SpanToVector(replica_groups);
if (replica_groups.empty()) {
if (group_mode == CollectiveOpGroupMode::kFlattenedID) {
TF_RET_CHECK(!replica_groups.empty())
<< "replica groups cannot be empty for kFlattenedID mode";
}
int total_participant_count;
if (group_mode == CollectiveOpGroupMode::kCrossPartition) {
total_participant_count = num_partitions;
} else {
total_participant_count = num_replicas;
}
ReplicaGroup replica_group = ReplicaGroup();
for (int id = 0; id < total_participant_count; id++) {
replica_group.add_replica_ids(id);
}
participating_replica_groups.push_back(replica_group);
}
switch (group_mode) {
case CollectiveOpGroupMode::kCrossReplica: {
for (const auto& replica_group : participating_replica_groups) {
for (int partition_id = 0; partition_id < num_partitions;
++partition_id) {
participant_counts.push_back(replica_group.replica_ids().size());
}
}
return participant_counts;
}
case CollectiveOpGroupMode::kCrossPartition: {
for (const auto& replica_group : participating_replica_groups) {
participant_counts.push_back(replica_group.replica_ids().size());
}
return participant_counts;
}
case CollectiveOpGroupMode::kCrossReplicaAndPartition: {
for (const auto& replica_group : participating_replica_groups) {
participant_counts.push_back(replica_group.replica_ids().size() *
num_partitions);
}
return participant_counts;
}
case CollectiveOpGroupMode::kFlattenedID: {
for (const auto& replica_group : participating_replica_groups) {
participant_counts.push_back(replica_group.replica_ids().size());
}
return participant_counts;
}
}
}
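// Returns true if 'second' is the transpose of 'first': the j-th member of
// group i in 'first' equals the i-th member of group j in 'second'.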
bool ReplicaGroupsOrthogonal(absl::Span<const ReplicaGroup> first,
absl::Span<const ReplicaGroup> second) {
if (first.size() != second[0].replica_ids_size()) {
return false;
}
if (first[0].replica_ids_size() != second.size()) {
return false;
}
for (int64_t i = 0; i < first.size(); ++i) {
for (int64_t j = 0; j < first[i].replica_ids_size(); ++j) {
if (first[i].replica_ids(j) != second[j].replica_ids(i)) {
return false;
}
}
}
return true;
}
bool ReplicaGroupsEqual(absl::Span<const ReplicaGroup> first,
absl::Span<const ReplicaGroup> second) {
if (first.size() != second.size()) {
return false;
}
for (int64_t i = 0; i < first.size(); ++i) {
if (first[i].replica_ids_size() != second[i].replica_ids_size()) {
return false;
}
for (int j = 0; j < first[i].replica_ids_size(); ++j) {
if (first[i].replica_ids(j) != second[i].replica_ids(j)) {
return false;
}
}
}
return true;
}
bool IsNonFusionCollective(const HloInstruction* instruction) {
switch (instruction->opcode()) {
case HloOpcode::kAllReduce:
case HloOpcode::kAllReduceStart:
case HloOpcode::kAllReduceDone:
case HloOpcode::kAllGather:
case HloOpcode::kAllGatherStart:
case HloOpcode::kAllGatherDone:
case HloOpcode::kAllToAll:
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kCollectivePermute:
case HloOpcode::kCollectivePermuteStart:
case HloOpcode::kCollectivePermuteDone:
case HloOpcode::kReduceScatter:
return true;
case HloOpcode::kAsyncStart:
case HloOpcode::kAsyncUpdate:
case HloOpcode::kAsyncDone:
return IsNonFusionCollective(instruction->async_wrapped_instruction());
default:
return false;
}
}
bool IsCollective(const HloInstruction* instruction) {
if (IsNonFusionCollective(instruction)) {
return true;
}
if (instruction->opcode() == HloOpcode::kFusion &&
instruction->IsCustomFusion()) {
for (const auto* inner_inst : instruction->fused_instructions()) {
if (IsCollective(inner_inst)) {
return true;
}
}
}
return false;
}
HloInstruction* IsOrHasCollectiveWithChannelId(HloInstruction* instruction) {
if (instruction->opcode() == HloOpcode::kFusion) {
for (auto* inner_inst : instruction->fused_instructions()) {
if (IsOrHasCollectiveWithChannelId(inner_inst) != nullptr) {
return inner_inst;
}
}
return nullptr;
}
if (DynCast<HloChannelInstruction>(instruction) == nullptr) {
return nullptr;
}
if (IsCollective(instruction) && instruction->channel_id().has_value()) {
return instruction;
}
return nullptr;
}
bool IsSyncCollective(const HloInstruction* instr) {
auto backend_config = instr->backend_config<xla::gpu::GpuBackendConfig>();
if (!backend_config.ok()) {
return false;
}
return backend_config->collective_backend_config().is_sync();
}
using SourceTargetPair = std::pair<int64_t, int64_t>;
using SourceTargetPairs = std::vector<SourceTargetPair>;
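// A forward cycle is the ordered pair list {{0,1}, {1,2}, ..., {n-1,0}}; a
// backward cycle is {{0,n-1}, {1,0}, {2,1}, ..., {n-1,n-2}}. Both checks
// require the pairs in exactly this order and at least two pairs.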
bool IsForwardCycle(const SourceTargetPairs& pairs) {
int64_t size = pairs.size();
if (size <= 1) return false;
const SourceTargetPair& last_pair = pairs[size - 1];
if (last_pair.first != size - 1 || last_pair.second != 0) {
return false;
}
for (int64_t i = 0; i < size - 1; ++i) {
const SourceTargetPair& pair = pairs[i];
if (pair.first != i || pair.second != i + 1) {
return false;
}
}
return true;
}
bool IsBackwardCycle(const SourceTargetPairs& pairs) {
int64_t size = pairs.size();
if (size <= 1) return false;
const SourceTargetPair& first_pair = pairs[0];
if (first_pair.first != 0 || first_pair.second != size - 1) {
return false;
}
for (int64_t i = 1; i < size; ++i) {
const SourceTargetPair& pair = pairs[i];
if (pair.first != i || pair.second != i - 1) {
return false;
}
}
return true;
}
bool IsExclusivelyCrossModule(absl::Span<const ReplicaGroup> replica_groups,
bool use_global_ids, bool has_channel_id,
const DeviceAssignment& device_assignment) {
if (!has_channel_id) {
return false;
}
if (!use_global_ids) {
for (const ReplicaGroup& replica_group : replica_groups) {
if (replica_group.replica_ids_size() != 1) {
return false;
}
}
return true;
}
int64_t partition_count = device_assignment.computation_count();
for (const ReplicaGroup& replica_group : replica_groups) {
std::optional<int64_t> first_replica_id;
for (int64_t global_id : replica_group.replica_ids()) {
int64_t replica_id = global_id / partition_count;
if (!first_replica_id.has_value()) {
first_replica_id = replica_id;
} else if (replica_id != first_replica_id) {
return false;
}
}
}
return true;
}
} | #include "xla/service/collective_ops_utils.h"
#include <cstdint>
#include <iterator>
#include <optional>
#include <sstream>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/computation_placer.h"
#include "xla/service/global_device_id.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape_util.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
std::vector<ReplicaGroup> CreateReplicaGroups(
const std::vector<std::vector<int64_t>> &replica_groups) {
std::vector<ReplicaGroup> result;
result.reserve(replica_groups.size());
for (const auto &replica_group : replica_groups) {
ReplicaGroup &group = result.emplace_back();
for (auto id : replica_group) {
group.add_replica_ids(id);
}
}
return result;
}
TEST(CollectiveOpsUtilsTest, GetParticipatingIDs_NoReplicaGroups) {
std::vector<int> actual =
GetParticipatingIDs(CollectiveOpGroupMode::kFlattenedID,
                          /*current_id=*/0, /*total_participant_count=*/3,
                          /*groups=*/{})
.value();
std::vector<int> expected = {0, 1, 2};
EXPECT_EQ(actual, expected);
}
TEST(CollectiveOpsUtilsTest, GetParticipatingIDs_ReplicaGroups) {
std::vector<ReplicaGroup> replica_groups(3);
replica_groups[0].add_replica_ids(0);
replica_groups[0].add_replica_ids(4);
replica_groups[1].add_replica_ids(1);
replica_groups[1].add_replica_ids(5);
replica_groups[2].add_replica_ids(2);
replica_groups[2].add_replica_ids(3);
std::vector<int> actual =
GetParticipatingIDs(CollectiveOpGroupMode::kFlattenedID,
                          /*current_id=*/1,
                          /*total_participant_count=*/std::nullopt,
replica_groups)
.value();
std::vector<int> expected = {1, 5};
EXPECT_EQ(actual, expected);
}
TEST(CollectiveOpsUtilsTest, CollectiveWithChannelId) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY %cluster {
%param0 = f32[512]{0} parameter(0)
%copy0 = f32[512]{0} copy(param0)
%reshape0 = f32[1,1,512]{2,0,1} reshape(f32[512]{0} %copy0)
%all-gather = f32[1,4,512]{2,0,1} all-gather(f32[1,1,512]{2,0,1} %reshape0), channel_id=3621, replica_groups={{0,1,2,3}}, dimensions={1}, use_global_device_ids=true
%copy1 = f32[1,4,512]{2,0,1} copy(all-gather)
ROOT root = f32[1,4,512]{2,1,0} copy(%copy1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
HloInstruction *all_gather =
module->entry_computation()->GetInstructionWithName("all-gather");
EXPECT_EQ(IsOrHasCollectiveWithChannelId(all_gather), all_gather);
}
TEST(CollectiveOpsUtilsTest, CollectiveWithChannelId2) {
ReplicaGroup group;
for (int64_t i = 0; i < 8; i++) {
group.add_replica_ids(i);
}
auto builder = HloComputation::Builder("CollectiveWithChannelId2");
TF_ASSERT_OK_AND_ASSIGN(
HloInstruction * param_0,
builder.AddParameter(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(BF16, {1, 512, 4096}), "p0")));
HloInstruction *instr =
builder.AddInstruction(HloInstruction::CreateAllGather(
ShapeUtil::MakeShape(BF16, {1, 4096, 4096}), {param_0}, 1,
CollectiveDeviceList({group}), true, 231, true));
auto computation = builder.Build(
builder.AddInstruction(HloInstruction::CreateTuple({instr})));
auto fusion =
HloInstruction::CreateFusion(ShapeUtil::MakeShape(BF16, {1, 4096, 4096}),
HloInstruction::FusionKind::kOutput,
{param_0}, computation.get(), "fusion");
EXPECT_EQ(IsOrHasCollectiveWithChannelId(fusion.get()), instr);
auto builder2 = HloComputation::Builder("CollectiveWithChannelId2");
TF_ASSERT_OK_AND_ASSIGN(
HloInstruction * param_1,
builder2.AddParameter(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(BF16, {1, 512, 4096}), "p1")));
HloInstruction *instr_without_channel_id =
builder2.AddInstruction(HloInstruction::CreateAllGather(
ShapeUtil::MakeShape(BF16, {1, 4096, 4096}), {param_1}, 1, {group},
true, std::nullopt, true));
auto computation2 = builder2.Build(builder2.AddInstruction(
HloInstruction::CreateTuple({instr_without_channel_id})));
auto fusion2 =
HloInstruction::CreateFusion(ShapeUtil::MakeShape(BF16, {1, 4096, 4096}),
HloInstruction::FusionKind::kOutput,
{param_1}, computation2.get(), "fusion2");
EXPECT_EQ(IsOrHasCollectiveWithChannelId(fusion2.get()), nullptr);
}
TEST(CollectiveOpsUtilsTest, IsForwardCycle) {
EXPECT_TRUE(IsForwardCycle({{0, 1}, {1, 0}}));
EXPECT_TRUE(IsForwardCycle({{0, 1}, {1, 2}, {2, 3}, {3, 0}}));
EXPECT_FALSE(IsForwardCycle({{0, 0}})) << "Self link is not a cycle!";
EXPECT_FALSE(IsForwardCycle({{}})) << "Self link due to initialization to 0";
EXPECT_FALSE(IsForwardCycle({}));
EXPECT_FALSE(IsForwardCycle({{0, 1}}));
EXPECT_FALSE(IsForwardCycle({{0, 1}, {2, 0}})) << "No link between 1 and 2";
EXPECT_FALSE(IsForwardCycle({{1, 0}, {0, 1}})) << "Backward cycle";
EXPECT_FALSE(IsForwardCycle({{3, 0}, {0, 1}, {1, 2}, {2, 3}}))
<< "Unordered pairs are not a cycle";
EXPECT_FALSE(IsForwardCycle({{0, 1}, {1, 2}, {2, 3}, {4, 5}, {3, 0}}))
<< "Out of order pairs are not a cycle";
}
TEST(CollectiveOpsUtilsTest, IsBackwardCycle) {
EXPECT_TRUE(IsBackwardCycle({{0, 1}, {1, 0}}));
EXPECT_TRUE(IsBackwardCycle({{0, 3}, {1, 0}, {2, 1}, {3, 2}}));
EXPECT_FALSE(IsBackwardCycle({{0, 0}})) << "Self link is a backward cycle!";
EXPECT_FALSE(IsBackwardCycle({{}})) << "Self link due to initialization to 0";
EXPECT_FALSE(IsForwardCycle({}));
EXPECT_FALSE(IsForwardCycle({{1, 0}}));
EXPECT_FALSE(IsForwardCycle({{2, 1}, {0, 2}})) << "No link between 1 and 2";
EXPECT_FALSE(IsBackwardCycle({{3, 2}, {0, 3}, {1, 0}, {2, 1}}))
<< "Unordered pairs are not a cycle";
EXPECT_FALSE(IsForwardCycle({{0, 1}, {1, 2}, {4, 5}, {3, 0}}))
<< "Out of order pairs are not a cycle";
}
TEST(IsExclusivelyCrossModuleTest, CrossReplicaNoChannelSet) {
int64_t num_replicas = 4;
int64_t num_partitions = 2;
DeviceAssignment device_assignment(num_replicas, num_partitions);
std::vector<ReplicaGroup> replica_groups =
CreateReplicaGroups({{0, 1}, {2, 3}});
bool is_exclusively_cross_module =
      IsExclusivelyCrossModule(replica_groups, /*use_global_ids=*/false,
                               /*has_channel_id=*/false, device_assignment);
EXPECT_FALSE(is_exclusively_cross_module);
}
TEST(IsExclusivelyCrossModuleTest, CrossReplicaAndCrossModuleNoGlobalIds) {
int64_t num_replicas = 4;
int64_t num_partitions = 2;
DeviceAssignment device_assignment(num_replicas, num_partitions);
std::vector<ReplicaGroup> replica_groups =
CreateReplicaGroups({{0, 1}, {2, 3}});
bool is_exclusively_cross_module =
      IsExclusivelyCrossModule(replica_groups, /*use_global_ids=*/false,
                               /*has_channel_id=*/true, device_assignment);
EXPECT_FALSE(is_exclusively_cross_module);
}
TEST(IsExclusivelyCrossModuleTest, CrossModuleNoGlobalIds) {
int64_t num_replicas = 4;
int64_t num_partitions = 2;
ComputationPlacer placer;
TF_ASSERT_OK_AND_ASSIGN(DeviceAssignment device_assignment,
placer.AssignDevices(num_replicas, num_partitions));
std::vector<ReplicaGroup> replica_groups =
CreateReplicaGroups({{0}, {1}, {2}, {3}});
bool is_exclusively_cross_module =
      IsExclusivelyCrossModule(replica_groups, /*use_global_ids=*/false,
                               /*has_channel_id=*/true, device_assignment);
EXPECT_TRUE(is_exclusively_cross_module);
}
TEST(IsExclusivelyCrossModuleTest, CrossReplicaWithGlobalIds) {
int64_t num_replicas = 8;
int64_t num_partitions = 1;
ComputationPlacer placer;
TF_ASSERT_OK_AND_ASSIGN(DeviceAssignment device_assignment,
placer.AssignDevices(num_replicas, num_partitions));
std::vector<ReplicaGroup> replica_groups =
CreateReplicaGroups({{0, 1, 2, 3, 4, 5, 6, 7}});
bool is_exclusively_cross_module =
      IsExclusivelyCrossModule(replica_groups, /*use_global_ids=*/true,
                               /*has_channel_id=*/true, device_assignment);
EXPECT_FALSE(is_exclusively_cross_module);
}
TEST(IsExclusivelyCrossModuleTest, CrossReplicaAndCrossModuleWithGlobalIds) {
int64_t num_replicas = 4;
int64_t num_partitions = 2;
ComputationPlacer placer;
TF_ASSERT_OK_AND_ASSIGN(DeviceAssignment device_assignment,
placer.AssignDevices(num_replicas, num_partitions));
std::vector<ReplicaGroup> replica_groups =
CreateReplicaGroups({{0, 1, 2, 3, 4, 5, 6, 7}});
bool is_exclusively_cross_module =
      IsExclusivelyCrossModule(replica_groups, /*use_global_ids=*/true,
                               /*has_channel_id=*/true, device_assignment);
EXPECT_FALSE(is_exclusively_cross_module);
}
TEST(IsExclusivelyCrossModuleTest, CrossModuleWithGlobalIds) {
int64_t num_replicas = 4;
int64_t num_partitions = 2;
ComputationPlacer placer;
TF_ASSERT_OK_AND_ASSIGN(DeviceAssignment device_assignment,
placer.AssignDevices(num_replicas, num_partitions));
std::vector<ReplicaGroup> replica_groups =
CreateReplicaGroups({{0, 1}, {2, 3}, {4, 5}, {6, 7}});
bool is_exclusively_cross_module =
      IsExclusivelyCrossModule(replica_groups, /*use_global_ids=*/true,
                               /*has_channel_id=*/true, device_assignment);
EXPECT_TRUE(is_exclusively_cross_module);
}
}
namespace GetCollectiveOpGroupModeTest {
struct TestCase {
bool has_channel_id;
std::optional<bool> use_global_device_ids;
std::optional<xla::CollectiveOpGroupMode> expected;
std::string ToString() const {
std::ostringstream s;
s << (has_channel_id ? "chnl" : "nochnl");
s << "_"
<< (use_global_device_ids
? (*use_global_device_ids ? "ugdi_true" : "ugdi_false")
: "nougdi");
return s.str();
}
};
std::vector<TestCase> GetTestCases() {
const std::vector<TestCase> test_cases = {
{false, std::nullopt, CollectiveOpGroupMode::kCrossReplica},
{false, false, CollectiveOpGroupMode::kCrossReplica},
{false, true, std::nullopt},
{true, std::nullopt, CollectiveOpGroupMode::kCrossPartition},
{true, false, CollectiveOpGroupMode::kCrossReplicaAndPartition},
{true, true, CollectiveOpGroupMode::kFlattenedID},
};
return test_cases;
}
class GetCollectOpGroupModeTest : public testing::TestWithParam<TestCase> {};
TEST_P(GetCollectOpGroupModeTest, Test) {
const TestCase &tc = GetParam();
absl::StatusOr<CollectiveOpGroupMode> actual =
GetCollectiveOpGroupMode(tc.has_channel_id, tc.use_global_device_ids);
if (tc.expected) {
TF_ASSERT_OK(actual.status());
EXPECT_EQ(*actual, *tc.expected);
} else {
EXPECT_FALSE(actual.ok());
}
}
INSTANTIATE_TEST_SUITE_P(GetCollectOpGroupMode, GetCollectOpGroupModeTest,
testing::ValuesIn(GetTestCases()));
}
namespace GetParticipatingDevicesTest {
struct TestCase {
xla::Array2D<int> device_assignment;
std::vector<std::vector<int64_t>> replica_groups;
bool has_channel_id;
std::optional<bool> use_global_device_ids;
struct CurrentIdAndOutput {
int current_id;
std::vector<int> expected_output;
};
std::vector<CurrentIdAndOutput> subtests;
std::vector<std::vector<int>> participating_device_groups;
bool expected_failure;
std::string ToString() const;
};
std::string TestCase::ToString() const {
std::ostringstream s;
absl::StatusOr<CollectiveOpGroupMode> group_mode =
GetCollectiveOpGroupMode(has_channel_id, use_global_device_ids);
if (group_mode.ok()) {
s << CollectiveOpGroupModeToString(*group_mode);
} else {
s << "Invalid";
}
s << "_" << device_assignment.n1() << "x" << device_assignment.n2();
s << "_" << (replica_groups.empty() ? "NoRG" : "RG");
s << "_" << subtests.size() << "SubTests";
return s.str();
}
std::ostream &operator<<(std::ostream &os, const TestCase &tc) {
os << tc.ToString();
return os;
}
std::vector<TestCase> GetTestCases() {
std::vector<TestCase> test_cases;
const std::vector<TestCase> cross_replica_test_cases = {
{
{{33}, {44}, {55}},
{},
false,
false,
{
{33, {33, 44, 55}},
{44, {33, 44, 55}},
},
{{33, 44, 55}},
false
},
{
{{33, 34}, {44, 45}, {55, 56}},
{},
false,
false,
{
{33, {33, 44, 55}},
{34, {34, 45, 56}},
{45, {34, 45, 56}},
},
{{33, 44, 55}, {34, 45, 56}},
false
},
{
{{33}, {44}, {55}},
{{0}, {1, 2}},
false,
false,
{
{33, {33}},
{44, {44, 55}},
},
{{ 33 }, {44, 55}},
false
},
{
{{33, 34}, {44, 45}, {55, 56}},
{{0}, {1, 2}},
false,
false,
{
{33, {33}},
{34, {34}},
{45, {45, 56}},
},
{{33}, {34}, {44, 55}, {45, 56}},
false
},
};
const std::vector<TestCase> cross_partition_test_cases = {
{
{
{33, 34, 35, 36}, {44, 45, 46, 47}, {55, 56, 57, 58}
},
{{0, 1}, {2, 3}},
true,
std::nullopt,
{
{33, {33, 34}},
{35, {35, 36}},
{45, {44, 45}},
{47, {46, 47}},
{58, {57, 58}},
},
{{33, 34}, {44, 45}, {55, 56},
{35, 36}, {46, 47}, {57, 58}},
false
}
};
const std::vector<TestCase> cross_replica_and_partition_test_cases = {
{
{{33, 34}, {44, 45}, {55, 56}},
{{0}, {1, 2}},
true,
false,
{
{33, {33, 34}},
{34, {33, 34}},
{45, {44, 45, 55, 56}},
},
{{33, 34}, {44, 45, 55, 56}},
false
},
{
{{33, 34}, {44, 45}, {55, 56}},
{},
true,
false,
{
{33, {33, 34, 44, 45, 55, 56}},
{34, {33, 34, 44, 45, 55, 56}},
{56, {33, 34, 44, 45, 55, 56}},
},
{{33, 34, 44, 45, 55, 56}},
false
},
};
const std::vector<TestCase> flattened_id_test_cases = {
{
{{33, 34}, {44, 45}, {55, 56}},
{{0}, {1, 2}, {3, 4, 5}},
true,
true,
{
{33, {33}},
{34, {34, 44}},
{44, {34, 44}},
{45, {45, 55, 56}},
{55, {45, 55, 56}},
{56, {45, 55, 56}},
},
{{33}, {34, 44}, {45, 55, 56}},
false
},
{
{{33}},
{},
true,
true,
{
{33, {33}},
},
{{33}},
true
},
};
const std::vector<TestCase> failure_test_cases = {
{
{{33}, {44}, {55}},
{},
false,
true,
{
{33, {}},
},
{{33, 44, 55}},
true
},
};
test_cases.insert(test_cases.end(), cross_replica_test_cases.begin(),
cross_replica_test_cases.end());
for (TestCase tc : cross_replica_test_cases) {
tc.use_global_device_ids = std::nullopt;
test_cases.push_back(tc);
}
test_cases.insert(test_cases.end(), cross_partition_test_cases.begin(),
cross_partition_test_cases.end());
test_cases.insert(test_cases.end(),
cross_replica_and_partition_test_cases.begin(),
cross_replica_and_partition_test_cases.end());
test_cases.insert(test_cases.end(), flattened_id_test_cases.begin(),
flattened_id_test_cases.end());
test_cases.insert(test_cases.end(), failure_test_cases.begin(),
failure_test_cases.end());
return test_cases;
}
class GetParticipatingDevicesTest : public testing::TestWithParam<TestCase> {};
TEST_P(GetParticipatingDevicesTest, Test) {
const TestCase &tc = GetParam();
int64_t num_replicas = tc.device_assignment.n1();
int64_t num_partitions = tc.device_assignment.n2();
DeviceAssignment device_assignment(num_replicas, num_partitions);
for (int64_t replica_id = 0; replica_id < num_replicas; ++replica_id) {
for (int64_t partition_id = 0; partition_id < num_partitions;
++partition_id) {
device_assignment(replica_id, partition_id) =
tc.device_assignment(replica_id, partition_id);
}
}
std::vector<ReplicaGroup> replica_groups =
CreateReplicaGroups(tc.replica_groups);
absl::StatusOr<CollectiveOpGroupMode> group_mode =
GetCollectiveOpGroupMode(tc.has_channel_id, tc.use_global_device_ids);
if (!group_mode.ok()) {
EXPECT_TRUE(tc.expected_failure);
return;
}
for (const TestCase::CurrentIdAndOutput &subtest : tc.subtests) {
absl::StatusOr<std::vector<GlobalDeviceId>> actual =
GetParticipatingDevices(GlobalDeviceId(subtest.current_id),
device_assignment, replica_groups, *group_mode);
if (!actual.ok()) {
EXPECT_TRUE(tc.expected_failure);
continue;
}
std::vector<GlobalDeviceId> expected;
expected.reserve(subtest.expected_output.size());
absl::c_transform(subtest.expected_output, std::back_inserter(expected),
[](int id) { return GlobalDeviceId(id); });
EXPECT_EQ(*actual, expected);
}
absl::StatusOr<std::vector<std::vector<GlobalDeviceId>>>
actual_device_groups = GetParticipatingDevicesGroups(
device_assignment, replica_groups, *group_mode);
if (!actual_device_groups.ok()) {
EXPECT_TRUE(tc.expected_failure);
return;
}
std::vector<std::vector<GlobalDeviceId>> expect_device_groups;
expect_device_groups.reserve(tc.participating_device_groups.size());
for (auto subgroup : tc.participating_device_groups) {
std::vector<GlobalDeviceId> subgroup_device_ids;
subgroup_device_ids.reserve(subgroup.size());
absl::c_transform(subgroup, std::back_inserter(subgroup_device_ids),
[](int id) { return GlobalDeviceId(id); });
expect_device_groups.push_back(subgroup_device_ids);
}
EXPECT_THAT(*actual_device_groups,
testing::UnorderedElementsAreArray(expect_device_groups));
}
INSTANTIATE_TEST_SUITE_P(GetParticipatingDevices, GetParticipatingDevicesTest,
testing::ValuesIn(GetTestCases()));
}
namespace GetPariticipantCountsForReplicaGroupsTest {
struct TestCase {
std::string test_name;
std::vector<std::vector<int64_t>> replica_groups;
CollectiveOpGroupMode group_mode;
int64_t num_replicas;
int64_t num_partitions;
std::vector<int64_t> expected;
};
class GetPariticipantCountsForReplicaGroupsTest
: public testing::TestWithParam<TestCase> {};
TEST_P(GetPariticipantCountsForReplicaGroupsTest, Test) {
const TestCase &tc = GetParam();
std::vector<ReplicaGroup> replica_groups =
CreateReplicaGroups(tc.replica_groups);
TF_ASSERT_OK_AND_ASSIGN(
std::vector<int64_t> actual,
GetPariticipantCountsForReplicaGroups(tc.num_replicas, tc.num_partitions,
replica_groups, tc.group_mode));
EXPECT_THAT(actual, testing::ElementsAreArray(tc.expected));
}
std::vector<TestCase> GetTestCases() {
return {
{
"CrossReplicaEmptyGroup",
{},
CollectiveOpGroupMode::kCrossReplica,
8,
1,
{8},
},
{
"CrossReplicaWithPartitions",
{{0, 1}, {2, 3}},
CollectiveOpGroupMode::kCrossReplica,
4,
2,
{2, 2, 2, 2},
},
{
"CrossReplicaAndPartition",
{{0, 1}, {2, 3}},
CollectiveOpGroupMode::kCrossReplicaAndPartition,
4,
2,
{4, 4},
},
{
"FlattenedID",
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}},
CollectiveOpGroupMode::kFlattenedID,
4,
2,
{1, 1, 1, 1, 1, 1, 1, 1},
},
};
}
INSTANTIATE_TEST_SUITE_P(
GetPariticipantCountsForReplicaGroups,
GetPariticipantCountsForReplicaGroupsTest,
testing::ValuesIn(GetTestCases()),
[](const testing::TestParamInfo<
GetPariticipantCountsForReplicaGroupsTest::ParamType> &info) {
return info.param.test_name;
});
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collective_ops_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collective_ops_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cd4d6a35-82ce-47f3-b580-f9949f4f1774 | cpp | abseil/abseil-cpp | structured | absl/log/internal/structured.h | absl/log/structured_test.cc | #ifndef ABSL_LOG_INTERNAL_STRUCTURED_H_
#define ABSL_LOG_INTERNAL_STRUCTURED_H_
#include <ostream>
#include "absl/base/config.h"
#include "absl/log/internal/log_message.h"
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace log_internal {
class ABSL_MUST_USE_RESULT AsLiteralImpl final {
public:
explicit AsLiteralImpl(absl::string_view str) : str_(str) {}
AsLiteralImpl(const AsLiteralImpl&) = default;
AsLiteralImpl& operator=(const AsLiteralImpl&) = default;
private:
absl::string_view str_;
friend std::ostream& operator<<(std::ostream& os, AsLiteralImpl as_literal) {
return os << as_literal.str_;
}
void AddToMessage(log_internal::LogMessage& m) {
m.CopyToEncodedBuffer<log_internal::LogMessage::StringType::kLiteral>(str_);
}
friend log_internal::LogMessage& operator<<(log_internal::LogMessage& m,
AsLiteralImpl as_literal) {
as_literal.AddToMessage(m);
return m;
}
};
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/log/structured.h"
#include <ios>
#include <sstream>
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/log/internal/test_helpers.h"
#include "absl/log/internal/test_matchers.h"
#include "absl/log/log.h"
#include "absl/log/scoped_mock_log.h"
namespace {
using ::absl::log_internal::MatchesOstream;
using ::absl::log_internal::TextMessage;
using ::testing::ElementsAre;
using ::testing::Eq;
auto *test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment(
new absl::log_internal::LogTestEnvironment);
std::ios &LoggingDefaults(std::ios &str) {
str.setf(std::ios_base::showbase | std::ios_base::boolalpha |
std::ios_base::internal);
return str;
}
TEST(StreamingFormatTest, LogAsLiteral) {
std::ostringstream stream;
const std::string not_a_literal("hello world");
stream << LoggingDefaults << absl::LogAsLiteral(not_a_literal);
absl::ScopedMockLog sink;
EXPECT_CALL(sink,
Send(AllOf(TextMessage(MatchesOstream(stream)),
TextMessage(Eq("hello world")),
ENCODED_MESSAGE(HasValues(ElementsAre(
EqualsProto(R"pb(literal: "hello world")pb")))))));
sink.StartCapturingLogs();
LOG(INFO) << absl::LogAsLiteral(not_a_literal);
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/log/internal/structured.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/log/structured_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
ca34a38a-e8e5-4fb3-b1d4-e583a80562a1 | cpp | tensorflow/tensorflow | ifrt_executable_registry | tensorflow/core/tfrt/ifrt/ifrt_executable_registry.cc | tensorflow/core/tfrt/ifrt/ifrt_executable_registry_test.cc | #include "tensorflow/core/tfrt/ifrt/ifrt_executable_registry.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable.h"
namespace tensorflow {
namespace ifrt_serving {
ServingExecutableRegistry::Handle::Handle(Handle&& other) {
*this = std::move(other);
}
ServingExecutableRegistry::Handle& ServingExecutableRegistry::Handle::operator=(
Handle&& other) {
if (this != &other) {
program_id_ = std::move(other.program_id_);
other.program_id_ = std::nullopt;
}
return *this;
}
ServingExecutableRegistry::Handle::~Handle() { Release(); }
absl::Status ServingExecutableRegistry::Handle::Freeze() {
if (!program_id_.has_value()) {
return absl::FailedPreconditionError("Program is not registered");
}
absl::MutexLock l(&ServingExecutableRegistry::mu_);
const auto it = ServingExecutableRegistry::executables_->find(*program_id_);
if (it == ServingExecutableRegistry::executables_->end()) {
return absl::NotFoundError(
absl::StrCat("Program ", *program_id_, " not found in the registry"));
}
VLOG(1) << "Freeze the program " << *program_id_ << " from signature '"
<< it->second->signature_name() << "' of model '"
<< it->second->model_name() << "'";
it->second->Freeze();
return absl::OkStatus();
}
void ServingExecutableRegistry::Handle::Release() {
if (!program_id_.has_value()) {
return;
}
absl::MutexLock l(&ServingExecutableRegistry::mu_);
const auto it = ServingExecutableRegistry::executables_->find(*program_id_);
if (it == ServingExecutableRegistry::executables_->end()) {
LOG(ERROR) << "Program " << *program_id_ << " not found in the registry";
return;
}
VLOG(1) << "Unregistering program " << *program_id_ << " from signature '"
<< it->second->signature_name() << "' of model '"
<< it->second->model_name() << "'";
ServingExecutableRegistry::executables_->erase(it);
program_id_ = std::nullopt;
}
ServingExecutableRegistry::Handle::Handle(int64_t program_id)
: program_id_(program_id) {}
absl::StatusOr<ServingExecutableRegistry::Handle>
ServingExecutableRegistry::Register(
int64_t program_id, std::unique_ptr<IfrtServingExecutable> executable) {
absl::MutexLock l(&mu_);
VLOG(1) << "Registering program " << program_id << " from signature '"
<< executable->signature_name() << "' of model '"
<< executable->model_name() << "'"
<< ", address is " << executable.get();
if (!executables_->insert({program_id, std::move(executable)}).second) {
return absl::AlreadyExistsError(absl::StrCat(
"Program ", program_id, " already exists in the program registry"));
}
return Handle(program_id);
}
IfrtServingExecutable* ServingExecutableRegistry::Lookup(int64_t program_id) {
absl::ReaderMutexLock l(&mu_);
VLOG(1) << "Looking up program " << program_id;
const auto it = executables_->find(program_id);
return it != executables_->end() ? it->second.get() : nullptr;
}
ABSL_CONST_INIT absl::Mutex ServingExecutableRegistry::mu_(absl::kConstInit);
absl::flat_hash_map<int64_t, std::unique_ptr<IfrtServingExecutable>>* const
ServingExecutableRegistry::executables_ =
new absl::flat_hash_map<int64_t,
std::unique_ptr<IfrtServingExecutable>>();
}
} | #include "tensorflow/core/tfrt/ifrt/ifrt_executable_registry.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/InitAllDialects.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/test_util.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable.h"
#include "tensorflow/core/tfrt/ifrt/tf_host_callback.h"
#include "tsl/platform/env.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tfrt/host_context/concurrent_work_queue.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
tsl::thread::ThreadPool& GetThreadPool() {
constexpr int kMaxParallelism = 16;
static auto* thread_pool =
new tsl::thread::ThreadPool(tsl::Env::Default(), tsl::ThreadOptions(),
"IfrtSharding", kMaxParallelism);
return *thread_pool;
}
absl::StatusOr<std::unique_ptr<IfrtServingExecutable>>
CreateIfrtServingExecutable(mlir::MLIRContext& context, int64_t program_id) {
constexpr absl::string_view kDataDirectory =
"tensorflow/core/tfrt/ifrt/testdata";
std::string mlir_module_path = tensorflow::GetDataDependencyFilepath(
absl::StrCat(kDataDirectory, "/executable.mlir"));
mlir::OwningOpRef<mlir::ModuleOp> mlir_module =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context);
if (!mlir_module) {
return absl::InvalidArgumentError(
absl::StrCat("Failed to parse MLIR file: ", mlir_module_path));
}
TF_ASSIGN_OR_RETURN(std::shared_ptr<xla::ifrt::Client> client,
xla::ifrt::test_util::GetClient());
IfrtLoadedVariableRegistry ifrt_loaded_variable_registry;
IfrtRestoreTensorRegistry ifrt_restore_tensor_registry;
std::unique_ptr<tfrt::ConcurrentWorkQueue> work_queue =
tfrt::CreateMultiThreadedWorkQueue(
4, 4);
TF_ASSIGN_OR_RETURN(std::unique_ptr<tensorflow::DynamicDeviceMgr> device_mgr,
CreateTfDynamicDeviceMgr());
return IfrtServingExecutable::Create(
program_id, "test", "main", std::move(mlir_module), client,
&GetThreadPool(), &ifrt_loaded_variable_registry,
&ifrt_restore_tensor_registry, work_queue.get(), device_mgr.get(),
tensorflow::IdentityShapeRepresentationFn(),
nullptr,
nullptr);
}
TEST(IfrtExecutableRegistry, Basic) {
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
int64_t program_id = 1234;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<IfrtServingExecutable> executable,
CreateIfrtServingExecutable(context, program_id));
IfrtServingExecutable* raw_ptr = executable.get();
TF_ASSERT_OK_AND_ASSIGN(auto handle, ServingExecutableRegistry::Register(
program_id, std::move(executable)));
IfrtServingExecutable* executable_ptr =
ServingExecutableRegistry::Lookup(program_id);
ASSERT_EQ(executable_ptr, raw_ptr);
}
TEST(IfrtExecutableRegistry, DuplicateRegistrationFails) {
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
int64_t program_id = 1234;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<IfrtServingExecutable> executable,
CreateIfrtServingExecutable(context, program_id));
TF_ASSERT_OK_AND_ASSIGN(auto handle, ServingExecutableRegistry::Register(
program_id, std::move(executable)));
EXPECT_THAT(
ServingExecutableRegistry::Register(program_id, std::move(executable)),
testing::StatusIs(absl::StatusCode::kAlreadyExists));
}
TEST(IfrtExecutableRegistry, ReleaseOk) {
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
int64_t program_id = 1234;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<IfrtServingExecutable> executable,
CreateIfrtServingExecutable(context, program_id));
TF_ASSERT_OK_AND_ASSIGN(auto handle, ServingExecutableRegistry::Register(
program_id, std::move(executable)));
handle.Release();
EXPECT_EQ(ServingExecutableRegistry::Lookup(program_id), nullptr);
}
TEST(IfrtExecutableRegistry, FreezeOk) {
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
int64_t program_id = 1234;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<IfrtServingExecutable> executable,
CreateIfrtServingExecutable(context, program_id));
IfrtServingExecutable* raw_ptr = executable.get();
TF_ASSERT_OK_AND_ASSIGN(auto handle, ServingExecutableRegistry::Register(
program_id, std::move(executable)));
ASSERT_OK(handle.Freeze());
IfrtServingExecutable* executable_ptr =
ServingExecutableRegistry::Lookup(program_id);
ASSERT_EQ(executable_ptr, raw_ptr);
}
TEST(IfrtExecutableRegistry, FreezeFailedProgramNotRegistered) {
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
int64_t program_id = 1234;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<IfrtServingExecutable> executable,
CreateIfrtServingExecutable(context, program_id));
TF_ASSERT_OK_AND_ASSIGN(auto handle, ServingExecutableRegistry::Register(
program_id, std::move(executable)));
handle.Release();
EXPECT_THAT(handle.Freeze(),
testing::StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST(IfrtExecutableRegistry, InvalidProgramIdShallReturnNull) {
int64_t program_id = 1234;
IfrtServingExecutable* executable_ptr =
ServingExecutableRegistry::Lookup(program_id);
ASSERT_EQ(executable_ptr, nullptr);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ifrt/ifrt_executable_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ifrt/ifrt_executable_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9131b767-6a50-48e5-8e87-4d8d59db6611 | cpp | tensorflow/tensorflow | rendezvous | tensorflow/core/framework/rendezvous.cc | tensorflow/core/framework/rendezvous_test.cc | #include "tensorflow/core/framework/rendezvous.h"
#include <deque>
#include <functional>
#include <utility>
#include <vector>
#include "tensorflow/core/framework/local_rendezvous.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/lib/gtl/manual_constructor.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
Rendezvous::ParsedKey& Rendezvous::ParsedKey::operator=(const ParsedKey& b) {
const char* b_base = b.buf_.data();
buf_ = b.buf_;
src_device = StringPiece(buf_.data() + (b.src_device.data() - b_base),
b.src_device.size());
src = b.src;
src_incarnation = b.src_incarnation;
dst_device = StringPiece(buf_.data() + (b.dst_device.data() - b_base),
b.dst_device.size());
dst = b.dst;
edge_name = StringPiece(buf_.data() + (b.edge_name.data() - b_base),
b.edge_name.size());
return *this;
}
string Rendezvous::CreateKey(const string& src_device, uint64 src_incarnation,
const string& dst_device, const string& name,
const FrameAndIter& frame_iter) {
char buf[strings::kFastToBufferSize];
return strings::StrCat(
src_device, ";", strings::Uint64ToHexString(src_incarnation, buf), ";",
dst_device, ";", name, ";", frame_iter.frame_id, ":", frame_iter.iter_id);
}
static StringPiece ConsumeNextPart(StringPiece* s, char delim) {
for (size_t offset = 0; offset < s->size(); offset++) {
if ((*s)[offset] == delim) {
StringPiece result(s->data(), offset);
s->remove_prefix(offset + 1);
return result;
}
}
StringPiece result(s->data(), s->size());
s->remove_prefix(s->size());
return result;
}
Status Rendezvous::ParseKey(StringPiece key, ParsedKey* out) {
if (key.data() == out->buf_.data()) {
DCHECK_EQ(key.size(), out->buf_.size());
} else {
out->buf_.assign(key.data(), key.size());
}
StringPiece s(out->buf_);
StringPiece parts[5];
for (int i = 0; i < 5; i++) {
parts[i] = ConsumeNextPart(&s, ';');
}
if (s.empty() &&
!parts[4].empty() &&
DeviceNameUtils::ParseFullName(parts[0], &out->src) &&
strings::HexStringToUint64(parts[1], &out->src_incarnation) &&
DeviceNameUtils::ParseFullName(parts[2], &out->dst) &&
!parts[3].empty()) {
out->src_device = StringPiece(parts[0].data(), parts[0].size());
out->dst_device = StringPiece(parts[2].data(), parts[2].size());
out->edge_name = StringPiece(parts[3].data(), parts[3].size());
return absl::OkStatus();
}
return errors::InvalidArgument("Invalid rendezvous key: ", key);
}
RendezvousInterface::~RendezvousInterface() {}
Status RendezvousInterface::Recv(const ParsedKey& key, const Args& recv_args,
Tensor* val, bool* is_dead,
int64_t timeout_ms) {
Status ret;
Notification n;
RecvAsync(key, recv_args,
[&ret, &n, val, is_dead](const Status& s, const Args& send_args,
const Args& recv_args, const Tensor& v,
const bool dead) {
ret = s;
*val = v;
*is_dead = dead;
n.Notify();
});
if (timeout_ms > 0) {
int64_t timeout_us = timeout_ms * 1000;
bool notified = WaitForNotificationWithTimeout(&n, timeout_us);
if (!notified) {
return Status(absl::StatusCode::kDeadlineExceeded,
"Timed out waiting for notification");
}
} else {
n.WaitForNotification();
}
return ret;
}
Status RendezvousInterface::Recv(const ParsedKey& key, const Args& args,
Tensor* val, bool* is_dead) {
const int64_t no_timeout = 0;
return Recv(key, args, val, is_dead, no_timeout);
}
namespace {
class LocalRendezvousWrapper : public Rendezvous {
public:
LocalRendezvousWrapper(int num_shards) : impl_(this, num_shards) {}
Status Send(const ParsedKey& key, const Args& send_args, const Tensor& val,
const bool is_dead) override {
return impl_.Send(key, send_args, val, is_dead);
}
void RecvAsync(const ParsedKey& key, const Args& recv_args,
DoneCallback done) override {
impl_.RecvAsync(key, recv_args, std::move(done));
}
void StartAbort(const Status& status) override { impl_.StartAbort(status); }
private:
LocalRendezvous impl_;
LocalRendezvousWrapper(const LocalRendezvousWrapper&) = delete;
void operator=(const LocalRendezvousWrapper&) = delete;
};
}
Rendezvous* NewLocalRendezvous(int num_shards) {
return new LocalRendezvousWrapper(num_shards);
}
} | #include "tensorflow/core/framework/rendezvous.h"
#include "absl/status/status.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
TEST(RendezvousTest, Key) {
const string key = Rendezvous::CreateKey(
"/job:mnist/replica:1/task:2/CPU:0", 7890,
"/job:mnist/replica:1/task:2/device:GPU:0", "var0", FrameAndIter(0, 0));
EXPECT_EQ(key,
"/job:mnist/replica:1/task:2/CPU:0;"
"0000000000001ed2;"
"/job:mnist/replica:1/task:2/device:GPU:0;"
"var0;"
"0:0");
Rendezvous::ParsedKey parsed;
TF_EXPECT_OK(Rendezvous::ParseKey(key, &parsed));
EXPECT_EQ(parsed.src_device, "/job:mnist/replica:1/task:2/CPU:0");
EXPECT_EQ(parsed.src_incarnation, 7890);
EXPECT_EQ(parsed.src.type, "CPU");
EXPECT_EQ(parsed.dst_device, "/job:mnist/replica:1/task:2/device:GPU:0");
EXPECT_EQ(parsed.dst.type, "GPU");
EXPECT_FALSE(Rendezvous::ParseKey("foo;bar;baz", &parsed).ok());
EXPECT_FALSE(Rendezvous::ParseKey("/job:mnist/replica:1/task:2/CPU:0;"
"/job:mnist/replica:1/task:2/device:GPU:0;",
&parsed)
.ok());
EXPECT_FALSE(
Rendezvous::ParseKey(strings::StrCat(key, ";", key), &parsed).ok());
}
class LocalRendezvousTest : public ::testing::Test {
public:
LocalRendezvousTest() : threads_(Env::Default(), "test", 16) {
rendez_ = NewLocalRendezvous();
}
~LocalRendezvousTest() override { rendez_->Unref(); }
void SchedClosure(std::function<void()> fn) {
threads_.Schedule(std::move(fn));
}
Rendezvous* rendez_;
private:
thread::ThreadPool threads_;
};
Tensor V(const string& content) {
Tensor tensor(DT_STRING, TensorShape({}));
tensor.scalar<tstring>()() = content;
return tensor;
}
string V(const Tensor& tensor) {
CHECK_EQ(tensor.dtype(), DT_STRING);
CHECK(TensorShapeUtils::IsScalar(tensor.shape()));
return tensor.scalar<tstring>()();
}
Rendezvous::ParsedKey MakeKey(const string& name) {
string s = Rendezvous::CreateKey("/job:mnist/replica:1/task:2/CPU:0", 7890,
"/job:mnist/replica:1/task:2/device:GPU:0",
name, FrameAndIter(0, 0));
Rendezvous::ParsedKey k;
TF_EXPECT_OK(Rendezvous::ParseKey(s, &k));
return k;
}
const Rendezvous::ParsedKey& KeyFoo() {
static auto* key = new Rendezvous::ParsedKey(MakeKey("foo"));
return *key;
}
const Rendezvous::ParsedKey& KeyBar() {
static auto* key = new Rendezvous::ParsedKey(MakeKey("bar"));
return *key;
}
TEST_F(LocalRendezvousTest, SendRecv) {
Rendezvous::Args args;
TF_ASSERT_OK(rendez_->Send(KeyFoo(), args, V("hello"), false));
Tensor val(DT_STRING);
bool is_dead = false;
TF_ASSERT_OK(rendez_->Recv(KeyFoo(), args, &val, &is_dead));
EXPECT_EQ("hello", V(val));
}
TEST_F(LocalRendezvousTest, RecvSend) {
SchedClosure([this]() {
Env::Default()->SleepForMicroseconds(10000);
Rendezvous::Args args;
TF_ASSERT_OK(rendez_->Send(KeyFoo(), args, V("hello"), false));
});
Tensor val(DT_STRING);
bool is_dead = false;
Rendezvous::Args args;
TF_ASSERT_OK(rendez_->Recv(KeyFoo(), args, &val, &is_dead));
EXPECT_EQ("hello", V(val));
}
TEST_F(LocalRendezvousTest, PingPong) {
SchedClosure([this]() {
Tensor t(DT_STRING);
bool is_dead = false;
Rendezvous::Args args;
TF_ASSERT_OK(rendez_->Recv(KeyFoo(), args, &t, &is_dead));
TF_ASSERT_OK(rendez_->Send(KeyBar(), args, t, is_dead));
});
Env::Default()->SleepForMicroseconds(1000000);
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
TF_ASSERT_OK(rendez_->Send(KeyFoo(), args, V("secret msg"), val_dead));
TF_ASSERT_OK(rendez_->Recv(KeyBar(), args, &val, &val_dead));
EXPECT_EQ("secret msg", V(val));
}
TEST_F(LocalRendezvousTest, CancelBeforeRecv) {
auto* cm = new CancellationManager();
Tensor val(DT_STRING);
bool is_dead = false;
Rendezvous::Args args;
args.cancellation_manager = cm;
cm->StartCancel();
auto s = rendez_->Recv(KeyFoo(), args, &val, &is_dead);
EXPECT_FALSE(s.ok());
EXPECT_TRUE(absl::IsCancelled(s));
EXPECT_EQ("RecvAsync is cancelled.", s.message());
delete cm;
}
TEST_F(LocalRendezvousTest, CancelAfterRecv) {
auto* cm = new CancellationManager();
Notification n;
SchedClosure([cm, &n]() {
Env::Default()->SleepForMicroseconds(10000);
cm->StartCancel();
n.Notify();
});
Tensor val(DT_STRING);
bool is_dead = false;
Rendezvous::Args args;
args.cancellation_manager = cm;
auto s = rendez_->Recv(KeyFoo(), args, &val, &is_dead);
EXPECT_FALSE(s.ok());
EXPECT_TRUE(absl::IsCancelled(s));
EXPECT_EQ("RecvAsync is cancelled.", s.message());
n.WaitForNotification();
delete cm;
}
TEST_F(LocalRendezvousTest, CancelEmptyQueue) {
auto* cm = new CancellationManager();
Notification n;
SchedClosure([this, cm, &n]() {
Env::Default()->SleepForMicroseconds(10000);
Rendezvous::Args args;
TF_ASSERT_OK(rendez_->Send(KeyFoo(), args, V("hello"), false));
cm->StartCancel();
n.Notify();
});
Tensor val(DT_STRING);
bool is_dead = false;
Rendezvous::Args args;
args.cancellation_manager = cm;
TF_ASSERT_OK(rendez_->Recv(KeyFoo(), args, &val, &is_dead));
EXPECT_EQ("hello", V(val));
n.WaitForNotification();
delete cm;
}
TEST_F(LocalRendezvousTest, CancelMultiple) {
auto* cm = new CancellationManager();
SchedClosure([this, cm]() {
Env::Default()->SleepForMicroseconds(10000);
Rendezvous::Args args;
cm->StartCancel();
TF_ASSERT_OK(rendez_->Send(KeyFoo(), args, V("hello"), false));
TF_ASSERT_OK(rendez_->Send(KeyFoo(), args, V("hello"), false));
});
Tensor val(DT_STRING);
Rendezvous::Args args;
Rendezvous::Args args_with_cancellation;
args_with_cancellation.cancellation_manager = cm;
Notification n0;
Notification n1;
Notification n2;
Notification n3;
Status s0;
Status s1;
Status s2;
Status s3;
rendez_->RecvAsync(
KeyFoo(), args,
[&n0, &s0](const Status& s, const Rendezvous::Args& send_args,
const Rendezvous::Args& recv_args, const Tensor& v,
const bool dead) {
s0.Update(s);
n0.Notify();
});
rendez_->RecvAsync(
KeyFoo(), args_with_cancellation,
[&n1, &s1](const Status& s, const Rendezvous::Args& send_args,
const Rendezvous::Args& recv_args, const Tensor& v,
const bool dead) {
s1.Update(s);
n1.Notify();
});
rendez_->RecvAsync(
KeyFoo(), args,
[&n2, &s2](const Status& s, const Rendezvous::Args& send_args,
const Rendezvous::Args& recv_args, const Tensor& v,
const bool dead) {
s2.Update(s);
n2.Notify();
});
rendez_->RecvAsync(
KeyFoo(), args_with_cancellation,
[&n3, &s3](const Status& s, const Rendezvous::Args& send_args,
const Rendezvous::Args& recv_args, const Tensor& v,
const bool dead) {
s3.Update(s);
n3.Notify();
});
n0.WaitForNotification();
n1.WaitForNotification();
n2.WaitForNotification();
n3.WaitForNotification();
TF_ASSERT_OK(s0);
TF_ASSERT_OK(s2);
EXPECT_FALSE(s1.ok());
EXPECT_FALSE(s3.ok());
delete cm;
}
struct BlockingState {
mutex lock;
int counter = 0;
Notification done;
};
TEST_F(LocalRendezvousTest, RandomSendRecv) {
static const int N = 100;
random::PhiloxRandom philox(testing::RandomSeed(), 17);
random::SimplePhilox rnd(&philox);
BlockingState state;
state.counter = N;
for (int i = 0; i < N; ++i) {
int micros = 100 + rnd.Uniform(1000);
SchedClosure([this, i, micros]() {
Env::Default()->SleepForMicroseconds(micros);
Rendezvous::Args args;
TF_ASSERT_OK(rendez_->Send(MakeKey(strings::StrCat(i)), args,
V(strings::StrCat(i)), false));
});
auto recv_done = [this, &state, i](const Status& status,
const Rendezvous::Args& sender_args,
const Rendezvous::Args& recver_args,
const Tensor& val, const bool val_dead) {
EXPECT_EQ(strings::StrCat(i), V(val));
bool done = false;
{
mutex_lock l(state.lock);
state.counter--;
if (state.counter == 0) {
done = true;
}
}
if (done) {
state.done.Notify();
}
};
micros = 100 + rnd.Uniform(1000);
SchedClosure([this, i, micros, recv_done]() {
Env::Default()->SleepForMicroseconds(micros);
rendez_->RecvAsync(MakeKey(strings::StrCat(i)), Rendezvous::Args(),
recv_done);
});
}
state.done.WaitForNotification();
}
void RandomSleep() {
if (std::rand() % 10 == 0) {
Env::Default()->SleepForMicroseconds(1000);
}
}
TEST_F(LocalRendezvousTest, MultiSends) {
static const int N = 100;
const auto& key_foo = KeyFoo();
Rendezvous::Args args;
SchedClosure([=]() {
for (int i = 0; i < N; ++i) {
TF_ASSERT_OK(rendez_->Send(key_foo, args, V(strings::StrCat(i)), false));
RandomSleep();
}
});
Tensor val;
bool val_dead;
for (int i = 0; i < N; ++i) {
TF_ASSERT_OK(rendez_->Recv(key_foo, args, &val, &val_dead));
RandomSleep();
}
}
TEST_F(LocalRendezvousTest, RecvAbort) {
rendez_->Ref();
SchedClosure([this]() {
rendez_->StartAbort(errors::Aborted(""));
rendez_->Unref();
});
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
Status status = rendez_->Recv(KeyFoo(), args, &val, &val_dead);
EXPECT_TRUE(absl::IsAborted(status));
}
TEST_F(LocalRendezvousTest, RecvSleepAbort) {
rendez_->Ref();
SchedClosure([this]() {
Env::Default()->SleepForMicroseconds(1000000);
rendez_->StartAbort(errors::Aborted(""));
rendez_->Unref();
});
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
Status status = rendez_->Recv(KeyFoo(), args, &val, &val_dead);
EXPECT_TRUE(absl::IsAborted(status));
}
TEST_F(LocalRendezvousTest, AbortThenRecvOrSend) {
rendez_->StartAbort(errors::Aborted(""));
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
EXPECT_TRUE(absl::IsAborted(rendez_->Send(KeyFoo(), args, val, val_dead)));
EXPECT_TRUE(absl::IsAborted(rendez_->Recv(KeyFoo(), args, &val, &val_dead)));
}
class DummyDeviceContext : public DeviceContext {
public:
explicit DummyDeviceContext(int stream_id) : stream_id_(stream_id) {}
~DummyDeviceContext() override {}
int stream_id() const { return stream_id_; }
void CopyTensorInSameDevice(const Tensor* input_tensor, Device* device,
Tensor* output_tensor,
StatusCallback done) const override {
done(absl::OkStatus());
}
private:
const int stream_id_;
};
TEST_F(LocalRendezvousTest, TransferDummyDeviceContext) {
Rendezvous::Args args;
args.device_context = new DummyDeviceContext(123);
TF_ASSERT_OK(rendez_->Send(KeyFoo(), args, V("hello"), false));
Notification n;
Rendezvous::Args args1;
args1.device_context = new DummyDeviceContext(1);
rendez_->RecvAsync(
KeyFoo(), args1,
[&n](const Status& s, const Rendezvous::Args& send_args,
const Rendezvous::Args& recv_args, const Tensor& val, bool is_dead) {
CHECK_EQ(123, dynamic_cast<const DummyDeviceContext*>(
send_args.device_context)
->stream_id());
n.Notify();
});
n.WaitForNotification();
args.device_context->Unref();
args1.device_context->Unref();
}
void BM_SendRecv(::testing::benchmark::State& state) {
Rendezvous* rendez = NewLocalRendezvous();
Tensor orig = V("val");
Tensor val(DT_STRING, TensorShape({}));
bool is_dead = false;
Rendezvous::Args args;
for (auto s : state) {
TF_CHECK_OK(rendez->Send(KeyFoo(), args, orig, is_dead));
TF_CHECK_OK(rendez->Recv(KeyFoo(), args, &val, &is_dead));
}
CHECK_EQ(V(val), V(orig));
rendez->Unref();
}
BENCHMARK(BM_SendRecv);
void BM_RecvSend(::testing::benchmark::State& state) {
Rendezvous* rendez = NewLocalRendezvous();
Tensor orig = V("val");
Tensor val(DT_STRING, TensorShape({}));
bool is_dead = false;
Rendezvous::Args args;
for (auto s : state) {
bool received = false;
rendez->RecvAsync(
KeyFoo(), args,
[&val, &received](const Status& ,
const Rendezvous::Args& ,
const Rendezvous::Args& ,
const Tensor& tensor, bool ) {
val = tensor;
received = true;
});
TF_CHECK_OK(rendez->Send(KeyFoo(), args, orig, is_dead));
CHECK(received);
}
CHECK_EQ(V(val), V(orig));
rendez->Unref();
}
BENCHMARK(BM_RecvSend);
void BM_PingPong(::testing::benchmark::State& state) {
const int messages_count = state.range(0);
auto* cm = new CancellationManager();
thread::ThreadPool* pool = new thread::ThreadPool(Env::Default(), "test", 1);
for (auto s : state) {
Rendezvous* rendez = NewLocalRendezvous();
pool->Schedule([rendez, messages_count]() {
Tensor bar = V("bar");
Tensor foo(DT_STRING, TensorShape({}));
bool is_dead = false;
Rendezvous::Args args;
for (int i = 0; i < messages_count; ++i) {
TF_CHECK_OK(rendez->Recv(KeyFoo(), args, &foo, &is_dead));
TF_CHECK_OK(rendez->Send(KeyBar(), args, bar, is_dead));
}
CHECK_EQ("foo", V(foo));
});
Tensor foo = V("foo");
Tensor bar(DT_STRING, TensorShape({}));
bool is_dead = false;
Rendezvous::Args args;
args.cancellation_manager = cm;
for (int i = 0; i < messages_count; ++i) {
TF_CHECK_OK(rendez->Send(KeyFoo(), args, foo, is_dead));
TF_CHECK_OK(rendez->Recv(KeyBar(), args, &bar, &is_dead));
}
CHECK_EQ("bar", V(bar));
rendez->Unref();
}
state.SetItemsProcessed(messages_count * state.iterations());
delete pool;
delete cm;
}
BENCHMARK(BM_PingPong)->Arg(100)->Arg(200)->Arg(300);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/rendezvous.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/rendezvous_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
10c3c9a1-2e41-4864-a9be-cd9d7a285c38 | cpp | tensorflow/tensorflow | xplane_to_tool_names | tensorflow/core/profiler/convert/xplane_to_tool_names.cc | tensorflow/core/profiler/convert/xplane_to_tool_names_test.cc | #include "tensorflow/core/profiler/convert/xplane_to_tool_names.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/str_join.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/profiler/convert/repository.h"
#include "tensorflow/core/profiler/convert/xplane_to_dcn_collective_stats.h"
#include "tensorflow/core/profiler/convert/xplane_to_hlo.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_utils.h"
namespace tensorflow {
namespace profiler {
absl::StatusOr<std::string> GetAvailableToolNames(
const SessionSnapshot& session_snapshot) {
std::vector<std::string> tools;
bool is_cloud_vertex_ai = !session_snapshot.HasAccessibleRunDir();
if (session_snapshot.XSpaceSize() != 0) {
tools.reserve(11);
tools.push_back(is_cloud_vertex_ai ? "trace_viewer" : "trace_viewer@");
tools.push_back("overview_page");
tools.push_back("input_pipeline_analyzer");
tools.push_back("framework_op_stats");
tools.push_back("memory_profile");
tools.push_back("pod_viewer");
tools.push_back("tf_data_bottleneck_analysis");
tools.push_back("op_profile");
TF_ASSIGN_OR_RETURN(std::unique_ptr<XSpace> xspace,
session_snapshot.GetXSpace(0));
if (!FindPlanesWithPrefix(*xspace, kGpuPlanePrefix).empty()) {
tools.push_back("kernel_stats");
}
TF_ASSIGN_OR_RETURN(bool has_hlo,
ConvertMultiXSpaceToHloProto(session_snapshot));
if (has_hlo) {
tools.push_back("memory_viewer");
tools.push_back("graph_viewer");
}
TF_ASSIGN_OR_RETURN(bool has_dcn_collective_stats,
HasDcnCollectiveStatsInMultiXSpace(session_snapshot));
if (has_dcn_collective_stats) {
tools.push_back("dcn_collective_stats");
}
}
return absl::StrJoin(tools, ",");
}
}
} | #include "tensorflow/core/profiler/convert/xplane_to_tool_names.h"
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/convert/repository.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_utils.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
struct XPlaneToToolsTestCase {
std::string test_name;
std::string_view plane_name;
bool has_hlo_module;
bool has_dcn_collective_stats;
std::vector<std::string> expected_tools;
};
SessionSnapshot CreateSessionSnapshot(std::unique_ptr<XSpace> xspace,
bool has_hlo_module,
bool has_dcn_collective_stats) {
std::string test_name =
::testing::UnitTest::GetInstance()->current_test_info()->name();
std::string path = absl::StrCat("ram:
std::unique_ptr<WritableFile> xplane_file;
tensorflow::Env::Default()
->NewAppendableFile(absl::StrCat(path, "hostname.xplane.pb"),
&xplane_file)
.IgnoreError();
std::vector<std::string> paths = {path};
if (has_hlo_module) {
tensorflow::Env::Default()
->NewAppendableFile(absl::StrCat(path, "module_name.hlo_proto.pb"),
&xplane_file)
.IgnoreError();
} else {
tensorflow::Env::Default()
->NewAppendableFile(absl::StrCat(path, "NO_MODULE.hlo_proto.pb"),
&xplane_file)
.IgnoreError();
}
if (has_dcn_collective_stats) {
tensorflow::Env::Default()
->NewAppendableFile(
absl::StrCat(path, "hostname.dcn_collective_stats.pb"),
&xplane_file)
.IgnoreError();
tensorflow::Env::Default()
->NewAppendableFile(
absl::StrCat(path, "ALL_HOSTS.dcn_collective_stats.pb"),
&xplane_file)
.IgnoreError();
} else {
tensorflow::Env::Default()
->NewAppendableFile(
absl::StrCat(path, "NO_HOST.dcn_collective_stats.pb"), &xplane_file)
.IgnoreError();
}
std::vector<std::unique_ptr<XSpace>> xspaces;
xspaces.push_back(std::move(xspace));
absl::StatusOr<SessionSnapshot> session_snapshot =
SessionSnapshot::Create(paths, std::move(xspaces));
TF_CHECK_OK(session_snapshot.status());
return std::move(session_snapshot.value());
}
using XPlaneToToolsTest = ::testing::TestWithParam<XPlaneToToolsTestCase>;
TEST_P(XPlaneToToolsTest, ToolsList) {
const XPlaneToToolsTestCase& test_case = GetParam();
auto xspace = std::make_unique<XSpace>();
FindOrAddMutablePlaneWithName(xspace.get(), test_case.plane_name);
SessionSnapshot sessionSnapshot =
CreateSessionSnapshot(std::move(xspace), test_case.has_hlo_module,
test_case.has_dcn_collective_stats);
absl::StatusOr<std::string> toolsString =
GetAvailableToolNames(sessionSnapshot);
ASSERT_TRUE(toolsString.ok());
std::vector<std::string> tools = absl::StrSplit(toolsString.value(), ',');
std::vector<std::string> expected_tools = {"trace_viewer",
"overview_page",
"input_pipeline_analyzer",
"framework_op_stats",
"memory_profile",
"pod_viewer",
"tf_data_bottleneck_analysis",
"op_profile"};
expected_tools.insert(expected_tools.end(), test_case.expected_tools.begin(),
test_case.expected_tools.end());
EXPECT_THAT(tools, ::testing::UnorderedElementsAreArray(expected_tools));
}
INSTANTIATE_TEST_SUITE_P(
XPlaneToToolsTests, XPlaneToToolsTest,
::testing::ValuesIn<XPlaneToToolsTestCase>({
{"ToolsForTpuWithoutHloModule", kTpuPlanePrefix, false, false, {}},
{"ToolsForTpuWithHloModule",
kTpuPlanePrefix,
true,
false,
{"graph_viewer", "memory_viewer"}},
{"ToolsForGpuWithoutHloModule",
kGpuPlanePrefix,
false,
false,
{"kernel_stats"}},
{"ToolsForGpuWithHloModule",
kGpuPlanePrefix,
true,
false,
{"kernel_stats", "graph_viewer", "memory_viewer"}},
{"ToolsForTpuWithDcnCollectiveStats",
kTpuPlanePrefix,
false,
true,
{"dcn_collective_stats"}},
}),
[](const ::testing::TestParamInfo<XPlaneToToolsTest::ParamType>& info) {
return info.param.test_name;
});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_tool_names.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_tool_names_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c981c3cc-c1bc-48ff-92ea-e891c374416e | cpp | tensorflow/tensorflow | indexing_map_serialization | third_party/xla/xla/service/gpu/model/indexing_map_serialization.cc | third_party/xla/xla/service/gpu/model/indexing_map_serialization_test.cc | #include "xla/service/gpu/model/indexing_map_serialization.h"
#include <algorithm>
#include <cctype>
#include <cstdint>
#include <optional>
#include <ostream>
#include <sstream>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/AsmParser/AsmParser.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Support/LLVM.h"
#include "xla/service/gpu/model/indexing_map.h"
namespace xla {
namespace gpu {
namespace {
using llvm::SmallVector;
using llvm::SmallVectorImpl;
using llvm::StringRef;
using mlir::AffineBinaryOpExpr;
using mlir::AffineConstantExpr;
using mlir::AffineDimExpr;
using mlir::AffineExpr;
using mlir::AffineExprKind;
using mlir::AffineMap;
using mlir::AffineMapAttr;
using mlir::AffineSymbolExpr;
using mlir::ArrayRef;
using mlir::MLIRContext;
enum class Delimeter { kParen, kBracket };
struct Token {
enum class Kind {
kVarName,
kIntLiteral,
kBoolLiteral,
kKeywordDomain,
kKeywordIn,
kKeywordIsSimplified,
kPlus,
kMinus,
kTimes,
kFloorDiv,
kMod,
kArrow,
kLParen,
kRParen,
kLBracket,
kRBracket,
kComma,
kColon,
kError,
kEOF
};
StringRef spelling;
Token::Kind kind;
};
Token::Kind GetSingleCharTokenType(char c) {
switch (c) {
case '(':
return Token::Kind::kLParen;
case ')':
return Token::Kind::kRParen;
case '[':
return Token::Kind::kLBracket;
case ']':
return Token::Kind::kRBracket;
case ',':
return Token::Kind::kComma;
case ':':
return Token::Kind::kColon;
case '+':
return Token::Kind::kPlus;
case '-':
return Token::Kind::kMinus;
case '*':
return Token::Kind::kTimes;
default:
return Token::Kind::kError;
}
}
bool IsPartOfAffineExpr(Token token) {
return token.kind == Token::Kind::kVarName ||
token.kind == Token::Kind::kIntLiteral ||
token.kind == Token::Kind::kPlus ||
token.kind == Token::Kind::kMinus ||
token.kind == Token::Kind::kTimes ||
token.kind == Token::Kind::kFloorDiv ||
token.kind == Token::Kind::kMod;
}
class Parser {
public:
explicit Parser(llvm::StringRef input) : input_(input), it_(input.begin()) {
current_token_ = GetNextTokenImpl();
}
const Token& GetCurrentToken() const { return current_token_; }
void Advance() {
if (current_token_.kind == Token::Kind::kError ||
current_token_.kind == Token::Kind::kEOF) {
return;
}
current_token_ = GetNextTokenImpl();
}
Token GetNextToken() {
Advance();
return current_token_;
}
bool ConsumeToken(Token::Kind kind);
bool ParseVarName(std::string* var_name);
bool ParseInt(int64_t* value);
bool ParseBool(bool* boolean);
bool ParseInterval(Interval* interval);
bool ParseAffineExprString(std::string* affine_expr_str);
bool ParseCommaSeparatedVarList(
Delimeter delimeter,
llvm::function_ref<bool(Parser& parser)> parse_element_fn);
private:
void ConsumeWhitespace() {
while (it_ != input_.end() && std::isspace(*it_)) ++it_;
}
Token GetNextTokenImpl();
llvm::StringRef input_;
llvm::StringRef::iterator it_;
Token current_token_;
};
bool Parser::ParseVarName(std::string* var_name) {
if (current_token_.kind != Token::Kind::kVarName) {
llvm::errs() << "Expected var name, got: " << current_token_.spelling
<< "\n";
return false;
}
*var_name = current_token_.spelling.str();
Advance();
return true;
}
bool Parser::ParseInt(int64_t* value) {
int val;
if (current_token_.kind != Token::Kind::kIntLiteral ||
current_token_.spelling.getAsInteger(0, val)) {
llvm::errs() << "Expected int literal, got: " << current_token_.spelling
<< "\n";
return false;
}
*value = static_cast<int64_t>(val);
Advance();
return true;
}
bool Parser::ParseBool(bool* boolean) {
if (current_token_.kind != Token::Kind::kBoolLiteral) {
llvm::errs() << "Expected bool literal, got: " << current_token_.spelling
<< "\n";
return false;
}
*boolean = current_token_.spelling.compare("true") == 0;
Advance();
return true;
}
bool Parser::ParseInterval(Interval* interval) {
if (!ConsumeToken(Token::Kind::kLBracket) || !ParseInt(&interval->lower) ||
!ConsumeToken(Token::Kind::kComma) || !ParseInt(&interval->upper) ||
!ConsumeToken(Token::Kind::kRBracket)) {
return false;
}
return true;
}
bool Parser::ParseAffineExprString(std::string* affine_expr_str) {
unsigned num_unmatched_parens = 0;
while (true) {
if (IsPartOfAffineExpr(current_token_)) {
affine_expr_str->append(current_token_.spelling);
affine_expr_str->push_back(' ');
Advance();
continue;
}
if (ConsumeToken(Token::Kind::kLParen)) {
affine_expr_str->push_back('(');
++num_unmatched_parens;
continue;
}
if (current_token_.kind == Token::Kind::kRParen &&
num_unmatched_parens > 0) {
affine_expr_str->push_back(')');
--num_unmatched_parens;
Advance();
continue;
}
break;
}
return current_token_.kind != Token::Kind::kError;
}
bool Parser::ParseCommaSeparatedVarList(
Delimeter delimeter,
llvm::function_ref<bool(Parser& parser)> parse_element_fn) {
auto left_delimiter = delimeter == Delimeter::kParen ? Token::Kind::kLParen
: Token::Kind::kLBracket;
auto right_delimiter = delimeter == Delimeter::kParen
? Token::Kind::kRParen
: Token::Kind::kRBracket;
if (!ConsumeToken(left_delimiter)) {
return false;
}
if (ConsumeToken(right_delimiter)) {
return true;
}
std::string element;
while (parse_element_fn(*this)) {
if (ConsumeToken(Token::Kind::kComma)) continue;
return ConsumeToken(right_delimiter);
}
return false;
}
bool Parser::ConsumeToken(Token::Kind kind) {
Token token = GetCurrentToken();
if (token.kind != kind) {
return false;
}
GetNextToken();
return true;
}
Token Parser::GetNextTokenImpl() {
ConsumeWhitespace();
if (it_ == input_.end()) {
return Token{"", Token::Kind::kEOF};
}
auto start = it_;
if (std::isalpha(*it_)) {
while (it_ != input_.end() &&
(std::isalpha(*it_) || std::isdigit(*it_) || *it_ == '_')) {
++it_;
}
StringRef spelling = input_.substr(start - input_.data(), it_ - start);
if (spelling == "true" || spelling == "false") {
return Token{spelling, Token::Kind::kBoolLiteral};
}
if (spelling == "domain") {
return Token{spelling, Token::Kind::kKeywordDomain};
}
if (spelling == "in") {
return Token{spelling, Token::Kind::kKeywordIn};
}
if (spelling == "mod") {
return Token{spelling, Token::Kind::kMod};
}
if (spelling == "floorDiv") {
return Token{spelling, Token::Kind::kFloorDiv};
}
return Token{spelling, Token::Kind::kVarName};
}
if (std::isdigit(*it_)) {
auto start = it_;
while (it_ != input_.end() && std::isdigit(*it_)) {
++it_;
}
StringRef spelling = input_.substr(start - input_.data(), it_ - start);
return Token{spelling, Token::Kind::kIntLiteral};
}
if (*it_ == '-') {
++it_;
if (it_ != input_.end()) {
if (*it_ == '>') {
++it_;
return Token{"->", Token::Kind::kArrow};
} else if (std::isdigit(*it_)) {
auto start = it_ - 1;
while (it_ != input_.end() && std::isdigit(*it_)) {
++it_;
}
StringRef spelling = input_.substr(start - input_.data(), it_ - start);
return Token{spelling, Token::Kind::kIntLiteral};
} else {
return Token{"-", Token::Kind::kMinus};
}
}
}
StringRef spelling = input_.substr(start - input_.data(), 1);
return Token{spelling, GetSingleCharTokenType(*(it_++))};
}
bool ParseVarNames(Parser& parser, Delimeter delimeter,
SmallVectorImpl<std::string>& var_names) {
auto parse_var_name_fn = [&](Parser& parser) {
std::string var_name;
if (!parser.ParseVarName(&var_name)) {
return false;
}
var_names.push_back(var_name);
return true;
};
return parser.ParseCommaSeparatedVarList(delimeter, parse_var_name_fn);
}
bool ParseAffineMapResults(Parser& parser,
SmallVectorImpl<std::string>& affine_expr_strs) {
auto parse_var_name_fn = [&](Parser& parser) {
std::string affine_expr_str;
if (!parser.ParseAffineExprString(&affine_expr_str)) {
return false;
}
affine_expr_strs.push_back(affine_expr_str);
return true;
};
return parser.ParseCommaSeparatedVarList(Delimeter::kParen,
parse_var_name_fn);
}
bool ParseAffineExprsWithMLIR(ArrayRef<std::string> dim_var_names,
ArrayRef<std::string> symbol_var_names,
ArrayRef<std::string> affine_expr_strings,
MLIRContext* context,
SmallVectorImpl<AffineExpr>& affine_exprs) {
std::stringstream ss;
ss << "affine_map<(" << absl::StrJoin(dim_var_names, ", ") << ") ";
if (!symbol_var_names.empty()) {
ss << '[' << absl::StrJoin(symbol_var_names, ", ") << "] ";
}
ss << " -> (" << absl::StrJoin(affine_expr_strings, ", ") << ")>";
auto affine_map_attr = mlir::parseAttribute(ss.str(), context);
if (!affine_map_attr) {
llvm::errs() << "Failed to parse affine map: " << ss.str() << "\n";
return false;
}
AffineMap affine_map = mlir::cast<AffineMapAttr>(affine_map_attr).getValue();
affine_exprs = llvm::to_vector(affine_map.getResults());
return true;
}
std::string GetVarName(int64_t id, std::string_view name,
std::string_view prefix) {
if (!name.empty()) {
return std::string(name);
}
return absl::StrFormat("%s%d", prefix, id);
}
std::string GetDimVarName(int64_t dim_id, std::string_view dim_name = "") {
return GetVarName(dim_id, dim_name, "d");
}
std::string GetRangeVarName(int64_t range_id,
std::string_view range_name = "") {
return GetVarName(range_id, range_name, "s");
}
std::string GetRTVarName(int64_t rt_id, std::string_view rt_name = "") {
return GetVarName(rt_id, rt_name, "rt");
}
std::string GetAffineSymbolName(
int64_t id, absl::Span<const std::string> symbol_names = {}) {
if (id < symbol_names.size()) {
const auto& name = symbol_names[id];
if (!name.empty()) {
return name;
}
}
return absl::StrFormat("%s%d", "s", id);
}
std::string GetAffineDimensionName(
int64_t id, absl::Span<const std::string> dim_names = {}) {
if (id < dim_names.size()) {
const auto& name = dim_names[id];
if (!name.empty()) {
return name;
}
}
return absl::StrFormat("%s%d", "d", id);
}
void PrintAffineExprImpl(const AffineExpr affine_expr,
absl::Span<const std::string> dim_names,
absl::Span<const std::string> symbol_names,
bool add_parentheses, llvm::raw_ostream& os) {
const char* binopSpelling = nullptr;
switch (affine_expr.getKind()) {
case AffineExprKind::SymbolId: {
unsigned symbol_id =
mlir::cast<AffineSymbolExpr>(affine_expr).getPosition();
os << GetAffineSymbolName(symbol_id, symbol_names);
return;
}
case AffineExprKind::DimId: {
unsigned dim_id = mlir::cast<AffineDimExpr>(affine_expr).getPosition();
os << GetAffineDimensionName(dim_id, dim_names);
return;
}
case AffineExprKind::Constant:
os << mlir::cast<AffineConstantExpr>(affine_expr).getValue();
return;
case AffineExprKind::Add:
binopSpelling = " + ";
break;
case AffineExprKind::Mul:
binopSpelling = " * ";
break;
case AffineExprKind::FloorDiv:
binopSpelling = " floordiv ";
break;
case AffineExprKind::CeilDiv:
binopSpelling = " ceildiv ";
break;
case AffineExprKind::Mod:
binopSpelling = " mod ";
break;
}
auto binOp = mlir::cast<AffineBinaryOpExpr>(affine_expr);
AffineExpr lhsExpr = binOp.getLHS();
AffineExpr rhsExpr = binOp.getRHS();
if (binOp.getKind() != AffineExprKind::Add) {
if (add_parentheses) {
os << '(';
}
auto rhsConst = mlir::dyn_cast<AffineConstantExpr>(rhsExpr);
if (rhsConst && binOp.getKind() == AffineExprKind::Mul &&
rhsConst.getValue() == -1) {
os << "-";
PrintAffineExprImpl(lhsExpr, dim_names, symbol_names,
true, os);
if (add_parentheses) {
os << ')';
}
return;
}
PrintAffineExprImpl(lhsExpr, dim_names, symbol_names,
true, os);
os << binopSpelling;
PrintAffineExprImpl(rhsExpr, dim_names, symbol_names,
true, os);
if (add_parentheses) {
os << ')';
}
return;
}
if (add_parentheses) {
os << '(';
}
if (auto rhs = mlir::dyn_cast<AffineBinaryOpExpr>(rhsExpr)) {
if (rhs.getKind() == AffineExprKind::Mul) {
AffineExpr rrhsExpr = rhs.getRHS();
if (auto rrhs = mlir::dyn_cast<AffineConstantExpr>(rrhsExpr)) {
if (rrhs.getValue() == -1) {
PrintAffineExprImpl(lhsExpr, dim_names, symbol_names,
false, os);
os << " - ";
if (rhs.getLHS().getKind() == AffineExprKind::Add) {
PrintAffineExprImpl(rhs.getLHS(), dim_names, symbol_names,
true, os);
} else {
PrintAffineExprImpl(rhs.getLHS(), dim_names, symbol_names,
false, os);
}
if (add_parentheses) {
os << ')';
}
return;
}
if (rrhs.getValue() < -1) {
PrintAffineExprImpl(lhsExpr, dim_names, symbol_names,
false, os);
os << " - ";
PrintAffineExprImpl(rhs.getLHS(), dim_names, symbol_names,
true, os);
os << " * " << -rrhs.getValue();
if (add_parentheses) {
os << ')';
}
return;
}
}
}
}
if (auto rhsConst = mlir::dyn_cast<AffineConstantExpr>(rhsExpr)) {
if (rhsConst.getValue() < 0) {
PrintAffineExprImpl(lhsExpr, dim_names, symbol_names,
false, os);
os << " - " << -rhsConst.getValue();
if (add_parentheses) {
os << ')';
}
return;
}
}
PrintAffineExprImpl(lhsExpr, dim_names, symbol_names,
false, os);
os << " + ";
PrintAffineExprImpl(rhsExpr, dim_names, symbol_names,
false, os);
if (add_parentheses) {
os << ')';
}
}
}
std::optional<IndexingMap> ParseIndexingMap(llvm::StringRef input,
MLIRContext* context) {
Parser parser(input);
SmallVector<std::string, 8> dim_var_names;
SmallVector<std::string, 4> symbol_var_names;
if (!ParseVarNames(parser, Delimeter::kParen, dim_var_names) ||
(parser.GetCurrentToken().kind == Token::Kind::kLBracket &&
!ParseVarNames(parser, Delimeter::kBracket, symbol_var_names))) {
llvm::errs() << "Failed to parse variable names\n";
return std::nullopt;
}
SmallVector<std::string, 3> affine_expr_strs;
if (!parser.ConsumeToken(Token::Kind::kArrow) ||
!ParseAffineMapResults(parser, affine_expr_strs)) {
llvm::errs() << "Failed to parse affine map results\n";
return std::nullopt;
}
int num_affine_map_results = affine_expr_strs.size();
if (dim_var_names.empty() && symbol_var_names.empty()) {
if (num_affine_map_results != 0 ||
parser.GetCurrentToken().kind != Token::Kind::kEOF) {
llvm::errs() << "Expected an empty indexing map\n";
return std::nullopt;
}
return IndexingMap{AffineMap::get(context), {},
{}, {}};
}
if (!parser.ConsumeToken(Token::Kind::kComma) ||
!parser.ConsumeToken(Token::Kind::kKeywordDomain) ||
!parser.ConsumeToken(Token::Kind::kColon)) {
llvm::errs() << "Failed to parse domain keyword\n";
return std::nullopt;
}
std::vector<IndexingMap::Variable> dim_vars;
for (const auto& [dim_id, dim_name] : llvm::enumerate(dim_var_names)) {
std::string var_name;
Interval interval;
if (!parser.ParseVarName(&var_name) ||
!parser.ConsumeToken(Token::Kind::kKeywordIn) ||
!parser.ParseInterval(&interval) ||
(parser.GetCurrentToken().kind != Token::Kind::kEOF &&
!parser.ConsumeToken(Token::Kind::kComma))) {
llvm::errs() << "Failed to parse DimVar\n";
return std::nullopt;
}
if (var_name != dim_name) {
llvm::errs() << "Dimension name mismatch\n";
return std::nullopt;
}
if (var_name == GetDimVarName(dim_id)) {
var_name = "";
}
dim_vars.push_back(IndexingMap::Variable{interval, var_name});
}
std::vector<IndexingMap::Variable> range_vars;
for (const auto& [index, range_name] : llvm::enumerate(symbol_var_names)) {
std::string var_name;
Interval interval;
if (!parser.ParseVarName(&var_name) ||
!parser.ConsumeToken(Token::Kind::kKeywordIn) ||
!parser.ParseInterval(&interval) ||
(parser.GetCurrentToken().kind != Token::Kind::kEOF &&
!parser.ConsumeToken(Token::Kind::kComma))) {
llvm::errs() << "Failed to parse RangeVar\n";
return std::nullopt;
}
if (var_name != range_name) {
llvm::errs() << "Symbol name mismatch\n";
return std::nullopt;
}
if (var_name == GetRangeVarName(index)) {
var_name = "";
}
range_vars.push_back(IndexingMap::Variable{interval, var_name});
}
SmallVector<Interval> constraint_bounds;
while (!parser.ConsumeToken(Token::Kind::kEOF)) {
std::string affine_expr_str;
Interval interval;
if (!parser.ParseAffineExprString(&affine_expr_str) ||
!parser.ConsumeToken(Token::Kind::kKeywordIn) ||
!parser.ParseInterval(&interval) ||
(parser.GetCurrentToken().kind != Token::Kind::kEOF &&
!parser.ConsumeToken(Token::Kind::kComma))) {
llvm::errs() << "Failed to parse constraint\n";
return std::nullopt;
}
affine_expr_strs.push_back(affine_expr_str);
constraint_bounds.push_back(interval);
}
SmallVector<AffineExpr> affine_exprs;
if (!ParseAffineExprsWithMLIR(dim_var_names, symbol_var_names,
affine_expr_strs, context, affine_exprs)) {
return std::nullopt;
}
ArrayRef<AffineExpr> affine_map_results =
ArrayRef(affine_exprs).take_front(num_affine_map_results);
ArrayRef<AffineExpr> constraint_exprs =
ArrayRef(affine_exprs).drop_front(num_affine_map_results);
SmallVector<std::pair<AffineExpr, Interval>> constraints;
constraints.reserve(constraint_exprs.size());
for (const auto& [expr, bounds] :
llvm::zip(constraint_exprs, constraint_bounds)) {
constraints.push_back(std::make_pair(expr, bounds));
}
auto map = AffineMap::get(dim_vars.size(), range_vars.size(),
affine_map_results, context);
return IndexingMap{map, std::move(dim_vars), std::move(range_vars),
{}, constraints};
}
std::string ToString(AffineExpr affine_expr,
absl::Span<const std::string> dim_names,
absl::Span<const std::string> symbol_names) {
std::string s;
llvm::raw_string_ostream ss(s);
PrintAffineExprImpl(affine_expr, dim_names, symbol_names,
false, ss);
return s;
}
std::string ToString(AffineExpr affine_expr) {
return ToString(affine_expr, {}, {});
}
std::ostream& operator<<(std::ostream& out, AffineExpr affine_expr) {
out << ToString(affine_expr);
return out;
}
std::string ToString(AffineMap affine_map,
absl::Span<const std::string> dim_names,
absl::Span<const std::string> symbol_names) {
CHECK_EQ(dim_names.size(), affine_map.getNumDims());
CHECK_EQ(symbol_names.size(), affine_map.getNumSymbols());
std::string s;
llvm::raw_string_ostream ss(s);
ss << '(' << absl::StrJoin(dim_names, ", ") << ')';
if (affine_map.getNumSymbols() != 0) {
ss << '[' << absl::StrJoin(symbol_names, ", ") << ']';
}
ss << " -> (";
llvm::interleaveComma(affine_map.getResults(), ss, [&](AffineExpr expr) {
PrintAffineExprImpl(expr, dim_names, symbol_names,
false, ss);
});
ss << ')';
return s;
}
std::string ToString(AffineMap affine_map) {
int dim_count = affine_map.getNumDims();
SmallVector<std::string, 3> dim_names;
dim_names.reserve(affine_map.getNumDims());
for (int64_t dim_id = 0; dim_id < dim_count; ++dim_id) {
dim_names.push_back(GetAffineDimensionName(dim_id));
}
int symbol_count = affine_map.getNumSymbols();
SmallVector<std::string, 3> symbol_names;
symbol_names.reserve(affine_map.getNumSymbols());
for (int64_t symbol_id = 0; symbol_id < symbol_count; ++symbol_id) {
symbol_names.push_back(GetAffineSymbolName(symbol_id));
}
return ToString(affine_map, dim_names, symbol_names);
}
std::ostream& operator<<(std::ostream& out, AffineMap affine_map) {
out << ToString(affine_map);
return out;
}
std::string ToString(const IndexingMap& indexing_map,
absl::Span<const std::string> dim_names,
absl::Span<const std::string> range_names,
absl::Span<const std::string> rt_names) {
std::stringstream ss;
if (indexing_map.IsKnownEmpty()) {
ss << "KNOWN EMPTY\n";
return ss.str();
}
const auto& dim_vars = indexing_map.GetDimVars();
CHECK_EQ(dim_names.size(), dim_vars.size());
const auto& range_vars = indexing_map.GetRangeVars();
CHECK_EQ(range_names.size(), range_vars.size());
const auto& rt_vars = indexing_map.GetRTVars();
CHECK_EQ(rt_names.size(), rt_vars.size());
SmallVector<std::string, 3> symbol_names;
symbol_names.reserve(range_names.size() + rt_names.size());
symbol_names.append(range_names.begin(), range_names.end());
symbol_names.append(rt_names.begin(), rt_names.end());
ss << ToString(indexing_map.GetAffineMap(), dim_names, symbol_names);
if (dim_vars.empty() && range_vars.empty() && rt_vars.empty()) {
return ss.str();
}
ss << ", domain: ";
int64_t remaining_vars_to_print =
dim_vars.size() + range_vars.size() + rt_vars.size();
for (const auto& [index, dim_var] : llvm::enumerate(dim_vars)) {
ss << dim_names[index] << " in " << dim_var.bounds;
if (--remaining_vars_to_print > 0) {
ss << ", ";
}
}
for (const auto& [index, range_var] : llvm::enumerate(range_vars)) {
ss << symbol_names[index] << " in " << range_var.bounds;
if (--remaining_vars_to_print > 0) {
ss << ", ";
}
}
for (const auto& [index, rt_var] : llvm::enumerate(rt_vars)) {
ss << rt_names[index] << " in " << rt_var.bounds;
if (--remaining_vars_to_print > 0) {
ss << ", ";
}
}
std::vector<std::string> expr_range_strings;
const auto& constraints = indexing_map.GetConstraints();
expr_range_strings.reserve(constraints.size());
for (const auto& [expr, range] : constraints) {
expr_range_strings.push_back(absl::StrCat(
ToString(expr, dim_names, symbol_names), " in ", range.ToString()));
}
std::sort(expr_range_strings.begin(), expr_range_strings.end());
if (!expr_range_strings.empty()) {
ss << ", " << absl::StrJoin(expr_range_strings, ", ");
}
return ss.str();
}
std::string ToString(const IndexingMap& indexing_map) {
SmallVector<std::string, 3> dim_names;
dim_names.reserve(indexing_map.GetDimensionCount());
for (const auto& [index, dim_var] :
llvm::enumerate(indexing_map.GetDimVars())) {
dim_names.push_back(GetDimVarName(index, dim_var.name));
}
SmallVector<std::string, 3> range_names;
range_names.reserve(indexing_map.GetRangeVarsCount());
for (const auto& [index, range_var] :
llvm::enumerate(indexing_map.GetRangeVars())) {
range_names.push_back(GetRangeVarName(index, range_var.name));
}
SmallVector<std::string, 3> rt_names;
rt_names.reserve(indexing_map.GetRTVarsCount());
for (const auto& [index, rt_var] :
llvm::enumerate(indexing_map.GetRTVars())) {
rt_names.push_back(GetRTVarName(index, rt_var.name));
}
return ToString(indexing_map, dim_names, range_names, rt_names);
}
std::ostream& operator<<(std::ostream& out, const IndexingMap& indexing_map) {
out << ToString(indexing_map);
return out;
}
}
} | #include "xla/service/gpu/model/indexing_map_serialization.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/service/gpu/model/indexing_test_utils.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::HasSubstr;
class IndexingMapSerializationTest : public HloTestBase {
public:
mlir::MLIRContext mlir_context_;
void ParseAndCheck(absl::string_view indexing_map_str) {
auto indexing_map = ParseIndexingMap(indexing_map_str, &mlir_context_);
ASSERT_TRUE(indexing_map.has_value());
EXPECT_THAT(ToString(*indexing_map), MatchIndexingString(indexing_map_str));
}
};
TEST_F(IndexingMapSerializationTest, EmptyMap) { ParseAndCheck("() -> ()"); }
TEST_F(IndexingMapSerializationTest, DimsOnly) {
ParseAndCheck(R"(
(d0, d1) -> (d0 mod 2 + d1),
domain:
d0 in [0, 3],
d1 in [-4, 4]
)");
}
TEST_F(IndexingMapSerializationTest, SymbolsOnly) {
ParseAndCheck(R"(
()[s0, s1] -> (s0 floordiv s1),
domain:
s0 in [0, 3],
s1 in [0, 4]
)");
}
TEST_F(IndexingMapSerializationTest, DimsAndSymbolsNoConstraints) {
ParseAndCheck(R"(
(d0, d1)[s0, s1, s2] -> (s2, d0 + d1, s1, s0),
domain:
d0 in [0, 3],
d1 in [0, 4],
s0 in [0, 1],
s1 in [0, 1],
s2 in [0, 3]
)");
}
TEST_F(IndexingMapSerializationTest, DimsAndSymbolsAndConstraints) {
ParseAndCheck(R"(
(d0, d1)[s0, s1, s2] -> (s2, d0 + d1, s1, s0),
domain:
d0 in [0, 3],
d1 in [0, 4],
s0 in [0, 1],
s1 in [0, 1],
s2 in [0, 3],
d0 mod 4 in [0, 0],
d1 + s0 in [0, 45]
)");
}
TEST_F(IndexingMapSerializationTest, AffineExprsWithParens) {
ParseAndCheck(R"(
(d0, d1)[s0, s1] -> ((d0 + d0 mod 3) floordiv 3
+ s0 + (s0 * 2) mod 3 + (d0 + s0) mod 3),
domain:
d0 in [0, 9],
d1 in [0, 19],
s0 in [0, 29],
s1 in [0, 39]
)");
}
TEST_F(IndexingMapSerializationTest, CustomNames) {
ParseAndCheck(R"(
(th_x, bl_x)[s0, vector_elem, s2] -> (s2, th_x + bl_x, vector_elem, s0),
domain:
th_x in [0, 3],
bl_x in [0, 4],
s0 in [0, 1],
vector_elem in [0, 1],
s2 in [0, 3],
bl_x + s0 in [0, 45],
th_x mod 4 in [0, 0]
)");
}
TEST_F(IndexingMapSerializationTest, AffineMapPrinterTest) {
mlir::AffineExpr d0, d1, s0, s1;
mlir::bindDims(&mlir_context_, d0, d1);
mlir::bindSymbols(&mlir_context_, s0, s1);
auto map = mlir::AffineMap::get(2, 2, {d0 + d1.floorDiv(8), s0 + s1 % 16},
&mlir_context_);
EXPECT_THAT(ToString(map, {"offset", "d1"}, {"s0", "linear_index"}),
HasSubstr("(offset, d1)[s0, linear_index] -> "
"(offset + d1 floordiv 8, s0 + linear_index mod 16)"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/indexing_map_serialization.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/indexing_map_serialization_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
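The serializer and test above round-trip the textual form through ParseIndexingMap and ToString. A minimal sketch of that round-trip, assuming the XLA/MLIR build environment and the same headers the test uses; the helper name RoundTrip is illustrative and not part of the library:

#include <optional>
#include <string>

#include "absl/strings/string_view.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/service/gpu/model/indexing_map_serialization.h"

// Parses the textual form and serializes it back. Returns std::nullopt when
// the input does not parse; the parser itself logs which production failed.
std::optional<std::string> RoundTrip(absl::string_view text,
                                     mlir::MLIRContext* context) {
  auto indexing_map = xla::gpu::ParseIndexingMap(text, context);
  if (!indexing_map.has_value()) return std::nullopt;
  return xla::gpu::ToString(*indexing_map);
}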
18f9c3b8-6cfb-4ca6-8922-7f83ac1b5560 | cpp | tensorflow/tensorflow | matrix_set_diag | tensorflow/lite/kernels/matrix_set_diag.cc | tensorflow/lite/kernels/matrix_set_diag_test.cc | #include <stdint.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace matrix_set_diag {
constexpr int kInputTensor = 0;
constexpr int kDiagonalTensor = 1;
constexpr int kOutputTensor = 0;
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteIntArray* input_dims = input->dims;
int input_dims_size = input_dims->size;
TF_LITE_ENSURE(context, input_dims_size >= 2);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(input_dims_size);
for (int i = 0; i < input_dims_size; i++) {
output_shape->data[i] = input_dims->data[i];
}
output->type = input->type;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output, output_shape));
return kTfLiteOk;
}
template <typename T>
void FillDiagImpl(const T* in, const T* diag, T* out, const int batch_size,
const int row_size, const int col_size) {
int idx = 0;
for (int b = 0; b < batch_size; b++) {
for (int i = 0; i < row_size; i++) {
for (int j = 0; j < col_size; ++j) {
if (i == j) {
out[i * col_size + j] = diag[idx];
idx++;
} else {
out[i * col_size + j] = in[i * col_size + j];
}
}
}
out += row_size * col_size;
in += row_size * col_size;
}
}
template <typename T>
void FillDiag(const TfLiteTensor* input, const TfLiteTensor* diag,
TfLiteTensor* output, const int batch_size, const int row_size,
const int col_size) {
FillDiagImpl<T>(GetTensorData<T>(input), GetTensorData<T>(diag),
GetTensorData<T>(output), batch_size, row_size, col_size);
}
void FillDiagHelper(const TfLiteTensor* input, const TfLiteTensor* diag,
TfLiteTensor* output) {
const int num_output_dims = output->dims->size;
int batch_size = 1;
for (int i = 0; i < num_output_dims - 2; ++i) {
batch_size *= output->dims->data[i];
}
const int row_size = output->dims->data[num_output_dims - 2];
const int col_size = output->dims->data[num_output_dims - 1];
switch (output->type) {
case kTfLiteInt64: {
return FillDiag<int64_t>(input, diag, output, batch_size, row_size,
col_size);
}
case kTfLiteInt32: {
return FillDiag<int32_t>(input, diag, output, batch_size, row_size,
col_size);
}
case kTfLiteInt16: {
return FillDiag<int16_t>(input, diag, output, batch_size, row_size,
col_size);
}
case kTfLiteInt8: {
return FillDiag<int8_t>(input, diag, output, batch_size, row_size,
col_size);
}
case kTfLiteUInt8: {
return FillDiag<uint8_t>(input, diag, output, batch_size, row_size,
col_size);
}
default:
return FillDiag<float>(input, diag, output, batch_size, row_size,
col_size);
}
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* diag;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kDiagonalTensor, &diag));
FillDiagHelper(input, diag, output);
return kTfLiteOk;
}
}
TfLiteRegistration* Register_MATRIX_SET_DIAG() {
static TfLiteRegistration r = {nullptr, nullptr, matrix_set_diag::Prepare,
matrix_set_diag::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
template <typename T>
class MatrixSetDiagOpModel : public SingleOpModel {
public:
explicit MatrixSetDiagOpModel(const TensorData& input,
const TensorData& diag) {
input_ = AddInput(input);
diag_ = AddInput(diag);
output_ = AddOutput({input.type, {}});
SetBuiltinOp(BuiltinOperator_MATRIX_SET_DIAG,
BuiltinOptions_MatrixSetDiagOptions,
CreateMatrixSetDiagOptions(builder_).Union());
BuildInterpreter({GetShape(input_), GetShape(diag_)});
}
int input() { return input_; }
int diag() { return diag_; }
std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
TfLiteType GetOutputType() {
TfLiteTensor* t = interpreter_->tensor(output_);
return t->type;
}
private:
int input_;
int diag_;
int output_;
};
template <typename T>
class MatrixSetDiagOpTest : public ::testing::Test {};
using TypesUnderTest =
::testing::Types<TypeUnion<int32_t>, TypeUnion<float>, TypeUnion<int16_t>,
TypeUnion<int8_t>, TypeUnion<uint8_t>>;
TYPED_TEST_SUITE(MatrixSetDiagOpTest, TypesUnderTest);
TYPED_TEST(MatrixSetDiagOpTest, ThreeByThreeDiagScatter) {
MatrixSetDiagOpModel<typename TypeParam::ScalarType> model(
{TypeParam::tensor_type, {3, 3}}, {TypeParam::tensor_type, {3}});
model.template PopulateTensor<typename TypeParam::ScalarType>(model.input(),
{7, 1, 2,
3, 8, 4,
5, 6, 9});
model.template PopulateTensor<typename TypeParam::ScalarType>(model.diag(),
{0, 4, 2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3, 3));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({0, 1, 2,
3, 4, 4,
5, 6, 2}));
EXPECT_THAT(model.GetOutputType(), TypeParam::tflite_type);
}
TEST(MatrixSetDiagTest, Int32TestMoreColumnsThanRows) {
MatrixSetDiagOpModel<int32_t> model({TensorType_INT32, {2, 3}},
{TensorType_INT32, {2}});
model.PopulateTensor<int32_t>(model.input(), {0, 0, 0,
9, 9, 9});
model.PopulateTensor<int32_t>(model.diag(), {1, 1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(2, 3));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 0, 0,
9, 1, 9}));
EXPECT_THAT(model.GetOutputType(), TfLiteType::kTfLiteInt32);
}
TEST(MatrixSetDiagTest, Int32TestTwoDimDiag) {
MatrixSetDiagOpModel<int32_t> model({TensorType_INT32, {2, 4, 4}},
{TensorType_INT32, {2, 4}});
model.PopulateTensor<int32_t>(model.input(), {5, 5, 5, 5,
5, 5, 5, 5,
5, 5, 5, 5,
5, 5, 5, 5,
1, 1, 1, 1,
1, 1, 1, 1,
1, 1, 1, 1,
1, 1, 1, 1});
model.PopulateTensor<int32_t>(model.diag(), {1, 2, 3, 4, 5, 6, 7, 8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(2, 4, 4));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 5, 5, 5,
5, 2, 5, 5,
5, 5, 3, 5,
5, 5, 5, 4,
5, 1, 1, 1,
1, 6, 1, 1,
1, 1, 7, 1,
1, 1, 1, 8}));
EXPECT_THAT(model.GetOutputType(), TfLiteType::kTfLiteInt32);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/matrix_set_diag.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/matrix_set_diag_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
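A self-contained sketch of the diagonal-scatter loop in FillDiagImpl above, using plain std::vector<float> in place of TfLiteTensor buffers; the function and variable names are illustrative only. The small main() mirrors the Int32TestMoreColumnsThanRows case.

#include <cstdio>
#include <vector>

// Copies `in` to `out` and overwrites the main diagonal of every batch
// matrix with consecutive values taken from `diag`, as the kernel does.
void SetDiag(const std::vector<float>& in, const std::vector<float>& diag,
             std::vector<float>& out, int batch, int rows, int cols) {
  int idx = 0;
  for (int b = 0; b < batch; ++b) {
    const float* in_mat = in.data() + b * rows * cols;
    float* out_mat = out.data() + b * rows * cols;
    for (int i = 0; i < rows; ++i) {
      for (int j = 0; j < cols; ++j) {
        out_mat[i * cols + j] = (i == j) ? diag[idx++] : in_mat[i * cols + j];
      }
    }
  }
}

int main() {
  // 2x3 input with a 2-element diagonal, as in Int32TestMoreColumnsThanRows.
  std::vector<float> in = {0, 0, 0, 9, 9, 9};
  std::vector<float> diag = {1, 1};
  std::vector<float> out(in.size());
  SetDiag(in, diag, out, /*batch=*/1, /*rows=*/2, /*cols=*/3);
  for (float v : out) std::printf("%g ", v);  // prints: 1 0 0 9 1 9
  std::printf("\n");
  return 0;
}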
ebe25b54-ad49-46c8-a552-3bfae8fe7623 | cpp | tensorflow/tensorflow | device_memory_handle | third_party/xla/xla/stream_executor/device_memory_handle.cc | third_party/xla/xla/stream_executor/device_memory_handle_test.cc | #include "xla/stream_executor/device_memory_handle.h"
#include <utility>
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/stream_executor.h"
namespace stream_executor {
DeviceMemoryHandle::DeviceMemoryHandle(StreamExecutor *executor,
DeviceMemoryBase memory)
: memory_(std::move(memory)), executor_(executor) {}
DeviceMemoryHandle::DeviceMemoryHandle(DeviceMemoryHandle &&other) noexcept
: memory_(std::move(other.memory_)), executor_(other.executor_) {
other.memory_ = DeviceMemoryBase();
}
DeviceMemoryHandle::~DeviceMemoryHandle() { Free(); }
void DeviceMemoryHandle::Free() {
if (!memory_.is_null()) {
executor_->Deallocate(&memory_);
}
}
DeviceMemoryHandle &DeviceMemoryHandle::operator=(
DeviceMemoryHandle &&other) noexcept {
Free();
memory_ = std::move(other.memory_);
other.memory_ = DeviceMemoryBase();
executor_ = other.executor_;
return *this;
}
} | #include "xla/stream_executor/device_memory_handle.h"
#include <utility>
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/mock_stream_executor.h"
#include "tsl/platform/test.h"
namespace stream_executor {
namespace {
TEST(DeviceMemoryHandle, NullMemoryNoDeallocate) {
DeviceMemoryBase null_memory;
MockStreamExecutor executor;
EXPECT_CALL(executor, Deallocate).Times(0);
{ DeviceMemoryHandle releaser(&executor, null_memory); }
}
TEST(DeviceMemoryHandle, Deallocates) {
MockStreamExecutor executor;
DeviceMemoryBase memory(&executor, sizeof(executor));
EXPECT_CALL(executor, Deallocate).Times(1);
{ DeviceMemoryHandle releaser(&executor, memory); }
}
TEST(DeviceMemoryHandle, MoveDeallocatesOnce) {
MockStreamExecutor executor;
DeviceMemoryBase memory(&executor, sizeof(executor));
EXPECT_CALL(executor, Deallocate).Times(1);
{
DeviceMemoryHandle releaser(&executor, memory);
DeviceMemoryHandle releaser_moved(std::move(releaser));
}
}
TEST(DeviceMemoryHandle, MoveAssignmentDeallocatesOnce) {
MockStreamExecutor executor;
DeviceMemoryBase memory(&executor, sizeof(executor));
EXPECT_CALL(executor, Deallocate).Times(1);
{
DeviceMemoryHandle releaser(&executor, memory);
DeviceMemoryHandle releaser2;
releaser2 = std::move(releaser);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/device_memory_handle.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/device_memory_handle_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
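DeviceMemoryHandle above is a move-only RAII wrapper: the destructor deallocates, and moving clears the source handle so the memory is released exactly once. A standalone sketch of the same pattern against a fake allocator; all names here are illustrative:

#include <cassert>
#include <utility>

struct FakeAllocator {
  int live = 0;
  int Allocate() { ++live; return live; }
  void Deallocate(int* handle) {
    if (*handle != 0) {
      --live;
      *handle = 0;
    }
  }
};

class Handle {
 public:
  Handle() = default;
  Handle(FakeAllocator* allocator, int handle)
      : allocator_(allocator), handle_(handle) {}
  Handle(Handle&& other) noexcept
      : allocator_(other.allocator_), handle_(other.handle_) {
    other.handle_ = 0;  // The moved-from handle no longer owns the resource.
  }
  Handle& operator=(Handle&& other) noexcept {
    Free();  // Release whatever this handle currently owns, as in the class above.
    allocator_ = other.allocator_;
    handle_ = other.handle_;
    other.handle_ = 0;
    return *this;
  }
  ~Handle() { Free(); }

 private:
  void Free() {
    if (handle_ != 0) allocator_->Deallocate(&handle_);
  }
  FakeAllocator* allocator_ = nullptr;
  int handle_ = 0;
};

int main() {
  FakeAllocator alloc;
  {
    Handle a(&alloc, alloc.Allocate());
    Handle b(std::move(a));  // Ownership moves; only one owner remains.
    Handle c;
    c = std::move(b);
    assert(alloc.live == 1);
  }
  assert(alloc.live == 0);  // Released exactly once when the owner died.
  return 0;
}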
40d198c4-ab0f-4c3f-b86b-e60a3a7036c9 | cpp | tensorflow/tensorflow | all_reduce_combiner | third_party/xla/xla/service/all_reduce_combiner.cc | third_party/xla/xla/service/all_reduce_combiner_test.cc | #include "xla/service/all_reduce_combiner.h"
#include <algorithm>
#include <list>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/hlo/utils/hlo_sharding_util.h"
#include "xla/service/all_reduce_key.h"
#include "xla/service/collective_combiner_utils.h"
#include "xla/service/hlo_domain_map.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
absl::Status CombineAllReduces(absl::Span<HloInstruction* const> to_combine) {
if (to_combine.size() < 2) {
return absl::OkStatus();
}
VLOG(1) << "Combined " << to_combine.size() << " CRS ops";
HloComputation& computation = *to_combine.back()->parent();
HloComputation* reduction = to_combine[0]->to_apply();
const HloOpcode type = reduction->root_instruction()->opcode();
std::vector<HloInstruction*> operands;
std::vector<const Shape*> operand_shapes;
VLOG(1) << "Combining set";
for (HloInstruction* hlo : to_combine) {
VLOG(1) << "Set element: " << hlo->ToString();
TF_RET_CHECK(hlo->opcode() == HloOpcode::kAllReduce);
TF_RET_CHECK(hlo->operands().size() == 1);
TF_RET_CHECK(hlo->to_apply() == reduction ||
(hlo->to_apply()->instruction_count() == 3 &&
hlo->to_apply()->num_parameters() == 2 &&
hlo->to_apply()->root_instruction()->opcode() == type));
TF_RET_CHECK(hlo->shape().IsArray());
for (HloInstruction* operand : hlo->operands()) {
operands.push_back(operand);
operand_shapes.push_back(&operand->shape());
}
}
HloInstruction* combined;
TF_RET_CHECK(operands.size() >= 2);
combined = computation.AddInstruction(HloInstruction::CreateAllReduce(
ShapeUtil::MakeTupleShapeWithPtrs(operand_shapes), operands, reduction,
to_combine.front()->device_list(),
      /*constrain_layout=*/false, to_combine.front()->channel_id(),
Cast<HloAllReduceInstruction>(to_combine.front())
->use_global_device_ids()));
combined->set_sharding(
hlo_sharding_util::CreateTupleSharding(combined->shape(), to_combine));
VLOG(1) << "Replacing with : " << combined->ToString();
for (int64_t i = 0; i < to_combine.size(); ++i) {
auto replace_with = HloInstruction::CreateGetTupleElement(
to_combine[i]->shape(), combined, i);
TF_RETURN_IF_ERROR(computation.ReplaceWithNewInstruction(
to_combine[i], std::move(replace_with)));
}
return absl::OkStatus();
}
}
AllReduceCombiner::AllReduceCombiner(int64_t combine_threshold_in_bytes,
int64_t combine_threshold_count)
: combine_threshold_in_bytes_(combine_threshold_in_bytes),
combine_threshold_count_(combine_threshold_count) {}
absl::StatusOr<bool> AllReduceCombiner::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(1) << "Running AllReduceCombiner with threshold of "
<< combine_threshold_in_bytes_ << " bytes";
if (combine_threshold_in_bytes_ <= 0 || combine_threshold_count_ <= 0) {
VLOG(1) << "Skip AllReduceCombiner because the threshold is zero";
return false;
}
if (hlo_query::ContainsLayoutConstrainedAllReduce(*module)) {
VLOG(1) << "Skip AllReduceCombiner because the module contains all-reduce "
"with constrained layouts";
return false;
}
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
TF_ASSIGN_OR_RETURN(auto domain_map, HloDomainMap::Create(computation, ""));
auto key_fn =
[&domain_map](
const HloInstruction* instruction) -> std::optional<AllReduceKey> {
if (instruction->opcode() != HloOpcode::kAllReduce) {
return std::nullopt;
}
return GetAllReduceKey(instruction, domain_map.get());
};
TF_ASSIGN_OR_RETURN(
bool computation_changed,
CombineInstructionsByKey<AllReduceKey>(
computation, key_fn, &CombineAllReduces,
combine_threshold_in_bytes_, combine_threshold_count_));
changed |= computation_changed;
}
return changed;
}
} | #include "xla/service/all_reduce_combiner.h"
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using std::nullopt;
using ::testing::AllOf;
namespace op = xla::testing::opcode_matchers;
int64_t kMaxCombineCount = 256;
int64_t AllReduceCount(const HloModule& module) {
int64_t count = 0;
for (HloComputation* computation : module.computations()) {
if (computation->IsFusionComputation()) {
continue;
}
for (HloInstruction* hlo : computation->instructions()) {
if (hlo->opcode() == HloOpcode::kAllReduce) {
++count;
}
}
}
return count;
}
HloInstruction* MakeCrossReplicaReductions(
std::vector<int64_t> sizes_in_kib, std::vector<HloComputation*> reductions,
std::vector<HloInstruction*>* inputs, HloComputation::Builder* b) {
CHECK_EQ(reductions.size(), sizes_in_kib.size());
std::vector<HloInstruction*> all_reduces;
for (int i = 0; i < sizes_in_kib.size(); i++) {
int64_t size_in_kib = sizes_in_kib[i];
HloComputation* reduction = reductions[i];
auto constant = b->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0(42.3)));
Shape shape = ShapeUtil::MakeShape(
F32, {static_cast<int32_t>(size_in_kib * 1024 / sizeof(float))});
auto input =
b->AddInstruction(HloInstruction::CreateBroadcast(shape, constant, {}));
inputs->push_back(input);
all_reduces.push_back(b->AddInstruction(HloInstruction::CreateAllReduce(
shape, {input}, reduction, CollectiveDeviceList(),
        /*constrain_layout=*/false, /*channel_id=*/nullopt,
        /*use_global_device_ids=*/false)));
}
return b->AddInstruction(HloInstruction::CreateTuple(all_reduces));
}
HloComputation* MakeReduction(const HloOpcode type, HloModule* module) {
HloComputation::Builder sum_builder(HloOpcodeString(type));
auto x = sum_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "x"));
auto y = sum_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "y"));
sum_builder.AddInstruction(
HloInstruction::CreateBinary(ShapeUtil::MakeShape(F32, {}), type, x, y));
HloComputation* reduction =
module->AddEmbeddedComputation(sum_builder.Build());
return reduction;
}
using AllReduceCombinerTest = HloTestBase;
TEST_F(AllReduceCombinerTest, CombineAllReduces) {
auto module = CreateNewVerifiedModule();
HloComputation* sum = MakeReduction(HloOpcode::kAdd, module.get());
HloComputation::Builder b(TestName());
std::vector<HloInstruction*> inputs;
auto root = MakeCrossReplicaReductions(
{1, 2, 10, 7, 6}, {sum, sum, sum, sum, sum}, &inputs, &b);
auto computation = module->AddEntryComputation(b.Build());
AllReduceCombiner combine(10 * 1024 * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), inputs.size());
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
ASSERT_EQ(AllReduceCount(*module), 1);
EXPECT_TRUE(changed);
ASSERT_EQ(root, computation->root_instruction());
ASSERT_EQ(inputs.size(), root->operands().size());
HloInstruction* combined = nullptr;
for (int64_t i = 0; i < root->operands().size(); ++i) {
HloInstruction* hlo = root->mutable_operand(i);
ASSERT_TRUE(hlo->opcode() == HloOpcode::kGetTupleElement);
EXPECT_EQ(hlo->tuple_index(), i);
EXPECT_TRUE(ShapeUtil::Equal(inputs[i]->shape(), hlo->shape()));
if (combined == nullptr) {
combined = hlo->mutable_operand(0);
ASSERT_TRUE(combined->opcode() == HloOpcode::kAllReduce);
EXPECT_TRUE(ShapeUtil::Equal(root->shape(), combined->shape()));
ASSERT_EQ(combined->operands().size(), inputs.size());
}
EXPECT_EQ(combined, hlo->operand(0));
EXPECT_TRUE(ShapeUtil::Equal(inputs[i]->shape(), hlo->shape()));
EXPECT_EQ(combined->operand(i), inputs[i]);
EXPECT_EQ(1, inputs[i]->users().size());
}
ASSERT_NE(combined, nullptr);
}
TEST_F(AllReduceCombinerTest, CombineCrossReplicaReductionsInGroups) {
auto module = CreateNewVerifiedModule();
HloComputation* sum = MakeReduction(HloOpcode::kAdd, module.get());
HloComputation* min = MakeReduction(HloOpcode::kMinimum, module.get());
HloComputation* max = MakeReduction(HloOpcode::kMaximum, module.get());
HloComputation* sum_2 = MakeReduction(HloOpcode::kAdd, module.get());
HloComputation::Builder b(TestName());
std::vector<HloInstruction*> inputs;
MakeCrossReplicaReductions(
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
{sum, sum_2, min, min, min, max, max, max, sum, sum_2}, &inputs, &b);
module->AddEntryComputation(b.Build());
AllReduceCombiner combine(10 * 1024 * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), inputs.size());
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
ASSERT_EQ(AllReduceCount(*module), 3)
<< "expects 3 groups for 3 reduction types.";
EXPECT_TRUE(changed);
}
TEST_F(AllReduceCombinerTest, RespectThreshold) {
auto module = CreateNewVerifiedModule();
HloComputation* sum = MakeReduction(HloOpcode::kAdd, module.get());
HloComputation::Builder b(TestName());
std::vector<HloInstruction*> inputs;
MakeCrossReplicaReductions({8, 4}, {sum, sum}, &inputs, &b);
module->AddEntryComputation(b.Build());
{
AllReduceCombiner combine((8 + 4) * 1024 - 1, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), inputs.size());
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllReduceCount(*module), inputs.size());
EXPECT_FALSE(changed);
}
{
AllReduceCombiner combine((8 + 4) * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), inputs.size());
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllReduceCount(*module), 1);
EXPECT_TRUE(changed);
}
}
TEST_F(AllReduceCombinerTest, NoDependentCombination) {
auto module = CreateNewVerifiedModule();
HloComputation* reduction = MakeReduction(HloOpcode::kAdd, module.get());
HloComputation::Builder b(TestName());
auto constant = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0(42.3)));
auto all_reduce = b.AddInstruction(HloInstruction::CreateAllReduce(
constant->shape(), {constant}, reduction,
CollectiveDeviceList(),
false, nullopt,
false));
b.AddInstruction(HloInstruction::CreateAllReduce(
constant->shape(), {all_reduce}, reduction,
CollectiveDeviceList(), false,
nullopt, false));
module->AddEntryComputation(b.Build());
AllReduceCombiner combine(1024 * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), 2);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllReduceCount(*module), 2);
EXPECT_FALSE(changed);
}
TEST_F(AllReduceCombinerTest, GroupAllReduce) {
auto module = CreateNewVerifiedModule(TestName(), 4);
HloComputation::Builder b(TestName());
HloComputation* reduction = MakeReduction(HloOpcode::kAdd, module.get());
auto constant = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0(42.3)));
auto crs0 = b.AddInstruction(HloInstruction::CreateAllReduce(
constant->shape(), {constant}, reduction,
CollectiveDeviceList({{0, 1}, {2, 3}}),
false,
nullopt, false));
auto crs1 = b.AddInstruction(HloInstruction::CreateAllReduce(
constant->shape(), {constant}, reduction,
CollectiveDeviceList({{0, 2}, {1, 3}}),
false,
nullopt, false));
b.AddInstruction(HloInstruction::CreateTuple({crs0, crs1}));
module->AddEntryComputation(b.Build());
AllReduceCombiner combine(1024 * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), 2);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllReduceCount(*module), 2);
EXPECT_FALSE(changed);
}
TEST_F(AllReduceCombinerTest, DomainPreventsCombining) {
const char* const hlo_string = R"(
HloModule Module
summit {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY entry {
param0 = f32[128] parameter(0), sharding={maximal device=0}
param1 = f32[128] parameter(1), sharding={maximal device=1}
crs0 = f32[128] all-reduce(param0),
replica_groups={}, to_apply=summit, sharding={maximal device=0}
crs1 = f32[128] all-reduce(param1),
replica_groups={}, to_apply=summit, sharding={maximal device=1}
domain0 = f32[128] domain(crs0),
domain={kind="sharding", entry={{maximal device=0}, {maximal device=1}}, exit={maximal device=0}}
domain1 = f32[128] domain(crs1),
domain={kind="sharding", entry={{maximal device=0}, {maximal device=1}}, exit={maximal device=1}}
ROOT tuple = (f32[128], f32[128]) tuple(domain0, domain1),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
LOG(INFO) << "Original module:\n" << module->ToString();
AllReduceCombiner combine(1024 * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), 2);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllReduceCount(*module), 2);
EXPECT_FALSE(changed);
}
TEST_F(AllReduceCombinerTest, CombineFromTwoDomainsWithSameMetadata) {
const char* const hlo_string = R"(
HloModule Module
summit {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY entry {
param0 = f32[128] parameter(0), sharding={maximal device=0}
param1 = f32[128] parameter(1), sharding={maximal device=1}
param2 = f32[128] parameter(2), sharding={maximal device=1}
crs0 = f32[128] all-reduce(param0),
replica_groups={}, to_apply=summit, sharding={maximal device=0}
crs1 = f32[128] all-reduce(param1),
replica_groups={}, to_apply=summit, sharding={maximal device=1}
crs2 = f32[128] all-reduce(param2),
replica_groups={}, to_apply=summit, sharding={maximal device=0}
domain0 = f32[128] domain(crs0),
domain={kind="sharding", entry={{maximal device=0}, {maximal device=1},
{maximal device=0}}, exit={maximal device=0}}
domain1 = f32[128] domain(crs1),
domain={kind="sharding", entry={{maximal device=0}, {maximal device=1},
{maximal device=0}}, exit={maximal device=1}}
domain2 = f32[128] domain(crs2),
domain={kind="sharding", entry={{maximal device=0}, {maximal device=1},
{maximal device=0}}, exit={maximal device=0}}
ROOT tuple = (f32[128], f32[128], f32[128]) tuple(domain0, domain1, domain2),
sharding={{maximal device=0}, {maximal device=1}, {maximal device=0}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AllReduceCombiner combine(1024 * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), 3);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllReduceCount(*module), 2);
EXPECT_TRUE(changed);
const HloInstruction* param0 =
module->entry_computation()->parameter_instruction(0);
ASSERT_EQ(param0->user_count(), 1);
const HloInstruction* combined_ar = param0->users().front();
ASSERT_EQ(combined_ar->opcode(), HloOpcode::kAllReduce);
EXPECT_THAT(combined_ar, testing::opcode_matchers::Sharding(
"{{maximal device=0}, {maximal device=0}}"));
}
TEST_F(AllReduceCombinerTest, DoNotCombineCrossShardAndCrossReplicaInSPMD) {
const char* const hlo_string = R"(
HloModule Module
summit {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY entry {
param0 = f32[128] parameter(0), sharding={maximal device=0}
param1 = f32[128] parameter(1), sharding={maximal device=1}
cross_shard_ar = f32[128] all-reduce(param0),
replica_groups={{0}}, to_apply=summit, channel_id=1
cross_replica_ar = f32[128] all-reduce(param1),
replica_groups={{0}}, to_apply=summit, sharding={maximal device=1}
ROOT tuple = (f32[128], f32[128]) tuple(cross_shard_ar, cross_replica_ar)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AllReduceCombiner combine(1024 * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), 2);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllReduceCount(*module), 2);
EXPECT_FALSE(changed);
}
TEST_F(AllReduceCombinerTest, CrossCoreAllReduce) {
const char* const hlo_string = R"(
HloModule Module
summit {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY entry {
param0 = f32[128] parameter(0), sharding={maximal device=0}
param1 = f32[128] parameter(1), sharding={maximal device=1}
crs00 = f32[128] all-reduce(param0),
replica_groups={{0}}, channel_id=1, to_apply=summit,
sharding={maximal device=0}
crs01 = f32[128] all-reduce(param1),
replica_groups={{0}}, channel_id=1, to_apply=summit,
sharding={maximal device=1}
crs10 = f32[128] all-reduce(param0),
replica_groups={{0}}, channel_id=2, to_apply=summit,
sharding={maximal device=0}
crs11 = f32[128] all-reduce(param1),
replica_groups={{0}}, channel_id=2, to_apply=summit,
sharding={maximal device=1}
domain0 = f32[128] domain(crs00),
domain={kind="sharding", entry={maximal device=0}, exit={maximal device=1}}
ROOT add = f32[128] add(domain0, crs11),
sharding={maximal device=1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AllReduceCombiner combine(1024 * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), 4);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllReduceCount(*module), 2);
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Add(op::Domain(op::GetTupleElement(AllOf(
op::AllReduce(op::Parameter(0), op::Parameter(0)),
op::Shape("(f32[128], f32[128])")))),
op::GetTupleElement(AllOf(
op::AllReduce(op::Parameter(1), op::Parameter(1)),
op::Shape("(f32[128], f32[128])")))));
}
TEST_F(AllReduceCombinerTest, CrossCombineGroupCycle) {
const char* const hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
%max {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] maximum(lhs, rhs)
}
ENTRY %comp {
p0 = f32[128] parameter(0)
p1 = f32[128] parameter(1)
crs00 = f32[128] all-reduce(p0), to_apply=add
crs10 = f32[128] all-reduce(p1), to_apply=max
crs01 = f32[128] all-reduce(crs00), to_apply=max
crs11 = f32[128] all-reduce(crs10), to_apply=add
add0 = f32[128] add(crs01, crs11)
crs02 = f32[128] all-reduce(add0), to_apply=add
crs12 = f32[128] all-reduce(crs11), to_apply=add
ROOT tuple = (f32[128], f32[128]) tuple(crs02, crs12)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AllReduceCombiner combine(1024 * 1024, kMaxCombineCount);
ASSERT_EQ(AllReduceCount(*module), 6);
TF_ASSERT_OK_AND_ASSIGN(bool changed, combine.Run(module.get()));
EXPECT_EQ(AllReduceCount(*module), 4);
EXPECT_TRUE(changed);
auto crs0 = op::AllReduce(op::Parameter(0), op::AllReduce(op::Parameter(1)));
auto add = op::Add(op::AllReduce(op::GetTupleElement(crs0, 0)),
op::GetTupleElement(crs0, 1));
auto crs1 = op::AllReduce(add, op::GetTupleElement(crs0));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::GetTupleElement(crs1, 0), op::GetTupleElement(crs1, 1)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_combiner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_combiner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
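AllReduceCombiner delegates the actual batching to CombineInstructionsByKey, bounded by a byte threshold and a count threshold. A simplified, standalone sketch of that greedy grouping; it ignores the dependency and domain checks the real pass performs, and all names are illustrative:

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

struct Item {
  std::string key;       // Stand-in for AllReduceKey.
  int64_t size_bytes;    // Stand-in for the operand byte size.
};

// Batches consecutive same-key items until adding one more would exceed
// either the byte threshold or the count threshold.
std::vector<std::vector<Item>> Combine(const std::vector<Item>& items,
                                       int64_t max_bytes, int64_t max_count) {
  std::vector<std::vector<Item>> groups;
  std::vector<Item> current;
  int64_t current_bytes = 0;
  auto flush = [&] {
    if (!current.empty()) {
      groups.push_back(current);
      current.clear();
      current_bytes = 0;
    }
  };
  for (const Item& item : items) {
    bool key_changes = !current.empty() && current.front().key != item.key;
    bool over_budget = current_bytes + item.size_bytes > max_bytes ||
                       static_cast<int64_t>(current.size()) + 1 > max_count;
    if (key_changes || over_budget) flush();
    current.push_back(item);
    current_bytes += item.size_bytes;
  }
  flush();
  return groups;
}

int main() {
  // Mirrors the RespectThreshold test above: 8 KiB + 4 KiB only combine
  // when the threshold is at least 12 KiB.
  std::vector<Item> ars = {{"add", 8 * 1024}, {"add", 4 * 1024}};
  std::printf("%zu group(s) at 12 KiB - 1\n",
              Combine(ars, 12 * 1024 - 1, 256).size());  // 2 groups
  std::printf("%zu group(s) at 12 KiB\n",
              Combine(ars, 12 * 1024, 256).size());      // 1 group
  return 0;
}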
e01ac670-7a1a-4d0b-8ef1-61fc5f1f5991 | cpp | abseil/abseil-cpp | charconv_bigint | absl/strings/internal/charconv_bigint.cc | absl/strings/internal/charconv_bigint_test.cc | #include "absl/strings/internal/charconv_bigint.h"
#include <algorithm>
#include <cassert>
#include <string>
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace strings_internal {
namespace {
constexpr int kLargePowerOfFiveStep = 27;
constexpr int kLargestPowerOfFiveIndex = 20;
// kLargePowersOfFive packs (5^27)^i = 5^(27*i) for i = 1..20; entry i spans
// 2*i little-endian 32-bit words starting at LargePowerOfFiveData(i).
const uint32_t kLargePowersOfFive[] = {
0xfa10079dU, 0x6765c793U,
0x97d9f649U, 0x6664242dU, 0x29939b14U, 0x29c30f10U,
0xc4f809c5U, 0x7bf3f22aU, 0x67bdae34U, 0xad340517U, 0x369d1b5fU, 0x10de1593U,
0x92b260d1U, 0x9efff7c7U, 0x81de0ec6U, 0xaeba5d56U, 0x410664a4U, 0x4f40737aU,
0x20d3846fU, 0x06d00f73U,
0xff1b172dU, 0x13a1d71cU, 0xefa07617U, 0x7f682d3dU, 0xff8c90c0U, 0x3f0131e7U,
0x3fdcb9feU, 0x917b0177U, 0x16c407a7U, 0x02c06b9dU,
0x960f7199U, 0x056667ecU, 0xe07aefd8U, 0x80f2b9ccU, 0x8273f5e3U, 0xeb9a214aU,
0x40b38005U, 0x0e477ad4U, 0x277d08e6U, 0xfa28b11eU, 0xd3f7d784U, 0x011c835bU,
0xf723d9d5U, 0x3282d3f3U, 0xe00857d1U, 0x69659d25U, 0x2cf117cfU, 0x24da6d07U,
0x954d1417U, 0x3e5d8cedU, 0x7a8bb766U, 0xfd785ae6U, 0x645436d2U, 0x40c78b34U,
0x94151217U, 0x0072e9f7U,
0x2b416aa1U, 0x7893c5a7U, 0xe37dc6d4U, 0x2bad2beaU, 0xf0fc846cU, 0x7575ae4bU,
0x62587b14U, 0x83b67a34U, 0x02110cdbU, 0xf7992f55U, 0x00deb022U, 0xa4a23becU,
0x8af5c5cdU, 0xb85b654fU, 0x818df38bU, 0x002e69d2U,
0x3518cbbdU, 0x20b0c15fU, 0x38756c2fU, 0xfb5dc3ddU, 0x22ad2d94U, 0xbf35a952U,
0xa699192aU, 0x9a613326U, 0xad2a9cedU, 0xd7f48968U, 0xe87dfb54U, 0xc8f05db6U,
0x5ef67531U, 0x31c1ab49U, 0xe202ac9fU, 0x9b2957b5U, 0xa143f6d3U, 0x0012bf07U,
0x8b971de9U, 0x21aba2e1U, 0x63944362U, 0x57172336U, 0xd9544225U, 0xfb534166U,
0x08c563eeU, 0x14640ee2U, 0x24e40d31U, 0x02b06537U, 0x03887f14U, 0x0285e533U,
0xb744ef26U, 0x8be3a6c4U, 0x266979b4U, 0x6761ece2U, 0xd9cb39e4U, 0xe67de319U,
0x0d39e796U, 0x00079250U,
0x260eb6e5U, 0xf414a796U, 0xee1a7491U, 0xdb9368ebU, 0xf50c105bU, 0x59157750U,
0x9ed2fb5cU, 0xf6e56d8bU, 0xeaee8d23U, 0x0f319f75U, 0x2aa134d6U, 0xac2908e9U,
0xd4413298U, 0x02f02a55U, 0x989d5a7aU, 0x70dde184U, 0xba8040a7U, 0x03200981U,
0xbe03b11cU, 0x3c1c2a18U, 0xd60427a1U, 0x00030ee0U,
0xce566d71U, 0xf1c4aa25U, 0x4e93ca53U, 0xa72283d0U, 0x551a73eaU, 0x3d0538e2U,
0x8da4303fU, 0x6a58de60U, 0x0e660221U, 0x49cf61a6U, 0x8d058fc1U, 0xb9d1a14cU,
0x4bab157dU, 0xc85c6932U, 0x518c8b9eU, 0x9b92b8d0U, 0x0d8a0e21U, 0xbd855df9U,
0xb3ea59a1U, 0x8da29289U, 0x4584d506U, 0x3752d80fU, 0xb72569c6U, 0x00013c33U,
0x190f354dU, 0x83695cfeU, 0xe5a4d0c7U, 0xb60fb7e8U, 0xee5bbcc4U, 0xb922054cU,
0xbb4f0d85U, 0x48394028U, 0x1d8957dbU, 0x0d7edb14U, 0x4ecc7587U, 0x505e9e02U,
0x4c87f36bU, 0x99e66bd6U, 0x44b9ed35U, 0x753037d4U, 0xe5fe5f27U, 0x2742c203U,
0x13b2ed2bU, 0xdc525d2cU, 0xe6fde59aU, 0x77ffb18fU, 0x13c5752cU, 0x08a84bccU,
0x859a4940U, 0x00007fb6U,
0x4f98cb39U, 0xa60edbbcU, 0x83b5872eU, 0xa501acffU, 0x9cc76f78U, 0xbadd4c73U,
0x43e989faU, 0xca7acf80U, 0x2e0c824fU, 0xb19f4ffcU, 0x092fd81cU, 0xe4eb645bU,
0xa1ff84c2U, 0x8a5a83baU, 0xa8a1fae9U, 0x1db43609U, 0xb0fed50bU, 0x0dd7d2bdU,
0x7d7accd8U, 0x91fa640fU, 0x37dcc6c5U, 0x1c417fd5U, 0xe4d462adU, 0xe8a43399U,
0x131bf9a5U, 0x8df54d29U, 0x36547dc1U, 0x00003395U,
0x5bd330f5U, 0x77d21967U, 0x1ac481b7U, 0x6be2f7ceU, 0x7f4792a9U, 0xe84c2c52U,
0x84592228U, 0x9dcaf829U, 0xdab44ce1U, 0x3d0c311bU, 0x532e297dU, 0x4704e8b4U,
0x9cdc32beU, 0x41e64d9dU, 0x7717bea1U, 0xa824c00dU, 0x08f50b27U, 0x0f198d77U,
0x49bbfdf0U, 0x025c6c69U, 0xd4e55cd3U, 0xf083602bU, 0xb9f0fecdU, 0xc0864aeaU,
0x9cb98681U, 0xaaf620e9U, 0xacb6df30U, 0x4faafe66U, 0x8af13c3bU, 0x000014d5U,
0x682bb941U, 0x89a9f297U, 0xcba75d7bU, 0x404217b1U, 0xb4e519e9U, 0xa1bc162bU,
0xf7f5910aU, 0x98715af5U, 0x2ff53e57U, 0xe3ef118cU, 0x490c4543U, 0xbc9b1734U,
0x2affbe4dU, 0x4cedcb4cU, 0xfb14e99eU, 0x35e34212U, 0xece39c24U, 0x07673ab3U,
0xe73115ddU, 0xd15d38e7U, 0x093eed3bU, 0xf8e7eac5U, 0x78a8cc80U, 0x25227aacU,
0x3f590551U, 0x413da1cbU, 0xdf643a55U, 0xab65ad44U, 0xd70b23d7U, 0xc672cd76U,
0x3364ea62U, 0x0000086aU,
0x22f163ddU, 0x23cf07acU, 0xbe2af6c2U, 0xf412f6f6U, 0xc3ff541eU, 0x6eeaf7deU,
0xa47047e0U, 0x408cda92U, 0x0f0eeb08U, 0x56deba9dU, 0xcfc6b090U, 0x8bbbdf04U,
0x3933cdb3U, 0x9e7bb67dU, 0x9f297035U, 0x38946244U, 0xee1d37bbU, 0xde898174U,
0x63f3559dU, 0x705b72fbU, 0x138d27d9U, 0xf8603a78U, 0x735eec44U, 0xe30987d5U,
0xc6d38070U, 0x9cfe548eU, 0x9ff01422U, 0x7c564aa8U, 0x91cc60baU, 0xcbc3565dU,
0x7550a50bU, 0x6909aeadU, 0x13234c45U, 0x00000366U,
0x17954989U, 0x3a7d7709U, 0x98042de5U, 0xa9011443U, 0x45e723c2U, 0x269ffd6fU,
0x58852a46U, 0xaaa1042aU, 0x2eee8153U, 0xb2b6c39eU, 0xaf845b65U, 0xf6c365d7U,
0xe4cffb2bU, 0xc840e90cU, 0xabea8abbU, 0x5c58f8d2U, 0x5c19fa3aU, 0x4670910aU,
0x4449f21cU, 0xefa645b3U, 0xcc427decU, 0x083c3d73U, 0x467cb413U, 0x6fe10ae4U,
0x3caffc72U, 0x9f8da55eU, 0x5e5c8ea7U, 0x490594bbU, 0xf0871b0bU, 0xdd89816cU,
0x8e931df8U, 0xe85ce1c9U, 0xcca090a5U, 0x575fa16bU, 0x6b9f106cU, 0x0000015fU,
0xee20d805U, 0x57bc3c07U, 0xcdea624eU, 0xd3f0f52dU, 0x9924b4f4U, 0xcf968640U,
0x61d41962U, 0xe87fb464U, 0xeaaf51c7U, 0x564c8b60U, 0xccda4028U, 0x529428bbU,
0x313a1fa8U, 0x96bd0f94U, 0x7a82ebaaU, 0xad99e7e9U, 0xf2668cd4U, 0xbe33a45eU,
0xfd0db669U, 0x87ee369fU, 0xd3ec20edU, 0x9c4d7db7U, 0xdedcf0d8U, 0x7cd2ca64U,
0xe25a6577U, 0x61003fd4U, 0xe56f54ccU, 0x10b7c748U, 0x40526e5eU, 0x7300ae87U,
0x5c439261U, 0x2c0ff469U, 0xbf723f12U, 0xb2379b61U, 0xbf59b4f5U, 0xc91b1c3fU,
0xf0046d27U, 0x0000008dU,
0x525c9e11U, 0xf4e0eb41U, 0xebb2895dU, 0x5da512f9U, 0x7d9b29d4U, 0x452f4edcU,
0x0b90bc37U, 0x341777cbU, 0x63d269afU, 0x1da77929U, 0x0a5c1826U, 0x77991898U,
0x5aeddf86U, 0xf853a877U, 0x538c31ccU, 0xe84896daU, 0xb7a0010bU, 0x17ef4de5U,
0xa52a2adeU, 0x029fd81cU, 0x987ce701U, 0x27fefd77U, 0xdb46c66fU, 0x5d301900U,
0x496998c0U, 0xbb6598b9U, 0x5eebb607U, 0xe547354aU, 0xdf4a2f7eU, 0xf06c4955U,
0x96242ffaU, 0x1775fb27U, 0xbecc58ceU, 0xebf2a53bU, 0x3eaad82aU, 0xf41137baU,
0x573e6fbaU, 0xfb4866b8U, 0x54002148U, 0x00000039U,
};
const uint32_t* LargePowerOfFiveData(int i) {
return kLargePowersOfFive + i * (i - 1);
}
int LargePowerOfFiveSize(int i) { return 2 * i; }
}
ABSL_DLL const uint32_t kFiveToNth[14] = {
1, 5, 25, 125, 625, 3125, 15625,
78125, 390625, 1953125, 9765625, 48828125, 244140625, 1220703125,
};
ABSL_DLL const uint32_t kTenToNth[10] = {
1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000,
};
template <int max_words>
int BigUnsigned<max_words>::ReadFloatMantissa(const ParsedFloat& fp,
int significant_digits) {
SetToZero();
assert(fp.type == FloatType::kNumber);
if (fp.subrange_begin == nullptr) {
words_[0] = fp.mantissa & 0xffffffffu;
words_[1] = fp.mantissa >> 32;
if (words_[1]) {
size_ = 2;
} else if (words_[0]) {
size_ = 1;
}
return fp.exponent;
}
int exponent_adjust =
ReadDigits(fp.subrange_begin, fp.subrange_end, significant_digits);
return fp.literal_exponent + exponent_adjust;
}
template <int max_words>
int BigUnsigned<max_words>::ReadDigits(const char* begin, const char* end,
int significant_digits) {
assert(significant_digits <= Digits10() + 1);
SetToZero();
bool after_decimal_point = false;
while (begin < end && *begin == '0') {
++begin;
}
int dropped_digits = 0;
while (begin < end && *std::prev(end) == '0') {
--end;
++dropped_digits;
}
if (begin < end && *std::prev(end) == '.') {
dropped_digits = 0;
--end;
while (begin < end && *std::prev(end) == '0') {
--end;
++dropped_digits;
}
} else if (dropped_digits) {
const char* dp = std::find(begin, end, '.');
if (dp != end) {
dropped_digits = 0;
}
}
int exponent_adjust = dropped_digits;
uint32_t queued = 0;
int digits_queued = 0;
for (; begin != end && significant_digits > 0; ++begin) {
if (*begin == '.') {
after_decimal_point = true;
continue;
}
if (after_decimal_point) {
--exponent_adjust;
}
char digit = (*begin - '0');
--significant_digits;
    // More digits follow but are about to be dropped: nudge a trailing 0 or 5
    // up by one so the truncated value cannot be mistaken for an exact or
    // halfway case by later rounding logic.
    if (significant_digits == 0 && std::next(begin) != end &&
        (digit == 0 || digit == 5)) {
      ++digit;
}
queued = 10 * queued + static_cast<uint32_t>(digit);
++digits_queued;
if (digits_queued == kMaxSmallPowerOfTen) {
MultiplyBy(kTenToNth[kMaxSmallPowerOfTen]);
AddWithCarry(0, queued);
queued = digits_queued = 0;
}
}
if (digits_queued) {
MultiplyBy(kTenToNth[digits_queued]);
AddWithCarry(0, queued);
}
if (begin < end && !after_decimal_point) {
const char* decimal_point = std::find(begin, end, '.');
exponent_adjust += (decimal_point - begin);
}
return exponent_adjust;
}
template <int max_words>
BigUnsigned<max_words> BigUnsigned<max_words>::FiveToTheNth(
int n) {
BigUnsigned answer(1u);
bool first_pass = true;
while (n >= kLargePowerOfFiveStep) {
int big_power =
std::min(n / kLargePowerOfFiveStep, kLargestPowerOfFiveIndex);
if (first_pass) {
std::copy_n(LargePowerOfFiveData(big_power),
LargePowerOfFiveSize(big_power), answer.words_);
answer.size_ = LargePowerOfFiveSize(big_power);
first_pass = false;
} else {
answer.MultiplyBy(LargePowerOfFiveSize(big_power),
LargePowerOfFiveData(big_power));
}
n -= kLargePowerOfFiveStep * big_power;
}
answer.MultiplyByFiveToTheNth(n);
return answer;
}
template <int max_words>
void BigUnsigned<max_words>::MultiplyStep(int original_size,
const uint32_t* other_words,
int other_size, int step) {
int this_i = std::min(original_size - 1, step);
int other_i = step - this_i;
uint64_t this_word = 0;
uint64_t carry = 0;
for (; this_i >= 0 && other_i < other_size; --this_i, ++other_i) {
uint64_t product = words_[this_i];
product *= other_words[other_i];
this_word += product;
carry += (this_word >> 32);
this_word &= 0xffffffff;
}
AddWithCarry(step + 1, carry);
words_[step] = this_word & 0xffffffff;
if (this_word > 0 && size_ <= step) {
size_ = step + 1;
}
}
template <int max_words>
std::string BigUnsigned<max_words>::ToString() const {
BigUnsigned<max_words> copy = *this;
std::string result;
while (copy.size() > 0) {
uint32_t next_digit = copy.DivMod<10>();
result.push_back('0' + static_cast<char>(next_digit));
}
if (result.empty()) {
result.push_back('0');
}
std::reverse(result.begin(), result.end());
return result;
}
template class BigUnsigned<4>;
template class BigUnsigned<84>;
}
ABSL_NAMESPACE_END
} | #include "absl/strings/internal/charconv_bigint.h"
#include <string>
#include "gtest/gtest.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace strings_internal {
TEST(BigUnsigned, ShiftLeft) {
{
BigUnsigned<4> num(3u);
num.ShiftLeft(100);
EXPECT_EQ(num, BigUnsigned<4>("3802951800684688204490109616128"));
}
{
BigUnsigned<4> a(15u);
BigUnsigned<4> b(7u);
BigUnsigned<4> c(3u);
a.ShiftLeft(125);
b.ShiftLeft(125);
c.ShiftLeft(125);
EXPECT_EQ(a, b);
EXPECT_NE(a, c);
}
{
BigUnsigned<84> a(15u);
BigUnsigned<84> b(7u);
BigUnsigned<84> c(3u);
a.ShiftLeft(84 * 32 - 3);
b.ShiftLeft(84 * 32 - 3);
c.ShiftLeft(84 * 32 - 3);
EXPECT_EQ(a, b);
EXPECT_NE(a, c);
}
{
const std::string seed = "1234567890123456789012345678901234567890";
BigUnsigned<84> a(seed);
for (int i = 1; i <= 84 * 32; ++i) {
a.ShiftLeft(1);
BigUnsigned<84> b(seed);
b.ShiftLeft(i);
EXPECT_EQ(a, b);
}
EXPECT_EQ(a, BigUnsigned<84>(0u));
}
{
const BigUnsigned<84> all_bits_one(
"1474444211396924248063325089479706787923460402125687709454567433186613"
"6228083464060749874845919674257665016359189106695900028098437021384227"
"3285029708032466536084583113729486015826557532750465299832071590813090"
"2011853039837649252477307070509704043541368002938784757296893793903797"
"8180292336310543540677175225040919704702800559606097685920595947397024"
"8303316808753252115729411497720357971050627997031988036134171378490368"
"6008000778741115399296162550786288457245180872759047016734959330367829"
"5235612397427686310674725251378116268607113017720538636924549612987647"
"5767411074510311386444547332882472126067840027882117834454260409440463"
"9345147252664893456053258463203120637089916304618696601333953616715125"
"2115882482473279040772264257431663818610405673876655957323083702713344"
"4201105427930770976052393421467136557055");
const BigUnsigned<84> zero(0u);
const BigUnsigned<84> one(1u);
for (int i = 1; i < 84*32; ++i) {
BigUnsigned<84> big_shifted = all_bits_one;
big_shifted.ShiftLeft(i);
EXPECT_GT(all_bits_one, big_shifted);
BigUnsigned<84> small_shifted = one;
small_shifted.ShiftLeft(i);
EXPECT_LT(one, small_shifted);
}
for (int no_op_shift : {0, -1, -84 * 32, std::numeric_limits<int>::min()}) {
BigUnsigned<84> big_shifted = all_bits_one;
big_shifted.ShiftLeft(no_op_shift);
EXPECT_EQ(all_bits_one, big_shifted);
BigUnsigned<84> small_shifted = one;
big_shifted.ShiftLeft(no_op_shift);
EXPECT_EQ(one, small_shifted);
}
for (int out_of_bounds_shift :
{84 * 32, 84 * 32 + 1, std::numeric_limits<int>::max()}) {
BigUnsigned<84> big_shifted = all_bits_one;
big_shifted.ShiftLeft(out_of_bounds_shift);
EXPECT_EQ(zero, big_shifted);
BigUnsigned<84> small_shifted = one;
small_shifted.ShiftLeft(out_of_bounds_shift);
EXPECT_EQ(zero, small_shifted);
}
}
}
TEST(BigUnsigned, MultiplyByUint32) {
const BigUnsigned<84> factorial_100(
"933262154439441526816992388562667004907159682643816214685929638952175999"
"932299156089414639761565182862536979208272237582511852109168640000000000"
"00000000000000");
BigUnsigned<84> a(1u);
for (uint32_t i = 1; i <= 100; ++i) {
a.MultiplyBy(i);
}
EXPECT_EQ(a, BigUnsigned<84>(factorial_100));
}
TEST(BigUnsigned, MultiplyByBigUnsigned) {
{
const BigUnsigned<84> factorial_200(
"7886578673647905035523632139321850622951359776871732632947425332443594"
"4996340334292030428401198462390417721213891963883025764279024263710506"
"1926624952829931113462857270763317237396988943922445621451664240254033"
"2918641312274282948532775242424075739032403212574055795686602260319041"
"7032406235170085879617892222278962370389737472000000000000000000000000"
"0000000000000000000000000");
BigUnsigned<84> evens(1u);
BigUnsigned<84> odds(1u);
for (uint32_t i = 1; i < 200; i += 2) {
odds.MultiplyBy(i);
evens.MultiplyBy(i + 1);
}
evens.MultiplyBy(odds);
EXPECT_EQ(evens, factorial_200);
}
{
for (int a = 0 ; a < 700; a += 25) {
SCOPED_TRACE(a);
BigUnsigned<84> a_value("3" + std::string(a, '0'));
for (int b = 0; b < (700 - a); b += 25) {
SCOPED_TRACE(b);
BigUnsigned<84> b_value("2" + std::string(b, '0'));
BigUnsigned<84> expected_product("6" + std::string(a + b, '0'));
b_value.MultiplyBy(a_value);
EXPECT_EQ(b_value, expected_product);
}
}
}
}
TEST(BigUnsigned, MultiplyByOverflow) {
{
BigUnsigned<4> all_bits_on("340282366920938463463374607431768211455");
all_bits_on.MultiplyBy(all_bits_on);
EXPECT_EQ(all_bits_on, BigUnsigned<4>(1u));
}
{
BigUnsigned<4> value_1("12345678901234567890123456789012345678");
BigUnsigned<4> value_2("12345678901234567890123456789012345678");
BigUnsigned<4> two_to_fiftieth(1u);
two_to_fiftieth.ShiftLeft(50);
value_1.ShiftLeft(50);
value_2.MultiplyBy(two_to_fiftieth);
EXPECT_EQ(value_1, value_2);
}
}
TEST(BigUnsigned, FiveToTheNth) {
{
for (int i = 0; i < 1160; ++i) {
SCOPED_TRACE(i);
BigUnsigned<84> value_1(123u);
BigUnsigned<84> value_2(123u);
value_1.MultiplyByFiveToTheNth(i);
for (int j = 0; j < i; j++) {
value_2.MultiplyBy(5u);
}
EXPECT_EQ(value_1, value_2);
}
}
{
for (int i = 0; i < 1160; ++i) {
SCOPED_TRACE(i);
BigUnsigned<84> value_1(1u);
value_1.MultiplyByFiveToTheNth(i);
BigUnsigned<84> value_2 = BigUnsigned<84>::FiveToTheNth(i);
EXPECT_EQ(value_1, value_2);
}
}
}
TEST(BigUnsigned, TenToTheNth) {
{
for (int i = 0; i < 800; ++i) {
SCOPED_TRACE(i);
BigUnsigned<84> value_1(123u);
BigUnsigned<84> value_2(123u);
value_1.MultiplyByTenToTheNth(i);
for (int j = 0; j < i; j++) {
value_2.MultiplyBy(10u);
}
EXPECT_EQ(value_1, value_2);
}
}
{
for (int i = 0; i < 200; ++i) {
SCOPED_TRACE(i);
BigUnsigned<84> value_1(135u);
value_1.MultiplyByTenToTheNth(i);
BigUnsigned<84> value_2("135" + std::string(i, '0'));
EXPECT_EQ(value_1, value_2);
}
}
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/charconv_bigint.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/charconv_bigint_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
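ReadDigits above queues decimal digits in a uint32 and folds them into the big number with one MultiplyBy/AddWithCarry pair, instead of multiplying by ten per character. A standalone sketch of the same idea over a little-endian uint32 word vector, which stands in for BigUnsigned; all names are illustrative:

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

// words := words * multiplier + addend, schoolbook style over 32-bit limbs.
// With multiplier <= 10^9 the final carry always fits in one extra word.
void MulAdd(std::vector<uint32_t>& words, uint32_t multiplier,
            uint32_t addend) {
  uint64_t carry = addend;
  for (uint32_t& w : words) {
    uint64_t v = static_cast<uint64_t>(w) * multiplier + carry;
    w = static_cast<uint32_t>(v);
    carry = v >> 32;
  }
  if (carry != 0) words.push_back(static_cast<uint32_t>(carry));
}

// Reads a decimal string, batching nine digits at a time (nine decimal
// digits always fit in a uint32) before touching the multiword value.
std::vector<uint32_t> ReadDecimal(const std::string& digits) {
  static const uint32_t kPow10[10] = {1,       10,       100,      1000,
                                      10000,   100000,   1000000,  10000000,
                                      100000000, 1000000000};
  std::vector<uint32_t> words;
  uint32_t queued = 0;
  int queued_digits = 0;
  for (char c : digits) {
    queued = queued * 10 + static_cast<uint32_t>(c - '0');
    if (++queued_digits == 9) {
      MulAdd(words, kPow10[9], queued);
      queued = 0;
      queued_digits = 0;
    }
  }
  if (queued_digits != 0) MulAdd(words, kPow10[queued_digits], queued);
  return words;
}

int main() {
  // 2^64 = 18446744073709551616 needs three 32-bit words: {0, 0, 1}.
  std::vector<uint32_t> n = ReadDecimal("18446744073709551616");
  for (uint32_t w : n) std::printf("0x%08x ", w);
  std::printf("\n");  // prints: 0x00000000 0x00000000 0x00000001
  return 0;
}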
78c82e59-e383-40f5-be5e-ae074095bd05 | cpp | tensorflow/tensorflow | type_inference | tensorflow/core/common_runtime/type_inference.cc | tensorflow/core/common_runtime/type_inference_test.cc | #include "tensorflow/core/common_runtime/type_inference.h"
#include <functional>
#include <list>
#include <queue>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/full_type_util.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace {
// Upper bound on how many times a node is revisited before it is closed,
// which bounds the fixed-point iteration below.
int MAX_VISITS_PER_NODE = 3;
typedef absl::flat_hash_map<int, std::reference_wrapper<TypeInferenceFn const>>
ForwardInferMap;
typedef absl::flat_hash_map<
int, std::pair<int, std::reference_wrapper<TypeInferenceFn const>>>
ReverseInferMap;
bool all_sources_closed(const Node& n, const absl::flat_hash_set<int>& closed,
const ForwardInferMap& forward,
const ReverseInferMap& reverse) {
for (const auto& e : n.out_edges()) {
if (e->IsControlEdge()) {
continue;
}
int dst_id = e->dst()->id();
if (reverse.contains(dst_id) && !closed.contains(dst_id)) {
return false;
}
}
if (forward.contains(n.id())) {
for (const auto& e : n.in_edges()) {
if (e->IsControlEdge()) {
continue;
}
if (!closed.contains(e->src()->id())) {
return false;
}
}
}
return true;
}
std::vector<std::reference_wrapper<const FullTypeDef>> input_types(
const Node& n) {
static FullTypeDef* no_type = new FullTypeDef();
std::vector<std::reference_wrapper<const FullTypeDef>> input_types;
for (const auto& in_edge : n.in_edges()) {
if (in_edge->IsControlEdge()) {
continue;
}
input_types.push_back(*no_type);
}
for (const auto& in_edge : n.in_edges()) {
if (in_edge->IsControlEdge()) {
continue;
}
VLOG(5) << " in edge: " << in_edge->DebugString();
NodeDef* ndef = in_edge->src()->mutable_def();
if (ndef->has_experimental_type()) {
const auto& t = ndef->experimental_type();
if (t.type_id() != TFT_UNSET) {
DCHECK(t.type_id() == TFT_PRODUCT) << ndef->DebugString();
DCHECK(t.args_size() > in_edge->src_output()) << ndef->DebugString();
input_types.at(in_edge->dst_input()) = t.args(in_edge->src_output());
}
}
}
return input_types;
}
Status update_inferred_type(Node* target, const FullTypeDef& t, bool& updated) {
if (t.type_id() == TFT_UNSET) {
VLOG(3) << " " << target->name() << " no inferred type";
return absl::OkStatus();
}
if (target->def().has_experimental_type()) {
const auto existing = target->def().experimental_type();
if (full_type::IsSubtype(existing, t)) {
VLOG(3) << " " << target->name() << " no new type info";
return absl::OkStatus();
} else if (!full_type::IsSubtype(t, existing)) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("type mismatch for node '", target->name(),
"': expected a subtype of:\n", existing.DebugString(),
"\n got:\n", t.DebugString(), "\n "));
}
}
*(target->mutable_def()->mutable_experimental_type()) = t;
updated = true;
VLOG(3) << " " << target->name() << " updated";
return absl::OkStatus();
}
absl::StatusOr<FullTypeDef> run_inference(const string& fn_name,
const TypeRefVector& in_types) {
  // Stub: per-function type inference is not implemented here yet.
  return absl::OkStatus();
}
}
Status TypeInferencePass::Run(
const GraphOptimizationPassOptions& options) {
VLOG(1) << "TypeInferencePass::Run";
DCHECK(options.graph != nullptr);
Graph* g = options.graph->get();
DCHECK(g != nullptr);
FunctionLibraryDefinition* flib_def = options.flib_def;
DCHECK(flib_def != nullptr);
if (VLOG_IS_ON(1)) {
DumpGraphToFile("forward_type_inference_before", *g, flib_def);
}
for (Node* n : g->nodes()) {
n->UpdateProperties();
}
ForwardInferMap forward;
ReverseInferMap reverse;
for (Node* n : g->nodes()) {
VLOG(4) << "\n node: " << n->def().DebugString()
<< "\n op def: " << n->op_def().DebugString();
const OpRegistrationData* reg;
TF_RETURN_IF_ERROR(flib_def->LookUp(n->op_def().name(), ®));
if (reg->fwd_type_fn != nullptr) {
forward.emplace(n->id(), reg->fwd_type_fn);
}
if (reg->rev_type_fn != nullptr) {
reverse.emplace(n->id(), std::make_pair(reg->rev_type_input,
std::cref(reg->rev_type_fn)));
}
}
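  // Applies the node's forward inference function to its input types and
  // merges the result into the node's own output type.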
auto infer_forward = [&forward](Node* n, bool& updated) {
if (!forward.contains(n->id())) {
return absl::OkStatus();
}
VLOG(4) << " " << n->name() << " has forward function";
auto in_types = input_types(*n);
const auto& infer_ret = forward.at(n->id())(in_types, run_inference);
TF_RETURN_WITH_CONTEXT_IF_ERROR(
infer_ret.status(),
absl::StrCat("while inferring type of node '", n->name(), "'"));
TF_RETURN_WITH_CONTEXT_IF_ERROR(
update_inferred_type(n, *infer_ret, updated),
"while updating its output type.");
return absl::OkStatus();
};
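  // Applies the node's reverse inference function and writes the result back
  // onto the producer of the designated input.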
auto infer_reverse = [&reverse](Node* n, bool& updated) {
if (!reverse.contains(n->id())) {
return absl::OkStatus();
}
VLOG(4) << " " << n->name() << " has reverse function";
auto in_types = input_types(*n);
auto rev_idx_and_fn = reverse.at(n->id());
const auto& infer_ret = rev_idx_and_fn.second(in_types, run_inference);
const Edge* e;
TF_RETURN_WITH_CONTEXT_IF_ERROR(
n->input_edge(rev_idx_and_fn.first, &e),
absl::StrCat("while querying input ", rev_idx_and_fn.first, " of '",
n->name(), "'"));
TF_RETURN_WITH_CONTEXT_IF_ERROR(
infer_ret.status(),
absl::StrCat("while inferring type of node '", e->src()->name(),
"' via '", n->name(), "'"));
TF_RETURN_WITH_CONTEXT_IF_ERROR(
update_inferred_type(e->src(), *infer_ret, updated),
absl::StrCat("while updating its output type inferred from '",
n->name(), ","));
return absl::OkStatus();
};
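  // Worklist state: seed with niladic nodes (no non-control data inputs),
  // then keep visiting and re-enqueueing neighbors, closing a node once its
  // sources are closed or it has hit the visit limit.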
std::list<int> queue;
absl::flat_hash_set<int> in_queue;
absl::flat_hash_map<int, int> visit_count;
absl::flat_hash_set<int> open;
absl::flat_hash_set<int> closed;
int max_passes = g->num_nodes();
int visits = 0;
for (Node* n : g->nodes()) {
const int nid = n->id();
bool niladic = true;
for (const auto& e : n->in_edges()) {
if (!e->IsControlEdge()) {
niladic = false;
break;
}
}
if (niladic) {
queue.emplace_back(nid);
in_queue.emplace(nid);
}
open.emplace(nid);
visit_count.emplace(nid, 0);
}
for (int i = 0; i < max_passes; i++) {
VLOG(2) << "Iteration " << i << ", " << queue.size() << " nodes in queue";
while (!queue.empty()) {
int nid = queue.front();
Node* n = g->FindNodeId(nid);
VLOG(3) << " visiting " << n->name();
visits++;
visit_count[nid]++;
DCHECK(!closed.contains(nid));
bool updated = false;
TF_RETURN_IF_ERROR(infer_forward(n, updated));
TF_RETURN_IF_ERROR(infer_reverse(n, updated));
VLOG(4) << " done " << n->def().DebugString();
queue.pop_front();
in_queue.erase(nid);
open.erase(nid);
if (visit_count.at(nid) >= MAX_VISITS_PER_NODE) {
VLOG(3) << " closing " << n->name() << " - visit limit reached";
closed.emplace(nid);
} else if (all_sources_closed(*n, closed, forward, reverse)) {
VLOG(3) << " closing " << n->name() << " - all sources closed";
closed.emplace(nid);
}
for (const auto& out_edge : n->out_edges()) {
if (out_edge->IsControlEdge()) {
continue;
}
Node* c = out_edge->dst();
int cid = c->id();
if (closed.contains(cid) || in_queue.contains(cid)) {
continue;
}
if (updated || all_sources_closed(*c, closed, forward, reverse)) {
queue.emplace_back(cid);
in_queue.emplace(cid);
}
}
if (updated && reverse.contains(nid)) {
const Edge* e;
TF_RETURN_IF_ERROR(n->input_edge(reverse.at(nid).first, &e));
Node* c = e->src();
int cid = c->id();
if (!closed.contains(cid) && !in_queue.contains(cid)) {
queue.emplace_back(cid);
in_queue.emplace(cid);
}
}
}
VLOG(2) << "Done iteration " << i << ", " << closed.size()
<< " nodes closed";
if (open.empty()) {
VLOG(1) << "Finished after " << i + 1 << " iterations; done "
<< closed.size() << " of " << g->num_nodes() << " nodes in "
<< visits << " visits";
break;
} else {
queue.emplace_back(*(open.begin()));
}
}
if (VLOG_IS_ON(1)) {
DumpGraphToFile("forward_type_inference_after", *g, flib_def);
}
return absl::OkStatus();
}
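// Best-effort variant: a failure from TypeInferencePass is logged once as a
// warning instead of being returned, so an invalid graph does not fail the
// optimization pipeline.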
Status WeakTypeInferencePass::Run(
const GraphOptimizationPassOptions& options) {
TypeInferencePass pass;
const auto& pass_status = pass.Run(options);
if (!pass_status.ok()) {
LOG_FIRST_N(WARNING, 1)
<< "Type inference failed. This indicates an "
"invalid graph that escaped type checking. Error message: "
<< pass_status.ToString();
}
return absl::OkStatus();
}
REGISTER_OPTIMIZATION(OptimizationPassRegistry::PRE_PLACEMENT, 99999,
WeakTypeInferencePass);
} | #include "tensorflow/core/common_runtime/type_inference.h"
#include <functional>
#include <string>
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
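// Runs TypeInferencePass over `graph` using the graph's own function library.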
Status Rewrite(std::unique_ptr<Graph>* graph) {
FunctionLibraryDefinition flib_def((*graph)->flib_def());
GraphOptimizationPassOptions opt_options;
SessionOptions session_options;
opt_options.session_options = &session_options;
opt_options.graph = graph;
opt_options.flib_def = &flib_def;
TypeInferencePass pass;
return pass.Run(opt_options);
}
TEST(TypeInferenceTest, BasicStraightline) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
auto start = ops::Placeholder(root.WithOpName("start"), DT_INT64);
auto stop = ops::Placeholder(root.WithOpName("stop"), DT_INT64);
auto step = ops::Placeholder(root.WithOpName("step"), DT_INT64);
Node* ds;
TensorShapeProto shape;
shape.mutable_dim();
shape.set_unknown_rank(false);
TF_ASSERT_OK(NodeBuilder("ds", "RangeDataset", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(start.node())})
.Input({NodeBuilder::NodeOut(stop.node())})
.Input({NodeBuilder::NodeOut(step.node())})
.Attr("output_types", {DT_INT32})
.Attr("output_shapes", {shape})
.Finalize(root.graph(), &ds));
Node* id;
TF_ASSERT_OK(NodeBuilder("id", "Identity", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(ds)})
.Attr("T", DT_VARIANT)
.Finalize(root.graph(), &id));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
for (const auto& node : graph->nodes()) {
if ((node->name() == "ds") || ((node->name() == "id"))) {
const auto& t = node->def().experimental_type();
ASSERT_EQ(t.type_id(), TFT_PRODUCT) << node->def().DebugString();
EXPECT_EQ(t.args(0).type_id(), TFT_DATASET) << node->def().DebugString();
}
}
}
TEST(TypeInferenceTest, CyclicGraphWithV1ControlFlow) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
auto start = ops::Placeholder(root.WithOpName("start"), DT_INT64);
auto stop = ops::Placeholder(root.WithOpName("stop"), DT_INT64);
auto step = ops::Placeholder(root.WithOpName("step"), DT_INT64);
auto cond = ops::Placeholder(root.WithOpName("cond"), DT_BOOL);
Node* ds;
TensorShapeProto shape;
shape.mutable_dim();
shape.set_unknown_rank(false);
TF_ASSERT_OK(NodeBuilder("ds", "RangeDataset", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(start.node())})
.Input({NodeBuilder::NodeOut(stop.node())})
.Input({NodeBuilder::NodeOut(step.node())})
.Attr("output_types", {DT_INT32})
.Attr("output_shapes", {shape})
.Finalize(root.graph(), &ds));
Node* enter;
TF_ASSERT_OK(NodeBuilder("enter", "Enter", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(ds)})
.Attr("frame_name", "loop")
.Finalize(root.graph(), &enter));
Node* loop_cond;
TF_ASSERT_OK(NodeBuilder("loop_cond", "Enter", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(cond.node())})
.Attr("frame_name", "loop")
.Finalize(root.graph(), &loop_cond));
Node* merge;
TF_ASSERT_OK(
NodeBuilder("merge", "Merge", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(enter), NodeBuilder::NodeOut(enter)})
.Finalize(root.graph(), &merge));
Node* sw;
TF_ASSERT_OK(NodeBuilder("sw", "Switch", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(merge)})
.Input({NodeBuilder::NodeOut(loop_cond)})
.Finalize(root.graph(), &sw));
Node* id;
TF_ASSERT_OK(NodeBuilder("id", "Identity", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(sw)})
.Finalize(root.graph(), &id));
Node* next;
TF_ASSERT_OK(NodeBuilder("next", "NextIteration", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(id)})
.Finalize(root.graph(), &next));
TF_ASSERT_OK(root.graph()->UpdateEdge(next, 0, merge, 1));
Node* exit;
TF_ASSERT_OK(NodeBuilder("exit", "Exit", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(sw)})
.Finalize(root.graph(), &exit));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
for (const auto& node : graph->nodes()) {
if ((node->name() == "ds") || (node->name() == "id") ||
(node->name() == "enter") || (node->name() == "exit") ||
(node->name() == "sw") || (node->name() == "merge") ||
(node->name() == "next")) {
const auto& t = node->def().experimental_type();
ASSERT_EQ(t.type_id(), TFT_PRODUCT) << node->def().DebugString();
EXPECT_EQ(t.args(0).type_id(), TFT_DATASET) << node->def().DebugString();
}
}
}
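// Test-only ops with hand-written forward type functions: the unary ops emit
// TFT_TENSOR and TFT_ARRAY respectively, and TestMergeOp yields TFT_ARRAY only
// when its inputs are exactly (TENSOR, ARRAY), otherwise TFT_ANY.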
REGISTER_OP("TestSourceOp").Output("o: variant");
REGISTER_OP("TestTensorUnaryOp")
.Input("i: variant")
.Output("o: variant")
.SetForwardTypeFn([](const TypeRefVector& input_types,
const FunctionTypeInferrer& call_infer) {
FullTypeDef t;
t.set_type_id(TFT_PRODUCT);
t.add_args()->set_type_id(TFT_TENSOR);
return t;
});
REGISTER_OP("TestArrayUnaryOp")
.Input("i: variant")
.Output("o: variant")
.SetForwardTypeFn([](const TypeRefVector& input_types,
const FunctionTypeInferrer& call_infer) {
FullTypeDef t;
t.set_type_id(TFT_PRODUCT);
t.add_args()->set_type_id(TFT_ARRAY);
return t;
});
REGISTER_OP("TestMergeOp")
.Input("i1: variant")
.Input("i2: variant")
.Output("o: variant")
.SetForwardTypeFn([](const TypeRefVector& input_types,
const FunctionTypeInferrer& call_infer) {
EXPECT_EQ(input_types.size(), 2);
FullTypeDef t;
t.set_type_id(TFT_PRODUCT);
if ((input_types[0].get().type_id() == TFT_TENSOR) &&
(input_types[1].get().type_id() == TFT_ARRAY)) {
t.add_args()->set_type_id(TFT_ARRAY);
} else {
t.add_args()->set_type_id(TFT_ANY);
}
return t;
});
TEST(TypeInferenceTest, TernaryNodeWithIgnoredInputs) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
Node* s;
TF_ASSERT_OK(NodeBuilder("s", "TestSourceOp", &root.graph()->flib_def())
.Finalize(root.graph(), &s));
Node* tn;
TF_ASSERT_OK(NodeBuilder("tn", "TestTensorUnaryOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(s)})
.Finalize(root.graph(), &tn));
Node* id;
TF_ASSERT_OK(NodeBuilder("id", "Identity", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(s)})
.Finalize(root.graph(), &id));
Node* an;
TF_ASSERT_OK(NodeBuilder("an", "TestArrayUnaryOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(id)})
.Finalize(root.graph(), &an));
Node* m;
TF_ASSERT_OK(NodeBuilder("m", "TestMergeOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(tn)})
.Input({NodeBuilder::NodeOut(an)})
.Finalize(root.graph(), &m));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
for (const auto& node : graph->nodes()) {
if (node->name() == "m") {
const auto& t = node->def().experimental_type();
ASSERT_EQ(t.type_id(), TFT_PRODUCT) << node->def().DebugString();
EXPECT_EQ(t.args(0).type_id(), TFT_ARRAY) << node->def().DebugString();
}
}
}
TEST(TypeInferenceTest, BinaryNodeWithUnorderedInputs) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
Node* s;
TF_ASSERT_OK(NodeBuilder("s", "TestSourceOp", &root.graph()->flib_def())
.Finalize(root.graph(), &s));
Node* tn;
TF_ASSERT_OK(NodeBuilder("tn", "TestTensorUnaryOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(s)})
.Finalize(root.graph(), &tn));
Node* an;
TF_ASSERT_OK(NodeBuilder("an", "TestArrayUnaryOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(s)})
.Finalize(root.graph(), &an));
Node* m;
TF_ASSERT_OK(NodeBuilder("m", "TestMergeOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(s)})
.Input({NodeBuilder::NodeOut(s)})
.Finalize(root.graph(), &m));
TF_ASSERT_OK(root.ToGraph(graph.get()));
Node* m_copy = nullptr;
Node* tn_copy = nullptr;
Node* an_copy = nullptr;
for (const auto& node : graph->nodes()) {
if (node->name() == "m") {
m_copy = node;
} else if (node->name() == "tn") {
tn_copy = node;
} else if (node->name() == "an") {
an_copy = node;
}
}
TF_ASSERT_OK(graph->UpdateEdge(an_copy, 0, m_copy, 1));
TF_ASSERT_OK(graph->UpdateEdge(tn_copy, 0, m_copy, 0));
TF_ASSERT_OK(Rewrite(&graph));
for (const auto& node : graph->nodes()) {
if (node->name() == "m") {
const auto& t = node->def().experimental_type();
ASSERT_EQ(t.type_id(), TFT_PRODUCT) << node->def().DebugString();
EXPECT_EQ(t.args(0).type_id(), TFT_ARRAY) << node->def().DebugString();
}
}
}
TEST(TypeInferenceTest, BinaryNodeWithCycleInput) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
auto cond = ops::Placeholder(root.WithOpName("cond"), DT_BOOL);
Node* s;
TF_ASSERT_OK(NodeBuilder("s", "TestSourceOp", &root.graph()->flib_def())
.Finalize(root.graph(), &s));
Node* an;
TF_ASSERT_OK(NodeBuilder("an", "TestArrayUnaryOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(s)})
.Finalize(root.graph(), &an));
Node* enter;
TF_ASSERT_OK(NodeBuilder("enter", "Enter", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(an)})
.Attr("frame_name", "loop")
.Finalize(root.graph(), &enter));
Node* loop_cond;
TF_ASSERT_OK(NodeBuilder("loop_cond", "Enter", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(cond.node())})
.Attr("frame_name", "loop")
.Finalize(root.graph(), &loop_cond));
Node* merge;
TF_ASSERT_OK(
NodeBuilder("merge", "Merge", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(enter), NodeBuilder::NodeOut(enter)})
.Finalize(root.graph(), &merge));
Node* sw;
TF_ASSERT_OK(NodeBuilder("sw", "Switch", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(merge)})
.Input({NodeBuilder::NodeOut(loop_cond)})
.Finalize(root.graph(), &sw));
Node* tn;
TF_ASSERT_OK(NodeBuilder("tn", "TestTensorUnaryOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(sw)})
.Finalize(root.graph(), &tn));
Node* next;
TF_ASSERT_OK(NodeBuilder("next", "NextIteration", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(tn)})
.Finalize(root.graph(), &next));
TF_ASSERT_OK(root.graph()->UpdateEdge(next, 0, merge, 1));
Node* exit;
TF_ASSERT_OK(NodeBuilder("exit", "Exit", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(sw)})
.Finalize(root.graph(), &exit));
TF_ASSERT_OK(root.ToGraph(graph.get()));
const auto& status = Rewrite(&graph);
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
::testing::HasSubstr("expected compatible input types"));
}
TEST(WeakTypeInferenceTest, AlwaysSucceeds) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
auto cond = ops::Placeholder(root.WithOpName("cond"), DT_BOOL);
Node* s;
TF_ASSERT_OK(NodeBuilder("s", "TestSourceOp", &root.graph()->flib_def())
.Finalize(root.graph(), &s));
Node* an;
TF_ASSERT_OK(NodeBuilder("an", "TestArrayUnaryOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(s)})
.Finalize(root.graph(), &an));
Node* tn;
TF_ASSERT_OK(NodeBuilder("tn", "TestTensorUnaryOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(s)})
.Finalize(root.graph(), &tn));
Node* merge;
TF_ASSERT_OK(NodeBuilder("merge", "Merge", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(an), NodeBuilder::NodeOut(tn)})
.Finalize(root.graph(), &merge));
TF_ASSERT_OK(root.ToGraph(graph.get()));
FunctionLibraryDefinition flib_def(graph->flib_def());
GraphOptimizationPassOptions opt_options;
SessionOptions session_options;
opt_options.session_options = &session_options;
opt_options.graph = &graph;
opt_options.flib_def = &flib_def;
WeakTypeInferencePass pass;
TF_ASSERT_OK(pass.Run(opt_options));
}
TEST(ReverseTypeInferenceTest, BasicVDependency) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
auto start = ops::Placeholder(root.WithOpName("start"), DT_INT64);
auto stop = ops::Placeholder(root.WithOpName("stop"), DT_INT64);
auto step = ops::Placeholder(root.WithOpName("step"), DT_INT64);
Node* ds;
TensorShapeProto shape;
shape.mutable_dim();
shape.set_unknown_rank(false);
TF_ASSERT_OK(NodeBuilder("ds", "RangeDataset", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(start.node())})
.Input({NodeBuilder::NodeOut(stop.node())})
.Input({NodeBuilder::NodeOut(step.node())})
.Attr("output_types", {DT_INT32})
.Attr("output_shapes", {shape})
.Finalize(root.graph(), &ds));
Node* it;
TF_ASSERT_OK(
NodeBuilder("it", "AnonymousIteratorV2", &root.graph()->flib_def())
.Attr("output_types", {DT_INT32})
.Attr("output_shapes", {shape})
.Finalize(root.graph(), &it));
Node* it_ctor;
TF_ASSERT_OK(NodeBuilder("it_ctor", "MakeIterator", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(ds)})
.Input({NodeBuilder::NodeOut(it)})
.Finalize(root.graph(), &it_ctor));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
for (const auto& node : graph->nodes()) {
if (node->name() == "it") {
const auto& t = node->def().experimental_type();
ASSERT_EQ(t.type_id(), TFT_PRODUCT) << node->def().DebugString();
EXPECT_EQ(t.args(0).type_id(), TFT_ITERATOR) << node->def().DebugString();
}
}
}
TEST(ReverseTypeInferenceTest, FromUnsetType) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
Node* s;
TF_ASSERT_OK(NodeBuilder("s", "TestSourceOp", &root.graph()->flib_def())
.Finalize(root.graph(), &s));
Node* it;
TensorShapeProto shape;
shape.mutable_dim();
shape.set_unknown_rank(false);
TF_ASSERT_OK(
NodeBuilder("it", "AnonymousIteratorV2", &root.graph()->flib_def())
.Attr("output_types", {DT_INT32})
.Attr("output_shapes", {shape})
.Finalize(root.graph(), &it));
Node* it_ctor;
TF_ASSERT_OK(NodeBuilder("it_ctor", "MakeIterator", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(s)})
.Input({NodeBuilder::NodeOut(it)})
.Finalize(root.graph(), &it_ctor));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
for (const auto& node : graph->nodes()) {
if (node->name() == "it") {
ASSERT_FALSE(node->def().has_experimental_type());
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/type_inference.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/type_inference_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
40fe60b6-15b3-4cfd-9724-57a62982e349 | cpp | tensorflow/tensorflow | min_max_builder | tensorflow/lite/delegates/hexagon/builders/min_max_builder.cc | tensorflow/lite/delegates/hexagon/builders/tests/min_max_builder_test.cc | #include "tensorflow/lite/delegates/hexagon/builders/min_max_builder.h"
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace delegates {
namespace hexagon {
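// Wires the two quantized inputs and their min/max ranges into the Hexagon op,
// then adds the quantized output followed by its output min/max scalars.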
TfLiteStatus MinMaxOpBuilder::PopulateSubGraph(const TfLiteIntArray* inputs,
const TfLiteIntArray* outputs,
TfLiteContext* context) {
int a_tensor_id = inputs->data[0];
int b_tensor_id = inputs->data[1];
const auto& a_tensor = context->tensors[a_tensor_id];
const auto& b_tensor = context->tensors[b_tensor_id];
AddInput(graph_builder_->GetHexagonTensorId(a_tensor_id));
AddInput(graph_builder_->GetHexagonTensorId(b_tensor_id));
TF_LITE_ENSURE_STATUS(ComputeAndAddMinAndMax(context, a_tensor));
TF_LITE_ENSURE_STATUS(ComputeAndAddMinAndMax(context, b_tensor));
const int output_tensor_id = outputs->data[0];
const auto& output_tensor = context->tensors[output_tensor_id];
TF_LITE_ENSURE_STATUS(ComputeAndAddMinAndMax(context, output_tensor));
int output_batch_size, output_height_size, output_width_size,
output_depth_size;
GetDims(&output_batch_size, &output_height_size, &output_width_size,
&output_depth_size, context->tensors[outputs->data[0]].dims);
node_output_ = AddOutput(sizeof(uint8_t), 4,
{output_batch_size, output_height_size,
output_width_size, output_depth_size});
AddOutput(sizeof(float), 4, kScalarShape);
AddOutput(sizeof(float), 4, kScalarShape);
return kTfLiteOk;
}
TfLiteStatus MinMaxOpBuilder::RegisterOutputs(const TfLiteIntArray* outputs,
TfLiteContext* context) {
graph_builder_->AddTensorWithID(outputs->data[0], node_output_.first,
node_output_.second);
return kTfLiteOk;
}
OpBuilder* CreateMinMaxBuilder(GraphBuilder* graph_builder, int op_type) {
return new MinMaxOpBuilder(graph_builder, op_type);
}
}
}
} | #include <initializer_list>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
using testing::ElementsAreArray;
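// Single-op model for MAXIMUM/MINIMUM; the second constructor additionally
// marks one of the two inputs as a constant (kTfLiteMmapRo) tensor.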
template <typename data_type>
class MinMaxOpModel : public SingleOpModelWithHexagon {
public:
MinMaxOpModel(tflite::BuiltinOperator op, const TensorData& input1,
const TensorData& input2, const TensorData& output) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(op, BuiltinOptions_MaximumMinimumOptions,
CreateMaximumMinimumOptions(builder_).Union());
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
MinMaxOpModel(tflite::BuiltinOperator op, const TensorData& input1,
std::initializer_list<data_type> input1_values,
const TensorData& input2,
std::initializer_list<data_type> input2_values,
const TensorData& output, bool input1_const) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(op, BuiltinOptions_MaximumMinimumOptions,
CreateMaximumMinimumOptions(builder_).Union());
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
if (input1_const) {
auto* input1_tensor = interpreter_->tensor(input1_);
input1_tensor->allocation_type = kTfLiteMmapRo;
} else {
auto* input2_tensor = interpreter_->tensor(input2_);
input2_tensor->allocation_type = kTfLiteMmapRo;
}
}
void SetInput1(std::vector<data_type> data) { PopulateTensor(input1_, data); }
void SetInput2(std::vector<data_type> data) { PopulateTensor(input2_, data); }
std::vector<data_type> GetOutput() {
return ExtractVector<data_type>(output_);
}
template <typename T>
std::vector<float> GetDequantizedOutput() {
return Dequantize<T>(ExtractVector<T>(output_), GetScale(output_),
GetZeroPoint(output_));
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int input1_;
int input2_;
int output_;
};
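// Runs the model once without the delegate to obtain a reference output, then
// applies the Hexagon delegate and expects identical output shape and values.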
template <typename data_type>
void TestModel(tflite::BuiltinOperator op, const TensorData& input1,
const TensorData& input2, const TensorData& output,
std::initializer_list<data_type> input1_values,
std::initializer_list<data_type> input2_values) {
std::unique_ptr<MinMaxOpModel<data_type>> m;
m = std::make_unique<MinMaxOpModel<data_type>>(op, input1, input2, output);
m->SetInput1(input1_values);
m->SetInput2(input2_values);
ASSERT_EQ(m->Invoke(), kTfLiteOk);
const auto reference_output = m->GetOutput();
const auto reference_output_shape = m->GetOutputShape();
m->ApplyDelegateAndInvoke();
EXPECT_THAT(m->GetOutputShape(), ElementsAreArray(reference_output_shape));
EXPECT_THAT(m->GetOutput(), ElementsAreArray(reference_output));
}
template <typename data_type>
void TestModelConstInput(tflite::BuiltinOperator op, const TensorData& input1,
const TensorData& input2, const TensorData& output,
std::initializer_list<data_type> input1_values,
std::initializer_list<data_type> input2_values,
bool input1_const) {
std::unique_ptr<MinMaxOpModel<data_type>> m;
m = std::make_unique<MinMaxOpModel<data_type>>(
op, input1, input1_values, input2, input2_values, output, input1_const);
m->SetInput1(input1_values);
m->SetInput2(input2_values);
ASSERT_EQ(m->Invoke(), kTfLiteOk);
const auto reference_output = m->GetOutput();
const auto reference_output_shape = m->GetOutputShape();
m->ApplyDelegateAndInvoke();
EXPECT_THAT(m->GetOutputShape(), ElementsAreArray(reference_output_shape));
EXPECT_THAT(m->GetOutput(), ElementsAreArray(reference_output));
}
TEST(MinMaxOpTest, Maximum_Uint8Test) {
std::initializer_list<uint8_t> data1 = {1, 0, 2, 11, 2, 23};
std::initializer_list<uint8_t> data2 = {0, 0, 1, 12, 255, 1};
TestModel<uint8_t>(BuiltinOperator_MAXIMUM,
{TensorType_UINT8, {1, 3, 1, 2}, -1, 255},
{TensorType_UINT8, {1, 3, 1, 2}, -1, 255},
{TensorType_UINT8, {1, 3, 1, 2}, -1, 255}, data1, data2);
}
TEST(MinMaxOpTest, Maximum_Uint8Test_Const) {
std::initializer_list<uint8_t> data1 = {1, 0, 2, 11, 2, 23};
std::initializer_list<uint8_t> data2 = {0, 0, 1, 12, 255, 1};
TestModelConstInput<uint8_t>(
BuiltinOperator_MAXIMUM, {TensorType_UINT8, {1, 3, 1, 2}, -1, 255},
{TensorType_UINT8, {1, 3, 1, 2}, -1, 255},
{TensorType_UINT8, {1, 3, 1, 2}, -1, 255}, data1, data2, false);
}
TEST(MinMaxOpTest, Minimum_Uint8Test) {
std::initializer_list<uint8_t> data1 = {1, 0, 2, 11, 2, 23};
std::initializer_list<uint8_t> data2 = {0, 0, 1, 12, 255, 1};
TestModel<uint8_t>(BuiltinOperator_MINIMUM,
{TensorType_UINT8, {1, 3, 1, 2}, -1, 255},
{TensorType_UINT8, {1, 3, 1, 2}, -1, 255},
{TensorType_UINT8, {1, 3, 1, 2}, -1, 255}, data1, data2);
}
TEST(MinMaxOpTest, Minimum_Uint8Test_Const) {
std::initializer_list<uint8_t> data1 = {1, 0, 2, 11, 2, 23};
std::initializer_list<uint8_t> data2 = {0, 0, 1, 12, 20, 1};
TestModelConstInput<uint8_t>(
BuiltinOperator_MINIMUM, {TensorType_UINT8, {1, 3, 1, 2}, -1, 25},
{TensorType_UINT8, {1, 3, 1, 2}, -1, 25},
{TensorType_UINT8, {1, 3, 1, 2}, -1, 25}, data1, data2, false);
}
TEST(MinMaxOpTest, Maximum_Int8Test) {
std::initializer_list<int8_t> data1 = {1, 0, 2, 11, 2, 23};
std::initializer_list<int8_t> data2 = {0, 0, 1, 12, 123, 1};
TestModel<int8_t>(BuiltinOperator_MAXIMUM,
{TensorType_INT8, {1, 3, 1, 2}, -1, 125},
{TensorType_INT8, {1, 3, 1, 2}, -1, 125},
{TensorType_INT8, {1, 3, 1, 2}, -1, 125}, data1, data2);
}
TEST(MinMaxOpTest, Minimum_Int8Test) {
std::initializer_list<int8_t> data1 = {1, 0, 2, 11, 2, 23};
std::initializer_list<int8_t> data2 = {0, 0, 1, 12, 12, 1};
TestModel<int8_t>(BuiltinOperator_MINIMUM,
{TensorType_INT8, {1, 3, 1, 2}, -1, 25},
{TensorType_INT8, {1, 3, 1, 2}, -1, 25},
{TensorType_INT8, {1, 3, 1, 2}, -1, 25}, data1, data2);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/hexagon/builders/min_max_builder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/hexagon/builders/tests/min_max_builder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
28137463-1133-408c-a2a4-1261817f14ef | cpp | tensorflow/tensorflow | async_value_ref | third_party/xla/xla/tsl/concurrency/async_value_ref.cc | third_party/xla/xla/tsl/concurrency/async_value_ref_test.cc | #include "xla/tsl/concurrency/async_value_ref.h"
#include <string_view>
#include <utility>
#include "absl/status/status.h"
#include "xla/tsl/concurrency/async_value.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "tsl/platform/logging.h"
namespace tsl {
RCReference<IndirectAsyncValue> MakeIndirectAsyncValue() {
return TakeRef(internal::AllocateAndConstruct<IndirectAsyncValue>());
}
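// An error async value must carry a non-OK status; passing an OK status here
// is a fatal (CHECK) error.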
RCReference<ErrorAsyncValue> MakeErrorAsyncValueRef(absl::Status status) {
CHECK(!status.ok()) << "status must be an error";
return TakeRef(
internal::AllocateAndConstruct<ErrorAsyncValue>(std::move(status)));
}
RCReference<ErrorAsyncValue> MakeErrorAsyncValueRef(std::string_view message) {
return MakeErrorAsyncValueRef(absl::InternalError(message));
}
} | #include "xla/tsl/concurrency/async_value_ref.h"
#include <any>
#include <array>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/tsl/concurrency/async_value.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace tsl {
class WrappedInt32 {
public:
explicit WrappedInt32(int32_t value) : value_(value) {}
int32_t value() const { return value_; }
private:
int32_t value_;
};
constexpr int32_t kTestValue = 42;
TEST(AsyncValueRefTest, MakeUnconstructedStatusOrOfAny) {
auto value = MakeUnconstructedAsyncValueRef<absl::StatusOr<std::any>>();
EXPECT_TRUE(value.IsUnavailable());
}
TEST(AsyncValueRefTest, MakeUnconstructedStatusOr) {
auto value = MakeUnconstructedAsyncValueRef<absl::StatusOr<int32_t>>();
EXPECT_TRUE(value.IsUnavailable());
}
TEST(AsyncValueRefTest, MakeConstructedStatusOr) {
auto value = MakeConstructedAsyncValueRef<absl::StatusOr<int32_t>>(42);
EXPECT_TRUE(value.IsUnavailable());
}
TEST(AsyncValueRefTest, MakeAvailableStatusOr) {
auto value = MakeAvailableAsyncValueRef<absl::StatusOr<int32_t>>(42);
EXPECT_TRUE(value.IsAvailable());
EXPECT_EQ(**value, 42);
}
TEST(AsyncValueRefTest, ImplicitStatusConversion) {
auto error = []() -> AsyncValueRef<WrappedInt32> {
return absl::InternalError("Error");
}();
EXPECT_TRUE(error.IsAvailable());
EXPECT_TRUE(error.IsError());
EXPECT_EQ(error.GetError(), absl::InternalError("Error"));
}
TEST(AsyncValueRefTest, ImplicitStatusConversionWithStatusOrPayloadAndStatus) {
auto status = []() -> absl::StatusOr<absl::StatusOr<int32_t>> {
return absl::InternalError("Error");
}();
auto error = []() -> AsyncValueRef<absl::StatusOr<int32_t>> {
return absl::InternalError("Error");
}();
ASSERT_FALSE(status.ok());
ASSERT_EQ(status.status(), absl::InternalError("Error"));
EXPECT_TRUE(error.IsError());
EXPECT_EQ(error.GetError(), absl::InternalError("Error"));
}
TEST(AsyncValueRefTest, ValueCheck) {
auto wrapped_int_value = MakeAvailableAsyncValueRef<WrappedInt32>(kTestValue);
EXPECT_EQ(wrapped_int_value.get().value(), kTestValue);
EXPECT_EQ(wrapped_int_value->value(), kTestValue);
EXPECT_EQ((*wrapped_int_value).value(), kTestValue);
}
TEST(AsyncValueRefTest, ValueCheckFromRCReference) {
auto wrapped_int_value = MakeAvailableAsyncValueRef<WrappedInt32>(kTestValue);
RCReference<AsyncValue> generic_value = std::move(wrapped_int_value);
EXPECT_EQ(generic_value->get<WrappedInt32>().value(), kTestValue);
}
TEST(AsyncValueRefTest, ValueCheckFromAliasedRCReference) {
auto wrapped_int_value = MakeAvailableAsyncValueRef<WrappedInt32>(kTestValue);
RCReference<AsyncValue> generic_value = std::move(wrapped_int_value);
AsyncValueRef<WrappedInt32> aliased_int_value(std::move(generic_value));
EXPECT_EQ(aliased_int_value.get().value(), kTestValue);
EXPECT_EQ(aliased_int_value->value(), kTestValue);
EXPECT_EQ((*aliased_int_value).value(), kTestValue);
}
TEST(AsyncValueRefTest, ConstructedToError) {
auto value = MakeConstructedAsyncValueRef<int32_t>(kTestValue);
EXPECT_FALSE(value.IsConcrete());
EXPECT_FALSE(value.IsAvailable());
value.AndThen([] {});
value.SetError(absl::InternalError("test error"));
EXPECT_TRUE(value.IsAvailable());
EXPECT_FALSE(value.IsConcrete());
EXPECT_TRUE(value.IsError());
}
TEST(AsyncValueRefTest, ConstructedToConcrete) {
auto value = MakeConstructedAsyncValueRef<int32_t>(kTestValue);
EXPECT_FALSE(value.IsConcrete());
EXPECT_FALSE(value.IsAvailable());
value.AndThen([] {});
value.SetStateConcrete();
EXPECT_TRUE(value.IsAvailable());
EXPECT_TRUE(value.IsConcrete());
EXPECT_FALSE(value.IsError());
EXPECT_EQ(kTestValue, value.get());
}
TEST(AsyncValueRefTest, UnconstructedEmplace) {
auto value = MakeUnconstructedAsyncValueRef<int32_t>();
EXPECT_FALSE(value.IsConcrete());
EXPECT_FALSE(value.IsAvailable());
value.AndThen([] {});
value.emplace(kTestValue);
EXPECT_TRUE(value.IsAvailable());
EXPECT_TRUE(value.IsConcrete());
EXPECT_EQ(kTestValue, value.get());
}
TEST(AsyncValueRefTest, CopyRef) {
auto value = MakeAvailableAsyncValueRef<int32_t>(kTestValue);
EXPECT_TRUE(value.IsConcrete());
EXPECT_TRUE(value.IsUnique());
auto copied_value = value.CopyRef();
EXPECT_FALSE(value.IsUnique());
EXPECT_EQ(value.GetAsyncValue(), copied_value.GetAsyncValue());
}
TEST(AsyncValueRefTest, AndThen) {
AsyncValueRef<int32_t> ref = MakeUnconstructedAsyncValueRef<int32_t>();
EXPECT_FALSE(ref.IsConcrete());
EXPECT_FALSE(ref.IsAvailable());
bool executed = false;
ref.AndThen([&]() { executed = true; });
ref.emplace(42);
EXPECT_TRUE(executed);
}
TEST(AsyncValueRefTest, AndThenError) {
AsyncValueRef<int32_t> ref = MakeConstructedAsyncValueRef<int32_t>(42);
auto error = absl::InternalError("test error");
ref.SetError(error);
ref.AndThen([&](absl::Status status) { EXPECT_EQ(status, error); });
}
TEST(AsyncValueRefTest, AndThenNoError) {
AsyncValueRef<int32_t> ref = MakeAvailableAsyncValueRef<int32_t>(42);
ref.AndThen([](absl::Status status) { EXPECT_TRUE(status.ok()); });
}
TEST(AsyncValueRefTest, AndThenStatusOrError) {
AsyncValueRef<int32_t> ref = MakeConstructedAsyncValueRef<int32_t>(42);
auto error = absl::InternalError("test error");
ref.SetError(error);
ref.AndThen([&](absl::StatusOr<int32_t*> v) {
EXPECT_FALSE(v.ok());
EXPECT_EQ(v.status(), error);
});
}
TEST(AsyncValueRefTest, AndThenStatusOrNoError) {
AsyncValueRef<int32_t> ref = MakeAvailableAsyncValueRef<int32_t>(42);
ref.AndThen([&](absl::StatusOr<int32_t*> v) { EXPECT_EQ(**v, 42); });
}
TEST(AsyncValueRefTest, Nullptr) {
AsyncValueRef<int> av_int = nullptr;
EXPECT_FALSE(av_int);
AsyncValueRef<int> av_int2 = MakeConstructedAsyncValueRef<int>(kTestValue);
EXPECT_TRUE(av_int2);
av_int2 = nullptr;
EXPECT_FALSE(av_int2);
}
TEST(AsyncValueRefTest, MapAvailable) {
AsyncValueRef<int32_t> ref = MakeAvailableAsyncValueRef<int32_t>(42);
AsyncValueRef<float> mapped_to_float =
ref.Map([](int32_t value) -> float { return value; });
EXPECT_TRUE(mapped_to_float.IsAvailable());
EXPECT_EQ(mapped_to_float.get(), 42.0f);
}
TEST(AsyncValueRefTest, MapUnvailable) {
AsyncValueRef<int32_t> ref = MakeConstructedAsyncValueRef<int32_t>(42);
AsyncValueRef<float> mapped_to_float =
ref.Map([](int32_t value) -> float { return value; });
EXPECT_FALSE(mapped_to_float.IsAvailable());
ref.SetStateConcrete();
EXPECT_TRUE(mapped_to_float.IsAvailable());
EXPECT_EQ(mapped_to_float.get(), 42.0f);
}
TEST(AsyncValueRefTest, MapToNonMoveable) {
AsyncValueRef<int32_t> ref = MakeAvailableAsyncValueRef<int32_t>(42);
AsyncValueRef<std::atomic<int32_t>> mapped_to_atomic =
ref.Map<std::atomic<int32_t>>([](int32_t value) { return value; });
EXPECT_TRUE(mapped_to_atomic.IsAvailable());
EXPECT_EQ(mapped_to_atomic->load(), 42);
}
TEST(AsyncValueRefTest, MapError) {
AsyncValueRef<int32_t> ref =
MakeErrorAsyncValueRef(absl::InternalError("error"));
AsyncValueRef<float> mapped_to_float =
ref.Map([](int32_t value) -> float { return value; });
EXPECT_TRUE(mapped_to_float.IsError());
EXPECT_EQ(mapped_to_float.GetError(), absl::InternalError("error"));
}
TEST(AsyncValueRefTest, MapUnvailableError) {
AsyncValueRef<int32_t> ref = MakeConstructedAsyncValueRef<int32_t>(42);
AsyncValueRef<float> mapped_to_float =
ref.Map([](int32_t value) -> float { return value; });
EXPECT_FALSE(mapped_to_float.IsAvailable());
ref.SetError(absl::InternalError("error"));
EXPECT_TRUE(mapped_to_float.IsError());
EXPECT_EQ(mapped_to_float.GetError(), absl::InternalError("error"));
}
TEST(AsyncValueRefTest, MapMultipleTimes) {
AsyncValueRef<int32_t> ref = MakeAvailableAsyncValueRef<int32_t>(42);
auto plus_one = [](int32_t value) { return value + 1; };
AsyncValueRef<int32_t> mapped = ref.Map(plus_one)
.Map(plus_one)
.Map(plus_one)
.Map(plus_one)
.Map(plus_one)
.Map(plus_one);
EXPECT_TRUE(mapped.IsAvailable());
EXPECT_EQ(mapped.get(), 42 + 6);
}
TEST(AsyncValuePtrTest, MapToStatus) {
AsyncValueRef<int32_t> ref = MakeAvailableAsyncValueRef<int32_t>(42);
AsyncValueRef<absl::Status> mapped_to_status =
ref.Map([](int32_t value) -> absl::Status { return absl::OkStatus(); });
EXPECT_TRUE(mapped_to_status.IsAvailable());
EXPECT_EQ(mapped_to_status.get(), absl::OkStatus());
}
TEST(AsyncValueRefTest, MapToStatusOr) {
AsyncValueRef<int32_t> ref = MakeAvailableAsyncValueRef<int32_t>(42);
AsyncValueRef<absl::StatusOr<float>> mapped_to_float =
ref.Map([](int32_t value) -> absl::StatusOr<float> { return value; });
EXPECT_TRUE(mapped_to_float.IsAvailable());
EXPECT_EQ(*mapped_to_float.get(), 42.0f);
}
TEST(AsyncValueRefTest, TryMap) {
AsyncValueRef<int32_t> ref = MakeAvailableAsyncValueRef<int32_t>(42);
AsyncValueRef<float> mapped_to_float =
ref.TryMap([](int32_t value) -> absl::StatusOr<float> { return value; });
EXPECT_TRUE(mapped_to_float.IsAvailable());
EXPECT_EQ(mapped_to_float.get(), 42.0f);
}
TEST(AsyncValueRefTest, TryMapError) {
AsyncValueRef<int32_t> ref = MakeAvailableAsyncValueRef<int32_t>(42);
AsyncValueRef<float> mapped_to_float =
ref.TryMap([](int32_t value) -> absl::StatusOr<float> {
return absl::InternalError("error");
});
EXPECT_TRUE(mapped_to_float.IsError());
EXPECT_EQ(mapped_to_float.GetError(), absl::InternalError("error"));
}
TEST(AsyncValueRefTest, TryMapConstructible) {
AsyncValueRef<int32_t> ref = MakeAvailableAsyncValueRef<int32_t>(42);
struct X {
explicit X(float value) : value(value) {}
float value;
};
AsyncValueRef<X> mapped_to_x = ref.TryMap<X>(
[](int32_t value) -> absl::StatusOr<float> { return value; });
EXPECT_TRUE(mapped_to_x.IsAvailable());
EXPECT_EQ(mapped_to_x->value, 42.0f);
}
TEST(AsyncValueRefTest, FlatMapAvailable) {
AsyncValueRef<int32_t> ref = MakeAvailableAsyncValueRef<int32_t>(42);
AsyncValueRef<float> fmapped_to_float = ref.FlatMap([](int32_t value) {
return MakeAvailableAsyncValueRef<float>(static_cast<float>(value));
});
EXPECT_TRUE(fmapped_to_float.IsAvailable());
EXPECT_EQ(fmapped_to_float.get(), 42.0f);
}
TEST(AsyncValueRefTest, FlatMapUnavailable) {
AsyncValueRef<int32_t> ref = MakeConstructedAsyncValueRef<int32_t>(42);
AsyncValueRef<float> fmapped_to_float = ref.FlatMap([](int32_t value) {
return MakeAvailableAsyncValueRef<float>(static_cast<float>(value));
});
EXPECT_FALSE(fmapped_to_float.IsAvailable());
ref.SetStateConcrete();
EXPECT_TRUE(fmapped_to_float.IsAvailable());
EXPECT_EQ(fmapped_to_float.get(), 42.0f);
}
TEST(AsyncValueRefTest, FlatMapAvailableError) {
AsyncValueRef<int32_t> ref =
MakeErrorAsyncValueRef(absl::InternalError("error"));
AsyncValueRef<float> fmapped_to_float = ref.FlatMap([](int32_t value) {
return MakeAvailableAsyncValueRef<float>(static_cast<float>(value));
});
EXPECT_TRUE(fmapped_to_float.IsError());
EXPECT_EQ(fmapped_to_float.GetError(), absl::InternalError("error"));
}
TEST(AsyncValueRefTest, FlatMapUnavailableError) {
AsyncValueRef<int32_t> ref = MakeConstructedAsyncValueRef<int32_t>(42);
AsyncValueRef<float> fmapped_to_float = ref.FlatMap([](int32_t value) {
return MakeAvailableAsyncValueRef<float>(static_cast<float>(value));
});
EXPECT_FALSE(fmapped_to_float.IsAvailable());
ref.SetError(absl::InternalError("error"));
EXPECT_TRUE(fmapped_to_float.IsError());
EXPECT_EQ(fmapped_to_float.GetError(), absl::InternalError("error"));
}
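// Executor used by the tests below: Execute() only queues the task, and
// Quiesce() drains the queue (most recently added first), returning the
// number of tasks that ran.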
struct DeferredExecutor : public AsyncValue::Executor {
void Execute(Task task) final { tasks.push_back(std::move(task)); }
size_t Quiesce() {
size_t n = 0;
while (!tasks.empty()) {
Task task = std::move(tasks.back());
tasks.pop_back();
task();
++n;
}
return n;
}
std::vector<Task> tasks;
};
TEST(AsyncValueRefTest, MakeAsyncValueRef) {
DeferredExecutor executor;
{
AsyncValueRef<float> ref =
MakeAsyncValueRef<float>(executor, []() -> float { return 42.0f; });
EXPECT_FALSE(ref.IsAvailable());
EXPECT_EQ(executor.Quiesce(), 1);
EXPECT_TRUE(ref.IsAvailable());
EXPECT_EQ(ref.get(), 42.0f);
}
{
AsyncValueRef<float> ref =
MakeAsyncValueRef(executor, []() -> float { return 42.0f; });
EXPECT_FALSE(ref.IsAvailable());
EXPECT_EQ(executor.Quiesce(), 1);
EXPECT_TRUE(ref.IsAvailable());
EXPECT_EQ(ref.get(), 42.0f);
}
{
AsyncValueRef<float> ref = TryMakeAsyncValueRef<float>(
executor, []() -> absl::StatusOr<float> { return 42.0f; });
EXPECT_FALSE(ref.IsAvailable());
EXPECT_EQ(executor.Quiesce(), 1);
EXPECT_TRUE(ref.IsAvailable());
EXPECT_EQ(ref.get(), 42.0f);
}
{
AsyncValueRef<float> ref = TryMakeAsyncValueRef(
executor, []() -> absl::StatusOr<float> { return 42.0f; });
EXPECT_FALSE(ref.IsAvailable());
EXPECT_EQ(executor.Quiesce(), 1);
EXPECT_TRUE(ref.IsAvailable());
EXPECT_EQ(ref.get(), 42.0f);
}
{
AsyncValueRef<float> ref = TryMakeAsyncValueRef<float>(
executor,
[]() -> absl::StatusOr<float> { return absl::InternalError("test"); });
EXPECT_FALSE(ref.IsAvailable());
EXPECT_EQ(executor.Quiesce(), 1);
EXPECT_TRUE(ref.IsError());
EXPECT_EQ(ref.GetError(), absl::InternalError("test"));
}
{
AsyncValueRef<float> ref = TryMakeAsyncValueRef(
executor,
[]() -> absl::StatusOr<float> { return absl::InternalError("test"); });
EXPECT_FALSE(ref.IsAvailable());
EXPECT_EQ(executor.Quiesce(), 1);
EXPECT_TRUE(ref.IsError());
EXPECT_EQ(ref.GetError(), absl::InternalError("test"));
}
}
TEST(AsyncValueRefTest, MapAvailableOnExecutor) {
AsyncValueRef<int32_t> ref = MakeAvailableAsyncValueRef<int32_t>(42);
DeferredExecutor executor;
AsyncValueRef<float> mapped_to_float =
ref.Map(executor, [](int32_t value) -> float { return value; });
EXPECT_FALSE(mapped_to_float.IsAvailable());
EXPECT_EQ(executor.Quiesce(), 1);
EXPECT_TRUE(mapped_to_float.IsAvailable());
EXPECT_EQ(mapped_to_float.get(), 42.0f);
}
TEST(AsyncValueRefTest, MapErrorOnExecutor) {
AsyncValueRef<int32_t> ref =
MakeErrorAsyncValueRef(absl::InternalError("error"));
DeferredExecutor executor;
AsyncValueRef<float> mapped_to_float =
ref.Map(executor, [](int32_t value) -> float { return value; });
EXPECT_FALSE(mapped_to_float.IsAvailable());
EXPECT_EQ(executor.Quiesce(), 1);
EXPECT_TRUE(mapped_to_float.IsError());
EXPECT_EQ(mapped_to_float.GetError(), absl::InternalError("error"));
}
TEST(AsyncValueRefTest, MapUnavailableOnExecutor) {
AsyncValueRef<int32_t> ref = MakeConstructedAsyncValueRef<int32_t>(42);
DeferredExecutor executor;
AsyncValueRef<float> mapped_to_float =
ref.Map(executor, [](int32_t value) -> float { return value; });
ref.SetStateConcrete();
ref.release()->DropRef();
EXPECT_FALSE(mapped_to_float.IsAvailable());
EXPECT_EQ(executor.Quiesce(), 1);
EXPECT_TRUE(mapped_to_float.IsAvailable());
EXPECT_EQ(mapped_to_float.get(), 42.0f);
}
TEST(AsyncValueRefTest, TryMapOnExecutor) {
AsyncValueRef<int32_t> ref = MakeConstructedAsyncValueRef<int32_t>(42);
DeferredExecutor executor;
AsyncValueRef<float> mapped_to_float = ref.TryMap(
executor, [](int32_t value) -> absl::StatusOr<float> { return value; });
ref.SetStateConcrete();
ref.release()->DropRef();
EXPECT_FALSE(mapped_to_float.IsAvailable());
EXPECT_EQ(executor.Quiesce(), 1);
EXPECT_TRUE(mapped_to_float.IsAvailable());
EXPECT_EQ(mapped_to_float.get(), 42.0f);
}
TEST(AsyncValueRefTest, TryMapErrorOnExecutor) {
AsyncValueRef<int32_t> ref = MakeConstructedAsyncValueRef<int32_t>(42);
DeferredExecutor executor;
AsyncValueRef<float> mapped_to_float =
ref.TryMap(executor, [](int32_t value) -> absl::StatusOr<float> {
return absl::InternalError("error");
});
ref.SetStateConcrete();
ref.release()->DropRef();
EXPECT_FALSE(mapped_to_float.IsAvailable());
EXPECT_EQ(executor.Quiesce(), 1);
EXPECT_TRUE(mapped_to_float.IsError());
EXPECT_EQ(mapped_to_float.GetError(), absl::InternalError("error"));
}
TEST(AsyncValueRefTest, FlatMapAvailableOnExecutor) {
AsyncValueRef<int32_t> ref = MakeConstructedAsyncValueRef<int32_t>(42);
DeferredExecutor executor;
AsyncValueRef<float> fmapped_to_float =
ref.FlatMap(executor, [](int32_t value) {
return MakeAvailableAsyncValueRef<float>(static_cast<float>(value));
});
ref.SetStateConcrete();
ref.release()->DropRef();
EXPECT_FALSE(fmapped_to_float.IsAvailable());
EXPECT_EQ(executor.Quiesce(), 1);
EXPECT_TRUE(fmapped_to_float.IsAvailable());
EXPECT_EQ(fmapped_to_float.get(), 42.0f);
}
TEST(AsyncValueRefTest, FlatMapDeferredAsyncValueOnExecutor) {
DeferredExecutor executor0;
DeferredExecutor executor1;
{
AsyncValueRef<float> fmapped_to_float =
MakeAsyncValueRef<std::unique_ptr<int32_t>>(executor0, [] {
return std::make_unique<int32_t>(42);
}).FlatMap([&](AsyncValuePtr<std::unique_ptr<int32_t>> ptr) {
return MakeAsyncValueRef<float>(
executor1, [ref = ptr.CopyRef()] { return **ref; });
});
EXPECT_FALSE(fmapped_to_float.IsAvailable());
EXPECT_EQ(executor0.Quiesce(), 1);
EXPECT_FALSE(fmapped_to_float.IsAvailable());
EXPECT_EQ(executor1.Quiesce(), 1);
EXPECT_TRUE(fmapped_to_float.IsAvailable());
EXPECT_EQ(fmapped_to_float.get(), 42.0f);
}
{
AsyncValueRef<float> fmapped_to_float =
MakeAsyncValueRef<std::unique_ptr<int32_t>>(executor0, [] {
return std::make_unique<int32_t>(42);
}).FlatMap(executor1, [&](AsyncValuePtr<std::unique_ptr<int32_t>> ptr) {
return MakeAsyncValueRef<float>(
executor1, [ref = ptr.CopyRef()] { return **ref; });
});
EXPECT_FALSE(fmapped_to_float.IsAvailable());
EXPECT_EQ(executor0.Quiesce(), 1);
EXPECT_FALSE(fmapped_to_float.IsAvailable());
EXPECT_EQ(executor1.Quiesce(), 2);
EXPECT_TRUE(fmapped_to_float.IsAvailable());
EXPECT_EQ(fmapped_to_float.get(), 42.0f);
}
}
TEST(AsyncValueRefTest, BlockUntilReady) {
AsyncValueRef<int32_t> ref = MakeAvailableAsyncValueRef<int32_t>(42);
BlockUntilReady(ref);
}
TEST(AsyncValueRefTest, RunWhenReady) {
AsyncValueRef<int32_t> ref = MakeAvailableAsyncValueRef<int32_t>(42);
bool executed = false;
RunWhenReady(absl::MakeConstSpan({ref}), [&] { executed = true; });
EXPECT_TRUE(executed);
}
namespace {
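// Small class hierarchy with varying alignment, used to exercise payload
// alignment and the Isa/Cast/DynCast helpers.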
struct A {
alignas(16) int32_t a;
};
struct B : public A {
alignas(32) int32_t b;
};
struct C : public B {
alignas(64) int32_t c;
};
struct D : public B {
alignas(64) int32_t d;
};
}
TEST(AsyncValueRefTest, AlignedPayload) {
AsyncValueRef<D> d_ref = MakeAvailableAsyncValueRef<D>();
d_ref->a = 1;
d_ref->b = 2;
d_ref->d = 3;
EXPECT_EQ(d_ref->a, 1);
EXPECT_EQ(d_ref->b, 2);
EXPECT_EQ(d_ref->d, 3);
AsyncValueRef<B> b_ref = d_ref.CopyRef();
EXPECT_EQ(b_ref->a, 1);
EXPECT_EQ(b_ref->b, 2);
AsyncValueRef<A> a_ref = d_ref.CopyRef();
EXPECT_EQ(a_ref->a, 1);
}
TEST(AsyncValueRefTest, Isa) {
AsyncValueRef<A> null_ref;
EXPECT_FALSE(Isa<A>(null_ref));
AsyncValueRef<A> a_ref = MakeAvailableAsyncValueRef<A>();
AsyncValueRef<A> b_ref = MakeAvailableAsyncValueRef<B>();
AsyncValueRef<A> c_ref = MakeAvailableAsyncValueRef<C>();
AsyncValueRef<A> d_ref = MakeAvailableAsyncValueRef<D>();
EXPECT_TRUE(Isa<A>(a_ref));
EXPECT_TRUE(Isa<B>(b_ref));
EXPECT_TRUE(Isa<C>(c_ref));
EXPECT_TRUE(Isa<D>(d_ref));
AsyncValueRef<A> err = MakeErrorAsyncValueRef(absl::InternalError("error"));
EXPECT_TRUE(Isa<A>(err));
EXPECT_TRUE(Isa<B>(err));
EXPECT_TRUE(Isa<C>(err));
EXPECT_TRUE(Isa<D>(err));
AsyncValueRef<A> a_err = MakeConstructedAsyncValueRef<A>();
AsyncValueRef<B> b_err = MakeConstructedAsyncValueRef<B>();
a_err.SetError(absl::InternalError("error"));
b_err.SetError(absl::InternalError("error"));
EXPECT_TRUE(Isa<A>(a_err));
EXPECT_TRUE(Isa<B>(b_err));
auto indirect = MakeIndirectAsyncValue();
AsyncValueRef<A> c_indirect(indirect);
EXPECT_TRUE(Isa<A>(c_indirect));
EXPECT_FALSE(Isa<C>(c_indirect));
indirect->ForwardTo(c_ref.CopyRCRef());
EXPECT_TRUE(Isa<A>(c_indirect));
EXPECT_TRUE(Isa<C>(c_indirect));
auto typed_indirect = MakeIndirectAsyncValue<C>();
AsyncValueRef<A> c_typed_indirect(indirect);
EXPECT_TRUE(Isa<A>(c_typed_indirect));
EXPECT_TRUE(Isa<C>(c_typed_indirect));
typed_indirect->ForwardTo(c_ref.CopyRCRef());
EXPECT_TRUE(Isa<A>(c_typed_indirect));
EXPECT_TRUE(Isa<C>(c_typed_indirect));
auto typed_indirect_err = MakeIndirectAsyncValue<C>();
AsyncValueRef<A> c_typed_indirect_err(typed_indirect_err);
EXPECT_TRUE(Isa<A>(c_typed_indirect.AsPtr()));
EXPECT_TRUE(Isa<C>(c_typed_indirect.AsPtr()));
typed_indirect_err->SetError(absl::InternalError("error"));
EXPECT_TRUE(Isa<A>(c_typed_indirect_err.AsPtr()));
EXPECT_TRUE(Isa<C>(c_typed_indirect_err.AsPtr()));
}
TEST(AsyncValueRefTest, DynCast) {
AsyncValueRef<A> a_ref = MakeAvailableAsyncValueRef<A>();
AsyncValueRef<A> b_ref = MakeAvailableAsyncValueRef<B>();
AsyncValueRef<A> c_ref = MakeAvailableAsyncValueRef<C>();
AsyncValueRef<A> d_ref = MakeAvailableAsyncValueRef<D>();
EXPECT_TRUE(DynCast<A>(a_ref));
EXPECT_TRUE(DynCast<B>(b_ref));
EXPECT_TRUE(DynCast<C>(c_ref));
EXPECT_TRUE(DynCast<D>(d_ref));
EXPECT_TRUE(DynCast<A>(c_ref));
EXPECT_FALSE(DynCast<B>(c_ref));
EXPECT_FALSE(DynCast<C>(d_ref));
AsyncValueRef<A> err = MakeErrorAsyncValueRef(absl::InternalError("error"));
EXPECT_TRUE(DynCast<A>(err));
EXPECT_TRUE(DynCast<B>(err));
EXPECT_TRUE(DynCast<C>(err));
EXPECT_TRUE(DynCast<D>(err));
AsyncValueRef<A> a_err = MakeConstructedAsyncValueRef<A>();
AsyncValueRef<B> b_err = MakeConstructedAsyncValueRef<B>();
a_err.SetError(absl::InternalError("error"));
b_err.SetError(absl::InternalError("error"));
EXPECT_TRUE(DynCast<A>(a_err));
EXPECT_TRUE(DynCast<B>(b_err));
EXPECT_FALSE(DynCast<C>(a_err));
auto indirect = MakeIndirectAsyncValue();
AsyncValueRef<A> c_indirect(indirect);
EXPECT_TRUE(DynCast<A>(c_indirect));
EXPECT_FALSE(DynCast<C>(c_indirect));
indirect->ForwardTo(c_ref.CopyRCRef());
EXPECT_TRUE(DynCast<A>(c_indirect));
EXPECT_TRUE(DynCast<C>(c_indirect));
auto typed_indirect = MakeIndirectAsyncValue<C>();
AsyncValueRef<A> c_typed_indirect(indirect);
EXPECT_TRUE(DynCast<A>(c_typed_indirect));
EXPECT_TRUE(DynCast<C>(c_typed_indirect));
typed_indirect->ForwardTo(c_ref.CopyRCRef());
EXPECT_TRUE(DynCast<A>(c_typed_indirect));
EXPECT_TRUE(DynCast<C>(c_typed_indirect));
}
TEST(AsyncValueRefTest, Cast) {
AsyncValueRef<A> a_ref = MakeAvailableAsyncValueRef<A>();
AsyncValueRef<A> b_ref = MakeAvailableAsyncValueRef<B>();
AsyncValueRef<A> c_ref = MakeAvailableAsyncValueRef<C>();
AsyncValueRef<A> d_ref = MakeAvailableAsyncValueRef<D>();
EXPECT_TRUE(Cast<A>(a_ref));
EXPECT_TRUE(Cast<B>(b_ref));
EXPECT_TRUE(Cast<C>(c_ref));
EXPECT_TRUE(Cast<D>(d_ref));
EXPECT_TRUE(Cast<A>(c_ref));
AsyncValueRef<A> err = MakeErrorAsyncValueRef(absl::InternalError("error"));
EXPECT_TRUE(Cast<A>(err));
EXPECT_TRUE(Cast<B>(err));
EXPECT_TRUE(Cast<C>(err));
EXPECT_TRUE(Cast<D>(err));
AsyncValueRef<A> a_err = MakeConstructedAsyncValueRef<A>();
AsyncValueRef<B> b_err = MakeConstructedAsyncValueRef<B>();
a_err.SetError(absl::InternalError("error"));
b_err.SetError(absl::InternalError("error"));
EXPECT_TRUE(Cast<A>(a_err));
EXPECT_TRUE(Cast<B>(b_err));
auto indirect = MakeIndirectAsyncValue();
AsyncValueRef<A> c_indirect(indirect);
EXPECT_TRUE(Cast<A>(c_indirect));
indirect->ForwardTo(c_ref.CopyRCRef());
EXPECT_TRUE(Cast<A>(c_indirect));
EXPECT_TRUE(Cast<C>(c_indirect));
auto typed_indirect = MakeIndirectAsyncValue<C>();
AsyncValueRef<A> c_typed_indirect(indirect);
EXPECT_TRUE(Cast<A>(c_typed_indirect));
EXPECT_TRUE(Cast<C>(c_typed_indirect));
typed_indirect->ForwardTo(c_ref.CopyRCRef());
EXPECT_TRUE(Cast<A>(c_typed_indirect));
EXPECT_TRUE(Cast<C>(c_typed_indirect));
}
TEST(AsyncValueRefTest, RecursiveOwnership) {
struct State {
explicit State(AsyncValueRef<int32_t> value) : value(std::move(value)) {}
AsyncValueRef<int32_t> value;
};
AsyncValueRef<int32_t> value = MakeConstructedAsyncValueRef<int32_t>(42);
auto state = std::make_unique<State>(std::move(value));
State* state_ptr = state.get();
int64_t counter = 0;
state_ptr->value.AndThen([&, value = 1] { counter += value; });
state_ptr->value.AndThen([&, value = 2] { counter += value; });
state_ptr->value.AndThen([&, value = 3] { counter += value; });
state_ptr->value.AndThen([state = std::move(state)] {});
state_ptr->value.SetStateConcrete();
EXPECT_EQ(counter, 1 + 2 + 3);
}
template <size_t size>
static void BM_MakeConstructed(benchmark::State& state) {
for (auto _ : state) {
auto ref = MakeConstructedAsyncValueRef<std::array<char, size>>();
benchmark::DoNotOptimize(ref);
}
}
BENCHMARK(BM_MakeConstructed<1>);
BENCHMARK(BM_MakeConstructed<4>);
BENCHMARK(BM_MakeConstructed<8>);
BENCHMARK(BM_MakeConstructed<16>);
BENCHMARK(BM_MakeConstructed<32>);
BENCHMARK(BM_MakeConstructed<64>);
BENCHMARK(BM_MakeConstructed<128>);
BENCHMARK(BM_MakeConstructed<256>);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/concurrency/async_value_ref.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/concurrency/async_value_ref_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0d1d5990-3582-48ac-beea-f64aa5a27125 | cpp | tensorflow/tensorflow | cleanup | tensorflow/c/experimental/filesystem/plugins/gcs/cleanup.h | tensorflow/core/lib/gtl/cleanup_test.cc | #ifndef TENSORFLOW_C_EXPERIMENTAL_FILESYSTEM_PLUGINS_GCS_CLEANUP_H_
#define TENSORFLOW_C_EXPERIMENTAL_FILESYSTEM_PLUGINS_GCS_CLEANUP_H_
#include <type_traits>
#include <utility>
namespace tf_gcs_filesystem {
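// RAII guard that invokes the stored callable F on destruction unless
// release() has been called; a default-constructed Cleanup starts out
// released and runs nothing.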
template <typename F>
class Cleanup {
public:
Cleanup() : released_(true), f_() {}
template <typename G>
explicit Cleanup(G&& f)
: f_(std::forward<G>(f)) {}
Cleanup(Cleanup&& src)
: released_(src.is_released()), f_(src.release()) {}
template <typename G>
Cleanup(Cleanup<G>&& src)
: released_(src.is_released()), f_(src.release()) {}
Cleanup& operator=(Cleanup&& src) {
if (!released_) f_();
released_ = src.released_;
f_ = src.release();
return *this;
}
~Cleanup() {
if (!released_) f_();
}
F release() {
released_ = true;
return std::move(f_);
}
bool is_released() const { return released_; }
private:
static_assert(!std::is_reference<F>::value, "F must not be a reference");
bool released_ = false;
F f_;
};
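// Factory that deduces the decayed callable type, so callers can simply write
// `auto cleanup = MakeCleanup([&] { ... });`.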
template <int&... ExplicitParameterBarrier, typename F,
typename DecayF = typename std::decay<F>::type>
Cleanup<DecayF> MakeCleanup(F&& f) {
return Cleanup<DecayF>(std::forward<F>(f));
}
}
#endif | #include "tensorflow/core/lib/gtl/cleanup.h"
#include <functional>
#include <type_traits>
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
using AnyCleanup = gtl::Cleanup<std::function<void()>>;
template <typename T1, typename T2>
void AssertTypeEq() {
static_assert(std::is_same<T1, T2>::value, "unexpected type");
}
TEST(CleanupTest, BasicLambda) {
string s = "active";
{
auto s_cleaner = gtl::MakeCleanup([&s] { s.assign("cleaned"); });
EXPECT_EQ("active", s);
}
EXPECT_EQ("cleaned", s);
}
TEST(FinallyTest, NoCaptureLambda) {
static string& s = *new string;
s.assign("active");
{
auto s_cleaner = gtl::MakeCleanup([] { s.append(" clean"); });
EXPECT_EQ("active", s);
}
EXPECT_EQ("active clean", s);
}
TEST(CleanupTest, Release) {
string s = "active";
{
auto s_cleaner = gtl::MakeCleanup([&s] { s.assign("cleaned"); });
EXPECT_EQ("active", s);
s_cleaner.release();
}
EXPECT_EQ("active", s);
}
TEST(FinallyTest, TypeErasedWithoutFactory) {
string s = "active";
{
AnyCleanup s_cleaner([&s] { s.append(" clean"); });
EXPECT_EQ("active", s);
}
EXPECT_EQ("active clean", s);
}
struct Appender {
Appender(string* s, const string& msg) : s_(s), msg_(msg) {}
void operator()() const { s_->append(msg_); }
string* s_;
string msg_;
};
TEST(CleanupTest, NonLambda) {
string s = "active";
{
auto c = gtl::MakeCleanup(Appender(&s, " cleaned"));
AssertTypeEq<decltype(c), gtl::Cleanup<Appender>>();
EXPECT_EQ("active", s);
}
EXPECT_EQ("active cleaned", s);
}
TEST(CleanupTest, Assign) {
string s = "0";
{
auto clean1 = gtl::MakeCleanup(Appender(&s, " 1"));
auto clean2 = gtl::MakeCleanup(Appender(&s, " 2"));
EXPECT_EQ("0", s);
clean2 = std::move(clean1);
EXPECT_EQ("0 2", s);
}
EXPECT_EQ("0 2 1", s);
}
TEST(CleanupTest, AssignAny) {
string s = "0";
{
auto clean1 = gtl::MakeCleanup(Appender(&s, " 1"));
AnyCleanup clean2 = gtl::MakeCleanup(Appender(&s, " 2"));
EXPECT_EQ("0", s);
clean2 = std::move(clean1);
EXPECT_EQ("0 2", s);
}
EXPECT_EQ("0 2 1", s);
}
TEST(CleanupTest, AssignFromReleased) {
string s = "0";
{
auto clean1 = gtl::MakeCleanup(Appender(&s, " 1"));
auto clean2 = gtl::MakeCleanup(Appender(&s, " 2"));
EXPECT_EQ("0", s);
clean1.release();
clean2 = std::move(clean1);
EXPECT_EQ("0 2", s);
}
EXPECT_EQ("0 2", s);
}
TEST(CleanupTest, AssignToReleased) {
string s = "0";
{
auto clean1 = gtl::MakeCleanup(Appender(&s, " 1"));
auto clean2 = gtl::MakeCleanup(Appender(&s, " 2"));
EXPECT_EQ("0", s);
clean2.release();
EXPECT_EQ("0", s);
clean2 = std::move(clean1);
EXPECT_EQ("0", s);
}
EXPECT_EQ("0 1", s);
}
TEST(CleanupTest, AssignToDefaultInitialized) {
string s = "0";
{
auto clean1 = gtl::MakeCleanup(Appender(&s, " 1"));
{
AnyCleanup clean2;
EXPECT_EQ("0", s);
clean2 = std::move(clean1);
EXPECT_EQ("0", s);
}
EXPECT_EQ("0 1", s);
}
EXPECT_EQ("0 1", s);
}
class CleanupReferenceTest : public ::testing::Test {
public:
struct F {
int* cp;
int* i;
F(int* cp, int* i) : cp(cp), i(i) {}
F(const F& o) : cp(o.cp), i(o.i) { ++*cp; }
F& operator=(const F& o) {
cp = o.cp;
i = o.i;
++*cp;
return *this;
}
F(F&&) = default;
F& operator=(F&&) = default;
void operator()() const { ++*i; }
};
int copies_ = 0;
int calls_ = 0;
F f_ = F(&copies_, &calls_);
static int g_calls;
void SetUp() override { g_calls = 0; }
static void CleanerFunction() { ++g_calls; }
};
int CleanupReferenceTest::g_calls = 0;
TEST_F(CleanupReferenceTest, FunctionPointer) {
{
auto c = gtl::MakeCleanup(&CleanerFunction);
AssertTypeEq<decltype(c), gtl::Cleanup<void (*)()>>();
EXPECT_EQ(0, g_calls);
}
EXPECT_EQ(1, g_calls);
{
auto c = gtl::MakeCleanup(CleanerFunction);
AssertTypeEq<decltype(c), gtl::Cleanup<void (*)()>>();
EXPECT_EQ(1, g_calls);
}
EXPECT_EQ(2, g_calls);
}
TEST_F(CleanupReferenceTest, AssignLvalue) {
string s = "0";
Appender app1(&s, "1");
Appender app2(&s, "2");
{
auto c = gtl::MakeCleanup(app1);
c.release();
c = gtl::MakeCleanup(app2);
EXPECT_EQ("0", s);
app1();
EXPECT_EQ("01", s);
}
EXPECT_EQ("012", s);
}
TEST_F(CleanupReferenceTest, FunctorLvalue) {
EXPECT_EQ(0, copies_);
EXPECT_EQ(0, calls_);
{
auto c = gtl::MakeCleanup(f_);
AssertTypeEq<decltype(c), gtl::Cleanup<F>>();
EXPECT_EQ(1, copies_);
EXPECT_EQ(0, calls_);
}
EXPECT_EQ(1, copies_);
EXPECT_EQ(1, calls_);
{
auto c = gtl::MakeCleanup(f_);
EXPECT_EQ(2, copies_);
EXPECT_EQ(1, calls_);
F f2 = c.release();
EXPECT_EQ(2, copies_);
EXPECT_EQ(1, calls_);
auto c2 = gtl::MakeCleanup(f2);
EXPECT_EQ(3, copies_);
EXPECT_EQ(1, calls_);
}
EXPECT_EQ(3, copies_);
EXPECT_EQ(2, calls_);
}
TEST_F(CleanupReferenceTest, FunctorRvalue) {
{
auto c = gtl::MakeCleanup(std::move(f_));
AssertTypeEq<decltype(c), gtl::Cleanup<F>>();
EXPECT_EQ(0, copies_);
EXPECT_EQ(0, calls_);
}
EXPECT_EQ(0, copies_);
EXPECT_EQ(1, calls_);
}
TEST_F(CleanupReferenceTest, FunctorReferenceWrapper) {
{
auto c = gtl::MakeCleanup(std::cref(f_));
AssertTypeEq<decltype(c), gtl::Cleanup<std::reference_wrapper<const F>>>();
EXPECT_EQ(0, copies_);
EXPECT_EQ(0, calls_);
}
EXPECT_EQ(0, copies_);
EXPECT_EQ(1, calls_);
}
volatile int i;
void Incr(volatile int* ip) { ++*ip; }
void Incr() { Incr(&i); }
void BM_Cleanup(::testing::benchmark::State& state) {
for (auto s : state) {
auto fin = gtl::MakeCleanup([] { Incr(); });
}
}
BENCHMARK(BM_Cleanup);
void BM_AnyCleanup(::testing::benchmark::State& state) {
for (auto s : state) {
AnyCleanup fin = gtl::MakeCleanup([] { Incr(); });
}
}
BENCHMARK(BM_AnyCleanup);
void BM_AnyCleanupNoFactory(::testing::benchmark::State& state) {
for (auto s : state) {
AnyCleanup fin([] { Incr(); });
}
}
BENCHMARK(BM_AnyCleanupNoFactory);
void BM_CleanupBound(::testing::benchmark::State& state) {
volatile int* ip = &i;
for (auto s : state) {
auto fin = gtl::MakeCleanup([ip] { Incr(ip); });
}
}
BENCHMARK(BM_CleanupBound);
void BM_AnyCleanupBound(::testing::benchmark::State& state) {
volatile int* ip = &i;
for (auto s : state) {
AnyCleanup fin = gtl::MakeCleanup([ip] { Incr(ip); });
}
}
BENCHMARK(BM_AnyCleanupBound);
void BM_AnyCleanupNoFactoryBound(::testing::benchmark::State& state) {
volatile int* ip = &i;
for (auto s : state) {
AnyCleanup fin([ip] { Incr(ip); });
}
}
BENCHMARK(BM_AnyCleanupNoFactoryBound);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/filesystem/plugins/gcs/cleanup.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/gtl/cleanup_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a9f555b1-f12d-4de3-a59e-cd6f28002d77 | cpp | google/tensorstore | tagged_ptr | tensorstore/internal/tagged_ptr.h | tensorstore/internal/tagged_ptr_test.cc | #ifndef TENSORSTORE_INTERNAL_TAGGED_PTR_H_
#define TENSORSTORE_INTERNAL_TAGGED_PTR_H_
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <type_traits>
#include <utility>
namespace tensorstore {
namespace internal {
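// TaggedPtr packs a T* and a small integer tag into a single uintptr_t: the
// low TagBits bits hold the tag and the remaining high bits hold the pointer.
// This requires alignof(T) >= (1 << TagBits) so the low pointer bits are
// always zero (enforced by the static_assert in get()).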
template <typename T, int TagBits>
class TaggedPtr {
constexpr static std::uintptr_t kTagMask =
(static_cast<std::uintptr_t>(1) << TagBits) - 1;
constexpr static std::uintptr_t kPointerMask = ~kTagMask;
public:
using element_type = T;
template <typename U>
using rebind = TaggedPtr<U, TagBits>;
constexpr TaggedPtr() noexcept : value_(0) {}
constexpr TaggedPtr(std::nullptr_t) noexcept : value_(0) {}
constexpr TaggedPtr(std::nullptr_t, std::uintptr_t tag) noexcept
: value_(tag) {
assert((tag & kPointerMask) == 0);
}
template <typename U,
std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
TaggedPtr(U* ptr, std::uintptr_t tag = 0) noexcept {
assert((reinterpret_cast<std::uintptr_t>(static_cast<T*>(ptr)) &
kTagMask) == 0 &&
(tag & kPointerMask) == 0);
value_ = reinterpret_cast<std::uintptr_t>(static_cast<T*>(ptr)) | tag;
}
template <typename U,
std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
TaggedPtr(TaggedPtr<U, TagBits> other) noexcept
: TaggedPtr(other.get(), other.tag()) {}
TaggedPtr& operator=(std::nullptr_t) noexcept {
value_ = 0;
return *this;
}
template <typename U>
std::enable_if_t<std::is_convertible_v<U*, T*>, TaggedPtr&> operator=(
U* ptr) noexcept {
*this = TaggedPtr(ptr);
return *this;
}
explicit operator bool() const noexcept {
return static_cast<bool>(reinterpret_cast<T*>(value_ & kPointerMask));
}
T* get() const noexcept {
static_assert(alignof(T) >= (1 << TagBits),
"Number of TagBits is incompatible with alignment of T.");
return reinterpret_cast<T*>(value_ & kPointerMask);
}
operator T*() const noexcept { return get(); }
std::uintptr_t tag() const noexcept { return value_ & kTagMask; }
template <int Bit>
std::enable_if_t<(Bit >= 0 && Bit < TagBits), bool> tag() const noexcept {
return static_cast<bool>((value_ >> Bit) & 1);
}
template <int Bit>
std::enable_if_t<(Bit >= 0 && Bit < TagBits), void> set_tag(
bool value) noexcept {
constexpr std::uintptr_t mask = (static_cast<std::uintptr_t>(1) << Bit);
value_ = (value_ & ~mask) | (static_cast<std::uintptr_t>(value) << Bit);
}
void set_tag(std::uintptr_t tag) noexcept {
assert((tag & kPointerMask) == 0);
value_ = (value_ & kPointerMask) | tag;
}
T* operator->() const noexcept {
T* ptr = get();
assert(ptr != nullptr);
return ptr;
}
T& operator*() const noexcept {
T* ptr = get();
assert(ptr != nullptr);
return *ptr;
}
friend bool operator==(TaggedPtr x, TaggedPtr y) {
return x.get() == y.get() && x.tag() == y.tag();
}
friend bool operator!=(TaggedPtr x, TaggedPtr y) { return !(x == y); }
template <typename H>
friend H AbslHashValue(H h, TaggedPtr x) {
return H::combine(std::move(h), x.value_);
}
private:
std::uintptr_t value_;
};
template <typename T, int TagBits>
inline T* to_address(TaggedPtr<T, TagBits> p) {
return p.get();
}
template <typename T, typename U, int TagBits>
TaggedPtr<T, TagBits> static_pointer_cast(TaggedPtr<U, TagBits> p) {
return TaggedPtr<T, TagBits>(static_cast<T*>(p.get()), p.tag());
}
template <typename T, typename U, int TagBits>
TaggedPtr<T, TagBits> const_pointer_cast(TaggedPtr<U, TagBits> p) {
return TaggedPtr<T, TagBits>(const_cast<T*>(p.get()), p.tag());
}
template <typename T, typename U, int TagBits>
TaggedPtr<T, TagBits> dynamic_pointer_cast(TaggedPtr<U, TagBits> p) {
return TaggedPtr<T, TagBits>(dynamic_cast<T*>(p.get()), p.tag());
}
}
}
#endif | #include "tensorstore/internal/tagged_ptr.h"
#include <memory>
#include <type_traits>
#include <gtest/gtest.h>
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/memory.h"
namespace {
using ::tensorstore::internal::const_pointer_cast;
using ::tensorstore::internal::dynamic_pointer_cast;
using ::tensorstore::internal::IntrusivePtr;
using ::tensorstore::internal::static_pointer_cast;
using ::tensorstore::internal::TaggedPtr;
struct alignas(8) X {
virtual ~X() = default;
};
struct Y : public X {
virtual ~Y() = default;
};
static_assert(!std::is_convertible_v<TaggedPtr<Y, 1>, TaggedPtr<Y, 2>>);
static_assert(std::is_convertible_v<TaggedPtr<Y, 2>, TaggedPtr<X, 2>>);
static_assert(!std::is_convertible_v<TaggedPtr<Y, 1>, TaggedPtr<X, 2>>);
static_assert(std::is_convertible_v<Y*, TaggedPtr<X, 2>>);
static_assert(!std::is_convertible_v<TaggedPtr<Y, 2>, TaggedPtr<Y, 1>>);
static_assert(!std::is_convertible_v<TaggedPtr<X, 2>, TaggedPtr<Y, 2>>);
static_assert(!std::is_convertible_v<TaggedPtr<X, 2>, TaggedPtr<Y, 1>>);
static_assert(!std::is_convertible_v<X*, TaggedPtr<Y, 2>>);
static_assert(std::is_assignable_v<TaggedPtr<X, 2>, TaggedPtr<Y, 2>>);
static_assert(!std::is_assignable_v<TaggedPtr<X, 2>, TaggedPtr<X, 1>>);
static_assert(!std::is_assignable_v<TaggedPtr<X, 2>, TaggedPtr<Y, 1>>);
static_assert(!std::is_assignable_v<TaggedPtr<Y, 2>, TaggedPtr<Y, 3>>);
static_assert(!std::is_assignable_v<TaggedPtr<Y, 2>, TaggedPtr<X, 2>>);
static_assert(!std::is_assignable_v<TaggedPtr<Y, 2>, TaggedPtr<X, 3>>);
TEST(TaggedPtr, DefaultConstruct) {
TaggedPtr<X, 3> p;
EXPECT_EQ(nullptr, p.get());
EXPECT_EQ(0u, p.tag());
}
TEST(TaggedPtr, Construct) {
X x;
TaggedPtr<X, 3> p(&x, 5);
EXPECT_EQ(&x, p.get());
EXPECT_EQ(5u, p.tag());
}
TEST(TaggedPtr, ConstructNullptr) {
TaggedPtr<X, 3> p(nullptr, 5);
EXPECT_EQ(nullptr, p.get());
EXPECT_EQ(5u, p.tag());
}
TEST(TaggedPtr, CopyConstruct) {
X x;
TaggedPtr<X, 3> p(&x, 5);
TaggedPtr<X, 3> p2(p);
EXPECT_EQ(&x, p2.get());
EXPECT_EQ(&x, p.get());
EXPECT_EQ(5u, p.tag());
EXPECT_EQ(5u, p2.tag());
}
TEST(TaggedPtr, CopyAssignTaggedPtr) {
X x;
TaggedPtr<X, 3> p(&x, 5);
TaggedPtr<X, 3> p2;
p2 = p;
EXPECT_EQ(&x, p2.get());
EXPECT_EQ(&x, p.get());
EXPECT_EQ(5u, p2.tag());
EXPECT_EQ(5u, p.tag());
}
TEST(TaggedPtr, CopyAssignPointer) {
X x;
TaggedPtr<X, 3> p(nullptr, 5);
p = &x;
EXPECT_EQ(&x, p.get());
EXPECT_EQ(0u, p.tag());
}
TEST(TaggedPtr, CopyAssignNullptr) {
X x;
TaggedPtr<X, 3> p(&x, 5);
p = nullptr;
EXPECT_EQ(nullptr, p.get());
EXPECT_EQ(0u, p.tag());
}
TEST(TaggedPtr, GetAndSetTag) {
X x;
TaggedPtr<X, 3> p(&x, 3);
EXPECT_EQ(3u, p.tag());
p.set_tag(4);
EXPECT_EQ(4u, p.tag());
EXPECT_TRUE(p.tag<2>());
EXPECT_FALSE(p.tag<0>());
EXPECT_FALSE(p.tag<1>());
p.set_tag<0>(true);
EXPECT_EQ(5u, p.tag());
p.set_tag<2>(false);
EXPECT_EQ(1u, p.tag());
}
TEST(TaggedPtr, TagComparison) {
X x;
X x2;
TaggedPtr<X, 2> p(&x, 3);
TaggedPtr<X, 2> p2(&x, 1);
TaggedPtr<X, 2> p3(&x2, 3);
EXPECT_EQ(p, p);
EXPECT_NE(p, p2);
EXPECT_NE(p, p3);
}
TEST(TaggedPtr, StaticPointerCast) {
Y y;
TaggedPtr<X, 3> p(&y, 5);
TaggedPtr<Y, 3> p2 = static_pointer_cast<Y>(p);
EXPECT_EQ(&y, p2.get());
EXPECT_EQ(5u, p2.tag());
}
TEST(TaggedPtr, ConstPointerCast) {
X x;
TaggedPtr<const X, 3> p(&x, 5);
TaggedPtr<X, 3> p2 = const_pointer_cast<X>(p);
EXPECT_EQ(&x, p2.get());
EXPECT_EQ(5u, p2.tag());
}
TEST(TaggedPtr, DynamicPointerCastSuccess) {
Y y;
TaggedPtr<X, 3> p(&y, 5);
TaggedPtr<Y, 3> p2 = dynamic_pointer_cast<Y>(p);
EXPECT_EQ(&y, p2.get());
EXPECT_EQ(5u, p2.tag());
}
TEST(TaggedPtr, DynamicPointerCastFailure) {
X x;
TaggedPtr<X, 3> p(&x, 5);
TaggedPtr<Y, 3> p2 = dynamic_pointer_cast<Y>(p);
EXPECT_EQ(nullptr, p2.get());
EXPECT_EQ(5u, p2.tag());
}
struct alignas(8) X2 : public tensorstore::internal::AtomicReferenceCount<X2> {
int value;
virtual ~X2() = default;
};
struct Y2 : public X2 {
virtual ~Y2() = default;
};
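// IntrusivePtr traits that store the managed pointer as a TaggedPtr, so a
// reference-counted pointer can carry TagBits of extra state alongside the
// object address.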
template <int TagBits>
struct TaggedIntrusivePtrTraits
: public tensorstore::internal::DefaultIntrusivePtrTraits {
template <typename U>
using pointer = TaggedPtr<U, TagBits>;
};
template <typename T, int TagBits>
using TaggedIntrusivePtr = IntrusivePtr<T, TaggedIntrusivePtrTraits<TagBits>>;
TEST(IntrusivePtrTest, Basic) {
Y2* x = new Y2;
TaggedIntrusivePtr<Y2, 3> p(x);
EXPECT_EQ(1u, p->use_count());
EXPECT_EQ(x, p.get().get());
EXPECT_EQ(0u, p.get().tag());
TaggedIntrusivePtr<Y2, 3> p2({x, 5});
EXPECT_EQ(2u, p2->use_count());
EXPECT_EQ(x, p2.get().get());
EXPECT_EQ(5u, p2.get().tag());
TaggedIntrusivePtr<const X2, 3> p3 = p2;
EXPECT_EQ(3u, p3->use_count());
EXPECT_EQ(x, p3.get().get());
EXPECT_EQ(5u, p3.get().tag());
auto p4 = static_pointer_cast<const Y2>(p3);
static_assert(std::is_same_v<TaggedIntrusivePtr<const Y2, 3>, decltype(p4)>);
EXPECT_EQ(4u, p4->use_count());
EXPECT_EQ(x, p4.get().get());
EXPECT_EQ(5u, p4.get().tag());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/tagged_ptr.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/tagged_ptr_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
ba4bb99e-6122-4932-bfa9-b7e6c0737093 | cpp | tensorflow/tensorflow | flatmap | tensorflow/core/lib/gtl/flatmap.h | third_party/xla/xla/tsl/lib/gtl/flatmap_test.cc | #ifndef TENSORFLOW_CORE_LIB_GTL_FLATMAP_H_
#define TENSORFLOW_CORE_LIB_GTL_FLATMAP_H_
#include "xla/tsl/lib/gtl/flatmap.h"
#include "tensorflow/core/lib/gtl/flatrep.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace gtl {
using tsl::gtl::FlatMap;
}
}
#endif | #include "xla/tsl/lib/gtl/flatmap.h"
#include <algorithm>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "tsl/platform/hash.h"
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace gtl {
namespace {
typedef FlatMap<int64_t, int32> NumMap;
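// Test helper: returns the value mapped to k, or `def` if k is absent, while
// also checking that count(), at(), and find() agree with each other.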
int32 Get(const NumMap& map, int64_t k, int32_t def = -1) {
auto iter = map.find(k);
if (iter == map.end()) {
EXPECT_EQ(map.count(k), 0);
return def;
} else {
EXPECT_EQ(map.count(k), 1);
EXPECT_EQ(&map.at(k), &iter->second);
EXPECT_EQ(iter->first, k);
return iter->second;
}
}
typedef std::vector<std::pair<int64_t, int32>> NumMapContents;
NumMapContents Contents(const NumMap& map) {
NumMapContents result;
for (const auto& p : map) {
result.push_back({p.first, p.second});
}
std::sort(result.begin(), result.end());
return result;
}
void Fill(NumMap* map, int64_t start, int64_t limit) {
for (int64_t i = start; i < limit; i++) {
map->insert({i, i * 100});
}
}
TEST(FlatMapTest, Find) {
NumMap map;
EXPECT_EQ(Get(map, 1), -1);
map.insert({1, 100});
map.insert({2, 200});
EXPECT_EQ(Get(map, 1), 100);
EXPECT_EQ(Get(map, 2), 200);
EXPECT_EQ(Get(map, 3), -1);
}
TEST(FlatMapTest, Insert) {
NumMap map;
EXPECT_EQ(Get(map, 1), -1);
auto result = map.insert({1, 100});
EXPECT_TRUE(result.second);
EXPECT_EQ(result.first->first, 1);
EXPECT_EQ(result.first->second, 100);
EXPECT_EQ(Get(map, 1), 100);
result = map.insert({1, 200});
EXPECT_FALSE(result.second);
EXPECT_EQ(result.first->first, 1);
EXPECT_EQ(result.first->second, 100);
EXPECT_EQ(Get(map, 1), 100);
result.first->second = 300;
EXPECT_EQ(result.first->second, 300);
EXPECT_EQ(Get(map, 1), 300);
result = map.insert({1, 400});
EXPECT_FALSE(result.second);
EXPECT_EQ(result.first->first, 1);
EXPECT_EQ(result.first->second, 300);
EXPECT_EQ(Get(map, 1), 300);
}
TEST(FlatMapTest, InsertGrowth) {
NumMap map;
const int n = 100;
Fill(&map, 0, 100);
EXPECT_EQ(map.size(), n);
for (int i = 0; i < n; i++) {
EXPECT_EQ(Get(map, i), i * 100) << i;
}
}
TEST(FlatMapTest, Emplace) {
NumMap map;
auto result = map.emplace(1, 100);
EXPECT_TRUE(result.second);
EXPECT_EQ(result.first->first, 1);
EXPECT_EQ(result.first->second, 100);
EXPECT_EQ(Get(map, 1), 100);
result = map.emplace(1, 200);
EXPECT_FALSE(result.second);
EXPECT_EQ(result.first->first, 1);
EXPECT_EQ(result.first->second, 100);
EXPECT_EQ(Get(map, 1), 100);
result.first->second = 300;
EXPECT_EQ(result.first->second, 300);
EXPECT_EQ(Get(map, 1), 300);
result = map.emplace(2, 400);
EXPECT_TRUE(result.second);
EXPECT_EQ(result.first->first, 2);
EXPECT_EQ(result.first->second, 400);
EXPECT_EQ(Get(map, 2), 400);
}
TEST(FlatMapTest, EmplaceUniquePtr) {
FlatMap<int64_t, std::unique_ptr<string>> smap;
smap.emplace(1, std::make_unique<string>("hello"));
}
TEST(FlatMapTest, Size) {
NumMap map;
EXPECT_EQ(map.size(), 0);
map.insert({1, 100});
map.insert({2, 200});
EXPECT_EQ(map.size(), 2);
}
TEST(FlatMapTest, Empty) {
NumMap map;
EXPECT_TRUE(map.empty());
map.insert({1, 100});
map.insert({2, 200});
EXPECT_FALSE(map.empty());
}
TEST(FlatMapTest, ArrayOperator) {
NumMap map;
auto v1 = &map[1];
EXPECT_EQ(*v1, 0);
EXPECT_EQ(Get(map, 1), 0);
*v1 = 100;
EXPECT_EQ(map[1], 100);
EXPECT_EQ(Get(map, 1), 100);
auto v1a = &map[1];
EXPECT_EQ(v1, v1a);
EXPECT_EQ(*v1, 100);
map[2] = 200;
EXPECT_EQ(Get(map, 1), 100);
EXPECT_EQ(Get(map, 2), 200);
}
TEST(FlatMapTest, Count) {
NumMap map;
EXPECT_EQ(map.count(1), 0);
EXPECT_EQ(map.count(2), 0);
map.insert({1, 100});
EXPECT_EQ(map.count(1), 1);
EXPECT_EQ(map.count(2), 0);
map.insert({2, 200});
EXPECT_EQ(map.count(1), 1);
EXPECT_EQ(map.count(2), 1);
}
TEST(FlatMapTest, Iter) {
NumMap map;
EXPECT_EQ(Contents(map), NumMapContents());
map.insert({1, 100});
map.insert({2, 200});
EXPECT_EQ(Contents(map), NumMapContents({{1, 100}, {2, 200}}));
}
TEST(FlatMapTest, Erase) {
NumMap map;
EXPECT_EQ(map.erase(1), 0);
map[1] = 100;
map[2] = 200;
EXPECT_EQ(map.erase(3), 0);
EXPECT_EQ(map.erase(1), 1);
EXPECT_EQ(map.size(), 1);
EXPECT_EQ(Get(map, 2), 200);
EXPECT_EQ(Contents(map), NumMapContents({{2, 200}}));
EXPECT_EQ(map.erase(2), 1);
EXPECT_EQ(Contents(map), NumMapContents());
}
TEST(FlatMapTest, EraseIter) {
NumMap map;
Fill(&map, 1, 11);
size_t size = 10;
for (auto iter = map.begin(); iter != map.end();) {
iter = map.erase(iter);
size--;
EXPECT_EQ(map.size(), size);
}
EXPECT_EQ(Contents(map), NumMapContents());
}
TEST(FlatMapTest, EraseIterPair) {
NumMap map;
Fill(&map, 1, 11);
NumMap expected;
auto p1 = map.begin();
expected.insert(*p1);
++p1;
expected.insert(*p1);
++p1;
auto p2 = map.end();
EXPECT_EQ(map.erase(p1, p2), map.end());
EXPECT_EQ(map.size(), 2);
EXPECT_EQ(Contents(map), Contents(expected));
}
TEST(FlatMapTest, EraseLongChains) {
NumMap map;
const int num = 128;
Fill(&map, 0, num);
for (int i = 0; i < num; i += 3) {
EXPECT_EQ(map.erase(i), 1);
}
for (int i = 0; i < num; i++) {
if ((i % 3) != 0) {
EXPECT_EQ(Get(map, i), i * 100);
} else {
EXPECT_EQ(map.count(i), 0);
}
}
const size_t orig_buckets = map.bucket_count();
for (int i = 0; i < num; i++) {
map.erase(i);
}
EXPECT_TRUE(map.empty());
EXPECT_EQ(map.bucket_count(), orig_buckets);
map[1] = 100;
EXPECT_LT(map.bucket_count(), orig_buckets);
}
TEST(FlatMap, AlternatingInsertRemove) {
NumMap map;
map.insert({1000, 1000});
map.insert({2000, 1000});
map.insert({3000, 1000});
for (int i = 0; i < 10000; i++) {
map.insert({i, i});
map.erase(i);
}
}
TEST(FlatMap, ClearNoResize) {
NumMap map;
Fill(&map, 0, 100);
const size_t orig = map.bucket_count();
map.clear_no_resize();
EXPECT_EQ(map.size(), 0);
EXPECT_EQ(Contents(map), NumMapContents());
EXPECT_EQ(map.bucket_count(), orig);
}
TEST(FlatMap, Clear) {
NumMap map;
Fill(&map, 0, 100);
const size_t orig = map.bucket_count();
map.clear();
EXPECT_EQ(map.size(), 0);
EXPECT_EQ(Contents(map), NumMapContents());
EXPECT_LT(map.bucket_count(), orig);
}
TEST(FlatMap, Copy) {
for (int n = 0; n < 10; n++) {
NumMap src;
Fill(&src, 0, n);
NumMap copy = src;
EXPECT_EQ(Contents(src), Contents(copy));
NumMap copy2;
copy2 = src;
EXPECT_EQ(Contents(src), Contents(copy2));
copy2 = *&copy2;
EXPECT_EQ(Contents(src), Contents(copy2));
}
}
TEST(FlatMap, InitFromIter) {
for (int n = 0; n < 10; n++) {
NumMap src;
Fill(&src, 0, n);
auto vec = Contents(src);
NumMap dst(vec.begin(), vec.end());
EXPECT_EQ(Contents(dst), vec);
}
}
TEST(FlatMap, InitializerList) {
NumMap a{{1, 10}, {2, 20}, {3, 30}};
NumMap b({{1, 10}, {2, 20}, {3, 30}});
NumMap c = {{1, 10}, {2, 20}, {3, 30}};
typedef std::unordered_map<int64_t, int32> StdNumMap;
StdNumMap std({{1, 10}, {2, 20}, {3, 30}});
StdNumMap::value_type std_r1 = *std.find(1);
StdNumMap::value_type std_r2 = *std.find(2);
StdNumMap::value_type std_r3 = *std.find(3);
NumMap d{std_r1, std_r2, std_r3};
NumMap e({std_r1, std_r2, std_r3});
NumMap f = {std_r1, std_r2, std_r3};
for (NumMap* map : std::vector<NumMap*>({&a, &b, &c, &d, &e, &f})) {
EXPECT_EQ(Get(*map, 1), 10);
EXPECT_EQ(Get(*map, 2), 20);
EXPECT_EQ(Get(*map, 3), 30);
EXPECT_EQ(Contents(*map), NumMapContents({{1, 10}, {2, 20}, {3, 30}}));
}
}
TEST(FlatMap, InsertIter) {
NumMap a, b;
Fill(&a, 1, 10);
Fill(&b, 8, 20);
b[9] = 10000;
a.insert(b.begin(), b.end());
NumMap expected;
Fill(&expected, 1, 20);
EXPECT_EQ(Contents(a), Contents(expected));
}
TEST(FlatMap, Eq) {
NumMap empty;
NumMap elems;
Fill(&elems, 0, 5);
EXPECT_FALSE(empty == elems);
EXPECT_TRUE(empty != elems);
NumMap copy = elems;
EXPECT_TRUE(copy == elems);
EXPECT_FALSE(copy != elems);
NumMap changed = elems;
changed[3] = 1;
EXPECT_FALSE(changed == elems);
EXPECT_TRUE(changed != elems);
NumMap changed2 = elems;
changed2.erase(3);
EXPECT_FALSE(changed2 == elems);
EXPECT_TRUE(changed2 != elems);
}
TEST(FlatMap, Swap) {
NumMap a, b;
Fill(&a, 1, 5);
Fill(&b, 100, 200);
NumMap c = a;
NumMap d = b;
EXPECT_EQ(c, a);
EXPECT_EQ(d, b);
c.swap(d);
EXPECT_EQ(c, b);
EXPECT_EQ(d, a);
}
TEST(FlatMap, Reserve) {
NumMap src;
Fill(&src, 1, 100);
NumMap a = src;
a.reserve(10);
EXPECT_EQ(a, src);
NumMap b = src;
b.rehash(1000);
EXPECT_EQ(b, src);
}
TEST(FlatMap, EqualRangeMutable) {
NumMap map;
Fill(&map, 1, 10);
auto p1 = map.equal_range(3);
EXPECT_TRUE(p1.first != p1.second);
EXPECT_EQ(p1.first->first, 3);
EXPECT_EQ(p1.first->second, 300);
++p1.first;
EXPECT_TRUE(p1.first == p1.second);
auto p2 = map.equal_range(100);
EXPECT_TRUE(p2.first == p2.second);
}
TEST(FlatMap, EqualRangeConst) {
NumMap tmp;
Fill(&tmp, 1, 10);
const NumMap map = tmp;
auto p1 = map.equal_range(3);
EXPECT_TRUE(p1.first != p1.second);
EXPECT_EQ(p1.first->first, 3);
EXPECT_EQ(p1.first->second, 300);
++p1.first;
EXPECT_TRUE(p1.first == p1.second);
auto p2 = map.equal_range(100);
EXPECT_TRUE(p2.first == p2.second);
}
TEST(FlatMap, Prefetch) {
NumMap map;
Fill(&map, 0, 1000);
for (int i = 0; i < 2000; i++) {
map.prefetch_value(i);
}
}
struct NA {
int64_t value;
NA() : value(-1) {}
explicit NA(int64_t v) : value(v) {}
NA(const NA& x) : value(x.value) {}
bool operator==(const NA& x) const { return value == x.value; }
};
struct HashNA {
size_t operator()(NA x) const { return x.value; }
};
TEST(FlatMap, NonAssignable) {
FlatMap<NA, NA, HashNA> map;
for (int i = 0; i < 100; i++) {
map[NA(i)] = NA(i * 100);
}
for (int i = 0; i < 100; i++) {
EXPECT_EQ(map.count(NA(i)), 1);
auto iter = map.find(NA(i));
EXPECT_NE(iter, map.end());
EXPECT_EQ(iter->first, NA(i));
EXPECT_EQ(iter->second, NA(i * 100));
EXPECT_EQ(map[NA(i)], NA(i * 100));
}
map.erase(NA(10));
EXPECT_EQ(map.count(NA(10)), 0);
}
TEST(FlatMap, ForwardIterator) {
typedef FlatMap<NA, NA, HashNA> NAMap;
NAMap map({{NA(1), NA(10)}, {NA(2), NA(20)}});
NAMap::iterator it1 = map.find(NA(1));
NAMap::iterator it2 = map.find(NA(2));
EXPECT_TRUE(it1 != map.end());
EXPECT_TRUE(it2 != map.end());
EXPECT_FALSE(it1 == map.end());
EXPECT_FALSE(it2 == map.end());
EXPECT_TRUE(it1 != it2);
EXPECT_FALSE(it1 == it2);
EXPECT_EQ((*it1).first, NA(1));
EXPECT_EQ((*it1).second, NA(10));
EXPECT_EQ((*it2).first, NA(2));
EXPECT_EQ((*it2).second, NA(20));
EXPECT_EQ(it1->first, NA(1));
EXPECT_EQ(it1->second, NA(10));
EXPECT_EQ(it2->first, NA(2));
EXPECT_EQ(it2->second, NA(20));
NAMap::iterator copy_it1 = it1;
NAMap::iterator copy_it2 = it2;
EXPECT_EQ(copy_it1->first, NA(1));
EXPECT_EQ(copy_it1->second, NA(10));
EXPECT_EQ(copy_it2->first, NA(2));
EXPECT_EQ(copy_it2->second, NA(20));
NAMap::iterator& pp_copy_it1 = ++copy_it1;
NAMap::iterator& pp_copy_it2 = ++copy_it2;
EXPECT_TRUE(pp_copy_it1 == copy_it1);
EXPECT_TRUE(pp_copy_it2 == copy_it2);
EXPECT_TRUE(copy_it1 != it1);
EXPECT_TRUE(copy_it2 != it2);
if (copy_it1 == map.end()) {
EXPECT_TRUE(copy_it2 != map.end());
EXPECT_EQ(copy_it2->first, NA(1));
EXPECT_EQ(copy_it2->second, NA(10));
EXPECT_EQ(pp_copy_it2->first, NA(1));
EXPECT_EQ(pp_copy_it2->second, NA(10));
} else {
EXPECT_TRUE(copy_it2 == map.end());
EXPECT_EQ(copy_it1->first, NA(2));
EXPECT_EQ(copy_it1->second, NA(20));
EXPECT_EQ(pp_copy_it1->first, NA(2));
EXPECT_EQ(pp_copy_it1->second, NA(20));
}
EXPECT_EQ(it1->first, NA(1));
EXPECT_EQ(it1->second, NA(10));
EXPECT_EQ(it2->first, NA(2));
EXPECT_EQ(it2->second, NA(20));
copy_it1 = it1;
copy_it2 = it2;
EXPECT_EQ(copy_it1->first, NA(1));
EXPECT_EQ(copy_it1->second, NA(10));
EXPECT_EQ(copy_it2->first, NA(2));
EXPECT_EQ(copy_it2->second, NA(20));
NAMap::iterator copy_it1_pp = copy_it1++;
NAMap::iterator copy_it2_pp = copy_it2++;
EXPECT_TRUE(copy_it1_pp != copy_it1);
EXPECT_TRUE(copy_it2_pp != copy_it2);
EXPECT_TRUE(copy_it1_pp == it1);
EXPECT_TRUE(copy_it2_pp == it2);
EXPECT_EQ(copy_it1_pp->first, NA(1));
EXPECT_EQ(copy_it1_pp->second, NA(10));
EXPECT_EQ(copy_it2_pp->first, NA(2));
EXPECT_EQ(copy_it2_pp->second, NA(20));
EXPECT_TRUE(copy_it1 != it1);
EXPECT_TRUE(copy_it2 != it2);
if (copy_it1 == map.end()) {
EXPECT_TRUE(copy_it2 != map.end());
EXPECT_EQ(copy_it2->first, NA(1));
EXPECT_EQ(copy_it2->second, NA(10));
} else {
EXPECT_TRUE(copy_it2 == map.end());
EXPECT_EQ(copy_it1->first, NA(2));
EXPECT_EQ(copy_it1->second, NA(20));
}
EXPECT_EQ(it1->first, NA(1));
EXPECT_EQ(it1->second, NA(10));
EXPECT_EQ(it2->first, NA(2));
EXPECT_EQ(it2->second, NA(20));
}
TEST(FlatMap, ConstructDestruct) {
FlatMap<string, string> map;
string k1 = "the quick brown fox jumped over the lazy dog";
string k2 = k1 + k1;
string k3 = k1 + k2;
map[k1] = k2;
map[k3] = k1;
EXPECT_EQ(k1, map.find(k1)->first);
EXPECT_EQ(k2, map.find(k1)->second);
EXPECT_EQ(k1, map[k3]);
map.erase(k3);
EXPECT_EQ(string(), map[k3]);
map.clear();
map[k1] = k2;
EXPECT_EQ(k2, map[k1]);
map.reserve(100);
EXPECT_EQ(k2, map[k1]);
}
struct CustomCmpKey {
int64_t a;
int64_t b;
CustomCmpKey(int64_t v1, int64_t v2) : a(v1), b(v2) {}
bool operator==(const CustomCmpKey& x) const { return a == x.a && b == x.b; }
};
struct HashA {
size_t operator()(CustomCmpKey x) const { return x.a; }
};
struct EqA {
bool operator()(CustomCmpKey x, CustomCmpKey y) const { return x.a == y.a; }
};
TEST(FlatMap, CustomCmp) {
FlatMap<CustomCmpKey, int, HashA, EqA> map;
map[CustomCmpKey(100, 200)] = 300;
EXPECT_EQ(300, map[CustomCmpKey(100, 200)]);
EXPECT_EQ(300, map[CustomCmpKey(100, 500)]);
}
typedef std::unique_ptr<int> UniqInt;
static UniqInt MakeUniq(int i) { return std::make_unique<int>(i); }
struct HashUniq {
size_t operator()(const UniqInt& p) const { return *p; }
};
struct EqUniq {
bool operator()(const UniqInt& a, const UniqInt& b) const { return *a == *b; }
};
typedef FlatMap<UniqInt, UniqInt, HashUniq, EqUniq> UniqMap;
TEST(FlatMap, UniqueMap) {
UniqMap map;
const int N = 10;
for (int i = 0; i < N; i++) {
if ((i % 2) == 0) {
map[MakeUniq(i)] = MakeUniq(i + 100);
} else {
map.emplace(MakeUniq(i), MakeUniq(i + 100));
}
}
EXPECT_EQ(map.size(), N);
UniqMap map2(std::move(map));
for (int i = 0; i < N; i++) {
EXPECT_EQ(*map2.at(MakeUniq(i)), i + 100);
}
UniqMap map3;
map3 = std::move(map2);
EXPECT_EQ(map3.count(MakeUniq(2)), 1);
map3.erase(MakeUniq(2));
EXPECT_EQ(map3.count(MakeUniq(2)), 0);
map3.clear();
EXPECT_EQ(map3.size(), 0);
EXPECT_GE(map.size(), 0);
EXPECT_GE(map2.size(), 0);
EXPECT_TRUE(map.emplace(MakeUniq(-1), MakeUniq(-1)).second);
}
TEST(FlatMap, UniqueMapIter) {
UniqMap map;
const int kCount = 10;
const int kValueDelta = 100;
for (int i = 1; i <= kCount; i++) {
map[MakeUniq(i)] = MakeUniq(i + kValueDelta);
}
int key_sum = 0;
int val_sum = 0;
for (const auto& p : map) {
key_sum += *p.first;
val_sum += *p.second;
}
EXPECT_EQ(key_sum, (kCount * (kCount + 1)) / 2);
EXPECT_EQ(val_sum, key_sum + (kCount * kValueDelta));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/gtl/flatmap.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/gtl/flatmap_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
054d4ec8-7d02-492e-9705-84fd9efd0b3a | cpp | tensorflow/tensorflow | serialization_utils | tensorflow/core/data/serialization_utils.cc | tensorflow/core/data/serialization_utils_test.cc | #include "tensorflow/core/data/serialization_utils.h"
#include <cstdint>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/data/compression_utils.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/dataset.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/framework/variant_op_registry.h"
#include "tensorflow/core/framework/variant_tensor_data.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/platform/stringpiece.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kDelimiter[] = "@@";
constexpr char kComponent[] = "component";
constexpr char kNumComponents[] = "num_components";
constexpr char kNumElements[] = "num_elements";
constexpr char kIsDataset[] = ".is_dataset";
constexpr char kIteratorVariantTypeName[] = "tensorflow::Iterator";
constexpr char kOutputNode[] = ".output_node";
Status FromGraphDef(FunctionLibraryRuntime* flr, const GraphDef& graph_def,
const std::vector<std::pair<string, Tensor>>& input_list,
const string& output_node, Tensor* result) {
FunctionLibraryRuntime* cloned_flr = nullptr;
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr = nullptr;
std::unique_ptr<FunctionLibraryDefinition> lib_def = nullptr;
TF_RETURN_IF_ERROR(flr->Clone(&lib_def, &pflr, &cloned_flr, true));
TF_RETURN_IF_ERROR(AddToFunctionLibrary(lib_def.get(), graph_def.library()));
Graph graph(OpRegistry::Global());
TF_RETURN_IF_ERROR(ImportGraphDef({}, graph_def, &graph, nullptr));
std::vector<Tensor> outputs;
GraphRunner graph_runner(cloned_flr->device());
TF_RETURN_IF_ERROR(graph_runner.Run(&graph, cloned_flr, input_list,
{output_node}, &outputs));
*result = outputs[0];
return absl::OkStatus();
}
Status FindStatefulOps(const GraphDef& graph_def,
std::vector<string>* stateful_op_names) {
FunctionLibraryDefinition lib_def(OpRegistry::Global(), graph_def.library());
for (const auto& node : graph_def.node()) {
if (node.op() == FunctionLibraryDefinition::kRetOp) continue;
if (!IsNodeStateful(lib_def, node).ok()) {
stateful_op_names->push_back(node.op());
}
}
for (const auto& fdef : graph_def.library().function()) {
if (!fdef.signature().is_stateful()) continue;
for (const auto& node : fdef.node_def()) {
if (!IsNodeStateful(lib_def, node).ok()) {
stateful_op_names->push_back(
absl::StrCat(node.op(), " in function: ", fdef.signature().name()));
}
}
}
return absl::OkStatus();
}
}
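// Checkpoint layout used below: "<key_prefix>" stores kNumElements, and each
// element i lives under "<key_prefix>::i" with kNumComponents plus one
// "component[j]" tensor per component.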
Status ReadElementsFromCheckpoint(IteratorContext* ctx,
IteratorStateReader* reader,
StringPiece key_prefix,
std::vector<std::vector<Tensor>>* elements) {
int64_t num_elements;
TF_RETURN_IF_ERROR(
reader->ReadScalar(key_prefix, kNumElements, &num_elements));
DCHECK(elements->empty());
elements->reserve(num_elements);
for (int i = 0; i < num_elements; ++i) {
std::string element_prefix = absl::StrCat(key_prefix, "::", i);
int64_t num_components;
TF_RETURN_IF_ERROR(
reader->ReadScalar(element_prefix, kNumComponents, &num_components));
elements->emplace_back();
std::vector<Tensor>& element = elements->at(i);
element.reserve(num_components);
for (int j = 0; j < num_components; ++j) {
element.emplace_back();
TF_RETURN_IF_ERROR(reader->ReadTensor(
ctx->flr(), element_prefix, absl::StrCat(kComponent, "[", j, "]"),
&element.back()));
}
}
return absl::OkStatus();
}
Status WriteElement(IteratorStateWriter* writer, StringPiece key_prefix,
const std::vector<std::vector<Tensor>>& elements,
int64_t index) {
const std::vector<Tensor>& element = elements[index];
std::string element_prefix = absl::StrCat(key_prefix, "::", index);
TF_RETURN_IF_ERROR(
writer->WriteScalar(element_prefix, kNumComponents, element.size()));
for (int j = 0; j < element.size(); ++j) {
TF_RETURN_IF_ERROR(writer->WriteTensor(
element_prefix, absl::StrCat(kComponent, "[", j, "]"), element[j]));
}
return absl::OkStatus();
}
Status WriteElementsToCheckpoint(
IteratorStateWriter* writer, StringPiece key_prefix,
const std::vector<std::vector<Tensor>>& elements) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(key_prefix, kNumElements, elements.size()));
for (int i = 0; i < elements.size(); ++i) {
TF_RETURN_IF_ERROR(WriteElement(writer, key_prefix, elements, i));
}
return absl::OkStatus();
}
Status UpdateCheckpointElements(
IteratorStateWriter* writer, StringPiece key_prefix,
const std::vector<std::vector<Tensor>>& elements,
const absl::flat_hash_set<int64_t>& checkpoint_indices) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(key_prefix, kNumElements, elements.size()));
for (int64_t i : checkpoint_indices) {
TF_RETURN_IF_ERROR(WriteElement(writer, key_prefix, elements, i));
}
return absl::OkStatus();
}
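// Each VariantTensorData carries a metadata string of the form
// "<name>@@<key0>@@<key1>@@..."; the keys that follow the name map, in order,
// to the tensors stored in that VariantTensorData.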
VariantTensorDataReader::VariantTensorDataReader(
const std::vector<const tensorflow::VariantTensorData*>& data) {
for (const auto& d : data) {
string metadata;
d->get_metadata(&metadata);
auto keys = str_util::Split(metadata, kDelimiter, str_util::SkipEmpty());
const string name = keys[0];
data_[name] = d;
map_[name] = std::map<string, size_t>();
for (size_t i = 1; i < keys.size(); ++i) {
map_[name][keys[i]] = i - 1;
}
}
}
Status VariantTensorDataReader::ReadScalar(StringPiece key,
int64_t* val) const {
string prefix;
TF_RETURN_IF_ERROR(ExtractIteratorPrefix(key, &prefix));
return ReadScalar(prefix, key, val);
}
Status VariantTensorDataReader::ReadScalar(StringPiece name, StringPiece key,
int64_t* val) const {
return ReadScalarInternal(name, key, val);
}
Status VariantTensorDataReader::ReadScalar(StringPiece key,
tstring* val) const {
string prefix;
TF_RETURN_IF_ERROR(ExtractIteratorPrefix(key, &prefix));
return ReadScalar(prefix, key, val);
}
Status VariantTensorDataReader::ReadScalar(StringPiece name, StringPiece key,
tstring* val) const {
return ReadScalarInternal(name, key, val);
}
Status VariantTensorDataReader::ReadTensor(StringPiece key, Tensor* val) const {
string prefix;
TF_RETURN_IF_ERROR(ExtractIteratorPrefix(key, &prefix));
return ReadTensor(prefix, key, val);
}
Status VariantTensorDataReader::ReadTensor(FunctionLibraryRuntime* flr,
StringPiece key, Tensor* val) const {
string prefix;
TF_RETURN_IF_ERROR(ExtractIteratorPrefix(key, &prefix));
return ReadTensorInternal(flr, prefix, key, val);
}
Status VariantTensorDataReader::ReadTensor(StringPiece name, StringPiece key,
Tensor* val) const {
return ReadTensor(nullptr, name, key, val);
}
Status VariantTensorDataReader::ReadTensor(FunctionLibraryRuntime* flr,
StringPiece name, StringPiece key,
Tensor* val) const {
return ReadTensorInternal(flr, name, key, val);
}
bool VariantTensorDataReader::Contains(StringPiece key) const {
string prefix;
if (!ExtractIteratorPrefix(key, &prefix).ok()) {
return false;
}
return Contains(prefix, key);
}
bool VariantTensorDataReader::Contains(StringPiece n, StringPiece key) const {
string name(n);
auto it = map_.find(name);
if (it == map_.end()) {
return false;
}
const auto& bucket = it->second;
return bucket.find(string(key)) != bucket.end();
}
template <typename T>
Status VariantTensorDataReader::ReadScalarInternal(StringPiece n,
StringPiece key,
T* val) const {
string name(n);
auto it = map_.find(name);
if (it == map_.end()) {
return errors::NotFound(name);
}
const auto& bucket = it->second;
auto key_it = bucket.find(string(key));
if (key_it == bucket.end()) {
return errors::NotFound(key);
}
*val = data_.at(name)->tensors(key_it->second).scalar<T>()();
return absl::OkStatus();
}
Status VariantTensorDataReader::ReadTensorInternal(FunctionLibraryRuntime* flr,
StringPiece n,
StringPiece key,
Tensor* val) const {
if (Contains(n, strings::StrCat(key, kIsDataset))) {
return ReadDatasetInternal(flr, n, key, val);
}
string name(n);
auto it = map_.find(name);
if (it == map_.end()) {
return errors::NotFound(name);
}
const auto& bucket = it->second;
auto key_it = bucket.find(string(key));
if (key_it == bucket.end()) {
return errors::NotFound(key);
}
*val = data_.at(name)->tensors(key_it->second);
return absl::OkStatus();
}
Status VariantTensorDataReader::ReadDatasetInternal(FunctionLibraryRuntime* flr,
StringPiece n,
StringPiece key,
Tensor* val) const {
if (flr == nullptr) {
return errors::Internal(
"Function library runtime is needed to restore a dataset.");
}
tstring output_node, serialized_graph_def;
TF_RETURN_IF_ERROR(
ReadScalar(n, strings::StrCat(key, kOutputNode), &output_node));
TF_RETURN_IF_ERROR(
ReadScalar(n, strings::StrCat(key), &serialized_graph_def));
GraphDef graph_def;
graph_def.ParseFromString(serialized_graph_def);
TF_RETURN_IF_ERROR(FromGraphDef(flr, graph_def, {}, output_node, val));
return absl::OkStatus();
}
std::map<string, Tensor> VariantTensorDataReader::ReadAllTensors() {
std::map<string, Tensor> result;
for (const auto& entry : map_) {
string key1 = entry.first;
for (const auto& inner : entry.second) {
string key2 = inner.first;
size_t index = inner.second;
result[absl::StrCat(key1, kDelimiter, key2)] =
data_[key1]->tensors(index);
}
}
return result;
}
Status VariantTensorDataWriter::WriteScalar(StringPiece key,
const int64_t val) {
string prefix;
TF_RETURN_IF_ERROR(ExtractIteratorPrefix(key, &prefix));
return WriteScalar(prefix, key, val);
}
Status VariantTensorDataWriter::WriteScalar(StringPiece name, StringPiece key,
const int64_t val) {
return WriteScalarInternal(name, key, val);
}
Status VariantTensorDataWriter::WriteScalar(StringPiece key,
const tstring& val) {
string prefix;
TF_RETURN_IF_ERROR(ExtractIteratorPrefix(key, &prefix));
return WriteScalar(prefix, key, val);
}
Status VariantTensorDataWriter::WriteScalar(StringPiece name, StringPiece key,
const tstring& val) {
return WriteScalarInternal(name, key, val);
}
Status VariantTensorDataWriter::WriteTensor(StringPiece key,
const Tensor& val) {
string prefix;
TF_RETURN_IF_ERROR(ExtractIteratorPrefix(key, &prefix));
return WriteTensor(prefix, key, val);
}
Status VariantTensorDataWriter::WriteTensor(StringPiece name, StringPiece key,
const Tensor& val) {
return WriteTensorInternal(name, key, val);
}
void VariantTensorDataWriter::MaybeFlush() {
if (is_flushed_) return;
for (auto& keys : keys_) {
const string name = keys.first;
string metadata = name;
for (size_t i = 0; i < keys_[name].size(); ++i) {
strings::StrAppend(&metadata, kDelimiter, keys_[name][i]);
}
data_[name]->set_metadata(metadata);
}
is_flushed_ = true;
}
void VariantTensorDataWriter::Reset() {
is_flushed_ = false;
data_.clear();
keys_.clear();
}
void VariantTensorDataWriter::ReleaseData(
std::vector<std::unique_ptr<VariantTensorData>>* variants) {
MaybeFlush();
for (auto& it : data_) {
variants->push_back(std::move(it.second));
}
Reset();
}
void VariantTensorDataWriter::GetData(
std::vector<const VariantTensorData*>* variants) {
MaybeFlush();
for (auto& it : data_) {
variants->push_back(it.second.get());
}
}
template <typename T>
Status VariantTensorDataWriter::WriteScalarInternal(StringPiece name,
StringPiece key,
const T& val) {
if (is_flushed_) {
return errors::FailedPrecondition(
"Cannot call WriteScalar after GetData or ReleaseData is called");
}
Tensor val_t = Tensor(DataTypeToEnum<T>::v(), TensorShape({}));
val_t.scalar<T>()() = val;
return WriteTensorInternal(name, key, val_t);
}
Status VariantTensorDataWriter::WriteTensorInternal(StringPiece n,
StringPiece key,
const Tensor& val) {
DatasetBase* dataset;
if (GetDatasetFromVariantTensor(val, &dataset).ok()) {
return WriteDatasetInternal(n, key, dataset);
}
if (is_flushed_) {
return errors::FailedPrecondition(
"Cannot call WriteTensor after GetData or ReleaseData is called");
}
DCHECK_EQ(key.find(kDelimiter), string::npos);
string name(n);
if (keys_.count(name) == 0) {
keys_[name] = std::vector<string>();
}
keys_[name].push_back(string(key));
if (data_.count(name) == 0) {
data_[name] = std::make_unique<VariantTensorData>();
data_[name]->set_type_name("tensorflow::Iterator");
}
*(data_[name]->add_tensors()) = val;
return absl::OkStatus();
}
Status VariantTensorDataWriter::WriteDatasetInternal(
StringPiece n, StringPiece key, const DatasetBase* dataset) {
GraphDef graph_def;
SerializationContext ctx((SerializationContext::Params()));
TF_RETURN_IF_ERROR(AsGraphDef(dataset, std::move(ctx), &graph_def));
string output_node;
for (const auto& node : graph_def.node()) {
if (node.op() == kRetvalOp) {
output_node = node.input(0);
break;
}
}
string result;
graph_def.SerializeToString(&result);
TF_RETURN_IF_ERROR(WriteScalar(n, strings::StrCat(key, kIsDataset), ""));
TF_RETURN_IF_ERROR(
WriteScalar(n, strings::StrCat(key, kOutputNode), output_node));
TF_RETURN_IF_ERROR(WriteScalar(n, key, result));
return absl::OkStatus();
}
std::string IteratorStateVariant::TypeName() {
return kIteratorVariantTypeName;
}
IteratorStateVariant::IteratorStateVariant(const IteratorStateVariant& other) {
if (other.data_) {
data_ = std::make_unique<VariantTensorData>(*other.data_);
}
}
Status IteratorStateVariant::InitializeFromVariantData(
std::unique_ptr<VariantTensorData> data) {
data_ = std::move(data);
return absl::OkStatus();
}
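// Compresses all stored tensors into a single scalar DT_VARIANT tensor holding
// a CompressedElement. If compression fails, the uncompressed data is copied
// through unchanged so decoding still works.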
void IteratorStateVariant::Encode(VariantTensorData* data) const {
CompressedElement compressed_tensors;
Status s = CompressElement(data_->tensors(), &compressed_tensors);
if (!s.ok()) {
LOG(WARNING) << "Failed to compress iterator state variant: " << s;
*data = *data_;
return;
}
data->set_type_name(TypeName());
data->set_metadata(data_->metadata_string());
Tensor tensor(DT_VARIANT, TensorShape({}));
tensor.scalar<Variant>()() = std::move(compressed_tensors);
*data->add_tensors() = std::move(tensor);
}
bool IteratorStateVariant::Decode(VariantTensorData data) {
if (data.type_name() != TypeName()) {
return false;
}
const CompressedElement* compressed = GetCompressedElement(data);
if (!compressed) {
data_ = std::make_unique<VariantTensorData>(std::move(data));
return true;
}
std::vector<Tensor> tensors;
Status s = UncompressElement(*compressed, &tensors);
if (!s.ok()) {
LOG(WARNING) << "Failed to uncompress iterator state variant: " << s;
data_ = std::make_unique<VariantTensorData>(std::move(data));
return true;
}
data_ = std::make_unique<VariantTensorData>();
data_->set_type_name(TypeName());
data_->set_metadata(std::move(data.metadata_string()));
for (auto& tensor : tensors) {
*data_->add_tensors() = std::move(tensor);
}
return true;
}
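// Returns the CompressedElement held by `data` if it consists of exactly one
// scalar DT_VARIANT tensor, or nullptr when the data is stored uncompressed.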
const CompressedElement* IteratorStateVariant::GetCompressedElement(
const VariantTensorData& data) {
bool should_uncompress =
data.tensors_size() == 1 &&
TensorShapeUtils::IsScalar(data.tensors(0).shape()) &&
data.tensors(0).dtype() == DT_VARIANT;
if (!should_uncompress) {
return nullptr;
}
const Variant& variant = data.tensors(0).scalar<Variant>()();
return variant.get<CompressedElement>();
}
std::string IteratorStateVariant::DebugString() const {
if (data_) {
return strings::StrCat("IteratorStateVariant<", data_->DebugString(), ">");
} else {
return strings::StrCat("IteratorStateVariant<empty>");
}
}
REGISTER_UNARY_VARIANT_DECODE_FUNCTION(IteratorStateVariant,
kIteratorVariantTypeName);
Status AsGraphDefForRewrite(OpKernelContext* ctx, const DatasetBase* input,
std::vector<std::pair<string, Tensor>>* input_list,
GraphDef* result, string* dataset_node) {
SerializationContext::Params params(ctx);
params.input_list = input_list;
params.external_state_policy = ExternalStatePolicy::POLICY_IGNORE;
params.is_graph_rewrite = true;
SerializationContext serialization_ctx(params);
TF_RETURN_IF_ERROR(AsGraphDef(input, std::move(serialization_ctx), result));
for (const auto& node : result->node()) {
if (node.op() == kRetvalOp) {
*dataset_node = node.input(0);
}
}
return absl::OkStatus();
}
Status AsGraphDef(const DatasetBase* dataset,
SerializationContext&& serialization_ctx,
GraphDef* graph_def) {
if (serialization_ctx.external_state_policy() ==
ExternalStatePolicy::POLICY_FAIL) {
TF_RETURN_IF_ERROR(dataset->CheckExternalState());
}
if (serialization_ctx.external_state_policy() ==
ExternalStatePolicy::POLICY_WARN) {
std::vector<string> stateful_op_names;
TF_RETURN_IF_ERROR(FindStatefulOps(*graph_def, &stateful_op_names));
if (!stateful_op_names.empty()) {
LOG(WARNING) << "We found the following stateful ops in the dataset "
"construction graph whose state would not be "
"serialized and might "
"cause subtle bugs: "
<< absl::StrJoin(stateful_op_names, ", ");
}
}
GraphDefBuilder b;
DatasetBase::DatasetGraphDefBuilder db(&b);
Node* output_node = nullptr;
TF_RETURN_IF_ERROR(
db.AddInputDataset(&serialization_ctx, dataset, &output_node));
ops::UnaryOp(std::string(kRetvalOp), output_node,
b.opts()
.WithName("dataset")
.WithAttr("T", DT_VARIANT)
.WithAttr("index", 0));
TF_RETURN_IF_ERROR(b.ToGraphDef(graph_def));
return absl::OkStatus();
}
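// Parses a serialized checkpoint tensor, unwraps the IteratorStateVariant
// inside it, and reports the total byte size of every stored tensor keyed by
// "<name>@@<key>".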
absl::StatusOr<absl::flat_hash_map<std::string, int64_t>> CheckpointStats(
const std::string& checkpoint_bytes) {
TensorProto proto;
if (!ParseProtoUnlimited(&proto, checkpoint_bytes)) {
return absl::InvalidArgumentError(
"Failed to parse checkpoint bytes into proto.");
}
Tensor t;
if (!t.FromProto(proto)) {
return absl::InvalidArgumentError(
"Failed to parse checkpoint tensor from proto.");
}
auto variant = t.scalar<Variant>()();
auto* w = variant.get<IteratorStateVariant>();
if (!w) {
return absl::InvalidArgumentError(
"Failed to access IteratorStateVariant inside checkpoint tensor");
}
const VariantTensorData* data = w->GetData();
auto reader = std::make_unique<VariantTensorDataReader>(
std::vector<const VariantTensorData*>{data});
absl::flat_hash_map<std::string, int64_t> stats;
for (const auto& [key, tensor] : reader->ReadAllTensors()) {
stats[key] = tensor.TotalBytes();
}
return stats;
}
}
} | #include "tensorflow/core/data/serialization_utils.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/test_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/work_sharder.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace data {
namespace {
string full_name(string key) { return FullName("Iterator:", key); }
TEST(SerializationUtilsTest, CheckpointElementsRoundTrip) {
std::vector<std::vector<Tensor>> elements;
elements.push_back(CreateTensors<int32>(TensorShape({3}), {{1, 2, 3}}));
elements.push_back(CreateTensors<int32>(TensorShape({2}), {{4, 5}}));
VariantTensorDataWriter writer;
tstring test_prefix = full_name("test_prefix");
TF_ASSERT_OK(WriteElementsToCheckpoint(&writer, test_prefix, elements));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
std::vector<std::vector<Tensor>> read_elements;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<TestContext> ctx,
TestContext::Create());
TF_ASSERT_OK(ReadElementsFromCheckpoint(ctx->iter_ctx(), &reader, test_prefix,
&read_elements));
ASSERT_EQ(elements.size(), read_elements.size());
for (int i = 0; i < elements.size(); ++i) {
std::vector<Tensor>& original = elements[i];
std::vector<Tensor>& read = read_elements[i];
ASSERT_EQ(original.size(), read.size());
for (int j = 0; j < original.size(); ++j) {
EXPECT_EQ(original[j].NumElements(), read[j].NumElements());
EXPECT_EQ(original[j].flat<int32>()(0), read[j].flat<int32>()(0));
}
}
}
TEST(SerializationUtilsTest, VariantTensorDataRoundtrip) {
VariantTensorDataWriter writer;
TF_ASSERT_OK(writer.WriteScalar(full_name("Int64"), 24));
Tensor input_tensor(DT_FLOAT, {1});
input_tensor.flat<float>()(0) = 2.0f;
TF_ASSERT_OK(writer.WriteTensor(full_name("Tensor"), input_tensor));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
int64_t val_int64;
TF_ASSERT_OK(reader.ReadScalar(full_name("Int64"), &val_int64));
EXPECT_EQ(val_int64, 24);
Tensor val_tensor;
TF_ASSERT_OK(reader.ReadTensor(full_name("Tensor"), &val_tensor));
EXPECT_EQ(input_tensor.NumElements(), val_tensor.NumElements());
EXPECT_EQ(input_tensor.flat<float>()(0), val_tensor.flat<float>()(0));
}
TEST(SerializationUtilsTest, VariantTensorDataNonExistentKey) {
VariantTensorData data;
strings::StrAppend(&data.metadata_, "key1", "@@");
data.tensors_.push_back(Tensor(DT_INT64, {1}));
std::vector<const VariantTensorData*> reader_data;
reader_data.push_back(&data);
VariantTensorDataReader reader(reader_data);
int64_t val_int64;
tstring val_string;
Tensor val_tensor;
EXPECT_EQ(error::NOT_FOUND,
reader.ReadScalar(full_name("NonExistentKey"), &val_int64).code());
EXPECT_EQ(error::NOT_FOUND,
reader.ReadScalar(full_name("NonExistentKey"), &val_string).code());
EXPECT_EQ(error::NOT_FOUND,
reader.ReadTensor(full_name("NonExistentKey"), &val_tensor).code());
}
TEST(SerializationUtilsTest, VariantTensorDataRoundtripIteratorName) {
VariantTensorDataWriter writer;
TF_ASSERT_OK(writer.WriteScalar("Iterator", "Int64", 24));
Tensor input_tensor(DT_FLOAT, {1});
input_tensor.flat<float>()(0) = 2.0f;
TF_ASSERT_OK(writer.WriteTensor("Iterator", "Tensor", input_tensor));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
int64_t val_int64;
TF_ASSERT_OK(reader.ReadScalar("Iterator", "Int64", &val_int64));
EXPECT_EQ(val_int64, 24);
Tensor val_tensor;
TF_ASSERT_OK(reader.ReadTensor("Iterator", "Tensor", &val_tensor));
EXPECT_EQ(input_tensor.NumElements(), val_tensor.NumElements());
EXPECT_EQ(input_tensor.flat<float>()(0), val_tensor.flat<float>()(0));
}
TEST(SerializationUtilsTest, VariantTensorDataNonExistentKeyIteratorName) {
VariantTensorData data;
strings::StrAppend(&data.metadata_, "key1", "@@");
data.tensors_.push_back(Tensor(DT_INT64, {1}));
std::vector<const VariantTensorData*> reader_data;
reader_data.push_back(&data);
VariantTensorDataReader reader(reader_data);
int64_t val_int64;
tstring val_string;
Tensor val_tensor;
EXPECT_EQ(error::NOT_FOUND,
reader.ReadScalar("Iterator", "NonExistentKey", &val_int64).code());
EXPECT_EQ(
error::NOT_FOUND,
reader.ReadScalar("Iterator", "NonExistentKey", &val_string).code());
EXPECT_EQ(
error::NOT_FOUND,
reader.ReadTensor("Iterator", "NonExistentKey", &val_tensor).code());
}
TEST(SerializationUtilsTest, VariantTensorDataWriteAfterFlushing) {
VariantTensorDataWriter writer;
TF_ASSERT_OK(writer.WriteScalar(full_name("Int64"), 24));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
Tensor input_tensor(DT_FLOAT, {1});
input_tensor.flat<float>()(0) = 2.0f;
EXPECT_EQ(error::FAILED_PRECONDITION,
writer.WriteTensor(full_name("Tensor"), input_tensor).code());
}
class ParameterizedIteratorStateVariantTest
: public DatasetOpsTestBase,
public ::testing::WithParamInterface<std::vector<Tensor>> {
protected:
VariantTensorData GetVariantTensorData() const {
std::vector<Tensor> tensors = GetParam();
VariantTensorData data;
data.set_type_name(IteratorStateVariant::TypeName());
for (Tensor& tensor : tensors) {
*data.add_tensors() = std::move(tensor);
}
return data;
}
absl::StatusOr<VariantTensorData> EncodeAndDecode(
const VariantTensorData& data) const {
IteratorStateVariant encoder;
TF_RETURN_IF_ERROR(encoder.InitializeFromVariantData(
std::make_unique<VariantTensorData>(data)));
VariantTensorData encoded_data;
encoder.Encode(&encoded_data);
IteratorStateVariant decoder;
decoder.Decode(encoded_data);
return *decoder.GetData();
}
absl::StatusOr<VariantTensorData> DecodeUncompressed(
const VariantTensorData& data) const {
IteratorStateVariant decoder;
decoder.Decode(data);
return *decoder.GetData();
}
};
class ParemeterizedCheckpointIndicesTest
: public DatasetOpsTestBase,
public ::testing::WithParamInterface<absl::flat_hash_set<int64_t>> {
protected:
absl::flat_hash_set<int64_t> GetCheckpointIndices() const {
absl::flat_hash_set<int64_t> checkpoint_indices = GetParam();
return checkpoint_indices;
}
};
std::vector<std::vector<Tensor>> TestCases() {
return {
CreateTensors<int64_t>(TensorShape{1}, {{1}}),
CreateTensors<int64_t>(TensorShape{1}, {{1}, {2}}),
CreateTensors<tstring>(TensorShape{1}, {{"a"}, {"b"}}),
{CreateTensor<tstring>(TensorShape{1}, {"a"}),
CreateTensor<int64_t>(TensorShape{1}, {1})},
{},
{CreateTensor<int64_t>(TensorShape{128, 128}),
CreateTensor<int64_t>(TensorShape{64, 2})},
};
}
std::vector<absl::flat_hash_set<int64_t>> CheckpointIndicesTestCases() {
return {
{},
{ 0},
{ 0, 1},
{ 0, 1, 2},
{ 1, 3, 4},
{ 1, 2, 3, 4},
{ 0, 1, 2, 3, 4},
};
}
TEST_P(ParameterizedIteratorStateVariantTest, EncodeAndDecode) {
VariantTensorData data = GetVariantTensorData();
TF_ASSERT_OK_AND_ASSIGN(VariantTensorData result, EncodeAndDecode(data));
EXPECT_EQ(result.type_name(), data.type_name());
for (int i = 0; i < result.tensors_size(); ++i) {
test::ExpectEqual(result.tensors(i), data.tensors(i));
}
}
TEST_P(ParameterizedIteratorStateVariantTest, DecodeUncompressed) {
VariantTensorData data = GetVariantTensorData();
TF_ASSERT_OK_AND_ASSIGN(VariantTensorData result, DecodeUncompressed(data));
EXPECT_EQ(result.type_name(), data.type_name());
for (int i = 0; i < result.tensors_size(); ++i) {
test::ExpectEqual(result.tensors(i), data.tensors(i));
}
}
TEST_P(ParemeterizedCheckpointIndicesTest,
CheckpointElementsRoundTripUsingIndices) {
std::vector<std::vector<Tensor>> elements;
elements.push_back(CreateTensors<int32>(TensorShape({3}), {{1, 2, 3}}));
elements.push_back(CreateTensors<int32>(TensorShape({2}), {{4, 5}}));
elements.push_back(
CreateTensors<int32>(TensorShape({5}), {{6, 7, 8, 9, 10}}));
elements.push_back(
CreateTensors<int32>(TensorShape({4}), {{11, 12, 13, 14}}));
elements.push_back(CreateTensors<int32>(TensorShape({2}), {{15, 16}}));
VariantTensorDataWriter writer;
tstring test_prefix = full_name("test_prefix");
absl::flat_hash_set<int64_t> checkpoint_indices_write = {0, 1, 2, 3, 4};
TF_ASSERT_OK(WriteElementsToCheckpoint(&writer, test_prefix, elements));
for (auto index : GetCheckpointIndices()) {
elements.at(index) = CreateTensors<int32>(TensorShape({1}), {{1}});
}
TF_ASSERT_OK(UpdateCheckpointElements(&writer, test_prefix, elements,
GetCheckpointIndices()));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
std::vector<std::vector<Tensor>> read_elements;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<TestContext> ctx,
TestContext::Create());
TF_ASSERT_OK(ReadElementsFromCheckpoint(ctx->iter_ctx(), &reader, test_prefix,
&read_elements));
ASSERT_EQ(elements.size(), read_elements.size());
for (int index = 0; index < elements.size(); ++index) {
std::vector<Tensor>& original = elements[index];
std::vector<Tensor>& read = read_elements[index];
ASSERT_EQ(original.size(), read.size());
for (int j = 0; j < original.size(); ++j) {
EXPECT_EQ(original[j].NumElements(), read[j].NumElements());
EXPECT_EQ(original[j].flat<int32>()(0), read[j].flat<int32>()(0));
}
}
}
INSTANTIATE_TEST_SUITE_P(Instantiation, ParameterizedIteratorStateVariantTest,
::testing::ValuesIn(TestCases()));
INSTANTIATE_TEST_SUITE_P(Instantiation, ParemeterizedCheckpointIndicesTest,
::testing::ValuesIn(CheckpointIndicesTestCases()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/serialization_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/serialization_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
95025948-2ae7-4372-8da2-9564390b0027 | cpp | tensorflow/tensorflow | save_variables | tensorflow/compiler/mlir/quantization/tensorflow/cc/save_variables.cc | tensorflow/compiler/mlir/quantization/tensorflow/cc/save_variables_test.cc | #include "tensorflow/compiler/mlir/quantization/tensorflow/cc/save_variables.h"
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/ir/importexport/convert_tensor.h"
#include "tensorflow/core/util/tensor_bundle/tensor_bundle.h"
#include "tsl/platform/env.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace tensorflow {
namespace quantization {
namespace {
using ::mlir::func::FuncOp;
using ::mlir::tf_saved_model::GetInitializerFunction;
using ::mlir::tf_saved_model::kTfSavedModelInitializerRestoreType;
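// Writes the constant tensor assigned by `assign_var_op` into `bundle_writer`
// under the variable's shared name. Returns the shared name on success, an
// empty string (with a remark emitted on the op) when the operands are not a
// tf.VarHandleOp / tf.Const pair, or an error if conversion or writing fails.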
absl::StatusOr<std::string> AddTensorToBundleWriter(
mlir::TF::AssignVariableOp assign_var_op, BundleWriter& bundle_writer) {
auto resource_operand = assign_var_op.getOperand(0);
auto var_handle_op =
llvm::dyn_cast<mlir::TF::VarHandleOp>(resource_operand.getDefiningOp());
if (!var_handle_op) {
assign_var_op->emitRemark(
"Operand idx 0 is not a tf.VarHandleOp. The initializing tensor is not "
"saved to checkpoint.");
return "";
}
auto assigned_value_operand = assign_var_op.getOperand(1);
auto const_op =
llvm::dyn_cast<mlir::TF::ConstOp>(assigned_value_operand.getDefiningOp());
if (!const_op) {
assign_var_op->emitRemark(
"Operand idx 1 is not a tf.ConstOp. The initializing tensor is not "
"saved to checkpoint.");
return "";
}
Tensor const_tensor{};
if (const absl::Status status = mlir::tfg::ConvertToTensor(
const_op.getValue(), &const_tensor);
!status.ok()) {
return status;
}
if (!bundle_writer.Add(var_handle_op.getSharedName(), const_tensor)
.ok()) {
return bundle_writer.status();
}
return var_handle_op.getSharedName().str();
}
}
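// Saves the variables assigned inside the session initializer function of type
// "restore_op" to a checkpoint rooted at `prefix` and returns the shared names
// of the variables that were written. Returns an empty vector when the module
// has no such initializer function or no savable variables.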
absl::StatusOr<std::vector<std::string>> SaveVariablesToCheckpoint(
const absl::string_view prefix, mlir::ModuleOp module_op) {
FuncOp session_init_func_type_restore_op = GetInitializerFunction(
module_op, kTfSavedModelInitializerRestoreType);
if (!session_init_func_type_restore_op) {
LOG(INFO) << "No session initializer function with type 'restore_op'. No "
"variables are saved to checkpoint.";
return std::vector<std::string>{};
}
BundleWriter bundle_writer(Env::Default(), prefix);
if (!bundle_writer.status().ok()) {
return bundle_writer.status();
}
std::vector<std::string> saved_variable_shared_names;
for (auto assign_variable_op :
session_init_func_type_restore_op.getOps<mlir::TF::AssignVariableOp>()) {
if (const absl::StatusOr<std::string> variable_shared_name =
AddTensorToBundleWriter(assign_variable_op, bundle_writer);
!variable_shared_name.ok()) {
return variable_shared_name.status();
    } else if (!variable_shared_name->empty()) {
      // Log before moving the name into the result vector so the logged value
      // is still valid.
      VLOG(1) << "Saved a variable with shared_name: " << *variable_shared_name;
      saved_variable_shared_names.emplace_back(
          std::move(*variable_shared_name));
}
}
if (saved_variable_shared_names.empty()) {
LOG(INFO) << "No variables are saved to checkpoint";
return saved_variable_shared_names;
}
if (!bundle_writer.Finish().ok()) {
return bundle_writer.status();
}
return saved_variable_shared_names;
}
}
}

#include "tensorflow/compiler/mlir/quantization/tensorflow/cc/save_variables.h"
#include <string>
#include <vector>
#include "absl/cleanup/cleanup.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/tensor_bundle/tensor_bundle.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
namespace tensorflow {
namespace quantization {
namespace {
using ::tensorflow::test::AsTensor;
using ::tensorflow::test::ExpectEqual;
using ::testing::IsEmpty;
using ::testing::Not;
using ::testing::UnorderedElementsAre;
using ::tsl::testing::IsOk;
class SaveVariablesToCheckpointTest : public ::testing::Test {
protected:
SaveVariablesToCheckpointTest() : env_(Env::Default()) {
ctx_.loadDialect<mlir::func::FuncDialect, mlir::TF::TensorFlowDialect,
mlir::tf_saved_model::TensorFlowSavedModelDialect>();
}
absl::StatusOr<std::string> MakeTempDir() {
std::string tmp_dir{};
if (!env_->LocalTempFilename(&tmp_dir)) {
return absl::InternalError("Failed to create temp file.");
}
TF_CHECK_OK(env_->CreateDir(tmp_dir));
return tmp_dir;
}
mlir::OwningOpRef<mlir::ModuleOp> ParseModuleOpString(
const absl::string_view module_op_str) {
auto module_op_ref =
mlir::parseSourceString<mlir::ModuleOp>(module_op_str, &ctx_);
EXPECT_TRUE(module_op_ref);
return module_op_ref;
}
Env* env_{};
mlir::MLIRContext ctx_{};
};
TEST_F(SaveVariablesToCheckpointTest, VariableSavedToCheckpoint) {
constexpr absl::string_view kModuleCode = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = [@init_func_restore_op]} : () -> ()
func.func @init_func_restore_op() -> () attributes {tf_saved_model.exported_names = ["restore"], tf_saved_model.initializer_type = "restore_op"} {
%cst = "tf.Const"() {device = "", value = dense<[1.0, 2.0]> : tensor<2xf32>} : () -> tensor<2xf32>
%0 = "tf.VarHandleOp"() {container = "", device = "/device:CPU:0", shared_name = "var_0"} : () -> tensor<!tf_type.resource<tensor<2xf32>>>
"tf.AssignVariableOp"(%0, %cst) : (tensor<!tf_type.resource<tensor<2xf32>>>, tensor<2xf32>) -> ()
return
}
}
)mlir";
mlir::OwningOpRef<mlir::ModuleOp> module_op_ref =
ParseModuleOpString(kModuleCode);
const absl::StatusOr<std::string> checkpoint_prefix = MakeTempDir();
EXPECT_TRUE(checkpoint_prefix.ok());
const absl::Cleanup checkpoint_prefix_cleanup = [this, &checkpoint_prefix]() {
int64_t undeleted_files, undeleted_dirs;
TF_CHECK_OK(env_->DeleteRecursively(*checkpoint_prefix, &undeleted_files,
&undeleted_dirs));
};
const absl::StatusOr<std::vector<std::string>> variable_shared_names =
SaveVariablesToCheckpoint(*checkpoint_prefix, *module_op_ref);
EXPECT_TRUE(variable_shared_names.ok());
EXPECT_THAT(*variable_shared_names, UnorderedElementsAre("var_0"));
BundleReader bundle_reader(env_, *checkpoint_prefix);
Tensor loaded_tensor{};
EXPECT_TRUE(bundle_reader.Lookup("var_0", &loaded_tensor).ok());
ExpectEqual(loaded_tensor, AsTensor<float>({1.0, 2.0}));
}
TEST_F(SaveVariablesToCheckpointTest, MultipleVariablesSavedToCheckpoint) {
constexpr absl::string_view kModuleCode = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = [@init_func_restore_op]} : () -> ()
func.func @init_func_restore_op() -> () attributes {tf_saved_model.exported_names = ["restore"], tf_saved_model.initializer_type = "restore_op"} {
%cst = "tf.Const"() {device = "", value = dense<[1.0, 2.0]> : tensor<2xf32>} : () -> tensor<2xf32>
%0 = "tf.VarHandleOp"() {container = "", device = "/device:CPU:0", shared_name = "var_0"} : () -> tensor<!tf_type.resource<tensor<2xf32>>>
"tf.AssignVariableOp"(%0, %cst) : (tensor<!tf_type.resource<tensor<2xf32>>>, tensor<2xf32>) -> ()
%cst_0 = "tf.Const"() {device = "", value = dense<[3, 4, 5, 6]> : tensor<4xi32>} : () -> tensor<4xi32>
%1 = "tf.VarHandleOp"() {container = "", device = "/device:CPU:0", shared_name = "var_1"} : () -> tensor<!tf_type.resource<tensor<4xi32>>>
"tf.AssignVariableOp"(%1, %cst_0) : (tensor<!tf_type.resource<tensor<4xi32>>>, tensor<4xi32>) -> ()
return
}
}
)mlir";
mlir::OwningOpRef<mlir::ModuleOp> module_op_ref =
ParseModuleOpString(kModuleCode);
const absl::StatusOr<std::string> checkpoint_prefix = MakeTempDir();
EXPECT_TRUE(checkpoint_prefix.ok());
const absl::Cleanup checkpoint_prefix_cleanup = [this, &checkpoint_prefix]() {
int64_t undeleted_files, undeleted_dirs;
TF_CHECK_OK(env_->DeleteRecursively(*checkpoint_prefix, &undeleted_files,
&undeleted_dirs));
};
const absl::StatusOr<std::vector<std::string>> variable_shared_names =
SaveVariablesToCheckpoint(*checkpoint_prefix, *module_op_ref);
EXPECT_TRUE(variable_shared_names.ok());
EXPECT_THAT(*variable_shared_names, UnorderedElementsAre("var_0", "var_1"));
BundleReader bundle_reader(env_, *checkpoint_prefix);
Tensor loaded_var_0{};
EXPECT_TRUE(bundle_reader.Lookup("var_0", &loaded_var_0).ok());
ExpectEqual(loaded_var_0, AsTensor<float>({1.0, 2.0}));
Tensor loaded_var_1{};
EXPECT_TRUE(bundle_reader.Lookup("var_1", &loaded_var_1).ok());
ExpectEqual(loaded_var_1, AsTensor<int>({3, 4, 5, 6}));
}
TEST_F(SaveVariablesToCheckpointTest,
NoVariablesSavedWhenNoInitializerFunction) {
constexpr absl::string_view kModuleCode = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = []} : () -> ()
}
)mlir";
mlir::OwningOpRef<mlir::ModuleOp> module_op_ref =
ParseModuleOpString(kModuleCode);
const absl::StatusOr<std::string> checkpoint_prefix = MakeTempDir();
EXPECT_TRUE(checkpoint_prefix.ok());
const absl::Cleanup checkpoint_prefix_cleanup = [this, &checkpoint_prefix]() {
int64_t undeleted_files, undeleted_dirs;
TF_CHECK_OK(env_->DeleteRecursively(*checkpoint_prefix, &undeleted_files,
&undeleted_dirs));
};
const absl::StatusOr<std::vector<std::string>> variable_shared_names =
SaveVariablesToCheckpoint(*checkpoint_prefix, *module_op_ref);
EXPECT_TRUE(variable_shared_names.ok());
EXPECT_THAT(*variable_shared_names, IsEmpty());
BundleReader bundle_reader(env_, *checkpoint_prefix);
EXPECT_THAT(bundle_reader.status(), Not(IsOk()));
}
TEST_F(SaveVariablesToCheckpointTest,
NoVariablesSavedWhenNoSessionInitializerOp) {
constexpr absl::string_view kModuleCode = R"mlir(
module {
func.func @my_func() -> () {
return
}
}
)mlir";
mlir::OwningOpRef<mlir::ModuleOp> module_op_ref =
ParseModuleOpString(kModuleCode);
const absl::StatusOr<std::string> checkpoint_prefix = MakeTempDir();
EXPECT_TRUE(checkpoint_prefix.ok());
const absl::Cleanup checkpoint_prefix_cleanup = [this, &checkpoint_prefix]() {
int64_t undeleted_files, undeleted_dirs;
TF_CHECK_OK(env_->DeleteRecursively(*checkpoint_prefix, &undeleted_files,
&undeleted_dirs));
};
EXPECT_TRUE(
SaveVariablesToCheckpoint(*checkpoint_prefix, *module_op_ref).ok());
BundleReader bundle_reader(env_, *checkpoint_prefix);
EXPECT_THAT(bundle_reader.status(), Not(IsOk()));
}
TEST_F(SaveVariablesToCheckpointTest,
NoVariablesSavedWhenNoSessionInitializerOpTypeRestoreOp) {
constexpr absl::string_view kModuleCode = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = [@init_func_init_op]} : () -> ()
func.func @init_func_init_op() -> () attributes {tf_saved_model.exported_names = ["init"], tf_saved_model.initializer_type = "init_op"} {
%cst = "tf.Const"() {device = "", value = dense<[1.0, 2.0]> : tensor<2xf32>} : () -> tensor<2xf32>
%0 = "tf.VarHandleOp"() {container = "", device = "/device:CPU:0", shared_name = "var_0"} : () -> tensor<!tf_type.resource<tensor<2xf32>>>
"tf.AssignVariableOp"(%0, %cst) : (tensor<!tf_type.resource<tensor<2xf32>>>, tensor<2xf32>) -> ()
return
}
}
)mlir";
mlir::OwningOpRef<mlir::ModuleOp> module_op_ref =
ParseModuleOpString(kModuleCode);
const absl::StatusOr<std::string> checkpoint_prefix = MakeTempDir();
EXPECT_TRUE(checkpoint_prefix.ok());
const absl::Cleanup checkpoint_prefix_cleanup = [this, &checkpoint_prefix]() {
int64_t undeleted_files, undeleted_dirs;
TF_CHECK_OK(env_->DeleteRecursively(*checkpoint_prefix, &undeleted_files,
&undeleted_dirs));
};
const absl::StatusOr<std::vector<std::string>> variable_shared_names =
SaveVariablesToCheckpoint(*checkpoint_prefix, *module_op_ref);
EXPECT_TRUE(variable_shared_names.ok());
EXPECT_THAT(*variable_shared_names, IsEmpty());
BundleReader bundle_reader(env_, *checkpoint_prefix);
EXPECT_THAT(bundle_reader.status(), Not(IsOk()));
}
TEST_F(SaveVariablesToCheckpointTest, MutableVariablesNotSaved) {
constexpr absl::string_view kModuleCode = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = [@init_func_restore_op]} : () -> ()
func.func @init_func_restore_op() -> () attributes {tf_saved_model.exported_names = ["init"], tf_saved_model.initializer_type = "restore_op"} {
%cst = "tf.Const"() {device = "", value = dense<[1.0, 2.0]> : tensor<2xf32>} : () -> tensor<2xf32>
%add = "tf.AddV2"(%cst, %cst) : (tensor<2xf32>, tensor<2xf32>) -> tensor<2xf32>
%var_handle = "tf.VarHandleOp"() {container = "", device = "/device:CPU:0", shared_name = "var_0"} : () -> tensor<!tf_type.resource<tensor<2xf32>>>
"tf.AssignVariableOp"(%var_handle, %add) : (tensor<!tf_type.resource<tensor<2xf32>>>, tensor<2xf32>) -> ()
return
}
}
)mlir";
mlir::OwningOpRef<mlir::ModuleOp> module_op_ref =
ParseModuleOpString(kModuleCode);
const absl::StatusOr<std::string> checkpoint_prefix = MakeTempDir();
EXPECT_TRUE(checkpoint_prefix.ok());
const absl::Cleanup checkpoint_prefix_cleanup = [this, &checkpoint_prefix]() {
int64_t undeleted_files, undeleted_dirs;
TF_CHECK_OK(env_->DeleteRecursively(*checkpoint_prefix, &undeleted_files,
&undeleted_dirs));
};
const absl::StatusOr<std::vector<std::string>> variable_shared_names =
SaveVariablesToCheckpoint(*checkpoint_prefix, *module_op_ref);
EXPECT_TRUE(variable_shared_names.ok());
EXPECT_THAT(*variable_shared_names, IsEmpty());
BundleReader bundle_reader(env_, *checkpoint_prefix);
EXPECT_THAT(bundle_reader.status(), Not(IsOk()));
}
TEST_F(SaveVariablesToCheckpointTest,
VariableNotSavedWhenNonVarHandleOpOperandForAssignVariableOp) {
constexpr absl::string_view kModuleCode = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = [@init_func_restore_op]} : () -> ()
func.func @init_func_restore_op() -> () attributes {tf_saved_model.exported_names = ["init"], tf_saved_model.initializer_type = "restore_op"} {
%cst = "tf.Const"() {device = "", value = dense<[1.0, 2.0]> : tensor<2xf32>} : () -> tensor<2xf32>
%var_handle = "tf.VarHandleOp"() {container = "", device = "/device:CPU:0", shared_name = "var_0"} : () -> tensor<!tf_type.resource<tensor<2xf32>>>
%var_handle_cast = "tf.Cast"(%var_handle) : (tensor<!tf_type.resource<tensor<2xf32>>>) -> tensor<!tf_type.resource>
"tf.AssignVariableOp"(%var_handle_cast, %cst) : (tensor<!tf_type.resource>, tensor<2xf32>) -> ()
return
}
}
)mlir";
mlir::OwningOpRef<mlir::ModuleOp> module_op_ref =
ParseModuleOpString(kModuleCode);
const absl::StatusOr<std::string> checkpoint_prefix = MakeTempDir();
EXPECT_TRUE(checkpoint_prefix.ok());
const absl::Cleanup checkpoint_prefix_cleanup = [this, &checkpoint_prefix]() {
int64_t undeleted_files, undeleted_dirs;
TF_CHECK_OK(env_->DeleteRecursively(*checkpoint_prefix, &undeleted_files,
&undeleted_dirs));
};
const absl::StatusOr<std::vector<std::string>> variable_shared_names =
SaveVariablesToCheckpoint(*checkpoint_prefix, *module_op_ref);
EXPECT_TRUE(variable_shared_names.ok());
EXPECT_THAT(*variable_shared_names, IsEmpty());
BundleReader bundle_reader(env_, *checkpoint_prefix);
EXPECT_THAT(bundle_reader.status(), Not(IsOk()));
}
TEST_F(SaveVariablesToCheckpointTest, FailsWhenDuplicateSharedName) {
constexpr absl::string_view kModuleCode = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = [@init_func_restore_op]} : () -> ()
func.func @init_func_restore_op() -> () attributes {tf_saved_model.exported_names = ["restore"], tf_saved_model.initializer_type = "restore_op"} {
%cst = "tf.Const"() {device = "", value = dense<[1.0, 2.0]> : tensor<2xf32>} : () -> tensor<2xf32>
%0 = "tf.VarHandleOp"() {container = "", device = "/device:CPU:0", shared_name = "var_0"} : () -> tensor<!tf_type.resource<tensor<2xf32>>>
"tf.AssignVariableOp"(%0, %cst) : (tensor<!tf_type.resource<tensor<2xf32>>>, tensor<2xf32>) -> ()
%cst_0 = "tf.Const"() {device = "", value = dense<[3, 4, 5, 6]> : tensor<4xi32>} : () -> tensor<4xi32>
%1 = "tf.VarHandleOp"() {container = "", device = "/device:CPU:0", shared_name = "var_0"} : () -> tensor<!tf_type.resource<tensor<4xi32>>>
"tf.AssignVariableOp"(%1, %cst_0) : (tensor<!tf_type.resource<tensor<4xi32>>>, tensor<4xi32>) -> ()
return
}
}
)mlir";
mlir::OwningOpRef<mlir::ModuleOp> module_op_ref =
ParseModuleOpString(kModuleCode);
const absl::StatusOr<std::string> checkpoint_prefix = MakeTempDir();
EXPECT_TRUE(checkpoint_prefix.ok());
const absl::Cleanup checkpoint_prefix_cleanup = [this, &checkpoint_prefix]() {
int64_t undeleted_files, undeleted_dirs;
TF_CHECK_OK(env_->DeleteRecursively(*checkpoint_prefix, &undeleted_files,
&undeleted_dirs));
};
EXPECT_FALSE(
SaveVariablesToCheckpoint(*checkpoint_prefix, *module_op_ref).ok());
}
}
}
}
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/cc/save_variables.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/cc/save_variables_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea

578789e4-5367-4156-8c17-19fde5e0ba5e | cpp | google/quiche | lifetime_tracking | quiche/common/lifetime_tracking.h | quiche/common/lifetime_tracking_test.cc
#ifndef QUICHE_COMMON_LIFETIME_TRACKING_H_
#define QUICHE_COMMON_LIFETIME_TRACKING_H_
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/strings/str_format.h"
#include "quiche/common/platform/api/quiche_export.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_stack_trace.h"
namespace quiche {
namespace test {
class LifetimeTrackingTest;
}
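// Records whether the tracked object has been destroyed and, if so, the stack
// trace captured at destruction time.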
struct QUICHE_EXPORT LifetimeInfo {
bool IsDead() const { return destructor_stack.has_value(); }
std::optional<std::vector<void*>> destructor_stack;
};
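// A copyable, movable handle that may outlive the LifetimeTrackable it came
// from; IsTrackedObjectDead() (and AbslStringify) report whether, and where,
// the tracked object was destroyed.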
class QUICHE_EXPORT LifetimeTracker {
public:
LifetimeTracker(const LifetimeTracker& other) { CopyFrom(other); }
LifetimeTracker& operator=(const LifetimeTracker& other) {
CopyFrom(other);
return *this;
}
LifetimeTracker(LifetimeTracker&& other) { CopyFrom(other); }
LifetimeTracker& operator=(LifetimeTracker&& other) {
CopyFrom(other);
return *this;
}
bool IsTrackedObjectDead() const { return info_->IsDead(); }
template <typename Sink>
friend void AbslStringify(Sink& sink, const LifetimeTracker& tracker) {
if (tracker.info_->IsDead()) {
absl::Format(&sink, "Tracked object has died with %v",
SymbolizeStackTrace(*tracker.info_->destructor_stack));
} else {
absl::Format(&sink, "Tracked object is alive.");
}
}
private:
friend class LifetimeTrackable;
explicit LifetimeTracker(std::shared_ptr<const LifetimeInfo> info)
: info_(std::move(info)) {
QUICHE_CHECK(info_ != nullptr)
<< "Passed a null info pointer into the lifetime tracker";
}
void CopyFrom(const LifetimeTracker& other) { info_ = other.info_; }
std::shared_ptr<const LifetimeInfo> info_;
};
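// Objects whose destruction should be observable either inherit from this
// class or hold it as a member. NewTracker() lazily allocates the shared
// LifetimeInfo, and the destructor stamps it with the current stack trace.
// Copying or moving a trackable deliberately does not transfer its trackers.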
class QUICHE_EXPORT LifetimeTrackable {
public:
LifetimeTrackable() = default;
virtual ~LifetimeTrackable() {
if (info_ != nullptr) {
info_->destructor_stack = CurrentStackTrace();
}
}
LifetimeTrackable(const LifetimeTrackable&) : LifetimeTrackable() {}
LifetimeTrackable& operator=(const LifetimeTrackable&) { return *this; }
LifetimeTrackable(LifetimeTrackable&&) : LifetimeTrackable() {}
LifetimeTrackable& operator=(LifetimeTrackable&&) { return *this; }
LifetimeTracker NewTracker() {
if (info_ == nullptr) {
info_ = std::make_shared<LifetimeInfo>();
}
return LifetimeTracker(info_);
}
private:
friend class test::LifetimeTrackingTest;
std::shared_ptr<LifetimeInfo> info_;
};
}
#endif

#include "quiche/common/lifetime_tracking.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace quiche {
namespace test {
struct ComposedTrackable {
LifetimeTrackable trackable;
};
struct InheritedTrackable : LifetimeTrackable {};
enum class TrackableType {
kComposed,
kInherited,
};
std::string PrintToString(const TrackableType& type) {
switch (type) {
case TrackableType::kComposed:
return "Composed";
case TrackableType::kInherited:
return "Inherited";
default:
QUICHE_LOG(FATAL) << "Unknown TrackableType: " << static_cast<int>(type);
}
}
class LifetimeTrackingTest : public QuicheTestWithParam<TrackableType> {
protected:
LifetimeTrackingTest() {
if (GetParam() == TrackableType::kComposed) {
composed_trackable_ = std::make_unique<ComposedTrackable>();
} else {
inherited_trackable_ = std::make_unique<InheritedTrackable>();
}
}
LifetimeTrackable& GetTrackable() {
if (composed_trackable_ != nullptr) {
return composed_trackable_->trackable;
} else {
return *inherited_trackable_;
}
}
const std::shared_ptr<LifetimeInfo>& GetLifetimeInfoFromTrackable(
LifetimeTrackable& trackable) {
return trackable.info_;
}
const std::shared_ptr<LifetimeInfo>& GetLifetimeInfoFromTrackable() {
return GetLifetimeInfoFromTrackable(GetTrackable());
}
void FreeTrackable() {
composed_trackable_ = nullptr;
inherited_trackable_ = nullptr;
}
std::unique_ptr<ComposedTrackable> composed_trackable_;
std::unique_ptr<InheritedTrackable> inherited_trackable_;
};
TEST_P(LifetimeTrackingTest, TrackableButNeverTracked) {
EXPECT_EQ(GetLifetimeInfoFromTrackable(), nullptr);
}
TEST_P(LifetimeTrackingTest, SingleTrackerQueryLiveness) {
LifetimeTracker tracker = GetTrackable().NewTracker();
EXPECT_FALSE(tracker.IsTrackedObjectDead());
EXPECT_THAT(absl::StrCat(tracker),
testing::HasSubstr("Tracked object is alive"));
FreeTrackable();
EXPECT_TRUE(tracker.IsTrackedObjectDead());
EXPECT_THAT(absl::StrCat(tracker),
testing::HasSubstr("Tracked object has died"));
}
TEST_P(LifetimeTrackingTest, MultiTrackersQueryLiveness) {
LifetimeTracker tracker1 = GetTrackable().NewTracker();
LifetimeTracker tracker2 = GetTrackable().NewTracker();
LifetimeTracker tracker3 = tracker2;
LifetimeTracker tracker4 = std::move(tracker3);
LifetimeTracker tracker5(std::move(tracker4));
LifetimeTrackable another_trackable;
LifetimeTracker tracker6 = another_trackable.NewTracker();
LifetimeTracker tracker7 = another_trackable.NewTracker();
tracker6 = tracker2;
tracker7 = std::move(tracker2);
EXPECT_FALSE(tracker1.IsTrackedObjectDead());
EXPECT_FALSE(
tracker2.IsTrackedObjectDead());
EXPECT_FALSE(
tracker3.IsTrackedObjectDead());
EXPECT_FALSE(
tracker4.IsTrackedObjectDead());
EXPECT_FALSE(tracker5.IsTrackedObjectDead());
EXPECT_FALSE(tracker6.IsTrackedObjectDead());
EXPECT_FALSE(tracker7.IsTrackedObjectDead());
FreeTrackable();
EXPECT_TRUE(tracker1.IsTrackedObjectDead());
EXPECT_TRUE(
tracker2.IsTrackedObjectDead());
EXPECT_TRUE(
tracker3.IsTrackedObjectDead());
EXPECT_TRUE(
tracker4.IsTrackedObjectDead());
EXPECT_TRUE(tracker5.IsTrackedObjectDead());
EXPECT_TRUE(tracker6.IsTrackedObjectDead());
EXPECT_TRUE(tracker7.IsTrackedObjectDead());
}
TEST_P(LifetimeTrackingTest, CopyTrackableIsNoop) {
LifetimeTracker tracker = GetTrackable().NewTracker();
const LifetimeInfo* info = GetLifetimeInfoFromTrackable().get();
EXPECT_NE(info, nullptr);
LifetimeTrackable trackable2(GetTrackable());
EXPECT_EQ(GetLifetimeInfoFromTrackable(trackable2), nullptr);
LifetimeTrackable trackable3;
trackable3 = GetTrackable();
EXPECT_EQ(GetLifetimeInfoFromTrackable(trackable3), nullptr);
EXPECT_EQ(GetLifetimeInfoFromTrackable().get(), info);
}
TEST_P(LifetimeTrackingTest, MoveTrackableIsNoop) {
LifetimeTracker tracker = GetTrackable().NewTracker();
const LifetimeInfo* info = GetLifetimeInfoFromTrackable().get();
EXPECT_NE(info, nullptr);
LifetimeTrackable trackable2(std::move(GetTrackable()));
EXPECT_EQ(GetLifetimeInfoFromTrackable(trackable2), nullptr);
LifetimeTrackable trackable3;
trackable3 = std::move(GetTrackable());
EXPECT_EQ(GetLifetimeInfoFromTrackable(trackable3), nullptr);
EXPECT_EQ(GetLifetimeInfoFromTrackable().get(), info);
}
TEST_P(LifetimeTrackingTest, ObjectDiedDueToVectorRealloc) {
if (GetParam() == TrackableType::kComposed) {
return;
}
std::vector<InheritedTrackable> trackables;
InheritedTrackable& trackable = trackables.emplace_back();
LifetimeTracker tracker = trackable.NewTracker();
EXPECT_FALSE(tracker.IsTrackedObjectDead());
for (int i = 0; i < 1000; ++i) {
trackables.emplace_back();
}
EXPECT_TRUE(tracker.IsTrackedObjectDead());
}
INSTANTIATE_TEST_SUITE_P(Tests, LifetimeTrackingTest,
testing::Values(TrackableType::kComposed,
TrackableType::kInherited),
testing::PrintToStringParamName());
}
}
https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/lifetime_tracking.h | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/lifetime_tracking_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6

172d4993-1731-40c0-8561-33d46cf51e04 | cpp | tensorflow/tensorflow | hash | third_party/xla/third_party/tsl/tsl/platform/hash.cc | third_party/xla/third_party/tsl/tsl/platform/hash_test.cc
#include "tsl/platform/hash.h"
#include <string.h>
#include "tsl/platform/macros.h"
#include "tsl/platform/raw_coding.h"
#include "tsl/platform/types.h"
namespace tsl {
static inline uint32 ByteAs32(char c) { return static_cast<uint32>(c) & 0xff; }
static inline uint64 ByteAs64(char c) { return static_cast<uint64>(c) & 0xff; }
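// Murmur-style 32-bit hash of `data[0, n)`: mixes the input four bytes at a
// time, folds in up to three trailing bytes, then finalizes with the
// shift/multiply rounds below.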
uint32 Hash32(const char* data, size_t n, uint32 seed) {
const uint32 m = 0x5bd1e995;
const int r = 24;
uint32 h = seed ^ n;
while (n >= 4) {
uint32 k = core::DecodeFixed32(data);
k *= m;
k ^= k >> r;
k *= m;
h *= m;
h ^= k;
data += 4;
n -= 4;
}
switch (n) {
case 3:
h ^= ByteAs32(data[2]) << 16;
TF_FALLTHROUGH_INTENDED;
case 2:
h ^= ByteAs32(data[1]) << 8;
TF_FALLTHROUGH_INTENDED;
case 1:
h ^= ByteAs32(data[0]);
h *= m;
}
h ^= h >> 13;
h *= m;
h ^= h >> 15;
return h;
}
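// 64-bit counterpart of Hash32: consumes eight bytes per iteration and folds
// up to seven trailing bytes before the final mixing rounds.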
uint64 Hash64(const char* data, size_t n, uint64 seed) {
const uint64 m = 0xc6a4a7935bd1e995;
const int r = 47;
uint64 h = seed ^ (n * m);
while (n >= 8) {
uint64 k = core::DecodeFixed64(data);
data += 8;
n -= 8;
k *= m;
k ^= k >> r;
k *= m;
h ^= k;
h *= m;
}
switch (n) {
case 7:
h ^= ByteAs64(data[6]) << 48;
TF_FALLTHROUGH_INTENDED;
case 6:
h ^= ByteAs64(data[5]) << 40;
TF_FALLTHROUGH_INTENDED;
case 5:
h ^= ByteAs64(data[4]) << 32;
TF_FALLTHROUGH_INTENDED;
case 4:
h ^= ByteAs64(data[3]) << 24;
TF_FALLTHROUGH_INTENDED;
case 3:
h ^= ByteAs64(data[2]) << 16;
TF_FALLTHROUGH_INTENDED;
case 2:
h ^= ByteAs64(data[1]) << 8;
TF_FALLTHROUGH_INTENDED;
case 1:
h ^= ByteAs64(data[0]);
h *= m;
}
h ^= h >> r;
h *= m;
h ^= h >> r;
return h;
}
}

#include <map>
#include <unordered_map>
#include <vector>
#include "tsl/platform/hash.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace tsl {
TEST(Hash, SignedUnsignedIssue) {
const unsigned char d1[1] = {0x62};
const unsigned char d2[2] = {0xc3, 0x97};
const unsigned char d3[3] = {0xe2, 0x99, 0xa5};
const unsigned char d4[4] = {0xe1, 0x80, 0xb9, 0x32};
const unsigned char d5[48] = {
0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
struct Case {
uint32 hash32;
uint64 hash64;
const unsigned char* data;
size_t size;
uint32 seed;
};
for (Case c : std::vector<Case>{
{0x471a8188u, 0x4c61ea3eeda4cb87ull, nullptr, 0, 0xbc9f1d34},
{0xd615eba5u, 0x091309f7ef916c8aull, d1, sizeof(d1), 0xbc9f1d34},
{0x0c3cccdau, 0xa815bcdf1d1af01cull, d2, sizeof(d2), 0xbc9f1d34},
{0x3ba37e0eu, 0x02167564e4d06430ull, d3, sizeof(d3), 0xbc9f1d34},
{0x16174eb3u, 0x8f7ed82ffc21071full, d4, sizeof(d4), 0xbc9f1d34},
{0x98b1926cu, 0xce196580c97aff1eull, d5, sizeof(d5), 0x12345678},
}) {
EXPECT_EQ(c.hash32,
Hash32(reinterpret_cast<const char*>(c.data), c.size, c.seed));
EXPECT_EQ(c.hash64,
Hash64(reinterpret_cast<const char*>(c.data), c.size, c.seed));
for (int align = 1; align <= 7; align++) {
std::string input(align, 'x');
input.append(reinterpret_cast<const char*>(c.data), c.size);
EXPECT_EQ(c.hash32, Hash32(&input[align], c.size, c.seed));
EXPECT_EQ(c.hash64, Hash64(&input[align], c.size, c.seed));
}
}
}
TEST(Hash, HashPtrIsNotIdentityFunction) {
int* ptr = reinterpret_cast<int*>(0xcafe0000);
EXPECT_NE(hash<int*>()(ptr), size_t{0xcafe0000});
}
static void BM_Hash32(::testing::benchmark::State& state) {
int len = state.range(0);
std::string input(len, 'x');
uint32 h = 0;
for (auto s : state) {
h = Hash32(input.data(), len, 1);
}
state.SetBytesProcessed(state.iterations() * len);
VLOG(1) << h;
}
BENCHMARK(BM_Hash32)->Range(1, 1024);
TEST(StringPieceHasher, Equality) {
StringPieceHasher hasher;
absl::string_view s1("foo");
absl::string_view s2("bar");
absl::string_view s3("baz");
absl::string_view s4("zot");
EXPECT_TRUE(hasher(s1) != hasher(s2));
EXPECT_TRUE(hasher(s1) != hasher(s3));
EXPECT_TRUE(hasher(s1) != hasher(s4));
EXPECT_TRUE(hasher(s2) != hasher(s3));
EXPECT_TRUE(hasher(s2) != hasher(s4));
EXPECT_TRUE(hasher(s3) != hasher(s4));
EXPECT_TRUE(hasher(s1) == hasher(s1));
EXPECT_TRUE(hasher(s2) == hasher(s2));
EXPECT_TRUE(hasher(s3) == hasher(s3));
EXPECT_TRUE(hasher(s4) == hasher(s4));
}
TEST(StringPieceHasher, HashMap) {
string s1("foo");
string s2("bar");
string s3("baz");
absl::string_view p1(s1);
absl::string_view p2(s2);
absl::string_view p3(s3);
std::unordered_map<absl::string_view, int, StringPieceHasher> map;
map.insert(std::make_pair(p1, 0));
map.insert(std::make_pair(p2, 1));
map.insert(std::make_pair(p3, 2));
EXPECT_EQ(map.size(), 3);
bool found[3] = {false, false, false};
for (auto const& val : map) {
int x = val.second;
EXPECT_TRUE(x >= 0 && x < 3);
EXPECT_TRUE(!found[x]);
found[x] = true;
}
EXPECT_EQ(found[0], true);
EXPECT_EQ(found[1], true);
EXPECT_EQ(found[2], true);
auto new_iter = map.find("zot");
EXPECT_TRUE(new_iter == map.end());
new_iter = map.find("bar");
EXPECT_TRUE(new_iter != map.end());
map.erase(new_iter);
EXPECT_EQ(map.size(), 2);
found[0] = false;
found[1] = false;
found[2] = false;
for (const auto& iter : map) {
int x = iter.second;
EXPECT_TRUE(x >= 0 && x < 3);
EXPECT_TRUE(!found[x]);
found[x] = true;
}
EXPECT_EQ(found[0], true);
EXPECT_EQ(found[1], false);
EXPECT_EQ(found[2], true);
}
}
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/hash.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/hash_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea

dc692c4b-5539-4049-846b-b6721c7cd45d | cpp | tensorflow/tensorflow | batch_resource_base | tensorflow/core/kernels/batching_util/batch_resource_base.cc | tensorflow/core/kernels/batching_util/batch_resource_base_test.cc
#include "tensorflow/core/kernels/batching_util/batch_resource_base.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <optional>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/fixed_array.h"
#include "absl/container/flat_hash_map.h"
#include "absl/functional/bind_front.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/blocking_counter.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "absl/types/optional.h"
#include "tensorflow/core/common_runtime/cost_constants.h"
#include "tensorflow/core/common_runtime/cost_measurement.h"
#include "tensorflow/core/common_runtime/cost_measurement_registry.h"
#include "tensorflow/core/common_runtime/cost_util.h"
#include "tensorflow/core/common_runtime/request_cost.h"
#include "tensorflow/core/common_runtime/request_cost_accessor.h"
#include "tensorflow/core/common_runtime/request_cost_accessor_registry.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/ops_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
#include "tensorflow/core/kernels/batching_util/batch_scheduler_utils.h"
#include "tensorflow/core/kernels/batching_util/batch_stats.h"
#include "tensorflow/core/kernels/batching_util/concat_split_util.h"
#include "tensorflow/core/kernels/batching_util/input_split_metadata.h"
#include "tensorflow/core/kernels/batching_util/threadsafe_status.h"
#include "tensorflow/core/kernels/batching_util/warmup.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/monitoring/counter.h"
#include "tensorflow/core/lib/monitoring/gauge.h"
#include "tensorflow/core/lib/monitoring/percentile_sampler.h"
#include "tensorflow/core/lib/monitoring/sampler.h"
#include "tensorflow/core/lib/monitoring/types.h"
#include "tensorflow/core/platform/context.h"
#include "tensorflow/core/platform/env_time.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
#include "tensorflow/core/util/incremental_barrier.h"
#include "tsl/platform/criticality.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace serving {
namespace {
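// The Record* helpers below export batching metrics (padding size, input and
// processed batch sizes, batch delays, and the configured queue parameters),
// keyed by model name and op name.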
void RecordPaddingSize(int32_t padding_size, const string& model_name,
int32_t execution_batch_size, const string& op_name) {
static auto* cell = tensorflow::monitoring::PercentileSampler<3>::New(
{"/tensorflow/serving/batching/padding_size",
"Tracks the padding size distribution on batches by model_name (if "
"available).",
"model_name", "execution_batch_size", "op_name"},
{25.0, 50.0, 75.0, 90.0, 95.0, 99.0},
1024, tensorflow::monitoring::UnitOfMeasure::kNumber);
cell->GetCell(model_name, absl::StrCat(execution_batch_size), op_name)
->Add(static_cast<double>(padding_size));
}
void RecordPaddingSizeV2(int32_t padding_size, const string& model_name,
int32_t execution_batch_size, const string& op_name) {
std::vector<double> bucket_limits;
bucket_limits.push_back(-2.0 / 3.0);
double bound = 2.0 / 3.0;
double growth_factor = 2;
for (int i = 0; i < 16; i++) {
bucket_limits.push_back(bound);
bound *= growth_factor;
}
static auto* cell = tensorflow::monitoring::Sampler<3>::New(
{"/tensorflow/serving/batching/padding_size_v2",
"Tracks the padding size distribution on batches by model_name (if "
"available).",
"model_name", "execution_batch_size", "op_name"},
monitoring::Buckets::Explicit(bucket_limits));
cell->GetCell(model_name, absl::StrCat(execution_batch_size), op_name)
->Add(static_cast<double>(padding_size));
}
void RecordInputBatchSize(int32_t batch_size, const string& model_name,
const string& op_name) {
static auto* cell = tensorflow::monitoring::PercentileSampler<2>::New(
{"/tensorflow/serving/batching/input_batch_size",
"Tracks the batch size distribution on the inputs by model_name (if "
"available).",
"model_name", "op_name"},
{25.0, 50.0, 75.0, 90.0, 95.0, 99.0},
1024, tensorflow::monitoring::UnitOfMeasure::kNumber);
cell->GetCell(model_name, op_name)->Add(static_cast<double>(batch_size));
}
void RecordInputBatchSizeV2(int32_t batch_size, const string& model_name,
const string& op_name) {
static auto* cell = tensorflow::monitoring::Sampler<2>::New(
{"/tensorflow/serving/batching/input_batch_size_v2",
"Tracks the batch size distribution on the inputs by model_name (if "
"available).",
"model_name", "op_name"},
monitoring::Buckets::Exponential(2.0 / 3.0, 2, 15));
cell->GetCell(model_name, op_name)->Add(static_cast<double>(batch_size));
}
void RecordBatchSize(int32_t batch_size, const string& model_name,
const string& op_name) {
static auto* cell = tensorflow::monitoring::Sampler<2>::New(
{"/tensorflow/serving/batching/batch_size",
"Tracks the batch size distribution on the batch result by model_name "
"(if available).",
"model_name", "op_name"},
monitoring::Buckets::Exponential(1, 1.5, 20));
cell->GetCell(model_name, op_name)->Add(static_cast<double>(batch_size));
}
void RecordProcessedBatchSize(int32_t batch_size, const string& model_name,
const string& op_name) {
static auto* cell = tensorflow::monitoring::PercentileSampler<2>::New(
{"/tensorflow/serving/batching/processed_batch_size",
"Tracks the batch size distribution on processing by model_name (if "
"available).",
"model_name", "op_name"},
{25.0, 50.0, 75.0, 90.0, 95.0, 99.0},
1024, tensorflow::monitoring::UnitOfMeasure::kNumber);
cell->GetCell(model_name, op_name)->Add(static_cast<double>(batch_size));
}
void RecordProcessedBatchSizeV2(int32_t batch_size, const string& model_name,
const string& op_name) {
static auto* cell = monitoring::Counter<3>::New(
"/tensorflow/serving/batching/processed_batch_size_v2",
"Tracks the batch size on processing by model_name and op name (if "
"available).",
"model_name", "op_name", "batch_size");
cell->GetCell(model_name, op_name, std::to_string(batch_size))
->IncrementBy(1);
}
void RecordBatchDelayUs(int64_t batch_delay_us, const string& model_name,
const string& op_name, int32_t batch_size) {
static auto* cell = monitoring::PercentileSampler<3>::New(
{"/tensorflow/serving/batching/batch_delay_us",
"Tracks the batching delay (in microseconds) for inputs by model_name "
"(if available).",
"model_name", "op_name", "processed_batch_size"},
{25.0, 50.0, 75.0, 90.0, 95.0, 99.0},
1024, monitoring::UnitOfMeasure::kTime);
cell->GetCell(model_name, op_name, std::to_string(batch_size))
->Add(static_cast<double>(batch_delay_us));
}
void RecordBatchDelayUsV2(int64_t batch_delay_us, const string& model_name,
const string& op_name, int32_t batch_size) {
static auto* cell = tensorflow::monitoring::Sampler<3>::New(
{"/tensorflow/serving/batching/batch_delay_us_v2",
"Tracks the batching delay (in microseconds) for inputs by model_name "
"(if available).",
"model_name", "op_name", "processed_batch_size"},
monitoring::Buckets::Exponential(1, 2, 27));
cell->GetCell(model_name, op_name, std::to_string(batch_size))
->Add(static_cast<double>(batch_delay_us));
}
void RecordBatchParamBatchTimeoutMicros(int64_t batch_timeout_micros,
const string& model_name,
const string& op_name) {
static auto* cell = monitoring::Gauge<int64_t, 2>::New(
"/tensorflow/serving/batching/batch_timeout_micros",
"Tracks how long a request can wait before being processed by a batch.",
"model_name", "op_name");
cell->GetCell(model_name, op_name)->Set(batch_timeout_micros);
}
void RecordBatchParamMaxBatchSize(int64_t max_batch_size,
const string& model_name,
const string& op_name) {
static auto* cell = monitoring::Gauge<int64_t, 2>::New(
"/tensorflow/serving/batching/max_batch_size",
"Tracks the maximum size of a batch.", "model_name", "op_name");
cell->GetCell(model_name, op_name)->Set(max_batch_size);
}
void RecordBatchParamPaddingPolicy(const string& batch_padding_policy,
const string& model_name,
const string& op_name) {
static auto* cell = monitoring::Gauge<string, 2>::New(
"/tensorflow/serving/batching/configured_batch_padding_policy",
"The value of BatchFunction.batch_padding_policy attribute.",
"model_name", "op_name");
cell->GetCell(model_name, op_name)->Set(batch_padding_policy);
}
void RecordBatchParamMaxEnqueuedBatches(int64_t max_enqueued_batches,
const string& model_name,
const string& op_name) {
static auto* cell = monitoring::Gauge<int64_t, 2>::New(
"/tensorflow/serving/batching/max_enqueued_batches",
"Tracks the maximum number of enqueued batches.", "model_name",
"op_name");
cell->GetCell(model_name, op_name)->Set(max_enqueued_batches);
}
void RecordBatchParamAllowedBatchSizes(const string& allowed_batch_sizes,
const string& model_name,
const string& op_name) {
static auto* cell = monitoring::Gauge<string, 2>::New(
"/tensorflow/serving/batching/allowed_batch_sizes",
"Tracks the sizes that are allowed to form a batch.", "model_name",
"op_name");
cell->GetCell(model_name, op_name)->Set(allowed_batch_sizes);
}
void RecordBatchCosts(const std::string& model_name,
const int64_t processed_size,
const absl::string_view cost_type,
const absl::Duration total_cost) {
static auto* cell = tensorflow::monitoring::Sampler<3>::New(
{"/tensorflow/serving/batching/costs",
"Tracks the batch costs (in microseconds) by model name and processed "
"size.",
"model_name", "processed_size", "cost_type"},
monitoring::Buckets::Exponential(1, 2, 27));
cell->GetCell(model_name, std::to_string(processed_size),
std::string(cost_type))
->Add(absl::ToDoubleMicroseconds(total_cost));
}
const string& GetModelName(OpKernelContext* ctx) {
static string* kModelNameUnset = new string("model_name_unset");
if (!ctx->session_metadata()) return *kModelNameUnset;
if (ctx->session_metadata()->name().empty()) return *kModelNameUnset;
return ctx->session_metadata()->name();
}
int GetTotalTaskSize(
const std::vector<std::unique_ptr<BatchResourceBase::BatchTask>>& tasks) {
int tasks_size = 0;
for (const auto& task : tasks) {
tasks_size += task->size();
}
return tasks_size;
}
}
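// Builds the task for one split of this input task. The split shares the
// parent's guid, captured inputs, op kernel context, output matrix, and status
// so that the per-split results can be stitched back together once every split
// has completed.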
std::unique_ptr<BatchResourceBase::BatchTask>
BatchResourceBase::BatchTask::CreateSplitTask(
int split_index, AsyncOpKernel::DoneCallback done_callback) {
std::unique_ptr<BatchTask> task = CreateDerivedTask();
task->guid = this->guid;
task->propagated_context = Context(ContextKind::kThread);
task->inputs.reserve(this->inputs.size());
task->captured_inputs = this->captured_inputs;
task->context = this->context;
task->done_callback = done_callback;
task->split_index = split_index;
task->output = this->output;
task->status = this->status;
task->is_partial = true;
task->start_time = this->start_time;
task->request_cost = this->request_cost;
task->forced_warmup_batch_size = this->forced_warmup_batch_size;
return task;
}
using ::tensorflow::concat_split_util::Concat;
using ::tensorflow::concat_split_util::Split;
using TensorMatrix = std::vector<std::vector<Tensor>>;
string GetTensorNamesAndShapesString(const OpKernelContext* context,
const OpInputList& tensors) {
std::stringstream out;
int i = 0;
for (const Tensor& tensor : tensors) {
out << " - " << context->op_kernel().requested_input(i++) << " has shape "
<< tensor.shape().DebugString() << "\n";
}
return out.str();
}
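// Registers the input once per allowed batch size with a forced warmup batch
// size, then once more for the real request; the final done callback waits for
// all warmup batches to finish and propagates the shared status to the op
// context.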
Status BatchResourceBase::RegisterWarmupInputs(
int64_t guid, OpKernelContext* context, const string& batcher_queue_name,
const CreateBatchTaskFn& create_batch_task_fn,
AsyncOpKernel::DoneCallback done) {
auto shared_status = std::make_shared<ThreadSafeStatus>();
auto create_batch_task_fn_share_status = [&create_batch_task_fn,
&shared_status]() {
auto batch_task = create_batch_task_fn();
if (!batch_task.ok()) {
return batch_task;
}
(*batch_task)->status = shared_status;
return batch_task;
};
auto warmup_counter =
std::make_shared<absl::BlockingCounter>(allowed_batch_sizes_.size());
for (int i = 0; i < allowed_batch_sizes_.size(); ++i) {
Status status = RegisterInput(
guid, context, batcher_queue_name, create_batch_task_fn_share_status,
[warmup_counter = warmup_counter.get()]() {
warmup_counter->DecrementCount();
},
allowed_batch_sizes_[i]);
if (!status.ok()) return status;
}
return RegisterInput(
guid, context, batcher_queue_name, create_batch_task_fn_share_status,
[warmup_counter, context, shared_status, done = std::move(done)]() {
warmup_counter->Wait();
context->SetStatus(shared_status->status());
done();
});
}
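// Validates the op's input tensors, fills in a BatchTask (inputs, captured
// inputs, done callback, and request cost), exports the configured queue
// parameters as metrics, and schedules the task onto the named batcher queue.
// Inputs with zero rows are answered immediately with empty outputs.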
Status BatchResourceBase::RegisterInput(
int64_t guid, OpKernelContext* context, const string& batcher_queue_name,
const CreateBatchTaskFn& create_batch_task_fn,
AsyncOpKernel::DoneCallback done_callback, int forced_warmup_batch_size) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<BatchTask> batch_components,
create_batch_task_fn());
batch_components->start_time = EnvTime::NowNanos();
batch_components->guid = guid;
batch_components->propagated_context = Context(ContextKind::kThread);
OpInputList tensors;
TF_RETURN_IF_ERROR(context->input_list("in_tensors", &tensors));
batch_components->inputs.reserve(tensors.size());
for (const Tensor& tensor : tensors) {
if (tensor.shape().dims() == 0) {
return errors::InvalidArgument(
"Batching input tensors must have at least one dimension.\nBelow are "
"the input tensors: \n",
GetTensorNamesAndShapesString(context, tensors));
}
if (tensors.size() >= 2 &&
tensor.shape().dim_size(0) != tensors[0].shape().dim_size(0)) {
return errors::InvalidArgument(
"Batching input tensors supplied in a given op invocation must "
"have equal 0th-dimension size.\nBelow are the input tensors: \n",
GetTensorNamesAndShapesString(context, tensors));
}
batch_components->inputs.push_back(tensor);
}
RecordInputBatchSize(tensors[0].shape().dim_size(0), GetModelName(context),
context->op_kernel().name());
RecordInputBatchSizeV2(tensors[0].shape().dim_size(0), GetModelName(context),
context->op_kernel().name());
if (batcher_) {
RecordBatchParamBatchTimeoutMicros(
batcher_queue_options_.batch_timeout_micros, GetModelName(context),
context->op_kernel().name());
RecordBatchParamMaxBatchSize(
batcher_queue_options_.max_execution_batch_size, GetModelName(context),
context->op_kernel().name());
RecordBatchParamMaxEnqueuedBatches(
batcher_queue_options_.max_enqueued_batches, GetModelName(context),
context->op_kernel().name());
RecordBatchParamPaddingPolicy(
this->batcher_queue_options_.batch_padding_policy,
GetModelName(context), context->op_kernel().name());
} else if (adaptive_batcher_) {
RecordBatchParamBatchTimeoutMicros(
adaptive_batcher_queue_options_.batch_timeout_micros,
GetModelName(context), context->op_kernel().name());
RecordBatchParamMaxBatchSize(adaptive_batcher_queue_options_.max_batch_size,
GetModelName(context),
context->op_kernel().name());
RecordBatchParamMaxEnqueuedBatches(
adaptive_batcher_queue_options_.max_enqueued_batches,
GetModelName(context), context->op_kernel().name());
} else {
return errors::Internal("No batcher defined.");
}
RecordBatchParamAllowedBatchSizes(allowed_batch_sizes_str_,
GetModelName(context),
context->op_kernel().name());
if (tensors[0].shape().dim_size(0) == 0) {
for (int i = 0; i < context->num_outputs(); i++) {
Tensor* empty_output;
AllocatorAttributes cpu_alloc;
cpu_alloc.set_on_host(true);
TF_RETURN_IF_ERROR(context->allocate_output(i, TensorShape({0}),
&empty_output, cpu_alloc));
}
done_callback();
return absl::OkStatus();
}
OpInputList captured_tensors;
const auto captured_status =
context->input_list("captured_tensors", &captured_tensors);
if (captured_status.ok()) {
batch_components->captured_inputs.reserve(captured_tensors.size());
for (const Tensor& captured_tensor : captured_tensors) {
batch_components->captured_inputs.push_back(captured_tensor);
}
}
batch_components->context = context;
batch_components->split_index = 0;
batch_components->output = std::make_shared<TensorMatrix>();
if (!batch_components->status) {
batch_components->status = std::make_shared<ThreadSafeStatus>();
batch_components->done_callback = [done_callback = std::move(done_callback),
shared_status = batch_components->status,
context = context]() {
context->SetStatus(shared_status->status());
done_callback();
};
} else {
batch_components->done_callback = std::move(done_callback);
}
batch_components->forced_warmup_batch_size = forced_warmup_batch_size;
std::unique_ptr<RequestCostAccessor> request_cost_accessor =
CreateRequestCostAccessor();
if (request_cost_accessor) {
batch_components->request_cost = request_cost_accessor->GetRequestCost();
}
BatcherQueueT* batcher_queue;
TF_RETURN_IF_ERROR(LookupOrCreateBatcherQueue(
batcher_queue_name,
GetModelName(context),
context->op_kernel().name(), &batcher_queue));
if (!session_metadata().name().empty()) {
absl::MutexLock lock(&outstanding_batch_mu_);
WarmupStateRegistry::Key key(session_metadata().name(),
session_metadata().version());
if (GetGlobalWarmupStateRegistry().Lookup(key)) {
outstanding_batch_mu_.Await({+[](int* num_outstanding_batched_items) {
return *num_outstanding_batched_items == 0;
},
&num_outstanding_batched_items_});
}
num_outstanding_batched_items_ += batch_components->size();
}
return batcher_queue->Schedule(&batch_components);
}
BatchResourceBase::BatcherT::QueueOptions
BatchResourceBase::GetBatcherQueueOptions(
int32_t num_batch_threads, int32_t max_batch_size,
int32_t batch_timeout_micros, int32_t max_enqueued_batches,
const std::vector<int32>& allowed_batch_sizes,
bool enable_large_batch_splitting, bool disable_padding) {
return GetBatcherQueueOptions(
num_batch_threads, max_batch_size, batch_timeout_micros,
max_enqueued_batches, allowed_batch_sizes, enable_large_batch_splitting,
disable_padding,
kPadUpPolicy,
0,
0,
0,
{},
MixedPriorityBatchingPolicy::kLowPriorityPaddingWithMaxBatchSize);
}
BatchResourceBase::BatcherT::QueueOptions
BatchResourceBase::GetBatcherQueueOptions(
int32_t num_batch_threads, int32_t max_batch_size,
int32_t batch_timeout_micros, int32_t max_enqueued_batches,
const std::vector<int32>& allowed_batch_sizes,
bool enable_large_batch_splitting, bool disable_padding,
absl::string_view batch_padding_policy, int32_t low_priority_max_batch_size,
int32_t low_priority_batch_timeout_micros,
int32_t low_priority_max_enqueued_batches,
const std::vector<int32>& low_priority_allowed_batch_sizes,
MixedPriorityBatchingPolicy mixed_priority_batching_policy) {
BatcherT::QueueOptions batcher_queue_options;
batcher_queue_options.input_batch_size_limit = max_batch_size;
batcher_queue_options.max_enqueued_batches = max_enqueued_batches;
batcher_queue_options.batch_timeout_micros = batch_timeout_micros;
batcher_queue_options.batch_padding_policy =
std::string(batch_padding_policy);
if (low_priority_max_batch_size > 0) {
batcher_queue_options.enable_priority_queue = true;
}
batcher_queue_options.high_priority_queue_options.input_batch_size_limit =
max_batch_size;
batcher_queue_options.high_priority_queue_options.max_enqueued_batches =
max_enqueued_batches;
batcher_queue_options.high_priority_queue_options.batch_timeout_micros =
batch_timeout_micros;
batcher_queue_options.low_priority_queue_options.input_batch_size_limit =
low_priority_max_batch_size;
batcher_queue_options.low_priority_queue_options.max_enqueued_batches =
low_priority_max_enqueued_batches;
batcher_queue_options.low_priority_queue_options.batch_timeout_micros =
low_priority_batch_timeout_micros;
if (low_priority_allowed_batch_sizes.empty()) {
batcher_queue_options.low_priority_queue_options.max_execution_batch_size =
low_priority_max_batch_size;
} else {
batcher_queue_options.low_priority_queue_options.max_execution_batch_size =
*low_priority_allowed_batch_sizes.rbegin();
}
batcher_queue_options.low_priority_queue_options.allowed_batch_sizes =
low_priority_allowed_batch_sizes;
batcher_queue_options.mixed_priority_batching_policy =
mixed_priority_batching_policy;
batcher_queue_options.enable_large_batch_splitting =
enable_large_batch_splitting;
if (enable_large_batch_splitting) {
batcher_queue_options.split_input_task_func =
[](std::unique_ptr<BatchTask>* input_task,
int open_batch_remaining_slot, int max_batch_size,
std::vector<std::unique_ptr<BatchTask>>* output_tasks) -> Status {
return SplitInputTask(input_task, open_batch_remaining_slot,
max_batch_size, output_tasks);
};
if (allowed_batch_sizes.empty()) {
batcher_queue_options.max_execution_batch_size = max_batch_size;
batcher_queue_options.high_priority_queue_options
.max_execution_batch_size = max_batch_size;
} else {
batcher_queue_options.max_execution_batch_size =
*allowed_batch_sizes.rbegin();
batcher_queue_options.high_priority_queue_options
.max_execution_batch_size = *allowed_batch_sizes.rbegin();
batcher_queue_options.allowed_batch_sizes = allowed_batch_sizes;
}
}
batcher_queue_options.disable_padding = disable_padding;
return batcher_queue_options;
}
BatchResourceBase::AdaptiveBatcherT::QueueOptions
BatchResourceBase::GetAdaptiveBatcherQueueOptions(
int32_t max_batch_size, int32_t batch_timeout_micros,
int32_t max_enqueued_batches, bool enable_large_batch_splitting,
const std::vector<int32>& allowed_batch_sizes, bool disable_padding) {
AdaptiveBatcherT::QueueOptions batcher_queue_options;
batcher_queue_options.max_input_task_size =
std::make_optional(max_batch_size);
batcher_queue_options.max_enqueued_batches = max_enqueued_batches;
batcher_queue_options.batch_timeout_micros = batch_timeout_micros;
if (allowed_batch_sizes.empty()) {
batcher_queue_options.max_batch_size = max_batch_size;
} else {
batcher_queue_options.max_batch_size = *allowed_batch_sizes.rbegin();
}
if (enable_large_batch_splitting) {
batcher_queue_options.split_input_task_func =
[](std::unique_ptr<BatchTask>* input_task,
int open_batch_remaining_slot, int max_batch_size,
std::vector<std::unique_ptr<BatchTask>>* output_tasks) -> Status {
return SplitInputTask(input_task, open_batch_remaining_slot,
max_batch_size, output_tasks);
};
}
batcher_queue_options.disable_padding = disable_padding;
return batcher_queue_options;
}
Status BatchResourceBase::ValidateBatch(const BatchT& batch) {
for (int task_idx = 0; task_idx < batch.num_tasks(); ++task_idx) {
const BatchResourceBase::BatchTask& task = batch.task(task_idx);
if (task.inputs.size() != batch.task(0).inputs.size()) {
return errors::InvalidArgument(
"Batching inputs must have equal number of edges");
}
}
return absl::OkStatus();
}
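// A batch is treated as low priority only when priority queues are enabled and
// its first task carries sheddable criticality.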
bool BatchResourceBase::IsLowPriorityBatch(const BatchT& batch) const {
if (!batcher_queue_options_.enable_priority_queue) return false;
if (batch.empty()) return false;
return batch.task(0).criticality() ==
tsl::criticality::Criticality::kSheddablePlus ||
batch.task(0).criticality() ==
tsl::criticality::Criticality::kSheddable;
}
int BatchResourceBase::RoundToLowestAllowedBatchSize(
int batch_size, bool is_low_priority_batch) const {
const std::vector<int32>& allowed_batch_sizes =
is_low_priority_batch ? batcher_queue_options_.low_priority_queue_options
.allowed_batch_sizes
: allowed_batch_sizes_;
return GetNextAllowedBatchSize(batch_size, allowed_batch_sizes,
batcher_queue_options_.disable_padding);
}
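// Concatenates the i-th input of every batched task (and of any unbatched
// low-priority tasks) into a single tensor per input, padding with rows
// repeated from the first task's input so the result reaches the target batch
// size, and records the padding and batch-size metrics.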
Status BatchResourceBase::ConcatInputTensors(
const BatchT& batch,
const std::vector<std::unique_ptr<BatchTask>>& unbatched_tasks,
OpKernelContext* context, std::vector<Tensor>* concatenated_tensors) const {
if (batch.num_tasks() == 0) {
return errors::InvalidArgument("Empty batch.");
}
int unbatched_tasks_size = GetTotalTaskSize(unbatched_tasks);
const bool just_for_warmup = batch.task(0).forced_warmup_batch_size > 0;
const int padded_batch_size =
just_for_warmup
? batch.task(0).forced_warmup_batch_size
: RoundToLowestAllowedBatchSize(batch.size() + unbatched_tasks_size,
IsLowPriorityBatch(batch));
const int padding_amount =
just_for_warmup ? padded_batch_size
: padded_batch_size - batch.size() - unbatched_tasks_size;
tsl::profiler::TraceMe trace_me(
[padded_batch_size, padding_amount,
disable_padding = batcher_queue_options_.disable_padding]() {
return tsl::profiler::TraceMeEncode(
"ConcatInputTensors",
{{"batch_size_after_padding", padded_batch_size},
{"padding_amount", padding_amount},
{"disable_padding", disable_padding}});
});
RecordPaddingSize(padding_amount, GetModelName(context), padded_batch_size,
context->op_kernel().name());
RecordPaddingSizeV2(padding_amount, GetModelName(context), padded_batch_size,
context->op_kernel().name());
RecordProcessedBatchSize(padded_batch_size, GetModelName(context),
context->op_kernel().name());
RecordProcessedBatchSizeV2(padded_batch_size, GetModelName(context),
context->op_kernel().name());
RecordBatchSize(batch.size(), GetModelName(context),
context->op_kernel().name());
const int num_inputs = batch.task(0).inputs.size();
concatenated_tensors->reserve(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
std::vector<Tensor> to_concatenate;
if (just_for_warmup) {
to_concatenate.reserve(padding_amount);
} else {
to_concatenate.reserve(batch.num_tasks() + unbatched_tasks.size() +
padding_amount);
for (int task_idx = 0; task_idx < batch.num_tasks(); ++task_idx) {
to_concatenate.push_back(batch.task(task_idx).inputs.at(i));
}
for (int task_idx = 0; task_idx < unbatched_tasks.size(); ++task_idx) {
to_concatenate.push_back(unbatched_tasks[task_idx]->inputs.at(i));
}
}
if (padding_amount != 0) {
const Tensor& padding_source = batch.task(0).inputs.at(i);
Tensor padding;
if (padding_source.shape().dim_size(0) == 0) {
return errors::InvalidArgument(
"Cannot use an empty tensor with zero rows as padding when "
"batching. (Input ",
i, " got shape ", padding_source.shape().DebugString(), ".)");
}
if (padding_source.shape().dim_size(0) == 1) {
padding = padding_source;
} else {
padding = padding_source.Slice(0, 1);
}
for (int i = 0; i < padding_amount; ++i) {
to_concatenate.push_back(padding);
}
}
Tensor concatenated_tensor;
Status concat_status =
Concat(context, to_concatenate, &concatenated_tensor);
TF_RETURN_IF_ERROR(concat_status);
concatenated_tensors->push_back(concatenated_tensor);
}
return absl::OkStatus();
}
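// Splits one oversized input task into smaller tasks whose sizes are chosen by
// InputSplitMetadata. Every input tensor is sliced accordingly, and an
// IncrementalBarrier concatenates the per-split outputs and runs the original
// done callback once all splits have finished.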
Status BatchResourceBase::SplitInputTask(
std::unique_ptr<BatchTask>* input_task_ptr, int open_batch_remaining_slot,
int max_batch_size, std::vector<std::unique_ptr<BatchTask>>* output_tasks) {
BatchTask& input_task = *(*input_task_ptr);
const int64_t input_task_size = input_task.size();
DCHECK_GT(input_task_size, 0);
std::shared_ptr<ThreadSafeStatus> shared_status = input_task.status;
std::function<void()> split_task_done_callback =
[done_callback = input_task.done_callback, output = input_task.output,
forced_warmup_batch_size = input_task.forced_warmup_batch_size,
op_kernel_context = input_task.context,
status = shared_status]() mutable {
const int num_output = op_kernel_context->num_outputs();
for (int i = 0; i < num_output; ++i) {
Tensor output_tensor;
std::vector<Tensor> to_concatenate;
to_concatenate.reserve(output->size());
for (int j = 0; j < output->size(); ++j) {
to_concatenate.push_back(std::move((*output)[j][i]));
}
const auto concat_status =
Concat(op_kernel_context, to_concatenate, &output_tensor);
if (!concat_status.ok()) {
status->Update(concat_status);
}
if (forced_warmup_batch_size == 0) {
op_kernel_context->set_output(i, std::move(output_tensor));
}
}
done_callback();
};
IncrementalBarrier barrier(split_task_done_callback);
const internal::InputSplitMetadata input_split_metadata(
input_task_size, open_batch_remaining_slot, max_batch_size);
const absl::FixedArray<int>& task_sizes = input_split_metadata.task_sizes();
const int num_batches = task_sizes.size();
std::vector<int64_t> output_task_sizes;
output_task_sizes.resize(num_batches);
for (int i = 0; i < num_batches; i++) {
output_task_sizes[i] = task_sizes[i];
}
input_task.output->resize(num_batches);
for (int i = 0; i < num_batches; ++i) {
(*input_task.output)[i].resize(input_task.context->num_outputs());
}
output_tasks->reserve(num_batches);
for (int i = 0; i < num_batches; i++) {
output_tasks->push_back(input_task.CreateSplitTask(i, barrier.Inc()));
}
const int num_input_tensors = input_task.inputs.size();
for (int i = 0; i < num_input_tensors; ++i) {
std::vector<Tensor> split_tensors;
const Tensor& input_tensor = input_task.inputs[i];
const Status split_status = Split(input_task.context, input_tensor,
output_task_sizes, &split_tensors);
if (!split_status.ok()) {
return errors::Internal(
"When splitting input, Tensor split operation failed: ",
split_status.message());
}
if (split_tensors.size() != output_task_sizes.size()) {
return errors::Internal(
"When splitting input, tensor split operation did not work as "
"expected; got ",
split_tensors.size(), " splits; expected ", output_task_sizes.size());
}
for (int j = 0; j < output_tasks->size(); ++j) {
BatchTask& output_task = *((*output_tasks)[j]);
auto moved_tensor_iter = std::next(split_tensors.begin(), j);
std::move(moved_tensor_iter, moved_tensor_iter + 1,
std::back_inserter(output_task.inputs));
}
}
return absl::OkStatus();
}
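// Splits each batched output tensor along dimension 0 back into per-task
// slices (any trailing padding slice is dropped) and routes each slice either
// to the task's OpKernelContext or, for partial tasks, into its split output
// slot.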
Status BatchResourceBase::SplitOutputTensors(
const std::vector<Tensor>& combined_outputs, BatchT* batch,
std::vector<std::unique_ptr<BatchTask>>& unbatched_tasks) const {
DCHECK_GE(batch->num_tasks(), 1);
if (batch->num_tasks() < 1) {
return errors::Internal("Batch size expected to be positive; was ",
batch->num_tasks());
}
std::vector<int64_t> task_sizes_plus_optional_padding;
task_sizes_plus_optional_padding.reserve(batch->num_tasks() +
unbatched_tasks.size());
for (int i = 0; i < batch->num_tasks(); ++i) {
task_sizes_plus_optional_padding.push_back(batch->task(i).size());
}
for (int i = 0; i < unbatched_tasks.size(); ++i) {
task_sizes_plus_optional_padding.push_back(unbatched_tasks[i]->size());
}
int unbatched_tasks_size = GetTotalTaskSize(unbatched_tasks);
const int padding_size =
batcher_queue_options_.disable_padding
? 0
: RoundToLowestAllowedBatchSize(batch->size() + unbatched_tasks_size,
IsLowPriorityBatch(*batch)) -
batch->size() - unbatched_tasks_size;
if (padding_size > 0) {
task_sizes_plus_optional_padding.push_back(padding_size);
}
DCHECK_EQ(batch->task(0).context->num_outputs(), combined_outputs.size());
int combined_outputs_size = combined_outputs.size();
if (combined_outputs_size != batch->task(0).context->num_outputs()) {
return errors::Internal("Wrong number of batched output tensors");
}
for (int i = 0, iter_limit = combined_outputs.size(); i < iter_limit; ++i) {
const Tensor& output_tensor = combined_outputs[i];
if (output_tensor.shape().dims() == 0) {
return errors::FailedPrecondition(
"Batched output tensor has 0 dimensions");
}
if (output_tensor.shape().dim_size(0) !=
static_cast<int64_t>(batch->size() + unbatched_tasks_size +
padding_size)) {
return errors::FailedPrecondition(
"Batched output tensor's 0th dimension does not equal the sum of "
"the 0th dimension sizes of the input tensors");
}
std::vector<Tensor> split_tensor;
const Status split_status = tensor::Split(
output_tensor, task_sizes_plus_optional_padding, &split_tensor);
DCHECK(split_status.ok()) << split_status;
if (!split_status.ok()) {
return errors::Internal("Tensor split operation failed: ",
split_status.message());
}
DCHECK_EQ(split_tensor.size(), task_sizes_plus_optional_padding.size());
if (split_tensor.size() != task_sizes_plus_optional_padding.size()) {
return errors::Internal(
"Tensor split operation did not work as expected; got ",
split_tensor.size(), " splits; expected ",
task_sizes_plus_optional_padding.size());
}
for (int j = 0; j < batch->num_tasks(); ++j) {
BatchTask& task = *(batch->mutable_task(j));
if (task.is_partial) {
std::vector<Tensor>& tensor_vector = (*task.output)[task.split_index];
tensor_vector[i] = std::move(split_tensor[j]);
} else {
task.context->set_output(i, split_tensor[j]);
}
}
for (int j = 0; j < unbatched_tasks.size(); ++j) {
unbatched_tasks[j]->context->set_output(
i, split_tensor[batch->num_tasks() + j]);
}
}
return absl::OkStatus();
}
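// Records `status` on the task unless it is the "Function was cancelled
// before it was started" error (which is only logged), then invokes the
// task's done callback.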
void BatchResourceBase::CleanUpFunctionHelper(BatchTask& task,
const Status& status) const {
WithContext wc(task.propagated_context);
if (!status.ok()) {
if (!absl::StrContains(status.message(),
"Function was cancelled before it was started")) {
task.status->Update(status);
} else {
LOG(ERROR) << "ERROR!!!! " << status.message();
}
}
task.done_callback();
}
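// Batch-function path: concatenates the task inputs (plus captured inputs),
// records per-task queuing delay, invokes ProcessFuncBatchImpl, splits the
// combined outputs back to the individual tasks (skipped for forced warm-up
// batches) and finally runs the cleanup that splits batch costs and completes
// every task.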
void BatchResourceBase::ProcessFuncBatch(
std::unique_ptr<BatchT> batch,
std::vector<std::unique_ptr<BatchTask>> unbatched_tasks) const {
if (batch->empty()) {
return;
}
WithContext wc(batch->task(batch->num_tasks() - 1).propagated_context);
const CostMeasurement::Context batching_context{false};
std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements =
CreateCostMeasurements(batching_context);
auto& last_task = batch->task(batch->num_tasks() - 1);
OpKernelContext* last_task_context = last_task.context;
const std::string& model_name = GetModelName(last_task_context);
const std::string& op_name = last_task_context->op_kernel().name();
Status status;
bool cleanup_done = false;
int64_t processed_size = batch->size();
auto cleanup_fn = [&](const Status& status) {
if (cleanup_done) {
return;
}
SplitBatchCostsAndRecordMetrics(
model_name, op_name,
batch_cost_measurements, processed_size, *batch);
batch_cost_measurements.clear();
for (int i = 0; i < batch->num_tasks(); ++i) {
CleanUpFunctionHelper(*batch->mutable_task(i), status);
}
for (int i = 0; i < unbatched_tasks.size(); ++i) {
CleanUpFunctionHelper(*unbatched_tasks[i], status);
}
cleanup_done = true;
};
auto finally =
gtl::MakeCleanup([&cleanup_fn, &status] { cleanup_fn(status); });
status = ValidateBatch(*batch);
if (!status.ok()) {
return;
}
std::vector<Tensor> concatenated_tensors;
status = ConcatInputTensors(*batch, unbatched_tasks, last_task_context,
&concatenated_tensors);
processed_size = RoundToLowestAllowedBatchSize(batch->size());
if (!status.ok()) {
return;
}
std::vector<Tensor> combined_outputs;
std::vector<Tensor> args(concatenated_tensors.begin(),
concatenated_tensors.end());
const auto& captured_inputs =
batch->task(batch->num_tasks() - 1).captured_inputs;
args.insert(args.end(), captured_inputs.begin(), captured_inputs.end());
uint64 current_time = EnvTime::NowNanos();
for (int i = 0; i < batch->num_tasks(); ++i) {
RecordBatchDelayUs((current_time - batch->task(i).start_time) * 1e-3,
model_name, last_task_context->op_kernel().name(),
processed_size);
RecordBatchDelayUsV2((current_time - batch->task(i).start_time) * 1e-3,
model_name, last_task_context->op_kernel().name(),
processed_size);
}
finally.release();
ProcessFuncBatchImpl(
last_task, args, &combined_outputs, [&](const Status& run_status) {
Status final_status;
auto run_finally = gtl::MakeCleanup([&]() {
cleanup_fn(final_status);
});
final_status = run_status;
if (!final_status.ok()) {
return;
}
if (last_task.forced_warmup_batch_size == 0) {
final_status = SplitOutputTensors(combined_outputs, batch.get(),
unbatched_tasks);
}
});
}
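// Non-function batching path (plain Batch op): the concatenated inputs and
// the batch index tensor are emitted on the last task's context, every other
// task receives empty outputs, each task gets its guid as an id scalar, and
// all done callbacks are invoked.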
void BatchResourceBase::ProcessBatch(std::unique_ptr<BatchT> batch) const {
if (batch->empty()) {
return;
}
WithContext wc(batch->task(batch->num_tasks() - 1).propagated_context);
const CostMeasurement::Context batching_context{false};
std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements =
CreateCostMeasurements(batching_context);
int64_t processed_size = batch->size();
OpKernelContext* last_task_context =
batch->task(batch->num_tasks() - 1).context;
AsyncOpKernel::DoneCallback last_task_callback =
batch->task(batch->num_tasks() - 1).done_callback;
const std::string& model_name = GetModelName(last_task_context);
const std::string& op_name = last_task_context->op_kernel().name();
auto batch_cost_cleanup = gtl::MakeCleanup([&] {
SplitBatchCostsAndRecordMetrics(
model_name, op_name,
batch_cost_measurements, processed_size, *batch);
});
OP_REQUIRES_OK_ASYNC(last_task_context, ValidateBatch(*batch),
last_task_callback);
const int num_input_edges = batch->task(0).inputs.size();
std::vector<Tensor> concatenated_tensors;
const Status concat_status =
ConcatInputTensors(*batch, {}, last_task_context, &concatenated_tensors);
processed_size = RoundToLowestAllowedBatchSize(batch->size());
OP_REQUIRES_OK_ASYNC(last_task_context, concat_status, last_task_callback);
for (int i = 0; i < num_input_edges; ++i) {
last_task_context->set_output(i, concatenated_tensors[i]);
for (int task_idx = 0; task_idx < batch->num_tasks() - 1; ++task_idx) {
const BatchTask& task = batch->task(task_idx);
TensorShape output_shape(task.inputs[i].shape());
output_shape.set_dim(0, 0);
Tensor* output = nullptr;
OP_REQUIRES_OK_ASYNC(
task.context, task.context->allocate_output(i, output_shape, &output),
task.done_callback);
}
}
for (int task_idx = 0; task_idx < batch->num_tasks() - 1; ++task_idx) {
const BatchTask& task = batch->task(task_idx);
TensorShape index_shape({0, 3});
Tensor* output = nullptr;
OP_REQUIRES_OK_ASYNC(
task.context,
task.context->allocate_output(num_input_edges, index_shape, &output),
task.done_callback);
}
for (int task_idx = 0; task_idx < batch->num_tasks(); ++task_idx) {
const BatchTask& task = batch->task(task_idx);
Tensor* id;
OP_REQUIRES_OK_ASYNC(task.context,
task.context->allocate_output(num_input_edges + 1,
TensorShape({}), &id),
task.done_callback);
id->scalar<int64_t>()() = task.guid;
}
OP_REQUIRES_OK_ASYNC(
last_task_context,
EmitIndexTensor(last_task_context, *batch, num_input_edges),
last_task_callback);
for (int task_idx = 0; task_idx < batch->num_tasks(); ++task_idx) {
batch->mutable_task(task_idx)->done_callback();
}
}
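// Emits the batch index tensor: one [guid, start_offset, end_offset] row per
// task, describing where that task's rows live in the concatenated batch.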
Status BatchResourceBase::EmitIndexTensor(OpKernelContext* context,
const BatchT& batch,
int output_index) {
const TensorShape index_shape({batch.num_tasks(), 3});
Tensor* index = nullptr;
TF_RETURN_IF_ERROR(
context->allocate_output(output_index, index_shape, &index));
auto index_flat = index->shaped<int64_t, 2>({batch.num_tasks(), 3});
size_t offset = 0;
for (int task_idx = 0; task_idx < batch.num_tasks(); ++task_idx) {
const BatchTask& task = batch.task(task_idx);
index_flat(task_idx, 0) = task.guid;
index_flat(task_idx, 1) = offset;
index_flat(task_idx, 2) = offset + task.size();
offset += task.size();
}
return absl::OkStatus();
}
void BatchResourceBase::ProcessBatchCallBack(
std::unique_ptr<Batch<BatchTask>> batch,
std::vector<std::unique_ptr<BatchTask>> unbatched_tasks) {
if (!session_metadata().name().empty()) {
absl::MutexLock lock(&outstanding_batch_mu_);
num_outstanding_batched_items_ -= batch->size();
}
if (!has_process_batch_function_) {
ProcessBatch(std::move(batch));
} else {
ProcessFuncBatch(std::move(batch), std::move(unbatched_tasks));
}
}
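// Returns the batcher queue registered under `queue_name`, creating it on the
// shared (or adaptive) batcher first if it does not exist yet.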
Status BatchResourceBase::LookupOrCreateBatcherQueue(const string& queue_name,
const string& model_name,
const string& op_name,
BatcherQueueT** queue) {
mutex_lock l(batcher_queues_mu_);
auto it = batcher_queues_.find(queue_name);
if (it != batcher_queues_.end()) {
*queue = it->second.get();
return absl::OkStatus();
}
std::unique_ptr<BatcherQueueT> new_queue;
if (batcher_) {
BatcherT::QueueOptions batcher_queue_options = batcher_queue_options_;
batcher_queue_options.model_batch_stats = &GlobalBatchStatsRegistry().model(
model_name, op_name);
TF_RETURN_IF_ERROR(batcher_->AddQueue(
batcher_queue_options,
absl::bind_front(&BatchResourceBase::ProcessBatchCallBack, this),
&new_queue));
} else if (adaptive_batcher_) {
std::function<void(std::unique_ptr<Batch<BatchTask>>)>
reduced_process_batch_callback = [this](std::unique_ptr<BatchT> batch) {
ProcessBatchCallBack(std::move(batch), {});
};
TF_RETURN_IF_ERROR(adaptive_batcher_->AddQueue(
adaptive_batcher_queue_options_, reduced_process_batch_callback,
&new_queue));
} else {
return errors::Internal("No batcher defined.");
}
*queue = new_queue.get();
batcher_queues_[queue_name] = std::move(new_queue);
return absl::OkStatus();
}
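// For every non-zero cost measurement, records per-batch cost metrics and
// attributes the total cost to each task's RequestCost in two flavors: a
// "with smear" share proportional to task size over the unpadded batch size,
// and a "no smear" share proportional to task size over the padded
// (processed) size. TPU costs are additionally registered in the global batch
// stats. Per-task batch metrics (processed size, input size, padding, costs)
// are recorded at the end.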
void BatchResourceBase::SplitBatchCostsAndRecordMetrics(
const std::string& model_name, const std::string& op_name,
const std::vector<std::unique_ptr<CostMeasurement>>&
batch_cost_measurements,
const int64_t processed_size, BatchT& batch) {
absl::flat_hash_map<std::string, absl::Duration> batch_costs;
for (const auto& batch_cost_measurement : batch_cost_measurements) {
if (batch_cost_measurement->GetTotalCost() <= absl::ZeroDuration()) {
continue;
}
if (batch.size() == 0) {
LOG_EVERY_N_SEC(ERROR, 60)
<< "Non-zero cost collected but the batch size is 0.";
return;
}
if (processed_size == 0) {
LOG_EVERY_N_SEC(ERROR, 60)
<< "Non-zero cost collected but the processed size is 0.";
return;
}
const absl::string_view cost_type = batch_cost_measurement->GetCostType();
const absl::Duration total_cost = batch_cost_measurement->GetTotalCost();
batch_costs[cost_type] = total_cost;
RecordBatchCosts(model_name, processed_size,
absl::StrCat(cost_type, kWithSmearSuffix), total_cost);
RecordBatchCosts(model_name, processed_size,
absl::StrCat(cost_type, kNoSmearSuffix),
total_cost / processed_size * batch.size());
if (cost_type == kTpuCostName) {
ModelBatchStats& model_stats = GlobalBatchStatsRegistry().model(
model_name, op_name);
model_stats.batch_size(processed_size).tpu_cost().Register(total_cost);
model_stats.RegisterProcessedSize(batch.size());
}
for (int i = 0; i < batch.num_tasks(); i++) {
RequestCost* request_cost = batch.task(i).request_cost;
if (!request_cost) continue;
const auto cost_with_smear =
total_cost / batch.size() * batch.task(i).size();
const auto cost_no_smear =
total_cost / processed_size * batch.task(i).size();
request_cost->RecordCost(
{{absl::StrCat(cost_type, kWithSmearSuffix), cost_with_smear},
{absl::StrCat(cost_type, kNoSmearSuffix), cost_no_smear}});
}
}
const int64_t padding_size = processed_size - batch.size();
for (int i = 0; i < batch.num_tasks(); i++) {
RequestCost* request_cost = batch.task(i).request_cost;
if (!request_cost) continue;
request_cost->RecordBatchMetrics(RequestCost::BatchMetrics{
processed_size, static_cast<int64_t>(batch.task(i).size()),
padding_size, batch_costs});
}
}
}
} | #include "tensorflow/core/kernels/batching_util/batch_resource_base.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "tensorflow/core/common_runtime/cost_constants.h"
#include "tensorflow/core/common_runtime/cost_measurement.h"
#include "tensorflow/core/common_runtime/cost_measurement_registry.h"
#include "tensorflow/core/common_runtime/request_cost.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
#include "tensorflow/core/kernels/batching_util/batch_scheduler_utils.h"
#include "tensorflow/core/kernels/batching_util/batch_stats.h"
#include "tensorflow/core/kernels/batching_util/shared_batch_scheduler.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
#include "tsl/platform/criticality.h"
#include "tsl/platform/status.h"
namespace tensorflow {
namespace serving {
namespace {
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
TEST(BatchTaskCriticalityTest, CriticalityDefaultsToCritical) {
BatchResourceBase::BatchTask batch_task;
EXPECT_EQ(batch_task.criticality(), tsl::criticality::Criticality::kCritical);
}
#if defined(PLATFORM_GOOGLE)
TEST(BatchTaskCriticalityTest, CriticalitySuccessfullyPropagated) {
std::vector<BatchResourceBase::BatchTask> batch_tasks;
{
tsl::criticality::ScopedCriticality scoped_criticality(
tsl::criticality::Criticality::kCriticalPlus);
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kCriticalPlus);
batch_tasks.push_back(BatchResourceBase::BatchTask());
}
{
tsl::criticality::ScopedCriticality scoped_criticality(
tsl::criticality::Criticality::kCritical);
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kCritical);
batch_tasks.push_back(BatchResourceBase::BatchTask());
}
{
tsl::criticality::ScopedCriticality scoped_criticality(
tsl::criticality::Criticality::kSheddablePlus);
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kSheddablePlus);
batch_tasks.push_back(BatchResourceBase::BatchTask());
}
{
tsl::criticality::ScopedCriticality scoped_criticality(
tsl::criticality::Criticality::kSheddable);
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kSheddable);
batch_tasks.push_back(BatchResourceBase::BatchTask());
}
batch_tasks.push_back(BatchResourceBase::BatchTask());
EXPECT_EQ(batch_tasks[0].criticality(),
tsl::criticality::Criticality::kCriticalPlus);
EXPECT_EQ(batch_tasks[1].criticality(),
tsl::criticality::Criticality::kCritical);
EXPECT_EQ(batch_tasks[2].criticality(),
tsl::criticality::Criticality::kSheddablePlus);
EXPECT_EQ(batch_tasks[3].criticality(),
tsl::criticality::Criticality::kSheddable);
EXPECT_EQ(batch_tasks[4].criticality(),
tsl::criticality::Criticality::kCritical);
}
#endif
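// Cost-measurement fakes reporting fixed totals (test_tpu = 100ms,
// test_gcu = 200ms), used by the SplitBatchCostsAndRecordMetrics tests below.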
class TestTpuCostMeasurement : public CostMeasurement {
public:
using CostMeasurement::CostMeasurement;
absl::Duration GetTotalCost() override { return absl::Milliseconds(100); }
absl::string_view GetCostType() const override { return "test_tpu"; }
};
REGISTER_COST_MEASUREMENT("test_tpu", TestTpuCostMeasurement);
class TestGcuCostMeasurement : public CostMeasurement {
public:
using CostMeasurement::CostMeasurement;
absl::Duration GetTotalCost() override { return absl::Milliseconds(200); }
absl::string_view GetCostType() const override { return "test_gcu"; }
};
REGISTER_COST_MEASUREMENT("test_gcu", TestGcuCostMeasurement);
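// Builds a BatchTask with a single [task_size, 1] double input and the given
// RequestCost pointer (may be null).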
std::unique_ptr<BatchResourceBase::BatchTask> MakeBatchTask(
const int64_t task_size, RequestCost* request_cost) {
auto task = std::make_unique<BatchResourceBase::BatchTask>();
task->inputs.push_back(Tensor(DT_DOUBLE, TensorShape({task_size, 1})));
task->request_cost = request_cost;
return task;
}
TEST(SplitBatchCostsAndRecordMetricsTest, SkipOnNoCostMeasurement) {
BatchResourceBase::BatchT batch;
RequestCost cost;
batch.AddTask(MakeBatchTask(1, &cost));
batch.Close();
std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements;
BatchResourceBase::SplitBatchCostsAndRecordMetrics(
"model_name", "op_name", batch_cost_measurements, 16,
batch);
EXPECT_TRUE(batch.task(0).request_cost->GetCosts().empty());
EXPECT_THAT(batch.task(0).request_cost->GetBatchMetrics(),
::testing::ElementsAre(::testing::FieldsAre(
16, 1, 15,
::testing::IsEmpty())));
}
TEST(SplitBatchCostsAndRecordMetricsTest, SkipOnZeroCost) {
BatchResourceBase::BatchT batch;
RequestCost cost;
batch.AddTask(MakeBatchTask(1, &cost));
batch.Close();
CostMeasurement::Context context{false};
std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements;
batch_cost_measurements.push_back(
CostMeasurementRegistry::CreateByNameOrNull("no_op", context));
BatchResourceBase::SplitBatchCostsAndRecordMetrics(
"model_name", "op_name", batch_cost_measurements, 16,
batch);
EXPECT_TRUE(batch.task(0).request_cost->GetCosts().empty());
EXPECT_THAT(batch.task(0).request_cost->GetBatchMetrics(),
::testing::ElementsAre(::testing::FieldsAre(
16, 1, 15,
::testing::IsEmpty())));
}
TEST(SplitBatchCostsAndRecordMetricsTest, SkipOnZeroBatchSize) {
BatchResourceBase::BatchT batch;
batch.Close();
CostMeasurement::Context context{false};
std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements;
batch_cost_measurements.push_back(
CostMeasurementRegistry::CreateByNameOrNull("test_tpu", context));
BatchResourceBase::SplitBatchCostsAndRecordMetrics(
"model_name", "op_name", batch_cost_measurements, 0,
batch);
}
TEST(SplitBatchCostsAndRecordMetricsTest, SkipOnNoRequestCost) {
BatchResourceBase::BatchT batch;
batch.AddTask(MakeBatchTask(1, nullptr));
batch.AddTask(MakeBatchTask(9, nullptr));
batch.Close();
CostMeasurement::Context context{false};
std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements;
batch_cost_measurements.push_back(
CostMeasurementRegistry::CreateByNameOrNull("test_tpu", context));
BatchResourceBase::SplitBatchCostsAndRecordMetrics(
"model_name", "op_name", batch_cost_measurements, 16,
batch);
EXPECT_EQ(batch.task(0).request_cost, nullptr);
EXPECT_EQ(batch.task(1).request_cost, nullptr);
}
TEST(SplitBatchCostsAndRecordMetricsTest, SplitSingleCostType) {
BatchResourceBase::BatchT batch;
RequestCost cost1, cost2;
batch.AddTask(MakeBatchTask(1, &cost1));
batch.AddTask(MakeBatchTask(9, &cost2));
batch.Close();
CostMeasurement::Context context{false};
std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements;
batch_cost_measurements.push_back(
CostMeasurementRegistry::CreateByNameOrNull("test_tpu", context));
BatchResourceBase::SplitBatchCostsAndRecordMetrics(
"model_name", "op_name", batch_cost_measurements, 20,
batch);
EXPECT_THAT(
batch.task(0).request_cost->GetCosts(),
UnorderedElementsAre(Pair("test_tpu_with_smear", absl::Milliseconds(10)),
Pair("test_tpu_no_smear", absl::Milliseconds(5))));
EXPECT_THAT(
batch.task(0).request_cost->GetBatchMetrics(),
::testing::ElementsAre(::testing::FieldsAre(
20, 1, 10,
UnorderedElementsAre(Pair("test_tpu", absl::Milliseconds(100))))));
EXPECT_THAT(
batch.task(1).request_cost->GetCosts(),
UnorderedElementsAre(Pair("test_tpu_with_smear", absl::Milliseconds(90)),
Pair("test_tpu_no_smear", absl::Milliseconds(45))));
EXPECT_THAT(
batch.task(1).request_cost->GetBatchMetrics(),
::testing::ElementsAre(::testing::FieldsAre(
20, 9, 10,
UnorderedElementsAre(Pair("test_tpu", absl::Milliseconds(100))))));
}
TEST(SplitBatchCostsAndRecordMetricsTest, SplitMultiCostTypes) {
BatchResourceBase::BatchT batch;
RequestCost cost1, cost2;
batch.AddTask(MakeBatchTask(1, &cost1));
batch.AddTask(MakeBatchTask(9, &cost2));
batch.Close();
CostMeasurement::Context context{false};
std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements;
batch_cost_measurements.push_back(
CostMeasurementRegistry::CreateByNameOrNull("test_tpu", context));
batch_cost_measurements.push_back(
CostMeasurementRegistry::CreateByNameOrNull("test_gcu", context));
BatchResourceBase::SplitBatchCostsAndRecordMetrics(
"model_name", "op_name", batch_cost_measurements, 20,
batch);
EXPECT_THAT(
batch.task(0).request_cost->GetCosts(),
UnorderedElementsAre(Pair("test_tpu_with_smear", absl::Milliseconds(10)),
Pair("test_tpu_no_smear", absl::Milliseconds(5)),
Pair("test_gcu_with_smear", absl::Milliseconds(20)),
Pair("test_gcu_no_smear", absl::Milliseconds(10))));
EXPECT_THAT(
batch.task(0).request_cost->GetBatchMetrics(),
::testing::ElementsAre(::testing::FieldsAre(
20, 1, 10,
UnorderedElementsAre(Pair("test_tpu", absl::Milliseconds(100)),
Pair("test_gcu", absl::Milliseconds(200))))));
EXPECT_THAT(
batch.task(1).request_cost->GetCosts(),
UnorderedElementsAre(Pair("test_tpu_with_smear", absl::Milliseconds(90)),
Pair("test_tpu_no_smear", absl::Milliseconds(45)),
Pair("test_gcu_with_smear", absl::Milliseconds(180)),
Pair("test_gcu_no_smear", absl::Milliseconds(90))));
EXPECT_THAT(
batch.task(1).request_cost->GetBatchMetrics(),
::testing::ElementsAre(::testing::FieldsAre(
20, 9, 10,
UnorderedElementsAre(Pair("test_tpu", absl::Milliseconds(100)),
Pair("test_gcu", absl::Milliseconds(200))))));
}
TEST(SplitBatchCostsAndRecordMetricsTest, SplitOnlyNonZeroCostTypes) {
BatchResourceBase::BatchT batch;
RequestCost cost1, cost2;
batch.AddTask(MakeBatchTask(1, &cost1));
batch.AddTask(MakeBatchTask(9, &cost2));
batch.Close();
CostMeasurement::Context context{false};
std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements;
batch_cost_measurements.push_back(
CostMeasurementRegistry::CreateByNameOrNull("no_op", context));
batch_cost_measurements.push_back(
CostMeasurementRegistry::CreateByNameOrNull("test_tpu", context));
BatchResourceBase::SplitBatchCostsAndRecordMetrics(
"model_name", "op_name", batch_cost_measurements, 20,
batch);
EXPECT_THAT(
batch.task(0).request_cost->GetCosts(),
UnorderedElementsAre(Pair("test_tpu_with_smear", absl::Milliseconds(10)),
Pair("test_tpu_no_smear", absl::Milliseconds(5))));
EXPECT_THAT(
batch.task(0).request_cost->GetBatchMetrics(),
::testing::ElementsAre(::testing::FieldsAre(
20, 1, 10,
UnorderedElementsAre(Pair("test_tpu", absl::Milliseconds(100))))));
EXPECT_THAT(
batch.task(1).request_cost->GetCosts(),
UnorderedElementsAre(Pair("test_tpu_with_smear", absl::Milliseconds(90)),
Pair("test_tpu_no_smear", absl::Milliseconds(45))));
EXPECT_THAT(
batch.task(1).request_cost->GetBatchMetrics(),
::testing::ElementsAre(::testing::FieldsAre(
20, 9, 10,
UnorderedElementsAre(Pair("test_tpu", absl::Milliseconds(100))))));
}
TEST(SplitBatchCostsAndRecordMetricsTest, UpdatesGlobalBatchStats) {
class FakeTpuCostMeasurement : public CostMeasurement {
public:
using CostMeasurement::CostMeasurement;
absl::Duration GetTotalCost() override { return absl::Hours(555); }
absl::string_view GetCostType() const override { return kTpuCostName; }
};
CostMeasurement::Context context{ false};
std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements;
batch_cost_measurements.push_back(
std::make_unique<FakeTpuCostMeasurement>(context));
BatchResourceBase::BatchT batch;
batch.AddTask(MakeBatchTask( 1, nullptr));
batch.Close();
const char kModelName[] = "test_updates_global_batch_stats";
BatchResourceBase::SplitBatchCostsAndRecordMetrics(
kModelName, "op_name",
batch_cost_measurements, 17, batch);
EXPECT_EQ(GlobalBatchStatsRegistry()
.model( kModelName, "op_name")
.batch_size(17)
.tpu_cost()
.mean(),
absl::Hours(555));
}
TEST(SplitBatchCostsAndRecordMetricsTest, GlobalBatchStatsProcessedSize) {
class FakeTpuCostMeasurement : public CostMeasurement {
public:
using CostMeasurement::CostMeasurement;
absl::Duration GetTotalCost() override { return absl::Hours(555); }
absl::string_view GetCostType() const override { return kTpuCostName; }
};
CostMeasurement::Context context{ false};
std::vector<std::unique_ptr<CostMeasurement>> batch_cost_measurements;
batch_cost_measurements.push_back(
std::make_unique<FakeTpuCostMeasurement>(context));
BatchResourceBase::BatchT batch;
batch.AddTask(MakeBatchTask( 1, nullptr));
batch.Close();
const char kModelName[] = "test_global_batch_stats_processed_size";
int original_cumulative_processed_size =
GlobalBatchStatsRegistry()
.model( kModelName, "op_name")
.cumulative_processed_size();
BatchResourceBase::SplitBatchCostsAndRecordMetrics(
kModelName, "op_name",
batch_cost_measurements, 17, batch);
EXPECT_EQ(GlobalBatchStatsRegistry()
.model( kModelName, "op_name")
.cumulative_processed_size(),
original_cumulative_processed_size + 1);
BatchResourceBase::BatchT batch2;
batch2.AddTask(MakeBatchTask( 1, nullptr));
batch2.AddTask(MakeBatchTask( 1, nullptr));
batch2.AddTask(MakeBatchTask( 1, nullptr));
batch2.Close();
BatchResourceBase::SplitBatchCostsAndRecordMetrics(
kModelName, "op_name",
batch_cost_measurements, 8, batch2);
EXPECT_EQ(GlobalBatchStatsRegistry()
.model( kModelName, "op_name")
.cumulative_processed_size(),
original_cumulative_processed_size + 4);
}
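// Fixture that sets up a CPU device, a "BatchFunction" NodeDef and OpKernel,
// dummy input tensors and an OpKernelContext (session name "my_model_name"),
// so tests can call RegisterInput on a BatchResourceBase subclass.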
class BatchResourceBaseTest : public ::testing::Test {
protected:
class MyBatchResource : public BatchResourceBase {
public:
using BatchResourceBase::BatchResourceBase;
std::string DebugString() const override { return ""; }
void ProcessFuncBatchImpl(
const BatchResourceBase::BatchTask& ,
absl::Span<const Tensor> ,
std::vector<Tensor>* ,
std::function<void(const absl::Status&)> ) const override {
process_func_batch_called_.Notify();
}
Notification& process_func_batch_called() {
return process_func_batch_called_;
}
private:
mutable Notification process_func_batch_called_;
};
BatchResourceBaseTest() {
device_ = DeviceFactory::NewDevice("CPU", SessionOptions{},
"/job:a/replica:0/task:0");
NodeDefBuilder batch_function_builder("my_batch_node", "BatchFunction");
batch_function_builder.Attr("max_batch_size", 128);
batch_function_builder.Attr("num_batch_threads", 8);
batch_function_builder.Attr("allowed_batch_sizes", {2, 4, 8});
batch_function_builder.Attr("batch_timeout_micros", 100);
batch_function_builder.Attr("max_enqueued_batches", 100);
batch_function_builder.Attr("enable_large_batch_splitting", true);
std::vector<DataType> input_dtypes = {DataType::DT_INT64,
DataType::DT_INT64};
std::vector<NodeDefBuilder::NodeOut> inputs;
inputs.push_back(NodeDefBuilder::NodeOut({"n1", 0, DataType::DT_INT64}));
inputs.push_back(NodeDefBuilder::NodeOut({"n2", 1, DataType::DT_INT64}));
batch_function_builder.Attr("Tin", input_dtypes);
batch_function_builder.Input(inputs);
batch_function_builder.Attr("Tcaptured", {DataType::DT_INT64});
batch_function_builder.Input(std::vector<NodeDefBuilder::NodeOut>{
NodeDefBuilder::NodeOut({"n3", 1, DataType::DT_INT64})});
batch_function_builder.Attr("Tout", {DataType::DT_INT64});
NameAttrList f;
f.set_name("func_to_batch");
batch_function_builder.Attr("f", f);
NodeDef batch_kernel_node_def;
TF_CHECK_OK(batch_function_builder.Finalize(&batch_kernel_node_def));
absl::Status op_kernel_creation_status;
batch_kernel_ =
CreateOpKernel(DEVICE_CPU, device_.get(), device_->GetAllocator({}),
batch_kernel_node_def, TF_GRAPH_DEF_VERSION,
&op_kernel_creation_status);
TF_CHECK_OK(op_kernel_creation_status);
CHECK(batch_kernel_ != nullptr);
input_tensor_ = Tensor(DataType::DT_INT64, TensorShape({5, 2, 1}));
input_tensor_values_ = {
TensorValue(&input_tensor_),
TensorValue(&input_tensor_),
TensorValue(&input_tensor_),
};
session_metadata_.set_name("my_model_name");
params_.device = device_.get();
params_.op_kernel = batch_kernel_.get();
params_.inputs = input_tensor_values_;
params_.session_metadata = &session_metadata_;
context_ = std::make_unique<OpKernelContext>(¶ms_);
}
std::unique_ptr<Device> device_;
std::unique_ptr<OpKernel> batch_kernel_;
Tensor input_tensor_;
std::vector<TensorValue> input_tensor_values_;
SessionMetadata session_metadata_;
OpKernelContext::Params params_;
std::unique_ptr<OpKernelContext> context_;
};
TEST_F(BatchResourceBaseTest, PassesCorrectModelBatchStatsToSbs) {
using BatchTask = BatchResourceBase::BatchTask;
using SharedBatchScheduler = SharedBatchScheduler<BatchTask>;
class MySharedBatchScheduler : public SharedBatchScheduler {
public:
MySharedBatchScheduler() : SharedBatchScheduler::SharedBatchScheduler({}) {}
absl::Status AddQueue(
const QueueOptions& options,
ProcessBatchCallback process_batch_callback,
std::unique_ptr<BatchScheduler<BatchTask>>* queue) override {
queue_options_ = options;
return SharedBatchScheduler::AddQueue(options, process_batch_callback,
queue);
}
const QueueOptions& queue_options() const { return queue_options_; }
private:
QueueOptions queue_options_;
};
auto batcher = std::make_shared<MySharedBatchScheduler>();
MyBatchResource* my_batch_resource = new MyBatchResource(
true,
batcher,
{},
{});
TF_CHECK_OK(my_batch_resource->RegisterInput(
0,
context_.get(),
"batcher_queue_name",
[]() -> absl::StatusOr<std::unique_ptr<BatchResourceBase::BatchTask>> {
return std::make_unique<BatchResourceBase::BatchTask>();
},
[] {}, 0));
EXPECT_EQ(batcher->queue_options().model_batch_stats,
&GlobalBatchStatsRegistry().model( "my_model_name",
"my_batch_node"));
my_batch_resource->process_func_batch_called().WaitForNotificationWithTimeout(
absl::Seconds(1));
my_batch_resource->Unref();
}
TEST_F(BatchResourceBaseTest, ConfiguredBatchPaddingPolicyMetric) {
tensorflow::monitoring::testing::CellReader<std::string> metric(
"/tensorflow/serving/batching/configured_batch_padding_policy");
std::shared_ptr<SharedBatchScheduler<BatchResourceBase::BatchTask>> batcher;
TF_CHECK_OK(
SharedBatchScheduler<BatchResourceBase::BatchTask>::Create({}, &batcher));
MyBatchResource* my_batch_resource = new MyBatchResource(
true,
batcher,
MyBatchResource::BatcherT::QueueOptions{
.batch_padding_policy{kMinimizeTpuCostPerRequestPolicy},
},
{});
TF_CHECK_OK(my_batch_resource->RegisterInput(
0, context_.get(),
"batcher_queue_name",
[]() -> absl::StatusOr<std::unique_ptr<BatchResourceBase::BatchTask>> {
return std::make_unique<BatchResourceBase::BatchTask>();
},
[] {}, 0));
EXPECT_EQ(metric.Read( "my_model_name",
"my_batch_node"),
kMinimizeTpuCostPerRequestPolicy);
my_batch_resource->process_func_batch_called().WaitForNotificationWithTimeout(
absl::Seconds(1));
my_batch_resource->Unref();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/batch_resource_base.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/batch_resource_base_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5f5ef3e3-bb1a-46e0-b920-a077c2c24a8f | cpp | google/arolla | weak_qtype | arolla/qtype/weak_qtype.cc | arolla/qtype/weak_qtype_test.cc | #include "arolla/qtype/weak_qtype.h"
#include <utility>
#include <vector>
#include "absl/base/no_destructor.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/derived_qtype.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/repr.h"
namespace arolla {
namespace {
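// WEAK_FLOAT: a scalar qtype derived from float64 (double) that differs only
// in its name and repr token.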
class WeakFloatQType final : public BasicDerivedQType {
public:
explicit WeakFloatQType()
: BasicDerivedQType(ConstructorArgs{
.name = "WEAK_FLOAT",
.base_qtype = GetQType<double>(),
}) {
CHECK_OK(VerifyDerivedQType(this));
}
static QTypePtr get() {
static const absl::NoDestructor<WeakFloatQType> result;
return result.get();
}
ReprToken UnsafeReprToken(const void* source) const override {
return GenReprTokenWeakFloat(*static_cast<const double*>(source));
}
};
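// OPTIONAL_WEAK_FLOAT: reuses the layout and type_info of
// OptionalValue<double> but reports WEAK_FLOAT as its value qtype (and for
// its value field) and uses the weak-float repr.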
class OptionalWeakFloatQType final : public QType,
public DerivedQTypeInterface {
public:
OptionalWeakFloatQType() : QType(MakeConstructorArgs()) {
CHECK_OK(VerifyDerivedQType(this));
}
static QTypePtr get() {
static const absl::NoDestructor<OptionalWeakFloatQType> result;
return result.get();
}
QTypePtr GetBaseQType() const final { return GetOptionalQType<double>(); }
ReprToken UnsafeReprToken(const void* source) const final {
const auto& value = *static_cast<const OptionalValue<double>*>(source);
if (value.present) {
return ReprToken{
absl::StrCat("optional_", GenReprTokenWeakFloat(value.value).str)};
}
return ReprToken{"optional_weak_float{NA}"};
}
void UnsafeCopy(const void* source, void* destination) const final {
if (source != destination) {
*static_cast<OptionalValue<double>*>(destination) =
*static_cast<const OptionalValue<double>*>(source);
}
}
void UnsafeCombineToFingerprintHasher(const void* source,
FingerprintHasher* hasher) const final {
hasher->Combine(*static_cast<const OptionalValue<double>*>(source));
}
private:
static ConstructorArgs MakeConstructorArgs() {
auto base_qtype = GetOptionalQType<double>();
std::vector<TypedSlot> fields = base_qtype->type_fields();
DCHECK_EQ(fields.size(), 2);
fields[1] = TypedSlot::UnsafeFromOffset(WeakFloatQType::get(),
fields[1].byte_offset());
return ConstructorArgs{
.name = "OPTIONAL_WEAK_FLOAT",
.type_info = base_qtype->type_info(),
.type_layout = base_qtype->type_layout(),
.type_fields = std::move(fields),
.value_qtype = WeakFloatQType::get(),
};
}
};
}
QTypePtr GetWeakFloatQType() { return WeakFloatQType::get(); }
QTypePtr GetOptionalWeakFloatQType() { return OptionalWeakFloatQType::get(); }
namespace {
static const int optional_weak_float_registered =
(RegisterOptionalQType(GetOptionalWeakFloatQType()), 1);
}
} | #include "arolla/qtype/weak_qtype.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/array/qtype/types.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/shape_qtype.h"
#include "arolla/qtype/standard_type_properties/properties.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/testing/repr_token_eq.h"
#include "arolla/util/unit.h"
namespace arolla {
namespace {
using ::absl_testing::StatusIs;
using ::arolla::testing::ReprTokenEq;
using ::testing::MatchesRegex;
TEST(WeakQTypeTest, Smoke) {
auto qtype = GetWeakFloatQType();
EXPECT_EQ(qtype->name(), "WEAK_FLOAT");
auto optional_qtype = GetOptionalWeakFloatQType();
EXPECT_EQ(optional_qtype->name(), "OPTIONAL_WEAK_FLOAT");
}
TEST(WeakQTypeTest, Optional) {
QTypePtr qtype = GetWeakFloatQType();
QTypePtr optional_qtype = GetOptionalWeakFloatQType();
EXPECT_EQ(optional_qtype->value_qtype(), qtype);
EXPECT_TRUE(IsOptionalQType(optional_qtype));
ASSERT_OK_AND_ASSIGN(QTypePtr to_optional_res, ToOptionalQType(qtype));
EXPECT_EQ(to_optional_res, optional_qtype);
EXPECT_EQ(DecayOptionalQType(optional_qtype), qtype);
ASSERT_OK_AND_ASSIGN(TypedValue v, CreateMissingValue(optional_qtype));
ASSERT_EQ(v.GetType(), optional_qtype);
}
TEST(WeakQTypeTest, IsScalarQType) {
EXPECT_TRUE(IsScalarQType(GetWeakFloatQType()));
EXPECT_FALSE(IsScalarQType(GetOptionalWeakFloatQType()));
}
TEST(WeakQTypeTest, GetScalarQType) {
{
ASSERT_OK_AND_ASSIGN(QTypePtr scalar_qtype,
GetScalarQType(GetWeakFloatQType()));
EXPECT_EQ(scalar_qtype, GetWeakFloatQType());
}
{
ASSERT_OK_AND_ASSIGN(QTypePtr scalar_qtype,
GetScalarQType(GetOptionalWeakFloatQType()));
EXPECT_EQ(scalar_qtype, GetWeakFloatQType());
}
}
TEST(WeakQTypeTest, WithScalarQType) {
{
ASSERT_OK_AND_ASSIGN(
QTypePtr res_qtype,
WithScalarQType(GetQType<float>(), GetWeakFloatQType()));
EXPECT_EQ(res_qtype, GetWeakFloatQType());
}
{
ASSERT_OK_AND_ASSIGN(
QTypePtr res_qtype,
WithScalarQType(GetOptionalQType<double>(), GetWeakFloatQType()));
EXPECT_EQ(res_qtype, GetOptionalWeakFloatQType());
}
EXPECT_THAT(WithScalarQType(GetArrayQType<float>(), GetWeakFloatQType()),
StatusIs(absl::StatusCode::kInvalidArgument,
MatchesRegex("Array type with elements of type "
"WEAK_FLOAT is not registered.")));
{
ASSERT_OK_AND_ASSIGN(
QTypePtr res_qtype,
WithScalarQType(GetWeakFloatQType(), GetQType<double>()));
EXPECT_EQ(res_qtype, GetQType<double>());
}
{
ASSERT_OK_AND_ASSIGN(
QTypePtr res_qtype,
WithScalarQType(GetOptionalWeakFloatQType(), GetQType<float>()));
EXPECT_EQ(res_qtype, GetOptionalQType<float>());
}
}
TEST(WeakQTypeTest, DecayContainerQType) {
EXPECT_EQ(DecayContainerQType(GetWeakFloatQType()), GetWeakFloatQType());
EXPECT_EQ(DecayContainerQType(GetOptionalWeakFloatQType()),
GetWeakFloatQType());
}
TEST(WeakQTypeTest, GetShapeQType) {
{
ASSERT_OK_AND_ASSIGN(QTypePtr shape_qtype,
GetShapeQType(GetWeakFloatQType()));
EXPECT_EQ(shape_qtype, GetQType<ScalarShape>());
}
{
ASSERT_OK_AND_ASSIGN(QTypePtr shape_qtype,
GetShapeQType(GetOptionalWeakFloatQType()));
EXPECT_EQ(shape_qtype, GetQType<OptionalScalarShape>());
}
}
TEST(WeakQTypeTest, GetPresenceQType) {
{
ASSERT_OK_AND_ASSIGN(QTypePtr presence_qtype,
GetPresenceQType(GetWeakFloatQType()));
EXPECT_EQ(presence_qtype, GetQType<Unit>());
}
{
ASSERT_OK_AND_ASSIGN(QTypePtr presence_qtype,
GetPresenceQType(GetOptionalWeakFloatQType()));
EXPECT_EQ(presence_qtype, GetOptionalQType<Unit>());
}
}
TEST(WeakQTypeTest, OptionalLike) {
EXPECT_FALSE(IsOptionalLikeQType(GetWeakFloatQType()));
EXPECT_TRUE(IsOptionalLikeQType(GetOptionalWeakFloatQType()));
{
ASSERT_OK_AND_ASSIGN(QTypePtr optional_like_qtype,
ToOptionalLikeQType(GetWeakFloatQType()));
EXPECT_EQ(optional_like_qtype, GetOptionalWeakFloatQType());
}
{
ASSERT_OK_AND_ASSIGN(QTypePtr optional_like_qtype,
ToOptionalLikeQType(GetOptionalWeakFloatQType()));
EXPECT_EQ(optional_like_qtype, GetOptionalWeakFloatQType());
}
}
TEST(WeakQTypeTest, WeakFloatFingerprint) {
const double value_a = 1.5;
const double value_b = 2.5;
const auto float64_qvalue_a = TypedValue::FromValue(value_a);
ASSERT_OK_AND_ASSIGN(
const auto weak_float_qvalue_a1,
TypedValue::FromValueWithQType(value_a, GetWeakFloatQType()));
ASSERT_OK_AND_ASSIGN(
const auto weak_float_qvalue_a2,
TypedValue::FromValueWithQType(value_a, GetWeakFloatQType()));
ASSERT_OK_AND_ASSIGN(
const auto weak_float_qvalue_b,
TypedValue::FromValueWithQType(value_b, GetWeakFloatQType()));
EXPECT_NE(float64_qvalue_a.GetFingerprint(),
weak_float_qvalue_a1.GetFingerprint());
EXPECT_EQ(weak_float_qvalue_a1.GetFingerprint(),
weak_float_qvalue_a2.GetFingerprint());
EXPECT_NE(weak_float_qvalue_a1.GetFingerprint(),
weak_float_qvalue_b.GetFingerprint());
}
TEST(WeakQTypeTest, OptionalWeakFloatFingerprint) {
const OptionalValue<double> value_a(1.5);
const OptionalValue<double> value_b(2.5);
const auto optional_float64_qvalue_a = TypedValue::FromValue(value_a);
ASSERT_OK_AND_ASSIGN(
const auto optional_weak_float_qvalue_a1,
TypedValue::FromValueWithQType(value_a, GetOptionalWeakFloatQType()));
ASSERT_OK_AND_ASSIGN(
const auto optional_weak_float_qvalue_a2,
TypedValue::FromValueWithQType(value_a, GetOptionalWeakFloatQType()));
ASSERT_OK_AND_ASSIGN(
const auto optional_weak_float_qvalue_b,
TypedValue::FromValueWithQType(value_b, GetOptionalWeakFloatQType()));
EXPECT_NE(optional_float64_qvalue_a.GetFingerprint(),
optional_weak_float_qvalue_a1.GetFingerprint());
EXPECT_EQ(optional_weak_float_qvalue_a1.GetFingerprint(),
optional_weak_float_qvalue_a2.GetFingerprint());
EXPECT_NE(optional_weak_float_qvalue_a1.GetFingerprint(),
optional_weak_float_qvalue_b.GetFingerprint());
}
TEST(WeakQTypeTest, WeakFloatRepr) {
ASSERT_OK_AND_ASSIGN(auto qvalue, TypedValue::FromValueWithQType(
double{1.5}, GetWeakFloatQType()));
EXPECT_THAT(qvalue.GenReprToken(), ReprTokenEq("weak_float{1.5}"));
}
TEST(WeakQTypeTest, OptionalWeakFloatRepr) {
ASSERT_OK_AND_ASSIGN(
auto qvalue, TypedValue::FromValueWithQType(OptionalValue<double>(1.5),
GetOptionalWeakFloatQType()));
EXPECT_THAT(qvalue.GenReprToken(), ReprTokenEq("optional_weak_float{1.5}"));
}
TEST(WeakQTypeTest, OptionalWeakFloatMissingValueRepr) {
ASSERT_OK_AND_ASSIGN(
auto qvalue, TypedValue::FromValueWithQType(OptionalValue<double>(),
GetOptionalWeakFloatQType()));
EXPECT_THAT(qvalue.GenReprToken(), ReprTokenEq("optional_weak_float{NA}"));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/weak_qtype.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/weak_qtype_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
90efec1f-b7a5-4704-ad96-5a68bd299536 | cpp | tensorflow/tensorflow | best_fit_repacker | third_party/xla/xla/service/memory_space_assignment/best_fit_repacker.cc | third_party/xla/xla/service/memory_space_assignment/best_fit_repacker_test.cc | #include "xla/service/memory_space_assignment/best_fit_repacker.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/functional/any_invocable.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/service/heap_simulator/allocation_block.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
namespace {
bool IsSliced(const AllocationBlock* block) {
return block->original_slice_data.has_value();
}
template <typename T>
std::vector<const AllocationBlock*> SortAllocationBlocks(const T& container) {
std::vector<const AllocationBlock*> result;
result.insert(result.end(), container.begin(), container.end());
absl::c_sort(
result, [](const AllocationBlock* lhs, const AllocationBlock* rhs) {
return std::make_tuple(lhs->inclusive_start_time, lhs->end_time,
lhs->initial_offset, lhs->size) <
std::make_tuple(rhs->inclusive_start_time, rhs->end_time,
rhs->initial_offset, rhs->size);
});
return result;
}
const SlicedAllocationData* GetSlicedAllocationDataPointer(
const std::optional<SlicedAllocationData>& sliced_allocation_data) {
if (!sliced_allocation_data.has_value()) {
return nullptr;
}
return &(*sliced_allocation_data);
}
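// Best-fit repacker built on GlobalDecreasingSizeBestFitHeap: imports the
// existing allocation blocks (including colocations and sliced allocations),
// finds chunks for every buffer interval and, if the resulting heap fits in
// max_size_, writes the new offsets back into the blocks.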
class BestFitRepacker
: public GlobalDecreasingSizeBestFitHeap<AllocationBlock> {
public:
BestFitRepacker(
const memory_space_assignment::MemorySpaceAssignmentBestFitRepacker::
BestFitRepackOptions& options,
SliceTimePermutationIterator::Ty slice_time_permutation_iterator_type,
int64_t max_size, int64_t alignment)
: GlobalDecreasingSizeBestFitHeap<AllocationBlock>(
alignment, kCustom,
(options.buffer_interval_compare ? options.buffer_interval_compare
: DefaultBufferIntervalCompare()),
slice_time_permutation_iterator_type),
validate_(options.validate),
max_size_(max_size) {}
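  // Converts the incoming allocation blocks into (sliced) buffer intervals,
  // marking only one representative block per colocation cycle as needing
  // allocation, and logs summary repacking statistics.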
void ImportAllocationBlocks(absl::Span<AllocationBlock*> allocations) {
allocation_blocks_ = allocations;
for (AllocationBlock* allocation_block : allocation_blocks_) {
bool need_allocation = true;
CHECK_NE(allocation_block->next_colocated, nullptr);
for (AllocationBlock* colocated = allocation_block->next_colocated;
colocated != allocation_block;
colocated = colocated->next_colocated) {
auto aliased_it = full_buffer_interval_map_.find(colocated);
if (aliased_it != full_buffer_interval_map_.end() &&
aliased_it->second.need_allocation) {
aliased_it->second.colocations.push_back(allocation_block);
need_allocation = false;
break;
}
}
full_buffer_interval_map_.insert(
std::make_pair(allocation_block,
BufferInterval{allocation_block,
allocation_block->size,
allocation_block->inclusive_start_time,
allocation_block->end_time,
{},
need_allocation}));
}
for (AllocationBlock* allocation_block : allocation_blocks_) {
BufferInterval& full_buffer_interval =
full_buffer_interval_map_[allocation_block];
SlicedBufferInterval& sliced_buffer_interval =
sliced_buffer_interval_map_
.insert(std::make_pair(
allocation_block, SlicedBufferInterval::CreateMutableInterval(
full_buffer_interval)))
.first->second;
if (IsSliced(allocation_block)) {
const SlicedAllocationData& original_slice_data =
allocation_block->original_slice_data.value();
CHECK(!original_slice_data.slices_sorted_by_offset.empty());
sliced_buffer_interval.Slice(original_slice_data.SizesSortedByOffset());
sliced_buffer_interval.UpdateInclusiveSliceStartTimes(
original_slice_data.SortedInclusiveStartTimes());
}
buffer_intervals_[allocation_block] =
sliced_buffer_interval.IntervalForMakeFreeChunks(
sliced_buffer_interval.num_slices() - 1);
}
CHECK_EQ(allocation_blocks_.size(), buffer_intervals_.size());
CHECK_EQ(allocation_blocks_.size(), full_buffer_interval_map_.size());
CHECK_EQ(allocation_blocks_.size(), sliced_buffer_interval_map_.size());
VLOG(2) << [&]() -> std::string {
int sliced_blocks = 0;
int colocation_sets = 0;
int colocation_sets_with_multiple_sliced_blocks = 0;
absl::flat_hash_set<const AllocationBlock*> seen_blocks;
for (const auto& allocation_and_buffer_interval : buffer_intervals_) {
const AllocationBlock* block = allocation_and_buffer_interval.first;
const BufferInterval& min_buffer_interval =
allocation_and_buffer_interval.second;
if (IsSliced(block)) {
++sliced_blocks;
}
if (seen_blocks.contains(block)) {
continue;
}
seen_blocks.insert(block);
++colocation_sets;
int num_sliced_colocations = (IsSliced(block) ? 1 : 0);
for (const AllocationBlock* colocation :
GetTransitiveColocations(min_buffer_interval)) {
seen_blocks.insert(colocation);
if (IsSliced(colocation)) {
++num_sliced_colocations;
}
}
if (num_sliced_colocations > 1) {
++colocation_sets_with_multiple_sliced_blocks;
}
}
return absl::StrCat(
"Imported repacking stats: num_blocks=", allocation_blocks_.size(),
"; num_sliced_blocks=", sliced_blocks,
"; num_colocation_sets=", colocation_sets,
"; num_colocation_sets_with_multiple_sliced_blocks=",
colocation_sets_with_multiple_sliced_blocks);
}();
}
BufferIntervalCompare DefaultBufferIntervalCompare() const {
return LessThanByKey([this](const BufferInterval& x) {
const BufferInterval& full_buffer_interval =
full_buffer_interval_map_.at(x.buffer);
int64_t full_buffer_interval_end = full_buffer_interval.end;
for (auto colocation : GetTransitiveColocations(x)) {
full_buffer_interval_end =
std::max(full_buffer_interval_end,
full_buffer_interval_map_.at(colocation).end);
}
return std::make_tuple(
full_buffer_interval.start - full_buffer_interval_end,
-full_buffer_interval.size, std::cref(*full_buffer_interval.buffer));
});
}
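  // Records the chunks chosen for one allocation block: updates the running
  // heap size and the interval tree, and remembers the block's new minimum
  // offset (plus, for sliced blocks, the repacked slice data) to be applied
  // if repacking succeeds.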
void CommitChunks(const AllocationBlock* allocation_block,
const std::vector<Chunk>& chunks) {
VLOG(3) << "Committing repack chunks for " << allocation_block->ToString();
int64_t new_offset = -1;
std::optional<SlicedAllocationData> repacked_slice_data = std::nullopt;
if (IsSliced(allocation_block)) {
const SlicedAllocationData& original_slice_data =
allocation_block->original_slice_data.value();
CHECK_EQ(chunks.size(),
original_slice_data.slices_sorted_by_offset.size());
repacked_slice_data = SlicedAllocationData();
repacked_slice_data->slices_sorted_by_offset.reserve(chunks.size());
std::vector<int64_t> sorted_inclusive_start_times =
original_slice_data.SortedInclusiveStartTimes();
for (int i = 0; i < chunks.size(); ++i) {
const Chunk& chunk = chunks[i];
int64_t start_time = sorted_inclusive_start_times[i];
result_.heap_size = result_.UpdatedHeapSize(chunk);
VLOG(3) << "Adding sliced chunk " << chunk.ToString() << " at ["
<< start_time << ", " << allocation_block->end_time << "]";
interval_tree_.Add(start_time, allocation_block->end_time, chunk);
new_offset = (new_offset == -1 ? chunk.offset
: std::min(new_offset, chunk.offset));
repacked_slice_data->slices_sorted_by_offset.push_back(
AllocatedSlice({chunk.size, chunk.offset, start_time}));
}
absl::c_sort(repacked_slice_data->slices_sorted_by_offset,
[](const AllocatedSlice& lhs, const AllocatedSlice& rhs) {
return lhs.offset < rhs.offset;
});
} else {
CHECK_EQ(chunks.size(), 1);
new_offset = chunks.front().offset;
result_.heap_size = result_.UpdatedHeapSize(chunks.front());
VLOG(3) << "Adding unsliced chunk " << chunks.front().ToString()
<< " at [" << allocation_block->inclusive_start_time << ", "
<< allocation_block->end_time << ")";
interval_tree_.Add(allocation_block->inclusive_start_time,
allocation_block->end_time, chunks.front());
}
CHECK_NE(new_offset, -1);
CHECK(!new_offsets_.contains(allocation_block));
new_offsets_[allocation_block] = new_offset;
if (repacked_slice_data.has_value()) {
CHECK(IsSliced(allocation_block));
CHECK(!new_repacked_slicing_.contains(allocation_block));
new_repacked_slicing_[allocation_block] = *repacked_slice_data;
}
}
struct SlicedColocationData {
SlicedBufferInterval* sliced_buffer_interval;
SlicedAllocationFinder sliced_allocation_finder;
std::vector<Chunk> chunks;
};
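  // Finds chunks for the given buffer interval and all of its transitive
  // colocations. Sliced colocations must be placeable at the same offset as
  // the main buffer; unsliced colocations are committed at the chosen minimum
  // offset with their full size.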
void FindAndCommitChunks(BufferInterval* min_buffer_interval) {
const AllocationBlock* allocation_block = min_buffer_interval->buffer;
SlicedBufferInterval& sliced_buffer_interval =
sliced_buffer_interval_map_.at(allocation_block);
int64_t max_colocation_size = GetMaxColocationSize(*min_buffer_interval);
absl::flat_hash_map<const AllocationBlock*, SlicedColocationData>
sliced_buffer_map;
for (auto colocation :
SortAllocationBlocks(GetTransitiveColocations(*min_buffer_interval))) {
if (IsSliced(colocation)) {
SlicedBufferInterval& colocation_sliced_buffer_interval =
sliced_buffer_interval_map_.at(colocation);
SlicedAllocationFinder sliced_colocation_finder =
CreateSlicedAllocationFinder(
colocation_sliced_buffer_interval, max_colocation_size,
-1,
SliceTimePermutationIterator::CreateForRepack(
slice_time_permutation_iterator_type(),
GetSlicedAllocationDataPointer(
colocation->original_slice_data)),
&SlicedAllocationFinder::AllOffsetsAllowed);
sliced_buffer_map.insert(std::make_pair(
colocation,
SlicedColocationData{&colocation_sliced_buffer_interval,
std::move(sliced_colocation_finder),
{}}));
}
}
auto is_offset_allowed = [this, &sliced_buffer_map](int64_t offset) {
for (auto& block_and_colocation_data : sliced_buffer_map) {
SlicedColocationData& sliced_colocation_data =
block_and_colocation_data.second;
auto colocation_chunks =
sliced_colocation_data.sliced_allocation_finder.FindForOffset(
offset);
colocation_chunks = PostProcessFindChunkCandidatesResult(
*sliced_colocation_data.sliced_buffer_interval,
std::move(colocation_chunks));
if (colocation_chunks.empty()) {
return false;
}
sliced_colocation_data.chunks = std::move(colocation_chunks);
}
return true;
};
SlicedAllocationFinder finder = CreateSlicedAllocationFinder(
sliced_buffer_interval, max_colocation_size, -1,
SliceTimePermutationIterator::CreateForRepack(
slice_time_permutation_iterator_type(),
GetSlicedAllocationDataPointer(
allocation_block->original_slice_data)),
is_offset_allowed);
std::vector<Chunk> chunks = PostProcessFindChunkCandidatesResult(
sliced_buffer_interval, finder.Find());
int64_t min_offset =
absl::c_min_element(chunks, [](const Chunk& lhs, const Chunk& rhs) {
return lhs.offset < rhs.offset;
})->offset;
CommitChunks(allocation_block, chunks);
for (auto colocation : GetTransitiveColocations(*min_buffer_interval)) {
if (IsSliced(colocation)) {
CommitChunks(colocation, sliced_buffer_map.at(colocation).chunks);
} else {
const BufferInterval& colocation_full_buffer_interval =
full_buffer_interval_map_[colocation];
CommitChunks(colocation,
{Chunk::FromOffsetSize(
min_offset, colocation_full_buffer_interval.size)});
}
}
}
void AddToChunkMap(const AllocationBlock* buffer, Chunk chunk) override {
LOG(FATAL) << "We should never get here.";
}
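  // Allocates every buffer interval that still needs allocation, in sorted
  // order, committing chunks as it goes.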
absl::StatusOr<Result> Finish() override {
std::vector<BufferInterval> sorted_buffer_intervals =
GetSortedBufferIntervals();
for (auto& buffer_interval : sorted_buffer_intervals) {
if (!buffer_interval.need_allocation) {
continue;
}
FindAndCommitChunks(&buffer_interval);
}
Result result;
result.heap_size = result_.heap_size;
result.heap_results.emplace_back(result_);
return result;
}
struct TimedChunk {
std::string id;
const AllocationBlock* block;
int64_t start_inclusive;
int64_t end_inclusive;
Chunk chunk;
bool Overlaps(const TimedChunk& timed_chunk) {
if (timed_chunk.start_inclusive > end_inclusive ||
timed_chunk.end_inclusive < start_inclusive) {
return false;
}
return chunk.OverlapsWith(timed_chunk.chunk);
}
};
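  // Post-repacking sanity check: expands every block (and each slice of a
  // sliced block) into timed chunks and LOG(FATAL)s if any two overlap in
  // both time and offset range.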
void DebuggingValidate() {
std::vector<TimedChunk> timed_chunks;
for (const AllocationBlock* block : allocation_blocks_) {
if (IsSliced(block)) {
for (int i = 0;
i < block->repacked_slice_data->slices_sorted_by_offset.size();
++i) {
const AllocatedSlice& slice =
block->repacked_slice_data->slices_sorted_by_offset[i];
timed_chunks.push_back(
TimedChunk{absl::StrCat(((int64_t)block), "_slice_", i), block,
slice.inclusive_start_time, block->end_time,
Chunk::FromOffsetSize(slice.offset, slice.size)});
}
} else {
timed_chunks.push_back(
TimedChunk{absl::StrCat(((int64_t)block)), block,
block->inclusive_start_time, block->end_time,
Chunk::FromOffsetSize(block->offset, block->size)});
}
}
bool overlap_found = false;
for (int i = 0; i < timed_chunks.size(); ++i) {
for (int j = i + 1; j < timed_chunks.size(); ++j) {
if (timed_chunks[i].Overlaps(timed_chunks[j])) {
overlap_found = true;
LOG(ERROR) << "Allocation block overlap\n"
<< " " << timed_chunks[i].block->ToString()
<< "\n " << timed_chunks[j].block->ToString();
}
}
}
if (overlap_found) {
LOG(FATAL) << "Allocation overlap found";
}
}
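  // Runs Finish(); if the resulting heap fits within max_size_, writes the
  // new offsets (and repacked slicings for sliced blocks) back into the
  // AllocationBlocks and optionally validates the result.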
bool Repack() {
TF_CHECK_OK(Finish().status());
bool success = result_.heap_size <= max_size_;
if (!success) {
VLOG(1) << "Repacking unsuccessful with heap size " << result_.heap_size;
return false;
}
for (AllocationBlock* block : allocation_blocks_) {
CHECK(new_offsets_.contains(block));
block->offset = new_offsets_[block];
if (!IsSliced(block)) {
continue;
}
CHECK(new_repacked_slicing_.contains(block));
block->repacked_slice_data = std::move(new_repacked_slicing_[block]);
}
if (validate_) {
DebuggingValidate();
}
if (VLOG_IS_ON(2)) {
for (AllocationBlock* block : allocation_blocks_) {
VLOG(2) << "AllocationBlock after repacking: " << block->ToString();
}
}
VLOG(1) << "Repacking successful with heap size " << result_.heap_size;
return true;
}
private:
bool validate_ = false;
int64_t max_size_;
absl::Span<AllocationBlock*> allocation_blocks_;
absl::flat_hash_map<const AllocationBlock*, BufferInterval>
full_buffer_interval_map_;
absl::flat_hash_map<const AllocationBlock*, SlicedBufferInterval>
sliced_buffer_interval_map_;
absl::flat_hash_map<const AllocationBlock*, int64_t> new_offsets_;
absl::flat_hash_map<const AllocationBlock*, SlicedAllocationData>
new_repacked_slicing_;
};
}  // namespace
namespace memory_space_assignment {
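// Public entry point: builds a BestFitRepacker from the stored options and
// runs it over the given allocation blocks.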
absl::StatusOr<bool> MemorySpaceAssignmentBestFitRepacker::Repack(
absl::Span<AllocationBlock*> allocations) {
BestFitRepacker best_fit_repacker = BestFitRepacker(
options_, slice_time_permutation_iterator_type_, max_size_, alignment_);
best_fit_repacker.ImportAllocationBlocks(allocations);
return best_fit_repacker.Repack();
}
}  // namespace memory_space_assignment
} | #include "xla/service/memory_space_assignment/best_fit_repacker.h"
#include <cstdint>
#include "absl/container/flat_hash_map.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/service/heap_simulator/allocation_block.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "tsl/platform/test.h"
namespace xla {
class MemorySpaceAssignmentBestFitRepackerTest : public ::testing::Test {
protected:
MemorySpaceAssignmentBestFitRepackerTest()
: repacker_(100, 1, SliceTimePermutationIterator::Ty::kAll, options_) {}
AllocationBlock* MakeAllocationBlock(int64_t start_time, int64_t end_time,
int64_t size,
int64_t initial_offset = -1) {
allocation_blocks_.push_back(
{start_time, end_time, size, -1, initial_offset,
static_cast<int64_t>(allocation_blocks_.size())});
AllocationBlock* block = &allocation_blocks_.back();
block->next_colocated = block;
return block;
}
std::list<AllocationBlock> allocation_blocks_;
memory_space_assignment::MemorySpaceAssignmentBestFitRepacker::
BestFitRepackOptions options_{true,
nullptr};
memory_space_assignment::MemorySpaceAssignmentBestFitRepacker repacker_;
};
TEST_F(MemorySpaceAssignmentBestFitRepackerTest, Simple) {
std::vector<AllocationBlock*> allocation_blocks;
allocation_blocks.push_back(MakeAllocationBlock(10, 20, 10));
allocation_blocks.push_back(MakeAllocationBlock(5, 25, 15));
EXPECT_TRUE(*repacker_.Repack(absl::MakeSpan(allocation_blocks)));
EXPECT_EQ(allocation_blocks[0]->offset, 15);
EXPECT_EQ(allocation_blocks[1]->offset, 0);
}
TEST_F(MemorySpaceAssignmentBestFitRepackerTest, Colocation) {
std::vector<AllocationBlock*> allocation_blocks;
allocation_blocks.push_back(MakeAllocationBlock(0, 2, 10));
allocation_blocks.push_back(MakeAllocationBlock(10, 20, 10));
allocation_blocks[0]->next_colocated = allocation_blocks[1];
allocation_blocks[1]->next_colocated = allocation_blocks[0];
allocation_blocks.push_back(MakeAllocationBlock(5, 25, 15));
EXPECT_TRUE(*repacker_.Repack(absl::MakeSpan(allocation_blocks)));
EXPECT_EQ(allocation_blocks[0]->offset, 15);
EXPECT_EQ(allocation_blocks[1]->offset, 15);
EXPECT_EQ(allocation_blocks[2]->offset, 0);
}
TEST_F(MemorySpaceAssignmentBestFitRepackerTest, TooLarge) {
std::vector<AllocationBlock*> allocation_blocks;
allocation_blocks.push_back(MakeAllocationBlock(10, 20, 10));
allocation_blocks.push_back(MakeAllocationBlock(5, 25, 15));
allocation_blocks.push_back(MakeAllocationBlock(15, 20, 10));
allocation_blocks.push_back(MakeAllocationBlock(12, 22, 50));
allocation_blocks.push_back(MakeAllocationBlock(10, 18, 20));
EXPECT_FALSE(*repacker_.Repack(absl::MakeSpan(allocation_blocks)));
EXPECT_EQ(allocation_blocks[0]->offset, -1);
EXPECT_EQ(allocation_blocks[1]->offset, -1);
EXPECT_EQ(allocation_blocks[2]->offset, -1);
EXPECT_EQ(allocation_blocks[3]->offset, -1);
EXPECT_EQ(allocation_blocks[4]->offset, -1);
}
TEST_F(MemorySpaceAssignmentBestFitRepackerTest, ColocationDifferentSizes) {
std::vector<AllocationBlock*> allocation_blocks;
allocation_blocks.push_back(MakeAllocationBlock(0, 2, 5));
allocation_blocks.push_back(MakeAllocationBlock(10, 20, 10));
allocation_blocks[0]->next_colocated = allocation_blocks[1];
allocation_blocks[1]->next_colocated = allocation_blocks[0];
allocation_blocks.push_back(MakeAllocationBlock(9, 11, 2));
allocation_blocks.push_back(MakeAllocationBlock(1, 2, 2));
EXPECT_TRUE(*repacker_.Repack(absl::MakeSpan(allocation_blocks)));
EXPECT_EQ(allocation_blocks[0]->offset, 0);
EXPECT_EQ(allocation_blocks[1]->offset, 0);
EXPECT_EQ(allocation_blocks[2]->offset, 10);
EXPECT_EQ(allocation_blocks[3]->offset, 5);
}
TEST_F(MemorySpaceAssignmentBestFitRepackerTest, RepackedSlicesFit) {
std::vector<AllocationBlock*> allocation_blocks;
allocation_blocks.push_back(MakeAllocationBlock(0, 15, 2));
allocation_blocks.push_back(MakeAllocationBlock(11, 21, 3));
allocation_blocks.push_back(MakeAllocationBlock(16, 25, 4));
allocation_blocks.back()->original_slice_data = SlicedAllocationData(
{{AllocatedSlice{2, -1, 16}, AllocatedSlice{2, -1, 22}}});
allocation_blocks.push_back(MakeAllocationBlock(26, 33, 4));
allocation_blocks.back()->original_slice_data = SlicedAllocationData(
{{AllocatedSlice{2, -1, 26}, AllocatedSlice{2, -1, 30}}});
allocation_blocks.push_back(MakeAllocationBlock(19, 25, 2));
allocation_blocks.back()->original_slice_data = SlicedAllocationData(
{{AllocatedSlice{1, -1, 19}, AllocatedSlice{1, -1, 22}}});
allocation_blocks.push_back(MakeAllocationBlock(26, 29, 2));
absl::flat_hash_map<AllocationBlock*, int> sort_keys;
for (int i = 0; i < allocation_blocks.size(); ++i) {
sort_keys[allocation_blocks[i]] = i;
}
options_.buffer_interval_compare = LessThanByKey(
[sort_keys](const memory_space_assignment::
MemorySpaceAssignmentBestFitRepacker::BufferInterval& x) {
return sort_keys.at(x.buffer);
});
repacker_ = memory_space_assignment::MemorySpaceAssignmentBestFitRepacker(
100, 1, SliceTimePermutationIterator::Ty::kAll, options_);
EXPECT_TRUE(*repacker_.Repack(absl::MakeSpan(allocation_blocks)));
EXPECT_EQ(allocation_blocks[0]->offset, 0);
EXPECT_FALSE(allocation_blocks[0]->repacked_slice_data.has_value());
EXPECT_EQ(allocation_blocks[1]->offset, 2);
EXPECT_FALSE(allocation_blocks[1]->repacked_slice_data.has_value());
EXPECT_EQ(allocation_blocks[2]->offset, 0);
ASSERT_TRUE(allocation_blocks[2]->repacked_slice_data.has_value());
EXPECT_EQ(*allocation_blocks[2]->repacked_slice_data,
(SlicedAllocationData(
{{AllocatedSlice{2, 0, 16}, AllocatedSlice{2, 2, 22}}})));
EXPECT_EQ(allocation_blocks[3]->offset, 0);
ASSERT_TRUE(allocation_blocks[3]->repacked_slice_data.has_value());
EXPECT_EQ(*allocation_blocks[3]->repacked_slice_data,
(SlicedAllocationData(
{{AllocatedSlice{2, 0, 26}, AllocatedSlice{2, 2, 30}}})));
EXPECT_EQ(allocation_blocks[4]->offset, 4);
ASSERT_TRUE(allocation_blocks[4]->repacked_slice_data.has_value());
EXPECT_EQ(*allocation_blocks[4]->repacked_slice_data,
(SlicedAllocationData(
{{AllocatedSlice{1, 4, 22}, AllocatedSlice{1, 5, 19}}})));
EXPECT_EQ(allocation_blocks[5]->offset, 2);
EXPECT_FALSE(allocation_blocks[5]->repacked_slice_data.has_value());
}
TEST_F(MemorySpaceAssignmentBestFitRepackerTest,
SliceTimePermutationsMatchOriginalSizeTimeMapping) {
std::vector<AllocationBlock*> allocation_blocks;
allocation_blocks.push_back(MakeAllocationBlock(0, 10, 2, 0));
allocation_blocks.push_back(MakeAllocationBlock(5, 15, 3, 2));
allocation_blocks.back()->original_slice_data = SlicedAllocationData(
{{AllocatedSlice{2, 2, 5}, AllocatedSlice{1, 4, 11}}});
allocation_blocks.push_back(MakeAllocationBlock(5, 15, 2, 6));
absl::flat_hash_map<AllocationBlock*, int> sort_keys;
for (int i = 0; i < allocation_blocks.size(); ++i) {
sort_keys[allocation_blocks[i]] = i;
}
options_.buffer_interval_compare = LessThanByKey(
[sort_keys](const memory_space_assignment::
MemorySpaceAssignmentBestFitRepacker::BufferInterval& x) {
return sort_keys.at(x.buffer);
});
repacker_ = memory_space_assignment::MemorySpaceAssignmentBestFitRepacker(
100, 1, SliceTimePermutationIterator::Ty::kAll, options_);
EXPECT_TRUE(*repacker_.Repack(absl::MakeSpan(allocation_blocks)));
ASSERT_TRUE(allocation_blocks[1]->repacked_slice_data.has_value());
ASSERT_EQ(
allocation_blocks[1]->repacked_slice_data->slices_sorted_by_offset.size(),
2);
const AllocatedSlice& slice_with_smaller_offset =
allocation_blocks[1]->repacked_slice_data->slices_sorted_by_offset[0];
const AllocatedSlice& slice_with_larger_offset =
allocation_blocks[1]->repacked_slice_data->slices_sorted_by_offset[1];
ASSERT_GT(slice_with_smaller_offset.size, slice_with_larger_offset.size);
const AllocatedSlice& larger_slice = slice_with_smaller_offset;
const AllocatedSlice& smaller_slice = slice_with_larger_offset;
ASSERT_LT(larger_slice.inclusive_start_time,
smaller_slice.inclusive_start_time);
}
TEST_F(MemorySpaceAssignmentBestFitRepackerTest,
SliceTimePermutationsMatchOriginalSizeTimeMapping2) {
std::vector<AllocationBlock*> allocation_blocks;
allocation_blocks.push_back(MakeAllocationBlock(0, 10, 2, 0));
allocation_blocks.push_back(MakeAllocationBlock(11, 20, 2, 4));
allocation_blocks.push_back(MakeAllocationBlock(5, 15, 3, 1));
allocation_blocks.back()->original_slice_data = SlicedAllocationData(
{{AllocatedSlice{1, 1, 5}, AllocatedSlice{2, 2, 11}}});
absl::flat_hash_map<AllocationBlock*, int> sort_keys;
for (int i = 0; i < allocation_blocks.size(); ++i) {
sort_keys[allocation_blocks[i]] = i;
}
options_.buffer_interval_compare = LessThanByKey(
[sort_keys](const memory_space_assignment::
MemorySpaceAssignmentBestFitRepacker::BufferInterval& x) {
return sort_keys.at(x.buffer);
});
repacker_ = memory_space_assignment::MemorySpaceAssignmentBestFitRepacker(
100, 1, SliceTimePermutationIterator::Ty::kAll, options_);
EXPECT_TRUE(*repacker_.Repack(absl::MakeSpan(allocation_blocks)));
EXPECT_EQ(allocation_blocks[0]->offset, 0);
EXPECT_FALSE(allocation_blocks[0]->repacked_slice_data.has_value());
EXPECT_EQ(allocation_blocks[1]->offset, 0);
EXPECT_FALSE(allocation_blocks[1]->repacked_slice_data.has_value());
EXPECT_EQ(allocation_blocks[2]->offset, 2);
ASSERT_TRUE(allocation_blocks[2]->repacked_slice_data.has_value());
EXPECT_EQ(*allocation_blocks[2]->repacked_slice_data,
(SlicedAllocationData(
{{AllocatedSlice{1, 2, 5}, AllocatedSlice{2, 3, 11}}})));
}
TEST_F(MemorySpaceAssignmentBestFitRepackerTest, SlicedColocationsFit) {
std::vector<AllocationBlock*> allocation_blocks;
allocation_blocks.push_back(MakeAllocationBlock(0, 12, 2));
allocation_blocks.push_back(MakeAllocationBlock(0, 8, 2));
allocation_blocks.push_back(MakeAllocationBlock(5, 11, 2));
allocation_blocks.push_back(MakeAllocationBlock(15, 20, 5));
allocation_blocks.back()->original_slice_data = SlicedAllocationData(
{{AllocatedSlice{2, -1, 15}, AllocatedSlice{3, -1, 18}}});
allocation_blocks.push_back(MakeAllocationBlock(9, 14, 4));
allocation_blocks.back()->original_slice_data = SlicedAllocationData(
{{AllocatedSlice{2, -1, 9}, AllocatedSlice{2, -1, 12}}});
allocation_blocks.back()->next_colocated = allocation_blocks[3];
allocation_blocks[3]->next_colocated = allocation_blocks.back();
allocation_blocks.push_back(MakeAllocationBlock(15, 17, 5));
absl::flat_hash_map<AllocationBlock*, int> sort_keys;
for (int i = 0; i < allocation_blocks.size(); ++i) {
sort_keys[allocation_blocks[i]] = i;
}
options_.buffer_interval_compare = LessThanByKey(
[sort_keys](const memory_space_assignment::
MemorySpaceAssignmentBestFitRepacker::BufferInterval& x) {
return sort_keys.at(x.buffer);
});
repacker_ = memory_space_assignment::MemorySpaceAssignmentBestFitRepacker(
100, 1, SliceTimePermutationIterator::Ty::kAll, options_);
EXPECT_TRUE(*repacker_.Repack(absl::MakeSpan(allocation_blocks)));
EXPECT_EQ(allocation_blocks[0]->offset, 0);
EXPECT_FALSE(allocation_blocks[0]->repacked_slice_data.has_value());
EXPECT_EQ(allocation_blocks[1]->offset, 2);
EXPECT_FALSE(allocation_blocks[1]->repacked_slice_data.has_value());
EXPECT_EQ(allocation_blocks[2]->offset, 4);
ASSERT_FALSE(allocation_blocks[2]->repacked_slice_data.has_value());
EXPECT_EQ(allocation_blocks[3]->offset, 2);
ASSERT_TRUE(allocation_blocks[3]->repacked_slice_data.has_value());
EXPECT_EQ(*allocation_blocks[3]->repacked_slice_data,
(SlicedAllocationData(
{{AllocatedSlice{2, 2, 15}, AllocatedSlice{3, 4, 18}}})));
EXPECT_EQ(allocation_blocks[4]->offset, 2);
ASSERT_TRUE(allocation_blocks[4]->repacked_slice_data.has_value());
EXPECT_EQ(*allocation_blocks[4]->repacked_slice_data,
(SlicedAllocationData(
{{AllocatedSlice{2, 2, 9}, AllocatedSlice{2, 4, 12}}})));
EXPECT_EQ(allocation_blocks[5]->offset, 4);
EXPECT_FALSE(allocation_blocks[5]->repacked_slice_data.has_value());
}
TEST_F(MemorySpaceAssignmentBestFitRepackerTest,
SlicedColocationsPermutationsMatchOriginalSizeTimeMapping) {
std::vector<AllocationBlock*> allocation_blocks;
allocation_blocks.push_back(MakeAllocationBlock(1, 5, 2));
allocation_blocks.push_back(MakeAllocationBlock(11, 15, 2));
allocation_blocks.push_back(MakeAllocationBlock(1, 10, 5));
allocation_blocks.back()->original_slice_data = SlicedAllocationData(
{{AllocatedSlice{2, 2, 6}, AllocatedSlice{3, 4, 1}}});
allocation_blocks.push_back(MakeAllocationBlock(15, 20, 5));
allocation_blocks.back()->original_slice_data = SlicedAllocationData(
{{AllocatedSlice{2, 2, 11}, AllocatedSlice{3, 4, 16}}});
allocation_blocks.back()->next_colocated = allocation_blocks[2];
allocation_blocks[2]->next_colocated = allocation_blocks.back();
absl::flat_hash_map<AllocationBlock*, int> sort_keys;
for (int i = 0; i < allocation_blocks.size(); ++i) {
sort_keys[allocation_blocks[i]] = i;
}
options_.buffer_interval_compare = LessThanByKey(
[sort_keys](const memory_space_assignment::
MemorySpaceAssignmentBestFitRepacker::BufferInterval& x) {
return sort_keys.at(x.buffer);
});
repacker_ = memory_space_assignment::MemorySpaceAssignmentBestFitRepacker(
100, 1, SliceTimePermutationIterator::Ty::kAll, options_);
EXPECT_TRUE(*repacker_.Repack(absl::MakeSpan(allocation_blocks)));
EXPECT_EQ(allocation_blocks[0]->offset, 0);
EXPECT_FALSE(allocation_blocks[0]->repacked_slice_data.has_value());
EXPECT_EQ(allocation_blocks[1]->offset, 0);
EXPECT_FALSE(allocation_blocks[1]->repacked_slice_data.has_value());
EXPECT_EQ(allocation_blocks[2]->offset, 2);
ASSERT_TRUE(allocation_blocks[2]->repacked_slice_data.has_value());
EXPECT_EQ(*allocation_blocks[3]->repacked_slice_data,
(SlicedAllocationData(
{{AllocatedSlice{2, 2, 11}, AllocatedSlice{3, 4, 16}}})));
EXPECT_EQ(allocation_blocks[3]->offset, 2);
ASSERT_TRUE(allocation_blocks[3]->repacked_slice_data.has_value());
EXPECT_EQ(*allocation_blocks[3]->repacked_slice_data,
(SlicedAllocationData(
{{AllocatedSlice{2, 2, 11}, AllocatedSlice{3, 4, 16}}})));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_assignment/best_fit_repacker.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_assignment/best_fit_repacker_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3393ebe0-0d02-4328-8e95-0a40a35cb820 | cpp | tensorflow/tensorflow | tree_reduction_rewriter | third_party/xla/xla/service/gpu/transforms/tree_reduction_rewriter.cc | third_party/xla/xla/service/cpu/tests/tree_reduction_rewriter_test.cc | #include "xla/service/gpu/transforms/tree_reduction_rewriter.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iterator>
#include <memory>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/numeric/bits.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/gpu/reduction_utils.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
absl::InlinedVector<int64_t, 2> GetSortedReducedDims(
HloReduceInstruction *reduce) {
absl::InlinedVector<int64_t, 2> reduced_dims{reduce->dimensions().begin(),
reduce->dimensions().end()};
absl::c_sort(reduced_dims);
return reduced_dims;
}
bool IsMinMaxReduction(HloReduceInstruction *reduce) {
HloComputation *called = &reduce->to_apply()[0];
if (auto reduction_kind = MatchReductionComputation(called)) {
return reduction_kind == ReductionKind::MAX ||
reduction_kind == ReductionKind::MIN;
}
return false;
}
}  // namespace
class ReductionRewriterVisitor : public DfsHloRewriteVisitor {
public:
explicit ReductionRewriterVisitor(se::GpuComputeCapability gpu_version)
: gpu_version_(gpu_version) {}
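  // Rewrites one reduce: reductions that are already race free are left
  // alone; an oversized batched dimension of a row reduction is split off
  // first; otherwise the innermost reduced dimension is split into an
  // inner/outer reduction pair.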
absl::Status HandleReduce(HloInstruction *hlo) override {
auto *reduce = Cast<HloReduceInstruction>(hlo);
VLOG(3) << "Reduction instruction: " << reduce->ToString();
const HloModuleConfig &config = reduce->GetModule()->config();
if (!MatchReductionForSplit(reduce, config)) {
return absl::OkStatus();
}
ReductionDimensions reduction_dims =
GetReductionKindAndContiguousComponents(*hlo);
if (ReductionIsRaceFree(config, reduction_dims)) {
VLOG(3) << "Base case: dimensions fit";
return absl::OkStatus();
}
auto sorted_dims_to_reduce = GetSortedReducedDims(reduce);
CHECK_LE(sorted_dims_to_reduce.size(), 2);
if (reduction_dims.is_row_reduction &&
reduction_dims
.dimensions[ReductionDimensions::kRowMajorReducedDimension] >
BatchedReductionRaceFreeBound()) {
VLOG(2) << "Splitting batched dimension reduce into a separate reduction";
return RewriteBatchDimensionLargerThanTile(reduce, reduction_dims,
sorted_dims_to_reduce);
}
SplitParams split_params =
ComputeSplitParams(reduce, reduction_dims, sorted_dims_to_reduce);
return SplitReductionDimension(reduce, split_params, sorted_dims_to_reduce);
}
private:
bool MatchReductionForSplit(HloReduceInstruction *reduce,
const HloModuleConfig &config) {
bool reductions_via_mlir_disabled =
config.debug_options().xla_gpu_mlir_emitter_level() < 4;
if (reductions_via_mlir_disabled && IsMinMaxReduction(reduce)) {
VLOG(1) << "Not performing tree expansion on min/max-reduction: "
<< reduce->ToString()
<< " since min/max operations are associative";
return false;
}
if (!IsReductionFromOrToContiguousDimensions(*reduce)) {
VLOG(3) << "Is not a reduction from or to contiguous dimensions";
return false;
}
VLOG(3) << "Perform rewrite";
return true;
}
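  // Heuristic deciding whether the two split factors should be swapped; the
  // evenness checks (see maybe_vectorized) try to keep row reductions
  // vectorizable.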
bool ShouldSwapInnerAndOuterReducedMinorDimension(uint64_t k1, uint64_t k2,
uint64_t n,
int64_t race_free_bound,
bool is_row_reduction) {
CHECK(k1 >= k2);
if (k1 > race_free_bound) {
return false;
}
if (is_row_reduction) {
bool maybe_vectorized = k2 % 2 == 0 && n % 2 == 0;
if (maybe_vectorized) {
return k2 * 2 < k1 || k1 % 2 == 0;
}
return n % 2 == 0 || k1 % 2 != 0;
}
return true;
}
struct SplitParams {
int64_t k1;
int64_t k2;
int64_t dim;
};
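  // Chooses a factorization k1 * k2 of the innermost reduced dimension
  // (padding as needed and minimizing that padding) while respecting the
  // race-free bound.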
SplitParams ComputeSplitParams(
HloReduceInstruction *reduce, const ReductionDimensions &reduction_dims,
absl::Span<const int64_t> sorted_dims_to_reduce) {
absl::Span<int64_t const> input_shape_dims =
reduce->inputs()[0]->shape().dimensions();
int64_t reduced_dim = sorted_dims_to_reduce.back();
int64_t reduced_dim_size = input_shape_dims[reduced_dim];
VLOG(3) << "reduced dim size = " << reduced_dim_size;
uint64_t k2 =
static_cast<uint64_t>(std::floor(std::sqrt(reduced_dim_size)));
int64_t race_free_bound = ReductionDimensionRaceFreeBound(
reduce->GetModule()->config(), reduction_dims);
if (k2 > race_free_bound) {
k2 = race_free_bound;
}
uint64_t minimum_padding = (k2 - reduced_dim_size % k2) % k2;
uint64_t best_k1 = (reduced_dim_size + minimum_padding) / k2;
for (uint64_t i = k2 - 1; i > k2 / 2; --i) {
uint64_t padding = (i - reduced_dim_size % i) % i;
if (padding < minimum_padding ||
(padding == minimum_padding && absl::has_single_bit(i))) {
minimum_padding = padding;
best_k1 = (reduced_dim_size + padding) / i;
}
}
uint64_t padded_k = reduced_dim_size + minimum_padding;
uint64_t best_k2 = padded_k / best_k1;
if (ShouldSwapInnerAndOuterReducedMinorDimension(
best_k1, best_k2, reduced_dim_size, race_free_bound,
reduction_dims.is_row_reduction)) {
std::swap(best_k1, best_k2);
}
return SplitParams{static_cast<int64_t>(best_k1),
static_cast<int64_t>(best_k2), reduced_dim};
}
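  // Pads the reduced dimension up to k1 * k2 with the reduction's init
  // value, bitcasts it into two dimensions, and replaces the reduce with an
  // inner reduce over the k2 sub-dimension followed by an outer reduce over
  // the remaining k1 dimension.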
absl::Status SplitReductionDimension(
HloReduceInstruction *reduce, const SplitParams &split_params,
absl::Span<const int64_t> sorted_dims_to_reduce) {
absl::Span<int64_t const> reduce_input_dims =
reduce->inputs()[0]->shape().dimensions();
int64_t split_dim_size = reduce_input_dims[split_params.dim];
VLOG(2) << "dimension to split = " << split_params.dim << " with "
<< split_dim_size << " elements into " << split_params.k1 << " by "
<< split_params.k2;
HloInstruction::InstructionVector padded_inputs(reduce->inputs().begin(),
reduce->inputs().end());
auto padded_size = split_params.k1 * split_params.k2;
absl::InlinedVector<int64_t, 3> padded_dimensions(reduce_input_dims.begin(),
reduce_input_dims.end());
if (split_dim_size != padded_size) {
padded_dimensions[split_params.dim] = padded_size;
PaddingConfig padding_config =
MakeNoPaddingConfig(reduce_input_dims.size());
padding_config.mutable_dimensions(split_params.dim)
->set_edge_padding_high(padded_size - split_dim_size);
for (int input_idx = 0; input_idx < padded_inputs.size(); ++input_idx) {
auto &reduction_input = padded_inputs[input_idx];
Shape padded_shape = ShapeUtil::MakeShape(
reduction_input->shape().element_type(), padded_dimensions);
VLOG(2) << "Generated padded shape: " << padded_shape.ToString();
reduction_input = reduce->parent()->AddInstruction(
HloInstruction::CreatePad(padded_shape, reduction_input,
reduce->init_values()[input_idx],
padding_config),
&reduction_input->metadata());
}
}
absl::InlinedVector<int64_t, 3> reshaped_dimensions;
int64_t input_rank = reduce_input_dims.size();
for (int64_t dim_idx = 0; dim_idx < input_rank; dim_idx++) {
if (dim_idx == split_params.dim) {
reshaped_dimensions.push_back(split_params.k1);
reshaped_dimensions.push_back(split_params.k2);
} else {
reshaped_dimensions.push_back(padded_dimensions[dim_idx]);
}
}
absl::InlinedVector<int64_t, 2> inner_reduce_dims(
sorted_dims_to_reduce.begin(), sorted_dims_to_reduce.end());
auto split_dim_it = std::find(inner_reduce_dims.begin(),
inner_reduce_dims.end(), split_params.dim);
*split_dim_it += 1;
absl::InlinedVector<int64_t, 1> outer_reduce_dims{
split_params.dim -
std::distance(inner_reduce_dims.begin(), split_dim_it)};
absl::InlinedVector<int64_t, 3> inner_reduce_shape =
RemoveElements(inner_reduce_dims, reshaped_dimensions);
HloInstruction::InstructionVector reshaped_padded_inputs;
absl::InlinedVector<Shape, 2> inner_reduce_shapes;
for (HloInstruction *padded_input : padded_inputs) {
Shape reshaped_shape = ShapeUtil::MakeShape(
padded_input->shape().element_type(), reshaped_dimensions);
HloInstruction *reshaped_padded_input = reduce->parent()->AddInstruction(
HloInstruction::CreateBitcast(reshaped_shape, padded_input),
&padded_input->metadata());
VLOG(2) << "Generated reshape: " << reshaped_padded_input->ToString();
reshaped_padded_inputs.push_back(reshaped_padded_input);
inner_reduce_shapes.push_back(ShapeUtil::MakeShape(
padded_input->shape().element_type(), inner_reduce_shape));
}
HloInstruction *inner_reduce = reduce->parent()->AddInstruction(
HloInstruction::CreateReduce(
ShapeUtil::MakeMaybeTupleShape(inner_reduce_shapes),
reshaped_padded_inputs, reduce->init_values(), inner_reduce_dims,
reduce->to_apply()),
&reduce->metadata());
VLOG(1) << "Generated inner reduction: " << inner_reduce->ToString();
std::unique_ptr<HloInstruction> outer_reduce = HloInstruction::CreateReduce(
reduce->shape(), inner_reduce, reduce->init_values(), outer_reduce_dims,
reduce->to_apply());
VLOG(1) << "Generated outer reduction: " << outer_reduce->ToString();
return ReplaceWithNewInstruction(reduce, std::move(outer_reduce));
}
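  // For row reductions whose batched dimension exceeds the race-free bound:
  // reduce the minor-most dimension first, then reduce the remaining batch
  // dimension (dimension 0) in a second reduce.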
absl::Status RewriteBatchDimensionLargerThanTile(
HloReduceInstruction *hlo,
const ReductionDimensions &reduction_dimensions,
absl::Span<const int64_t> sorted_dims_to_reduce) {
CHECK(reduction_dimensions.is_row_reduction);
absl::InlinedVector<Shape, 2> tuple_shapes;
int64_t minor_reduction_dim = sorted_dims_to_reduce.back();
for (HloInstruction *input : hlo->inputs()) {
tuple_shapes.push_back(
ShapeUtil::DeleteDimension(minor_reduction_dim, input->shape()));
}
HloInstruction *inner_reduce =
hlo->parent()->AddInstruction(HloInstruction::CreateReduce(
ShapeUtil::MakeMaybeTupleShape(tuple_shapes), hlo->inputs(),
hlo->init_values(), {minor_reduction_dim}, hlo->to_apply()));
VLOG(1) << "Inner reduction: " << inner_reduce->ToString();
std::unique_ptr<HloInstruction> out = HloInstruction::CreateReduce(
hlo->shape(), inner_reduce, hlo->init_values(), {0}, hlo->to_apply());
VLOG(1) << "Generated: " << out->ToString();
return ReplaceWithNewInstruction(hlo, std::move(out));
}
se::GpuComputeCapability gpu_version_;
};
absl::StatusOr<bool> TreeReductionRewriter::Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) {
VLOG(5) << "Rewriter input: " << module->ToString();
TF_ASSIGN_OR_RETURN(bool changed,
ReductionRewriterVisitor(gpu_version_)
.RunOnModule(module, execution_threads));
VLOG(5) << "Rewriter output: " << module->ToString();
return changed;
}
}  // namespace gpu
} | #include "xla/service/cpu/tests/cpu_codegen_test.h"
#include "tsl/platform/test.h"
namespace xla {
namespace cpu {
namespace {
class TreeReductionRewriterTest : public CpuCodegenTest {};
TEST_F(TreeReductionRewriterTest, SimpleRewrite) {
const char* hlo_text = R"(
HloModule SimpleReduction
add {
acc = f32[] parameter(1)
op = f32[] parameter(0)
ROOT out = f32[] add(acc, op)
}
ENTRY main {
input = f32[1000] parameter(0)
zero = f32[] constant(0)
ROOT out = f32[] reduce(input, zero), dimensions={0}, to_apply=add
}
)";
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %main (input: f32[1000]) -> f32[] {
; CHECK-NEXT: [[INSTR_0:%[^ ]+]] = f32[1000]{0} parameter(0)
; CHECK-NEXT: [[INSTR_1:%[^ ]+]] = f32[] constant(0)
; CHECK-NEXT: [[INSTR_2:%[^ ]+]] = f32[32]{0} reduce-window([[INSTR_0]], [[INSTR_1]]), window={size=32 stride=32 pad=12_12}, to_apply=[[INSTR_3:%[^ ]+]]
; CHECK-NEXT: ROOT [[INSTR_4:%[^ ]+]] = f32[] reduce([[INSTR_2]], [[INSTR_1]]), dimensions={0}, to_apply=[[INSTR_3]]
)");
}
TEST_F(TreeReductionRewriterTest, RewriteMultipleDimensions) {
const char* hlo_text = R"(
HloModule SimpleReduction
add {
acc = f32[] parameter(1)
op = f32[] parameter(0)
ROOT out = f32[] add(acc, op)
}
ENTRY main {
input = f32[100,100] parameter(0)
zero = f32[] constant(0)
ROOT out = f32[] reduce(input, zero), dimensions={0,1}, to_apply=add
}
)";
MatchOptimizedHlo(hlo_text,
R"(
; CHECK: [[INSTR_0:%[^ ]+]] = f32[4,4]{1,0} reduce-window([[INSTR_1:%[^ ]+]], [[INSTR_2:%[^ ]+]]), window={size=32x32 stride=32x32 pad=14_14x14_14}, to_apply=[[INSTR_3:%[^ ]+]]
; CHECK-NEXT: ROOT [[INSTR_4:%[^ ]+]] = f32[] reduce([[INSTR_0]], [[INSTR_2]]), dimensions={0,1}, to_apply=[[INSTR_3]]
)");
}
TEST_F(TreeReductionRewriterTest, RewriteMultipleDimensionsSingleSmaller) {
const char* hlo_text = R"(
HloModule SimpleReduction
add {
acc = f32[] parameter(1)
op = f32[] parameter(0)
ROOT out = f32[] add(acc, op)
}
ENTRY main {
input = f32[1000,31] parameter(0)
zero = f32[] constant(0)
ROOT out = f32[] reduce(input, zero), dimensions={0,1}, to_apply=add
}
)";
MatchOptimizedHlo(hlo_text,
R"(
; CHECK: [[INSTR_0:%[^ ]+]] = f32[32,1]{1,0} reduce-window([[INSTR_1:%[^ ]+]], [[INSTR_2:%[^ ]+]]), window={size=32x31 stride=32x31 pad=12_12x0_0}, to_apply=[[INSTR_3:%[^ ]+]]
; CHECK-NEXT: ROOT [[INSTR_4:%[^ ]+]] = f32[] reduce([[INSTR_0]], [[INSTR_2]]), dimensions={0,1}, to_apply=[[INSTR_3]]
)");
}
TEST_F(TreeReductionRewriterTest, NoRewriteRequired) {
const char* hlo_text = R"(
HloModule SimpleReduction
add {
acc = f32[] parameter(1)
op = f32[] parameter(0)
ROOT out = f32[] add(acc, op)
}
ENTRY main {
input = f32[31,31] parameter(0)
zero = f32[] constant(0)
ROOT out = f32[] reduce(input, zero), dimensions={0,1}, to_apply=add
}
)";
MatchOptimizedHlo(hlo_text,
R"(
)");
}
TEST_F(TreeReductionRewriterTest, NoRewriteRequiredZeroDim) {
const char* hlo_text = R"(
HloModule SimpleReduction
add {
acc = f32[] parameter(1)
op = f32[] parameter(0)
ROOT out = f32[] add(acc, op)
}
ENTRY main {
input = f32[3000,0] parameter(0)
zero = f32[] constant(0)
ROOT out = f32[] reduce(input, zero), dimensions={0,1}, to_apply=add
}
)";
MatchOptimizedHlo(hlo_text,
R"(
)");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/tree_reduction_rewriter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/tests/tree_reduction_rewriter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2e44108f-0ab0-4cba-9936-a5afa356d05c | cpp | tensorflow/tensorflow | collective_param_resolver_distributed | tensorflow/core/distributed_runtime/collective_param_resolver_distributed.cc | tensorflow/core/distributed_runtime/collective_param_resolver_distributed_test.cc | #include "tensorflow/core/distributed_runtime/collective_param_resolver_distributed.h"
#include "absl/strings/escaping.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/distributed_runtime/cancellable_call.h"
#include "tensorflow/core/distributed_runtime/device_resolver_distributed.h"
#include "tensorflow/core/distributed_runtime/worker_cache.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace {
class CompleteGroupCall : public CancellableCall {
public:
CompleteGroupCall(const CollGroupParams& group,
const DeviceAttributes& device,
CancellationManager* cancel_mgr,
const string& remote_worker, WorkerCacheInterface* wc)
: CancellableCall(cancel_mgr, remote_worker, wc) {
req_.set_group_key(group.group_key);
req_.set_group_size(group.group_size);
req_.set_device_type(group.device_type.type_string());
*req_.mutable_device_attributes() = device;
}
~CompleteGroupCall() override {}
void IssueCall(const StatusCallback& done) override {
wi_->CompleteGroupAsync(&opts_, &req_, &resp_, done);
}
CompleteGroupRequest req_;
CompleteGroupResponse resp_;
};
class CompleteInstanceCall : public CancellableCall {
public:
CompleteInstanceCall(const CollGroupParams& group,
const CollInstanceParams& instance,
const string& node_name, const string& device_name,
bool is_source, CancellationManager* cancel_mgr,
const string& remote_worker, WorkerCacheInterface* wc)
: CancellableCall(cancel_mgr, remote_worker, wc) {
req_.set_name(node_name);
req_.set_type(instance.type);
req_.set_step_id(instance.step_id);
req_.set_data_type(instance.data_type);
instance.shape.AsProto(req_.mutable_shape());
req_.set_group_key(group.group_key);
req_.set_group_size(group.group_size);
req_.set_instance_key(instance.instance_key);
req_.set_device_type(group.device_type.type_string());
for (int32_t offset : instance.impl_details.subdiv_offsets) {
req_.add_subdiv_offset(offset);
}
req_.set_device(device_name);
req_.set_is_source(is_source);
}
~CompleteInstanceCall() override {}
void IssueCall(const StatusCallback& done) override {
wi_->CompleteInstanceAsync(&opts_, &req_, &resp_, done);
}
CompleteInstanceRequest req_;
CompleteInstanceResponse resp_;
};
}  // namespace
CollectiveParamResolverDistributed::CollectiveParamResolverDistributed(
const ConfigProto& config, const DeviceMgr* dev_mgr,
DeviceResolverDistributed* dev_resolver,
NcclCommunicatorInterface* nccl_communicator,
WorkerCacheInterface* worker_cache, const string& task_name)
: CollectiveParamResolverLocal(config, dev_mgr, dev_resolver,
nccl_communicator, task_name),
worker_cache_(worker_cache),
group_leader_(task_name == config.experimental().collective_group_leader()
? ""
: config.experimental().collective_group_leader()) {
VLOG(1) << "CompleteParamResolverDistributed ctor task={" << task_name
<< "} config.collective_group_leader={"
<< config.experimental().collective_group_leader() << "}"
<< " config.collective_nccl={"
<< config.experimental().collective_nccl() << "}";
}
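// Resolves the full collective parameters for one device: completes the
// group (locally or via the group leader) if requested, propagates the
// members' device attributes to the device resolver, then completes the
// instance.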
void CollectiveParamResolverDistributed::CompleteParamsAsync(
const DeviceAttributes& device, CollectiveParams* cp,
CancellationManager* cancel_mgr, const StatusCallback& done) {
VLOG(1) << "CompleteParams distributed " << device.name() << " for " << cp
<< ": " << cp->ToString();
if (cp->run_group_initialization) {
CompleteGroupDistributed(
device, &cp->group, cancel_mgr,
[this, device, cp, cancel_mgr, done](Status s) {
if (s.ok()) {
std::vector<DeviceAttributes> devices;
devices.reserve(cp->group.group_size);
for (const CollGroupMember& m : cp->group.members) {
devices.push_back(m.device);
}
s = dev_resolver_->UpdateDeviceAttributes(devices);
}
if (s.ok()) {
CompleteInstanceDistributed(device.name(), cp, cancel_mgr, done);
} else {
done(s);
}
});
} else {
auto s = LookupGroup(cp->group.group_key, &cp->group);
if (s.ok()) {
CompleteInstanceDistributed(device.name(), cp, cancel_mgr, done);
} else {
done(s);
}
}
}
void CollectiveParamResolverDistributed::CompleteGroupAsync(
const DeviceAttributes& device, CollGroupParams* group_params,
CancellationManager* cancel_mgr, const StatusCallback& done) {
CompleteGroupDistributed(device, group_params, cancel_mgr, done);
}
void CollectiveParamResolverDistributed::CompleteInstanceAsync(
const CompleteInstanceRequest* request, CompleteInstanceResponse* response,
CancellationManager* cancel_mgr, const StatusCallback& done) {
GroupRec* gr = GetCachedGroup(request->group_key());
if (gr == nullptr) {
done(errors::FailedPrecondition(
"group ", request->group_key(),
" not found. This normally means the server has restarted"));
return;
}
CollectiveParams* cp = new CollectiveParams;
{
mutex_lock l(gr->mu);
if (!gr->status.ok()) {
done(gr->status);
return;
} else if (gr->group.members.size() != gr->group.group_size) {
done(errors::FailedPrecondition(
"group ", request->group_key(),
" failed to resolve. This normally means the server has restarted"));
return;
}
cp->group = gr->group;
}
cp->name = request->name();
cp->instance.type = CollectiveType(request->type());
cp->instance.instance_key = request->instance_key();
cp->instance.step_id = request->step_id();
cp->instance.data_type = request->data_type();
cp->instance.shape = TensorShape(request->shape());
cp->is_source = request->is_source();
for (int32_t offset : request->subdiv_offset()) {
cp->instance.impl_details.subdiv_offsets.push_back(offset);
}
StatusCallback done_and_cleanup = [cp, done](const Status& s) {
done(s);
cp->Unref();
};
CompleteInstanceDistributed(
request->device(), cp, cancel_mgr,
[this, cp, response, done_and_cleanup](Status status) {
if (status.ok()) {
bool created_irec;
InstanceRec* ir = GetOrCreateInstanceRec(cp, &created_irec);
{
mutex_lock l(ir->mu);
status = ir->status;
if (ir->status.ok()) {
response->set_instance_key(cp->instance.instance_key);
response->set_source_rank(ir->source_rank);
}
}
}
done_and_cleanup(status);
});
}
CollectiveParamResolverDistributed::GroupRec*
CollectiveParamResolverDistributed::GetCachedGroup(int32_t group_key) {
mutex_lock l(group_mu_);
auto it = group_table_.find(group_key);
if (it == group_table_.end()) {
return nullptr;
}
return it->second.get();
}
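// Installs the group record received from the group leader into the local
// group table, checking that the response is well formed and that any
// pre-existing entry carries the same communicator key.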
Status CollectiveParamResolverDistributed::UpdateGroupCache(
const CompleteGroupResponse& resp) {
std::unique_ptr<GroupRec> gr(new GroupRec);
{
mutex_lock grl(gr->mu);
gr->group.device_type = DeviceType(resp.device_type());
gr->group.group_key = resp.group_key();
gr->group.group_size = resp.group_size();
gr->group.num_tasks = resp.num_tasks();
if (resp.device_attributes().empty()) {
return errors::Internal(
"CompleteGroupResponse device_attributes is empty. Make sure you're "
"running the same version of Tensorflow on all workers.");
}
if (resp.device_attributes_size() != gr->group.group_size) {
return errors::Internal(
"CompleteGroupResponse group_size doesn't match device_name list");
}
gr->group.members.reserve(resp.device_attributes().size());
for (const DeviceAttributes& device : resp.device_attributes()) {
CollGroupMember member;
member.device = device;
gr->group.members.push_back(std::move(member));
gr->incarnations_by_device_name[device.name()] = device.incarnation();
}
gr->group.runtime_details.communicator_key = resp.communicator_key();
FinishGroup(gr.get());
}
GroupRec* previous_gr = nullptr;
{
mutex_lock l(group_mu_);
auto it = group_table_.find(resp.group_key());
if (it == group_table_.end()) {
VLOG(2) << "UpdateGroupCache: communicator_key="
<< absl::CEscape(resp.communicator_key());
group_table_[gr->group.group_key] = std::move(gr);
} else {
previous_gr = it->second.get();
}
}
if (previous_gr != nullptr) {
mutex_lock grl(previous_gr->mu);
if (previous_gr->group.runtime_details.communicator_key !=
resp.communicator_key()) {
return errors::Internal(
"UpdateGroupCache: CompleteGroupResponse for group ",
resp.group_key(),
" gives communicator_key=", absl::CEscape(resp.communicator_key()),
" but cache already holds communicator_key=",
absl::CEscape(previous_gr->group.runtime_details.communicator_key));
}
}
return absl::OkStatus();
}
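// Completes the group locally when this task is the leader or the group is
// already cached; otherwise issues a CompleteGroup RPC to the leader and
// caches the response before finishing locally.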
void CollectiveParamResolverDistributed::CompleteGroupDistributed(
const DeviceAttributes& device, CollGroupParams* group_params,
CancellationManager* cancel_mgr, const StatusCallback& done) {
VLOG(1) << "CompleteGroupDistributed group_key=" << group_params->group_key
<< " dev: " << device.name()
<< " is_leader=" << (group_leader_.empty());
if (group_leader_.empty()) {
return CompleteGroupLocal(device, group_params, cancel_mgr, done);
} else if (GetCachedGroup(group_params->group_key) == nullptr) {
CompleteGroupCall* call = new CompleteGroupCall(
*group_params, device, cancel_mgr, group_leader_, worker_cache_);
CancellationToken abortion_token =
abortion_cancel_mgr_.get_cancellation_token();
bool already_aborted = !abortion_cancel_mgr_.RegisterCallback(
abortion_token, [call] { call->Cancel(); });
if (already_aborted) {
done(errors::Cancelled("collective ops already aborted"));
delete call;
return;
}
call->Start([this, device, group_params, call, cancel_mgr, abortion_token,
done](const Status& s) {
abortion_cancel_mgr_.DeregisterCallback(abortion_token);
if (s.ok()) {
Status status = UpdateGroupCache(call->resp_);
if (status.ok()) {
CompleteGroupLocal(device, group_params, cancel_mgr, done);
} else {
done(status);
}
} else {
done(s);
}
delete call;
});
return;
} else {
return CompleteGroupLocal(device, group_params, cancel_mgr, done);
}
}
bool CollectiveParamResolverDistributed::InstanceIsCached(
int32_t group_key, const CollInstanceParams& instance) {
mutex_lock l(instance_mu_);
auto group_it = instance_table_.find(group_key);
if (group_it == instance_table_.end()) {
return false;
}
auto instance_it =
group_it->second.find({instance.step_id, instance.instance_key});
return instance_it != group_it->second.end();
}
Status CollectiveParamResolverDistributed::UpdateInstanceCache(
CollectiveParams* cp, const CompleteInstanceResponse& resp) {
int32_t source_rank = resp.source_rank();
bool created_irec;
InstanceRec* ir = GetOrCreateInstanceRec(cp, &created_irec);
mutex_lock l(ir->mu);
if (!ir->status.ok()) {
return ir->status;
}
if (ir->source_rank != source_rank) {
if (ir->source_rank >= 0) {
ir->status = errors::Internal(
"UpdateInstanceCache: CompleteInstanceResponse for instance ",
cp->instance.instance_key, " gives source_rank=", source_rank,
" but cache already holds value=", ir->source_rank);
return ir->status;
}
ir->source_rank = source_rank;
}
if (ir->known_count < cp->group.group_size) {
ir->known_count = cp->group.group_size;
const int ir_known_size = ir->known.size();
if (ir_known_size != cp->group.group_size) {
ir->status = errors::Internal(
"UpdateInstanceCache:: CompleteInstanceResponse for instance ",
cp->instance.instance_key, " has known.size()=", ir->known.size(),
" < group_size=", cp->group.group_size);
return ir->status;
}
for (int i = 0; i < ir_known_size; ++i) {
ir->known[i] = true;
}
}
return ir->status;
}
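// Completes the instance locally when this task is the leader or the
// instance is already cached; otherwise issues a CompleteInstance RPC to
// the leader and updates the instance cache from the response.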
void CollectiveParamResolverDistributed::CompleteInstanceDistributed(
const string& device, CollectiveParams* cp, CancellationManager* cancel_mgr,
const StatusCallback& done) {
if (group_leader_.empty()) {
return CompleteInstanceLocal(device, cp, done);
} else if (InstanceIsCached(cp->group.group_key, cp->instance)) {
return CompleteInstanceLocal(device, cp, done);
} else {
CompleteInstanceCall* call = new CompleteInstanceCall(
cp->group, cp->instance, cp->name, device, cp->is_source, cancel_mgr,
group_leader_, worker_cache_);
CancellationToken abortion_token =
abortion_cancel_mgr_.get_cancellation_token();
bool already_aborted = !abortion_cancel_mgr_.RegisterCallback(
abortion_token, [call] { call->Cancel(); });
if (already_aborted) {
done(errors::Cancelled("collective ops already aborted"));
delete call;
return;
}
call->Start([this, device, cp, call, abortion_token, done](Status s) {
abortion_cancel_mgr_.DeregisterCallback(abortion_token);
if (s.ok()) {
s = UpdateInstanceCache(cp, call->resp_);
}
if (s.ok()) {
CompleteInstanceLocal(device, cp, done);
} else {
done(s);
}
delete call;
});
return;
}
}
void CollectiveParamResolverDistributed::StartAbort(const Status& s) {
{
mutex_lock l(status_mu_);
if (!status_.ok()) {
VLOG(2) << "CollectiveParamResolverDistributed already aborted. Ignoring "
"subsequent abortion with status: "
<< s;
return;
}
status_ = s;
}
StartAbortLocal(s);
abortion_cancel_mgr_.StartCancel();
}
} | #include "tensorflow/core/distributed_runtime/collective_param_resolver_distributed.h"
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/test_collective_executor_mgr.h"
#include "tensorflow/core/distributed_runtime/device_resolver_distributed.h"
#include "tensorflow/core/distributed_runtime/test_utils.h"
#include "tensorflow/core/distributed_runtime/worker.h"
#include "tensorflow/core/distributed_runtime/worker_env.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/random.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace {
static std::unique_ptr<Device> NewDevice(const string& type,
const string& name) {
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& attr) : Device(nullptr, attr) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes) override { return nullptr; }
};
DeviceAttributes attr;
attr.set_name(name);
attr.set_device_type(type);
attr.mutable_locality()->set_numa_node(3);
attr.set_incarnation(random::New64());
return std::make_unique<FakeDevice>(attr);
}
class FakeCache : public TestWorkerCache {
public:
bool GetDeviceLocalityNonBlocking(const string& device,
DeviceLocality* locality) override {
return false;
}
void GetDeviceLocalityAsync(const string& device, DeviceLocality* locality,
StatusCallback done) override {
string task_name;
string dev_part;
if (!DeviceNameUtils::SplitDeviceName(device, &task_name, &dev_part)) {
done(errors::Internal("failed to parse device name"));
return;
}
auto it = workers_.find(task_name);
if (it == workers_.end()) {
done(errors::Internal("failed to find worker ", task_name));
return;
}
WorkerInterface* wi = it->second;
GetStatusRequest req;
GetStatusResponse resp;
Status status = wi->GetStatus(&req, &resp);
if (!status.ok()) {
done(status);
return;
}
for (const auto& it : resp.device_attributes()) {
if (it.name() == device) {
*locality = it.locality();
done(absl::OkStatus());
return;
}
}
done(errors::Internal("device not found: ", device));
}
};
class FakeNcclCommunicator : public NcclCommunicatorInterface {
public:
string GenerateCommunicatorKey() override { return "mock-communicator-key"; }
void Enqueue(std::shared_ptr<CollectiveContext> col_ctx,
StatusCallback done) override {
done(absl::OkStatus());
}
void StartAbort(const Status& s) override {}
};
class DeviceResDistTest : public ::testing::Test {
public:
~DeviceResDistTest() override {
for (auto& name_param : cp_) {
name_param.second->Unref();
}
}
protected:
void DefineWorkers(int num_workers, int num_devices,
const string& device_type, bool nccl) {
for (int w = 0; w < num_workers; ++w) {
string name = strings::StrCat("/job:worker/replica:0/task:", w);
DefineWorker(name, device_type, num_devices, nccl);
}
}
void DefineWorker(const string& worker_name, const string& device_type,
int num_devices, bool nccl) {
ConfigProto config;
config.mutable_experimental()->set_collective_group_leader(
"/job:worker/replica:0/task:0");
config.mutable_experimental()->set_collective_nccl(nccl);
std::vector<std::unique_ptr<Device>> devices;
for (int i = 0; i < num_devices; ++i) {
devices.push_back(NewDevice(
device_type,
strings::StrCat(worker_name, "/device:", device_type, ":", i)));
}
device_mgrs_[worker_name] =
std::make_unique<StaticDeviceMgr>(std::move(devices));
std::vector<string>* dv = &dev_by_task_[worker_name];
dv->clear();
for (auto* d : device_mgrs_[worker_name]->ListDevices()) {
dv->push_back(d->name());
}
dev_resolvers_[worker_name] = std::make_unique<DeviceResolverDistributed>(
device_mgrs_[worker_name].get());
cp_resolvers_[worker_name] =
std::make_unique<CollectiveParamResolverDistributed>(
config, device_mgrs_[worker_name].get(),
dev_resolvers_[worker_name].get(), &nccl_communicator_, &wc_,
worker_name);
auto worker_env = std::make_unique<WorkerEnv>();
worker_env->env = Env::Default();
worker_env->device_mgr = device_mgrs_[worker_name].get();
worker_env->collective_executor_mgr =
std::make_unique<TestCollectiveExecutorMgr>(
cp_resolvers_[worker_name].get(), nullptr);
workers_[worker_name] = std::make_unique<Worker>(worker_env.get());
worker_envs_[worker_name] = std::move(worker_env);
wc_.AddWorker(worker_name, workers_[worker_name].get());
}
void DefineCollectiveParams(int num_workers, int num_devices,
const string& device_type,
CollectiveType coll_type = REDUCTION_COLLECTIVE,
int source_rank = 0) {
for (int wi = 0; wi < num_workers; ++wi) {
string task_name = strings::StrCat("/job:worker/replica:0/task:", wi);
for (int di = 0; di < num_devices; ++di) {
int idx = wi * num_devices + di;
string device_name =
strings::StrCat(task_name, "/device:", device_type, ":", di);
cp_[device_name] =
CreateCollectiveParams(num_workers, num_devices, device_type,
coll_type, idx == source_rank);
}
}
}
CollectiveParams* CreateCollectiveParams(int num_workers, int num_devices,
const string& device_type,
CollectiveType coll_type,
bool is_source) {
const int kGroupKey = 5;
const int kInstanceKey = 3;
auto* cp = new CollectiveParams();
cp->is_source = is_source;
cp->group.group_key = kGroupKey;
cp->group.group_size = num_workers * num_devices;
cp->group.device_type = DeviceType(device_type);
cp->group.num_tasks = num_workers;
cp->instance.instance_key = kInstanceKey;
cp->instance.type = coll_type;
cp->instance.data_type = DT_FLOAT;
cp->instance.shape = TensorShape({64});
cp->instance.impl_details.subdiv_offsets.push_back(0);
return cp;
}
void IssueRequests(int num_workers, int num_devices) {
{
mutex_lock l(mu_);
num_done_ = 0;
}
int group_size = num_workers * num_devices;
for (int wi = 0; wi < num_workers; ++wi) {
string task_name = strings::StrCat("/job:worker/replica:0/task:", wi);
for (int di = 0; di < num_devices; ++di) {
string device_name = strings::StrCat(task_name, "/device:CPU:", di);
IssueRequest(task_name, device_name, group_size);
}
}
}
void IssueRequest(const string& task_name, const string& device_name,
int group_size) {
Device* device = nullptr;
TF_CHECK_OK(device_mgrs_[task_name]->LookupDevice(device_name, &device));
CollectiveParams* cp = cp_[device_name];
CollectiveParamResolverDistributed* cp_res = cp_resolvers_[task_name].get();
CHECK(cp_res);
cp_res->CompleteParamsAsync(
device->attributes(), cp, &cm_,
[this, device_name, group_size](const Status& s) {
status_[device_name] = s;
{
mutex_lock l(mu_);
++num_done_;
if (num_done_ == group_size) {
done_.notify_all();
}
}
});
}
void ValidateCollectiveParams(int num_workers, int num_devices) {
int device_count = num_workers * num_devices;
{
mutex_lock l(mu_);
if (num_done_ < device_count) {
done_.wait(l);
}
}
const int dev_count = num_workers * num_devices;
string dev0 = "/job:worker/replica:0/task:0/device:CPU:0";
for (int wi = 0; wi < num_workers; ++wi) {
string task_name = strings::StrCat("/job:worker/replica:0/task:", wi);
for (int di = 0; di < num_devices; ++di) {
string device_name = strings::StrCat(task_name, "/device:CPU:", di);
int idx = wi * num_devices + di;
TF_ASSERT_OK(status_[device_name]);
EXPECT_EQ(cp_[device_name]->default_rank, idx);
EXPECT_EQ(cp_[device_name]->group.members.size(), dev_count);
EXPECT_EQ(cp_[device_name]->group.members[idx].device.name(),
device_name);
EXPECT_EQ(cp_[device_name]->group.members[idx].task, task_name);
ValidateDeviceResolver(*cp_[device_name], task_name);
if (idx > 0) {
EXPECT_EQ(cp_[dev0]->group.runtime_details.communicator_key,
cp_[device_name]->group.runtime_details.communicator_key);
for (int i = 0; i < dev_count; ++i) {
EXPECT_EQ(cp_[dev0]->group.members[i].device.name(),
cp_[device_name]->group.members[i].device.name());
EXPECT_EQ(cp_[dev0]->group.members[i].task,
cp_[device_name]->group.members[i].task);
}
}
}
}
}
void ValidateDeviceResolver(const CollectiveParams& cp, const string& task) {
for (const CollGroupMember& member : cp.group.members) {
DeviceAttributes attributes;
TF_ASSERT_OK(dev_resolvers_[task]->GetDeviceAttributes(
member.device.name(), &attributes));
}
}
void RestartWorker(int worker_idx, int num_workers, int num_devices,
const string& device_type, bool nccl,
CollectiveType coll_type = REDUCTION_COLLECTIVE,
bool is_source = false) {
string worker_name =
strings::StrCat("/job:worker/replica:0/task:", worker_idx);
DefineWorker(worker_name, device_type, num_devices, nccl);
for (int i = 0; i < num_devices; ++i) {
string device_name =
strings::StrCat(worker_name, "/device:", device_type, ":", i);
if (cp_.find(device_name) != cp_.end()) {
cp_[device_name]->Unref();
}
cp_[device_name] = CreateCollectiveParams(
num_workers, num_devices, device_type, coll_type, is_source);
status_.erase(device_name);
}
}
FakeCache wc_;
FakeNcclCommunicator nccl_communicator_;
CancellationManager cm_;
absl::flat_hash_map<string, std::unique_ptr<DeviceMgr>> device_mgrs_;
absl::flat_hash_map<string, std::unique_ptr<DeviceResolverDistributed>>
dev_resolvers_;
absl::flat_hash_map<string,
std::unique_ptr<CollectiveParamResolverDistributed>>
cp_resolvers_;
absl::flat_hash_map<string, std::vector<string>> dev_by_task_;
absl::flat_hash_map<string, std::unique_ptr<WorkerEnv>> worker_envs_;
absl::flat_hash_map<string, std::unique_ptr<Worker>> workers_;
absl::flat_hash_map<string, CollectiveParams*> cp_;
absl::flat_hash_map<string, Status> status_;
mutex mu_;
int num_done_ TF_GUARDED_BY(mu_);
condition_variable done_;
};
TEST_F(DeviceResDistTest, Workers1Devices1) {
const int num_workers = 1;
const int num_devices = 1;
DefineWorkers(num_workers, num_devices, "CPU", false);
DefineCollectiveParams(num_workers, num_devices, "CPU");
IssueRequests(num_workers, num_devices);
ValidateCollectiveParams(num_workers, num_devices);
}
TEST_F(DeviceResDistTest, Workers2Devices2) {
const int num_workers = 2;
const int num_devices = 2;
DefineWorkers(num_workers, num_devices, "CPU", false);
DefineCollectiveParams(num_workers, num_devices, "CPU");
IssueRequests(num_workers, num_devices);
ValidateCollectiveParams(num_workers, num_devices);
}
TEST_F(DeviceResDistTest, DifferentIncarnation) {
const int num_workers = 2;
const int num_devices = 1;
DefineWorkers(num_workers, num_devices, "CPU", false);
DefineCollectiveParams(num_workers, num_devices, "CPU");
IssueRequests(num_workers, num_devices);
RestartWorker(1, num_workers, num_devices, "CPU", false);
const string task_name = "/job:worker/replica:0/task:1";
const string device_name = absl::StrCat(task_name, "/device:CPU:0");
IssueRequest(task_name, device_name, num_workers * num_devices);
EXPECT_TRUE(errors::IsFailedPrecondition(status_[device_name]));
}
TEST_F(DeviceResDistTest, BroadcastSourceRank0) {
const int num_workers = 2;
const int num_devices = 2;
const int source_rank = 0;
DefineWorkers(num_workers, num_devices, "CPU", false);
DefineCollectiveParams(num_workers, num_devices, "CPU", BROADCAST_COLLECTIVE,
source_rank);
IssueRequests(num_workers, num_devices);
ValidateCollectiveParams(num_workers, num_devices);
}
TEST_F(DeviceResDistTest, BroadcastSourceRank3) {
const int num_workers = 2;
const int num_devices = 2;
const int source_rank = 3;
DefineWorkers(num_workers, num_devices, "CPU", false);
DefineCollectiveParams(num_workers, num_devices, "CPU", BROADCAST_COLLECTIVE,
source_rank);
IssueRequests(num_workers, num_devices);
ValidateCollectiveParams(num_workers, num_devices);
}
TEST_F(DeviceResDistTest, Workers4Devices3) {
const int num_workers = 4;
const int num_devices = 3;
DefineWorkers(num_workers, num_devices, "CPU", true);
DefineCollectiveParams(num_workers, num_devices, "CPU");
IssueRequests(num_workers, num_devices);
ValidateCollectiveParams(num_workers, num_devices);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/collective_param_resolver_distributed.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/collective_param_resolver_distributed_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e7f6450c-05e8-4958-bf2e-9669cd67d5e5 | cpp | tensorflow/tensorflow | simplify_fp_conversions | third_party/xla/xla/service/simplify_fp_conversions.cc | third_party/xla/xla/service/gpu/tests/simplify_fp_conversions_test.cc | #include "xla/service/simplify_fp_conversions.h"
#include <cstddef>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
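// Collapses chains of two or more floating-point converts into a single
// convert, or removes them entirely when the source and destination types
// match.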
absl::StatusOr<bool> RunOnComputation(HloComputation& computation) {
bool changed = false;
for (HloInstruction* instruction : computation.MakeInstructionPostOrder()) {
HloInstruction* input = instruction;
size_t convert_chain_length = 0;
while (input->opcode() == HloOpcode::kConvert &&
primitive_util::IsFloatingPointType(input->shape().element_type())) {
input = input->mutable_operand(0);
++convert_chain_length;
}
if (convert_chain_length < 2) {
continue;
}
if (instruction->shape().element_type() == input->shape().element_type()) {
TF_RETURN_IF_ERROR(
instruction->parent()->ReplaceInstruction(instruction, input));
} else {
TF_RETURN_IF_ERROR(instruction->parent()->ReplaceWithNewInstruction(
instruction,
HloInstruction::CreateConvert(instruction->shape(), input)));
}
changed = true;
}
return changed;
}
}  // namespace
absl::StatusOr<bool> SimplifyFPConversions::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(
2, absl::StrFormat("SimplifyFPConversions::Run() with before:\n%s",
module->ToString()));
bool changed = false;
for (HloComputation* computation :
module->MakeComputationPostOrder(execution_threads)) {
TF_ASSIGN_OR_RETURN(bool comp_changed, RunOnComputation(*computation));
changed |= comp_changed;
}
XLA_VLOG_LINES(2,
absl::StrFormat("SimplifyFPConversions::Run() with after:\n%s",
module->ToString()));
return changed;
}
} | #include <string_view>
#include <variant>
#include "xla/tests/hlo_test_base.h"
#include "xla/xla.pb.h"
namespace xla {
namespace gpu {
namespace {
class SimplifyFPConversionsTest : public HloTestBase {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_allow_excess_precision(
enable_simplify_all_fp_conversions_);
return debug_options;
}
bool SupportsMultiplyBF16() {
const auto& device_description =
backend().default_stream_executor()->GetDeviceDescription();
const auto& cc = device_description.gpu_compute_capability();
return std::holds_alternative<se::CudaComputeCapability>(cc) &&
std::get<se::CudaComputeCapability>(cc).IsAtLeastHopper();
}
void SetEnableSimplifyFpConversions(bool enable_simplify_all_fp_conversions) {
enable_simplify_all_fp_conversions_ = enable_simplify_all_fp_conversions;
}
static constexpr std::string_view kHloText = R"(
HloModule module
ENTRY main {
param0 = bf16[1536]{0} parameter(0)
param1 = bf16[4,1536]{1,0} parameter(1)
s = bf16[1536]{0} rsqrt(param0)
b = bf16[4,1536]{1,0} broadcast(s), dimensions={1}
ROOT d = bf16[4,1536]{1,0} multiply(b, param1)
}
)";
private:
bool enable_simplify_all_fp_conversions_ = false;
};
TEST_F(SimplifyFPConversionsTest, RedundantTypeConversionsGetCleanedUp) {
SetEnableSimplifyFpConversions(true);
if (SupportsMultiplyBF16()) {
MatchOptimizedHlo(kHloText, R"(
)");
} else {
MatchOptimizedHlo(kHloText, R"(
)");
}
}
TEST_F(SimplifyFPConversionsTest, RedundantTypeConversionsArePresentInTest) {
if (SupportsMultiplyBF16()) {
GTEST_SKIP() << "No double convert is expected on Hopper";
}
SetEnableSimplifyFpConversions(false);
MatchOptimizedHlo(kHloText, R"(
)");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/simplify_fp_conversions.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/tests/simplify_fp_conversions_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f5f16c60-4ca6-4bc5-b57b-52af2ce349be | cpp | google/cel-cpp | opaque_type | common/types/opaque_type.cc | common/types/opaque_type_test.cc | #include <cstddef>
#include <cstring>
#include <string>
#include <type_traits>
#include "absl/base/nullability.h"
#include "absl/log/absl_check.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "absl/utility/utility.h"
#include "common/type.h"
#include "google/protobuf/arena.h"
namespace cel {
namespace {
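// Renders an opaque type as "name<param1, param2, ...>", or just "name" when
// there are no type parameters.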
std::string OpaqueDebugString(absl::string_view name,
absl::Span<const Type> parameters) {
if (parameters.empty()) {
return std::string(name);
}
return absl::StrCat(
name, "<", absl::StrJoin(parameters, ", ", absl::StreamFormatter()), ">");
}
}
namespace common_internal {
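// Allocates the data block and its trailing parameter array in a single
// arena allocation (flexible-array style) and placement-constructs it there.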
absl::Nonnull<OpaqueTypeData*> OpaqueTypeData::Create(
absl::Nonnull<google::protobuf::Arena*> arena, absl::string_view name,
absl::Span<const Type> parameters) {
return ::new (arena->AllocateAligned(
offsetof(OpaqueTypeData, parameters) + (parameters.size() * sizeof(Type)),
alignof(OpaqueTypeData))) OpaqueTypeData(name, parameters);
}
OpaqueTypeData::OpaqueTypeData(absl::string_view name,
absl::Span<const Type> parameters)
: name(name), parameters_size(parameters.size()) {
std::memcpy(this->parameters, parameters.data(),
parameters_size * sizeof(Type));
}
}
OpaqueType::OpaqueType(absl::Nonnull<google::protobuf::Arena*> arena,
absl::string_view name,
absl::Span<const Type> parameters)
: OpaqueType(
common_internal::OpaqueTypeData::Create(arena, name, parameters)) {}
std::string OpaqueType::DebugString() const {
ABSL_DCHECK(*this);
return OpaqueDebugString(name(), GetParameters());
}
absl::string_view OpaqueType::name() const {
ABSL_DCHECK(*this);
return data_->name;
}
TypeParameters OpaqueType::GetParameters() const {
ABSL_DCHECK(*this);
return TypeParameters(
absl::MakeConstSpan(data_->parameters, data_->parameters_size));
}
bool OpaqueType::IsOptional() const {
return name() == OptionalType::kName && GetParameters().size() == 1;
}
absl::optional<OptionalType> OpaqueType::AsOptional() const {
if (IsOptional()) {
return OptionalType(absl::in_place, *this);
}
return absl::nullopt;
}
OptionalType OpaqueType::GetOptional() const {
ABSL_DCHECK(IsOptional()) << DebugString();
return OptionalType(absl::in_place, *this);
}
} | #include <sstream>
#include "absl/hash/hash.h"
#include "common/type.h"
#include "internal/testing.h"
#include "google/protobuf/arena.h"
namespace cel {
namespace {
TEST(OpaqueType, Kind) {
google::protobuf::Arena arena;
EXPECT_EQ(OpaqueType(&arena, "test.Opaque", {BytesType()}).kind(),
OpaqueType::kKind);
EXPECT_EQ(Type(OpaqueType(&arena, "test.Opaque", {BytesType()})).kind(),
OpaqueType::kKind);
}
TEST(OpaqueType, Name) {
google::protobuf::Arena arena;
EXPECT_EQ(OpaqueType(&arena, "test.Opaque", {BytesType()}).name(),
"test.Opaque");
EXPECT_EQ(Type(OpaqueType(&arena, "test.Opaque", {BytesType()})).name(),
"test.Opaque");
}
TEST(OpaqueType, DebugString) {
google::protobuf::Arena arena;
{
std::ostringstream out;
out << OpaqueType(&arena, "test.Opaque", {BytesType()});
EXPECT_EQ(out.str(), "test.Opaque<bytes>");
}
{
std::ostringstream out;
out << Type(OpaqueType(&arena, "test.Opaque", {BytesType()}));
EXPECT_EQ(out.str(), "test.Opaque<bytes>");
}
{
std::ostringstream out;
out << OpaqueType(&arena, "test.Opaque", {});
EXPECT_EQ(out.str(), "test.Opaque");
}
}
TEST(OpaqueType, Hash) {
google::protobuf::Arena arena;
EXPECT_EQ(absl::HashOf(OpaqueType(&arena, "test.Opaque", {BytesType()})),
absl::HashOf(OpaqueType(&arena, "test.Opaque", {BytesType()})));
}
TEST(OpaqueType, Equal) {
google::protobuf::Arena arena;
EXPECT_EQ(OpaqueType(&arena, "test.Opaque", {BytesType()}),
OpaqueType(&arena, "test.Opaque", {BytesType()}));
EXPECT_EQ(Type(OpaqueType(&arena, "test.Opaque", {BytesType()})),
OpaqueType(&arena, "test.Opaque", {BytesType()}));
EXPECT_EQ(OpaqueType(&arena, "test.Opaque", {BytesType()}),
Type(OpaqueType(&arena, "test.Opaque", {BytesType()})));
EXPECT_EQ(Type(OpaqueType(&arena, "test.Opaque", {BytesType()})),
Type(OpaqueType(&arena, "test.Opaque", {BytesType()})));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/opaque_type.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/opaque_type_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
54892a0d-a6bd-4074-b1d2-de0af3d15773 | cpp | tensorflow/tensorflow | hlo_sharding | third_party/xla/xla/hlo/ir/hlo_sharding.cc | third_party/xla/xla/service/hlo_sharding_test.cc | #include "xla/hlo/ir/hlo_sharding.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <iterator>
#include <map>
#include <memory>
#include <optional>
#include <ostream>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_op_metadata.h"
#include "xla/overflow_util.h"
#include "xla/printer.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/protobuf.h"
namespace xla {
namespace {
using absl::StrCat;
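// Tries to regroup (splitting one dimension if necessary) the minor-most
// dimensions of an iota tile assignment so that the trailing `group_size`
// elements of every group form a suffix that can be canonically sorted.
// Returns false if the reshape dims cannot be partitioned this way.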
bool GroupMinorIotaDimsSorted(absl::Span<const int64_t> dims,
absl::Span<const int> perm, int64_t group_size,
absl::InlinedVector<int64_t, 6>& new_dims,
absl::InlinedVector<int, 6>& new_perm) {
DCHECK_GT(group_size, 1);
int grouped_dims = 0;
std::optional<std::pair<int, int64_t>> split_dim_and_size;
for (int i = perm.size() - 1; i >= 0; --i) {
const int dim = perm[i];
const int64_t dim_size = dims[dim];
if (dim_size <= group_size) {
if (group_size % dim_size != 0) {
return false;
}
group_size /= dim_size;
++grouped_dims;
} else {
if (dim_size % group_size != 0) {
return false;
}
split_dim_and_size.emplace(dim, dim_size / group_size);
++grouped_dims;
group_size = 1;
break;
}
}
if (!split_dim_and_size) {
new_dims.assign(dims.begin(), dims.end());
new_perm.assign(perm.begin(), perm.end());
std::stable_sort(new_perm.end() - grouped_dims, new_perm.end());
return true;
}
new_dims.resize(dims.size() + 1);
new_perm.resize(perm.size() + 1);
const int split_i = split_dim_and_size->first;
for (int i = 0; i < split_i; ++i) {
new_dims[i] = dims[i];
}
new_dims[split_i] = split_dim_and_size->second;
new_dims[split_i + 1] = dims[split_i] / split_dim_and_size->second;
for (int i = split_i + 2; i < new_perm.size(); ++i) {
new_dims[i] = dims[i - 1];
}
int perm_split = 0;
for (int i = 0; i < perm.size(); ++i) {
const int perm_dim = perm[i];
new_perm[i] = perm_dim <= split_i ? perm_dim : (perm_dim + 1);
if (perm_dim == split_i) {
perm_split = i;
break;
}
}
new_perm[perm_split + 1] = new_perm[perm_split] + 1;
for (int i = perm_split + 2; i < new_perm.size(); ++i) {
const int perm_dim = perm[i - 1];
new_perm[i] = perm_dim <= split_i ? perm_dim : (perm_dim + 1);
}
std::stable_sort(new_perm.end() - grouped_dims, new_perm.end());
return true;
}
}
HloSharding HloSharding::AssignDevice(int64_t device_id,
absl::Span<const OpMetadata> metadata) {
return HloSharding(device_id, metadata);
}
HloSharding HloSharding::Tile1D(const Shape& input_shape, int64_t num_tiles,
absl::Span<const OpMetadata> metadata) {
CHECK_EQ(1, input_shape.rank());
CHECK_GT(num_tiles, 1);
absl::Span<const int64_t> dimensions(&num_tiles, 1);
return HloSharding(TileAssignment(dimensions, dimensions, {0}),
false, metadata);
}
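// Builds a sharding whose last tile dimension is replicated. Degenerate
// inputs collapse to Replicate() or a plain tiled sharding, and explicit
// (non-iota) assignments have every replication group sorted so that
// equivalent shardings compare equal.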
HloSharding HloSharding::PartialTile(
const TileAssignment& tile_assignment_last_dim_replicate,
absl::Span<const OpMetadata> metadata) {
if (tile_assignment_last_dim_replicate.num_dimensions() == 1 ||
tile_assignment_last_dim_replicate.dimensions().back() ==
tile_assignment_last_dim_replicate.num_elements()) {
return Replicate(metadata);
}
if (tile_assignment_last_dim_replicate.dimensions().back() == 1) {
auto new_tile_dims = tile_assignment_last_dim_replicate.dimensions();
new_tile_dims.remove_suffix(1);
return HloSharding(
tile_assignment_last_dim_replicate.Reshape(new_tile_dims),
false, metadata);
}
const int64_t group_size =
tile_assignment_last_dim_replicate.dimensions().back();
if (tile_assignment_last_dim_replicate.iota_) {
auto& iota = tile_assignment_last_dim_replicate.iota_.value();
if (iota.reshape_dims()[iota.transpose_perm().back()] == group_size) {
return HloSharding(tile_assignment_last_dim_replicate,
true, metadata);
}
absl::InlinedVector<int64_t, 6> new_reshape_dims;
absl::InlinedVector<int, 6> new_transpose_perm;
if (GroupMinorIotaDimsSorted(iota.reshape_dims(), iota.transpose_perm(),
group_size, new_reshape_dims,
new_transpose_perm)) {
return HloSharding(
TileAssignment(iota.dims(), new_reshape_dims, new_transpose_perm),
true, metadata);
}
}
std::vector<int64_t> sorted_groups(
tile_assignment_last_dim_replicate.num_elements());
const int64_t num_groups =
tile_assignment_last_dim_replicate.num_elements() / group_size;
std::vector<int32_t> current_group_idx(num_groups, 0);
auto get_group_id = [&](absl::Span<const int64_t> indices) {
int64_t group_id = 0;
for (int64_t i = 0; i < indices.size() - 1; ++i) {
group_id *= tile_assignment_last_dim_replicate.dim(i);
group_id += indices[i];
}
return group_id;
};
tile_assignment_last_dim_replicate.Each(
[&](absl::Span<const int64_t> indices, const int64_t device) {
const int64_t group_id = get_group_id(indices);
sorted_groups[group_id * group_size + current_group_idx[group_id]++] =
device;
});
for (int i = 0; i < num_groups; ++i) {
std::sort(sorted_groups.begin() + i * group_size,
sorted_groups.begin() + (i + 1) * group_size);
}
absl::c_fill(current_group_idx, 0);
auto sorted_tile = std::make_shared<Array<int64_t>>(
tile_assignment_last_dim_replicate.dimensions());
sorted_tile->Each([&](absl::Span<const int64_t> indices, int64_t* device) {
const int64_t group_id = get_group_id(indices);
*device =
sorted_groups[group_id * group_size + current_group_idx[group_id]++];
});
return HloSharding(TileAssignment(std::move(sorted_tile)),
true, metadata);
}
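// Creates a sharding whose trailing tile dimensions carry subgroup types
// (manual, replicated, ...). Trivial size-1 subgroup dims are dropped,
// repeated types are merged into one dimension, and a sharding with no real
// data tiling collapses to Manual() or Replicate().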
HloSharding HloSharding::Subgroup(
const TileAssignment& tile_assignment,
absl::Span<const OpSharding::Type> subgroup_types,
absl::Span<const OpMetadata> metadata) {
if (subgroup_types.empty()) {
return HloSharding(tile_assignment,
false, metadata);
}
if (absl::c_all_of(
subgroup_types,
[&](const OpSharding::Type t) { return t == subgroup_types[0]; }) &&
Product(tile_assignment.dimensions().subspan(
0, tile_assignment.num_dimensions() - subgroup_types.size())) == 1) {
if (subgroup_types[0] == OpSharding::MANUAL) {
return Manual(metadata);
}
if (subgroup_types[0] == OpSharding::REPLICATED) {
return Replicate(metadata);
}
}
int64_t data_dims = tile_assignment.num_dimensions() - subgroup_types.size();
absl::InlinedVector<int, 6> perm(data_dims);
absl::c_iota(perm, 0);
static_assert(sizeof(std::vector<int>) >=
sizeof(absl::InlinedVector<int, 2>));
std::array<absl::InlinedVector<int, 2>, OpSharding::Type_ARRAYSIZE>
type_to_dims;
int subgroup_count = 0;
bool needs_merging = false;
absl::InlinedVector<int, 4> removed_dims;
for (int i = 0; i < subgroup_types.size(); ++i) {
if (tile_assignment.dim(i + data_dims) == 1) {
removed_dims.push_back(i + data_dims);
needs_merging = true;
continue;
}
auto& dims = type_to_dims[subgroup_types[i]];
if (!dims.empty()) {
needs_merging = true;
} else {
++subgroup_count;
}
needs_merging |= !dims.empty();
dims.push_back(i + data_dims);
}
needs_merging |= subgroup_count > 1;
auto create_sharding = [](const TileAssignment& tiles,
absl::Span<const OpSharding::Type> types,
absl::Span<const OpMetadata> metadata) {
if (types.size() == 1 && types.back() == OpSharding::REPLICATED) {
return PartialTile(tiles, metadata);
}
if (types.size() == 1 && types.back() == OpSharding::MANUAL &&
tiles.num_elements() == tiles.dimensions().back()) {
return Manual(metadata);
}
if (!types.empty() && types.back() == OpSharding::REPLICATED) {
HloSharding sharding = PartialTile(tiles, metadata);
sharding.replicate_on_last_tile_dim_ = false;
for (const OpSharding::Type type : types) {
sharding.subgroup_types_.push_back(type);
}
return sharding;
}
return HloSharding(tiles, types, metadata);
};
if (needs_merging) {
auto data_tile_shape = tile_assignment.dimensions().subspan(0, data_dims);
absl::InlinedVector<int64_t, 6> merged_shape(data_tile_shape.begin(),
data_tile_shape.end());
absl::InlinedVector<int64_t, 6> transposed_shape = merged_shape;
std::vector<OpSharding::Type> merged_types;
static constexpr std::array<OpSharding::Type, OpSharding::Type_ARRAYSIZE>
kOrderedTypes = {OpSharding::MAXIMAL, OpSharding::TUPLE,
OpSharding::OTHER, OpSharding::MANUAL,
OpSharding::REPLICATED, OpSharding::UNKNOWN};
static_assert(kOrderedTypes[0] == 1 && kOrderedTypes[1] == 2 &&
kOrderedTypes[2] == 3 && kOrderedTypes[3] == 4 &&
kOrderedTypes[4] == 0 && kOrderedTypes[5] == 5);
for (OpSharding::Type type : kOrderedTypes) {
auto& dims = type_to_dims[type];
if (dims.empty()) continue;
int64_t dim_size = 1;
for (int64_t dim : dims) {
perm.push_back(dim);
dim_size *= tile_assignment.dim(dim);
transposed_shape.push_back(tile_assignment.dim(dim));
}
merged_shape.push_back(dim_size);
merged_types.push_back(type);
}
TileAssignment new_tile_assignment = [&] {
if (tile_assignment.iota_) {
absl::c_copy(removed_dims, std::back_inserter(perm));
auto transposed_iota = tile_assignment.iota_->Transpose(perm);
if (transposed_iota) {
return TileAssignment(merged_shape, transposed_iota->reshape_dims(),
transposed_iota->transpose_perm());
}
}
auto new_tiles = std::make_shared<Array<int64_t>>(transposed_shape);
new_tiles->Each([&](absl::Span<const int64_t> indices, int64_t* value) {
std::vector<int64_t> src_indices(tile_assignment.num_dimensions(), 0);
for (int64_t i = 0; i < indices.size(); ++i) {
src_indices[perm[i]] = indices[i];
}
*value = tile_assignment(src_indices);
});
new_tiles->Reshape(merged_shape);
return TileAssignment(std::move(new_tiles));
}();
return create_sharding(new_tile_assignment, merged_types, metadata);
}
return create_sharding(tile_assignment, subgroup_types, metadata);
}
HloSharding HloSharding::Tuple(const ShapeTree<HloSharding>& sub_shardings) {
std::vector<HloSharding> flattened_list;
flattened_list.reserve(sub_shardings.leaf_count());
for (const auto& index_to_sharding : sub_shardings.leaves()) {
flattened_list.push_back(index_to_sharding.second);
}
if (flattened_list.empty()) {
flattened_list.push_back(sub_shardings.element(ShapeIndex({})));
}
return HloSharding(flattened_list);
}
HloSharding HloSharding::Tuple(const Shape& tuple_shape,
absl::Span<const HloSharding> shardings) {
CHECK(tuple_shape.IsTuple()) << ShapeUtil::HumanString(tuple_shape);
for (auto& sharding : shardings) {
CHECK(!sharding.IsTuple())
<< sharding.ToString()
<< ", tuple shape = " << ShapeUtil::HumanString(tuple_shape);
}
std::vector<HloSharding> flattened_list(shardings.begin(), shardings.end());
if (!flattened_list.empty()) {
CHECK_EQ(flattened_list.size(), RequiredLeaves(tuple_shape))
<< "Flat list has " << flattened_list.size() << ", required "
<< RequiredLeaves(tuple_shape);
}
return HloSharding(std::move(flattened_list));
}
HloSharding HloSharding::SingleTuple(const Shape& tuple_shape,
const HloSharding& sharding) {
CHECK(tuple_shape.IsTuple()) << ShapeUtil::HumanString(tuple_shape);
CHECK(!sharding.IsTuple()) << sharding.ToString();
int64_t leaf_count = RequiredLeaves(tuple_shape);
std::vector<HloSharding> flattened_list;
flattened_list.resize(leaf_count, sharding);
return HloSharding(std::move(flattened_list));
}
HloSharding HloSharding::Single(const Shape& shape,
const HloSharding& sharding) {
return shape.IsTuple() ? SingleTuple(shape, sharding) : sharding;
}
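// Prints the human-readable form, e.g. {replicated}, {maximal device=N} or
// {devices=[...]...}, optionally followed by shard-group and metadata info.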
void HloSharding::Print(Printer* printer, bool include_metadata) const {
if (IsTuple()) {
CHECK(metadata_.empty());
if (ABSL_PREDICT_FALSE(tuple_elements_.empty())) {
printer->Append("{}");
return;
}
printer->Append("{");
tuple_elements_[0].Print(printer, include_metadata);
for (int i = 1; i < tuple_elements_.size(); ++i) {
if (i % 5 == 0) {
AppendCat(printer, ", /*index=", i, "*/");
} else {
printer->Append(", ");
}
tuple_elements_[i].Print(printer, include_metadata);
}
printer->Append("}");
return;
}
auto print_metadata = [&] {
if (include_metadata && !metadata_.empty()) {
printer->Append(" metadata={");
if (metadata_.size() == 1) {
printer->Append(OpMetadataToString(metadata_.front()));
} else {
AppendJoin(printer, metadata_, ", ",
[](Printer* printer, auto& metadata) {
AppendCat(printer, "{", OpMetadataToString(metadata), "}");
});
}
printer->Append("}");
}
};
auto print_shard_group = [&] {
auto shard_group_str = shard_group_.ToString();
if (!shard_group_str.empty()) {
printer->Append(" " + shard_group_str);
}
};
if (replicated_) {
printer->Append("{replicated");
print_shard_group();
print_metadata();
printer->Append("}");
return;
}
if (manual_) {
printer->Append("{manual");
print_shard_group();
print_metadata();
printer->Append("}");
return;
}
if (unknown_) {
printer->Append("{unknown");
print_shard_group();
print_metadata();
printer->Append("}");
return;
}
if (maximal_) {
AppendCat(printer, "{maximal device=",
static_cast<int64_t>(*tile_assignment_.array().begin()));
print_shard_group();
print_metadata();
printer->Append("}");
return;
}
auto print_last_tile_dims = [&] {
if (!subgroup_types_.empty()) {
auto op_sharding_type_to_string = [](OpSharding::Type type) {
switch (type) {
case OpSharding::MANUAL:
return "manual";
case OpSharding::MAXIMAL:
return "maximul";
case OpSharding::REPLICATED:
return "replicated";
default:
return "error_type.";
}
};
printer->Append(" last_tile_dims={");
AppendJoin(printer, subgroup_types_, ", ",
[&](Printer* printer, OpSharding::Type sharding_type) {
printer->Append(op_sharding_type_to_string(sharding_type));
});
printer->Append("}");
}
};
printer->Append("{");
tile_assignment_.Print(printer);
if (replicate_on_last_tile_dim_) {
printer->Append(" last_tile_dim_replicate");
}
print_last_tile_dims();
print_shard_group();
print_metadata();
printer->Append("}");
}
std::string HloSharding::ToString(bool include_metadata) const {
StringPrinter printer;
Print(&printer, include_metadata);
return std::move(printer).ToString();
}
bool HloSharding::UsesDevice(int64_t device) const {
if (IsTuple()) {
return absl::c_any_of(tuple_elements_, [&](const HloSharding& s) {
return s.UsesDevice(device);
});
}
return replicated_ || manual_ || tile_assignment_.UsesDevice(device);
}
std::map<int64_t, int64_t> HloSharding::UsedDevices(int64_t* count) const {
int64_t element_count = 1;
std::map<int64_t, int64_t> device_map;
if (IsTuple()) {
for (auto& tuple_element_sharding : tuple_elements()) {
auto unique_device = tuple_element_sharding.UniqueDevice();
if (unique_device) {
device_map[*unique_device] += 1;
}
}
element_count = tuple_elements().size();
} else {
auto unique_device = UniqueDevice();
if (unique_device) {
device_map[*unique_device] += 1;
}
}
if (count != nullptr) {
*count = element_count;
}
return device_map;
}
std::vector<int64_t> HloSharding::TileIndexForDevice(int64_t device) const {
CHECK(!maximal_);
CHECK(!IsManual());
CHECK(!IsUnknown());
CHECK(!IsTuple());
std::vector<int64_t> ret_index;
tile_assignment_.Each([&](absl::Span<const int64_t> index, int64_t d) {
if (d == device) {
ret_index = {index.begin(), index.end()};
}
});
CHECK(!ret_index.empty());
ret_index.resize(TiledDataRank());
return ret_index;
}
int64_t HloSharding::DeviceForTileIndex(absl::Span<const int64_t> index) const {
CHECK(!replicated_);
CHECK(!IsManual());
CHECK(!IsUnknown());
CHECK(!IsTuple());
if (maximal_) {
return *tile_assignment_.array().begin();
}
if (index.size() == TiledDataRank() &&
index.size() < tile_assignment_.num_dimensions()) {
std::vector<int64_t> first_subgroup_index(index.begin(), index.end());
for (int64_t i = 0; i < tile_assignment_.num_dimensions() - index.size();
++i) {
first_subgroup_index.push_back(0);
}
return tile_assignment_(first_subgroup_index);
}
return tile_assignment_(index);
}
std::vector<int64_t> HloSharding::TileOffsetForDevice(const Shape& shape,
int64_t device) const {
CHECK(!IsTuple());
CHECK(!IsManual());
CHECK(!IsUnknown());
if (maximal_) {
return std::vector<int64_t>(shape.dimensions_size(), 0);
}
CHECK_EQ(shape.dimensions_size(), TiledDataRank());
std::vector<int64_t> index = TileIndexForDevice(device);
for (int64_t i = 0; i < index.size(); ++i) {
const int64_t shape_dim = shape.dimensions(i);
index[i] = std::min(
index[i] * CeilOfRatio(shape_dim, tile_assignment_.dim(i)), shape_dim);
}
return index;
}
std::vector<int64_t> HloSharding::TileLimitForDevice(const Shape& shape,
int64_t device) const {
CHECK(!IsTuple());
CHECK(!IsManual());
CHECK(!IsUnknown());
if (maximal_) {
return std::vector<int64_t>(shape.dimensions().begin(),
shape.dimensions().end());
}
CHECK_EQ(shape.dimensions_size(), TiledDataRank());
std::vector<int64_t> index = TileIndexForDevice(device);
for (int64_t i = 0; i < index.size(); ++i) {
const int64_t shape_dim = shape.dimensions(i);
index[i] = std::min(
(index[i] + 1) * CeilOfRatio(shape_dim, tile_assignment_.dim(i)),
shape_dim);
}
return index;
}
int64_t HloSharding::RequiredLeaves(const Shape& shape) {
const int64_t leaf_count = ShapeUtil::GetLeafCount(shape);
return (leaf_count == 0) ? 1 : leaf_count;
}
absl::Status HloSharding::CheckLeafCount(const Shape& shape) const {
int64_t leaf_count = ShapeUtil::GetLeafCount(shape);
if (leaf_count == 0 && tuple_elements_.size() == 1) {
return absl::OkStatus();
}
TF_RET_CHECK(leaf_count == tuple_elements_.size())
<< "Shape " << ShapeUtil::HumanString(shape) << " has " << leaf_count
<< " leaf nodes while this sharding has " << tuple_elements_.size();
return absl::OkStatus();
}
absl::StatusOr<ShapeTree<HloSharding>> HloSharding::AsShapeTree(
const Shape& shape) const {
if (IsTuple()) {
ShapeTree<HloSharding> result(shape, HloSharding::Replicate());
TF_RETURN_IF_ERROR(CheckLeafCount(shape));
auto it = tuple_elements_.begin();
for (auto& index_to_sharding : result.leaves()) {
index_to_sharding.second = *it++;
}
return std::move(result);
} else {
return ShapeTree<HloSharding>(shape, *this);
}
}
absl::StatusOr<HloSharding> HloSharding::GetTupleSharding(
const Shape& shape) const {
if (IsTuple()) {
TF_RETURN_IF_ERROR(CheckLeafCount(shape));
return *this;
}
return SingleTuple(shape, *this);
}
HloSharding HloSharding::NormalizeTupleSharding(const Shape& shape) const {
if (shape.IsTuple() && !IsTuple()) {
return HloSharding::SingleTuple(shape, *this);
}
return *this;
}
std::optional<int64_t> HloSharding::UniqueDevice() const {
if (IsTuple()) {
if (tuple_elements_.empty()) {
return std::nullopt;
}
std::optional<int64_t> unique_device;
for (auto& tuple_sharding : tuple_elements_) {
auto device = tuple_sharding.UniqueDevice();
if (!device || (unique_device && *device != *unique_device)) {
return std::nullopt;
}
unique_device = device;
}
return unique_device;
}
if (!replicated_ && maximal_) {
return static_cast<int64_t>(*tile_assignment_.array().begin());
}
return std::nullopt;
}
int64_t HloSharding::GetUniqueDevice() const {
auto device = UniqueDevice();
CHECK(device) << "Sharding does not have a unique device: " << *this;
return *device;
}
absl::Status HloSharding::ValidateTuple(
const Shape& shape, std::optional<int64_t> num_devices) const {
if (!shape.IsTuple()) {
return tsl::errors::InvalidArgument(
"Sharding is tuple-shaped but validation shape is not.");
}
TF_RETURN_IF_ERROR(CheckLeafCount(shape));
if (ShapeUtil::GetLeafCount(shape) == 0 && tuple_elements_.empty()) {
return absl::OkStatus();
}
ShapeTree<HloSharding> shape_tree = GetAsShapeTree(shape);
for (const auto& index_to_sharding : shape_tree.leaves()) {
absl::Status status = index_to_sharding.second.ValidateNonTuple(
ShapeUtil::GetSubshape(shape, index_to_sharding.first), num_devices);
if (!status.ok()) {
tsl::errors::AppendToMessage(
&status, StrCat("Note: While validating sharding tuple element ",
index_to_sharding.first.ToString(), " which is ",
index_to_sharding.second.ToString()));
return status;
}
}
return absl::OkStatus();
}
absl::Status HloSharding::Validate(const Shape& shape,
std::optional<int64_t> num_devices) const {
if (shape.IsToken()) {
return absl::OkStatus();
}
absl::Status status = IsTuple() ? ValidateTuple(shape, num_devices)
: ValidateNonTuple(shape, num_devices);
if (!status.ok()) {
tsl::errors::AppendToMessage(
&status, StrCat("Note: While validating sharding ", ToString(),
" against shape ", ShapeUtil::HumanString(shape)));
}
return status;
}
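// Validates a non-tuple sharding against `shape`: devices in the tile
// assignment must be unique and within `num_devices`, and for tiled
// shardings the number of data tile dimensions must match the shape rank.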
absl::Status HloSharding::ValidateNonTuple(
const Shape& shape, std::optional<int64_t> num_devices) const {
if (shape.IsTuple()) {
return absl::InvalidArgumentError(
"Validation shape is a tuple but sharding is not.");
}
if (replicated_) {
return absl::OkStatus();
}
bool all_devices_seen;
if (!tile_assignment_.iota_) {
absl::flat_hash_set<int64_t> seen_devices;
absl::Status status = tile_assignment_.array().EachStatus(
[&num_devices, &seen_devices](absl::Span<const int64_t> indices,
int32_t device) {
if (num_devices.has_value() && device >= *num_devices) {
return absl::InvalidArgumentError(
absl::StrCat("device ", device, " > num_devices (",
*num_devices, ") in tile assignment"));
} else if (seen_devices.contains(device)) {
return absl::InvalidArgumentError(absl::StrCat(
"device ", device, " is not unique in tile assignment"));
}
seen_devices.insert(device);
return absl::OkStatus();
});
TF_RETURN_IF_ERROR(status);
all_devices_seen =
!num_devices.has_value() || seen_devices.size() == *num_devices;
} else {
all_devices_seen = !num_devices.has_value() ||
tile_assignment_.iota_->num_elements() == *num_devices;
}
if (IsTileMaximal() || IsManual() || IsUnknown()) {
return absl::OkStatus();
}
if (shape.rank() != TiledDataRank()) {
return tsl::errors::InvalidArgument(
"Number of tile assignment dimensions (excluding subgroups) is "
"different than the input rank. "
"sharding=",
ToString(), ", input_shape=", ShapeUtil::HumanString(shape));
}
if (!all_devices_seen) {
return tsl::errors::InvalidArgument("tile_assignment should have ",
*num_devices, " devices");
}
if (tile_assignment_.num_elements() == 1) {
return tsl::errors::InvalidArgument(
"Tile assignment only contains a single device. If a replicated "
"sharding was intended, use HloSharding::Replicated(). If a device "
"placement was intended, use HloSharding::AssignDevice()");
}
return absl::OkStatus();
}
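// Reconstructs an HloSharding from its OpSharding proto, handling tuple,
// replicated, manual, unknown, maximal, iota-encoded and explicit tile
// assignments, and restoring any shard-group information.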
absl::StatusOr<HloSharding> HloSharding::FromProto(
const OpSharding& proto) {
std::vector<OpMetadata> metadata(proto.metadata().begin(),
proto.metadata().end());
std::vector<int> subgroup_types_int(proto.last_tile_dims().begin(),
proto.last_tile_dims().end());
std::vector<OpSharding::Type> subgroup_types;
absl::c_transform(
subgroup_types_int, std::back_inserter(subgroup_types),
[](const int type) { return static_cast<OpSharding::Type>(type); });
if (proto.type() == OpSharding::TUPLE) {
TF_RET_CHECK(metadata.empty())
<< "Tuple sharding is expected to have no metadata.";
std::vector<HloSharding> tuple_shardings;
tuple_shardings.reserve(proto.tuple_shardings().size());
for (const OpSharding& tuple_sharding_proto : proto.tuple_shardings()) {
TF_ASSIGN_OR_RETURN(HloSharding sharding,
HloSharding::FromProto(tuple_sharding_proto));
tuple_shardings.push_back(std::move(sharding));
}
return std::move(
HloSharding(std::move(tuple_shardings)).SetShardGroupFromProto(proto));
} else if (proto.type() == OpSharding::REPLICATED) {
return std::move(Replicate(metadata).SetShardGroupFromProto(proto));
} else if (proto.type() == OpSharding::MANUAL) {
return std::move(Manual(metadata).SetShardGroupFromProto(proto));
} else if (proto.type() == OpSharding::UNKNOWN) {
return std::move(Unknown(metadata).SetShardGroupFromProto(proto));
} else if (proto.tile_assignment_devices().size() == 1) {
return std::move(HloSharding(proto.tile_assignment_devices(0), metadata)
.SetShardGroupFromProto(proto));
} else if (!proto.iota_reshape_dims().empty() &&
absl::c_all_of(proto.iota_reshape_dims(),
[](int64_t d) { return d == 1; })) {
return std::move(HloSharding(0, metadata).SetShardGroupFromProto(proto));
}
TF_RET_CHECK(proto.type() != OpSharding::MAXIMAL)
<< "Maximal sharding is expected to have single device assignment, but "
<< proto.tile_assignment_devices().size() << " has provided.";
const bool use_iota_tile_assignments = !proto.iota_reshape_dims().empty();
if (use_iota_tile_assignments) {
TF_RET_CHECK(proto.tile_assignment_devices().empty());
TF_RET_CHECK(proto.iota_reshape_dims().size() ==
proto.iota_transpose_perm().size());
} else {
TF_RET_CHECK(proto.tile_assignment_devices().size() > 1)
<< proto.ShortDebugString();
}
TF_RET_CHECK(!proto.tile_assignment_dimensions().empty());
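// Multiplies the given dimensions, reporting an error if any multiplication
// overflows int64_t.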
auto product_no_overflow =
[](absl::Span<const int64_t> dims) -> absl::StatusOr<int64_t> {
int64_t product_of_dimensions = 1;
bool any_overflow = false;
for (auto dimension : dims) {
bool overflow = false;
std::tie(product_of_dimensions, overflow) =
OverflowSafeMultiply(product_of_dimensions, dimension);
any_overflow |= overflow;
}
TF_RET_CHECK(!any_overflow);
return product_of_dimensions;
};
TF_ASSIGN_OR_RETURN(int64_t product_of_dimensions,
product_no_overflow(proto.tile_assignment_dimensions()));
if (use_iota_tile_assignments) {
TF_ASSIGN_OR_RETURN(int64_t product_of_iota_dimensions,
product_no_overflow(proto.iota_reshape_dims()));
TF_RET_CHECK(product_of_dimensions == product_of_iota_dimensions);
} else {
TF_RET_CHECK(product_of_dimensions ==
proto.tile_assignment_devices().size());
}
auto create_tile_assignment = [&] {
if (use_iota_tile_assignments) {
return TileAssignment(proto.tile_assignment_dimensions(),
proto.iota_reshape_dims(),
proto.iota_transpose_perm());
}
auto tiles =
std::make_shared<Array<int64_t>>(proto.tile_assignment_dimensions());
absl::c_copy(proto.tile_assignment_devices(), tiles->begin());
return TileAssignment(std::move(tiles));
};
if (!subgroup_types.empty()) {
TF_RET_CHECK(!proto.replicate_on_last_tile_dim());
return std::move(
Subgroup(create_tile_assignment(), subgroup_types, metadata)
.SetShardGroupFromProto(proto));
}
if (proto.replicate_on_last_tile_dim()) {
return std::move(PartialTile(create_tile_assignment(), metadata)
.SetShardGroupFromProto(proto));
}
return std::move(HloSharding(create_tile_assignment(),
false, metadata)
.SetShardGroupFromProto(proto));
}
OpSharding HloSharding::ToProto() const {
OpSharding result;
if (IsTuple()) {
CHECK(metadata_.empty());
for (const HloSharding& element : tuple_elements_) {
*result.add_tuple_shardings() = element.ToProto();
}
result.set_type(OpSharding::TUPLE);
return result;
}
result.mutable_metadata()->Reserve(metadata_.size());
for (const auto& metadata : metadata_) {
*result.add_metadata() = metadata;
}
result.mutable_tile_assignment_dimensions()->Reserve(
tile_assignment_.num_dimensions());
absl::c_copy(tile_assignment_.dimensions(),
tsl::protobuf::RepeatedFieldBackInserter(
result.mutable_tile_assignment_dimensions()));
if (tile_assignment_.iota_) {
result.mutable_iota_reshape_dims()->Reserve(
tile_assignment_.iota_->reshape_dims().size());
absl::c_copy(tile_assignment_.iota_->reshape_dims(),
tsl::protobuf::RepeatedFieldBackInserter(
result.mutable_iota_reshape_dims()));
result.mutable_iota_transpose_perm()->Reserve(
tile_assignment_.iota_->transpose_perm().size());
absl::c_copy(tile_assignment_.iota_->transpose_perm(),
tsl::protobuf::RepeatedFieldBackInserter(
result.mutable_iota_transpose_perm()));
} else {
result.mutable_tile_assignment_devices()->Reserve(
tile_assignment_.num_elements());
absl::c_copy(tile_assignment_.array(),
tsl::protobuf::RepeatedFieldBackInserter(
result.mutable_tile_assignment_devices()));
}
if (IsReplicated()) {
result.set_type(OpSharding::REPLICATED);
result.clear_tile_assignment_dimensions();
} else if (IsTileMaximal()) {
result.set_type(OpSharding::MAXIMAL);
} else if (IsManual()) {
result.set_type(OpSharding::MANUAL);
result.clear_tile_assignment_dimensions();
} else if (IsUnknown()) {
result.set_type(OpSharding::UNKNOWN);
result.clear_tile_assignment_dimensions();
} else {
result.set_type(OpSharding::OTHER);
result.set_replicate_on_last_tile_dim(ReplicateOnLastTileDim());
for (auto type : subgroup_types_) {
result.add_last_tile_dims(type);
}
}
if (IsShardGroup()) {
result.set_is_shard_group(true);
result.set_shard_group_id(shard_group_.shard_group_id);
if (shard_group_.shard_as) {
result.set_shard_group_type(OpSharding::AS);
} else {
result.set_shard_group_type(OpSharding::LIKE);
}
}
return result;
}
Shape HloSharding::TileShape(const Shape& shape) const {
if (IsTileMaximal() || IsManual() || IsUnknown()) {
return shape;
}
Shape result_shape = shape;
for (int64_t i = 0; i < TiledDataRank(); ++i) {
result_shape.set_dimensions(
i, CeilOfRatio<int64_t>(shape.dimensions(i), tile_assignment_.dim(i)));
}
return result_shape;
}
Shape HloSharding::TileShape(const Shape& shape, int64_t device) const {
if (IsTileMaximal() || IsManual() || IsUnknown()) {
return shape;
}
std::vector<int64_t> index = TileIndexForDevice(device);
Shape result_shape = shape;
for (int64_t i = 0; i < index.size(); ++i) {
const int64_t shape_dim = shape.dimensions(i);
int64_t offset = std::min(
index[i] * CeilOfRatio(shape_dim, tile_assignment_.dim(i)), shape_dim);
int64_t limit = std::min(
(index[i] + 1) * CeilOfRatio(shape_dim, tile_assignment_.dim(i)),
shape_dim);
result_shape.set_dimensions(i, limit - offset);
}
return result_shape;
}
int64_t HloSharding::TotalNumTiles() const {
if (IsTileMaximal()) {
return 1;
}
CHECK(!IsManual());
CHECK(!IsUnknown());
return Product(absl::Span<const int64_t>(tile_assignment_.dimensions()));
}
int64_t HloSharding::NumTiles() const {
if (IsTileMaximal()) {
return 1;
}
CHECK(!IsManual());
CHECK(!IsUnknown());
return Product(absl::Span<const int64_t>(tile_assignment_.dimensions())
.subspan(0, TiledDataRank()));
}
int64_t HloSharding::NumTilesLeaf() const {
DCHECK(!IsTuple());
if (IsTileMaximalLeaf()) {
return 1;
}
CHECK(!IsManualLeaf() && !IsUnknownLeaf());
return Product(absl::Span<const int64_t>(tile_assignment_.dimensions())
.subspan(0, TiledDataRankLeaf()));
}
int64_t HloSharding::NumTiles(absl::Span<const int64_t> dims) const {
if (IsTileMaximal()) {
return 1;
}
CHECK(!IsManual());
CHECK(!ReplicateOnLastTileDim() ||
!absl::c_linear_search(dims, tile_assignment().num_dimensions() - 1));
int64_t num_tiles = 1;
for (auto d : dims) {
CHECK(d < tile_assignment().num_dimensions());
num_tiles *= tile_assignment().dim(d);
}
return num_tiles;
}
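// Returns the sharding of the tuple element at `index`, flattening nested
// tuple shapes to locate the matching range of leaf shardings.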
HloSharding HloSharding::GetSubSharding(const Shape& shape,
const ShapeIndex& index) const {
CHECK(IsTuple());
int64_t sharding_index = 0;
const Shape* sub_shape = &shape;
for (int64_t idx : index) {
for (int64_t i = 0; i < idx; ++i) {
sharding_index += ShapeUtil::GetLeafCount(
ShapeUtil::GetSubshapeOneIndex(*sub_shape, i));
}
sub_shape = &ShapeUtil::GetSubshapeOneIndex(*sub_shape, idx);
}
if (sub_shape->IsTuple()) {
auto begin_it = tuple_elements_.begin() + sharding_index;
return HloSharding::Tuple(
*sub_shape,
absl::MakeConstSpan(
&*begin_it,
&*(begin_it + ShapeUtil::GetLeafCountTuple(*sub_shape))));
} else {
return tuple_elements_[sharding_index];
}
}
std::optional<HloSharding> HloSharding::ExtractSingleSharding() const {
if (!IsTuple()) {
return *this;
}
if (tuple_elements_.empty()) {
return std::nullopt;
}
for (int64_t i = 1; i < tuple_elements_.size(); ++i) {
if (tuple_elements_[0] != tuple_elements_[i]) {
return std::nullopt;
}
}
return tuple_elements_.front();
}
HloSharding HloSharding::WithMetadata(absl::Span<const OpMetadata> metadata,
bool overwrite) const {
auto assign_metadata = [&](HloSharding& sharding) {
if (sharding.metadata_.empty() || overwrite) {
sharding.metadata_.assign(metadata.begin(), metadata.end());
}
};
HloSharding sharding = *this;
if (sharding.IsTuple()) {
for (HloSharding& sub_sharding : sharding.tuple_elements()) {
assign_metadata(sub_sharding);
}
} else {
assign_metadata(sharding);
}
return sharding;
}
HloSharding HloSharding::WithoutMetadata() const {
HloSharding sharding = *this;
sharding.metadata_.clear();
for (HloSharding& sub_sharding : sharding.tuple_elements()) {
sub_sharding.metadata_.clear();
}
return sharding;
}
std::ostream& operator<<(std::ostream& out, const HloSharding& sharding) {
out << sharding.ToString();
return out;
}
} | #include <algorithm>
#include <set>
#include <sstream>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/hash/hash.h"
#include "xla/protobuf_util.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
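// Builds an Array<int64_t> with the given dimensions, filled in order from
// `contents`.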
Array<int64_t> MakeArray(absl::Span<const int64_t> dimensions,
absl::Span<const int64_t> contents) {
Array<int64_t> a(dimensions);
std::copy(contents.begin(), contents.end(), a.begin());
return a;
}
OpMetadata GetMetadata(const std::string& op_name) {
OpMetadata metadata;
metadata.set_op_name(op_name);
return metadata;
}
std::vector<OpMetadata> SingleMetadata() { return {GetMetadata("a")}; }
std::vector<OpMetadata> ListMetadata() {
return {GetMetadata("b"), GetMetadata("c")};
}
class HloShardingTest : public HloTestBase {};
TEST_F(HloShardingTest, Replicate) {
HloSharding sharding = HloSharding::Replicate();
EXPECT_TRUE(sharding.IsReplicated());
EXPECT_TRUE(sharding.IsTileMaximal());
EXPECT_TRUE(sharding.UsesDevice(0));
EXPECT_TRUE(sharding.UsesDevice(65535));
HloSharding other = HloSharding::Replicate();
EXPECT_EQ(other, sharding);
EXPECT_IS_OK(sharding.Validate(ShapeUtil::MakeShape(U32, {4}),
2));
EXPECT_FALSE(sharding.HasUniqueDevice());
}
TEST_F(HloShardingTest, DevicePlacement) {
HloSharding sharding = HloSharding::AssignDevice(5);
EXPECT_FALSE(sharding.IsReplicated());
EXPECT_TRUE(sharding.IsTileMaximal());
EXPECT_FALSE(sharding.UsesDevice(0));
EXPECT_TRUE(sharding.UsesDevice(5));
EXPECT_EQ(5, sharding.GetUniqueDevice());
HloSharding other = HloSharding::Replicate();
EXPECT_NE(other, sharding);
EXPECT_IS_OK(sharding.Validate(ShapeUtil::MakeShape(U32, {4}),
6));
EXPECT_IS_NOT_OK(
sharding.Validate(ShapeUtil::MakeShape(U32, {4}), 5));
ShapeTree<HloSharding> shape_tree =
sharding.GetAsShapeTree(ShapeUtil::MakeShape(U32, {4}));
EXPECT_EQ(shape_tree.element({}), sharding);
EXPECT_TRUE(shape_tree.IsLeaf({}));
}
TEST_F(HloShardingTest, ProtoRoundTrip) {
OpSharding proto;
proto.set_type(OpSharding::TUPLE);
auto* tiled = proto.add_tuple_shardings();
tiled->set_type(OpSharding::OTHER);
tiled->add_tile_assignment_devices(0);
tiled->add_tile_assignment_devices(1);
tiled->add_tile_assignment_dimensions(1);
tiled->add_tile_assignment_dimensions(2);
*tiled->add_metadata() = GetMetadata("a");
*tiled->add_metadata() = GetMetadata("b");
auto* replicated = proto.add_tuple_shardings();
replicated->set_type(OpSharding::REPLICATED);
*replicated->add_metadata() = GetMetadata("c");
auto* manual = proto.add_tuple_shardings();
manual->set_type(OpSharding::MANUAL);
HloSharding sharding = HloSharding::FromProto(proto).value();
EXPECT_TRUE(protobuf_util::ProtobufEquals(proto, sharding.ToProto()));
}
TEST_F(HloShardingTest, IotaProtoRoundTrip) {
OpSharding proto;
proto.set_type(OpSharding::TUPLE);
auto* tiled = proto.add_tuple_shardings();
tiled->set_type(OpSharding::OTHER);
tiled->add_tile_assignment_dimensions(6);
tiled->add_tile_assignment_dimensions(1);
tiled->add_iota_reshape_dims(3);
tiled->add_iota_reshape_dims(2);
tiled->add_iota_transpose_perm(1);
tiled->add_iota_transpose_perm(0);
*tiled->add_metadata() = GetMetadata("a");
*tiled->add_metadata() = GetMetadata("b");
auto* replicated = proto.add_tuple_shardings();
replicated->set_type(OpSharding::REPLICATED);
*replicated->add_metadata() = GetMetadata("c");
auto* manual = proto.add_tuple_shardings();
manual->set_type(OpSharding::MANUAL);
HloSharding sharding = HloSharding::FromProto(proto).value();
EXPECT_TRUE(protobuf_util::ProtobufEquals(proto, sharding.ToProto()));
}
TEST_F(HloShardingTest, Tile) {
{
HloSharding sharding = HloSharding::Tile(MakeArray({2, 2}, {0, 0, 2, 3}));
EXPECT_IS_NOT_OK(sharding.Validate(ShapeUtil::MakeShape(F32, {4, 6}),
4));
}
{
HloSharding sharding = HloSharding::Tile(MakeArray({2, 2}, {0, 1, 2, 3}));
EXPECT_IS_NOT_OK(sharding.Validate(ShapeUtil::MakeShape(U32, {4, 6}),
2));
}
{
HloSharding sharding = HloSharding::Tile(MakeArray({2, 2}, {0, 1, 2, 3}));
EXPECT_IS_NOT_OK(sharding.Validate(ShapeUtil::MakeShape(U32, {4, 6}),
5));
}
{
Shape shape = ShapeUtil::MakeShape(U32, {4, 5});
HloSharding sharding = HloSharding::Tile(MakeArray({2, 2}, {0, 3, 2, 1}));
EXPECT_IS_OK(sharding.Validate(ShapeUtil::MakeShape(F32, {3, 5}),
4));
EXPECT_EQ(0, sharding.DeviceForTileIndex({0, 0}));
EXPECT_EQ(3, sharding.DeviceForTileIndex({0, 1}));
EXPECT_EQ(2, sharding.DeviceForTileIndex({1, 0}));
EXPECT_EQ(1, sharding.DeviceForTileIndex({1, 1}));
EXPECT_EQ(sharding.TileOffsetForDevice(shape, 0),
(std::vector<int64_t>{0, 0}));
EXPECT_EQ(sharding.TileOffsetForDevice(shape, 3),
(std::vector<int64_t>{0, 3}));
EXPECT_EQ(sharding.TileOffsetForDevice(shape, 2),
(std::vector<int64_t>{2, 0}));
EXPECT_EQ(sharding.TileOffsetForDevice(shape, 1),
(std::vector<int64_t>{2, 3}));
EXPECT_FALSE(sharding.HasUniqueDevice());
}
}
TEST_F(HloShardingTest, V1V2TileEquivalence) {
{
HloSharding v1 = HloSharding::Tile(MakeArray({2, 2}, {0, 1, 2, 3}));
HloSharding v2 = HloSharding::IotaTile({2, 2});
EXPECT_EQ(v1, v2);
EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
{
HloSharding v1 = HloSharding::Tile(MakeArray({2, 2}, {0, 2, 1, 3}));
HloSharding v2 = HloSharding::IotaTile({2, 2}, {2, 2}, {1, 0});
EXPECT_EQ(v1, v2);
EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
{
HloSharding v1 =
HloSharding::Tile(MakeArray({2, 2, 2}, {0, 2, 4, 6, 1, 3, 5, 7}));
HloSharding v2 = HloSharding::IotaTile({2, 2, 2}, {2, 2, 2}, {2, 0, 1});
EXPECT_EQ(v1, v2);
EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
}
TEST_F(HloShardingTest, V1V2PartialTileEquivalence) {
{
HloSharding v1 = HloSharding::PartialTile(MakeArray({2, 2}, {0, 1, 2, 3}));
HloSharding v2 = HloSharding::PartialTile(TileAssignment({2, 2}));
EXPECT_EQ(v1, v2);
EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
{
HloSharding v1 = HloSharding::PartialTile(MakeArray({2, 2}, {0, 2, 1, 3}));
HloSharding v2 =
HloSharding::PartialTile(TileAssignment({2, 2}, {2, 2}, {1, 0}));
EXPECT_EQ(v1, v2);
EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
{
HloSharding v1 = HloSharding::PartialTile(
MakeArray({2, 2, 2}, {0, 2, 4, 6, 1, 3, 5, 7}));
HloSharding v2 = HloSharding::PartialTile(
TileAssignment({2, 2, 2}, {2, 2, 2}, {2, 0, 1}));
EXPECT_EQ(v1, v2);
EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
}
TEST_F(HloShardingTest, V1V2SubgroupEquivalence) {
{
HloSharding v1 =
HloSharding::Subgroup(MakeArray({2, 2}, {0, 1, 2, 3}),
{OpSharding::MANUAL, OpSharding::REPLICATED});
HloSharding v2 = HloSharding::Subgroup(
TileAssignment({2, 2}), {OpSharding::MANUAL, OpSharding::REPLICATED});
EXPECT_EQ(v1, v2);
EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
{
HloSharding v1 =
HloSharding::Subgroup(MakeArray({2, 2}, {0, 2, 1, 3}),
{OpSharding::MANUAL, OpSharding::REPLICATED});
HloSharding v2 =
HloSharding::Subgroup(TileAssignment({2, 2}, {2, 2}, {1, 0}),
{OpSharding::MANUAL, OpSharding::REPLICATED});
EXPECT_EQ(v1, v2);
EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
{
HloSharding v1 =
HloSharding::Subgroup(MakeArray({2, 2, 2}, {0, 2, 4, 6, 1, 3, 5, 7}),
{OpSharding::MANUAL, OpSharding::REPLICATED});
HloSharding v2 =
HloSharding::Subgroup(TileAssignment({2, 2, 2}, {2, 2, 2}, {2, 0, 1}),
{OpSharding::MANUAL, OpSharding::REPLICATED});
EXPECT_EQ(v1, v2);
EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
}
TEST_F(HloShardingTest, EmptySingleTuple) {
HloSharding sharding = HloSharding::SingleTuple(ShapeUtil::MakeTupleShape({}),
HloSharding::AssignDevice(0));
EXPECT_TRUE(sharding.ExtractSingleSharding());
}
TEST_F(HloShardingTest, EmptySingleTupleIsNotShardGroup) {
HloSharding sharding = HloSharding::SingleTuple(ShapeUtil::MakeTupleShape({}),
HloSharding::AssignDevice(0));
EXPECT_FALSE(sharding.IsShardGroup());
EXPECT_FALSE(sharding.IsShardAs());
EXPECT_FALSE(sharding.IsShardLike());
}
TEST_F(HloShardingTest, NestedTuple) {
Shape nested_tuple_shape = ShapeUtil::MakeTupleShape({
ShapeUtil::MakeShape(F32, {}),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {3})}),
ShapeUtil::MakeShape(F32, {4, 6}),
});
HloSharding tiled_sharding = HloSharding::Tile(Array<int64_t>({{0, 1}}));
OpSharding proto;
proto.set_type(OpSharding::TUPLE);
*proto.add_tuple_shardings() = HloSharding::Replicate().ToProto();
*proto.add_tuple_shardings() = HloSharding::AssignDevice(0).ToProto();
*proto.add_tuple_shardings() = tiled_sharding.ToProto();
HloSharding tuple_sharding = HloSharding::FromProto(proto).value();
ShapeTree<HloSharding> shape_tree =
tuple_sharding.GetAsShapeTree(nested_tuple_shape);
EXPECT_EQ(shape_tree.element({0}), HloSharding::Replicate());
EXPECT_EQ(shape_tree.element({1, 0}), HloSharding::AssignDevice(0));
EXPECT_EQ(shape_tree.element({2}), tiled_sharding);
EXPECT_IS_OK(tuple_sharding.Validate(nested_tuple_shape, 2));
EXPECT_IS_NOT_OK(tuple_sharding.Validate(ShapeUtil::MakeTupleShape({}),
5));
EXPECT_IS_NOT_OK(tuple_sharding.Validate(ShapeUtil::MakeShape(F32, {}),
5));
}
TEST_F(HloShardingTest, NormalizeTrivialSubgroupToManual) {
HloSharding sharding =
HloSharding::Subgroup(MakeArray({1, 2, 1}, {0, 1}),
{OpSharding::MANUAL, OpSharding::REPLICATED});
EXPECT_TRUE(sharding.IsManual());
}
TEST_F(HloShardingTest, Hash) {
auto hash_compare_equal = [](const HloSharding& a, const HloSharding& b) {
if (absl::HashOf(a) != absl::HashOf(b)) {
return false;
}
return a == b;
};
{
HloSharding sharding1 = HloSharding::Replicate();
HloSharding sharding2 = HloSharding::Replicate();
EXPECT_TRUE(hash_compare_equal(sharding1, sharding2));
}
{
HloSharding sharding1 = HloSharding::AssignDevice(1);
HloSharding sharding2 = HloSharding::AssignDevice(1);
EXPECT_TRUE(hash_compare_equal(sharding1, sharding2));
}
{
HloSharding sharding1 = HloSharding::AssignDevice(1);
HloSharding sharding2 = HloSharding::AssignDevice(2);
EXPECT_FALSE(hash_compare_equal(sharding1, sharding2));
}
{
HloSharding sharding1 = HloSharding::Tile(MakeArray({2, 2}, {0, 3, 2, 1}));
HloSharding sharding2 = HloSharding::Tile(MakeArray({2, 2}, {0, 3, 2, 1}));
EXPECT_TRUE(hash_compare_equal(sharding1, sharding2));
}
{
HloSharding sharding1 = HloSharding::IotaTile({3, 4});
HloSharding sharding2 = HloSharding::Tile(
MakeArray({3, 4}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}));
EXPECT_TRUE(hash_compare_equal(sharding1, sharding2));
}
HloSharding default_sharding = HloSharding::Replicate();
{
ShapeTree<HloSharding> shape_tree(ShapeUtil::MakeTupleShape({}),
default_sharding);
HloSharding sharding1 = HloSharding::Replicate();
HloSharding sharding2 = HloSharding::Tuple(shape_tree);
EXPECT_FALSE(hash_compare_equal(sharding1, sharding2));
}
{
ShapeTree<HloSharding> shape_tree(ShapeUtil::MakeTupleShape({}),
default_sharding);
HloSharding sharding1 = HloSharding::Tuple(shape_tree);
HloSharding sharding2 = HloSharding::Tuple(shape_tree);
EXPECT_TRUE(hash_compare_equal(sharding1, sharding2));
}
{
ShapeTree<HloSharding> shape_tree1(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {4})}),
default_sharding);
*shape_tree1.mutable_element({0}) = HloSharding::Replicate();
ShapeTree<HloSharding> shape_tree2(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {4})}),
default_sharding);
*shape_tree2.mutable_element({0}) = HloSharding::AssignDevice(0);
HloSharding sharding1 = HloSharding::Tuple(shape_tree1);
HloSharding sharding2 = HloSharding::Tuple(shape_tree2);
EXPECT_FALSE(hash_compare_equal(sharding1, sharding2));
}
{
ShapeTree<HloSharding> shape_tree1(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {4})}),
default_sharding);
*shape_tree1.mutable_element({0}) = HloSharding::AssignDevice(0);
ShapeTree<HloSharding> shape_tree2(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {4})}),
default_sharding);
*shape_tree2.mutable_element({0}) = HloSharding::AssignDevice(0);
HloSharding sharding1 = HloSharding::Tuple(shape_tree1);
HloSharding sharding2 = HloSharding::Tuple(shape_tree2);
EXPECT_TRUE(hash_compare_equal(sharding1, sharding2));
}
}
using ShardingWithMetadataParamType =
std::tuple<std::vector<OpMetadata>, std::string>;
TEST_F(HloShardingTest, ToStringReplicatedTest) {
HloSharding sharding = HloSharding::Replicate();
EXPECT_EQ(sharding.ToString(), "{replicated}");
}
class HloReplicateShardingWithMetadataTest
: public ::testing::TestWithParam<ShardingWithMetadataParamType> {};
TEST_P(HloReplicateShardingWithMetadataTest, ToStringTest) {
HloSharding sharding = HloSharding::Replicate(std::get<0>(GetParam()));
EXPECT_EQ(sharding.ToString(false), "{replicated}");
EXPECT_EQ(sharding.ToString(true),
std::get<1>(GetParam()));
}
INSTANTIATE_TEST_SUITE_P(
ToString, HloReplicateShardingWithMetadataTest,
::testing::Values(
std::make_tuple(std::vector<OpMetadata>(), "{replicated}"),
std::make_tuple(SingleMetadata(),
"{replicated metadata={op_name=\"a\"}}"),
std::make_tuple(
ListMetadata(),
"{replicated metadata={{op_name=\"b\"}, {op_name=\"c\"}}}")));
TEST_F(HloShardingTest, ToStringAssignDeviceTest) {
HloSharding sharding = HloSharding::AssignDevice(7);
EXPECT_EQ(sharding.ToString(), "{maximal device=7}");
}
class HloAssignDeviceShardingWithMetadataTest
: public ::testing::TestWithParam<ShardingWithMetadataParamType> {};
TEST_P(HloAssignDeviceShardingWithMetadataTest, ToStringTest) {
HloSharding sharding = HloSharding::AssignDevice(7, std::get<0>(GetParam()));
EXPECT_EQ(sharding.ToString(false),
"{maximal device=7}");
EXPECT_EQ(sharding.ToString(true),
std::get<1>(GetParam()));
}
INSTANTIATE_TEST_SUITE_P(
ToString, HloAssignDeviceShardingWithMetadataTest,
::testing::Values(
std::make_tuple(std::vector<OpMetadata>(), "{maximal device=7}"),
std::make_tuple(SingleMetadata(),
"{maximal device=7 metadata={op_name=\"a\"}}"),
std::make_tuple(
ListMetadata(),
"{maximal device=7 metadata={{op_name=\"b\"}, {op_name=\"c\"}}}")));
TEST_F(HloShardingTest, ToStringTiledTest) {
HloSharding sharding =
HloSharding::Tile(Array3D<int64_t>({{{2, 3}}, {{5, 7}}}));
EXPECT_EQ(sharding.ToString(), "{devices=[2,1,2]2,3,5,7}");
}
TEST_F(HloShardingTest, ToStringIotaTiledTest) {
HloSharding sharding = HloSharding::IotaTile({3, 4}, {2, 2, 3}, {2, 1, 0});
EXPECT_EQ(sharding.ToString(), "{devices=[3,4]<=[2,2,3]T(2,1,0)}");
}
class HloTiledShardingWithMetadataTest
: public ::testing::TestWithParam<ShardingWithMetadataParamType> {};
TEST_P(HloTiledShardingWithMetadataTest, ToStringTest) {
HloSharding sharding = HloSharding::Tile(
Array3D<int64_t>({{{2, 3}}, {{5, 7}}}), std::get<0>(GetParam()));
EXPECT_EQ(sharding.ToString(false),
"{devices=[2,1,2]2,3,5,7}");
EXPECT_EQ(sharding.ToString(true),
std::get<1>(GetParam()));
}
INSTANTIATE_TEST_SUITE_P(
ToString, HloTiledShardingWithMetadataTest,
::testing::Values(
std::make_tuple(std::vector<OpMetadata>(), "{devices=[2,1,2]2,3,5,7}"),
std::make_tuple(SingleMetadata(),
"{devices=[2,1,2]2,3,5,7 metadata={op_name=\"a\"}}"),
std::make_tuple(ListMetadata(),
"{devices=[2,1,2]2,3,5,7 metadata={{op_name=\"b\"}, "
"{op_name=\"c\"}}}")));
TEST_F(HloShardingTest, ToStringTupleTest) {
HloSharding sharding = HloSharding::Tuple(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {3, 5}),
ShapeUtil::MakeShape(U32, {7, 25}),
ShapeUtil::MakeShape(S32, {9, 11})}),
{HloSharding::Replicate(), HloSharding::Tile(Array2D<int64_t>({{3, 5}})),
HloSharding::AssignDevice(3)});
EXPECT_EQ(sharding.ToString(),
"{{replicated}, {devices=[1,2]3,5}, {maximal device=3}}");
}
TEST_F(HloShardingTest, ToStringTupleWithMetadataTest) {
auto metadata = SingleMetadata();
HloSharding sharding = HloSharding::Tuple(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {3, 5}),
ShapeUtil::MakeShape(U32, {7, 25}),
ShapeUtil::MakeShape(S32, {9, 11})}),
{HloSharding::Replicate({GetMetadata("d")}),
HloSharding::Tile(Array2D<int64_t>({{3, 5}})),
HloSharding::AssignDevice(3, {GetMetadata("e")})});
EXPECT_EQ(sharding.ToString(false),
"{{replicated}, {devices=[1,2]3,5}, {maximal device=3}}");
EXPECT_EQ(sharding.ToString(true),
"{{replicated metadata={op_name=\"d\"}}, {devices=[1,2]3,5}, "
"{maximal device=3 metadata={op_name=\"e\"}}}");
}
TEST_F(HloShardingTest, OstreamTest) {
HloSharding sharding =
HloSharding::Tile(Array4D<int64_t>({{{{0, 1}, {2, 3}}}}));
std::ostringstream oss;
oss << sharding;
EXPECT_EQ(oss.str(), "{devices=[1,1,2,2]0,1,2,3}");
}
class HloParseShardingWithMetadataTest
: public ::testing::TestWithParam<std::vector<OpMetadata>> {};
TEST_P(HloParseShardingWithMetadataTest, ParseHloString) {
auto check = [](const HloSharding& sharding) {
TF_ASSERT_OK_AND_ASSIGN(
auto parsed_sharding,
ParseSharding(sharding.ToString(true)));
EXPECT_EQ(sharding, parsed_sharding);
};
check(HloSharding::Replicate(GetParam()));
check(HloSharding::AssignDevice(2, GetParam()));
check(HloSharding::Tile(Array4D<int64_t>({{{{0}, {1}}}}), GetParam()));
check(HloSharding::Tuple(ShapeUtil::MakeTupleShape({}),
{HloSharding::Replicate(GetParam())}));
{
auto tuple_shape =
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {3, 1, 5, 7}),
ShapeUtil::MakeShape(F32, {3, 5, 7}),
ShapeUtil::MakeShape(F32, {3, 7})});
check(HloSharding::Tuple(
tuple_shape,
{HloSharding::Tile(Array4D<int64_t>({{{{0}, {1}}}})),
HloSharding::Replicate(GetParam()), HloSharding::AssignDevice(1)}));
}
{
auto tuple_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {3, 1, 5, 7}),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {3, 5, 7}),
ShapeUtil::MakeShape(F32, {3, 7})})});
std::vector<HloSharding> leaf_shardings = {
HloSharding::Tile(Array4D<int64_t>({{{{0}, {1}}}})),
HloSharding::Replicate(), HloSharding::AssignDevice(1, GetParam())};
ShapeTree<HloSharding> sharding_tree(tuple_shape, HloSharding::Replicate());
auto it = leaf_shardings.begin();
for (auto& index_to_sharding : sharding_tree.leaves()) {
index_to_sharding.second = *it++;
}
check(HloSharding::Tuple(sharding_tree));
}
}
INSTANTIATE_TEST_SUITE_P(ParseHloString, HloParseShardingWithMetadataTest,
::testing::Values(std::vector<OpMetadata>(),
SingleMetadata(), ListMetadata()));
TEST_F(HloShardingTest, WithMetadataNoOverwrite) {
{
HloSharding sharding = HloSharding::Replicate();
auto sharding_new_metadata =
sharding.WithMetadata(SingleMetadata(), false);
ASSERT_EQ(sharding_new_metadata.metadata().size(), 1);
EXPECT_TRUE(protobuf_util::ProtobufEquals(
sharding_new_metadata.metadata().front(), SingleMetadata().front()));
}
{
HloSharding sharding = HloSharding::AssignDevice(7, SingleMetadata());
auto sharding_new_metadata =
sharding.WithMetadata(ListMetadata(), false);
ASSERT_EQ(sharding_new_metadata.metadata().size(), 1);
EXPECT_TRUE(protobuf_util::ProtobufEquals(
sharding.metadata().front(), sharding_new_metadata.metadata().front()));
}
{
HloSharding sharding = HloSharding::Tuple(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {3, 5}),
ShapeUtil::MakeShape(U32, {7, 25}),
ShapeUtil::MakeShape(S32, {9, 11})}),
{HloSharding::Replicate(SingleMetadata()),
HloSharding::Tile(Array2D<int64_t>({{3, 5}})),
HloSharding::AssignDevice(3, SingleMetadata())});
auto sharding_new_metadata =
sharding.WithMetadata(ListMetadata(), false);
EXPECT_TRUE(sharding_new_metadata.metadata().empty());
ASSERT_TRUE(sharding_new_metadata.IsTuple());
ASSERT_EQ(sharding_new_metadata.tuple_elements().size(), 3);
ASSERT_EQ(sharding_new_metadata.tuple_elements()[0].metadata().size(), 1);
EXPECT_TRUE(protobuf_util::ProtobufEquals(
sharding_new_metadata.tuple_elements()[0].metadata().front(),
SingleMetadata().front()));
ASSERT_EQ(sharding_new_metadata.tuple_elements()[1].metadata().size(), 2);
for (int i = 0; i < 2; ++i) {
EXPECT_TRUE(protobuf_util::ProtobufEquals(
sharding_new_metadata.tuple_elements()[1].metadata()[i],
ListMetadata()[i]));
}
ASSERT_EQ(sharding_new_metadata.tuple_elements()[2].metadata().size(), 1);
EXPECT_TRUE(protobuf_util::ProtobufEquals(
sharding_new_metadata.tuple_elements()[2].metadata().front(),
SingleMetadata().front()));
}
}
TEST_F(HloShardingTest, WithMetadataOverwrite) {
{
HloSharding sharding = HloSharding::Replicate();
auto sharding_new_metadata =
sharding.WithMetadata(SingleMetadata(), true);
ASSERT_EQ(sharding_new_metadata.metadata().size(), 1);
EXPECT_TRUE(protobuf_util::ProtobufEquals(
sharding_new_metadata.metadata().front(), SingleMetadata().front()));
}
{
HloSharding sharding = HloSharding::AssignDevice(7, SingleMetadata());
auto sharding_new_metadata =
sharding.WithMetadata(ListMetadata(), true);
ASSERT_EQ(sharding_new_metadata.metadata().size(), 2);
for (int i = 0; i < 2; ++i) {
EXPECT_TRUE(protobuf_util::ProtobufEquals(
sharding_new_metadata.metadata()[i], ListMetadata()[i]));
}
}
{
HloSharding sharding = HloSharding::Tuple(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {3, 5}),
ShapeUtil::MakeShape(U32, {7, 25}),
ShapeUtil::MakeShape(S32, {9, 11})}),
{HloSharding::Replicate(SingleMetadata()),
HloSharding::Tile(Array2D<int64_t>({{3, 5}})),
HloSharding::AssignDevice(3, SingleMetadata())});
auto sharding_new_metadata =
sharding.WithMetadata(ListMetadata(), true);
EXPECT_TRUE(sharding_new_metadata.metadata().empty());
ASSERT_TRUE(sharding_new_metadata.IsTuple());
ASSERT_EQ(sharding_new_metadata.tuple_elements().size(), 3);
for (const auto& sub_sharding : sharding_new_metadata.tuple_elements()) {
ASSERT_EQ(sub_sharding.metadata().size(), 2);
for (int i = 0; i < 2; ++i) {
EXPECT_TRUE(protobuf_util::ProtobufEquals(sub_sharding.metadata()[i],
ListMetadata()[i]));
}
}
}
}
TEST_F(HloShardingTest, WithoutMetadata) {
{
HloSharding sharding = HloSharding::Replicate();
auto sharding_no_metadata = sharding.WithoutMetadata();
EXPECT_TRUE(sharding_no_metadata.metadata().empty());
}
{
HloSharding sharding = HloSharding::AssignDevice(7, SingleMetadata());
auto sharding_no_metadata = sharding.WithoutMetadata();
EXPECT_TRUE(sharding_no_metadata.metadata().empty());
}
{
HloSharding sharding = HloSharding::Tuple(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {3, 5}),
ShapeUtil::MakeShape(U32, {7, 25}),
ShapeUtil::MakeShape(S32, {9, 11})}),
{HloSharding::Replicate(SingleMetadata()),
HloSharding::Tile(Array2D<int64_t>({{3, 5}})),
HloSharding::AssignDevice(3, ListMetadata())});
auto sharding_no_metadata = sharding.WithoutMetadata();
EXPECT_TRUE(sharding_no_metadata.metadata().empty());
ASSERT_TRUE(sharding_no_metadata.IsTuple());
EXPECT_EQ(sharding_no_metadata.tuple_elements().size(), 3);
for (const auto& sub_sharding : sharding_no_metadata.tuple_elements()) {
EXPECT_TRUE(sub_sharding.metadata().empty());
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/hlo_sharding.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_sharding_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
79f4067c-cdba-4150-810e-3c3b509d548f | cpp | tensorflow/tensorflow | mlir_to_hlo | third_party/xla/xla/pjrt/mlir_to_hlo.cc | third_party/xla/xla/pjrt/mlir_to_hlo_test.cc | #include "xla/pjrt/mlir_to_hlo.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Bytecode/BytecodeWriter.h"
#include "mlir/Conversion/ReconcileUnrealizedCasts/ReconcileUnrealizedCasts.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/Extensions/AllExtensions.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/MLProgram/IR/MLProgram.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/Passes.h"
#include "shardy/dialect/sdy/ir/register.h"
#include "stablehlo/dialect/ChloOps.h"
#include "stablehlo/dialect/Register.h"
#include "stablehlo/dialect/Serialization.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "stablehlo/dialect/Version.h"
#include "stablehlo/transforms/Passes.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/translate/mhlo_to_hlo/mlir_hlo_to_hlo.h"
#include "xla/mlir/utils/error_util.h"
#include "xla/mlir_hlo/mhlo/IR/register.h"
#include "xla/mlir_hlo/mhlo/transforms/passes.h"
#include "xla/service/spmd/shardy/constants.h"
#include "xla/service/spmd/shardy/utils.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
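// Converts `module` to an XlaComputation: StableHLO and CHLO ops are first
// legalized to MHLO, then the MHLO module is converted to HLO. When Shardy is
// in use, tuple arguments are signaled via a frontend attribute instead of the
// converter's use_tuple_args option.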
absl::Status MlirToXlaComputation(mlir::ModuleOp module,
XlaComputation& xla_computation,
bool use_tuple_args, bool return_tuple,
bool use_shardy) {
mlir::MLIRContext* context = module->getContext();
mlir::BaseScopedDiagnosticHandler diagnostic_handler(context);
{
mlir::PassManager pm(context);
pm.addPass(mlir::mhlo::createStablehloLegalizeToHloPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::mhlo::createChloLegalizeToHloPass());
pm.addNestedPass<mlir::func::FuncOp>(mlir::createCanonicalizerPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::mhlo::createSinkConstantsToControlFlowPass());
if (failed(pm.run(module))) {
VLOG(1) << "MHLO->HLO lowering passes failed.";
module->dump();
return diagnostic_handler.ConsumeStatus();
}
VLOG(5) << "MHLO module after lowering, before HLO import ";
if (VLOG_IS_ON(5)) {
module->dump();
}
}
if (use_tuple_args && use_shardy) {
sdy::addFrontendAttribute(module, sdy::kUseTupleArgs,
mlir::StringAttr::get(context, "t"));
use_tuple_args = false;
}
mlir::MlirToHloConversionOptions options;
options.use_tuple_args = use_tuple_args;
options.return_tuple = return_tuple;
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> hlo_module,
mlir::ConvertMlirHloToHloModule(module, options));
xla_computation = XlaComputation(hlo_module->ToProto());
return absl::OkStatus();
}
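// Parses MLIR text or bytecode into a module with all dialects XLA needs
// registered, then upgrades any versioned StableHLO it contains. A parse
// failure is reported as likely StableHLO version skew.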
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> ParseMlirModuleString(
absl::string_view mlir_module_str, mlir::MLIRContext& context) {
mlir::DialectRegistry registry;
registry.insert<mlir::arith::ArithDialect>();
registry.insert<mlir::func::FuncDialect>();
registry.insert<mlir::ml_program::MLProgramDialect>();
registry.insert<mlir::shape::ShapeDialect>();
mlir::func::registerAllExtensions(registry);
mlir::mhlo::registerAllMhloDialects(registry);
mlir::sdy::registerAllDialects(registry);
mlir::stablehlo::registerAllDialects(registry);
context.appendDialectRegistry(registry);
mlir::BaseScopedDiagnosticHandler diagnostic_handler(&context);
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(
llvm::StringRef(mlir_module_str.data(), mlir_module_str.size()),
mlir::ParserConfig{&context});
if (!module) {
mlir::emitError(mlir::UnknownLoc::get(&context))
<< "Failed to parse using StableHLO v"
<< mlir::vhlo::Version::getCurrentVersion() << ", "
<< "this could indicate forward incompatibility, >12w old "
"unsupported plugin, or a portable artifact that needs to be "
"further downgraded.";
return diagnostic_handler.ConsumeStatus();
}
TF_RETURN_IF_ERROR(UpgradeVersionedStablehlo(*module));
return std::move(module);
}
absl::Status ParseMlirModuleStringAndConvertToXlaComputation(
absl::string_view mlir_module_str, XlaComputation& xla_computation,
bool use_tuple_args, bool return_tuple) {
mlir::MLIRContext context;
TF_ASSIGN_OR_RETURN(mlir::OwningOpRef<mlir::ModuleOp> module,
xla::ParseMlirModuleString(mlir_module_str, context));
return xla::MlirToXlaComputation(*module, xla_computation, use_tuple_args,
return_tuple, false);
}
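// Fallback serialization: writes the module as native MLIR bytecode (from a
// clone), pinning the desired bytecode version to 1.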
absl::StatusOr<std::string> SerializeUsingNativeBytecode(
mlir::ModuleOp module) {
std::string bytecode;
llvm::raw_string_ostream os(bytecode);
mlir::BytecodeWriterConfig config;
config.setDesiredBytecodeVersion(1);
mlir::OwningOpRef<mlir::ModuleOp> cloned = module.clone();
if (mlir::failed(mlir::writeBytecodeToFile(*cloned, os, config))) {
return absl::InvalidArgumentError("mlir::writeBytecodeToFile failed");
}
return bytecode;
}
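// Legalizes CHLO/shape/MHLO ops to StableHLO, then emits a portable StableHLO
// artifact targeting version `target`. When `inplace` is false the artifact is
// produced from a clone of the (already legalized) module.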
absl::StatusOr<std::string> SerializeUsingVersionedStablehlo(
mlir::ModuleOp mlir_module, absl::string_view target, bool inplace) {
mlir::MLIRContext* context = mlir_module->getContext();
mlir::BaseScopedDiagnosticHandler diagnostic_handler(context);
mlir::PassManager pm(context);
pm.addNestedPass<mlir::func::FuncOp>(
mlir::mhlo::createChloLegalizeToHighLevelMhloPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::stablehlo::createChloLegalizeToStablehloPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::stablehlo::createStablehloCompatibilityExpanderPass(
{std::string(target)}));
pm.addNestedPass<mlir::func::FuncOp>(
mlir::stablehlo::createChloLegalizeToStablehloPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::stablehlo::createShapeLegalizeToStablehloPass());
pm.addPass(mlir::createReconcileUnrealizedCastsPass());
pm.addPass(mlir::mhlo::createHloLegalizeToStablehloPass());
if (!mlir::succeeded(pm.run(mlir_module))) {
const absl::Status status = diagnostic_handler.ConsumeStatus();
return absl::InvalidArgumentError(
absl::StrCat("CHLO => [MHLO+Shape] => StableHLO failed;\n\nDetailed "
"error from MLIR: ",
status.message()));
}
mlir::OwningOpRef<mlir::ModuleOp> cloned;
if (!inplace) {
cloned = mlir_module.clone();
mlir_module = *cloned;
}
std::string buffer;
llvm::raw_string_ostream os(buffer);
if (failed(mlir::stablehlo::serializePortableArtifact(mlir_module, target,
os))) {
const absl::Status status = diagnostic_handler.ConsumeStatus();
return absl::InvalidArgumentError(absl::StrCat(
"Failed to serialize StableHLO;\n\nDetailed error from MLIR: ",
status.message()));
}
return buffer;
}
absl::Status UpgradeVersionedStablehlo(mlir::ModuleOp mlir_module) {
mlir::PassManager pm(mlir_module->getContext());
mlir::stablehlo::createStablehloDeserializePipeline(pm);
if (!mlir::succeeded(pm.run(mlir_module)))
return xla::InvalidArgument("Failed to upgrade versioned StableHLO.");
return absl::OkStatus();
}
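// Returns the StableHLO version to target by default: 0.19.0 for plugins
// reporting a version below 54, otherwise the version satisfying a 12-week
// forward-compatibility window.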
std::string GetDefaultStablehloVersion(std::optional<int64_t> plugin_version) {
if (plugin_version.has_value() && plugin_version.value() < 54) {
return "0.19.0";
}
return mlir::vhlo::Version::fromCompatibilityRequirement(
mlir::vhlo::Version::CompatibilityRequirement::WEEK_12)
.toString();
}
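// Serializes `module`: modules containing only StableHLO/CHLO/func ops become
// portable versioned StableHLO artifacts; anything else falls back to native
// MLIR bytecode.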
absl::StatusOr<std::string> Serialize(mlir::ModuleOp module,
absl::string_view target, bool inplace) {
bool all_stablehlo = true;
module->walk([&](mlir::Operation* op) {
if (!llvm::isa<mlir::ModuleOp>(op) &&
!llvm::isa<mlir::stablehlo::StablehloDialect, mlir::func::FuncDialect,
mlir::chlo::ChloDialect>(op->getDialect())) {
all_stablehlo = false;
return mlir::WalkResult::interrupt();
}
return mlir::WalkResult::advance();
});
if (!all_stablehlo) {
return SerializeUsingNativeBytecode(module);
}
return SerializeUsingVersionedStablehlo(module, target, inplace);
}
} | #include "xla/pjrt/mlir_to_hlo.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "stablehlo/api/PortableApi.h"
#include "xla/test.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::testing::HasSubstr;
using ::testing::Not;
MATCHER_P(IsVhloArtifact, version, "") {
return ExplainMatchResult(HasSubstr(absl::StrCat("StableHLO_v", version)),
arg, result_listener);
}
TEST(MlirToHloTest, StablehloTest) {
constexpr char kProgram[] =
R"(
func.func @add(%arg0: tensor<1x2xf32>) -> tensor<1x2xf32> {
%cst = stablehlo.constant dense<1.0> : tensor<1x2xf32>
%0 = stablehlo.add %arg0, %cst : tensor<1x2xf32>
return %0 : tensor<1x2xf32>
}
)";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
ParseMlirModuleString(kProgram, context));
TF_ASSERT_OK_AND_ASSIGN(std::string blob, Serialize(*module, "1.0.0"));
EXPECT_THAT(blob, IsVhloArtifact("1.0.0"));
}
TEST(MlirToHloTest, ChloTest) {
constexpr char kProgram[] =
R"(
func.func @add(%arg0: tensor<1x2xf32>) -> tensor<1x2xf32> {
%cst = stablehlo.constant dense<1.0> : tensor<1x2xf32>
%0 = chlo.broadcast_add %arg0, %cst : (tensor<1x2xf32>, tensor<1x2xf32>) -> tensor<1x2xf32>
return %0 : tensor<1x2xf32>
}
)";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
ParseMlirModuleString(kProgram, context));
TF_ASSERT_OK_AND_ASSIGN(std::string blob, Serialize(*module, "1.0.0"));
EXPECT_THAT(blob, IsVhloArtifact("1.0.0"));
}
TEST(MlirToHloTest, ChloTanOpTest) {
constexpr char kProgram[] =
R"(
func.func @add(%arg0: tensor<1x2xf32>) -> tensor<1x2xf32> {
%0 = chlo.tan %arg0 : tensor<1x2xf32> -> tensor<1x2xf32>
return %0 : tensor<1x2xf32>
}
)";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
ParseMlirModuleString(kProgram, context));
TF_ASSERT_OK_AND_ASSIGN(std::string blob, Serialize(*module, "1.0.0"));
EXPECT_THAT(blob, IsVhloArtifact("1.0.0"));
}
TEST(MlirToHloTest, MhloTest) {
constexpr char kProgram[] =
R"(
func.func @add(%arg0: tensor<1x2xf32>) -> tensor<1x2xf32> {
%cst = mhlo.constant dense<1.0> : tensor<1x2xf32>
%0 = mhlo.add %arg0, %cst : tensor<1x2xf32>
return %0 : tensor<1x2xf32>
}
)";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
ParseMlirModuleString(kProgram, context));
TF_ASSERT_OK_AND_ASSIGN(std::string blob, Serialize(*module, "1.0.0"));
EXPECT_THAT(blob, Not(IsVhloArtifact("1.0.0")));
}
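// The byte blob below is a VHLO artifact stamped with a future version
// (StableHLO_v2.0.0) and an unknown op (constant_v99); parsing must fail and
// the diagnostic must mention both along with the current StableHLO version.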
TEST(MlirToHloTest, InvalidBytecodeTest) {
unsigned char invalid_future_vhlo_mlirbc[] = {
0x4d, 0x4c, 0xef, 0x52, 0x0d, 0x53, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x48,
0x4c, 0x4f, 0x5f, 0x76, 0x32, 0x2e, 0x30, 0x2e, 0x30, 0x00, 0x01, 0x19,
0x05, 0x01, 0x05, 0x09, 0x01, 0x03, 0x0b, 0x03, 0x07, 0x0f, 0x13, 0x17,
0x03, 0x2b, 0x15, 0x07, 0x01, 0x0b, 0x0b, 0x13, 0x13, 0x13, 0x13, 0x03,
0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x1f, 0x03, 0x07, 0x0f, 0x13, 0x07, 0x02,
0x53, 0x05, 0x0d, 0x17, 0x01, 0x03, 0x03, 0x17, 0x01, 0x05, 0x07, 0x17,
0x01, 0x07, 0x15, 0x17, 0x01, 0x09, 0x0b, 0x03, 0x01, 0x23, 0x03, 0x1d,
0x0f, 0x1d, 0x11, 0x1f, 0x01, 0x09, 0x00, 0x00, 0x80, 0x3f, 0x29, 0x01,
0x05, 0x11, 0x01, 0x03, 0x01, 0x09, 0x04, 0x41, 0x05, 0x01, 0x50, 0x03,
0x01, 0x07, 0x04, 0x31, 0x03, 0x01, 0x05, 0x03, 0x50, 0x05, 0x03, 0x07,
0x04, 0x1d, 0x03, 0x03, 0x09, 0x05, 0x42, 0x07, 0x05, 0x03, 0x01, 0x07,
0x04, 0x09, 0x03, 0x01, 0x06, 0x03, 0x01, 0x05, 0x01, 0x00, 0xad, 0x13,
0x0f, 0x0b, 0x1b, 0x15, 0x1b, 0x11, 0x0f, 0x0b, 0x11, 0x62, 0x75, 0x69,
0x6c, 0x74, 0x69, 0x6e, 0x00, 0x76, 0x68, 0x6c, 0x6f, 0x00, 0x6d, 0x6f,
0x64, 0x75, 0x6c, 0x65, 0x00, 0x66, 0x75, 0x6e, 0x63, 0x5f, 0x76, 0x31,
0x00, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x76, 0x39,
0x39, 0x00, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x5f, 0x76, 0x31, 0x00,
0x2f, 0x74, 0x6d, 0x70, 0x2f, 0x74, 0x32, 0x2e, 0x6d, 0x6c, 0x69, 0x72,
0x00, 0x6d, 0x61, 0x69, 0x6e, 0x00, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63,
0x00, 0x08, 0x19, 0x07, 0x05, 0x01, 0x01, 0x0b, 0x0b, 0x0d, 0x0b, 0x0f,
0x11, 0x03, 0x13};
unsigned int invalid_future_vhlo_mlirbc_len = 243;
std::string buffer(reinterpret_cast<char*>(invalid_future_vhlo_mlirbc),
invalid_future_vhlo_mlirbc_len);
mlir::MLIRContext context;
auto status = ParseMlirModuleString(buffer, context);
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.status().message(), HasSubstr("vhlo.constant_v99"));
EXPECT_THAT(status.status().message(), HasSubstr("StableHLO_v2.0.0"));
EXPECT_THAT(status.status().message(),
HasSubstr(mlir::stablehlo::getCurrentVersion()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/mlir_to_hlo.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/mlir_to_hlo_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
66a6130e-df50-475d-9fcc-fa07e2cdd66e | cpp | tensorflow/tensorflow | robust_stats | tensorflow/core/grappler/costs/robust_stats.cc | tensorflow/core/grappler/costs/robust_stats_test.cc | #include "tensorflow/core/grappler/costs/robust_stats.h"
#include <algorithm>
#include <cmath>
#include <utility>
namespace tensorflow {
namespace grappler {
static double SortedMedian(const std::vector<double> &values) {
const int n = values.size();
if (n == 0) return 0.0;
if (n & 1) {
return values[n / 2];
} else {
return (values[n / 2] + values[n / 2 - 1]) / 2.0;
}
}
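// Median without a full sort: std::nth_element places the upper-middle
// element, and for even counts the lower-middle is found with max_element.
// The midpoint is computed in a form that avoids overflow.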
static double Median(std::vector<double> &&values) {
const size_t n = values.size();
if (n == 0) return 0;
const auto middle = values.begin() + (n / 2);
std::nth_element(values.begin(), middle, values.end());
if (n & 1) {
return *middle;
}
const auto lower_middle = std::max_element(values.begin(), middle);
if (*lower_middle <= 0 && *middle >= 0) {
return (*lower_middle + *middle) / 2;
}
return *lower_middle + (*middle - *lower_middle) / 2;
}
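// Returns {median, MAD * 1.4826}; the 1.4826 factor makes the median absolute
// deviation a consistent estimator of the standard deviation under normality.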
static std::pair<double, double> ScaledMedianAbsoluteDeviation(
const std::vector<double> &sorted_values) {
double median = SortedMedian(sorted_values);
std::vector<double> deviations;
deviations.reserve(sorted_values.size());
for (double d : sorted_values) {
deviations.push_back(std::abs(d - median));
}
double mad = Median(std::move(deviations)) * 1.4826;
return std::pair<double, double>(median, mad);
}
RobustStats::RobustStats(const std::vector<double> &values)
: RobustStats(std::vector<double>(values)) {}
RobustStats::RobustStats(std::vector<double> &&values) {
std::sort(values.begin(), values.end());
lo_ = values[0];
hi_ = values.back();
HuberMAD(values);
}
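// One Huber-style refinement of the mean: samples within `margin` of `mean`
// contribute their value, samples outside contribute only +/-margin, and the
// sum is divided by the inlier count (the old mean is kept if there are none).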
double UpdateHuberMean(const std::vector<double> &sorted_values, double mean,
double margin) {
int num_within = 0;
double sum = 0.0;
for (double d : sorted_values) {
if (d < mean - margin) {
sum -= margin;
} else if (d > mean + margin) {
sum += margin;
} else {
sum += d;
++num_within;
}
}
if (num_within > 0) {
return sum / num_within;
} else {
return mean;
}
}
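// Robust location/scale estimate: seed mean/stddev from the median and scaled
// MAD, then refine the mean with up to 10 Huber updates using a clipping
// margin of 1.5 * stddev.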
void RobustStats::HuberMAD(const std::vector<double> &sorted_values) {
const std::pair<double, double> median_mad =
ScaledMedianAbsoluteDeviation(sorted_values);
mean_ = median_mad.first;
stddev_ = median_mad.second;
  const double c = 1.5;  // Clipping threshold, in multiples of the stddev estimate.
const double margin = c * stddev_;
if (margin > 0.0) {
for (int k = 0; k < 10; ++k) {
double old_mean = mean_;
mean_ = UpdateHuberMean(sorted_values, mean_, margin);
if (mean_ == old_mean) break;
}
}
}
}
} | #include "tensorflow/core/grappler/costs/robust_stats.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class RobustStatsTest : public ::testing::Test {
public:
void SetUp() override {
for (double d = 1.0; d <= 5.0; d += 1.0) {
values1_.push_back(5.0 - d);
values1_.push_back(5.0 + d);
values2_.push_back(25.0 - 2 * d);
values2_.push_back(25.0 + 2 * d);
values3_.push_back(-3.0 - d);
values3_.push_back(-3.0 + d);
}
values1_.push_back(5.0);
values3_.push_back(197.0);
values3_.push_back(-203.0);
}
std::vector<double> values1_;
std::vector<double> values2_;
std::vector<double> values3_;
};
TEST_F(RobustStatsTest, Simple) {
RobustStats s1(values1_);
EXPECT_EQ(5.0, s1.mean());
EXPECT_EQ(0.0, s1.lo());
EXPECT_EQ(10.0, s1.hi());
RobustStats s2(values2_);
EXPECT_EQ(25.0, s2.mean());
EXPECT_EQ(15.0, s2.lo());
EXPECT_EQ(35.0, s2.hi());
RobustStats s3(values3_);
EXPECT_EQ(-3.0, s3.mean());
EXPECT_EQ(-203.0, s3.lo());
EXPECT_EQ(197.0, s3.hi());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/costs/robust_stats.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/costs/robust_stats_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3adfd356-0a6b-4561-8157-72882331708c | cpp | tensorflow/tensorflow | index_util | third_party/xla/xla/index_util.cc | third_party/xla/xla/index_util_test.cc | #include "xla/index_util.h"
#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>
#include "absl/types/span.h"
#include "xla/layout_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/logging.h"
namespace xla {
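// Converts a linear index into a multidimensional index for `shape`, peeling
// off dimensions in the minor-to-major order given by the shape's layout.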
DimensionVector IndexUtil::LinearIndexToMultidimensionalIndex(
const Shape& shape, int64_t linear_index) {
DCHECK_GE(linear_index, 0);
DCHECK_LT(linear_index, ShapeUtil::ElementsIn(shape));
DimensionVector multi_index(shape.dimensions_size());
int64_t divisor = 1;
for (auto dimension : LayoutUtil::MinorToMajor(shape)) {
multi_index[dimension] =
(linear_index / divisor) % shape.dimensions(dimension);
divisor *= shape.dimensions(dimension);
}
return multi_index;
}
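// Advances `indices` to the next valid index of `shape` (last dimension moves
// fastest), zeroing the dimensions to its right; returns false once the final
// index has been reached.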
bool IndexUtil::BumpIndices(const Shape& shape,
absl::Span<int64_t> indices) {
for (int64_t dimno = indices.size() - 1; dimno >= 0; --dimno) {
int64_t limit = shape.dimensions(dimno);
if (indices[dimno] + 1 < limit) {
indices[dimno]++;
std::fill(indices.begin() + dimno + 1, indices.end(), 0);
return true;
}
}
return false;
}
int64_t IndexUtil::GetDimensionStride(const Shape& shape,
int64_t dimension) {
int64_t stride = 1;
for (auto dim : LayoutUtil::MinorToMajor(shape)) {
if (dim == dimension) {
break;
}
stride *= shape.dimensions()[dim];
}
return stride;
}
bool IndexUtil::IndexInBounds(const Shape& shape,
absl::Span<const int64_t> index) {
int64_t rank = shape.rank();
const int64_t index_size = index.size();
if (rank != index_size) {
return false;
}
for (int64_t d = 0; d < rank; ++d) {
if (index[d] >= shape.dimensions(d)) {
return false;
}
}
return true;
}
int IndexUtil::CompareIndices(absl::Span<const int64_t> lhs,
absl::Span<const int64_t> rhs) {
int64_t rank = lhs.size();
const int64_t rhs_rank = rhs.size();
CHECK_EQ(rhs_rank, rank);
for (int64_t dim = 0; dim < rank; ++dim) {
if (lhs[dim] < rhs[dim]) {
return -1;
} else if (lhs[dim] > rhs[dim]) {
return 1;
}
}
return 0;
}
} | #include "xla/index_util.h"
#include <initializer_list>
#include <vector>
#include "absl/types/span.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
void SetMinorToMajorLayout(Shape* shape, std::vector<int64_t> dimensions) {
shape->mutable_layout()->clear_minor_to_major();
for (auto dimension : dimensions) {
shape->mutable_layout()->add_minor_to_major(dimension);
}
}
TEST(IndexUtilTest, VectorIndexing) {
Shape vector_shape = ShapeUtil::MakeShape(F32, {100});
EXPECT_EQ(42,
IndexUtil::MultidimensionalIndexToLinearIndex(vector_shape, {42}));
auto multi_index =
IndexUtil::LinearIndexToMultidimensionalIndex(vector_shape, 42);
EXPECT_EQ(1, multi_index.size());
EXPECT_EQ(42, multi_index[0]);
}
TEST(IndexUtilTest, MatrixIndexingRowMajor) {
Shape matrix_shape_01 = ShapeUtil::MakeShape(F32, {10, 20});
SetMinorToMajorLayout(&matrix_shape_01, {0, 1});
EXPECT_EQ(0, IndexUtil::MultidimensionalIndexToLinearIndex(matrix_shape_01,
{0, 0}));
EXPECT_EQ(199, IndexUtil::MultidimensionalIndexToLinearIndex(matrix_shape_01,
{9, 19}));
EXPECT_EQ(53, IndexUtil::MultidimensionalIndexToLinearIndex(matrix_shape_01,
{3, 5}));
EXPECT_THAT(
IndexUtil::LinearIndexToMultidimensionalIndex(matrix_shape_01, 53),
testing::ElementsAre(3, 5));
}
TEST(IndexUtilTest, MatrixIndexingColumnMajor) {
Shape matrix_shape_10 = ShapeUtil::MakeShape(F32, {10, 20});
SetMinorToMajorLayout(&matrix_shape_10, {1, 0});
EXPECT_EQ(0, IndexUtil::MultidimensionalIndexToLinearIndex(matrix_shape_10,
{0, 0}));
EXPECT_EQ(199, IndexUtil::MultidimensionalIndexToLinearIndex(matrix_shape_10,
{9, 19}));
EXPECT_EQ(65, IndexUtil::MultidimensionalIndexToLinearIndex(matrix_shape_10,
{3, 5}));
EXPECT_THAT(
IndexUtil::LinearIndexToMultidimensionalIndex(matrix_shape_10, 65),
testing::ElementsAre(3, 5));
}
TEST(IndexUtilTest, ThreeDArrayIndexing210) {
Shape shape_210 = ShapeUtil::MakeShape(F32, {10, 20, 30});
SetMinorToMajorLayout(&shape_210, {2, 1, 0});
EXPECT_EQ(1957, IndexUtil::MultidimensionalIndexToLinearIndex(shape_210,
{3, 5, 7}));
EXPECT_EQ(5277, IndexUtil::MultidimensionalIndexToLinearIndex(shape_210,
{8, 15, 27}));
}
TEST(IndexUtilTest, ThreeDArrayIndexing120) {
Shape shape_120 = ShapeUtil::MakeShape(F32, {10, 20, 30});
SetMinorToMajorLayout(&shape_120, {1, 2, 0});
EXPECT_EQ(1945, IndexUtil::MultidimensionalIndexToLinearIndex(shape_120,
{3, 5, 7}));
EXPECT_EQ(5355, IndexUtil::MultidimensionalIndexToLinearIndex(shape_120,
{8, 15, 27}));
}
TEST(IndexUtilTest, FourDArrayIndexing3210) {
Shape shape_3210 = ShapeUtil::MakeShape(F32, {10, 20, 30, 40});
SetMinorToMajorLayout(&shape_3210, {3, 2, 1, 0});
EXPECT_EQ(78289, IndexUtil::MultidimensionalIndexToLinearIndex(shape_3210,
{3, 5, 7, 9}));
EXPECT_EQ(211113, IndexUtil::MultidimensionalIndexToLinearIndex(
shape_3210, {8, 15, 27, 33}));
}
TEST(IndexUtilTest, LinearToMultiToLinear) {
std::vector<int64_t> linear_indexes = {0, 1439999999, 1145567336,
43883404, 617295214, 1117613654};
std::vector<std::vector<int64_t>> minor_to_major_orders;
minor_to_major_orders.push_back({6, 5, 4, 3, 2, 1, 0});
minor_to_major_orders.push_back({0, 1, 2, 3, 4, 5, 6});
minor_to_major_orders.push_back({4, 5, 1, 2, 6, 0, 3});
for (auto minor_to_major_order : minor_to_major_orders) {
Shape shape = ShapeUtil::MakeShape(F32, {10, 20, 30, 40, 30, 20, 10});
SetMinorToMajorLayout(&shape, minor_to_major_order);
for (auto linear_index : linear_indexes) {
auto multi_index =
IndexUtil::LinearIndexToMultidimensionalIndex(shape, linear_index);
EXPECT_EQ(linear_index, IndexUtil::MultidimensionalIndexToLinearIndex(
shape, multi_index));
}
}
}
TEST(IndexUtilTest, BumpIndices2x2) {
auto shape = ShapeUtil::MakeShape(S32, {2, 2});
std::vector<int64_t> indices = {0, 0};
EXPECT_TRUE(IndexUtil::BumpIndices(shape, absl::MakeSpan(indices)));
EXPECT_THAT(indices, ::testing::ElementsAre(0, 1));
EXPECT_TRUE(IndexUtil::BumpIndices(shape, absl::MakeSpan(indices)));
EXPECT_THAT(indices, ::testing::ElementsAre(1, 0));
EXPECT_TRUE(IndexUtil::BumpIndices(shape, absl::MakeSpan(indices)));
EXPECT_THAT(indices, ::testing::ElementsAre(1, 1));
EXPECT_FALSE(IndexUtil::BumpIndices(shape, absl::MakeSpan(indices)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/index_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/index_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
39ece867-becf-40a5-be85-a8bfb9996b0b | cpp | tensorflow/tensorflow | all_reduce_reassociate | third_party/xla/xla/service/all_reduce_reassociate.cc | third_party/xla/xla/service/all_reduce_reassociate_test.cc | #include "xla/service/all_reduce_reassociate.h"
#include <cstdint>
#include <optional>
#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/literal.h"
#include "xla/primitive_util.h"
#include "xla/service/all_reduce_key.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
namespace m = match;
bool AreAllreduceKeysEqual(AllReduceKey& key0, AllReduceKey& key1,
bool ignore_element_type) {
if (ignore_element_type) {
return std::get<0>(key0) == std::get<0>(key1) &&
std::get<2>(key0) == std::get<2>(key1) &&
std::get<3>(key0) == std::get<3>(key1) &&
std::get<4>(key0) == std::get<4>(key1) &&
std::get<5>(key0) == std::get<5>(key1);
} else {
return key0 == key1;
}
}
bool AreCompatible(const HloAllReduceInstruction* ar0,
const HloAllReduceInstruction* ar1, ReductionKind op_kind,
bool ignore_element_type) {
std::optional<AllReduceKey> key0 = GetAllReduceKey(ar0);
std::optional<AllReduceKey> key1 = GetAllReduceKey(ar1);
auto kind0 = MatchReductionComputation(ar0->to_apply());
return key0 && key1 && kind0 &&
AreAllreduceKeysEqual(*key0, *key1, ignore_element_type) &&
kind0 == op_kind;
}
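// Follows `instr` up through single-user reshape/pad/slice/convert ops to the
// producing all-reduce (pads must pad with the reduction identity). A
// dynamic-slice fed directly by a single-user all-reduce is returned as-is so
// the reduce-scatter-like pattern can be handled separately.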
HloInstruction* LookThroughForAllReduce(HloInstruction* instr,
const Literal& reduction_identity) {
if (instr->opcode() == HloOpcode::kDynamicSlice) {
if (instr->operand(0)->opcode() != HloOpcode::kAllReduce ||
instr->operand(0)->user_count() != 1 || instr->user_count() != 1) {
return nullptr;
}
return instr;
}
while (instr->opcode() != HloOpcode::kAllReduce) {
if (instr->user_count() != 1) {
return nullptr;
}
if (instr->opcode() != HloOpcode::kReshape &&
instr->opcode() != HloOpcode::kPad &&
instr->opcode() != HloOpcode::kSlice &&
instr->opcode() != HloOpcode::kConvert) {
return nullptr;
}
if (instr->opcode() == HloOpcode::kPad) {
if (!instr->operand(1)->IsConstant()) {
return nullptr;
}
if (instr->operand(1)->literal() != reduction_identity) {
return nullptr;
}
}
instr = instr->mutable_operand(0);
}
if (instr->user_count() != 1) {
return nullptr;
}
return instr;
}
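// Only reassociate when the combined element count of the original all-reduce
// shapes is at least that of the reassociated result, so the rewrite never
// all-reduces more data than before.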
bool ReassociateAllReduceIsProfitable(HloInstruction* ar0, HloInstruction* ar1,
HloInstruction* reassociated_inst) {
int64_t pre_reassociated_size = ShapeUtil::ElementsIn(ar0->shape());
if (ar0 != ar1) {
pre_reassociated_size += ShapeUtil::ElementsIn(ar1->shape());
}
return pre_reassociated_size >=
ShapeUtil::ElementsIn(reassociated_inst->shape());
}
bool AreCompatibleConverts(const HloInstruction* convert0,
const HloInstruction* convert1) {
bool is_compatible = true;
if (convert0) {
is_compatible &= primitive_util::CastPreservesValues(
convert0->operand(0)->shape().element_type(),
convert0->shape().element_type());
}
if (convert1) {
is_compatible &= primitive_util::CastPreservesValues(
convert1->operand(0)->shape().element_type(),
convert1->shape().element_type());
}
if (convert0 && convert1) {
CHECK(convert0->shape().element_type() == convert1->shape().element_type());
is_compatible &= convert0->operand(0)->shape().element_type() ==
convert1->operand(0)->shape().element_type();
}
return is_compatible;
}
template <typename Pattern>
auto OptionalConvertWithOneUser(HloInstruction** optional_convert,
Pattern pattern) {
return m::AnyOf<HloInstruction>(
m::Convert(optional_convert, pattern).WithOneUser(), std::move(pattern));
}
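// Matches `inst` as op(all-reduce, all-reduce) where each operand may first
// pass through a single-user convert; any such converts are returned through
// `convert0` and `convert1`.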
bool MatchOperandsToAllReduceWithOptionalConvert(HloInstruction* inst,
HloInstruction** convert0,
HloInstruction** convert1) {
auto ar_op_optional_convert_pattern =
m::Op()
.WithOperand(0, OptionalConvertWithOneUser(convert0, m::AllReduce()))
.WithOperand(1, OptionalConvertWithOneUser(convert1, m::AllReduce()))
.WithPredicate([](const HloInstruction* inst) {
return inst->shape().IsArray();
});
return Match(inst, ar_op_optional_convert_pattern);
}
}
absl::StatusOr<bool> AllReduceReassociate::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
if (hlo_query::ContainsLayoutConstrainedAllReduce(*module)) {
VLOG(1)
<< "Skip AllReduceReassociate because the module contains all-reduce "
"with constrained layouts";
return false;
}
int64_t next_channel_id = hlo_query::NextChannelId(*module);
bool changed = false;
for (auto computation : module->computations(execution_threads)) {
for (HloInstruction* inst : computation->MakeInstructionPostOrder()) {
std::optional<ReductionKind> kind = MatchReductionInstruction(inst);
if (!kind) {
continue;
}
std::optional<Literal> reduction_identity =
GetReductionIdentity(*kind, inst->shape().element_type());
if (!reduction_identity) {
continue;
}
HloInstruction* lhs = LookThroughForAllReduce(inst->mutable_operand(0),
*reduction_identity);
if (lhs == nullptr) {
continue;
}
HloInstruction* rhs = LookThroughForAllReduce(inst->mutable_operand(1),
*reduction_identity);
if (rhs == nullptr) {
continue;
}
if (!inst->shape().IsArray()) {
continue;
}
if (lhs->opcode() != rhs->opcode() ||
(lhs->opcode() == HloOpcode::kDynamicSlice &&
!ShapeUtil::Compatible(lhs->operand(0)->shape(),
rhs->operand(0)->shape()))) {
continue;
}
HloAllReduceInstruction* ar0 = nullptr;
HloAllReduceInstruction* ar1 = nullptr;
bool reduce_scatter_pattern_match = false;
if (lhs->opcode() == HloOpcode::kDynamicSlice) {
HloInstruction* original_rhs_operand = rhs->mutable_operand(0);
TF_RETURN_IF_ERROR(rhs->ReplaceOperandWith(0, lhs->mutable_operand(0)));
if (!lhs->Identical(*rhs)) {
TF_RETURN_IF_ERROR(rhs->ReplaceOperandWith(0, original_rhs_operand));
continue;
}
TF_RETURN_IF_ERROR(rhs->ReplaceOperandWith(0, original_rhs_operand));
ar0 = Cast<HloAllReduceInstruction>(lhs->mutable_operand(0));
ar1 = Cast<HloAllReduceInstruction>(rhs->mutable_operand(0));
reduce_scatter_pattern_match = true;
} else {
ar0 = Cast<HloAllReduceInstruction>(lhs);
ar1 = Cast<HloAllReduceInstruction>(rhs);
}
if (!ReassociateAllReduceIsProfitable(lhs, rhs, inst)) {
continue;
}
HloInstruction* convert0 = nullptr;
HloInstruction* convert1 = nullptr;
if (!MatchOperandsToAllReduceWithOptionalConvert(inst, &convert0,
&convert1)) {
VLOG(2) << "One or both inputs are type-converted.";
}
bool should_promote_ar = convert0 || convert1;
if (should_promote_ar) {
if (!reassociate_converted_ar_) {
VLOG(2) << "Promotions of all_reduces for reassociation will be "
"disabled.";
continue;
}
if (!AreCompatibleConverts(convert0, convert1)) {
VLOG(2) << "Inputs' Converts are not preserving "
"value, skipping";
continue;
}
}
HloInstruction* op_operand0 = inst->mutable_operand(0);
HloInstruction* op_operand1 = inst->mutable_operand(1);
if (convert0) {
op_operand0 = convert0->mutable_operand(0);
}
if (convert1) {
op_operand1 = convert1->mutable_operand(0);
}
if (!AreCompatible(ar0, ar1, *kind,
should_promote_ar)) {
VLOG(2) << "All-Reduce operations are not compatible, skipping";
continue;
}
VLOG(2) << "Reassociated:";
VLOG(2) << "\tAR0: " << ar0->ToString();
VLOG(2) << "\tAR1: " << ar1->ToString();
auto op_users = inst->users();
HloInstruction* new_op_operand0 = ar0->mutable_operand(0);
HloInstruction* new_op_operand1 = ar1->mutable_operand(0);
if (convert0) {
HloInstruction* ar0_operand = ar0->mutable_operand(0);
TF_RETURN_IF_ERROR(convert0->ReplaceOperandWith(0, ar0_operand));
new_op_operand0 = convert0;
}
if (convert1) {
HloInstruction* ar1_operand = ar1->mutable_operand(0);
TF_RETURN_IF_ERROR(convert1->ReplaceOperandWith(0, ar1_operand));
new_op_operand1 = convert1;
}
HloInstruction* new_op = inst;
if (should_promote_ar) {
new_op = computation->AddInstruction(inst->CloneWithNewOperands(
inst->shape(), {new_op_operand0, new_op_operand1}));
} else if (reduce_scatter_pattern_match) {
new_op = computation->AddInstruction(inst->CloneWithNewOperands(
ar0->shape(), {new_op_operand0, new_op_operand1}));
}
Shape new_ar_out_shape = inst->shape();
CHECK(!should_promote_ar || !reduce_scatter_pattern_match);
if (should_promote_ar) {
new_ar_out_shape.set_element_type(
new_op_operand0->shape().element_type());
} else if (reduce_scatter_pattern_match) {
new_ar_out_shape = ar0->shape();
} else {
TF_RETURN_IF_ERROR(ar0->ReplaceAllUsesWith(ar0->mutable_operand(0)));
TF_RETURN_IF_ERROR(ar1->ReplaceAllUsesWith(ar1->mutable_operand(0)));
}
HloInstruction* new_ar = computation->AddInstruction(
ar0->CloneWithNewOperands(new_ar_out_shape, {new_op}));
if (new_ar->channel_id()) {
new_ar->set_channel_id(next_channel_id++);
}
if (should_promote_ar) {
HloComputation* to_apply = new_ar->to_apply();
PrimitiveType type = new_ar->shape().element_type();
std::string name = absl::StrCat(to_apply->name(), "_reassoc_promoted");
HloComputation::Builder promoted(name);
auto x = promoted.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(type, {}), "x"));
auto y = promoted.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(type, {}), "y"));
promoted.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(type, {}),
to_apply->root_instruction()->opcode(), x, y));
HloComputation* to_apply_promoted =
inst->GetModule()->AddEmbeddedComputation(promoted.Build());
new_ar->set_to_apply(to_apply_promoted);
TF_RETURN_IF_ERROR(inst->ReplaceAllUsesWith(new_ar));
} else if (reduce_scatter_pattern_match) {
auto dyn_slice_operands = lhs->mutable_operands();
dyn_slice_operands[0] = new_ar;
HloInstruction* new_dyn_slice = inst->parent()->AddInstruction(
lhs->CloneWithNewOperands(inst->shape(), dyn_slice_operands));
TF_RETURN_IF_ERROR(inst->ReplaceUsesWith(op_users, new_dyn_slice));
} else {
TF_RETURN_IF_ERROR(inst->ReplaceUsesWith(op_users, new_ar));
}
if (should_promote_ar || reduce_scatter_pattern_match) {
TF_RETURN_IF_ERROR(computation->RemoveInstruction(inst));
}
if (reduce_scatter_pattern_match) {
TF_RETURN_IF_ERROR(computation->RemoveInstruction(lhs));
if (lhs != rhs) {
TF_RETURN_IF_ERROR(computation->RemoveInstruction(rhs));
}
}
TF_RETURN_IF_ERROR(computation->RemoveInstruction(ar0));
if (ar0 != ar1) {
TF_RETURN_IF_ERROR(computation->RemoveInstruction(ar1));
}
changed = true;
}
}
return changed;
}
} | #include "xla/service/all_reduce_reassociate.h"
#include <cstddef>
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = xla::testing::opcode_matchers;
using ::testing::_;
class AllReduceSimplifierTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> RunPass(
absl::string_view hlo_module, bool expect_change,
bool reassociate_converted_ar = false) {
TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_module));
auto changed =
AllReduceReassociate(reassociate_converted_ar).Run(module.get());
if (!changed.ok()) {
return changed.status();
}
EXPECT_EQ(changed.value(), expect_change);
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
}
size_t AllReduceCount(std::unique_ptr<HloModule>& module) {
return absl::c_count_if(module->entry_computation()->instructions(),
HloPredicateIsOp<HloOpcode::kAllReduce>);
}
};
TEST_F(AllReduceSimplifierTest, Simple) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
ROOT add = f32[8] add(ar0, ar1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
m::AllReduce(m::Add(m::Parameter(0), m::Parameter(1))));
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, SimpleWithChannelId) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), channel_id=1, replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), channel_id=1, replica_groups={}, to_apply=sum
ROOT add = f32[8] add(ar0, ar1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
m::AllReduce(m::Add(m::Parameter(0), m::Parameter(1))));
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, SimpleChain) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
p2 = f32[8] parameter(2)
p3 = f32[8] parameter(3)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
ar2 = f32[8] all-reduce(p2), replica_groups={}, to_apply=sum
ar3 = f32[8] all-reduce(p3), replica_groups={}, to_apply=sum
add0 = f32[8] add(ar0, ar1)
add1 = f32[8] add(add0, ar2)
ROOT add2 = f32[8] add(add1, ar3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
m::AllReduce(m::Add(
m::Add(m::Add(m::Parameter(0), m::Parameter(1)), m::Parameter(2)),
m::Parameter(3))));
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, SimpleTree) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
p2 = f32[8] parameter(2)
p3 = f32[8] parameter(3)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
ar2 = f32[8] all-reduce(p2), replica_groups={}, to_apply=sum
ar3 = f32[8] all-reduce(p3), replica_groups={}, to_apply=sum
add0 = f32[8] add(ar0, ar1)
add1 = f32[8] add(ar2, ar3)
ROOT add2 = f32[8] add(add0, add1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
m::AllReduce(m::Add(m::Add(m::Parameter(0), m::Parameter(1)),
m::Add(m::Parameter(2), m::Parameter(3)))));
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, MismatchOp0) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
max {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT r = f32[] maximum(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=max
ROOT add = f32[8] add(ar0, ar1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(AllReduceSimplifierTest, MismatchOp1) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
max {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT r = f32[] maximum(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=max
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=max
ROOT add = f32[8] add(ar0, ar1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(AllReduceSimplifierTest, MismatchReplicaGroups) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={{0}}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
ROOT add = f32[8] add(ar0, ar1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(AllReduceSimplifierTest, MismatchHasChannelId) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, channel_id=3, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
ROOT add = f32[8] add(ar0, ar1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(AllReduceSimplifierTest, MismatchUseGlobalDeviceId) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={{0, 1}}, channel_id=3, use_global_device_ids=true, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={{0, 1}}, channel_id=4, to_apply=sum
ROOT add = f32[8] add(ar0, ar1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(AllReduceSimplifierTest, NotSingleUser) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
add = f32[8] add(ar0, ar1)
ROOT t = (f32[8], f32[8]) tuple(ar0, add)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
}
TEST_F(AllReduceSimplifierTest, DoubleUse) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
add = f32[8] add(ar0, ar0)
ROOT c = f32[8] copy(add)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
}
TEST_F(AllReduceSimplifierTest, PaddedUse) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
%constant.1 = f32[] constant(0)
pad = f32[12]{0} pad(ar0, constant.1), padding=0_4
pad.1 = f32[12]{0} pad(ar1, constant.1), padding=0_4
ROOT add = f32[12] add(pad, pad.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
m::AllReduce(m::Add(m::Pad(m::Parameter(0), _),
m::Pad(m::Parameter(1), _))));
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, PaddedUseInvalidReduceValue) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
%constant.1 = f32[] constant(-1.0)
pad = f32[12]{0} pad(ar0, constant.1), padding=0_4
pad.1 = f32[12]{0} pad(ar1, constant.1), padding=0_4
ROOT add = f32[12] add(pad, pad.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
EXPECT_EQ(AllReduceCount(module), 2);
}
TEST_F(AllReduceSimplifierTest, PaddedUseNotProfitable) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
%constant.1 = f32[] constant(0)
pad = f32[17]{0} pad(ar0, constant.1), padding=0_9
pad.1 = f32[17]{0} pad(ar1, constant.1), padding=0_9
ROOT add = f32[17] add(pad, pad.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
EXPECT_EQ(AllReduceCount(module), 2);
}
TEST_F(AllReduceSimplifierTest, PaddedUseDoubleUseNotProfitable) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
%constant.1 = f32[] constant(0)
pad = f32[9]{0} pad(ar0, constant.1), padding=0_1
ROOT add = f32[9] add(pad, pad)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, ReshapeUse) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[1,8] parameter(0)
p1 = f32[1,8] parameter(1)
ar0 = f32[1,8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[1,8] all-reduce(p1), replica_groups={}, to_apply=sum
rshp0 = f32[8]{0} reshape(ar0)
rshp1 = f32[8]{0} reshape(ar1)
ROOT add = f32[8] add(rshp0, rshp1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
m::AllReduce(m::Add(m::Reshape(m::Parameter(0)),
m::Reshape(m::Parameter(1)))));
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, SliceUse) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[8] parameter(0)
p1 = f32[8] parameter(1)
ar0 = f32[8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(p1), replica_groups={}, to_apply=sum
rshp0 = f32[4]{0} slice(ar0), slice={[0:4]}
rshp1 = f32[4]{0} slice(ar1), slice={[0:4]}
ROOT add = f32[4] add(rshp0, rshp1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
m::AllReduce(m::Add(m::Slice(m::Parameter(0)),
m::Slice(m::Parameter(1)))));
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, ChainWithConvert) {
absl::string_view hlo_string = R"(
HloModule m
add.1 {
x.47 = bf16[] parameter(0)
y.47 = bf16[] parameter(1)
ROOT add.2532 = bf16[] add(x.47, y.47)
}
ENTRY main {
p0 = bf16[8] parameter(0)
p1 = bf16[8] parameter(1)
p2 = bf16[8] parameter(2)
p3 = bf16[8] parameter(3)
ar0 = bf16[8] all-reduce(p0), replica_groups={}, to_apply=add.1
ar1 = bf16[8] all-reduce(p1), replica_groups={}, to_apply=add.1
ar2 = bf16[8] all-reduce(p2), replica_groups={}, to_apply=add.1
ar3 = bf16[8] all-reduce(p3), replica_groups={}, to_apply=add.1
convert0 = f32[8] convert(ar0)
convert1 = f32[8] convert(ar1)
add0 = f32[8] add(convert0, convert1)
convert2 = f32[8] convert(ar2)
add1 = f32[8] add(add0, convert2)
convert3 = f32[8] convert(ar3)
add2 = f32[8] add(add1, convert3)
ROOT convert4 = bf16[8] convert(add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true,
true));
SCOPED_TRACE(module->ToString());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
m::Convert(m::AllReduce(m::Add(m::Add(m::Add(m::Convert(m::Parameter(0)),
m::Convert(m::Parameter(1))),
m::Convert(m::Parameter(2))),
m::Convert(m::Parameter(3))))));
EXPECT_EQ(AllReduceCount(module), 1);
EXPECT_THAT(
module->entry_computation()->root_instruction()->operand(0)->shape(),
GmockMatch(::xla::match::Shape().WithElementType(F32)));
}
TEST_F(AllReduceSimplifierTest, AllreduceWithConvertIncompatibleType) {
absl::string_view hlo_string = R"(
HloModule m
add.1 {
x.47 = bf16[] parameter(0)
y.47 = bf16[] parameter(1)
ROOT add.2532 = bf16[] add(x.47, y.47)
}
max.1 {
x.48 = bf16[] parameter(0)
y.48 = bf16[] parameter(1)
ROOT max.2533 = bf16[] maximum(x.48, y.48)
}
min.1 {
x.49 = bf16[] parameter(0)
y.49 = bf16[] parameter(1)
ROOT min.2534 = bf16[] minimum(x.49, y.49)
}
mul.1 {
x.50 = bf16[] parameter(0)
y.50 = bf16[] parameter(1)
ROOT mul.2535 = bf16[] multiply(x.50, y.50)
}
ENTRY main {
p0 = bf16[8] parameter(0)
p1 = bf16[8] parameter(1)
p2 = bf16[8] parameter(2)
p3 = bf16[8] parameter(3)
ar0 = bf16[8] all-reduce(p0), replica_groups={}, to_apply=add.1
ar1 = bf16[8] all-reduce(p1), replica_groups={}, to_apply=max.1
ar2 = bf16[8] all-reduce(p2), replica_groups={}, to_apply=min.1
ar3 = bf16[8] all-reduce(p3), replica_groups={}, to_apply=mul.1
convert0 = f32[8] convert(ar0)
convert1 = f32[8] convert(ar1)
add0 = f32[8] add(convert0, convert1)
convert2 = f32[8] convert(ar2)
add1 = f32[8] add(add0, convert2)
convert3 = f32[8] convert(ar3)
add2 = f32[8] add(add1, convert3)
ROOT convert4 = bf16[8] convert(add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
SCOPED_TRACE(module->ToString());
}
TEST_F(AllReduceSimplifierTest, AllreduceWithLossyConvert) {
absl::string_view hlo_string = R"(
HloModule m
add.1 {
x.47 = bf16[] parameter(0)
y.47 = bf16[] parameter(1)
ROOT add.2532 = bf16[] add(x.47, y.47)
}
ENTRY main {
p0 = bf16[8] parameter(0)
p1 = bf16[8] parameter(1)
p2 = bf16[8] parameter(2)
p3 = bf16[8] parameter(3)
ar0 = bf16[8] all-reduce(p0), replica_groups={}, to_apply=add.1
ar1 = bf16[8] all-reduce(p1), replica_groups={}, to_apply=add.1
ar2 = bf16[8] all-reduce(p2), replica_groups={}, to_apply=add.1
ar3 = bf16[8] all-reduce(p3), replica_groups={}, to_apply=add.1
convert0 = u32[8] convert(ar0)
convert1 = u32[8] convert(ar1)
add0 = u32[8] add(convert0, convert1)
convert2 = u32[8] convert(ar2)
add1 = u32[8] add(add0, convert2)
convert3 = u32[8] convert(ar3)
add2 = u32[8] add(add1, convert3)
ROOT convert4 = bf16[8] convert(add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, false));
SCOPED_TRACE(module->ToString());
}
TEST_F(AllReduceSimplifierTest, AllReduceDynamicSlicePattern) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[1,8] parameter(0)
p1 = f32[1,8] parameter(1)
p2 = f32[1,8] parameter(2)
p3 = s32[] parameter(3)
cst = s32[] constant(0)
ar0 = f32[1,8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[1,8] all-reduce(p1), replica_groups={}, to_apply=sum
ar2 = f32[1,8] all-reduce(p2), replica_groups={}, to_apply=sum
dyn0 = f32[1,4] dynamic-slice(ar0, cst, p3), dynamic_slice_sizes={1,4}
dyn1 = f32[1,4] dynamic-slice(ar1, cst, p3), dynamic_slice_sizes={1,4}
dyn2 = f32[1,4] dynamic-slice(ar2, cst, p3), dynamic_slice_sizes={1,4}
add = f32[1,4] add(dyn0, dyn1)
ROOT add1 = f32[1,4] add(add, dyn2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
m::DynamicSlice(
m::AllReduce(m::Add(m::Add(m::Parameter(0), m::Parameter(1)),
m::Parameter(2))),
m::Constant(), m::Parameter(3)));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, AllReduceDynamicSlicePatternSameOperand) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[1,8] parameter(0)
p1 = f32[1,8] parameter(1)
p2 = s32[] parameter(2)
cst = s32[] constant(0)
ar0 = f32[1,8] all-reduce(p0), replica_groups={}, to_apply=sum
ar2 = f32[1,8] all-reduce(p1), replica_groups={}, to_apply=sum
dyn0 = f32[1,4] dynamic-slice(ar0, cst, p2), dynamic_slice_sizes={1,4}
dyn2 = f32[1,4] dynamic-slice(ar2, cst, p2), dynamic_slice_sizes={1,4}
add = f32[1,4] add(dyn0, dyn0)
ROOT add1 = f32[1,4] add(add, dyn2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
m::DynamicSlice(
m::AllReduce(m::Add(m::Add(m::Parameter(0), m::Parameter(0)),
m::Parameter(1))),
m::Constant(), m::Parameter(2)));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_EQ(AllReduceCount(module), 1);
}
TEST_F(AllReduceSimplifierTest, AllReduceDynamicSliceDifferentSlices) {
absl::string_view hlo_string = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
p0 = f32[1,8] parameter(0)
p1 = f32[1,8] parameter(1)
p2 = f32[1,16] parameter(2)
p3 = s32[] parameter(3)
cst = s32[] constant(0)
ar0 = f32[1,8] all-reduce(p0), replica_groups={}, to_apply=sum
ar1 = f32[1,8] all-reduce(p1), replica_groups={}, to_apply=sum
ar2 = f32[1,16] all-reduce(p2), replica_groups={}, to_apply=sum
dyn0 = f32[1,4] dynamic-slice(ar0, cst, p3), dynamic_slice_sizes={1,4}
dyn1 = f32[1,4] dynamic-slice(ar1, cst, p3), dynamic_slice_sizes={1,4}
dyn2 = f32[1,4] dynamic-slice(ar2, cst, p3), dynamic_slice_sizes={1,4}
add = f32[1,4] add(dyn0, dyn1)
ROOT add1 = f32[1,4] add(add, dyn2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
RunPass(hlo_string, true));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
m::Add(m::DynamicSlice(),
m::DynamicSlice(m::AllReduce(), m::Constant(), m::Parameter(3))));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_EQ(AllReduceCount(module), 2);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_reassociate.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_reassociate_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
32b6f306-4d70-4f3a-8887-4af878ce1da0 | cpp | tensorflow/tensorflow | task_runner | tensorflow/core/data/service/task_runner.cc | tensorflow/core/data/service/task_runner_test.cc | #include "tensorflow/core/data/service/task_runner.h"
#include <algorithm>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "tensorflow/core/data/service/byte_size.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/cross_trainer_cache.h"
#include "tensorflow/core/data/service/data_transfer.h"
#include "tensorflow/core/data/service/thread_safe_buffer.h"
#include "tensorflow/core/data/service/worker.pb.h"
#include "tensorflow/core/data/standalone.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
namespace tensorflow {
namespace data {
namespace {
constexpr int64_t kWaitBeforeSkipUs = 100 * 1000;
constexpr size_t kDefaultCrossTrainerCacheSizeBytes =
10 * (size_t{1} << 30);
}
StandaloneTaskIterator::StandaloneTaskIterator(
std::unique_ptr<standalone::Dataset> dataset,
std::unique_ptr<standalone::Iterator> iterator)
: dataset_(std::move(dataset)), iterator_(std::move(iterator)) {}
Status StandaloneTaskIterator::GetNext(std::vector<Tensor>& element,
bool& end_of_sequence) {
return iterator_->GetNext(&element, &end_of_sequence);
}
int64_t StandaloneTaskIterator::Cardinality() const {
return dataset_->Get()->Cardinality();
}
absl::StatusOr<std::vector<Tensor>> StandaloneTaskIterator::Save() {
return iterator_->Save();
}
Status StandaloneTaskIterator::Restore(
const std::vector<Tensor>& saved_iterator) {
return iterator_->Restore(saved_iterator);
}
std::shared_ptr<model::Model> StandaloneTaskIterator::model() const {
return iterator_->model();
}
Status TaskRunner::Create(const experimental::WorkerConfig& worker_config,
const TaskDef& task_def,
std::unique_ptr<TaskIterator> iterator,
std::unique_ptr<TaskRunner>& out) {
if (task_def.optional_num_consumers_case() == TaskDef::kNumConsumers) {
int64_t cardinality = iterator->Cardinality();
if (cardinality != kInfiniteCardinality &&
cardinality != kUnknownCardinality) {
return errors::FailedPrecondition(
"Round robin reads require that the input dataset has infinite "
"cardinality, but the dataset has cardinality ",
cardinality,
". Consider adding a `.repeat()` transformation to the dataset.");
}
out = std::make_unique<RoundRobinTaskRunner>(std::move(iterator),
task_def.num_consumers(),
task_def.worker_address());
} else if (task_def.use_cross_trainer_cache()) {
const size_t max_cache_size_bytes =
worker_config.cross_trainer_cache_size_bytes() > 0
? worker_config.cross_trainer_cache_size_bytes()
: kDefaultCrossTrainerCacheSizeBytes;
out = std::make_unique<CachingTaskRunner>(std::move(iterator),
max_cache_size_bytes);
} else {
out = std::make_unique<FirstComeFirstServedTaskRunner>(std::move(iterator));
}
return absl::OkStatus();
}
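// TaskRunner::Create dispatches on the task configuration: a round-robin
// runner when the TaskDef carries a consumer count (which requires an
// infinite-cardinality input), a caching runner when use_cross_trainer_cache()
// is set, and a first-come-first-served runner otherwise. A minimal call-site
// sketch, assuming `worker_config`, `task_def`, and `iterator` are already
// populated (these surrounding names are illustrative only):
//
//   std::unique_ptr<TaskRunner> runner;
//   TF_RETURN_IF_ERROR(TaskRunner::Create(worker_config, task_def,
//                                         std::move(iterator), runner));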
FirstComeFirstServedTaskRunner::FirstComeFirstServedTaskRunner(
std::unique_ptr<TaskIterator> iterator)
: iterator_(std::move(iterator)), buffer_(1) {
RunPrefetchThread();
}
FirstComeFirstServedTaskRunner::~FirstComeFirstServedTaskRunner() { Cancel(); }
Status FirstComeFirstServedTaskRunner::GetNext(const GetElementRequest& req,
GetElementResult& result) {
if (req.allow_skip() && buffer_.Empty()) {
result.skip = true;
return absl::OkStatus();
}
return GetNext(result);
}
Status FirstComeFirstServedTaskRunner::GetNext(GetElementResult& result) {
TF_ASSIGN_OR_RETURN(result, buffer_.Pop());
return absl::OkStatus();
}
Status FirstComeFirstServedTaskRunner::PrefetchFn() {
while (true) {
TF_RETURN_IF_ERROR(buffer_.Push(GetNextFromInputIterator()));
}
return absl::OkStatus();
}
void FirstComeFirstServedTaskRunner::RunPrefetchThread() {
auto prefetch_fn = [this] {
Status status = PrefetchFn();
if (!status.ok()) {
buffer_.Cancel(status);
}
};
prefetch_thread_ = absl::WrapUnique(Env::Default()->StartThread(
{}, "tf_data_service_fcfs_prefetch_thread",
prefetch_fn));
}
absl::StatusOr<GetElementResult>
FirstComeFirstServedTaskRunner::GetNextFromInputIterator()
TF_LOCKS_EXCLUDED(mu_) {
GetElementResult result;
std::vector<Tensor> element;
bool end_of_task = false;
result.skip = false;
{
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(iterator_->GetNext(element, end_of_task));
result.end_of_sequence = end_of_task;
result.element_index = element_index_++;
}
if (!end_of_task) {
result.components = std::move(element);
}
return result;
}
void FirstComeFirstServedTaskRunner::Cancel() {
VLOG(2) << "Cancelling tf.data service FCFS task.";
buffer_.Cancel(errors::Cancelled("tf.data service FCFS task is cancelled."));
}
std::shared_ptr<model::Model> FirstComeFirstServedTaskRunner::model() const {
return model_;
}
CachingTaskRunner::CachingTaskRunner(std::unique_ptr<TaskIterator> iterator,
size_t max_cache_size_bytes)
: fcfs_task_runner_(std::move(iterator)),
cache_(max_cache_size_bytes,
std::make_unique<GetElementResultSequence>(fcfs_task_runner_)) {
LOG(INFO) << "Initialized tf.data service cross-trainer cache with "
<< ByteSize::Bytes(max_cache_size_bytes) << " of memory.";
}
CachingTaskRunner::~CachingTaskRunner() { Cancel(); }
Status CachingTaskRunner::GetNext(const GetElementRequest& req,
GetElementResult& result) {
TF_ASSIGN_OR_RETURN(std::shared_ptr<const GetElementResult> element,
cache_.Get(req.trainer_id()));
result = element->Copy();
return absl::OkStatus();
}
CachingTaskRunner::GetElementResultSequence::GetElementResultSequence(
FirstComeFirstServedTaskRunner& fcfs_task_runner)
: fcfs_task_runner_(fcfs_task_runner) {}
absl::StatusOr<GetElementResult>
CachingTaskRunner::GetElementResultSequence::GetNext() {
GetElementResult result;
TF_RETURN_IF_ERROR(fcfs_task_runner_.GetNext(result));
if (result.end_of_sequence) {
return errors::InvalidArgument(
"Cross-trainer caching requires the input dataset to be infinite. "
"However, it reached the end of sequence.");
}
return result;
}
size_t CachingTaskRunner::GetElementResultSequence::GetElementSizeBytes(
const GetElementResult& element) const {
return element.EstimatedMemoryUsageBytes();
}
void CachingTaskRunner::Cancel() {
VLOG(2) << "Cancelling tf.data service cross-trainer cache task.";
if (!cache_.IsCancelled()) {
cache_.Cancel(errors::Cancelled(
"tf.data service cross-trainer cache task is cancelled."));
}
fcfs_task_runner_.Cancel();
}
std::shared_ptr<model::Model> CachingTaskRunner::model() const {
return fcfs_task_runner_.model();
}
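// The caching runner serves every trainer from a shared CrossTrainerCache
// keyed by GetElementRequest::trainer_id(); new elements are pulled from the
// wrapped FCFS runner only on a cache miss, so with a small cache a trainer
// that falls behind may skip evicted elements instead of stalling faster
// trainers. A request sketch (the trainer id value is illustrative):
//
//   GetElementRequest request;
//   request.set_trainer_id("Trainer_0");
//   GetElementResult result;
//   TF_RETURN_IF_ERROR(runner.GetNext(request, result));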
RoundRobinTaskRunner::RoundRobinTaskRunner(
std::unique_ptr<TaskIterator> iterator, int64_t num_consumers,
string worker_address)
: num_consumers_(num_consumers),
worker_address_(worker_address),
buffer_(num_consumers_),
prefetch_thread_(std::move(iterator), num_consumers_) {
VLOG(1) << "Creating task runner for distributing data round-robin to "
<< num_consumers << " consumers";
}
Status RoundRobinTaskRunner::ValidateRequest(const GetElementRequest& req) {
if (req.consumer_index() < 0 || req.round_index() < 0) {
return errors::FailedPrecondition(
"RoundRobinTaskRunner needs to know the consumer index and element "
"index of each request.");
}
if (req.consumer_index() >= num_consumers_) {
return errors::FailedPrecondition(
"Requesting data for consumer index ", req.consumer_index(),
", but the task is configured for only ", num_consumers_, " consumers");
}
return absl::OkStatus();
}
Status RoundRobinTaskRunner::PrepareFullRound(int64_t wait_us)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
VLOG(1) << worker_address_ << ": Preparing full round for round "
<< current_round_;
TF_RETURN_IF_ERROR(prefetch_thread_.FillBuffer(wait_us, buffer_));
round_skipped_ = buffer_.empty();
new_round_cv_.notify_all();
return absl::OkStatus();
}
Status RoundRobinTaskRunner::PreparePartialRound()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
VLOG(1) << worker_address_ << ": Starting partial round " << first_round_
<< " for " << requests_[first_round_].size() << " consumers";
current_round_ = first_round_;
new_round_cv_.notify_all();
auto next_round_request = *(requests_[first_round_ + 1].begin()->second);
if (next_round_request.skipped_previous_round()) {
VLOG(1) << "Skipping partial round";
round_skipped_ = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(prefetch_thread_.FillBuffer(-1, buffer_));
round_skipped_ = false;
return absl::OkStatus();
}
Status RoundRobinTaskRunner::PrepareRound(const GetElementRequest& req) {
mutex_lock l(mu_);
first_round_ = std::min(first_round_, req.round_index());
absl::flat_hash_map<int64_t, const GetElementRequest*>& round =
requests_[req.round_index()];
round[req.consumer_index()] = &req;
auto cleanup = gtl::MakeCleanup([&]() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
requests_[req.round_index()].erase(req.consumer_index());
});
if (current_round_ < req.round_index() && round.size() == num_consumers_) {
current_round_ = req.round_index();
int64_t wait_us = kWaitBeforeSkipUs;
if (!req.allow_skip()) {
wait_us = -1;
}
TF_RETURN_IF_ERROR(PrepareFullRound(wait_us));
}
if (current_round_ < 0 &&
requests_[first_round_].size() + requests_[first_round_ + 1].size() ==
num_consumers_) {
TF_RETURN_IF_ERROR(PreparePartialRound());
}
while (!cancelled_ && current_round_ < req.round_index()) {
TF_RETURN_IF_ERROR(prefetch_thread_.GetStatus());
new_round_cv_.wait(l);
}
if (current_round_ < req.round_index() && cancelled_) {
return errors::Cancelled("Worker is shutting down.");
}
if (current_round_ != req.round_index()) {
return errors::FailedPrecondition(
"Consumer ", req.consumer_index(), " requested data for round ",
req.round_index(), ", but the current round has already reached ",
current_round_,
". This may indicate that the consumer was restarted with the same "
"iteration "
"name.`");
}
return prefetch_thread_.GetStatus();
}
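// PrepareRound blocks each consumer until every consumer has checked in for
// the same round (or the round times out and is marked skipped), so all
// consumers of a round read from the same prefetched buffer. The very first
// round may be served as a partial round when consumers restart with adjacent
// round indices, as handled by PreparePartialRound above.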
Status RoundRobinTaskRunner::GetNext(const GetElementRequest& req,
GetElementResult& result) {
TF_RETURN_IF_ERROR(ValidateRequest(req));
result.end_of_sequence = false;
VLOG(2) << worker_address_ << ": Received request from consumer index "
<< req.consumer_index() << " for round " << req.round_index();
TF_RETURN_IF_ERROR(PrepareRound(req));
tf_shared_lock l(mu_);
result.skip = round_skipped_;
if (round_skipped_) {
VLOG(1) << worker_address_ << ": Buffer not ready, skipping round "
<< current_round_ << " for consumer " << req.consumer_index();
return absl::OkStatus();
}
auto& buffer_result = buffer_[req.consumer_index()];
result.element_index = buffer_result->index;
std::vector<Tensor> element;
for (auto& component : buffer_result->components) {
element.push_back(tensor::DeepCopy(component));
}
if (VLOG_IS_ON(2)) {
int64_t size = 0;
for (auto& component : element) {
size += component.TotalBytes();
}
VLOG(2) << worker_address_ << ": Returning element " << result.element_index
<< " to consumer " << req.consumer_index() << " for round "
<< req.round_index() << ". element size " << size;
}
result.components = std::move(element);
return absl::OkStatus();
}
void RoundRobinTaskRunner::Cancel() {
mutex_lock l(mu_);
cancelled_ = true;
new_round_cv_.notify_all();
}
std::shared_ptr<model::Model> RoundRobinTaskRunner::model() const {
return prefetch_thread_.model();
}
PrefetchThread::PrefetchThread(std::unique_ptr<TaskIterator> iterator,
int64_t round_size)
: iterator_(std::move(iterator)), round_size_(round_size) {
thread_ = absl::WrapUnique(
Env::Default()->StartThread({}, "round-robin-prefetch", [&] { Run(); }));
}
PrefetchThread::~PrefetchThread() {
mutex_lock l(mu_);
cancelled_ = true;
cv_.notify_all();
}
void PrefetchThread::Run() {
while (true) {
{
mutex_lock l(mu_);
while (!cancelled_ && buffer_.size() >= round_size_) {
cv_.wait(l);
}
if (cancelled_) {
return;
}
}
std::vector<Tensor> element;
bool end_of_sequence;
Status s = iterator_->GetNext(element, end_of_sequence);
if (!s.ok()) {
mutex_lock l(mu_);
status_ = s;
cv_.notify_all();
return;
}
if (end_of_sequence) {
mutex_lock l(mu_);
status_ = errors::FailedPrecondition(
"Encountered end of sequence on a round-robin read iterator. "
"Please ensure that the dataset used for round-robin reading has "
"infinite cardinality, e.g. by adding a .repeat() transformation "
"at the end.");
cv_.notify_all();
return;
}
mutex_lock l(mu_);
buffer_.push_back(std::make_unique<Element>(std::move(element), index_++));
cv_.notify_all();
}
}
Status PrefetchThread::FillBuffer(int64_t wait_us,
std::vector<std::unique_ptr<Element>>& out) {
int64_t start_us = Env::Default()->NowMicros();
out.clear();
mutex_lock l(mu_);
while (buffer_.size() < round_size_ && !cancelled_ && status_.ok()) {
int64_t remaining_us = start_us + wait_us - Env::Default()->NowMicros();
if (wait_us >= 0 && remaining_us <= 0) {
break;
}
cv_.wait_for(l, std::chrono::microseconds(remaining_us));
}
TF_RETURN_IF_ERROR(status_);
if (cancelled_) {
return errors::Cancelled("Prefetch thread cancelled");
}
if (buffer_.size() < round_size_) {
DCHECK_GE(wait_us, 0);
return absl::OkStatus();
}
for (auto& elem : buffer_) {
out.push_back(std::move(elem));
}
buffer_.clear();
cv_.notify_all();
return absl::OkStatus();
}
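// FillBuffer semantics as implemented above: a non-negative wait_us bounds how
// long the call waits for a full round; if the deadline passes before
// round_size_ elements are buffered, `out` is left empty and the caller treats
// the round as skipped. A negative wait_us does not return until a full round
// is buffered, the thread is cancelled, or the iterator reports an error.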
Status PrefetchThread::GetStatus() {
mutex_lock l(mu_);
return status_;
}
std::shared_ptr<model::Model> PrefetchThread::model() const {
return iterator_->model();
}
}
} | #include "tensorflow/core/data/service/task_runner.h"
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/core/data/service/data_transfer.h"
#include "tensorflow/core/data/service/worker.pb.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/dataset.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::tensorflow::testing::IsOkAndHolds;
using ::tensorflow::testing::StatusIs;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::Gt;
using ::testing::HasSubstr;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAreArray;
constexpr size_t kSmallCache = 100;
constexpr size_t kLargeCache = 10 * (size_t{1} << 30);
class RangeIterator : public TaskIterator {
public:
explicit RangeIterator(const int64_t range, const bool repeat)
: range_(range), repeat_(repeat) {}
Status GetNext(std::vector<Tensor>& element, bool& end_of_sequence) override {
end_of_sequence = (next_ >= range_);
if (end_of_sequence) {
return absl::OkStatus();
}
element = {Tensor{next_++}};
if (repeat_) {
next_ = next_ % range_;
}
return absl::OkStatus();
}
int64_t Cardinality() const override {
return repeat_ ? kInfiniteCardinality : range_;
}
private:
const int64_t range_;
const bool repeat_;
int64_t next_ = 0;
};
class InfiniteRangeIterator : public TaskIterator {
public:
InfiniteRangeIterator() = default;
Status GetNext(std::vector<Tensor>& element, bool& end_of_sequence) override {
element = {Tensor{next_++}};
return absl::OkStatus();
}
int64_t Cardinality() const override { return kInfiniteCardinality; }
private:
int64_t next_ = 0;
};
template <class T>
class ElementOrErrorIterator : public TaskIterator {
public:
explicit ElementOrErrorIterator(const std::vector<StatusOr<T>>& elements)
: elements_(elements) {}
Status GetNext(std::vector<Tensor>& element, bool& end_of_sequence) override {
end_of_sequence = (next_ >= elements_.size());
if (end_of_sequence) {
return absl::OkStatus();
}
const StatusOr<T>& next_element = elements_[next_++];
TF_RETURN_IF_ERROR(next_element.status());
element = {Tensor{*next_element}};
return absl::OkStatus();
}
int64_t Cardinality() const override { return elements_.size(); }
private:
const std::vector<StatusOr<T>> elements_;
int64_t next_ = 0;
};
template <class T>
StatusOr<std::vector<T>> GetTaskRunnerOutput(TaskRunner& runner,
const GetElementRequest& request) {
std::vector<T> output;
for (bool end_of_sequence = false; !end_of_sequence;) {
GetElementResult result;
TF_RETURN_IF_ERROR(runner.GetNext(request, result));
end_of_sequence = result.end_of_sequence;
if (end_of_sequence) {
break;
}
if (result.components.size() != 1) {
return errors::Internal("GetElementResult Tensor size should be 1.");
}
output.push_back(result.components[0].unaligned_flat<T>().data()[0]);
}
return output;
}
template <class T>
StatusOr<T> GetNextFromTaskRunner(TaskRunner& runner,
const GetElementRequest& request) {
GetElementResult result;
TF_RETURN_IF_ERROR(runner.GetNext(request, result));
if (result.end_of_sequence) {
return errors::OutOfRange("TaskRunner has reached the end of sequence.");
}
if (result.components.size() != 1) {
return errors::Internal("GetElementResult Tensor size should be 1.");
}
return result.components[0].unaligned_flat<T>().data()[0];
}
template <class T>
StatusOr<std::vector<T>> GetElementsFromTaskRunner(
TaskRunner& runner, const GetElementRequest& request,
const size_t num_elements) {
std::vector<T> output;
for (size_t i = 0; i < num_elements; ++i) {
TF_ASSIGN_OR_RETURN(T next, GetNextFromTaskRunner<T>(runner, request));
output.push_back(next);
}
return output;
}
std::vector<int64_t> GetRange(const size_t range) {
std::vector<int64_t> result;
for (int64_t i = 0; i < range; ++i) {
result.push_back(i);
}
return result;
}
Status RunConsumer(int64_t consumer_index, int64_t start_index,
int64_t end_index, TaskRunner& task_runner,
std::vector<int64_t>& output) {
for (int64_t next_index = start_index; next_index < end_index; ++next_index) {
GetElementRequest request;
request.set_round_index(next_index);
request.set_consumer_index(consumer_index);
request.set_skipped_previous_round(false);
request.set_allow_skip(false);
GetElementResult result;
do {
TF_RETURN_IF_ERROR(task_runner.GetNext(request, result));
if (!result.end_of_sequence) {
output.push_back(result.components[0].flat<int64_t>()(0));
}
} while (result.skip);
}
return absl::OkStatus();
}
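// RunConsumer drives one simulated round-robin consumer: for every round index
// in [start_index, end_index) it re-issues the same request until the runner
// stops reporting `skip`, so skipped rounds are retried rather than dropped.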
}
TEST(FirstComeFirstServedTaskRunnerTest, GetNext) {
size_t range = 10;
FirstComeFirstServedTaskRunner runner(
std::make_unique<RangeIterator>(range, false));
TF_ASSERT_OK_AND_ASSIGN(
std::vector<int64_t> output,
GetTaskRunnerOutput<int64_t>(runner, GetElementRequest()));
EXPECT_THAT(output, ElementsAreArray(GetRange(range)));
GetElementResult result;
TF_ASSERT_OK(runner.GetNext(GetElementRequest(), result));
EXPECT_TRUE(result.end_of_sequence);
}
TEST(FirstComeFirstServedTaskRunnerTest, EmptyDataset) {
FirstComeFirstServedTaskRunner runner(
std::make_unique<RangeIterator>(0, false));
for (int i = 0; i < 5; ++i) {
GetElementResult result;
TF_ASSERT_OK(runner.GetNext(GetElementRequest(), result));
EXPECT_TRUE(result.end_of_sequence);
}
}
TEST(FirstComeFirstServedTaskRunnerTest, Cancel) {
size_t range = 10;
FirstComeFirstServedTaskRunner runner(
std::make_unique<RangeIterator>(range, false));
runner.Cancel();
for (int i = 0; i < range; ++i) {
GetElementResult result;
EXPECT_THAT(runner.GetNext(GetElementRequest(), result),
testing::StatusIs(error::CANCELLED));
}
}
TEST(FirstComeFirstServedTaskRunnerTest, ConcurrentReaders) {
size_t range = 1000;
size_t num_readers = 10;
FirstComeFirstServedTaskRunner runner(
std::make_unique<RangeIterator>(range, false));
mutex mu;
std::vector<int64_t> results;
std::vector<std::unique_ptr<Thread>> reader_threads;
for (int i = 0; i < num_readers; ++i) {
reader_threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
{}, absl::StrCat("Trainer_", i),
[&runner, &results, &mu]() {
TF_ASSERT_OK_AND_ASSIGN(
std::vector<int64_t> output,
GetTaskRunnerOutput<int64_t>(runner, GetElementRequest()));
GetElementResult result;
TF_ASSERT_OK(runner.GetNext(GetElementRequest(), result));
EXPECT_TRUE(result.end_of_sequence);
mutex_lock l(mu);
std::move(output.begin(), output.end(), std::back_inserter(results));
})));
}
for (auto& thread : reader_threads) {
thread.reset();
}
EXPECT_THAT(results, UnorderedElementsAreArray(GetRange(range)));
}
TEST(FirstComeFirstServedTaskRunnerTest, GetNextAndCancel) {
size_t range = 10;
FirstComeFirstServedTaskRunner runner(
std::make_unique<RangeIterator>(range, false));
int64_t i;
for (i = 0; i < range / 2; ++i) {
EXPECT_THAT(GetNextFromTaskRunner<int64_t>(runner, GetElementRequest()),
IsOkAndHolds(i));
}
runner.Cancel();
for (; i < range; ++i) {
GetElementResult result;
EXPECT_THAT(runner.GetNext(GetElementRequest(), result),
testing::StatusIs(error::CANCELLED));
}
}
TEST(FirstComeFirstServedTaskRunnerTest, Error) {
FirstComeFirstServedTaskRunner runner(
std::make_unique<ElementOrErrorIterator<tstring>>(
std::vector<absl::StatusOr<tstring>>{
tstring("First element"),
errors::InvalidArgument("Invalid argument"),
tstring("Second element"), errors::Aborted("Aborted")}));
EXPECT_THAT(GetNextFromTaskRunner<tstring>(runner, GetElementRequest()),
IsOkAndHolds("First element"));
EXPECT_THAT(GetNextFromTaskRunner<tstring>(runner, GetElementRequest()),
testing::StatusIs(error::INVALID_ARGUMENT));
EXPECT_THAT(GetNextFromTaskRunner<tstring>(runner, GetElementRequest()),
IsOkAndHolds("Second element"));
EXPECT_THAT(GetNextFromTaskRunner<tstring>(runner, GetElementRequest()),
testing::StatusIs(error::ABORTED));
}
TEST(CachingTaskRunnerTest, GetNext) {
size_t range = 10;
CachingTaskRunner runner(std::make_unique<InfiniteRangeIterator>(),
kLargeCache);
size_t num_trainers = 10;
for (size_t i = 0; i < num_trainers; ++i) {
GetElementRequest request;
request.set_trainer_id(absl::StrCat("Trainer ", i));
TF_ASSERT_OK_AND_ASSIGN(
std::vector<int64_t> output,
GetElementsFromTaskRunner<int64_t>(runner, request, range));
EXPECT_THAT(output, ElementsAreArray(GetRange(range)));
GetElementResult result;
TF_ASSERT_OK(runner.GetNext(request, result));
EXPECT_FALSE(result.end_of_sequence);
}
}
TEST(CachingTaskRunnerTest, EmptyDataset) {
CachingTaskRunner runner(
std::make_unique<RangeIterator>(0, false),
kLargeCache);
GetElementRequest request;
request.set_trainer_id("Trainer ID");
GetElementResult result;
EXPECT_THAT(runner.GetNext(request, result),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Cross-trainer caching requires the input "
"dataset to be infinite.")));
}
TEST(CachingTaskRunnerTest, SlowClientSkipsData) {
size_t range = 1000;
CachingTaskRunner runner(std::make_unique<InfiniteRangeIterator>(),
kSmallCache);
GetElementRequest request;
request.set_trainer_id("Fast trainer");
TF_ASSERT_OK_AND_ASSIGN(
std::vector<int64_t> fast_trainer_output,
GetElementsFromTaskRunner<int64_t>(runner, request, range));
EXPECT_THAT(fast_trainer_output, ElementsAreArray(GetRange(range)));
request.set_trainer_id("Slow trainer");
TF_ASSERT_OK_AND_ASSIGN(
std::vector<int64_t> slow_trainer_output,
GetElementsFromTaskRunner<int64_t>(runner, request, range));
EXPECT_THAT(slow_trainer_output, SizeIs(range));
EXPECT_THAT(slow_trainer_output[0], Gt(0));
}
TEST(CachingTaskRunnerTest, ConcurrentTrainers) {
size_t range = 100;
size_t num_readers = 10;
CachingTaskRunner runner(std::make_unique<InfiniteRangeIterator>(),
kLargeCache);
std::vector<std::unique_ptr<Thread>> reader_threads;
for (int i = 0; i < num_readers; ++i) {
reader_threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
{}, absl::StrCat("Trainer_", i),
[&runner, range, i]() {
GetElementRequest request;
request.set_trainer_id(absl::StrCat("Trainer_", i));
TF_ASSERT_OK_AND_ASSIGN(
std::vector<int64_t> output,
GetElementsFromTaskRunner<int64_t>(runner, request, range));
EXPECT_THAT(output, ElementsAreArray(GetRange(range)));
GetElementResult result;
TF_ASSERT_OK(runner.GetNext(request, result));
EXPECT_FALSE(result.end_of_sequence);
})));
}
}
TEST(CachingTaskRunnerTest, Cancel) {
CachingTaskRunner runner(std::make_unique<InfiniteRangeIterator>(),
kLargeCache);
GetElementRequest request;
request.set_trainer_id("Trainer ID");
int i;
  for (i = 0; i < 5; ++i) {
EXPECT_THAT(GetNextFromTaskRunner<int64_t>(runner, request),
IsOkAndHolds(i));
}
runner.Cancel();
for (; i < 10; ++i) {
GetElementResult result;
EXPECT_THAT(runner.GetNext(request, result),
testing::StatusIs(error::CANCELLED));
}
}
TEST(CachingTaskRunnerTest, CancelConcurrentReaders) {
size_t num_readers = 10;
CachingTaskRunner runner(std::make_unique<InfiniteRangeIterator>(),
kSmallCache);
std::vector<std::unique_ptr<Thread>> reader_threads;
for (size_t i = 0; i < num_readers; ++i) {
reader_threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
{}, absl::StrCat("Trainer_", i),
[&runner]() {
for (size_t j = 0; true; ++j) {
GetElementRequest request;
request.set_trainer_id(absl::StrCat("Trainer_", (j % 100)));
GetElementResult result;
Status status = runner.GetNext(request, result);
if (!status.ok()) {
return;
}
ASSERT_FALSE(result.end_of_sequence);
ASSERT_EQ(result.components.size(), 1);
}
})));
}
Env::Default()->SleepForMicroseconds(1000000);
runner.Cancel();
for (auto& thread : reader_threads) {
thread.reset();
}
GetElementRequest request;
GetElementResult result;
request.set_trainer_id(absl::StrCat("Trainer_", 0));
EXPECT_THAT(runner.GetNext(request, result),
testing::StatusIs(error::CANCELLED));
}
TEST(CachingTaskRunnerTest, Errors) {
size_t num_readers = 10;
CachingTaskRunner runner(
std::make_unique<ElementOrErrorIterator<tstring>>(
std::vector<absl::StatusOr<tstring>>{
tstring("First element"),
errors::Cancelled("Cancelled"),
tstring("Second element"),
errors::FailedPrecondition("FailedPrecondition"),
tstring("Third element"),
errors::Unavailable("Unavailable"),
}),
kLargeCache);
std::vector<std::unique_ptr<Thread>> reader_threads;
std::vector<std::vector<tstring>> results;
results.reserve(num_readers);
for (size_t i = 0; i < num_readers; ++i) {
results.emplace_back();
std::vector<tstring>& result = results.back();
reader_threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
{}, absl::StrCat("Trainer_", i),
[&runner, &result, i]() {
GetElementRequest request;
request.set_trainer_id(absl::StrCat("Trainer_", i));
while (true) {
absl::StatusOr<tstring> element =
GetNextFromTaskRunner<tstring>(runner, request);
if (element.ok()) {
result.push_back(*element);
}
if (errors::IsInvalidArgument(element.status())) {
EXPECT_THAT(
element.status(),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Cross-trainer caching requires the input "
"dataset to be infinite.")));
return;
}
}
})));
}
for (auto& thread : reader_threads) {
thread.reset();
}
EXPECT_EQ(results.size(), num_readers);
for (const std::vector<tstring>& result : results) {
EXPECT_THAT(result,
ElementsAre(tstring("First element"), tstring("Second element"),
tstring("Third element")));
}
}
class ConsumeParallelTest
: public ::testing::Test,
public ::testing::WithParamInterface<std::tuple<int64_t, int64_t>> {};
TEST_P(ConsumeParallelTest, ConsumeParallel) {
int64_t num_elements = std::get<0>(GetParam());
int64_t num_consumers = std::get<1>(GetParam());
RoundRobinTaskRunner runner(
std::make_unique<RangeIterator>(num_elements, true),
num_consumers,
"test_worker_address");
std::vector<std::vector<int64_t>> per_consumer_results;
std::vector<std::unique_ptr<Thread>> consumers;
mutex mu;
Status error;
for (int consumer = 0; consumer < num_consumers; ++consumer) {
mutex_lock l(mu);
per_consumer_results.emplace_back();
consumers.push_back(absl::WrapUnique(Env::Default()->StartThread(
{}, absl::StrCat("consumer_", consumer), [&, consumer] {
std::vector<int64_t> results;
Status s = RunConsumer(consumer, 0,
num_elements, runner, results);
mutex_lock l(mu);
if (!s.ok()) {
error = s;
return;
}
per_consumer_results[consumer] = std::move(results);
})));
}
consumers.clear();
mutex_lock l(mu);
TF_ASSERT_OK(error);
for (int i = 0; i < num_elements; ++i) {
int consumer = i % num_consumers;
int round = i / num_consumers;
EXPECT_EQ(per_consumer_results[consumer][round], i);
}
}
INSTANTIATE_TEST_SUITE_P(ConsumeParallelTests, ConsumeParallelTest,
::testing::Values(std::make_tuple(1000, 5),
std::make_tuple(1003, 5),
std::make_tuple(1000, 20),
std::make_tuple(4, 20),
std::make_tuple(0, 20)));
TEST(RoundRobinTaskRunner, ConsumeParallelPartialRound) {
int64_t num_consumers = 5;
std::vector<int64_t> starting_rounds = {12, 11, 11, 12, 12};
int64_t end_index = 15;
std::vector<std::vector<int64_t>> expected_consumer_results = {
{5, 10, 15}, {1, 6, 11, 16}, {2, 7, 12, 17}, {8, 13, 18}, {9, 14, 19}};
RoundRobinTaskRunner runner(
std::make_unique<RangeIterator>(30, true), num_consumers,
"test_worker_address");
std::vector<std::vector<int64_t>> per_consumer_results;
std::vector<std::unique_ptr<Thread>> consumers;
mutex mu;
Status error;
for (int consumer = 0; consumer < num_consumers; ++consumer) {
mutex_lock l(mu);
per_consumer_results.emplace_back();
consumers.push_back(absl::WrapUnique(Env::Default()->StartThread(
{}, absl::StrCat("consumer_", consumer), [&, consumer] {
std::vector<int64_t> results;
Status s = RunConsumer(consumer, starting_rounds[consumer], end_index,
runner, results);
mutex_lock l(mu);
if (!s.ok()) {
error = s;
return;
}
per_consumer_results[consumer] = std::move(results);
})));
}
consumers.clear();
mutex_lock l(mu);
TF_ASSERT_OK(error);
for (int consumer = 0; consumer < num_consumers; ++consumer) {
EXPECT_EQ(per_consumer_results[consumer],
expected_consumer_results[consumer]);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/task_runner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/task_runner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5fda2d8d-3b4a-4536-afb2-12c1cb9ba632 | cpp | tensorflow/tensorflow | cuda_driver | third_party/xla/xla/stream_executor/cuda/cuda_driver.cc | third_party/xla/xla/stream_executor/cuda/cuda_driver_test.cc | #include "xla/stream_executor/cuda/cuda_driver.h"
#include <stdint.h>
#include <stdlib.h>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <new>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/base/casts.h"
#include "absl/container/inlined_vector.h"
#include "absl/debugging/leak_check.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/notification.h"
#include "absl/types/span.h"
#include "third_party/gpus/cuda/include/cuda.h"
#include "third_party/gpus/cuda/include/cuda_runtime_api.h"
#include "third_party/gpus/cuda/include/driver_types.h"
#include "xla/stream_executor/cuda/cuda_status.h"
#include "xla/stream_executor/gpu/context.h"
#include "xla/stream_executor/gpu/context_map.h"
#include "xla/stream_executor/gpu/gpu_diagnostics.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/gpu/gpu_types.h"
#include "xla/stream_executor/gpu/scoped_activate_context.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/stream_executor.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/numbers.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
namespace stream_executor {
namespace gpu {
namespace {
absl::StatusOr<CUdevice> DeviceFromContext(Context* context) {
ScopedActivateContext activated{context};
CUdevice device = -1;
auto status = cuda::ToStatus(cuCtxGetDevice(&device));
if (status.ok()) {
return device;
}
return status;
}
CUcontext CurrentContextOrDie() {
CUcontext current = nullptr;
TF_CHECK_OK(cuda::ToStatus(cuCtxGetCurrent(¤t),
"Failed to query current context"));
return current;
}
ContextMap<CUcontext, GpuContext>* GetContextMap() {
static ContextMap<CUcontext, GpuContext>* context_map =
new ContextMap<CUcontext, GpuContext>([](void* ptr) {
int device_ordinal;
absl::Status status = cuda::ToStatus(
cuPointerGetAttribute(static_cast<void*>(&device_ordinal),
CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL,
reinterpret_cast<CUdeviceptr>(ptr)));
if (!status.ok()) {
LOG(FATAL) << "Not able to get the device_ordinal for ptr: " << ptr
<< ". Error: " << status;
}
return device_ordinal;
});
return context_map;
}
CUcontext CurrentContext() {
CUcontext current = CurrentContextOrDie();
if (current != nullptr && !GetContextMap()->Has(current)) {
LOG(FATAL) << "current context was not created by the StreamExecutor "
"cuda_driver API: "
<< current
<< "; a CUDA runtime call "
"was likely performed without using a StreamExecutor context";
}
return current;
}
tsl::thread::ThreadPool* GetDriverExecutor() {
static tsl::thread::ThreadPool* thread_pool = new tsl::thread::ThreadPool(
tsl::Env::Default(), tsl::ThreadOptions(), "cuda_driver", 1);
return thread_pool;
}
}
void GpuContext::SetActive() {
TF_CHECK_OK(
cuda::ToStatus(cuCtxSetCurrent(context_), "Failed setting context"));
}
bool GpuContext::IsActive() const { return CurrentContext() == context_; }
namespace {
static absl::Status InternalInit() {
absl::Status status =
cuda::ToStatus(cuInit(0 ), "Failed call to cuInit");
if (status.ok()) {
return status;
}
LOG(ERROR) << "failed call to cuInit: " << status;
Diagnostician::LogDiagnosticInformation();
return status;
}
const char kScheduleSpinString[] = "spin";
const char kScheduleYieldString[] = "yield";
const char kScheduleBlockingSyncString[] = "blocking_sync";
int GetFlagsFromEnv() {
const char* gpu_schedule_string =
std::getenv("TF_CUDA_PLATFORM_GPU_DEVICE_SCHEDULE");
if (gpu_schedule_string == nullptr) {
return 0;
}
unsigned device_flags = 0;
if (strcmp(kScheduleSpinString, gpu_schedule_string) == 0) {
device_flags = CU_CTX_SCHED_SPIN;
} else if (strcmp(kScheduleYieldString, gpu_schedule_string) == 0) {
device_flags = CU_CTX_SCHED_YIELD;
} else if (strcmp(kScheduleBlockingSyncString, gpu_schedule_string) == 0) {
device_flags = CU_CTX_SCHED_BLOCKING_SYNC;
} else {
LOG(QFATAL) << "Unknown option for environment variable "
"TF_CUDA_PLATFORM_GPU_DEVICE_SCHEDULE "
<< gpu_schedule_string << " should be one of {"
<< kScheduleBlockingSyncString << ", " << kScheduleSpinString
<< ", " << kScheduleYieldString << "}";
}
return device_flags;
}
}
absl::Status GpuDriver::Init() {
static absl::Status* init_retval = [] {
return new absl::Status(InternalInit());
}();
return *init_retval;
}
absl::Status GpuDriver::GetDevice(int device_ordinal, CUdevice* device) {
return cuda::ToStatus(cuDeviceGet(device, device_ordinal),
"Failed call to cuDeviceGet");
}
absl::Status GpuDriver::GetDeviceName(CUdevice device,
std::string* device_name) {
static const size_t kCharLimit = 64;
absl::InlinedVector<char, 4> chars(kCharLimit);
TF_RETURN_IF_ERROR(
cuda::ToStatus(cuDeviceGetName(chars.begin(), kCharLimit - 1, device),
"Failed to get device name"));
chars[kCharLimit - 1] = '\0';
*device_name = chars.begin();
return absl::OkStatus();
}
absl::Status GpuDriver::CreateContext(int device_ordinal, CUdevice device,
Context** context) {
*context = nullptr;
int flags = GetFlagsFromEnv();
unsigned int former_primary_context_flags;
int former_primary_context_is_active;
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuDevicePrimaryCtxGetState(device, &former_primary_context_flags,
&former_primary_context_is_active)));
if (former_primary_context_flags != flags) {
if (former_primary_context_is_active) {
LOG(ERROR)
<< "The primary context is active and has a different flag set ("
<< former_primary_context_flags << ") than the desired flag set ("
<< flags << ").";
} else {
TF_RETURN_IF_ERROR(
cuda::ToStatus(cuDevicePrimaryCtxSetFlags(device, flags)));
}
}
CUcontext former_context = CurrentContextOrDie();
CUcontext new_context;
TF_RETURN_IF_ERROR(
cuda::ToStatus(cuDevicePrimaryCtxRetain(&new_context, device)));
if (former_context != nullptr) {
CUdevice former_device;
if (cuCtxGetDevice(&former_device) == CUDA_SUCCESS) {
if (former_device == device) {
if (former_context == new_context) {
VLOG(2) << "The primary context " << former_context << " for device "
<< device
<< " exists before initializing the StreamExecutor.";
} else {
LOG(WARNING) << "A non-primary context " << former_context
<< " for device " << device
<< " exists before initializing the StreamExecutor. The "
<< "primary context is now " << new_context << ". We "
<< "haven't verified StreamExecutor works with that.";
}
}
} else {
LOG(ERROR) << "Failed to get the device of the current context "
<< former_context;
}
}
TF_RETURN_IF_ERROR(cuda::ToStatus(cuCtxSetCurrent(former_context)));
*context = GetContextMap()->Add(new_context, device_ordinal);
CHECK(*context != nullptr)
<< "success in this call must entail non-null result";
VLOG(2) << "created or reused context " << new_context << " for this thread";
return absl::OkStatus();
}
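// CreateContext retains the device's primary context via
// cuDevicePrimaryCtxRetain rather than creating a standalone context, so
// DestroyContext below pairs it with cuDevicePrimaryCtxRelease; whatever
// context was current on entry is restored before returning.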
void GpuDriver::DestroyContext(Context* context) {
if (context == nullptr) {
return;
}
GpuContext* cuda_context = tensorflow::down_cast<GpuContext*>(context);
auto status = cuda::ToStatus(cuCtxPushCurrent(cuda_context->context()));
if (!status.ok()) {
LOG(ERROR) << "failed to Push CUDA context; leaking: " << status;
}
CUdevice device;
cuCtxGetDevice(&device);
cuCtxPopCurrent(nullptr);
status = cuda::ToStatus(cuDevicePrimaryCtxRelease(device));
if (!status.ok()) {
LOG(ERROR) << "failed to release CUDA context; leaking: " << status;
}
GetContextMap()->Remove(cuda_context->context());
}
absl::Status GpuDriver::CreateGraph(CUgraph* graph) {
VLOG(2) << "Create new CUDA graph";
TF_RETURN_IF_ERROR(cuda::ToStatus(cuGraphCreate(graph, 0),
"Failed to create CUDA graph"));
VLOG(2) << "Created CUDA graph " << *graph;
return absl::OkStatus();
}
absl::Status GpuDriver::DestroyGraph(CUgraph graph) {
VLOG(2) << "Destroy CUDA graph " << graph;
return cuda::ToStatus(cuGraphDestroy(graph), "Failed to destroy CUDA graph");
}
static std::string_view StreamCaptureModeToString(
GpuDriver::StreamCaptureMode mode) {
switch (mode) {
case GpuDriver::StreamCaptureMode::kGlobal:
return "global";
case GpuDriver::StreamCaptureMode::kThreadLocal:
return "threadlocal";
case GpuDriver::StreamCaptureMode::kRelaxed:
return "relaxed";
}
}
absl::Status GpuDriver::StreamBeginCapture(CUstream stream,
StreamCaptureMode mode) {
CUstreamCaptureMode cu_mode;
switch (mode) {
case StreamCaptureMode::kGlobal:
cu_mode = CU_STREAM_CAPTURE_MODE_GLOBAL;
break;
case StreamCaptureMode::kThreadLocal:
cu_mode = CU_STREAM_CAPTURE_MODE_THREAD_LOCAL;
break;
case StreamCaptureMode::kRelaxed:
cu_mode = CU_STREAM_CAPTURE_MODE_RELAXED;
break;
}
VLOG(2) << "Beginning stream " << stream << " capture in "
<< StreamCaptureModeToString(mode) << " mode";
return cuda::ToStatus(cuStreamBeginCapture(stream, cu_mode),
"Failed to begin stream capture");
}
absl::Status GpuDriver::StreamBeginCaptureToGraph(CUstream stream,
CUgraph graph,
StreamCaptureMode mode) {
CUstreamCaptureMode cu_mode;
switch (mode) {
case StreamCaptureMode::kGlobal:
cu_mode = CU_STREAM_CAPTURE_MODE_GLOBAL;
break;
case StreamCaptureMode::kThreadLocal:
cu_mode = CU_STREAM_CAPTURE_MODE_THREAD_LOCAL;
break;
case StreamCaptureMode::kRelaxed:
cu_mode = CU_STREAM_CAPTURE_MODE_RELAXED;
break;
}
#if CUDA_VERSION >= 12030
VLOG(2) << "Beginning stream " << stream << " capture in "
<< StreamCaptureModeToString(mode) << " mode to graph " << graph;
return cuda::ToStatus(
cuStreamBeginCaptureToGraph(stream, graph,
nullptr,
nullptr,
0, cu_mode),
"Failed to begin stream capture to graph");
#else
return absl::UnimplementedError(
"StreamBeginCaptureToGraph is not implemented");
#endif
}
absl::Status GpuDriver::StreamEndCapture(CUstream stream, CUgraph* graph) {
VLOG(2) << "End stream " << stream << " capture";
return cuda::ToStatus(cuStreamEndCapture(stream, graph),
"Failed to end stream capture");
}
absl::Status GpuDriver::GraphInstantiate(CUgraphExec* exec, CUgraph graph,
const GraphInstantiateFlags& flags) {
VLOG(2) << "Instantiate CUDA executable graph from graph " << graph << " ("
<< "auto_free_on_launch=" << flags.auto_free_on_launch << ", "
<< "device_launch=" << flags.device_launch << ", "
<< "use_node_priority=" << flags.use_node_prirotiy << ", "
<< "upload=" << flags.upload << ")";
#if CUDA_VERSION >= 12000
uint64_t cu_flags = 0;
if (flags.auto_free_on_launch)
cu_flags |= CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH;
if (flags.use_node_prirotiy)
cu_flags |= CUDA_GRAPH_INSTANTIATE_FLAG_USE_NODE_PRIORITY;
if (flags.device_launch)
cu_flags |= CUDA_GRAPH_INSTANTIATE_FLAG_DEVICE_LAUNCH;
if (flags.upload) cu_flags |= CUDA_GRAPH_INSTANTIATE_FLAG_UPLOAD;
return cuda::ToStatus(cuGraphInstantiate(exec, graph, cu_flags),
"Failed to instantiate CUDA graph");
#else
return cuda::ToStatus(cuGraphInstantiate(exec, graph, nullptr, nullptr, 0),
"Failed to instantiate CUDA graph");
#endif
}
absl::Status GpuDriver::GraphLaunch(CUgraphExec exec, CUstream stream) {
VLOG(2) << "Launching CUDA executable graph " << exec << " on a stream "
<< stream;
return cuda::ToStatus(cuGraphLaunch(exec, stream),
"Failed to launch CUDA graph");
}
absl::Status GpuDriver::GraphNodeSetEnabled(CUgraphExec exec, CUgraphNode node,
bool enabled) {
unsigned value = enabled ? 1 : 0;
VLOG(2) << "Set CUDA executable graph " << exec << " node " << node
<< " enabled flag to " << value;
return cuda::ToStatus(cuGraphNodeSetEnabled(exec, node, value),
"Failed to set CUDA graph node enabled flag");
}
absl::Status GpuDriver::GraphExecUpdate(CUgraphExec exec, CUgraph graph,
GraphExecUpdateResultInfo* result) {
VLOG(2) << "Update CUDA graph executable " << exec << " with graph " << graph;
#if CUDA_VERSION >= 12000
CUgraphExecUpdateResultInfo cu_result;
memset(&cu_result, 0, sizeof(cu_result));
CUresult err_code = cuGraphExecUpdate(exec, graph, &cu_result);
auto cu_result_enum = cu_result.result;
if (cu_result.errorFromNode) {
result->error_from_node = cu_result.errorFromNode;
}
if (cu_result.errorNode) {
result->error_node = cu_result.errorNode;
}
#else
CUgraphExecUpdateResult cu_result;
CUresult err_code = cuGraphExecUpdate(exec, graph, nullptr, &cu_result);
auto cu_result_enum = cu_result;
#endif
switch (cu_result_enum) {
case CU_GRAPH_EXEC_UPDATE_SUCCESS:
result->result = GraphExecUpdateResult::kSuccess;
break;
case CU_GRAPH_EXEC_UPDATE_ERROR:
result->result = GraphExecUpdateResult::kError;
break;
case CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED:
result->result = GraphExecUpdateResult::kTopologyChanged;
break;
case CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED:
result->result = GraphExecUpdateResult::kNodeTypeChanged;
break;
case CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED:
result->result = GraphExecUpdateResult::kFunctionChanged;
break;
case CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED:
result->result = GraphExecUpdateResult::kParametersChanged;
break;
case CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED:
result->result = GraphExecUpdateResult::kNotSupported;
break;
#if CUDA_VERSION >= 12000
case CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE:
result->result = GraphExecUpdateResult::kUnsupportedFunctionChange;
break;
case CU_GRAPH_EXEC_UPDATE_ERROR_ATTRIBUTES_CHANGED:
result->result = GraphExecUpdateResult::kAttributesChanged;
break;
#endif
default:
return absl::InternalError("Unknown graph update result");
}
return cuda::ToStatus(err_code, "Failed to update CUDA graph");
}
absl::StatusOr<std::vector<GpuGraphNodeHandle>>
GpuDriver::GraphNodeGetDependencies(GpuGraphNodeHandle node) {
VLOG(2) << "Get CUDA graph node " << node << " dependencies";
std::vector<CUgraphNode> dependencies;
size_t num_dependencies = 0;
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuGraphNodeGetDependencies(node, nullptr, &num_dependencies),
"Failed to get CUDA graph node depedencies size"));
dependencies.resize(num_dependencies, nullptr);
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuGraphNodeGetDependencies(node, dependencies.data(), &num_dependencies),
"Failed to get CUDA graph node depedencies"));
return dependencies;
}
absl::Status GpuDriver::DestroyGraphExec(CUgraphExec exec) {
VLOG(2) << "Destroying CUDA executable graph " << exec;
return cuda::ToStatus(cuGraphExecDestroy(exec),
"Failed to destroy CUDA executable graph");
}
absl::StatusOr<std::string> GpuDriver::GraphDebugDotPrint(
CUgraph graph, const char* path, bool return_printed_graph) {
#if CUDA_VERSION >= 12000
VLOG(2) << "Print CUDA graph " << graph << " debug dot file to " << path;
int flags = CU_GRAPH_DEBUG_DOT_FLAGS_VERBOSE;
TF_RETURN_IF_ERROR(cuda::ToStatus(cuGraphDebugDotPrint(graph, path, flags),
"Failed to print gpu graph debug file"));
if (return_printed_graph) {
std::string data;
if (tsl::ReadFileToString(tsl::Env::Default(), path, &data).ok()) {
return data;
} else {
LOG(WARNING) << "failed to read gpu graph debug file " << path;
}
}
#endif
return std::string(path);
}
absl::Status GpuDriver::DeviceGraphMemTrim(CUdevice device) {
VLOG(2) << "Trim CUDA device graph memory " << device;
return cuda::ToStatus(cuDeviceGraphMemTrim(device),
"Failed to trim device graph memory");
}
absl::StatusOr<bool> GpuDriver::StreamIsCapturing(CUstream stream) {
VLOG(2) << "Checking if stream " << stream << " is capturing";
CUstreamCaptureStatus status;
TF_RETURN_IF_ERROR(cuda::ToStatus(cuStreamIsCapturing(stream, &status),
"Failed to check stream capturing status"));
return status == CU_STREAM_CAPTURE_STATUS_ACTIVE;
}
absl::Status GpuDriver::GraphConditionalHandleCreate(
GpuGraphConditionalHandle* handle, CUgraph graph, Context* context,
unsigned int default_launch_value, unsigned int flags) {
VLOG(2) << "Create conditional handle for a graph " << graph
<< "; context: " << context
<< "; default_launch_value: " << default_launch_value
<< "; flags: " << flags;
#if CUDA_VERSION >= 12030
return cuda::ToStatus(
cuGraphConditionalHandleCreate(
handle, graph, tensorflow::down_cast<GpuContext*>(context)->context(),
default_launch_value, flags),
"Failed to create conditional handle for a CUDA graph");
#else
return absl::UnimplementedError(
"CUDA graph conditional nodes are not implemented");
#endif
}
static std::string ConditionalTypeToString(
GpuDriver::GpuGraphConditionalNodeParams::Type type) {
switch (type) {
case GpuDriver::GpuGraphConditionalNodeParams::Type::kIf:
return "IF";
case GpuDriver::GpuGraphConditionalNodeParams::Type::kWhile:
return "WHILE";
}
}
absl::StatusOr<GpuDriver::GpuGraphNodeResult> GpuDriver::GraphAddNode(
CUgraphNode* node, CUgraph graph, absl::Span<const CUgraphNode> deps,
const GpuGraphNodeParams& params) {
#if CUDA_VERSION >= 12030
if (auto* conditional = std::get_if<GpuGraphConditionalNodeParams>(¶ms)) {
VLOG(2) << "Add conditional node to a graph " << graph
<< "; type: " << ConditionalTypeToString(conditional->type)
<< "; deps: " << deps.size();
CUgraphNodeParams cu_params;
memset(&cu_params, 0, sizeof(cu_params));
GpuContext* gpu_context =
tensorflow::down_cast<GpuContext*>(conditional->context);
cu_params.type = CU_GRAPH_NODE_TYPE_CONDITIONAL;
cu_params.conditional.handle = conditional->handle;
cu_params.conditional.ctx = gpu_context->context();
cu_params.conditional.size = 1;
switch (conditional->type) {
case GpuDriver::GpuGraphConditionalNodeParams::Type::kIf:
cu_params.conditional.type = CU_GRAPH_COND_TYPE_IF;
break;
case GpuDriver::GpuGraphConditionalNodeParams::Type::kWhile:
cu_params.conditional.type = CU_GRAPH_COND_TYPE_WHILE;
break;
}
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuGraphAddNode(node, graph, deps.data(), deps.size(), &cu_params),
"Failed to add conditional node to a CUDA graph"));
GpuGraphConditionalNodeParams::Result result;
result.graph = cu_params.conditional.phGraph_out[0];
VLOG(2) << "Created conditional CUDA graph " << result.graph;
return result;
}
#endif
return absl::UnimplementedError("unsupported node type");
}
absl::Status GpuDriver::GraphAddEmptyNode(CUgraphNode* node, CUgraph graph,
absl::Span<const CUgraphNode> deps) {
VLOG(2) << "Add empty node to a graph " << graph << "; deps: " << deps.size();
return cuda::ToStatus(
cuGraphAddEmptyNode(node, graph, deps.data(), deps.size()),
"Failed to add empty node to a CUDA graph");
}
absl::Status GpuDriver::GraphAddKernelNode(
CUgraphNode* node, CUgraph graph, absl::Span<const CUgraphNode> deps,
absl::string_view kernel_name, CUfunction function, unsigned int grid_dim_x,
unsigned int grid_dim_y, unsigned int grid_dim_z, unsigned int block_dim_x,
unsigned int block_dim_y, unsigned int block_dim_z,
unsigned int shared_mem_bytes, void** kernel_params, void** extra) {
VLOG(2) << "Add kernel node to a graph " << graph
<< "; kernel: " << kernel_name << "; gdx: " << grid_dim_x
<< " gdy: " << grid_dim_y << " gdz: " << grid_dim_z
<< " bdx: " << block_dim_x << " bdy: " << block_dim_y
<< " bdz: " << block_dim_z << "; shmem: " << shared_mem_bytes
<< "; deps: " << deps.size();
CUDA_KERNEL_NODE_PARAMS params;
memset(¶ms, 0, sizeof(params));
params.func = function;
params.gridDimX = grid_dim_x;
params.gridDimY = grid_dim_y;
params.gridDimZ = grid_dim_z;
params.blockDimX = block_dim_x;
params.blockDimY = block_dim_y;
params.blockDimZ = block_dim_z;
params.sharedMemBytes = shared_mem_bytes;
params.kernelParams = kernel_params;
params.extra = extra;
if (shared_mem_bytes != 0) {
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuFuncSetAttribute(function,
CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES,
shared_mem_bytes),
"Failed to set shared memory size"));
}
return cuda::ToStatus(
cuGraphAddKernelNode(node, graph, deps.data(), deps.size(), ¶ms),
"Failed to add kernel node to a CUDA graph");
}
absl::Status GpuDriver::GraphExecKernelNodeSetParams(
CUgraphExec exec, CUgraphNode node, absl::string_view kernel_name,
CUfunction function, unsigned int grid_dim_x, unsigned int grid_dim_y,
unsigned int grid_dim_z, unsigned int block_dim_x, unsigned int block_dim_y,
unsigned int block_dim_z, unsigned int shared_mem_bytes,
void** kernel_params, void** extra) {
VLOG(2) << "Set kernel node params " << node << " in graph executable "
<< exec << "; kernel: " << kernel_name << "; gdx: " << grid_dim_x
<< " gdy: " << grid_dim_y << " gdz: " << grid_dim_z
<< " bdx: " << block_dim_x << " bdy: " << block_dim_y
<< " bdz: " << block_dim_z << "; shmem: " << shared_mem_bytes;
CUDA_KERNEL_NODE_PARAMS params;
memset(¶ms, 0, sizeof(params));
params.func = function;
params.gridDimX = grid_dim_x;
params.gridDimY = grid_dim_y;
params.gridDimZ = grid_dim_z;
params.blockDimX = block_dim_x;
params.blockDimY = block_dim_y;
params.blockDimZ = block_dim_z;
params.sharedMemBytes = shared_mem_bytes;
params.kernelParams = kernel_params;
params.extra = extra;
if (shared_mem_bytes != 0) {
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuFuncSetAttribute(function,
CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES,
shared_mem_bytes),
"Failed to set shared memory size"));
}
return cuda::ToStatus(cuGraphExecKernelNodeSetParams(exec, node, ¶ms),
"Failed to set CUDA graph kernel node params");
}
absl::Status GpuDriver::GraphAddMemcpyD2DNode(
Context* context, CUgraphNode* node, CUgraph graph,
absl::Span<const CUgraphNode> deps, CUdeviceptr gpu_dst,
CUdeviceptr gpu_src, uint64_t size) {
GpuContext* gpu_context = tensorflow::down_cast<GpuContext*>(context);
VLOG(2) << "Add memcpy d2d node to a graph " << graph
<< "; dst: " << reinterpret_cast<void*>(gpu_dst)
<< "; src: " << reinterpret_cast<void*>(gpu_src) << "; size: " << size
<< "; context: " << gpu_context->context()
<< "; deps: " << deps.size();
CUDA_MEMCPY3D params;
memset(¶ms, 0, sizeof(params));
params.srcMemoryType = CU_MEMORYTYPE_DEVICE;
params.srcDevice = gpu_src;
params.dstMemoryType = CU_MEMORYTYPE_DEVICE;
params.dstDevice = gpu_dst;
params.WidthInBytes = size;
params.Height = 1;
params.Depth = 1;
return cuda::ToStatus(
cuGraphAddMemcpyNode(node, graph, deps.data(), deps.size(), ¶ms,
gpu_context->context()),
"Failed to add memcpy d2d node to a CUDA graph");
}
absl::Status GpuDriver::GraphExecMemcpyD2DNodeSetParams(
Context* context, GpuGraphExecHandle exec, GpuGraphNodeHandle node,
GpuDevicePtr gpu_dst, GpuDevicePtr gpu_src, uint64_t size) {
GpuContext* gpu_context = tensorflow::down_cast<GpuContext*>(context);
VLOG(2) << "Set memcpy d2d node params " << node << " in graph executable "
<< exec << "; dst: " << reinterpret_cast<void*>(gpu_dst)
<< "; src: " << reinterpret_cast<void*>(gpu_src) << "; size: " << size
<< "; context: " << gpu_context->context();
CUDA_MEMCPY3D params;
memset(¶ms, 0, sizeof(params));
params.srcMemoryType = CU_MEMORYTYPE_DEVICE;
params.srcDevice = gpu_src;
params.dstMemoryType = CU_MEMORYTYPE_DEVICE;
params.dstDevice = gpu_dst;
params.WidthInBytes = size;
params.Height = 1;
params.Depth = 1;
return cuda::ToStatus(cuGraphExecMemcpyNodeSetParams(exec, node, ¶ms,
gpu_context->context()),
"Failed to set memcpy d2d node params");
}
namespace {
struct BitPatternToString {
std::string operator()(uint8_t pattern) {
return absl::StrCat("u8:", pattern);
}
std::string operator()(uint16_t pattern) {
return absl::StrCat("u16:", pattern);
}
std::string operator()(uint32_t pattern) {
return absl::StrCat("u32:", pattern);
}
};
struct BitPatternToValue {
std::pair<unsigned, unsigned> operator()(uint8_t pattern) {
unsigned value = pattern;
return {(value << 24) | (value << 16) | (value << 8) | value,
1};
}
std::pair<unsigned, unsigned> operator()(uint16_t pattern) {
unsigned value = pattern;
return {(value << 16) | value, 2};
}
std::pair<unsigned, unsigned> operator()(uint32_t pattern) {
return {pattern, 4};
}
};
}
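// BitPatternToValue widens narrow memset patterns into the 32-bit `value`
// field expected by CUDA_MEMSET_NODE_PARAMS. A quick worked example: an 8-bit
// pattern 0xAB maps to {0xABABABAB, 1} (the byte replicated four times,
// elementSize 1), and a 16-bit pattern 0x1234 maps to {0x12341234, 2}.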
absl::Status GpuDriver::GraphAddMemsetNode(
Context* context, CUgraphNode* node, GpuGraphHandle graph,
absl::Span<const CUgraphNode> deps, CUdeviceptr dst,
std::variant<uint8_t, uint16_t, uint32_t> bit_pattern,
uint64_t num_elements) {
GpuContext* gpu_context = tensorflow::down_cast<GpuContext*>(context);
VLOG(2) << "Add memset node to a graph " << graph
<< "; dst: " << reinterpret_cast<void*>(dst)
<< "; bit_pattern: " << std::visit(BitPatternToString(), bit_pattern)
<< "; num_elements: " << num_elements
<< "; context: " << gpu_context->context()
<< "; deps: " << deps.size();
CUDA_MEMSET_NODE_PARAMS params;
memset(¶ms, 0, sizeof(params));
auto [value, element_size] = std::visit(BitPatternToValue(), bit_pattern);
params.dst = dst;
params.elementSize = element_size;
params.height = 1;
params.pitch = 0;
params.value = value;
params.width = num_elements;
return cuda::ToStatus(
cuGraphAddMemsetNode(node, graph, deps.data(), deps.size(), ¶ms,
gpu_context->context()),
"Failed to add memset node to a CUDA graph");
}
absl::Status GpuDriver::GraphExecMemsetNodeSetParams(
Context* context, CUgraphExec exec, CUgraphNode node, CUdeviceptr dst,
std::variant<uint8_t, uint16_t, uint32_t> bit_pattern,
uint64_t num_elements) {
GpuContext* gpu_context = tensorflow::down_cast<GpuContext*>(context);
VLOG(2) << "Set memset node params " << node << " in graph executable "
<< exec << "; dst: " << reinterpret_cast<void*>(dst)
<< "; bit_pattern: " << std::visit(BitPatternToString(), bit_pattern)
<< "; num_elements: " << num_elements
<< "; context: " << gpu_context->context();
CUDA_MEMSET_NODE_PARAMS params;
memset(¶ms, 0, sizeof(params));
auto [value, element_size] = std::visit(BitPatternToValue(), bit_pattern);
params.dst = dst;
params.elementSize = element_size;
params.height = 1;
params.pitch = 0;
params.value = value;
params.width = num_elements;
return cuda::ToStatus(cuGraphExecMemsetNodeSetParams(exec, node, ¶ms,
gpu_context->context()),
"Failed to set memset node params");
}
absl::Status GpuDriver::GraphAddChildNode(CUgraphNode* node, CUgraph graph,
absl::Span<const CUgraphNode> deps,
CUgraph child) {
VLOG(2) << "Create a new node by cloning the child graph " << child
<< " and add it to " << graph << "; deps: " << deps.size();
return cuda::ToStatus(
cuGraphAddChildGraphNode(node, graph, deps.data(), deps.size(), child),
"Failed to create a child graph node and add it to a CUDA graph");
}
absl::Status GpuDriver::GraphExecChildNodeSetParams(CUgraphExec exec,
CUgraphNode node,
CUgraph child) {
VLOG(2) << "Set child node params " << node << " in graph executable " << exec
<< "to params contained in " << child;
return cuda::ToStatus(cuGraphExecChildGraphNodeSetParams(exec, node, child),
"Failed to set CUDA graph child node params");
}
absl::Status GpuDriver::LaunchKernel(
Context* context, absl::string_view kernel_name, CUfunction function,
unsigned int grid_dim_x, unsigned int grid_dim_y, unsigned int grid_dim_z,
unsigned int block_dim_x, unsigned int block_dim_y,
unsigned int block_dim_z, unsigned int shared_mem_bytes, CUstream stream,
void** kernel_params, void** extra) {
ScopedActivateContext activation(context);
VLOG(2) << "launching kernel: " << kernel_name << "; gdx: " << grid_dim_x
<< " gdy: " << grid_dim_y << " gdz: " << grid_dim_z
<< " bdx: " << block_dim_x << " bdy: " << block_dim_y
<< " bdz: " << block_dim_z
<< "; shared_mem_bytes: " << shared_mem_bytes;
if (shared_mem_bytes != 0) {
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuFuncSetAttribute(function,
CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES,
shared_mem_bytes),
"Failed to set shared memory size"));
}
return cuda::ToStatus(
cuLaunchKernel(function, grid_dim_x, grid_dim_y, grid_dim_z, block_dim_x,
block_dim_y, block_dim_z, shared_mem_bytes, stream,
kernel_params, extra),
absl::StrCat("Failed to launch CUDA kernel: ", kernel_name,
"; block dims: ", block_dim_x, "x", block_dim_y, "x",
block_dim_z, "; grid dims: ", grid_dim_x, "x", grid_dim_y,
"x", grid_dim_z,
"; shared memory size: ", shared_mem_bytes));
}
absl::Status GpuDriver::LaunchKernel(
Context* context, absl::string_view kernel_name, GpuFunctionHandle function,
unsigned int cluster_dim_x, unsigned int cluster_dim_y,
unsigned int cluster_dim_z, unsigned int grid_dim_x,
unsigned int grid_dim_y, unsigned int grid_dim_z, unsigned int block_dim_x,
unsigned int block_dim_y, unsigned int block_dim_z,
unsigned int shared_mem_bytes, GpuStreamHandle stream, void** kernel_params,
void** extra) {
ScopedActivateContext activation(context);
VLOG(2) << "launching kernel: " << kernel_name << "; cdx: " << cluster_dim_x
<< " cdy: " << cluster_dim_y << " cdz: " << cluster_dim_z
<< " gdx: " << grid_dim_x << " gdy: " << grid_dim_y
<< " gdz: " << grid_dim_z << " bdx: " << block_dim_x
<< " bdy: " << block_dim_y << " bdz: " << block_dim_z
<< "; shared_mem_bytes: " << shared_mem_bytes;
if (shared_mem_bytes != 0) {
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuFuncSetAttribute(function,
CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES,
shared_mem_bytes),
"Failed to set shared memory size"));
}
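  // Cluster dimensions cannot be expressed through cuLaunchKernel, so describe
  // the launch with a CUlaunchConfig carrying a
  // CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION attribute and use cuLaunchKernelEx.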
CUlaunchConfig launch_config;
memset(&launch_config, 0, sizeof(launch_config));
launch_config.blockDimX = block_dim_x;
launch_config.blockDimY = block_dim_y;
launch_config.blockDimZ = block_dim_z;
launch_config.gridDimX = grid_dim_x;
launch_config.gridDimY = grid_dim_y;
launch_config.gridDimZ = grid_dim_z;
launch_config.hStream = stream;
launch_config.sharedMemBytes = shared_mem_bytes;
CUlaunchAttribute cluster_dims;
memset(&cluster_dims, 0, sizeof(cluster_dims));
cluster_dims.id = CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION;
cluster_dims.value.clusterDim.x = cluster_dim_x;
cluster_dims.value.clusterDim.y = cluster_dim_y;
cluster_dims.value.clusterDim.z = cluster_dim_z;
launch_config.attrs = &cluster_dims;
launch_config.numAttrs = 1;
return cuda::ToStatus(
cuLaunchKernelEx(&launch_config, function, kernel_params, extra),
absl::StrCat("Failed to launch CUDA kernel: ", kernel_name,
"; cluster dims: ", cluster_dim_x, "x", cluster_dim_y, "x",
cluster_dim_z, "; block dims: ", block_dim_x, "x",
block_dim_y, "x", block_dim_z, "; grid dims: ", grid_dim_x,
"x", grid_dim_y, "x", grid_dim_z,
"; shared memory size: ", shared_mem_bytes));
}
absl::Status GpuDriver::LoadCubin(Context* context, const char* cubin_bytes,
CUmodule* module) {
ScopedActivateContext activation(context);
return cuda::ToStatus(
cuModuleLoadFatBinary(module, cubin_bytes),
"Failed to load in-memory CUBIN (compiled for a different GPU?).");
}
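// JIT-compiles PTX into a CUmodule on the driver executor thread; the caller
// blocks on a Notification until compilation finishes, and the CU_JIT_* log
// buffer options below capture the compiler's info and error output.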
absl::Status GpuDriver::LoadPtx(Context* context, const char* ptx_contents,
CUmodule* module) {
absl::Notification notification;
absl::Status ret = absl::OkStatus();
GetDriverExecutor()->Schedule(
[context, ptx_contents, module, &ret, ¬ification]() {
ScopedActivateContext activation(context);
void* ptx_data = const_cast<char*>(ptx_contents);
static const unsigned int kLogBufferBytesLimit = 1024;
unsigned int error_log_buffer_bytes = kLogBufferBytesLimit;
unsigned int info_log_buffer_bytes = kLogBufferBytesLimit;
absl::InlinedVector<char, 4> error_log_buffer(error_log_buffer_bytes);
absl::InlinedVector<char, 4> info_log_buffer(info_log_buffer_bytes);
bool log_verbose = true;
CUjit_option options[] = {CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES,
CU_JIT_ERROR_LOG_BUFFER,
CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES,
CU_JIT_INFO_LOG_BUFFER, CU_JIT_LOG_VERBOSE};
void* option_values[] = {
absl::bit_cast<void*>(uintptr_t(error_log_buffer_bytes)),
absl::bit_cast<void*>(error_log_buffer.data()),
absl::bit_cast<void*>(uintptr_t(info_log_buffer_bytes)),
absl::bit_cast<void*>(info_log_buffer.data()),
absl::bit_cast<void*>(uintptr_t(log_verbose))};
CHECK(TF_ARRAYSIZE(options) == TF_ARRAYSIZE(option_values));
absl::Status status;
{
absl::LeakCheckDisabler disabler;
status = cuda::ToStatus(cuModuleLoadDataEx(
module, ptx_data, TF_ARRAYSIZE(options), options, option_values));
}
error_log_buffer_bytes = reinterpret_cast<uintptr_t>(option_values[0]);
info_log_buffer_bytes = reinterpret_cast<uintptr_t>(option_values[2]);
CHECK_LE(error_log_buffer_bytes, kLogBufferBytesLimit);
CHECK_LE(info_log_buffer_bytes, kLogBufferBytesLimit);
if (!status.ok()) {
LOG(ERROR) << "failed to load PTX text as a module: " << status;
error_log_buffer[error_log_buffer_bytes ? error_log_buffer_bytes - 1
: 0] = '\0';
LOG(ERROR) << "error log buffer (" << error_log_buffer_bytes
<< " bytes): " << error_log_buffer.data();
if (absl::StrContains(error_log_buffer.data(),
"Register allocation failed")) {
ret = absl::ResourceExhaustedError(
absl::StrFormat("Failed to load PTX text as a module (register "
"allocation failed): %s",
status.ToString()));
} else {
ret = status;
}
notification.Notify();
return;
}
VLOG(3) << "PTX compilation info log (" << info_log_buffer_bytes
<< " bytes): " << info_log_buffer.data();
VLOG(3) << "PTX compilation error log (" << error_log_buffer_bytes
<< " bytes): " << error_log_buffer.data();
CHECK(module != nullptr);
notification.Notify();
});
notification.WaitForNotification();
return ret;
}
absl::Status GpuDriver::LoadHsaco(Context* context, const char* hsaco_contents,
CUmodule* module) {
return absl::InternalError(
"Feature not supported on CUDA platform (LoadHsaco)");
}
absl::Status GpuDriver::SynchronousMemsetUint8(Context* context,
CUdeviceptr location,
uint8_t value, size_t size) {
ScopedActivateContext activation(context);
return cuda::ToStatus(cuMemsetD8(location, value, size),
"Failed to memset memory");
}
absl::Status GpuDriver::SynchronousMemsetUint32(Context* context,
CUdeviceptr location,
uint32_t value,
size_t uint32_count) {
ScopedActivateContext activation(context);
return cuda::ToStatus(cuMemsetD32(location, value, uint32_count),
"Failed to memset memory");
}
absl::Status GpuDriver::AsynchronousMemsetUint8(Context* context,
CUdeviceptr location,
uint8_t value,
size_t uint8_count,
CUstream stream) {
ScopedActivateContext activation(context);
return cuda::ToStatus(cuMemsetD8Async(location, value, uint8_count, stream),
"Failed to enqueue async memset operation");
}
absl::Status GpuDriver::AsynchronousMemsetUint32(Context* context,
CUdeviceptr location,
uint32_t value,
size_t uint32_count,
CUstream stream) {
ScopedActivateContext activation(context);
return cuda::ToStatus(cuMemsetD32Async(location, value, uint32_count, stream),
"Failed to enqueue async memset operation");
}
absl::Status GpuDriver::AddStreamCallback(Context* context, CUstream stream,
StreamCallback callback, void* data) {
return cuda::ToStatus(cuLaunchHostFunc(stream, callback, data));
}
absl::Status GpuDriver::GetModuleFunction(Context* context, CUmodule module,
const char* kernel_name,
CUfunction* function) {
ScopedActivateContext activated{context};
CHECK(module != nullptr && kernel_name != nullptr);
cudaError_t cuda_error = cudaPeekAtLastError();
if (cuda_error != cudaSuccess) {
return absl::InternalError(
absl::StrCat("There was an error before calling cuModuleGetFunction (",
cuda_error, "): ", cudaGetErrorName(cuda_error), " : ",
cudaGetErrorString(cuda_error)));
}
return cuda::ToStatus(cuModuleGetFunction(function, module, kernel_name),
"Failed to get module function");
}
absl::Status GpuDriver::GetModuleSymbol(Context* context, CUmodule module,
const char* symbol_name,
CUdeviceptr* dptr, size_t* bytes) {
ScopedActivateContext activated{context};
CHECK(module != nullptr && symbol_name != nullptr &&
(dptr != nullptr || bytes != nullptr));
return cuda::ToStatus(
cuModuleGetGlobal(dptr, bytes, module, symbol_name),
absl::StrCat("Failed to get symbol '", symbol_name, "'"));
}
void GpuDriver::UnloadModule(Context* context, CUmodule module) {
ScopedActivateContext activated{context};
auto status = cuda::ToStatus(cuModuleUnload(module));
if (!status.ok()) {
LOG(ERROR) << "failed to unload module " << module
<< "; leaking: " << status;
}
}
absl::StatusOr<GpuStreamHandle> GpuDriver::CreateStream(Context* context,
int priority) {
ScopedActivateContext activated(context);
GpuStreamHandle stream;
if (priority == 0) {
TF_RETURN_IF_ERROR(
cuda::ToStatus(cuStreamCreate(&stream, CU_STREAM_NON_BLOCKING)));
} else {
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuStreamCreateWithPriority(&stream, CU_STREAM_NON_BLOCKING, priority)));
}
VLOG(2) << "successfully created stream " << stream << " for context "
<< context << " on thread";
return stream;
}
void GpuDriver::DestroyStream(Context* context, GpuStreamHandle stream) {
if (stream == nullptr) {
return;
}
ScopedActivateContext activated{context};
CUresult res = cuStreamQuery(stream);
if (res != CUDA_SUCCESS) {
LOG(ERROR) << "stream not idle on destroy: " << cuda::ToStatus(res);
}
auto status = cuda::ToStatus(cuStreamDestroy(stream));
if (!status.ok()) {
LOG(ERROR) << "failed to destroy CUDA stream for context " << context
<< ": " << status;
} else {
VLOG(2) << "successfully destroyed stream " << stream << " for context "
<< context;
}
}
void* GpuDriver::DeviceAllocate(Context* context, uint64_t bytes) {
if (bytes == 0) {
return nullptr;
}
ScopedActivateContext activated{context};
CUdeviceptr result = 0;
auto status = cuda::ToStatus(cuMemAlloc(&result, bytes));
if (!status.ok()) {
LOG(INFO) << "failed to allocate "
<< tsl::strings::HumanReadableNumBytes(bytes) << " (" << bytes
<< " bytes) from device: " << status;
return nullptr;
}
void* ptr = reinterpret_cast<void*>(result);
VLOG(2) << "allocated " << ptr << " for context " << context << " of "
<< bytes << " bytes";
return ptr;
}
void GpuDriver::DeviceDeallocate(Context* context, void* location) {
ScopedActivateContext activation(context);
CUdeviceptr pointer = absl::bit_cast<CUdeviceptr>(location);
auto status = cuda::ToStatus(cuMemFree(pointer));
if (!status.ok()) {
LOG(ERROR) << "failed to free device memory at " << location
<< "; result: " << status;
} else {
VLOG(2) << "deallocated " << location << " for context " << context;
}
}
void* GpuDriver::UnifiedMemoryAllocate(Context* context, uint64_t bytes) {
ScopedActivateContext activation(context);
CUdeviceptr result = 0;
auto status =
cuda::ToStatus(cuMemAllocManaged(&result, bytes, CU_MEM_ATTACH_GLOBAL));
if (!status.ok()) {
LOG(ERROR) << "failed to alloc " << bytes
<< " bytes unified memory; result: " << status;
return nullptr;
}
void* ptr = reinterpret_cast<void*>(result);
VLOG(2) << "allocated " << ptr << " for context " << context << " of "
<< bytes << " bytes in unified memory";
return ptr;
}
void GpuDriver::UnifiedMemoryDeallocate(Context* context, void* location) {
ScopedActivateContext activation(context);
CUdeviceptr pointer = absl::bit_cast<CUdeviceptr>(location);
auto status = cuda::ToStatus(cuMemFree(pointer));
if (!status.ok()) {
LOG(ERROR) << "failed to free unified memory at " << location
<< "; result: " << status;
} else {
VLOG(2) << "deallocated unified memory at " << location << " for context "
<< context;
}
}
void* GpuDriver::HostAllocate(Context* context, uint64_t bytes) {
ScopedActivateContext activation(context);
void* host_mem = nullptr;
auto status = cuda::ToStatus(
cuMemHostAlloc(&host_mem, bytes, CU_MEMHOSTALLOC_PORTABLE));
if (!status.ok()) {
LOG(ERROR) << "failed to alloc " << bytes << " bytes on host: " << status;
}
return host_mem;
}
void GpuDriver::HostDeallocate(Context* context, void* location) {
ScopedActivateContext activation(context);
auto status = cuda::ToStatus(cuMemFreeHost(location));
if (!status.ok()) {
LOG(ERROR) << "error deallocating host memory at " << location << ": "
<< status;
}
}
bool GpuDriver::HostRegister(Context* context, void* location, uint64_t bytes) {
ScopedActivateContext activation(context);
auto status = cuda::ToStatus(
cuMemHostRegister(location, bytes, CU_MEMHOSTREGISTER_PORTABLE));
if (!status.ok()) {
LOG(ERROR) << "error registering host memory at " << location << ": "
<< status;
return false;
}
return true;
}
bool GpuDriver::HostUnregister(Context* context, void* location) {
ScopedActivateContext activation(context);
auto status = cuda::ToStatus(cuMemHostUnregister(location));
if (!status.ok()) {
LOG(ERROR) << "error unregistering host memory at " << location << ": "
<< status;
return false;
}
return true;
}
int GpuDriver::GetGpuStreamPriority(
Context* context, stream_executor::StreamPriority stream_priority) {
ScopedActivateContext activation(context);
if (stream_priority == stream_executor::StreamPriority::Default) {
return 0;
}
int lowest, highest;
auto status = cuda::ToStatus(cuCtxGetStreamPriorityRange(&lowest, &highest));
if (!status.ok()) {
LOG(ERROR)
<< "Could not query stream priority range. Returning default priority.";
return 0;
}
return stream_priority == stream_executor::StreamPriority::Highest ? highest
: lowest;
}
absl::Status GpuDriver::DestroyEvent(Context* context, CUevent* event) {
if (*event == nullptr) {
return absl::InvalidArgumentError("input event cannot be null");
}
ScopedActivateContext activated{context};
return cuda::ToStatus(cuEventDestroy(*event), "Error destroying CUDA event");
}
absl::Status GpuDriver::RecordEvent(Context* context, CUevent event,
CUstream stream) {
ScopedActivateContext activated{context};
return cuda::ToStatus(cuEventRecord(event, stream),
"Error recording CUDA event");
}
absl::StatusOr<float> GpuDriver::GetEventElapsedTime(Context* context,
CUevent start,
CUevent stop) {
ScopedActivateContext activated{context};
auto status = cuda::ToStatus(cuEventSynchronize(stop));
if (!status.ok()) {
LOG(ERROR) << "failed to synchronize the stop event: " << status;
    // `return false;` converts to StatusOr<float>(0.0f) and silently swallows
    // the synchronization error, so propagate the failed status instead.
    return status;
}
float elapsed_milliseconds;
TF_RETURN_IF_ERROR(
cuda::ToStatus(cuEventElapsedTime(&elapsed_milliseconds, start, stop)));
return elapsed_milliseconds;
}
absl::Status GpuDriver::WaitStreamOnEvent(Context* context, CUstream stream,
CUevent event) {
ScopedActivateContext activation(context);
  return cuda::ToStatus(cuStreamWaitEvent(stream, event, 0));
}
absl::Status GpuDriver::SynchronizeContext(Context* context) {
ScopedActivateContext activation(context);
return cuda::ToStatus(cuCtxSynchronize());
}
absl::Status GpuDriver::SynchronizeStream(Context* context, CUstream stream) {
ScopedActivateContext activated{context};
CHECK(stream != nullptr);
return cuda::ToStatus(cuStreamSynchronize(stream),
"Could not synchronize CUDA stream");
}
absl::Status GpuDriver::SynchronousMemcpyD2H(Context* context, void* host_dst,
CUdeviceptr gpu_src,
uint64_t size) {
ScopedActivateContext activation(context);
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuMemcpyDtoH(host_dst, gpu_src, size),
absl::StrFormat("failed to synchronous memcpy from device to host "
"host dst: %p; GPU src: %p; size: %u=0x%x",
host_dst, absl::bit_cast<void*>(gpu_src), size, size)));
VLOG(2) << "successfully sync memcpy'd d2h of " << size << " bytes to "
<< host_dst;
return absl::OkStatus();
}
absl::Status GpuDriver::SynchronousMemcpyH2D(Context* context,
CUdeviceptr gpu_dst,
const void* host_src,
uint64_t size) {
ScopedActivateContext activation(context);
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuMemcpyHtoD(gpu_dst, host_src, size),
absl::StrFormat(
"failed to synchronous memcpy from host to device: GPU dst: %p;"
" host src: %p; size: %u=0x%x",
absl::bit_cast<void*>(gpu_dst), host_src, size, size)));
VLOG(2) << "successfully enqueued sync memcpy h2d of " << size << " bytes";
return absl::OkStatus();
}
absl::Status GpuDriver::AsynchronousMemcpyD2H(Context* context, void* host_dst,
CUdeviceptr gpu_src,
uint64_t size, CUstream stream) {
ScopedActivateContext activation(context);
TF_RETURN_IF_ERROR(
cuda::ToStatus(cuMemcpyDtoHAsync(host_dst, gpu_src, size, stream)));
VLOG(2) << "successfully enqueued async memcpy d2h of " << size
<< " bytes from " << absl::bit_cast<void*>(gpu_src) << " to "
<< host_dst << " on stream " << stream;
return absl::OkStatus();
}
absl::Status GpuDriver::AsynchronousMemcpyH2D(Context* context,
CUdeviceptr gpu_dst,
const void* host_src,
uint64_t size, CUstream stream) {
ScopedActivateContext activation(context);
TF_RETURN_IF_ERROR(
cuda::ToStatus(cuMemcpyHtoDAsync(gpu_dst, host_src, size, stream)));
VLOG(2) << "successfully enqueued async memcpy h2d of " << size << " bytes"
<< " from " << host_src << " to " << absl::bit_cast<void*>(gpu_dst)
<< " on stream " << stream;
return absl::OkStatus();
}
absl::Status GpuDriver::AsynchronousMemcpyD2D(Context* context,
CUdeviceptr gpu_dst,
CUdeviceptr gpu_src,
uint64_t size, CUstream stream) {
ScopedActivateContext activation(context);
TF_ASSIGN_OR_RETURN(bool is_capturing, StreamIsCapturing(stream));
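  // While a graph is being captured (or when either pointer is null), issue a
  // plain device-to-device copy; otherwise resolve the owning contexts so that
  // copies spanning two contexts go through cuMemcpyPeerAsync.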
if ((gpu_dst == 0 || gpu_src == 0) || is_capturing) {
TF_RETURN_IF_ERROR(
cuda::ToStatus(cuMemcpyDtoDAsync(gpu_dst, gpu_src, size, stream)));
} else {
CUcontext dst_context =
GetContextMap()->GetAnyContext(absl::bit_cast<void*>(gpu_dst));
CUcontext src_context =
GetContextMap()->GetAnyContext(absl::bit_cast<void*>(gpu_src));
if (dst_context == src_context) {
TF_RETURN_IF_ERROR(
cuda::ToStatus(cuMemcpyDtoDAsync(gpu_dst, gpu_src, size, stream)));
} else {
TF_RETURN_IF_ERROR(cuda::ToStatus(cuMemcpyPeerAsync(
gpu_dst, dst_context, gpu_src, src_context, size, stream)));
}
}
VLOG(2) << "successfully enqueued async memcpy d2d of " << size << " bytes"
<< " from " << absl::bit_cast<void*>(gpu_src) << " to "
<< absl::bit_cast<void*>(gpu_dst) << " on stream " << stream;
return absl::OkStatus();
}
absl::Status GpuDriver::InitEvent(Context* context, CUevent* result,
EventFlags flags) {
int cuflags;
switch (flags) {
case EventFlags::kDefault:
cuflags = CU_EVENT_DEFAULT;
break;
case EventFlags::kDisableTiming:
cuflags = CU_EVENT_DISABLE_TIMING;
break;
default:
LOG(FATAL) << "impossible event flags: " << int(flags);
}
ScopedActivateContext activated{context};
return cuda::ToStatus(cuEventCreate(result, cuflags));
}
int GpuDriver::GetDeviceCount() {
int device_count = 0;
auto status = cuda::ToStatus(cuDeviceGetCount(&device_count));
if (!status.ok()) {
LOG(ERROR) << "could not retrieve CUDA device count: " << status;
return 0;
}
return device_count;
}
absl::StatusOr<MemoryType> GpuDriver::GetPointerMemorySpace(
CUdeviceptr pointer) {
unsigned int value;
TF_RETURN_IF_ERROR(cuda::ToStatus(cuPointerGetAttribute(
&value, CU_POINTER_ATTRIBUTE_MEMORY_TYPE, pointer)));
switch (value) {
case CU_MEMORYTYPE_DEVICE:
return MemoryType::kDevice;
case CU_MEMORYTYPE_HOST:
return MemoryType::kHost;
default:
return absl::InternalError(
absl::StrCat("unknown memory space provided by CUDA API: ", value));
}
}
absl::Status GpuDriver::GetPointerAddressRange(CUdeviceptr dptr,
CUdeviceptr* base,
size_t* size) {
return cuda::ToStatus(cuMemGetAddressRange(base, size, dptr));
}
absl::Status GpuDriver::GetComputeCapability(int* cc_major, int* cc_minor,
CUdevice device) {
*cc_major = 0;
*cc_minor = 0;
TF_RETURN_IF_ERROR(cuda::ToStatus(cuDeviceGetAttribute(
cc_major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, device)));
return cuda::ToStatus(cuDeviceGetAttribute(
cc_minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, device));
}
absl::Status GpuDriver::GetGpuISAVersion(int* version, CUdevice device) {
return absl::Status{
absl::StatusCode::kInternal,
"Feature not supported on CUDA platform (GetGpuISAVersion)"};
}
absl::Status GpuDriver::GetGpuGCNArchName(CUdevice, std::string*) {
return absl::Status{
absl::StatusCode::kInternal,
"Feature not supported on CUDA platform (GetGpuGCNArchName)"};
}
template <typename T>
static absl::StatusOr<T> GetSimpleAttribute(CUdevice device,
CUdevice_attribute attribute) {
int value = -1;
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuDeviceGetAttribute(&value, attribute, device),
absl::StrCat("Could not retrieve CUDA device attribute (", attribute)));
T converted = value;
return converted;
}
absl::StatusOr<int> GpuDriver::GetMultiprocessorCount(CUdevice device) {
return GetSimpleAttribute<int>(device,
CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT);
}
absl::StatusOr<int64_t> GpuDriver::GetMaxSharedMemoryPerCore(CUdevice device) {
return GetSimpleAttribute<int64_t>(
device, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR);
}
absl::StatusOr<int64_t> GpuDriver::GetMaxSharedMemoryPerBlock(CUdevice device) {
return GetSimpleAttribute<int64_t>(
device, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK);
}
absl::StatusOr<int64_t> GpuDriver::GetMaxSharedMemoryPerBlockOptin(
CUdevice device) {
return GetSimpleAttribute<int64_t>(
device, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN);
}
absl::StatusOr<int64_t> GpuDriver::GetMaxThreadsPerMultiprocessor(
CUdevice device) {
return GetSimpleAttribute<int64_t>(
device, CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR);
}
absl::StatusOr<int64_t> GpuDriver::GetMaxRegistersPerBlock(CUdevice device) {
return GetSimpleAttribute<int64_t>(
device, CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK);
}
absl::StatusOr<int64_t> GpuDriver::GetThreadsPerWarp(CUdevice device) {
return GetSimpleAttribute<int64_t>(device, CU_DEVICE_ATTRIBUTE_WARP_SIZE);
}
absl::Status GpuDriver::GetGridLimits(int* x, int* y, int* z, CUdevice device) {
int value;
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuDeviceGetAttribute(&value, CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X, device),
"Could not get device attribute"));
*x = value;
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuDeviceGetAttribute(&value, CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y, device),
"Could not get device attribute"));
*y = value;
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuDeviceGetAttribute(&value, CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z, device),
"Could not get device attribute"));
*z = value;
return absl::OkStatus();
}
absl::StatusOr<int32_t> GpuDriver::GetDriverVersion() {
int32_t version;
TF_RETURN_IF_ERROR(cuda::ToStatus(cuDriverGetVersion(&version),
"Could not get driver version"));
return version;
}
bool GpuDriver::GetDeviceProperties(CUdevprop* device_properties,
int device_ordinal) {
auto status =
cuda::ToStatus(cuDeviceGetProperties(device_properties, device_ordinal));
return status.ok();
}
absl::StatusOr<int> GpuDriver::GetDeviceAttribute(CUdevice_attribute attribute,
CUdevice device) {
int val;
TF_RETURN_IF_ERROR(
cuda::ToStatus(cuDeviceGetAttribute(&val, attribute, device)));
return val;
}
bool GpuDriver::IsEccEnabled(CUdevice device, bool* result) {
int value = -1;
auto status = cuda::ToStatus(
cuDeviceGetAttribute(&value, CU_DEVICE_ATTRIBUTE_ECC_ENABLED, device));
if (!status.ok()) {
LOG(ERROR) << "failed to query ECC status: " << status;
return false;
}
*result = value;
return true;
}
bool GpuDriver::GetDeviceMemoryInfo(Context* context, int64_t* free_out,
int64_t* total_out) {
ScopedActivateContext activation(context);
size_t free = 0;
size_t total = 0;
auto status = cuda::ToStatus(cuMemGetInfo(&free, &total));
if (!status.ok()) {
LOG(ERROR) << "failed to query device memory info: " << status;
return false;
}
*free_out = free;
*total_out = total;
return true;
}
bool GpuDriver::GetDeviceTotalMemory(CUdevice device, uint64_t* result) {
size_t value{};
auto status = cuda::ToStatus(cuDeviceTotalMem(&value, device));
if (!status.ok()) {
LOG(ERROR) << "failed to query total available memory: " << status;
return false;
}
*result = value;
return true;
}
std::string GpuDriver::GetPCIBusID(CUdevice device) {
std::string pci_bus_id;
static const int kBufferSize = 64;
absl::InlinedVector<char, 4> chars(kBufferSize);
chars[kBufferSize - 1] = '\0';
auto status = cuda::ToStatus(
cuDeviceGetPCIBusId(chars.begin(), kBufferSize - 1, device));
if (!status.ok()) {
LOG(ERROR) << "failed to query PCI bus id for device: " << status;
return pci_bus_id;
}
pci_bus_id = chars.begin();
return pci_bus_id;
}
bool GpuDriver::CanEnablePeerAccess(Context* from, Context* to) {
if (from == to) {
return true;
}
auto from_device = DeviceFromContext(from);
if (!from_device.ok()) {
LOG(ERROR) << "failed to resolve 'from' peer access context to a device: "
<< from_device.status();
return false;
}
auto to_device = DeviceFromContext(to);
if (!to_device.ok()) {
LOG(ERROR) << "failed to resolve 'to' peer access context to a device: "
<< to_device.status();
return false;
}
return CanEnablePeerAccess(from_device.value(), to_device.value());
}
bool GpuDriver::CanEnablePeerAccess(GpuDeviceHandle from, GpuDeviceHandle to) {
int can_access_peer = -1;
auto status =
cuda::ToStatus(cuDeviceCanAccessPeer(&can_access_peer, from, to));
if (!status.ok()) {
LOG(ERROR) << "failed to detect peer access capability: " << status;
return false;
}
return can_access_peer;
}
absl::Status GpuDriver::EnablePeerAccess(Context* from, Context* to) {
if (from == to) {
return absl::OkStatus();
}
ScopedActivateContext activated{from};
CUresult result = cuCtxEnablePeerAccess(
      tensorflow::down_cast<GpuContext*>(to)->context(), 0);
if (result != CUDA_SUCCESS &&
result != CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED) {
return absl::InternalError(
absl::StrFormat("failed to enable peer access from %p to %p: %s", from,
to, cuda::ToStatus(result).ToString()));
}
return absl::OkStatus();
}
absl::StatusOr<int> GpuDriver::GetMaxOccupiedBlocksPerCore(
Context* context, CUfunction kernel, int threads_per_block,
size_t dynamic_shared_memory_bytes) {
ScopedActivateContext activation(context);
int max_blocks;
TF_RETURN_IF_ERROR(cuda::ToStatus(
cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(
&max_blocks, kernel, threads_per_block, dynamic_shared_memory_bytes,
CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE),
absl::StrFormat("Failed to calculate occupancy of kernel %p", kernel)));
return max_blocks;
}
absl::StatusOr<size_t> GpuDriver::GraphGetNodeCount(GpuGraphHandle graph) {
size_t num_nodes;
TF_RETURN_IF_ERROR(
cuda::ToStatus(cuGraphGetNodes(graph, nullptr, &num_nodes)));
return num_nodes;
}
}
} | #include "xla/stream_executor/cuda/cuda_driver.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/cleanup/cleanup.h"
#include "absl/log/log.h"
#include "third_party/gpus/cuda/include/cuda.h"
#include "third_party/gpus/cuda/include/cuda_runtime_api.h"
#include "third_party/gpus/cuda/include/driver_types.h"
#include "xla/stream_executor/cuda/cuda_diagnostics.h"
#include "xla/stream_executor/cuda/cuda_status.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/gpu/gpu_types.h"
#include "xla/stream_executor/gpu/scoped_activate_context.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
using ::tsl::testing::IsOkAndHolds;
namespace stream_executor {
namespace cuda {
void CheckCuda(CUresult result, const char* file, int line) {
TF_CHECK_OK(cuda::ToStatus(result));
}
void CheckCuda(cudaError_t result, const char* file, int line) {
if (result == cudaSuccess) {
return;
}
const char* name = cudaGetErrorName(result);
const char* message = cudaGetErrorString(result);
LOG(FATAL) << file << "(" << line << "): " << name << ", " << message;
}
#define CHECK_CUDA(result) CheckCuda(result, __FILE__, __LINE__)
class CudaDriverTest : public ::testing::Test {
protected:
static void SetUpTestSuite() { CHECK_CUDA(cuInit(0)); }
};
TEST_F(CudaDriverTest, ScopedActivateContextTest) {
CUdevice device;
CHECK_CUDA(cuDeviceGet(&device, 0));
CUcontext context0, context1;
CHECK_CUDA(cuCtxCreate(&context0, 0, device));
CHECK_CUDA(cuCtxCreate(&context1, 0, device));
gpu::GpuContext se_context1(context1, 101);
{
gpu::ScopedActivateContext scope(&se_context1);
CUcontext c;
CHECK_CUDA(cuCtxGetCurrent(&c));
EXPECT_EQ(c, context1);
}
CHECK_CUDA(cuCtxSetCurrent(context0));
{
gpu::ScopedActivateContext scope(&se_context1);
CUcontext c;
CHECK_CUDA(cuCtxGetCurrent(&c));
EXPECT_EQ(c, context1);
}
}
TEST_F(CudaDriverTest, DriverVersionParsingTest) {
auto driver_version = Diagnostician::FindKernelModuleVersion(
"... NVIDIA UNIX Open Kernel Module for x86_64 570.00 Release Build "
"... Mon Aug 12 04:17:20 UTC 2024");
TF_CHECK_OK(driver_version.status());
EXPECT_EQ("570.0.0", cuda::DriverVersionToString(driver_version.value()));
driver_version = Diagnostician::FindKernelModuleVersion(
"... NVIDIA UNIX Open Kernel Module 571.00 Release Build "
"... Mon Aug 12 04:17:20 UTC 2024");
TF_CHECK_OK(driver_version.status());
EXPECT_EQ("571.0.0", cuda::DriverVersionToString(driver_version.value()));
}
TEST_F(CudaDriverTest, GraphGetNodeCountTest) {
CUdevice device;
CHECK_CUDA(cuDeviceGet(&device, 0));
CUcontext context;
CHECK_CUDA(cuCtxCreate(&context, 0, device));
gpu::GpuGraphHandle graph;
TF_CHECK_OK(gpu::GpuDriver::CreateGraph(&graph));
absl::Cleanup cleanup(
[graph] { TF_CHECK_OK(gpu::GpuDriver::DestroyGraph(graph)); });
EXPECT_THAT(gpu::GpuDriver::GraphGetNodeCount(graph), IsOkAndHolds(0));
gpu::GpuGraphNodeHandle node;
TF_CHECK_OK(gpu::GpuDriver::GraphAddEmptyNode(&node, graph, {}));
EXPECT_THAT(gpu::GpuDriver::GraphGetNodeCount(graph), IsOkAndHolds(1));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/cuda_driver.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/cuda_driver_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bc18fd1c-491b-4d62-b0f4-3017a34034bf | cpp | google/tsl | errors | tsl/platform/errors.cc | tsl/platform/errors_test.cc | #include "tsl/platform/errors.h"
#include <errno.h>
#include <string.h>
#include "tsl/platform/status.h"
#include "tsl/platform/strcat.h"
namespace tsl {
namespace errors {
namespace {
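// Maps a POSIX errno value to the closest canonical absl::StatusCode, grouping
// related errnos (invalid-argument, not-found, permission, resource-exhaustion
// families, ...) under one code; anything unrecognized becomes kUnknown.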
absl::StatusCode ErrnoToCode(int err_number) {
absl::StatusCode code;
switch (err_number) {
case 0:
code = absl::StatusCode::kOk;
break;
case EINVAL:
case ENAMETOOLONG:
case E2BIG:
case EDESTADDRREQ:
case EDOM:
case EFAULT:
case EILSEQ:
case ENOPROTOOPT:
case ENOSTR:
case ENOTSOCK:
case ENOTTY:
case EPROTOTYPE:
case ESPIPE:
code = absl::StatusCode::kInvalidArgument;
break;
case ETIMEDOUT:
case ETIME:
code = absl::StatusCode::kDeadlineExceeded;
break;
case ENODEV:
case ENOENT:
case ENXIO:
case ESRCH:
code = absl::StatusCode::kNotFound;
break;
case EEXIST:
case EADDRNOTAVAIL:
case EALREADY:
code = absl::StatusCode::kAlreadyExists;
break;
case EPERM:
case EACCES:
case EROFS:
code = absl::StatusCode::kPermissionDenied;
break;
case ENOTEMPTY:
case EISDIR:
case ENOTDIR:
case EADDRINUSE:
case EBADF:
case EBUSY:
case ECHILD:
case EISCONN:
#if !defined(_WIN32) && !defined(__HAIKU__)
case ENOTBLK:
#endif
case ENOTCONN:
case EPIPE:
#if !defined(_WIN32)
case ESHUTDOWN:
#endif
case ETXTBSY:
code = absl::StatusCode::kFailedPrecondition;
break;
case ENOSPC:
#if !defined(_WIN32)
case EDQUOT:
#endif
case EMFILE:
case EMLINK:
case ENFILE:
case ENOBUFS:
case ENODATA:
case ENOMEM:
case ENOSR:
#if !defined(_WIN32) && !defined(__HAIKU__)
case EUSERS:
#endif
code = absl::StatusCode::kResourceExhausted;
break;
case EFBIG:
case EOVERFLOW:
case ERANGE:
code = absl::StatusCode::kOutOfRange;
break;
case ENOSYS:
case ENOTSUP:
case EAFNOSUPPORT:
#if !defined(_WIN32)
case EPFNOSUPPORT:
#endif
case EPROTONOSUPPORT:
#if !defined(_WIN32) && !defined(__HAIKU__)
case ESOCKTNOSUPPORT:
#endif
case EXDEV:
code = absl::StatusCode::kUnimplemented;
break;
case EAGAIN:
case ECONNREFUSED:
case ECONNABORTED:
case ECONNRESET:
case EINTR:
#if !defined(_WIN32)
case EHOSTDOWN:
#endif
case EHOSTUNREACH:
case ENETDOWN:
case ENETRESET:
case ENETUNREACH:
case ENOLCK:
case ENOLINK:
#if !(defined(__APPLE__) || defined(__FreeBSD__) || defined(_WIN32) || \
defined(__HAIKU__))
case ENONET:
#endif
code = absl::StatusCode::kUnavailable;
break;
case EDEADLK:
#if !defined(_WIN32)
case ESTALE:
#endif
code = absl::StatusCode::kAborted;
break;
case ECANCELED:
code = absl::StatusCode::kCancelled;
break;
case EBADMSG:
case EIDRM:
case EINPROGRESS:
case EIO:
case ELOOP:
case ENOEXEC:
case ENOMSG:
case EPROTO:
#if !defined(_WIN32) && !defined(__HAIKU__)
case EREMOTE:
#endif
code = absl::StatusCode::kUnknown;
break;
default: {
code = absl::StatusCode::kUnknown;
break;
}
}
return code;
}
}
absl::Status IOError(const string& context, int err_number) {
auto code = ErrnoToCode(err_number);
return absl::Status(code,
strings::StrCat(context, "; ", strerror(err_number)));
}
bool IsAborted(const absl::Status& status) {
return status.code() == tsl::error::Code::ABORTED;
}
bool IsAlreadyExists(const absl::Status& status) {
return status.code() == tsl::error::Code::ALREADY_EXISTS;
}
bool IsCancelled(const absl::Status& status) {
return status.code() == tsl::error::Code::CANCELLED;
}
bool IsDataLoss(const absl::Status& status) {
return status.code() == tsl::error::Code::DATA_LOSS;
}
bool IsDeadlineExceeded(const absl::Status& status) {
return status.code() == tsl::error::Code::DEADLINE_EXCEEDED;
}
bool IsFailedPrecondition(const absl::Status& status) {
return status.code() == tsl::error::Code::FAILED_PRECONDITION;
}
bool IsInternal(const absl::Status& status) {
return status.code() == tsl::error::Code::INTERNAL;
}
bool IsInvalidArgument(const absl::Status& status) {
return status.code() == tsl::error::Code::INVALID_ARGUMENT;
}
bool IsNotFound(const absl::Status& status) {
return status.code() == tsl::error::Code::NOT_FOUND;
}
bool IsOutOfRange(const absl::Status& status) {
return status.code() == tsl::error::Code::OUT_OF_RANGE;
}
bool IsPermissionDenied(const absl::Status& status) {
return status.code() == tsl::error::Code::PERMISSION_DENIED;
}
bool IsResourceExhausted(const absl::Status& status) {
return status.code() == tsl::error::Code::RESOURCE_EXHAUSTED;
}
bool IsUnauthenticated(const absl::Status& status) {
return status.code() == tsl::error::Code::UNAUTHENTICATED;
}
bool IsUnavailable(const absl::Status& status) {
return status.code() == tsl::error::Code::UNAVAILABLE;
}
bool IsUnimplemented(const absl::Status& status) {
return status.code() == tsl::error::Code::UNIMPLEMENTED;
}
bool IsUnknown(const absl::Status& status) {
return status.code() == tsl::error::Code::UNKNOWN;
}
}
} | #include "tsl/platform/errors.h"
#include "absl/status/status.h"
#include "tsl/platform/test.h"
namespace tsl {
TEST(AppendToMessageTest, PayloadsAreCopied) {
absl::Status status = errors::Aborted("Aborted Error Message");
status.SetPayload("payload_key", absl::Cord("payload_value"));
errors::AppendToMessage(&status, "Appended Message");
EXPECT_EQ(status.message(), "Aborted Error Message\n\tAppended Message");
EXPECT_EQ(status.GetPayload("payload_key"), absl::Cord("payload_value"));
}
TEST(Status, GetAllPayloads) {
absl::Status s_error(absl::StatusCode::kInternal, "Error message");
s_error.SetPayload("Error key", absl::Cord("foo"));
auto payloads_error_status = errors::GetPayloads(s_error);
ASSERT_EQ(payloads_error_status.size(), 1);
ASSERT_EQ(payloads_error_status["Error key"], "foo");
absl::Status s_ok = absl::Status();
auto payloads_ok_status = errors::GetPayloads(s_ok);
ASSERT_TRUE(payloads_ok_status.empty());
}
TEST(Status, OKStatusInsertPayloadsFromErrorStatus) {
absl::Status s_error(absl::StatusCode::kInternal, "Error message");
s_error.SetPayload("Error key", absl::Cord("foo"));
absl::Status s_ok = absl::Status();
errors::InsertPayloads(s_ok, errors::GetPayloads(s_error));
auto payloads_ok_status = errors::GetPayloads(s_ok);
ASSERT_TRUE(payloads_ok_status.empty());
}
TEST(Status, ErrorStatusInsertPayloadsFromOKStatus) {
absl::Status s_error(absl::StatusCode::kInternal, "Error message");
s_error.SetPayload("Error key", absl::Cord("foo"));
absl::Status s_ok = absl::Status();
errors::InsertPayloads(s_error, errors::GetPayloads(s_ok));
ASSERT_EQ(s_error.GetPayload("Error key"), "foo");
}
TEST(Status, ErrorStatusInsertPayloadsFromErrorStatus) {
absl::Status s_error1(absl::StatusCode::kInternal, "Error message");
s_error1.SetPayload("Error key 1", absl::Cord("foo"));
s_error1.SetPayload("Error key 2", absl::Cord("bar"));
absl::Status s_error2(absl::StatusCode::kInternal, "Error message");
s_error2.SetPayload("Error key", absl::Cord("bar"));
ASSERT_EQ(s_error2.GetPayload("Error key"), "bar");
errors::InsertPayloads(s_error2, errors::GetPayloads(s_error1));
ASSERT_EQ(s_error2.GetPayload("Error key 1"), "foo");
ASSERT_EQ(s_error2.GetPayload("Error key 2"), "bar");
auto payloads_error_status = errors::GetPayloads(s_error2);
ASSERT_EQ(payloads_error_status.size(), 3);
}
#if defined(PLATFORM_GOOGLE)
absl::Status GetError() {
return absl::InvalidArgumentError("An invalid argument error");
}
absl::Status PropagateError() {
TF_RETURN_IF_ERROR(GetError());
return absl::OkStatus();
}
absl::Status PropagateError2() {
TF_RETURN_IF_ERROR(PropagateError());
return absl::OkStatus();
}
TEST(Status, StackTracePropagation) {
absl::Status s = PropagateError2();
auto sources = s.GetSourceLocations();
ASSERT_EQ(sources.size(), 3);
for (int i = 0; i < 3; ++i) {
ASSERT_EQ(sources[i].file_name(),
"third_party/tensorflow/tsl/platform/errors_test.cc");
}
}
TEST(Status, SourceLocationsPreservedByAppend) {
absl::Status s = PropagateError2();
ASSERT_EQ(s.GetSourceLocations().size(), 3);
errors::AppendToMessage(&s, "A new message.");
ASSERT_EQ(s.GetSourceLocations().size(), 3);
}
TEST(Status, SourceLocationsPreservedByUpdate) {
absl::Status s = PropagateError2();
ASSERT_EQ(s.GetSourceLocations().size(), 3);
absl::Status s2 = errors::CreateWithUpdatedMessage(s, "New message.");
ASSERT_EQ(s2.GetSourceLocations().size(), 3);
}
#endif
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/errors.cc | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/errors_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
5326e467-00f3-4f31-a980-31e0e3f00c84 | cpp | google/arolla | optional_value | arolla/memory/optional_value.cc | arolla/memory/optional_value_test.cc | #include "arolla/memory/optional_value.h"
#include <cstdint>
#include "absl/strings/str_cat.h"
#include "arolla/util/bytes.h"
#include "arolla/util/repr.h"
#include "arolla/util/text.h"
namespace arolla {
ReprToken ReprTraits<OptionalValue<bool>>::operator()(
const OptionalValue<bool>& value) const {
return ReprToken{
value.present ? absl::StrCat("optional_boolean{", Repr(value.value), "}")
: "optional_boolean{NA}"};
}
ReprToken ReprTraits<OptionalValue<int32_t>>::operator()(
const OptionalValue<int32_t>& value) const {
return ReprToken{value.present
? absl::StrCat("optional_int32{", Repr(value.value), "}")
: "optional_int32{NA}"};
}
ReprToken ReprTraits<OptionalValue<int64_t>>::operator()(
const OptionalValue<int64_t>& value) const {
return ReprToken{value.present ? absl::StrCat("optional_", Repr(value.value))
: "optional_int64{NA}"};
}
ReprToken ReprTraits<OptionalValue<uint64_t>>::operator()(
const OptionalValue<uint64_t>& value) const {
return ReprToken{value.present ? absl::StrCat("optional_", Repr(value.value))
: "optional_uint64{NA}"};
}
ReprToken ReprTraits<OptionalValue<float>>::operator()(
const OptionalValue<float>& value) const {
return ReprToken{
value.present ? absl::StrCat("optional_float32{", Repr(value.value), "}")
: "optional_float32{NA}"};
}
ReprToken ReprTraits<OptionalValue<double>>::operator()(
const OptionalValue<double>& value) const {
return ReprToken{value.present ? absl::StrCat("optional_", Repr(value.value))
: "optional_float64{NA}"};
}
ReprToken ReprTraits<OptionalValue<Bytes>>::operator()(
const OptionalValue<Bytes>& value) const {
return ReprToken{value.present
? absl::StrCat("optional_bytes{", Repr(value.value), "}")
: "optional_bytes{NA}"};
}
ReprToken ReprTraits<OptionalValue<Text>>::operator()(
const OptionalValue<Text>& value) const {
return ReprToken{value.present
? absl::StrCat("optional_text{", Repr(value.value), "}")
: "optional_text{NA}"};
}
ReprToken ReprTraits<OptionalUnit>::operator()(
const OptionalUnit& value) const {
return ReprToken{value.present ? "present" : "missing"};
}
} | #include "arolla/memory/optional_value.h"
#include <cstdint>
#include <cstring>
#include <memory>
#include <new>
#include <optional>
#include <sstream>
#include <string>
#include <type_traits>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/util/bytes.h"
#include "arolla/util/repr.h"
#include "arolla/util/text.h"
#include "arolla/util/view_types.h"
namespace arolla {
namespace testing {
namespace {
using absl_testing::IsOkAndHolds;
using absl_testing::StatusIs;
using ::testing::HasSubstr;
using ::testing::Test;
TEST(OptionalValueTest, TestEmptyValues) {
OptionalValue<float> v1;
EXPECT_FALSE(v1.present);
OptionalValue<float> v2(std::optional<float>{});
EXPECT_FALSE(v2.present);
OptionalValue<float> v3(std::nullopt);
EXPECT_FALSE(v3.present);
EXPECT_EQ(v1, v2);
EXPECT_EQ(v1, v3);
v1.value = 1.0f;
v2.value = 2.0f;
EXPECT_EQ(v1, v2);
auto absl_v = v2.AsOptional();
EXPECT_FALSE(absl_v.has_value());
}
TEST(OptionalValueTest, TestConstExpr) {
static_assert(!OptionalValue<int>().present);
static_assert(OptionalValue<int>(5).present);
static_assert(OptionalValue<int>(5).value == 5);
static_assert(MakeOptionalValue(5).present);
static_assert(MakeOptionalValue(5).value == 5);
}
TEST(OptionalValueTest, TestPresentValues) {
OptionalValue<float> v1(1.0f);
EXPECT_TRUE(v1.present);
EXPECT_EQ(1.0f, v1.value);
EXPECT_EQ(Repr(v1), "optional_float32{1.}");
auto v_auto = MakeOptionalValue(1.0f);
EXPECT_TRUE(v_auto.present);
EXPECT_EQ(1.0f, v_auto.value);
EXPECT_EQ(Repr(v_auto), "optional_float32{1.}");
OptionalValue<float> v2(std::optional<float>{2.0f});
EXPECT_TRUE(v2.present);
EXPECT_EQ(2.0f, v2.value);
EXPECT_EQ(Repr(v2), "optional_float32{2.}");
EXPECT_NE(v1, v2);
v1.value = 2.0f;
EXPECT_EQ(v1, v2);
}
TEST(OptionalValueTest, TestAssignment) {
OptionalValue<float> v1;
v1 = 1.0f;
EXPECT_TRUE(v1.present);
EXPECT_EQ(v1.value, 1.0f);
v1 = std::nullopt;
EXPECT_FALSE(v1.present);
}
TEST(OptionalValueTest, MakeStatusOrOptionalValue) {
absl::StatusOr<OptionalValue<float>> v =
MakeStatusOrOptionalValue(absl::StatusOr<float>(1.0f));
ASSERT_OK(v.status());
EXPECT_TRUE(v.value().present);
EXPECT_EQ(v.value().value, 1.0f);
absl::StatusOr<OptionalValue<float>> v_error = MakeStatusOrOptionalValue(
absl::StatusOr<float>(absl::InternalError("fake")));
EXPECT_THAT(v_error.status(),
StatusIs(absl::StatusCode::kInternal, HasSubstr("fake")));
}
TEST(OptionalValueTest, OptionalUnit) {
EXPECT_EQ(OptionalUnit(), kMissing);
EXPECT_EQ(OptionalUnit(false), kMissing);
EXPECT_FALSE(kMissing);
EXPECT_FALSE(kMissing.present);
EXPECT_EQ(Repr(kMissing), "missing");
EXPECT_EQ(OptionalUnit(true), kPresent);
EXPECT_TRUE(kPresent);
EXPECT_TRUE(kPresent.present);
EXPECT_EQ(Repr(kPresent), "present");
}
TEST(OptionalValueTest, Comparison) {
OptionalValue<float> v0;
v0.value = 1.0f;
OptionalValue<float> v1(1.0f);
OptionalValue<float> v2(2.0f);
{
EXPECT_TRUE(v1 == v1);
EXPECT_TRUE(v0 == v0);
EXPECT_FALSE(v1 == v2);
EXPECT_FALSE(v1 == v0);
EXPECT_FALSE(v1 != v1);
EXPECT_FALSE(v0 != v0);
EXPECT_TRUE(v1 != v2);
EXPECT_TRUE(v1 != v0);
OptionalValue<float> v0_2;
v0_2.value = 2.0f;
EXPECT_TRUE(v0 == v0_2);
EXPECT_FALSE(v0 != v0_2);
}
{
EXPECT_TRUE(v1 == 1.0f);
EXPECT_TRUE(1.0f == v1);
EXPECT_FALSE(v1 != 1.0f);
EXPECT_FALSE(1.0f != v1);
EXPECT_FALSE(v1 == 2.0f);
EXPECT_FALSE(2.0f == v1);
EXPECT_TRUE(v1 != 2.0f);
EXPECT_TRUE(2.0f != v1);
}
{
EXPECT_FALSE(v1 == std::nullopt);
EXPECT_FALSE(std::nullopt == v1);
EXPECT_TRUE(v0 == std::nullopt);
EXPECT_TRUE(std::nullopt == v0);
EXPECT_TRUE(v1 != std::nullopt);
EXPECT_TRUE(std::nullopt != v1);
EXPECT_FALSE(v0 != std::nullopt);
EXPECT_FALSE(std::nullopt != v0);
}
}
TEST(OptionalValueTest, TestImplicitConstructors) {
OptionalValue<float> v = {};
EXPECT_EQ(v, OptionalValue<float>());
v = 3.5;
EXPECT_EQ(v, OptionalValue<float>(3.5));
v = std::optional<float>(2.5);
EXPECT_EQ(v, OptionalValue<float>(2.5));
}
TEST(OptionalValueTest, TestMoves) {
auto ptr = std::make_unique<std::string>("Hello!");
OptionalValue<std::unique_ptr<std::string>> v1(std::move(ptr));
EXPECT_TRUE(v1.present);
EXPECT_EQ("Hello!", *(v1.value));
std::optional<std::unique_ptr<std::string>> v2(std::move(v1).AsOptional());
EXPECT_TRUE(v2.has_value());
EXPECT_EQ("Hello!", **v2);
}
template <typename T>
using Slot = FrameLayout::Slot<T>;
TEST(OptionalValueTest, TestFrameLayout) {
FrameLayout::Builder builder;
builder.AddSlot<double>();
builder.AddSlot<int32_t>();
auto optional_slot = builder.AddSlot<OptionalValue<float>>();
Slot<bool> presence_slot = optional_slot.GetSubslot<0>();
Slot<float> value_slot = optional_slot.GetSubslot<1>();
FrameLayout layout = std::move(builder).Build();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
frame.Set(optional_slot, OptionalValue<float>{1.0f});
EXPECT_EQ(true, frame.Get(presence_slot));
EXPECT_EQ(1.0f, frame.Get(value_slot));
frame.Set(value_slot, 2.0f);
EXPECT_EQ(2.0, frame.Get(optional_slot).value);
}
TEST(OptionalValue, IsBZeroConstructible) {
EXPECT_TRUE(is_bzero_constructible<OptionalValue<float>>());
EXPECT_TRUE(is_bzero_constructible<OptionalValue<int>>());
EXPECT_FALSE(is_bzero_constructible<OptionalValue<std::string>>());
}
TEST(OptionalValue, BZeroStateIsEmptyValue) {
using T = OptionalValue<float>;
std::aligned_storage_t<sizeof(T), alignof(T)> storage;
memset(&storage, 0, sizeof(storage));
EXPECT_FALSE(std::launder(reinterpret_cast<const T*>(&storage))->present);
}
TEST(OptionalValue, StructuredBindings) {
{
OptionalValue<float> f;
auto [present, value] = f;
EXPECT_FALSE(present);
}
{
OptionalValue<float> f = 17.0;
auto [present, value] = f;
EXPECT_TRUE(present);
EXPECT_EQ(value, 17.0);
}
}
TEST(OptionalValue, ViewType) {
static_assert(std::is_same_v<view_type_t<OptionalValue<int64_t>>,
OptionalValue<int64_t>>);
static_assert(std::is_same_v<view_type_t<OptionalValue<Bytes>>,
OptionalValue<absl::string_view>>);
auto fn = [](OptionalValue<absl::string_view> v) -> char {
return (v.present && !v.value.empty()) ? v.value[0] : 'X';
};
EXPECT_EQ(fn(OptionalValue<Text>(Text("Hello"))), 'H');
EXPECT_EQ(fn(std::nullopt), 'X');
}
TEST(OptionalValue, WrapFnToAcceptOptionalArgs) {
{
auto fn = [](int a, OptionalValue<int64_t> b, int64_t c) -> int {
return a + c + (b.present ? b.value : 10);
};
auto opt_fn = WrapFnToAcceptOptionalArgs(fn);
EXPECT_EQ(opt_fn(1, 2, 3), OptionalValue<int>(6));
EXPECT_EQ(opt_fn(std::nullopt, 2, 3), OptionalValue<int>());
EXPECT_EQ(opt_fn(1, std::nullopt, 3), OptionalValue<int>(14));
EXPECT_EQ(opt_fn(1, 2, std::nullopt), OptionalValue<int>());
}
{
auto fn = [](const Bytes& v) -> const Bytes& { return v; };
auto opt_fn = WrapFnToAcceptOptionalArgs(fn);
EXPECT_EQ(opt_fn(Bytes("123")), OptionalValue<Bytes>("123"));
}
{
auto fn = [](absl::string_view v) { return v; };
auto opt_fn = WrapFnToAcceptOptionalArgs(fn);
EXPECT_EQ(opt_fn(MakeOptionalValue(Bytes("123"))),
MakeOptionalValue(absl::string_view("123")));
}
{
auto fn = [](int a, OptionalValue<int64_t> b,
int64_t c) -> absl::StatusOr<int> {
if (c < 0) {
return absl::InvalidArgumentError("c < 0");
} else {
return a + c + (b.present ? b.value : 10);
}
};
auto opt_fn = WrapFnToAcceptOptionalArgs(fn);
EXPECT_THAT(opt_fn(1, 2, 3), IsOkAndHolds(OptionalValue<int>(6)));
EXPECT_THAT(opt_fn(1, 2, -3),
StatusIs(absl::StatusCode::kInvalidArgument, "c < 0"));
EXPECT_THAT(opt_fn(std::nullopt, 2, -3),
IsOkAndHolds(OptionalValue<int>()));
}
}
TEST(OptionalValueReprTest, bool) {
EXPECT_EQ(Repr(OptionalValue<bool>(true)), "optional_boolean{true}");
EXPECT_EQ(Repr(OptionalValue<bool>()), "optional_boolean{NA}");
}
TEST(OptionalValueReprTest, int32_t) {
EXPECT_EQ(Repr(OptionalValue<int32_t>(1)), "optional_int32{1}");
EXPECT_EQ(Repr(OptionalValue<int32_t>()), "optional_int32{NA}");
}
TEST(OptionalValueReprTest, int64_t) {
EXPECT_EQ(Repr(OptionalValue<int64_t>(1)), "optional_int64{1}");
EXPECT_EQ(Repr(OptionalValue<int64_t>()), "optional_int64{NA}");
}
TEST(OptionalValueReprTest, uint64_t) {
EXPECT_EQ(Repr(OptionalValue<uint64_t>(1)), "optional_uint64{1}");
EXPECT_EQ(Repr(OptionalValue<uint64_t>()), "optional_uint64{NA}");
}
TEST(OptionalValueReprTest, float) {
EXPECT_EQ(Repr(OptionalValue<float>(1.5)), "optional_float32{1.5}");
EXPECT_EQ(Repr(OptionalValue<float>()), "optional_float32{NA}");
}
TEST(OptionalValueReprTest, double) {
EXPECT_EQ(Repr(OptionalValue<double>(1.5)), "optional_float64{1.5}");
EXPECT_EQ(Repr(OptionalValue<double>()), "optional_float64{NA}");
}
TEST(OptionalValueReprTest, Bytes) {
EXPECT_EQ(Repr(OptionalValue<Bytes>("abc")), "optional_bytes{b'abc'}");
EXPECT_EQ(Repr(OptionalValue<Bytes>()), "optional_bytes{NA}");
}
TEST(OptionalValueReprTest, Text) {
EXPECT_EQ(Repr(OptionalValue<Text>("abc")), "optional_text{'abc'}");
EXPECT_EQ(Repr(OptionalValue<Text>()), "optional_text{NA}");
}
TEST(OptionalValueReprTest, StreamOp) {
{
std::ostringstream oss;
oss << OptionalValue<float>(1.5);
EXPECT_EQ(oss.str(), "optional_float32{1.5}");
}
{
std::ostringstream oss;
oss << OptionalValue<float>();
EXPECT_EQ(oss.str(), "optional_float32{NA}");
}
}
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/memory/optional_value.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/memory/optional_value_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
7695cc84-31be-478b-a72f-9f410a73ea54 | cpp | tensorflow/tensorflow | remove_noop | tensorflow/lite/delegates/gpu/common/transformations/remove_noop.cc | tensorflow/lite/delegates/gpu/common/transformations/remove_noop_test.cc | #include "tensorflow/lite/delegates/gpu/common/transformations/remove_noop.h"
#include <algorithm>
#include <any>
#include <functional>
#include <iterator>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/types/any.h"
#include "absl/types/variant.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
namespace {
using ShouldRemoveOperation = std::function<bool(GraphFloat32* graph, Node*)>;
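// A two-node sequence transformation that drops the trailing node whenever
// `remove_predicate_` matches it.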
class RemoveOperation : public SequenceTransformation {
public:
explicit RemoveOperation(ShouldRemoveOperation remove_predicate)
: remove_predicate_(std::move(remove_predicate)) {}
int ExpectedSequenceLength() const final { return 2; }
TransformResult ApplyToNodesSequence(const std::vector<Node*>& sequence,
GraphFloat32* graph) final {
Node* prev_op_node = sequence.front();
Node* op_node = sequence.back();
if (!remove_predicate_(graph, op_node)) {
return {TransformStatus::SKIPPED, ""};
}
absl::Status status = RemoveFollowingNode(graph, op_node, prev_op_node);
if (!status.ok()) {
return {TransformStatus::INVALID,
"Unable to remove a node: " + std::string(status.message())};
}
return {TransformStatus::APPLIED, ""};
}
private:
ShouldRemoveOperation remove_predicate_;
};
}
std::unique_ptr<SequenceTransformation> NewRemoveSingleInputConcat() {
auto type = ToString(OperationType::CONCAT);
return absl::make_unique<RemoveOperation>(
[type](GraphFloat32* graph, Node* node) {
return type == node->operation.type;
});
}
std::unique_ptr<SequenceTransformation> NewRemoveSingleInputAdd() {
auto type = ToString(OperationType::ADD);
return absl::make_unique<RemoveOperation>(
[type](GraphFloat32* graph, Node* node) {
if (node->operation.type != type) {
return false;
}
auto& attr = absl::any_cast<const ElementwiseAttributes&>(
node->operation.attributes);
return !absl::holds_alternative<Tensor<HWC, DataType::FLOAT32>>(
attr.param) &&
!absl::holds_alternative<Tensor<Linear, DataType::FLOAT32>>(
attr.param) &&
!absl::holds_alternative<float>(attr.param);
});
}
std::unique_ptr<SequenceTransformation> NewRemoveDegenerateUpsampling() {
auto type = ToString(OperationType::RESIZE);
return absl::make_unique<RemoveOperation>(
[type](GraphFloat32* graph, Node* node) {
if (node->operation.type != type) {
return false;
}
auto inputs = graph->FindInputs(node->id);
auto outputs = graph->FindOutputs(node->id);
return inputs.size() == 1 && outputs.size() == 1 &&
inputs[0]->tensor.shape == outputs[0]->tensor.shape;
});
}
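// Drops RESHAPE nodes whose target shape equals the input shape; skipped when
// the node's output is also a graph output.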
class RemoveIdentityReshape : public NodeTransformation {
public:
TransformResult ApplyToNode(Node* node, GraphFloat32* graph) final {
if (node->operation.type != ToString(OperationType::RESHAPE)) {
return {TransformStatus::SKIPPED, ""};
}
auto input_shape = graph->FindInputs(node->id)[0]->tensor.shape;
const auto& reshape_attr =
absl::any_cast<const ReshapeAttributes&>(node->operation.attributes);
if (input_shape != reshape_attr.new_shape) {
return {TransformStatus::SKIPPED, ""};
}
auto output = graph->FindOutputs(node->id)[0];
const auto& graph_outputs = graph->outputs();
if (std::find(graph_outputs.begin(), graph_outputs.end(), output) !=
graph_outputs.end()) {
return {TransformStatus::SKIPPED,
"Can not apply transformation when node output is graph output"};
}
absl::Status status = RemoveSimpleNodeKeepInput(graph, node);
if (!status.ok()) {
return {TransformStatus::INVALID,
"Unable to remove a node: " + std::string(status.message())};
}
return {TransformStatus::APPLIED,
"Removed reshape with input_shape == output_shape."};
}
};
std::unique_ptr<NodeTransformation> NewRemoveIdentityReshape() {
return absl::make_unique<RemoveIdentityReshape>();
}
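// Drops SLICE nodes that copy the whole tensor unchanged (zero starts, unit
// strides, ends equal to the shape), subject to the graph input/output
// constraints checked below.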
class RemoveIdentityStridedSlice : public NodeTransformation {
public:
TransformResult ApplyToNode(Node* node, GraphFloat32* graph) final {
if (node->operation.type != ToString(OperationType::SLICE)) {
return {TransformStatus::SKIPPED, ""};
}
auto input = graph->FindInputs(node->id)[0];
auto output = graph->FindOutputs(node->id)[0];
const auto& slice_attr =
absl::any_cast<const SliceAttributes&>(node->operation.attributes);
if (input->tensor.shape != output->tensor.shape) {
return {TransformStatus::SKIPPED, ""};
}
if (slice_attr.starts != BHWC(0, 0, 0, 0)) {
return {TransformStatus::SKIPPED, ""};
}
if (slice_attr.strides != BHWC(1, 1, 1, 1)) {
return {TransformStatus::SKIPPED, ""};
}
if (slice_attr.ends != output->tensor.shape) {
return {TransformStatus::SKIPPED, ""};
}
const auto& graph_outputs = graph->outputs();
const auto& graph_inputs = graph->inputs();
const bool input_is_graph_input =
std::find(graph_inputs.begin(), graph_inputs.end(), input) !=
graph_inputs.end();
const bool output_is_graph_output =
std::find(graph_outputs.begin(), graph_outputs.end(), output) !=
graph_outputs.end();
if (input_is_graph_input && output_is_graph_output) {
return {TransformStatus::SKIPPED,
"Can not apply transformation when node input is graph input and "
"node output is graph output"};
}
if (output_is_graph_output) {
if (graph->FindConsumers(input->id).size() != 1) {
return {TransformStatus::SKIPPED,
"Can not apply transformation when node output is graph output "
"and input consumed by other nodes."};
}
absl::Status status = RemoveSimpleNodeKeepOutput(graph, node);
if (!status.ok()) {
return {TransformStatus::INVALID,
"Unable to remove a node: " + std::string(status.message())};
}
return {TransformStatus::APPLIED, "Removed identity strided slice."};
}
absl::Status status = RemoveSimpleNodeKeepInput(graph, node);
if (!status.ok()) {
return {TransformStatus::INVALID,
"Unable to remove a node: " + std::string(status.message())};
}
return {TransformStatus::APPLIED, "Removed identity strided slice."};
}
};
std::unique_ptr<NodeTransformation> NewRemoveIdentityStridedSlice() {
return absl::make_unique<RemoveIdentityStridedSlice>();
}
}  // namespace gpu
}  // namespace tflite | #include "tensorflow/lite/delegates/gpu/common/transformations/remove_noop.h"
#include <any>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
namespace {
using ::testing::UnorderedElementsAre;
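// Graph: input -> first_node -> temp -> add_node -> output, where the ADD
// node has a single input and empty ElementwiseAttributes. The transformation
// is expected to remove the ADD node and keep first_node producing output.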
TEST(RemoveSingleInputAdd, Smoke) {
GraphFloat32 graph;
auto input = graph.NewValue();
auto first_node = graph.NewNode();
ASSERT_TRUE(graph.AddConsumer(first_node->id, input->id).ok());
auto add_node = graph.NewNode();
Value* output = nullptr;
ASSERT_TRUE(AddOutput(&graph, add_node, &output).ok());
add_node->operation.type = ToString(OperationType::ADD);
add_node->operation.attributes = ElementwiseAttributes();
Value* temp = nullptr;
ASSERT_TRUE(ConnectTwoNodes(&graph, first_node, add_node, &temp).ok());
ASSERT_EQ(2, graph.nodes().size());
ASSERT_EQ(3, graph.values().size());
auto transformation = NewRemoveSingleInputAdd();
ModelTransformer transformer(&graph);
transformer.Apply("noop", transformation.get());
EXPECT_EQ(1, graph.nodes().size());
ASSERT_EQ(2, graph.values().size());
ASSERT_EQ(first_node, graph.nodes()[0]);
ASSERT_EQ(input, graph.values()[0]);
ASSERT_EQ(output, graph.values()[1]);
}
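// The DoNotTrigger_* tests below check that the ADD node is preserved when
// its ElementwiseAttributes carry a parameter (HWC tensor, Linear tensor or
// scalar) or when it has more than one input.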
TEST(RemoveSingleInputAdd, DoNotTrigger_TensorHWC) {
GraphFloat32 graph;
auto input = graph.NewValue();
auto first_node = graph.NewNode();
ASSERT_TRUE(graph.AddConsumer(first_node->id, input->id).ok());
auto add_node = graph.NewNode();
Value* output = nullptr;
ASSERT_TRUE(AddOutput(&graph, add_node, &output).ok());
add_node->operation.type = ToString(OperationType::ADD);
ElementwiseAttributes attr;
attr.param = Tensor<HWC, DataType::FLOAT32>();
add_node->operation.attributes = attr;
Value* temp = nullptr;
ASSERT_TRUE(ConnectTwoNodes(&graph, first_node, add_node, &temp).ok());
ASSERT_EQ(2, graph.nodes().size());
ASSERT_EQ(3, graph.values().size());
auto transformation = NewRemoveSingleInputAdd();
ModelTransformer transformer(&graph);
transformer.Apply("noop", transformation.get());
EXPECT_EQ(2, graph.nodes().size());
ASSERT_EQ(3, graph.values().size());
}
TEST(RemoveSingleInputAdd, DoNotTrigger_LinearTensor) {
GraphFloat32 graph;
auto input = graph.NewValue();
auto first_node = graph.NewNode();
ASSERT_TRUE(graph.AddConsumer(first_node->id, input->id).ok());
auto add_node = graph.NewNode();
Value* output = nullptr;
ASSERT_TRUE(AddOutput(&graph, add_node, &output).ok());
add_node->operation.type = ToString(OperationType::ADD);
ElementwiseAttributes attr;
attr.param = Tensor<Linear, DataType::FLOAT32>();
add_node->operation.attributes = attr;
Value* temp = nullptr;
ASSERT_TRUE(ConnectTwoNodes(&graph, first_node, add_node, &temp).ok());
ASSERT_EQ(2, graph.nodes().size());
ASSERT_EQ(3, graph.values().size());
auto transformation = NewRemoveSingleInputAdd();
ModelTransformer transformer(&graph);
transformer.Apply("noop", transformation.get());
EXPECT_EQ(2, graph.nodes().size());
ASSERT_EQ(3, graph.values().size());
}
TEST(RemoveSingleInputAdd, DoNotTrigger_Scalar) {
GraphFloat32 graph;
auto input = graph.NewValue();
auto first_node = graph.NewNode();
ASSERT_TRUE(graph.AddConsumer(first_node->id, input->id).ok());
auto add_node = graph.NewNode();
Value* output = nullptr;
ASSERT_TRUE(AddOutput(&graph, add_node, &output).ok());
add_node->operation.type = ToString(OperationType::ADD);
ElementwiseAttributes attr;
attr.param = 0.5f;
add_node->operation.attributes = attr;
Value* temp = nullptr;
ASSERT_TRUE(ConnectTwoNodes(&graph, first_node, add_node, &temp).ok());
ASSERT_EQ(2, graph.nodes().size());
ASSERT_EQ(3, graph.values().size());
auto transformation = NewRemoveSingleInputAdd();
ModelTransformer transformer(&graph);
transformer.Apply("noop", transformation.get());
EXPECT_EQ(2, graph.nodes().size());
ASSERT_EQ(3, graph.values().size());
}
TEST(RemoveSingleInputAdd, DoNotTrigger_Multiple) {
GraphFloat32 graph;
auto input = graph.NewValue();
auto node_a = graph.NewNode();
auto node_b = graph.NewNode();
ASSERT_TRUE(graph.AddConsumer(node_a->id, input->id).ok());
ASSERT_TRUE(graph.AddConsumer(node_b->id, input->id).ok());
auto add_node = graph.NewNode();
Value* output = nullptr;
ASSERT_TRUE(AddOutput(&graph, add_node, &output).ok());
add_node->operation.type = ToString(OperationType::ADD);
Value* temp_a = nullptr;
Value* temp_b = nullptr;
ASSERT_TRUE(ConnectTwoNodes(&graph, node_a, add_node, &temp_a).ok());
ASSERT_TRUE(ConnectTwoNodes(&graph, node_b, add_node, &temp_b).ok());
ASSERT_EQ(3, graph.nodes().size());
ASSERT_EQ(4, graph.values().size());
auto transformation = NewRemoveSingleInputAdd();
ModelTransformer transformer(&graph);
transformer.Apply("noop", transformation.get());
ASSERT_EQ(3, graph.nodes().size());
ASSERT_EQ(4, graph.values().size());
}
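// A RESIZE node whose input and output shapes are both 1x5x5x1 is degenerate
// and should be removed, leaving only first_node in the graph.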
TEST(RemoveDegenerateUpsampling, Smoke) {
GraphFloat32 graph;
auto input = graph.NewValue();
auto first_node = graph.NewNode();
ASSERT_TRUE(graph.AddConsumer(first_node->id, input->id).ok());
auto node_to_remove = graph.NewNode();
Value* output = nullptr;
ASSERT_TRUE(AddOutput(&graph, node_to_remove, &output).ok());
output->tensor.shape = BHWC(1, 5, 5, 1);
node_to_remove->operation.type = ToString(OperationType::RESIZE);
Resize2DAttributes attr;
attr.new_shape = HW(5, 5);
attr.type = SamplingType::BILINEAR;
node_to_remove->operation.attributes = attr;
Value* link = nullptr;
ASSERT_TRUE(ConnectTwoNodes(&graph, first_node, node_to_remove, &link).ok());
link->tensor.shape = output->tensor.shape;
ASSERT_EQ(2, graph.nodes().size());
ASSERT_EQ(3, graph.values().size());
auto transformation = NewRemoveDegenerateUpsampling();
ModelTransformer transformer(&graph);
transformer.Apply("noop", transformation.get());
ASSERT_EQ(1, graph.nodes().size());
ASSERT_EQ(2, graph.values().size());
EXPECT_EQ(first_node, graph.nodes()[0]);
EXPECT_EQ(input, graph.values()[0]);
EXPECT_EQ(output, graph.values()[1]);
}
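// A RESHAPE to the same BHWC(1, 1, 1, 11) shape sits between producer_node
// and consumer_node; it should be removed and consumer_node rewired to read
// value0 directly.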
TEST(RemoveIdentityReshape, Smoke) {
GraphFloat32 graph;
Node* simple_node = graph.NewNode();
Node* producer_node = graph.NewNode();
Node* consumer_node = graph.NewNode();
Value* graph_input = graph.NewValue();
Value* graph_output = graph.NewValue();
Value* value0 = graph.NewValue();
Value* value1 = graph.NewValue();
value0->tensor.shape = BHWC(1, 1, 1, 11);
simple_node->operation.type = ToString(OperationType::RESHAPE);
ReshapeAttributes attr;
attr.new_shape = BHWC(1, 1, 1, 11);
simple_node->operation.attributes = attr;
ASSERT_TRUE(graph.AddConsumer(producer_node->id, graph_input->id).ok());
ASSERT_TRUE(graph.SetProducer(producer_node->id, value0->id).ok());
ASSERT_TRUE(graph.AddConsumer(simple_node->id, value0->id).ok());
ASSERT_TRUE(graph.SetProducer(simple_node->id, value1->id).ok());
ASSERT_TRUE(graph.AddConsumer(consumer_node->id, value1->id).ok());
ASSERT_TRUE(graph.SetProducer(consumer_node->id, graph_output->id).ok());
EXPECT_THAT(graph.inputs(), UnorderedElementsAre(graph_input));
EXPECT_THAT(graph.outputs(), UnorderedElementsAre(graph_output));
EXPECT_THAT(graph.nodes(),
UnorderedElementsAre(simple_node, producer_node, consumer_node));
auto transformation = NewRemoveIdentityReshape();
ModelTransformer transformer(&graph);
transformer.Apply("noop", transformation.get());
EXPECT_THAT(graph.inputs(), UnorderedElementsAre(graph_input));
EXPECT_THAT(graph.outputs(), UnorderedElementsAre(graph_output));
EXPECT_THAT(graph.nodes(),
UnorderedElementsAre(producer_node, consumer_node));
EXPECT_THAT(graph.values(),
UnorderedElementsAre(graph_input, graph_output, value0));
}
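// A SLICE covering the whole BHWC(1, 1, 1, 11) tensor with zero starts and
// unit strides is an identity; it should be removed and consumer_node should
// then consume value0 directly.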
TEST(RemoveIdentityStridedSlice, Smoke) {
GraphFloat32 graph;
Node* simple_node = graph.NewNode();
Node* producer_node = graph.NewNode();
Node* consumer_node = graph.NewNode();
Value* graph_input = graph.NewValue();
Value* graph_output = graph.NewValue();
Value* value0 = graph.NewValue();
Value* value1 = graph.NewValue();
value0->tensor.shape = BHWC(1, 1, 1, 11);
value1->tensor.shape = BHWC(1, 1, 1, 11);
simple_node->operation.type = ToString(OperationType::SLICE);
SliceAttributes attr;
attr.starts = BHWC(0, 0, 0, 0);
attr.strides = BHWC(1, 1, 1, 1);
attr.ends = BHWC(1, 1, 1, 11);
simple_node->operation.attributes = attr;
ASSERT_TRUE(graph.AddConsumer(producer_node->id, graph_input->id).ok());
ASSERT_TRUE(graph.SetProducer(producer_node->id, value0->id).ok());
ASSERT_TRUE(graph.AddConsumer(simple_node->id, value0->id).ok());
ASSERT_TRUE(graph.SetProducer(simple_node->id, value1->id).ok());
ASSERT_TRUE(graph.AddConsumer(consumer_node->id, value1->id).ok());
ASSERT_TRUE(graph.SetProducer(consumer_node->id, graph_output->id).ok());
EXPECT_THAT(graph.inputs(), UnorderedElementsAre(graph_input));
EXPECT_THAT(graph.outputs(), UnorderedElementsAre(graph_output));
EXPECT_THAT(graph.nodes(),
UnorderedElementsAre(simple_node, producer_node, consumer_node));
auto transformation = NewRemoveIdentityStridedSlice();
ModelTransformer transformer(&graph);
transformer.Apply("noop", transformation.get());
EXPECT_THAT(graph.inputs(), UnorderedElementsAre(graph_input));
EXPECT_THAT(graph.outputs(), UnorderedElementsAre(graph_output));
EXPECT_THAT(graph.nodes(),
UnorderedElementsAre(producer_node, consumer_node));
EXPECT_THAT(graph.values(),
UnorderedElementsAre(graph_input, graph_output, value0));
}
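// Here the slice output (value2) is a graph output and its input (value1) is
// also consumed by second_node, so the transformation must leave the graph
// unchanged.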
TEST(RemoveIdentityStridedSlice, OutputIsGraphOutputInputConsumedByFewNodes) {
GraphFloat32 graph;
Node* first_node = graph.NewNode();
Node* slice_node = graph.NewNode();
Node* second_node = graph.NewNode();
Value* value0 = graph.NewValue();
Value* value1 = graph.NewValue();
Value* value2 = graph.NewValue();
Value* value3 = graph.NewValue();
value0->tensor.shape = BHWC(1, 1, 1, 11);
value1->tensor.shape = BHWC(1, 1, 1, 11);
value2->tensor.shape = BHWC(1, 1, 1, 11);
value3->tensor.shape = BHWC(1, 1, 1, 11);
slice_node->operation.type = ToString(OperationType::SLICE);
SliceAttributes attr;
attr.starts = BHWC(0, 0, 0, 0);
attr.strides = BHWC(1, 1, 1, 1);
attr.ends = BHWC(1, 1, 1, 11);
slice_node->operation.attributes = attr;
ASSERT_TRUE(graph.AddConsumer(first_node->id, value0->id).ok());
ASSERT_TRUE(graph.SetProducer(first_node->id, value1->id).ok());
ASSERT_TRUE(graph.AddConsumer(slice_node->id, value1->id).ok());
ASSERT_TRUE(graph.AddConsumer(second_node->id, value1->id).ok());
ASSERT_TRUE(graph.SetProducer(slice_node->id, value2->id).ok());
ASSERT_TRUE(graph.SetProducer(second_node->id, value3->id).ok());
EXPECT_THAT(graph.inputs(), UnorderedElementsAre(value0));
EXPECT_THAT(graph.outputs(), UnorderedElementsAre(value2, value3));
EXPECT_THAT(graph.nodes(),
UnorderedElementsAre(first_node, slice_node, second_node));
auto transformation = NewRemoveIdentityStridedSlice();
ModelTransformer transformer(&graph);
transformer.Apply("noop", transformation.get());
EXPECT_THAT(graph.inputs(), UnorderedElementsAre(value0));
EXPECT_THAT(graph.outputs(), UnorderedElementsAre(value2, value3));
EXPECT_THAT(graph.nodes(),
UnorderedElementsAre(first_node, slice_node, second_node));
EXPECT_THAT(graph.values(),
UnorderedElementsAre(value0, value1, value2, value3));
}
}  // namespace
}  // namespace gpu
}  // namespace tflite | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/transformations/remove_noop.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/transformations/remove_noop_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea