ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash
---|---|---|---|---|---|---|---|---|---|---|
e87f703b-9ab7-41f0-a6af-71cbc58cb452 | cpp | google/libaddressinput | address_data | cpp/src/address_data.cc | cpp/test/address_data_test.cc | #include <libaddressinput/address_data.h>
#include <libaddressinput/address_field.h>
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <ostream>
#include <string>
#include <vector>
#include <re2/re2.h>
#include "util/size.h"
namespace i18n {
namespace addressinput {
namespace {
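// Maps each AddressField enum value, in order, to the corresponding string
// member of AddressData; the nullptr slot corresponds to STREET_ADDRESS,
// which is stored as a repeated field (see kVectorStringField below).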
std::string AddressData::*kStringField[] = {
&AddressData::region_code,
&AddressData::administrative_area,
&AddressData::locality,
&AddressData::dependent_locality,
&AddressData::sorting_code,
&AddressData::postal_code,
nullptr,
&AddressData::organization,
&AddressData::recipient,
};
const std::vector<std::string> AddressData::*kVectorStringField[] = {
nullptr,
nullptr,
nullptr,
nullptr,
nullptr,
nullptr,
&AddressData::address_line,
nullptr,
nullptr,
};
static_assert(size(kStringField) == size(kVectorStringField),
"field_mapping_array_size_mismatch");
bool IsStringEmpty(const std::string& str) {
static const RE2 kMatcher(R"(\S)");
return str.empty() || !RE2::PartialMatch(str, kMatcher);
}
}
bool AddressData::IsFieldEmpty(AddressField field) const {
assert(field >= 0);
assert(static_cast<size_t>(field) < size(kStringField));
if (kStringField[field] != nullptr) {
const auto& value = GetFieldValue(field);
return IsStringEmpty(value);
} else {
const auto& value = GetRepeatedFieldValue(field);
return std::find_if_not(value.begin(), value.end(), IsStringEmpty) ==
value.end();
}
}
const std::string& AddressData::GetFieldValue(AddressField field) const {
assert(field >= 0);
assert(static_cast<size_t>(field) < size(kStringField));
assert(kStringField[field] != nullptr);
return this->*kStringField[field];
}
void AddressData::SetFieldValue(AddressField field, const std::string& value) {
assert(field >= 0);
assert(static_cast<size_t>(field) < size(kStringField));
assert(kStringField[field] != nullptr);
(this->*kStringField[field]).assign(value);
}
const std::vector<std::string>& AddressData::GetRepeatedFieldValue(
AddressField field) const {
assert(IsRepeatedFieldValue(field));
return this->*kVectorStringField[field];
}
bool AddressData::operator==(const AddressData& other) const {
return region_code == other.region_code &&
address_line == other.address_line &&
administrative_area == other.administrative_area &&
locality == other.locality &&
dependent_locality == other.dependent_locality &&
postal_code == other.postal_code &&
sorting_code == other.sorting_code &&
language_code == other.language_code &&
organization == other.organization &&
recipient == other.recipient;
}
bool AddressData::IsRepeatedFieldValue(AddressField field) {
assert(field >= 0);
assert(static_cast<size_t>(field) < size(kVectorStringField));
return kVectorStringField[field] != nullptr;
}
}
}
std::ostream& operator<<(std::ostream& o,
const i18n::addressinput::AddressData& address) {
o << "region_code: \"" << address.region_code << "\"\n"
"administrative_area: \"" << address.administrative_area << "\"\n"
"locality: \"" << address.locality << "\"\n"
"dependent_locality: \"" << address.dependent_locality << "\"\n"
"postal_code: \"" << address.postal_code << "\"\n"
"sorting_code: \"" << address.sorting_code << "\"\n";
for (const auto& line : address.address_line) {
o << "address_line: \"" << line << "\"\n";
}
o << "language_code: \"" << address.language_code << "\"\n"
"organization: \"" << address.organization << "\"\n"
"recipient: \"" << address.recipient << "\"\n";
return o;
} | #include <libaddressinput/address_data.h>
#include <libaddressinput/address_field.h>
#include <sstream>
#include <gtest/gtest.h>
namespace {
using i18n::addressinput::AddressData;
using i18n::addressinput::AddressField;
using i18n::addressinput::COUNTRY;
using i18n::addressinput::ADMIN_AREA;
using i18n::addressinput::LOCALITY;
using i18n::addressinput::DEPENDENT_LOCALITY;
using i18n::addressinput::SORTING_CODE;
using i18n::addressinput::POSTAL_CODE;
using i18n::addressinput::STREET_ADDRESS;
using i18n::addressinput::ORGANIZATION;
using i18n::addressinput::RECIPIENT;
TEST(AddressDataTest, GetFieldValue) {
const AddressData address{
.region_code = "rrr",
.administrative_area = "sss",
.locality = "ccc",
.dependent_locality = "ddd",
.postal_code = "zzz",
.sorting_code = "xxx",
.organization = "ooo",
.recipient = "nnn",
};
EXPECT_EQ(address.region_code,
address.GetFieldValue(COUNTRY));
EXPECT_EQ(address.administrative_area,
address.GetFieldValue(ADMIN_AREA));
EXPECT_EQ(address.locality,
address.GetFieldValue(LOCALITY));
EXPECT_EQ(address.dependent_locality,
address.GetFieldValue(DEPENDENT_LOCALITY));
EXPECT_EQ(address.sorting_code,
address.GetFieldValue(SORTING_CODE));
EXPECT_EQ(address.postal_code,
address.GetFieldValue(POSTAL_CODE));
EXPECT_EQ(address.organization,
address.GetFieldValue(ORGANIZATION));
EXPECT_EQ(address.recipient,
address.GetFieldValue(RECIPIENT));
}
TEST(AddressDataTest, GetRepeatedFieldValue) {
const AddressData address{.address_line{
"aaa",
"222",
}};
EXPECT_EQ(address.address_line,
address.GetRepeatedFieldValue(STREET_ADDRESS));
}
TEST(AddressDataTest, IsFieldEmpty) {
AddressData address;
EXPECT_TRUE(address.IsFieldEmpty(COUNTRY));
EXPECT_TRUE(address.IsFieldEmpty(ADMIN_AREA));
EXPECT_TRUE(address.IsFieldEmpty(LOCALITY));
EXPECT_TRUE(address.IsFieldEmpty(DEPENDENT_LOCALITY));
EXPECT_TRUE(address.IsFieldEmpty(SORTING_CODE));
EXPECT_TRUE(address.IsFieldEmpty(POSTAL_CODE));
EXPECT_TRUE(address.IsFieldEmpty(STREET_ADDRESS));
EXPECT_TRUE(address.IsFieldEmpty(ORGANIZATION));
EXPECT_TRUE(address.IsFieldEmpty(RECIPIENT));
address = {
.region_code = "rrr",
.address_line{"aaa"},
.administrative_area = "sss",
.locality = "ccc",
.dependent_locality = "ddd",
.postal_code = "zzz",
.sorting_code = "xxx",
.organization = "ooo",
.recipient = "nnn",
};
EXPECT_FALSE(address.IsFieldEmpty(COUNTRY));
EXPECT_FALSE(address.IsFieldEmpty(ADMIN_AREA));
EXPECT_FALSE(address.IsFieldEmpty(LOCALITY));
EXPECT_FALSE(address.IsFieldEmpty(DEPENDENT_LOCALITY));
EXPECT_FALSE(address.IsFieldEmpty(SORTING_CODE));
EXPECT_FALSE(address.IsFieldEmpty(POSTAL_CODE));
EXPECT_FALSE(address.IsFieldEmpty(STREET_ADDRESS));
EXPECT_FALSE(address.IsFieldEmpty(ORGANIZATION));
EXPECT_FALSE(address.IsFieldEmpty(RECIPIENT));
}
TEST(AddressDataTest, IsFieldEmptyWhitespace) {
AddressData address;
address.recipient = " ";
EXPECT_TRUE(address.IsFieldEmpty(RECIPIENT));
address.recipient = "abc";
EXPECT_FALSE(address.IsFieldEmpty(RECIPIENT));
address.recipient = " b ";
EXPECT_FALSE(address.IsFieldEmpty(RECIPIENT));
}
TEST(AddressDataTest, IsFieldEmptyVector) {
AddressData address;
EXPECT_TRUE(address.IsFieldEmpty(STREET_ADDRESS));
address.address_line.emplace_back("");
EXPECT_TRUE(address.IsFieldEmpty(STREET_ADDRESS));
address.address_line.emplace_back("aaa");
EXPECT_FALSE(address.IsFieldEmpty(STREET_ADDRESS));
address.address_line.emplace_back("");
EXPECT_FALSE(address.IsFieldEmpty(STREET_ADDRESS));
}
TEST(AddressDataTest, IsFieldEmptyVectorWhitespace) {
AddressData address{.address_line{
" ",
" ",
" ",
}};
EXPECT_TRUE(address.IsFieldEmpty(STREET_ADDRESS));
address.address_line = {
"abc",
};
EXPECT_FALSE(address.IsFieldEmpty(STREET_ADDRESS));
address.address_line = {
" ",
" b ",
" ",
};
EXPECT_FALSE(address.IsFieldEmpty(STREET_ADDRESS));
}
TEST(AddressDataTest, StreamFunction) {
std::ostringstream oss;
const AddressData address{
.region_code = "R",
.address_line{
"Line 1",
"Line 2",
},
.administrative_area = "S",
.locality = "C",
.dependent_locality = "D",
.postal_code = "Z",
.sorting_code = "X",
.language_code = "zh-Hant",
.organization = "O",
.recipient = "N",
};
oss << address;
EXPECT_EQ("region_code: \"R\"\n"
"administrative_area: \"S\"\n"
"locality: \"C\"\n"
"dependent_locality: \"D\"\n"
"postal_code: \"Z\"\n"
"sorting_code: \"X\"\n"
"address_line: \"Line 1\"\n"
"address_line: \"Line 2\"\n"
"language_code: \"zh-Hant\"\n"
"organization: \"O\"\n"
"recipient: \"N\"\n", oss.str());
}
TEST(AddressDataTest, TestEquals) {
const AddressData address{
.region_code = "R",
.address_line{
"Line 1",
"Line 2",
},
.administrative_area = "S",
.locality = "C",
.dependent_locality = "D",
.postal_code = "Z",
.sorting_code = "X",
.language_code = "zh-Hant",
.organization = "O",
.recipient = "N",
};
AddressData clone = address;
EXPECT_EQ(address, clone);
clone.language_code.clear();
EXPECT_FALSE(address == clone);
}
#ifndef NDEBUG
TEST(AddressDataTest, GetFieldValueInvalid) {
const AddressData address;
ASSERT_DEATH_IF_SUPPORTED(address.GetFieldValue(STREET_ADDRESS),
"ssertion.*failed");
}
TEST(AddressDataTest, GetVectorFieldValueInvalid) {
const AddressData address;
ASSERT_DEATH_IF_SUPPORTED(address.GetRepeatedFieldValue(COUNTRY),
"ssertion.*failed");
}
TEST(AddressDataTest, IsFieldEmptyInvalid) {
static const auto invalid_field = static_cast<AddressField>(-1);
AddressData address;
ASSERT_DEATH_IF_SUPPORTED(address.IsFieldEmpty(invalid_field),
"ssertion.*failed");
}
#endif
} | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/address_data.cc | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/address_data_test.cc | 2610f7b1043d6784ada41392fc9392d1ea09ea07 |
372fea3a-ecdc-4712-95c9-436ca07dd10c | cpp | tensorflow/tensorflow | winograd_util | tensorflow/lite/delegates/gpu/common/winograd_util.cc | tensorflow/lite/delegates/gpu/common/winograd_util_test.cc | #include "tensorflow/lite/delegates/gpu/common/winograd_util.h"
#include <cmath>
#include <vector>
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
namespace {
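// Builds the transposed Winograd interpolation matrix. Column y holds the
// homogeneous point (px[y], py[y]) drawn from {0, +-sqrt(2)/2, +-sqrt(2), ...}
// plus the point at infinity (1, 0); entry (x, y) is
// px[y]^x * py[y]^(height - 1 - x).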
std::vector<float> GetTransposedMatrixForWinograd(int width, int height) {
const float kDelta = std::sqrt(2.0f) / 2.0f;
std::vector<float> px(width);
px[0] = 0.0f;
const int points_count = (width - 1) / 2;
for (int i = 0; i < points_count; ++i) {
px[i * 2 + 1] = kDelta * (i + 1.0f);
px[i * 2 + 2] = -kDelta * (i + 1.0f);
}
px[width - 1] = 1.0f;
std::vector<float> py(width, 1.0f);
py[width - 1] = 0.0f;
std::vector<float> result(height * width);
for (int y = 0; y < width; ++y) {
for (int x = 0; x < height; ++x) {
result[x * width + y] =
std::pow(px[y], 1.0f * x) * std::pow(py[y], (height - 1.0f) - x);
}
}
return result;
}
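// Inverts the square rank x rank Winograd matrix via Gauss-Jordan
// elimination; the result is used as the Bt (input transform) matrix.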
std::vector<float> GetInversedMatrixForWinograd(int rank) {
auto matrix = GetTransposedMatrixForWinograd(rank, rank);
std::vector<float> inverted(rank * rank, 0.0f);
for (int i = 0; i < rank; ++i) {
inverted[i * rank + i] = 1.0f;
}
for (int i = 1; i < rank - 1; ++i) {
float inv_t = 1.0f / matrix[i * rank + i];
for (int x = i; x < rank; ++x) {
matrix[i * rank + x] *= inv_t;
}
for (int x = 0; x < rank; ++x) {
inverted[i * rank + x] *= inv_t;
}
for (int y = 0; y < rank; ++y) {
if (y == i) continue;
float t = matrix[y * rank + i];
for (int x = i; x < rank; ++x) {
matrix[y * rank + x] -= t * matrix[i * rank + x];
}
for (int x = 0; x < rank; ++x) {
inverted[y * rank + x] -= t * inverted[i * rank + x];
}
}
}
return inverted;
}
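// Plain row-major matrix product: a_mat is m x n, b_mat is n x k, and the
// returned matrix is m x k.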
std::vector<float> Multiply(const std::vector<float>& a_mat,
const std::vector<float>& b_mat, int m, int n,
int k) {
std::vector<float> result(m * k);
for (int y = 0; y < m; ++y) {
for (int x = 0; x < k; ++x) {
float sum = 0.0f;
for (int i = 0; i < n; ++i) {
sum += a_mat[y * n + i] * b_mat[i * k + x];
}
result[y * k + x] = sum;
}
}
return result;
}
}
std::vector<float> AtMatrixForWinograd4x4To6x6() {
return GetTransposedMatrixForWinograd(6, 4);
}
std::vector<float> BtMatrixForWinograd4x4To6x6() {
return GetInversedMatrixForWinograd(6);
}
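// Transforms a 3x3 OHWI convolution kernel into its 6x6 Winograd-domain
// equivalent by computing G * w * G^T for every (output, input) channel pair.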
void RearrangeWeightsToWinograd4x4To6x6Weights(
const Tensor<OHWI, DataType::FLOAT32>& src_weights,
Tensor<OHWI, DataType::FLOAT32>* dst_weights) {
OHWI dst_shape;
dst_shape.o = src_weights.shape.o;
dst_shape.h = 6;
dst_shape.w = 6;
dst_shape.i = src_weights.shape.i;
dst_weights->shape = dst_shape;
dst_weights->data.resize(dst_shape.DimensionsProduct());
auto gt_mat = GetTransposedMatrixForWinograd(6, 3);
std::vector<float> g_mat(gt_mat.size());
for (int y = 0; y < 3; ++y) {
for (int x = 0; x < 6; ++x) {
g_mat[x * 3 + y] = gt_mat[y * 6 + x];
}
}
for (int d = 0; d < src_weights.shape.o; ++d) {
for (int s = 0; s < src_weights.shape.i; ++s) {
std::vector<float> in_vals(9);
for (int y = 0; y < 3; ++y) {
for (int x = 0; x < 3; ++x) {
const int f_index = src_weights.shape.LinearIndex({d, y, x, s});
in_vals[y * 3 + x] = src_weights.data[f_index];
}
}
auto temp_vals = Multiply(g_mat, in_vals, 6, 3, 3);
auto out_vals = Multiply(temp_vals, gt_mat, 6, 3, 6);
for (int y = 0; y < 6; ++y) {
for (int x = 0; x < 6; ++x) {
const int f_index = dst_shape.LinearIndex({d, y, x, s});
dst_weights->data[f_index] = out_vals[y * 6 + x];
}
}
}
}
}
bool IsSuitableForWinograd4x4To6x6(const Convolution2DAttributes& attr) {
return attr.weights.shape.w == 3 && attr.weights.shape.h == 3 &&
attr.dilations == HW(1, 1) && attr.strides == HW(1, 1) &&
attr.groups == 1;
}
}
} | #include "tensorflow/lite/delegates/gpu/common/winograd_util.h"
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
namespace tflite {
namespace gpu {
TEST(Winograd, CorrectAttributesFor4x4To6x6) {
Convolution2DAttributes attr;
attr.padding.prepended = HW(1, 2);
attr.padding.appended = HW(0, 1);
attr.strides = HW(1, 1);
attr.dilations = HW(1, 1);
attr.weights.shape = OHWI(1, 3, 3, 1);
EXPECT_TRUE(IsSuitableForWinograd4x4To6x6(attr));
}
TEST(Winograd, IncorrectAttributesFor4x4To6x6) {
Convolution2DAttributes attr;
attr.padding.prepended = HW(1, 2);
attr.padding.appended = HW(0, 1);
attr.strides = HW(1, 1);
attr.dilations = HW(1, 1);
attr.weights.shape = OHWI(1, 2, 3, 1);
EXPECT_FALSE(IsSuitableForWinograd4x4To6x6(attr));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/winograd_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/winograd_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
aa4eaeed-d89a-44ac-8a40-cbeca40aa15a | cpp | tensorflow/tensorflow | resource_variable_grad | tensorflow/cc/gradients/resource_variable_grad.cc | tensorflow/cc/gradients/resource_variable_grad_test.cc | #include <vector>
#include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/ops/array_ops.h"
namespace tensorflow {
namespace ops {
namespace {
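// ReadVariableOp simply reads the resource's value, so its gradient is the
// incoming gradient passed through an Identity op.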
Status ReadVariableOpGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
grad_outputs->push_back(Identity(scope, grad_inputs[0]));
return scope.status();
}
REGISTER_GRADIENT_OP("ReadVariableOp", ReadVariableOpGrad);
}
}
} | #include <iostream>
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/gradient_checker.h"
#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/testutil.h"
#include "tensorflow/cc/gradients/grad_testutil.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
namespace tensorflow {
namespace ops {
namespace {
TEST(ResourceVariableGradTest, ReadVariableOpGrad) {
TensorShape shape({});
auto scope = Scope::NewRootScope();
auto x = Placeholder(scope, DT_FLOAT, Placeholder::Shape(shape));
auto var = VarHandleOp(scope, DT_FLOAT, shape);
auto init = AssignVariableOp(scope, var, Const(scope, 2.0f, shape));
auto temp = ReadVariableOp(scope, var, DT_FLOAT);
auto y = Mul(scope, temp, x);
auto dy = Placeholder(scope, DT_FLOAT, Placeholder::Shape(shape));
OutputList dxs;
TF_ASSERT_OK(AddSymbolicGradients(scope, {y}, {var}, {dy}, &dxs));
ClientSession::FeedType feed_list;
feed_list.insert({x, 5.0f});
feed_list.insert({dy, 1.0f});
std::vector<Tensor> dxout;
ClientSession session(scope);
TF_ASSERT_OK(session.Run(feed_list, dxs, &dxout));
auto grad = dxout[0].scalar<float>()();
EXPECT_EQ(grad, 5.0f);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/gradients/resource_variable_grad.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/gradients/resource_variable_grad_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c0e9b00c-577d-4692-b0de-b284f166c3d1 | cpp | tensorflow/tensorflow | sampler | third_party/xla/xla/tsl/lib/monitoring/sampler.cc | tensorflow/core/lib/monitoring/sampler_test.cc | #include "xla/tsl/lib/monitoring/sampler.h"
#include "absl/log/check.h"
#ifdef IS_MOBILE_PLATFORM
#else
#include <cfloat>  // DBL_MAX serves as the implicit overflow bucket limit.
namespace tsl {
namespace monitoring {
namespace {
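// Buckets defined by an explicit, strictly increasing list of upper limits.
// A trailing DBL_MAX limit is appended if absent, so the last bucket always
// absorbs overflow samples.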
class ExplicitBuckets : public Buckets {
public:
~ExplicitBuckets() override = default;
explicit ExplicitBuckets(std::vector<double> bucket_limits)
: bucket_limits_(std::move(bucket_limits)) {
CHECK_GT(bucket_limits_.size(), 0);
for (size_t i = 1; i < bucket_limits_.size(); i++) {
CHECK_GT(bucket_limits_[i], bucket_limits_[i - 1]);
}
if (bucket_limits_.back() != DBL_MAX) {
bucket_limits_.push_back(DBL_MAX);
}
}
const std::vector<double>& explicit_bounds() const override {
return bucket_limits_;
}
private:
std::vector<double> bucket_limits_;
ExplicitBuckets(const ExplicitBuckets&) = delete;
void operator=(const ExplicitBuckets&) = delete;
};
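// Buckets whose limits grow geometrically: scale, scale * growth_factor,
// scale * growth_factor^2, ... for bucket_count limits, delegating the
// DBL_MAX overflow bucket to ExplicitBuckets.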
class ExponentialBuckets : public Buckets {
public:
~ExponentialBuckets() override = default;
ExponentialBuckets(double scale, double growth_factor, int bucket_count)
: explicit_buckets_(
ComputeBucketLimits(scale, growth_factor, bucket_count)) {}
const std::vector<double>& explicit_bounds() const override {
return explicit_buckets_.explicit_bounds();
}
private:
static std::vector<double> ComputeBucketLimits(double scale,
double growth_factor,
int bucket_count) {
CHECK_GT(bucket_count, 0);
std::vector<double> bucket_limits;
double bound = scale;
for (int i = 0; i < bucket_count; i++) {
bucket_limits.push_back(bound);
bound *= growth_factor;
}
return bucket_limits;
}
ExplicitBuckets explicit_buckets_;
ExponentialBuckets(const ExponentialBuckets&) = delete;
void operator=(const ExponentialBuckets&) = delete;
};
}
std::unique_ptr<Buckets> Buckets::Explicit(std::vector<double> bucket_limits) {
return std::unique_ptr<Buckets>(
new ExplicitBuckets(std::move(bucket_limits)));
}
std::unique_ptr<Buckets> Buckets::Explicit(
std::initializer_list<double> bucket_limits) {
return std::unique_ptr<Buckets>(new ExplicitBuckets(bucket_limits));
}
std::unique_ptr<Buckets> Buckets::Exponential(double scale,
double growth_factor,
int bucket_count) {
return std::unique_ptr<Buckets>(
new ExponentialBuckets(scale, growth_factor, bucket_count));
}
}
}
#endif | #include "tensorflow/core/lib/monitoring/sampler.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace monitoring {
namespace {
using histogram::Histogram;
void EqHistograms(const Histogram& expected,
const HistogramProto& actual_proto) {
Histogram actual;
ASSERT_TRUE(actual.DecodeFromProto(actual_proto));
EXPECT_EQ(expected.ToString(), actual.ToString());
}
auto* sampler_with_labels =
Sampler<1>::New({"/tensorflow/test/sampler_with_labels",
"Sampler with one label.", "MyLabel"},
Buckets::Explicit({10.0, 20.0}));
TEST(LabeledSamplerTest, InitializedEmpty) {
Histogram empty;
EqHistograms(empty, sampler_with_labels->GetCell("Empty")->value());
}
TEST(LabeledSamplerTest, ExplicitBucketBoundaries) {
Histogram expected({10.0, 20.0, DBL_MAX});
auto* cell = sampler_with_labels->GetCell("BucketBoundaries");
sampler_with_labels->GetCell("AddedToCheckPreviousCellValidity");
cell->Add(-1.0);
expected.Add(-1.0);
cell->Add(10.0);
expected.Add(10.0);
cell->Add(20.0);
expected.Add(20.0);
cell->Add(31.0);
expected.Add(31.0);
EqHistograms(expected, cell->value());
}
auto* init_sampler_without_labels =
Sampler<0>::New({"/tensorflow/test/init_sampler_without_labels",
"Sampler without labels initialized as empty."},
Buckets::Explicit(std::vector<double>{1.5, 2.8}));
TEST(UnlabeledSamplerTest, InitializedEmpty) {
Histogram empty;
EqHistograms(empty, init_sampler_without_labels->GetCell()->value());
}
auto* sampler_without_labels =
Sampler<0>::New({"/tensorflow/test/sampler_without_labels",
"Sampler without labels initialized as empty."},
Buckets::Explicit({1.5, 2.8}));
TEST(UnlabeledSamplerTest, ExplicitBucketBoundaries) {
Histogram expected({1.5, 2.8, DBL_MAX});
auto* cell = sampler_without_labels->GetCell();
cell->Add(-1.0);
expected.Add(-1.0);
cell->Add(2.0);
expected.Add(2.0);
cell->Add(31.0);
expected.Add(31.0);
EqHistograms(expected, cell->value());
}
auto* sampler_with_exponential =
Sampler<1>::New({"/tensorflow/test/sampler_with_exponential",
"Sampler with exponential buckets.", "MyLabel"},
Buckets::Exponential(1, 2, 3));
TEST(ExponentialSamplerTest, ExponentialBucketBoundaries) {
Histogram expected({1.0, 2.0, 4.0, DBL_MAX});
auto* cell = sampler_with_exponential->GetCell("BucketBoundaries");
sampler_with_exponential->GetCell("AddedToCheckPreviousCellValidity");
cell->Add(-1.0);
expected.Add(-1.0);
cell->Add(0.5);
expected.Add(0.5);
cell->Add(1.001);
expected.Add(1.001);
cell->Add(3.999);
expected.Add(3.999);
cell->Add(6.0);
expected.Add(6.0);
EqHistograms(expected, cell->value());
}
TEST(ExplicitSamplerTest, SameName) {
auto* same_sampler = Sampler<1>::New({"/tensorflow/test/sampler_with_labels",
"Sampler with one label.", "MyLabel"},
Buckets::Explicit({10.0, 20.0}));
EXPECT_TRUE(sampler_with_labels->GetStatus().ok());
EXPECT_TRUE(same_sampler->GetStatus().ok());
delete same_sampler;
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/monitoring/sampler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/monitoring/sampler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
61928d7e-efb0-4f2c-8acc-2f59f7b04420 | cpp | google/tensorstore | key | tensorstore/kvstore/zarr3_sharding_indexed/key.cc | tensorstore/kvstore/zarr3_sharding_indexed/key_test.cc | #include "tensorstore/kvstore/zarr3_sharding_indexed/key.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <cassert>
#include <cstring>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include "absl/base/internal/endian.h"
#include "absl/status/status.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/index.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/extents.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace zarr3_sharding_indexed {
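// A key is the concatenation of one 32-bit big-endian integer per grid
// dimension; e.g. with grid_shape={4,5,6}, indices {1,2,3} encode to the
// 12-byte key {0,0,0,1, 0,0,0,2, 0,0,0,3} and entry id 1*5*6 + 2*6 + 3 = 45.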
std::string IndicesToKey(span<const Index> grid_cell_indices) {
std::string key;
key.resize(grid_cell_indices.size() * 4);
for (DimensionIndex i = 0; i < grid_cell_indices.size(); ++i) {
absl::big_endian::Store32(key.data() + i * 4, grid_cell_indices[i]);
}
return key;
}
bool KeyToIndices(std::string_view key, span<Index> grid_cell_indices) {
if (key.size() != grid_cell_indices.size() * 4) {
return false;
}
for (DimensionIndex i = 0; i < grid_cell_indices.size(); ++i) {
grid_cell_indices[i] = absl::big_endian::Load32(key.data() + i * 4);
}
return true;
}
std::optional<EntryId> KeyToEntryId(std::string_view key,
span<const Index> grid_shape) {
const DimensionIndex rank = grid_shape.size();
if (rank * sizeof(uint32_t) != key.size()) return {};
EntryId id = 0;
for (DimensionIndex i = 0; i < rank; ++i) {
auto index = absl::big_endian::Load32(key.data() + i * 4);
if (index >= grid_shape[i]) return {};
id *= grid_shape[i];
id += index;
}
return id;
}
Result<EntryId> KeyToEntryIdOrError(std::string_view key,
span<const Index> grid_shape) {
if (auto entry_id = KeyToEntryId(key, grid_shape)) {
return *entry_id;
}
return absl::InvalidArgumentError(
tensorstore::StrCat("Invalid key (grid_shape=", grid_shape,
"): ", tensorstore::QuoteString(key)));
}
std::string EntryIdToKey(EntryId entry_id, span<const Index> grid_shape) {
std::string key;
key.resize(grid_shape.size() * 4);
for (DimensionIndex i = grid_shape.size(); i--;) {
const Index size = grid_shape[i];
absl::big_endian::Store32(key.data() + i * 4, entry_id % size);
entry_id /= size;
}
return key;
}
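// Maps an arbitrary key to the smallest entry id whose key is not less than
// it: short keys are zero-padded, an out-of-range index rounds up past that
// prefix, and a key longer than rank*4 bytes rounds up by one.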
EntryId LowerBoundToEntryId(std::string_view key,
span<const Index> grid_shape) {
char key_padded[kMaxRank * 4];
const size_t full_key_size = grid_shape.size() * 4;
const size_t key_bytes_to_copy = std::min(full_key_size, key.size());
std::memcpy(key_padded, key.data(), key_bytes_to_copy);
std::memset(key_padded + key_bytes_to_copy, 0,
full_key_size - key_bytes_to_copy);
EntryId entry_id = 0;
EntryId remaining_indices_mask = ~static_cast<EntryId>(0);
EntryId max_entry_id = 1;
for (DimensionIndex i = 0; i < grid_shape.size(); ++i) {
const EntryId size = grid_shape[i];
max_entry_id *= size;
EntryId index = absl::big_endian::Load32(&key_padded[i * 4]);
entry_id *= size;
if (index >= size) {
entry_id += (size & remaining_indices_mask);
remaining_indices_mask = 0;
} else {
entry_id += (index & remaining_indices_mask);
}
}
assert(entry_id <= max_entry_id);
if (key.size() > full_key_size) {
if (entry_id < max_entry_id) {
++entry_id;
}
}
return entry_id;
}
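// Converts a key range into a half-open [lower, upper) entry-id range; an
// empty exclusive_max means unbounded and maps to the total entry count.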
std::pair<EntryId, EntryId> KeyRangeToEntryRange(std::string_view inclusive_min,
std::string_view exclusive_max,
span<const Index> grid_shape) {
EntryId lower_bound = LowerBoundToEntryId(inclusive_min, grid_shape);
EntryId upper_bound;
if (exclusive_max.empty()) {
upper_bound = static_cast<EntryId>(ProductOfExtents(grid_shape));
} else {
upper_bound = LowerBoundToEntryId(exclusive_max, grid_shape);
}
return {lower_bound, upper_bound};
}
EntryId InternalKeyLowerBoundToEntryId(std::string_view key,
int64_t num_entries_per_shard) {
char key_bytes[4] = {};
std::memcpy(key_bytes, key.data(),
std::min(static_cast<size_t>(4), key.size()));
EntryId entry_id = absl::big_endian::Load32(key_bytes);
if (entry_id > num_entries_per_shard) {
entry_id = num_entries_per_shard;
}
if (key.size() > 4 && entry_id < num_entries_per_shard) {
++entry_id;
}
return entry_id;
}
std::pair<EntryId, EntryId> InternalKeyRangeToEntryRange(
std::string_view inclusive_min, std::string_view exclusive_max,
int64_t num_entries_per_shard) {
return {InternalKeyLowerBoundToEntryId(inclusive_min, num_entries_per_shard),
exclusive_max.empty() ? EntryId(num_entries_per_shard)
: InternalKeyLowerBoundToEntryId(
exclusive_max, num_entries_per_shard)};
}
std::string EntryIdToInternalKey(EntryId entry_id) {
std::string key;
key.resize(4);
absl::big_endian::Store32(key.data(), entry_id);
return key;
}
EntryId InternalKeyToEntryId(std::string_view key) {
assert(key.size() == 4);
return static_cast<EntryId>(absl::big_endian::Load32(key.data()));
}
KeyRange KeyRangeToInternalKeyRange(const KeyRange& range,
span<const Index> grid_shape) {
auto [inclusive_min_entry, exclusive_max_entry] = KeyRangeToEntryRange(
range.inclusive_min, range.exclusive_max, grid_shape);
return KeyRange{EntryIdToInternalKey(inclusive_min_entry),
EntryIdToInternalKey(exclusive_max_entry)};
}
std::string DescribeEntryId(EntryId entry_id, span<const Index> grid_shape) {
Index indices[kMaxRank];
span<Index> indices_span(&indices[0], grid_shape.size());
GetContiguousIndices<c_order, Index>(entry_id, grid_shape, indices_span);
return tensorstore::StrCat("shard entry ", indices_span, "/", grid_shape);
}
std::string DescribeKey(std::string_view key, span<const Index> grid_shape) {
if (auto entry_id = KeyToEntryId(key, grid_shape)) {
return DescribeEntryId(*entry_id, grid_shape);
}
return tensorstore::StrCat("invalid shard entry ",
tensorstore::QuoteString(key), "/", grid_shape);
}
std::string DescribeInternalKey(std::string_view key,
span<const Index> grid_shape) {
return DescribeEntryId(InternalKeyToEntryId(key), grid_shape);
}
}
} | #include "tensorstore/kvstore/zarr3_sharding_indexed/key.h"
#include <optional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index.h"
#include "tensorstore/kvstore/key_range.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::KeyRange;
using ::tensorstore::zarr3_sharding_indexed::EntryId;
using ::tensorstore::zarr3_sharding_indexed::EntryIdToInternalKey;
using ::tensorstore::zarr3_sharding_indexed::EntryIdToKey;
using ::tensorstore::zarr3_sharding_indexed::IndicesToKey;
using ::tensorstore::zarr3_sharding_indexed::InternalKeyLowerBoundToEntryId;
using ::tensorstore::zarr3_sharding_indexed::InternalKeyRangeToEntryRange;
using ::tensorstore::zarr3_sharding_indexed::InternalKeyToEntryId;
using ::tensorstore::zarr3_sharding_indexed::KeyRangeToEntryRange;
using ::tensorstore::zarr3_sharding_indexed::KeyRangeToInternalKeyRange;
using ::tensorstore::zarr3_sharding_indexed::KeyToEntryId;
using ::tensorstore::zarr3_sharding_indexed::KeyToIndices;
using ::tensorstore::zarr3_sharding_indexed::LowerBoundToEntryId;
TEST(KeyToEntryIdTest, Basic) {
EntryId entry_id = 1 * 5 * 6 + 2 * 6 + 3;
std::string key{0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3};
Index grid_shape[] = {4, 5, 6};
EXPECT_THAT(KeyToEntryId(key, grid_shape), ::testing::Optional(entry_id));
EXPECT_THAT(EntryIdToKey(entry_id, grid_shape), ::testing::Eq(key));
}
TEST(KeyToEntryIdTest, OutOfRange) {
EXPECT_THAT(KeyToEntryId(std::string{0, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0, 3},
{{4, 5, 6}}),
::testing::Eq(std::nullopt));
}
TEST(KeyToEntryIdTest, Invalid) {
EXPECT_THAT(
KeyToEntryId(std::string{0, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0}, {{4, 5, 6}}),
::testing::Eq(std::nullopt));
}
TEST(IndicesToKeyTest, Basic) {
const Index indices[] = {1, 2, 3};
std::string key{0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3};
EXPECT_THAT(IndicesToKey(indices), ::testing::Eq(key));
Index decoded_indices[3];
EXPECT_TRUE(KeyToIndices(key, decoded_indices));
EXPECT_THAT(decoded_indices, ::testing::ElementsAreArray(indices));
EXPECT_FALSE(KeyToIndices(key.substr(1), decoded_indices));
}
TEST(LowerBoundToEntryId, Exact) {
Index grid_shape[] = {4, 5, 6};
EXPECT_THAT(LowerBoundToEntryId(
std::string{0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3}, grid_shape),
::testing::Eq(1 * 5 * 6 + 2 * 6 + 3));
}
TEST(LowerBoundToEntryId, Longer) {
Index grid_shape[] = {4, 5, 6};
EXPECT_THAT(
LowerBoundToEntryId(std::string{0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0},
grid_shape),
::testing::Eq(1 * 5 * 6 + 2 * 6 + 4));
}
TEST(KeyRangeToEntryRange, Full) {
Index grid_shape[] = {4, 5, 6};
EXPECT_THAT(KeyRangeToEntryRange("", "", grid_shape),
::testing::Pair(0, 4 * 5 * 6));
}
TEST(KeyRangeToEntryRange, Partial) {
Index grid_shape[] = {4, 5, 6};
EXPECT_THAT(
KeyRangeToEntryRange(
std::string{
0, 0, 0, 2,
0, 0, 0, 3,
0, 0, 0, 4,
},
std::string{
0, 0, 0, 2,
0, 0, 0, 4,
0, 0, 0, 5,
},
grid_shape),
::testing::Pair(2 * (5 * 6) + 3 * 6 + 4, 2 * (5 * 6) + 4 * 6 + 5));
EXPECT_THAT(KeyRangeToInternalKeyRange(KeyRange{std::string{
0, 0, 0, 2,
0, 0, 0, 3,
0, 0, 0, 4,
},
std::string{
0, 0, 0, 2,
0, 0, 0, 4,
0, 0, 0, 5,
}},
grid_shape),
KeyRange(EntryIdToInternalKey(2 * (5 * 6) + 3 * 6 + 4),
EntryIdToInternalKey(2 * (5 * 6) + 4 * 6 + 5)));
}
TEST(EntryIdToInternalKeyTest, Basic) {
EntryId entry_id = 0x01020304;
std::string internal_key{0x01, 0x02, 0x03, 0x04};
EXPECT_THAT(EntryIdToInternalKey(entry_id), ::testing::Eq(internal_key));
EXPECT_THAT(InternalKeyToEntryId(internal_key), ::testing::Eq(entry_id));
}
TEST(InternalKeyLowerBoundToEntryIdTest, Basic) {
EXPECT_THAT(InternalKeyLowerBoundToEntryId(
std::string{0x01, 0x02, 0x03, 0x04}, 0x88888888),
::testing::Eq(0x01020304));
EXPECT_THAT(InternalKeyLowerBoundToEntryId(
std::string{0x01, 0x02, 0x03, 0x04, 0x0}, 0x88888888),
::testing::Eq(0x01020304 + 1));
EXPECT_THAT(
InternalKeyLowerBoundToEntryId(std::string{0x01, 0x02, 0x03}, 0x88888888),
::testing::Eq(0x01020300));
EXPECT_THAT(InternalKeyLowerBoundToEntryId(
std::string{0x01, 0x02, 0x03, 0x04}, 0x01020302),
::testing::Eq(0x01020302));
}
TEST(InternalKeyRangeToEntryRange, Basic) {
EXPECT_THAT(InternalKeyRangeToEntryRange(std::string{0x01, 0x02, 0x03, 0x04},
std::string{0x01, 0x02, 0x03, 0x07},
0x88888888),
::testing::Pair(0x01020304, 0x01020307));
EXPECT_THAT(InternalKeyRangeToEntryRange(std::string{0x01, 0x02, 0x03, 0x04},
{}, 0x88888888),
::testing::Pair(0x01020304, 0x88888888));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/zarr3_sharding_indexed/key.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/zarr3_sharding_indexed/key_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
40874743-137f-4d51-be46-efd8d9dcd11c | cpp | tensorflow/tensorflow | unidirectional_sequence_lstm | tensorflow/lite/kernels/unidirectional_sequence_lstm.cc | tensorflow/lite/kernels/unidirectional_sequence_lstm_test.cc | #include <math.h>
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstring>
#include <memory>
#include <vector>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/kernel_utils.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/tensor_utils.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/lstm_eval.h"
#include "tensorflow/lite/kernels/lstm_shared.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace unidirectional_sequence_lstm {
namespace {
struct OpData {
bool use_layer_norm;
int scratch_tensor_index;
bool compute_row_sums = false;
bool recurrent_to_input_is_diag = false;
bool recurrent_to_forget_is_diag = false;
bool recurrent_to_cell_is_diag = false;
bool recurrent_to_output_is_diag = false;
lstm_eval::IntegerLstmParameter integer_lstm_param;
};
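// Derives the integer LSTM (8x8->16) parameters: cell/projection clips are
// quantized against the state and output scales, and each effective scale is
// weight_scale * input_scale / intermediate_scale, folded into a
// (multiplier, shift) pair via QuantizeMultiplier.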
TfLiteStatus PopulateQuantizedLstmParams8x8_16(
TfLiteContext* context, TfLiteNode* node,
lstm_eval::IntegerLstmParameter* integer_lstm_param) {
const auto* params =
static_cast<TfLiteUnidirectionalSequenceLSTMParams*>(node->builtin_data);
const float cell_clip = params->cell_clip;
const float proj_clip = params->proj_clip;
const TfLiteTensor* cell_state =
GetVariableInput(context, node, lstm::full::kCellStateTensor);
TF_LITE_ENSURE(context, cell_state != nullptr);
TfLiteTensor* output_tensor;
TF_LITE_ENSURE_OK(
context,
GetOutputSafe(context, node, lstm::full::kOutputTensor, &output_tensor));
TF_LITE_ENSURE(context,
cell_state->quantization.type != kTfLiteNoQuantization);
auto* cell_state_params =
static_cast<TfLiteAffineQuantization*>(cell_state->quantization.params);
TF_LITE_ENSURE(context,
output_tensor->quantization.type != kTfLiteNoQuantization);
auto* proj_params = static_cast<TfLiteAffineQuantization*>(
output_tensor->quantization.params);
if (cell_clip > 0.0) {
integer_lstm_param->quantized_cell_clip = static_cast<int16_t>(std::min(
std::max(cell_clip / cell_state_params->scale->data[0], -32768.0f),
32767.0f));
} else {
integer_lstm_param->quantized_cell_clip = 0;
}
if (proj_clip > 0.0) {
integer_lstm_param->quantized_proj_clip = static_cast<int8_t>(std::min(
std::max(proj_clip / proj_params->scale->data[0], -128.0f), 127.0f));
} else {
integer_lstm_param->quantized_proj_clip = 0;
}
OpData* op_data = static_cast<OpData*>(node->user_data);
const bool use_layer_norm = op_data->use_layer_norm;
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, lstm::full::kInputTensor, &input));
const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor(
context, node, lstm::full::kInputToInputWeightsTensor);
const TfLiteTensor* input_to_forget_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kInputToForgetWeightsTensor,
&input_to_forget_weights));
const TfLiteTensor* input_to_cell_weights;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node,
lstm::full::kInputToCellWeightsTensor,
&input_to_cell_weights));
const TfLiteTensor* input_to_output_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kInputToOutputWeightsTensor,
&input_to_output_weights));
const TfLiteTensor* recurrent_to_input_weights = GetOptionalInputTensor(
context, node, lstm::full::kRecurrentToInputWeightsTensor);
const TfLiteTensor* recurrent_to_forget_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kRecurrentToForgetWeightsTensor,
&recurrent_to_forget_weights));
const TfLiteTensor* recurrent_to_cell_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kRecurrentToCellWeightsTensor,
&recurrent_to_cell_weights));
const TfLiteTensor* recurrent_to_output_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kRecurrentToOutputWeightsTensor,
&recurrent_to_output_weights));
const TfLiteTensor* cell_to_input_weights = GetOptionalInputTensor(
context, node, lstm::full::kCellToInputWeightsTensor);
const TfLiteTensor* cell_to_forget_weights = GetOptionalInputTensor(
context, node, lstm::full::kCellToForgetWeightsTensor);
const TfLiteTensor* cell_to_output_weights = GetOptionalInputTensor(
context, node, lstm::full::kCellToOutputWeightsTensor);
const TfLiteTensor* input_layer_norm_coefficients = GetOptionalInputTensor(
context, node, lstm::full::kInputLayerNormCoefficientsTensor);
const TfLiteTensor* forget_layer_norm_coefficients = GetOptionalInputTensor(
context, node, lstm::full::kForgetLayerNormCoefficientsTensor);
const TfLiteTensor* cell_layer_norm_coefficients = GetOptionalInputTensor(
context, node, lstm::full::kCellLayerNormCoefficientsTensor);
const TfLiteTensor* output_layer_norm_coefficients = GetOptionalInputTensor(
context, node, lstm::full::kOutputLayerNormCoefficientsTensor);
const TfLiteTensor* projection_weights = GetOptionalInputTensor(
context, node, lstm::full::kProjectionWeightsTensor);
TfLiteTensor* output_state =
GetVariableInput(context, node, lstm::full::kOutputStateTensor);
TF_LITE_ENSURE(context, output_state != nullptr);
const bool use_cifg = (input_to_input_weights == nullptr);
const bool use_peephole = (cell_to_output_weights != nullptr);
const bool use_projection = (projection_weights != nullptr);
std::vector<float> intermediate_scale;
std::vector<int32> intermediate_zp;
for (int i = 0; i < 4; ++i) {
if (use_layer_norm) {
TfLiteTensor* intermediate;
TF_LITE_ENSURE_OK(context,
GetIntermediatesSafe(context, node, i, &intermediate));
TF_LITE_ENSURE(context,
intermediate->quantization.type != kTfLiteNoQuantization);
auto* params = static_cast<TfLiteAffineQuantization*>(
intermediate->quantization.params);
intermediate_scale.push_back(params->scale->data[0]);
intermediate_zp.push_back(params->zero_point->data[0]);
} else {
intermediate_scale.push_back(std::pow(2, -12));
intermediate_zp.push_back(0);
}
}
TfLiteTensor* hidden;
TF_LITE_ENSURE_OK(context, GetIntermediatesSafe(context, node, 4, &hidden));
TF_LITE_ENSURE(context, hidden->quantization.type != kTfLiteNoQuantization);
auto* hidden_params =
static_cast<TfLiteAffineQuantization*>(hidden->quantization.params);
intermediate_scale.push_back(hidden_params->scale->data[0]);
intermediate_zp.push_back(hidden_params->zero_point->data[0]);
const float default_scale = 1.0;
float input_scale = default_scale;
float input_to_input_weight_scale = default_scale;
float recurrent_to_input_weight_scale = default_scale;
float cell_to_input_weight_scale = default_scale;
float input_to_forget_weight_scale = default_scale;
float recurrent_to_forget_weight_scale = default_scale;
float cell_to_forget_weight_scale = default_scale;
float input_to_cell_weight_scale = default_scale;
float recurrent_to_cell_weight_scale = default_scale;
float input_to_output_weight_scale = default_scale;
float recurrent_to_output_weight_scale = default_scale;
float cell_to_output_weight_scale = default_scale;
float projection_weight_scale = default_scale;
float layer_norm_input_scale = default_scale;
float layer_norm_forget_scale = default_scale;
float layer_norm_cell_scale = default_scale;
float layer_norm_output_scale = default_scale;
float output_state_scale = default_scale;
int cell_scale = 1;
float effective_input_to_input_scale = default_scale;
float effective_recurrent_to_input_scale = default_scale;
float effective_cell_to_input_scale = default_scale;
float effective_input_to_forget_scale = default_scale;
float effective_recurrent_to_forget_scale = default_scale;
float effective_cell_to_forget_scale = default_scale;
float effective_input_to_cell_scale = default_scale;
float effective_recurrent_to_cell_scale = default_scale;
float effective_input_to_output_scale = default_scale;
float effective_recurrent_to_output_scale = default_scale;
float effective_cell_to_output_scale = default_scale;
float effective_proj_scale = default_scale;
float effective_hidden_scale = default_scale;
if (!use_cifg) {
input_to_input_weight_scale = input_to_input_weights->params.scale;
recurrent_to_input_weight_scale = recurrent_to_input_weights->params.scale;
}
if (use_peephole) {
if (!use_cifg) {
cell_to_input_weight_scale = cell_to_input_weights->params.scale;
}
cell_to_forget_weight_scale = cell_to_forget_weights->params.scale;
cell_to_output_weight_scale = cell_to_output_weights->params.scale;
}
if (use_layer_norm) {
if (!use_cifg) {
layer_norm_input_scale = input_layer_norm_coefficients->params.scale;
}
layer_norm_forget_scale = forget_layer_norm_coefficients->params.scale;
layer_norm_cell_scale = cell_layer_norm_coefficients->params.scale;
layer_norm_output_scale = output_layer_norm_coefficients->params.scale;
}
if (use_projection) {
projection_weight_scale = projection_weights->params.scale;
}
output_state_scale = output_state->params.scale;
input_to_forget_weight_scale = input_to_forget_weights->params.scale;
input_to_cell_weight_scale = input_to_cell_weights->params.scale;
input_to_output_weight_scale = input_to_output_weights->params.scale;
recurrent_to_forget_weight_scale = recurrent_to_forget_weights->params.scale;
recurrent_to_cell_weight_scale = recurrent_to_cell_weights->params.scale;
recurrent_to_output_weight_scale = recurrent_to_output_weights->params.scale;
TF_LITE_ENSURE(context, CheckedLog2(cell_state->params.scale, &cell_scale));
integer_lstm_param->cell_scale = cell_scale;
input_scale = input->params.scale;
if (!use_cifg) {
effective_input_to_input_scale =
input_to_input_weight_scale * input_scale / intermediate_scale[0];
effective_recurrent_to_input_scale = recurrent_to_input_weight_scale *
output_state_scale /
intermediate_scale[0];
}
effective_input_to_forget_scale =
input_to_forget_weight_scale * input_scale / intermediate_scale[1];
effective_recurrent_to_forget_scale = recurrent_to_forget_weight_scale *
output_state_scale /
intermediate_scale[1];
effective_input_to_cell_scale =
input_to_cell_weight_scale * input_scale / intermediate_scale[2];
effective_recurrent_to_cell_scale = recurrent_to_cell_weight_scale *
output_state_scale /
intermediate_scale[2];
effective_input_to_output_scale =
input_to_output_weight_scale * input_scale / intermediate_scale[3];
effective_recurrent_to_output_scale = recurrent_to_output_weight_scale *
output_state_scale /
intermediate_scale[3];
effective_hidden_scale =
std::pow(2, -15) / intermediate_scale[4] * std::pow(2, -15);
effective_proj_scale =
projection_weight_scale * intermediate_scale[4] / output_state_scale;
if (use_peephole) {
if (!use_cifg) {
effective_cell_to_input_scale = std::pow(2, cell_scale) *
cell_to_input_weight_scale /
intermediate_scale[0];
}
effective_cell_to_forget_scale = std::pow(2, cell_scale) *
cell_to_forget_weight_scale /
intermediate_scale[1];
effective_cell_to_output_scale = std::pow(2, cell_scale) *
cell_to_output_weight_scale /
intermediate_scale[3];
}
QuantizeMultiplier(effective_input_to_input_scale,
&integer_lstm_param->effective_input_to_input_scale_a,
&integer_lstm_param->effective_input_to_input_scale_b);
QuantizeMultiplier(effective_recurrent_to_input_scale,
&integer_lstm_param->effective_recurrent_to_input_scale_a,
&integer_lstm_param->effective_recurrent_to_input_scale_b);
QuantizeMultiplier(effective_cell_to_input_scale,
&integer_lstm_param->effective_cell_to_input_scale_a,
&integer_lstm_param->effective_cell_to_input_scale_b);
QuantizeMultiplier(effective_input_to_forget_scale,
&integer_lstm_param->effective_input_to_forget_scale_a,
&integer_lstm_param->effective_input_to_forget_scale_b);
QuantizeMultiplier(
effective_recurrent_to_forget_scale,
&integer_lstm_param->effective_recurrent_to_forget_scale_a,
&integer_lstm_param->effective_recurrent_to_forget_scale_b);
QuantizeMultiplier(effective_cell_to_forget_scale,
&integer_lstm_param->effective_cell_to_forget_scale_a,
&integer_lstm_param->effective_cell_to_forget_scale_b);
QuantizeMultiplier(effective_input_to_cell_scale,
&integer_lstm_param->effective_input_to_cell_scale_a,
&integer_lstm_param->effective_input_to_cell_scale_b);
QuantizeMultiplier(effective_recurrent_to_cell_scale,
&integer_lstm_param->effective_recurrent_to_cell_scale_a,
&integer_lstm_param->effective_recurrent_to_cell_scale_b);
QuantizeMultiplier(effective_input_to_output_scale,
&integer_lstm_param->effective_input_to_output_scale_a,
&integer_lstm_param->effective_input_to_output_scale_b);
QuantizeMultiplier(
effective_recurrent_to_output_scale,
&integer_lstm_param->effective_recurrent_to_output_scale_a,
&integer_lstm_param->effective_recurrent_to_output_scale_b);
QuantizeMultiplier(effective_cell_to_output_scale,
&integer_lstm_param->effective_cell_to_output_scale_a,
&integer_lstm_param->effective_cell_to_output_scale_b);
QuantizeMultiplier(effective_proj_scale,
&integer_lstm_param->effective_proj_scale_a,
&integer_lstm_param->effective_proj_scale_b);
QuantizeMultiplier(effective_hidden_scale,
&integer_lstm_param->effective_hidden_scale_a,
&integer_lstm_param->effective_hidden_scale_b);
QuantizeMultiplier(layer_norm_input_scale,
&integer_lstm_param->layer_norm_input_scale_a,
&integer_lstm_param->layer_norm_input_scale_b);
QuantizeMultiplier(layer_norm_forget_scale,
&integer_lstm_param->layer_norm_forget_scale_a,
&integer_lstm_param->layer_norm_forget_scale_b);
QuantizeMultiplier(layer_norm_cell_scale,
&integer_lstm_param->layer_norm_cell_scale_a,
&integer_lstm_param->layer_norm_cell_scale_b);
QuantizeMultiplier(layer_norm_output_scale,
&integer_lstm_param->layer_norm_output_scale_a,
&integer_lstm_param->layer_norm_output_scale_b);
integer_lstm_param->hidden_zp = intermediate_zp[4];
if (!use_cifg) {
integer_lstm_param->input_variance_guard =
std::max(1, static_cast<int32_t>(10000 * layer_norm_input_scale));
}
integer_lstm_param->forget_variance_guard =
std::max(1, static_cast<int32_t>(10000 * layer_norm_forget_scale));
integer_lstm_param->cell_variance_guard =
std::max(1, static_cast<int32_t>(10000 * layer_norm_cell_scale));
integer_lstm_param->output_variance_guard =
std::max(1, static_cast<int32_t>(10000 * layer_norm_output_scale));
return kTfLiteOk;
}
}
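// Indices of the temporary tensors allocated for scratch buffers and for
// on-the-fly quantization state (quantized copies, scaling factors, zero
// points and row sums).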
enum TemporaryTensor {
kScratchBuffer = 0,
kInputQuantized = 1,
kOutputStateQuantized = 2,
kCellStateQuantized = 3,
kInputScalingFactors = 4,
kOutputStateScalingFactors = 5,
kProductScalingFactors = 6,
kRecoveredCellWeights = 7,
kAccumScratch = 8,
kInputZeroPoints = 9,
kOutputStateZeroPoints = 10,
kRowSums = 11,
kNumTemporaryTensors = 12,
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* op_data = new OpData();
context->AddTensors(context, kNumTemporaryTensors,
&op_data->scratch_tensor_index);
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
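// Validates the shapes and types of the weight, bias, peephole, projection
// and layer-norm tensors against n_input/n_output/n_cell; rank-1 (diagonal)
// recurrent weights are accepted in place of full rank-2 matrices.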
TfLiteStatus CheckInputTensorDimensions(TfLiteContext* context,
TfLiteNode* node, int n_input,
int n_output, int n_cell,
bool use_layer_norm, bool is_integer) {
const auto* params = reinterpret_cast<TfLiteLSTMParams*>(node->builtin_data);
TF_LITE_ENSURE(context, params->cell_clip >= 0);
TF_LITE_ENSURE(context, params->proj_clip >= 0);
const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor(
context, node, lstm::full::kInputToInputWeightsTensor);
if (input_to_input_weights != nullptr) {
TF_LITE_ENSURE_EQ(context, input_to_input_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, input_to_input_weights->dims->data[0], n_cell);
TF_LITE_ENSURE_EQ(context, input_to_input_weights->dims->data[1], n_input);
}
const TfLiteTensor* input_to_forget_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kInputToForgetWeightsTensor,
&input_to_forget_weights));
TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->data[0], n_cell);
TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->data[1], n_input);
const TfLiteTensor* input_to_cell_weights;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node,
lstm::full::kInputToCellWeightsTensor,
&input_to_cell_weights));
TF_LITE_ENSURE_EQ(context, input_to_cell_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, input_to_cell_weights->dims->data[0], n_cell);
TF_LITE_ENSURE_EQ(context, input_to_cell_weights->dims->data[1], n_input);
const TfLiteTensor* recurrent_to_input_weights = GetOptionalInputTensor(
context, node, lstm::full::kRecurrentToInputWeightsTensor);
if (recurrent_to_input_weights != nullptr) {
bool recurrent_to_input_is_diag =
recurrent_to_input_weights->dims->size == 1;
if (recurrent_to_input_is_diag) {
TF_LITE_ENSURE_EQ(context, recurrent_to_input_weights->dims->size, 1);
} else {
TF_LITE_ENSURE_EQ(context, recurrent_to_input_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, recurrent_to_input_weights->dims->data[1],
n_output);
TF_LITE_ENSURE_TYPES_EQ(context, recurrent_to_input_weights->type,
input_to_forget_weights->type);
}
TF_LITE_ENSURE_EQ(context, recurrent_to_input_weights->dims->data[0],
n_cell);
}
const TfLiteTensor* recurrent_to_forget_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kRecurrentToForgetWeightsTensor,
&recurrent_to_forget_weights));
bool recurrent_to_forget_is_diag =
recurrent_to_forget_weights->dims->size == 1;
if (recurrent_to_forget_is_diag) {
TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->size, 1);
} else {
TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->data[1],
n_output);
TF_LITE_ENSURE_TYPES_EQ(context, recurrent_to_forget_weights->type,
input_to_forget_weights->type);
}
const TfLiteTensor* recurrent_to_cell_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kRecurrentToCellWeightsTensor,
&recurrent_to_cell_weights));
bool recurrent_to_cell_is_diag = recurrent_to_cell_weights->dims->size == 1;
if (recurrent_to_cell_is_diag) {
TF_LITE_ENSURE_EQ(context, recurrent_to_cell_weights->dims->size, 1);
} else {
TF_LITE_ENSURE_EQ(context, recurrent_to_cell_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, recurrent_to_cell_weights->dims->data[1],
n_output);
TF_LITE_ENSURE_TYPES_EQ(context, recurrent_to_cell_weights->type,
input_to_forget_weights->type);
}
const bool cifg_weights_all_or_none =
((input_to_input_weights != nullptr) &&
(recurrent_to_input_weights != nullptr)) ||
((input_to_input_weights == nullptr) &&
(recurrent_to_input_weights == nullptr));
TF_LITE_ENSURE(context, cifg_weights_all_or_none == true);
const TfLiteTensor* cell_to_input_weights = GetOptionalInputTensor(
context, node, lstm::full::kCellToInputWeightsTensor);
if (cell_to_input_weights != nullptr) {
TF_LITE_ENSURE_EQ(context, cell_to_input_weights->dims->size, 1);
TF_LITE_ENSURE_EQ(context, cell_to_input_weights->dims->data[0], n_cell);
TF_LITE_ENSURE_TYPES_EQ(
context, cell_to_input_weights->type,
is_integer ? kTfLiteInt16 : input_to_forget_weights->type);
}
const TfLiteTensor* cell_to_forget_weights = GetOptionalInputTensor(
context, node, lstm::full::kCellToForgetWeightsTensor);
if (cell_to_forget_weights != nullptr) {
TF_LITE_ENSURE_EQ(context, cell_to_forget_weights->dims->size, 1);
TF_LITE_ENSURE_EQ(context, cell_to_forget_weights->dims->data[0], n_cell);
TF_LITE_ENSURE_TYPES_EQ(
context, cell_to_forget_weights->type,
is_integer ? kTfLiteInt16 : input_to_forget_weights->type);
}
const TfLiteTensor* cell_to_output_weights = GetOptionalInputTensor(
context, node, lstm::full::kCellToOutputWeightsTensor);
if (cell_to_output_weights != nullptr) {
TF_LITE_ENSURE_EQ(context, cell_to_output_weights->dims->size, 1);
TF_LITE_ENSURE_EQ(context, cell_to_output_weights->dims->data[0], n_cell);
TF_LITE_ENSURE_TYPES_EQ(
context, cell_to_output_weights->type,
is_integer ? kTfLiteInt16 : input_to_forget_weights->type);
}
const bool use_cifg = (input_to_input_weights == nullptr);
const bool peephole_weights_all_or_none =
((cell_to_input_weights != nullptr || use_cifg) &&
(cell_to_forget_weights != nullptr) &&
(cell_to_output_weights != nullptr)) ||
((cell_to_input_weights == nullptr) &&
(cell_to_forget_weights == nullptr) &&
(cell_to_output_weights == nullptr));
TF_LITE_ENSURE(context, peephole_weights_all_or_none == true);
const TfLiteTensor* input_gate_bias =
GetOptionalInputTensor(context, node, lstm::full::kInputGateBiasTensor);
if (use_cifg) {
TF_LITE_ENSURE_EQ(context, input_gate_bias, nullptr);
} else {
TF_LITE_ENSURE_EQ(context, input_gate_bias->dims->size, 1);
TF_LITE_ENSURE_EQ(context, input_gate_bias->dims->data[0], n_cell);
if (is_integer) {
TF_LITE_ENSURE_TYPES_EQ(context, input_gate_bias->type, kTfLiteInt32);
} else {
TF_LITE_ENSURE_TYPES_EQ(context, input_gate_bias->type, kTfLiteFloat32);
}
}
const TfLiteTensor* forget_gate_bias;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, lstm::full::kForgetGateBiasTensor,
&forget_gate_bias));
TF_LITE_ENSURE_EQ(context, forget_gate_bias->dims->size, 1);
TF_LITE_ENSURE_EQ(context, forget_gate_bias->dims->data[0], n_cell);
if (is_integer) {
TF_LITE_ENSURE_TYPES_EQ(context, forget_gate_bias->type, kTfLiteInt32);
} else {
TF_LITE_ENSURE_TYPES_EQ(context, forget_gate_bias->type, kTfLiteFloat32);
}
const TfLiteTensor* cell_gate_bias;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, lstm::full::kCellGateBiasTensor,
&cell_gate_bias));
TF_LITE_ENSURE_EQ(context, cell_gate_bias->dims->size, 1);
TF_LITE_ENSURE_EQ(context, cell_gate_bias->dims->data[0], n_cell);
if (is_integer) {
TF_LITE_ENSURE_TYPES_EQ(context, cell_gate_bias->type, kTfLiteInt32);
} else {
TF_LITE_ENSURE_TYPES_EQ(context, cell_gate_bias->type, kTfLiteFloat32);
}
const TfLiteTensor* output_gate_bias;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, lstm::full::kOutputGateBiasTensor,
&output_gate_bias));
TF_LITE_ENSURE_EQ(context, output_gate_bias->dims->size, 1);
TF_LITE_ENSURE_EQ(context, output_gate_bias->dims->data[0], n_cell);
if (is_integer) {
TF_LITE_ENSURE_TYPES_EQ(context, output_gate_bias->type, kTfLiteInt32);
} else {
TF_LITE_ENSURE_TYPES_EQ(context, output_gate_bias->type, kTfLiteFloat32);
}
const TfLiteTensor* projection_weights = GetOptionalInputTensor(
context, node, lstm::full::kProjectionWeightsTensor);
if (projection_weights != nullptr) {
TF_LITE_ENSURE_EQ(context, projection_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, projection_weights->dims->data[0], n_output);
TF_LITE_ENSURE_EQ(context, projection_weights->dims->data[1], n_cell);
}
const TfLiteTensor* projection_bias =
GetOptionalInputTensor(context, node, lstm::full::kProjectionBiasTensor);
if (projection_bias != nullptr) {
TF_LITE_ENSURE_EQ(context, projection_bias->dims->size, 1);
TF_LITE_ENSURE_EQ(context, projection_bias->dims->data[0], n_output);
if (is_integer) {
TF_LITE_ENSURE_TYPES_EQ(context, projection_bias->type, kTfLiteInt32);
} else {
TF_LITE_ENSURE_TYPES_EQ(context, projection_bias->type, kTfLiteFloat32);
}
}
  const bool projection_tensors_consistent =
      ((projection_weights != nullptr) || (projection_bias == nullptr));
  TF_LITE_ENSURE(context, projection_tensors_consistent == true);
if (use_layer_norm) {
const TfLiteTensor* input_layer_norm_coefficients = GetOptionalInputTensor(
context, node, lstm::full::kInputLayerNormCoefficientsTensor);
if (use_cifg) {
TF_LITE_ENSURE_EQ(context, input_layer_norm_coefficients, nullptr);
} else {
TF_LITE_ENSURE(context, input_layer_norm_coefficients != nullptr);
TF_LITE_ENSURE_EQ(context, input_layer_norm_coefficients->dims->size, 1);
TF_LITE_ENSURE_EQ(context, input_layer_norm_coefficients->dims->data[0],
n_cell);
if (is_integer) {
TF_LITE_ENSURE_TYPES_EQ(context, input_layer_norm_coefficients->type,
kTfLiteInt16);
} else {
TF_LITE_ENSURE_TYPES_EQ(context, input_layer_norm_coefficients->type,
kTfLiteFloat32);
}
}
const TfLiteTensor* forget_layer_norm_coefficients;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node,
lstm::full::kForgetLayerNormCoefficientsTensor,
&forget_layer_norm_coefficients));
TF_LITE_ENSURE_EQ(context, forget_layer_norm_coefficients->dims->size, 1);
TF_LITE_ENSURE_EQ(context, forget_layer_norm_coefficients->dims->data[0],
n_cell);
if (is_integer) {
TF_LITE_ENSURE_TYPES_EQ(context, forget_layer_norm_coefficients->type,
kTfLiteInt16);
} else {
TF_LITE_ENSURE_TYPES_EQ(context, forget_layer_norm_coefficients->type,
kTfLiteFloat32);
}
const TfLiteTensor* cell_layer_norm_coefficients;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node,
lstm::full::kCellLayerNormCoefficientsTensor,
&cell_layer_norm_coefficients));
TF_LITE_ENSURE_EQ(context, cell_layer_norm_coefficients->dims->size, 1);
TF_LITE_ENSURE_EQ(context, cell_layer_norm_coefficients->dims->data[0],
n_cell);
if (is_integer) {
TF_LITE_ENSURE_TYPES_EQ(context, cell_layer_norm_coefficients->type,
kTfLiteInt16);
} else {
TF_LITE_ENSURE_TYPES_EQ(context, cell_layer_norm_coefficients->type,
kTfLiteFloat32);
}
const TfLiteTensor* output_layer_norm_coefficients;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node,
lstm::full::kOutputLayerNormCoefficientsTensor,
&output_layer_norm_coefficients));
TF_LITE_ENSURE_EQ(context, output_layer_norm_coefficients->dims->size, 1);
TF_LITE_ENSURE_EQ(context, output_layer_norm_coefficients->dims->data[0],
n_cell);
if (is_integer) {
TF_LITE_ENSURE_TYPES_EQ(context, output_layer_norm_coefficients->type,
kTfLiteInt16);
} else {
TF_LITE_ENSURE_TYPES_EQ(context, output_layer_norm_coefficients->type,
kTfLiteFloat32);
}
}
return kTfLiteOk;
}
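// Precomputes bias[row] + zero_point * sum(weights[row, :]) for every row of
// `weight_tensor`, so the integer kernel can fold the tensor zero point into
// a per-row effective bias once instead of on every invocation. A null weight
// tensor is a no-op; a null bias contributes zeros.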
TfLiteStatus PrecomputeZeroPointTimesWeightWithBias(
TfLiteContext* context, int32_t zero_point,
const TfLiteTensor* weight_tensor, const TfLiteTensor* bias_tensor,
std::unique_ptr<int32_t[]>* output) {
if (weight_tensor == nullptr) {
return kTfLiteOk;
}
const RuntimeShape& weight_shape = GetTensorShape(weight_tensor);
TF_LITE_ENSURE_EQ(context, weight_shape.DimensionsCount(), 2);
const int row = weight_shape.Dims(0);
const int col = weight_shape.Dims(1);
output->reset(new int32_t[row]);
if (bias_tensor == nullptr) {
memset(output->get(), 0, row * sizeof(int32_t));
} else {
const int32_t* bias = GetTensorData<int32_t>(bias_tensor);
memcpy(output->get(), bias, row * sizeof(int32_t));
}
if (zero_point != 0) {
const int8_t* weight = GetTensorData<int8_t>(weight_tensor);
tensor_utils::MatrixScalarMultiplyAccumulate(weight, zero_point, row, col,
output->get());
}
return kTfLiteOk;
}
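// Precomputes the effective bias vectors (zero point times weights, plus the
// gate bias) for the input, forget, cell and output gates as well as the
// projection. The input and output-state zero points are negated here so the
// precomputed terms can simply be added during evaluation; gate biases are
// folded in only for non-layer-norm models.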
TfLiteStatus PopulatePrecomputedZPTimesWeightsWithBias(TfLiteContext* context,
OpData* op_data,
TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, lstm::full::kInputTensor, &input));
const TfLiteTensor* output_state =
GetVariableInput(context, node, lstm::full::kOutputStateTensor);
TF_LITE_ENSURE(context, output_state != nullptr);
const int32_t input_zero_point = -input->params.zero_point;
const int32_t output_state_zero_point = -output_state->params.zero_point;
const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor(
context, node, lstm::full::kInputToInputWeightsTensor);
const TfLiteTensor* input_to_forget_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kInputToForgetWeightsTensor,
&input_to_forget_weights));
const TfLiteTensor* input_to_cell_weights;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node,
lstm::full::kInputToCellWeightsTensor,
&input_to_cell_weights));
const TfLiteTensor* input_to_output_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kInputToOutputWeightsTensor,
&input_to_output_weights));
const TfLiteTensor* recurrent_to_input_weights = GetOptionalInputTensor(
context, node, lstm::full::kRecurrentToInputWeightsTensor);
const TfLiteTensor* recurrent_to_forget_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kRecurrentToForgetWeightsTensor,
&recurrent_to_forget_weights));
const TfLiteTensor* recurrent_to_cell_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kRecurrentToCellWeightsTensor,
&recurrent_to_cell_weights));
const TfLiteTensor* recurrent_to_output_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kRecurrentToOutputWeightsTensor,
&recurrent_to_output_weights));
const TfLiteTensor* projection_weights = GetOptionalInputTensor(
context, node, lstm::full::kProjectionWeightsTensor);
const TfLiteTensor* projection_bias =
GetOptionalInputTensor(context, node, lstm::full::kProjectionBiasTensor);
lstm_eval::IntegerLstmParameter* integer_lstm_params =
&op_data->integer_lstm_param;
const TfLiteTensor* intermediate =
&context->tensors[node->intermediates->data[4]];
TF_LITE_ENSURE(context,
intermediate->quantization.type != kTfLiteNoQuantization);
const auto* params =
static_cast<TfLiteAffineQuantization*>(intermediate->quantization.params);
const int32_t hidden_zp = params->zero_point->data[0];
const bool is_layer_norm = op_data->use_layer_norm;
const TfLiteTensor* forget_gate_bias =
is_layer_norm
? nullptr
: GetInput(context, node, lstm::full::kForgetGateBiasTensor);
TF_LITE_ENSURE_OK(
context,
PrecomputeZeroPointTimesWeightWithBias(
context, input_zero_point, input_to_forget_weights, forget_gate_bias,
&(integer_lstm_params->input_to_forget_effective_bias)));
TF_LITE_ENSURE_OK(
context,
PrecomputeZeroPointTimesWeightWithBias(
context, output_state_zero_point, recurrent_to_forget_weights,
nullptr, &(integer_lstm_params->recurrent_to_forget_effective_bias)));
const TfLiteTensor* cell_gate_bias =
is_layer_norm ? nullptr
: GetInput(context, node, lstm::full::kCellGateBiasTensor);
TF_LITE_ENSURE_OK(
context,
PrecomputeZeroPointTimesWeightWithBias(
context, input_zero_point, input_to_cell_weights, cell_gate_bias,
&(integer_lstm_params->input_to_cell_effective_bias)));
TF_LITE_ENSURE_OK(
context,
PrecomputeZeroPointTimesWeightWithBias(
context, output_state_zero_point, recurrent_to_cell_weights, nullptr,
&(integer_lstm_params->recurrent_to_cell_effective_bias)));
const TfLiteTensor* output_gate_bias =
is_layer_norm
? nullptr
: GetInput(context, node, lstm::full::kOutputGateBiasTensor);
TF_LITE_ENSURE_OK(
context,
PrecomputeZeroPointTimesWeightWithBias(
context, input_zero_point, input_to_output_weights, output_gate_bias,
&(integer_lstm_params->input_to_output_effective_bias)));
TF_LITE_ENSURE_OK(
context,
PrecomputeZeroPointTimesWeightWithBias(
context, output_state_zero_point, recurrent_to_output_weights,
nullptr, &(integer_lstm_params->recurrent_to_output_effective_bias)));
const TfLiteTensor* input_gate_bias =
is_layer_norm ? nullptr
: GetInput(context, node, lstm::full::kInputGateBiasTensor);
TF_LITE_ENSURE_OK(
context,
PrecomputeZeroPointTimesWeightWithBias(
context, input_zero_point, input_to_input_weights, input_gate_bias,
&(integer_lstm_params->input_to_input_effective_bias)));
TF_LITE_ENSURE_OK(
context,
PrecomputeZeroPointTimesWeightWithBias(
context, output_state_zero_point, recurrent_to_input_weights, nullptr,
&(integer_lstm_params->recurrent_to_input_effective_bias)));
TF_LITE_ENSURE_OK(context,
PrecomputeZeroPointTimesWeightWithBias(
context, hidden_zp, projection_weights, projection_bias,
&(integer_lstm_params->projection_effective_bias)));
return kTfLiteOk;
}
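// Validates the node (20 inputs without layer norm, 24 with), resizes the
// output tensor, and allocates the temporaries required by the float, hybrid
// (float activations with quantized weights) and fully integer paths.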
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
const int scratch_tensor_index = op_data->scratch_tensor_index;
bool use_layer_norm = false;
if (node->inputs->size == 24) {
const TfLiteTensor* forget_layer_norm_coefficients = GetOptionalInputTensor(
context, node, lstm::full::kForgetLayerNormCoefficientsTensor);
if (forget_layer_norm_coefficients == nullptr) {
use_layer_norm = false;
} else {
use_layer_norm = true;
}
} else if (node->inputs->size == 20) {
use_layer_norm = false;
} else {
TF_LITE_KERNEL_LOG(
context, "The LSTM Full kernel expects 20 or 24 inputs. Got %d inputs",
node->inputs->size);
return kTfLiteError;
}
TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
op_data->use_layer_norm = use_layer_norm;
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, lstm::full::kInputTensor, &input));
const bool is_integer = input->type == kTfLiteInt8;
TF_LITE_ENSURE(context, input->dims->size > 1);
const auto* params =
reinterpret_cast<TfLiteUnidirectionalSequenceLSTMParams*>(
node->builtin_data);
const bool time_major = params->time_major;
const int n_batch = time_major ? input->dims->data[1] : input->dims->data[0];
const int n_input = input->dims->data[2];
const TfLiteTensor* input_to_output_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kInputToOutputWeightsTensor,
&input_to_output_weights));
const int n_cell = input_to_output_weights->dims->data[0];
TF_LITE_ENSURE_EQ(context, input_to_output_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, input_to_output_weights->dims->data[1], n_input);
const TfLiteTensor* recurrent_to_output_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kRecurrentToOutputWeightsTensor,
&recurrent_to_output_weights));
  const bool recurrent_to_output_is_diag =
      recurrent_to_output_weights->dims->size == 1;
if (recurrent_to_output_is_diag) {
TF_LITE_ENSURE_EQ(context, recurrent_to_output_weights->dims->size, 1);
} else {
TF_LITE_ENSURE_EQ(context, recurrent_to_output_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, recurrent_to_output_weights->type,
input_to_output_weights->type);
}
TF_LITE_ENSURE_EQ(context, recurrent_to_output_weights->dims->data[0],
n_cell);
const int n_output = recurrent_to_output_is_diag
? recurrent_to_output_weights->dims->data[0]
: recurrent_to_output_weights->dims->data[1];
TF_LITE_ENSURE_OK(
context, CheckInputTensorDimensions(context, node, n_input, n_output,
n_cell, use_layer_norm, is_integer));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node,
lstm::full::kOutputTensor, &output));
TfLiteTensor* output_state =
GetVariableInput(context, node, lstm::full::kOutputStateTensor);
TF_LITE_ENSURE(context, output_state != nullptr);
TfLiteTensor* cell_state =
GetVariableInput(context, node, lstm::full::kCellStateTensor);
TF_LITE_ENSURE(context, cell_state != nullptr);
TF_LITE_ENSURE_EQ(context, NumElements(output_state), n_batch * n_output);
TF_LITE_ENSURE_EQ(context, NumElements(cell_state), n_batch * n_cell);
TfLiteIntArray* output_size = TfLiteIntArrayCopy(input->dims);
output_size->data[input->dims->size - 1] = n_output;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output, output_size));
if (is_integer) {
const int num_intermediate_tensors = node->intermediates->size;
TF_LITE_ENSURE(context, num_intermediate_tensors == 5);
}
TfLiteIntArrayFree(node->temporaries);
if (IsHybridOp(input, input_to_output_weights)) {
node->temporaries = TfLiteIntArrayCreate(kNumTemporaryTensors);
} else if (is_integer) {
node->temporaries = TfLiteIntArrayCreate(6);
} else {
node->temporaries = TfLiteIntArrayCreate(1);
}
node->temporaries->data[kScratchBuffer] =
scratch_tensor_index + kScratchBuffer;
TfLiteTensor* scratch_buffer;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kScratchBuffer,
&scratch_buffer));
scratch_buffer->type = input->type;
scratch_buffer->allocation_type = kTfLiteArenaRw;
const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor(
context, node, lstm::full::kInputToInputWeightsTensor);
const bool use_cifg = (input_to_input_weights == nullptr);
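  // Per batch, the scratch buffer is sized at 5 * n_cell + 16 entries, or
  // 4 * n_cell + 16 under CIFG, where the input gate is coupled to the
  // forget gate and needs no separate gate row.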
TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(2);
scratch_buffer_size->data[0] = n_batch;
if (use_cifg) {
scratch_buffer_size->data[1] = n_cell * 4 + 16;
} else {
scratch_buffer_size->data[1] = n_cell * 5 + 16;
}
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_buffer,
scratch_buffer_size));
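  // Hybrid path: allocate temporaries for quantized copies of the input,
  // output state and cell state, their scaling factors and zero points, the
  // recovered (dequantized) cell weights, an int32 accumulator, and cached
  // row sums of the weight matrices.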
if (IsHybridOp(input, input_to_output_weights)) {
op_data->compute_row_sums = true;
node->temporaries->data[kInputQuantized] =
scratch_tensor_index + kInputQuantized;
TfLiteTensor* input_quantized;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kInputQuantized,
&input_quantized));
input_quantized->type = input_to_output_weights->type;
input_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) {
TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(input->dims);
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized,
input_quantized_size));
}
node->temporaries->data[kOutputStateQuantized] =
scratch_tensor_index + kOutputStateQuantized;
TfLiteTensor* output_state_quantized;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kOutputStateQuantized,
&output_state_quantized));
output_state_quantized->type = input_to_output_weights->type;
output_state_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(output_state_quantized->dims,
output_state->dims)) {
TfLiteIntArray* output_state_quantized_size =
TfLiteIntArrayCopy(output_state->dims);
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output_state_quantized,
output_state_quantized_size));
}
node->temporaries->data[kCellStateQuantized] =
scratch_tensor_index + kCellStateQuantized;
TfLiteTensor* cell_state_quantized;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kCellStateQuantized,
&cell_state_quantized));
cell_state_quantized->type = input_to_output_weights->type;
cell_state_quantized->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqual(cell_state_quantized->dims, cell_state->dims)) {
TfLiteIntArray* cell_state_quantized_size =
TfLiteIntArrayCopy(cell_state->dims);
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, cell_state_quantized,
cell_state_quantized_size));
}
node->temporaries->data[kInputScalingFactors] =
op_data->scratch_tensor_index + kInputScalingFactors;
TfLiteTensor* input_sf;
TF_LITE_ENSURE_OK(
context,
GetTemporarySafe(context, node, kInputScalingFactors, &input_sf));
input_sf->type = kTfLiteFloat32;
input_sf->allocation_type = kTfLiteArenaRw;
int scaling_dims[1] = {n_batch};
if (!TfLiteIntArrayEqualsArray(input_sf->dims, 1, scaling_dims)) {
TfLiteIntArray* input_sf_size = TfLiteIntArrayCreate(1);
input_sf_size->data[0] = n_batch;
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, input_sf, input_sf_size));
}
node->temporaries->data[kOutputStateScalingFactors] =
op_data->scratch_tensor_index + kOutputStateScalingFactors;
TfLiteTensor* output_state_sf;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kOutputStateScalingFactors,
&output_state_sf));
output_state_sf->type = kTfLiteFloat32;
output_state_sf->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqualsArray(output_state_sf->dims, 1, scaling_dims)) {
TfLiteIntArray* output_state_sf_size = TfLiteIntArrayCreate(1);
output_state_sf_size->data[0] = n_batch;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output_state_sf,
output_state_sf_size));
}
node->temporaries->data[kProductScalingFactors] =
scratch_tensor_index + kProductScalingFactors;
TfLiteTensor* prod_scaling_factors;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kProductScalingFactors,
&prod_scaling_factors));
prod_scaling_factors->type = kTfLiteFloat32;
prod_scaling_factors->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqualsArray(prod_scaling_factors->dims, 1,
scaling_dims)) {
TfLiteIntArray* prod_scaling_factors_size = TfLiteIntArrayCreate(1);
prod_scaling_factors_size->data[0] = n_batch;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, prod_scaling_factors,
prod_scaling_factors_size));
}
node->temporaries->data[kRecoveredCellWeights] =
scratch_tensor_index + kRecoveredCellWeights;
TfLiteTensor* recovered_cell_weights;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kRecoveredCellWeights,
&recovered_cell_weights));
recovered_cell_weights->type = kTfLiteFloat32;
recovered_cell_weights->allocation_type = kTfLiteArenaRw;
int recovered_cell_dims[1] = {n_cell};
if (!TfLiteIntArrayEqualsArray(recovered_cell_weights->dims, 1,
recovered_cell_dims)) {
TfLiteIntArray* recovered_cell_weights_size = TfLiteIntArrayCreate(1);
recovered_cell_weights_size->data[0] = n_cell;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, recovered_cell_weights,
recovered_cell_weights_size));
}
node->temporaries->data[kAccumScratch] =
scratch_tensor_index + kAccumScratch;
TfLiteTensor* accum_scratch;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kAccumScratch,
&accum_scratch));
accum_scratch->type = kTfLiteInt32;
accum_scratch->allocation_type = kTfLiteArenaRw;
int accum_scratch_dims[2] = {n_cell, n_batch};
if (!TfLiteIntArrayEqualsArray(accum_scratch->dims, 2,
accum_scratch_dims)) {
TfLiteIntArray* accum_size = TfLiteIntArrayCreate(2);
accum_size->data[0] = n_cell;
accum_size->data[1] = n_batch;
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, accum_scratch, accum_size));
}
node->temporaries->data[kInputZeroPoints] =
op_data->scratch_tensor_index + kInputZeroPoints;
TfLiteTensor* input_zp;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kInputZeroPoints, &input_zp));
input_zp->type = kTfLiteFloat32;
input_zp->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqualsArray(input_zp->dims, 1, scaling_dims)) {
TfLiteIntArray* input_zp_size = TfLiteIntArrayCreate(1);
input_zp_size->data[0] = n_batch;
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, input_zp, input_zp_size));
}
node->temporaries->data[kOutputStateZeroPoints] =
op_data->scratch_tensor_index + kOutputStateZeroPoints;
TfLiteTensor* output_state_zp;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kOutputStateZeroPoints,
&output_state_zp));
output_state_zp->type = kTfLiteFloat32;
output_state_zp->allocation_type = kTfLiteArenaRw;
if (!TfLiteIntArrayEqualsArray(output_state_zp->dims, 1, scaling_dims)) {
TfLiteIntArray* output_state_zp_size = TfLiteIntArrayCreate(1);
output_state_zp_size->data[0] = n_batch;
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output_state_zp,
output_state_zp_size));
}
node->temporaries->data[kRowSums] = scratch_tensor_index + kRowSums;
TfLiteTensor* row_sums;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kRowSums, &row_sums));
row_sums->type = kTfLiteInt32;
row_sums->name = "Lstm_row_sums";
row_sums->allocation_type = kTfLiteArenaRwPersistent;
int row_sums_rows = use_cifg ? 6 : 8;
const TfLiteTensor* projection_weights = GetOptionalInputTensor(
context, node, lstm::full::kProjectionWeightsTensor);
if (projection_weights != nullptr) {
row_sums_rows += ceil(static_cast<float>(n_output) / n_cell);
}
int row_sums_dims[2] = {row_sums_rows, n_cell};
if (!TfLiteIntArrayEqualsArray(row_sums->dims, 2, row_sums_dims)) {
TfLiteIntArray* row_sums_size = TfLiteIntArrayCreate(2);
row_sums_size->data[0] = row_sums_dims[0];
row_sums_size->data[1] = row_sums_dims[1];
TF_LITE_ENSURE_OK(
context, context->ResizeTensor(context, row_sums, row_sums_size));
}
}
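  // Fully integer path: derive the quantized LSTM parameters and set up six
  // {n_batch, n_cell} scratch tensors (int16, except scratch 4 which is int8
  // and scratch 5 which is int32), then precompute the zero-point terms.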
if (is_integer) {
PopulateQuantizedLstmParams8x8_16(context, node,
&op_data->integer_lstm_param);
for (int scratch_index = 0; scratch_index < 6; ++scratch_index) {
node->temporaries->data[scratch_index] =
op_data->scratch_tensor_index + scratch_index;
TfLiteTensor* scratch_tensor;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, scratch_index,
&scratch_tensor));
scratch_tensor->type = kTfLiteInt16;
if (scratch_index == 4) {
scratch_tensor->type = kTfLiteInt8;
} else if (scratch_index == 5) {
scratch_tensor->type = kTfLiteInt32;
}
scratch_tensor->allocation_type = kTfLiteArenaRw;
const int scratch_dimension[2] = {n_batch, n_cell};
if (!TfLiteIntArrayEqualsArray(scratch_tensor->dims, 2,
scratch_dimension)) {
TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(2);
scratch_buffer_size->data[0] = n_batch;
scratch_buffer_size->data[1] = n_cell;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, scratch_tensor,
scratch_buffer_size));
}
}
TF_LITE_ENSURE_OK(context, PopulatePrecomputedZPTimesWeightsWithBias(
context, op_data, node));
}
return kTfLiteOk;
}
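// Dispatches on the weight type: float weights use EvalFloat; uint8/int8
// weights use either the hybrid kernel (float input) or the 8x8->16 integer
// kernel (int8 input).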
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const auto* params =
reinterpret_cast<TfLiteUnidirectionalSequenceLSTMParams*>(
node->builtin_data);
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
const bool use_layer_norm = op_data->use_layer_norm;
const bool time_major = params->time_major;
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, lstm::full::kInputTensor, &input));
const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor(
context, node, lstm::full::kInputToInputWeightsTensor);
const TfLiteTensor* input_to_forget_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kInputToForgetWeightsTensor,
&input_to_forget_weights));
const TfLiteTensor* input_to_cell_weights;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node,
lstm::full::kInputToCellWeightsTensor,
&input_to_cell_weights));
const TfLiteTensor* input_to_output_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kInputToOutputWeightsTensor,
&input_to_output_weights));
const TfLiteTensor* recurrent_to_input_weights = GetOptionalInputTensor(
context, node, lstm::full::kRecurrentToInputWeightsTensor);
const TfLiteTensor* recurrent_to_forget_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kRecurrentToForgetWeightsTensor,
&recurrent_to_forget_weights));
const TfLiteTensor* recurrent_to_cell_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kRecurrentToCellWeightsTensor,
&recurrent_to_cell_weights));
const TfLiteTensor* recurrent_to_output_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kRecurrentToOutputWeightsTensor,
&recurrent_to_output_weights));
const TfLiteTensor* cell_to_input_weights = GetOptionalInputTensor(
context, node, lstm::full::kCellToInputWeightsTensor);
const TfLiteTensor* cell_to_forget_weights = GetOptionalInputTensor(
context, node, lstm::full::kCellToForgetWeightsTensor);
const TfLiteTensor* cell_to_output_weights = GetOptionalInputTensor(
context, node, lstm::full::kCellToOutputWeightsTensor);
const TfLiteTensor* input_gate_bias =
GetOptionalInputTensor(context, node, lstm::full::kInputGateBiasTensor);
const TfLiteTensor* forget_gate_bias;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, lstm::full::kForgetGateBiasTensor,
&forget_gate_bias));
const TfLiteTensor* cell_gate_bias;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, lstm::full::kCellGateBiasTensor,
&cell_gate_bias));
const TfLiteTensor* output_gate_bias;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, lstm::full::kOutputGateBiasTensor,
&output_gate_bias));
const TfLiteTensor* projection_weights = GetOptionalInputTensor(
context, node, lstm::full::kProjectionWeightsTensor);
const TfLiteTensor* projection_bias =
GetOptionalInputTensor(context, node, lstm::full::kProjectionBiasTensor);
TfLiteTensor* output_state =
GetVariableInput(context, node, lstm::full::kOutputStateTensor);
TFLITE_DCHECK(output_state != nullptr);
TfLiteTensor* cell_state =
GetVariableInput(context, node, lstm::full::kCellStateTensor);
TFLITE_DCHECK(cell_state != nullptr);
const TfLiteTensor* input_layer_norm_coefficients =
use_layer_norm
? GetOptionalInputTensor(
context, node, lstm::full::kInputLayerNormCoefficientsTensor)
: nullptr;
const TfLiteTensor* forget_layer_norm_coefficients =
use_layer_norm ? GetInput(context, node,
lstm::full::kForgetLayerNormCoefficientsTensor)
: nullptr;
const TfLiteTensor* cell_layer_norm_coefficients =
use_layer_norm ? GetInput(context, node,
lstm::full::kCellLayerNormCoefficientsTensor)
: nullptr;
const TfLiteTensor* output_layer_norm_coefficients =
use_layer_norm ? GetInput(context, node,
lstm::full::kOutputLayerNormCoefficientsTensor)
: nullptr;
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node,
lstm::full::kOutputTensor, &output));
TfLiteLSTMParams lstm_params;
lstm_params.activation = params->activation;
lstm_params.cell_clip = params->cell_clip;
lstm_params.proj_clip = params->proj_clip;
lstm_params.asymmetric_quantize_inputs = params->asymmetric_quantize_inputs;
switch (input_to_output_weights->type) {
case kTfLiteFloat32: {
TfLiteTensor* scratch_buffer;
TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, kScratchBuffer,
&scratch_buffer));
return lstm_eval::EvalFloat(
input, input_to_input_weights, input_to_forget_weights,
input_to_cell_weights, input_to_output_weights,
recurrent_to_input_weights, recurrent_to_forget_weights,
recurrent_to_cell_weights, recurrent_to_output_weights,
cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights,
input_layer_norm_coefficients, forget_layer_norm_coefficients,
cell_layer_norm_coefficients, output_layer_norm_coefficients,
          /*aux_input=*/nullptr,
          /*aux_input_to_input_weights=*/nullptr,
          /*aux_input_to_forget_weights=*/nullptr,
          /*aux_input_to_cell_weights=*/nullptr,
          /*aux_input_to_output_weights=*/nullptr, input_gate_bias,
          forget_gate_bias, cell_gate_bias, output_gate_bias,
          projection_weights, projection_bias, &lstm_params,
          /*forward_sequence=*/true, time_major,
          /*output_offset=*/0, scratch_buffer, output_state, cell_state,
          output,
(recurrent_to_input_weights == nullptr ||
recurrent_to_input_weights->dims->size == 1),
(recurrent_to_forget_weights->dims->size == 1),
(recurrent_to_cell_weights->dims->size == 1),
(recurrent_to_output_weights->dims->size == 1),
CpuBackendContext::GetFromContext(context));
}
case kTfLiteUInt8:
case kTfLiteInt8: {
const bool is_hybrid = input->type == kTfLiteFloat32;
if (is_hybrid) {
TfLiteTensor* scratch_buffer;
TF_LITE_ENSURE_OK(
context,
GetTemporarySafe(context, node, kScratchBuffer, &scratch_buffer));
TfLiteTensor* row_sums;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kRowSums, &row_sums));
const int row_sums_size = row_sums->dims->data[0];
return lstm_eval::EvalHybrid(
            input, input_to_input_weights,
            /*input_to_input_weights_ledger=*/nullptr, input_to_forget_weights,
            /*input_to_forget_weights_ledger=*/nullptr, input_to_cell_weights,
            /*input_to_cell_weights_ledger=*/nullptr, input_to_output_weights,
            /*input_to_output_weights_ledger=*/nullptr,
            recurrent_to_input_weights,
            /*recurrent_to_input_weights_ledger=*/nullptr,
            recurrent_to_forget_weights,
            /*recurrent_to_forget_weights_ledger=*/nullptr,
            recurrent_to_cell_weights,
            /*recurrent_to_cell_weights_ledger=*/nullptr,
            recurrent_to_output_weights,
            /*recurrent_to_output_weights_ledger=*/nullptr,
            cell_to_input_weights, cell_to_forget_weights,
            cell_to_output_weights, input_layer_norm_coefficients,
            forget_layer_norm_coefficients, cell_layer_norm_coefficients,
            output_layer_norm_coefficients,
            /*aux_input=*/nullptr,
            /*aux_input_to_input_weights=*/nullptr,
            /*aux_input_to_forget_weights=*/nullptr,
            /*aux_input_to_cell_weights=*/nullptr,
            /*aux_input_to_output_weights=*/nullptr, input_gate_bias,
            forget_gate_bias, cell_gate_bias, output_gate_bias,
            projection_weights, /*projection_weights_ledger=*/nullptr,
            projection_bias, &lstm_params,
            /*forward_sequence=*/true, time_major,
            /*output_offset=*/0, scratch_buffer,
            GetTemporary(context, node, kInputScalingFactors),
            /*aux_input_sf=*/nullptr,
            GetTemporary(context, node, kOutputStateScalingFactors),
            GetTemporary(context, node, kProductScalingFactors),
            GetTemporary(context, node, kRecoveredCellWeights),
            GetTemporary(context, node, kInputQuantized),
            /*aux_input_quantized=*/nullptr,
            GetTemporary(context, node, kOutputStateQuantized),
            GetTemporary(context, node, kCellStateQuantized), output_state,
            cell_state, GetTemporary(context, node, kAccumScratch), output,
            GetTemporary(context, node, kInputZeroPoints),
            /*aux_input_zp=*/nullptr,
            GetTemporary(context, node, kOutputStateZeroPoints), row_sums,
row_sums_size, &op_data->compute_row_sums,
(recurrent_to_input_weights == nullptr ||
recurrent_to_input_weights->dims->size == 1),
(recurrent_to_forget_weights->dims->size == 1),
(recurrent_to_cell_weights->dims->size == 1),
(recurrent_to_output_weights->dims->size == 1),
CpuBackendContext::GetFromContext(context));
} else {
TfLiteTensor* scratch0;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 0, &scratch0));
TfLiteTensor* scratch1;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 1, &scratch1));
TfLiteTensor* scratch2;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 2, &scratch2));
TfLiteTensor* scratch3;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 3, &scratch3));
TfLiteTensor* scratch4;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 4, &scratch4));
TfLiteTensor* scratch5;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, 5, &scratch5));
return lstm_eval::EvalInteger8x8_16(
input, input_to_input_weights, input_to_forget_weights,
input_to_cell_weights, input_to_output_weights,
recurrent_to_input_weights, recurrent_to_forget_weights,
recurrent_to_cell_weights, recurrent_to_output_weights,
cell_to_input_weights, cell_to_forget_weights,
cell_to_output_weights, input_layer_norm_coefficients,
forget_layer_norm_coefficients, cell_layer_norm_coefficients,
output_layer_norm_coefficients, input_gate_bias, forget_gate_bias,
cell_gate_bias, output_gate_bias, projection_weights,
          projection_bias, &lstm_params, /*forward_sequence=*/true,
time_major, &op_data->integer_lstm_param, output_state, cell_state,
output, scratch0, scratch1, scratch2, scratch3, scratch4, scratch5,
CpuBackendContext::GetFromContext(context));
}
}
default:
TF_LITE_KERNEL_LOG(context, "Type %s is not currently supported.",
TfLiteTypeGetName(input_to_output_weights->type));
return kTfLiteError;
}
}
}
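// Registration entry point for the builtin operator resolver.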
TfLiteRegistration* Register_UNIDIRECTIONAL_SEQUENCE_LSTM() {
static TfLiteRegistration r = {unidirectional_sequence_lstm::Init,
unidirectional_sequence_lstm::Free,
unidirectional_sequence_lstm::Prepare,
unidirectional_sequence_lstm::Eval};
return &r;
}
}
}
} | #include <tuple>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "benchmark/benchmark.h"
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/kernels/unidirectional_sequence_lstm_test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
class BaseUnidirectionalLstmTest : public ::testing::TestWithParam<bool> {
protected:
std::vector<float> input_to_input_weights_;
std::vector<float> input_to_cell_weights_;
std::vector<float> input_to_forget_weights_;
std::vector<float> input_to_output_weights_;
std::vector<float> input_gate_bias_;
std::vector<float> cell_gate_bias_;
std::vector<float> forget_gate_bias_;
std::vector<float> output_gate_bias_;
std::vector<float> recurrent_to_input_weights_;
std::vector<float> recurrent_to_cell_weights_;
std::vector<float> recurrent_to_forget_weights_;
std::vector<float> recurrent_to_output_weights_;
std::vector<float> cell_to_input_weights_;
std::vector<float> cell_to_forget_weights_;
std::vector<float> cell_to_output_weights_;
std::vector<float> projection_weights_;
std::vector<float> projection_bias_;
std::vector<std::vector<float>> lstm_input_;
std::vector<std::vector<float>> lstm_golden_output_;
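  // Feeds `input` to the model in time-major or batch-major layout, invokes
  // it, and compares the result against the golden output within `tolerance`.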
void VerifyGoldens(const std::vector<std::vector<float>>& input,
const std::vector<std::vector<float>>& output,
UnidirectionalLSTMOpModel* lstm, float tolerance = 1e-5,
bool time_major = true) {
const int num_batches = input.size();
EXPECT_GT(num_batches, 0);
const int num_inputs = lstm->num_inputs();
EXPECT_GT(num_inputs, 0);
const int input_sequence_size = input[0].size() / num_inputs;
EXPECT_GT(input_sequence_size, 0);
if (time_major) {
for (int i = 0; i < input_sequence_size; ++i) {
for (int b = 0; b < num_batches; ++b) {
const float* batch_start = input[b].data() + i * num_inputs;
const float* batch_end = batch_start + num_inputs;
lstm->SetInput(((i * num_batches) + b) * num_inputs, batch_start,
batch_end);
}
}
} else {
for (int b = 0; b < num_batches; ++b) {
const float* batch_start = input[b].data();
const float* batch_end = batch_start + input_sequence_size * num_inputs;
lstm->SetInput(b * input_sequence_size * num_inputs, batch_start,
batch_end);
}
}
ASSERT_EQ(lstm->Invoke(), kTfLiteOk);
const int num_outputs = lstm->num_outputs();
EXPECT_GT(num_outputs, 0);
std::vector<float> expected;
if (time_major) {
for (int i = 0; i < input_sequence_size; ++i) {
for (int b = 0; b < num_batches; ++b) {
const float* golden_start_batch = output[b].data() + i * num_outputs;
const float* golden_end_batch = golden_start_batch + num_outputs;
expected.insert(expected.end(), golden_start_batch, golden_end_batch);
}
}
} else {
for (int b = 0; b < num_batches; ++b) {
const float* golden_batch_start = output[b].data();
const float* golden_batch_end =
golden_batch_start + input_sequence_size * num_outputs;
expected.insert(expected.end(), golden_batch_start, golden_batch_end);
}
}
EXPECT_THAT(lstm->GetOutput(),
ElementsAreArray(ArrayFloatNear(expected, tolerance)));
}
};
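// Weights and golden outputs for an LSTM with all gates (no CIFG), no
// peephole connections, no projection layer and no clipping.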
class NoCifgNoPeepholeNoProjectionNoClippingUnidirectionalLstmTest
: public BaseUnidirectionalLstmTest {
void SetUp() override {
input_to_input_weights_ = {-0.45018822, -0.02338299, -0.0870589,
-0.34550029, 0.04266912, -0.15680569,
-0.34856534, 0.43890524};
input_to_cell_weights_ = {-0.50013041, 0.1370284, 0.11810488, 0.2013163,
-0.20583314, 0.44344562, 0.22077113, -0.29909778};
input_to_forget_weights_ = {0.09701663, 0.20334584, -0.50592935,
-0.31343272, -0.40032279, 0.44781327,
0.01387155, -0.35593212};
input_to_output_weights_ = {-0.25065863, -0.28290087, 0.04613829,
0.40525138, 0.44272184, 0.03897077,
-0.1556896, 0.19487578};
input_gate_bias_ = {0., 0., 0., 0.};
cell_gate_bias_ = {0., 0., 0., 0.};
forget_gate_bias_ = {1., 1., 1., 1.};
output_gate_bias_ = {0., 0., 0., 0.};
recurrent_to_input_weights_ = {
-0.0063535, -0.2042388, 0.31454784, -0.35746509,
0.28902304, 0.08183324, -0.16555229, 0.02286911,
-0.13566875, 0.03034258, 0.48091322, -0.12528998,
0.24077177, -0.51332325, -0.33502164, 0.10629296};
recurrent_to_cell_weights_ = {
-0.3407414, 0.24443203, -0.2078532, 0.26320225,
0.05695659, -0.00123841, -0.4744786, -0.35869038,
-0.06418842, -0.13502428, -0.501764, 0.22830659,
-0.46367589, 0.26016325, -0.03894562, -0.16368064};
recurrent_to_forget_weights_ = {
-0.48684245, -0.06655136, 0.42224967, 0.2112639,
0.27654213, 0.20864892, -0.07646349, 0.45877004,
0.00141793, -0.14609534, 0.36447752, 0.09196436,
0.28053468, 0.01560611, -0.20127171, -0.01140004};
recurrent_to_output_weights_ = {
0.43385774, -0.17194885, 0.2718237, 0.09215671,
0.24107647, -0.39835793, 0.18212086, 0.01301402,
0.48572797, -0.50656658, 0.20047462, -0.20607421,
-0.51818722, -0.15390486, 0.0468148, 0.39922136};
lstm_input_ = {{2., 3., 3., 4., 1., 1.}};
lstm_golden_output_ = {{-0.02973187, 0.1229473, 0.20885126, -0.15358765,
-0.03716109, 0.12507336, 0.41193449, -0.20860538,
-0.15053082, 0.09120187, 0.24278517, -0.12222792}};
}
};
TEST_F(NoCifgNoPeepholeNoProjectionNoClippingUnidirectionalLstmTest,
LstmBlackBoxTest) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
const int sequence_length = 3;
UnidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/false, /*use_peephole=*/false,
      /*use_projection_weights=*/false,
      /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
      // Tensor shapes follow the lstm::full input order; {0} or {0, 0} marks
      // an omitted optional tensor.
{
{sequence_length, n_batch, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{0},
{0},
{0},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{0, 0},
{0},
{n_batch, n_output},
{n_batch, n_cell},
});
lstm.SetInputToInputWeights(input_to_input_weights_);
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetInputGateBias(input_gate_bias_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm);
}
TEST_F(NoCifgNoPeepholeNoProjectionNoClippingUnidirectionalLstmTest,
LstmBlackBoxTestBatchMajor) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
const int sequence_length = 3;
UnidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/false, /*use_peephole=*/false,
      /*use_projection_weights=*/false,
      /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
{sequence_length, n_batch, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{0},
{0},
{0},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{0, 0},
{0},
{n_batch, n_output},
{n_batch, n_cell},
});
lstm.SetInputToInputWeights(input_to_input_weights_);
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetInputGateBias(input_gate_bias_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
  VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm, /*tolerance=*/1e-5,
                /*time_major=*/false);
}
TEST_P(NoCifgNoPeepholeNoProjectionNoClippingUnidirectionalLstmTest,
HybridLstmBlackBoxTestUint8) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
const int sequence_length = 3;
HybridUnidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/false, /*use_peephole=*/false,
      /*use_projection_weights=*/false,
      /*use_projection_bias=*/false, /*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
{sequence_length, n_batch, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{0},
{0},
{0},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{0, 0},
{0},
{n_batch, n_output},
{n_batch, n_cell},
},
TensorType_UINT8, GetParam());
lstm.SetInputToInputWeights(input_to_input_weights_);
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetInputGateBias(input_gate_bias_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
  VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm,
                /*tolerance=*/0.0157651);
}
TEST_P(NoCifgNoPeepholeNoProjectionNoClippingUnidirectionalLstmTest,
HybridLstmBlackBoxTestInt8) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
const int sequence_length = 3;
HybridUnidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/false, /*use_peephole=*/false,
      /*use_projection_weights=*/false,
      /*use_projection_bias=*/false, /*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
{sequence_length, n_batch, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{0},
{0},
{0},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{0, 0},
{0},
{n_batch, n_output},
{n_batch, n_cell},
},
TensorType_INT8, GetParam());
lstm.SetInputToInputWeights(input_to_input_weights_);
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetInputGateBias(input_gate_bias_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
  VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm,
                /*tolerance=*/0.0157651);
}
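// Weights and golden outputs for a CIFG LSTM (coupled input and forget gates)
// with peephole connections, no projection layer and no clipping.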
class CifgPeepholeNoProjectionNoClippingUnidirectionalLstmTest
: public BaseUnidirectionalLstmTest {
void SetUp() override {
input_to_cell_weights_ = {-0.49770179, -0.27711356, -0.09624726,
0.05100781, 0.04717243, 0.48944736,
-0.38535351, -0.17212132};
input_to_forget_weights_ = {-0.55291498, -0.42866567, 0.13056988,
-0.3633365, -0.22755712, 0.28253698,
0.24407166, 0.33826375};
input_to_output_weights_ = {0.10725588, -0.02335852, -0.55932593,
-0.09426838, -0.44257352, 0.54939759,
0.01533556, 0.42751634};
cell_gate_bias_ = {0., 0., 0., 0.};
forget_gate_bias_ = {1., 1., 1., 1.};
output_gate_bias_ = {0., 0., 0., 0.};
recurrent_to_cell_weights_ = {
0.54066205, -0.32668582, -0.43562764, -0.56094903,
0.42957711, 0.01841056, -0.32764608, -0.33027974,
-0.10826075, 0.20675004, 0.19069612, -0.03026325,
-0.54532051, 0.33003211, 0.44901288, 0.21193194};
recurrent_to_forget_weights_ = {
-0.13832897, -0.0515101, -0.2359007, -0.16661474,
-0.14340827, 0.36986142, 0.23414481, 0.55899,
0.10798943, -0.41174671, 0.17751795, -0.34484994,
-0.35874045, -0.11352962, 0.27268326, 0.54058349};
recurrent_to_output_weights_ = {
0.41613156, 0.42610586, -0.16495961, -0.5663873,
0.30579174, -0.05115908, -0.33941799, 0.23364776,
0.11178309, 0.09481031, -0.26424935, 0.46261835,
0.50248802, 0.26114327, -0.43736315, 0.33149987};
cell_to_forget_weights_ = {0.47485286, -0.51955009, -0.24458408,
0.31544167};
cell_to_output_weights_ = {-0.17135078, 0.82760304, 0.85573703,
-0.77109635};
lstm_input_ = {{2., 3., 3., 4., 1., 1.}};
lstm_golden_output_ = {{-0.36444446, -0.00352185, 0.12886585, -0.05163646,
-0.42312205, -0.01218222, 0.24201041, -0.08124574,
-0.358325, -0.04621704, 0.21641694, -0.06471302}};
}
};
TEST_F(CifgPeepholeNoProjectionNoClippingUnidirectionalLstmTest,
LstmBlackBoxTest) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
const int sequence_length = 3;
UnidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/true, /*use_peephole=*/true,
      /*use_projection_weights=*/false,
      /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
{sequence_length, n_batch, n_input},
{0, 0},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{0, 0},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{0},
{n_cell},
{n_cell},
{0},
{n_cell},
{n_cell},
{n_cell},
{0, 0},
{0},
{n_batch, n_output},
{n_batch, n_cell},
});
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
lstm.SetCellToForgetWeights(cell_to_forget_weights_);
lstm.SetCellToOutputWeights(cell_to_output_weights_);
VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm);
}
TEST_P(CifgPeepholeNoProjectionNoClippingUnidirectionalLstmTest,
HybridLstmBlackBoxTestUint8) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
const int sequence_length = 3;
HybridUnidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/true, /*use_peephole=*/true,
      /*use_projection_weights=*/false,
      /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
{sequence_length, n_batch, n_input},
{0, 0},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{0, 0},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{0},
{n_cell},
{n_cell},
{0},
{n_cell},
{n_cell},
{n_cell},
{0, 0},
{0},
{n_batch, n_output},
{n_batch, n_cell},
},
TensorType_UINT8, GetParam());
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
lstm.SetCellToForgetWeights(cell_to_forget_weights_);
lstm.SetCellToOutputWeights(cell_to_output_weights_);
  VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm, /*tolerance=*/0.03573);
}
TEST_P(CifgPeepholeNoProjectionNoClippingUnidirectionalLstmTest,
HybridLstmBlackBoxTestInt8) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
const int sequence_length = 3;
HybridUnidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/true, /*use_peephole=*/true,
      /*use_projection_weights=*/false,
      /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
{sequence_length, n_batch, n_input},
{0, 0},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{0, 0},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{0},
{n_cell},
{n_cell},
{0},
{n_cell},
{n_cell},
{n_cell},
{0, 0},
{0},
{n_batch, n_output},
{n_batch, n_cell},
},
TensorType_INT8, GetParam());
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
lstm.SetCellToForgetWeights(cell_to_forget_weights_);
lstm.SetCellToOutputWeights(cell_to_output_weights_);
  VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm, /*tolerance=*/0.03573);
}
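// Weights and golden outputs for an LSTM with all gates, peephole
// connections, a projection layer and clipping.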
class NoCifgPeepholeProjectionClippingUnidirectionalLstmTest
: public BaseUnidirectionalLstmTest {
void SetUp() override {
input_to_input_weights_ = {
0.021393683, 0.06124551, 0.046905167, -0.014657677, -0.03149463,
0.09171803, 0.14647801, 0.10797193, -0.0057968358, 0.0019193048,
-0.2726754, 0.10154029, -0.018539885, 0.080349885, -0.10262385,
-0.022599787, -0.09121155, -0.008675967, -0.045206103, -0.0821282,
-0.008045952, 0.015478081, 0.055217247, 0.038719587, 0.044153627,
-0.06453243, 0.05031825, -0.046935108, -0.008164439, 0.014574226,
-0.1671009, -0.15519552, -0.16819797, -0.13971269, -0.11953059,
0.25005487, -0.22790983, 0.009855087, -0.028140958, -0.11200698,
0.11295408, -0.0035217577, 0.054485075, 0.05184695, 0.064711206,
0.10989193, 0.11674786, 0.03490607, 0.07727357, 0.11390585,
-0.1863375, -0.1034451, -0.13945189, -0.049401227, -0.18767063,
0.042483903, 0.14233552, 0.13832581, 0.18350165, 0.14545603,
-0.028545704, 0.024939531, 0.050929718, 0.0076203286, -0.0029723682,
-0.042484224, -0.11827596, -0.09171104, -0.10808628, -0.16327988,
-0.2273378, -0.0993647, -0.017155107, 0.0023917493, 0.049272764,
0.0038534778, 0.054764505, 0.089753784, 0.06947234, 0.08014476,
-0.04544234, -0.0497073, -0.07135631, -0.048929106, -0.004042012,
-0.009284026, 0.018042054, 0.0036860977, -0.07427302, -0.11434604,
-0.018995456, 0.031487543, 0.012834908, 0.019977754, 0.044256654,
-0.39292613, -0.18519334, -0.11651281, -0.06809892, 0.011373677};
input_to_forget_weights_ = {
-0.0018401089, -0.004852237, 0.03698424, 0.014181704,
0.028273236, -0.016726194, -0.05249759, -0.10204261,
0.00861066, -0.040979505, -0.009899187, 0.01923892,
-0.028177269, -0.08535103, -0.14585495, 0.10662567,
-0.01909731, -0.017883534, -0.0047269356, -0.045103323,
0.0030784295, 0.076784775, 0.07463696, 0.094531395,
0.0814421, -0.12257899, -0.033945758, -0.031303465,
0.045630626, 0.06843887, -0.13492945, -0.012480007,
-0.0811829, -0.07224499, -0.09628791, 0.045100946,
0.0012300825, 0.013964662, 0.099372394, 0.02543059,
0.06958324, 0.034257296, 0.0482646, 0.06267997,
0.052625068, 0.12784666, 0.07077897, 0.025725935,
0.04165009, 0.07241905, 0.018668644, -0.037377294,
-0.06277783, -0.08833636, -0.040120605, -0.011405586,
-0.007808335, -0.010301386, -0.005102167, 0.027717464,
0.05483423, 0.11449111, 0.11289652, 0.10939839,
0.13396506, -0.08402166, -0.01901462, -0.044678304,
-0.07720565, 0.014350063, -0.11757958, -0.0652038,
-0.08185733, -0.076754324, -0.092614375, 0.10405491,
0.052960336, 0.035755895, 0.035839386, -0.012540553,
0.036881298, 0.02913376, 0.03420159, 0.05448447,
-0.054523353, 0.02582715, 0.02327355, -0.011857179,
-0.0011980024, -0.034641717, -0.026125094, -0.17582615,
-0.15923657, -0.27486774, -0.0006143371, 0.0001771948,
-8.470171e-05, 0.02651807, 0.045790765, 0.06956496};
input_to_cell_weights_ = {
-0.04580283, -0.09549462, -0.032418985, -0.06454633,
-0.043528453, 0.043018587, -0.049152344, -0.12418144,
-0.078985475, -0.07596889, 0.019484362, -0.11434962,
-0.0074034138, -0.06314844, -0.092981495, 0.0062155537,
-0.025034338, -0.0028890965, 0.048929527, 0.06235075,
0.10665918, -0.032036792, -0.08505916, -0.10843358,
-0.13002433, -0.036816437, -0.02130134, -0.016518239,
0.0047691227, -0.0025825808, 0.066017866, 0.029991534,
-0.10652836, -0.1037554, -0.13056071, -0.03266643,
-0.033702414, -0.006473424, -0.04611692, 0.014419339,
-0.025174323, 0.0396852, 0.081777506, 0.06157468,
0.10210095, -0.009658194, 0.046511717, 0.03603906,
0.0069369148, 0.015960095, -0.06507666, 0.09551598,
0.053568836, 0.06408714, 0.12835667, -0.008714329,
-0.20211966, -0.12093674, 0.029450472, 0.2849013,
-0.029227901, 0.1164364, -0.08560263, 0.09941786,
-0.036999565, -0.028842626, -0.0033637602, -0.017012902,
-0.09720865, -0.11193351, -0.029155117, -0.017936034,
-0.009768936, -0.04223324, -0.036159635, 0.06505112,
-0.021742892, -0.023377212, -0.07221364, -0.06430552,
0.05453865, 0.091149814, 0.06387331, 0.007518393,
0.055960953, 0.069779344, 0.046411168, 0.10509911,
0.07463894, 0.0075130584, 0.012850982, 0.04555431,
0.056955688, 0.06555285, 0.050801456, -0.009862683,
0.00826772, -0.026555609, -0.0073611983, -0.0014897042};
input_to_output_weights_ = {
-0.0998932, -0.07201956, -0.052803773, -0.15629593, -0.15001918,
-0.07650751, 0.02359855, -0.075155355, -0.08037709, -0.15093534,
0.029517552, -0.04751393, 0.010350531, -0.02664851, -0.016839722,
-0.023121163, 0.0077019283, 0.012851257, -0.05040649, -0.0129761,
-0.021737747, -0.038305793, -0.06870586, -0.01481247, -0.001285394,
0.10124236, 0.083122835, 0.053313006, -0.062235646, -0.075637154,
-0.027833903, 0.029774971, 0.1130802, 0.09218906, 0.09506135,
-0.086665764, -0.037162706, -0.038880914, -0.035832845, -0.014481564,
-0.09825003, -0.12048569, -0.097665586, -0.05287633, -0.0964047,
-0.11366429, 0.035777505, 0.13568819, 0.052451383, 0.050649304,
0.05798951, -0.021852335, -0.099848844, 0.014740475, -0.078897946,
0.04974699, 0.014160473, 0.06973932, 0.04964942, 0.033364646,
0.08190124, 0.025535367, 0.050893165, 0.048514254, 0.06945813,
-0.078907564, -0.06707616, -0.11844508, -0.09986688, -0.07509403,
0.06263226, 0.14925587, 0.20188436, 0.12098451, 0.14639415,
0.0015017595, -0.014267382, -0.03417257, 0.012711468, 0.0028300495,
-0.024758482, -0.05098548, -0.0821182, 0.014225672, 0.021544158,
0.08949725, 0.07505268, -0.0020780868, 0.04908258, 0.06476295,
-0.022907063, 0.027562456, 0.040185735, 0.019567577, -0.015598739,
-0.049097303, -0.017121866, -0.083368234, -0.02332002, -0.0840956};
input_gate_bias_ = {0.02234832, 0.14757581, 0.18176508, 0.10380666,
0.053110216, -0.06928846, -0.13942584, -0.11816189,
0.19483899, 0.03652339, -0.10250295, 0.036714908,
-0.18426876, 0.036065217, 0.21810818, 0.02383196,
-0.043370757, 0.08690144, -0.04444982, 0.00030581196};
forget_gate_bias_ = {0.035185695, -0.042891346, -0.03032477, 0.23027696,
0.11098921, 0.15378423, 0.09263801, 0.09790885,
0.09508917, 0.061199076, 0.07665568, -0.015443159,
-0.03499149, 0.046190713, 0.08895977, 0.10899629,
0.40694186, 0.06030037, 0.012413437, -0.06108739};
cell_gate_bias_ = {-0.024379363, 0.0055531194, 0.23377132, 0.033463873,
-0.1483596, -0.10639995, -0.091433935, 0.058573797,
-0.06809782, -0.07889636, -0.043246906, -0.09829136,
-0.4279842, 0.034901652, 0.18797937, 0.0075234566,
0.016178843, 0.1749513, 0.13975595, 0.92058027};
output_gate_bias_ = {0.046159424, -0.0012809046, 0.03563469, 0.12648113,
0.027195795, 0.35373217, -0.018957434, 0.008907322,
-0.0762701, 0.12018895, 0.04216877, 0.0022856654,
0.040952638, 0.3147856, 0.08225149, -0.057416286,
-0.14995944, -0.008040261, 0.13208859, 0.029760877};
recurrent_to_input_weights_ = {
-0.001374326, -0.078856036, 0.10672688, 0.029162422,
-0.11585556, 0.02557986, -0.13446963, -0.035785314,
-0.01244275, 0.025961924, -0.02337298, -0.044228926,
-0.055839065, -0.046598054, -0.010546039, -0.06900766,
0.027239809, 0.022582639, -0.013296484, -0.05459212,
0.08981, -0.045407712, 0.08682226, -0.06867011,
-0.14390695, -0.02916037, 0.000996957, 0.091420636,
0.14283475, -0.07390571, -0.06402044, 0.062524505,
-0.093129106, 0.04860203, -0.08364217, -0.08119002,
0.009352075, 0.22920375, 0.0016303885, 0.11583097,
-0.13732095, 0.012405723, -0.07551853, 0.06343048,
0.12162708, -0.031923793, -0.014335606, 0.01790974,
-0.10650317, -0.0724401, 0.08554849, -0.05727212,
0.06556731, -0.042729504, -0.043227166, 0.011683251,
-0.013082158, -0.029302018, -0.010899579, -0.062036745,
-0.022509435, -0.00964907, -0.01567329, 0.04260106,
-0.07787477, -0.11576462, 0.017356863, 0.048673786,
-0.017577527, -0.05527947, -0.082487635, -0.040137455,
-0.10820036, -0.04666372, 0.022746278, -0.07851417,
0.01068115, 0.032956902, 0.022433773, 0.0026891115,
0.08944216, -0.0685835, 0.010513544, 0.07228705,
0.02032331, -0.059686817, -0.0005566496, -0.086984694,
0.040414046, -0.1380399, 0.094208956, -0.05722982,
0.012092817, -0.04989123, -0.086576, -0.003399834,
-0.04696032, -0.045747425, 0.10091314, 0.048676282,
-0.029037097, 0.031399418, -0.0040285117, 0.047237843,
0.09504992, 0.041799378, -0.049185462, -0.031518843,
-0.10516937, 0.026374253, 0.10058866, -0.0033195973,
-0.041975245, 0.0073591834, 0.0033782164, -0.004325073,
-0.10167381, 0.042500053, -0.01447153, 0.06464186,
-0.017142897, 0.03312627, 0.009205989, 0.024138335,
-0.011337001, 0.035530265, -0.010912711, 0.0706555,
-0.005894094, 0.051841937, -0.1401738, -0.02351249,
0.0365468, 0.07590991, 0.08838724, 0.021681072,
-0.10086113, 0.019608743, -0.06195883, 0.077335775,
0.023646897, -0.095322326, 0.02233014, 0.09756986,
-0.048691444, -0.009579111, 0.07595467, 0.11480546,
-0.09801813, 0.019894179, 0.08502348, 0.004032281,
0.037211012, 0.068537936, -0.048005626, -0.091520436,
-0.028379958, -0.01556313, 0.06554592, -0.045599163,
-0.01672207, -0.020169014, -0.011877351, -0.20212261,
0.010889619, 0.0047078193, 0.038385306, 0.08540671,
-0.017140968, -0.0035865551, 0.016678626, 0.005633034,
0.015963363, 0.00871737, 0.060130805, 0.028611384,
0.10109069, -0.015060172, -0.07894427, 0.06401885,
0.011584063, -0.024466386, 0.0047652307, -0.09041358,
0.030737216, -0.0046374933, 0.14215417, -0.11823516,
0.019899689, 0.006106124, -0.027092824, 0.0786356,
0.05052217, -0.058925, -0.011402121, -0.024987547,
-0.0013661642, -0.06832946, -0.015667673, -0.1083353,
-0.00096863037, -0.06988685, -0.053350925, -0.027275559,
-0.033664223, -0.07978348, -0.025200296, -0.017207067,
-0.058403496, -0.055697463, 0.005798788, 0.12965427,
-0.062582195, 0.0013350133, -0.10482091, 0.0379771,
0.072521195, -0.0029455067, -0.13797039, -0.03628521,
0.013806405, -0.017858358, -0.01008298, -0.07700066,
-0.017081132, 0.019358726, 0.0027079724, 0.004635139,
0.062634714, -0.02338735, -0.039547626, -0.02050681,
0.03385117, -0.083611414, 0.002862572, -0.09421313,
0.058618143, -0.08598433, 0.00972939, 0.023867095,
-0.053934585, -0.023203006, 0.07452513, -0.048767887,
-0.07314807, -0.056307215, -0.10433547, -0.06440842,
0.04328182, 0.04389765, -0.020006588, -0.09076438,
-0.11652589, -0.021705797, 0.03345259, -0.010329105,
-0.025767034, 0.013057034, -0.07316461, -0.10145612,
0.06358255, 0.18531723, 0.07759293, 0.12006465,
0.1305557, 0.058638252, -0.03393652, 0.09622831,
-0.16253184, -2.4580743e-06, 0.079869635, -0.070196845,
-0.005644518, 0.06857898, -0.12598175, -0.035084512,
0.03156317, -0.12794146, -0.031963028, 0.04692781,
0.030070418, 0.0071660685, -0.095516115, -0.004643372,
0.040170413, -0.062104587, -0.0037324072, 0.0554317,
0.08184801, -0.019164372, 0.06791302, 0.034257166,
-0.10307039, 0.021943003, 0.046745934, 0.0790918,
-0.0265588, -0.007824208, 0.042546265, -0.00977924,
-0.0002440307, -0.017384544, -0.017990116, 0.12252321,
-0.014512694, -0.08251313, 0.08861942, 0.13589665,
0.026351685, 0.012641483, 0.07466548, 0.044301085,
-0.045414884, -0.051112458, 0.03444247, -0.08502782,
-0.04106223, -0.028126027, 0.028473156, 0.10467447};
recurrent_to_cell_weights_ = {
-0.037322544, 0.018592842, 0.0056175636, -0.06253426,
0.055647098, -0.05713207, -0.05626563, 0.005559383,
0.03375411, -0.025757805, -0.088049285, 0.06017052,
-0.06570978, 0.007384076, 0.035123326, -0.07920549,
0.053676967, 0.044480428, -0.07663568, 0.0071805613,
0.08089997, 0.05143358, 0.038261272, 0.03339287,
-0.027673481, 0.044746667, 0.028349208, 0.020090483,
-0.019443132, -0.030755889, -0.0040000007, 0.04465846,
-0.021585021, 0.0031670958, 0.0053199246, -0.056117613,
-0.10893326, 0.076739706, -0.08509834, -0.027997585,
0.037871376, 0.01449768, -0.09002357, -0.06111149,
-0.046195522, 0.0422062, -0.005683705, -0.1253618,
-0.012925729, -0.04890792, 0.06985068, 0.037654128,
0.03398274, -0.004781977, 0.007032333, -0.031787455,
0.010868644, -0.031489216, 0.09525667, 0.013939797,
0.0058680447, 0.0167067, 0.02668468, -0.04797466,
-0.048885044, -0.12722108, 0.035304096, 0.06554885,
0.00972396, -0.039238118, -0.05159735, -0.11329045,
0.1613692, -0.03750952, 0.06529313, -0.071974665,
-0.11769596, 0.015524369, -0.0013754242, -0.12446318,
0.02786344, -0.014179351, 0.005264273, 0.14376344,
0.015983658, 0.03406988, -0.06939408, 0.040699873,
0.02111075, 0.09669095, 0.041345075, -0.08316494,
-0.07684199, -0.045768797, 0.032298047, -0.041805092,
0.0119405, 0.0061010392, 0.12652606, 0.0064572375,
-0.024950314, 0.11574242, 0.04508852, -0.04335324,
0.06760663, -0.027437469, 0.07216407, 0.06977076,
-0.05438599, 0.034033038, -0.028602652, 0.05346137,
0.043184172, -0.037189785, 0.10420091, 0.00882477,
-0.054019816, -0.074273005, -0.030617684, -0.0028467078,
0.024302477, -0.0038869337, 0.005332455, 0.0013399826,
0.04361412, -0.007001822, 0.09631092, -0.06702025,
-0.042049985, -0.035070654, -0.04103342, -0.10273396,
0.0544271, 0.037184782, -0.13150354, -0.0058036847,
-0.008264958, 0.042035464, 0.05891794, 0.029673764,
0.0063542654, 0.044788733, 0.054816857, 0.062257513,
-0.00093483756, 0.048938446, -0.004952862, -0.007730018,
-0.04043371, -0.017094059, 0.07229206, -0.023670016,
-0.052195564, -0.025616996, -0.01520939, 0.045104615,
-0.007376126, 0.003533447, 0.006570588, 0.056037236,
0.12436656, 0.051817212, 0.028532185, -0.08686856,
0.11868599, 0.07663395, -0.07323171, 0.03463402,
-0.050708205, -0.04458982, -0.11590894, 0.021273347,
0.1251325, -0.15313013, -0.12224372, 0.17228661,
0.023029093, 0.086124025, 0.006445803, -0.03496501,
0.028332196, 0.04449512, -0.042436164, -0.026587414,
-0.006041347, -0.09292539, -0.05678812, 0.03897832,
0.09465633, 0.008115513, -0.02171956, 0.08304309,
0.071401566, 0.019622514, 0.032163795, -0.004167056,
0.02295182, 0.030739572, 0.056506045, 0.004612461,
0.06524936, 0.059999723, 0.046395954, -0.0045512207,
-0.1335546, -0.030136576, 0.11584653, -0.014678886,
0.0020118146, -0.09688814, -0.0790206, 0.039770417,
-0.0329582, 0.07922767, 0.029322514, 0.026405897,
0.04207835, -0.07073373, 0.063781224, 0.0859677,
-0.10925287, -0.07011058, 0.048005477, 0.03438226,
-0.09606514, -0.006669445, -0.043381985, 0.04240257,
-0.06955775, -0.06769346, 0.043903265, -0.026784198,
-0.017840602, 0.024307009, -0.040079936, -0.019946516,
0.045318738, -0.12233574, 0.026170589, 0.0074471775,
0.15978073, 0.10185836, 0.10298046, -0.015476589,
-0.039390966, -0.072174534, 0.0739445, -0.1211869,
-0.0347889, -0.07943156, 0.014809798, -0.12412325,
-0.0030663363, 0.039695457, 0.0647603, -0.08291318,
-0.018529687, -0.004423833, 0.0037507233, 0.084633216,
-0.01514876, -0.056505352, -0.012800942, -0.06994386,
0.012962922, -0.031234352, 0.07029052, 0.016418684,
0.03618972, 0.055686004, -0.08663945, -0.017404709,
-0.054761406, 0.029065743, 0.052404847, 0.020238016,
0.0048197987, -0.0214882, 0.07078733, 0.013016777,
0.06262858, 0.009184685, 0.020785125, -0.043904778,
-0.0270329, -0.03299152, -0.060088247, -0.015162964,
-0.001828936, 0.12642565, -0.056757294, 0.013586685,
0.09232601, -0.035886683, 0.06000002, 0.05229691,
-0.052580316, -0.082029596, -0.010794592, 0.012947712,
-0.036429964, -0.085508935, -0.13127148, -0.017744139,
0.031502828, 0.036232427, -0.031581745, 0.023051167,
-0.05325106, -0.03421577, 0.028793324, -0.034633752,
-0.009881397, -0.043551125, -0.018609839, 0.0019097115,
-0.008799762, 0.056595087, 0.0022273948, 0.055752404};
recurrent_to_forget_weights_ = {
-0.057784554, -0.026057621, -0.068447545, -0.022581743,
0.14811787, 0.10826372, 0.09471067, 0.03987225,
-0.0039523416, 0.00030638507, 0.053185795, 0.10572994,
0.08414449, -0.022036452, -0.00066928595, -0.09203576,
0.032950465, -0.10985798, -0.023809856, 0.0021431844,
-0.02196096, -0.00326074, 0.00058621005, -0.074678116,
-0.06193199, 0.055729095, 0.03736828, 0.020123724,
0.061878487, -0.04729229, 0.034919553, -0.07585433,
-0.04421272, -0.044019096, 0.085488975, 0.04058006,
-0.06890133, -0.030951202, -0.024628663, -0.07672815,
0.034293607, 0.08556707, -0.05293577, -0.033561368,
-0.04899627, 0.0241671, 0.015736353, -0.095442444,
-0.029564252, 0.016493602, -0.035026584, 0.022337519,
-0.026871363, 0.004780428, 0.0077918363, -0.03601621,
0.016435321, -0.03263031, -0.09543275, -0.047392778,
0.013454138, 0.028934088, 0.01685226, -0.086110644,
-0.046250615, -0.01847454, 0.047608484, 0.07339695,
0.034546845, -0.04881143, 0.009128804, -0.08802852,
0.03761666, 0.008096139, -0.014454086, 0.014361001,
-0.023502491, -0.0011840804, -0.07607001, 0.001856849,
-0.06509276, -0.006021153, -0.08570962, -0.1451793,
0.060212336, 0.055259194, 0.06974018, 0.049454916,
-0.027794661, -0.08077226, -0.016179763, 0.1169753,
0.17213494, -0.0056326236, -0.053934924, -0.0124349,
-0.11520337, 0.05409887, 0.088759385, 0.0019655675,
0.0042065294, 0.03881498, 0.019844765, 0.041858196,
-0.05695512, 0.047233116, 0.038937137, -0.06542224,
0.014429736, -0.09719407, 0.13908425, -0.05379757,
0.012321099, 0.082840554, -0.029899208, 0.044217527,
0.059855383, 0.07711018, -0.045319796, 0.0948846,
-0.011724666, -0.0033288454, -0.033542685, -0.04764985,
-0.13873616, 0.040668588, 0.034832682, -0.015319203,
-0.018715994, 0.046002675, 0.0599172, -0.043107376,
0.0294216, -0.002314414, -0.022424703, 0.0030315618,
0.0014641669, 0.0029166266, -0.11878115, 0.013738511,
0.12375372, -0.0006038222, 0.029104086, 0.087442465,
0.052958444, 0.07558703, 0.04817258, 0.044462286,
-0.015213451, -0.08783778, -0.0561384, -0.003008196,
0.047060397, -0.002058388, 0.03429439, -0.018839769,
0.024734668, 0.024614193, -0.042046934, 0.09597743,
-0.0043254104, 0.04320769, 0.0064070094, -0.0019131786,
-0.02558259, -0.022822596, -0.023273505, -0.02464396,
-0.10991725, -0.006240552, 0.0074488563, 0.024044557,
0.04383914, -0.046476185, 0.028658995, 0.060410924,
0.050786525, 0.009452605, -0.0073054377, -0.024810238,
0.0052906186, 0.0066939713, -0.0020913032, 0.014515517,
0.015898481, 0.021362653, -0.030262267, 0.016587038,
-0.011442813, 0.041154444, -0.007631438, -0.03423484,
-0.010977775, 0.036152758, 0.0066366293, 0.11915515,
0.02318443, -0.041350313, 0.021485701, -0.10906167,
-0.028218046, -0.00954771, 0.020531068, -0.11995105,
-0.03672871, 0.024019798, 0.014255957, -0.05221243,
-0.00661567, -0.04630967, 0.033188973, 0.10107534,
-0.014027541, 0.030796422, -0.10270911, -0.035999842,
0.15443139, 0.07684145, 0.036571592, -0.035900835,
-0.0034699554, 0.06209149, 0.015920248, -0.031122351,
-0.03858649, 0.01849943, 0.13872518, 0.01503974,
0.069941424, -0.06948533, -0.0088794185, 0.061282158,
-0.047401894, 0.03100163, -0.041533746, -0.10430945,
0.044574402, -0.01425562, -0.024290353, 0.034563623,
0.05866852, 0.023947537, -0.09445152, 0.035450947,
0.02247216, -0.0042998926, 0.061146557, -0.10250651,
0.020881841, -0.06747029, 0.10062043, -0.0023941975,
0.03532124, -0.016341697, 0.09685456, -0.016764693,
0.051808182, 0.05875331, -0.04536488, 0.001626336,
-0.028892258, -0.01048663, -0.009793449, -0.017093895,
0.010987891, 0.02357273, -0.00010856845, 0.0099760275,
-0.001845119, -0.03551521, 0.0018358806, 0.05763657,
-0.01769146, 0.040995963, 0.02235177, -0.060430344,
0.11475477, -0.023854522, 0.10071741, 0.0686208,
-0.014250481, 0.034261297, 0.047418304, 0.08562733,
-0.030519066, 0.0060542435, 0.014653856, -0.038836084,
0.04096551, 0.032249358, -0.08355519, -0.026823482,
0.056386515, -0.010401743, -0.028396193, 0.08507674,
0.014410365, 0.020995233, 0.17040324, 0.11511526,
0.02459721, 0.0066619175, 0.025853224, -0.023133837,
-0.081302024, 0.017264642, -0.009585969, 0.09491168,
-0.051313367, 0.054532815, -0.014298593, 0.10657464,
0.007076659, 0.10964551, 0.0409152, 0.008275321,
-0.07283536, 0.07937492, 0.04192024, -0.1075027};
recurrent_to_output_weights_ = {
0.025825322, -0.05813119, 0.09495884, -0.045984812,
-0.01255415, -0.0026479573, -0.08196161, -0.054914974,
-0.0046604523, -0.029587349, -0.044576716, -0.07480124,
-0.082868785, 0.023254942, 0.027502948, -0.0039728214,
-0.08683098, -0.08116779, -0.014675607, -0.037924774,
-0.023314456, -0.007401714, -0.09255757, 0.029460307,
-0.08829125, -0.005139627, -0.08989442, -0.0555066,
0.13596267, -0.025062224, -0.048351806, -0.03850004,
0.07266485, -0.022414139, 0.05940088, 0.075114764,
0.09597592, -0.010211725, -0.0049794707, -0.011523867,
-0.025980417, 0.072999895, 0.11091378, -0.081685916,
0.014416728, 0.043229222, 0.034178585, -0.07530371,
0.035837382, -0.085607, -0.007721233, -0.03287832,
-0.043848954, -0.06404588, -0.06632928, -0.073643476,
0.008214239, -0.045984086, 0.039764922, 0.03474462,
0.060612556, -0.080590084, 0.049127717, 0.04151091,
-0.030063879, 0.008801774, -0.023021035, -0.019558564,
0.05158114, -0.010947698, -0.011825728, 0.0075720972,
0.0699727, -0.0039981045, 0.069350146, 0.08799282,
0.016156472, 0.035502106, 0.11695009, 0.006217345,
0.13392477, -0.037875112, 0.025745004, 0.08940699,
-0.00924166, 0.0046702605, -0.036598757, -0.08811812,
0.10522024, -0.032441203, 0.008176899, -0.04454919,
0.07058152, 0.0067963637, 0.039206743, 0.03259838,
0.03725492, -0.09515802, 0.013326398, -0.052055415,
-0.025676316, 0.03198509, -0.015951829, -0.058556724,
0.036879618, 0.043357447, 0.028362012, -0.05908629,
0.0059240665, -0.04995891, -0.019187413, 0.0276265,
-0.01628143, 0.0025863599, 0.08800015, 0.035250366,
-0.022165963, -0.07328642, -0.009415526, -0.07455109,
0.11690406, 0.0363299, 0.07411125, 0.042103454,
-0.009660886, 0.019076364, 0.018299393, -0.046004917,
0.08891175, 0.0431396, -0.026327137, -0.051502608,
0.08979574, -0.051670972, 0.04940282, -0.07491107,
-0.021240504, 0.022596184, -0.034280192, 0.060163025,
-0.058211457, -0.051837247, -0.01349775, -0.04639988,
-0.035936575, -0.011681591, 0.064818054, 0.0073146066,
-0.021745546, -0.043124277, -0.06471268, -0.07053354,
-0.029321948, -0.05330136, 0.016933719, -0.053782392,
0.13747959, -0.1361751, -0.11569455, 0.0033329215,
0.05693899, -0.053219706, 0.063698, 0.07977434,
-0.07924483, 0.06936997, 0.0034815092, -0.007305279,
-0.037325785, -0.07251102, -0.033633437, -0.08677009,
0.091591336, -0.14165086, 0.021752775, 0.019683983,
0.0011612234, -0.058154266, 0.049996935, 0.0288841,
-0.0024567875, -0.14345716, 0.010955264, -0.10234828,
0.1183656, -0.0010731248, -0.023590032, -0.072285876,
-0.0724771, -0.026382286, -0.0014920527, 0.042667855,
0.0018776858, 0.02986552, 0.009814309, 0.0733756,
0.12289186, 0.018043943, -0.0458958, 0.049412545,
0.033632483, 0.05495232, 0.036686596, -0.013781798,
-0.010036754, 0.02576849, -0.08307328, 0.010112348,
0.042521734, -0.05869831, -0.071689695, 0.03876447,
-0.13275425, -0.0352966, -0.023077697, 0.10285965,
0.084736146, 0.15568255, -0.00040734606, 0.027835453,
-0.10292561, -0.032401145, 0.10053256, -0.026142767,
-0.08271222, -0.0030240538, -0.016368777, 0.1070414,
0.042672627, 0.013456989, -0.0437609, -0.022309763,
0.11576483, 0.04108048, 0.061026827, -0.0190714,
-0.0869359, 0.037901703, 0.0610107, 0.07202949,
0.01675338, 0.086139716, -0.08795751, -0.014898893,
-0.023771819, -0.01965048, 0.007955471, -0.043740474,
0.03346837, -0.10549954, 0.090567775, 0.042013682,
-0.03176985, 0.12569028, -0.02421228, -0.029526481,
0.023851605, 0.031539805, 0.05292009, -0.02344001,
-0.07811758, -0.08834428, 0.10094801, 0.16594367,
-0.06861939, -0.021256343, -0.041093912, -0.06669611,
0.035498552, 0.021757556, -0.09302526, -0.015403468,
-0.06614931, -0.051798206, -0.013874718, 0.03630673,
0.010412845, -0.08077351, 0.046185967, 0.0035662893,
0.03541868, -0.094149634, -0.034814864, 0.003128424,
-0.020674974, -0.03944324, -0.008110165, -0.11113267,
0.08484226, 0.043586485, 0.040582247, 0.0968012,
-0.065249965, -0.028036479, 0.0050708856, 0.0017462453,
0.0326779, 0.041296225, 0.09164146, -0.047743853,
-0.015952192, -0.034451712, 0.084197424, -0.05347844,
-0.11768019, 0.085926116, -0.08251791, -0.045081906,
0.0948852, 0.068401024, 0.024856757, 0.06978981,
-0.057309967, -0.012775832, -0.0032452994, 0.01977615,
-0.041040014, -0.024264973, 0.063464895, 0.05431621,
};
cell_to_input_weights_ = {
0.040369894, 0.030746894, 0.24704495, 0.018586371, -0.037586458,
-0.15312155, -0.11812848, -0.11465643, 0.20259799, 0.11418174,
-0.10116027, -0.011334949, 0.12411352, -0.076769054, -0.052169047,
0.21198851, -0.38871562, -0.09061183, -0.09683246, -0.21929175};
cell_to_forget_weights_ = {
-0.01998659, -0.15568835, -0.24248174, -0.012770197, 0.041331276,
-0.072311886, -0.052123554, -0.0066330447, -0.043891653, 0.036225766,
-0.047248036, 0.021479502, 0.033189066, 0.11952997, -0.020432774,
0.64658105, -0.06650122, -0.03467612, 0.095340036, 0.23647355};
cell_to_output_weights_ = {
0.08286371, -0.08261836, -0.51210177, 0.002913762, 0.17764764,
-0.5495371, -0.08460716, -0.24552552, 0.030037103, 0.04123544,
-0.11940523, 0.007358328, 0.1890978, 0.4833202, -0.34441817,
0.36312827, -0.26375428, 0.1457655, -0.19724406, 0.15548733};
projection_weights_ = {
-0.009802181, 0.09401916, 0.0717386, -0.13895074,
0.09641832, 0.060420845, 0.08539281, 0.054285463,
0.061395317, 0.034448683, -0.042991187, 0.019801661,
-0.16840284, -0.015726732, -0.23041931, -0.024478018,
-0.10959692, -0.013875541, 0.18600968, -0.061274476,
0.0138165, -0.08160894, -0.07661644, 0.032372914,
0.16169067, 0.22465782, -0.03993472, -0.004017731,
0.08633481, -0.28869787, 0.08682067, 0.17240396,
0.014975425, 0.056431185, 0.031037588, 0.16702051,
0.0077946745, 0.15140012, 0.29405436, 0.120285,
-0.188994, -0.027265169, 0.043389652, -0.022061434,
0.014777949, -0.20203483, 0.094781205, 0.19100232,
0.13987629, -0.036132768, -0.06426278, -0.05108664,
0.13221376, 0.009441198, -0.16715929, 0.15859416,
-0.040437475, 0.050779544, -0.022187516, 0.012166504,
0.027685808, -0.07675938, -0.0055694645, -0.09444123,
0.0046453946, 0.050794356, 0.10770313, -0.20790008,
-0.07149004, -0.11425117, 0.008225835, -0.035802525,
0.14374903, 0.15262283, 0.048710253, 0.1847461,
-0.007487823, 0.11000021, -0.09542012, 0.22619456,
-0.029149994, 0.08527916, 0.009043713, 0.0042746216,
0.016261552, 0.022461696, 0.12689082, -0.043589946,
-0.12035478, -0.08361797, -0.050666027, -0.1248618,
-0.1275799, -0.071875185, 0.07377272, 0.09944291,
-0.18897448, -0.1593054, -0.06526116, -0.040107165,
-0.004618631, -0.067624845, -0.007576253, 0.10727444,
0.041546922, -0.20424393, 0.06907816, 0.050412357,
0.00724631, 0.039827548, 0.12449835, 0.10747581,
0.13708383, 0.09134148, -0.12617786, -0.06428341,
0.09956831, 0.1208086, -0.14676677, -0.0727722,
0.1126304, 0.010139365, 0.015571211, -0.038128063,
0.022913318, -0.042050496, 0.16842307, -0.060597885,
0.10531834, -0.06411776, -0.07451711, -0.03410368,
-0.13393489, 0.06534304, 0.003620307, 0.04490757,
0.05970546, 0.05197996, 0.02839995, 0.10434969,
-0.013699693, -0.028353551, -0.07260381, 0.047201227,
-0.024575593, -0.036445823, 0.07155557, 0.009672501,
-0.02328883, 0.009533515, -0.03606021, -0.07421458,
-0.028082801, -0.2678904, -0.13221288, 0.18419984,
-0.13012612, -0.014588381, -0.035059117, -0.04824723,
0.07830115, -0.056184657, 0.03277091, 0.025466874,
0.14494097, -0.12522776, -0.098633975, -0.10766018,
-0.08317623, 0.08594209, 0.07749552, 0.039474737,
0.1776665, -0.07409566, -0.0477268, 0.29323658,
0.10801441, 0.1154011, 0.013952499, 0.10739139,
0.10708251, -0.051456142, 0.0074137426, -0.10430189,
0.10034707, 0.045594677, 0.0635285, -0.0715442,
-0.089667566, -0.10811871, 0.00026344223, 0.08298446,
-0.009525053, 0.006585689, -0.24567553, -0.09450807,
0.09648481, 0.026996298, -0.06419476, -0.04752702,
-0.11063944, -0.23441927, -0.17608605, -0.052156363,
0.067035615, 0.19271925, -0.0032889997, -0.043264326,
0.09663576, -0.057112187, -0.10100678, 0.0628376,
0.04447668, 0.017961001, -0.10094388, -0.10190601,
0.18335468, 0.10494553, -0.052095775, -0.0026118709,
0.10539724, -0.04383912, -0.042349473, 0.08438151,
-0.1947263, 0.02251204, 0.11216432, -0.10307853,
0.17351969, -0.039091777, 0.08066188, -0.00561982,
0.12633002, 0.11335965, -0.0088127935, -0.019777594,
0.06864014, -0.059751723, 0.016233567, -0.06894641,
-0.28651384, -0.004228674, 0.019708522, -0.16305895,
-0.07468996, -0.0855457, 0.099339016, -0.07580735,
-0.13775392, 0.08434318, 0.08330512, -0.12131499,
0.031935584, 0.09180414, -0.08876437, -0.08049874,
0.008753825, 0.03498998, 0.030215185, 0.03907079,
0.089751154, 0.029194152, -0.03337423, -0.019092513,
0.04331237, 0.04299654, -0.036394123, -0.12915532,
0.09793732, 0.07512415, -0.11319543, -0.032502122,
0.15661901, 0.07671967, -0.005491124, -0.19379048,
-0.218606, 0.21448623, 0.017840758, 0.1416943,
-0.07051762, 0.19488361, 0.02664691, -0.18104725,
-0.09334311, 0.15026465, -0.15493552, -0.057762887,
-0.11604192, -0.262013, -0.01391798, 0.012185008,
0.11156489, -0.07483202, 0.06693364, -0.26151478,
0.046425626, 0.036540434, -0.16435726, 0.17338543,
-0.21401681, -0.11385144, -0.08283257, -0.069031075,
0.030635102, 0.010969227, 0.11109743, 0.010919218,
0.027526086, 0.13519906, 0.01891392, -0.046839405,
-0.040167913, 0.017953383, -0.09700955, 0.0061885654,
-0.07000971, 0.026893595, -0.038844477, 0.14543656};
lstm_input_ = {
{
0.787926, 0.151646, 0.071352, 0.118426, 0.458058,
0.596268, 0.998386, 0.568695, 0.864524, 0.571277,
0.073204, 0.296072, 0.743333, 0.069199, 0.045348,
0.867394, 0.291279, 0.013714, 0.482521, 0.626339},
{
0.295743, 0.544053, 0.690064, 0.858138, 0.497181,
0.642421, 0.524260, 0.134799, 0.003639, 0.162482,
0.640394, 0.930399, 0.050782, 0.432485, 0.988078,
0.082922, 0.563329, 0.865614, 0.333232, 0.259916}
};
lstm_golden_output_ = {
{
-0.00396806, 0.029352, -0.00279226, 0.0159977, -0.00835576,
-0.0211779, 0.0283512, -0.0114597, 0.00907307, -0.0244004,
-0.0152191, -0.0259063, 0.00914318, 0.00415118, 0.017147,
0.0134203, -0.0166936, 0.0381209, 0.000889694, 0.0143363,
-0.0328911, -0.0234288, 0.0333051, -0.012229, 0.0110322,
-0.0457725, -0.000832209, -0.0202817, 0.0327257, 0.0121308,
0.0155969, 0.0312091, -0.0213783, 0.0350169, 0.000324794,
0.0276012, -0.0263374, -0.0371449, 0.0446149, -0.0205474,
0.0103729, -0.0576349, -0.0150052, -0.0292043, 0.0376827,
0.0136115, 0.0243435, 0.0354492, -0.0189322, 0.0464512,
-0.00251373, 0.0225745, -0.0308346, -0.0317124, 0.0460407,
-0.0189395, 0.0149363, -0.0530162, -0.0150767, -0.0340193,
0.0286833, 0.00824207, 0.0264887, 0.0305169},
{
-0.013869, 0.0287268, -0.00334693, 0.00733398, -0.0287926,
-0.0186926, 0.0193662, -0.0115437, 0.00422612, -0.0345232,
0.00223253, -0.00957321, 0.0210624, 0.013331, 0.0150954,
0.02168, -0.0141913, 0.0322082, 0.00227024, 0.0260507,
-0.0188721, -0.0296489, 0.0399134, -0.0160509, 0.0116039,
-0.0447318, -0.0150515, -0.0277406, 0.0316596, 0.0118233,
0.0214762, 0.0293641, -0.0204549, 0.0450315, -0.00117378,
0.0167673, -0.0375007, -0.0238314, 0.038784, -0.0174034,
0.0131743, -0.0506589, -0.0048447, -0.0240239, 0.0325789,
0.00790065, 0.0220157, 0.0333314, -0.0264787, 0.0387855,
-0.000764675, 0.0217599, -0.037537, -0.0335206, 0.0431679,
-0.0211424, 0.010203, -0.062785, -0.00832363, -0.025181,
0.0412031, 0.0118723, 0.0239643, 0.0394009}};
}
};
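// Float black-box test: no CIFG, peephole connections and projection enabled,
// no cell or projection clipping.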
TEST_F(NoCifgPeepholeProjectionClippingUnidirectionalLstmTest,
LstmBlackBoxTest) {
const int n_batch = 2;
const int n_input = 5;
const int n_cell = 20;
const int n_output = 16;
const int sequence_length = 4;
UnidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/false, /*use_peephole=*/true,
      /*use_projection_weights=*/true,
      /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
      {
          {sequence_length, n_batch, n_input},  // input tensor
          {n_cell, n_input},   // input_to_input_weights tensor
          {n_cell, n_input},   // input_to_forget_weights tensor
          {n_cell, n_input},   // input_to_cell_weights tensor
          {n_cell, n_input},   // input_to_output_weights tensor
          {n_cell, n_output},  // recurrent_to_input_weights tensor
          {n_cell, n_output},  // recurrent_to_forget_weights tensor
          {n_cell, n_output},  // recurrent_to_cell_weights tensor
          {n_cell, n_output},  // recurrent_to_output_weights tensor
          {n_cell},  // cell_to_input_weights tensor
          {n_cell},  // cell_to_forget_weights tensor
          {n_cell},  // cell_to_output_weights tensor
          {n_cell},  // input_gate_bias tensor
          {n_cell},  // forget_gate_bias tensor
          {n_cell},  // cell_gate_bias tensor
          {n_cell},  // output_gate_bias tensor
          {n_output, n_cell},  // projection_weights tensor
          {0},                 // projection_bias tensor (unused here)
          {n_batch, n_output},  // activation_state tensor
          {n_batch, n_cell},    // cell_state tensor
      });
lstm.SetInputToInputWeights(input_to_input_weights_);
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetInputGateBias(input_gate_bias_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
lstm.SetCellToInputWeights(cell_to_input_weights_);
lstm.SetCellToForgetWeights(cell_to_forget_weights_);
lstm.SetCellToOutputWeights(cell_to_output_weights_);
lstm.SetProjectionWeights(projection_weights_);
VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm);
}
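// Hybrid variant of the test above with weights quantized to uint8; the looser
// tolerance (0.00467) absorbs the quantization error.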
TEST_P(NoCifgPeepholeProjectionClippingUnidirectionalLstmTest,
HybridLstmBlackBoxTestUint8) {
const int n_batch = 2;
const int n_input = 5;
const int n_cell = 20;
const int n_output = 16;
const int sequence_length = 4;
if (GetParam()) {
return;
}
HybridUnidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/false, /*use_peephole=*/true,
      /*use_projection_weights=*/true,
      /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
      {  // tensor shapes, in the same order as in LstmBlackBoxTest above
{sequence_length, n_batch, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_output, n_cell},
{0},
{n_batch, n_output},
{n_batch, n_cell},
},
      /*tensor_type=*/TensorType_UINT8,
      GetParam());  // test parameter; plausibly toggles asymmetric input quantization
lstm.SetInputToInputWeights(input_to_input_weights_);
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetInputGateBias(input_gate_bias_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
lstm.SetCellToInputWeights(cell_to_input_weights_);
lstm.SetCellToForgetWeights(cell_to_forget_weights_);
lstm.SetCellToOutputWeights(cell_to_output_weights_);
lstm.SetProjectionWeights(projection_weights_);
VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm, 0.00467);
}
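// Same hybrid black-box test, this time with int8-quantized weights.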
TEST_P(NoCifgPeepholeProjectionClippingUnidirectionalLstmTest,
HybridLstmBlackBoxTestInt8) {
if (GetParam()) {
return;
}
const int n_batch = 2;
const int n_input = 5;
const int n_cell = 20;
const int n_output = 16;
const int sequence_length = 4;
HybridUnidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/false, /*use_peephole=*/true,
      /*use_projection_weights=*/true,
      /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
      {  // tensor shapes, in the same order as in LstmBlackBoxTest above
{sequence_length, n_batch, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_output, n_cell},
{0},
{n_batch, n_output},
{n_batch, n_cell},
},
      /*tensor_type=*/TensorType_INT8,
      GetParam());  // test parameter; plausibly toggles asymmetric input quantization
lstm.SetInputToInputWeights(input_to_input_weights_);
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetInputGateBias(input_gate_bias_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
lstm.SetCellToInputWeights(cell_to_input_weights_);
lstm.SetCellToForgetWeights(cell_to_forget_weights_);
lstm.SetCellToOutputWeights(cell_to_output_weights_);
lstm.SetProjectionWeights(projection_weights_);
VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm, 0.00467);
}
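// Fixture matching the previous one, except that it also supplies a projection
// bias (see projection_bias_ below).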
class NoCifgPeepholeProjectionAndBiasClippingUnidirectionalLstmTest
: public BaseUnidirectionalLstmTest {
void SetUp() override {
input_to_input_weights_ = {
0.021393683, 0.06124551, 0.046905167, -0.014657677, -0.03149463,
0.09171803, 0.14647801, 0.10797193, -0.0057968358, 0.0019193048,
-0.2726754, 0.10154029, -0.018539885, 0.080349885, -0.10262385,
-0.022599787, -0.09121155, -0.008675967, -0.045206103, -0.0821282,
-0.008045952, 0.015478081, 0.055217247, 0.038719587, 0.044153627,
-0.06453243, 0.05031825, -0.046935108, -0.008164439, 0.014574226,
-0.1671009, -0.15519552, -0.16819797, -0.13971269, -0.11953059,
0.25005487, -0.22790983, 0.009855087, -0.028140958, -0.11200698,
0.11295408, -0.0035217577, 0.054485075, 0.05184695, 0.064711206,
0.10989193, 0.11674786, 0.03490607, 0.07727357, 0.11390585,
-0.1863375, -0.1034451, -0.13945189, -0.049401227, -0.18767063,
0.042483903, 0.14233552, 0.13832581, 0.18350165, 0.14545603,
-0.028545704, 0.024939531, 0.050929718, 0.0076203286, -0.0029723682,
-0.042484224, -0.11827596, -0.09171104, -0.10808628, -0.16327988,
-0.2273378, -0.0993647, -0.017155107, 0.0023917493, 0.049272764,
0.0038534778, 0.054764505, 0.089753784, 0.06947234, 0.08014476,
-0.04544234, -0.0497073, -0.07135631, -0.048929106, -0.004042012,
-0.009284026, 0.018042054, 0.0036860977, -0.07427302, -0.11434604,
-0.018995456, 0.031487543, 0.012834908, 0.019977754, 0.044256654,
-0.39292613, -0.18519334, -0.11651281, -0.06809892, 0.011373677};
input_to_forget_weights_ = {
-0.0018401089, -0.004852237, 0.03698424, 0.014181704,
0.028273236, -0.016726194, -0.05249759, -0.10204261,
0.00861066, -0.040979505, -0.009899187, 0.01923892,
-0.028177269, -0.08535103, -0.14585495, 0.10662567,
-0.01909731, -0.017883534, -0.0047269356, -0.045103323,
0.0030784295, 0.076784775, 0.07463696, 0.094531395,
0.0814421, -0.12257899, -0.033945758, -0.031303465,
0.045630626, 0.06843887, -0.13492945, -0.012480007,
-0.0811829, -0.07224499, -0.09628791, 0.045100946,
0.0012300825, 0.013964662, 0.099372394, 0.02543059,
0.06958324, 0.034257296, 0.0482646, 0.06267997,
0.052625068, 0.12784666, 0.07077897, 0.025725935,
0.04165009, 0.07241905, 0.018668644, -0.037377294,
-0.06277783, -0.08833636, -0.040120605, -0.011405586,
-0.007808335, -0.010301386, -0.005102167, 0.027717464,
0.05483423, 0.11449111, 0.11289652, 0.10939839,
0.13396506, -0.08402166, -0.01901462, -0.044678304,
-0.07720565, 0.014350063, -0.11757958, -0.0652038,
-0.08185733, -0.076754324, -0.092614375, 0.10405491,
0.052960336, 0.035755895, 0.035839386, -0.012540553,
0.036881298, 0.02913376, 0.03420159, 0.05448447,
-0.054523353, 0.02582715, 0.02327355, -0.011857179,
-0.0011980024, -0.034641717, -0.026125094, -0.17582615,
-0.15923657, -0.27486774, -0.0006143371, 0.0001771948,
-8.470171e-05, 0.02651807, 0.045790765, 0.06956496};
input_to_cell_weights_ = {
-0.04580283, -0.09549462, -0.032418985, -0.06454633,
-0.043528453, 0.043018587, -0.049152344, -0.12418144,
-0.078985475, -0.07596889, 0.019484362, -0.11434962,
-0.0074034138, -0.06314844, -0.092981495, 0.0062155537,
-0.025034338, -0.0028890965, 0.048929527, 0.06235075,
0.10665918, -0.032036792, -0.08505916, -0.10843358,
-0.13002433, -0.036816437, -0.02130134, -0.016518239,
0.0047691227, -0.0025825808, 0.066017866, 0.029991534,
-0.10652836, -0.1037554, -0.13056071, -0.03266643,
-0.033702414, -0.006473424, -0.04611692, 0.014419339,
-0.025174323, 0.0396852, 0.081777506, 0.06157468,
0.10210095, -0.009658194, 0.046511717, 0.03603906,
0.0069369148, 0.015960095, -0.06507666, 0.09551598,
0.053568836, 0.06408714, 0.12835667, -0.008714329,
-0.20211966, -0.12093674, 0.029450472, 0.2849013,
-0.029227901, 0.1164364, -0.08560263, 0.09941786,
-0.036999565, -0.028842626, -0.0033637602, -0.017012902,
-0.09720865, -0.11193351, -0.029155117, -0.017936034,
-0.009768936, -0.04223324, -0.036159635, 0.06505112,
-0.021742892, -0.023377212, -0.07221364, -0.06430552,
0.05453865, 0.091149814, 0.06387331, 0.007518393,
0.055960953, 0.069779344, 0.046411168, 0.10509911,
0.07463894, 0.0075130584, 0.012850982, 0.04555431,
0.056955688, 0.06555285, 0.050801456, -0.009862683,
0.00826772, -0.026555609, -0.0073611983, -0.0014897042};
input_to_output_weights_ = {
-0.0998932, -0.07201956, -0.052803773, -0.15629593, -0.15001918,
-0.07650751, 0.02359855, -0.075155355, -0.08037709, -0.15093534,
0.029517552, -0.04751393, 0.010350531, -0.02664851, -0.016839722,
-0.023121163, 0.0077019283, 0.012851257, -0.05040649, -0.0129761,
-0.021737747, -0.038305793, -0.06870586, -0.01481247, -0.001285394,
0.10124236, 0.083122835, 0.053313006, -0.062235646, -0.075637154,
-0.027833903, 0.029774971, 0.1130802, 0.09218906, 0.09506135,
-0.086665764, -0.037162706, -0.038880914, -0.035832845, -0.014481564,
-0.09825003, -0.12048569, -0.097665586, -0.05287633, -0.0964047,
-0.11366429, 0.035777505, 0.13568819, 0.052451383, 0.050649304,
0.05798951, -0.021852335, -0.099848844, 0.014740475, -0.078897946,
0.04974699, 0.014160473, 0.06973932, 0.04964942, 0.033364646,
0.08190124, 0.025535367, 0.050893165, 0.048514254, 0.06945813,
-0.078907564, -0.06707616, -0.11844508, -0.09986688, -0.07509403,
0.06263226, 0.14925587, 0.20188436, 0.12098451, 0.14639415,
0.0015017595, -0.014267382, -0.03417257, 0.012711468, 0.0028300495,
-0.024758482, -0.05098548, -0.0821182, 0.014225672, 0.021544158,
0.08949725, 0.07505268, -0.0020780868, 0.04908258, 0.06476295,
-0.022907063, 0.027562456, 0.040185735, 0.019567577, -0.015598739,
-0.049097303, -0.017121866, -0.083368234, -0.02332002, -0.0840956};
input_gate_bias_ = {0.02234832, 0.14757581, 0.18176508, 0.10380666,
0.053110216, -0.06928846, -0.13942584, -0.11816189,
0.19483899, 0.03652339, -0.10250295, 0.036714908,
-0.18426876, 0.036065217, 0.21810818, 0.02383196,
-0.043370757, 0.08690144, -0.04444982, 0.00030581196};
forget_gate_bias_ = {0.035185695, -0.042891346, -0.03032477, 0.23027696,
0.11098921, 0.15378423, 0.09263801, 0.09790885,
0.09508917, 0.061199076, 0.07665568, -0.015443159,
-0.03499149, 0.046190713, 0.08895977, 0.10899629,
0.40694186, 0.06030037, 0.012413437, -0.06108739};
cell_gate_bias_ = {-0.024379363, 0.0055531194, 0.23377132, 0.033463873,
-0.1483596, -0.10639995, -0.091433935, 0.058573797,
-0.06809782, -0.07889636, -0.043246906, -0.09829136,
-0.4279842, 0.034901652, 0.18797937, 0.0075234566,
0.016178843, 0.1749513, 0.13975595, 0.92058027};
output_gate_bias_ = {0.046159424, -0.0012809046, 0.03563469, 0.12648113,
0.027195795, 0.35373217, -0.018957434, 0.008907322,
-0.0762701, 0.12018895, 0.04216877, 0.0022856654,
0.040952638, 0.3147856, 0.08225149, -0.057416286,
-0.14995944, -0.008040261, 0.13208859, 0.029760877};
recurrent_to_input_weights_ = {
-0.001374326, -0.078856036, 0.10672688, 0.029162422,
-0.11585556, 0.02557986, -0.13446963, -0.035785314,
-0.01244275, 0.025961924, -0.02337298, -0.044228926,
-0.055839065, -0.046598054, -0.010546039, -0.06900766,
0.027239809, 0.022582639, -0.013296484, -0.05459212,
0.08981, -0.045407712, 0.08682226, -0.06867011,
-0.14390695, -0.02916037, 0.000996957, 0.091420636,
0.14283475, -0.07390571, -0.06402044, 0.062524505,
-0.093129106, 0.04860203, -0.08364217, -0.08119002,
0.009352075, 0.22920375, 0.0016303885, 0.11583097,
-0.13732095, 0.012405723, -0.07551853, 0.06343048,
0.12162708, -0.031923793, -0.014335606, 0.01790974,
-0.10650317, -0.0724401, 0.08554849, -0.05727212,
0.06556731, -0.042729504, -0.043227166, 0.011683251,
-0.013082158, -0.029302018, -0.010899579, -0.062036745,
-0.022509435, -0.00964907, -0.01567329, 0.04260106,
-0.07787477, -0.11576462, 0.017356863, 0.048673786,
-0.017577527, -0.05527947, -0.082487635, -0.040137455,
-0.10820036, -0.04666372, 0.022746278, -0.07851417,
0.01068115, 0.032956902, 0.022433773, 0.0026891115,
0.08944216, -0.0685835, 0.010513544, 0.07228705,
0.02032331, -0.059686817, -0.0005566496, -0.086984694,
0.040414046, -0.1380399, 0.094208956, -0.05722982,
0.012092817, -0.04989123, -0.086576, -0.003399834,
-0.04696032, -0.045747425, 0.10091314, 0.048676282,
-0.029037097, 0.031399418, -0.0040285117, 0.047237843,
0.09504992, 0.041799378, -0.049185462, -0.031518843,
-0.10516937, 0.026374253, 0.10058866, -0.0033195973,
-0.041975245, 0.0073591834, 0.0033782164, -0.004325073,
-0.10167381, 0.042500053, -0.01447153, 0.06464186,
-0.017142897, 0.03312627, 0.009205989, 0.024138335,
-0.011337001, 0.035530265, -0.010912711, 0.0706555,
-0.005894094, 0.051841937, -0.1401738, -0.02351249,
0.0365468, 0.07590991, 0.08838724, 0.021681072,
-0.10086113, 0.019608743, -0.06195883, 0.077335775,
0.023646897, -0.095322326, 0.02233014, 0.09756986,
-0.048691444, -0.009579111, 0.07595467, 0.11480546,
-0.09801813, 0.019894179, 0.08502348, 0.004032281,
0.037211012, 0.068537936, -0.048005626, -0.091520436,
-0.028379958, -0.01556313, 0.06554592, -0.045599163,
-0.01672207, -0.020169014, -0.011877351, -0.20212261,
0.010889619, 0.0047078193, 0.038385306, 0.08540671,
-0.017140968, -0.0035865551, 0.016678626, 0.005633034,
0.015963363, 0.00871737, 0.060130805, 0.028611384,
0.10109069, -0.015060172, -0.07894427, 0.06401885,
0.011584063, -0.024466386, 0.0047652307, -0.09041358,
0.030737216, -0.0046374933, 0.14215417, -0.11823516,
0.019899689, 0.006106124, -0.027092824, 0.0786356,
0.05052217, -0.058925, -0.011402121, -0.024987547,
-0.0013661642, -0.06832946, -0.015667673, -0.1083353,
-0.00096863037, -0.06988685, -0.053350925, -0.027275559,
-0.033664223, -0.07978348, -0.025200296, -0.017207067,
-0.058403496, -0.055697463, 0.005798788, 0.12965427,
-0.062582195, 0.0013350133, -0.10482091, 0.0379771,
0.072521195, -0.0029455067, -0.13797039, -0.03628521,
0.013806405, -0.017858358, -0.01008298, -0.07700066,
-0.017081132, 0.019358726, 0.0027079724, 0.004635139,
0.062634714, -0.02338735, -0.039547626, -0.02050681,
0.03385117, -0.083611414, 0.002862572, -0.09421313,
0.058618143, -0.08598433, 0.00972939, 0.023867095,
-0.053934585, -0.023203006, 0.07452513, -0.048767887,
-0.07314807, -0.056307215, -0.10433547, -0.06440842,
0.04328182, 0.04389765, -0.020006588, -0.09076438,
-0.11652589, -0.021705797, 0.03345259, -0.010329105,
-0.025767034, 0.013057034, -0.07316461, -0.10145612,
0.06358255, 0.18531723, 0.07759293, 0.12006465,
0.1305557, 0.058638252, -0.03393652, 0.09622831,
-0.16253184, -2.4580743e-06, 0.079869635, -0.070196845,
-0.005644518, 0.06857898, -0.12598175, -0.035084512,
0.03156317, -0.12794146, -0.031963028, 0.04692781,
0.030070418, 0.0071660685, -0.095516115, -0.004643372,
0.040170413, -0.062104587, -0.0037324072, 0.0554317,
0.08184801, -0.019164372, 0.06791302, 0.034257166,
-0.10307039, 0.021943003, 0.046745934, 0.0790918,
-0.0265588, -0.007824208, 0.042546265, -0.00977924,
-0.0002440307, -0.017384544, -0.017990116, 0.12252321,
-0.014512694, -0.08251313, 0.08861942, 0.13589665,
0.026351685, 0.012641483, 0.07466548, 0.044301085,
-0.045414884, -0.051112458, 0.03444247, -0.08502782,
-0.04106223, -0.028126027, 0.028473156, 0.10467447};
recurrent_to_cell_weights_ = {
-0.037322544, 0.018592842, 0.0056175636, -0.06253426,
0.055647098, -0.05713207, -0.05626563, 0.005559383,
0.03375411, -0.025757805, -0.088049285, 0.06017052,
-0.06570978, 0.007384076, 0.035123326, -0.07920549,
0.053676967, 0.044480428, -0.07663568, 0.0071805613,
0.08089997, 0.05143358, 0.038261272, 0.03339287,
-0.027673481, 0.044746667, 0.028349208, 0.020090483,
-0.019443132, -0.030755889, -0.0040000007, 0.04465846,
-0.021585021, 0.0031670958, 0.0053199246, -0.056117613,
-0.10893326, 0.076739706, -0.08509834, -0.027997585,
0.037871376, 0.01449768, -0.09002357, -0.06111149,
-0.046195522, 0.0422062, -0.005683705, -0.1253618,
-0.012925729, -0.04890792, 0.06985068, 0.037654128,
0.03398274, -0.004781977, 0.007032333, -0.031787455,
0.010868644, -0.031489216, 0.09525667, 0.013939797,
0.0058680447, 0.0167067, 0.02668468, -0.04797466,
-0.048885044, -0.12722108, 0.035304096, 0.06554885,
0.00972396, -0.039238118, -0.05159735, -0.11329045,
0.1613692, -0.03750952, 0.06529313, -0.071974665,
-0.11769596, 0.015524369, -0.0013754242, -0.12446318,
0.02786344, -0.014179351, 0.005264273, 0.14376344,
0.015983658, 0.03406988, -0.06939408, 0.040699873,
0.02111075, 0.09669095, 0.041345075, -0.08316494,
-0.07684199, -0.045768797, 0.032298047, -0.041805092,
0.0119405, 0.0061010392, 0.12652606, 0.0064572375,
-0.024950314, 0.11574242, 0.04508852, -0.04335324,
0.06760663, -0.027437469, 0.07216407, 0.06977076,
-0.05438599, 0.034033038, -0.028602652, 0.05346137,
0.043184172, -0.037189785, 0.10420091, 0.00882477,
-0.054019816, -0.074273005, -0.030617684, -0.0028467078,
0.024302477, -0.0038869337, 0.005332455, 0.0013399826,
0.04361412, -0.007001822, 0.09631092, -0.06702025,
-0.042049985, -0.035070654, -0.04103342, -0.10273396,
0.0544271, 0.037184782, -0.13150354, -0.0058036847,
-0.008264958, 0.042035464, 0.05891794, 0.029673764,
0.0063542654, 0.044788733, 0.054816857, 0.062257513,
-0.00093483756, 0.048938446, -0.004952862, -0.007730018,
-0.04043371, -0.017094059, 0.07229206, -0.023670016,
-0.052195564, -0.025616996, -0.01520939, 0.045104615,
-0.007376126, 0.003533447, 0.006570588, 0.056037236,
0.12436656, 0.051817212, 0.028532185, -0.08686856,
0.11868599, 0.07663395, -0.07323171, 0.03463402,
-0.050708205, -0.04458982, -0.11590894, 0.021273347,
0.1251325, -0.15313013, -0.12224372, 0.17228661,
0.023029093, 0.086124025, 0.006445803, -0.03496501,
0.028332196, 0.04449512, -0.042436164, -0.026587414,
-0.006041347, -0.09292539, -0.05678812, 0.03897832,
0.09465633, 0.008115513, -0.02171956, 0.08304309,
0.071401566, 0.019622514, 0.032163795, -0.004167056,
0.02295182, 0.030739572, 0.056506045, 0.004612461,
0.06524936, 0.059999723, 0.046395954, -0.0045512207,
-0.1335546, -0.030136576, 0.11584653, -0.014678886,
0.0020118146, -0.09688814, -0.0790206, 0.039770417,
-0.0329582, 0.07922767, 0.029322514, 0.026405897,
0.04207835, -0.07073373, 0.063781224, 0.0859677,
-0.10925287, -0.07011058, 0.048005477, 0.03438226,
-0.09606514, -0.006669445, -0.043381985, 0.04240257,
-0.06955775, -0.06769346, 0.043903265, -0.026784198,
-0.017840602, 0.024307009, -0.040079936, -0.019946516,
0.045318738, -0.12233574, 0.026170589, 0.0074471775,
0.15978073, 0.10185836, 0.10298046, -0.015476589,
-0.039390966, -0.072174534, 0.0739445, -0.1211869,
-0.0347889, -0.07943156, 0.014809798, -0.12412325,
-0.0030663363, 0.039695457, 0.0647603, -0.08291318,
-0.018529687, -0.004423833, 0.0037507233, 0.084633216,
-0.01514876, -0.056505352, -0.012800942, -0.06994386,
0.012962922, -0.031234352, 0.07029052, 0.016418684,
0.03618972, 0.055686004, -0.08663945, -0.017404709,
-0.054761406, 0.029065743, 0.052404847, 0.020238016,
0.0048197987, -0.0214882, 0.07078733, 0.013016777,
0.06262858, 0.009184685, 0.020785125, -0.043904778,
-0.0270329, -0.03299152, -0.060088247, -0.015162964,
-0.001828936, 0.12642565, -0.056757294, 0.013586685,
0.09232601, -0.035886683, 0.06000002, 0.05229691,
-0.052580316, -0.082029596, -0.010794592, 0.012947712,
-0.036429964, -0.085508935, -0.13127148, -0.017744139,
0.031502828, 0.036232427, -0.031581745, 0.023051167,
-0.05325106, -0.03421577, 0.028793324, -0.034633752,
-0.009881397, -0.043551125, -0.018609839, 0.0019097115,
-0.008799762, 0.056595087, 0.0022273948, 0.055752404};
recurrent_to_forget_weights_ = {
-0.057784554, -0.026057621, -0.068447545, -0.022581743,
0.14811787, 0.10826372, 0.09471067, 0.03987225,
-0.0039523416, 0.00030638507, 0.053185795, 0.10572994,
0.08414449, -0.022036452, -0.00066928595, -0.09203576,
0.032950465, -0.10985798, -0.023809856, 0.0021431844,
-0.02196096, -0.00326074, 0.00058621005, -0.074678116,
-0.06193199, 0.055729095, 0.03736828, 0.020123724,
0.061878487, -0.04729229, 0.034919553, -0.07585433,
-0.04421272, -0.044019096, 0.085488975, 0.04058006,
-0.06890133, -0.030951202, -0.024628663, -0.07672815,
0.034293607, 0.08556707, -0.05293577, -0.033561368,
-0.04899627, 0.0241671, 0.015736353, -0.095442444,
-0.029564252, 0.016493602, -0.035026584, 0.022337519,
-0.026871363, 0.004780428, 0.0077918363, -0.03601621,
0.016435321, -0.03263031, -0.09543275, -0.047392778,
0.013454138, 0.028934088, 0.01685226, -0.086110644,
-0.046250615, -0.01847454, 0.047608484, 0.07339695,
0.034546845, -0.04881143, 0.009128804, -0.08802852,
0.03761666, 0.008096139, -0.014454086, 0.014361001,
-0.023502491, -0.0011840804, -0.07607001, 0.001856849,
-0.06509276, -0.006021153, -0.08570962, -0.1451793,
0.060212336, 0.055259194, 0.06974018, 0.049454916,
-0.027794661, -0.08077226, -0.016179763, 0.1169753,
0.17213494, -0.0056326236, -0.053934924, -0.0124349,
-0.11520337, 0.05409887, 0.088759385, 0.0019655675,
0.0042065294, 0.03881498, 0.019844765, 0.041858196,
-0.05695512, 0.047233116, 0.038937137, -0.06542224,
0.014429736, -0.09719407, 0.13908425, -0.05379757,
0.012321099, 0.082840554, -0.029899208, 0.044217527,
0.059855383, 0.07711018, -0.045319796, 0.0948846,
-0.011724666, -0.0033288454, -0.033542685, -0.04764985,
-0.13873616, 0.040668588, 0.034832682, -0.015319203,
-0.018715994, 0.046002675, 0.0599172, -0.043107376,
0.0294216, -0.002314414, -0.022424703, 0.0030315618,
0.0014641669, 0.0029166266, -0.11878115, 0.013738511,
0.12375372, -0.0006038222, 0.029104086, 0.087442465,
0.052958444, 0.07558703, 0.04817258, 0.044462286,
-0.015213451, -0.08783778, -0.0561384, -0.003008196,
0.047060397, -0.002058388, 0.03429439, -0.018839769,
0.024734668, 0.024614193, -0.042046934, 0.09597743,
-0.0043254104, 0.04320769, 0.0064070094, -0.0019131786,
-0.02558259, -0.022822596, -0.023273505, -0.02464396,
-0.10991725, -0.006240552, 0.0074488563, 0.024044557,
0.04383914, -0.046476185, 0.028658995, 0.060410924,
0.050786525, 0.009452605, -0.0073054377, -0.024810238,
0.0052906186, 0.0066939713, -0.0020913032, 0.014515517,
0.015898481, 0.021362653, -0.030262267, 0.016587038,
-0.011442813, 0.041154444, -0.007631438, -0.03423484,
-0.010977775, 0.036152758, 0.0066366293, 0.11915515,
0.02318443, -0.041350313, 0.021485701, -0.10906167,
-0.028218046, -0.00954771, 0.020531068, -0.11995105,
-0.03672871, 0.024019798, 0.014255957, -0.05221243,
-0.00661567, -0.04630967, 0.033188973, 0.10107534,
-0.014027541, 0.030796422, -0.10270911, -0.035999842,
0.15443139, 0.07684145, 0.036571592, -0.035900835,
-0.0034699554, 0.06209149, 0.015920248, -0.031122351,
-0.03858649, 0.01849943, 0.13872518, 0.01503974,
0.069941424, -0.06948533, -0.0088794185, 0.061282158,
-0.047401894, 0.03100163, -0.041533746, -0.10430945,
0.044574402, -0.01425562, -0.024290353, 0.034563623,
0.05866852, 0.023947537, -0.09445152, 0.035450947,
0.02247216, -0.0042998926, 0.061146557, -0.10250651,
0.020881841, -0.06747029, 0.10062043, -0.0023941975,
0.03532124, -0.016341697, 0.09685456, -0.016764693,
0.051808182, 0.05875331, -0.04536488, 0.001626336,
-0.028892258, -0.01048663, -0.009793449, -0.017093895,
0.010987891, 0.02357273, -0.00010856845, 0.0099760275,
-0.001845119, -0.03551521, 0.0018358806, 0.05763657,
-0.01769146, 0.040995963, 0.02235177, -0.060430344,
0.11475477, -0.023854522, 0.10071741, 0.0686208,
-0.014250481, 0.034261297, 0.047418304, 0.08562733,
-0.030519066, 0.0060542435, 0.014653856, -0.038836084,
0.04096551, 0.032249358, -0.08355519, -0.026823482,
0.056386515, -0.010401743, -0.028396193, 0.08507674,
0.014410365, 0.020995233, 0.17040324, 0.11511526,
0.02459721, 0.0066619175, 0.025853224, -0.023133837,
-0.081302024, 0.017264642, -0.009585969, 0.09491168,
-0.051313367, 0.054532815, -0.014298593, 0.10657464,
0.007076659, 0.10964551, 0.0409152, 0.008275321,
-0.07283536, 0.07937492, 0.04192024, -0.1075027};
recurrent_to_output_weights_ = {
0.025825322, -0.05813119, 0.09495884, -0.045984812,
-0.01255415, -0.0026479573, -0.08196161, -0.054914974,
-0.0046604523, -0.029587349, -0.044576716, -0.07480124,
-0.082868785, 0.023254942, 0.027502948, -0.0039728214,
-0.08683098, -0.08116779, -0.014675607, -0.037924774,
-0.023314456, -0.007401714, -0.09255757, 0.029460307,
-0.08829125, -0.005139627, -0.08989442, -0.0555066,
0.13596267, -0.025062224, -0.048351806, -0.03850004,
0.07266485, -0.022414139, 0.05940088, 0.075114764,
0.09597592, -0.010211725, -0.0049794707, -0.011523867,
-0.025980417, 0.072999895, 0.11091378, -0.081685916,
0.014416728, 0.043229222, 0.034178585, -0.07530371,
0.035837382, -0.085607, -0.007721233, -0.03287832,
-0.043848954, -0.06404588, -0.06632928, -0.073643476,
0.008214239, -0.045984086, 0.039764922, 0.03474462,
0.060612556, -0.080590084, 0.049127717, 0.04151091,
-0.030063879, 0.008801774, -0.023021035, -0.019558564,
0.05158114, -0.010947698, -0.011825728, 0.0075720972,
0.0699727, -0.0039981045, 0.069350146, 0.08799282,
0.016156472, 0.035502106, 0.11695009, 0.006217345,
0.13392477, -0.037875112, 0.025745004, 0.08940699,
-0.00924166, 0.0046702605, -0.036598757, -0.08811812,
0.10522024, -0.032441203, 0.008176899, -0.04454919,
0.07058152, 0.0067963637, 0.039206743, 0.03259838,
0.03725492, -0.09515802, 0.013326398, -0.052055415,
-0.025676316, 0.03198509, -0.015951829, -0.058556724,
0.036879618, 0.043357447, 0.028362012, -0.05908629,
0.0059240665, -0.04995891, -0.019187413, 0.0276265,
-0.01628143, 0.0025863599, 0.08800015, 0.035250366,
-0.022165963, -0.07328642, -0.009415526, -0.07455109,
0.11690406, 0.0363299, 0.07411125, 0.042103454,
-0.009660886, 0.019076364, 0.018299393, -0.046004917,
0.08891175, 0.0431396, -0.026327137, -0.051502608,
0.08979574, -0.051670972, 0.04940282, -0.07491107,
-0.021240504, 0.022596184, -0.034280192, 0.060163025,
-0.058211457, -0.051837247, -0.01349775, -0.04639988,
-0.035936575, -0.011681591, 0.064818054, 0.0073146066,
-0.021745546, -0.043124277, -0.06471268, -0.07053354,
-0.029321948, -0.05330136, 0.016933719, -0.053782392,
0.13747959, -0.1361751, -0.11569455, 0.0033329215,
0.05693899, -0.053219706, 0.063698, 0.07977434,
-0.07924483, 0.06936997, 0.0034815092, -0.007305279,
-0.037325785, -0.07251102, -0.033633437, -0.08677009,
0.091591336, -0.14165086, 0.021752775, 0.019683983,
0.0011612234, -0.058154266, 0.049996935, 0.0288841,
-0.0024567875, -0.14345716, 0.010955264, -0.10234828,
0.1183656, -0.0010731248, -0.023590032, -0.072285876,
-0.0724771, -0.026382286, -0.0014920527, 0.042667855,
0.0018776858, 0.02986552, 0.009814309, 0.0733756,
0.12289186, 0.018043943, -0.0458958, 0.049412545,
0.033632483, 0.05495232, 0.036686596, -0.013781798,
-0.010036754, 0.02576849, -0.08307328, 0.010112348,
0.042521734, -0.05869831, -0.071689695, 0.03876447,
-0.13275425, -0.0352966, -0.023077697, 0.10285965,
0.084736146, 0.15568255, -0.00040734606, 0.027835453,
-0.10292561, -0.032401145, 0.10053256, -0.026142767,
-0.08271222, -0.0030240538, -0.016368777, 0.1070414,
0.042672627, 0.013456989, -0.0437609, -0.022309763,
0.11576483, 0.04108048, 0.061026827, -0.0190714,
-0.0869359, 0.037901703, 0.0610107, 0.07202949,
0.01675338, 0.086139716, -0.08795751, -0.014898893,
-0.023771819, -0.01965048, 0.007955471, -0.043740474,
0.03346837, -0.10549954, 0.090567775, 0.042013682,
-0.03176985, 0.12569028, -0.02421228, -0.029526481,
0.023851605, 0.031539805, 0.05292009, -0.02344001,
-0.07811758, -0.08834428, 0.10094801, 0.16594367,
-0.06861939, -0.021256343, -0.041093912, -0.06669611,
0.035498552, 0.021757556, -0.09302526, -0.015403468,
-0.06614931, -0.051798206, -0.013874718, 0.03630673,
0.010412845, -0.08077351, 0.046185967, 0.0035662893,
0.03541868, -0.094149634, -0.034814864, 0.003128424,
-0.020674974, -0.03944324, -0.008110165, -0.11113267,
0.08484226, 0.043586485, 0.040582247, 0.0968012,
-0.065249965, -0.028036479, 0.0050708856, 0.0017462453,
0.0326779, 0.041296225, 0.09164146, -0.047743853,
-0.015952192, -0.034451712, 0.084197424, -0.05347844,
-0.11768019, 0.085926116, -0.08251791, -0.045081906,
0.0948852, 0.068401024, 0.024856757, 0.06978981,
-0.057309967, -0.012775832, -0.0032452994, 0.01977615,
-0.041040014, -0.024264973, 0.063464895, 0.05431621,
};
cell_to_input_weights_ = {
0.040369894, 0.030746894, 0.24704495, 0.018586371, -0.037586458,
-0.15312155, -0.11812848, -0.11465643, 0.20259799, 0.11418174,
-0.10116027, -0.011334949, 0.12411352, -0.076769054, -0.052169047,
0.21198851, -0.38871562, -0.09061183, -0.09683246, -0.21929175};
cell_to_forget_weights_ = {
-0.01998659, -0.15568835, -0.24248174, -0.012770197, 0.041331276,
-0.072311886, -0.052123554, -0.0066330447, -0.043891653, 0.036225766,
-0.047248036, 0.021479502, 0.033189066, 0.11952997, -0.020432774,
0.64658105, -0.06650122, -0.03467612, 0.095340036, 0.23647355};
cell_to_output_weights_ = {
0.08286371, -0.08261836, -0.51210177, 0.002913762, 0.17764764,
-0.5495371, -0.08460716, -0.24552552, 0.030037103, 0.04123544,
-0.11940523, 0.007358328, 0.1890978, 0.4833202, -0.34441817,
0.36312827, -0.26375428, 0.1457655, -0.19724406, 0.15548733};
projection_weights_ = {
-0.009802181, 0.09401916, 0.0717386, -0.13895074,
0.09641832, 0.060420845, 0.08539281, 0.054285463,
0.061395317, 0.034448683, -0.042991187, 0.019801661,
-0.16840284, -0.015726732, -0.23041931, -0.024478018,
-0.10959692, -0.013875541, 0.18600968, -0.061274476,
0.0138165, -0.08160894, -0.07661644, 0.032372914,
0.16169067, 0.22465782, -0.03993472, -0.004017731,
0.08633481, -0.28869787, 0.08682067, 0.17240396,
0.014975425, 0.056431185, 0.031037588, 0.16702051,
0.0077946745, 0.15140012, 0.29405436, 0.120285,
-0.188994, -0.027265169, 0.043389652, -0.022061434,
0.014777949, -0.20203483, 0.094781205, 0.19100232,
0.13987629, -0.036132768, -0.06426278, -0.05108664,
0.13221376, 0.009441198, -0.16715929, 0.15859416,
-0.040437475, 0.050779544, -0.022187516, 0.012166504,
0.027685808, -0.07675938, -0.0055694645, -0.09444123,
0.0046453946, 0.050794356, 0.10770313, -0.20790008,
-0.07149004, -0.11425117, 0.008225835, -0.035802525,
0.14374903, 0.15262283, 0.048710253, 0.1847461,
-0.007487823, 0.11000021, -0.09542012, 0.22619456,
-0.029149994, 0.08527916, 0.009043713, 0.0042746216,
0.016261552, 0.022461696, 0.12689082, -0.043589946,
-0.12035478, -0.08361797, -0.050666027, -0.1248618,
-0.1275799, -0.071875185, 0.07377272, 0.09944291,
-0.18897448, -0.1593054, -0.06526116, -0.040107165,
-0.004618631, -0.067624845, -0.007576253, 0.10727444,
0.041546922, -0.20424393, 0.06907816, 0.050412357,
0.00724631, 0.039827548, 0.12449835, 0.10747581,
0.13708383, 0.09134148, -0.12617786, -0.06428341,
0.09956831, 0.1208086, -0.14676677, -0.0727722,
0.1126304, 0.010139365, 0.015571211, -0.038128063,
0.022913318, -0.042050496, 0.16842307, -0.060597885,
0.10531834, -0.06411776, -0.07451711, -0.03410368,
-0.13393489, 0.06534304, 0.003620307, 0.04490757,
0.05970546, 0.05197996, 0.02839995, 0.10434969,
-0.013699693, -0.028353551, -0.07260381, 0.047201227,
-0.024575593, -0.036445823, 0.07155557, 0.009672501,
-0.02328883, 0.009533515, -0.03606021, -0.07421458,
-0.028082801, -0.2678904, -0.13221288, 0.18419984,
-0.13012612, -0.014588381, -0.035059117, -0.04824723,
0.07830115, -0.056184657, 0.03277091, 0.025466874,
0.14494097, -0.12522776, -0.098633975, -0.10766018,
-0.08317623, 0.08594209, 0.07749552, 0.039474737,
0.1776665, -0.07409566, -0.0477268, 0.29323658,
0.10801441, 0.1154011, 0.013952499, 0.10739139,
0.10708251, -0.051456142, 0.0074137426, -0.10430189,
0.10034707, 0.045594677, 0.0635285, -0.0715442,
-0.089667566, -0.10811871, 0.00026344223, 0.08298446,
-0.009525053, 0.006585689, -0.24567553, -0.09450807,
0.09648481, 0.026996298, -0.06419476, -0.04752702,
-0.11063944, -0.23441927, -0.17608605, -0.052156363,
0.067035615, 0.19271925, -0.0032889997, -0.043264326,
0.09663576, -0.057112187, -0.10100678, 0.0628376,
0.04447668, 0.017961001, -0.10094388, -0.10190601,
0.18335468, 0.10494553, -0.052095775, -0.0026118709,
0.10539724, -0.04383912, -0.042349473, 0.08438151,
-0.1947263, 0.02251204, 0.11216432, -0.10307853,
0.17351969, -0.039091777, 0.08066188, -0.00561982,
0.12633002, 0.11335965, -0.0088127935, -0.019777594,
0.06864014, -0.059751723, 0.016233567, -0.06894641,
-0.28651384, -0.004228674, 0.019708522, -0.16305895,
-0.07468996, -0.0855457, 0.099339016, -0.07580735,
-0.13775392, 0.08434318, 0.08330512, -0.12131499,
0.031935584, 0.09180414, -0.08876437, -0.08049874,
0.008753825, 0.03498998, 0.030215185, 0.03907079,
0.089751154, 0.029194152, -0.03337423, -0.019092513,
0.04331237, 0.04299654, -0.036394123, -0.12915532,
0.09793732, 0.07512415, -0.11319543, -0.032502122,
0.15661901, 0.07671967, -0.005491124, -0.19379048,
-0.218606, 0.21448623, 0.017840758, 0.1416943,
-0.07051762, 0.19488361, 0.02664691, -0.18104725,
-0.09334311, 0.15026465, -0.15493552, -0.057762887,
-0.11604192, -0.262013, -0.01391798, 0.012185008,
0.11156489, -0.07483202, 0.06693364, -0.26151478,
0.046425626, 0.036540434, -0.16435726, 0.17338543,
-0.21401681, -0.11385144, -0.08283257, -0.069031075,
0.030635102, 0.010969227, 0.11109743, 0.010919218,
0.027526086, 0.13519906, 0.01891392, -0.046839405,
-0.040167913, 0.017953383, -0.09700955, 0.0061885654,
-0.07000971, 0.026893595, -0.038844477, 0.14543656};
projection_bias_ = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,
0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6};
lstm_input_ = {
{
0.787926, 0.151646, 0.071352, 0.118426, 0.458058,
0.596268, 0.998386, 0.568695, 0.864524, 0.571277,
0.073204, 0.296072, 0.743333, 0.069199, 0.045348,
0.867394, 0.291279, 0.013714, 0.482521, 0.626339},
{
0.295743, 0.544053, 0.690064, 0.858138, 0.497181,
0.642421, 0.524260, 0.134799, 0.003639, 0.162482,
0.640394, 0.930399, 0.050782, 0.432485, 0.988078,
0.082922, 0.563329, 0.865614, 0.333232, 0.259916}
};
lstm_golden_output_ = {
{
0.0960319489, 0.229351997, 0.297207743, 0.415997744, 0.491644233,
0.578822136, 0.728351235, 0.788540304, 0.909073055, 0.975599587,
1.08478093, 1.17409372, 1.30914319, 1.4041512, 1.51714694,
1.61342025, 0.0634541437, 0.190279216, 0.317923307, 0.415168911,
0.458113253, 0.609743774, 0.731511116, 0.795806408, 0.876155913,
0.960330188, 1.12396312, 1.22149014, 1.33917773, 1.43213499,
1.54139447, 1.65451813, 0.0485293195, 0.160991609, 0.337073475,
0.428976893, 0.459505379, 0.617044866, 0.743735075, 0.790821671,
0.85271728, 0.946818829, 1.12779701, 1.23345077, 1.35309088,
1.44595909, 1.56173062, 1.67839324, 0.0445971154, 0.156434938,
0.341761589, 0.425259203, 0.449760497, 0.633765697, 0.745093822,
0.791106999, 0.84820503, 0.952787101, 1.13438797, 1.24063754,
1.34668994, 1.44879568, 1.57038593, 1.67956686},
{
0.0861309841, 0.228726774, 0.296653062, 0.40733397, 0.47120741,
0.581307411, 0.719366193, 0.788456261, 0.904226124, 0.965476751,
1.10223258, 1.19042683, 1.32106233, 1.41333091, 1.51509535,
1.62168002, 0.0652779415, 0.18218407, 0.324066937, 0.42611438,
0.47292757, 0.602282405, 0.739310443, 0.791508496, 0.870626807,
0.955534995, 1.10976851, 1.21598971, 1.34197009, 1.43256509,
1.54804492, 1.65581059, 0.0492607877, 0.169714347, 0.332315415,
0.419173867, 0.44699502, 0.630063772, 0.737177074, 0.792844594,
0.858417571, 0.956391335, 1.13453305, 1.23976779, 1.34693861,
1.4410423, 1.55988359, 1.67204297, 0.0390465111, 0.15099439,
0.3439475, 0.424439192, 0.444207728, 0.632501483, 0.742233515,
0.791400731, 0.845713973, 0.944575012, 1.14116096, 1.24791968,
1.35954499, 1.45086145, 1.56633317, 1.68943977}};
}
};
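// Black-box test with the projection bias enabled: note use_projection_bias
// set to true and the non-empty {n_output} projection-bias shape below.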
TEST_F(NoCifgPeepholeProjectionAndBiasClippingUnidirectionalLstmTest,
LstmBlackBoxTest) {
const int n_batch = 2;
const int n_input = 5;
const int n_cell = 20;
const int n_output = 16;
const int sequence_length = 4;
UnidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/false, /*use_peephole=*/true,
      /*use_projection_weights=*/true,
      /*use_projection_bias=*/true,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
      {  // tensor shapes, in the same order as in LstmBlackBoxTest above
{sequence_length, n_batch, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_output, n_cell},
          {n_output},  // projection_bias tensor (present in this test)
{n_batch, n_output},
{n_batch, n_cell},
});
lstm.SetInputToInputWeights(input_to_input_weights_);
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetInputGateBias(input_gate_bias_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
lstm.SetCellToInputWeights(cell_to_input_weights_);
lstm.SetCellToForgetWeights(cell_to_forget_weights_);
lstm.SetCellToOutputWeights(cell_to_output_weights_);
lstm.SetProjectionWeights(projection_weights_);
lstm.SetProjectionBias(projection_bias_);
VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm);
}
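// Wrapper that builds the float model with layer normalization turned on. The
// weights_type argument is accepted for interface parity but the base model is
// always constructed with float weights here.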
class LayerNormUnidirectionalLSTMOpModel : public UnidirectionalLSTMOpModel {
public:
LayerNormUnidirectionalLSTMOpModel(
int n_batch, int n_input, int n_cell, int n_output, int sequence_length,
bool time_major, bool use_cifg, bool use_peephole,
bool use_projection_weights, bool use_projection_bias, float cell_clip,
float proj_clip, const std::vector<std::vector<int>>& input_shapes,
const TensorType& weights_type = TensorType_FLOAT32)
: UnidirectionalLSTMOpModel(
n_batch, n_input, n_cell, n_output, sequence_length, time_major,
use_cifg, use_peephole, use_projection_weights, use_projection_bias,
cell_clip, proj_clip, input_shapes, TensorType_FLOAT32, true) {}
};
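// Holds the weights, inputs, and golden outputs shared by the layer-norm
// LSTM black-box tests below.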
class BaseLayerNormUnidirectionalLstmTest : public ::testing::Test {
protected:
std::vector<float> input_to_input_weights_;
std::vector<float> input_to_cell_weights_;
std::vector<float> input_to_forget_weights_;
std::vector<float> input_to_output_weights_;
std::vector<float> input_gate_bias_;
std::vector<float> cell_gate_bias_;
std::vector<float> forget_gate_bias_;
std::vector<float> output_gate_bias_;
std::vector<float> recurrent_to_input_weights_;
std::vector<float> recurrent_to_cell_weights_;
std::vector<float> recurrent_to_forget_weights_;
std::vector<float> recurrent_to_output_weights_;
std::vector<float> cell_to_input_weights_;
std::vector<float> cell_to_forget_weights_;
std::vector<float> cell_to_output_weights_;
std::vector<float> projection_weights_;
std::vector<float> projection_bias_;
std::vector<float> input_layer_norm_coefficients_;
std::vector<float> forget_layer_norm_coefficients_;
std::vector<float> cell_layer_norm_coefficients_;
std::vector<float> output_layer_norm_coefficients_;
std::vector<std::vector<float>> lstm_input_;
std::vector<std::vector<float>> lstm_golden_output_;
void VerifyGoldens(const std::vector<std::vector<float>>& input,
const std::vector<std::vector<float>>& output,
UnidirectionalLSTMOpModel* lstm, float tolerance = 1e-5) {
const int num_batches = input.size();
EXPECT_GT(num_batches, 0);
const int num_inputs = lstm->num_inputs();
EXPECT_GT(num_inputs, 0);
const int input_sequence_size = input[0].size() / num_inputs;
EXPECT_GT(input_sequence_size, 0);
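    // Feed the input in time-major order: all batches for time step i are
    // written contiguously before moving on to step i + 1.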
for (int i = 0; i < input_sequence_size; ++i) {
for (int b = 0; b < num_batches; ++b) {
const float* batch_start = input[b].data() + i * num_inputs;
const float* batch_end = batch_start + num_inputs;
lstm->SetInput(((i * num_batches) + b) * num_inputs, batch_start,
batch_end);
}
}
ASSERT_EQ(lstm->Invoke(), kTfLiteOk);
const int num_outputs = lstm->num_outputs();
EXPECT_GT(num_outputs, 0);
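    // Assemble the golden output in the same time-major order as the input.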
std::vector<float> expected;
for (int i = 0; i < input_sequence_size; ++i) {
for (int b = 0; b < num_batches; ++b) {
const float* golden_start_batch = output[b].data() + i * num_outputs;
const float* golden_end_batch = golden_start_batch + num_outputs;
expected.insert(expected.end(), golden_start_batch, golden_end_batch);
}
}
EXPECT_THAT(lstm->GetOutput(),
ElementsAreArray(ArrayFloatNear(expected, tolerance)));
}
};
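// CIFG + peephole, no projection, no clipping, with layer normalization.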
class CifgPeepholeNoProjectionNoClippingLayerNormUnidirectionalLstmTest
: public BaseLayerNormUnidirectionalLstmTest {
void SetUp() override {
input_to_cell_weights_ = {-0.49770179, -0.27711356, -0.09624726,
0.05100781, 0.04717243, 0.48944736,
-0.38535351, -0.17212132};
input_to_forget_weights_ = {-0.55291498, -0.42866567, 0.13056988,
-0.3633365, -0.22755712, 0.28253698,
0.24407166, 0.33826375};
input_to_output_weights_ = {0.10725588, -0.02335852, -0.55932593,
-0.09426838, -0.44257352, 0.54939759,
0.01533556, 0.42751634};
cell_gate_bias_ = {0., 0., 0., 0.};
forget_gate_bias_ = {1., 1., 1., 1.};
output_gate_bias_ = {0., 0., 0., 0.};
recurrent_to_cell_weights_ = {
0.54066205, -0.32668582, -0.43562764, -0.56094903,
0.42957711, 0.01841056, -0.32764608, -0.33027974,
-0.10826075, 0.20675004, 0.19069612, -0.03026325,
-0.54532051, 0.33003211, 0.44901288, 0.21193194};
recurrent_to_forget_weights_ = {
-0.13832897, -0.0515101, -0.2359007, -0.16661474,
-0.14340827, 0.36986142, 0.23414481, 0.55899,
0.10798943, -0.41174671, 0.17751795, -0.34484994,
-0.35874045, -0.11352962, 0.27268326, 0.54058349};
recurrent_to_output_weights_ = {
0.41613156, 0.42610586, -0.16495961, -0.5663873,
0.30579174, -0.05115908, -0.33941799, 0.23364776,
0.11178309, 0.09481031, -0.26424935, 0.46261835,
0.50248802, 0.26114327, -0.43736315, 0.33149987};
cell_to_forget_weights_ = {0.47485286, -0.51955009, -0.24458408,
0.31544167};
cell_to_output_weights_ = {-0.17135078, 0.82760304, 0.85573703,
-0.77109635};
input_layer_norm_coefficients_ = {0.1, 0.2, 0.3, 0.5};
forget_layer_norm_coefficients_ = {0.2, 0.2, 0.4, 0.3};
cell_layer_norm_coefficients_ = {0.7, 0.2, 0.3, 0.8};
output_layer_norm_coefficients_ = {0.6, 0.2, 0.2, 0.5};
lstm_input_ = {{2., 3., 3., 4., 1., 1.}};
lstm_golden_output_ = {{-0.102089, 0.00653987, 0.0515139, -0.0630045,
-0.173317, 0.0109206, 0.0903292, -0.109497,
-0.23827, 0.0119514, 0.119525, -0.12748}};
}
};
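// Verifies the layer-norm LSTM against its golden output.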
TEST_F(CifgPeepholeNoProjectionNoClippingLayerNormUnidirectionalLstmTest,
LayerNormLstmBlackBoxTest) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
const int sequence_length = 3;
LayerNormUnidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/true, /*use_peephole=*/true,
      /*use_projection_weights=*/false,
      /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
          {sequence_length, n_batch, n_input},  // input tensor
          {0, 0},              // input_to_input_weight tensor (CIFG: omitted)
          {n_cell, n_input},   // input_to_forget_weight tensor
          {n_cell, n_input},   // input_to_cell_weight tensor
          {n_cell, n_input},   // input_to_output_weight tensor
          {0, 0},              // recurrent_to_input_weight tensor (CIFG)
          {n_cell, n_output},  // recurrent_to_forget_weight tensor
          {n_cell, n_output},  // recurrent_to_cell_weight tensor
          {n_cell, n_output},  // recurrent_to_output_weight tensor
          {0},                 // cell_to_input_weight tensor (CIFG)
          {n_cell},            // cell_to_forget_weight tensor
          {n_cell},            // cell_to_output_weight tensor
          {0},                 // input_gate_bias tensor (CIFG)
          {n_cell},            // forget_gate_bias tensor
          {n_cell},            // cell_gate_bias tensor
          {n_cell},            // output_gate_bias tensor
          {0, 0},              // projection_weight tensor (unused)
          {0},                 // projection_bias tensor (unused)
          {n_batch, n_output},  // output_state tensor
          {n_batch, n_cell},    // cell_state tensor
          {0},                 // input_layer_norm_coefficient tensor (CIFG)
          {n_cell},            // forget_layer_norm_coefficient tensor
          {n_cell},            // cell_layer_norm_coefficient tensor
          {n_cell},            // output_layer_norm_coefficient tensor
});
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
lstm.SetCellToForgetWeights(cell_to_forget_weights_);
lstm.SetCellToOutputWeights(cell_to_output_weights_);
lstm.SetForgetLayerNormCoefficients(forget_layer_norm_coefficients_);
lstm.SetCellLayerNormCoefficients(cell_layer_norm_coefficients_);
lstm.SetOutputLayerNormCoefficients(output_layer_norm_coefficients_);
VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm);
}
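// Runs the layer-norm model class with zero-sized layer-norm coefficient
// tensors, which disables layer normalization; the goldens come from the
// non-layer-norm fixture.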
TEST_F(CifgPeepholeNoProjectionNoClippingUnidirectionalLstmTest,
NonLayerNormLstmBlackBoxTest) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
const int sequence_length = 3;
LayerNormUnidirectionalLSTMOpModel lstm(
n_batch, n_input, n_cell, n_output, sequence_length,
      /*time_major=*/true, /*use_cifg=*/true, /*use_peephole=*/true,
      /*use_projection_weights=*/false,
      /*use_projection_bias=*/false,
      /*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
          {sequence_length, n_batch, n_input},  // input tensor
          {0, 0},              // input_to_input_weight tensor (CIFG: omitted)
          {n_cell, n_input},   // input_to_forget_weight tensor
          {n_cell, n_input},   // input_to_cell_weight tensor
          {n_cell, n_input},   // input_to_output_weight tensor
          {0, 0},              // recurrent_to_input_weight tensor (CIFG)
          {n_cell, n_output},  // recurrent_to_forget_weight tensor
          {n_cell, n_output},  // recurrent_to_cell_weight tensor
          {n_cell, n_output},  // recurrent_to_output_weight tensor
          {0},                 // cell_to_input_weight tensor (CIFG)
          {n_cell},            // cell_to_forget_weight tensor
          {n_cell},            // cell_to_output_weight tensor
          {0},                 // input_gate_bias tensor (CIFG)
          {n_cell},            // forget_gate_bias tensor
          {n_cell},            // cell_gate_bias tensor
          {n_cell},            // output_gate_bias tensor
          {0, 0},              // projection_weight tensor (unused)
          {0},                 // projection_bias tensor (unused)
          {n_batch, n_output},  // output_state tensor
          {n_batch, n_cell},    // cell_state tensor
          {0},                 // input_layer_norm_coefficient tensor (disabled)
          {0},                 // forget_layer_norm_coefficient tensor (disabled)
          {0},                 // cell_layer_norm_coefficient tensor (disabled)
          {0},                 // output_layer_norm_coefficient tensor (disabled)
});
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
lstm.SetCellToForgetWeights(cell_to_forget_weights_);
lstm.SetCellToOutputWeights(cell_to_output_weights_);
VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm);
}
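// Quantized unidirectional sequence LSTM model: int8 input, weights, and
// output, int32 biases, and int16 gate intermediates (the 8x8_16 kernel).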
class UnidirectionalSequenceLSTMIntegerOpModel : public SingleOpModel {
public:
UnidirectionalSequenceLSTMIntegerOpModel(
int n_batch, int n_input, int n_cell, int n_output, int sequence_length,
bool time_major, bool use_cifg, bool use_peephole,
bool use_projection_weights, bool use_projection_bias,
bool use_layer_norm, bool use_8x8_8_implementation,
const std::vector<std::pair<float, float>>& ranges,
const std::vector<std::pair<float, int>>& intermediates,
bool asymmetric_quantize_inputs = false)
: n_input_(n_input), n_output_(n_output) {
input_ = AddInput({TensorType_INT8,
{sequence_length, n_batch, n_input},
ranges[0].first,
ranges[0].second});
if (use_cifg) {
input_to_input_weights_ = AddNullInput();
} else {
input_to_input_weights_ = AddInput({TensorType_INT8,
{n_cell, n_input},
ranges[1].first,
ranges[1].second});
}
input_to_forget_weights_ = AddInput({TensorType_INT8,
{n_cell, n_input},
ranges[2].first,
ranges[2].second});
input_to_cell_weights_ = AddInput({TensorType_INT8,
{n_cell, n_input},
ranges[3].first,
ranges[3].second});
input_to_output_weights_ = AddInput({TensorType_INT8,
{n_cell, n_input},
ranges[4].first,
ranges[4].second});
if (use_cifg) {
recurrent_to_input_weights_ = AddNullInput();
} else {
recurrent_to_input_weights_ = AddInput({TensorType_INT8,
{n_cell, n_output},
ranges[5].first,
ranges[5].second});
}
recurrent_to_forget_weights_ = AddInput({TensorType_INT8,
{n_cell, n_output},
ranges[6].first,
ranges[6].second});
recurrent_to_cell_weights_ = AddInput({TensorType_INT8,
{n_cell, n_output},
ranges[7].first,
ranges[7].second});
recurrent_to_output_weights_ = AddInput({TensorType_INT8,
{n_cell, n_output},
ranges[8].first,
ranges[8].second});
if (use_peephole) {
if (use_cifg) {
cell_to_input_weights_ = AddNullInput();
} else {
cell_to_input_weights_ = AddInput(
{TensorType_INT16, {n_cell}, ranges[9].first, ranges[9].second});
}
cell_to_forget_weights_ = AddInput(
{TensorType_INT16, {n_cell}, ranges[10].first, ranges[10].second});
cell_to_output_weights_ = AddInput(
{TensorType_INT16, {n_cell}, ranges[11].first, ranges[11].second});
} else {
cell_to_input_weights_ = AddNullInput();
cell_to_forget_weights_ = AddNullInput();
cell_to_output_weights_ = AddNullInput();
}
if (use_cifg) {
input_gate_bias_ = AddNullInput();
} else {
input_gate_bias_ = AddInput(
{TensorType_INT32, {n_cell}, ranges[12].first, ranges[12].second});
}
forget_gate_bias_ = AddInput(
{TensorType_INT32, {n_cell}, ranges[13].first, ranges[13].second});
cell_gate_bias_ = AddInput(
{TensorType_INT32, {n_cell}, ranges[14].first, ranges[14].second});
output_gate_bias_ = AddInput(
{TensorType_INT32, {n_cell}, ranges[15].first, ranges[15].second});
if (use_projection_weights) {
projection_weights_ = AddInput({TensorType_INT8,
{n_output, n_cell},
ranges[16].first,
ranges[16].second});
} else {
projection_weights_ = AddNullInput();
}
if (use_projection_bias) {
CHECK(use_projection_weights);
projection_bias_ = AddInput(
{TensorType_INT32, {n_output}, ranges[17].first, ranges[17].second});
} else {
projection_bias_ = AddNullInput();
}
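    // Variable tensors holding the recurrent output state and cell state.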
AddVariableInput({TensorType_INT16,
{n_batch, n_output},
ranges[18].first,
ranges[18].second});
AddVariableInput({TensorType_INT16,
{n_batch, n_cell},
ranges[19].first,
ranges[19].second});
if (use_layer_norm) {
if (use_cifg) {
input_layer_norm_coefficients_ = AddNullInput();
} else {
input_layer_norm_coefficients_ = AddInput(
{TensorType_INT16, {n_cell}, ranges[20].first, ranges[20].second});
}
forget_layer_norm_coefficients_ = AddInput(
{TensorType_INT16, {n_cell}, ranges[21].first, ranges[21].second});
cell_layer_norm_coefficients_ = AddInput(
{TensorType_INT16, {n_cell}, ranges[22].first, ranges[22].second});
output_layer_norm_coefficients_ = AddInput(
{TensorType_INT16, {n_cell}, ranges[23].first, ranges[23].second});
}
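    // Only the 8x8_16 variant (int16 intermediates) is modeled here.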
CHECK(!use_8x8_8_implementation);
EXPECT_EQ(intermediates.size(), 5);
    for (size_t i = 0; i < intermediates.size(); ++i) {
AddIntermediate(TensorType_INT16, {intermediates[i].first},
{intermediates[i].second});
}
output_ = AddOutput({TensorType_INT8,
{n_batch, n_output},
ranges[24].first,
ranges[24].second});
SetBuiltinOp(BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM,
BuiltinOptions_UnidirectionalSequenceLSTMOptions,
CreateUnidirectionalSequenceLSTMOptions(
                     builder_, ActivationFunctionType_TANH,
                     /*cell_clip=*/0.0f,
                     /*proj_clip=*/0.0f, time_major, asymmetric_quantize_inputs)
.Union());
    BuildInterpreter({}, /*num_threads=*/-1,
                     /*allow_fp32_relax_to_fp16=*/false,
                     /*apply_delegate=*/true, /*allocate_and_delegate=*/false);
}
void PerformAllocateAndDelegate() { AllocateAndDelegate(true); }
void SetInputToInputWeights(const std::vector<float>& f) {
QuantizeAndPopulate<int8_t>(input_to_input_weights_, f);
}
void SetInputToForgetWeights(const std::vector<float>& f) {
QuantizeAndPopulate<int8_t>(input_to_forget_weights_, f);
}
void SetInputToCellWeights(const std::vector<float>& f) {
QuantizeAndPopulate<int8_t>(input_to_cell_weights_, f);
}
void SetInputToOutputWeights(const std::vector<float>& f) {
QuantizeAndPopulate<int8_t>(input_to_output_weights_, f);
}
void SetRecurrentToInputWeights(const std::vector<float>& f) {
QuantizeAndPopulate<int8_t>(recurrent_to_input_weights_, f);
}
void SetRecurrentToForgetWeights(const std::vector<float>& f) {
QuantizeAndPopulate<int8_t>(recurrent_to_forget_weights_, f);
}
void SetRecurrentToCellWeights(const std::vector<float>& f) {
QuantizeAndPopulate<int8_t>(recurrent_to_cell_weights_, f);
}
void SetRecurrentToOutputWeights(const std::vector<float>& f) {
QuantizeAndPopulate<int8_t>(recurrent_to_output_weights_, f);
}
void SetCellToInputWeights(const std::vector<float>& f) {
QuantizeAndPopulate<int16_t>(cell_to_input_weights_, f);
}
void SetCellToForgetWeights(const std::vector<float>& f) {
QuantizeAndPopulate<int16_t>(cell_to_forget_weights_, f);
}
void SetCellToOutputWeights(const std::vector<float>& f) {
QuantizeAndPopulate<int16_t>(cell_to_output_weights_, f);
}
void SetInputLayerNormCoefficients(const std::vector<float>& f) {
QuantizeAndPopulate<int16_t>(input_layer_norm_coefficients_, f);
}
void SetForgetLayerNormCoefficients(const std::vector<float>& f) {
QuantizeAndPopulate<int16_t>(forget_layer_norm_coefficients_, f);
}
void SetCellLayerNormCoefficients(const std::vector<float>& f) {
QuantizeAndPopulate<int16_t>(cell_layer_norm_coefficients_, f);
}
void SetOutputLayerNormCoefficients(const std::vector<float>& f) {
QuantizeAndPopulate<int16_t>(output_layer_norm_coefficients_, f);
}
void SetInputGateBias(const std::vector<float>& f) {
QuantizeAndPopulate<int32_t>(input_gate_bias_, f);
}
void SetForgetGateBias(const std::vector<float>& f) {
QuantizeAndPopulate<int32_t>(forget_gate_bias_, f);
}
void SetCellBias(const std::vector<float>& f) {
QuantizeAndPopulate<int32_t>(cell_gate_bias_, f);
}
void SetOutputGateBias(const std::vector<float>& f) {
QuantizeAndPopulate<int32_t>(output_gate_bias_, f);
}
void SetProjectionWeights(const std::vector<float>& f) {
QuantizeAndPopulate<int8_t>(projection_weights_, f);
}
void SetProjectionBias(const std::vector<float>& f) {
QuantizeAndPopulate<int32_t>(projection_bias_, f);
}
void SetInput(const std::vector<float>& f) {
QuantizeAndPopulate<int8_t>(input_, f);
}
std::vector<int8_t> GetOutput() { return ExtractVector<int8_t>(output_); }
int num_inputs() { return n_input_; }
int num_outputs() { return n_output_; }
protected:
int input_;
int input_to_input_weights_;
int input_to_forget_weights_;
int input_to_cell_weights_;
int input_to_output_weights_;
int recurrent_to_input_weights_;
int recurrent_to_forget_weights_;
int recurrent_to_cell_weights_;
int recurrent_to_output_weights_;
int cell_to_input_weights_;
int cell_to_forget_weights_;
int cell_to_output_weights_;
int input_layer_norm_coefficients_;
int forget_layer_norm_coefficients_;
int cell_layer_norm_coefficients_;
int output_layer_norm_coefficients_;
int input_gate_bias_;
int forget_gate_bias_;
int cell_gate_bias_;
int output_gate_bias_;
int projection_weights_;
int projection_bias_;
int output_;
int n_input_;
int n_output_;
};
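// Integer LSTM with projection and layer normalization, no CIFG or peephole.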
TEST(IntegerUnidirectionalSequenceLstmOpTest,
NoCifg_NoPeephole_Projection_LayerNorm) {
const int n_batch = 2;
const int n_input = 5;
const int n_cell = 4;
const int n_output = 3;
const int sequence_length = 3;
const std::vector<float> input_to_input_weights = {
0.5, 0.6, 0.7, -0.8, -0.9, 0.1, 0.2, 0.3, -0.4, 0.5,
-0.8, 0.7, -0.6, 0.5, -0.4, -0.5, -0.4, -0.3, -0.2, -0.1};
const std::vector<float> input_to_forget_weights = {
-0.6, -0.1, 0.3, 0.2, 0.9, -0.5, -0.2, -0.4, 0.3, -0.8,
-0.4, 0.3, -0.5, -0.4, -0.6, 0.3, -0.4, -0.6, -0.5, -0.5};
const std::vector<float> input_to_cell_weights = {
-0.4, -0.3, -0.2, -0.1, -0.5, 0.5, -0.2, -0.3, -0.2, -0.6,
0.6, -0.1, -0.4, -0.3, -0.7, 0.7, -0.9, -0.5, 0.8, 0.6};
const std::vector<float> input_to_output_weights = {
-0.8, -0.4, -0.2, -0.9, -0.1, -0.7, 0.3, -0.3, -0.8, -0.2,
0.6, -0.2, 0.4, -0.7, -0.3, -0.5, 0.1, 0.5, -0.6, -0.4};
const std::vector<float> input_gate_bias = {0.03, 0.15, 0.22, 0.38};
const std::vector<float> forget_gate_bias = {0.1, -0.3, -0.2, 0.1};
const std::vector<float> cell_gate_bias = {-0.05, 0.72, 0.25, 0.08};
const std::vector<float> output_gate_bias = {0.05, -0.01, 0.2, 0.1};
const std::vector<float> recurrent_to_input_weights = {
-0.2, -0.3, 0.4, 0.1, -0.5, 0.9, -0.2, -0.3, -0.7, 0.05, -0.2, -0.6};
const std::vector<float> recurrent_to_cell_weights = {
-0.3, 0.2, 0.1, -0.3, 0.8, -0.08, -0.2, 0.3, 0.8, -0.6, -0.1, 0.2};
const std::vector<float> recurrent_to_forget_weights = {
-0.5, -0.3, -0.5, -0.2, 0.6, 0.4, 0.9, 0.3, -0.1, 0.2, 0.5, 0.2};
const std::vector<float> recurrent_to_output_weights = {
0.3, -0.1, 0.1, -0.2, -0.5, -0.7, -0.2, -0.6, -0.1, -0.4, -0.7, -0.2};
const std::vector<float> input_layer_norm_coefficients = {0.1, 0.2, 0.3, 0.5};
const std::vector<float> forget_layer_norm_coefficients = {0.2, 0.2, 0.4,
0.3};
const std::vector<float> cell_layer_norm_coefficients = {0.7, 0.2, 0.3, 0.8};
const std::vector<float> output_layer_norm_coefficients = {0.6, 0.2, 0.2,
0.5};
const std::vector<float> projection_weights = {
-0.1, 0.2, 0.01, -0.2, 0.1, 0.5, 0.3, 0.08, 0.07, 0.2, -0.4, 0.2};
const std::vector<std::pair<float, float>> ranges = {
      {-1.0, 127.0 / 128},      // input tensor
      {-1.0, 1.0},              // input_to_input_weight tensor
      {-1.0, 1.0},              // input_to_forget_weight tensor
      {-1.0, 1.0},              // input_to_cell_weight tensor
      {-1.0, 1.0},              // input_to_output_weight tensor
      {-1.0, 1.0},              // recurrent_to_input_weight tensor
      {-1.0, 1.0},              // recurrent_to_forget_weight tensor
      {-1.0, 1.0},              // recurrent_to_cell_weight tensor
      {-1.0, 1.0},              // recurrent_to_output_weight tensor
      {-1, 1},                  // cell_to_input_weight tensor
      {-1, 1},                  // cell_to_forget_weight tensor
      {-1, 1},                  // cell_to_output_weight tensor
      {-100, 100},              // input_gate_bias tensor
      {-100, 100},              // forget_gate_bias tensor
      {-100, 100},              // cell_gate_bias tensor
      {-100, 100},              // output_gate_bias tensor
      {-0.5, 0.5},              // projection_weight tensor
      {-1, 1},                  // projection_bias tensor
      {-1.0, 32767.0 / 32768},  // output_state tensor
      {-1, 1},                  // cell_state tensor
      {-1.00001, 1.0},          // input_layer_norm_coefficient tensor
      {-1.00001, 1.0},          // forget_layer_norm_coefficient tensor
      {-1.00001, 1.0},          // cell_layer_norm_coefficient tensor
      {-1.00001, 1.0},          // output_layer_norm_coefficient tensor
      {-1.0, 32767.0 / 32768},  // output tensor
};
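  // (scale, zero_point) for each of the five intermediate tensors.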
std::vector<std::pair<float, int>> intermediates = {
{0.007059, 0}, {0.007812, 0}, {0.007059, 0}, {0.007812, 0}, {0.007, 0}};
UnidirectionalSequenceLSTMIntegerOpModel lstm(
      n_batch, n_input, n_cell, n_output, sequence_length, /*time_major=*/true,
      /*use_cifg=*/false, /*use_peephole=*/false,
      /*use_projection_weights=*/true,
      /*use_projection_bias=*/false,
      /*use_layer_norm=*/true,
      /*use_8x8_8_implementation=*/false, ranges, intermediates);
lstm.PerformAllocateAndDelegate();
lstm.SetInputToInputWeights(input_to_input_weights);
lstm.SetInputToCellWeights(input_to_cell_weights);
lstm.SetInputToForgetWeights(input_to_forget_weights);
lstm.SetInputToOutputWeights(input_to_output_weights);
lstm.SetInputGateBias(input_gate_bias);
lstm.SetCellBias(cell_gate_bias);
lstm.SetForgetGateBias(forget_gate_bias);
lstm.SetOutputGateBias(output_gate_bias);
lstm.SetRecurrentToInputWeights(recurrent_to_input_weights);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights);
lstm.SetProjectionWeights(projection_weights);
lstm.SetInputLayerNormCoefficients(input_layer_norm_coefficients);
lstm.SetForgetLayerNormCoefficients(forget_layer_norm_coefficients);
lstm.SetCellLayerNormCoefficients(cell_layer_norm_coefficients);
lstm.SetOutputLayerNormCoefficients(output_layer_norm_coefficients);
const std::vector<float> lstm_input = {
0.7, 0.8, 0.1, 0.2, 0.3,
0.8, 0.1, 0.2, 0.4, 0.5,
0.2, 0.7, 0.7, 0.1, 0.7,
0.3, 0.2, 0.9, 0.8, 0.1,
0.7, 0.8, 0.1, 0.2, 0.3,
0.3, 0.2, 0.9, 0.8, 0.1,
};
const std::vector<int8_t> expected_output = {
127, 127, -108, -67, 127, 127, -128, 127, 127,
-128, 127, 127, 127, 127, 127, -128, 127, 127,
};
lstm.SetInput(lstm_input);
ASSERT_EQ(lstm.Invoke(), kTfLiteOk);
EXPECT_THAT(lstm.GetOutput(), ElementsAreArray(expected_output));
}
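// Same as above but with peephole connections; skipped under forced NNAPI.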
TEST(IntegerUnidirectionalSequenceLstmOpTest,
NoCifg_Peephole_Projection_LayerNorm) {
if (SingleOpModel::GetForceUseNnapi()) {
return;
}
const int n_batch = 2;
const int n_input = 5;
const int n_cell = 4;
const int n_output = 3;
const int sequence_length = 3;
const std::vector<float> input_to_input_weights = {
0.5, 0.6, 0.7, -0.8, -0.9, 0.1, 0.2, 0.3, -0.4, 0.5,
-0.8, 0.7, -0.6, 0.5, -0.4, -0.5, -0.4, -0.3, -0.2, -0.1};
const std::vector<float> input_to_forget_weights = {
-0.6, -0.1, 0.3, 0.2, 0.9, -0.5, -0.2, -0.4, 0.3, -0.8,
-0.4, 0.3, -0.5, -0.4, -0.6, 0.3, -0.4, -0.6, -0.5, -0.5};
const std::vector<float> input_to_cell_weights = {
-0.4, -0.3, -0.2, -0.1, -0.5, 0.5, -0.2, -0.3, -0.2, -0.6,
0.6, -0.1, -0.4, -0.3, -0.7, 0.7, -0.9, -0.5, 0.8, 0.6};
const std::vector<float> input_to_output_weights = {
-0.8, -0.4, -0.2, -0.9, -0.1, -0.7, 0.3, -0.3, -0.8, -0.2,
0.6, -0.2, 0.4, -0.7, -0.3, -0.5, 0.1, 0.5, -0.6, -0.4};
const std::vector<float> input_gate_bias = {0.03, 0.15, 0.22, 0.38};
const std::vector<float> forget_gate_bias = {0.1, -0.3, -0.2, 0.1};
const std::vector<float> cell_gate_bias = {-0.05, 0.72, 0.25, 0.08};
const std::vector<float> output_gate_bias = {0.05, -0.01, 0.2, 0.1};
const std::vector<float> recurrent_to_input_weights = {
-0.2, -0.3, 0.4, 0.1, -0.5, 0.9, -0.2, -0.3, -0.7, 0.05, -0.2, -0.6};
const std::vector<float> recurrent_to_cell_weights = {
-0.3, 0.2, 0.1, -0.3, 0.8, -0.08, -0.2, 0.3, 0.8, -0.6, -0.1, 0.2};
const std::vector<float> recurrent_to_forget_weights = {
-0.5, -0.3, -0.5, -0.2, 0.6, 0.4, 0.9, 0.3, -0.1, 0.2, 0.5, 0.2};
const std::vector<float> recurrent_to_output_weights = {
0.3, -0.1, 0.1, -0.2, -0.5, -0.7, -0.2, -0.6, -0.1, -0.4, -0.7, -0.2};
const std::vector<float> cell_to_input_weights = {0.3, -0.1, 0.1, -0.2};
const std::vector<float> cell_to_forget_weights = {0.2, -0.1, 0.1, -0.2};
const std::vector<float> cell_to_output_weights = {0.3, -0.1, 0.1, -0.3};
const std::vector<float> input_layer_norm_coefficients = {0.1, 0.2, 0.3, 0.5};
const std::vector<float> forget_layer_norm_coefficients = {0.2, 0.2, 0.4,
0.3};
const std::vector<float> cell_layer_norm_coefficients = {0.7, 0.2, 0.3, 0.8};
const std::vector<float> output_layer_norm_coefficients = {0.6, 0.2, 0.2,
0.5};
const std::vector<float> projection_weights = {
-0.1, 0.2, 0.01, -0.2, 0.1, 0.5, 0.3, 0.08, 0.07, 0.2, -0.4, 0.2};
const std::vector<std::pair<float, float>> ranges = {
      {-1.0, 127.0 / 128},      // input tensor
      {-1.0, 1.0},              // input_to_input_weight tensor
      {-1.0, 1.0},              // input_to_forget_weight tensor
      {-1.0, 1.0},              // input_to_cell_weight tensor
      {-1.0, 1.0},              // input_to_output_weight tensor
      {-1.0, 1.0},              // recurrent_to_input_weight tensor
      {-0.9, 0.9},              // recurrent_to_forget_weight tensor
      {-1.0, 1.0},              // recurrent_to_cell_weight tensor
      {-1.0, 1.0},              // recurrent_to_output_weight tensor
      {-0.3, 0.3},              // cell_to_input_weight tensor
      {-0.3, 0.3},              // cell_to_forget_weight tensor
      {-0.3, 0.3},              // cell_to_output_weight tensor
      {-100, 100},              // input_gate_bias tensor
      {-100, 80},               // forget_gate_bias tensor
      {-100, 100},              // cell_gate_bias tensor
      {-100, 100},              // output_gate_bias tensor
      {-0.5, 0.5},              // projection_weight tensor
      {-1, 1},                  // projection_bias tensor
      {-1.0, 32767.0 / 32768},  // output_state tensor
      {-1, 1},                  // cell_state tensor
      {-0.5, 0.5},              // input_layer_norm_coefficient tensor
      {-0.5, 0.5},              // forget_layer_norm_coefficient tensor
      {-1.0, 1.0},              // cell_layer_norm_coefficient tensor
      {-1.0, 1.0},              // output_layer_norm_coefficient tensor
      {-1.0, 32767.0 / 32768},  // output tensor
};
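  // (scale, zero_point) for each of the five intermediate tensors.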
std::vector<std::pair<float, int>> intermediates = {
{0.007059, 0}, {0.007812, 0}, {0.007059, 0}, {0.007812, 0}, {0.007, 0}};
UnidirectionalSequenceLSTMIntegerOpModel lstm(
      n_batch, n_input, n_cell, n_output, sequence_length, /*time_major=*/true,
      /*use_cifg=*/false, /*use_peephole=*/true,
      /*use_projection_weights=*/true,
      /*use_projection_bias=*/false,
      /*use_layer_norm=*/true,
      /*use_8x8_8_implementation=*/false, ranges, intermediates);
lstm.PerformAllocateAndDelegate();
lstm.SetInputToInputWeights(input_to_input_weights);
lstm.SetInputToCellWeights(input_to_cell_weights);
lstm.SetInputToForgetWeights(input_to_forget_weights);
lstm.SetInputToOutputWeights(input_to_output_weights);
lstm.SetInputGateBias(input_gate_bias);
lstm.SetCellBias(cell_gate_bias);
lstm.SetForgetGateBias(forget_gate_bias);
lstm.SetOutputGateBias(output_gate_bias);
lstm.SetRecurrentToInputWeights(recurrent_to_input_weights);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights);
lstm.SetCellToInputWeights(cell_to_input_weights);
lstm.SetCellToForgetWeights(cell_to_forget_weights);
lstm.SetCellToOutputWeights(cell_to_output_weights);
lstm.SetProjectionWeights(projection_weights);
lstm.SetInputLayerNormCoefficients(input_layer_norm_coefficients);
lstm.SetForgetLayerNormCoefficients(forget_layer_norm_coefficients);
lstm.SetCellLayerNormCoefficients(cell_layer_norm_coefficients);
lstm.SetOutputLayerNormCoefficients(output_layer_norm_coefficients);
const std::vector<float> lstm_input = {
0.7, 0.8, 0.1, 0.2, 0.3,
0.8, 0.1, 0.2, 0.4, 0.5,
0.2, 0.7, 0.7, 0.1, 0.7,
0.3, 0.2, 0.9, 0.8, 0.1,
0.7, 0.8, 0.1, 0.2, 0.3,
0.3, 0.2, 0.9, 0.8, 0.1,
};
const std::vector<int8_t> expected_output = {
127, 127, -16, -21, 127, 127, 23, 127, 127,
-128, 127, 127, 127, 127, 127, -128, 127, 127,
};
lstm.SetInput(lstm_input);
ASSERT_EQ(lstm.Invoke(), kTfLiteOk);
EXPECT_THAT(lstm.GetOutput(), ElementsAreArray(expected_output));
}
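// Parameterized over (use_cifg, use_peephole, asymmetric_quantize_inputs).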
class IndyLSTMOpTest
: public ::testing::TestWithParam<std::tuple<bool, bool, bool>> {};
INSTANTIATE_TEST_SUITE_P(
PeepHoleAndCifg, IndyLSTMOpTest,
testing::Combine(testing::Bool(),
testing::Bool(),
testing::Bool()));
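// Checks that a hybrid IndyLSTM (diagonal recurrent weights stored as
// vectors) matches a regular hybrid LSTM whose recurrent weight matrices are
// diagonal.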
TEST_P(IndyLSTMOpTest, HybridCheckThatDiagAndNonDiagRecurrentWeightsAreEqual) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
const int sequence_length = 3;
auto params = GetParam();
const bool use_cifg = std::get<0>(params);
const bool use_peephole = std::get<1>(params);
const bool asymmetric_quantize_inputs = std::get<2>(params);
auto SetLstmWeights = [&](HybridUnidirectionalLSTMOpModel& model) -> void {
if (!use_cifg) {
model.SetInputToInputWeights({-0.45018822, -0.02338299, -0.0870589,
-0.34550029, 0.04266912, -0.15680569,
-0.34856534, 0.43890524});
}
model.SetInputToCellWeights({-0.50013041, 0.1370284, 0.11810488, 0.2013163,
-0.20583314, 0.44344562, 0.22077113,
-0.29909778});
model.SetInputToForgetWeights({0.09701663, 0.20334584, -0.50592935,
-0.31343272, -0.40032279, 0.44781327,
0.01387155, -0.35593212});
model.SetInputToOutputWeights({-0.25065863, -0.28290087, 0.04613829,
0.40525138, 0.44272184, 0.03897077,
-0.1556896, 0.19487578});
if (!use_cifg) {
model.SetInputGateBias({0., 0., 0., 0.});
}
model.SetCellBias({0., 0., 0., 0.});
model.SetForgetGateBias({1., 1., 1., 1.});
model.SetOutputGateBias({0., 0., 0., 0.});
if (use_peephole) {
if (!use_cifg) {
model.SetCellToInputWeights(
{0.040369894, 0.030746894, 0.24704495, 0.018586371,
-0.037586458, -0.15312155, -0.11812848, -0.11465643,
0.20259799, 0.11418174, -0.10116027, -0.011334949,
0.12411352, -0.076769054, -0.052169047, 0.21198851,
-0.38871562, -0.09061183, -0.09683246, -0.21929175});
}
model.SetCellToForgetWeights(
{0.47485286, -0.51955009, -0.24458408, 0.31544167});
model.SetCellToOutputWeights(
{-0.17135078, 0.82760304, 0.85573703, -0.77109635});
}
};
std::vector<int> input_weights_shape{n_cell, n_input};
if (use_cifg) {
input_weights_shape = std::vector<int>{0, 0};
}
std::vector<int> recurrent_to_input_weights_shape{n_cell, n_output};
if (use_cifg) {
    recurrent_to_input_weights_shape = std::vector<int>{0, 0};
}
std::vector<std::vector<int>> input_shapes = {
      {sequence_length, n_batch, n_input},  // input tensor
      input_weights_shape,               // input_to_input_weight tensor
      {n_cell, n_input},                 // input_to_forget_weight tensor
      {n_cell, n_input},                 // input_to_cell_weight tensor
      {n_cell, n_input},                 // input_to_output_weight tensor
      recurrent_to_input_weights_shape,  // recurrent_to_input_weight tensor
      {n_cell, n_output},                // recurrent_to_forget_weight tensor
      {n_cell, n_output},                // recurrent_to_cell_weight tensor
      {n_cell, n_output},                // recurrent_to_output_weight tensor
      {(use_peephole && !use_cifg) ? n_cell : 0},  // cell_to_input_weight
      {use_peephole ? n_cell : 0},       // cell_to_forget_weight tensor
      {use_peephole ? n_cell : 0},       // cell_to_output_weight tensor
      {n_cell},                          // input_gate_bias tensor
      {n_cell},                          // forget_gate_bias tensor
      {n_cell},                          // cell_gate_bias tensor
      {n_cell},                          // output_gate_bias tensor
      {0, 0},                            // projection_weight tensor (unused)
      {0},                               // projection_bias tensor (unused)
      {n_batch, n_output},               // output_state tensor
      {n_batch, n_cell},                 // cell_state tensor
};
HybridUnidirectionalLSTMOpModel lstm(
      n_batch, n_input, n_cell, n_output, sequence_length, /*time_major=*/true,
      use_cifg,
      use_peephole,
      /*use_projection_weights=*/false,
      /*use_projection_bias=*/false,
      /*cell_clip=*/0.0,
      /*proj_clip=*/0.0, input_shapes, TensorType_UINT8,
      asymmetric_quantize_inputs, /*full recurrent weight matrices*/ false);
if (!use_cifg) {
lstm.SetRecurrentToInputWeights({-0.0063535, 0.0, 0.0, 0.0,
0.0, 0.08183324, 0.0, 0.0,
0.0, 0.0, 0.48091322, 0.0,
0.0, 0.0, 0.0, 0.10629296});
}
lstm.SetRecurrentToCellWeights({-0.3407414, 0.0, 0.0, 0.0,
0.0, -0.00123841, 0.0, 0.0,
0.0, 0.0, -0.501764, 0.0,
0.0, 0.0, 0.0, -0.16368064});
lstm.SetRecurrentToForgetWeights({-0.48684245, 0.0, 0.0, 0.0,
0.0, 0.20864892, 0.0, 0.0,
0.0, 0.0, 0.36447752, 0.0,
0.0, 0.0, 0.0, -0.01140004});
lstm.SetRecurrentToOutputWeights({0.43385774, 0.0, 0.0, 0.0,
0.0, -0.39835793, 0.0, 0.0,
0.0, 0.0, 0.20047462, 0.0,
0.0, 0.0, 0.0, 0.39922136});
input_shapes[5] = {n_cell};
input_shapes[6] = {n_cell};
input_shapes[7] = {n_cell};
input_shapes[8] = {n_cell};
HybridUnidirectionalLSTMOpModel indy_lstm(
      n_batch, n_input, n_cell, n_output, sequence_length, /*time_major=*/true,
      use_cifg,
      use_peephole,
      /*use_projection_weights=*/false,
      /*use_projection_bias=*/false,
      /*cell_clip=*/0.0,
      /*proj_clip=*/0.0, input_shapes, TensorType_UINT8,
      asymmetric_quantize_inputs, /*diagonal recurrent weights*/ true);
SetLstmWeights(lstm);
SetLstmWeights(indy_lstm);
if (!use_cifg) {
indy_lstm.SetRecurrentToInputWeights(
{-0.0063535, 0.08183324, 0.48091322, 0.10629296});
}
indy_lstm.SetRecurrentToCellWeights(
{-0.3407414, -0.00123841, -0.501764, -0.16368064});
indy_lstm.SetRecurrentToForgetWeights(
{-0.48684245, 0.20864892, 0.36447752, -0.01140004});
indy_lstm.SetRecurrentToOutputWeights(
{0.43385774, -0.39835793, 0.20047462, 0.39922136});
static float lstm_input[] = {2., 3., 3., 4., 1., 1.};
float* batch0_start = lstm_input;
float* batch0_end = batch0_start + lstm.num_inputs() * lstm.sequence_length();
lstm.SetInput(0, batch0_start, batch0_end);
indy_lstm.SetInput(0, batch0_start, batch0_end);
ASSERT_EQ(lstm.Invoke(), kTfLiteOk);
ASSERT_EQ(indy_lstm.Invoke(), kTfLiteOk);
EXPECT_THAT(indy_lstm.GetOutput(),
ElementsAreArray(ArrayFloatNear(lstm.GetOutput(), 1e-3)));
}
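// Float counterpart of the hybrid test above.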
TEST_P(IndyLSTMOpTest, CheckThatDiagAndNonDiagRecurrentWeightsAreEqual) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
const int sequence_length = 3;
auto params = GetParam();
const bool use_cifg = std::get<0>(params);
const bool use_peephole = std::get<1>(params);
auto SetLstmWeights = [&](UnidirectionalLSTMOpModel& model) -> void {
if (!use_cifg) {
model.SetInputToInputWeights({-0.45018822, -0.02338299, -0.0870589,
-0.34550029, 0.04266912, -0.15680569,
-0.34856534, 0.43890524});
}
model.SetInputToCellWeights({-0.50013041, 0.1370284, 0.11810488, 0.2013163,
-0.20583314, 0.44344562, 0.22077113,
-0.29909778});
model.SetInputToForgetWeights({0.09701663, 0.20334584, -0.50592935,
-0.31343272, -0.40032279, 0.44781327,
0.01387155, -0.35593212});
model.SetInputToOutputWeights({-0.25065863, -0.28290087, 0.04613829,
0.40525138, 0.44272184, 0.03897077,
-0.1556896, 0.19487578});
if (!use_cifg) {
model.SetInputGateBias({0., 0., 0., 0.});
}
model.SetCellBias({0., 0., 0., 0.});
model.SetForgetGateBias({1., 1., 1., 1.});
model.SetOutputGateBias({0., 0., 0., 0.});
if (use_peephole) {
if (!use_cifg) {
model.SetCellToInputWeights(
{0.040369894, 0.030746894, 0.24704495, 0.018586371,
-0.037586458, -0.15312155, -0.11812848, -0.11465643,
0.20259799, 0.11418174, -0.10116027, -0.011334949,
0.12411352, -0.076769054, -0.052169047, 0.21198851,
-0.38871562, -0.09061183, -0.09683246, -0.21929175});
}
model.SetCellToForgetWeights(
{0.47485286, -0.51955009, -0.24458408, 0.31544167});
model.SetCellToOutputWeights(
{-0.17135078, 0.82760304, 0.85573703, -0.77109635});
}
};
std::vector<int> input_weights_shape{n_cell, n_input};
if (use_cifg) {
input_weights_shape = std::vector<int>{0, 0};
}
std::vector<int> recurrent_to_input_weights_shape{n_cell, n_output};
if (use_cifg) {
    recurrent_to_input_weights_shape = std::vector<int>{0, 0};
}
std::vector<std::vector<int>> input_shapes = {
      {sequence_length, n_batch, n_input},  // input tensor
      input_weights_shape,               // input_to_input_weight tensor
      {n_cell, n_input},                 // input_to_forget_weight tensor
      {n_cell, n_input},                 // input_to_cell_weight tensor
      {n_cell, n_input},                 // input_to_output_weight tensor
      recurrent_to_input_weights_shape,  // recurrent_to_input_weight tensor
      {n_cell, n_output},                // recurrent_to_forget_weight tensor
      {n_cell, n_output},                // recurrent_to_cell_weight tensor
      {n_cell, n_output},                // recurrent_to_output_weight tensor
      {(use_peephole && !use_cifg) ? n_cell : 0},  // cell_to_input_weight
      {use_peephole ? n_cell : 0},       // cell_to_forget_weight tensor
      {use_peephole ? n_cell : 0},       // cell_to_output_weight tensor
      {n_cell},                          // input_gate_bias tensor
      {n_cell},                          // forget_gate_bias tensor
      {n_cell},                          // cell_gate_bias tensor
      {n_cell},                          // output_gate_bias tensor
      {0, 0},                            // projection_weight tensor (unused)
      {0},                               // projection_bias tensor (unused)
      {n_batch, n_output},               // output_state tensor
      {n_batch, n_cell},                 // cell_state tensor
};
UnidirectionalLSTMOpModel lstm(n_batch, n_input, n_cell, n_output,
                                 sequence_length, /*time_major=*/true,
                                 use_cifg,
                                 use_peephole,
                                 /*use_projection_weights=*/false,
                                 /*use_projection_bias=*/false,
                                 /*cell_clip=*/0.0,
                                 /*proj_clip=*/0.0, input_shapes);
SetLstmWeights(lstm);
if (!use_cifg) {
lstm.SetRecurrentToInputWeights({-0.0063535, 0.0, 0.0, 0.0,
0.0, 0.08183324, 0.0, 0.0,
0.0, 0.0, 0.48091322, 0.0,
0.0, 0.0, 0.0, 0.10629296});
}
lstm.SetRecurrentToCellWeights({-0.3407414, 0.0, 0.0, 0.0,
0.0, -0.00123841, 0.0, 0.0,
0.0, 0.0, -0.501764, 0.0,
0.0, 0.0, 0.0, -0.16368064});
lstm.SetRecurrentToForgetWeights({-0.48684245, 0.0, 0.0, 0.0,
0.0, 0.20864892, 0.0, 0.0,
0.0, 0.0, 0.36447752, 0.0,
0.0, 0.0, 0.0, -0.01140004});
lstm.SetRecurrentToOutputWeights({0.43385774, 0.0, 0.0, 0.0,
0.0, -0.39835793, 0.0, 0.0,
0.0, 0.0, 0.20047462, 0.0,
0.0, 0.0, 0.0, 0.39922136});
input_shapes[5] = {n_cell};
input_shapes[6] = {n_cell};
input_shapes[7] = {n_cell};
input_shapes[8] = {n_cell};
UnidirectionalLSTMOpModel indy_lstm(
      n_batch, n_input, n_cell, n_output, sequence_length, /*time_major=*/true,
      use_cifg,
      use_peephole,
      /*use_projection_weights=*/false,
      /*use_projection_bias=*/false,
      /*cell_clip=*/0.0,
      /*proj_clip=*/0.0, input_shapes, TensorType_FLOAT32,
      /*layer norm=*/false, /*asymmetric quantize inputs=*/false,
      /*diagonal recurrent weights*/ true);
  SetLstmWeights(indy_lstm);
if (!use_cifg) {
indy_lstm.SetRecurrentToInputWeights(
{-0.0063535, 0.08183324, 0.48091322, 0.10629296});
}
indy_lstm.SetRecurrentToCellWeights(
{-0.3407414, -0.00123841, -0.501764, -0.16368064});
indy_lstm.SetRecurrentToForgetWeights(
{-0.48684245, 0.20864892, 0.36447752, -0.01140004});
indy_lstm.SetRecurrentToOutputWeights(
{0.43385774, -0.39835793, 0.20047462, 0.39922136});
static float lstm_input[] = {2., 3., 3., 4., 1., 1.};
float* batch0_start = lstm_input;
float* batch0_end = batch0_start + lstm.num_inputs() * lstm.sequence_length();
lstm.SetInput(0, batch0_start, batch0_end);
indy_lstm.SetInput(0, batch0_start, batch0_end);
ASSERT_EQ(lstm.Invoke(), kTfLiteOk);
ASSERT_EQ(indy_lstm.Invoke(), kTfLiteOk);
EXPECT_THAT(indy_lstm.GetOutput(),
ElementsAreArray(ArrayFloatNear(lstm.GetOutput(), 1e-6)));
}
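// Instantiates each parameterized hybrid LSTM test for both values of its
// boolean (asymmetric quantization) parameter.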
#define QUANTIZE_PARAMETER_TEST(test) \
INSTANTIATE_TEST_SUITE_P(test, test, ::testing::ValuesIn({false, true}));
QUANTIZE_PARAMETER_TEST(
CifgPeepholeNoProjectionNoClippingUnidirectionalLstmTest);
QUANTIZE_PARAMETER_TEST(
NoCifgNoPeepholeNoProjectionNoClippingUnidirectionalLstmTest);
QUANTIZE_PARAMETER_TEST(NoCifgPeepholeProjectionClippingUnidirectionalLstmTest);
#undef QUANTIZE_PARAMETER_TEST
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/unidirectional_sequence_lstm.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/unidirectional_sequence_lstm_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d3449137-bd0a-42dc-b8f7-95502deb4561 | cpp | google/quiche | uber_quic_stream_id_manager | quiche/quic/core/uber_quic_stream_id_manager.cc | quiche/quic/core/uber_quic_stream_id_manager_test.cc | #include "quiche/quic/core/uber_quic_stream_id_manager.h"
#include <string>
#include "quiche/quic/core/quic_session.h"
#include "quiche/quic/core/quic_utils.h"
namespace quic {
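// Dispatches each operation to the bidirectional or unidirectional
// QuicStreamIdManager owned by this class.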
UberQuicStreamIdManager::UberQuicStreamIdManager(
Perspective perspective, ParsedQuicVersion version,
QuicStreamIdManager::DelegateInterface* delegate,
QuicStreamCount max_open_outgoing_bidirectional_streams,
QuicStreamCount max_open_outgoing_unidirectional_streams,
QuicStreamCount max_open_incoming_bidirectional_streams,
QuicStreamCount max_open_incoming_unidirectional_streams)
: version_(version),
bidirectional_stream_id_manager_(delegate,
                                       /*unidirectional=*/false, perspective,
version,
max_open_outgoing_bidirectional_streams,
max_open_incoming_bidirectional_streams),
unidirectional_stream_id_manager_(
delegate,
          /*unidirectional=*/true, perspective, version,
max_open_outgoing_unidirectional_streams,
max_open_incoming_unidirectional_streams) {}
bool UberQuicStreamIdManager::MaybeAllowNewOutgoingBidirectionalStreams(
QuicStreamCount max_open_streams) {
return bidirectional_stream_id_manager_.MaybeAllowNewOutgoingStreams(
max_open_streams);
}
bool UberQuicStreamIdManager::MaybeAllowNewOutgoingUnidirectionalStreams(
QuicStreamCount max_open_streams) {
return unidirectional_stream_id_manager_.MaybeAllowNewOutgoingStreams(
max_open_streams);
}
void UberQuicStreamIdManager::SetMaxOpenIncomingBidirectionalStreams(
QuicStreamCount max_open_streams) {
bidirectional_stream_id_manager_.SetMaxOpenIncomingStreams(max_open_streams);
}
void UberQuicStreamIdManager::SetMaxOpenIncomingUnidirectionalStreams(
QuicStreamCount max_open_streams) {
unidirectional_stream_id_manager_.SetMaxOpenIncomingStreams(max_open_streams);
}
bool UberQuicStreamIdManager::CanOpenNextOutgoingBidirectionalStream() const {
return bidirectional_stream_id_manager_.CanOpenNextOutgoingStream();
}
bool UberQuicStreamIdManager::CanOpenNextOutgoingUnidirectionalStream() const {
return unidirectional_stream_id_manager_.CanOpenNextOutgoingStream();
}
QuicStreamId UberQuicStreamIdManager::GetNextOutgoingBidirectionalStreamId() {
return bidirectional_stream_id_manager_.GetNextOutgoingStreamId();
}
QuicStreamId UberQuicStreamIdManager::GetNextOutgoingUnidirectionalStreamId() {
return unidirectional_stream_id_manager_.GetNextOutgoingStreamId();
}
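// The remaining operations route to the manager owning |id| based on the
// stream's directionality.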
bool UberQuicStreamIdManager::MaybeIncreaseLargestPeerStreamId(
QuicStreamId id, std::string* error_details) {
if (QuicUtils::IsBidirectionalStreamId(id, version_)) {
return bidirectional_stream_id_manager_.MaybeIncreaseLargestPeerStreamId(
id, error_details);
}
return unidirectional_stream_id_manager_.MaybeIncreaseLargestPeerStreamId(
id, error_details);
}
void UberQuicStreamIdManager::OnStreamClosed(QuicStreamId id) {
if (QuicUtils::IsBidirectionalStreamId(id, version_)) {
bidirectional_stream_id_manager_.OnStreamClosed(id);
return;
}
unidirectional_stream_id_manager_.OnStreamClosed(id);
}
bool UberQuicStreamIdManager::OnStreamsBlockedFrame(
const QuicStreamsBlockedFrame& frame, std::string* error_details) {
if (frame.unidirectional) {
return unidirectional_stream_id_manager_.OnStreamsBlockedFrame(
frame, error_details);
}
return bidirectional_stream_id_manager_.OnStreamsBlockedFrame(frame,
error_details);
}
bool UberQuicStreamIdManager::IsAvailableStream(QuicStreamId id) const {
if (QuicUtils::IsBidirectionalStreamId(id, version_)) {
return bidirectional_stream_id_manager_.IsAvailableStream(id);
}
return unidirectional_stream_id_manager_.IsAvailableStream(id);
}
void UberQuicStreamIdManager::StopIncreasingIncomingMaxStreams() {
unidirectional_stream_id_manager_.StopIncreasingIncomingMaxStreams();
bidirectional_stream_id_manager_.StopIncreasingIncomingMaxStreams();
}
void UberQuicStreamIdManager::MaybeSendMaxStreamsFrame() {
unidirectional_stream_id_manager_.MaybeSendMaxStreamsFrame();
bidirectional_stream_id_manager_.MaybeSendMaxStreamsFrame();
}
QuicStreamCount
UberQuicStreamIdManager::GetMaxAllowdIncomingBidirectionalStreams() const {
return bidirectional_stream_id_manager_.incoming_initial_max_open_streams();
}
QuicStreamCount
UberQuicStreamIdManager::GetMaxAllowdIncomingUnidirectionalStreams() const {
return unidirectional_stream_id_manager_.incoming_initial_max_open_streams();
}
QuicStreamId UberQuicStreamIdManager::GetLargestPeerCreatedStreamId(
bool unidirectional) const {
if (unidirectional) {
return unidirectional_stream_id_manager_.largest_peer_created_stream_id();
}
return bidirectional_stream_id_manager_.largest_peer_created_stream_id();
}
QuicStreamId UberQuicStreamIdManager::next_outgoing_bidirectional_stream_id()
const {
return bidirectional_stream_id_manager_.next_outgoing_stream_id();
}
QuicStreamId UberQuicStreamIdManager::next_outgoing_unidirectional_stream_id()
const {
return unidirectional_stream_id_manager_.next_outgoing_stream_id();
}
QuicStreamCount UberQuicStreamIdManager::max_outgoing_bidirectional_streams()
const {
return bidirectional_stream_id_manager_.outgoing_max_streams();
}
QuicStreamCount UberQuicStreamIdManager::max_outgoing_unidirectional_streams()
const {
return unidirectional_stream_id_manager_.outgoing_max_streams();
}
QuicStreamCount UberQuicStreamIdManager::max_incoming_bidirectional_streams()
const {
return bidirectional_stream_id_manager_.incoming_actual_max_streams();
}
QuicStreamCount UberQuicStreamIdManager::max_incoming_unidirectional_streams()
const {
return unidirectional_stream_id_manager_.incoming_actual_max_streams();
}
QuicStreamCount
UberQuicStreamIdManager::advertised_max_incoming_bidirectional_streams() const {
return bidirectional_stream_id_manager_.incoming_advertised_max_streams();
}
QuicStreamCount
UberQuicStreamIdManager::advertised_max_incoming_unidirectional_streams()
const {
return unidirectional_stream_id_manager_.incoming_advertised_max_streams();
}
QuicStreamCount UberQuicStreamIdManager::outgoing_bidirectional_stream_count()
const {
return bidirectional_stream_id_manager_.outgoing_stream_count();
}
QuicStreamCount UberQuicStreamIdManager::outgoing_unidirectional_stream_count()
const {
return unidirectional_stream_id_manager_.outgoing_stream_count();
}
} | #include "quiche/quic/core/uber_quic_stream_id_manager.h"
#include <string>
#include <vector>
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_stream_id_manager_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
using testing::_;
using testing::StrictMock;
namespace quic {
namespace test {
namespace {
struct TestParams {
explicit TestParams(ParsedQuicVersion version, Perspective perspective)
: version(version), perspective(perspective) {}
ParsedQuicVersion version;
Perspective perspective;
};
std::string PrintToString(const TestParams& p) {
return absl::StrCat(
ParsedQuicVersionToString(p.version), "_",
(p.perspective == Perspective::IS_CLIENT ? "client" : "server"));
}
std::vector<TestParams> GetTestParams() {
std::vector<TestParams> params;
for (const ParsedQuicVersion& version : AllSupportedVersions()) {
if (!version.HasIetfQuicFrames()) {
continue;
}
params.push_back(TestParams(version, Perspective::IS_CLIENT));
params.push_back(TestParams(version, Perspective::IS_SERVER));
}
return params;
}
class MockDelegate : public QuicStreamIdManager::DelegateInterface {
public:
MOCK_METHOD(bool, CanSendMaxStreams, (), (override));
MOCK_METHOD(void, SendMaxStreams,
(QuicStreamCount stream_count, bool unidirectional), (override));
};
class UberQuicStreamIdManagerTest : public QuicTestWithParam<TestParams> {
protected:
UberQuicStreamIdManagerTest()
      : manager_(perspective(), version(), &delegate_,
                 /*max_open_outgoing_bidirectional_streams=*/0,
                 /*max_open_outgoing_unidirectional_streams=*/0,
                 kDefaultMaxStreamsPerConnection,
                 kDefaultMaxStreamsPerConnection) {}
QuicStreamId GetNthClientInitiatedBidirectionalId(int n) {
return QuicUtils::GetFirstBidirectionalStreamId(transport_version(),
Perspective::IS_CLIENT) +
QuicUtils::StreamIdDelta(transport_version()) * n;
}
QuicStreamId GetNthClientInitiatedUnidirectionalId(int n) {
return QuicUtils::GetFirstUnidirectionalStreamId(transport_version(),
Perspective::IS_CLIENT) +
QuicUtils::StreamIdDelta(transport_version()) * n;
}
QuicStreamId GetNthServerInitiatedBidirectionalId(int n) {
return QuicUtils::GetFirstBidirectionalStreamId(transport_version(),
Perspective::IS_SERVER) +
QuicUtils::StreamIdDelta(transport_version()) * n;
}
QuicStreamId GetNthServerInitiatedUnidirectionalId(int n) {
return QuicUtils::GetFirstUnidirectionalStreamId(transport_version(),
Perspective::IS_SERVER) +
QuicUtils::StreamIdDelta(transport_version()) * n;
}
QuicStreamId GetNthPeerInitiatedBidirectionalStreamId(int n) {
return ((perspective() == Perspective::IS_SERVER)
? GetNthClientInitiatedBidirectionalId(n)
: GetNthServerInitiatedBidirectionalId(n));
}
QuicStreamId GetNthPeerInitiatedUnidirectionalStreamId(int n) {
return ((perspective() == Perspective::IS_SERVER)
? GetNthClientInitiatedUnidirectionalId(n)
: GetNthServerInitiatedUnidirectionalId(n));
}
QuicStreamId GetNthSelfInitiatedBidirectionalStreamId(int n) {
return ((perspective() == Perspective::IS_CLIENT)
? GetNthClientInitiatedBidirectionalId(n)
: GetNthServerInitiatedBidirectionalId(n));
}
QuicStreamId GetNthSelfInitiatedUnidirectionalStreamId(int n) {
return ((perspective() == Perspective::IS_CLIENT)
? GetNthClientInitiatedUnidirectionalId(n)
: GetNthServerInitiatedUnidirectionalId(n));
}
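  // Returns the ID of the |stream_count|-th stream of the given type.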
QuicStreamId StreamCountToId(QuicStreamCount stream_count,
Perspective perspective, bool bidirectional) {
return ((bidirectional) ? QuicUtils::GetFirstBidirectionalStreamId(
transport_version(), perspective)
: QuicUtils::GetFirstUnidirectionalStreamId(
transport_version(), perspective)) +
((stream_count - 1) * QuicUtils::StreamIdDelta(transport_version()));
}
ParsedQuicVersion version() { return GetParam().version; }
QuicTransportVersion transport_version() {
return version().transport_version;
}
Perspective perspective() { return GetParam().perspective; }
testing::StrictMock<MockDelegate> delegate_;
UberQuicStreamIdManager manager_;
};
INSTANTIATE_TEST_SUITE_P(Tests, UberQuicStreamIdManagerTest,
::testing::ValuesIn(GetTestParams()),
::testing::PrintToStringParamName());
TEST_P(UberQuicStreamIdManagerTest, Initialization) {
EXPECT_EQ(GetNthSelfInitiatedBidirectionalStreamId(0),
manager_.next_outgoing_bidirectional_stream_id());
EXPECT_EQ(GetNthSelfInitiatedUnidirectionalStreamId(0),
manager_.next_outgoing_unidirectional_stream_id());
}
TEST_P(UberQuicStreamIdManagerTest, SetMaxOpenOutgoingStreams) {
const size_t kNumMaxOutgoingStream = 123;
EXPECT_TRUE(manager_.MaybeAllowNewOutgoingBidirectionalStreams(
kNumMaxOutgoingStream));
EXPECT_TRUE(manager_.MaybeAllowNewOutgoingUnidirectionalStreams(
kNumMaxOutgoingStream + 1));
EXPECT_EQ(kNumMaxOutgoingStream,
manager_.max_outgoing_bidirectional_streams());
EXPECT_EQ(kNumMaxOutgoingStream + 1,
manager_.max_outgoing_unidirectional_streams());
int i = kNumMaxOutgoingStream;
while (i) {
EXPECT_TRUE(manager_.CanOpenNextOutgoingBidirectionalStream());
manager_.GetNextOutgoingBidirectionalStreamId();
EXPECT_TRUE(manager_.CanOpenNextOutgoingUnidirectionalStream());
manager_.GetNextOutgoingUnidirectionalStreamId();
i--;
}
EXPECT_TRUE(manager_.CanOpenNextOutgoingUnidirectionalStream());
manager_.GetNextOutgoingUnidirectionalStreamId();
EXPECT_FALSE(manager_.CanOpenNextOutgoingUnidirectionalStream());
EXPECT_FALSE(manager_.CanOpenNextOutgoingBidirectionalStream());
}
TEST_P(UberQuicStreamIdManagerTest, SetMaxOpenIncomingStreams) {
const size_t kNumMaxIncomingStreams = 456;
manager_.SetMaxOpenIncomingUnidirectionalStreams(kNumMaxIncomingStreams);
manager_.SetMaxOpenIncomingBidirectionalStreams(kNumMaxIncomingStreams + 1);
EXPECT_EQ(kNumMaxIncomingStreams + 1,
manager_.GetMaxAllowdIncomingBidirectionalStreams());
EXPECT_EQ(kNumMaxIncomingStreams,
manager_.GetMaxAllowdIncomingUnidirectionalStreams());
EXPECT_EQ(manager_.max_incoming_bidirectional_streams(),
manager_.advertised_max_incoming_bidirectional_streams());
EXPECT_EQ(manager_.max_incoming_unidirectional_streams(),
manager_.advertised_max_incoming_unidirectional_streams());
size_t i;
for (i = 0; i < kNumMaxIncomingStreams; i++) {
EXPECT_TRUE(manager_.MaybeIncreaseLargestPeerStreamId(
GetNthPeerInitiatedUnidirectionalStreamId(i), nullptr));
EXPECT_TRUE(manager_.MaybeIncreaseLargestPeerStreamId(
GetNthPeerInitiatedBidirectionalStreamId(i), nullptr));
}
EXPECT_TRUE(manager_.MaybeIncreaseLargestPeerStreamId(
GetNthPeerInitiatedBidirectionalStreamId(i), nullptr));
std::string error_details;
EXPECT_FALSE(manager_.MaybeIncreaseLargestPeerStreamId(
GetNthPeerInitiatedUnidirectionalStreamId(i), &error_details));
EXPECT_EQ(error_details,
absl::StrCat(
"Stream id ", GetNthPeerInitiatedUnidirectionalStreamId(i),
" would exceed stream count limit ", kNumMaxIncomingStreams));
EXPECT_FALSE(manager_.MaybeIncreaseLargestPeerStreamId(
GetNthPeerInitiatedBidirectionalStreamId(i + 1), &error_details));
EXPECT_EQ(error_details,
absl::StrCat("Stream id ",
GetNthPeerInitiatedBidirectionalStreamId(i + 1),
" would exceed stream count limit ",
kNumMaxIncomingStreams + 1));
}
TEST_P(UberQuicStreamIdManagerTest, GetNextOutgoingStreamId) {
EXPECT_TRUE(manager_.MaybeAllowNewOutgoingBidirectionalStreams(10));
EXPECT_TRUE(manager_.MaybeAllowNewOutgoingUnidirectionalStreams(10));
EXPECT_EQ(GetNthSelfInitiatedBidirectionalStreamId(0),
manager_.GetNextOutgoingBidirectionalStreamId());
EXPECT_EQ(GetNthSelfInitiatedBidirectionalStreamId(1),
manager_.GetNextOutgoingBidirectionalStreamId());
EXPECT_EQ(GetNthSelfInitiatedUnidirectionalStreamId(0),
manager_.GetNextOutgoingUnidirectionalStreamId());
EXPECT_EQ(GetNthSelfInitiatedUnidirectionalStreamId(1),
manager_.GetNextOutgoingUnidirectionalStreamId());
}
TEST_P(UberQuicStreamIdManagerTest, AvailableStreams) {
EXPECT_TRUE(manager_.MaybeIncreaseLargestPeerStreamId(
GetNthPeerInitiatedBidirectionalStreamId(3), nullptr));
EXPECT_TRUE(
manager_.IsAvailableStream(GetNthPeerInitiatedBidirectionalStreamId(1)));
EXPECT_TRUE(
manager_.IsAvailableStream(GetNthPeerInitiatedBidirectionalStreamId(2)));
EXPECT_TRUE(manager_.MaybeIncreaseLargestPeerStreamId(
GetNthPeerInitiatedUnidirectionalStreamId(3), nullptr));
EXPECT_TRUE(
manager_.IsAvailableStream(GetNthPeerInitiatedUnidirectionalStreamId(1)));
EXPECT_TRUE(
manager_.IsAvailableStream(GetNthPeerInitiatedUnidirectionalStreamId(2)));
}
TEST_P(UberQuicStreamIdManagerTest, MaybeIncreaseLargestPeerStreamId) {
EXPECT_TRUE(manager_.MaybeIncreaseLargestPeerStreamId(
StreamCountToId(manager_.max_incoming_bidirectional_streams(),
QuicUtils::InvertPerspective(perspective()),
true),
nullptr));
EXPECT_TRUE(manager_.MaybeIncreaseLargestPeerStreamId(
StreamCountToId(manager_.max_incoming_bidirectional_streams(),
QuicUtils::InvertPerspective(perspective()),
false),
nullptr));
std::string expected_error_details =
perspective() == Perspective::IS_SERVER
? "Stream id 400 would exceed stream count limit 100"
: "Stream id 401 would exceed stream count limit 100";
std::string error_details;
EXPECT_FALSE(manager_.MaybeIncreaseLargestPeerStreamId(
StreamCountToId(manager_.max_incoming_bidirectional_streams() + 1,
QuicUtils::InvertPerspective(perspective()),
true),
&error_details));
EXPECT_EQ(expected_error_details, error_details);
expected_error_details =
perspective() == Perspective::IS_SERVER
? "Stream id 402 would exceed stream count limit 100"
: "Stream id 403 would exceed stream count limit 100";
EXPECT_FALSE(manager_.MaybeIncreaseLargestPeerStreamId(
StreamCountToId(manager_.max_incoming_bidirectional_streams() + 1,
QuicUtils::InvertPerspective(perspective()),
false),
&error_details));
EXPECT_EQ(expected_error_details, error_details);
}
TEST_P(UberQuicStreamIdManagerTest, OnStreamsBlockedFrame) {
QuicStreamCount stream_count =
manager_.advertised_max_incoming_bidirectional_streams() - 1;
QuicStreamsBlockedFrame frame(kInvalidControlFrameId, stream_count,
                                /*unidirectional=*/false);
EXPECT_CALL(delegate_,
SendMaxStreams(manager_.max_incoming_bidirectional_streams(),
frame.unidirectional))
.Times(0);
EXPECT_TRUE(manager_.OnStreamsBlockedFrame(frame, nullptr));
stream_count = manager_.advertised_max_incoming_unidirectional_streams() - 1;
frame.stream_count = stream_count;
frame.unidirectional = true;
EXPECT_CALL(delegate_,
SendMaxStreams(manager_.max_incoming_unidirectional_streams(),
frame.unidirectional))
.Times(0);
EXPECT_TRUE(manager_.OnStreamsBlockedFrame(frame, nullptr));
}
TEST_P(UberQuicStreamIdManagerTest, SetMaxOpenOutgoingStreamsPlusFrame) {
const size_t kNumMaxOutgoingStream = 123;
EXPECT_TRUE(manager_.MaybeAllowNewOutgoingBidirectionalStreams(
kNumMaxOutgoingStream));
EXPECT_TRUE(manager_.MaybeAllowNewOutgoingUnidirectionalStreams(
kNumMaxOutgoingStream + 1));
EXPECT_EQ(kNumMaxOutgoingStream,
manager_.max_outgoing_bidirectional_streams());
EXPECT_EQ(kNumMaxOutgoingStream + 1,
manager_.max_outgoing_unidirectional_streams());
int i = kNumMaxOutgoingStream;
while (i) {
EXPECT_TRUE(manager_.CanOpenNextOutgoingBidirectionalStream());
manager_.GetNextOutgoingBidirectionalStreamId();
EXPECT_TRUE(manager_.CanOpenNextOutgoingUnidirectionalStream());
manager_.GetNextOutgoingUnidirectionalStreamId();
i--;
}
EXPECT_TRUE(manager_.CanOpenNextOutgoingUnidirectionalStream());
manager_.GetNextOutgoingUnidirectionalStreamId();
EXPECT_FALSE(manager_.CanOpenNextOutgoingUnidirectionalStream());
EXPECT_FALSE(manager_.CanOpenNextOutgoingBidirectionalStream());
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/uber_quic_stream_id_manager.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/uber_quic_stream_id_manager_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
73972d77-4a96-4dd2-9317-ea35ef86c645 | cpp | google/libaddressinput | supplier | cpp/include/libaddressinput/supplier.h | cpp/test/supplier_test.cc | #ifndef I18N_ADDRESSINPUT_SUPPLIER_H_
#define I18N_ADDRESSINPUT_SUPPLIER_H_
#include <libaddressinput/callback.h>
#include <cstddef>
#include <string>
namespace i18n {
namespace addressinput {
class LookupKey;
class Rule;
class Supplier {
public:
struct RuleHierarchy;
using Callback =
i18n::addressinput::Callback<const LookupKey&, const RuleHierarchy&>;
virtual ~Supplier() = default;
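  // Supplies the rule hierarchy for |lookup_key| to |supplied|.
  // SupplyGlobally does the same but considers rules for all languages of
  // the region, not only the default one.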
virtual void Supply(const LookupKey& lookup_key,
const Callback& supplied) = 0;
virtual void SupplyGlobally(const LookupKey& lookup_key,
const Callback& supplied) = 0;
virtual size_t GetLoadedRuleDepth(const std::string& region_code) const {
return 0;
}
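  // One rule per level of the region hierarchy (country, admin area,
  // locality, dependent locality); unmatched levels remain null.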
struct RuleHierarchy {
RuleHierarchy() : rule() {}
const Rule* rule[4];
};
};
}
}
#endif | #include <libaddressinput/supplier.h>
#include <libaddressinput/address_data.h>
#include <libaddressinput/callback.h>
#include <libaddressinput/null_storage.h>
#include <libaddressinput/ondemand_supplier.h>
#include <libaddressinput/preload_supplier.h>
#include <cstddef>
#include <cstring>
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "lookup_key.h"
#include "rule.h"
#include "testdata_source.h"
#include "util/size.h"
namespace {
using i18n::addressinput::AddressData;
using i18n::addressinput::BuildCallback;
using i18n::addressinput::LookupKey;
using i18n::addressinput::NullStorage;
using i18n::addressinput::OndemandSupplier;
using i18n::addressinput::PreloadSupplier;
using i18n::addressinput::Rule;
using i18n::addressinput::Supplier;
using i18n::addressinput::TestdataSource;
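// Each test runs against both supplier implementations through a common
// wrapper interface.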
class SupplierWrapper {
public:
virtual ~SupplierWrapper() = default;
virtual void Supply(const LookupKey& lookup_key,
const Supplier::Callback& supplied) = 0;
};
class OndemandSupplierWrapper : public SupplierWrapper {
public:
OndemandSupplierWrapper(const OndemandSupplierWrapper&) = delete;
OndemandSupplierWrapper& operator=(const OndemandSupplierWrapper&) = delete;
static SupplierWrapper* Build() { return new OndemandSupplierWrapper; }
void Supply(const LookupKey& lookup_key,
const Supplier::Callback& supplied) override {
ondemand_supplier_.Supply(lookup_key, supplied);
}
private:
OndemandSupplierWrapper()
: ondemand_supplier_(new TestdataSource(false), new NullStorage) {}
OndemandSupplier ondemand_supplier_;
};
class PreloadSupplierWrapper : public SupplierWrapper {
public:
PreloadSupplierWrapper(const PreloadSupplierWrapper&) = delete;
PreloadSupplierWrapper& operator=(const PreloadSupplierWrapper&) = delete;
static SupplierWrapper* Build() { return new PreloadSupplierWrapper; }
void Supply(const LookupKey& lookup_key,
const Supplier::Callback& supplied) override {
const std::string& region_code = lookup_key.GetRegionCode();
if (!region_code.empty() && !preload_supplier_.IsLoaded(region_code)) {
preload_supplier_.LoadRules(region_code, *loaded_);
}
preload_supplier_.Supply(lookup_key, supplied);
}
private:
PreloadSupplierWrapper()
: preload_supplier_(new TestdataSource(true), new NullStorage),
loaded_(BuildCallback(this, &PreloadSupplierWrapper::Loaded)) {}
void Loaded(bool success, const std::string&, int) { ASSERT_TRUE(success); }
PreloadSupplier preload_supplier_;
const std::unique_ptr<const PreloadSupplier::Callback> loaded_;
};
class SupplierTest : public testing::TestWithParam<SupplierWrapper* (*)()> {
public:
SupplierTest(const SupplierTest&) = delete;
SupplierTest& operator=(const SupplierTest&) = delete;
protected:
SupplierTest()
: address_(),
rule_(),
called_(false),
lookup_key_(),
supplier_wrapper_((*GetParam())()),
supplied_(BuildCallback(this, &SupplierTest::Supplied)) {}
void Supply() {
lookup_key_.FromAddress(address_);
supplier_wrapper_->Supply(lookup_key_, *supplied_);
}
AddressData address_;
const Rule* rule_[size(LookupKey::kHierarchy)];
bool called_;
private:
void Supplied(bool success,
const LookupKey& lookup_key,
const Supplier::RuleHierarchy& hierarchy) {
ASSERT_TRUE(success);
ASSERT_EQ(&lookup_key_, &lookup_key);
std::memcpy(rule_, hierarchy.rule, sizeof rule_);
called_ = true;
}
LookupKey lookup_key_;
const std::unique_ptr<SupplierWrapper> supplier_wrapper_;
const std::unique_ptr<const Supplier::Callback> supplied_;
};
INSTANTIATE_TEST_SUITE_P(OndemandSupplier, SupplierTest,
testing::Values(&OndemandSupplierWrapper::Build));
INSTANTIATE_TEST_SUITE_P(PreloadSupplier, SupplierTest,
testing::Values(&PreloadSupplierWrapper::Build));
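// "QZ" is not a valid region code, so no rules are supplied.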
TEST_P(SupplierTest, Invalid) {
address_ = {.region_code = "QZ"};
ASSERT_NO_FATAL_FAILURE(Supply());
ASSERT_TRUE(called_);
EXPECT_TRUE(rule_[0] == nullptr);
EXPECT_TRUE(rule_[1] == nullptr);
EXPECT_TRUE(rule_[2] == nullptr);
EXPECT_TRUE(rule_[3] == nullptr);
}
TEST_P(SupplierTest, Valid) {
address_ = {.region_code = "SE"};
ASSERT_NO_FATAL_FAILURE(Supply());
ASSERT_TRUE(called_);
EXPECT_TRUE(rule_[0] != nullptr);
EXPECT_TRUE(rule_[1] == nullptr);
EXPECT_TRUE(rule_[2] == nullptr);
EXPECT_TRUE(rule_[3] == nullptr);
EXPECT_EQ("data/SE", rule_[0]->GetId());
EXPECT_FALSE(rule_[0]->GetRequired().empty());
EXPECT_FALSE(rule_[0]->GetFormat().empty());
EXPECT_TRUE(rule_[0]->GetPostalCodeMatcher() != nullptr);
}
TEST_P(SupplierTest, KeyDepthEqualsMaxDepth) {
address_ = {
.region_code = "HK",
.administrative_area = "九龍",
};
ASSERT_NO_FATAL_FAILURE(Supply());
ASSERT_TRUE(called_);
EXPECT_TRUE(rule_[0] != nullptr);
EXPECT_TRUE(rule_[1] != nullptr);
EXPECT_TRUE(rule_[2] == nullptr);
EXPECT_TRUE(rule_[3] == nullptr);
}
TEST_P(SupplierTest, KeyDepthLargerThanMaxDepth) {
address_ = {
.region_code = "HK",
.administrative_area = "九龍",
.locality = "bbb",
};
ASSERT_NO_FATAL_FAILURE(Supply());
ASSERT_TRUE(called_);
EXPECT_TRUE(rule_[0] != nullptr);
EXPECT_TRUE(rule_[1] != nullptr);
EXPECT_TRUE(rule_[2] == nullptr);
EXPECT_TRUE(rule_[3] == nullptr);
}
TEST_P(SupplierTest, KeyDepthSmallerThanMaxDepth) {
address_ = {.region_code = "HK"};
ASSERT_NO_FATAL_FAILURE(Supply());
ASSERT_TRUE(called_);
EXPECT_TRUE(rule_[0] != nullptr);
EXPECT_TRUE(rule_[1] == nullptr);
EXPECT_TRUE(rule_[2] == nullptr);
EXPECT_TRUE(rule_[3] == nullptr);
}
TEST_P(SupplierTest, KeyDepth0) {
address_ = {.region_code = "CN"};
ASSERT_NO_FATAL_FAILURE(Supply());
ASSERT_TRUE(called_);
EXPECT_TRUE(rule_[0] != nullptr);
EXPECT_TRUE(rule_[1] == nullptr);
EXPECT_TRUE(rule_[2] == nullptr);
EXPECT_TRUE(rule_[3] == nullptr);
}
TEST_P(SupplierTest, KeyDepth1) {
address_ = {
.region_code = "CN",
.administrative_area = "新疆维吾尔自治区",
};
ASSERT_NO_FATAL_FAILURE(Supply());
ASSERT_TRUE(called_);
EXPECT_TRUE(rule_[0] != nullptr);
EXPECT_TRUE(rule_[1] != nullptr);
EXPECT_TRUE(rule_[2] == nullptr);
EXPECT_TRUE(rule_[3] == nullptr);
}
TEST_P(SupplierTest, KeyDepth2) {
address_ = {
.region_code = "CN",
.administrative_area = "新疆维吾尔自治区",
.locality = "喀什地区",
};
ASSERT_NO_FATAL_FAILURE(Supply());
ASSERT_TRUE(called_);
EXPECT_TRUE(rule_[0] != nullptr);
EXPECT_TRUE(rule_[1] != nullptr);
EXPECT_TRUE(rule_[2] != nullptr);
EXPECT_TRUE(rule_[3] == nullptr);
}
TEST_P(SupplierTest, KeyDepth3) {
address_ = {
.region_code = "CN",
.administrative_area = "新疆维吾尔自治区",
.locality = "喀什地区",
.dependent_locality = "喀什市",
};
ASSERT_NO_FATAL_FAILURE(Supply());
ASSERT_TRUE(called_);
EXPECT_TRUE(rule_[0] != nullptr);
EXPECT_TRUE(rule_[1] != nullptr);
EXPECT_TRUE(rule_[2] != nullptr);
EXPECT_TRUE(rule_[3] != nullptr);
}
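// Supplying the same lookup key twice should yield identical Rule pointers,
// demonstrating that the supplier caches rule objects.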
TEST_P(SupplierTest, RuleCache) {
address_ = {
.region_code = "US",
.administrative_area = "CA",
};
ASSERT_NO_FATAL_FAILURE(Supply());
ASSERT_TRUE(called_);
EXPECT_TRUE(rule_[0] != nullptr);
EXPECT_TRUE(rule_[1] != nullptr);
EXPECT_TRUE(rule_[2] == nullptr);
EXPECT_TRUE(rule_[3] == nullptr);
const Rule* rule[size(LookupKey::kHierarchy)];
std::memcpy(rule, rule_, sizeof rule);
called_ = false;
ASSERT_NO_FATAL_FAILURE(Supply());
ASSERT_TRUE(called_);
EXPECT_EQ(rule[0], rule_[0]);
EXPECT_EQ(rule[1], rule_[1]);
EXPECT_EQ(rule[2], rule_[2]);
EXPECT_EQ(rule[3], rule_[3]);
}
} | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/include/libaddressinput/supplier.h | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/supplier_test.cc | 2610f7b1043d6784ada41392fc9392d1ea09ea07 |
2f120f00-d9e2-4867-8061-62cad0e7bc44 | cpp | tensorflow/tensorflow | neg | tensorflow/lite/kernels/neg.cc | tensorflow/lite/delegates/xnnpack/neg_test.cc | #include "tensorflow/lite/kernels/internal/reference/neg.h"
#include <stdint.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace neg {
constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;
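// Validates that the node has exactly one input and one output, propagates
// the input type, and resizes the output tensor to the input's shape.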
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
output->type = input->type;
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
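  // Dispatch on the input type; Neg supports only int64, int32 and float32.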
switch (input->type) {
case kTfLiteInt64:
reference_ops::Negate(
GetTensorShape(input), GetTensorData<int64_t>(input),
GetTensorShape(output), GetTensorData<int64_t>(output));
break;
case kTfLiteInt32:
reference_ops::Negate(
GetTensorShape(input), GetTensorData<int32_t>(input),
GetTensorShape(output), GetTensorData<int32_t>(output));
break;
case kTfLiteFloat32:
reference_ops::Negate(GetTensorShape(input), GetTensorData<float>(input),
GetTensorShape(output),
GetTensorData<float>(output));
break;
default:
TF_LITE_KERNEL_LOG(
context,
"Neg only currently supports int64, int32, and float32, got %d.",
input->type);
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_NEG() {
  static TfLiteRegistration r = {/*init=*/nullptr, /*free=*/nullptr,
                                 neg::Prepare, neg::Eval};
return &r;
}
}
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
TEST(Neg, 4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_NEG, xnnpack_delegate.get());
}
TEST(Neg, 3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, width, channels})
.Test(BuiltinOperator_NEG, xnnpack_delegate.get());
}
TEST(Neg, 2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, channels})
.Test(BuiltinOperator_NEG, xnnpack_delegate.get());
}
TEST(Neg, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
UnaryElementwiseTester().Shape({batch}).Test(BuiltinOperator_NEG,
xnnpack_delegate.get());
}
TEST(Neg, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.Test(BuiltinOperator_NEG, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/neg.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/neg_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
55da78bf-faae-4166-88d4-884db9fc1158 | cpp | abseil/abseil-cpp | str_cat | absl/strings/str_cat.cc | absl/strings/str_cat_test.cc | #include "absl/strings/str_cat.h"
#include <assert.h>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <initializer_list>
#include <limits>
#include <string>
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/nullability.h"
#include "absl/strings/internal/resize_uninitialized.h"
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace {
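// Writes the bytes of `x` at `out` and returns the position just past the
// last byte written.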
inline absl::Nonnull<char*> Append(absl::Nonnull<char*> out,
const AlphaNum& x) {
char* after = out + x.size();
if (x.size() != 0) {
memcpy(out, x.data(), x.size());
}
return after;
}
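// Appends `to_append` uninitialized chars to `dest`; the "Amortized" variant
// grows capacity geometrically so repeated appends stay cheap.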
inline void STLStringAppendUninitializedAmortized(std::string* dest,
size_t to_append) {
strings_internal::AppendUninitializedTraits<std::string>::Append(dest,
to_append);
}
}
std::string StrCat(const AlphaNum& a, const AlphaNum& b) {
std::string result;
constexpr uint64_t kMaxSize = uint64_t{std::numeric_limits<size_t>::max()};
const uint64_t result_size =
static_cast<uint64_t>(a.size()) + static_cast<uint64_t>(b.size());
ABSL_INTERNAL_CHECK(result_size <= kMaxSize, "size_t overflow");
absl::strings_internal::STLStringResizeUninitialized(
&result, static_cast<size_t>(result_size));
char* const begin = &result[0];
char* out = begin;
out = Append(out, a);
out = Append(out, b);
assert(out == begin + result.size());
return result;
}
std::string StrCat(const AlphaNum& a, const AlphaNum& b, const AlphaNum& c) {
std::string result;
constexpr uint64_t kMaxSize = uint64_t{std::numeric_limits<size_t>::max()};
const uint64_t result_size = static_cast<uint64_t>(a.size()) +
static_cast<uint64_t>(b.size()) +
static_cast<uint64_t>(c.size());
ABSL_INTERNAL_CHECK(result_size <= kMaxSize, "size_t overflow");
strings_internal::STLStringResizeUninitialized(
&result, static_cast<size_t>(result_size));
char* const begin = &result[0];
char* out = begin;
out = Append(out, a);
out = Append(out, b);
out = Append(out, c);
assert(out == begin + result.size());
return result;
}
std::string StrCat(const AlphaNum& a, const AlphaNum& b, const AlphaNum& c,
const AlphaNum& d) {
std::string result;
constexpr uint64_t kMaxSize = uint64_t{std::numeric_limits<size_t>::max()};
const uint64_t result_size = static_cast<uint64_t>(a.size()) +
static_cast<uint64_t>(b.size()) +
static_cast<uint64_t>(c.size()) +
static_cast<uint64_t>(d.size());
ABSL_INTERNAL_CHECK(result_size <= kMaxSize, "size_t overflow");
strings_internal::STLStringResizeUninitialized(
&result, static_cast<size_t>(result_size));
char* const begin = &result[0];
char* out = begin;
out = Append(out, a);
out = Append(out, b);
out = Append(out, c);
out = Append(out, d);
assert(out == begin + result.size());
return result;
}
namespace strings_internal {
std::string CatPieces(std::initializer_list<absl::string_view> pieces) {
std::string result;
constexpr uint64_t kMaxSize = uint64_t{std::numeric_limits<size_t>::max()};
uint64_t total_size = 0;
for (absl::string_view piece : pieces) {
total_size += piece.size();
}
ABSL_INTERNAL_CHECK(total_size <= kMaxSize, "size_t overflow");
strings_internal::STLStringResizeUninitialized(
&result, static_cast<size_t>(total_size));
char* const begin = &result[0];
char* out = begin;
for (absl::string_view piece : pieces) {
const size_t this_size = piece.size();
if (this_size != 0) {
memcpy(out, piece.data(), this_size);
out += this_size;
}
}
assert(out == begin + result.size());
return result;
}
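// Asserts (in debug builds) that a source string_view does not alias the
// destination string, since appending may reallocate and invalidate it.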
#define ASSERT_NO_OVERLAP(dest, src) \
assert(((src).size() == 0) || \
(uintptr_t((src).data() - (dest).data()) > uintptr_t((dest).size())))
void AppendPieces(absl::Nonnull<std::string*> dest,
std::initializer_list<absl::string_view> pieces) {
size_t old_size = dest->size();
size_t to_append = 0;
for (absl::string_view piece : pieces) {
ASSERT_NO_OVERLAP(*dest, piece);
to_append += piece.size();
}
STLStringAppendUninitializedAmortized(dest, to_append);
char* const begin = &(*dest)[0];
char* out = begin + old_size;
for (absl::string_view piece : pieces) {
const size_t this_size = piece.size();
if (this_size != 0) {
memcpy(out, piece.data(), this_size);
out += this_size;
}
}
assert(out == begin + dest->size());
}
}
void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a) {
ASSERT_NO_OVERLAP(*dest, a);
std::string::size_type old_size = dest->size();
STLStringAppendUninitializedAmortized(dest, a.size());
char* const begin = &(*dest)[0];
char* out = begin + old_size;
out = Append(out, a);
assert(out == begin + dest->size());
}
void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a,
const AlphaNum& b) {
ASSERT_NO_OVERLAP(*dest, a);
ASSERT_NO_OVERLAP(*dest, b);
std::string::size_type old_size = dest->size();
STLStringAppendUninitializedAmortized(dest, a.size() + b.size());
char* const begin = &(*dest)[0];
char* out = begin + old_size;
out = Append(out, a);
out = Append(out, b);
assert(out == begin + dest->size());
}
void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a,
const AlphaNum& b, const AlphaNum& c) {
ASSERT_NO_OVERLAP(*dest, a);
ASSERT_NO_OVERLAP(*dest, b);
ASSERT_NO_OVERLAP(*dest, c);
std::string::size_type old_size = dest->size();
STLStringAppendUninitializedAmortized(dest, a.size() + b.size() + c.size());
char* const begin = &(*dest)[0];
char* out = begin + old_size;
out = Append(out, a);
out = Append(out, b);
out = Append(out, c);
assert(out == begin + dest->size());
}
void StrAppend(absl::Nonnull<std::string*> dest, const AlphaNum& a,
const AlphaNum& b, const AlphaNum& c, const AlphaNum& d) {
ASSERT_NO_OVERLAP(*dest, a);
ASSERT_NO_OVERLAP(*dest, b);
ASSERT_NO_OVERLAP(*dest, c);
ASSERT_NO_OVERLAP(*dest, d);
std::string::size_type old_size = dest->size();
STLStringAppendUninitializedAmortized(
dest, a.size() + b.size() + c.size() + d.size());
char* const begin = &(*dest)[0];
char* out = begin + old_size;
out = Append(out, a);
out = Append(out, b);
out = Append(out, c);
out = Append(out, d);
assert(out == begin + dest->size());
}
ABSL_NAMESPACE_END
} | #include "absl/strings/str_cat.h"
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <string>
#include <vector>
#include "gtest/gtest.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#ifdef __ANDROID__
#define ABSL_EXPECT_DEBUG_DEATH(statement, regex) \
EXPECT_DEBUG_DEATH(statement, ".*")
#else
#define ABSL_EXPECT_DEBUG_DEATH(statement, regex) \
EXPECT_DEBUG_DEATH(statement, regex)
#endif
namespace {
TEST(StrCat, Ints) {
const short s = -1;
const uint16_t us = 2;
const int i = -3;
const unsigned int ui = 4;
const long l = -5;
const unsigned long ul = 6;
const long long ll = -7;
const unsigned long long ull = 8;
const ptrdiff_t ptrdiff = -9;
const size_t size = 10;
const intptr_t intptr = -12;
const uintptr_t uintptr = 13;
std::string answer;
answer = absl::StrCat(s, us);
EXPECT_EQ(answer, "-12");
answer = absl::StrCat(i, ui);
EXPECT_EQ(answer, "-34");
answer = absl::StrCat(l, ul);
EXPECT_EQ(answer, "-56");
answer = absl::StrCat(ll, ull);
EXPECT_EQ(answer, "-78");
answer = absl::StrCat(ptrdiff, size);
EXPECT_EQ(answer, "-910");
answer = absl::StrCat(ptrdiff, intptr);
EXPECT_EQ(answer, "-9-12");
answer = absl::StrCat(uintptr, 0);
EXPECT_EQ(answer, "130");
}
TEST(StrCat, Enums) {
enum SmallNumbers { One = 1, Ten = 10 } e = Ten;
EXPECT_EQ("10", absl::StrCat(e));
EXPECT_EQ("-5", absl::StrCat(SmallNumbers(-5)));
enum class Option { Boxers = 1, Briefs = -1 };
EXPECT_EQ("-1", absl::StrCat(Option::Briefs));
enum class Airplane : uint64_t {
Airbus = 1,
Boeing = 1000,
Canary = 10000000000
};
EXPECT_EQ("10000000000", absl::StrCat(Airplane::Canary));
enum class TwoGig : int32_t {
TwoToTheZero = 1,
TwoToTheSixteenth = 1 << 16,
TwoToTheThirtyFirst = INT32_MIN
};
EXPECT_EQ("65536", absl::StrCat(TwoGig::TwoToTheSixteenth));
EXPECT_EQ("-2147483648", absl::StrCat(TwoGig::TwoToTheThirtyFirst));
EXPECT_EQ("-1", absl::StrCat(static_cast<TwoGig>(-1)));
enum class FourGig : uint32_t {
TwoToTheZero = 1,
TwoToTheSixteenth = 1 << 16,
TwoToTheThirtyFirst = 1U << 31
};
EXPECT_EQ("65536", absl::StrCat(FourGig::TwoToTheSixteenth));
EXPECT_EQ("2147483648", absl::StrCat(FourGig::TwoToTheThirtyFirst));
EXPECT_EQ("4294967295", absl::StrCat(static_cast<FourGig>(-1)));
EXPECT_EQ("10000000000", absl::StrCat(Airplane::Canary));
}
TEST(StrCat, Basics) {
std::string result;
std::string strs[] = {"Hello", "Cruel", "World"};
std::string stdstrs[] = {
"std::Hello",
"std::Cruel",
"std::World"
};
absl::string_view pieces[] = {"Hello", "Cruel", "World"};
const char* c_strs[] = {
"Hello",
"Cruel",
"World"
};
int32_t i32s[] = {'H', 'C', 'W'};
uint64_t ui64s[] = {12345678910LL, 10987654321LL};
EXPECT_EQ(absl::StrCat(), "");
result = absl::StrCat(false, true, 2, 3);
EXPECT_EQ(result, "0123");
result = absl::StrCat(-1);
EXPECT_EQ(result, "-1");
result = absl::StrCat(absl::SixDigits(0.5));
EXPECT_EQ(result, "0.5");
result = absl::StrCat(strs[1], pieces[2]);
EXPECT_EQ(result, "CruelWorld");
result = absl::StrCat(stdstrs[1], " ", stdstrs[2]);
EXPECT_EQ(result, "std::Cruel std::World");
result = absl::StrCat(strs[0], ", ", pieces[2]);
EXPECT_EQ(result, "Hello, World");
result = absl::StrCat(strs[0], ", ", strs[1], " ", strs[2], "!");
EXPECT_EQ(result, "Hello, Cruel World!");
result = absl::StrCat(pieces[0], ", ", pieces[1], " ", pieces[2]);
EXPECT_EQ(result, "Hello, Cruel World");
result = absl::StrCat(c_strs[0], ", ", c_strs[1], " ", c_strs[2]);
EXPECT_EQ(result, "Hello, Cruel World");
result = absl::StrCat("ASCII ", i32s[0], ", ", i32s[1], " ", i32s[2], "!");
EXPECT_EQ(result, "ASCII 72, 67 87!");
result = absl::StrCat(ui64s[0], ", ", ui64s[1], "!");
EXPECT_EQ(result, "12345678910, 10987654321!");
std::string one =
"1";
result = absl::StrCat("And a ", one.size(), " and a ",
&result[2] - &result[0], " and a ", one, " 2 3 4", "!");
EXPECT_EQ(result, "And a 1 and a 2 and a 1 2 3 4!");
result =
absl::StrCat("To output a char by ASCII/numeric value, use +: ", '!' + 0);
EXPECT_EQ(result, "To output a char by ASCII/numeric value, use +: 33");
float f = 100000.5;
result = absl::StrCat("A hundred K and a half is ", absl::SixDigits(f));
EXPECT_EQ(result, "A hundred K and a half is 100000");
f = 100001.5;
result =
absl::StrCat("A hundred K and one and a half is ", absl::SixDigits(f));
EXPECT_EQ(result, "A hundred K and one and a half is 100002");
double d = 100000.5;
d *= d;
result =
absl::StrCat("A hundred K and a half squared is ", absl::SixDigits(d));
EXPECT_EQ(result, "A hundred K and a half squared is 1.00001e+10");
result = absl::StrCat(1, 2, 333, 4444, 55555, 666666, 7777777, 88888888,
999999999);
EXPECT_EQ(result, "12333444455555666666777777788888888999999999");
}
TEST(StrCat, CornerCases) {
std::string result;
result = absl::StrCat("");
EXPECT_EQ(result, "");
result = absl::StrCat("", "");
EXPECT_EQ(result, "");
result = absl::StrCat("", "", "");
EXPECT_EQ(result, "");
result = absl::StrCat("", "", "", "");
EXPECT_EQ(result, "");
result = absl::StrCat("", "", "", "", "");
EXPECT_EQ(result, "");
}
TEST(StrCat, NullConstCharPtr) {
const char* null = nullptr;
EXPECT_EQ(absl::StrCat("mon", null, "key"), "monkey");
}
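// Minimal malloc-backed allocator used to verify that StrCat accepts
// std::basic_string instances with custom allocators.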
template <typename T>
struct Mallocator {
typedef T value_type;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
typedef const T& const_reference;
size_type max_size() const {
return size_t(std::numeric_limits<size_type>::max()) / sizeof(value_type);
}
template <typename U>
struct rebind {
typedef Mallocator<U> other;
};
Mallocator() = default;
template <class U>
Mallocator(const Mallocator<U>&) {}
T* allocate(size_t n) { return static_cast<T*>(std::malloc(n * sizeof(T))); }
void deallocate(T* p, size_t) { std::free(p); }
};
template <typename T, typename U>
bool operator==(const Mallocator<T>&, const Mallocator<U>&) {
return true;
}
template <typename T, typename U>
bool operator!=(const Mallocator<T>&, const Mallocator<U>&) {
return false;
}
TEST(StrCat, CustomAllocator) {
using mstring =
std::basic_string<char, std::char_traits<char>, Mallocator<char>>;
const mstring str1("PARACHUTE OFF A BLIMP INTO MOSCONE!!");
const mstring str2("Read this book about coffee tables");
std::string result = absl::StrCat(str1, str2);
EXPECT_EQ(result,
"PARACHUTE OFF A BLIMP INTO MOSCONE!!"
"Read this book about coffee tables");
}
TEST(StrCat, MaxArgs) {
std::string result;
result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a");
EXPECT_EQ(result, "123456789a");
result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b");
EXPECT_EQ(result, "123456789ab");
result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c");
EXPECT_EQ(result, "123456789abc");
result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d");
EXPECT_EQ(result, "123456789abcd");
result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e");
EXPECT_EQ(result, "123456789abcde");
result =
absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f");
EXPECT_EQ(result, "123456789abcdef");
result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f",
"g");
EXPECT_EQ(result, "123456789abcdefg");
result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f",
"g", "h");
EXPECT_EQ(result, "123456789abcdefgh");
result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f",
"g", "h", "i");
EXPECT_EQ(result, "123456789abcdefghi");
result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f",
"g", "h", "i", "j");
EXPECT_EQ(result, "123456789abcdefghij");
result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f",
"g", "h", "i", "j", "k");
EXPECT_EQ(result, "123456789abcdefghijk");
result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f",
"g", "h", "i", "j", "k", "l");
EXPECT_EQ(result, "123456789abcdefghijkl");
result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f",
"g", "h", "i", "j", "k", "l", "m");
EXPECT_EQ(result, "123456789abcdefghijklm");
result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f",
"g", "h", "i", "j", "k", "l", "m", "n");
EXPECT_EQ(result, "123456789abcdefghijklmn");
result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f",
"g", "h", "i", "j", "k", "l", "m", "n", "o");
EXPECT_EQ(result, "123456789abcdefghijklmno");
result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f",
"g", "h", "i", "j", "k", "l", "m", "n", "o", "p");
EXPECT_EQ(result, "123456789abcdefghijklmnop");
result = absl::StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f",
"g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q");
EXPECT_EQ(result, "123456789abcdefghijklmnopq");
result = absl::StrCat(
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, "a", "b", "c", "d", "e", "f", "g", "h",
"i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w",
"x", "y", "z", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L",
"M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z");
EXPECT_EQ(result,
"12345678910abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ");
}
TEST(StrAppend, Basics) {
std::string result = "existing text";
std::string strs[] = {"Hello", "Cruel", "World"};
std::string stdstrs[] = {
"std::Hello",
"std::Cruel",
"std::World"
};
absl::string_view pieces[] = {"Hello", "Cruel", "World"};
const char* c_strs[] = {
"Hello",
"Cruel",
"World"
};
int32_t i32s[] = {'H', 'C', 'W'};
uint64_t ui64s[] = {12345678910LL, 10987654321LL};
std::string::size_type old_size = result.size();
absl::StrAppend(&result);
EXPECT_EQ(result.size(), old_size);
old_size = result.size();
absl::StrAppend(&result, strs[0]);
EXPECT_EQ(result.substr(old_size), "Hello");
old_size = result.size();
absl::StrAppend(&result, strs[1], pieces[2]);
EXPECT_EQ(result.substr(old_size), "CruelWorld");
old_size = result.size();
absl::StrAppend(&result, stdstrs[0], ", ", pieces[2]);
EXPECT_EQ(result.substr(old_size), "std::Hello, World");
old_size = result.size();
absl::StrAppend(&result, strs[0], ", ", stdstrs[1], " ", strs[2], "!");
EXPECT_EQ(result.substr(old_size), "Hello, std::Cruel World!");
old_size = result.size();
absl::StrAppend(&result, pieces[0], ", ", pieces[1], " ", pieces[2]);
EXPECT_EQ(result.substr(old_size), "Hello, Cruel World");
old_size = result.size();
absl::StrAppend(&result, c_strs[0], ", ", c_strs[1], " ", c_strs[2]);
EXPECT_EQ(result.substr(old_size), "Hello, Cruel World");
old_size = result.size();
absl::StrAppend(&result, "ASCII ", i32s[0], ", ", i32s[1], " ", i32s[2], "!");
EXPECT_EQ(result.substr(old_size), "ASCII 72, 67 87!");
old_size = result.size();
absl::StrAppend(&result, ui64s[0], ", ", ui64s[1], "!");
EXPECT_EQ(result.substr(old_size), "12345678910, 10987654321!");
std::string one =
"1";
old_size = result.size();
absl::StrAppend(&result, "And a ", one.size(), " and a ",
&result[2] - &result[0], " and a ", one, " 2 3 4", "!");
EXPECT_EQ(result.substr(old_size), "And a 1 and a 2 and a 1 2 3 4!");
old_size = result.size();
absl::StrAppend(&result,
"To output a char by ASCII/numeric value, use +: ", '!' + 0);
EXPECT_EQ(result.substr(old_size),
"To output a char by ASCII/numeric value, use +: 33");
old_size = result.size();
absl::StrAppend(&result, 1, 22, 333, 4444, 55555, 666666, 7777777, 88888888,
9);
EXPECT_EQ(result.substr(old_size), "1223334444555556666667777777888888889");
old_size = result.size();
absl::StrAppend(
&result, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m",
"n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z",
"A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M",
"N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z",
"No limit thanks to C++11's variadic templates");
EXPECT_EQ(result.substr(old_size),
"12345678910abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
"No limit thanks to C++11's variadic templates");
}
TEST(StrCat, VectorBoolReferenceTypes) {
std::vector<bool> v;
v.push_back(true);
v.push_back(false);
std::vector<bool> const& cv = v;
std::string result = absl::StrCat(v[0], v[1], cv[0], cv[1]);
EXPECT_EQ(result, "1010");
}
TEST(StrCat, AvoidsMemcpyWithNullptr) {
EXPECT_EQ(absl::StrCat(42, absl::string_view{}), "42");
EXPECT_EQ(absl::StrCat(1, 2, 3, 4, 5, absl::string_view{}), "12345");
std::string result;
absl::StrAppend(&result, 1, 2, 3, 4, 5, absl::string_view{});
EXPECT_EQ(result, "12345");
}
#if GTEST_HAS_DEATH_TEST
TEST(StrAppend, Death) {
std::string s = "self";
ABSL_EXPECT_DEBUG_DEATH(absl::StrAppend(&s, s.c_str() + 1),
"ssertion.*failed");
ABSL_EXPECT_DEBUG_DEATH(absl::StrAppend(&s, s), "ssertion.*failed");
}
#endif
TEST(StrAppend, CornerCases) {
std::string result;
absl::StrAppend(&result, "");
EXPECT_EQ(result, "");
absl::StrAppend(&result, "", "");
EXPECT_EQ(result, "");
absl::StrAppend(&result, "", "", "");
EXPECT_EQ(result, "");
absl::StrAppend(&result, "", "", "", "");
EXPECT_EQ(result, "");
absl::StrAppend(&result, "", "", "", "", "");
EXPECT_EQ(result, "");
}
TEST(StrAppend, CornerCasesNonEmptyAppend) {
for (std::string result : {"hello", "a string too long to fit in the SSO"}) {
const std::string expected = result;
absl::StrAppend(&result, "");
EXPECT_EQ(result, expected);
absl::StrAppend(&result, "", "");
EXPECT_EQ(result, expected);
absl::StrAppend(&result, "", "", "");
EXPECT_EQ(result, expected);
absl::StrAppend(&result, "", "", "", "");
EXPECT_EQ(result, expected);
absl::StrAppend(&result, "", "", "", "", "");
EXPECT_EQ(result, expected);
}
}
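// Compares absl::StrCat(absl::Hex(v, ...)) against snprintf output for the
// no-pad, zero-pad and space-pad variants.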
template <typename IntType>
void CheckHex(IntType v, const char* nopad_format, const char* zeropad_format,
const char* spacepad_format) {
char expected[256];
std::string actual = absl::StrCat(absl::Hex(v, absl::kNoPad));
snprintf(expected, sizeof(expected), nopad_format, v);
EXPECT_EQ(expected, actual) << " decimal value " << v;
for (int spec = absl::kZeroPad2; spec <= absl::kZeroPad20; ++spec) {
std::string actual =
absl::StrCat(absl::Hex(v, static_cast<absl::PadSpec>(spec)));
snprintf(expected, sizeof(expected), zeropad_format,
spec - absl::kZeroPad2 + 2, v);
EXPECT_EQ(expected, actual) << " decimal value " << v;
}
for (int spec = absl::kSpacePad2; spec <= absl::kSpacePad20; ++spec) {
std::string actual =
absl::StrCat(absl::Hex(v, static_cast<absl::PadSpec>(spec)));
snprintf(expected, sizeof(expected), spacepad_format,
spec - absl::kSpacePad2 + 2, v);
EXPECT_EQ(expected, actual) << " decimal value " << v;
}
}
template <typename IntType>
void CheckDec(IntType v, const char* nopad_format, const char* zeropad_format,
const char* spacepad_format) {
char expected[256];
std::string actual = absl::StrCat(absl::Dec(v, absl::kNoPad));
snprintf(expected, sizeof(expected), nopad_format, v);
EXPECT_EQ(expected, actual) << " decimal value " << v;
for (int spec = absl::kZeroPad2; spec <= absl::kZeroPad20; ++spec) {
std::string actual =
absl::StrCat(absl::Dec(v, static_cast<absl::PadSpec>(spec)));
snprintf(expected, sizeof(expected), zeropad_format,
spec - absl::kZeroPad2 + 2, v);
EXPECT_EQ(expected, actual)
<< " decimal value " << v << " format '" << zeropad_format
<< "' digits " << (spec - absl::kZeroPad2 + 2);
}
for (int spec = absl::kSpacePad2; spec <= absl::kSpacePad20; ++spec) {
std::string actual =
absl::StrCat(absl::Dec(v, static_cast<absl::PadSpec>(spec)));
snprintf(expected, sizeof(expected), spacepad_format,
spec - absl::kSpacePad2 + 2, v);
EXPECT_EQ(expected, actual)
<< " decimal value " << v << " format '" << spacepad_format
<< "' digits " << (spec - absl::kSpacePad2 + 2);
}
}
void CheckHexDec64(uint64_t v) {
unsigned long long ullv = v;
CheckHex(ullv, "%llx", "%0*llx", "%*llx");
CheckDec(ullv, "%llu", "%0*llu", "%*llu");
long long llv = static_cast<long long>(ullv);
CheckDec(llv, "%lld", "%0*lld", "%*lld");
if (sizeof(v) == sizeof(&v)) {
auto uintptr = static_cast<uintptr_t>(v);
void* ptr = reinterpret_cast<void*>(uintptr);
CheckHex(ptr, "%llx", "%0*llx", "%*llx");
}
}
void CheckHexDec32(uint32_t uv) {
CheckHex(uv, "%x", "%0*x", "%*x");
CheckDec(uv, "%u", "%0*u", "%*u");
int32_t v = static_cast<int32_t>(uv);
CheckDec(v, "%d", "%0*d", "%*d");
if (sizeof(v) == sizeof(&v)) {
auto uintptr = static_cast<uintptr_t>(v);
void* ptr = reinterpret_cast<void*>(uintptr);
CheckHex(ptr, "%x", "%0*x", "%*x");
}
}
void CheckAll(uint64_t v) {
CheckHexDec64(v);
CheckHexDec32(static_cast<uint32_t>(v));
}
void TestFastPrints() {
for (int i = 0; i < 10000; i++) {
CheckAll(i);
}
CheckAll(std::numeric_limits<uint64_t>::max());
CheckAll(std::numeric_limits<uint64_t>::max() - 1);
CheckAll(std::numeric_limits<int64_t>::min());
CheckAll(std::numeric_limits<int64_t>::min() + 1);
CheckAll(std::numeric_limits<uint32_t>::max());
CheckAll(std::numeric_limits<uint32_t>::max() - 1);
CheckAll(std::numeric_limits<int32_t>::min());
CheckAll(std::numeric_limits<int32_t>::min() + 1);
CheckAll(999999999);
CheckAll(1000000000);
CheckAll(9999999999);
CheckAll(10000000000);
CheckAll(999999999999999999);
CheckAll(9999999999999999999u);
CheckAll(1000000000000000000);
CheckAll(10000000000000000000u);
CheckAll(999999999876543210);
CheckAll(9999999999876543210u);
CheckAll(0x123456789abcdef0);
CheckAll(0x12345678);
int8_t minus_one_8bit = -1;
EXPECT_EQ("ff", absl::StrCat(absl::Hex(minus_one_8bit)));
int16_t minus_one_16bit = -1;
EXPECT_EQ("ffff", absl::StrCat(absl::Hex(minus_one_16bit)));
}
TEST(Numbers, TestFunctionsMovedOverFromNumbersMain) {
TestFastPrints();
}
struct PointStringify {
template <typename FormatSink>
friend void AbslStringify(FormatSink& sink, const PointStringify& p) {
sink.Append("(");
sink.Append(absl::StrCat(p.x));
sink.Append(", ");
sink.Append(absl::StrCat(p.y));
sink.Append(")");
}
double x = 10.0;
double y = 20.0;
};
TEST(StrCat, AbslStringifyExample) {
PointStringify p;
EXPECT_EQ(absl::StrCat(p), "(10, 20)");
EXPECT_EQ(absl::StrCat("a ", p, " z"), "a (10, 20) z");
}
struct PointStringifyUsingFormat {
template <typename FormatSink>
friend void AbslStringify(FormatSink& sink,
const PointStringifyUsingFormat& p) {
absl::Format(&sink, "(%g, %g)", p.x, p.y);
}
double x = 10.0;
double y = 20.0;
};
TEST(StrCat, AbslStringifyExampleUsingFormat) {
PointStringifyUsingFormat p;
EXPECT_EQ(absl::StrCat(p), "(10, 20)");
EXPECT_EQ(absl::StrCat("a ", p, " z"), "a (10, 20) z");
}
enum class EnumWithStringify { Many = 0, Choices = 1 };
template <typename Sink>
void AbslStringify(Sink& sink, EnumWithStringify e) {
absl::Format(&sink, "%s", e == EnumWithStringify::Many ? "Many" : "Choices");
}
TEST(StrCat, AbslStringifyWithEnum) {
const auto e = EnumWithStringify::Choices;
EXPECT_EQ(absl::StrCat(e), "Choices");
}
template <typename Integer>
void CheckSingleArgumentIntegerLimits() {
Integer max = std::numeric_limits<Integer>::max();
Integer min = std::numeric_limits<Integer>::min();
EXPECT_EQ(absl::StrCat(max), std::to_string(max));
EXPECT_EQ(absl::StrCat(min), std::to_string(min));
}
TEST(StrCat, SingleArgumentLimits) {
CheckSingleArgumentIntegerLimits<int32_t>();
CheckSingleArgumentIntegerLimits<uint32_t>();
CheckSingleArgumentIntegerLimits<int64_t>();
CheckSingleArgumentIntegerLimits<uint64_t>();
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/str_cat.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/str_cat_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
af8f681c-9589-46e6-9745-f5b0612bf4d2 | cpp | google/quiche | moqt_messages | quiche/quic/moqt/moqt_messages.cc | quiche/quic/moqt/moqt_messages_test.cc | #include "quiche/quic/moqt/moqt_messages.h"
#include <cstdint>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
namespace moqt {
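// Converts a wire-format integer into MoqtObjectStatus, mapping any
// out-of-range value to kInvalidObjectStatus.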
MoqtObjectStatus IntegerToObjectStatus(uint64_t integer) {
if (integer >=
static_cast<uint64_t>(MoqtObjectStatus::kInvalidObjectStatus)) {
return MoqtObjectStatus::kInvalidObjectStatus;
}
return static_cast<MoqtObjectStatus>(integer);
}
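// Derives the subscription filter type from which of the optional start/end
// fields are populated; inconsistent combinations map to kNone.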
MoqtFilterType GetFilterType(const MoqtSubscribe& message) {
if (!message.end_group.has_value() && message.end_object.has_value()) {
return MoqtFilterType::kNone;
}
bool has_start =
message.start_group.has_value() && message.start_object.has_value();
if (message.end_group.has_value()) {
if (has_start) {
if (*message.end_group < *message.start_group) {
return MoqtFilterType::kNone;
} else if (*message.end_group == *message.start_group &&
*message.end_object <= *message.start_object) {
if (*message.end_object < *message.start_object) {
return MoqtFilterType::kNone;
} else if (*message.end_object == *message.start_object) {
return MoqtFilterType::kAbsoluteStart;
}
}
return MoqtFilterType::kAbsoluteRange;
}
} else {
if (has_start) {
return MoqtFilterType::kAbsoluteStart;
} else if (!message.start_group.has_value()) {
if (message.start_object.has_value()) {
if (message.start_object.value() == 0) {
return MoqtFilterType::kLatestGroup;
}
} else {
return MoqtFilterType::kLatestObject;
}
}
}
return MoqtFilterType::kNone;
}
std::string MoqtMessageTypeToString(const MoqtMessageType message_type) {
switch (message_type) {
case MoqtMessageType::kClientSetup:
return "CLIENT_SETUP";
case MoqtMessageType::kServerSetup:
return "SERVER_SETUP";
case MoqtMessageType::kSubscribe:
return "SUBSCRIBE_REQUEST";
case MoqtMessageType::kSubscribeOk:
return "SUBSCRIBE_OK";
case MoqtMessageType::kSubscribeError:
return "SUBSCRIBE_ERROR";
case MoqtMessageType::kUnsubscribe:
return "UNSUBSCRIBE";
case MoqtMessageType::kSubscribeDone:
return "SUBSCRIBE_DONE";
case MoqtMessageType::kSubscribeUpdate:
return "SUBSCRIBE_UPDATE";
case MoqtMessageType::kAnnounceCancel:
return "ANNOUNCE_CANCEL";
case MoqtMessageType::kTrackStatusRequest:
return "TRACK_STATUS_REQUEST";
case MoqtMessageType::kTrackStatus:
return "TRACK_STATUS";
case MoqtMessageType::kAnnounce:
return "ANNOUNCE";
case MoqtMessageType::kAnnounceOk:
return "ANNOUNCE_OK";
case MoqtMessageType::kAnnounceError:
return "ANNOUNCE_ERROR";
case MoqtMessageType::kUnannounce:
return "UNANNOUNCE";
case MoqtMessageType::kGoAway:
return "GOAWAY";
case MoqtMessageType::kSubscribeNamespace:
return "SUBSCRIBE_NAMESPACE";
case MoqtMessageType::kSubscribeNamespaceOk:
return "SUBSCRIBE_NAMESPACE_OK";
case MoqtMessageType::kSubscribeNamespaceError:
return "SUBSCRIBE_NAMESPACE_ERROR";
case MoqtMessageType::kUnsubscribeNamespace:
return "UNSUBSCRIBE_NAMESPACE";
case MoqtMessageType::kMaxSubscribeId:
return "MAX_SUBSCRIBE_ID";
case MoqtMessageType::kObjectAck:
return "OBJECT_ACK";
}
return "Unknown message " + std::to_string(static_cast<int>(message_type));
}
std::string MoqtDataStreamTypeToString(MoqtDataStreamType type) {
switch (type) {
case MoqtDataStreamType::kObjectDatagram:
return "OBJECT_PREFER_DATAGRAM";
case MoqtDataStreamType::kStreamHeaderTrack:
return "STREAM_HEADER_TRACK";
case MoqtDataStreamType::kStreamHeaderSubgroup:
return "STREAM_HEADER_SUBGROUP";
case MoqtDataStreamType::kPadding:
return "PADDING";
}
return "Unknown stream type " + absl::StrCat(static_cast<int>(type));
}
std::string MoqtForwardingPreferenceToString(
MoqtForwardingPreference preference) {
switch (preference) {
case MoqtForwardingPreference::kDatagram:
return "DATAGRAM";
case MoqtForwardingPreference::kTrack:
return "TRACK";
case MoqtForwardingPreference::kSubgroup:
return "SUBGROUP";
}
QUIC_BUG(quic_bug_bad_moqt_message_type_01)
<< "Unknown preference " << std::to_string(static_cast<int>(preference));
return "Unknown preference " + std::to_string(static_cast<int>(preference));
}
MoqtForwardingPreference GetForwardingPreference(MoqtDataStreamType type) {
switch (type) {
case MoqtDataStreamType::kObjectDatagram:
return MoqtForwardingPreference::kDatagram;
case MoqtDataStreamType::kStreamHeaderTrack:
return MoqtForwardingPreference::kTrack;
case MoqtDataStreamType::kStreamHeaderSubgroup:
return MoqtForwardingPreference::kSubgroup;
default:
break;
}
QUIC_BUG(quic_bug_bad_moqt_message_type_02)
<< "Message type does not indicate forwarding preference";
return MoqtForwardingPreference::kSubgroup;
}
MoqtDataStreamType GetMessageTypeForForwardingPreference(
MoqtForwardingPreference preference) {
switch (preference) {
case MoqtForwardingPreference::kDatagram:
return MoqtDataStreamType::kObjectDatagram;
case MoqtForwardingPreference::kTrack:
return MoqtDataStreamType::kStreamHeaderTrack;
case MoqtForwardingPreference::kSubgroup:
return MoqtDataStreamType::kStreamHeaderSubgroup;
}
QUIC_BUG(quic_bug_bad_moqt_message_type_03)
<< "Forwarding preference does not indicate message type";
return MoqtDataStreamType::kStreamHeaderSubgroup;
}
std::string FullTrackName::ToString() const {
std::vector<std::string> bits;
bits.reserve(tuple_.size());
for (absl::string_view raw_bit : tuple_) {
bits.push_back(absl::StrCat("\"", absl::CHexEscape(raw_bit), "\""));
}
return absl::StrCat("{", absl::StrJoin(bits, ", "), "}");
}
bool FullTrackName::operator==(const FullTrackName& other) const {
if (tuple_.size() != other.tuple_.size()) {
return false;
}
return absl::c_equal(tuple_, other.tuple_);
}
bool FullTrackName::operator<(const FullTrackName& other) const {
return absl::c_lexicographical_compare(tuple_, other.tuple_);
}
FullTrackName::FullTrackName(absl::Span<const absl::string_view> elements)
: tuple_(elements.begin(), elements.end()) {}
} | #include "quiche/quic/moqt/moqt_messages.h"
#include <vector>
#include "absl/hash/hash.h"
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace moqt::test {
namespace {
TEST(MoqtMessagesTest, FullTrackNameConstructors) {
FullTrackName name1({"foo", "bar"});
std::vector<absl::string_view> list = {"foo", "bar"};
FullTrackName name2(list);
EXPECT_EQ(name1, name2);
EXPECT_EQ(absl::HashOf(name1), absl::HashOf(name2));
}
TEST(MoqtMessagesTest, FullTrackNameOrder) {
FullTrackName name1({"a", "b"});
FullTrackName name2({"a", "b", "c"});
FullTrackName name3({"b", "a"});
EXPECT_LT(name1, name2);
EXPECT_LT(name2, name3);
EXPECT_LT(name1, name3);
}
TEST(MoqtMessagesTest, FullTrackNameInNamespace) {
FullTrackName name1({"a", "b"});
FullTrackName name2({"a", "b", "c"});
FullTrackName name3({"d", "b"});
EXPECT_TRUE(name2.InNamespace(name1));
EXPECT_FALSE(name1.InNamespace(name2));
EXPECT_TRUE(name1.InNamespace(name1));
EXPECT_FALSE(name2.InNamespace(name3));
}
TEST(MoqtMessagesTest, FullTrackNameToString) {
FullTrackName name1({"a", "b"});
EXPECT_EQ(name1.ToString(), R"({"a", "b"})");
FullTrackName name2({"\xff", "\x61"});
EXPECT_EQ(name2.ToString(), R"({"\xff", "a"})");
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/moqt/moqt_messages.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/moqt/moqt_messages_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
ca502529-5b2d-41f7-a2a5-93772bdad653 | cpp | google/arolla | eval | arolla/expr/eval/eval.cc | arolla/expr/eval/eval_test.cc | #include "arolla/expr/eval/eval.h"
#include <algorithm>
#include <cstddef>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "arolla/expr/eval/dynamic_compiled_expr.h"
#include "arolla/expr/eval/prepare_expression.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_debug_string.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_stack_trace.h"
#include "arolla/memory/frame.h"
#include "arolla/qexpr/evaluation_engine.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr {
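// Compiles `expr`, together with any named side outputs, into a CompiledExpr
// that can later be bound to frame slots for dynamic evaluation.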
absl::StatusOr<std::unique_ptr<CompiledExpr>> CompileForDynamicEvaluation(
const DynamicEvaluationEngineOptions& options, const ExprNodePtr& expr,
const absl::flat_hash_map<std::string, QTypePtr>& input_types,
const absl::flat_hash_map<std::string, ExprNodePtr>& side_outputs) {
auto expr_with_side_outputs = expr;
std::vector<std::string> side_output_names;
if (!side_outputs.empty()) {
side_output_names.reserve(side_outputs.size());
for (const auto& [name, _] : side_outputs) {
side_output_names.push_back(name);
}
std::sort(side_output_names.begin(), side_output_names.end());
std::vector<ExprNodePtr> exprs = {expr_with_side_outputs};
exprs.reserve(side_outputs.size() + 1);
for (const auto& name : side_output_names) {
exprs.push_back(side_outputs.at(name));
}
ASSIGN_OR_RETURN(
expr_with_side_outputs,
BindOp(eval_internal::InternalRootOperator(), std::move(exprs), {}));
}
std::shared_ptr<LightweightExprStackTrace> stack_trace = nullptr;
if (options.enable_expr_stack_trace) {
stack_trace = std::make_shared<LightweightExprStackTrace>();
}
ASSIGN_OR_RETURN(
ExprNodePtr prepared_expr,
eval_internal::PrepareExpression(expr_with_side_outputs, input_types,
options, stack_trace));
auto placeholder_keys = GetPlaceholderKeys(prepared_expr);
if (!placeholder_keys.empty()) {
return absl::FailedPreconditionError(absl::StrFormat(
"placeholders should be substituted before "
"evaluation: %s, got %s",
absl::StrJoin(placeholder_keys, ","), ToDebugString(prepared_expr)));
}
absl::flat_hash_map<Fingerprint, QTypePtr> node_types;
ASSIGN_OR_RETURN(prepared_expr, eval_internal::ExtractQTypesForCompilation(
prepared_expr, &node_types, stack_trace));
if (stack_trace != nullptr) {
stack_trace->AddRepresentations(expr_with_side_outputs, prepared_expr);
}
ASSIGN_OR_RETURN(auto used_input_types,
eval_internal::LookupLeafQTypes(prepared_expr, node_types));
ASSIGN_OR_RETURN(auto named_output_types,
eval_internal::LookupNamedOutputTypes(
prepared_expr, side_output_names, node_types));
for (const auto& [key, qtype] : used_input_types) {
if (qtype == nullptr) {
return absl::FailedPreconditionError(absl::StrFormat(
"unable to deduce input type for L.%s in the expression %s", key,
GetDebugSnippet(prepared_expr)));
}
}
ASSIGN_OR_RETURN(QTypePtr output_type,
eval_internal::LookupQType(prepared_expr, node_types));
if (output_type == nullptr) {
return absl::FailedPreconditionError(
absl::StrFormat("unable to deduce output type in the expression %s",
GetDebugSnippet(prepared_expr)));
}
return std::unique_ptr<CompiledExpr>(new eval_internal::DynamicCompiledExpr(
options, std::move(used_input_types), output_type,
std::move(named_output_types), std::move(prepared_expr),
std::move(side_output_names), std::move(node_types),
std::move(stack_trace)));
}
absl::StatusOr<std::unique_ptr<BoundExpr>> CompileAndBindForDynamicEvaluation(
const DynamicEvaluationEngineOptions& options,
FrameLayout::Builder* layout_builder, const ExprNodePtr& expr,
const absl::flat_hash_map<std::string, TypedSlot>& input_slots,
std::optional<TypedSlot> output_slot,
const absl::flat_hash_map<std::string, ExprNodePtr>& side_outputs) {
ASSIGN_OR_RETURN(auto compiled_expr,
CompileForDynamicEvaluation(
options, expr, SlotsToTypes(input_slots), side_outputs));
ASSIGN_OR_RETURN(
auto executable_expr,
compiled_expr->Bind(layout_builder, input_slots, output_slot));
if (output_slot.has_value() &&
executable_expr->output_slot() != *output_slot) {
return absl::InternalError("expression bound to a wrong output slot");
}
return executable_expr;
}
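// Convenience wrapper: builds leaf expressions input_0..input_N-1 around
// `op`, then compiles and binds the result against the given input slots.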
absl::StatusOr<std::shared_ptr<BoundExpr>> CompileAndBindExprOperator(
const DynamicEvaluationEngineOptions& options,
FrameLayout::Builder* layout_builder, const ExprOperatorPtr& op,
absl::Span<const TypedSlot> input_slots,
std::optional<TypedSlot> output_slot) {
std::vector<absl::StatusOr<ExprNodePtr>> inputs;
inputs.reserve(input_slots.size());
absl::flat_hash_map<std::string, TypedSlot> input_slots_map;
input_slots_map.reserve(input_slots.size());
for (size_t i = 0; i < input_slots.size(); ++i) {
std::string name = absl::StrFormat("input_%d", i);
inputs.push_back(Leaf(name));
input_slots_map.emplace(name, input_slots[i]);
}
ASSIGN_OR_RETURN(auto expr, CallOp(op, inputs));
ASSIGN_OR_RETURN(auto evaluator, CompileAndBindForDynamicEvaluation(
options, layout_builder, expr,
input_slots_map, output_slot));
return std::shared_ptr<BoundExpr>(std::move(evaluator));
}
} | #include "arolla/expr/eval/eval.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/expr/backend_wrapping_operator.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/eval/executable_builder.h"
#include "arolla/expr/eval/extensions.h"
#include "arolla/expr/eval/invoke.h"
#include "arolla/expr/eval/prepare_expression.h"
#include "arolla/expr/eval/side_output.h"
#include "arolla/expr/eval/test_utils.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/lambda_expr_operator.h"
#include "arolla/expr/optimization/default/default_optimizer.h"
#include "arolla/expr/testing/test_operators.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/expr/tuple_expr_operator.h"
#include "arolla/io/accessors_input_loader.h"
#include "arolla/io/input_loader.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/bound_operators.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qexpr/evaluation_engine.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qexpr/qexpr_operator_signature.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/testing/qtype.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/fast_dynamic_downcast_final.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/text.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr {
namespace {
using ::absl_testing::IsOk;
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::arolla::testing::InvokeExprOperator;
using ::arolla::testing::TypedValueWith;
using ::arolla::testing::WithExportAnnotation;
using ::arolla::testing::WithNameAnnotation;
using ::arolla::testing::WithQTypeAnnotation;
using ::testing::_;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::FloatEq;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::Pair;
using ::testing::Property;
using ::testing::UnorderedElementsAre;
struct TestParams {
bool use_default_optimizer = false;
};
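// Each test runs twice: once with the default optimizer enabled and once
// without (see INSTANTIATE_TEST_SUITE_P below).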
class EvalVisitorParameterizedTest
: public ::testing::TestWithParam<TestParams> {
protected:
EvalVisitorParameterizedTest() {
if (GetParam().use_default_optimizer) {
auto optimizer_or = DefaultOptimizer();
CHECK_OK(optimizer_or.status());
options_.optimizer = optimizer_or.value();
}
options_.collect_op_descriptions = true;
}
DynamicEvaluationEngineOptions options_;
};
INSTANTIATE_TEST_SUITE_P(
Optimizer, EvalVisitorParameterizedTest,
::testing::Values(TestParams{.use_default_optimizer = false},
TestParams{.use_default_optimizer = true}));
TEST_P(EvalVisitorParameterizedTest, SmokeTest) {
ASSERT_OK_AND_ASSIGN(
auto expr, CallOp("math.add", {CallOp("math.add", {Leaf("x"), Leaf("y")}),
Leaf("z")}));
FrameLayout::Builder layout_builder;
auto x_slot = layout_builder.AddSlot<float>();
auto y_slot = layout_builder.AddSlot<float>();
auto z_slot = layout_builder.AddSlot<float>();
ASSERT_OK_AND_ASSIGN(
auto executable_expr,
CompileAndBindForDynamicEvaluation(options_, &layout_builder, expr,
{{"x", TypedSlot::FromSlot(x_slot)},
{"y", TypedSlot::FromSlot(y_slot)},
{"z", TypedSlot::FromSlot(z_slot)}}));
EXPECT_THAT(
executable_expr,
AllOf(InitOperationsAre(),
EvalOperationsAre(
"FLOAT32 [0x10] = math.add(FLOAT32 [0x00], FLOAT32 [0x04])",
"FLOAT32 [0x0C] = math.add(FLOAT32 [0x10], FLOAT32 [0x08])")));
FrameLayout layout = std::move(layout_builder).Build();
RootEvaluationContext ctx(&layout);
EXPECT_OK(executable_expr->InitializeLiterals(&ctx));
ctx.Set(x_slot, 1.0f);
ctx.Set(y_slot, 10.0f);
ctx.Set(z_slot, 100.0f);
EXPECT_THAT(executable_expr->Execute(&ctx), IsOk());
EXPECT_THAT(executable_expr->named_output_slots(), IsEmpty());
ASSERT_OK_AND_ASSIGN(auto output_slot,
executable_expr->output_slot().ToSlot<float>());
EXPECT_EQ(ctx.Get(output_slot), 111.0f);
EXPECT_EQ(ctx.Get(x_slot), 1.0f);
EXPECT_EQ(ctx.Get(y_slot), 10.0f);
EXPECT_EQ(ctx.Get(z_slot), 100.0f);
}
TEST_P(EvalVisitorParameterizedTest, ReusingInputSlots) {
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp("math.add",
{CallOp("math.add", {CallOp("math.add", {Leaf("x1"), Leaf("x2")}),
Leaf("x3")}),
Leaf("x4")}));
DynamicEvaluationEngineOptions options{.collect_op_descriptions = true};
auto create_input_slots = [](FrameLayout::Builder& layout_builder) {
return absl::flat_hash_map<std::string, TypedSlot>{
{"x1", TypedSlot::FromSlot(layout_builder.AddSlot<float>())},
{"x2", TypedSlot::FromSlot(layout_builder.AddSlot<float>())},
{"x3", TypedSlot::FromSlot(layout_builder.AddSlot<float>())},
{"x4", TypedSlot::FromSlot(layout_builder.AddSlot<float>())}};
};
{
FrameLayout::Builder layout_builder;
auto input_slots = create_input_slots(layout_builder);
EXPECT_THAT(
CompileAndBindForDynamicEvaluation(options, &layout_builder, expr,
input_slots),
IsOkAndHolds(AllOf(
InitOperationsAre(),
EvalOperationsAre(
"FLOAT32 [0x14] = math.add(FLOAT32 [0x00], FLOAT32 [0x04])",
"FLOAT32 [0x18] = math.add(FLOAT32 [0x14], FLOAT32 [0x08])",
"FLOAT32 [0x10] = math.add(FLOAT32 [0x18], FLOAT32 [0x0C])"))));
}
{
options.allow_overriding_input_slots = true;
FrameLayout::Builder layout_builder;
auto input_slots = create_input_slots(layout_builder);
EXPECT_THAT(
CompileAndBindForDynamicEvaluation(options, &layout_builder, expr,
input_slots),
IsOkAndHolds(AllOf(
InitOperationsAre(),
EvalOperationsAre(
"FLOAT32 [0x14] = math.add(FLOAT32 [0x00], FLOAT32 [0x04])",
"FLOAT32 [0x04] = math.add(FLOAT32 [0x14], FLOAT32 [0x08])",
"FLOAT32 [0x10] = math.add(FLOAT32 [0x04], FLOAT32 [0x0C])"))));
}
}
TEST_P(EvalVisitorParameterizedTest, NamedNodesTest) {
constexpr int kIters = 10;
ASSERT_OK_AND_ASSIGN(auto xpy, CallOp("math.add", {Leaf("x"), Leaf("y")}));
auto expr = xpy;
for (int i = 0; i < kIters; ++i) {
ASSERT_OK_AND_ASSIGN(
expr, CallOp("math.maximum",
{expr, WithNameAnnotation(expr, std::to_string(i))}));
}
FrameLayout::Builder layout_builder;
auto x_slot = layout_builder.AddSlot<float>();
auto y_slot = layout_builder.AddSlot<float>();
ASSERT_OK_AND_ASSIGN(
auto executable_expr,
CompileAndBindForDynamicEvaluation(options_, &layout_builder, expr,
{{"x", TypedSlot::FromSlot(x_slot)},
{"y", TypedSlot::FromSlot(y_slot)}}));
EXPECT_THAT(
executable_expr,
AllOf(InitOperationsAre(),
EvalOperationsAre(
"FLOAT32 [0x0C] = math.add(FLOAT32 [0x00], FLOAT32 [0x04])",
"FLOAT32 [0x10] = math.maximum(FLOAT32 [0x0C], FLOAT32 [0x0C])",
"FLOAT32 [0x0C] = math.maximum(FLOAT32 [0x10], FLOAT32 [0x10])",
"FLOAT32 [0x10] = math.maximum(FLOAT32 [0x0C], FLOAT32 [0x0C])",
"FLOAT32 [0x0C] = math.maximum(FLOAT32 [0x10], FLOAT32 [0x10])",
"FLOAT32 [0x10] = math.maximum(FLOAT32 [0x0C], FLOAT32 [0x0C])",
"FLOAT32 [0x0C] = math.maximum(FLOAT32 [0x10], FLOAT32 [0x10])",
"FLOAT32 [0x10] = math.maximum(FLOAT32 [0x0C], FLOAT32 [0x0C])",
"FLOAT32 [0x0C] = math.maximum(FLOAT32 [0x10], FLOAT32 [0x10])",
"FLOAT32 [0x10] = math.maximum(FLOAT32 [0x0C], FLOAT32 [0x0C])",
"FLOAT32 [0x08] = math.maximum(FLOAT32 [0x10], FLOAT32 "
"[0x10])")));
FrameLayout layout = std::move(layout_builder).Build();
EXPECT_EQ(layout.AllocSize(), sizeof(float) * 5);
RootEvaluationContext ctx(&layout);
EXPECT_OK(executable_expr->InitializeLiterals(&ctx));
ctx.Set(x_slot, 1.0f);
ctx.Set(y_slot, 10.0f);
EXPECT_THAT(executable_expr->Execute(&ctx), IsOk());
EXPECT_THAT(executable_expr->named_output_slots(), IsEmpty());
ASSERT_OK_AND_ASSIGN(auto output_slot,
executable_expr->output_slot().ToSlot<float>());
EXPECT_EQ(ctx.Get(output_slot), 11);
}
TEST_P(EvalVisitorParameterizedTest, WithUsedSubSlotOfInput) {
ASSERT_OK_AND_ASSIGN(auto expr, CallOp("core.has", {Leaf("x")}));
FrameLayout::Builder layout_builder;
auto x_slot = layout_builder.AddSlot<OptionalValue<float>>();
ASSERT_OK_AND_ASSIGN(
auto executable_expr,
CompileAndBindForDynamicEvaluation(options_, &layout_builder, expr,
{{"x", TypedSlot::FromSlot(x_slot)}}));
EXPECT_THAT(
executable_expr,
AllOf(InitOperationsAre(),
EvalOperationsAre(
"OPTIONAL_UNIT [0x08] = core._copy(OPTIONAL_UNIT [0x00])")));
FrameLayout layout = std::move(layout_builder).Build();
RootEvaluationContext ctx(&layout);
EXPECT_OK(executable_expr->InitializeLiterals(&ctx));
ctx.Set(x_slot, 1.0f);
EXPECT_THAT(executable_expr->Execute(&ctx), IsOk());
EXPECT_THAT(executable_expr->named_output_slots(), IsEmpty());
ASSERT_OK_AND_ASSIGN(auto output_slot,
executable_expr->output_slot().ToSlot<OptionalUnit>());
EXPECT_EQ(ctx.Get(output_slot), kPresent);
EXPECT_EQ(ctx.Get(x_slot), 1.0f);
}
TEST_P(EvalVisitorParameterizedTest, WithUsedSubSlotOfIntermediate) {
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp("core.has", {CallOp("math.add", {Leaf("x"), Leaf("y")})}));
FrameLayout::Builder layout_builder;
auto x_slot = layout_builder.AddSlot<OptionalValue<float>>();
auto y_slot = layout_builder.AddSlot<OptionalValue<float>>();
ASSERT_OK_AND_ASSIGN(
auto executable_expr,
CompileAndBindForDynamicEvaluation(options_, &layout_builder, expr,
{{"x", TypedSlot::FromSlot(x_slot)},
{"y", TypedSlot::FromSlot(y_slot)}}));
EXPECT_THAT(
executable_expr,
AllOf(InitOperationsAre(),
EvalOperationsAre(
"OPTIONAL_FLOAT32 [0x14] = math.add(OPTIONAL_FLOAT32 [0x00], "
"OPTIONAL_FLOAT32 [0x08])",
"OPTIONAL_UNIT [0x10] = core._copy(OPTIONAL_UNIT [0x14])")));
FrameLayout layout = std::move(layout_builder).Build();
RootEvaluationContext ctx(&layout);
EXPECT_OK(executable_expr->InitializeLiterals(&ctx));
ctx.Set(x_slot, 1.0f);
ctx.Set(y_slot, 10.0f);
EXPECT_THAT(executable_expr->Execute(&ctx), IsOk());
EXPECT_THAT(executable_expr->named_output_slots(), IsEmpty());
ASSERT_OK_AND_ASSIGN(auto output_slot,
executable_expr->output_slot().ToSlot<OptionalUnit>());
EXPECT_EQ(ctx.Get(output_slot), kPresent);
EXPECT_EQ(ctx.Get(x_slot), 1.0f);
EXPECT_EQ(ctx.Get(y_slot), 10.0f);
}
TEST_P(EvalVisitorParameterizedTest, EvalWithNamedOutput) {
DynamicEvaluationEngineOptions options;
options.collect_op_descriptions = true;
ASSERT_OK_AND_ASSIGN(
auto expr, CallOp("math.add",
{WithExportAnnotation(
CallOp("math.add", {Leaf("x"), Leaf("y")}), "x+y"),
Leaf("z")}));
ASSERT_OK_AND_ASSIGN((auto [stripped_expr, side_outputs]),
ExtractSideOutputs(expr));
FrameLayout::Builder layout_builder;
auto x_slot = layout_builder.AddSlot<float>();
auto y_slot = layout_builder.AddSlot<float>();
auto z_slot = layout_builder.AddSlot<float>();
const QTypePtr f32 = GetQType<float>();
ASSERT_OK_AND_ASSIGN(auto compiled_expr,
CompileForDynamicEvaluation(
options, stripped_expr,
{{"x", f32}, {"y", f32}, {"z", f32}}, side_outputs));
EXPECT_EQ(compiled_expr->output_type(), f32);
EXPECT_THAT(compiled_expr->named_output_types(),
UnorderedElementsAre(Pair("x+y", f32)));
auto typed_output_slot =
AddSlot(compiled_expr->output_type(), &layout_builder);
ASSERT_OK_AND_ASSIGN(auto executable_expr,
compiled_expr->Bind(&layout_builder,
{{"x", TypedSlot::FromSlot(x_slot)},
{"y", TypedSlot::FromSlot(y_slot)},
{"z", TypedSlot::FromSlot(z_slot)}},
typed_output_slot));
EXPECT_THAT(
executable_expr,
AllOf(InitOperationsAre(),
EvalOperationsAre(
"FLOAT32 [0x10] = math.add(FLOAT32 [0x00], FLOAT32 [0x04])",
"FLOAT32 [0x0C] = math.add(FLOAT32 [0x10], FLOAT32 [0x08])")));
FrameLayout layout = std::move(layout_builder).Build();
EXPECT_EQ(layout.AllocSize(), sizeof(float) * 5)
<< "Side outputs shouldn't create any extra overhead";
RootEvaluationContext ctx(&layout);
EXPECT_OK(executable_expr->InitializeLiterals(&ctx));
ctx.Set(x_slot, 1.0f);
ctx.Set(y_slot, 10.0f);
ctx.Set(z_slot, 100.0f);
EXPECT_THAT(executable_expr->Execute(&ctx), IsOk());
ASSERT_OK_AND_ASSIGN(auto output_slot, typed_output_slot.ToSlot<float>());
ASSERT_THAT(executable_expr->named_output_slots(),
UnorderedElementsAre(Pair("x+y", _)));
ASSERT_OK_AND_ASSIGN(
auto xpy_slot,
executable_expr->named_output_slots().at("x+y").ToSlot<float>());
EXPECT_EQ(ctx.Get(output_slot), 111.0f);
EXPECT_EQ(ctx.Get(xpy_slot), 11.0f);
}
TEST_P(EvalVisitorParameterizedTest, EvalWithSideOutput) {
DynamicEvaluationEngineOptions options;
options.collect_op_descriptions = true;
ASSERT_OK_AND_ASSIGN(auto expr, CallOp("math.add", {Leaf("x"), Leaf("y")}));
ASSERT_OK_AND_ASSIGN(auto side_output_expr,
CallOp("math.multiply", {Leaf("y"), Leaf("z")}));
FrameLayout::Builder layout_builder;
auto x_slot = layout_builder.AddSlot<float>();
auto y_slot = layout_builder.AddSlot<float>();
auto z_slot = layout_builder.AddSlot<float>();
ASSERT_OK_AND_ASSIGN(auto executable_expr,
CompileAndBindForDynamicEvaluation(
options, &layout_builder, expr,
{{"x", TypedSlot::FromSlot(x_slot)},
{"y", TypedSlot::FromSlot(y_slot)},
{"z", TypedSlot::FromSlot(z_slot)}},
std::nullopt,
{{"y*z", side_output_expr}}));
EXPECT_THAT(
executable_expr,
AllOf(InitOperationsAre(),
EvalOperationsAre(
"FLOAT32 [0x0C] = math.add(FLOAT32 [0x00], FLOAT32 [0x04])",
"FLOAT32 [0x10] = math.multiply(FLOAT32 [0x04], FLOAT32 "
"[0x08])")));
FrameLayout layout = std::move(layout_builder).Build();
RootEvaluationContext ctx(&layout);
EXPECT_OK(executable_expr->InitializeLiterals(&ctx));
ctx.Set(x_slot, 1.0f);
ctx.Set(y_slot, 10.0f);
ctx.Set(z_slot, 100.0f);
EXPECT_THAT(executable_expr->Execute(&ctx), IsOk());
ASSERT_OK_AND_ASSIGN(auto output_slot,
executable_expr->output_slot().ToSlot<float>());
ASSERT_THAT(executable_expr->named_output_slots(),
UnorderedElementsAre(Pair("y*z", _)));
ASSERT_OK_AND_ASSIGN(
auto side_output_slot,
executable_expr->named_output_slots().at("y*z").ToSlot<float>());
EXPECT_EQ(ctx.Get(output_slot), 11.0f);
EXPECT_EQ(ctx.Get(side_output_slot), 1000.0f);
}
TEST_P(EvalVisitorParameterizedTest, EvalWithShortCircuit) {
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp("core.where", {Leaf("do_divide"),
CallOp("math.multiply", {Leaf("x"), Leaf("y")}),
CallOp("math.floordiv", {Leaf("x"), Leaf("y")})}));
FrameLayout::Builder layout_builder;
auto x_slot = layout_builder.AddSlot<OptionalValue<int>>();
auto y_slot = layout_builder.AddSlot<int>();
auto do_divide_slot = layout_builder.AddSlot<OptionalUnit>();
ASSERT_OK_AND_ASSIGN(
auto executable_expr,
CompileAndBindForDynamicEvaluation(
options_, &layout_builder, expr,
{{"x", TypedSlot::FromSlot(x_slot)},
{"y", TypedSlot::FromSlot(y_slot)},
{"do_divide", TypedSlot::FromSlot(do_divide_slot)}}));
if (GetParam().use_default_optimizer) {
EXPECT_THAT(
executable_expr,
AllOf(InitOperationsAre(),
EvalOperationsAre(
"OPTIONAL_INT32 [0x18] = core.to_optional._scalar(INT32 "
"[0x08])",
"jump_if_not<+2>(OPTIONAL_UNIT [0x0C])",
"OPTIONAL_INT32 [0x10] = math.multiply(OPTIONAL_INT32 "
"[0x00], OPTIONAL_INT32 [0x18])",
"jump<+1>()",
"OPTIONAL_INT32 [0x10] = math.floordiv(OPTIONAL_INT32 "
"[0x00], OPTIONAL_INT32 [0x18])")));
} else {
EXPECT_THAT(
executable_expr,
AllOf(InitOperationsAre(),
EvalOperationsAre(
"OPTIONAL_INT32 [0x18] = core.to_optional._scalar(INT32 "
"[0x08])",
"OPTIONAL_INT32 [0x20] = math.multiply(OPTIONAL_INT32 "
"[0x00], OPTIONAL_INT32 [0x18])",
"OPTIONAL_INT32 [0x28] = math.floordiv(OPTIONAL_INT32 "
"[0x00], OPTIONAL_INT32 [0x18])",
"OPTIONAL_INT32 [0x10] = core.where(OPTIONAL_UNIT [0x0C], "
"OPTIONAL_INT32 [0x20], OPTIONAL_INT32 [0x28])")));
}
FrameLayout layout = std::move(layout_builder).Build();
RootEvaluationContext ctx(&layout);
EXPECT_OK(executable_expr->InitializeLiterals(&ctx));
ctx.Set(x_slot, 1);
ctx.Set(y_slot, 0);
ctx.Set(do_divide_slot, kPresent);
if (GetParam().use_default_optimizer) {
EXPECT_THAT(executable_expr->Execute(&ctx), IsOk());
ASSERT_OK_AND_ASSIGN(
auto output_slot,
executable_expr->output_slot().ToSlot<OptionalValue<int>>());
EXPECT_EQ(ctx.Get(output_slot), 0);
} else {
EXPECT_THAT(executable_expr->Execute(&ctx),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("division by zero; during evaluation of "
"operator math.floordiv")));
}
}
TEST_P(EvalVisitorParameterizedTest, EvalWithNamedOutputUnusedButExported) {
DynamicEvaluationEngineOptions options;
options.collect_op_descriptions = true;
ASSERT_OK_AND_ASSIGN(
auto first_op,
MakeLambdaOperator(ExprOperatorSignature::Make("p0, _px, _py"),
Placeholder("p0")));
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp(first_op,
{CallOp("math.add", {Leaf("x"), Leaf("z")}),
WithExportAnnotation(CallOp("math.add", {Leaf("x"), Leaf("y")}),
"x+y"),
WithExportAnnotation(
CallOp("math.multiply", {Leaf("y"), Leaf("z")}), "y*z")}));
ASSERT_OK_AND_ASSIGN((auto [stripped_expr, side_outputs]),
ExtractSideOutputs(expr));
FrameLayout::Builder layout_builder;
auto x_slot = layout_builder.AddSlot<float>();
auto y_slot = layout_builder.AddSlot<float>();
auto z_slot = layout_builder.AddSlot<float>();
ASSERT_OK_AND_ASSIGN(auto executable_expr,
CompileAndBindForDynamicEvaluation(
options, &layout_builder, stripped_expr,
{{"x", TypedSlot::FromSlot(x_slot)},
{"y", TypedSlot::FromSlot(y_slot)},
{"z", TypedSlot::FromSlot(z_slot)}},
std::nullopt, side_outputs));
EXPECT_THAT(
executable_expr,
AllOf(InitOperationsAre(),
EvalOperationsAre(
"FLOAT32 [0x0C] = math.add(FLOAT32 [0x00], FLOAT32 [0x08])",
"FLOAT32 [0x10] = math.add(FLOAT32 [0x00], FLOAT32 [0x04])",
"FLOAT32 [0x14] = math.multiply(FLOAT32 [0x04], FLOAT32 "
"[0x08])")));
FrameLayout layout = std::move(layout_builder).Build();
EXPECT_EQ(layout.AllocSize(), sizeof(float) * 6)
<< "Side outputs used outside of main expression require "
"extra slots";
RootEvaluationContext ctx(&layout);
EXPECT_OK(executable_expr->InitializeLiterals(&ctx));
ctx.Set(x_slot, 1.0f);
ctx.Set(y_slot, 10.0f);
ctx.Set(z_slot, 100.0f);
EXPECT_THAT(executable_expr->Execute(&ctx), IsOk());
ASSERT_OK_AND_ASSIGN(auto output_slot,
executable_expr->output_slot().ToSlot<float>());
EXPECT_EQ(ctx.Get(output_slot), 101.0f);
ASSERT_THAT(executable_expr->named_output_slots(),
UnorderedElementsAre(Pair("x+y", _), Pair("y*z", _)));
ASSERT_OK_AND_ASSIGN(
auto xpy_slot,
executable_expr->named_output_slots().at("x+y").ToSlot<float>());
EXPECT_EQ(ctx.Get(xpy_slot), 11.0f);
ASSERT_OK_AND_ASSIGN(
auto xtz_slot,
executable_expr->named_output_slots().at("y*z").ToSlot<float>());
EXPECT_EQ(ctx.Get(xtz_slot), 1000.0f);
}
TEST_P(EvalVisitorParameterizedTest, EvalWithExportAnnotation) {
DynamicEvaluationEngineOptions options;
options.collect_op_descriptions = true;
ASSERT_OK_AND_ASSIGN(
auto expr, CallOp("math.add",
{WithExportAnnotation(
CallOp("math.add", {Leaf("x"), Leaf("y")}), "x+y"),
Leaf("z")}));
ASSERT_OK_AND_ASSIGN((auto [stripped_expr, side_outputs]),
ExtractSideOutputs(expr));
FrameLayout::Builder layout_builder;
auto x_slot = layout_builder.AddSlot<float>();
auto y_slot = layout_builder.AddSlot<float>();
auto z_slot = layout_builder.AddSlot<float>();
ASSERT_OK_AND_ASSIGN(auto executable_expr,
CompileAndBindForDynamicEvaluation(
options, &layout_builder, stripped_expr,
{{"x", TypedSlot::FromSlot(x_slot)},
{"y", TypedSlot::FromSlot(y_slot)},
{"z", TypedSlot::FromSlot(z_slot)}},
std::nullopt, side_outputs));
EXPECT_THAT(
executable_expr,
AllOf(InitOperationsAre(),
EvalOperationsAre(
"FLOAT32 [0x10] = math.add(FLOAT32 [0x00], FLOAT32 [0x04])",
"FLOAT32 [0x0C] = math.add(FLOAT32 [0x10], FLOAT32 [0x08])")));
FrameLayout layout = std::move(layout_builder).Build();
RootEvaluationContext ctx(&layout);
EXPECT_OK(executable_expr->InitializeLiterals(&ctx));
ctx.Set(x_slot, 1.0f);
ctx.Set(y_slot, 10.0f);
ctx.Set(z_slot, 100.0f);
EXPECT_THAT(executable_expr->Execute(&ctx), IsOk());
ASSERT_OK_AND_ASSIGN(auto output_slot,
executable_expr->output_slot().ToSlot<float>());
ASSERT_THAT(executable_expr->named_output_slots(),
UnorderedElementsAre(Pair("x+y", _)));
ASSERT_OK_AND_ASSIGN(
auto xpy_slot,
executable_expr->named_output_slots().at("x+y").ToSlot<float>());
EXPECT_EQ(ctx.Get(output_slot), 111.0f);
EXPECT_EQ(ctx.Get(xpy_slot), 11.0f);
}
TEST_P(EvalVisitorParameterizedTest, EvalWithExportAnnotation_AllLiterals) {
DynamicEvaluationEngineOptions options;
options.collect_op_descriptions = true;
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp("math.add",
{Literal(1.f), WithExportAnnotation(Literal(10.f), "out_y")}));
ASSERT_OK_AND_ASSIGN((auto [stripped_expr, side_outputs]),
ExtractSideOutputs(expr));
FrameLayout::Builder layout_builder;
ASSERT_OK_AND_ASSIGN(auto executable_expr,
CompileAndBindForDynamicEvaluation(
options, &layout_builder, stripped_expr, {},
std::nullopt, side_outputs));
EXPECT_THAT(
executable_expr,
AllOf(InitOperationsAre("FLOAT32 [0x04] = 11.\n"
"FLOAT32 [0x08] = 10."),
EvalOperationsAre("FLOAT32 [0x00] = core._copy(FLOAT32 [0x04])")));
FrameLayout layout = std::move(layout_builder).Build();
RootEvaluationContext ctx(&layout);
EXPECT_OK(executable_expr->InitializeLiterals(&ctx));
EXPECT_THAT(executable_expr->Execute(&ctx), IsOk());
ASSERT_OK_AND_ASSIGN(auto output_slot,
executable_expr->output_slot().ToSlot<float>());
ASSERT_THAT(executable_expr->named_output_slots(),
UnorderedElementsAre(Pair("out_y", _)));
ASSERT_OK_AND_ASSIGN(
auto out_y_slot,
executable_expr->named_output_slots().at("out_y").ToSlot<float>());
EXPECT_EQ(ctx.Get(output_slot), 11.0f);
EXPECT_EQ(ctx.Get(out_y_slot), 10.0f);
}
TEST_P(EvalVisitorParameterizedTest, EvalWithLiteral) {
ASSERT_OK_AND_ASSIGN(auto expr,
CallOp("math.add", {Leaf("x"), Literal(1.f)}));
FrameLayout::Builder layout_builder;
auto x_slot = layout_builder.AddSlot<float>();
ASSERT_OK_AND_ASSIGN(
auto executable_expr,
CompileAndBindForDynamicEvaluation(options_, &layout_builder, expr,
{{"x", TypedSlot::FromSlot(x_slot)}}));
EXPECT_THAT(
executable_expr,
AllOf(InitOperationsAre("FLOAT32 [0x08] = 1."),
EvalOperationsAre(
"FLOAT32 [0x04] = math.add(FLOAT32 [0x00], FLOAT32 [0x08])")));
FrameLayout layout = std::move(layout_builder).Build();
RootEvaluationContext ctx(&layout);
EXPECT_OK(executable_expr->InitializeLiterals(&ctx));
ctx.Set(x_slot, 2.0f);
EXPECT_THAT(executable_expr->Execute(&ctx), IsOk());
ASSERT_OK_AND_ASSIGN(auto output_slot,
executable_expr->output_slot().ToSlot<float>());
EXPECT_THAT(ctx.Get(output_slot), Eq(3.0f));
}
TEST_P(EvalVisitorParameterizedTest, EvalSingleLeaf) {
auto expr = Leaf("x");
FrameLayout::Builder layout_builder;
auto x_slot = layout_builder.AddSlot<float>();
auto output_slot = layout_builder.AddSlot<float>();
ASSERT_OK_AND_ASSIGN(
auto executable_expr,
CompileAndBindForDynamicEvaluation(options_, &layout_builder, expr,
{{"x", TypedSlot::FromSlot(x_slot)}},
TypedSlot::FromSlot(output_slot)));
EXPECT_THAT(
executable_expr,
AllOf(InitOperationsAre(),
EvalOperationsAre("FLOAT32 [0x04] = core._copy(FLOAT32 [0x00])")));
FrameLayout layout = std::move(layout_builder).Build();
RootEvaluationContext ctx(&layout);
EXPECT_OK(executable_expr->InitializeLiterals(&ctx));
ctx.Set(x_slot, 2.0f);
EXPECT_THAT(executable_expr->Execute(&ctx), IsOk());
EXPECT_THAT(ctx.Get(output_slot), Eq(2.0f));
}
TEST_P(EvalVisitorParameterizedTest, EvalOnlyLiterals) {
auto x = Literal(2.f);
auto y = Literal(1.f);
ASSERT_OK_AND_ASSIGN(auto expr, CallOp("math.add", {x, y}));
FrameLayout::Builder layout_builder;
ASSERT_OK_AND_ASSIGN(
auto executable_expr,
CompileAndBindForDynamicEvaluation(options_, &layout_builder, expr, {}));
EXPECT_THAT(
executable_expr,
AllOf(InitOperationsAre("FLOAT32 [0x04] = 3."),
EvalOperationsAre("FLOAT32 [0x00] = core._copy(FLOAT32 [0x04])")));
FrameLayout layout = std::move(layout_builder).Build();
RootEvaluationContext ctx(&layout);
ASSERT_OK_AND_ASSIGN(auto output_slot,
executable_expr->output_slot().ToSlot<float>());
ctx.Set(output_slot, 57.0f);
EXPECT_OK(executable_expr->InitializeLiterals(&ctx));
EXPECT_THAT(ctx.Get(output_slot), Eq(57.0f));
EXPECT_THAT(executable_expr->Execute(&ctx), IsOk());
EXPECT_THAT(ctx.Get(output_slot), Eq(3.0f));
}
TEST_P(EvalVisitorParameterizedTest, EvalUnboundLeafError) {
ASSERT_OK_AND_ASSIGN(auto expr, CallOp("math.add", {Leaf("x"), Leaf("y")}));
EXPECT_THAT(
CompileForDynamicEvaluation(options_, expr, {{"y", GetQType<float>()}}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("missing QType information for inputs {x}")));
EXPECT_THAT(
CompileForDynamicEvaluation(options_, expr, {}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("missing QType information for inputs {x, y}")));
ASSERT_OK_AND_ASSIGN(auto compiled_model,
CompileForDynamicEvaluation(options_, expr,
{{"x", GetQType<float>()},
{"y", GetQType<float>()}}));
FrameLayout::Builder layout_builder;
EXPECT_THAT(compiled_model->Bind(
&layout_builder,
{{"y", TypedSlot::FromSlot(layout_builder.AddSlot<float>())}},
TypedSlot::FromSlot(layout_builder.AddSlot<float>())),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("missed slots: x")));
EXPECT_THAT(compiled_model->Bind(
&layout_builder, {},
TypedSlot::FromSlot(layout_builder.AddSlot<float>())),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("missed slots: x,y")));
}
TEST_P(EvalVisitorParameterizedTest, EvalPlaceholderError) {
auto x = Literal(2.f);
ASSERT_OK_AND_ASSIGN(
auto y, WithQTypeAnnotation(Placeholder("y"), GetQType<float>()));
ASSERT_OK_AND_ASSIGN(auto expr, CallOp("math.add", {x, y}));
EXPECT_THAT(
CompileForDynamicEvaluation(options_, expr, {{"y", GetQType<float>()}}),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr(
"placeholders should be substituted before evaluation: y")));
}
TEST_P(EvalVisitorParameterizedTest, EvalOperatorTakingSameNodeTwice) {
auto x = Leaf("x");
ASSERT_OK_AND_ASSIGN(auto expr, CallOp("math.add", {x, x}));
FrameLayout::Builder layout_builder;
auto x_slot = layout_builder.AddSlot<float>();
ASSERT_OK_AND_ASSIGN(
auto executable_expr,
CompileAndBindForDynamicEvaluation(options_, &layout_builder, expr,
{{"x", TypedSlot::FromSlot(x_slot)}}));
EXPECT_THAT(
executable_expr,
AllOf(InitOperationsAre(),
EvalOperationsAre(
"FLOAT32 [0x04] = math.add(FLOAT32 [0x00], FLOAT32 [0x00])")));
FrameLayout layout = std::move(layout_builder).Build();
RootEvaluationContext ctx(&layout);
EXPECT_OK(executable_expr->InitializeLiterals(&ctx));
ctx.Set(x_slot, 2.0f);
EXPECT_THAT(executable_expr->Execute(&ctx), IsOk());
ASSERT_OK_AND_ASSIGN(auto output_slot,
executable_expr->output_slot().ToSlot<float>());
EXPECT_THAT(ctx.Get(output_slot), Eq(4.0f));
}
TEST_P(EvalVisitorParameterizedTest, EvalOperatorTakingTwoEqualNodes) {
auto x = Leaf("x");
auto y = Leaf("x");
ASSERT_OK_AND_ASSIGN(auto expr, CallOp("math.add", {x, y}));
FrameLayout::Builder layout_builder;
auto x_slot = layout_builder.AddSlot<float>();
ASSERT_OK_AND_ASSIGN(
auto executable_expr,
CompileAndBindForDynamicEvaluation(options_, &layout_builder, expr,
{{"x", TypedSlot::FromSlot(x_slot)}}));
EXPECT_THAT(
executable_expr,
AllOf(InitOperationsAre(),
EvalOperationsAre(
"FLOAT32 [0x04] = math.add(FLOAT32 [0x00], FLOAT32 [0x00])")));
FrameLayout layout = std::move(layout_builder).Build();
RootEvaluationContext ctx(&layout);
EXPECT_OK(executable_expr->InitializeLiterals(&ctx));
ctx.Set(x_slot, 2.0f);
EXPECT_THAT(executable_expr->Execute(&ctx), IsOk());
ASSERT_OK_AND_ASSIGN(auto output_slot,
executable_expr->output_slot().ToSlot<float>());
EXPECT_THAT(ctx.Get(output_slot), Eq(4.0f));
}
TEST_P(EvalVisitorParameterizedTest, EvalOperatorWithUnusedInputs) {
ASSERT_OK_AND_ASSIGN(
auto op_with_unused_input,
MakeLambdaOperator(ExprOperatorSignature{{"unused_input"}},
Literal<int32_t>(1)));
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op_with_unused_input, {Leaf("x")}));
FrameLayout::Builder layout_builder;
auto x_slot = layout_builder.AddSlot<float>();
ASSERT_OK_AND_ASSIGN(
auto executable_expr,
CompileAndBindForDynamicEvaluation(options_, &layout_builder, expr,
{{"x", TypedSlot::FromSlot(x_slot)}}));
EXPECT_THAT(
executable_expr,
AllOf(InitOperationsAre("INT32 [0x08] = 1"),
EvalOperationsAre("INT32 [0x04] = core._copy(INT32 [0x08])")));
}
TEST_P(EvalVisitorParameterizedTest, GetNth) {
const auto x = Literal<float>(2.f);
const auto y = Literal<int64_t>(3);
ASSERT_OK_AND_ASSIGN(const auto tuple, CallOp("core.make_tuple", {x, y}));
ASSERT_OK_AND_ASSIGN(const auto first, CallOp("core.get_first", {tuple}));
ASSERT_OK_AND_ASSIGN(const auto second, CallOp("core.get_second", {tuple}));
ASSERT_OK_AND_ASSIGN(const auto second_by_index,
CallOp(std::make_shared<GetNthOperator>(1), {tuple}));
ASSERT_OK_AND_ASSIGN(auto executable_first,
CompileForDynamicEvaluation(options_, first));
ASSERT_OK_AND_ASSIGN(auto executable_second,
CompileForDynamicEvaluation(options_, second));
ASSERT_OK_AND_ASSIGN(auto executable_second_by_index,
CompileForDynamicEvaluation(options_, second_by_index));
FrameLayout::Builder layout_builder;
ASSERT_OK_AND_ASSIGN(auto bound_executable_first,
executable_first->Bind(&layout_builder));
EXPECT_THAT(
bound_executable_first,
AllOf(InitOperationsAre("FLOAT32 [0x04] = 2."),
EvalOperationsAre("FLOAT32 [0x00] = core._copy(FLOAT32 [0x04])")));
ASSERT_OK_AND_ASSIGN(auto bound_executable_second,
executable_second->Bind(&layout_builder));
EXPECT_THAT(
bound_executable_second,
AllOf(InitOperationsAre("INT64 [0x10] = int64{3}"),
EvalOperationsAre("INT64 [0x08] = core._copy(INT64 [0x10])")));
ASSERT_OK_AND_ASSIGN(auto bound_executable_second_by_index,
executable_second_by_index->Bind(&layout_builder));
EXPECT_THAT(
bound_executable_second_by_index,
AllOf(InitOperationsAre("INT64 [0x20] = int64{3}"),
EvalOperationsAre("INT64 [0x18] = core._copy(INT64 [0x20])")));
FrameLayout layout = std::move(layout_builder).Build();
RootEvaluationContext ctx(&layout);
ASSERT_OK_AND_ASSIGN(auto output_first,
bound_executable_first->output_slot().ToSlot<float>());
EXPECT_OK(bound_executable_first->InitializeLiterals(&ctx));
EXPECT_OK(bound_executable_first->Execute(&ctx));
EXPECT_THAT(ctx.Get(output_first), FloatEq(2.0f));
ASSERT_OK_AND_ASSIGN(
auto output_second,
bound_executable_second->output_slot().ToSlot<int64_t>());
EXPECT_OK(bound_executable_second->InitializeLiterals(&ctx));
EXPECT_OK(bound_executable_second->Execute(&ctx));
EXPECT_THAT(ctx.Get(output_second), Eq(3));
ASSERT_OK_AND_ASSIGN(
auto output_second_by_index,
bound_executable_second->output_slot().ToSlot<int64_t>());
EXPECT_OK(bound_executable_second_by_index->InitializeLiterals(&ctx));
EXPECT_OK(bound_executable_second_by_index->Execute(&ctx));
EXPECT_THAT(ctx.Get(output_second_by_index), Eq(3));
}
TEST_P(EvalVisitorParameterizedTest, OptimizedHas) {
auto ten_times_has = Leaf("x");
for (int i = 0; i < 10; ++i) {
ASSERT_OK_AND_ASSIGN(ten_times_has, CallOp("core.has", {ten_times_has}));
}
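  // However deeply core.has is nested, compilation reduces it to a single
  // copy of the presence bit.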
FrameLayout::Builder layout_builder;
auto x_slot = layout_builder.AddSlot<OptionalValue<float>>();
EXPECT_THAT(
CompileAndBindForDynamicEvaluation(options_, &layout_builder,
ten_times_has,
{{"x", TypedSlot::FromSlot(x_slot)}}),
IsOkAndHolds(AllOf(
InitOperationsAre(),
EvalOperationsAre(
"OPTIONAL_UNIT [0x08] = core._copy(OPTIONAL_UNIT [0x00])"))));
}
class IdentityAnnotation final : public AnnotationExprOperatorTag,
public ExprOperatorWithFixedSignature {
public:
IdentityAnnotation()
: ExprOperatorWithFixedSignature(
"id", ExprOperatorSignature::MakeArgsN(1), "",
FingerprintHasher("arolla::expr::IdentityAnnotation").Finish()) {}
absl::StatusOr<ExprAttributes> InferAttributes(
absl::Span<const ExprAttributes> inputs) const final {
return inputs[0];
}
};
TEST_P(EvalVisitorParameterizedTest, EvalAnnotation) {
auto x = Leaf("x");
const auto with_annotation = std::make_shared<IdentityAnnotation>();
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(with_annotation, {x}));
EXPECT_THAT(Invoke(expr, {{"x", TypedValue::FromValue(2.0f)}}),
IsOkAndHolds(TypedValueWith<float>(2.0f)));
}
TEST_P(EvalVisitorParameterizedTest, SlotRecycling) {
ASSERT_OK_AND_ASSIGN(auto float_sum,
CallOp("math.maximum", {Leaf("x"), Literal<float>(57)}));
ASSERT_OK_AND_ASSIGN(float_sum,
CallOp("math.maximum", {float_sum, Leaf("x")}));
ASSERT_OK_AND_ASSIGN(auto float_sum_4,
CallOp("math.maximum", {float_sum, Leaf("x")}));
ASSERT_OK_AND_ASSIGN(float_sum,
CallOp("math.maximum", {float_sum_4, Leaf("x")}));
ASSERT_OK_AND_ASSIGN(float_sum,
CallOp("math.maximum", {float_sum, Leaf("x")}));
ASSERT_OK_AND_ASSIGN(float_sum,
CallOp("math.maximum", {float_sum, Leaf("x")}));
ASSERT_OK_AND_ASSIGN(auto float_sum_8,
CallOp("math.maximum", {float_sum, Leaf("x")}));
{
FrameLayout::Builder layout_builder;
auto x_slot = layout_builder.AddSlot<float>();
EXPECT_THAT(
CompileAndBindForDynamicEvaluation(
options_, &layout_builder, float_sum_8,
{{"x", TypedSlot::FromSlot(x_slot)}}),
IsOkAndHolds(AllOf(
InitOperationsAre("FLOAT32 [0x08] = 57."),
EvalOperationsAre(
"FLOAT32 [0x0C] = math.maximum(FLOAT32 [0x00], FLOAT32 [0x08])",
"FLOAT32 [0x10] = math.maximum(FLOAT32 [0x0C], FLOAT32 [0x00])",
"FLOAT32 [0x0C] = math.maximum(FLOAT32 [0x10], FLOAT32 [0x00])",
"FLOAT32 [0x10] = math.maximum(FLOAT32 [0x0C], FLOAT32 [0x00])",
"FLOAT32 [0x0C] = math.maximum(FLOAT32 [0x10], FLOAT32 [0x00])",
"FLOAT32 [0x10] = math.maximum(FLOAT32 [0x0C], FLOAT32 [0x00])",
"FLOAT32 [0x04] = math.maximum(FLOAT32 [0x10], FLOAT32 "
"[0x00])"))));
}
{
FrameLayout::Builder layout_builder;
auto x_slot = layout_builder.AddSlot<float>();
EXPECT_THAT(
CompileAndBindForDynamicEvaluation(
options_, &layout_builder, float_sum_8,
{{"x", TypedSlot::FromSlot(x_slot)}},
{},
{{"sum_of_four", float_sum_4}}),
IsOkAndHolds(AllOf(
InitOperationsAre("FLOAT32 [0x08] = 57."),
EvalOperationsAre(
"FLOAT32 [0x0C] = math.maximum(FLOAT32 [0x00], FLOAT32 [0x08])",
"FLOAT32 [0x10] = math.maximum(FLOAT32 [0x0C], FLOAT32 [0x00])",
"FLOAT32 [0x0C] = math.maximum(FLOAT32 [0x10], FLOAT32 [0x00])",
"FLOAT32 [0x10] = math.maximum(FLOAT32 [0x0C], FLOAT32 [0x00])",
"FLOAT32 [0x14] = math.maximum(FLOAT32 [0x10], FLOAT32 [0x00])",
"FLOAT32 [0x10] = math.maximum(FLOAT32 [0x14], FLOAT32 [0x00])",
"FLOAT32 [0x04] = math.maximum(FLOAT32 [0x10], FLOAT32 "
"[0x00])"),
Pointee(Property(&BoundExpr::named_output_slots,
UnorderedElementsAre(Pair(
"sum_of_four",
Property(&TypedSlot::byte_offset, 0x0C))))))));
}
{
ASSERT_OK_AND_ASSIGN(
auto int_sum,
CallOp("math.maximum", {Leaf("y"), Literal<int32_t>(57)}));
ASSERT_OK_AND_ASSIGN(int_sum, CallOp("math.maximum", {int_sum, Leaf("y")}));
ASSERT_OK_AND_ASSIGN(int_sum, CallOp("math.maximum", {int_sum, Leaf("y")}));
ASSERT_OK_AND_ASSIGN(int_sum, CallOp("math.maximum", {int_sum, Leaf("y")}));
ASSERT_OK_AND_ASSIGN(int_sum, CallOp("math.maximum", {int_sum, Leaf("y")}));
ASSERT_OK_AND_ASSIGN(int_sum, CallOp("math.maximum", {int_sum, Leaf("y")}));
ASSERT_OK_AND_ASSIGN(auto int_sum_8,
CallOp("math.maximum", {int_sum, Leaf("y")}));
ASSERT_OK_AND_ASSIGN(auto sums_pair,
CallOp("core.make_tuple", {int_sum_8, float_sum_8}));
FrameLayout::Builder layout_builder;
auto x_slot = layout_builder.AddSlot<float>();
auto y_slot = layout_builder.AddSlot<int32_t>();
EXPECT_THAT(
CompileAndBindForDynamicEvaluation(
options_, &layout_builder, sums_pair,
{{"x", TypedSlot::FromSlot(x_slot)},
{"y", TypedSlot::FromSlot(y_slot)}}),
IsOkAndHolds(AllOf(
InitOperationsAre("INT32 [0x10] = 57\n"
"FLOAT32 [0x1C] = 57."),
EvalOperationsAre(
"INT32 [0x14] = math.maximum(INT32 [0x04], INT32 [0x10])",
"INT32 [0x18] = math.maximum(INT32 [0x14], INT32 [0x04])",
"INT32 [0x14] = math.maximum(INT32 [0x18], INT32 [0x04])",
"INT32 [0x18] = math.maximum(INT32 [0x14], INT32 [0x04])",
"INT32 [0x14] = math.maximum(INT32 [0x18], INT32 [0x04])",
"INT32 [0x18] = math.maximum(INT32 [0x14], INT32 [0x04])",
"INT32 [0x14] = math.maximum(INT32 [0x18], INT32 [0x04])",
"FLOAT32 [0x20] = math.maximum(FLOAT32 [0x00], FLOAT32 [0x1C])",
"FLOAT32 [0x24] = math.maximum(FLOAT32 [0x20], FLOAT32 [0x00])",
"FLOAT32 [0x20] = math.maximum(FLOAT32 [0x24], FLOAT32 [0x00])",
"FLOAT32 [0x24] = math.maximum(FLOAT32 [0x20], FLOAT32 [0x00])",
"FLOAT32 [0x20] = math.maximum(FLOAT32 [0x24], FLOAT32 [0x00])",
"FLOAT32 [0x24] = math.maximum(FLOAT32 [0x20], FLOAT32 [0x00])",
"FLOAT32 [0x20] = math.maximum(FLOAT32 [0x24], FLOAT32 [0x00])",
"tuple<INT32,FLOAT32> [0x08] = core.make_tuple(INT32 [0x14], "
"FLOAT32 [0x20])"))));
}
}
TEST_P(EvalVisitorParameterizedTest, TupleSubslotsNotRecycled) {
ASSERT_OK_AND_ASSIGN(auto xy_tuple,
CallOp("core.make_tuple", {Leaf("x"), Leaf("y")}));
ASSERT_OK_AND_ASSIGN(auto xyz_tuple,
CallOp("core.make_tuple", {xy_tuple, Leaf("z")}));
ASSERT_OK_AND_ASSIGN(
auto x_plus_z,
CallOp("math.maximum",
{CallOp("core.get_first", {CallOp("core.get_first", {xyz_tuple})}),
CallOp("core.get_second", {xyz_tuple})}));
ASSERT_OK_AND_ASSIGN(auto x_plus_z_2,
CallOp("math.maximum", {x_plus_z, x_plus_z}));
ASSERT_OK_AND_ASSIGN(
auto x_plus_z_again,
CallOp("core.get_first",
{CallOp("core.make_tuple", {x_plus_z, x_plus_z_2})}));
FrameLayout::Builder layout_builder;
auto x_slot = layout_builder.AddSlot<float>();
auto y_slot = layout_builder.AddSlot<float>();
auto z_slot = layout_builder.AddSlot<float>();
ASSERT_OK_AND_ASSIGN(auto bound_expr,
CompileAndBindForDynamicEvaluation(
options_, &layout_builder, x_plus_z_again,
{{"x", TypedSlot::FromSlot(x_slot)},
{"y", TypedSlot::FromSlot(y_slot)},
{"z", TypedSlot::FromSlot(z_slot)}}));
if (GetParam().use_default_optimizer) {
EXPECT_THAT(bound_expr,
AllOf(InitOperationsAre(),
EvalOperationsAre("FLOAT32 [0x0C] = math.maximum(FLOAT32 "
"[0x00], FLOAT32 [0x08])")));
} else {
EXPECT_THAT(
bound_expr,
AllOf(
InitOperationsAre(),
EvalOperationsAre(
"tuple<FLOAT32,FLOAT32> [0x10]"
" = core.make_tuple(FLOAT32 [0x00], FLOAT32 [0x04])",
"tuple<tuple<FLOAT32,FLOAT32>,FLOAT32> [0x18]"
" = core.make_tuple(tuple<FLOAT32,FLOAT32> [0x10], FLOAT32 "
"[0x08])",
"FLOAT32 [0x24] = math.maximum(FLOAT32 [0x18], FLOAT32 [0x20])",
"FLOAT32 [0x28] = math.maximum(FLOAT32 [0x24], FLOAT32 [0x24])",
"tuple<FLOAT32,FLOAT32> [0x10]"
" = core.make_tuple(FLOAT32 [0x24], FLOAT32 [0x28])",
"FLOAT32 [0x0C] = core._copy(FLOAT32 [0x10])")));
}
}
struct Point3D {
float x;
float y;
float z;
};
TEST_P(EvalVisitorParameterizedTest, TestWithInputLoader) {
auto x = Leaf("x");
auto y = Leaf("y");
auto z = Leaf("z");
ASSERT_OK_AND_ASSIGN(auto xy, CallOp("math.add", {x, y}));
ASSERT_OK_AND_ASSIGN(auto expr, CallOp("math.add", {xy, z}));
FrameLayout::Builder layout_builder;
ASSERT_OK_AND_ASSIGN(auto loader,
CreateAccessorsInputLoader<Point3D>(
"x", [](const Point3D& p) { return p.x; },
"y", [](const Point3D& p) { return p.y; },
"z", [](const Point3D& p) { return p.z; }));
ASSERT_OK_AND_ASSIGN(auto output_types,
GetInputLoaderQTypes(*loader, GetLeafKeys(expr)));
auto input_slots = AddSlotsMap(output_types, &layout_builder);
ASSERT_OK_AND_ASSIGN(auto bound_loader, loader->Bind(input_slots));
ASSERT_OK_AND_ASSIGN(auto executable_expr,
CompileAndBindForDynamicEvaluation(
options_, &layout_builder, expr, input_slots));
ASSERT_OK_AND_ASSIGN(auto output,
executable_expr->output_slot().ToSlot<float>());
FrameLayout layout = std::move(layout_builder).Build();
RootEvaluationContext ctx(&layout);
EXPECT_OK(executable_expr->InitializeLiterals(&ctx));
ASSERT_OK(bound_loader({1.0f, 10.0f, 100.0f}, ctx.frame()));
EXPECT_THAT(executable_expr->Execute(&ctx), IsOk());
EXPECT_THAT(ctx.Get(output), Eq(111.0f));
}
TEST_P(EvalVisitorParameterizedTest, DetailedStackTrace) {
ASSERT_OK_AND_ASSIGN(
auto sum_of_4_lambda,
MakeLambdaOperator(
"sum_of_4", ExprOperatorSignature{{"x"}},
CallOp("math.sum",
{Placeholder("x"),
CallOp("edge.from_sizes",
{CallOp("math.multiply",
{Literal(CreateDenseArray<int64_t>({1, 1})),
Literal(2)})})})));
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(sum_of_4_lambda, {Leaf("x")}));
auto options =
DynamicEvaluationEngineOptions{.enable_expr_stack_trace = true};
FrameLayout::Builder layout_builder;
auto x_slot = layout_builder.AddSlot<DenseArray<int64_t>>();
auto result_slot = layout_builder.AddSlot<DenseArray<int64_t>>();
ASSERT_OK_AND_ASSIGN(
auto executable_expr,
CompileAndBindForDynamicEvaluation(options, &layout_builder, expr,
{{"x", TypedSlot::FromSlot(x_slot)}},
TypedSlot::FromSlot(result_slot)));
auto layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&layout);
EvaluationContext ctx;
executable_expr->InitializeLiterals(&ctx, alloc.frame());
executable_expr->Execute(&ctx, alloc.frame());
EXPECT_THAT(
ctx.status(),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr("argument sizes mismatch: (4, 0); "
"during evaluation of operator math._sum\n"
"ORIGINAL NODE: sum_of_4(L.x)\n"
"COMPILED NODE: M.math._sum(L.x, dense_array_edge("
"split_points=dense_array([int64{0}, int64{2}, int64{4}]))"
", optional_int64{0})")));
}
TEST_P(EvalVisitorParameterizedTest, OperatorWithoutProxy) {
FrameLayout::Builder layout_builder;
ASSERT_OK_AND_ASSIGN(
auto node,
CallOp(std::make_shared<::arolla::expr::testing::DummyOp>(
"test.Dummy", ExprOperatorSignature::MakeVariadicArgs()),
{}));
EXPECT_THAT(
CompileAndBindForDynamicEvaluation(options_, &layout_builder, node, {}),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr("test.Dummy is not a builtin or backend ExprOperator; "
"while compiling node test.Dummy():INT32; the expression "
"is likely not fully compiled and is using derived "
"operators that are not supported in the backend")));
}
TEST_P(EvalVisitorParameterizedTest, DenseArrayStringReplace) {
EXPECT_THAT(InvokeExprOperator<DenseArray<Text>>(
"strings.replace",
CreateDenseArray<Text>({Text("Fuzzy"), Text("Wuzzy")}),
Text("zz"), Text("zzz")),
IsOkAndHolds(::testing::ElementsAre(
absl::string_view("Fuzzzy"), absl::string_view("Wuzzzy"))));
}
TEST_P(EvalVisitorParameterizedTest, VectorPrintf) {
DenseArray<Text> format_spec =
CreateConstDenseArray<Text>(3, "%s's atomic weight is %.4f");
DenseArray<Text> elements = CreateDenseArray<Text>(
{Text("Hydrogen"), Text("Helium"), Text("Lithium")});
DenseArray<float> weights =
      CreateDenseArray<float>({1.0079f, 4.0026f, 6.9410f});
EXPECT_THAT(InvokeExprOperator<DenseArray<Text>>(
"strings.printf", format_spec, elements, weights),
IsOkAndHolds(ElementsAre("Hydrogen's atomic weight is 1.0079",
"Helium's atomic weight is 4.0026",
"Lithium's atomic weight is 6.9410")));
}
TEST_P(EvalVisitorParameterizedTest, CompileAndBindExprOperator) {
ASSERT_OK_AND_ASSIGN(
auto x_plus_y_plus_1_op,
MakeLambdaOperator(
ExprOperatorSignature::Make("x, y"),
CallOp("math.add", {Placeholder("x"),
CallOp("math.add", {Placeholder("y"),
Literal<int64_t>(1)})})));
FrameLayout::Builder layout_builder;
auto x_slot = layout_builder.AddSlot<int64_t>();
auto y_slot = layout_builder.AddSlot<int64_t>();
auto result_slot = layout_builder.AddSlot<int64_t>();
ASSERT_OK_AND_ASSIGN(
std::shared_ptr<BoundExpr> executable,
CompileAndBindExprOperator(
options_, &layout_builder, x_plus_y_plus_1_op,
{TypedSlot::FromSlot(x_slot), TypedSlot::FromSlot(y_slot)},
TypedSlot::FromSlot(result_slot)));
FrameLayout layout = std::move(layout_builder).Build();
RootEvaluationContext ctx(&layout);
ctx.Set(x_slot, 10);
ctx.Set(y_slot, 100);
ASSERT_OK(executable->InitializeLiterals(&ctx));
ASSERT_OK(executable->Execute(&ctx));
EXPECT_THAT(ctx.Get(result_slot), Eq(111));
}
class HigherLevelTestOperator final : public BasicExprOperator {
public:
HigherLevelTestOperator()
: BasicExprOperator(
"test.higher_level_test_op", ExprOperatorSignature::MakeArgsN(1),
"",
FingerprintHasher(
"arolla::expr::eval_internal::HigherLevelTestOperator")
.Finish()) {}
absl::StatusOr<QTypePtr> GetOutputQType(
absl::Span<const QTypePtr> input_qtypes) const final {
return GetQType<float>();
}
};
class LowerLevelTestOperator final : public BasicExprOperator,
public BuiltinExprOperatorTag {
public:
LowerLevelTestOperator()
: BasicExprOperator(
"test.lower_level_test_op", ExprOperatorSignature::MakeArgsN(1), "",
FingerprintHasher(
"arolla::expr::eval_internal::LowerLevelTestOperator")
.Finish()) {}
absl::StatusOr<QTypePtr> GetOutputQType(
absl::Span<const QTypePtr> input_qtypes) const final {
return GetQType<float>();
}
};
TEST_P(EvalVisitorParameterizedTest, Extensions) {
eval_internal::NodeTransformationFn lower_transformation =
[](const DynamicEvaluationEngineOptions&,
ExprNodePtr node) -> absl::StatusOr<ExprNodePtr> {
if (node->is_op() &&
fast_dynamic_downcast_final<const HigherLevelTestOperator*>(
node->op().get()) != nullptr) {
return BindOp(std::make_shared<LowerLevelTestOperator>(),
node->node_deps(), {});
}
return node;
};
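  // Registering this transformation makes the compiler lower
  // HigherLevelTestOperator into LowerLevelTestOperator before binding.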
eval_internal::CompilerExtensionRegistry::GetInstance()
.RegisterNodeTransformationFn(lower_transformation);
eval_internal::CompileOperatorFn compile_test_op =
[](eval_internal::CompileOperatorFnArgs args)
-> std::optional<absl::Status> {
if (fast_dynamic_downcast_final<const LowerLevelTestOperator*>(
args.op.get()) == nullptr) {
return std::nullopt;
}
ASSIGN_OR_RETURN(auto output_slot, args.output_slot.ToSlot<float>());
args.executable_builder->AddEvalOp(
MakeBoundOperator(
[output_slot](EvaluationContext* ctx, FramePtr frame) {
frame.Set(output_slot, 57);
}),
eval_internal::FormatOperatorCall("lower level test operator", {},
{args.output_slot}),
"lower level test operator");
return absl::OkStatus();
};
eval_internal::CompilerExtensionRegistry::GetInstance()
.RegisterCompileOperatorFn(compile_test_op);
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp(std::make_shared<HigherLevelTestOperator>(), {Leaf("x")}));
FrameLayout::Builder layout_builder;
auto x_slot = TypedSlot::FromSlot(layout_builder.AddSlot<float>());
ASSERT_OK_AND_ASSIGN(auto bound_expr,
CompileAndBindForDynamicEvaluation(
options_, &layout_builder, expr, {{"x", x_slot}}));
EXPECT_THAT(
bound_expr,
AllOf(InitOperationsAre(),
EvalOperationsAre("FLOAT32 [0x04] = lower level test operator()")));
}
class OperatorThatFailsBind : public QExprOperator {
public:
OperatorThatFailsBind()
: QExprOperator(QExprOperatorSignature::Get({GetQType<float>()},
GetQType<float>())) {}
absl::StatusOr<std::unique_ptr<BoundOperator>> DoBind(
absl::Span<const TypedSlot> input_slots,
TypedSlot output_slot) const final {
return absl::InternalError("test error");
}
};
TEST_P(EvalVisitorParameterizedTest, OperatorThatFailsBind) {
OperatorRegistry qexpr_registry;
ASSERT_OK(qexpr_registry.RegisterOperator(
"test.operator_that_fails_bind",
std::make_unique<OperatorThatFailsBind>()));
ExprOperatorPtr op = std::make_shared<BackendWrappingOperator>(
"test.operator_that_fails_bind",
ExprOperatorSignature::MakeVariadicArgs(),
[](absl::Span<const QTypePtr> input_qtypes) -> absl::StatusOr<QTypePtr> {
return GetQType<float>();
},
"");
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {Leaf("x")}));
FrameLayout::Builder layout_builder;
auto x_slot = TypedSlot::FromSlot(layout_builder.AddSlot<float>());
DynamicEvaluationEngineOptions options(options_);
options.operator_directory = &qexpr_registry;
EXPECT_THAT(
CompileAndBindForDynamicEvaluation(options, &layout_builder, expr,
{{"x", x_slot}}),
StatusIs(absl::StatusCode::kInternal,
HasSubstr("test error; while binding operator "
"test.operator_that_fails_bind; while compiling node "
"test.operator_that_fails_bind(L.x)")));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/eval/eval.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/eval/eval_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
d13a71f9-9641-49d4-9959-d13d8a3bdbf0 | cpp | google/tensorstore | bfloat16 | tensorstore/util/bfloat16.h | tensorstore/util/bfloat16_test.cc | #ifndef TENSORSTORE_UTIL_BFLOAT16_H_
#define TENSORSTORE_UTIL_BFLOAT16_H_
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>
#include <type_traits>
#include "absl/base/casts.h"
#include <nlohmann/json_fwd.hpp>
namespace tensorstore {
class BFloat16;
}
namespace std {
template <>
struct numeric_limits<::tensorstore::BFloat16>;
}
namespace tensorstore {
namespace internal {
BFloat16 NumericFloat32ToBfloat16RoundNearestEven(float v);
BFloat16 Float32ToBfloat16RoundNearestEven(float v);
float Bfloat16ToFloat(BFloat16 v);
}
class BFloat16 {
public:
constexpr BFloat16() : rep_(0) {}
template <typename T,
typename = std::enable_if_t<std::is_convertible_v<T, float>>>
explicit BFloat16(T x) {
if constexpr (std::is_same_v<T, bool>) {
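      // 0x3f80 is the bfloat16 bit pattern of 1.0f, so bool maps to exactly
      // 0.0f or 1.0f.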
rep_ = static_cast<uint16_t>(x) * 0x3f80;
} else if constexpr (std::numeric_limits<T>::is_integer) {
*this = internal::NumericFloat32ToBfloat16RoundNearestEven(
static_cast<float>(x));
} else {
*this =
internal::Float32ToBfloat16RoundNearestEven(static_cast<float>(x));
}
}
operator float() const { return internal::Bfloat16ToFloat(*this); }
BFloat16& operator=(float v) { return *this = static_cast<BFloat16>(v); }
BFloat16& operator=(bool v) { return *this = static_cast<BFloat16>(v); }
template <typename T>
std::enable_if_t<std::numeric_limits<T>::is_integer, BFloat16&> operator=(
T v) {
return *this = static_cast<BFloat16>(v);
}
#define TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_OP(OP) \
friend BFloat16 operator OP(BFloat16 a, BFloat16 b) { \
return BFloat16(static_cast<float>(a) OP static_cast<float>(b)); \
} \
template <typename T> \
friend std::enable_if_t<std::numeric_limits<T>::is_integer, BFloat16> \
operator OP(BFloat16 a, T b) { \
return BFloat16(static_cast<float>(a) OP b); \
} \
template <typename T> \
friend std::enable_if_t<std::numeric_limits<T>::is_integer, BFloat16> \
operator OP(T a, BFloat16 b) { \
return BFloat16(a OP static_cast<float>(b)); \
  }
#define TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_ASSIGN_OP(OP) \
friend BFloat16& operator OP##=(BFloat16& a, BFloat16 b) { \
return a = BFloat16(static_cast<float>(a) OP static_cast<float>(b)); \
} \
template <typename T> \
friend std::enable_if_t<std::numeric_limits<T>::is_integer, BFloat16&> \
operator OP##=(BFloat16& a, T b) { \
return a = BFloat16(static_cast<float>(a) OP b); \
  }
TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_OP(+)
TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_ASSIGN_OP(+)
TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_OP(-)
TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_ASSIGN_OP(-)
TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_OP(*)
TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_ASSIGN_OP(*)
TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_OP(/)
TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_ASSIGN_OP(/)
#undef TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_OP
#undef TENSORSTORE_INTERNAL_BFLOAT16_ARITHMETIC_ASSIGN_OP
friend BFloat16 operator-(BFloat16 a) {
BFloat16 result;
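    // Negation only flips the sign bit; no rounding is needed.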
result.rep_ = a.rep_ ^ 0x8000;
return result;
}
friend BFloat16 operator+(BFloat16 a) { return a; }
friend BFloat16 operator++(BFloat16& a) {
a += BFloat16(1);
return a;
}
friend BFloat16 operator--(BFloat16& a) {
a -= BFloat16(1);
return a;
}
friend BFloat16 operator++(BFloat16& a, int) {
BFloat16 original_value = a;
++a;
return original_value;
}
friend BFloat16 operator--(BFloat16& a, int) {
BFloat16 original_value = a;
--a;
return original_value;
}
  template <template <typename U, typename V, typename... Args>
            class ObjectType,
            template <typename U, typename... Args> class ArrayType,
            class StringType, class BooleanType, class NumberIntegerType,
            class NumberUnsignedType, class NumberFloatType,
            template <typename U> class AllocatorType,
            template <typename T, typename SFINAE = void> class JSONSerializer,
            class BinaryType>
friend void to_json(
::nlohmann::basic_json<ObjectType, ArrayType, StringType, BooleanType,
NumberIntegerType, NumberUnsignedType,
NumberFloatType, AllocatorType, JSONSerializer,
BinaryType>& j,
BFloat16 v) {
j = static_cast<NumberFloatType>(v);
}
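  // Implementation details: rep_ holds the raw bits, and the bitcast
  // constructor lets the numeric_limits specialization below build constants
  // directly from bit patterns.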
struct bitcast_construct_t {};
explicit constexpr BFloat16(bitcast_construct_t, uint16_t rep) : rep_(rep) {}
uint16_t rep_;
};
inline bool isinf(BFloat16 x) { return std::isinf(static_cast<float>(x)); }
inline bool signbit(BFloat16 x) { return std::signbit(static_cast<float>(x)); }
inline bool isnan(BFloat16 x) { return std::isnan(static_cast<float>(x)); }
inline bool isfinite(BFloat16 x) {
return std::isfinite(static_cast<float>(x));
}
inline BFloat16 abs(BFloat16 x) {
x.rep_ &= 0x7fff;
return x;
}
inline BFloat16 exp(BFloat16 x) {
return BFloat16(std::exp(static_cast<float>(x)));
}
inline BFloat16 exp2(BFloat16 x) {
return BFloat16(std::exp2(static_cast<float>(x)));
}
inline BFloat16 expm1(BFloat16 x) {
return BFloat16(std::expm1(static_cast<float>(x)));
}
inline BFloat16 log(BFloat16 x) {
return BFloat16(std::log(static_cast<float>(x)));
}
inline BFloat16 log1p(BFloat16 x) {
return BFloat16(std::log1p(static_cast<float>(x)));
}
inline BFloat16 log10(BFloat16 x) {
return BFloat16(std::log10(static_cast<float>(x)));
}
inline BFloat16 log2(BFloat16 x) {
return BFloat16(std::log2(static_cast<float>(x)));
}
inline BFloat16 sqrt(BFloat16 x) {
return BFloat16(std::sqrt(static_cast<float>(x)));
}
inline BFloat16 pow(BFloat16 x, BFloat16 y) {
return BFloat16(std::pow(static_cast<float>(x), static_cast<float>(y)));
}
inline BFloat16 sin(BFloat16 x) {
return BFloat16(std::sin(static_cast<float>(x)));
}
inline BFloat16 cos(BFloat16 x) {
return BFloat16(std::cos(static_cast<float>(x)));
}
inline BFloat16 tan(BFloat16 x) {
return BFloat16(std::tan(static_cast<float>(x)));
}
inline BFloat16 asin(BFloat16 x) {
return BFloat16(std::asin(static_cast<float>(x)));
}
inline BFloat16 acos(BFloat16 x) {
return BFloat16(std::acos(static_cast<float>(x)));
}
inline BFloat16 atan(BFloat16 x) {
return BFloat16(std::atan(static_cast<float>(x)));
}
inline BFloat16 sinh(BFloat16 x) {
return BFloat16(std::sinh(static_cast<float>(x)));
}
inline BFloat16 cosh(BFloat16 x) {
return BFloat16(std::cosh(static_cast<float>(x)));
}
inline BFloat16 tanh(BFloat16 x) {
return BFloat16(std::tanh(static_cast<float>(x)));
}
inline BFloat16 asinh(BFloat16 x) {
return BFloat16(std::asinh(static_cast<float>(x)));
}
inline BFloat16 acosh(BFloat16 x) {
return BFloat16(std::acosh(static_cast<float>(x)));
}
inline BFloat16 atanh(BFloat16 x) {
return BFloat16(std::atanh(static_cast<float>(x)));
}
inline BFloat16 floor(BFloat16 x) {
return BFloat16(std::floor(static_cast<float>(x)));
}
inline BFloat16 trunc(BFloat16 x) {
return BFloat16(std::trunc(static_cast<float>(x)));
}
inline BFloat16 rint(BFloat16 x) {
return BFloat16(std::rint(static_cast<float>(x)));
}
inline BFloat16 ceil(BFloat16 x) {
return BFloat16(std::ceil(static_cast<float>(x)));
}
inline BFloat16 fmod(BFloat16 x, BFloat16 y) {
return BFloat16(std::fmod(static_cast<float>(x), static_cast<float>(y)));
}
inline BFloat16 fmin(BFloat16 a, BFloat16 b) {
return BFloat16(std::fmin(static_cast<float>(a), static_cast<float>(b)));
}
inline BFloat16 fmax(BFloat16 a, BFloat16 b) {
return BFloat16(std::fmax(static_cast<float>(a), static_cast<float>(b)));
}
inline BFloat16 nextafter(BFloat16 from, BFloat16 to) {
const uint16_t from_as_int = absl::bit_cast<uint16_t>(from),
to_as_int = absl::bit_cast<uint16_t>(to);
const uint16_t sign_mask = 1 << 15;
float from_as_float(from), to_as_float(to);
if (std::isnan(from_as_float) || std::isnan(to_as_float)) {
return BFloat16(std::numeric_limits<float>::quiet_NaN());
}
if (from_as_int == to_as_int) {
return to;
}
if (from_as_float == 0) {
if (to_as_float == 0) {
return to;
} else {
return absl::bit_cast<BFloat16, uint16_t>((to_as_int & sign_mask) | 1);
}
}
uint16_t from_sign = from_as_int & sign_mask;
uint16_t to_sign = to_as_int & sign_mask;
uint16_t from_abs = from_as_int & ~sign_mask;
uint16_t to_abs = to_as_int & ~sign_mask;
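  // Modulo 2^16, adding 0xFFFF decrements the bit pattern: step toward zero
  // when |from| > |to| or the signs differ, otherwise increment.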
uint16_t magnitude_adjustment =
(from_abs > to_abs || from_sign != to_sign) ? 0xFFFF : 0x0001;
return absl::bit_cast<BFloat16, uint16_t>(from_as_int + magnitude_adjustment);
}
namespace internal {
inline uint16_t GetFloat32High16(float v) {
return static_cast<uint16_t>(absl::bit_cast<uint32_t>(v) >> 16);
}
inline BFloat16 Float32ToBfloat16Truncate(float v) {
uint32_t bits = absl::bit_cast<uint32_t>(v);
if (std::isnan(v)) {
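    // Set mantissa bit 21 so the high 16 bits keep a non-zero mantissa and the
    // truncated value remains a NaN rather than becoming infinity.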
bits |= (static_cast<uint32_t>(1) << 21);
}
return absl::bit_cast<BFloat16, uint16_t>(bits >> 16);
}
inline BFloat16 NumericFloat32ToBfloat16RoundNearestEven(float v) {
assert(!std::isnan(v));
uint32_t input = absl::bit_cast<uint32_t>(v);
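  // Round half to even: bias by 0x7fff plus the least significant bit that
  // will be kept, then truncate to the high 16 bits.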
const uint32_t lsb = (input >> 16) & 1;
const uint32_t rounding_bias = 0x7fff + lsb;
input += rounding_bias;
return absl::bit_cast<BFloat16, uint16_t>(input >> 16);
}
inline BFloat16 Float32ToBfloat16RoundNearestEven(float v) {
if (std::isnan(v)) {
return tensorstore::BFloat16(
tensorstore::BFloat16::bitcast_construct_t{},
static_cast<uint16_t>((absl::bit_cast<uint32_t>(v) | 0x00200000u) >>
16));
}
return NumericFloat32ToBfloat16RoundNearestEven(v);
}
inline float Bfloat16ToFloat(BFloat16 v) {
return absl::bit_cast<float>(
static_cast<uint32_t>(absl::bit_cast<uint16_t>(v)) << 16);
}
}
}
namespace std {
template <>
struct numeric_limits<tensorstore::BFloat16> {
static constexpr bool is_specialized = true;
static constexpr bool is_signed = true;
static constexpr bool is_integer = false;
static constexpr bool is_exact = false;
static constexpr bool has_infinity = true;
static constexpr bool has_quiet_NaN = true;
static constexpr bool has_signaling_NaN = true;
static constexpr float_denorm_style has_denorm = std::denorm_present;
static constexpr bool has_denorm_loss = false;
static constexpr std::float_round_style round_style =
numeric_limits<float>::round_style;
static constexpr bool is_iec559 = false;
static constexpr bool is_bounded = true;
static constexpr bool is_modulo = false;
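  // 7 explicit mantissa bits plus the implicit leading one.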
static constexpr int digits = 8;
static constexpr int digits10 = 2;
static constexpr int max_digits10 = 4;
static constexpr int radix = 2;
static constexpr int min_exponent = numeric_limits<float>::min_exponent;
static constexpr int min_exponent10 = numeric_limits<float>::min_exponent10;
static constexpr int max_exponent = numeric_limits<float>::max_exponent;
static constexpr int max_exponent10 = numeric_limits<float>::max_exponent10;
static constexpr bool traps = numeric_limits<float>::traps;
static constexpr bool tinyness_before =
numeric_limits<float>::tinyness_before;
static constexpr tensorstore::BFloat16 min() {
return tensorstore::BFloat16(tensorstore::BFloat16::bitcast_construct_t{},
static_cast<uint16_t>(0x0080));
}
static constexpr tensorstore::BFloat16 lowest() {
return tensorstore::BFloat16(tensorstore::BFloat16::bitcast_construct_t{},
static_cast<uint16_t>(0xff7f));
}
static constexpr tensorstore::BFloat16 max() {
return tensorstore::BFloat16(tensorstore::BFloat16::bitcast_construct_t{},
static_cast<uint16_t>(0x7f7f));
}
static constexpr tensorstore::BFloat16 epsilon() {
return tensorstore::BFloat16(tensorstore::BFloat16::bitcast_construct_t{},
static_cast<uint16_t>(0x3c00));
}
static constexpr tensorstore::BFloat16 round_error() {
return tensorstore::BFloat16(tensorstore::BFloat16::bitcast_construct_t{},
static_cast<uint16_t>(0x3f00));
}
static constexpr tensorstore::BFloat16 infinity() {
return tensorstore::BFloat16(tensorstore::BFloat16::bitcast_construct_t{},
static_cast<uint16_t>(0x7f80));
}
static constexpr tensorstore::BFloat16 quiet_NaN() {
return tensorstore::BFloat16(tensorstore::BFloat16::bitcast_construct_t{},
static_cast<uint16_t>(0x7fc0));
}
static constexpr tensorstore::BFloat16 signaling_NaN() {
return tensorstore::BFloat16(tensorstore::BFloat16::bitcast_construct_t{},
static_cast<uint16_t>(0x7f81));
}
static constexpr tensorstore::BFloat16 denorm_min() {
return tensorstore::BFloat16(tensorstore::BFloat16::bitcast_construct_t{},
static_cast<uint16_t>(0x0001));
}
};
}
#endif | #include "tensorstore/util/bfloat16.h"
#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/casts.h"
#include "tensorstore/internal/json_gtest.h"
namespace {
using ::tensorstore::internal::Float32ToBfloat16RoundNearestEven;
using ::tensorstore::internal::Float32ToBfloat16Truncate;
using bfloat16_t = tensorstore::BFloat16;
::testing::Matcher<bfloat16_t> MatchesBits(uint16_t bits) {
return ::testing::ResultOf(
[](bfloat16_t y) { return absl::bit_cast<uint16_t>(y); },
::testing::Eq(bits));
}
::testing::Matcher<float> NearFloat(float x, float relative_error = 1e-3) {
return ::testing::FloatNear(x, std::abs(x) * relative_error);
}
float BinaryToFloat(uint32_t sign, uint32_t exponent, uint32_t high_mantissa,
uint32_t low_mantissa) {
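  // Assembles an IEEE-754 binary32 from its fields: sign at bit 31, 8 exponent
  // bits, and the mantissa given as 7 high bits plus 16 low bits.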
float dest;
uint32_t src =
(sign << 31) + (exponent << 23) + (high_mantissa << 16) + low_mantissa;
memcpy(static_cast<void*>(&dest), static_cast<const void*>(&src),
sizeof(dest));
return dest;
}
void TestTruncate(float input, float expected_truncation,
float expected_rounding) {
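  // Exercises both conversion paths: plain truncation and
  // round-to-nearest-even.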
bfloat16_t truncated = Float32ToBfloat16Truncate(input);
bfloat16_t rounded = Float32ToBfloat16RoundNearestEven(input);
if (std::isnan(input)) {
EXPECT_TRUE(std::isnan(truncated));
EXPECT_TRUE(std::isnan(rounded));
return;
}
EXPECT_EQ(expected_truncation, static_cast<float>(truncated));
EXPECT_EQ(expected_rounding, static_cast<float>(rounded));
}
template <typename T>
void TestRoundtrips() {
for (T value : {
-std::numeric_limits<T>::infinity(),
std::numeric_limits<T>::infinity(),
T(-1.0),
T(-0.5),
T(-0.0),
T(1.0),
T(0.5),
T(0.0),
}) {
EXPECT_EQ(value, static_cast<T>(static_cast<bfloat16_t>(value)));
}
}
TEST(Bfloat16Test, FloatRoundtrips) { TestRoundtrips<float>(); }
TEST(Bfloat16Test, DoubleRoundtrips) { TestRoundtrips<double>(); }
TEST(Bfloat16Test, Bfloat16Roundtrips) { TestRoundtrips<bfloat16_t>(); }
TEST(Bfloat16Test, ConversionFromFloat) {
EXPECT_THAT(bfloat16_t(1.0f), MatchesBits(0x3f80));
EXPECT_THAT(bfloat16_t(0.5f), MatchesBits(0x3f00));
EXPECT_THAT(bfloat16_t(0.33333f), MatchesBits(0x3eab));
EXPECT_THAT(bfloat16_t(3.38e38f), MatchesBits(0x7f7e));
EXPECT_THAT(bfloat16_t(3.40e38f), MatchesBits(0x7f80));
}
TEST(Bfloat16Test, RoundToNearestEven) {
float val1 = static_cast<float>(absl::bit_cast<bfloat16_t>(uint16_t{0x3c00}));
float val2 = static_cast<float>(absl::bit_cast<bfloat16_t>(uint16_t{0x3c01}));
float val3 = static_cast<float>(absl::bit_cast<bfloat16_t>(uint16_t{0x3c02}));
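  // 0x3c00 and 0x3c02 have even mantissa LSBs; exact midpoints must round
  // to them.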
EXPECT_THAT(bfloat16_t(0.5f * (val1 + val2)), MatchesBits(0x3c00));
EXPECT_THAT(bfloat16_t(0.5f * (val2 + val3)), MatchesBits(0x3c02));
}
TEST(Bfloat16Test, ConversionFromInt) {
EXPECT_THAT(bfloat16_t(-1), MatchesBits(0xbf80));
EXPECT_THAT(bfloat16_t(0), MatchesBits(0x0000));
EXPECT_THAT(bfloat16_t(1), MatchesBits(0x3f80));
EXPECT_THAT(bfloat16_t(2), MatchesBits(0x4000));
EXPECT_THAT(bfloat16_t(3), MatchesBits(0x4040));
EXPECT_THAT(bfloat16_t(12), MatchesBits(0x4140));
}
TEST(Bfloat16Test, ConversionFromBool) {
EXPECT_THAT(bfloat16_t(false), MatchesBits(0x0000));
EXPECT_THAT(bfloat16_t(true), MatchesBits(0x3f80));
}
TEST(Bfloat16Test, ConversionToBool) {
EXPECT_EQ(static_cast<bool>(bfloat16_t(3)), true);
EXPECT_EQ(static_cast<bool>(bfloat16_t(0.33333f)), true);
EXPECT_EQ(bfloat16_t(-0.0), false);
EXPECT_EQ(static_cast<bool>(bfloat16_t(0.0)), false);
}
TEST(Bfloat16Test, ExplicitConversionToFloat) {
EXPECT_EQ(static_cast<float>(absl::bit_cast<bfloat16_t, uint16_t>(0x0000)),
0.0f);
EXPECT_EQ(static_cast<float>(absl::bit_cast<bfloat16_t, uint16_t>(0x3f80)),
1.0f);
}
TEST(Bfloat16Test, ImplicitConversionToFloat) {
EXPECT_EQ((absl::bit_cast<bfloat16_t, uint16_t>(0x0000)), 0.0f);
EXPECT_EQ((absl::bit_cast<bfloat16_t, uint16_t>(0x3f80)), 1.0f);
}
TEST(Bfloat16Test, Zero) {
EXPECT_EQ(bfloat16_t(0.0f), bfloat16_t(0.0f));
EXPECT_EQ(bfloat16_t(-0.0f), bfloat16_t(0.0f));
EXPECT_EQ(bfloat16_t(-0.0f), bfloat16_t(-0.0f));
EXPECT_THAT(bfloat16_t(0.0f), MatchesBits(0x0000));
EXPECT_THAT(bfloat16_t(-0.0f), MatchesBits(0x8000));
}
TEST(Bfloat16Test, DefaultConstruct) {
EXPECT_EQ(static_cast<float>(bfloat16_t()), 0.0f);
}
TEST(Bfloat16Test, Truncate0) {
TestTruncate(BinaryToFloat(0, 0x80, 0x48, 0xf5c3),
BinaryToFloat(0, 0x80, 0x48, 0x0000),
BinaryToFloat(0, 0x80, 0x49, 0x0000));
}
TEST(Bfloat16Test, Truncate1) {
TestTruncate(BinaryToFloat(1, 0x80, 0x48, 0xf5c3),
BinaryToFloat(1, 0x80, 0x48, 0x0000),
BinaryToFloat(1, 0x80, 0x49, 0x0000));
}
TEST(Bfloat16Test, Truncate2) {
TestTruncate(BinaryToFloat(0, 0x80, 0x48, 0x8000),
BinaryToFloat(0, 0x80, 0x48, 0x0000),
BinaryToFloat(0, 0x80, 0x48, 0x0000));
}
TEST(Bfloat16Test, Truncate3) {
TestTruncate(BinaryToFloat(0, 0xff, 0x00, 0x0001),
BinaryToFloat(0, 0xff, 0x40, 0x0000),
BinaryToFloat(0, 0xff, 0x40, 0x0000));
}
TEST(Bfloat16Test, Truncate4) {
TestTruncate(BinaryToFloat(0, 0xff, 0x7f, 0xffff),
BinaryToFloat(0, 0xff, 0x40, 0x0000),
BinaryToFloat(0, 0xff, 0x40, 0x0000));
}
TEST(Bfloat16Test, Truncate5) {
TestTruncate(BinaryToFloat(1, 0x80, 0x48, 0xc000),
BinaryToFloat(1, 0x80, 0x48, 0x0000),
BinaryToFloat(1, 0x80, 0x49, 0x0000));
}
TEST(Bfloat16Test, Truncate6) {
TestTruncate(BinaryToFloat(0, 0x80, 0x48, 0x0000),
BinaryToFloat(0, 0x80, 0x48, 0x0000),
BinaryToFloat(0, 0x80, 0x48, 0x0000));
}
TEST(Bfloat16Test, Truncate7) {
TestTruncate(BinaryToFloat(0, 0x80, 0x48, 0x4000),
BinaryToFloat(0, 0x80, 0x48, 0x0000),
BinaryToFloat(0, 0x80, 0x48, 0x0000));
}
TEST(Bfloat16Test, Truncate8) {
TestTruncate(BinaryToFloat(0, 0x80, 0x48, 0x8000),
BinaryToFloat(0, 0x80, 0x48, 0x0000),
BinaryToFloat(0, 0x80, 0x48, 0x0000));
}
TEST(Bfloat16Test, Truncate9) {
TestTruncate(BinaryToFloat(0, 0x00, 0x48, 0x8000),
BinaryToFloat(0, 0x00, 0x48, 0x0000),
BinaryToFloat(0, 0x00, 0x48, 0x0000));
}
TEST(Bfloat16Test, Truncate10) {
TestTruncate(BinaryToFloat(0, 0x00, 0x7f, 0xc000),
BinaryToFloat(0, 0x00, 0x7f, 0x0000),
BinaryToFloat(0, 0x00, 0x80, 0x0000));
}
TEST(Bfloat16Test, Conversion) {
for (int i = 0; i < 100; ++i) {
float a = i + 1.25;
bfloat16_t b = static_cast<bfloat16_t>(a);
float c = static_cast<float>(b);
EXPECT_LE(std::abs(c - a), a / 128);
}
}
TEST(Bfloat16Test, Epsilon) {
EXPECT_LE(1.0f,
static_cast<float>(std::numeric_limits<bfloat16_t>::epsilon() +
bfloat16_t(1.0f)));
EXPECT_EQ(1.0f,
static_cast<float>(std::numeric_limits<bfloat16_t>::epsilon() /
bfloat16_t(2.0f) +
bfloat16_t(1.0f)));
}
TEST(Bfloat16Test, NextAfter) {
const bfloat16_t one(1), two(2), zero(0),
nan = std::numeric_limits<bfloat16_t>::quiet_NaN(),
epsilon = std::numeric_limits<bfloat16_t>::epsilon(),
denorm_min = std::numeric_limits<bfloat16_t>::denorm_min();
EXPECT_EQ(epsilon, nextafter(one, two) - one);
EXPECT_EQ(-epsilon / 2, nextafter(one, zero) - one);
EXPECT_EQ(one, nextafter(one, one));
EXPECT_EQ(denorm_min, nextafter(zero, one));
EXPECT_EQ(-denorm_min, nextafter(zero, -one));
const bfloat16_t values[] = {zero, -zero, nan};
for (int i = 0; i < 3; ++i) {
auto a = values[i];
for (int j = 0; j < 3; ++j) {
if (i == j) continue;
auto b = values[j];
auto next_float =
std::nextafter(static_cast<float>(a), static_cast<float>(b));
auto next_bfloat16 = nextafter(a, b);
EXPECT_EQ(std::isnan(next_float), isnan(next_bfloat16));
if (!std::isnan(next_float)) {
EXPECT_EQ(next_float, next_bfloat16);
}
}
}
EXPECT_EQ(std::numeric_limits<bfloat16_t>::infinity(),
nextafter(std::numeric_limits<bfloat16_t>::max(),
std::numeric_limits<bfloat16_t>::infinity()));
}
TEST(Bfloat16Test, Negate) {
EXPECT_EQ(static_cast<float>(-bfloat16_t(3.0f)), -3.0f);
EXPECT_EQ(static_cast<float>(-bfloat16_t(-4.5f)), 4.5f);
}
#ifndef _MSC_VER
TEST(Bfloat16Test, DivisionByZero) {
EXPECT_TRUE(std::isnan(static_cast<float>(bfloat16_t(0.0 / 0.0))));
EXPECT_TRUE(std::isinf(static_cast<float>(bfloat16_t(1.0 / 0.0))));
EXPECT_TRUE(std::isinf(static_cast<float>(bfloat16_t(-1.0 / 0.0))));
EXPECT_TRUE(std::isnan(bfloat16_t(0.0 / 0.0)));
EXPECT_TRUE(std::isinf(bfloat16_t(1.0 / 0.0)));
EXPECT_TRUE(std::isinf(bfloat16_t(-1.0 / 0.0)));
}
#endif
TEST(Bfloat16Test, NonFinite) {
EXPECT_FALSE(std::isinf(
static_cast<float>(bfloat16_t(3.38e38f))));
EXPECT_FALSE(std::isnan(static_cast<float>(bfloat16_t(0.0f))));
EXPECT_TRUE(std::isinf(
static_cast<float>(absl::bit_cast<bfloat16_t, uint16_t>(0xff80))));
EXPECT_TRUE(std::isnan(
static_cast<float>(absl::bit_cast<bfloat16_t, uint16_t>(0xffc0))));
EXPECT_TRUE(std::isinf(
static_cast<float>(absl::bit_cast<bfloat16_t, uint16_t>(0x7f80))));
EXPECT_TRUE(std::isnan(
static_cast<float>(absl::bit_cast<bfloat16_t, uint16_t>(0x7fc0))));
EXPECT_FALSE(isinf(absl::bit_cast<bfloat16_t, uint16_t>(0x7bff)));
EXPECT_FALSE(isnan(absl::bit_cast<bfloat16_t, uint16_t>(0x0000)));
EXPECT_TRUE(isinf(absl::bit_cast<bfloat16_t, uint16_t>(0xff80)));
EXPECT_TRUE(isnan(absl::bit_cast<bfloat16_t, uint16_t>(0xffc0)));
EXPECT_TRUE(isinf(absl::bit_cast<bfloat16_t, uint16_t>(0x7f80)));
EXPECT_TRUE(isnan(absl::bit_cast<bfloat16_t, uint16_t>(0x7fc0)));
EXPECT_THAT(bfloat16_t(BinaryToFloat(0x0, 0xff, 0x40, 0x0)),
MatchesBits(0x7fe0));
EXPECT_THAT(bfloat16_t(BinaryToFloat(0x1, 0xff, 0x40, 0x0)),
MatchesBits(0xffe0));
EXPECT_THAT(
Float32ToBfloat16Truncate(BinaryToFloat(0x0, 0xff, 0x40, 0x0)),
MatchesBits(0x7fe0));
EXPECT_THAT(
Float32ToBfloat16Truncate(BinaryToFloat(0x1, 0xff, 0x40, 0x0)),
MatchesBits(0xffe0));
}
TEST(Bfloat16Test, NumericLimits) {
static_assert(std::numeric_limits<bfloat16_t>::is_signed);
EXPECT_EQ(
absl::bit_cast<uint16_t>(std::numeric_limits<bfloat16_t>::infinity()),
absl::bit_cast<uint16_t>(
bfloat16_t(std::numeric_limits<float>::infinity())));
constexpr uint16_t BFLOAT16_QUIET_BIT = 0x0040;
EXPECT_TRUE(isnan(std::numeric_limits<bfloat16_t>::quiet_NaN()));
EXPECT_TRUE(isnan(bfloat16_t(std::numeric_limits<float>::quiet_NaN())));
EXPECT_GT(
(absl::bit_cast<uint16_t>(std::numeric_limits<bfloat16_t>::quiet_NaN()) &
BFLOAT16_QUIET_BIT),
0);
EXPECT_GT((absl::bit_cast<uint16_t>(
bfloat16_t(std::numeric_limits<float>::quiet_NaN())) &
BFLOAT16_QUIET_BIT),
0);
EXPECT_TRUE(isnan(std::numeric_limits<bfloat16_t>::signaling_NaN()));
EXPECT_TRUE(isnan(bfloat16_t(std::numeric_limits<float>::signaling_NaN())));
EXPECT_EQ(0, (absl::bit_cast<uint16_t>(
std::numeric_limits<bfloat16_t>::signaling_NaN()) &
BFLOAT16_QUIET_BIT));
#ifndef _MSC_VER
EXPECT_EQ(0, (absl::bit_cast<uint16_t>(
bfloat16_t(std::numeric_limits<float>::signaling_NaN())) &
BFLOAT16_QUIET_BIT));
#endif
EXPECT_GT(std::numeric_limits<bfloat16_t>::min(), bfloat16_t(0.f));
EXPECT_GT(std::numeric_limits<bfloat16_t>::denorm_min(), bfloat16_t(0.f));
EXPECT_EQ(std::numeric_limits<bfloat16_t>::denorm_min() / bfloat16_t(2),
bfloat16_t(0.f));
}
TEST(Bfloat16Test, Arithmetic) {
EXPECT_EQ(static_cast<float>(bfloat16_t(2) + bfloat16_t(2)), 4);
EXPECT_EQ(static_cast<float>(bfloat16_t(2) + bfloat16_t(-2)), 0);
EXPECT_THAT(static_cast<float>(bfloat16_t(0.33333f) + bfloat16_t(0.66667f)),
NearFloat(1.0f));
EXPECT_EQ(static_cast<float>(bfloat16_t(2.0f) * bfloat16_t(-5.5f)), -11.0f);
EXPECT_THAT(static_cast<float>(bfloat16_t(1.0f) / bfloat16_t(3.0f)),
NearFloat(0.3339f));
EXPECT_EQ(static_cast<float>(-bfloat16_t(4096.0f)), -4096.0f);
EXPECT_EQ(static_cast<float>(-bfloat16_t(-4096.0f)), 4096.0f);
}
TEST(Bfloat16Test, Comparison) {
EXPECT_TRUE(bfloat16_t(1.0f) > bfloat16_t(0.5f));
EXPECT_TRUE(bfloat16_t(0.5f) < bfloat16_t(1.0f));
EXPECT_FALSE((bfloat16_t(1.0f) < bfloat16_t(0.5f)));
EXPECT_FALSE((bfloat16_t(0.5f) > bfloat16_t(1.0f)));
EXPECT_FALSE((bfloat16_t(4.0f) > bfloat16_t(4.0f)));
EXPECT_FALSE((bfloat16_t(4.0f) < bfloat16_t(4.0f)));
EXPECT_FALSE((bfloat16_t(0.0f) < bfloat16_t(-0.0f)));
EXPECT_FALSE((bfloat16_t(-0.0f) < bfloat16_t(0.0f)));
EXPECT_FALSE((bfloat16_t(0.0f) > bfloat16_t(-0.0f)));
EXPECT_FALSE((bfloat16_t(-0.0f) > bfloat16_t(0.0f)));
EXPECT_TRUE(bfloat16_t(0.2f) > bfloat16_t(-1.0f));
EXPECT_TRUE(bfloat16_t(-1.0f) < bfloat16_t(0.2f));
EXPECT_TRUE(bfloat16_t(-16.0f) < bfloat16_t(-15.0f));
EXPECT_TRUE(bfloat16_t(1.0f) == bfloat16_t(1.0f));
EXPECT_TRUE(bfloat16_t(1.0f) != bfloat16_t(2.0f));
#ifndef _MSC_VER
EXPECT_FALSE((bfloat16_t(0.0 / 0.0) == bfloat16_t(0.0 / 0.0)));
EXPECT_TRUE(bfloat16_t(0.0 / 0.0) != bfloat16_t(0.0 / 0.0));
EXPECT_FALSE((bfloat16_t(1.0) == bfloat16_t(0.0 / 0.0)));
EXPECT_FALSE((bfloat16_t(1.0) < bfloat16_t(0.0 / 0.0)));
EXPECT_FALSE((bfloat16_t(1.0) > bfloat16_t(0.0 / 0.0)));
EXPECT_TRUE(bfloat16_t(1.0) != bfloat16_t(0.0 / 0.0));
EXPECT_TRUE(bfloat16_t(1.0) < bfloat16_t(1.0 / 0.0));
EXPECT_TRUE(bfloat16_t(1.0) > bfloat16_t(-1.0 / 0.0));
#endif
}
constexpr float PI = 3.14159265358979323846f;
TEST(Bfloat16Test, BasicFunctions) {
EXPECT_EQ(static_cast<float>(abs(bfloat16_t(3.5f))), 3.5f);
EXPECT_EQ(static_cast<float>(abs(bfloat16_t(3.5f))), 3.5f);
EXPECT_EQ(static_cast<float>(abs(bfloat16_t(-3.5f))), 3.5f);
EXPECT_EQ(static_cast<float>(abs(bfloat16_t(-3.5f))), 3.5f);
EXPECT_EQ(static_cast<float>(floor(bfloat16_t(3.5f))), 3.0f);
EXPECT_EQ(static_cast<float>(floor(bfloat16_t(3.5f))), 3.0f);
EXPECT_EQ(static_cast<float>(floor(bfloat16_t(-3.5f))), -4.0f);
EXPECT_EQ(static_cast<float>(floor(bfloat16_t(-3.5f))), -4.0f);
EXPECT_EQ(static_cast<float>(ceil(bfloat16_t(3.5f))), 4.0f);
EXPECT_EQ(static_cast<float>(ceil(bfloat16_t(3.5f))), 4.0f);
EXPECT_EQ(static_cast<float>(ceil(bfloat16_t(-3.5f))), -3.0f);
EXPECT_EQ(static_cast<float>(ceil(bfloat16_t(-3.5f))), -3.0f);
EXPECT_FLOAT_EQ(static_cast<float>(sqrt(bfloat16_t(0.0f))), 0.0f);
EXPECT_FLOAT_EQ(static_cast<float>(sqrt(bfloat16_t(0.0f))), 0.0f);
EXPECT_FLOAT_EQ(static_cast<float>(sqrt(bfloat16_t(4.0f))), 2.0f);
EXPECT_FLOAT_EQ(static_cast<float>(sqrt(bfloat16_t(4.0f))), 2.0f);
EXPECT_FLOAT_EQ(static_cast<float>(pow(bfloat16_t(0.0f), bfloat16_t(1.0f))),
0.0f);
EXPECT_FLOAT_EQ(static_cast<float>(pow(bfloat16_t(0.0f), bfloat16_t(1.0f))),
0.0f);
EXPECT_FLOAT_EQ(static_cast<float>(pow(bfloat16_t(2.0f), bfloat16_t(2.0f))),
4.0f);
EXPECT_FLOAT_EQ(static_cast<float>(pow(bfloat16_t(2.0f), bfloat16_t(2.0f))),
4.0f);
EXPECT_EQ(static_cast<float>(exp(bfloat16_t(0.0f))), 1.0f);
EXPECT_EQ(static_cast<float>(exp(bfloat16_t(0.0f))), 1.0f);
EXPECT_THAT(static_cast<float>(exp(bfloat16_t(PI))),
NearFloat(20.f + static_cast<float>(PI)));
EXPECT_THAT(static_cast<float>(exp(bfloat16_t(PI))),
NearFloat(20.f + static_cast<float>(PI)));
EXPECT_EQ(static_cast<float>(expm1(bfloat16_t(0.0f))), 0.0f);
EXPECT_EQ(static_cast<float>(expm1(bfloat16_t(0.0f))), 0.0f);
EXPECT_THAT(static_cast<float>(expm1(bfloat16_t(2.0f))), NearFloat(6.375f));
EXPECT_THAT(static_cast<float>(expm1(bfloat16_t(2.0f))), NearFloat(6.375f));
EXPECT_EQ(static_cast<float>(log(bfloat16_t(1.0f))), 0.0f);
EXPECT_EQ(static_cast<float>(log(bfloat16_t(1.0f))), 0.0f);
EXPECT_THAT(static_cast<float>(log(bfloat16_t(10.0f))), NearFloat(2.296875f));
EXPECT_THAT(static_cast<float>(log(bfloat16_t(10.0f))), NearFloat(2.296875f));
EXPECT_EQ(static_cast<float>(log1p(bfloat16_t(0.0f))), 0.0f);
EXPECT_EQ(static_cast<float>(log1p(bfloat16_t(0.0f))), 0.0f);
EXPECT_THAT(static_cast<float>(log1p(bfloat16_t(10.0f))),
NearFloat(2.390625f));
EXPECT_THAT(static_cast<float>(log1p(bfloat16_t(10.0f))),
NearFloat(2.390625f));
}
TEST(Bfloat16Test, TrigonometricFunctions) {
EXPECT_THAT(cos(bfloat16_t(0.0f)), NearFloat(bfloat16_t(std::cos(0.0f))));
EXPECT_THAT(cos(bfloat16_t(0.0f)), NearFloat(bfloat16_t(std::cos(0.0f))));
EXPECT_FLOAT_EQ(cos(bfloat16_t(PI)), bfloat16_t(std::cos(PI)));
EXPECT_NEAR(cos(bfloat16_t(PI / 2)), bfloat16_t(std::cos(PI / 2)), 1e-3);
EXPECT_NEAR(cos(bfloat16_t(3 * PI / 2)), bfloat16_t(std::cos(3 * PI / 2)),
1e-2);
EXPECT_THAT(cos(bfloat16_t(3.5f)), NearFloat(bfloat16_t(std::cos(3.5f))));
EXPECT_FLOAT_EQ(sin(bfloat16_t(0.0f)), bfloat16_t(std::sin(0.0f)));
EXPECT_FLOAT_EQ(sin(bfloat16_t(0.0f)), bfloat16_t(std::sin(0.0f)));
EXPECT_NEAR(sin(bfloat16_t(PI)), bfloat16_t(std::sin(PI)), 1e-3);
EXPECT_THAT(sin(bfloat16_t(PI / 2)), NearFloat(bfloat16_t(std::sin(PI / 2))));
EXPECT_THAT(sin(bfloat16_t(3 * PI / 2)),
NearFloat(bfloat16_t(std::sin(3 * PI / 2))));
EXPECT_THAT(sin(bfloat16_t(3.5f)), NearFloat(bfloat16_t(std::sin(3.5f))));
EXPECT_FLOAT_EQ(tan(bfloat16_t(0.0f)), bfloat16_t(std::tan(0.0f)));
EXPECT_FLOAT_EQ(tan(bfloat16_t(0.0f)), bfloat16_t(std::tan(0.0f)));
EXPECT_NEAR(tan(bfloat16_t(PI)), bfloat16_t(std::tan(PI)), 1e-3);
EXPECT_THAT(tan(bfloat16_t(3.5f)), NearFloat(bfloat16_t(std::tan(3.5f))));
}
TEST(Bfloat16Test, JsonConversion) {
EXPECT_THAT(::nlohmann::json(bfloat16_t(1.5)), tensorstore::MatchesJson(1.5));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/bfloat16.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/bfloat16_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
b9c06777-8747-4fb4-8b7a-eda275f5c353 | cpp | tensorflow/tensorflow | hlo_graph_dumper | third_party/xla/xla/service/hlo_graph_dumper.cc | third_party/xla/xla/service/hlo_graph_dumper_test.cc | #include "xla/service/hlo_graph_dumper.h"
#include <cstdint>
#include <unordered_map>
#include "absl/base/const_init.h"
#include "absl/base/thread_annotations.h"
#include "absl/hash/hash.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/shape.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/file_system.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/thread_annotations.h"
#ifndef _WIN32
#include <unistd.h>
#endif
#include <algorithm>
#include <atomic>
#include <deque>
#include <functional>
#include <map>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/dnn.h"
#include "xla/tsl/lib/gtl/map_util.h"
#include "xla/tsl/lib/io/zlib_compression_options.h"
#include "xla/tsl/lib/io/zlib_outputbuffer.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "tsl/platform/base64.h"
#include "tsl/platform/env.h"
#include "tsl/platform/numbers.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/regexp.h"
#include "tsl/platform/status.h"
namespace xla {
namespace {
using absl::StrAppend;
using absl::StrCat;
using absl::StrFormat;
using absl::StrJoin;
using std::nullopt;
using std::optional;
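// How a NodeFilter classifies an instruction: rendered normally, hidden,
// highlighted, or rendered with some of its operands/users elided.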
enum NodeFilterResult {
kNormalNode,
kHideNode,
kHighlightNode,
kSomeOperandsOmitted,
kOmitNodeOperands,
kSomeUsersOmitted,
};
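// Wraps a classification function over HloInstructions and exposes the
// queries the dumper needs: whether to show, highlight, or de-emphasize a
// node, and whether its operands are partially or fully omitted.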
class NodeFilter {
public:
NodeFilter() : filter_([](const HloInstruction*) { return kNormalNode; }) {}
explicit NodeFilter(
std::function<NodeFilterResult(const HloInstruction* instr)> filter,
std::optional<int> num_rendered = std::nullopt)
: filter_(std::move(filter)), num_rendered_(num_rendered) {}
bool Show(const HloInstruction* instr) const {
return filter_(instr) != kHideNode;
}
bool Highlight(const HloInstruction* instr) const {
return filter_(instr) == kHighlightNode;
}
bool OmitOperands(const HloInstruction* instr) const {
return filter_(instr) == kOmitNodeOperands;
}
bool SomeOrAllOperandsOmitted(const HloInstruction* instr) const {
auto result = filter_(instr);
return result == kOmitNodeOperands || result == kSomeOperandsOmitted;
}
bool Deemphasized(const HloInstruction* instr) const {
auto result = filter_(instr);
return result == kOmitNodeOperands || result == kSomeOperandsOmitted ||
result == kSomeUsersOmitted;
}
std::optional<int> GetNumRendered() const { return num_rendered_; }
private:
std::function<NodeFilterResult(const HloInstruction* instr)> filter_;
std::optional<int> num_rendered_;
};
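// Treats an instruction as "small" when its shape holds fewer than 4096
// elements (opaque and token shapes always qualify). Small producers get
// hollow edge arrowheads and the lighter parameter color.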
bool IsSmall(const HloInstruction* instr) {
if (ShapeUtil::HasPrimitiveType(instr->shape(), OPAQUE_TYPE) ||
ShapeUtil::HasPrimitiveType(instr->shape(), TOKEN)) {
return true;
}
return ShapeUtil::ElementsInRecursive(instr->shape()) < 4096;
}
enum ColorScheme {
kBlue,
kBrown,
kDarkBlue,
kDarkGreen,
kDarkOrange,
kDarkRed,
kGray,
kGreen,
kOrange,
kPurple,
kRed,
kWhite,
kYellow,
kDashedBorder,
};
struct NodeColors {
std::string style;
std::string fill_color;
std::string stroke_color;
std::string font_color;
};
NodeColors NodeColorsForScheme(ColorScheme color) {
switch (color) {
case kBlue:
return NodeColors{"filled", "#bbdefb", "#8aacc8", "black"};
case kBrown:
return NodeColors{"filled", "#bcaaa4", "#8c7b75", "black"};
case kDarkBlue:
return NodeColors{"filled", "#1565c0", "#003c8f", "white"};
case kDarkGreen:
return NodeColors{"filled", "#2e7d32", "#005005", "white"};
case kDarkOrange:
return NodeColors{"filled", "#ffb74d", "#c88719", "black"};
case kDarkRed:
return NodeColors{"filled", "#b71c1c", "#7f0000", "white"};
case kGray:
return NodeColors{"filled", "#cfd8dc", "#9ea7aa", "black"};
case kGreen:
return NodeColors{"filled", "#c8e6c9", "#97b498", "black"};
case kOrange:
return NodeColors{"filled", "#ffe0b2", "#cbae82", "black"};
case kPurple:
return NodeColors{"filled", "#e1bee7", "#af8eb5", "black"};
case kRed:
return NodeColors{"filled", "#ffcdd2", "#cb9ca1", "black"};
case kWhite:
return NodeColors{"filled", "white", "#9e9e9e", "black"};
case kYellow:
return NodeColors{"filled", "#fff9c4", "#cbc693", "black"};
case kDashedBorder:
return NodeColors{"filled,dashed", "white", "#757575", "#757575"};
}
}
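// Maps a statistic value, bucketed by tens up to 100, onto a gray-to-red
// gradient; hotter values fill with more saturated reds.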
std::string NodeFillColorForStatistic(const Statistic& statistic) {
auto stat_val = statistic.stat_val();
if (stat_val == 0) {
return "#f5f5f5";
} else if (stat_val < 10) {
return "#f7d4cc";
} else if (stat_val < 20) {
return "#f8b2a3";
} else if (stat_val < 30) {
return "#f9a28f";
} else if (stat_val < 40) {
return "#fa917b";
} else if (stat_val < 50) {
return "#fb8066";
} else if (stat_val < 60) {
return "#fc7052";
} else if (stat_val < 70) {
return "#fd5f3d";
} else if (stat_val < 80) {
return "#fd4e29";
} else if (stat_val < 90) {
return "#fe3e14";
} else {
return "#ff2d00";
}
}
std::string NodeFontColorForStatistic(const Statistic& statistic) {
if (statistic.stat_val() < 60) {
return "black";
} else {
return "white";
}
}
std::string NodeColorAttributes(ColorScheme color) {
NodeColors node_colors = NodeColorsForScheme(color);
return StrFormat(R"(style="%s", fontcolor="%s", color="%s", fillcolor="%s")",
node_colors.style, node_colors.font_color,
node_colors.stroke_color, node_colors.fill_color);
}
std::string HtmlLikeStringSanitize(absl::string_view s) {
return absl::StrReplaceAll(s,
                             {{"<", "&lt;"}, {">", "&gt;"}, {"\"", "&quot;"}});
}
bool IsFusedBroadcastOfConstantEffectiveScalar(const HloInstruction* instr) {
namespace m = match;
return instr->parent()->IsFusionComputation() &&
Match(instr, m::Broadcast(m::ConstantEffectiveScalar()));
}
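// Recognizes trivial computations -- a single effective-scalar binary op over
// two parameters -- and names them ("add", "min", "less-than", ...). Returns
// nullopt otherwise, including for non-commutative comparisons whose
// parameters arrive swapped.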
optional<std::string> MatchTrivialComputation(
const HloComputation* computation) {
namespace m = match;
if (computation->instruction_count() != 3) {
return nullopt;
}
HloInstruction* root = computation->root_instruction();
const HloInstruction *param0, *param1;
if (!Match(root, m::Op()
.WithNumOperands(2)
.WithShape(m::Shape().IsEffectiveScalar())
.WithBinaryOperandsAnyOrder(
m::Parameter(¶m0, 0)
.WithShape(m::Shape().IsEffectiveScalar()),
m::Parameter(¶m1, 1)
.WithShape(m::Shape().IsEffectiveScalar())))) {
return nullopt;
}
if (root->operand(0) == param1) {
CHECK_EQ(root->operand(1), param0);
    if (root->opcode() == HloOpcode::kCompare) {
switch (root->comparison_direction()) {
case ComparisonDirection::kLe:
case ComparisonDirection::kGe:
case ComparisonDirection::kGt:
case ComparisonDirection::kLt:
return nullopt;
default:
break;
}
}
}
switch (root->opcode()) {
case HloOpcode::kAdd:
return "add";
case HloOpcode::kMultiply:
return "multiply";
case HloOpcode::kMinimum:
return "min";
case HloOpcode::kMaximum:
return "max";
case HloOpcode::kXor:
return "xor";
case HloOpcode::kAnd:
return "and";
case HloOpcode::kOr:
return "or";
case HloOpcode::kCompare: {
switch (root->comparison_direction()) {
case ComparisonDirection::kLe:
return "less-or-equal";
case ComparisonDirection::kGe:
return "greater-or-equal";
case ComparisonDirection::kGt:
return "greater-than";
case ComparisonDirection::kLt:
return "less-than";
case ComparisonDirection::kEq:
return "equal-to";
case ComparisonDirection::kNe:
return "not-equal-to";
}
}
default:
return nullopt;
}
}
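// Renders one HloComputation as a Graphviz DOT graph, assigning stable
// node/edge/cluster ids so the emitted CSS hover rules can reference them.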
class HloDotDumper {
public:
HloDotDumper(
const HloComputation* computation, absl::string_view label,
const DebugOptions& debug_options, HloRenderOptions hlo_render_options,
NodeFilter filter,
std::optional<absl::flat_hash_map<const HloInstruction*, ColorStats>>
color_map = std::nullopt)
: computation_(computation),
label_(label),
debug_options_(debug_options),
hlo_render_options_(hlo_render_options),
filter_(std::move(filter)),
color_map_(color_map) {}
std::string Dump();
std::optional<std::string> CssIdForInstruction(const HloInstruction& instr) {
if (instr.opcode() == HloOpcode::kFusion) {
auto it = cluster_ids_.find(instr.called_computations()[0]);
if (it == cluster_ids_.end()) {
return std::nullopt;
}
return StrCat("#a_clust", it->second, " path");
}
auto it = node_ids_.find(&instr);
if (it == node_ids_.end()) {
return std::nullopt;
}
return StrCat("#node", it->second, " polygon");
}
private:
std::string InstructionId(const HloInstruction* instruction) {
return StrCat(reinterpret_cast<uint64_t>(instruction));
}
std::string SubcomputationId(const HloComputation* computation) {
return StrCat("cluster_", reinterpret_cast<uint64_t>(computation));
}
std::string Header();
std::string Footer();
bool ShouldShowSubcomputation(const HloComputation* subcomp);
bool ShouldShowFusionSubcomputation(const HloInstruction* instr);
bool ShouldMergeIntoUsers(const HloInstruction* instr) const;
std::string DumpSubcomputation(const HloComputation* subcomp,
const HloInstruction* parent_instr);
std::string DumpComputation(const HloComputation* comp);
std::string DumpRootTag();
std::string DumpInstruction(const HloInstruction* instr);
ColorScheme GetInstructionColor(const HloInstruction* instr);
std::string GetInstructionNodeShape(const HloInstruction* instr);
std::string GetInstructionNodeLabel(const HloInstruction* instr);
std::string GetInstructionNodeMetadata(const HloInstruction* instr);
std::string GetInstructionNodeBackendConfig(const HloInstruction* instr);
std::string GetInstructionNodeExtraInfo(const HloInstruction* instr);
std::string GetInstructionNodeInlinedOperands(const HloInstruction* instr);
void AddInstructionIncomingEdges(const HloInstruction* instr);
const HloInstruction* GetNodeForEdge(const HloInstruction* instr);
std::string GetInstructionTrivialComputationStr(const HloInstruction* instr);
const HloComputation* computation_;
const std::string label_;
const DebugOptions& debug_options_;
const HloRenderOptions hlo_render_options_;
const NodeFilter filter_;
const std::optional<absl::flat_hash_map<const HloInstruction*, ColorStats>>
color_map_;
int64_t next_node_id_ = 1;
absl::flat_hash_map<const HloInstruction*, int64_t> node_ids_;
int64_t root_node_id_;
int64_t next_edge_id_ = 1;
std::unordered_multimap<
std::pair<const HloInstruction*, const HloInstruction*>, int64_t,
absl::Hash<std::pair<const HloInstruction*, const HloInstruction*>>>
edge_ids_;
int64_t next_cluster_id_ = 1;
absl::flat_hash_map<const HloComputation*, int64_t> cluster_ids_;
std::vector<std::string> edges_;
absl::flat_hash_map<HloSharding, ColorScheme> sharding_colors_;
int64_t next_shard_color_ = 0;
};
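// The body is generated before the header because the header's CSS hover
// rules are derived from edge_ids_, which is only populated while dumping
// instructions and edges.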
std::string HloDotDumper::Dump() {
std::string body;
StrAppend(&body, DumpComputation(computation_));
StrAppend(&body, DumpRootTag());
std::string g = Header();
StrAppend(&g, body);
StrAppend(&g, Footer());
return g;
}
std::string HloDotDumper::Header() {
constexpr char fmt[] = R"(digraph G {
rankdir = TB;
compound = true;
label = <<b>%s</b>>;
labelloc = t;
tooltip = " ";
stylesheet=<
data:text/css,
@import url(https:
svg text {
font-family: 'Roboto';
font-size: 12px;
}
%s
>
)";
VLOG(3) << "Generating Header";
std::string graph_label =
StrCat(label_, "<br/>Computation ", computation_->name());
if (computation_->IsFusionComputation()) {
StrAppend(&graph_label, " (in fusion instruction ",
computation_->FusionInstruction()->name(), ")");
}
std::vector<std::string> edge_css_rules;
std::string kBlue = "#1976d2";
std::string kRed = "#d32f2f";
for (const auto& kv : edge_ids_) {
const HloInstruction* from_node = kv.first.first;
const HloInstruction* to_node = kv.first.second;
int64_t edge_id = kv.second;
auto add_hover_css_rule = [&](std::string elem_type, int64_t elem_id,
std::string color) {
edge_css_rules.push_back(
StrFormat(" #%s%d:hover ~ #edge%d text { fill: %s; }\n"
" #%s%d:hover ~ #edge%d path { "
"stroke: %s; stroke-width: .2em; }\n"
" #%s%d:hover ~ #edge%d polygon { "
"fill: %s; stroke: %s; stroke-width: .2em; }\n",
elem_type, elem_id, edge_id, color,
elem_type, elem_id, edge_id, color,
elem_type, elem_id, edge_id, color, color));
};
int64_t from_node_id = tsl::gtl::FindWithDefault(node_ids_, from_node, -1);
if (from_node_id == -1) {
LOG(FATAL) << from_node->name() << " was added to edges but not to nodes";
}
int64_t to_node_id = to_node
? tsl::gtl::FindWithDefault(node_ids_, to_node, -1)
: root_node_id_;
if (to_node != nullptr && to_node_id == -1) {
LOG(FATAL) << to_node->name() << " was added to edges but not to nodes";
}
add_hover_css_rule("node", from_node_id, kBlue);
add_hover_css_rule("node", to_node_id, kRed);
if (to_node) {
VLOG(3) << "Adding css for edge " << edge_id << " from node "
<< from_node->name() << " to node " << to_node->name();
} else {
VLOG(3) << "Adding css for edge " << edge_id << " from node "
<< from_node->name() << " to root tag";
}
if (to_node) {
if (from_node->IsFused() &&
from_node->parent()->root_instruction() == from_node) {
int64_t cluster_id = cluster_ids_.at(from_node->parent());
add_hover_css_rule("clust", cluster_id, kBlue);
}
if (to_node->IsFused() && to_node->opcode() == HloOpcode::kParameter) {
int64_t cluster_id = cluster_ids_.at(to_node->parent());
add_hover_css_rule("clust", cluster_id, kRed);
}
}
}
return StrFormat(
fmt, graph_label,
absl::StrReplaceAll(StrJoin(edge_css_rules, "\n"), {{"#", "%23"}}));
}
std::string HloDotDumper::Footer() {
return StrCat(StrJoin(edges_, "\n"), "\n}");
}
bool HloDotDumper::ShouldShowFusionSubcomputation(const HloInstruction* instr) {
CHECK_EQ(instr->opcode(), HloOpcode::kFusion);
return ShouldShowSubcomputation(instr->fused_instructions_computation());
}
bool HloDotDumper::ShouldShowSubcomputation(const HloComputation* subcomp) {
if (subcomp->IsFusionComputation()) {
const HloInstruction* fusion = subcomp->FusionInstruction();
if (!filter_.Show(fusion) || filter_.SomeOrAllOperandsOmitted(fusion) ||
!hlo_render_options_.show_fusion_subcomputations) {
return false;
}
}
if (!subcomp->IsFusionComputation() && MatchTrivialComputation(subcomp)) {
return false;
}
if (subcomp->WhileCallInstruction() != nullptr &&
!hlo_render_options_.show_while_subcomputations) {
return false;
}
return absl::c_any_of(
subcomp->instructions(),
[&](const HloInstruction* instr) { return filter_.Show(instr); });
}
std::string HloDotDumper::DumpSubcomputation(
const HloComputation* subcomp, const HloInstruction* parent_instr) {
VLOG(2) << "Dumping subcomputation " << subcomp->name();
if (parent_instr->opcode() != HloOpcode::kFusion) {
const HloInstruction* from = GetNodeForEdge(subcomp->root_instruction());
VLOG(2) << "Edge: from " << from->name() << " to " << parent_instr->name()
<< " as " << next_edge_id_;
edge_ids_.insert({{from, parent_instr}, next_edge_id_++});
constexpr char edge_fmt[] =
R"(%s -> %s [ltail="%s", style="dashed" tooltip="%s -> %s"];)";
edges_.push_back(StrFormat(
edge_fmt, InstructionId(from), InstructionId(parent_instr),
SubcomputationId(subcomp), subcomp->name(), parent_instr->name()));
}
if (cluster_ids_.find(subcomp) != cluster_ids_.end()) {
return "";
}
cluster_ids_[subcomp] = next_cluster_id_++;
std::string id = SubcomputationId(subcomp);
std::string subcomp_label, style;
if (parent_instr->opcode() == HloOpcode::kFusion) {
subcomp_label =
StrFormat("Fused expression for <b>%s</b><br/>%s",
HtmlLikeStringSanitize(parent_instr->name()),
HtmlLikeStringSanitize(parent_instr->ToCategory()));
std::string extra_info = GetInstructionNodeExtraInfo(parent_instr);
if (!extra_info.empty()) {
StrAppend(&subcomp_label, "<br/>", extra_info);
}
std::string node_backend_config =
GetInstructionNodeBackendConfig(parent_instr);
if (!node_backend_config.empty()) {
StrAppend(&subcomp_label, "<br/>", node_backend_config);
}
bool highlight = filter_.Highlight(parent_instr);
std::string fillcolor;
std::string strokecolor;
if (!highlight && (parent_instr->module_has_statistics() ||
parent_instr->has_statistics())) {
fillcolor = parent_instr->has_statistics()
? NodeFillColorForStatistic(
parent_instr->statistic_to_visualize())
: "#f5f5f5";
strokecolor = "#c2c2c2";
} else if (debug_options_.xla_hlo_graph_sharding_color() && !highlight) {
NodeColors node_colors =
NodeColorsForScheme(GetInstructionColor(parent_instr));
fillcolor = node_colors.fill_color;
strokecolor = node_colors.stroke_color;
} else {
fillcolor = highlight ? "#ffcdd2" : "#f5f5f5";
strokecolor = highlight ? "#b71c1c" : "#c2c2c2";
}
style =
StrFormat(R"(style="rounded,filled,bold"; fillcolor="%s"; color="%s;")",
fillcolor, strokecolor);
} else {
subcomp_label = StrFormat("Subcomputation for <b>%s</b><br/>%s",
HtmlLikeStringSanitize(parent_instr->name()),
HtmlLikeStringSanitize(subcomp->name()));
style = "style=rounded; color=black;";
}
std::string comp_body = DumpComputation(subcomp);
constexpr char computation_fmt[] = R"(subgraph %s {
%s
label = <%s>;
labelloc = t;
tooltip = " ";
%s
}  // %s
)";
return StrFormat(computation_fmt, id, style, subcomp_label, comp_body, id);
}
std::string HloDotDumper::DumpComputation(const HloComputation* comp) {
std::string g;
for (const auto* instr : comp->instructions()) {
if (!filter_.Show(instr)) {
continue;
}
for (const HloComputation* subcomp : instr->called_computations()) {
if (ShouldShowSubcomputation(subcomp)) {
StrAppend(&g, DumpSubcomputation(subcomp, instr));
}
}
StrAppend(&g, DumpInstruction(instr));
}
return g;
}
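// Emits a synthetic circular "ROOT" marker plus an edge to it from the node
// rendered for the computation's root instruction.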
std::string HloDotDumper::DumpRootTag() {
const HloInstruction* from = GetNodeForEdge(computation_->root_instruction());
if (!filter_.Show(from) || from->opcode() == HloOpcode::kConstant ||
IsFusedBroadcastOfConstantEffectiveScalar(from)) {
return "";
}
auto from_id = InstructionId(from);
HloInstruction* to = nullptr;
auto to_id = SubcomputationId(computation_);
std::string node_body = "ROOT";
std::string node_shape = "circle";
ColorScheme color = kBrown;
VLOG(2) << "Adding root tag as node " << next_node_id_;
root_node_id_ = next_node_id_++;
VLOG(2) << "Adding edge from " << from->name() << " to root tag as "
<< next_edge_id_;
edge_ids_.insert({{from, to}, next_edge_id_++});
edges_.push_back(StrFormat(R"(%s -> %s [tooltip=" "];)", from_id, to_id));
return StrFormat(R"(%s [label=<%s>, shape=%s, tooltip=" ", %s];)"
"\n",
to_id, node_body, node_shape, NodeColorAttributes(color));
}
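// If instr is a fusion parameter whose corresponding fusion operand is a
// constant, returns that constant; otherwise returns nullptr.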
static const HloConstantInstruction* TryGetFusionParameterConstant(
const HloInstruction* instr) {
if (instr->opcode() != HloOpcode::kParameter || !instr->IsFused()) {
return nullptr;
}
const HloInstruction* fusion = instr->parent()->FusionInstruction();
const HloInstruction* operand = fusion->operand(instr->parameter_number());
return DynCast<HloConstantInstruction>(operand);
}
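// True when a node should be inlined into its users' labels rather than
// drawn separately: non-root get-tuple-elements, constants feeding fusion
// parameters, and tuple parameters whose many shown users are all GTEs.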
bool HloDotDumper::ShouldMergeIntoUsers(const HloInstruction* instr) const {
if ((instr->opcode() == HloOpcode::kGetTupleElement &&
instr != instr->parent()->root_instruction()) ||
TryGetFusionParameterConstant(instr) != nullptr) {
return true;
}
const int kMinUsersToOmit = 3;
return instr->opcode() == HloOpcode::kParameter && instr->shape().IsTuple() &&
!instr->IsFused() &&
absl::c_count_if(instr->users(),
[&](const HloInstruction* user) {
return filter_.Show(user);
}) > kMinUsersToOmit &&
absl::c_all_of(instr->users(), [&](const HloInstruction* user) {
return !filter_.Show(user) ||
user->opcode() == HloOpcode::kGetTupleElement;
});
}
std::string HloDotDumper::DumpInstruction(const HloInstruction* instr) {
if ((instr->opcode() == HloOpcode::kConstant ||
IsFusedBroadcastOfConstantEffectiveScalar(instr)) &&
instr != instr->parent()->root_instruction()) {
return "";
}
if (ShouldMergeIntoUsers(instr)) {
return "";
}
if (instr->opcode() == HloOpcode::kFusion &&
ShouldShowFusionSubcomputation(instr)) {
return "";
}
VLOG(2) << "Adding node " << instr->name() << " as " << next_node_id_;
node_ids_[instr] = next_node_id_++;
std::string node_shape = GetInstructionNodeShape(instr);
std::string node_label = GetInstructionNodeLabel(instr);
std::string node_metadata = GetInstructionNodeMetadata(instr);
std::string node_backend_config = GetInstructionNodeBackendConfig(instr);
std::string extra_info = GetInstructionNodeExtraInfo(instr);
std::string inlined_constants = GetInstructionNodeInlinedOperands(instr);
std::string trivial_subcomputation =
GetInstructionTrivialComputationStr(instr);
AddInstructionIncomingEdges(instr);
NodeColors node_colors;
std::string node_style;
std::string node_attributes;
if (hlo_render_options_.override_node_colors && color_map_.has_value()) {
if (color_map_->contains(instr)) {
node_colors.fill_color = color_map_->at(instr).color;
node_attributes = color_map_->at(instr).stats;
} else {
      VLOG(2) << "color_map_ for instruction: " << instr->name()
              << " is empty\n";
node_colors.fill_color = "#808080";
}
node_colors.style = "filled";
node_colors.font_color = "black";
node_colors.stroke_color = "#c2c2c2";
node_style =
StrFormat(R"(style="%s", fontcolor="%s", color="%s", fillcolor="%s")",
node_colors.style, node_colors.font_color,
node_colors.stroke_color, node_colors.fill_color);
} else {
ColorScheme color = GetInstructionColor(instr);
if (!debug_options_.xla_hlo_graph_sharding_color()) {
if (filter_.Deemphasized(instr)) {
color = kDashedBorder;
}
if (filter_.Highlight(instr)) {
node_shape = "diamond";
color = kDarkRed;
}
}
node_colors = NodeColorsForScheme(color);
if (instr->has_statistics()) {
const auto& statistic_to_visualize = instr->statistic_to_visualize();
node_colors.fill_color =
NodeFillColorForStatistic(statistic_to_visualize);
node_colors.stroke_color = "#c2c2c2";
node_colors.font_color =
NodeFontColorForStatistic(statistic_to_visualize);
} else if (instr->module_has_statistics()) {
node_colors.fill_color = "#f5f5f5";
node_colors.stroke_color = "#c2c2c2";
node_colors.font_color = "black";
}
node_style =
StrFormat(R"(style="%s", fontcolor="%s", color="%s", fillcolor="%s")",
node_colors.style, node_colors.font_color,
node_colors.stroke_color, node_colors.fill_color);
}
std::string node_body = node_label;
for (const std::string& s :
{trivial_subcomputation, extra_info, inlined_constants,
node_backend_config, node_attributes}) {
if (!s.empty()) {
StrAppend(&node_body, "<br/>", s);
}
}
return StrFormat(R"(%s [label=<%s>, shape=%s, tooltip="%s", %s];)"
"\n",
InstructionId(instr), node_body, node_shape, node_metadata,
node_style);
}
std::string HloDotDumper::GetInstructionNodeInlinedOperands(
const HloInstruction* instr) {
auto stringify_constant = [](const HloConstantInstruction* constant,
const Shape& shape) {
if (ShapeUtil::IsZeroElementArray(shape)) {
return StrFormat("{} (%s)", ShapeUtil::HumanString(constant->shape()));
}
optional<int64_t> elem_count;
if (shape.IsArray()) {
elem_count = ShapeUtil::ElementsIn(constant->shape());
}
if (elem_count.has_value() && *elem_count <= 8 && constant->HasLiteral()) {
std::string literal_str = constant->literal().ToStringWithoutShape();
if (literal_str.size() <= 64) {
return StrFormat("%s %s", shape.ToString(), literal_str);
}
}
std::string constant_name;
if (absl::StartsWith(constant->name(), "constant")) {
constant_name = std::string(constant->name());
} else {
constant_name = StrCat("constant ", constant->name());
}
return StrFormat("%s %s", constant_name, ShapeUtil::HumanString(shape));
};
std::vector<std::string> lines;
constexpr int64_t kMaxOperandsShown = 32;
for (int64_t i = 0; i < instr->operand_count(); ++i) {
const HloInstruction* operand = instr->operand(i);
optional<std::string> operand_str;
if (const auto* constant_operand =
DynCast<HloConstantInstruction>(operand)) {
operand_str =
stringify_constant(constant_operand, constant_operand->shape());
} else if (IsFusedBroadcastOfConstantEffectiveScalar(operand)) {
operand_str = stringify_constant(
Cast<HloConstantInstruction>(operand->operand(0)), operand->shape());
} else if (ShouldMergeIntoUsers(operand)) {
if (operand->opcode() == HloOpcode::kParameter) {
if (const HloConstantInstruction* constant =
TryGetFusionParameterConstant(operand)) {
operand_str = stringify_constant(constant, constant->shape());
} else {
operand_str = StrFormat("Parameter %d", operand->parameter_number());
}
} else if (operand->opcode() == HloOpcode::kGetTupleElement) {
operand_str =
StrFormat("tuple-element %d of %s %s", operand->tuple_index(),
operand->operand(0)->name(),
ShapeUtil::HumanStringWithLayout(operand->shape()));
} else {
operand_str = std::string(operand->name());
}
}
if (operand_str) {
if (instr->operand_count() > 1) {
lines.push_back(StrFormat("<b>operand %d</b> = %s", i, *operand_str));
} else {
lines.push_back(StrFormat("<b>operand</b> = %s", *operand_str));
}
}
if (lines.size() == kMaxOperandsShown && i < instr->operand_count() - 1) {
lines.push_back("...");
break;
}
}
if (instr->opcode() == HloOpcode::kParameter && instr->IsFused()) {
const HloInstruction* param_input =
instr->parent()->FusionInstruction()->operand(
instr->parameter_number());
if (param_input->opcode() == HloOpcode::kGetTupleElement) {
lines.push_back(
StrFormat("tuple-element %d of %s %s", param_input->tuple_index(),
param_input->operand(0)->name(),
ShapeUtil::HumanStringWithLayout(param_input->shape())));
}
}
return StrJoin(lines, "<br/>");
}
ColorScheme HloDotDumper::GetInstructionColor(const HloInstruction* instr) {
if (debug_options_.xla_hlo_graph_sharding_color()) {
if (!instr->has_sharding()) {
return kDashedBorder;
}
auto it = sharding_colors_.find(instr->sharding());
if (it != sharding_colors_.end()) {
return it->second;
}
ColorScheme color = static_cast<ColorScheme>(
kBlue + (next_shard_color_++ % (kDashedBorder - kBlue)));
sharding_colors_.emplace(instr->sharding(), color);
return color;
}
auto parameter_color = IsSmall(instr) ? kOrange : kDarkOrange;
if (absl::c_any_of(instr->operands(), [&](const HloInstruction* operand) {
return operand->opcode() == HloOpcode::kParameter &&
ShouldMergeIntoUsers(operand) &&
TryGetFusionParameterConstant(operand) == nullptr;
})) {
return parameter_color;
}
switch (instr->opcode()) {
case HloOpcode::kAbs:
case HloOpcode::kAdd:
case HloOpcode::kAnd:
case HloOpcode::kAtan2:
case HloOpcode::kBitcastConvert:
case HloOpcode::kCeil:
case HloOpcode::kClamp:
case HloOpcode::kClz:
case HloOpcode::kCompare:
case HloOpcode::kComplex:
case HloOpcode::kConvert:
case HloOpcode::kCos:
case HloOpcode::kDivide:
case HloOpcode::kErf:
case HloOpcode::kExp:
case HloOpcode::kExpm1:
case HloOpcode::kFloor:
case HloOpcode::kImag:
case HloOpcode::kIota:
case HloOpcode::kIsFinite:
case HloOpcode::kLog:
case HloOpcode::kLog1p:
case HloOpcode::kMaximum:
case HloOpcode::kMinimum:
case HloOpcode::kMultiply:
case HloOpcode::kNegate:
case HloOpcode::kNot:
case HloOpcode::kPopulationCount:
case HloOpcode::kOr:
case HloOpcode::kXor:
case HloOpcode::kPower:
case HloOpcode::kReal:
case HloOpcode::kReducePrecision:
case HloOpcode::kRemainder:
case HloOpcode::kRng:
case HloOpcode::kRngGetAndUpdateState:
case HloOpcode::kRngBitGenerator:
case HloOpcode::kRoundNearestAfz:
case HloOpcode::kRoundNearestEven:
case HloOpcode::kRsqrt:
case HloOpcode::kSelect:
case HloOpcode::kShiftLeft:
case HloOpcode::kShiftRightArithmetic:
case HloOpcode::kShiftRightLogical:
case HloOpcode::kStochasticConvert:
case HloOpcode::kLogistic:
case HloOpcode::kSign:
case HloOpcode::kSin:
case HloOpcode::kSlice:
case HloOpcode::kSort:
case HloOpcode::kTopK:
case HloOpcode::kSqrt:
case HloOpcode::kCbrt:
case HloOpcode::kSubtract:
case HloOpcode::kTan:
case HloOpcode::kTanh:
return kWhite;
case HloOpcode::kAddDependency:
case HloOpcode::kAfterAll:
case HloOpcode::kGetTupleElement:
case HloOpcode::kOptimizationBarrier:
case HloOpcode::kPad:
case HloOpcode::kTuple:
return kWhite;
case HloOpcode::kConstant:
return kWhite;
case HloOpcode::kBroadcast:
case HloOpcode::kDynamicUpdateSlice:
return kYellow;
case HloOpcode::kConcatenate:
case HloOpcode::kDynamicSlice:
case HloOpcode::kReshape:
case HloOpcode::kDynamicReshape:
case HloOpcode::kReverse:
case HloOpcode::kTranspose:
return kGreen;
case HloOpcode::kCopy:
case HloOpcode::kCopyStart:
case HloOpcode::kCopyDone:
return kGreen;
case HloOpcode::kBitcast:
if (!instr->IsFused()) {
return kWhite;
}
return kGreen;
case HloOpcode::kAsyncStart:
case HloOpcode::kAsyncUpdate:
case HloOpcode::kAsyncDone:
return GetInstructionColor(instr->async_wrapped_instruction());
case HloOpcode::kConvolution:
case HloOpcode::kDot:
case HloOpcode::kFft:
case HloOpcode::kTriangularSolve:
case HloOpcode::kCholesky:
return kDarkBlue;
case HloOpcode::kParameter:
return parameter_color;
case HloOpcode::kBatchNormGrad:
case HloOpcode::kBatchNormInference:
case HloOpcode::kBatchNormTraining:
case HloOpcode::kReduce:
case HloOpcode::kReduceWindow:
case HloOpcode::kScatter:
case HloOpcode::kSelectAndScatter:
case HloOpcode::kGather:
return kPurple;
case HloOpcode::kDomain:
case HloOpcode::kFusion:
case HloOpcode::kMap:
case HloOpcode::kGetDimensionSize:
case HloOpcode::kSetDimensionSize:
return kGray;
case HloOpcode::kAllGather:
case HloOpcode::kAllGatherStart:
case HloOpcode::kAllGatherDone:
case HloOpcode::kAllReduce:
case HloOpcode::kReduceScatter:
case HloOpcode::kAllReduceStart:
case HloOpcode::kAllReduceDone:
case HloOpcode::kAllToAll:
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kCollectivePermute:
case HloOpcode::kCollectivePermuteStart:
case HloOpcode::kCollectivePermuteDone:
case HloOpcode::kInfeed:
case HloOpcode::kOutfeed:
case HloOpcode::kPartitionId:
case HloOpcode::kRecv:
case HloOpcode::kRecvDone:
case HloOpcode::kSend:
case HloOpcode::kSendDone:
case HloOpcode::kReplicaId:
return kBrown;
case HloOpcode::kCall:
case HloOpcode::kConditional:
case HloOpcode::kCustomCall:
case HloOpcode::kWhile:
return kDarkGreen;
}
}
std::string HloDotDumper::GetInstructionNodeShape(const HloInstruction* instr) {
switch (instr->opcode()) {
case HloOpcode::kWhile:
return "ellipse";
default:
return "rect";
}
}
std::string HloDotDumper::GetInstructionNodeLabel(const HloInstruction* instr) {
if (instr->opcode() == HloOpcode::kParameter) {
return StrFormat("<b>Parameter %d</b>", instr->parameter_number());
}
if (absl::StartsWith(instr->name(), HloOpcodeString(instr->opcode()))) {
return StrFormat("<b>%s</b>", HtmlLikeStringSanitize(instr->name()));
}
std::string extended_opcode =
StrCat(HloOpcodeString(instr->opcode()),
instr->opcode() != HloOpcode::kFusion
? ""
: StrCat(":", xla::ToString(instr->fusion_kind())));
return StrFormat("<b>%s</b><br/>%s", HtmlLikeStringSanitize(instr->name()),
HtmlLikeStringSanitize(extended_opcode));
}
std::string HloDotDumper::GetInstructionNodeMetadata(
const HloInstruction* instr) {
std::vector<std::string> lines;
if (!instr->metadata().op_name().empty()) {
lines.push_back(HtmlLikeStringSanitize(instr->metadata().op_name()));
}
if (!instr->metadata().op_type().empty()) {
lines.push_back(StrFormat(
"op_type: %s", HtmlLikeStringSanitize(instr->metadata().op_type())));
}
if (!instr->metadata().source_file().empty() &&
instr->metadata().source_line() != 0) {
lines.push_back(StrFormat("source: %s:%d", instr->metadata().source_file(),
instr->metadata().source_line()));
}
if (instr->metadata().stack_frame_id() != 0) {
auto hlo_module = instr->parent()->parent();
int frame_id = instr->metadata().stack_frame_id();
while (frame_id != 0) {
HloModule::StackFrame frame = hlo_module->get_stack_frame(frame_id);
if (frame.empty()) {
break;
}
frame_id = frame.parent_frame_id;
lines.push_back(StrFormat(
"%s:%s:%d%s", frame.file_name, frame.function_name, frame.line,
frame.column == 0 ? "" : StrFormat(":%d", frame.column)));
}
}
return StrJoin(lines, "\n");
}
static std::vector<std::pair<std::string, std::string>>
ExtractCudnnConvBackendConfigProps(const gpu::CudnnConvBackendConfig& config) {
std::vector<std::pair<std::string, std::string>> props;
if (config.conv_result_scale() != 1) {
props.emplace_back("conv_result_scale", StrCat(config.conv_result_scale()));
}
if (config.side_input_scale() != 0 && config.side_input_scale() != 1) {
props.emplace_back("side_input_scale", StrCat(config.side_input_scale()));
}
if (config.activation_mode() == se::dnn::ActivationMode::kLeakyRelu) {
props.emplace_back("leakyrelu_alpha", StrCat(config.leakyrelu_alpha()));
}
props.emplace_back(
"activation_mode",
se::dnn::ActivationModeString(
static_cast<se::dnn::ActivationMode>(config.activation_mode())));
props.emplace_back("algo",
se::dnn::AlgorithmDesc(config.algorithm()).ToString());
return props;
}
static std::vector<std::pair<std::string, std::string>>
ExtractGemmBackendConfigProps(const gpu::GemmBackendConfig& config,
const HloInstruction* instr) {
std::vector<std::pair<std::string, std::string>> props;
if (primitive_util::IsComplexType(instr->shape().element_type())) {
if (config.alpha_real() != 1 || config.alpha_imag() != 1) {
props.emplace_back("alpha_real", StrCat(config.alpha_real()));
props.emplace_back("alpha_imag", StrCat(config.alpha_real()));
}
} else {
if (config.alpha_real() != 1) {
props.emplace_back("alpha", StrCat(config.alpha_real()));
}
}
if (config.beta() != 0 && config.beta() != 1) {
props.emplace_back("beta", StrCat(config.beta()));
}
props.emplace_back(
"", absl::StrReplaceAll(
DotDimensionNumbersToString(config.dot_dimension_numbers()),
{{", ", "<br/>"}}));
if (config.algorithm_case() == gpu::GemmBackendConfig::kSelectedAlgorithm) {
props.emplace_back("algorithm", StrCat(config.selected_algorithm()));
}
if (config.epilogue() != gpu::GemmBackendConfig::DEFAULT) {
props.emplace_back(
"epilogue", gpu::GemmBackendConfig::Epilogue_Name(config.epilogue()));
}
return props;
}
std::string HloDotDumper::GetInstructionNodeBackendConfig(
const HloInstruction* instr) {
std::vector<std::pair<std::string, std::string>> props;
if (gpu::IsCustomCallToDnnConvolution(*instr)) {
absl::StatusOr<gpu::GpuBackendConfig> config =
instr->backend_config<gpu::GpuBackendConfig>();
if (config.ok()) {
props = ExtractCudnnConvBackendConfigProps(
config->cudnn_conv_backend_config());
}
} else if (gpu::IsCublasGemm(*instr)) {
absl::StatusOr<gpu::GpuBackendConfig> config =
instr->backend_config<gpu::GpuBackendConfig>();
if (config.ok()) {
props =
ExtractGemmBackendConfigProps(config->gemm_backend_config(), instr);
}
}
if (!props.empty()) {
return StrCat((props.size() > 1 ? "<br/>" : ""),
StrJoin(props, "<br/>",
[](std::string* out,
const std::pair<std::string, std::string>& kv) {
if (!kv.first.empty()) {
return StrAppend(out, kv.first, "=", kv.second);
}
StrAppend(out, kv.second);
}));
}
if (!hlo_render_options_.show_backend_config ||
instr->raw_backend_config_string().empty()) {
return "";
}
return StrCat("backend_config=\"", instr->raw_backend_config_string(), "\"");
}
std::string HloDotDumper::GetInstructionNodeExtraInfo(
const HloInstruction* instr) {
std::vector<std::string> lines;
for (const auto& line : instr->ExtraAttributesToString(
HloPrintOptions().set_print_subcomputation_mode(
HloPrintOptions::PrintSubcomputationMode::kOff))) {
constexpr int kMaxDeviceIdFieldLen = 128;
if ((absl::StartsWith(line, "replica_groups=") ||
absl::StartsWith(line, "source_target_pairs=") ||
absl::StartsWith(line, "control-predecessors=")) &&
line.length() > kMaxDeviceIdFieldLen) {
lines.push_back(HtmlLikeStringSanitize(
StrCat(line.substr(0, kMaxDeviceIdFieldLen - 3), "...")));
} else if (absl::StartsWith(line, "feature_group_count=")) {
lines.push_back(StrFormat("<b>%s</b>", HtmlLikeStringSanitize(line)));
} else {
lines.push_back(HtmlLikeStringSanitize(line));
}
}
if (instr->opcode() != HloOpcode::kFusion ||
!ShouldShowFusionSubcomputation(instr)) {
bool shape_is_multidim = false;
ShapeUtil::ForEachSubshape(instr->shape(),
[&](const Shape& s, const ShapeIndex&) {
shape_is_multidim |= s.dimensions_size() > 1;
});
std::string instr_shape;
if (instr->opcode() != HloOpcode::kTuple && shape_is_multidim) {
instr_shape = ShapeUtil::HumanStringWithLayout(instr->shape());
} else {
instr_shape = ShapeUtil::HumanString(instr->shape());
}
constexpr int kMaxShapeLen = 64;
if (instr_shape.length() > kMaxShapeLen) {
instr_shape = StrCat(
absl::string_view(instr_shape).substr(0, kMaxShapeLen - 3), "...");
}
lines.push_back(HtmlLikeStringSanitize(instr_shape));
}
if (debug_options_.xla_hlo_graph_addresses()) {
lines.push_back(StrFormat("[%p]", instr));
}
return StrJoin(lines, "<br/>");
}
void HloDotDumper::AddInstructionIncomingEdges(const HloInstruction* instr) {
constexpr int kMaxEdgesBetweenTwoNodes = 64;
auto add_edge = [&](const HloInstruction* from, const HloInstruction* to,
int64_t operand_num, bool control_edge = false) {
if (edge_ids_.count({from, to}) > kMaxEdgesBetweenTwoNodes) {
return;
}
from = GetNodeForEdge(from);
if (!filter_.Show(from) || from->opcode() == HloOpcode::kConstant ||
IsFusedBroadcastOfConstantEffectiveScalar(from) ||
ShouldMergeIntoUsers(from)) {
return;
}
VLOG(2) << "Adding edge from " << from->name() << " to " << to->name()
<< " as " << next_edge_id_;
edge_ids_.insert({{from, to}, next_edge_id_++});
std::string edge_label;
if (control_edge) {
edge_label = "style=\"dotted\" color=\"gray\" label=\"ctrl\"";
} else if (instr->operand_count() > 1) {
edge_label =
StrFormat(R"( headlabel="%d", labeldistance=2)", operand_num);
}
constexpr char kEdgeFmt[] =
R"(%s -> %s [arrowhead=%s tooltip="%s -> %s" %s];)";
edges_.push_back(StrFormat(kEdgeFmt, InstructionId(from), InstructionId(to),
(IsSmall(from) ? "empty" : "normal"),
from->name(), to->name(), edge_label));
};
if (instr->opcode() == HloOpcode::kParameter && instr->IsFused()) {
if (instr->parent() != computation_) {
const HloInstruction* fusion = instr->parent()->FusionInstruction();
      add_edge(fusion->operand(instr->parameter_number()), instr,
               /*operand_num=*/0);
}
} else {
for (int64_t i = 0; i < instr->operand_count(); ++i) {
add_edge(instr->operand(i), instr, i);
}
for (const HloInstruction* pred : instr->control_predecessors()) {
      add_edge(pred, instr, /*operand_num=*/0, /*control_edge=*/true);
}
}
}
std::string HloDotDumper::GetInstructionTrivialComputationStr(
const HloInstruction* instr) {
if (instr->opcode() == HloOpcode::kFusion) {
return "";
}
std::vector<std::string> lines;
for (int64_t i = 0; i < instr->called_computations().size(); ++i) {
optional<std::string> computation_type =
MatchTrivialComputation(instr->called_computations()[i]);
if (!computation_type) {
continue;
}
if (instr->called_computations().size() == 1) {
lines.push_back(StrFormat("Subcomputation: <b>%s</b>",
HtmlLikeStringSanitize(*computation_type)));
} else {
lines.push_back(StrFormat("Subcomputation %d: <b>%s</b>", i,
HtmlLikeStringSanitize(*computation_type)));
}
}
return StrJoin(lines, "<br/>");
}
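// Walks from instr to the node actually rendered for it: get-tuple-elements
// defer to their operand, and fusions with visible subcomputations defer to
// their fused expression root.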
const HloInstruction* HloDotDumper::GetNodeForEdge(
const HloInstruction* instr) {
if (instr->opcode() == HloOpcode::kGetTupleElement) {
instr = instr->operand(0);
}
while (instr->opcode() == HloOpcode::kFusion &&
ShouldShowFusionSubcomputation(instr)) {
instr = instr->fused_expression_root();
}
return instr;
}
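// BFS outward from `root` for up to `radius` hops (bitcasts are free),
// stopping at `boundary` instructions; nodes whose operands or users fall
// outside the rendered set are tagged so the dump can show the truncation.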
NodeFilter MakeNodeRadiusAroundFilter(
const HloInstruction* root, int64_t radius,
const absl::flat_hash_set<const HloInstruction*>& boundary) {
absl::flat_hash_map<const HloInstruction*, NodeFilterResult> nodes;
std::deque<std::pair<const HloInstruction*, int64_t>> worklist;
worklist.push_back({root, 0});
while (!worklist.empty()) {
const HloInstruction* instr;
int64_t depth;
std::tie(instr, depth) = worklist.front();
worklist.pop_front();
nodes[instr] = kNormalNode;
if (depth == radius) {
continue;
}
if (boundary.contains(instr)) {
continue;
}
if (instr == root || instr->opcode() != HloOpcode::kTuple) {
for (const HloInstruction* operand : instr->operands()) {
if (!nodes.contains(operand)) {
int new_depth = (operand->opcode() == HloOpcode::kBitcast ||
instr->opcode() == HloOpcode::kBitcast)
? depth
: depth + 1;
worklist.push_back({operand, new_depth});
}
}
}
for (const HloComputation* computation : instr->called_computations()) {
worklist.push_back({computation->root_instruction(), depth + 1});
}
if (instr->opcode() == HloOpcode::kConstant) {
continue;
}
constexpr int kMaxUsersToRender = 16;
if (instr->user_count() > kMaxUsersToRender) {
nodes[instr] = kSomeUsersOmitted;
continue;
}
for (const HloInstruction* user : instr->users()) {
if (!nodes.contains(user)) {
worklist.push_back({user, depth + 1});
}
}
}
auto is_displayed = [&](const HloInstruction* instr) {
return nodes.contains(instr) || instr->opcode() == HloOpcode::kConstant ||
instr->parent() != root->parent();
};
for (auto& kv : nodes) {
const HloInstruction* instr = kv.first;
NodeFilterResult& filter_result = kv.second;
const auto& operands = instr->operands();
if (absl::c_any_of(operands, is_displayed) &&
!absl::c_all_of(operands, is_displayed)) {
filter_result = kSomeOperandsOmitted;
} else if (!operands.empty() && absl::c_none_of(operands, is_displayed)) {
filter_result = kOmitNodeOperands;
}
if (filter_result == kSomeUsersOmitted &&
absl::c_all_of(instr->users(), is_displayed)) {
filter_result = kNormalNode;
}
}
nodes[root] = kHighlightNode;
return NodeFilter(
[=](const HloInstruction* instr) {
auto it = nodes.find(instr);
if (it != nodes.end()) {
return it->second;
}
if (instr->parent() != root->parent()) {
return kNormalNode;
}
return kHideNode;
},
nodes.size());
}
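// Collects the nodes lying on user-paths from `from` to `to`, capped at
// `max_nodes` in total; *hit_limit reports whether the cap cut a path short.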
NodeFilter MakeNodeFromToFilter(const HloInstruction* from,
const HloInstruction* to, int64_t max_nodes,
bool* hit_limit) {
*hit_limit = false;
std::deque<std::vector<const HloInstruction*>> queue;
queue.push_front({from});
absl::flat_hash_set<const HloInstruction*> visited;
absl::flat_hash_set<const HloInstruction*> to_display = {from, to};
while (!queue.empty() && to_display.size() < max_nodes) {
std::vector<const HloInstruction*> path = std::move(queue.front());
queue.pop_front();
if (!visited.insert(path.back()).second) {
continue;
}
for (const auto* user : path.back()->users()) {
if (user == to) {
auto it = path.begin();
for (; it != path.end() && to_display.size() < max_nodes; ++it) {
to_display.insert(*it);
}
if (it != path.end()) {
*hit_limit = true;
}
} else if (!visited.count(user)) {
auto new_path = path;
new_path.push_back(user);
queue.push_back(std::move(new_path));
}
}
}
return NodeFilter([=](const HloInstruction* instr) {
if (instr == from || instr == to) {
return kHighlightNode;
}
return to_display.count(instr) ? kNormalNode : kHideNode;
});
}
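// Optional user-registered callback that converts a DOT graph into a
// viewable URL; guarded by url_renderer_mu.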
absl::Mutex url_renderer_mu(absl::kConstInit);
std::function<absl::StatusOr<std::string>(absl::string_view)>* url_renderer
ABSL_GUARDED_BY(url_renderer_mu) = nullptr;
absl::Mutex fusion_visualizer_state_mu(absl::kConstInit);
namespace {
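// Per-computation record of HLO-pass snapshots: consecutive duplicate DOT
// graphs are deduplicated, and each frame keeps its explanation label and an
// optional CSS selector to highlight.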
struct FusionVisualizerProgress {
void AddState(absl::string_view dot, absl::string_view explanation,
std::optional<std::string> to_highlight) {
if (dot_graphs.empty() || dot_graphs.back() != dot) {
dot_graphs.push_back(std::string(dot));
}
frames.push_back({static_cast<int>(dot_graphs.size() - 1),
std::string(explanation), to_highlight.value_or("")});
}
std::vector<std::string> dot_graphs;
struct FusionFrame {
int dot_graph;
std::string label;
std::string to_highlight;
};
std::vector<FusionFrame> frames;
};
}
static auto& fusion_visualizer_states
TF_GUARDED_BY(fusion_visualizer_state_mu) = *new absl::flat_hash_map<
std::pair<int64_t, int64_t>, FusionVisualizerProgress>();
static std::pair<int, int> FusionVisualizerStateKey(
const HloComputation& computation) {
return std::make_pair(computation.parent()->unique_id(),
computation.unique_id());
}
}
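// Gzips `input` and base64-encodes the result for embedding in the generated
// HTML; tsl emits web-safe base64, so '_' and '-' are mapped back to the
// standard '/' and '+' expected by the browser-side decoder.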
static absl::StatusOr<std::string> CompressAndEncode(absl::string_view input) {
class WritableStringFile : public tsl::WritableFile {
public:
explicit WritableStringFile(std::string* data) : data_(data){};
~WritableStringFile() override = default;
absl::Status Append(absl::string_view data) override {
absl::StrAppend(data_, data);
return absl::OkStatus();
}
absl::Status Close() override { return absl::OkStatus(); }
absl::Status Flush() override { return absl::OkStatus(); }
absl::Status Sync() override { return absl::OkStatus(); }
private:
std::string* data_;
};
std::string compressed;
WritableStringFile f(&compressed);
auto gz_opts = tsl::io::ZlibCompressionOptions::GZIP();
tsl::io::ZlibOutputBuffer gz_file(&f, gz_opts.input_buffer_size,
gz_opts.output_buffer_size, gz_opts);
TF_RETURN_IF_ERROR(gz_file.Init());
TF_RETURN_IF_ERROR(gz_file.Append(input));
TF_RETURN_IF_ERROR(gz_file.Close());
std::string encoded;
TF_RETURN_IF_ERROR(tsl::Base64Encode(compressed, &encoded));
return absl::StrReplaceAll(encoded, {{"_", "/"}, {"-", "+"}});
}
static std::string EscapeJSONString(absl::string_view raw) {
return absl::StrCat(
"\"",
absl::StrReplaceAll(raw, {{"\n", "\\n"}, {"\"", "\\\""}, {"\\", "\\\\"}}),
"\"");
}
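// Renders the recorded frames as a standalone HTML page: the DOT graphs ship
// gzip-compressed in a data URI and are laid out client-side with graphviz
// compiled to WASM; j/k (or clicking a frame) steps between states.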
absl::StatusOr<std::string> WrapFusionExplorer(
const FusionVisualizerProgress& visualizer_progress,
absl::string_view graph_title) {
if (visualizer_progress.frames.empty()) {
return Internal("Empty");
}
std::string dot_graphs =
StrFormat("[%s]", StrJoin(visualizer_progress.dot_graphs, ", ",
[&](std::string* out, const std::string& dot) {
StrAppend(out, EscapeJSONString(dot));
}));
std::string frames = StrJoin(
visualizer_progress.frames, ", ", [&](std::string* out, const auto& p) {
StrAppend(out, StrFormat("[%d, %s, %s]", p.dot_graph,
EscapeJSONString(p.label),
EscapeJSONString(p.to_highlight)));
});
TF_ASSIGN_OR_RETURN(std::string dot_graphs_compressed,
CompressAndEncode(dot_graphs));
return absl::StrReplaceAll(
R"wrapper(
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<style>
html, body {height: 100%; text-align: center;}
#rendered {height: 70%; width: 80%; border:1px solid black; margin: auto; }
#label {width: 80%; margin: auto;}
#performance_note { font-size: small; color: gray; }
#frames_list {
list-style: none; text-align: left; height: 20%; overflow: scroll;
}
#frames_list li { padding: 0.2em; margin: 0.2em; }
.selected { background-color: #e0e0e0; }
.selected a { color: black; text-decoration: none; }
#rendered svg { height: 100% !important; width: 100% !important; }
</style>
</head>
<body>
<script src="https:
integrity="sha384-LigJPbR3TOfU/Xbb+PjiN1dGJYPweLk7kiGnaMgmxnUmKWaCFKbb5tH6iLlyVhPZ"
crossorigin="anonymous"></script>
<script src="https:
</script>
<title>Fusion Explorer: $TITLE</title>
<div id='rendered'><center>Loading...</center></div>
<ul id='frames_list'></ul>
<p>Use j/k for keyboard navigation.</p>
<p id='performance_note'>Loading data...</p>
<script>
<!--
const renderCache = {};
const cssregex = new RegExp('stylesheet=<([^]*)\n>\n', 'gm');
const hpccWasm = window["@hpcc-js/wasm"];
const getIdFromHash = () => {
let hash = window.location.hash;
if (hash.indexOf('frame') == -1) {
return 0;
}
return parseInt(window.location.hash.substring('#frame'.length, window.location.hash.length));
}
const renderCurrentFrame = () => {
if (!window.loaded) { return; }
const frames_list = document.getElementById('frames_list');
const currId = getIdFromHash();
for (let selected of frames_list.getElementsByClassName('selected')) {
selected.classList.remove('selected');
}
const selected = frames_list.children[currId];
selected.classList.add('selected');
selected.scrollIntoView();
const frame = frames[currId];
const dot_ptr = frame[0];
let dot_txt = window.dots[dot_ptr];
const label = frame[1];
document.getElementById('performance_note').innerText = "Rendering...";
const results = cssregex.exec(dot_txt)
let css_data = ''
if (results !== null) {
css_data = results[1].replace(/\s*data:.*\s*,/,'');
css_data = unescape(css_data);
dot_txt = dot_txt.replace(cssregex, '');
}
let render_start = performance.now();
const render_callback = svg => {
renderCache[dot_ptr] = svg;
var area = document.getElementById('rendered');
area.innerHTML = `${svg}<style>${css_data}</style>`;
var panzoom = svgPanZoom(area.children[0], {
zoomEnabled: true, controlIconsEnabled: true, maxZoom: 200, });
var to_highlight = frame[2].length ?
document.querySelector(`${frame[2]}`) : null;
if (to_highlight) {
to_highlight.style.setProperty('fill', 'red');
}
document.getElementById('performance_note').innerText =
`Rendering took ${(performance.now() - render_start).toFixed(2)}ms`;
let text_nodes = document.getElementsByTagName("text");
for (var el of text_nodes) {
if (title_to_id.has(el.innerHTML)) {
el.style.cursor = "pointer";
}
}
};
if (renderCache[dot_ptr]) {
render_callback(renderCache[dot_ptr]);
} else {
hpccWasm.graphviz.layout(dot_txt, "svg", "dot").then(render_callback);
}
};
const update = (delta) => {
let currId = getIdFromHash();
currId = (currId + delta + frames.length) % frames.length;
window.location.hash = `#frame${currId}`
};
const renderFrameList = () => {
const currId = getIdFromHash();
const frames_list = document.getElementById('frames_list');
for (let i=0; i<frames.length; i++) {
const f = frames[i];
let frame_descr = f[1];
const rendered = document.createElement("li");
if (frame_descr == "") {
frame_descr = "Unnamed state";
}
rendered.innerHTML = `<a href="#frame${i}">${frame_descr}</a>`;
if (i == currId) {
rendered.classList.add('selected');
}
frames_list.appendChild(rendered);
}
};
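// Inverse of CompressAndEncode on the C++ side: fetches the base64 payload
// via a data: URL and gunzips it with the browser's DecompressionStream.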
const decompress = async function(compressed) {
const ds = new DecompressionStream('gzip');
const in_fetch = await fetch(`data:application/octet-stream;base64,${compressed}`);
const in_blob = await in_fetch.blob();
const out_stream = in_blob.stream().pipeThrough(ds);
const out_blob = await new Response(out_stream).blob();
return await out_blob.text();
}
const dots_compressed = "$DOTS";
const frames = [$FRAMES];
let loaded = false;
window.addEventListener('hashchange', () => {
renderCurrentFrame();
});
window.addEventListener("keydown", (event) => {
if (event.defaultPrevented) {
return;
}
if (event.key == "j") {
update(1);
} else if (event.key == "k") {
update(-1);
} else {
return;
}
event.preventDefault();
}, true);
document.addEventListener("DOMContentLoaded", () => {
decompress(dots_compressed).then(text => {
window.dots = JSON.parse(text);
window.loaded = true;
renderFrameList();
renderCurrentFrame();
});
window.title_to_id = new Map();
for (let i=0; i < frames.length; i++) {
title_to_id.set(frames[i][1], i);
}
document.addEventListener("click", (event) => {
let txt = event.target.innerHTML;
if (title_to_id.has(txt)) {
let id = title_to_id.get(txt);
window.location.hash = `#frame${id}`;
}
});
});
</script>
</body>
</html>
)wrapper",
{{"$DOTS", dot_graphs_compressed},
{"$FRAMES", frames},
{"$TITLE", graph_title}});
}
static std::string GraphTitle(const HloComputation& computation) {
return absl::StrCat(computation.parent()->name(), "_", computation.name());
}
absl::StatusOr<std::string> WrapFusionExplorer(
const HloComputation& computation) {
absl::MutexLock lock(&fusion_visualizer_state_mu);
const FusionVisualizerProgress& visualizer_progress =
fusion_visualizer_states[FusionVisualizerStateKey(computation)];
return WrapFusionExplorer(visualizer_progress, GraphTitle(computation));
}
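// Convenience wrapper that shows a single dot graph as a one-frame
// fusion-explorer page.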
static absl::StatusOr<std::string> WrapDotInHtml(absl::string_view dot,
absl::string_view title) {
FusionVisualizerProgress progress;
progress.AddState(dot, title, std::nullopt);
return WrapFusionExplorer(progress, title);
}
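// Converts `dot` to the requested output format. kUrl dereferences the
// renderer registered via RegisterGraphToURLRenderer, so callers must hold
// url_renderer_mu and have already checked that a renderer exists.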
static absl::StatusOr<std::string> WrapDotInFormat(
const HloComputation& computation, absl::string_view dot,
RenderedGraphFormat format) ABSL_EXCLUSIVE_LOCKS_REQUIRED(url_renderer_mu) {
switch (format) {
case RenderedGraphFormat::kUrl:
CHECK(url_renderer != nullptr)
<< "Should have checked url_renderer != null before calling.";
return (*url_renderer)(dot);
case RenderedGraphFormat::kHtml:
return WrapDotInHtml(dot, GraphTitle(computation));
case RenderedGraphFormat::kDot:
return std::string(dot);
}
}
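// Installs the URL renderer used for RenderedGraphFormat::kUrl. The last
// registration wins; any previously registered renderer is deleted.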
void RegisterGraphToURLRenderer(
std::function<absl::StatusOr<std::string>(absl::string_view)> renderer) {
absl::MutexLock lock(&url_renderer_mu);
if (url_renderer != nullptr) {
LOG(WARNING) << "Multiple calls to RegisterGraphToURLRenderer. Last call "
"wins, but because order of initialization in C++ is "
"nondeterministic, this may not be what you want.";
}
delete url_renderer;
url_renderer =
new std::function<absl::StatusOr<std::string>(absl::string_view)>(
std::move(renderer));
}
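// Appends one frame to the visualizer state of `computation`: a dump of the
// nodes within kRenderRadius of `consumer`, with the consumer's users as the
// render boundary and `producer` (if given) highlighted via its CSS id.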
void RegisterFusionState(const HloComputation& computation,
absl::string_view label,
const HloInstruction& consumer,
const HloInstruction* producer) {
absl::MutexLock lock(&fusion_visualizer_state_mu);
FusionVisualizerProgress& fusion_progress =
fusion_visualizer_states[FusionVisualizerStateKey(computation)];
static constexpr int kRenderRadius = 4;
absl::flat_hash_set<const HloInstruction*> render_boundary;
for (const HloInstruction* user : consumer.users()) {
render_boundary.insert(user);
}
HloDotDumper dumper(
consumer.parent(),
StrCat("Rendering of ", kRenderRadius, " nodes around fusion consumer"),
consumer.GetModule()->config().debug_options(), {},
MakeNodeRadiusAroundFilter(&consumer, kRenderRadius, render_boundary));
std::string dot_txt = dumper.Dump();
std::optional<std::string> producer_to_highlight;
if (producer) {
producer_to_highlight = dumper.CssIdForInstruction(*producer);
}
fusion_progress.AddState(dot_txt, label, producer_to_highlight);
}
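// Renders all of `computation` as dot and converts it to `format`; rendering
// to a URL is only available after RegisterGraphToURLRenderer has been
// called.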
absl::StatusOr<std::string> RenderGraph(
const HloComputation& computation, absl::string_view label,
const DebugOptions& debug_options, RenderedGraphFormat format,
HloRenderOptions hlo_render_options,
std::optional<absl::flat_hash_map<const HloInstruction*, ColorStats>>
color_map) {
absl::MutexLock lock(&url_renderer_mu);
if (format == RenderedGraphFormat::kUrl && url_renderer == nullptr) {
return Unavailable("Can't render as URL; no URL renderer was registered.");
}
std::string rendered_dot =
HloDotDumper(&computation, label, debug_options, hlo_render_options,
NodeFilter(), color_map)
.Dump();
return WrapDotInFormat(computation, rendered_dot, format);
}
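// Despite the name, this walks only the entry computation: one frame per
// instruction in reversed post-order, skipping constants and
// get-tuple-elements, and shrinking the render radius from 2 to 1 whenever
// more than max_nodes_to_render nodes would be drawn.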
absl::StatusOr<std::string> RenderAllComputationsToHtml(
const HloModule& module) {
FusionVisualizerProgress progress;
std::vector<HloInstruction*> instrs =
module.entry_computation()->MakeInstructionPostOrder();
absl::c_reverse(instrs);
for (const HloInstruction* instr : instrs) {
if (absl::c_linear_search(
std::vector<HloOpcode>{HloOpcode::kConstant,
HloOpcode::kGetTupleElement},
instr->opcode())) {
continue;
}
HloRenderOptions opts;
opts.show_fusion_subcomputations = true;
opts.show_backend_config = true;
opts.show_while_subcomputations = instr->opcode() == HloOpcode::kWhile;
static constexpr int64_t max_nodes_to_render = 100;
absl::flat_hash_set<const HloInstruction*> render_boundary;
NodeFilter filter = MakeNodeRadiusAroundFilter(instr, 2, render_boundary);
if (filter.GetNumRendered().value_or(1) > max_nodes_to_render) {
filter = MakeNodeRadiusAroundFilter(instr, 1, render_boundary);
}
std::string dot =
HloDotDumper(module.entry_computation(), instr->name(),
module.config().debug_options(), opts, filter)
.Dump();
progress.AddState(dot, instr->name(), std::nullopt);
}
return WrapFusionExplorer(progress, module.name());
}
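// Renders the nodes within `radius` of `node`; expansion stops early at any
// instruction contained in `boundary`.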
absl::StatusOr<std::string> RenderNeighborhoodAround(
const HloInstruction& node, int radius, RenderedGraphFormat format,
HloRenderOptions hlo_render_options,
const absl::flat_hash_set<const HloInstruction*>& boundary,
std::optional<absl::flat_hash_map<const HloInstruction*, ColorStats>>
color_map) {
absl::MutexLock lock(&url_renderer_mu);
if (format == RenderedGraphFormat::kUrl && url_renderer == nullptr) {
return FailedPrecondition(
"Can't render as URL; no URL renderer was registered.");
}
std::string label =
StrCat("Neighborhood of ", radius, " nodes around ", node.name());
std::string rendered_dot =
HloDotDumper(
node.parent(), label, node.GetModule()->config().debug_options(),
hlo_render_options,
MakeNodeRadiusAroundFilter(&node, radius, boundary), color_map)
.Dump();
return WrapDotInFormat(*node.parent(), rendered_dot, format);
}
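// Renders the paths between `from` and `to`. If more than `max_nodes` nodes
// lie on the paths, only a subset near the shortest paths is drawn and the
// label carries a banner saying so.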
absl::StatusOr<std::string> RenderAllPathsFromTo(
const HloInstruction& from, const HloInstruction& to, int64_t max_nodes,
RenderedGraphFormat format, HloRenderOptions hlo_render_options) {
absl::MutexLock lock(&url_renderer_mu);
if (format == RenderedGraphFormat::kUrl && url_renderer == nullptr) {
return FailedPrecondition(
"Can't render as URL; no URL renderer was registered.");
}
CHECK_EQ(from.parent(), to.parent()) << "Nodes must be in same computation!";
auto debug_options = from.GetModule()->config().debug_options();
bool hit_limit = false;
NodeFilter filter = MakeNodeFromToFilter(&from, &to, max_nodes, &hit_limit);
std::string label;
if (!hit_limit) {
label = StrCat("All paths from ", from.name(), " to ", to.name());
} else {
label = StrCat(max_nodes, " nodes on the shortest paths from ", from.name(),
" to ", to.name(),
"<br/><br/>***SHOWING ONLY A SUBSET OF ALL PATHS BETWEEN "
"NODES***<br/><br/>");
}
std::string rendered_dot = HloDotDumper(from.parent(), label, debug_options,
hlo_render_options, filter)
.Dump();
return WrapDotInFormat(*from.parent(), rendered_dot, format);
}
} | #include "xla/service/hlo_graph_dumper.h"
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
#include "xla/xla.pb.h"
namespace xla {
namespace {
using absl::StrCat;
using ::testing::HasSubstr;
using HloGraphDumperTest = HloTestBase;
std::string TestName() {
return ::testing::UnitTest::GetInstance()->current_test_info()->name();
}
TEST_F(HloGraphDumperTest, NestedFusion) {
HloComputation::Builder b("b");
auto shape = ShapeUtil::MakeShape(F32, {10, 100});
std::vector<HloInstruction*> params;
for (int i = 0; i <= 4; ++i) {
params.push_back(b.AddInstruction(
HloInstruction::CreateParameter(i, shape, StrCat("param", i))));
}
std::vector<HloInstruction*> sums;
sums.push_back(b.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, params[0], params[1])));
for (int i = 0; i <= 2; ++i) {
sums.push_back(b.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, sums[i], params[i + 2])));
}
HloModuleConfig config;
HloModule m(TestName(), config);
m.AddEntryComputation(b.Build());
HloComputation* root_computation = m.entry_computation();
auto* outer_fusion = root_computation->CreateFusionInstruction(
{sums[3], sums[2], sums[1], sums[0]}, HloInstruction::FusionKind::kLoop);
std::vector<HloInstruction*> fused_sums;
for (auto* instr : outer_fusion->fused_instructions_computation()
->MakeInstructionPostOrder()) {
if (instr->opcode() == HloOpcode::kAdd) {
fused_sums.push_back(instr);
}
}
auto* inner_fusion =
outer_fusion->fused_instructions_computation()->CreateFusionInstruction(
{fused_sums[1], fused_sums[0]}, HloInstruction::FusionKind::kLoop);
TF_ASSERT_OK_AND_ASSIGN(
std::string graph,
RenderGraph(*root_computation, "", DebugOptions(),
RenderedGraphFormat::kDot));
for (const HloComputation* computation :
{root_computation,
inner_fusion->fused_instructions_computation(),
outer_fusion->fused_instructions_computation()}) {
for (const HloInstruction* instruction : computation->instructions()) {
EXPECT_THAT(graph, HasSubstr(instruction->name()));
}
}
const HloInstruction* inner_sum = nullptr;
for (const HloInstruction* instruction :
inner_fusion->fused_instructions_computation()->instructions()) {
if (instruction->opcode() == HloOpcode::kAdd) {
inner_sum = instruction;
break;
}
}
ASSERT_NE(inner_sum, nullptr);
TF_ASSERT_OK_AND_ASSIGN(std::string neighborhood_graph,
RenderNeighborhoodAround(*inner_sum, 1,
RenderedGraphFormat::kDot));
EXPECT_THAT(neighborhood_graph, HasSubstr(inner_sum->name()));
}
TEST_F(HloGraphDumperTest, Constant) {
HloComputation::Builder b("b");
auto instruction = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(-42)));
instruction->SetAndSanitizeName("i_am_a_constant_root_instruction");
HloModuleConfig config;
HloModule m(TestName(), config);
HloComputation* root_computation = m.AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(
std::string graph,
RenderGraph(*root_computation, "an_empty_graph", DebugOptions(),
RenderedGraphFormat::kDot));
EXPECT_THAT(graph, HasSubstr("an_empty_graph"));
}
TEST_F(HloGraphDumperTest, TupleConstant) {
Shape tuple_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {3, 2}), ShapeUtil::MakeShape(S32, {4, 5})});
HloComputation::Builder b("b");
auto constant = b.AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(tuple_shape)));
auto gte = b.AddInstruction(HloInstruction::CreateGetTupleElement(
ShapeUtil::MakeShape(F32, {3, 2}), constant, 0));
HloModuleConfig config;
HloModule m(TestName(), config);
HloComputation* root_computation = m.AddEntryComputation(b.Build(gte));
TF_ASSERT_OK_AND_ASSIGN(
std::string graph,
RenderGraph(*root_computation, "tuple_constant", DebugOptions(),
RenderedGraphFormat::kDot));
EXPECT_THAT(graph, HasSubstr("tuple_constant"));
EXPECT_THAT(graph, HasSubstr("constant (f32[3,2], s32[4,5])"));
}
TEST_F(HloGraphDumperTest, Compare) {
const char* hlo_string = R"(
HloModule comp
ENTRY comp {
param.0 = f32[10] parameter(0)
param.1 = f32[10] parameter(1)
ROOT lt = pred[10] compare(param.0, param.1), direction=LT
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
std::string graph,
RenderGraph(*module->entry_computation(), "tuple_constant",
DebugOptions(), RenderedGraphFormat::kDot));
EXPECT_THAT(graph, HasSubstr("direction=LT"));
}
TEST_F(HloGraphDumperTest, HasStatisticsViz) {
const char* hlo_string = R"(
HloModule comp
ENTRY comp {
param.0 = f32[10] parameter(0), statistics={visualizing_index=0,stat-0=0.5}
param.1 = f32[10] parameter(1), statistics={visualizing_index=1,stat-0=55.5,stat-1=44.4}
ROOT lt = pred[10] compare(param.0, param.1), direction=LT
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
std::string graph,
RenderGraph(*module->entry_computation(), "tuple_constant",
DebugOptions(), RenderedGraphFormat::kDot));
}
TEST_F(HloGraphDumperTest, RootIsConstant) {
const char* hlo_string = R"(
HloModule indexed_conditional
%then_branch (empty: ()) -> f32[] {
%empty = () parameter(0)
ROOT %then = f32[] constant(1)
}
%else_branch (empty.1: ()) -> f32[] {
%empty.1 = () parameter(0)
ROOT %else = f32[] constant(2)
}
ENTRY %conditional_select (constant: pred[]) -> (f32[]) {
%constant = pred[] parameter(0)
%emptytuple = () tuple()
%conditional = f32[] conditional(pred[] %constant, () %emptytuple, () %emptytuple), true_computation=%then_branch, false_computation=%else_branch
ROOT %t = (f32[]) tuple(f32[] %conditional)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
std::string graph,
RenderGraph(*module->entry_computation(), "tuple_constant",
DebugOptions(), RenderedGraphFormat::kDot));
}
TEST_F(HloGraphDumperTest, OverrideColors) {
const char* hlo_string = R"(
HloModule comp
ENTRY comp {
param.0 = f32[10] parameter(0)
param.1 = f32[10] parameter(1)
ROOT lt = pred[10] compare(param.0, param.1), direction=LT
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
absl::flat_hash_map<const HloInstruction*, ColorStats> color_map;
ColorStats color_stats_1;
color_stats_1.color = "#A9C343";
color_stats_1.stats = absl::StrFormat("%.3f", 1.11);
ColorStats color_stats_2;
color_stats_2.color = "#BC8A3F";
color_stats_2.stats = absl::StrFormat("%.3f", 2.22);
color_map[module->entry_computation()->GetInstructionWithName("param.0")] =
color_stats_1;
color_map[module->entry_computation()->GetInstructionWithName("param.1")] =
color_stats_2;
HloRenderOptions hlo_render_options;
hlo_render_options.override_node_colors = true;
TF_ASSERT_OK_AND_ASSIGN(
std::string graph,
RenderGraph(*module->entry_computation(), "tuple_constant",
DebugOptions(), RenderedGraphFormat::kDot, hlo_render_options,
color_map));
EXPECT_THAT(graph, HasSubstr("#A9C343"));
EXPECT_THAT(graph, HasSubstr("1.110"));
EXPECT_THAT(graph, HasSubstr("#BC8A3F"));
EXPECT_THAT(graph, HasSubstr("2.220"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_graph_dumper.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_graph_dumper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
71217517-bd22-4e54-9891-4344e5585a1c | cpp | tensorflow/tensorflow | uniform_quantized_types | tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.cc | tensorflow/compiler/mlir/quantization/common/uniform_quantized_types_test.cc | #include "tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.h"
#include <cstdint>
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "mlir/Dialect/Quant/IR/QuantTypes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/LLVM.h"
#define DEBUG_TYPE "uniform-quantized-types"
namespace mlir {
namespace quant {
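// Factory helpers below build location-checked quantized types with i8 or
// i32 storage and an f32 expressed type; narrow_range shifts the i8 minimum
// from -128 to -127.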
UniformQuantizedType CreateI8F32UniformQuantizedType(const Location loc,
MLIRContext& context,
const double scale,
const int64_t zero_point,
const bool narrow_range) {
return UniformQuantizedType::getChecked(
loc, QuantizationFlags::Signed,
IntegerType::get(&context, 8),
FloatType::getF32(&context), scale, zero_point,
llvm::minIntN(8) + (narrow_range ? 1 : 0),
llvm::maxIntN(8));
}
UniformQuantizedType CreateI32F32UniformQuantizedType(
const Location loc, MLIRContext& context, const double scale,
const int64_t zero_point) {
return UniformQuantizedType::getChecked(
loc, QuantizationFlags::Signed,
IntegerType::get(&context, 32),
FloatType::getF32(&context), scale, zero_point,
llvm::minIntN(32),
llvm::maxIntN(32));
}
UniformQuantizedPerAxisType CreateI8F32UniformQuantizedPerAxisType(
const Location loc, MLIRContext& context, const ArrayRef<double> scales,
const ArrayRef<int64_t> zero_points, const int quantization_dimension,
const bool narrow_range) {
return UniformQuantizedPerAxisType::getChecked(
loc, QuantizationFlags::Signed,
IntegerType::get(&context, 8),
FloatType::getF32(&context),
SmallVector<double>(scales), SmallVector<int64_t>(zero_points),
quantization_dimension,
llvm::minIntN(8) + (narrow_range ? 1 : 0),
llvm::maxIntN(8));
}
UniformQuantizedPerAxisType CreateI32F32UniformQuantizedPerAxisType(
const Location loc, MLIRContext& context, const ArrayRef<double> scales,
const ArrayRef<int64_t> zero_points, const int quantization_dimension) {
return UniformQuantizedPerAxisType::getChecked(
loc, QuantizationFlags::Signed,
IntegerType::get(&context, 32),
FloatType::getF32(&context),
SmallVector<double>(scales), SmallVector<int64_t>(zero_points),
quantization_dimension, llvm::minIntN(32),
llvm::maxIntN(32));
}
bool IsStorageTypeI8(const QuantizedType quantized_type) {
const Type storage_type = quantized_type.getStorageType();
return storage_type.isInteger(8);
}
bool IsStorageTypeI32(const QuantizedType quantized_type) {
const Type storage_type = quantized_type.getStorageType();
return storage_type.isInteger(32);
}
bool IsExpressedTypeF32(const QuantizedType quantized_type) {
const Type expressed_type = quantized_type.getExpressedType();
return mlir::isa<Float32Type>(expressed_type);
}
bool IsI8F32UniformQuantizedType(const Type type) {
const UniformQuantizedType quantized_type =
mlir::dyn_cast_or_null<UniformQuantizedType>(type);
if (!quantized_type) {
LLVM_DEBUG(llvm::dbgs()
<< "Expected a uniform quantized type. Got: " << type << ".\n");
return false;
}
if (!IsStorageTypeI8(quantized_type)) {
LLVM_DEBUG(llvm::dbgs() << "Expected an i8 storage type. Got: "
<< quantized_type << ".\n");
return false;
}
if (!IsExpressedTypeF32(quantized_type)) {
LLVM_DEBUG(llvm::dbgs() << "Expected an f32 expressed type. Got: "
<< quantized_type << ".\n");
return false;
}
return true;
}
bool IsI8F32UniformQuantizedPerAxisType(const Type type) {
const UniformQuantizedPerAxisType quantized_per_axis_type =
mlir::dyn_cast_or_null<UniformQuantizedPerAxisType>(type);
if (!quantized_per_axis_type) {
LLVM_DEBUG(llvm::dbgs()
<< "Expected a uniform quantized type. Got: " << type << ".\n");
return false;
}
if (!IsStorageTypeI8(quantized_per_axis_type)) {
LLVM_DEBUG(llvm::dbgs() << "Expected an i8 storage type. Got: "
<< quantized_per_axis_type << ".\n");
return false;
}
if (!IsExpressedTypeF32(quantized_per_axis_type)) {
LLVM_DEBUG(llvm::dbgs() << "Expected an f32 expressed type. Got: "
<< quantized_per_axis_type << ".\n");
return false;
}
return true;
}
bool IsI32F32UniformQuantizedType(const Type type) {
const UniformQuantizedType quantized_type =
mlir::dyn_cast_or_null<UniformQuantizedType>(type);
if (!quantized_type) {
LLVM_DEBUG(llvm::dbgs()
<< "Expected a uniform quantized type. Got: " << type << ".\n");
return false;
}
if (!IsStorageTypeI32(quantized_type)) {
LLVM_DEBUG(llvm::dbgs() << "Expected an i32 storage type. Got: "
<< quantized_type << ".\n");
return false;
}
if (!IsExpressedTypeF32(quantized_type)) {
LLVM_DEBUG(llvm::dbgs() << "Expected an f32 expressed type. Got: "
<< quantized_type << ".\n");
return false;
}
return true;
}
bool IsI32F32UniformQuantizedPerAxisType(const Type type) {
const UniformQuantizedPerAxisType quantized_per_axis_type =
mlir::dyn_cast_or_null<UniformQuantizedPerAxisType>(type);
if (!quantized_per_axis_type) {
LLVM_DEBUG(llvm::dbgs()
<< "Expected a uniform quantized type. Got: " << type << ".\n");
return false;
}
if (!IsStorageTypeI32(quantized_per_axis_type)) {
LLVM_DEBUG(llvm::dbgs() << "Expected an i32 storage type. Got: "
<< quantized_per_axis_type << ".\n");
return false;
}
if (!IsExpressedTypeF32(quantized_per_axis_type)) {
LLVM_DEBUG(llvm::dbgs() << "Expected an f32 expressed type. Got: "
<< quantized_per_axis_type << ".\n");
return false;
}
return true;
}
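// TFLite quantize/dequantize ops handle any 8-bit integer storage (either
// signedness) but only explicitly signed 16-bit storage.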
bool IsSupportedByTfliteQuantizeOrDequantizeOps(IntegerType storage_type) {
if (storage_type.getWidth() == 8 ||
(storage_type.isSigned() && storage_type.getWidth() == 16)) {
return true;
}
LLVM_DEBUG(llvm::dbgs()
<< "Uniform quantize / dequantize op only supports ui8, i8 or "
"i16 for the storage type of uniform quantized type. Got: "
<< storage_type << ".\n");
return false;
}
bool IsQuantizedTensorType(Type type) {
if (!mlir::isa<TensorType>(type)) {
return false;
}
Type element_type = mlir::cast<TensorType>(type).getElementType();
return mlir::isa<QuantizedType>(element_type);
}
bool IsOpFullyQuantized(Operation* op) {
return llvm::all_of(op->getOperandTypes(), IsQuantizedTensorType) &&
llvm::all_of(op->getResultTypes(), IsQuantizedTensorType);
}
bool IsOpNotQuantized(Operation* op) {
return !llvm::any_of(op->getOperandTypes(), IsQuantizedTensorType) &&
!llvm::any_of(op->getResultTypes(), IsQuantizedTensorType);
}
}
} | #include "tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.h"
#include <cstdint>
#include <limits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Quant/IR/Quant.h"
#include "mlir/Dialect/Quant/IR/QuantTypes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
namespace mlir {
namespace quant {
namespace {
using ::testing::ElementsAreArray;
using ::testing::IsNull;
using ::testing::Ne;
using ::testing::NotNull;
using ::testing::Test;
class CreateI8F32UniformQuantizedTypeTest : public Test {
protected:
CreateI8F32UniformQuantizedTypeTest() : ctx_() {
ctx_.loadDialect<quant::QuantDialect>();
}
MLIRContext ctx_;
};
TEST_F(CreateI8F32UniformQuantizedTypeTest, I8StorageTypeSucceeds) {
const UniformQuantizedType quantized_type =
CreateI8F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
1.0, 0);
EXPECT_TRUE(quantized_type.getStorageType().isSignlessInteger(8));
}
TEST_F(CreateI8F32UniformQuantizedTypeTest, F32ExpressedTypeSucceeds) {
const UniformQuantizedType quantized_type =
CreateI8F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
1.0, 0);
EXPECT_TRUE(quantized_type.getExpressedType().isF32());
}
TEST_F(CreateI8F32UniformQuantizedTypeTest, SignedQuantizedTypeSucceeds) {
const UniformQuantizedType quantized_type =
CreateI8F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
1.0, 0);
EXPECT_TRUE(quantized_type.isSigned());
}
TEST_F(CreateI8F32UniformQuantizedTypeTest, StorageTypeMinMaxEqualToI8MinMax) {
const UniformQuantizedType quantized_type =
CreateI8F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
1.0, 0);
EXPECT_EQ(quantized_type.getStorageTypeMin(), -128);
EXPECT_EQ(quantized_type.getStorageTypeMax(), 127);
}
TEST_F(CreateI8F32UniformQuantizedTypeTest, StorageTypeMinMaxNarrowRange) {
const UniformQuantizedType quantized_type = CreateI8F32UniformQuantizedType(
UnknownLoc::get(&ctx_), ctx_,
1.0, 0, true);
EXPECT_EQ(quantized_type.getStorageTypeMin(), -127);
EXPECT_EQ(quantized_type.getStorageTypeMax(), 127);
}
TEST_F(CreateI8F32UniformQuantizedTypeTest, HasScaleAndZeroPointProperlySet) {
const UniformQuantizedType quantized_type =
CreateI8F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
8.0, 99);
EXPECT_EQ(quantized_type.getScale(), 8.0);
EXPECT_EQ(quantized_type.getZeroPoint(), 99);
}
class CreateI32F32UniformQuantizedTypeTest : public Test {
protected:
CreateI32F32UniformQuantizedTypeTest() : ctx_() {
ctx_.loadDialect<quant::QuantDialect>();
}
MLIRContext ctx_;
};
TEST_F(CreateI32F32UniformQuantizedTypeTest, I32StorageTypeSucceeds) {
const UniformQuantizedType quantized_type =
CreateI32F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
1.0, 0);
EXPECT_TRUE(quantized_type.getStorageType().isSignlessInteger(32));
}
TEST_F(CreateI32F32UniformQuantizedTypeTest, F32ExpressedTypeSucceeds) {
const UniformQuantizedType quantized_type =
CreateI32F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
1.0, 0);
EXPECT_TRUE(quantized_type.getExpressedType().isF32());
}
TEST_F(CreateI32F32UniformQuantizedTypeTest, SignedQuantizedTypeSucceeds) {
const UniformQuantizedType quantized_type =
CreateI32F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
1.0, 0);
EXPECT_TRUE(quantized_type.isSigned());
}
TEST_F(CreateI32F32UniformQuantizedTypeTest,
StorageTypeMinMaxEqualToI32MinMax) {
const UniformQuantizedType quantized_type =
CreateI32F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
1.0, 0);
EXPECT_EQ(quantized_type.getStorageTypeMin(),
std::numeric_limits<int32_t>::min());
EXPECT_EQ(quantized_type.getStorageTypeMax(),
std::numeric_limits<int32_t>::max());
}
TEST_F(CreateI32F32UniformQuantizedTypeTest, HasScaleAndZeroPointProperlySet) {
const UniformQuantizedType quantized_type =
CreateI32F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
8.0, 1111);
EXPECT_EQ(quantized_type.getScale(), 8.0);
EXPECT_EQ(quantized_type.getZeroPoint(), 1111);
}
class CreateI8F32UniformQuantizedPerAxisTypeTest : public Test {
protected:
CreateI8F32UniformQuantizedPerAxisTypeTest() : ctx_() {
ctx_.loadDialect<quant::QuantDialect>();
}
MLIRContext ctx_;
};
TEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest, I8StorageTypeSucceeds) {
const UniformQuantizedPerAxisType quantized_type =
CreateI8F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
0);
EXPECT_TRUE(quantized_type.getStorageType().isSignlessInteger(8));
}
TEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest, F32ExpressedTypeSucceeds) {
const UniformQuantizedPerAxisType quantized_type =
CreateI8F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
0);
EXPECT_TRUE(quantized_type.getExpressedType().isF32());
}
TEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest,
SignedQuantizedTypeSucceeds) {
const UniformQuantizedPerAxisType quantized_type =
CreateI8F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
0);
EXPECT_TRUE(quantized_type.isSigned());
}
TEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest,
StorageTypeMinMaxEqualToI8MinMax) {
const UniformQuantizedPerAxisType quantized_type =
CreateI8F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
0);
EXPECT_EQ(quantized_type.getStorageTypeMin(), -128);
EXPECT_EQ(quantized_type.getStorageTypeMax(), 127);
}
TEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest,
StorageTypeMinMaxNarrowRange) {
const UniformQuantizedPerAxisType quantized_type =
CreateI8F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
0, true);
EXPECT_EQ(quantized_type.getStorageTypeMin(), -127);
EXPECT_EQ(quantized_type.getStorageTypeMax(), 127);
}
TEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest,
HasQuantizationDimensionProperlySet) {
const UniformQuantizedPerAxisType quantized_type =
CreateI8F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
3);
EXPECT_EQ(quantized_type.getQuantizedDimension(), 3);
}
TEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest,
HasScaleAndZeroPointProperlySet) {
const UniformQuantizedPerAxisType quantized_type =
CreateI8F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{8.0, 9.0},
SmallVector<int64_t, 2>{98, 99},
0);
EXPECT_THAT(quantized_type.getScales(), ElementsAreArray({8.0, 9.0}));
EXPECT_THAT(quantized_type.getZeroPoints(), ElementsAreArray({98, 99}));
}
class CreateI32F32UniformQuantizedPerAxisTypeTest : public Test {
protected:
CreateI32F32UniformQuantizedPerAxisTypeTest() : ctx_() {
ctx_.loadDialect<quant::QuantDialect>();
}
MLIRContext ctx_;
};
TEST_F(CreateI32F32UniformQuantizedPerAxisTypeTest, I32StorageTypeSucceeds) {
const UniformQuantizedPerAxisType quantized_type =
CreateI32F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
0);
EXPECT_TRUE(quantized_type.getStorageType().isSignlessInteger(32));
}
TEST_F(CreateI32F32UniformQuantizedPerAxisTypeTest, F32ExpressedTypeSucceeds) {
const UniformQuantizedPerAxisType quantized_type =
CreateI32F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
0);
EXPECT_TRUE(quantized_type.getExpressedType().isF32());
}
TEST_F(CreateI32F32UniformQuantizedPerAxisTypeTest,
StorageTypeMinMaxEqualToI32MinMax) {
const UniformQuantizedPerAxisType quantized_type =
CreateI32F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
0);
EXPECT_EQ(quantized_type.getStorageTypeMin(),
std::numeric_limits<int32_t>::min());
EXPECT_EQ(quantized_type.getStorageTypeMax(),
std::numeric_limits<int32_t>::max());
}
TEST_F(CreateI32F32UniformQuantizedPerAxisTypeTest,
HasQuantizationDimensionProperlySet) {
const UniformQuantizedPerAxisType quantized_type =
CreateI32F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
3);
EXPECT_EQ(quantized_type.getQuantizedDimension(), 3);
}
TEST_F(CreateI32F32UniformQuantizedPerAxisTypeTest,
HasScaleAndZeroPointProperlySet) {
const UniformQuantizedPerAxisType quantized_type =
CreateI32F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{8.0, 9.0},
SmallVector<int64_t, 2>{98, 99},
0);
EXPECT_THAT(quantized_type.getScales(), ElementsAreArray({8.0, 9.0}));
EXPECT_THAT(quantized_type.getZeroPoints(), ElementsAreArray({98, 99}));
}
class IsI8F32UniformQuantizedTypeTest : public Test {
protected:
IsI8F32UniformQuantizedTypeTest() : builder_(&ctx_) {
ctx_.loadDialect<quant::QuantDialect>();
}
MLIRContext ctx_;
OpBuilder builder_;
};
TEST_F(IsI8F32UniformQuantizedTypeTest, I8F32UniformQuantizedTypeSucceeds) {
const UniformQuantizedType qi8_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(), 1.0,
0, -128, 127);
EXPECT_TRUE(IsI8F32UniformQuantizedType(qi8_type));
}
TEST_F(IsI8F32UniformQuantizedTypeTest, UniformQuantizedTypeSucceeds) {
const UniformQuantizedType qi8_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(), 1.0,
0, -128, 127);
EXPECT_THAT(mlir::dyn_cast_or_null<UniformQuantizedType>(qi8_type),
NotNull());
}
TEST_F(IsI8F32UniformQuantizedTypeTest, StorageTypeI8Succeeds) {
const UniformQuantizedType qi8_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(), 1.0,
0, -128, 127);
EXPECT_TRUE(IsStorageTypeI8(qi8_type));
}
TEST_F(IsI8F32UniformQuantizedTypeTest, ExpressedTypeF32Succeeds) {
const UniformQuantizedType qi8_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(), 1.0,
0, -128, 127);
EXPECT_TRUE(IsExpressedTypeF32(qi8_type));
}
class IsI8F32UniformQuantizedPerAxisTypeTest : public Test {
protected:
IsI8F32UniformQuantizedPerAxisTypeTest() : builder_(&ctx_) {
ctx_.loadDialect<quant::QuantDialect>();
}
MLIRContext ctx_;
OpBuilder builder_;
};
TEST_F(IsI8F32UniformQuantizedPerAxisTypeTest,
I8F32UniformQuantizedPerAxisTypeSucceeds) {
const UniformQuantizedPerAxisType qi8_per_axis_type =
quant::UniformQuantizedPerAxisType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(),
{1.0},
{0}, 0, -128,
127);
EXPECT_TRUE(IsI8F32UniformQuantizedPerAxisType(qi8_per_axis_type));
EXPECT_FALSE(IsI8F32UniformQuantizedType(qi8_per_axis_type));
}
TEST_F(IsI8F32UniformQuantizedTypeTest, UniformQuantizedPerAxisTypeSucceeds) {
const UniformQuantizedPerAxisType qi8_per_axis_type =
quant::UniformQuantizedPerAxisType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(),
{1.0},
{0}, 0, -128,
127);
EXPECT_THAT(
mlir::dyn_cast_or_null<UniformQuantizedPerAxisType>(qi8_per_axis_type),
NotNull());
}
TEST_F(IsI8F32UniformQuantizedPerAxisTypeTest, StorageTypeI8Succeeds) {
const UniformQuantizedPerAxisType qi8_per_axis_type =
quant::UniformQuantizedPerAxisType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(),
{1.0},
{0}, 0, -128,
127);
EXPECT_TRUE(IsStorageTypeI8(qi8_per_axis_type));
}
TEST_F(IsI8F32UniformQuantizedPerAxisTypeTest, ExpressedTypeF32Succeeds) {
const UniformQuantizedPerAxisType qi8_per_axis_type =
quant::UniformQuantizedPerAxisType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(),
{1.0},
{0}, 0, -128,
127);
EXPECT_TRUE(IsExpressedTypeF32(qi8_per_axis_type));
}
class IsI32F32UniformQuantizedTypeTest : public Test {
protected:
IsI32F32UniformQuantizedTypeTest() : builder_(&ctx_) {
ctx_.loadDialect<quant::QuantDialect>();
}
MLIRContext ctx_;
OpBuilder builder_;
};
TEST_F(IsI32F32UniformQuantizedTypeTest, I32F32UniformQuantizedTypeSucceeds) {
const UniformQuantizedType qi32_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI32Type(),
builder_.getF32Type(),
1.0,
0, -2147483647,
2147483646);
EXPECT_TRUE(IsI32F32UniformQuantizedType(qi32_type));
}
TEST_F(IsI32F32UniformQuantizedTypeTest, UniformQuantizedTypeSucceeds) {
const UniformQuantizedType qi32_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI32Type(),
builder_.getF32Type(),
1.0,
0, -2147483647,
2147483646);
EXPECT_TRUE(IsI32F32UniformQuantizedType(qi32_type));
EXPECT_THAT(mlir::dyn_cast_or_null<UniformQuantizedType>(qi32_type),
NotNull());
}
TEST_F(IsI32F32UniformQuantizedTypeTest, StorageTypeI32Succeeds) {
const UniformQuantizedType qi32_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI32Type(),
builder_.getF32Type(),
1.0,
0, -2147483647,
2147483646);
EXPECT_TRUE(IsI32F32UniformQuantizedType(qi32_type));
EXPECT_TRUE(IsStorageTypeI32(qi32_type));
}
TEST_F(IsI32F32UniformQuantizedTypeTest, ExpressedTypeF32Succeeds) {
const UniformQuantizedType qi32_per_axis_type =
quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI32Type(),
builder_.getF32Type(),
1.0,
0, -2147483647,
2147483646);
EXPECT_TRUE(IsExpressedTypeF32(qi32_per_axis_type));
}
class IsI32F32UniformQuantizedPerAxisTypeTest : public Test {
protected:
IsI32F32UniformQuantizedPerAxisTypeTest() : builder_(&ctx_) {
ctx_.loadDialect<quant::QuantDialect>();
}
MLIRContext ctx_;
OpBuilder builder_;
};
TEST_F(IsI32F32UniformQuantizedPerAxisTypeTest,
I32F32UniformQuantizedPerAxisTypeSucceeds) {
const UniformQuantizedPerAxisType qi32_per_axis_type =
quant::UniformQuantizedPerAxisType::get(
QuantizationFlags::Signed, builder_.getI32Type(),
builder_.getF32Type(),
{1.0},
{0}, 0,
-2147483647, 2147483646);
EXPECT_TRUE(IsI32F32UniformQuantizedPerAxisType(qi32_per_axis_type));
EXPECT_FALSE(IsI32F32UniformQuantizedType(qi32_per_axis_type));
}
TEST_F(IsI32F32UniformQuantizedPerAxisTypeTest,
I8F32UniformQuantizedTypeFails) {
const UniformQuantizedType qi8_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(),
1.0, 0, -128,
127);
EXPECT_FALSE(IsI32F32UniformQuantizedPerAxisType(qi8_type));
EXPECT_FALSE(IsStorageTypeI32(qi8_type));
EXPECT_THAT(mlir::dyn_cast_or_null<UniformQuantizedPerAxisType>(qi8_type),
IsNull());
}
TEST_F(IsI32F32UniformQuantizedTypeTest, UniformQuantizedPerAxisTypeSucceeds) {
const UniformQuantizedPerAxisType qi32_per_axis_type =
quant::UniformQuantizedPerAxisType::get(
QuantizationFlags::Signed, builder_.getI32Type(),
builder_.getF32Type(),
{1.0},
{0}, 0,
-2147483647, 2147483646);
EXPECT_THAT(
mlir::dyn_cast_or_null<UniformQuantizedPerAxisType>(qi32_per_axis_type),
NotNull());
}
TEST_F(IsI32F32UniformQuantizedPerAxisTypeTest, StorageTypeI8Succeeds) {
const UniformQuantizedPerAxisType qi32_per_axis_type =
quant::UniformQuantizedPerAxisType::get(
QuantizationFlags::Signed, builder_.getI32Type(),
builder_.getF32Type(),
{1.0},
{0}, 0,
-2147483647, 2147483646);
EXPECT_TRUE(IsStorageTypeI32(qi32_per_axis_type));
}
TEST_F(IsI32F32UniformQuantizedPerAxisTypeTest, ExpressedTypeF32Succeeds) {
const UniformQuantizedPerAxisType qi32_per_axis_type =
quant::UniformQuantizedPerAxisType::get(
QuantizationFlags::Signed, builder_.getI32Type(),
builder_.getF32Type(),
{1.0},
{0}, 0,
-2147483647, 2147483646);
EXPECT_TRUE(IsExpressedTypeF32(qi32_per_axis_type));
}
class IsSupportedByTfliteQuantizeOrDequantizeOpsTest : public Test {
protected:
IsSupportedByTfliteQuantizeOrDequantizeOpsTest() : builder_(&ctx_) {
ctx_.loadDialect<quant::QuantDialect>();
}
MLIRContext ctx_;
OpBuilder builder_;
};
TEST_F(IsSupportedByTfliteQuantizeOrDequantizeOpsTest, StorageTypeI8Succeeds) {
auto qi8_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(),
1.0,
0, -128, 127);
EXPECT_TRUE(IsSupportedByTfliteQuantizeOrDequantizeOps(
dyn_cast_or_null<IntegerType>(qi8_type.getStorageType())));
}
TEST_F(IsSupportedByTfliteQuantizeOrDequantizeOpsTest, StorageTypeI16Succeeds) {
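// Note: despite the test name, the storage type below is (signless) i8; it
// passes via the width-8 branch of the predicate, whereas a signless i16
// would fail the isSigned() check.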
auto qi16_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(),
1.0,
0, -128, 127);
EXPECT_TRUE(IsSupportedByTfliteQuantizeOrDequantizeOps(
dyn_cast_or_null<IntegerType>(qi16_type.getStorageType())));
}
TEST_F(IsSupportedByTfliteQuantizeOrDequantizeOpsTest, StorageTypeUI8Succeeds) {
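// Note: this also builds signed 8-bit storage; the predicate accepts any
// 8-bit width regardless of signedness, so the expectation still holds.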
auto qi8_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(),
1.0,
0, -128, 127);
EXPECT_TRUE(IsSupportedByTfliteQuantizeOrDequantizeOps(
dyn_cast_or_null<IntegerType>(qi8_type.getStorageType())));
}
using IsOpFullyQuantizedTest = QuantizationTestBase;
TEST_F(IsOpFullyQuantizedTest, TrueIfOpFullyQuantized) {
constexpr absl::string_view kFullyQuantizedAdd = R"mlir(
func.func @fully_quantized_add(%arg0: tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>> {
%0 = stablehlo.add %arg0, %arg0 : tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
return %0 : tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kFullyQuantizedAdd);
ASSERT_TRUE(module_op);
auto func_op = module_op->lookupSymbol<func::FuncOp>("fully_quantized_add");
ASSERT_THAT(func_op, NotNull());
auto add_op_itr = func_op.getBody().op_begin<mlir::stablehlo::AddOp>();
ASSERT_THAT(add_op_itr,
Ne(func_op.getBody().op_end<mlir::stablehlo::AddOp>()));
EXPECT_TRUE(IsOpFullyQuantized(*add_op_itr));
}
TEST_F(IsOpFullyQuantizedTest, FalseIfOpNotQuantized) {
constexpr absl::string_view kNotQuantizedAdd = R"mlir(
func.func @not_quantized_add(%arg0: tensor<2xf32>) -> tensor<2xf32> {
%0 = stablehlo.add %arg0, %arg0 : tensor<2xf32>
return %0 : tensor<2xf32>
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kNotQuantizedAdd);
ASSERT_TRUE(module_op);
auto func_op = module_op->lookupSymbol<func::FuncOp>("not_quantized_add");
ASSERT_THAT(func_op, NotNull());
auto add_op_itr = func_op.getBody().op_begin<mlir::stablehlo::AddOp>();
ASSERT_THAT(add_op_itr,
Ne(func_op.getBody().op_end<mlir::stablehlo::AddOp>()));
EXPECT_FALSE(IsOpFullyQuantized(*add_op_itr));
}
TEST_F(IsOpFullyQuantizedTest, FalseIfOpPartiallyQuantized) {
constexpr absl::string_view kQuantizeOp = R"mlir(
func.func @quantize(%arg0: tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>> {
%0 = stablehlo.uniform_quantize %arg0 : (tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
return %0 : tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kQuantizeOp);
ASSERT_TRUE(module_op);
auto func_op = module_op->lookupSymbol<func::FuncOp>("quantize");
ASSERT_THAT(func_op, NotNull());
auto uniform_quantize_op_itr =
func_op.getBody().op_begin<mlir::stablehlo::UniformQuantizeOp>();
ASSERT_THAT(
uniform_quantize_op_itr,
Ne(func_op.getBody().op_end<mlir::stablehlo::UniformQuantizeOp>()));
EXPECT_FALSE(IsOpFullyQuantized(*uniform_quantize_op_itr));
}
using IsOpNotQuantizedTest = QuantizationTestBase;
TEST_F(IsOpNotQuantizedTest, TrueIfOpNotQuantized) {
constexpr absl::string_view kNotQuantizedAdd = R"mlir(
func.func @not_quantized_add(%arg0: tensor<2xf32>) -> tensor<2xf32> {
%0 = stablehlo.add %arg0, %arg0 : tensor<2xf32>
return %0 : tensor<2xf32>
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kNotQuantizedAdd);
ASSERT_TRUE(module_op);
auto func_op = module_op->lookupSymbol<func::FuncOp>("not_quantized_add");
ASSERT_THAT(func_op, NotNull());
auto add_op_itr = func_op.getBody().op_begin<mlir::stablehlo::AddOp>();
ASSERT_THAT(add_op_itr,
Ne(func_op.getBody().op_end<mlir::stablehlo::AddOp>()));
EXPECT_TRUE(IsOpNotQuantized(*add_op_itr));
}
TEST_F(IsOpNotQuantizedTest, FalseIfOpQuantized) {
constexpr absl::string_view kQuantizedAdd = R"mlir(
func.func @quantized_add(%arg0: tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>> {
%0 = stablehlo.add %arg0, %arg0 : tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
return %0 : tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kQuantizedAdd);
ASSERT_TRUE(module_op);
auto func_op = module_op->lookupSymbol<func::FuncOp>("quantized_add");
ASSERT_THAT(func_op, NotNull());
auto add_op_itr = func_op.getBody().op_begin<mlir::stablehlo::AddOp>();
ASSERT_THAT(add_op_itr,
Ne(func_op.getBody().op_end<mlir::stablehlo::AddOp>()));
EXPECT_FALSE(IsOpNotQuantized(*add_op_itr));
}
TEST_F(IsOpNotQuantizedTest, FalseIfOpPartiallyQuantized) {
constexpr absl::string_view kQuantizeOp = R"mlir(
func.func @quantize(%arg0: tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>> {
%0 = stablehlo.uniform_quantize %arg0 : (tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
return %0 : tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kQuantizeOp);
ASSERT_TRUE(module_op);
auto func_op = module_op->lookupSymbol<func::FuncOp>("quantize");
ASSERT_THAT(func_op, NotNull());
auto uniform_quantize_op_itr =
func_op.getBody().op_begin<mlir::stablehlo::UniformQuantizeOp>();
ASSERT_THAT(
uniform_quantize_op_itr,
Ne(func_op.getBody().op_end<mlir::stablehlo::UniformQuantizeOp>()));
EXPECT_FALSE(IsOpNotQuantized(*uniform_quantize_op_itr));
}
using UniformQuantizedTypeTest = QuantizationTestBase;
TEST_F(UniformQuantizedTypeTest, GetElementTypeSucceeds) {
constexpr absl::string_view kQuantizeOp = R"mlir(
func.func @quantize(%arg0: tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>> {
%0 = stablehlo.uniform_quantize %arg0 : (tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
return %0 : tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kQuantizeOp);
ASSERT_TRUE(module_op);
auto func_op = module_op->lookupSymbol<func::FuncOp>("quantize");
ASSERT_THAT(func_op, NotNull());
auto uniform_quantize_op =
*func_op.getOps<::mlir::stablehlo::UniformQuantizeOp>().begin();
Value result = uniform_quantize_op.getResult();
EXPECT_THAT(GetElementType(result), NotNull());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/common/uniform_quantized_types_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
94996556-fc86-4b9e-8614-170ea8e383c5 | cpp | tensorflow/tensorflow | tensor_id | tensorflow/core/graph/tensor_id.cc | tensorflow/core/graph/tensor_id_test.cc | #include "tensorflow/core/graph/tensor_id.h"
#include <string>
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
TensorId::TensorId(const SafeTensorId& id) : TensorId(id.first, id.second) {}
SafeTensorId::SafeTensorId(const TensorId& id)
: SafeTensorId(string(id.first), id.second) {}
TensorId ParseTensorName(const string& name) {
return ParseTensorName(StringPiece(name.data(), name.size()));
}
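// Parses "op", "op:<n>" and "^op" (control input): e.g. "W1" -> ("W1", 0),
// "W1:17" -> ("W1", 17), "^foo" -> ("foo", Graph::kControlSlot). Digits are
// scanned right to left; `mul > 1` distinguishes a real ":<digits>" suffix
// from a name that merely ends in ':'.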
TensorId ParseTensorName(StringPiece name) {
const char* base = name.data();
const char* p = base + name.size() - 1;
unsigned int index = 0;
unsigned int mul = 1;
while (p > base && (*p >= '0' && *p <= '9')) {
index += ((*p - '0') * mul);
mul *= 10;
p--;
}
TensorId id;
if (p > base && *p == ':' && mul > 1) {
id.first = StringPiece(base, p - base);
id.second = index;
} else if (absl::StartsWith(name, "^")) {
id.first = StringPiece(base + 1);
id.second = Graph::kControlSlot;
} else {
id.first = name;
id.second = 0;
}
return id;
}
bool IsTensorIdControl(const TensorId& tensor_id) {
return tensor_id.index() == Graph::kControlSlot;
}
} | #include "tensorflow/core/graph/tensor_id.h"
#include <vector>
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
string ParseHelper(const string& n) { return ParseTensorName(n).ToString(); }
TEST(TensorIdTest, ParseTensorName) {
EXPECT_EQ(ParseHelper("W1"), "W1:0");
EXPECT_EQ(ParseHelper("W1:0"), "W1:0");
EXPECT_EQ(ParseHelper("weights:0"), "weights:0");
EXPECT_EQ(ParseHelper("W1:1"), "W1:1");
EXPECT_EQ(ParseHelper("W1:17"), "W1:17");
EXPECT_EQ(ParseHelper("xyz1_17"), "xyz1_17:0");
EXPECT_EQ(ParseHelper("^foo"), "^foo");
}
uint32 Skewed(random::SimplePhilox* rnd, int max_log) {
const uint32 space = 1 << (rnd->Rand32() % (max_log + 1));
return rnd->Rand32() % space;
}
void BM_ParseTensorName(::testing::benchmark::State& state) {
const int arg = state.range(0);
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
std::vector<string> names;
for (int i = 0; i < 100; i++) {
string name;
switch (arg) {
case 0: {
size_t len = Skewed(&rnd, 4);
while (name.size() < len) {
name += rnd.OneIn(4) ? '0' : 'a';
}
if (rnd.OneIn(3)) {
strings::StrAppend(&name, ":", rnd.Uniform(12));
}
break;
}
case 1:
name = "W1";
break;
case 2:
name = "t0003";
break;
case 3:
name = "weights";
break;
case 4:
name = "weights:17";
break;
case 5:
name = "^weights";
break;
default:
LOG(FATAL) << "Unexpected arg";
break;
}
names.push_back(name);
}
TensorId id;
int index = 0;
int sum = 0;
for (auto s : state) {
id = ParseTensorName(names[index++ % names.size()]);
sum += id.second;
}
VLOG(2) << sum;
}
BENCHMARK(BM_ParseTensorName)->Arg(0)->Arg(1)->Arg(2)->Arg(3)->Arg(4)->Arg(5);
TEST(TensorIdTest, IsTensorIdControl) {
string input = "^foo";
TensorId tensor_id = ParseTensorName(input);
EXPECT_TRUE(IsTensorIdControl(tensor_id));
input = "foo";
tensor_id = ParseTensorName(input);
EXPECT_FALSE(IsTensorIdControl(tensor_id));
input = "foo:2";
tensor_id = ParseTensorName(input);
EXPECT_FALSE(IsTensorIdControl(tensor_id));
}
TEST(TensorIdTest, PortZero) {
for (string input : {"foo", "foo:0"}) {
TensorId tensor_id = ParseTensorName(input);
EXPECT_EQ("foo", tensor_id.node());
EXPECT_EQ(0, tensor_id.index());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/tensor_id.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/tensor_id_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9f641e69-b766-49cc-9efc-fb43469c2b28 | cpp | abseil/abseil-cpp | globals | absl/log/internal/globals.cc | absl/log/globals_test.cc | #include "absl/log/internal/globals.h"
#include <atomic>
#include <cstdio>
#if defined(__EMSCRIPTEN__)
#include <emscripten/console.h>
#endif
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/log_severity.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "absl/time/time.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace log_internal {
namespace {
ABSL_CONST_INIT std::atomic<bool> logging_initialized(false);
ABSL_CONST_INIT std::atomic<absl::TimeZone*> timezone_ptr{nullptr};
ABSL_CONST_INIT std::atomic<bool> symbolize_stack_trace(true);
ABSL_CONST_INIT std::atomic<int> max_frames_in_stack_trace(64);
ABSL_CONST_INIT std::atomic<bool> exit_on_dfatal(true);
ABSL_CONST_INIT std::atomic<bool> suppress_sigabort_trace(false);
}
bool IsInitialized() {
return logging_initialized.load(std::memory_order_acquire);
}
void SetInitialized() {
logging_initialized.store(true, std::memory_order_release);
}
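// Writes the message straight to stderr. On Emscripten it is routed through
// the console APIs (minus the trailing newline); on Windows stderr is flushed
// for warnings and above, since it may be buffered there.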
void WriteToStderr(absl::string_view message, absl::LogSeverity severity) {
if (message.empty()) return;
#if defined(__EMSCRIPTEN__)
const auto message_minus_newline = absl::StripSuffix(message, "\n");
#if ABSL_INTERNAL_EMSCRIPTEN_VERSION >= 3001043
emscripten_errn(message_minus_newline.data(), message_minus_newline.size());
#else
std::string null_terminated_message(message_minus_newline);
_emscripten_err(null_terminated_message.c_str());
#endif
#else
std::fwrite(message.data(), message.size(), 1, stderr);
#endif
#if defined(_WIN64) || defined(_WIN32) || defined(_WIN16)
if (severity >= absl::LogSeverity::kWarning) {
std::fflush(stderr);
}
#else
(void)severity;
#endif
}
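// One-shot setter: the TimeZone is heap-allocated and published with a
// compare-exchange, and never freed so concurrent readers always see a live
// object. A second call dies with a fatal raw log.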
void SetTimeZone(absl::TimeZone tz) {
absl::TimeZone* expected = nullptr;
absl::TimeZone* new_tz = new absl::TimeZone(tz);
if (!timezone_ptr.compare_exchange_strong(expected, new_tz,
std::memory_order_release,
std::memory_order_relaxed)) {
ABSL_RAW_LOG(FATAL,
"absl::log_internal::SetTimeZone() has already been called");
}
}
const absl::TimeZone* TimeZone() {
return timezone_ptr.load(std::memory_order_acquire);
}
bool ShouldSymbolizeLogStackTrace() {
return symbolize_stack_trace.load(std::memory_order_acquire);
}
void EnableSymbolizeLogStackTrace(bool on_off) {
symbolize_stack_trace.store(on_off, std::memory_order_release);
}
int MaxFramesInLogStackTrace() {
return max_frames_in_stack_trace.load(std::memory_order_acquire);
}
void SetMaxFramesInLogStackTrace(int max_num_frames) {
max_frames_in_stack_trace.store(max_num_frames, std::memory_order_release);
}
bool ExitOnDFatal() { return exit_on_dfatal.load(std::memory_order_acquire); }
void SetExitOnDFatal(bool on_off) {
exit_on_dfatal.store(on_off, std::memory_order_release);
}
bool SuppressSigabortTrace() {
return suppress_sigabort_trace.load(std::memory_order_acquire);
}
bool SetSuppressSigabortTrace(bool on_off) {
return suppress_sigabort_trace.exchange(on_off);
}
}
ABSL_NAMESPACE_END
} | #include "absl/log/globals.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/log_severity.h"
#include "absl/log/internal/globals.h"
#include "absl/log/internal/test_helpers.h"
#include "absl/log/log.h"
#include "absl/log/scoped_mock_log.h"
namespace {
using ::testing::_;
using ::testing::StrEq;
auto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment(
new absl::log_internal::LogTestEnvironment);
constexpr static absl::LogSeverityAtLeast DefaultMinLogLevel() {
return absl::LogSeverityAtLeast::kInfo;
}
constexpr static absl::LogSeverityAtLeast DefaultStderrThreshold() {
return absl::LogSeverityAtLeast::kError;
}
TEST(TestGlobals, MinLogLevel) {
EXPECT_EQ(absl::MinLogLevel(), DefaultMinLogLevel());
absl::SetMinLogLevel(absl::LogSeverityAtLeast::kError);
EXPECT_EQ(absl::MinLogLevel(), absl::LogSeverityAtLeast::kError);
absl::SetMinLogLevel(DefaultMinLogLevel());
}
TEST(TestGlobals, ScopedMinLogLevel) {
EXPECT_EQ(absl::MinLogLevel(), DefaultMinLogLevel());
{
absl::log_internal::ScopedMinLogLevel scoped_stderr_threshold(
absl::LogSeverityAtLeast::kError);
EXPECT_EQ(absl::MinLogLevel(), absl::LogSeverityAtLeast::kError);
}
EXPECT_EQ(absl::MinLogLevel(), DefaultMinLogLevel());
}
TEST(TestGlobals, StderrThreshold) {
EXPECT_EQ(absl::StderrThreshold(), DefaultStderrThreshold());
absl::SetStderrThreshold(absl::LogSeverityAtLeast::kError);
EXPECT_EQ(absl::StderrThreshold(), absl::LogSeverityAtLeast::kError);
absl::SetStderrThreshold(DefaultStderrThreshold());
}
TEST(TestGlobals, ScopedStderrThreshold) {
EXPECT_EQ(absl::StderrThreshold(), DefaultStderrThreshold());
{
absl::ScopedStderrThreshold scoped_stderr_threshold(
absl::LogSeverityAtLeast::kError);
EXPECT_EQ(absl::StderrThreshold(), absl::LogSeverityAtLeast::kError);
}
EXPECT_EQ(absl::StderrThreshold(), DefaultStderrThreshold());
}
TEST(TestGlobals, LogBacktraceAt) {
EXPECT_FALSE(absl::log_internal::ShouldLogBacktraceAt("some_file.cc", 111));
absl::SetLogBacktraceLocation("some_file.cc", 111);
EXPECT_TRUE(absl::log_internal::ShouldLogBacktraceAt("some_file.cc", 111));
EXPECT_FALSE(
absl::log_internal::ShouldLogBacktraceAt("another_file.cc", 222));
}
TEST(TestGlobals, LogPrefix) {
EXPECT_TRUE(absl::ShouldPrependLogPrefix());
absl::EnableLogPrefix(false);
EXPECT_FALSE(absl::ShouldPrependLogPrefix());
absl::EnableLogPrefix(true);
EXPECT_TRUE(absl::ShouldPrependLogPrefix());
}
TEST(TestGlobals, SetGlobalVLogLevel) {
EXPECT_EQ(absl::SetGlobalVLogLevel(42), 0);
EXPECT_EQ(absl::SetGlobalVLogLevel(1337), 42);
EXPECT_EQ(absl::SetGlobalVLogLevel(0), 1337);
}
TEST(TestGlobals, SetVLogLevel) {
EXPECT_EQ(absl::SetVLogLevel("setvloglevel", 42), 0);
EXPECT_EQ(absl::SetVLogLevel("setvloglevel", 1337), 42);
EXPECT_EQ(absl::SetVLogLevel("othersetvloglevel", 50), 0);
EXPECT_EQ(absl::SetVLogLevel("*pattern*", 1), 0);
EXPECT_EQ(absl::SetVLogLevel("*less_generic_pattern*", 2), 1);
EXPECT_EQ(absl::SetVLogLevel("pattern_match", 3), 1);
EXPECT_EQ(absl::SetVLogLevel("less_generic_pattern_match", 4), 2);
}
TEST(TestGlobals, AndroidLogTag) {
EXPECT_DEATH_IF_SUPPORTED(absl::SetAndroidNativeTag(nullptr), ".*");
EXPECT_THAT(absl::log_internal::GetAndroidNativeTag(), StrEq("native"));
absl::SetAndroidNativeTag("test_tag");
EXPECT_THAT(absl::log_internal::GetAndroidNativeTag(), StrEq("test_tag"));
EXPECT_DEATH_IF_SUPPORTED(absl::SetAndroidNativeTag("test_tag_fail"), ".*");
}
TEST(TestExitOnDFatal, OffTest) {
absl::log_internal::SetExitOnDFatal(false);
EXPECT_FALSE(absl::log_internal::ExitOnDFatal());
{
absl::ScopedMockLog log(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(log, Log(absl::kLogDebugFatal, _, "This should not be fatal"));
log.StartCapturingLogs();
LOG(DFATAL) << "This should not be fatal";
}
}
#if GTEST_HAS_DEATH_TEST
TEST(TestDeathWhileExitOnDFatal, OnTest) {
absl::log_internal::SetExitOnDFatal(true);
EXPECT_TRUE(absl::log_internal::ExitOnDFatal());
EXPECT_DEBUG_DEATH({ LOG(DFATAL) << "This should be fatal in debug mode"; },
"This should be fatal in debug mode");
}
#endif
}
https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/log/internal/globals.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/log/globals_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4
19afc211-8c06-4f02-9ab1-ee2b82b6cba1 | cpp | tensorflow/tensorflow | strcat | third_party/xla/third_party/tsl/tsl/platform/strcat.cc | third_party/xla/third_party/tsl/tsl/platform/strcat_test.cc
#include "tsl/platform/strcat.h"
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <algorithm>
#include "absl/meta/type_traits.h"
#include "tsl/platform/logging.h"
namespace tsl {
namespace strings {
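// Formats hex.value as lowercase hexadecimal, writing digits backwards from
// the end of digits_. The mask keeps the loop running for at least hex.spec
// iterations, which implements zero padding (e.g. kZeroPad8).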
AlphaNum::AlphaNum(Hex hex) {
char *const end = &digits_[kFastToBufferSize];
char *writer = end;
uint64 value = hex.value;
uint64 width = hex.spec;
uint64 mask = (static_cast<uint64>(1) << (width - 1) * 4) | value;
static const char hexdigits[] = "0123456789abcdef";
do {
*--writer = hexdigits[value & 0xF];
value >>= 4;
mask >>= 4;
} while (mask != 0);
piece_ = absl::string_view(writer, end - writer);
}
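// Append helpers copy pieces into a buffer the caller has already sized; the
// DCHECKs in the StrCat overloads below verify the size arithmetic.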
static char *Append1(char *out, const AlphaNum &x) {
if (x.data() == nullptr) return out;
memcpy(out, x.data(), x.size());
return out + x.size();
}
static char *Append2(char *out, const AlphaNum &x1, const AlphaNum &x2) {
if (x1.data() != nullptr) {
memcpy(out, x1.data(), x1.size());
out += x1.size();
}
if (x2.data() == nullptr) return out;
memcpy(out, x2.data(), x2.size());
return out + x2.size();
}
static char *Append4(char *out, const AlphaNum &x1, const AlphaNum &x2,
const AlphaNum &x3, const AlphaNum &x4) {
if (x1.data() != nullptr) {
memcpy(out, x1.data(), x1.size());
out += x1.size();
}
if (x2.data() != nullptr) {
memcpy(out, x2.data(), x2.size());
out += x2.size();
}
if (x3.data() != nullptr) {
memcpy(out, x3.data(), x3.size());
out += x3.size();
}
if (x4.data() == nullptr) return out;
memcpy(out, x4.data(), x4.size());
return out + x4.size();
}
string StrCat(const AlphaNum &a) { return string(a.data(), a.size()); }
string StrCat(const AlphaNum &a, const AlphaNum &b) {
string result(a.size() + b.size(), '\0');
char *const begin = &*result.begin();
char *out = Append2(begin, a, b);
DCHECK_EQ(out, begin + result.size());
return result;
}
string StrCat(const AlphaNum &a, const AlphaNum &b, const AlphaNum &c) {
string result(a.size() + b.size() + c.size(), '\0');
char *const begin = &*result.begin();
char *out = Append2(begin, a, b);
out = Append1(out, c);
DCHECK_EQ(out, begin + result.size());
return result;
}
string StrCat(const AlphaNum &a, const AlphaNum &b, const AlphaNum &c,
const AlphaNum &d) {
string result(a.size() + b.size() + c.size() + d.size(), '\0');
char *const begin = &*result.begin();
char *out = Append4(begin, a, b, c, d);
DCHECK_EQ(out, begin + result.size());
return result;
}
namespace {
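// Resizes a string without zero-filling the new tail, using the non-standard
// __resize_default_init extension when the string implementation provides it.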
template <typename string_type, typename = void>
struct ResizeUninitializedTraits {
using HasMember = std::false_type;
static void Resize(string_type *s, size_t new_size) { s->resize(new_size); }
};
template <typename string_type>
struct ResizeUninitializedTraits<
string_type, absl::void_t<decltype(std::declval<string_type &>()
.__resize_default_init(237))> > {
using HasMember = std::true_type;
static void Resize(string_type *s, size_t new_size) {
s->__resize_default_init(new_size);
}
};
static inline void STLStringResizeUninitialized(string *s, size_t new_size) {
ResizeUninitializedTraits<string>::Resize(s, new_size);
}
template <typename string_type>
void STLStringReserveAmortized(string_type *s, size_t new_size) {
const size_t cap = s->capacity();
if (new_size > cap) {
s->reserve((std::max)(new_size, 2 * cap));
}
}
template <typename string_type>
void STLStringResizeUninitializedAmortized(string_type *s, size_t new_size) {
STLStringReserveAmortized(s, new_size);
STLStringResizeUninitialized(s, new_size);
}
}
namespace internal {
string CatPieces(std::initializer_list<absl::string_view> pieces) {
size_t total_size = 0;
for (const absl::string_view piece : pieces) total_size += piece.size();
string result(total_size, '\0');
char *const begin = &*result.begin();
char *out = begin;
for (const absl::string_view piece : pieces) {
const size_t this_size = piece.size();
memcpy(out, piece.data(), this_size);
out += this_size;
}
DCHECK_EQ(out, begin + result.size());
return result;
}
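// A piece passed to StrAppend must not alias the destination string; unsigned
// arithmetic catches a piece whose data begins inside the destination buffer
// with a single comparison.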
#define DCHECK_NO_OVERLAP(dest, src) \
DCHECK_GE(uintptr_t((src).data() - (dest).data()), uintptr_t((dest).size()))
void AppendPieces(string *result,
std::initializer_list<absl::string_view> pieces) {
size_t old_size = result->size();
size_t total_size = old_size;
for (const absl::string_view piece : pieces) {
DCHECK_NO_OVERLAP(*result, piece);
total_size += piece.size();
}
STLStringResizeUninitializedAmortized(result, total_size);
char *const begin = &*result->begin();
char *out = begin + old_size;
for (const absl::string_view piece : pieces) {
const size_t this_size = piece.size();
memcpy(out, piece.data(), this_size);
out += this_size;
}
DCHECK_EQ(out, begin + result->size());
}
}
void StrAppend(string *result, const AlphaNum &a) {
DCHECK_NO_OVERLAP(*result, a);
result->append(a.data(), a.size());
}
void StrAppend(string *result, const AlphaNum &a, const AlphaNum &b) {
DCHECK_NO_OVERLAP(*result, a);
DCHECK_NO_OVERLAP(*result, b);
string::size_type old_size = result->size();
STLStringResizeUninitializedAmortized(result, old_size + a.size() + b.size());
char *const begin = &*result->begin();
char *out = Append2(begin + old_size, a, b);
DCHECK_EQ(out, begin + result->size());
}
void StrAppend(string *result, const AlphaNum &a, const AlphaNum &b,
const AlphaNum &c) {
DCHECK_NO_OVERLAP(*result, a);
DCHECK_NO_OVERLAP(*result, b);
DCHECK_NO_OVERLAP(*result, c);
string::size_type old_size = result->size();
STLStringResizeUninitializedAmortized(
result, old_size + a.size() + b.size() + c.size());
char *const begin = &*result->begin();
char *out = Append2(begin + old_size, a, b);
out = Append1(out, c);
DCHECK_EQ(out, begin + result->size());
}
void StrAppend(string *result, const AlphaNum &a, const AlphaNum &b,
const AlphaNum &c, const AlphaNum &d) {
DCHECK_NO_OVERLAP(*result, a);
DCHECK_NO_OVERLAP(*result, b);
DCHECK_NO_OVERLAP(*result, c);
DCHECK_NO_OVERLAP(*result, d);
string::size_type old_size = result->size();
STLStringResizeUninitializedAmortized(
result, old_size + a.size() + b.size() + c.size() + d.size());
char *const begin = &*result->begin();
char *out = Append4(begin + old_size, a, b, c, d);
DCHECK_EQ(out, begin + result->size());
}
}
}
#include "tsl/platform/strcat.h"
#include <string>
#include "absl/strings/string_view.h"
#include "tsl/platform/stringprintf.h"
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
#ifdef _MSC_VER
typedef ptrdiff_t ssize_t;
#endif
namespace tsl {
namespace strings {
TEST(StrCat, Ints) {
const int16_t s = -1;
const uint16 us = 2;
const int i = -3;
const unsigned int ui = 4;
const int32_t l = -5;
const uint32 ul = 6;
const int64_t ll = -7;
const uint64 ull = 8;
const ptrdiff_t ptrdiff = -9;
const size_t size = 10;
const ssize_t ssize = -11;
const intptr_t intptr = -12;
const uintptr_t uintptr = 13;
string answer;
answer = StrCat(s, us);
EXPECT_EQ(answer, "-12");
answer = StrCat(i, ui);
EXPECT_EQ(answer, "-34");
answer = StrCat(l, ul);
EXPECT_EQ(answer, "-56");
answer = StrCat(ll, ull);
EXPECT_EQ(answer, "-78");
answer = StrCat(ptrdiff, size);
EXPECT_EQ(answer, "-910");
answer = StrCat(ssize, intptr);
EXPECT_EQ(answer, "-11-12");
answer = StrCat(uintptr, 0);
EXPECT_EQ(answer, "130");
}
TEST(StrCat, Floats) {
const int s = 0;
const float f = 1.5f;
const double d = 1.5;
const bfloat16 bf(1.5f);
string answer;
answer = StrCat(s, f);
EXPECT_EQ(answer, "01.5");
answer = StrCat(s, d);
EXPECT_EQ(answer, "01.5");
answer = StrCat(s, bf);
EXPECT_EQ(answer, "01.5");
}
TEST(StrCat, Nulls) {
string result;
absl::string_view v;
string strs[] = {"Hello", "Cruel", "World"};
result = StrCat(v);
EXPECT_EQ(result, "");
result = StrCat(strs[0], v);
EXPECT_EQ(result, "Hello");
result = StrCat(v, strs[0]);
EXPECT_EQ(result, "Hello");
result = StrCat(v, strs[0], strs[1]);
EXPECT_EQ(result, "HelloCruel");
result = StrCat(strs[0], v, strs[1]);
EXPECT_EQ(result, "HelloCruel");
result = StrCat(strs[0], strs[1], v);
EXPECT_EQ(result, "HelloCruel");
result = StrCat(v, strs[0], strs[1], strs[2]);
EXPECT_EQ(result, "HelloCruelWorld");
result = StrCat(strs[0], v, strs[1], strs[2]);
EXPECT_EQ(result, "HelloCruelWorld");
result = StrCat(strs[0], strs[1], v, strs[2]);
EXPECT_EQ(result, "HelloCruelWorld");
result = StrCat(strs[0], strs[1], strs[2], v);
EXPECT_EQ(result, "HelloCruelWorld");
}
TEST(StrCat, Basics) {
string result;
string strs[] = {"Hello", "Cruel", "World"};
absl::string_view pieces[] = {"Hello", "Cruel", "World"};
const char *c_strs[] = {"Hello", "Cruel", "World"};
int32 i32s[] = {'H', 'C', 'W'};
uint64 ui64s[] = {12345678910LL, 10987654321LL};
result = StrCat(false, true, 2, 3);
EXPECT_EQ(result, "0123");
result = StrCat(-1);
EXPECT_EQ(result, "-1");
result = StrCat(0.5);
EXPECT_EQ(result, "0.5");
result = StrCat(strs[1], pieces[2]);
EXPECT_EQ(result, "CruelWorld");
result = StrCat(strs[0], ", ", pieces[2]);
EXPECT_EQ(result, "Hello, World");
result = StrCat(strs[0], ", ", strs[1], " ", strs[2], "!");
EXPECT_EQ(result, "Hello, Cruel World!");
result = StrCat(pieces[0], ", ", pieces[1], " ", pieces[2]);
EXPECT_EQ(result, "Hello, Cruel World");
result = StrCat(c_strs[0], ", ", c_strs[1], " ", c_strs[2]);
EXPECT_EQ(result, "Hello, Cruel World");
result = StrCat("ASCII ", i32s[0], ", ", i32s[1], " ", i32s[2], "!");
EXPECT_EQ(result, "ASCII 72, 67 87!");
result = StrCat(ui64s[0], ", ", ui64s[1], "!");
EXPECT_EQ(result, "12345678910, 10987654321!");
string one = "1";
result = StrCat("And a ", one.size(), " and a ", &result[2] - &result[0],
" and a ", one, " 2 3 4", "!");
EXPECT_EQ(result, "And a 1 and a 2 and a 1 2 3 4!");
result = StrCat("To output a char by ASCII/numeric value, use +: ", '!' + 0);
EXPECT_EQ(result, "To output a char by ASCII/numeric value, use +: 33");
float f = 100000.5;
result = StrCat("A hundred K and a half is ", f);
EXPECT_EQ(result, "A hundred K and a half is 100000.5");
double d = f;
d *= d;
result = StrCat("A hundred K and a half squared is ", d);
EXPECT_EQ(result, "A hundred K and a half squared is 10000100000.25");
result = StrCat(1, 2, 333, 4444, 55555, 666666, 7777777, 88888888, 999999999);
EXPECT_EQ(result, "12333444455555666666777777788888888999999999");
}
TEST(StrCat, MaxArgs) {
string result;
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a");
EXPECT_EQ(result, "123456789a");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b");
EXPECT_EQ(result, "123456789ab");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c");
EXPECT_EQ(result, "123456789abc");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d");
EXPECT_EQ(result, "123456789abcd");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e");
EXPECT_EQ(result, "123456789abcde");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f");
EXPECT_EQ(result, "123456789abcdef");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g");
EXPECT_EQ(result, "123456789abcdefg");
result =
StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g", "h");
EXPECT_EQ(result, "123456789abcdefgh");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g",
"h", "i");
EXPECT_EQ(result, "123456789abcdefghi");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j");
EXPECT_EQ(result, "123456789abcdefghij");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j", "k");
EXPECT_EQ(result, "123456789abcdefghijk");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j", "k", "l");
EXPECT_EQ(result, "123456789abcdefghijkl");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j", "k", "l", "m");
EXPECT_EQ(result, "123456789abcdefghijklm");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j", "k", "l", "m", "n");
EXPECT_EQ(result, "123456789abcdefghijklmn");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j", "k", "l", "m", "n", "o");
EXPECT_EQ(result, "123456789abcdefghijklmno");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j", "k", "l", "m", "n", "o", "p");
EXPECT_EQ(result, "123456789abcdefghijklmnop");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j", "k", "l", "m", "n", "o", "p", "q");
EXPECT_EQ(result, "123456789abcdefghijklmnopq");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, "a", "b", "c", "d", "e", "f",
"g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r",
"s", "t", "u", "v", "w", "x", "y", "z", "A", "B", "C", "D",
"E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P",
"Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z");
EXPECT_EQ(result,
"12345678910abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ");
}
TEST(StrAppend, Basics) {
string result = "existing text";
string strs[] = {"Hello", "Cruel", "World"};
absl::string_view pieces[] = {"Hello", "Cruel", "World"};
const char *c_strs[] = {"Hello", "Cruel", "World"};
int32 i32s[] = {'H', 'C', 'W'};
uint64 ui64s[] = {12345678910LL, 10987654321LL};
string::size_type old_size = result.size();
StrAppend(&result, strs[0]);
EXPECT_EQ(result.substr(old_size), "Hello");
old_size = result.size();
StrAppend(&result, strs[1], pieces[2]);
EXPECT_EQ(result.substr(old_size), "CruelWorld");
old_size = result.size();
StrAppend(&result, strs[0], ", ", pieces[2]);
EXPECT_EQ(result.substr(old_size), "Hello, World");
old_size = result.size();
StrAppend(&result, strs[0], ", ", strs[1], " ", strs[2], "!");
EXPECT_EQ(result.substr(old_size), "Hello, Cruel World!");
old_size = result.size();
StrAppend(&result, pieces[0], ", ", pieces[1], " ", pieces[2]);
EXPECT_EQ(result.substr(old_size), "Hello, Cruel World");
old_size = result.size();
StrAppend(&result, c_strs[0], ", ", c_strs[1], " ", c_strs[2]);
EXPECT_EQ(result.substr(old_size), "Hello, Cruel World");
old_size = result.size();
StrAppend(&result, "ASCII ", i32s[0], ", ", i32s[1], " ", i32s[2], "!");
EXPECT_EQ(result.substr(old_size), "ASCII 72, 67 87!");
old_size = result.size();
StrAppend(&result, ui64s[0], ", ", ui64s[1], "!");
EXPECT_EQ(result.substr(old_size), "12345678910, 10987654321!");
string one = "1";
old_size = result.size();
StrAppend(&result, "And a ", one.size(), " and a ", &result[2] - &result[0],
" and a ", one, " 2 3 4", "!");
EXPECT_EQ(result.substr(old_size), "And a 1 and a 2 and a 1 2 3 4!");
old_size = result.size();
StrAppend(&result,
"To output a char by ASCII/numeric value, use +: ", '!' + 0);
EXPECT_EQ(result.substr(old_size),
"To output a char by ASCII/numeric value, use +: 33");
float f = 100000.5;
old_size = result.size();
StrAppend(&result, "A hundred K and a half is ", f);
EXPECT_EQ(result.substr(old_size), "A hundred K and a half is 100000.5");
double d = f;
d *= d;
old_size = result.size();
StrAppend(&result, "A hundred K and a half squared is ", d);
EXPECT_EQ(result.substr(old_size),
"A hundred K and a half squared is 10000100000.25");
old_size = result.size();
StrAppend(&result, 1, 22, 333, 4444, 55555, 666666, 7777777, 88888888, 9);
EXPECT_EQ(result.substr(old_size), "1223334444555556666667777777888888889");
old_size = result.size();
StrAppend(&result, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, "a", "b", "c", "d", "e",
"f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r",
"s", "t", "u", "v", "w", "x", "y", "z", "A", "B", "C", "D", "E",
"F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R",
"S", "T", "U", "V", "W", "X", "Y", "Z",
"No limit thanks to C++11's variadic templates");
EXPECT_EQ(result.substr(old_size),
"12345678910abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
"No limit thanks to C++11's variadic templates");
}
TEST(StrAppend, Death) {
string s = "self";
EXPECT_DEBUG_DEATH(StrAppend(&s, s.c_str() + 1), "Check failed:");
EXPECT_DEBUG_DEATH(StrAppend(&s, s), "Check failed:");
}
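// Cross-checks StrCat's Hex formatting against printf-style formatting of the
// same value and padding.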
static void CheckHex64(uint64 v) {
string actual = StrCat(Hex(v, kZeroPad16));
string expected = Printf("%016llx", static_cast<unsigned long long>(v));
EXPECT_EQ(expected, actual) << " decimal value " << v;
actual = StrCat(Hex(v, kZeroPad8));
expected = Printf("%08llx", static_cast<unsigned long long>(v));
EXPECT_EQ(expected, actual) << " decimal value " << v;
actual = StrCat(Hex(v));
expected = Printf("%llx", static_cast<unsigned long long>(v));
EXPECT_EQ(expected, actual) << " decimal value " << v;
}
static void CheckHex32(uint32 v) {
string actual = StrCat(Hex(v, kZeroPad8));
string expected = Printf("%08x", v);
EXPECT_EQ(expected, actual) << " decimal value " << v;
actual = StrCat(Hex(v));
expected = Printf("%x", v);
EXPECT_EQ(expected, actual) << " decimal value " << v;
}
static void CheckHexSigned32(int32_t v) {
string actual = StrCat(Hex(v, kZeroPad8));
string expected = Printf("%08x", v);
EXPECT_EQ(expected, actual) << " decimal value " << v;
actual = StrCat(Hex(v));
expected = Printf("%x", v);
EXPECT_EQ(expected, actual) << " decimal value " << v;
}
static void TestFastPrints() {
for (int i = 0; i < 10000; i++) {
CheckHex64(i);
CheckHex32(i);
CheckHexSigned32(i);
CheckHexSigned32(-i);
}
CheckHex64(0x123456789abcdef0ull);
CheckHex32(0x12345678);
int8_t minus_one_8bit = -1;
EXPECT_EQ("ff", StrCat(Hex(minus_one_8bit)));
int16_t minus_one_16bit = -1;
EXPECT_EQ("ffff", StrCat(Hex(minus_one_16bit)));
}
TEST(Numbers, TestFunctionsMovedOverFromNumbersMain) { TestFastPrints(); }
}
}
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/strcat.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/strcat_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ef1402fc-707f-440d-bf71-7892e6bea482 | cpp | google/libaddressinput | string_util | cpp/src/util/string_util.cc | cpp/test/util/string_util_test.cc
#include "string_util.h"
#include <cassert>
#include <cstddef>
#include <stdint.h>
#include <string>
#include <vector>
namespace i18n {
namespace addressinput {
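// Replaces numbered placeholders ($1, $2, ...) in format_string with the
// corresponding entries of subst. Runs of '$' escape a literal dollar sign,
// and indices past subst.size() expand to the empty string.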
std::string DoReplaceStringPlaceholders(const std::string& format_string,
const std::vector<std::string>& subst) {
size_t substitutions = subst.size();
size_t sub_length = 0;
for (std::vector<std::string>::const_iterator iter = subst.begin();
iter != subst.end(); ++iter) {
sub_length += iter->length();
}
std::string formatted;
formatted.reserve(format_string.length() + sub_length);
for (std::string::const_iterator i = format_string.begin();
i != format_string.end(); ++i) {
if ('$' == *i) {
if (i + 1 != format_string.end()) {
++i;
assert('$' == *i || '1' <= *i);
if ('$' == *i) {
while (i != format_string.end() && '$' == *i) {
formatted.push_back('$');
++i;
}
--i;
} else {
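        // Parse the decimal index following '$'; placeholders are 1-based.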
uintptr_t index = 0;
while (i != format_string.end() && '0' <= *i && *i <= '9') {
index *= 10;
index += *i - '0';
++i;
}
--i;
index -= 1;
if (index < substitutions)
formatted.append(subst.at(index));
}
}
} else {
formatted.push_back(*i);
}
}
return formatted;
}
}
}
#include "util/string_util.h"
#include <string>
#include <vector>
#include <gtest/gtest.h>
namespace {
using i18n::addressinput::DoReplaceStringPlaceholders;
TEST(StringUtilTest, Ok) {
const std::vector<std::string> subst{
"A",
"B",
"C",
};
EXPECT_EQ("aA,bB,cC", DoReplaceStringPlaceholders("a$1,b$2,c$3", subst));
}
TEST(StringUtilTest, FewParameters) {
const std::vector<std::string> subst{
"A",
"B",
"C",
};
EXPECT_EQ("aA,bB,cC,d,aA",
DoReplaceStringPlaceholders("a$1,b$2,c$3,d$4,a$1", subst));
}
TEST(StringUtilTest, MoreThan9Parameters) {
const std::vector<std::string> subst{
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
};
EXPECT_EQ("aA,bB,cC,dD,eE,fF,gG,hH,iI,jJ,kK,aA",
DoReplaceStringPlaceholders("a$1,b$2,c$3,d$4,e$5,f$6,g$7,h$8,i$9,"
"j$10,k$11,a$1",
subst));
}
TEST(StringUtilTest, ConsecutiveDollarSigns) {
const std::vector<std::string> subst{
"A",
"B",
"C",
};
EXPECT_EQ("$1 $$2 $$$3",
DoReplaceStringPlaceholders("$$1 $$$2 $$$$3", subst));
}
}
https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/util/string_util.cc | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/util/string_util_test.cc | 2610f7b1043d6784ada41392fc9392d1ea09ea07
62bf0644-5132-4e87-bff3-4b0982acdd07 | cpp | tensorflow/tensorflow | infeed_token_propagation | third_party/xla/xla/service/infeed_token_propagation.cc | third_party/xla/xla/service/infeed_token_propagation_test.cc
#include "xla/service/infeed_token_propagation.h"
#include <cstdint>
#include <string_view>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
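// An infeed is dangling if it is unsharded, consumes a fresh token from a
// zero-operand after-all, and nothing reads its output token (tuple index 1).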
bool IsDanglingInfeed(HloInstruction* infeed) {
CHECK(infeed->opcode() == HloOpcode::kInfeed);
if (infeed->has_sharding()) {
return false;
}
if (const HloInstruction* after_all = infeed->operand(0);
after_all->opcode() != HloOpcode::kAfterAll ||
after_all->operand_count() != 0) {
return false;
}
for (const HloInstruction* user : infeed->users()) {
if (user->opcode() == HloOpcode::kGetTupleElement &&
user->tuple_index() == 1) {
return false;
}
}
return true;
}
bool IsDanglingOutfeed(HloInstruction* outfeed) {
CHECK(outfeed->opcode() == HloOpcode::kOutfeed);
if (outfeed->has_sharding()) {
return false;
}
if (const HloInstruction* after_all = outfeed->operand(1);
after_all->opcode() != HloOpcode::kAfterAll ||
after_all->operand_count() != 0) {
return false;
}
if (outfeed->user_count() != 0) {
return false;
}
return true;
}
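// Rebuilds a tuple element-by-element through get-tuple-elements, yielding a
// copy that can stand in for the original while the original is mutated.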
HloInstruction* ReconstructTuple(HloInstruction* tuple) {
CHECK(tuple->shape().IsTuple());
HloComputation* computation = tuple->parent();
std::vector<HloInstruction*> gtes;
gtes.resize(tuple->shape().tuple_shapes_size());
for (int64_t idx = 0; idx < gtes.size(); ++idx) {
gtes[idx] = computation->AddInstruction(
HloInstruction::CreateGetTupleElement(tuple, idx));
}
return computation->AddInstruction(HloInstruction::CreateTuple(gtes));
}
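// Grows tuple's shape by one token element, repoints existing users at a
// reconstructed copy of the old value, and returns a get-tuple-element of the
// new token slot. If add_token_operand is set, a fresh token operand is
// appended as well.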
absl::StatusOr<HloInstruction*> InsertTokenIntoTuple(HloInstruction* tuple,
bool add_token_operand) {
CHECK(tuple->shape().IsTuple());
HloComputation* computation = tuple->parent();
std::vector<HloInstruction*> original_users = tuple->users();
HloInstruction* original_tuple = ReconstructTuple(tuple);
for (HloInstruction* original_user : original_users) {
for (int64_t idx : original_user->operand_indices(tuple)) {
TF_RETURN_IF_ERROR(
original_user->ReplaceOperandWith(idx, original_tuple));
}
}
*tuple->mutable_shape()->add_tuple_shapes() = ShapeUtil::MakeTokenShape();
if (add_token_operand) {
tuple->AppendOperand(
computation->AddInstruction(HloInstruction::CreateToken()));
}
HloInstruction* input_token_gte =
computation->AddInstruction(HloInstruction::CreateGetTupleElement(
tuple, tuple->shape().tuple_shapes_size() - 1));
return input_token_gte;
}
}
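// Canonicalizes a conditional so tokens can be threaded through it: branch
// parameters, branch operands, and branch roots are all wrapped into tuples.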
absl::Status CanonicalizeConditionalInstruction(HloInstruction* conditional) {
CHECK_EQ(conditional->opcode(), HloOpcode::kConditional);
for (HloComputation* branch : conditional->branch_computations()) {
HloInstruction* parameter = branch->parameter_instruction(0);
if (!parameter->shape().IsTuple()) {
*parameter->mutable_shape() =
ShapeUtil::MakeTupleShape({parameter->shape()});
HloInstruction* original = branch->AddInstruction(
HloInstruction::CreateGetTupleElement(parameter, 0));
TF_RETURN_IF_ERROR(parameter->ReplaceAllUsesWithDifferentShape(original));
}
int64_t branch_operand_idx = conditional->branch_index(branch) + 1;
HloInstruction* branch_tuple =
conditional->mutable_operand(branch_operand_idx);
if (!branch_tuple->shape().IsTuple()) {
branch_tuple = conditional->parent()->AddInstruction(
HloInstruction::CreateTuple({branch_tuple}));
TF_RETURN_IF_ERROR(conditional->ReplaceOperandWithDifferentShape(
branch_operand_idx, branch_tuple));
}
if (branch_tuple->opcode() == HloOpcode::kParameter) {
branch_tuple = ReconstructTuple(branch_tuple);
TF_RETURN_IF_ERROR(
conditional->ReplaceOperandWith(branch_operand_idx, branch_tuple));
}
HloInstruction* root = branch->root_instruction();
if (root->opcode() != HloOpcode::kTuple) {
root = ReconstructTuple(root);
branch->set_root_instruction(root);
}
}
CHECK(conditional->shape().IsTuple());
if (conditional->IsRoot()) {
HloInstruction* new_root = ReconstructTuple(conditional);
conditional->parent()->set_root_instruction(new_root);
}
return absl::OkStatus();
}
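// Same canonicalization for while loops: the body and condition parameters,
// the init operand, the body root, and the loop result all become tuples.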
absl::Status CanonicalizeWhileInstruction(HloInstruction* loop) {
CHECK_EQ(loop->opcode(), HloOpcode::kWhile);
HloComputation* body = loop->while_body();
HloComputation* cond = loop->while_condition();
HloInstruction* body_parameter = body->parameter_instruction(0);
if (!body_parameter->shape().IsTuple()) {
*body_parameter->mutable_shape() =
ShapeUtil::MakeTupleShape({body_parameter->shape()});
HloInstruction* original = body->AddInstruction(
HloInstruction::CreateGetTupleElement(body_parameter, 0));
TF_RETURN_IF_ERROR(
body_parameter->ReplaceAllUsesWithDifferentShape(original));
}
HloInstruction* root = body->root_instruction();
if (!root->shape().IsTuple()) {
root = body->AddInstruction(HloInstruction::CreateTuple({root}));
    body->set_root_instruction(root, /*accept_different_shape=*/true);
}
HloInstruction* cond_parameter = cond->parameter_instruction(0);
if (!cond_parameter->shape().IsTuple()) {
*cond_parameter->mutable_shape() =
ShapeUtil::MakeTupleShape({cond_parameter->shape()});
HloInstruction* original = cond->AddInstruction(
HloInstruction::CreateGetTupleElement(cond_parameter, 0));
TF_RETURN_IF_ERROR(
cond_parameter->ReplaceAllUsesWithDifferentShape(original));
}
if (!loop->shape().IsTuple()) {
*loop->mutable_shape() = ShapeUtil::MakeTupleShape({loop->shape()});
HloInstruction* original = loop->parent()->AddInstruction(
HloInstruction::CreateGetTupleElement(loop, 0));
TF_RETURN_IF_ERROR(loop->ReplaceAllUsesWithDifferentShape(original));
}
HloInstruction* loop_tuple = loop->mutable_operand(0);
if (!loop_tuple->shape().IsTuple()) {
loop_tuple = loop->parent()->AddInstruction(
HloInstruction::CreateTuple({loop_tuple}));
TF_RETURN_IF_ERROR(loop->ReplaceOperandWithDifferentShape(0, loop_tuple));
}
if (loop_tuple->opcode() == HloOpcode::kParameter) {
loop_tuple = ReconstructTuple(loop_tuple);
TF_RETURN_IF_ERROR(loop->ReplaceOperandWith(0, loop_tuple));
}
if (root->opcode() != HloOpcode::kTuple) {
root = ReconstructTuple(root);
body->set_root_instruction(root);
}
if (loop->IsRoot()) {
HloInstruction* new_root = ReconstructTuple(loop);
loop->parent()->set_root_instruction(new_root);
}
return absl::OkStatus();
}
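// Threads the token through the conditional that calls the current
// computation: every branch root gains a token element, and the owning
// branch's operand supplies the next input token.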
absl::Status InfeedTokenPropagation::PropagateTokenThroughConditionalBranch() {
HloComputation* comp = dangling_instruction_->parent();
dangling_instruction_ = call_graph_->GetComputationCallers(comp)[0];
CHECK_EQ(dangling_instruction_->opcode(), HloOpcode::kConditional);
for (HloComputation* branch : dangling_instruction_->branch_computations()) {
HloInstruction* root = branch->root_instruction();
if (branch == comp) {
      TF_RETURN_IF_ERROR(
          InsertTokenIntoTuple(root, /*add_token_operand=*/false).status());
root->AppendOperand(output_token_);
} else {
      TF_RETURN_IF_ERROR(
          InsertTokenIntoTuple(root, /*add_token_operand=*/true).status());
}
}
HloInstruction* parameter = comp->parameter_instruction(0);
  TF_ASSIGN_OR_RETURN(
      HloInstruction * input_token_gte,
      InsertTokenIntoTuple(parameter, /*add_token_operand=*/false));
TF_RETURN_IF_ERROR(input_token_->ReplaceAllUsesWith(input_token_gte));
int64_t branch_operand_idx = dangling_instruction_->branch_index(comp) + 1;
HloInstruction* branch_tuple =
dangling_instruction_->mutable_operand(branch_operand_idx);
  TF_ASSIGN_OR_RETURN(
      HloInstruction * next_input_token_gte,
      InsertTokenIntoTuple(branch_tuple, /*add_token_operand=*/true));
TF_RETURN_IF_ERROR(dangling_instruction_->ReplaceOperandWithDifferentShape(
branch_operand_idx, branch_tuple));
input_token_ =
branch_tuple->mutable_operand(next_input_token_gte->tuple_index());
  TF_ASSIGN_OR_RETURN(
      output_token_,
      InsertTokenIntoTuple(dangling_instruction_, /*add_token_operand=*/false));
return absl::OkStatus();
}
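// Threads the token through a while loop: the body root and parameter, the
// condition parameter, the init tuple, and the loop result each gain a token
// element.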
absl::Status InfeedTokenPropagation::PropagateTokenThroughWhileBody() {
HloComputation* comp = dangling_instruction_->parent();
dangling_instruction_ = call_graph_->GetComputationCallers(comp)[0];
CHECK_EQ(dangling_instruction_->opcode(), HloOpcode::kWhile);
HloInstruction* root = comp->root_instruction();
  TF_RETURN_IF_ERROR(
      InsertTokenIntoTuple(root, /*add_token_operand=*/false).status());
root->AppendOperand(output_token_);
HloInstruction* body_parameter = comp->parameter_instruction(0);
  TF_ASSIGN_OR_RETURN(
      HloInstruction * input_token_gte,
      InsertTokenIntoTuple(body_parameter, /*add_token_operand=*/false));
TF_RETURN_IF_ERROR(input_token_->ReplaceAllUsesWith(input_token_gte));
HloComputation* cond = dangling_instruction_->while_condition();
HloInstruction* cond_parameter = cond->parameter_instruction(0);
  TF_RETURN_IF_ERROR(
      InsertTokenIntoTuple(cond_parameter, /*add_token_operand=*/false)
          .status());
HloInstruction* while_tuple = dangling_instruction_->mutable_operand(0);
  TF_ASSIGN_OR_RETURN(
      input_token_,
      InsertTokenIntoTuple(while_tuple, /*add_token_operand=*/true));
TF_RETURN_IF_ERROR(
dangling_instruction_->ReplaceOperandWithDifferentShape(0, while_tuple));
  TF_ASSIGN_OR_RETURN(
      output_token_,
      InsertTokenIntoTuple(dangling_instruction_, /*add_token_operand=*/false));
return absl::OkStatus();
}
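// Walks up the call graph from the dangling instruction, pushing the token
// through conditionals and while bodies until the entry computation (or an
// unhandled caller) is reached.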
absl::Status InfeedTokenPropagation::PropagateToken() {
HloComputation* comp = dangling_instruction_->parent();
if (comp->IsEntryComputation()) {
return absl::OkStatus();
}
VLOG(2) << "Propagating tokens for: " << dangling_instruction_->name();
HloInstruction* caller = call_graph_->GetComputationCallers(comp)[0];
if (caller->has_sharding()) {
return absl::OkStatus();
}
if (caller->opcode() == HloOpcode::kConditional) {
TF_RETURN_IF_ERROR(CanonicalizeConditionalInstruction(caller));
TF_RETURN_IF_ERROR(PropagateTokenThroughConditionalBranch());
} else if (caller->opcode() == HloOpcode::kWhile &&
comp == caller->while_body()) {
TF_RETURN_IF_ERROR(CanonicalizeWhileInstruction(caller));
TF_RETURN_IF_ERROR(PropagateTokenThroughWhileBody());
} else {
VLOG(2) << "Unhandled computation: " << comp->name();
return absl::OkStatus();
}
return PropagateToken();
}
absl::StatusOr<bool> InfeedTokenPropagation::Run(
HloModule* module,
const absl::flat_hash_set<std::string_view>& execution_threads) {
VLOG(5) << "Before InfeedTokenPropagation:";
XLA_VLOG_LINES(5, module->ToString());
std::vector<HloInstruction*> dangling_infeeds;
std::vector<HloInstruction*> dangling_outfeeds;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
if (!computation->IsEntryComputation()) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kInfeed &&
IsDanglingInfeed(instruction)) {
VLOG(1) << "Found dangling infeed: " << instruction->ToString();
dangling_infeeds.push_back(instruction);
} else if (instruction->opcode() == HloOpcode::kOutfeed &&
IsDanglingOutfeed(instruction)) {
VLOG(1) << "Found dangling outfeed: " << instruction->ToString();
dangling_outfeeds.push_back(instruction);
}
}
}
}
bool changed = !dangling_infeeds.empty() || !dangling_outfeeds.empty();
if (changed) {
call_graph_ = CallGraph::Build(module);
if (!call_graph_->IsFlattened()) {
return FailedPrecondition(
"Call graph must be flattened before infeed token propagation.");
}
}
for (HloInstruction* dangling_infeed : dangling_infeeds) {
dangling_instruction_ = dangling_infeed;
input_token_ = dangling_infeed->mutable_operand(0);
output_token_ = dangling_infeed->AddInstruction(
HloInstruction::CreateGetTupleElement(dangling_infeed, 1));
TF_RETURN_IF_ERROR(PropagateToken());
}
for (HloInstruction* dangling_outfeed : dangling_outfeeds) {
dangling_instruction_ = dangling_outfeed;
input_token_ = dangling_outfeed->mutable_operand(1);
output_token_ = dangling_outfeed;
TF_RETURN_IF_ERROR(PropagateToken());
}
if (changed) {
TF_RETURN_IF_ERROR(
TupleSimplifier().Run(module, execution_threads).status());
TF_RETURN_IF_ERROR(HloDCE().Run(module, execution_threads).status());
}
VLOG(5) << "After InfeedTokenPropagation:";
XLA_VLOG_LINES(5, module->ToString());
return changed;
}
}
#include "xla/service/infeed_token_propagation.h"
#include <string_view>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
class InfeedTokenPropagationTest : public HloTestBase {
protected:
InfeedTokenPropagationTest() = default;
};
TEST_F(InfeedTokenPropagationTest, EntryComputationInfeed) {
constexpr std::string_view hlo = R"(
HloModule main
ENTRY main {
token.0 = after-all()
infeed.0 = (s32[], token[]) infeed(token.0)
ROOT gte.0 = get-tuple-element(infeed.0), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
InfeedTokenPropagation itp;
TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(InfeedTokenPropagationTest, EntryComputationOutfeed) {
constexpr std::string_view hlo = R"(
HloModule main
ENTRY main {
arg.0 = s32[] parameter(0)
tuple.0 = tuple(arg.0)
token.0 = after-all()
outfeed.0 = token[] outfeed(tuple.0, token.0), outfeed_shape=(s32[])
ROOT tuple.1 = tuple()
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
InfeedTokenPropagation itp;
TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(InfeedTokenPropagationTest, ConditionalInfeed) {
constexpr std::string_view hlo = R"(
HloModule main
true_comp {
arg.0 = () parameter(0)
token.0 = after-all()
infeed.0 = (s32[], token[]) infeed(token.0)
ROOT tuple.0 = tuple()
}
false_comp {
arg.0 = () parameter(0)
ROOT tuple.0 = tuple()
}
ENTRY main {
pred.0 = pred[] constant(true)
true_tuple.0 = tuple()
false_tuple.0 = tuple()
ROOT cond.0 = () conditional(pred.0, true_tuple.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
InfeedTokenPropagation itp;
TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* cond = FindInstruction(module.get(), "cond.0");
EXPECT_EQ(cond->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken());
HloInstruction* true_tuple = FindInstruction(module.get(), "true_tuple.0");
EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(true_tuple->shape().tuple_shapes()[0].IsToken());
HloInstruction* false_tuple = FindInstruction(module.get(), "false_tuple.0");
EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0);
HloComputation* true_comp = FindComputation(module.get(), "true_comp");
EXPECT_THAT(true_comp->root_instruction(),
op::Tuple(op::GetTupleElement(op::Infeed(), 1)));
HloComputation* false_comp = FindComputation(module.get(), "false_comp");
EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll()));
}
TEST_F(InfeedTokenPropagationTest, ConditionalOutfeed) {
constexpr std::string_view hlo = R"(
HloModule main
true_comp {
arg.0 = (s32[]) parameter(0)
token.0 = after-all()
outfeed.0 = token[] outfeed(arg.0, token.0), outfeed_shape=(s32[])
ROOT tuple.0 = tuple()
}
false_comp {
arg.0 = () parameter(0)
ROOT tuple.0 = tuple()
}
ENTRY main {
arg.0 = s32[] parameter(0)
pred.0 = pred[] constant(true)
true_tuple.0 = tuple(arg.0)
false_tuple.0 = tuple()
ROOT cond.0 = () conditional(pred.0, true_tuple.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
InfeedTokenPropagation itp;
TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* cond = FindInstruction(module.get(), "cond.0");
EXPECT_EQ(cond->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken());
HloInstruction* true_tuple = FindInstruction(module.get(), "true_tuple.0");
EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 2);
EXPECT_TRUE(true_tuple->shape().tuple_shapes()[1].IsToken());
HloInstruction* false_tuple = FindInstruction(module.get(), "false_tuple.0");
EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0);
HloComputation* true_comp = FindComputation(module.get(), "true_comp");
EXPECT_THAT(true_comp->root_instruction(), op::Tuple(op::Outfeed()));
HloComputation* false_comp = FindComputation(module.get(), "false_comp");
EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll()));
}
TEST_F(InfeedTokenPropagationTest, ConditionalDuplicateOperand) {
constexpr std::string_view hlo = R"(
HloModule main
true_comp {
arg.0 = () parameter(0)
token.0 = after-all()
infeed.0 = (s32[], token[]) infeed(token.0)
ROOT tuple.0 = tuple()
}
false_comp {
arg.0 = () parameter(0)
ROOT tuple.0 = tuple()
}
ENTRY main {
pred.0 = pred[] constant(true)
tuple.0 = tuple()
ROOT cond.0 = () conditional(pred.0, tuple.0, tuple.0), true_computation=true_comp, false_computation=false_comp
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
InfeedTokenPropagation itp;
TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* cond = FindInstruction(module.get(), "cond.0");
EXPECT_EQ(cond->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken());
const HloInstruction* true_tuple = cond->operand(1);
EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(true_tuple->shape().tuple_shapes()[0].IsToken());
const HloInstruction* false_tuple = cond->operand(2);
EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0);
HloComputation* true_comp = FindComputation(module.get(), "true_comp");
EXPECT_THAT(true_comp->root_instruction(),
op::Tuple(op::GetTupleElement(op::Infeed(), 1)));
HloComputation* false_comp = FindComputation(module.get(), "false_comp");
EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll()));
}
TEST_F(InfeedTokenPropagationTest, NonTupleConditional) {
constexpr std::string_view hlo = R"(
HloModule main
true_comp {
arg.0 = s32[] parameter(0)
outfeed_tuple.0 = tuple(arg.0)
token.0 = after-all()
outfeed.0 = token[] outfeed(outfeed_tuple.0, token.0), outfeed_shape=(s32[])
ROOT tuple.0 = tuple()
}
false_comp {
arg.0 = () parameter(0)
ROOT tuple.0 = tuple()
}
ENTRY main {
arg.0 = s32[] parameter(0)
pred.0 = pred[] constant(true)
false_tuple.0 = tuple()
ROOT cond.0 = () conditional(pred.0, arg.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
InfeedTokenPropagation itp;
TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* cond = FindInstruction(module.get(), "cond.0");
EXPECT_EQ(cond->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken());
HloInstruction* true_tuple = cond->mutable_operand(1);
EXPECT_TRUE(true_tuple->shape().IsTuple());
EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 2);
EXPECT_TRUE(true_tuple->shape().tuple_shapes()[1].IsToken());
HloInstruction* false_tuple = FindInstruction(module.get(), "false_tuple.0");
EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0);
HloComputation* true_comp = FindComputation(module.get(), "true_comp");
EXPECT_THAT(true_comp->root_instruction(), op::Tuple(op::Outfeed()));
HloComputation* false_comp = FindComputation(module.get(), "false_comp");
EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll()));
}
TEST_F(InfeedTokenPropagationTest, DisjointConditionalOutfeed) {
constexpr std::string_view hlo = R"(
HloModule main
true_comp {
ROOT arg.0 = () parameter(0)
one.0 = s32[] constant(1)
outfeed_tuple.0 = tuple(one.0)
token.0 = after-all()
outfeed.0 = token[] outfeed(outfeed_tuple.0, token.0), outfeed_shape=(s32[])
}
false_comp {
arg.0 = () parameter(0)
ROOT tuple.0 = tuple()
}
ENTRY main {
pred.0 = pred[] constant(true)
true_tuple.0 = tuple()
false_tuple.0 = tuple()
ROOT cond.0 = () conditional(pred.0, true_tuple.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
InfeedTokenPropagation itp;
TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* cond = FindInstruction(module.get(), "cond.0");
EXPECT_EQ(cond->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken());
HloInstruction* true_tuple = FindInstruction(module.get(), "true_tuple.0");
EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(true_tuple->shape().tuple_shapes()[0].IsToken());
HloInstruction* false_tuple = FindInstruction(module.get(), "false_tuple.0");
EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0);
HloComputation* true_comp = FindComputation(module.get(), "true_comp");
EXPECT_THAT(true_comp->root_instruction(), op::Tuple(op::Outfeed()));
HloComputation* false_comp = FindComputation(module.get(), "false_comp");
EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll()));
}
TEST_F(InfeedTokenPropagationTest, WhileInfeed) {
constexpr std::string_view hlo = R"(
HloModule main
comp {
arg.0 = () parameter(0)
token.0 = after-all()
infeed.0 = (s32[], token[]) infeed(token.0)
ROOT tuple.0 = tuple()
}
cond {
arg.0 = () parameter(0)
ROOT true.0 = pred[] constant(true)
}
ENTRY main {
while_tuple.0 = tuple()
ROOT while.0 = () while(while_tuple.0), condition=cond, body=comp
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
InfeedTokenPropagation itp;
TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* loop = FindInstruction(module.get(), "while.0");
EXPECT_EQ(loop->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(loop->shape().tuple_shapes()[0].IsToken());
HloInstruction* loop_tuple = FindInstruction(module.get(), "while_tuple.0");
EXPECT_EQ(loop_tuple->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[0].IsToken());
HloComputation* body_comp = FindComputation(module.get(), "comp");
EXPECT_THAT(body_comp->root_instruction(),
op::Tuple(op::GetTupleElement(op::Infeed(), 1)));
HloInstruction* body_param = body_comp->parameter_instruction(0);
EXPECT_EQ(body_param->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(body_param->shape().tuple_shapes()[0].IsToken());
HloComputation* cond_comp = FindComputation(module.get(), "cond");
HloInstruction* cond_param = cond_comp->parameter_instruction(0);
EXPECT_EQ(cond_param->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(cond_param->shape().tuple_shapes()[0].IsToken());
}
TEST_F(InfeedTokenPropagationTest, WhileOutfeed) {
constexpr std::string_view hlo = R"(
HloModule main
comp {
arg.0 = (s32[]) parameter(0)
token.0 = after-all()
outfeed.0 = token[] outfeed(arg.0, token.0), outfeed_shape=(s32[])
gte.0 = get-tuple-element(arg.0), index=0
ROOT tuple.0 = tuple(gte.0)
}
cond {
arg.0 = (s32[]) parameter(0)
ROOT true.0 = pred[] constant(true)
}
ENTRY main {
arg.0 = s32[] parameter(0)
while_tuple.0 = tuple(arg.0)
ROOT while.0 = (s32[]) while(while_tuple.0), condition=cond, body=comp
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
InfeedTokenPropagation itp;
TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* loop = FindInstruction(module.get(), "while.0");
EXPECT_EQ(loop->shape().tuple_shapes_size(), 2);
EXPECT_TRUE(loop->shape().tuple_shapes()[1].IsToken());
HloInstruction* loop_tuple = FindInstruction(module.get(), "while_tuple.0");
EXPECT_EQ(loop_tuple->shape().tuple_shapes_size(), 2);
EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[1].IsToken());
HloComputation* body_comp = FindComputation(module.get(), "comp");
EXPECT_THAT(body_comp->root_instruction(),
op::Tuple(op::GetTupleElement(), op::Outfeed()));
HloInstruction* body_param = body_comp->parameter_instruction(0);
EXPECT_EQ(body_param->shape().tuple_shapes_size(), 2);
EXPECT_TRUE(body_param->shape().tuple_shapes()[1].IsToken());
HloComputation* cond_comp = FindComputation(module.get(), "cond");
HloInstruction* cond_param = cond_comp->parameter_instruction(0);
EXPECT_EQ(cond_param->shape().tuple_shapes_size(), 2);
EXPECT_TRUE(cond_param->shape().tuple_shapes()[1].IsToken());
}
TEST_F(InfeedTokenPropagationTest, DisjointWhileOutfeed) {
constexpr std::string_view hlo = R"(
HloModule main
comp {
ROOT arg.0 = () parameter(0)
one.0 = s32[] constant(1)
outfeed_tuple.0 = tuple(one.0)
token.0 = after-all()
outfeed.0 = token[] outfeed(outfeed_tuple.0, token.0), outfeed_shape=(s32[])
}
cond {
arg.0 = () parameter(0)
ROOT true.0 = pred[] constant(true)
}
ENTRY main {
while_tuple.0 = tuple()
ROOT while.0 = () while(while_tuple.0), condition=cond, body=comp
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
InfeedTokenPropagation itp;
TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* loop = FindInstruction(module.get(), "while.0");
EXPECT_EQ(loop->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(loop->shape().tuple_shapes()[0].IsToken());
HloInstruction* loop_tuple = FindInstruction(module.get(), "while_tuple.0");
EXPECT_EQ(loop_tuple->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[0].IsToken());
HloComputation* body_comp = FindComputation(module.get(), "comp");
EXPECT_THAT(body_comp->root_instruction(), op::Tuple(op::Outfeed()));
HloInstruction* body_param = body_comp->parameter_instruction(0);
EXPECT_EQ(body_param->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(body_param->shape().tuple_shapes()[0].IsToken());
HloComputation* cond_comp = FindComputation(module.get(), "cond");
HloInstruction* cond_param = cond_comp->parameter_instruction(0);
EXPECT_EQ(cond_param->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(cond_param->shape().tuple_shapes()[0].IsToken());
}
TEST_F(InfeedTokenPropagationTest, NonTupleWhile) {
constexpr std::string_view hlo = R"(
HloModule main
comp {
ROOT arg.0 = s32[] parameter(0)
tuple.0 = tuple(arg.0)
token.0 = after-all()
outfeed.0 = token[] outfeed(tuple.0, token.0), outfeed_shape=(s32[])
}
cond {
arg.0 = s32[] parameter(0)
ROOT true.0 = pred[] constant(true)
}
ENTRY main {
arg.0 = s32[] parameter(0)
ROOT while.0 = s32[] while(arg.0), condition=cond, body=comp
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
InfeedTokenPropagation itp;
TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* loop = FindInstruction(module.get(), "while.0");
EXPECT_TRUE(loop->shape().IsTuple());
EXPECT_EQ(loop->shape().tuple_shapes_size(), 2);
EXPECT_TRUE(loop->shape().tuple_shapes()[1].IsToken());
EXPECT_THAT(loop->operand(0), op::Tuple(op::Parameter(), op::AfterAll()));
HloComputation* body_comp = FindComputation(module.get(), "comp");
EXPECT_THAT(body_comp->root_instruction(),
op::Tuple(op::GetTupleElement(), op::Outfeed()));
HloInstruction* body_param = body_comp->parameter_instruction(0);
EXPECT_EQ(body_param->shape().tuple_shapes_size(), 2);
EXPECT_TRUE(body_param->shape().tuple_shapes()[1].IsToken());
HloComputation* cond_comp = FindComputation(module.get(), "cond");
HloInstruction* cond_param = cond_comp->parameter_instruction(0);
EXPECT_EQ(cond_param->shape().tuple_shapes_size(), 2);
EXPECT_TRUE(cond_param->shape().tuple_shapes()[1].IsToken());
}
TEST_F(InfeedTokenPropagationTest, NestedInfeedOutfeed) {
constexpr std::string_view hlo = R"(
HloModule main
true_comp {
arg.0 = (s32[]) parameter(0)
token.0 = after-all()
outfeed.0 = token[] outfeed(arg.0, token.0), outfeed_shape=(s32[])
ROOT tuple.0 = tuple()
}
false_comp {
arg.0 = () parameter(0)
ROOT tuple.0 = tuple()
}
comp {
arg.0 = () parameter(0)
token.0 = after-all()
infeed.0 = (s32[], token[]) infeed(token.0)
gte.0 = get-tuple-element(infeed.0), index=0
pred.0 = pred[] constant(true)
true_tuple.0 = tuple(gte.0)
false_tuple.0 = tuple()
ROOT cond.0 = () conditional(pred.0, true_tuple.0, false_tuple.0), true_computation=true_comp, false_computation=false_comp
}
cond {
arg.0 = () parameter(0)
ROOT true.0 = pred[] constant(true)
}
ENTRY main {
while_tuple.0 = tuple()
ROOT while.0 = () while(while_tuple.0), condition=cond, body=comp
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
InfeedTokenPropagation itp;
TF_ASSERT_OK_AND_ASSIGN(bool changed, itp.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* loop = FindInstruction(module.get(), "while.0");
EXPECT_EQ(loop->shape().tuple_shapes_size(), 2);
EXPECT_TRUE(loop->shape().tuple_shapes()[0].IsToken());
EXPECT_TRUE(loop->shape().tuple_shapes()[1].IsToken());
HloInstruction* loop_tuple = FindInstruction(module.get(), "while_tuple.0");
EXPECT_EQ(loop_tuple->shape().tuple_shapes_size(), 2);
EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[0].IsToken());
EXPECT_TRUE(loop_tuple->shape().tuple_shapes()[1].IsToken());
HloComputation* body_comp = FindComputation(module.get(), "comp");
EXPECT_THAT(body_comp->root_instruction(),
op::Tuple(op::GetTupleElement(op::Infeed(), 1),
op::GetTupleElement(op::Conditional(), 0)));
HloInstruction* cond = FindInstruction(module.get(), "cond.0");
EXPECT_EQ(cond->shape().tuple_shapes_size(), 1);
EXPECT_TRUE(cond->shape().tuple_shapes()[0].IsToken());
HloInstruction* true_tuple = FindInstruction(module.get(), "true_tuple.0");
EXPECT_EQ(true_tuple->shape().tuple_shapes_size(), 2);
EXPECT_TRUE(true_tuple->shape().tuple_shapes()[1].IsToken());
HloInstruction* false_tuple = FindInstruction(module.get(), "false_tuple.0");
EXPECT_EQ(false_tuple->shape().tuple_shapes_size(), 0);
HloComputation* true_comp = FindComputation(module.get(), "true_comp");
EXPECT_THAT(true_comp->root_instruction(), op::Tuple(op::Outfeed()));
HloComputation* false_comp = FindComputation(module.get(), "false_comp");
EXPECT_THAT(false_comp->root_instruction(), op::Tuple(op::AfterAll()));
}
}
}
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/infeed_token_propagation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/infeed_token_propagation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
58e77cc6-ed69-45d3-b6b2-27bf9c79df8e | cpp | google/tensorstore | future_sender | tensorstore/util/execution/future_sender.h | tensorstore/util/execution/future_sender_test.cc
#ifndef TENSORSTORE_UTIL_EXECUTION_FUTURE_SENDER_H_
#define TENSORSTORE_UTIL_EXECUTION_FUTURE_SENDER_H_
#include <functional>
#include <type_traits>
#include <utility>
#include "absl/status/status.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/future.h"
namespace tensorstore {
namespace internal_future {
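// Detects whether Receiver supports set_value(T), set_error(absl::Status),
// and set_cancel(), i.e. models a receiver compatible with Future<T>.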
template <typename Receiver, typename = void, typename = void, typename = void,
typename = void>
struct IsFutureReceiver : public std::false_type {};
template <typename Receiver, typename T>
struct IsFutureReceiver<
Receiver, T,
decltype(execution::set_value(std::declval<Receiver&>(),
std::declval<T>())),
decltype(execution::set_error(std::declval<Receiver&>(),
std::declval<absl::Status>())),
decltype(execution::set_cancel(std::declval<Receiver&>()))>
: public std::true_type {};
}
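// The overloads below make Promise<T> model the Receiver concept.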
template <typename T, typename... V>
std::enable_if_t<(!std::is_const_v<T> &&
std::is_constructible_v<typename Promise<T>::result_type,
std::in_place_t, V...>)>
set_value(const Promise<T>& promise, V&&... v) {
promise.SetResult(std::in_place, std::forward<V>(v)...);
}
template <typename T, typename... V>
std::enable_if_t<(!std::is_const_v<T> &&
std::is_constructible_v<typename Promise<T>::result_type,
std::in_place_t, V...>)>
set_value(std::reference_wrapper<const Promise<T>> promise, V&&... v) {
set_value(promise.get(), std::forward<V>(v)...);
}
template <typename T>
void set_error(const Promise<T>& promise, absl::Status error) {
promise.SetResult(std::move(error));
}
template <typename T>
void set_error(std::reference_wrapper<const Promise<T>> promise,
absl::Status error) {
set_error(promise.get(), std::move(error));
}
template <typename T>
void set_cancel(const Promise<T>& promise) {
promise.SetResult(absl::CancelledError(""));
}
template <typename T>
void set_cancel(std::reference_wrapper<const Promise<T>> promise) {
set_cancel(promise.get());
}
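// Makes Future<T> model the Sender concept: forces the future, then
// dispatches to the receiver once ready, mapping kCancelled to set_cancel.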
template <typename T, typename Receiver>
std::enable_if_t<internal_future::IsFutureReceiver<Receiver, T>::value>
submit(Future<T>& f, Receiver receiver) {
f.Force();
f.ExecuteWhenReady([r = std::move(receiver)](ReadyFuture<T> ready) mutable {
auto& result = ready.result();
if (result.has_value()) {
execution::set_value(r, result.value());
} else {
auto status = ready.status();
if (status.code() == absl::StatusCode::kCancelled) {
execution::set_cancel(r);
} else {
execution::set_error(r, std::move(status));
}
}
});
}
template <typename T, typename Receiver>
std::enable_if_t<internal_future::IsFutureReceiver<Receiver, T>::value>
submit(std::reference_wrapper<Future<T>> f, Receiver&& receiver) {
submit(f.get(), std::forward<Receiver>(receiver));
}
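// Adapts a Sender into a Future<T>; the sender is submitted to the promise
// only once the future is forced.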
template <typename T, typename Sender>
Future<T> MakeSenderFuture(Sender sender) {
auto pair = PromiseFuturePair<T>::Make();
struct Callback {
Sender sender;
void operator()(Promise<T> promise) { execution::submit(sender, promise); }
};
pair.promise.ExecuteWhenForced(Callback{std::move(sender)});
return pair.future;
}
}
#endif
#include "tensorstore/util/execution/future_sender.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/any_sender.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender.h"
#include "tensorstore/util/execution/sender_testutil.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
namespace {
using ::tensorstore::Promise;
using ::tensorstore::PromiseFuturePair;
using ::tensorstore::Result;
TEST(PromiseReceiverTest, SetCancel) {
auto pair = PromiseFuturePair<int>::Make();
tensorstore::execution::set_cancel(pair.promise);
EXPECT_EQ(pair.future.result(), Result<int>(absl::CancelledError("")));
}
TEST(PromiseReceiverTest, AnyReceiverSetCancel) {
auto pair = PromiseFuturePair<int>::Make();
tensorstore::execution::set_cancel(
tensorstore::AnyReceiver<absl::Status, int>(std::cref(pair.promise)));
EXPECT_EQ(pair.future.result(), Result<int>(absl::CancelledError("")));
}
TEST(PromiseReceiverTest, SetValue) {
auto pair = PromiseFuturePair<int>::Make();
tensorstore::execution::set_value(pair.promise, 3);
EXPECT_EQ(pair.future.result(), Result<int>(3));
}
TEST(PromiseReceiverTest, SetValueThenSetCancel) {
auto pair = PromiseFuturePair<int>::Make();
tensorstore::execution::set_value(pair.promise, 3);
tensorstore::execution::set_cancel(pair.promise);
EXPECT_EQ(pair.future.result(), Result<int>(3));
}
TEST(PromiseReceiverTest, AnyReceiverSetValue) {
auto pair = PromiseFuturePair<int>::Make();
tensorstore::execution::set_value(
tensorstore::AnyReceiver<absl::Status, int>(std::cref(pair.promise)), 3);
EXPECT_EQ(pair.future.result(), Result<int>(3));
}
TEST(PromiseReceiverTest, SetError) {
auto pair = PromiseFuturePair<int>::Make();
tensorstore::execution::set_error(
tensorstore::AnyReceiver<absl::Status, int>(pair.promise),
absl::UnknownError("message"));
EXPECT_EQ(pair.future.result(), Result<int>(absl::UnknownError("message")));
}
TEST(PromiseReceiverTest, AnyReceiverSetError) {
auto pair = PromiseFuturePair<int>::Make();
tensorstore::execution::set_error(std::cref(pair.promise),
absl::UnknownError("message"));
EXPECT_EQ(pair.future.result(), Result<int>(absl::UnknownError("message")));
}
TEST(FutureSenderTest, SetValue) {
auto pair = PromiseFuturePair<int>::Make();
bool forced = false;
pair.promise.ExecuteWhenForced([&](Promise<int>) { forced = true; });
std::vector<std::string> log1, log2;
tensorstore::execution::submit(pair.future,
tensorstore::LoggingReceiver{&log1});
tensorstore::execution::submit(pair.future,
tensorstore::LoggingReceiver{&log2});
EXPECT_THAT(log1, ::testing::ElementsAre());
EXPECT_THAT(log2, ::testing::ElementsAre());
EXPECT_TRUE(forced);
pair.promise.SetResult(3);
EXPECT_THAT(log1, ::testing::ElementsAre("set_value: 3"));
EXPECT_THAT(log2, ::testing::ElementsAre("set_value: 3"));
}
TEST(FutureSenderTest, AnySenderSetValue) {
auto pair = PromiseFuturePair<int>::Make();
bool forced = false;
pair.promise.ExecuteWhenForced([&](Promise<int>) { forced = true; });
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::AnySender<absl::Status, int>(pair.future),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre());
EXPECT_TRUE(forced);
pair.promise.SetResult(3);
EXPECT_THAT(log, ::testing::ElementsAre("set_value: 3"));
}
TEST(FutureSenderTest, SetError) {
auto pair = PromiseFuturePair<int>::Make();
bool forced = false;
pair.promise.ExecuteWhenForced([&](Promise<int>) { forced = true; });
std::vector<std::string> log;
tensorstore::execution::submit(std::ref(pair.future),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre());
EXPECT_TRUE(forced);
pair.promise.SetResult(absl::UnknownError(""));
EXPECT_THAT(log, ::testing::ElementsAre("set_error: UNKNOWN: "));
}
TEST(FutureSenderTest, AnySenderSetError) {
auto pair = PromiseFuturePair<int>::Make();
bool forced = false;
pair.promise.ExecuteWhenForced([&](Promise<int>) { forced = true; });
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::AnySender<absl::Status, int>(pair.future),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre());
EXPECT_TRUE(forced);
pair.promise.SetResult(absl::UnknownError(""));
EXPECT_THAT(log, ::testing::ElementsAre("set_error: UNKNOWN: "));
}
TEST(FutureSenderTest, SetCancel) {
auto pair = PromiseFuturePair<int>::Make();
bool forced = false;
pair.promise.ExecuteWhenForced([&](Promise<int>) { forced = true; });
std::vector<std::string> log;
tensorstore::execution::submit(pair.future,
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre());
EXPECT_TRUE(forced);
pair.promise.SetResult(absl::CancelledError(""));
EXPECT_THAT(log, ::testing::ElementsAre("set_cancel"));
}
TEST(FutureSenderTest, AnySenderSetCancel) {
auto pair = PromiseFuturePair<int>::Make();
bool forced = false;
pair.promise.ExecuteWhenForced([&](Promise<int>) { forced = true; });
std::vector<std::string> log;
tensorstore::execution::submit(
tensorstore::AnySender<absl::Status, int>(std::ref(pair.future)),
tensorstore::LoggingReceiver{&log});
EXPECT_THAT(log, ::testing::ElementsAre());
EXPECT_TRUE(forced);
pair.promise.SetResult(absl::CancelledError(""));
EXPECT_THAT(log, ::testing::ElementsAre("set_cancel"));
}
TEST(MakeSenderFutureTest, SetValue) {
auto future =
tensorstore::MakeSenderFuture<int>(tensorstore::ValueSender<int>{3});
EXPECT_FALSE(future.ready());
EXPECT_EQ(future.result(), Result<int>(3));
}
TEST(MakeSenderFutureTest, SetError) {
auto future = tensorstore::MakeSenderFuture<int>(
tensorstore::ErrorSender<absl::Status>{absl::UnknownError("")});
EXPECT_FALSE(future.ready());
EXPECT_EQ(future.result(), Result<int>(absl::UnknownError("")));
}
TEST(MakeSenderFutureTest, SetCancel) {
auto future = tensorstore::MakeSenderFuture<int>(tensorstore::CancelSender{});
EXPECT_FALSE(future.ready());
EXPECT_EQ(future.result(), Result<int>(absl::CancelledError("")));
}
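// Illustrative addition (not part of the original suite): a sketch assuming
// that ExecuteWhenReady runs its callback immediately for an already-ready
// future, so submit should deliver the value synchronously.
TEST(FutureSenderTest, AlreadyReady) {
  auto pair = PromiseFuturePair<int>::Make();
  pair.promise.SetResult(5);
  std::vector<std::string> log;
  tensorstore::execution::submit(pair.future,
                                 tensorstore::LoggingReceiver{&log});
  EXPECT_THAT(log, ::testing::ElementsAre("set_value: 5"));
}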
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/future_sender.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/future_sender_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
d3fa6f16-f4b2-4a17-85df-c9db628b7db2 | cpp | tensorflow/tensorflow | tensor_slice_dataset_op | tensorflow/core/kernels/data/tensor_slice_dataset_op.cc | tensorflow/core/kernels/data/tensor_slice_dataset_op_test.cc | #include "tensorflow/core/kernels/data/tensor_slice_dataset_op.h"
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/global_shuffle_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/split_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/util/batch_util.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
constexpr const char* const TensorSliceDatasetOp::kDatasetType;
constexpr const char* const TensorSliceDatasetOp::kComponents;
constexpr const char* const TensorSliceDatasetOp::kToutputTypes;
constexpr const char* const TensorSliceDatasetOp::kOutputShapes;
constexpr const char* const TensorSliceDatasetOp::kIsFiles;
constexpr const char* const TensorSliceDatasetOp::kReplicateOnSplit;
class TensorSliceDatasetOp::Dataset : public DatasetBase {
public:
explicit Dataset(OpKernelContext* ctx, std::vector<Tensor> tensors,
bool is_files, bool replicate_on_split)
: DatasetBase(DatasetContext(ctx)),
tensors_(std::move(tensors)),
is_files_(is_files),
replicate_on_split_(replicate_on_split) {
for (const Tensor& t : tensors_) {
dtypes_.push_back(t.dtype());
absl::InlinedVector<int64_t, 4UL> element_dim_sizes;
for (int i = 1; i < t.dims(); ++i) {
element_dim_sizes.push_back(t.dim_size(i));
}
partial_shapes_.emplace_back(element_dim_sizes);
shapes_.emplace_back(std::move(element_dim_sizes));
}
}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
Status MakeSplitProviders(std::vector<std::unique_ptr<SplitProvider>>*
split_providers) const override {
split_providers->push_back(
std::make_unique<IndexSplitProvider>(tensors_[0].dim_size(0)));
return absl::OkStatus();
}
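  // Added commentary (not in the original source): a "split" here is just an
  // index into the 0th dimension, so one IndexSplitProvider over
  // tensors_[0].dim_size(0) enumerates every slice and lets distributed
  // readers shard the index space.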
const DataTypeVector& output_dtypes() const override { return dtypes_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return partial_shapes_;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return tensors_[0].dim_size(0);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
return absl::OkStatus();
}
Status CheckExternalState() const override { return absl::OkStatus(); }
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
return Get(AnyContext(ctx), index, out_tensors);
}
Status Get(AnyContext ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
out_tensors->clear();
out_tensors->reserve(tensors_.size());
for (int i = 0; i < tensors_.size(); ++i) {
out_tensors->push_back(MaybeCopySubSlice(tensors_[i], index));
}
return absl::OkStatus();
}
absl::Status RandomIndexingCompatible() const override {
return absl::OkStatus();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
std::vector<Node*> components;
components.reserve(tensors_.size());
for (const Tensor& t : tensors_) {
Node* node;
if (!ctx->is_graph_rewrite()) {
TF_RETURN_IF_ERROR(b->AddDatasetOrTensor(ctx, t, &node));
if (is_files_) {
Node* file_node;
TF_RETURN_IF_ERROR(
b->AddIdentity(ctx, "FileIdentity", &node, &file_node));
}
} else {
TF_RETURN_IF_ERROR(b->AddPlaceholder(t, &node));
DCHECK_NE(ctx->input_list(), nullptr);
ctx->input_list()->emplace_back(node->name(), t);
}
components.emplace_back(node);
}
AttrValue dtypes;
b->BuildAttrValue(dtypes_, &dtypes);
AttrValue is_files;
b->BuildAttrValue(is_files_, &is_files);
AttrValue replicate_on_split;
b->BuildAttrValue(replicate_on_split_, &replicate_on_split);
TF_RETURN_IF_ERROR(b->AddDataset(this, {}, {{0, components}},
{{kToutputTypes, dtypes},
{kIsFiles, is_files},
{kReplicateOnSplit, replicate_on_split}},
output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
global_shuffle_iterator_(dataset()) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
if (ctx->split_providers().empty() || dataset()->replicate_on_split_) {
split_provider_ = std::make_shared<IndexSplitProvider>(
dataset()->tensors_[0].dim_size(0));
} else {
TF_ASSIGN_OR_RETURN(split_provider_,
GetSingleSplitProvider(ctx, dataset()));
}
return absl::OkStatus();
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
if (ctx->index_mapper() != nullptr) {
return global_shuffle_iterator_.GetNext(ctx, out_tensors,
end_of_sequence);
}
Tensor split;
TF_RETURN_IF_ERROR(split_provider_->GetNext(&split, end_of_sequence));
if (*end_of_sequence) {
return absl::OkStatus();
}
    int64_t index = split.scalar<int64_t>()();  // The split tensor carries the slice index.
out_tensors->reserve(dataset()->tensors_.size());
for (size_t i = 0; i < dataset()->tensors_.size(); ++i) {
out_tensors->push_back(
MaybeCopySubSlice(dataset()->tensors_[i], index));
}
*end_of_sequence = false;
return absl::OkStatus();
}
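    // Added commentary (not in the original source): each call consumes one
    // slice index from the split provider and emits one sub-slice per
    // component tensor; MaybeCopySubSlice is expected to alias the parent
    // buffer when alignment allows, copying only when it must.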
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeSourceNode(std::move(args));
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(split_provider_->Save(
[this](const std::string& key) { return full_name(key); }, writer));
TF_RETURN_IF_ERROR(global_shuffle_iterator_.Save(prefix(), ctx, writer));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
if (ctx->restored_element_count().has_value()) {
return global_shuffle_iterator_.Restore(prefix(), ctx, reader);
}
return split_provider_->Restore(
[this](const std::string& key) { return full_name(key); }, reader);
}
private:
std::shared_ptr<SplitProvider> split_provider_;
GlobalShuffleIterator global_shuffle_iterator_;
};
const std::vector<Tensor> tensors_;
DataTypeVector dtypes_;
std::vector<TensorShape> shapes_;
std::vector<PartialTensorShape> partial_shapes_;
const bool is_files_;
const bool replicate_on_split_;
};
TensorSliceDatasetOp::TensorSliceDatasetOp(OpKernelConstruction* ctx)
: DatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kToutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
if (ctx->HasAttr(kIsFiles)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kIsFiles, &is_files_));
}
if (ctx->HasAttr(kReplicateOnSplit)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kReplicateOnSplit, &replicate_on_split_));
}
}
void TensorSliceDatasetOp::MakeDataset(OpKernelContext* ctx,
DatasetBase** output) {
OpInputList inputs;
OP_REQUIRES_OK(ctx, ctx->input_list(kComponents, &inputs));
std::vector<Tensor> components;
components.reserve(inputs.size());
OP_REQUIRES(
ctx, inputs[0].dims() > 0,
errors::InvalidArgument("All components must be at least 1-dimensional"));
const int64_t num_slices = inputs[0].dim_size(0);
for (const Tensor& t : inputs) {
components.push_back(t);
OP_REQUIRES(ctx, t.dims() > 0,
errors::InvalidArgument(
"All components must be at least 1-dimensional"));
OP_REQUIRES(
ctx, t.dim_size(0) == num_slices,
errors::InvalidArgument(
"All components must have the same size in the 0th dimension"));
}
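  // Added commentary (not in the original source): all components must agree
  // on the 0th-dimension size because element i of the dataset is built by
  // slicing index i out of every component in parallel.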
*output =
new Dataset(ctx, std::move(components), is_files_, replicate_on_split_);
OP_REQUIRES_OK(ctx,
VerifyTypesMatch((*output)->output_dtypes(), output_types_));
OP_REQUIRES_OK(
ctx, VerifyShapesCompatible((*output)->output_shapes(), output_shapes_));
}
namespace {
REGISTER_KERNEL_BUILDER(Name("TensorSliceDataset").Device(DEVICE_CPU),
TensorSliceDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/tensor_slice_dataset_op.h"
#include <string>
#include <utility>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "tensor_slice_dataset";
class TensorSliceDatasetOpTest : public DatasetOpsTestBase {};
TensorSliceDatasetParams PlainTensorSliceDatasetParams() {
std::vector<Tensor> components = {
CreateTensor<int64_t>(TensorShape({2}), {1, 2}),
CreateTensor<int64_t>(TensorShape({2, 2}), {1, 2, 3, 4}),
CreateTensor<uint32>(TensorShape({2}), {2, 3}),
CreateTensor<uint32>(TensorShape({2, 2}), {2, 3, 4, 5}),
CreateTensor<uint64>(TensorShape({2}), {3, 4}),
CreateTensor<uint64>(TensorShape({2, 2}), {3, 4, 5, 6}),
CreateTensor<double>(TensorShape({2, 1}), {37.0, 38.0}),
CreateTensor<tstring>(TensorShape({2, 1}), {"a", "b"})};
return {std::move(components), kNodeName};
}
TensorSliceDatasetParams NestedTensorSliceDatasetParams() {
std::vector<Tensor> components = {
CreateTensor<Variant>(
TensorShape({2, 1}),
{CreateTensor<double>(TensorShape({2, 2}), {1.0, 2.0, 3.0, 4.0}),
CreateTensor<double>(TensorShape({2, 2}), {5.0, 6.0, 7.0, 8.0})}),
CreateTensor<Variant>(
TensorShape({2, 1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"}),
CreateTensor<tstring>(TensorShape({1, 2}), {"c", "d"})}),
CreateTensor<int64_t>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6})};
return {std::move(components), kNodeName};
}
std::vector<GetNextTestCase<TensorSliceDatasetParams>> GetNextTestCases() {
return {
{PlainTensorSliceDatasetParams(),
{CreateTensor<int64_t>(TensorShape({}), {1}),
CreateTensor<int64_t>(TensorShape({2}), {1, 2}),
CreateTensor<uint32>(TensorShape({}), {2}),
CreateTensor<uint32>(TensorShape({2}), {2, 3}),
CreateTensor<uint64>(TensorShape({}), {3}),
CreateTensor<uint64>(TensorShape({2}), {3, 4}),
CreateTensor<double>(TensorShape({1}), {37.0}),
CreateTensor<tstring>(TensorShape({1}), {"a"}),
CreateTensor<int64_t>(TensorShape({}), {2}),
CreateTensor<int64_t>(TensorShape({2}), {3, 4}),
CreateTensor<uint32>(TensorShape({}), {3}),
CreateTensor<uint32>(TensorShape({2}), {4, 5}),
CreateTensor<uint64>(TensorShape({}), {4}),
CreateTensor<uint64>(TensorShape({2}), {5, 6}),
CreateTensor<double>(TensorShape({1}), {38.0}),
CreateTensor<tstring>(TensorShape({1}), {"b"})}},
{NestedTensorSliceDatasetParams(),
{CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {1.0, 2.0, 3.0, 4.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"})}),
CreateTensor<int64_t>(TensorShape({3}), {1, 2, 3}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {5.0, 6.0, 7.0, 8.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"c", "d"})}),
CreateTensor<int64_t>(TensorShape({3}), {4, 5, 6})}}};
}
class ParameterizedGetNextTest
: public TensorSliceDatasetOpTest,
public ::testing::WithParamInterface<
GetNextTestCase<TensorSliceDatasetParams>> {};
TEST_P(ParameterizedGetNextTest, GetNext) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
std::vector<string> input_names;
TF_ASSERT_OK(test_case.dataset_params.GetInputNames(&input_names));
size_t num_tensors_per_slice = input_names.size();
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
int cur_slice = 0;
while (true) {
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
if (end_of_sequence) {
EXPECT_TRUE(out_tensors.empty());
break;
}
for (int i = 0; i < out_tensors.size(); ++i) {
EXPECT_LT(i + num_tensors_per_slice * cur_slice,
test_case.expected_outputs.size());
if (out_tensors[i].dtype() == DT_VARIANT) {
const Tensor* output = out_tensors[i].scalar<Variant>()().get<Tensor>();
const Tensor* expected_output =
test_case.expected_outputs[i + num_tensors_per_slice * cur_slice]
.scalar<Variant>()()
.get<Tensor>();
TF_EXPECT_OK(ExpectEqual(*output, *expected_output));
} else {
TF_EXPECT_OK(ExpectEqual(
out_tensors[i],
test_case.expected_outputs[i + num_tensors_per_slice * cur_slice]));
}
}
cur_slice++;
}
}
INSTANTIATE_TEST_SUITE_P(TensorSliceDatasetOpTest, ParameterizedGetNextTest,
::testing::ValuesIn(GetNextTestCases()));
TEST_F(TensorSliceDatasetOpTest, DatasetNodeName) {
auto dataset_params = PlainTensorSliceDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(TensorSliceDatasetOpTest, DatasetTypeString) {
auto dataset_params = PlainTensorSliceDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(TensorSliceDatasetOp::kDatasetType)));
}
std::vector<DatasetOutputDtypesTestCase<TensorSliceDatasetParams>>
DatasetOutputTypesTestCases() {
return {{PlainTensorSliceDatasetParams(),
PlainTensorSliceDatasetParams().output_dtypes()},
{NestedTensorSliceDatasetParams(),
NestedTensorSliceDatasetParams().output_dtypes()}};
}
DATASET_OUTPUT_DTYPES_TEST_P(TensorSliceDatasetOpTest, TensorSliceDatasetParams,
DatasetOutputTypesTestCases())
std::vector<DatasetOutputShapesTestCase<TensorSliceDatasetParams>>
DatasetOutputShapesTestCases() {
return {{PlainTensorSliceDatasetParams(),
PlainTensorSliceDatasetParams().output_shapes()},
{NestedTensorSliceDatasetParams(),
NestedTensorSliceDatasetParams().output_shapes()}};
}
DATASET_OUTPUT_SHAPES_TEST_P(TensorSliceDatasetOpTest, TensorSliceDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<TensorSliceDatasetParams>>
DatasetCardinalityTestCases() {
return {{PlainTensorSliceDatasetParams(), 2},
{NestedTensorSliceDatasetParams(), 2}};
}
DATASET_CARDINALITY_TEST_P(TensorSliceDatasetOpTest, TensorSliceDatasetParams,
DatasetCardinalityTestCases())
std::vector<IteratorOutputDtypesTestCase<TensorSliceDatasetParams>>
IteratorOutputTypesTestCases() {
return {{PlainTensorSliceDatasetParams(),
PlainTensorSliceDatasetParams().output_dtypes()},
{NestedTensorSliceDatasetParams(),
NestedTensorSliceDatasetParams().output_dtypes()}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(TensorSliceDatasetOpTest,
TensorSliceDatasetParams,
IteratorOutputTypesTestCases())
std::vector<IteratorOutputShapesTestCase<TensorSliceDatasetParams>>
IteratorOutputShapesTestCases() {
return {{PlainTensorSliceDatasetParams(),
PlainTensorSliceDatasetParams().output_shapes()},
{NestedTensorSliceDatasetParams(),
NestedTensorSliceDatasetParams().output_shapes()}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(TensorSliceDatasetOpTest,
TensorSliceDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(TensorSliceDatasetOpTest, IteratorOutputPrefix) {
auto dataset_params = PlainTensorSliceDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
TensorSliceDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<TensorSliceDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {
{PlainTensorSliceDatasetParams(),
{0, 1, 2},
{CreateTensor<int64_t>(TensorShape({}), {1}),
CreateTensor<int64_t>(TensorShape({2}), {1, 2}),
CreateTensor<uint32>(TensorShape({}), {2}),
CreateTensor<uint32>(TensorShape({2}), {2, 3}),
CreateTensor<uint64>(TensorShape({}), {3}),
CreateTensor<uint64>(TensorShape({2}), {3, 4}),
CreateTensor<double>(TensorShape({1}), {37.0}),
CreateTensor<tstring>(TensorShape({1}), {"a"}),
CreateTensor<int64_t>(TensorShape({}), {2}),
CreateTensor<int64_t>(TensorShape({2}), {3, 4}),
CreateTensor<uint32>(TensorShape({}), {3}),
CreateTensor<uint32>(TensorShape({2}), {4, 5}),
CreateTensor<uint64>(TensorShape({}), {4}),
CreateTensor<uint64>(TensorShape({2}), {5, 6}),
CreateTensor<double>(TensorShape({1}), {38.0}),
CreateTensor<tstring>(TensorShape({1}), {"b"})}},
{NestedTensorSliceDatasetParams(),
{0, 1, 2},
{CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {1.0, 2.0, 3.0, 4.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"})}),
CreateTensor<int64_t>(TensorShape({3}), {1, 2, 3}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {5.0, 6.0, 7.0, 8.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"c", "d"})}),
CreateTensor<int64_t>(TensorShape({3}), {4, 5, 6})}}};
}
class ParameterizedIteratorSaveAndRestoreTest
: public TensorSliceDatasetOpTest,
public ::testing::WithParamInterface<
IteratorSaveAndRestoreTestCase<TensorSliceDatasetParams>> {};
TEST_P(ParameterizedIteratorSaveAndRestoreTest, SaveAndRestore) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
std::unique_ptr<SerializationContext> serialization_context;
TF_ASSERT_OK(CreateSerializationContext(&serialization_context));
int cur_iteration = 0;
bool end_of_sequence = false;
auto params =
static_cast<TensorSliceDatasetParams&>(test_case.dataset_params);
int64_t num_slices = params.num_slices();
size_t num_tensors_per_slice = params.num_tensors_per_slice();
std::vector<Tensor> out_tensors;
const std::vector<int>& breakpoints = test_case.breakpoints;
for (int breakpoint : breakpoints) {
while (cur_iteration < breakpoint) {
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
cur_iteration++;
}
if (breakpoint == 0) {
EXPECT_FALSE(end_of_sequence);
} else if (breakpoint <= num_slices) {
for (int i = 0; i < out_tensors.size(); ++i) {
if (out_tensors[i].dtype() == DT_VARIANT) {
const Tensor* output =
out_tensors[i].scalar<Variant>()().get<Tensor>();
const Tensor* expected_output =
test_case
.expected_outputs[i +
num_tensors_per_slice * (cur_iteration - 1)]
.scalar<Variant>()()
.get<Tensor>();
TF_EXPECT_OK(ExpectEqual(*output, *expected_output));
} else {
TF_EXPECT_OK(ExpectEqual(
out_tensors[i],
test_case.expected_outputs[i + num_tensors_per_slice *
(cur_iteration - 1)]));
}
}
} else {
EXPECT_TRUE(end_of_sequence);
}
VariantTensorDataWriter writer;
TF_ASSERT_OK(iterator_->Save(serialization_context.get(), &writer));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader, "Iterator",
*dataset_, &iterator_));
}
}
INSTANTIATE_TEST_SUITE_P(
TensorSliceDatasetOpTest, ParameterizedIteratorSaveAndRestoreTest,
::testing::ValuesIn(IteratorSaveAndRestoreTestCases()));
TEST_F(TensorSliceDatasetOpTest, SplitProvider) {
auto params = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape({7}), {{6, 2, 3, 8, 7, 0, 10}}),
kNodeName);
TF_ASSERT_OK(InitializeRuntime(params));
TF_EXPECT_OK(CheckSplitProviderFullIteration(
params, CreateTensors<int64_t>(TensorShape({}),
{{6}, {2}, {3}, {8}, {7}, {0}, {10}})));
TF_EXPECT_OK(CheckSplitProviderShardedIteration(
params, 3, 1,
CreateTensors<int64_t>(TensorShape({}), {{2}, {7}})));
}
TEST_F(TensorSliceDatasetOpTest, SplitProviderEmpty) {
auto params = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape({0}), {{}}), kNodeName);
TF_ASSERT_OK(InitializeRuntime(params));
TF_EXPECT_OK(CheckSplitProviderFullIteration(
params, CreateTensors<int64_t>(TensorShape({}), {})));
TF_EXPECT_OK(CheckSplitProviderShardedIteration(
params, 3, 1,
CreateTensors<int64_t>(TensorShape({}), {})));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/tensor_slice_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/tensor_slice_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f845cd84-f662-46bb-a6e6-300d4c18375b | cpp | google/quiche | quic_lru_cache | quiche/quic/core/quic_lru_cache.h | quiche/quic/core/quic_lru_cache_test.cc | #ifndef QUICHE_QUIC_CORE_QUIC_LRU_CACHE_H_
#define QUICHE_QUIC_CORE_QUIC_LRU_CACHE_H_
#include <memory>
#include "quiche/quic/platform/api/quic_export.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/common/quiche_linked_hash_map.h"
namespace quic {
template <class K, class V, class Hash = std::hash<K>,
class Eq = std::equal_to<K>>
class QUICHE_EXPORT QuicLRUCache {
private:
using HashMapType =
typename quiche::QuicheLinkedHashMap<K, std::unique_ptr<V>, Hash, Eq>;
public:
using iterator = typename HashMapType::iterator;
using const_iterator = typename HashMapType::const_iterator;
using reverse_iterator = typename HashMapType::reverse_iterator;
using const_reverse_iterator = typename HashMapType::const_reverse_iterator;
explicit QuicLRUCache(size_t capacity) : capacity_(capacity) {}
QuicLRUCache(const QuicLRUCache&) = delete;
QuicLRUCache& operator=(const QuicLRUCache&) = delete;
iterator begin() { return cache_.begin(); }
const_iterator begin() const { return cache_.begin(); }
iterator end() { return cache_.end(); }
const_iterator end() const { return cache_.end(); }
reverse_iterator rbegin() { return cache_.rbegin(); }
const_reverse_iterator rbegin() const { return cache_.rbegin(); }
reverse_iterator rend() { return cache_.rend(); }
const_reverse_iterator rend() const { return cache_.rend(); }
void Insert(const K& key, std::unique_ptr<V> value) {
auto it = cache_.find(key);
if (it != cache_.end()) {
cache_.erase(it);
}
cache_.emplace(key, std::move(value));
if (cache_.size() > capacity_) {
cache_.pop_front();
}
QUICHE_DCHECK_LE(cache_.size(), capacity_);
}
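  // Added commentary (not in the original source): QuicheLinkedHashMap
  // preserves insertion order, so the front of the map is the least recently
  // used entry. Insert places (or re-places) the key at the back and pops the
  // front once the cache exceeds capacity.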
iterator Lookup(const K& key) {
auto iter = cache_.find(key);
if (iter == cache_.end()) {
return iter;
}
std::unique_ptr<V> value = std::move(iter->second);
cache_.erase(iter);
auto result = cache_.emplace(key, std::move(value));
QUICHE_DCHECK(result.second);
return result.first;
}
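  // Added commentary (not in the original source): a hit re-inserts the entry
  // at the back of the map to refresh its recency; the stored value is moved
  // out and back in rather than copied.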
iterator Erase(iterator iter) { return cache_.erase(iter); }
void Clear() { cache_.clear(); }
size_t MaxSize() const { return capacity_; }
size_t Size() const { return cache_.size(); }
private:
quiche::QuicheLinkedHashMap<K, std::unique_ptr<V>, Hash, Eq> cache_;
const size_t capacity_;
};
}
#endif | #include "quiche/quic/core/quic_lru_cache.h"
#include <memory>
#include <utility>
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace test {
namespace {
struct CachedItem {
explicit CachedItem(uint32_t new_value) : value(new_value) {}
uint32_t value;
};
TEST(QuicLRUCacheTest, InsertAndLookup) {
QuicLRUCache<int, CachedItem> cache(5);
EXPECT_EQ(cache.end(), cache.Lookup(1));
EXPECT_EQ(0u, cache.Size());
EXPECT_EQ(5u, cache.MaxSize());
std::unique_ptr<CachedItem> item1(new CachedItem(11));
cache.Insert(1, std::move(item1));
EXPECT_EQ(1u, cache.Size());
EXPECT_EQ(11u, cache.Lookup(1)->second->value);
std::unique_ptr<CachedItem> item2(new CachedItem(12));
cache.Insert(1, std::move(item2));
EXPECT_EQ(1u, cache.Size());
EXPECT_EQ(12u, cache.Lookup(1)->second->value);
std::unique_ptr<CachedItem> item3(new CachedItem(13));
cache.Insert(3, std::move(item3));
EXPECT_EQ(2u, cache.Size());
auto iter = cache.Lookup(3);
ASSERT_NE(cache.end(), iter);
EXPECT_EQ(13u, iter->second->value);
cache.Erase(iter);
ASSERT_EQ(cache.end(), cache.Lookup(3));
EXPECT_EQ(1u, cache.Size());
cache.Clear();
EXPECT_EQ(0u, cache.Size());
}
TEST(QuicLRUCacheTest, Eviction) {
QuicLRUCache<int, CachedItem> cache(3);
for (size_t i = 1; i <= 4; ++i) {
std::unique_ptr<CachedItem> item(new CachedItem(10 + i));
cache.Insert(i, std::move(item));
}
EXPECT_EQ(3u, cache.Size());
EXPECT_EQ(3u, cache.MaxSize());
EXPECT_EQ(cache.end(), cache.Lookup(1));
EXPECT_EQ(14u, cache.Lookup(4)->second->value);
EXPECT_EQ(12u, cache.Lookup(2)->second->value);
std::unique_ptr<CachedItem> item5(new CachedItem(15));
cache.Insert(5, std::move(item5));
EXPECT_EQ(cache.end(), cache.Lookup(3));
EXPECT_EQ(15u, cache.Lookup(5)->second->value);
cache.Clear();
EXPECT_EQ(0u, cache.Size());
}
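// Illustrative addition (not part of the original suite): with capacity 1,
// every insert evicts the previous entry.
TEST(QuicLRUCacheTest, CapacityOne) {
  QuicLRUCache<int, CachedItem> cache(1);
  cache.Insert(1, std::make_unique<CachedItem>(11));
  cache.Insert(2, std::make_unique<CachedItem>(12));
  EXPECT_EQ(1u, cache.Size());
  EXPECT_EQ(cache.end(), cache.Lookup(1));
  EXPECT_EQ(12u, cache.Lookup(2)->second->value);
}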
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_lru_cache.h | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_lru_cache_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
1e57cfe2-0c5a-4707-a50b-7bf3a0666fa5 | cpp | abseil/abseil-cpp | uniform_helper | absl/random/internal/uniform_helper.h | absl/random/internal/uniform_helper_test.cc | #ifndef ABSL_RANDOM_INTERNAL_UNIFORM_HELPER_H_
#define ABSL_RANDOM_INTERNAL_UNIFORM_HELPER_H_
#include <cmath>
#include <limits>
#include <type_traits>
#include "absl/base/config.h"
#include "absl/meta/type_traits.h"
#include "absl/random/internal/traits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
template <typename IntType>
class uniform_int_distribution;
template <typename RealType>
class uniform_real_distribution;
namespace random_internal {
template <typename T>
struct TagTypeCompare {};
template <typename T>
constexpr bool operator==(TagTypeCompare<T>, TagTypeCompare<T>) {
return true;
}
template <typename T>
constexpr bool operator!=(TagTypeCompare<T>, TagTypeCompare<T>) {
return false;
}
}
struct IntervalClosedClosedTag
: public random_internal::TagTypeCompare<IntervalClosedClosedTag> {};
struct IntervalClosedOpenTag
: public random_internal::TagTypeCompare<IntervalClosedOpenTag> {};
struct IntervalOpenClosedTag
: public random_internal::TagTypeCompare<IntervalOpenClosedTag> {};
struct IntervalOpenOpenTag
: public random_internal::TagTypeCompare<IntervalOpenOpenTag> {};
namespace random_internal {
template <typename A, typename B>
using uniform_inferred_return_t =
absl::enable_if_t<absl::disjunction<is_widening_convertible<A, B>,
is_widening_convertible<B, A>>::value,
typename std::conditional<
is_widening_convertible<A, B>::value, B, A>::type>;
template <typename IntType, typename Tag>
typename absl::enable_if_t<
absl::conjunction<
IsIntegral<IntType>,
absl::disjunction<std::is_same<Tag, IntervalOpenClosedTag>,
std::is_same<Tag, IntervalOpenOpenTag>>>::value,
IntType>
uniform_lower_bound(Tag, IntType a, IntType) {
return a < (std::numeric_limits<IntType>::max)() ? (a + 1) : a;
}
template <typename FloatType, typename Tag>
typename absl::enable_if_t<
absl::conjunction<
std::is_floating_point<FloatType>,
absl::disjunction<std::is_same<Tag, IntervalOpenClosedTag>,
std::is_same<Tag, IntervalOpenOpenTag>>>::value,
FloatType>
uniform_lower_bound(Tag, FloatType a, FloatType b) {
return std::nextafter(a, b);
}
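// Added commentary (not in the original source): open endpoints are realized
// by nudging the bound one representable value inward -- +1 for integers
// (guarded against overflow at the numeric limit) and std::nextafter for
// floating point. The closed-endpoint overloads below pass the bound through
// unchanged.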
template <typename NumType, typename Tag>
typename absl::enable_if_t<
absl::disjunction<std::is_same<Tag, IntervalClosedClosedTag>,
std::is_same<Tag, IntervalClosedOpenTag>>::value,
NumType>
uniform_lower_bound(Tag, NumType a, NumType) {
return a;
}
template <typename IntType, typename Tag>
typename absl::enable_if_t<
absl::conjunction<
IsIntegral<IntType>,
absl::disjunction<std::is_same<Tag, IntervalClosedOpenTag>,
std::is_same<Tag, IntervalOpenOpenTag>>>::value,
IntType>
uniform_upper_bound(Tag, IntType, IntType b) {
return b > (std::numeric_limits<IntType>::min)() ? (b - 1) : b;
}
template <typename FloatType, typename Tag>
typename absl::enable_if_t<
absl::conjunction<
std::is_floating_point<FloatType>,
absl::disjunction<std::is_same<Tag, IntervalClosedOpenTag>,
std::is_same<Tag, IntervalOpenOpenTag>>>::value,
FloatType>
uniform_upper_bound(Tag, FloatType, FloatType b) {
return b;
}
template <typename IntType, typename Tag>
typename absl::enable_if_t<
absl::conjunction<
IsIntegral<IntType>,
absl::disjunction<std::is_same<Tag, IntervalClosedClosedTag>,
std::is_same<Tag, IntervalOpenClosedTag>>>::value,
IntType>
uniform_upper_bound(Tag, IntType, IntType b) {
return b;
}
template <typename FloatType, typename Tag>
typename absl::enable_if_t<
absl::conjunction<
std::is_floating_point<FloatType>,
absl::disjunction<std::is_same<Tag, IntervalClosedClosedTag>,
std::is_same<Tag, IntervalOpenClosedTag>>>::value,
FloatType>
uniform_upper_bound(Tag, FloatType, FloatType b) {
return std::nextafter(b, (std::numeric_limits<FloatType>::max)());
}
template <typename FloatType>
absl::enable_if_t<std::is_floating_point<FloatType>::value, bool>
is_uniform_range_valid(FloatType a, FloatType b) {
return a <= b && std::isfinite(b - a);
}
template <typename IntType>
absl::enable_if_t<IsIntegral<IntType>::value, bool>
is_uniform_range_valid(IntType a, IntType b) {
return a <= b;
}
template <typename NumType>
using UniformDistribution =
typename std::conditional<IsIntegral<NumType>::value,
absl::uniform_int_distribution<NumType>,
absl::uniform_real_distribution<NumType>>::type;
template <typename NumType>
struct UniformDistributionWrapper : public UniformDistribution<NumType> {
template <typename TagType>
explicit UniformDistributionWrapper(TagType, NumType lo, NumType hi)
: UniformDistribution<NumType>(
uniform_lower_bound<NumType>(TagType{}, lo, hi),
uniform_upper_bound<NumType>(TagType{}, lo, hi)) {}
explicit UniformDistributionWrapper(NumType lo, NumType hi)
: UniformDistribution<NumType>(
uniform_lower_bound<NumType>(IntervalClosedOpenTag(), lo, hi),
uniform_upper_bound<NumType>(IntervalClosedOpenTag(), lo, hi)) {}
explicit UniformDistributionWrapper()
: UniformDistribution<NumType>(std::numeric_limits<NumType>::lowest(),
(std::numeric_limits<NumType>::max)()) {}
};
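// Added commentary (not in the original source): the two-argument constructor
// defaults to the half-open interval [lo, hi), which matches absl::Uniform's
// documented default, and the zero-argument form spans the full range of
// NumType.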
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/random/internal/uniform_helper.h"
#include <cmath>
#include <cstdint>
#include <random>
#include "gtest/gtest.h"
namespace {
using absl::IntervalClosedClosedTag;
using absl::IntervalClosedOpenTag;
using absl::IntervalOpenClosedTag;
using absl::IntervalOpenOpenTag;
using absl::random_internal::uniform_inferred_return_t;
using absl::random_internal::uniform_lower_bound;
using absl::random_internal::uniform_upper_bound;
class UniformHelperTest : public testing::Test {};
TEST_F(UniformHelperTest, UniformBoundFunctionsGeneral) {
constexpr IntervalClosedClosedTag IntervalClosedClosed;
constexpr IntervalClosedOpenTag IntervalClosedOpen;
constexpr IntervalOpenClosedTag IntervalOpenClosed;
constexpr IntervalOpenOpenTag IntervalOpenOpen;
EXPECT_EQ(uniform_lower_bound(IntervalOpenClosed, 0, 100), 1);
EXPECT_EQ(uniform_lower_bound(IntervalOpenOpen, 0, 100), 1);
EXPECT_GT(uniform_lower_bound<float>(IntervalOpenClosed, 0, 1.0), 0);
EXPECT_GT(uniform_lower_bound<float>(IntervalOpenOpen, 0, 1.0), 0);
EXPECT_GT(uniform_lower_bound<double>(IntervalOpenClosed, 0, 1.0), 0);
EXPECT_GT(uniform_lower_bound<double>(IntervalOpenOpen, 0, 1.0), 0);
EXPECT_EQ(uniform_lower_bound(IntervalClosedClosed, 0, 100), 0);
EXPECT_EQ(uniform_lower_bound(IntervalClosedOpen, 0, 100), 0);
EXPECT_EQ(uniform_lower_bound<float>(IntervalClosedClosed, 0, 1.0), 0);
EXPECT_EQ(uniform_lower_bound<float>(IntervalClosedOpen, 0, 1.0), 0);
EXPECT_EQ(uniform_lower_bound<double>(IntervalClosedClosed, 0, 1.0), 0);
EXPECT_EQ(uniform_lower_bound<double>(IntervalClosedOpen, 0, 1.0), 0);
EXPECT_EQ(uniform_upper_bound(IntervalOpenOpen, 0, 100), 99);
EXPECT_EQ(uniform_upper_bound(IntervalClosedOpen, 0, 100), 99);
EXPECT_EQ(uniform_upper_bound<float>(IntervalOpenOpen, 0, 1.0), 1.0);
EXPECT_EQ(uniform_upper_bound<float>(IntervalClosedOpen, 0, 1.0), 1.0);
EXPECT_EQ(uniform_upper_bound<double>(IntervalOpenOpen, 0, 1.0), 1.0);
EXPECT_EQ(uniform_upper_bound<double>(IntervalClosedOpen, 0, 1.0), 1.0);
EXPECT_EQ(uniform_upper_bound(IntervalOpenClosed, 0, 100), 100);
EXPECT_EQ(uniform_upper_bound(IntervalClosedClosed, 0, 100), 100);
EXPECT_GT(uniform_upper_bound<float>(IntervalOpenClosed, 0, 1.0), 1.0);
EXPECT_GT(uniform_upper_bound<float>(IntervalClosedClosed, 0, 1.0), 1.0);
EXPECT_GT(uniform_upper_bound<double>(IntervalOpenClosed, 0, 1.0), 1.0);
EXPECT_GT(uniform_upper_bound<double>(IntervalClosedClosed, 0, 1.0), 1.0);
EXPECT_EQ(uniform_lower_bound(IntervalOpenClosed, -100, -1), -99);
EXPECT_EQ(uniform_lower_bound(IntervalOpenOpen, -100, -1), -99);
EXPECT_GT(uniform_lower_bound<float>(IntervalOpenClosed, -2.0, -1.0), -2.0);
EXPECT_GT(uniform_lower_bound<float>(IntervalOpenOpen, -2.0, -1.0), -2.0);
EXPECT_GT(uniform_lower_bound<double>(IntervalOpenClosed, -2.0, -1.0), -2.0);
EXPECT_GT(uniform_lower_bound<double>(IntervalOpenOpen, -2.0, -1.0), -2.0);
EXPECT_EQ(uniform_lower_bound(IntervalClosedClosed, -100, -1), -100);
EXPECT_EQ(uniform_lower_bound(IntervalClosedOpen, -100, -1), -100);
EXPECT_EQ(uniform_lower_bound<float>(IntervalClosedClosed, -2.0, -1.0), -2.0);
EXPECT_EQ(uniform_lower_bound<float>(IntervalClosedOpen, -2.0, -1.0), -2.0);
EXPECT_EQ(uniform_lower_bound<double>(IntervalClosedClosed, -2.0, -1.0),
-2.0);
EXPECT_EQ(uniform_lower_bound<double>(IntervalClosedOpen, -2.0, -1.0), -2.0);
EXPECT_EQ(uniform_upper_bound(IntervalOpenOpen, -100, -1), -2);
EXPECT_EQ(uniform_upper_bound(IntervalClosedOpen, -100, -1), -2);
EXPECT_EQ(uniform_upper_bound<float>(IntervalOpenOpen, -2.0, -1.0), -1.0);
EXPECT_EQ(uniform_upper_bound<float>(IntervalClosedOpen, -2.0, -1.0), -1.0);
EXPECT_EQ(uniform_upper_bound<double>(IntervalOpenOpen, -2.0, -1.0), -1.0);
EXPECT_EQ(uniform_upper_bound<double>(IntervalClosedOpen, -2.0, -1.0), -1.0);
EXPECT_EQ(uniform_upper_bound(IntervalOpenClosed, -100, -1), -1);
EXPECT_EQ(uniform_upper_bound(IntervalClosedClosed, -100, -1), -1);
EXPECT_GT(uniform_upper_bound<float>(IntervalOpenClosed, -2.0, -1.0), -1.0);
EXPECT_GT(uniform_upper_bound<float>(IntervalClosedClosed, -2.0, -1.0), -1.0);
EXPECT_GT(uniform_upper_bound<double>(IntervalOpenClosed, -2.0, -1.0), -1.0);
EXPECT_GT(uniform_upper_bound<double>(IntervalClosedClosed, -2.0, -1.0),
-1.0);
EXPECT_GT(uniform_lower_bound(IntervalOpenClosed, 1.0, 2.0), 1.0);
EXPECT_LT(uniform_lower_bound(IntervalOpenClosed, 1.0, +0.0), 1.0);
EXPECT_LT(uniform_lower_bound(IntervalOpenClosed, 1.0, -0.0), 1.0);
EXPECT_LT(uniform_lower_bound(IntervalOpenClosed, 1.0, -1.0), 1.0);
}
TEST_F(UniformHelperTest, UniformBoundFunctionsIntBounds) {
constexpr IntervalOpenOpenTag IntervalOpenOpen;
constexpr auto m = (std::numeric_limits<uint64_t>::max)();
EXPECT_EQ(1, uniform_lower_bound(IntervalOpenOpen, 0u, 0u));
EXPECT_EQ(m, uniform_lower_bound(IntervalOpenOpen, m, m));
EXPECT_EQ(m, uniform_lower_bound(IntervalOpenOpen, m - 1, m - 1));
EXPECT_EQ(0, uniform_upper_bound(IntervalOpenOpen, 0u, 0u));
EXPECT_EQ(m - 1, uniform_upper_bound(IntervalOpenOpen, m, m));
constexpr auto l = (std::numeric_limits<int64_t>::min)();
constexpr auto r = (std::numeric_limits<int64_t>::max)();
EXPECT_EQ(1, uniform_lower_bound(IntervalOpenOpen, 0, 0));
EXPECT_EQ(l + 1, uniform_lower_bound(IntervalOpenOpen, l, l));
EXPECT_EQ(r, uniform_lower_bound(IntervalOpenOpen, r - 1, r - 1));
EXPECT_EQ(r, uniform_lower_bound(IntervalOpenOpen, r, r));
EXPECT_EQ(-1, uniform_upper_bound(IntervalOpenOpen, 0, 0));
EXPECT_EQ(l, uniform_upper_bound(IntervalOpenOpen, l, l));
EXPECT_EQ(r - 1, uniform_upper_bound(IntervalOpenOpen, r, r));
}
TEST_F(UniformHelperTest, UniformBoundFunctionsRealBounds) {
constexpr IntervalOpenClosedTag IntervalOpenClosed;
EXPECT_EQ(1.0, uniform_lower_bound(IntervalOpenClosed, 1.0, 1.0));
EXPECT_EQ(1.0f, uniform_lower_bound(IntervalOpenClosed, 1.0f, 1.0f));
constexpr auto r = (std::numeric_limits<double>::max)();
const auto re = std::nexttoward(r, 0.0);
constexpr auto l = -r;
const auto le = std::nexttoward(l, 0.0);
EXPECT_EQ(l, uniform_lower_bound(IntervalOpenClosed, l, l));
EXPECT_EQ(r, uniform_lower_bound(IntervalOpenClosed, r, r));
EXPECT_EQ(le, uniform_lower_bound(IntervalOpenClosed, l, r));
EXPECT_EQ(le, uniform_lower_bound(IntervalOpenClosed, l, 0.0));
EXPECT_EQ(le, uniform_lower_bound(IntervalOpenClosed, l, le));
EXPECT_EQ(r, uniform_lower_bound(IntervalOpenClosed, re, r));
EXPECT_EQ(le, uniform_upper_bound(IntervalOpenClosed, l, l));
EXPECT_EQ(r, uniform_upper_bound(IntervalOpenClosed, r, r));
EXPECT_EQ(r, uniform_upper_bound(IntervalOpenClosed, l, r));
EXPECT_EQ(r, uniform_upper_bound(IntervalOpenClosed, l, re));
EXPECT_EQ(r, uniform_upper_bound(IntervalOpenClosed, 0.0, r));
EXPECT_EQ(r, uniform_upper_bound(IntervalOpenClosed, re, r));
EXPECT_EQ(r, uniform_upper_bound(IntervalOpenClosed, le, re));
const double e = std::nextafter(1.0, 2.0);
const double f = std::nextafter(1.0, 0.0);
EXPECT_EQ(e, uniform_lower_bound(IntervalOpenClosed, 1.0, e));
EXPECT_EQ(std::nextafter(e, 2.0),
uniform_upper_bound(IntervalOpenClosed, 1.0, e));
EXPECT_EQ(1.0, uniform_lower_bound(IntervalOpenClosed, f, 1.0));
EXPECT_EQ(e, uniform_upper_bound(IntervalOpenClosed, f, 1.0));
const double g = std::numeric_limits<double>::denorm_min();
const double h = std::nextafter(g, 1.0);
EXPECT_EQ(g, uniform_lower_bound(IntervalOpenClosed, 0.0, g));
EXPECT_EQ(h, uniform_upper_bound(IntervalOpenClosed, 0.0, g));
EXPECT_EQ(h, uniform_lower_bound(IntervalOpenClosed, g, 1.0));
EXPECT_EQ(e, uniform_upper_bound(IntervalOpenClosed, g, 1.0));
EXPECT_EQ(f, uniform_lower_bound(IntervalOpenClosed, 1.0, -1.0));
}
struct Invalid {};
template <typename A, typename B>
auto InferredUniformReturnT(int) -> uniform_inferred_return_t<A, B>;
template <typename, typename>
Invalid InferredUniformReturnT(...);
template <typename A, typename B, typename Expect>
void CheckArgsInferType() {
static_assert(
absl::conjunction<
std::is_same<Expect, decltype(InferredUniformReturnT<A, B>(0))>,
std::is_same<Expect,
decltype(InferredUniformReturnT<B, A>(0))>>::value,
"");
}
TEST_F(UniformHelperTest, UniformTypeInference) {
CheckArgsInferType<uint16_t, uint16_t, uint16_t>();
CheckArgsInferType<uint32_t, uint32_t, uint32_t>();
CheckArgsInferType<uint64_t, uint64_t, uint64_t>();
CheckArgsInferType<int16_t, int16_t, int16_t>();
CheckArgsInferType<int32_t, int32_t, int32_t>();
CheckArgsInferType<int64_t, int64_t, int64_t>();
CheckArgsInferType<float, float, float>();
CheckArgsInferType<double, double, double>();
CheckArgsInferType<uint16_t, uint32_t, uint32_t>();
CheckArgsInferType<uint16_t, uint64_t, uint64_t>();
CheckArgsInferType<uint16_t, int32_t, int32_t>();
CheckArgsInferType<uint16_t, int64_t, int64_t>();
CheckArgsInferType<uint16_t, float, float>();
CheckArgsInferType<uint16_t, double, double>();
CheckArgsInferType<int16_t, int32_t, int32_t>();
CheckArgsInferType<int16_t, int64_t, int64_t>();
CheckArgsInferType<int16_t, float, float>();
CheckArgsInferType<int16_t, double, double>();
CheckArgsInferType<uint16_t, int16_t, Invalid>();
CheckArgsInferType<int16_t, uint32_t, Invalid>();
CheckArgsInferType<int16_t, uint64_t, Invalid>();
CheckArgsInferType<uint32_t, uint64_t, uint64_t>();
CheckArgsInferType<uint32_t, int64_t, int64_t>();
CheckArgsInferType<uint32_t, double, double>();
CheckArgsInferType<int32_t, int64_t, int64_t>();
CheckArgsInferType<int32_t, double, double>();
CheckArgsInferType<uint32_t, int32_t, Invalid>();
CheckArgsInferType<int32_t, uint64_t, Invalid>();
CheckArgsInferType<int32_t, float, Invalid>();
CheckArgsInferType<uint32_t, float, Invalid>();
CheckArgsInferType<uint64_t, int64_t, Invalid>();
CheckArgsInferType<int64_t, float, Invalid>();
CheckArgsInferType<int64_t, double, Invalid>();
CheckArgsInferType<float, double, double>();
}
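// Illustrative addition (not part of the original suite): sanity checks for
// is_uniform_range_valid, which rejects reversed ranges and floating-point
// ranges whose width is not finite.
TEST_F(UniformHelperTest, IsUniformRangeValidSketch) {
  using absl::random_internal::is_uniform_range_valid;
  EXPECT_TRUE(is_uniform_range_valid(0, 10));
  EXPECT_FALSE(is_uniform_range_valid(10, 0));
  EXPECT_TRUE(is_uniform_range_valid(0.0, 1.0));
  EXPECT_FALSE(is_uniform_range_valid(-1e308, 1e308));
}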
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/uniform_helper.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/uniform_helper_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
d9e7938a-62ab-44dd-9d1c-26c951320ef7 | cpp | abseil/abseil-cpp | crc32c | absl/crc/crc32c.cc | absl/crc/crc32c_test.cc | #include "absl/crc/crc32c.h"
#include <cstdint>
#include "absl/crc/internal/crc.h"
#include "absl/crc/internal/crc32c.h"
#include "absl/crc/internal/crc_memcpy.h"
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace {
const crc_internal::CRC* CrcEngine() {
static const crc_internal::CRC* engine = crc_internal::CRC::Crc32c();
return engine;
}
constexpr uint32_t kCRC32Xor = 0xffffffffU;  // Pre/post conditioning mask around the raw CRC engine.
}
namespace crc_internal {
crc32c_t UnextendCrc32cByZeroes(crc32c_t initial_crc, size_t length) {
uint32_t crc = static_cast<uint32_t>(initial_crc) ^ kCRC32Xor;
CrcEngine()->UnextendByZeroes(&crc, length);
return static_cast<crc32c_t>(crc ^ kCRC32Xor);
}
crc32c_t ExtendCrc32cInternal(crc32c_t initial_crc,
absl::string_view buf_to_add) {
uint32_t crc = static_cast<uint32_t>(initial_crc) ^ kCRC32Xor;
CrcEngine()->Extend(&crc, buf_to_add.data(), buf_to_add.size());
return static_cast<crc32c_t>(crc ^ kCRC32Xor);
}
}
crc32c_t ComputeCrc32c(absl::string_view buf) {
return ExtendCrc32c(crc32c_t{0}, buf);
}
crc32c_t ExtendCrc32cByZeroes(crc32c_t initial_crc, size_t length) {
uint32_t crc = static_cast<uint32_t>(initial_crc) ^ kCRC32Xor;
CrcEngine()->ExtendByZeroes(&crc, length);
return static_cast<crc32c_t>(crc ^ kCRC32Xor);
}
crc32c_t ConcatCrc32c(crc32c_t lhs_crc, crc32c_t rhs_crc, size_t rhs_len) {
uint32_t result = static_cast<uint32_t>(lhs_crc);
CrcEngine()->ExtendByZeroes(&result, rhs_len);
return crc32c_t{result ^ static_cast<uint32_t>(rhs_crc)};
}
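// Added commentary (not in the original source): CRC32C is linear over GF(2),
// so the CRC of a concatenation is the CRC of the left part advanced past
// rhs_len zero bytes, XOR-ed with the CRC of the right part. ExtendByZeroes
// performs that advancement in logarithmic time instead of hashing rhs_len
// literal zeros.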
crc32c_t RemoveCrc32cPrefix(crc32c_t crc_a, crc32c_t crc_ab, size_t length_b) {
return ConcatCrc32c(crc_a, crc_ab, length_b);
}
crc32c_t MemcpyCrc32c(void* dest, const void* src, size_t count,
crc32c_t initial_crc) {
return static_cast<crc32c_t>(
crc_internal::Crc32CAndCopy(dest, src, count, initial_crc, false));
}
crc32c_t RemoveCrc32cSuffix(crc32c_t full_string_crc, crc32c_t suffix_crc,
size_t suffix_len) {
uint32_t result = static_cast<uint32_t>(full_string_crc) ^
static_cast<uint32_t>(suffix_crc);
CrcEngine()->UnextendByZeroes(&result, suffix_len);
return crc32c_t{result};
}
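// Added commentary (not in the original source): this inverts the Concat
// identity -- XOR away the suffix CRC, then "unextend" past suffix_len zero
// bytes to recover the CRC of the prefix alone.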
ABSL_NAMESPACE_END
} | #include "absl/crc/crc32c.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <sstream>
#include <string>
#include "gtest/gtest.h"
#include "absl/crc/internal/crc32c.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
namespace {
TEST(CRC32C, RFC3720) {
char data[32];
memset(data, 0, sizeof(data));
EXPECT_EQ(absl::ComputeCrc32c(absl::string_view(data, sizeof(data))),
absl::crc32c_t{0x8a9136aa});
memset(data, 0xff, sizeof(data));
EXPECT_EQ(absl::ComputeCrc32c(absl::string_view(data, sizeof(data))),
absl::crc32c_t{0x62a8ab43});
for (int i = 0; i < 32; ++i) data[i] = static_cast<char>(i);
EXPECT_EQ(absl::ComputeCrc32c(absl::string_view(data, sizeof(data))),
absl::crc32c_t{0x46dd794e});
for (int i = 0; i < 32; ++i) data[i] = static_cast<char>(31 - i);
EXPECT_EQ(absl::ComputeCrc32c(absl::string_view(data, sizeof(data))),
absl::crc32c_t{0x113fdb5c});
constexpr uint8_t cmd[48] = {
0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
EXPECT_EQ(absl::ComputeCrc32c(absl::string_view(
reinterpret_cast<const char*>(cmd), sizeof(cmd))),
absl::crc32c_t{0xd9963a56});
}
std::string TestString(size_t len) {
std::string result;
result.reserve(len);
for (size_t i = 0; i < len; ++i) {
result.push_back(static_cast<char>(i % 256));
}
return result;
}
TEST(CRC32C, Compute) {
EXPECT_EQ(absl::ComputeCrc32c(""), absl::crc32c_t{0});
EXPECT_EQ(absl::ComputeCrc32c("hello world"), absl::crc32c_t{0xc99465aa});
}
TEST(CRC32C, Extend) {
uint32_t base = 0xC99465AA;
std::string extension = "Extension String";
EXPECT_EQ(
absl::ExtendCrc32c(absl::crc32c_t{base}, extension),
absl::crc32c_t{0xD2F65090});
}
TEST(CRC32C, ExtendByZeroes) {
std::string base = "hello world";
absl::crc32c_t base_crc = absl::crc32c_t{0xc99465aa};
constexpr size_t kExtendByValues[] = {100, 10000, 100000};
for (const size_t extend_by : kExtendByValues) {
SCOPED_TRACE(extend_by);
absl::crc32c_t crc2 = absl::ExtendCrc32cByZeroes(base_crc, extend_by);
EXPECT_EQ(crc2, absl::ComputeCrc32c(base + std::string(extend_by, '\0')));
}
}
TEST(CRC32C, UnextendByZeroes) {
constexpr size_t kExtendByValues[] = {2, 200, 20000, 200000, 20000000};
constexpr size_t kUnextendByValues[] = {0, 100, 10000, 100000, 10000000};
for (auto seed_crc : {absl::crc32c_t{0}, absl::crc32c_t{0xc99465aa}}) {
SCOPED_TRACE(seed_crc);
for (const size_t size_1 : kExtendByValues) {
for (const size_t size_2 : kUnextendByValues) {
size_t extend_size = std::max(size_1, size_2);
size_t unextend_size = std::min(size_1, size_2);
SCOPED_TRACE(extend_size);
SCOPED_TRACE(unextend_size);
absl::crc32c_t crc1 = seed_crc;
crc1 = absl::ExtendCrc32cByZeroes(crc1, extend_size);
crc1 = absl::crc_internal::UnextendCrc32cByZeroes(crc1, unextend_size);
absl::crc32c_t crc2 = seed_crc;
crc2 = absl::ExtendCrc32cByZeroes(crc2, extend_size - unextend_size);
EXPECT_EQ(crc1, crc2);
}
}
}
constexpr size_t kSizes[] = {0, 1, 100, 10000};
for (const size_t size : kSizes) {
SCOPED_TRACE(size);
std::string string_before = TestString(size);
std::string string_after = string_before + std::string(size, '\0');
absl::crc32c_t crc_before = absl::ComputeCrc32c(string_before);
absl::crc32c_t crc_after = absl::ComputeCrc32c(string_after);
EXPECT_EQ(crc_before,
absl::crc_internal::UnextendCrc32cByZeroes(crc_after, size));
}
}
TEST(CRC32C, Concat) {
std::string hello = "Hello, ";
std::string world = "world!";
std::string hello_world = absl::StrCat(hello, world);
absl::crc32c_t crc_a = absl::ComputeCrc32c(hello);
absl::crc32c_t crc_b = absl::ComputeCrc32c(world);
absl::crc32c_t crc_ab = absl::ComputeCrc32c(hello_world);
EXPECT_EQ(absl::ConcatCrc32c(crc_a, crc_b, world.size()), crc_ab);
}
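// Illustrative addition (not part of the original suite): concatenation via
// ConcatCrc32c should agree with extending the prefix CRC directly over the
// suffix bytes.
TEST(CRC32C, ConcatMatchesExtend) {
  std::string prefix = "Hello, ";
  std::string suffix = "world!";
  absl::crc32c_t crc_prefix = absl::ComputeCrc32c(prefix);
  absl::crc32c_t crc_suffix = absl::ComputeCrc32c(suffix);
  EXPECT_EQ(absl::ConcatCrc32c(crc_prefix, crc_suffix, suffix.size()),
            absl::ExtendCrc32c(crc_prefix, suffix));
}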
TEST(CRC32C, Memcpy) {
constexpr size_t kBytesSize[] = {0, 1, 20, 500, 100000};
for (size_t bytes : kBytesSize) {
SCOPED_TRACE(bytes);
std::string sample_string = TestString(bytes);
std::string target_buffer = std::string(bytes, '\0');
absl::crc32c_t memcpy_crc =
absl::MemcpyCrc32c(&(target_buffer[0]), sample_string.data(), bytes);
absl::crc32c_t compute_crc = absl::ComputeCrc32c(sample_string);
EXPECT_EQ(memcpy_crc, compute_crc);
EXPECT_EQ(sample_string, target_buffer);
}
}
TEST(CRC32C, RemovePrefix) {
std::string hello = "Hello, ";
std::string world = "world!";
std::string hello_world = absl::StrCat(hello, world);
absl::crc32c_t crc_a = absl::ComputeCrc32c(hello);
absl::crc32c_t crc_b = absl::ComputeCrc32c(world);
absl::crc32c_t crc_ab = absl::ComputeCrc32c(hello_world);
EXPECT_EQ(absl::RemoveCrc32cPrefix(crc_a, crc_ab, world.size()), crc_b);
}
TEST(CRC32C, RemoveSuffix) {
std::string hello = "Hello, ";
std::string world = "world!";
std::string hello_world = absl::StrCat(hello, world);
absl::crc32c_t crc_a = absl::ComputeCrc32c(hello);
absl::crc32c_t crc_b = absl::ComputeCrc32c(world);
absl::crc32c_t crc_ab = absl::ComputeCrc32c(hello_world);
EXPECT_EQ(absl::RemoveCrc32cSuffix(crc_ab, crc_b, world.size()), crc_a);
}
TEST(CRC32C, InsertionOperator) {
{
std::ostringstream buf;
buf << absl::crc32c_t{0xc99465aa};
EXPECT_EQ(buf.str(), "c99465aa");
}
{
std::ostringstream buf;
buf << absl::crc32c_t{0};
EXPECT_EQ(buf.str(), "00000000");
}
{
std::ostringstream buf;
buf << absl::crc32c_t{17};
EXPECT_EQ(buf.str(), "00000011");
}
}
TEST(CRC32C, AbslStringify) {
EXPECT_EQ(absl::StrFormat("%v", absl::crc32c_t{0xc99465aa}), "c99465aa");
EXPECT_EQ(absl::StrFormat("%v", absl::crc32c_t{0}), "00000000");
EXPECT_EQ(absl::StrFormat("%v", absl::crc32c_t{17}), "00000011");
EXPECT_EQ(absl::StrCat(absl::crc32c_t{0xc99465aa}), "c99465aa");
EXPECT_EQ(absl::StrCat(absl::crc32c_t{0}), "00000000");
EXPECT_EQ(absl::StrCat(absl::crc32c_t{17}), "00000011");
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/crc/crc32c.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/crc/crc32c_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
ffc07e61-07c7-4b48-aa1e-c9a582db6910 | cpp | google/cel-cpp | standard_library | checker/standard_library.cc | checker/standard_library_test.cc | #include "checker/standard_library.h"
#include <string>
#include <utility>
#include "absl/base/no_destructor.h"
#include "absl/status/status.h"
#include "base/builtins.h"
#include "checker/internal/builtins_arena.h"
#include "checker/type_checker_builder.h"
#include "common/constant.h"
#include "common/decl.h"
#include "common/type.h"
#include "internal/status_macros.h"
namespace cel {
namespace {
using ::cel::checker_internal::BuiltinsArena;
TypeParamType TypeParamA() { return TypeParamType("A"); }
TypeParamType TypeParamB() { return TypeParamType("B"); }
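// Added commentary (not in the original source): each helper below returns a
// lazily constructed singleton. The composite types allocate their storage on
// the shared BuiltinsArena, and absl::NoDestructor keeps the Type handle
// alive for the life of the process without an exit-time destructor.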
Type ListOfA() {
static absl::NoDestructor<Type> kInstance(
ListType(BuiltinsArena(), TypeParamA()));
return *kInstance;
}
Type MapOfAB() {
static absl::NoDestructor<Type> kInstance(
MapType(BuiltinsArena(), TypeParamA(), TypeParamB()));
return *kInstance;
}
Type TypeOfA() {
static absl::NoDestructor<Type> kInstance(
TypeType(BuiltinsArena(), TypeParamA()));
return *kInstance;
}
Type TypeNullType() {
static absl::NoDestructor<Type> kInstance(
TypeType(BuiltinsArena(), NullType()));
return *kInstance;
}
Type TypeBoolType() {
static absl::NoDestructor<Type> kInstance(
TypeType(BuiltinsArena(), BoolType()));
return *kInstance;
}
Type TypeIntType() {
static absl::NoDestructor<Type> kInstance(
TypeType(BuiltinsArena(), IntType()));
return *kInstance;
}
Type TypeUintType() {
static absl::NoDestructor<Type> kInstance(
TypeType(BuiltinsArena(), UintType()));
return *kInstance;
}
Type TypeDoubleType() {
static absl::NoDestructor<Type> kInstance(
TypeType(BuiltinsArena(), DoubleType()));
return *kInstance;
}
Type TypeStringType() {
static absl::NoDestructor<Type> kInstance(
TypeType(BuiltinsArena(), StringType()));
return *kInstance;
}
Type TypeBytesType() {
static absl::NoDestructor<Type> kInstance(
TypeType(BuiltinsArena(), BytesType()));
return *kInstance;
}
Type TypeDurationType() {
static absl::NoDestructor<Type> kInstance(
TypeType(BuiltinsArena(), DurationType()));
return *kInstance;
}
Type TypeTimestampType() {
static absl::NoDestructor<Type> kInstance(
TypeType(BuiltinsArena(), TimestampType()));
return *kInstance;
}
Type TypeListType() {
static absl::NoDestructor<Type> kInstance(
TypeType(BuiltinsArena(), ListOfA()));
return *kInstance;
}
Type TypeMapType() {
static absl::NoDestructor<Type> kInstance(
TypeType(BuiltinsArena(), MapOfAB()));
return *kInstance;
}
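// Added commentary (not in the original source): StandardOverloads collects
// the stable overload-id strings for the standard library; these ids mirror
// the overload ids in the CEL specification, which is how checked expressions
// and runtime function registries agree on a resolution.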
class StandardOverloads {
public:
static constexpr char kAddInt[] = "add_int64";
static constexpr char kAddUint[] = "add_uint64";
static constexpr char kAddDouble[] = "add_double";
static constexpr char kAddDurationDuration[] = "add_duration_duration";
static constexpr char kAddDurationTimestamp[] = "add_duration_timestamp";
static constexpr char kAddTimestampDuration[] = "add_timestamp_duration";
static constexpr char kAddString[] = "add_string";
static constexpr char kAddBytes[] = "add_bytes";
static constexpr char kAddList[] = "add_list";
static constexpr char kSubtractInt[] = "subtract_int64";
static constexpr char kSubtractUint[] = "subtract_uint64";
static constexpr char kSubtractDouble[] = "subtract_double";
static constexpr char kSubtractDurationDuration[] =
"subtract_duration_duration";
static constexpr char kSubtractTimestampDuration[] =
"subtract_timestamp_duration";
static constexpr char kSubtractTimestampTimestamp[] =
"subtract_timestamp_timestamp";
static constexpr char kMultiplyInt[] = "multiply_int64";
static constexpr char kMultiplyUint[] = "multiply_uint64";
static constexpr char kMultiplyDouble[] = "multiply_double";
static constexpr char kDivideInt[] = "divide_int64";
static constexpr char kDivideUint[] = "divide_uint64";
static constexpr char kDivideDouble[] = "divide_double";
static constexpr char kModuloInt[] = "modulo_int64";
static constexpr char kModuloUint[] = "modulo_uint64";
static constexpr char kNegateInt[] = "negate_int64";
static constexpr char kNegateDouble[] = "negate_double";
static constexpr char kNot[] = "logical_not";
static constexpr char kAnd[] = "logical_and";
static constexpr char kOr[] = "logical_or";
static constexpr char kConditional[] = "conditional";
static constexpr char kNotStrictlyFalse[] = "not_strictly_false";
static constexpr char kNotStrictlyFalseDeprecated[] =
"__not_strictly_false__";
static constexpr char kEquals[] = "equals";
static constexpr char kNotEquals[] = "not_equals";
static constexpr char kLessBool[] = "less_bool";
static constexpr char kLessString[] = "less_string";
static constexpr char kLessBytes[] = "less_bytes";
static constexpr char kLessDuration[] = "less_duration";
static constexpr char kLessTimestamp[] = "less_timestamp";
static constexpr char kLessInt[] = "less_int64";
static constexpr char kLessIntUint[] = "less_int64_uint64";
static constexpr char kLessIntDouble[] = "less_int64_double";
static constexpr char kLessDouble[] = "less_double";
static constexpr char kLessDoubleInt[] = "less_double_int64";
static constexpr char kLessDoubleUint[] = "less_double_uint64";
static constexpr char kLessUint[] = "less_uint64";
static constexpr char kLessUintInt[] = "less_uint64_int64";
static constexpr char kLessUintDouble[] = "less_uint64_double";
static constexpr char kGreaterBool[] = "greater_bool";
static constexpr char kGreaterString[] = "greater_string";
static constexpr char kGreaterBytes[] = "greater_bytes";
static constexpr char kGreaterDuration[] = "greater_duration";
static constexpr char kGreaterTimestamp[] = "greater_timestamp";
static constexpr char kGreaterInt[] = "greater_int64";
static constexpr char kGreaterIntUint[] = "greater_int64_uint64";
static constexpr char kGreaterIntDouble[] = "greater_int64_double";
static constexpr char kGreaterDouble[] = "greater_double";
static constexpr char kGreaterDoubleInt[] = "greater_double_int64";
static constexpr char kGreaterDoubleUint[] = "greater_double_uint64";
static constexpr char kGreaterUint[] = "greater_uint64";
static constexpr char kGreaterUintInt[] = "greater_uint64_int64";
static constexpr char kGreaterUintDouble[] = "greater_uint64_double";
static constexpr char kGreaterEqualsBool[] = "greater_equals_bool";
static constexpr char kGreaterEqualsString[] = "greater_equals_string";
static constexpr char kGreaterEqualsBytes[] = "greater_equals_bytes";
static constexpr char kGreaterEqualsDuration[] = "greater_equals_duration";
static constexpr char kGreaterEqualsTimestamp[] = "greater_equals_timestamp";
static constexpr char kGreaterEqualsInt[] = "greater_equals_int64";
static constexpr char kGreaterEqualsIntUint[] = "greater_equals_int64_uint64";
static constexpr char kGreaterEqualsIntDouble[] =
"greater_equals_int64_double";
static constexpr char kGreaterEqualsDouble[] = "greater_equals_double";
static constexpr char kGreaterEqualsDoubleInt[] =
"greater_equals_double_int64";
static constexpr char kGreaterEqualsDoubleUint[] =
"greater_equals_double_uint64";
static constexpr char kGreaterEqualsUint[] = "greater_equals_uint64";
static constexpr char kGreaterEqualsUintInt[] = "greater_equals_uint64_int64";
  static constexpr char kGreaterEqualsUintDouble[] =
      "greater_equals_uint64_double";
static constexpr char kLessEqualsBool[] = "less_equals_bool";
static constexpr char kLessEqualsString[] = "less_equals_string";
static constexpr char kLessEqualsBytes[] = "less_equals_bytes";
static constexpr char kLessEqualsDuration[] = "less_equals_duration";
static constexpr char kLessEqualsTimestamp[] = "less_equals_timestamp";
static constexpr char kLessEqualsInt[] = "less_equals_int64";
static constexpr char kLessEqualsIntUint[] = "less_equals_int64_uint64";
static constexpr char kLessEqualsIntDouble[] = "less_equals_int64_double";
static constexpr char kLessEqualsDouble[] = "less_equals_double";
static constexpr char kLessEqualsDoubleInt[] = "less_equals_double_int64";
static constexpr char kLessEqualsDoubleUint[] = "less_equals_double_uint64";
static constexpr char kLessEqualsUint[] = "less_equals_uint64";
static constexpr char kLessEqualsUintInt[] = "less_equals_uint64_int64";
static constexpr char kLessEqualsUintDouble[] = "less_equals_uint64_double";
static constexpr char kIndexList[] = "index_list";
static constexpr char kIndexMap[] = "index_map";
static constexpr char kInList[] = "in_list";
static constexpr char kInMap[] = "in_map";
static constexpr char kSizeBytes[] = "size_bytes";
static constexpr char kSizeList[] = "size_list";
static constexpr char kSizeMap[] = "size_map";
static constexpr char kSizeString[] = "size_string";
static constexpr char kSizeBytesMember[] = "bytes_size";
static constexpr char kSizeListMember[] = "list_size";
static constexpr char kSizeMapMember[] = "map_size";
static constexpr char kSizeStringMember[] = "string_size";
static constexpr char kContainsString[] = "contains_string";
static constexpr char kEndsWithString[] = "ends_with_string";
static constexpr char kStartsWithString[] = "starts_with_string";
static constexpr char kMatches[] = "matches";
static constexpr char kMatchesMember[] = "matches_string";
static constexpr char kTimestampToYear[] = "timestamp_to_year";
static constexpr char kTimestampToYearWithTz[] = "timestamp_to_year_with_tz";
static constexpr char kTimestampToMonth[] = "timestamp_to_month";
static constexpr char kTimestampToMonthWithTz[] =
"timestamp_to_month_with_tz";
static constexpr char kTimestampToDayOfYear[] = "timestamp_to_day_of_year";
static constexpr char kTimestampToDayOfYearWithTz[] =
"timestamp_to_day_of_year_with_tz";
static constexpr char kTimestampToDayOfMonth[] = "timestamp_to_day_of_month";
static constexpr char kTimestampToDayOfMonthWithTz[] =
"timestamp_to_day_of_month_with_tz";
static constexpr char kTimestampToDayOfWeek[] = "timestamp_to_day_of_week";
static constexpr char kTimestampToDayOfWeekWithTz[] =
"timestamp_to_day_of_week_with_tz";
static constexpr char kTimestampToDate[] =
"timestamp_to_day_of_month_1_based";
static constexpr char kTimestampToDateWithTz[] =
"timestamp_to_day_of_month_1_based_with_tz";
static constexpr char kTimestampToHours[] = "timestamp_to_hours";
static constexpr char kTimestampToHoursWithTz[] =
"timestamp_to_hours_with_tz";
static constexpr char kDurationToHours[] = "duration_to_hours";
static constexpr char kTimestampToMinutes[] = "timestamp_to_minutes";
static constexpr char kTimestampToMinutesWithTz[] =
"timestamp_to_minutes_with_tz";
static constexpr char kDurationToMinutes[] = "duration_to_minutes";
static constexpr char kTimestampToSeconds[] = "timestamp_to_seconds";
static constexpr char kTimestampToSecondsWithTz[] = "timestamp_to_seconds_tz";
static constexpr char kDurationToSeconds[] = "duration_to_seconds";
static constexpr char kTimestampToMilliseconds[] =
"timestamp_to_milliseconds";
static constexpr char kTimestampToMillisecondsWithTz[] =
"timestamp_to_milliseconds_with_tz";
static constexpr char kDurationToMilliseconds[] = "duration_to_milliseconds";
static constexpr char kToDyn[] = "to_dyn";
static constexpr char kUintToUint[] = "uint64_to_uint64";
static constexpr char kDoubleToUint[] = "double_to_uint64";
static constexpr char kIntToUint[] = "int64_to_uint64";
static constexpr char kStringToUint[] = "string_to_uint64";
static constexpr char kUintToInt[] = "uint64_to_int64";
static constexpr char kDoubleToInt[] = "double_to_int64";
static constexpr char kIntToInt[] = "int64_to_int64";
static constexpr char kStringToInt[] = "string_to_int64";
static constexpr char kTimestampToInt[] = "timestamp_to_int64";
static constexpr char kDurationToInt[] = "duration_to_int64";
static constexpr char kDoubleToDouble[] = "double_to_double";
static constexpr char kUintToDouble[] = "uint64_to_double";
static constexpr char kIntToDouble[] = "int64_to_double";
static constexpr char kStringToDouble[] = "string_to_double";
static constexpr char kBoolToBool[] = "bool_to_bool";
static constexpr char kStringToBool[] = "string_to_bool";
static constexpr char kBytesToBytes[] = "bytes_to_bytes";
static constexpr char kStringToBytes[] = "string_to_bytes";
static constexpr char kStringToString[] = "string_to_string";
static constexpr char kBytesToString[] = "bytes_to_string";
static constexpr char kBoolToString[] = "bool_to_string";
static constexpr char kDoubleToString[] = "double_to_string";
static constexpr char kIntToString[] = "int64_to_string";
static constexpr char kUintToString[] = "uint64_to_string";
static constexpr char kDurationToString[] = "duration_to_string";
static constexpr char kTimestampToString[] = "timestamp_to_string";
static constexpr char kTimestampToTimestamp[] = "timestamp_to_timestamp";
static constexpr char kIntToTimestamp[] = "int64_to_timestamp";
static constexpr char kStringToTimestamp[] = "string_to_timestamp";
static constexpr char kDurationToDuration[] = "duration_to_duration";
static constexpr char kIntToDuration[] = "int64_to_duration";
static constexpr char kStringToDuration[] = "string_to_duration";
static constexpr char kToType[] = "type";
};
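// Registers the arithmetic operators: _+_ (numerics, duration/timestamp,
// string, bytes, and list concatenation), _-_ (numerics and time types),
// _*_ and _/_ (numerics), _%_ (int/uint), and unary negation (int/double).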
absl::Status AddArithmeticOps(TypeCheckerBuilder& builder) {
FunctionDecl add_op;
add_op.set_name(builtin::kAdd);
CEL_RETURN_IF_ERROR(add_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kAddInt, IntType(), IntType(), IntType())));
CEL_RETURN_IF_ERROR(add_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kAddDouble, DoubleType(),
DoubleType(), DoubleType())));
CEL_RETURN_IF_ERROR(add_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kAddUint, UintType(), UintType(), UintType())));
CEL_RETURN_IF_ERROR(add_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kAddDurationDuration, DurationType(),
DurationType(), DurationType())));
CEL_RETURN_IF_ERROR(add_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kAddDurationTimestamp,
TimestampType(), DurationType(), TimestampType())));
CEL_RETURN_IF_ERROR(add_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kAddTimestampDuration,
TimestampType(), TimestampType(), DurationType())));
CEL_RETURN_IF_ERROR(add_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kAddBytes, BytesType(), BytesType(), BytesType())));
CEL_RETURN_IF_ERROR(add_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kAddString, StringType(),
StringType(), StringType())));
CEL_RETURN_IF_ERROR(add_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kAddList, ListOfA(), ListOfA(), ListOfA())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(add_op)));
FunctionDecl subtract_op;
subtract_op.set_name(builtin::kSubtract);
CEL_RETURN_IF_ERROR(subtract_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kSubtractInt, IntType(), IntType(), IntType())));
CEL_RETURN_IF_ERROR(subtract_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kSubtractUint, UintType(), UintType(), UintType())));
CEL_RETURN_IF_ERROR(subtract_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kSubtractDouble, DoubleType(),
DoubleType(), DoubleType())));
CEL_RETURN_IF_ERROR(subtract_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kSubtractDurationDuration,
DurationType(), DurationType(), DurationType())));
CEL_RETURN_IF_ERROR(subtract_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kSubtractTimestampDuration,
TimestampType(), TimestampType(), DurationType())));
CEL_RETURN_IF_ERROR(subtract_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kSubtractTimestampTimestamp,
DurationType(), TimestampType(), TimestampType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(subtract_op)));
FunctionDecl multiply_op;
multiply_op.set_name(builtin::kMultiply);
CEL_RETURN_IF_ERROR(multiply_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kMultiplyInt, IntType(), IntType(), IntType())));
CEL_RETURN_IF_ERROR(multiply_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kMultiplyUint, UintType(), UintType(), UintType())));
CEL_RETURN_IF_ERROR(multiply_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kMultiplyDouble, DoubleType(),
DoubleType(), DoubleType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(multiply_op)));
FunctionDecl division_op;
division_op.set_name(builtin::kDivide);
CEL_RETURN_IF_ERROR(division_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kDivideInt, IntType(), IntType(), IntType())));
CEL_RETURN_IF_ERROR(division_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kDivideUint, UintType(), UintType(), UintType())));
CEL_RETURN_IF_ERROR(division_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kDivideDouble, DoubleType(),
DoubleType(), DoubleType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(division_op)));
FunctionDecl modulo_op;
modulo_op.set_name(builtin::kModulo);
CEL_RETURN_IF_ERROR(modulo_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kModuloInt, IntType(), IntType(), IntType())));
CEL_RETURN_IF_ERROR(modulo_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kModuloUint, UintType(), UintType(), UintType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(modulo_op)));
FunctionDecl negate_op;
negate_op.set_name(builtin::kNeg);
CEL_RETURN_IF_ERROR(negate_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kNegateInt, IntType(), IntType())));
CEL_RETURN_IF_ERROR(negate_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kNegateDouble, DoubleType(), DoubleType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(negate_op)));
return absl::OkStatus();
}
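// Registers the logical operators: !_, _&&_, _||_, the ternary conditional,
// and @not_strictly_false (plus its deprecated spelling) used in
// comprehension loop conditions.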
absl::Status AddLogicalOps(TypeCheckerBuilder& builder) {
FunctionDecl not_op;
not_op.set_name(builtin::kNot);
CEL_RETURN_IF_ERROR(not_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kNot, BoolType(), BoolType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(not_op)));
FunctionDecl and_op;
and_op.set_name(builtin::kAnd);
CEL_RETURN_IF_ERROR(and_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kAnd, BoolType(), BoolType(), BoolType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(and_op)));
FunctionDecl or_op;
or_op.set_name(builtin::kOr);
CEL_RETURN_IF_ERROR(or_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kOr, BoolType(), BoolType(), BoolType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(or_op)));
FunctionDecl conditional_op;
conditional_op.set_name(builtin::kTernary);
CEL_RETURN_IF_ERROR(conditional_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kConditional, TypeParamA(),
BoolType(), TypeParamA(), TypeParamA())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(conditional_op)));
FunctionDecl not_strictly_false;
not_strictly_false.set_name(builtin::kNotStrictlyFalse);
CEL_RETURN_IF_ERROR(not_strictly_false.AddOverload(MakeOverloadDecl(
StandardOverloads::kNotStrictlyFalse, BoolType(), BoolType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(not_strictly_false)));
FunctionDecl not_strictly_false_deprecated;
not_strictly_false_deprecated.set_name(builtin::kNotStrictlyFalseDeprecated);
CEL_RETURN_IF_ERROR(not_strictly_false_deprecated.AddOverload(
MakeOverloadDecl(StandardOverloads::kNotStrictlyFalseDeprecated,
BoolType(), BoolType())));
CEL_RETURN_IF_ERROR(
builder.AddFunction(std::move(not_strictly_false_deprecated)));
return absl::OkStatus();
}
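// Registers the standard type-conversion functions. Each conversion also
// includes an identity overload (e.g. int(int) -> int).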
absl::Status AddTypeConversions(TypeCheckerBuilder& builder) {
FunctionDecl to_dyn;
to_dyn.set_name(builtin::kDyn);
CEL_RETURN_IF_ERROR(to_dyn.AddOverload(
MakeOverloadDecl(StandardOverloads::kToDyn, DynType(), TypeParamA())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_dyn)));
FunctionDecl to_uint;
to_uint.set_name(builtin::kUint);
CEL_RETURN_IF_ERROR(to_uint.AddOverload(MakeOverloadDecl(
StandardOverloads::kUintToUint, UintType(), UintType())));
CEL_RETURN_IF_ERROR(to_uint.AddOverload(
MakeOverloadDecl(StandardOverloads::kIntToUint, UintType(), IntType())));
CEL_RETURN_IF_ERROR(to_uint.AddOverload(MakeOverloadDecl(
StandardOverloads::kDoubleToUint, UintType(), DoubleType())));
CEL_RETURN_IF_ERROR(to_uint.AddOverload(MakeOverloadDecl(
StandardOverloads::kStringToUint, UintType(), StringType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_uint)));
FunctionDecl to_int;
to_int.set_name(builtin::kInt);
CEL_RETURN_IF_ERROR(to_int.AddOverload(
MakeOverloadDecl(StandardOverloads::kIntToInt, IntType(), IntType())));
CEL_RETURN_IF_ERROR(to_int.AddOverload(
MakeOverloadDecl(StandardOverloads::kUintToInt, IntType(), UintType())));
CEL_RETURN_IF_ERROR(to_int.AddOverload(MakeOverloadDecl(
StandardOverloads::kDoubleToInt, IntType(), DoubleType())));
CEL_RETURN_IF_ERROR(to_int.AddOverload(MakeOverloadDecl(
StandardOverloads::kStringToInt, IntType(), StringType())));
CEL_RETURN_IF_ERROR(to_int.AddOverload(MakeOverloadDecl(
StandardOverloads::kTimestampToInt, IntType(), TimestampType())));
CEL_RETURN_IF_ERROR(to_int.AddOverload(MakeOverloadDecl(
StandardOverloads::kDurationToInt, IntType(), DurationType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_int)));
FunctionDecl to_double;
to_double.set_name(builtin::kDouble);
CEL_RETURN_IF_ERROR(to_double.AddOverload(MakeOverloadDecl(
StandardOverloads::kDoubleToDouble, DoubleType(), DoubleType())));
CEL_RETURN_IF_ERROR(to_double.AddOverload(MakeOverloadDecl(
StandardOverloads::kIntToDouble, DoubleType(), IntType())));
CEL_RETURN_IF_ERROR(to_double.AddOverload(MakeOverloadDecl(
StandardOverloads::kUintToDouble, DoubleType(), UintType())));
CEL_RETURN_IF_ERROR(to_double.AddOverload(MakeOverloadDecl(
StandardOverloads::kStringToDouble, DoubleType(), StringType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_double)));
FunctionDecl to_bool;
to_bool.set_name("bool");
CEL_RETURN_IF_ERROR(to_bool.AddOverload(MakeOverloadDecl(
StandardOverloads::kBoolToBool, BoolType(), BoolType())));
CEL_RETURN_IF_ERROR(to_bool.AddOverload(MakeOverloadDecl(
StandardOverloads::kStringToBool, BoolType(), StringType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_bool)));
FunctionDecl to_string;
to_string.set_name(builtin::kString);
CEL_RETURN_IF_ERROR(to_string.AddOverload(MakeOverloadDecl(
StandardOverloads::kStringToString, StringType(), StringType())));
CEL_RETURN_IF_ERROR(to_string.AddOverload(MakeOverloadDecl(
StandardOverloads::kBytesToString, StringType(), BytesType())));
CEL_RETURN_IF_ERROR(to_string.AddOverload(MakeOverloadDecl(
StandardOverloads::kBoolToString, StringType(), BoolType())));
CEL_RETURN_IF_ERROR(to_string.AddOverload(MakeOverloadDecl(
StandardOverloads::kDoubleToString, StringType(), DoubleType())));
CEL_RETURN_IF_ERROR(to_string.AddOverload(MakeOverloadDecl(
StandardOverloads::kIntToString, StringType(), IntType())));
CEL_RETURN_IF_ERROR(to_string.AddOverload(MakeOverloadDecl(
StandardOverloads::kUintToString, StringType(), UintType())));
CEL_RETURN_IF_ERROR(to_string.AddOverload(MakeOverloadDecl(
StandardOverloads::kTimestampToString, StringType(), TimestampType())));
CEL_RETURN_IF_ERROR(to_string.AddOverload(MakeOverloadDecl(
StandardOverloads::kDurationToString, StringType(), DurationType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_string)));
FunctionDecl to_bytes;
to_bytes.set_name(builtin::kBytes);
CEL_RETURN_IF_ERROR(to_bytes.AddOverload(MakeOverloadDecl(
StandardOverloads::kBytesToBytes, BytesType(), BytesType())));
CEL_RETURN_IF_ERROR(to_bytes.AddOverload(MakeOverloadDecl(
StandardOverloads::kStringToBytes, BytesType(), StringType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_bytes)));
FunctionDecl to_timestamp;
to_timestamp.set_name(builtin::kTimestamp);
CEL_RETURN_IF_ERROR(to_timestamp.AddOverload(
MakeOverloadDecl(StandardOverloads::kTimestampToTimestamp,
TimestampType(), TimestampType())));
CEL_RETURN_IF_ERROR(to_timestamp.AddOverload(MakeOverloadDecl(
StandardOverloads::kStringToTimestamp, TimestampType(), StringType())));
CEL_RETURN_IF_ERROR(to_timestamp.AddOverload(MakeOverloadDecl(
StandardOverloads::kIntToTimestamp, TimestampType(), IntType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_timestamp)));
FunctionDecl to_duration;
to_duration.set_name(builtin::kDuration);
CEL_RETURN_IF_ERROR(to_duration.AddOverload(MakeOverloadDecl(
StandardOverloads::kDurationToDuration, DurationType(), DurationType())));
CEL_RETURN_IF_ERROR(to_duration.AddOverload(MakeOverloadDecl(
StandardOverloads::kStringToDuration, DurationType(), StringType())));
CEL_RETURN_IF_ERROR(to_duration.AddOverload(MakeOverloadDecl(
StandardOverloads::kIntToDuration, DurationType(), IntType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_duration)));
FunctionDecl to_type;
to_type.set_name(builtin::kType);
CEL_RETURN_IF_ERROR(to_type.AddOverload(MakeOverloadDecl(
StandardOverloads::kToType, Type(TypeOfA()), TypeParamA())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(to_type)));
return absl::OkStatus();
}
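// Registers _==_ and _!=_ as a single generic overload over a type parameter.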
absl::Status AddEqualityOps(TypeCheckerBuilder& builder) {
FunctionDecl equals_op;
equals_op.set_name(builtin::kEqual);
CEL_RETURN_IF_ERROR(equals_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kEquals, BoolType(), TypeParamA(), TypeParamA())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(equals_op)));
FunctionDecl not_equals_op;
not_equals_op.set_name(builtin::kInequal);
CEL_RETURN_IF_ERROR(not_equals_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kNotEquals, BoolType(), TypeParamA(), TypeParamA())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(not_equals_op)));
return absl::OkStatus();
}
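// Registers container operations: indexing, the 'in' membership operator
// (including two deprecated spellings), and size() in both global and
// member-call form.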
absl::Status AddContainerOps(TypeCheckerBuilder& builder) {
FunctionDecl index;
index.set_name(builtin::kIndex);
CEL_RETURN_IF_ERROR(index.AddOverload(MakeOverloadDecl(
StandardOverloads::kIndexList, TypeParamA(), ListOfA(), IntType())));
CEL_RETURN_IF_ERROR(index.AddOverload(MakeOverloadDecl(
StandardOverloads::kIndexMap, TypeParamB(), MapOfAB(), TypeParamA())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(index)));
FunctionDecl in_op;
in_op.set_name(builtin::kIn);
CEL_RETURN_IF_ERROR(in_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kInList, BoolType(), TypeParamA(), ListOfA())));
CEL_RETURN_IF_ERROR(in_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kInMap, BoolType(), TypeParamA(), MapOfAB())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(in_op)));
FunctionDecl in_function_deprecated;
in_function_deprecated.set_name(builtin::kInFunction);
CEL_RETURN_IF_ERROR(in_function_deprecated.AddOverload(MakeOverloadDecl(
StandardOverloads::kInList, BoolType(), TypeParamA(), ListOfA())));
CEL_RETURN_IF_ERROR(in_function_deprecated.AddOverload(MakeOverloadDecl(
StandardOverloads::kInMap, BoolType(), TypeParamA(), MapOfAB())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(in_function_deprecated)));
FunctionDecl in_op_deprecated;
in_op_deprecated.set_name(builtin::kInDeprecated);
CEL_RETURN_IF_ERROR(in_op_deprecated.AddOverload(MakeOverloadDecl(
StandardOverloads::kInList, BoolType(), TypeParamA(), ListOfA())));
CEL_RETURN_IF_ERROR(in_op_deprecated.AddOverload(MakeOverloadDecl(
StandardOverloads::kInMap, BoolType(), TypeParamA(), MapOfAB())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(in_op_deprecated)));
FunctionDecl size;
size.set_name(builtin::kSize);
CEL_RETURN_IF_ERROR(size.AddOverload(
MakeOverloadDecl(StandardOverloads::kSizeList, IntType(), ListOfA())));
CEL_RETURN_IF_ERROR(size.AddOverload(MakeMemberOverloadDecl(
StandardOverloads::kSizeListMember, IntType(), ListOfA())));
CEL_RETURN_IF_ERROR(size.AddOverload(
MakeOverloadDecl(StandardOverloads::kSizeMap, IntType(), MapOfAB())));
CEL_RETURN_IF_ERROR(size.AddOverload(MakeMemberOverloadDecl(
StandardOverloads::kSizeMapMember, IntType(), MapOfAB())));
CEL_RETURN_IF_ERROR(size.AddOverload(
MakeOverloadDecl(StandardOverloads::kSizeBytes, IntType(), BytesType())));
CEL_RETURN_IF_ERROR(size.AddOverload(MakeMemberOverloadDecl(
StandardOverloads::kSizeBytesMember, IntType(), BytesType())));
CEL_RETURN_IF_ERROR(size.AddOverload(MakeOverloadDecl(
StandardOverloads::kSizeString, IntType(), StringType())));
CEL_RETURN_IF_ERROR(size.AddOverload(MakeMemberOverloadDecl(
StandardOverloads::kSizeStringMember, IntType(), StringType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(size)));
return absl::OkStatus();
}
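// Registers the ordering operators _<_, _>_, _<=_ and _>=_ for all ordered
// standard types. Cross-numeric overloads are added conditionally below.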
absl::Status AddRelationOps(TypeCheckerBuilder& builder) {
FunctionDecl less_op;
less_op.set_name(builtin::kLess);
CEL_RETURN_IF_ERROR(less_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kLessInt, BoolType(), IntType(), IntType())));
CEL_RETURN_IF_ERROR(less_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kLessUint, BoolType(), UintType(), UintType())));
CEL_RETURN_IF_ERROR(less_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kLessDouble, BoolType(), DoubleType(), DoubleType())));
CEL_RETURN_IF_ERROR(less_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kLessBool, BoolType(), BoolType(), BoolType())));
CEL_RETURN_IF_ERROR(less_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kLessString, BoolType(), StringType(), StringType())));
CEL_RETURN_IF_ERROR(less_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kLessBytes, BoolType(), BytesType(), BytesType())));
CEL_RETURN_IF_ERROR(less_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kLessDuration, BoolType(),
DurationType(), DurationType())));
CEL_RETURN_IF_ERROR(less_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kLessTimestamp, BoolType(),
TimestampType(), TimestampType())));
FunctionDecl greater_op;
greater_op.set_name(builtin::kGreater);
CEL_RETURN_IF_ERROR(greater_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kGreaterInt, BoolType(), IntType(), IntType())));
CEL_RETURN_IF_ERROR(greater_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kGreaterUint, BoolType(), UintType(), UintType())));
CEL_RETURN_IF_ERROR(greater_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kGreaterDouble, BoolType(),
DoubleType(), DoubleType())));
CEL_RETURN_IF_ERROR(greater_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kGreaterBool, BoolType(), BoolType(), BoolType())));
CEL_RETURN_IF_ERROR(greater_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kGreaterString, BoolType(),
StringType(), StringType())));
CEL_RETURN_IF_ERROR(greater_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kGreaterBytes, BoolType(), BytesType(), BytesType())));
CEL_RETURN_IF_ERROR(greater_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kGreaterDuration, BoolType(),
DurationType(), DurationType())));
CEL_RETURN_IF_ERROR(greater_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kGreaterTimestamp, BoolType(),
TimestampType(), TimestampType())));
FunctionDecl less_equals_op;
less_equals_op.set_name(builtin::kLessOrEqual);
CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kLessEqualsInt, BoolType(), IntType(), IntType())));
CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kLessEqualsUint, BoolType(), UintType(), UintType())));
CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kLessEqualsDouble, BoolType(),
DoubleType(), DoubleType())));
CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kLessEqualsBool, BoolType(), BoolType(), BoolType())));
CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kLessEqualsString, BoolType(),
StringType(), StringType())));
CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kLessEqualsBytes, BoolType(),
BytesType(), BytesType())));
CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kLessEqualsDuration, BoolType(),
DurationType(), DurationType())));
CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kLessEqualsTimestamp, BoolType(),
TimestampType(), TimestampType())));
FunctionDecl greater_equals_op;
greater_equals_op.set_name(builtin::kGreaterOrEqual);
CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kGreaterEqualsInt, BoolType(), IntType(), IntType())));
CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kGreaterEqualsUint, BoolType(),
UintType(), UintType())));
CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kGreaterEqualsDouble, BoolType(),
DoubleType(), DoubleType())));
CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kGreaterEqualsBool, BoolType(),
BoolType(), BoolType())));
CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kGreaterEqualsString, BoolType(),
StringType(), StringType())));
CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kGreaterEqualsBytes, BoolType(),
BytesType(), BytesType())));
CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kGreaterEqualsDuration, BoolType(),
DurationType(), DurationType())));
CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kGreaterEqualsTimestamp, BoolType(),
TimestampType(), TimestampType())));
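  // Mixed int/uint/double comparison overloads, gated on a checker option.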
if (builder.options().enable_cross_numeric_comparisons) {
CEL_RETURN_IF_ERROR(less_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kLessIntUint, BoolType(), IntType(), UintType())));
CEL_RETURN_IF_ERROR(less_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kLessIntDouble, BoolType(),
IntType(), DoubleType())));
CEL_RETURN_IF_ERROR(less_op.AddOverload(MakeOverloadDecl(
StandardOverloads::kLessUintInt, BoolType(), UintType(), IntType())));
CEL_RETURN_IF_ERROR(less_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kLessUintDouble, BoolType(),
UintType(), DoubleType())));
CEL_RETURN_IF_ERROR(less_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kLessDoubleInt, BoolType(),
DoubleType(), IntType())));
CEL_RETURN_IF_ERROR(less_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kLessDoubleUint, BoolType(),
DoubleType(), UintType())));
CEL_RETURN_IF_ERROR(greater_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kGreaterIntUint, BoolType(),
IntType(), UintType())));
CEL_RETURN_IF_ERROR(greater_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kGreaterIntDouble, BoolType(),
IntType(), DoubleType())));
CEL_RETURN_IF_ERROR(greater_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kGreaterUintInt, BoolType(),
UintType(), IntType())));
CEL_RETURN_IF_ERROR(greater_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kGreaterUintDouble, BoolType(),
UintType(), DoubleType())));
CEL_RETURN_IF_ERROR(greater_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kGreaterDoubleInt, BoolType(),
DoubleType(), IntType())));
CEL_RETURN_IF_ERROR(greater_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kGreaterDoubleUint, BoolType(),
DoubleType(), UintType())));
CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kLessEqualsIntUint, BoolType(),
IntType(), UintType())));
CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kLessEqualsIntDouble, BoolType(),
IntType(), DoubleType())));
CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kLessEqualsUintInt, BoolType(),
UintType(), IntType())));
CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kLessEqualsUintDouble, BoolType(),
UintType(), DoubleType())));
CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kLessEqualsDoubleInt, BoolType(),
DoubleType(), IntType())));
CEL_RETURN_IF_ERROR(less_equals_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kLessEqualsDoubleUint, BoolType(),
DoubleType(), UintType())));
CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kGreaterEqualsIntUint, BoolType(),
IntType(), UintType())));
CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kGreaterEqualsIntDouble, BoolType(),
IntType(), DoubleType())));
CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kGreaterEqualsUintInt, BoolType(),
UintType(), IntType())));
CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kGreaterEqualsUintDouble,
BoolType(), UintType(), DoubleType())));
CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kGreaterEqualsDoubleInt, BoolType(),
DoubleType(), IntType())));
CEL_RETURN_IF_ERROR(greater_equals_op.AddOverload(
MakeOverloadDecl(StandardOverloads::kGreaterEqualsDoubleUint,
BoolType(), DoubleType(), UintType())));
}
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(less_op)));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(greater_op)));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(less_equals_op)));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(greater_equals_op)));
return absl::OkStatus();
}
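// Registers the string member functions contains(), startsWith(), and
// endsWith().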
absl::Status AddStringFunctions(TypeCheckerBuilder& builder) {
FunctionDecl contains;
contains.set_name(builtin::kStringContains);
CEL_RETURN_IF_ERROR(contains.AddOverload(
MakeMemberOverloadDecl(StandardOverloads::kContainsString, BoolType(),
StringType(), StringType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(contains)));
FunctionDecl starts_with;
starts_with.set_name(builtin::kStringStartsWith);
CEL_RETURN_IF_ERROR(starts_with.AddOverload(
MakeMemberOverloadDecl(StandardOverloads::kStartsWithString, BoolType(),
StringType(), StringType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(starts_with)));
FunctionDecl ends_with;
ends_with.set_name(builtin::kStringEndsWith);
CEL_RETURN_IF_ERROR(ends_with.AddOverload(
MakeMemberOverloadDecl(StandardOverloads::kEndsWithString, BoolType(),
StringType(), StringType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(ends_with)));
return absl::OkStatus();
}
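// Registers matches() in both member-call and global-call form.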
absl::Status AddRegexFunctions(TypeCheckerBuilder& builder) {
FunctionDecl matches;
matches.set_name(builtin::kRegexMatch);
CEL_RETURN_IF_ERROR(matches.AddOverload(
MakeMemberOverloadDecl(StandardOverloads::kMatchesMember, BoolType(),
StringType(), StringType())));
CEL_RETURN_IF_ERROR(matches.AddOverload(MakeOverloadDecl(
StandardOverloads::kMatches, BoolType(), StringType(), StringType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(matches)));
return absl::OkStatus();
}
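// Registers the timestamp/duration accessors (getFullYear() through
// getMilliseconds()); each timestamp accessor also accepts an optional
// time-zone string.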
absl::Status AddTimeFunctions(TypeCheckerBuilder& builder) {
FunctionDecl get_full_year;
get_full_year.set_name(builtin::kFullYear);
CEL_RETURN_IF_ERROR(get_full_year.AddOverload(MakeMemberOverloadDecl(
StandardOverloads::kTimestampToYear, IntType(), TimestampType())));
CEL_RETURN_IF_ERROR(get_full_year.AddOverload(
MakeMemberOverloadDecl(StandardOverloads::kTimestampToYearWithTz,
IntType(), TimestampType(), StringType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_full_year)));
FunctionDecl get_month;
get_month.set_name(builtin::kMonth);
CEL_RETURN_IF_ERROR(get_month.AddOverload(MakeMemberOverloadDecl(
StandardOverloads::kTimestampToMonth, IntType(), TimestampType())));
CEL_RETURN_IF_ERROR(get_month.AddOverload(
MakeMemberOverloadDecl(StandardOverloads::kTimestampToMonthWithTz,
IntType(), TimestampType(), StringType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_month)));
FunctionDecl get_day_of_year;
get_day_of_year.set_name(builtin::kDayOfYear);
CEL_RETURN_IF_ERROR(get_day_of_year.AddOverload(MakeMemberOverloadDecl(
StandardOverloads::kTimestampToDayOfYear, IntType(), TimestampType())));
CEL_RETURN_IF_ERROR(get_day_of_year.AddOverload(
MakeMemberOverloadDecl(StandardOverloads::kTimestampToDayOfYearWithTz,
IntType(), TimestampType(), StringType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_day_of_year)));
FunctionDecl get_day_of_month;
get_day_of_month.set_name(builtin::kDayOfMonth);
CEL_RETURN_IF_ERROR(get_day_of_month.AddOverload(MakeMemberOverloadDecl(
StandardOverloads::kTimestampToDayOfMonth, IntType(), TimestampType())));
CEL_RETURN_IF_ERROR(get_day_of_month.AddOverload(
MakeMemberOverloadDecl(StandardOverloads::kTimestampToDayOfMonthWithTz,
IntType(), TimestampType(), StringType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_day_of_month)));
FunctionDecl get_date;
get_date.set_name(builtin::kDate);
CEL_RETURN_IF_ERROR(get_date.AddOverload(MakeMemberOverloadDecl(
StandardOverloads::kTimestampToDate, IntType(), TimestampType())));
CEL_RETURN_IF_ERROR(get_date.AddOverload(
MakeMemberOverloadDecl(StandardOverloads::kTimestampToDateWithTz,
IntType(), TimestampType(), StringType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_date)));
FunctionDecl get_day_of_week;
get_day_of_week.set_name(builtin::kDayOfWeek);
CEL_RETURN_IF_ERROR(get_day_of_week.AddOverload(MakeMemberOverloadDecl(
StandardOverloads::kTimestampToDayOfWeek, IntType(), TimestampType())));
CEL_RETURN_IF_ERROR(get_day_of_week.AddOverload(
MakeMemberOverloadDecl(StandardOverloads::kTimestampToDayOfWeekWithTz,
IntType(), TimestampType(), StringType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_day_of_week)));
FunctionDecl get_hours;
get_hours.set_name(builtin::kHours);
CEL_RETURN_IF_ERROR(get_hours.AddOverload(MakeMemberOverloadDecl(
StandardOverloads::kTimestampToHours, IntType(), TimestampType())));
CEL_RETURN_IF_ERROR(get_hours.AddOverload(
MakeMemberOverloadDecl(StandardOverloads::kTimestampToHoursWithTz,
IntType(), TimestampType(), StringType())));
CEL_RETURN_IF_ERROR(get_hours.AddOverload(MakeMemberOverloadDecl(
StandardOverloads::kDurationToHours, IntType(), DurationType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_hours)));
FunctionDecl get_minutes;
get_minutes.set_name(builtin::kMinutes);
CEL_RETURN_IF_ERROR(get_minutes.AddOverload(MakeMemberOverloadDecl(
StandardOverloads::kTimestampToMinutes, IntType(), TimestampType())));
CEL_RETURN_IF_ERROR(get_minutes.AddOverload(
MakeMemberOverloadDecl(StandardOverloads::kTimestampToMinutesWithTz,
IntType(), TimestampType(), StringType())));
CEL_RETURN_IF_ERROR(get_minutes.AddOverload(MakeMemberOverloadDecl(
StandardOverloads::kDurationToMinutes, IntType(), DurationType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_minutes)));
FunctionDecl get_seconds;
get_seconds.set_name(builtin::kSeconds);
CEL_RETURN_IF_ERROR(get_seconds.AddOverload(MakeMemberOverloadDecl(
StandardOverloads::kTimestampToSeconds, IntType(), TimestampType())));
CEL_RETURN_IF_ERROR(get_seconds.AddOverload(
MakeMemberOverloadDecl(StandardOverloads::kTimestampToSecondsWithTz,
IntType(), TimestampType(), StringType())));
CEL_RETURN_IF_ERROR(get_seconds.AddOverload(MakeMemberOverloadDecl(
StandardOverloads::kDurationToSeconds, IntType(), DurationType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_seconds)));
FunctionDecl get_milliseconds;
get_milliseconds.set_name(builtin::kMilliseconds);
CEL_RETURN_IF_ERROR(get_milliseconds.AddOverload(
MakeMemberOverloadDecl(StandardOverloads::kTimestampToMilliseconds,
IntType(), TimestampType())));
CEL_RETURN_IF_ERROR(get_milliseconds.AddOverload(
MakeMemberOverloadDecl(StandardOverloads::kTimestampToMillisecondsWithTz,
IntType(), TimestampType(), StringType())));
CEL_RETURN_IF_ERROR(get_milliseconds.AddOverload(MakeMemberOverloadDecl(
StandardOverloads::kDurationToMilliseconds, IntType(), DurationType())));
CEL_RETURN_IF_ERROR(builder.AddFunction(std::move(get_milliseconds)));
return absl::OkStatus();
}
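// Registers the type names (bool, int, uint, ...) as variables of type
// type(T) so they can be referenced as values in expressions.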
absl::Status AddTypeConstantVariables(TypeCheckerBuilder& builder) {
CEL_RETURN_IF_ERROR(
builder.AddVariable(MakeVariableDecl("bool", TypeBoolType())));
CEL_RETURN_IF_ERROR(
builder.AddVariable(MakeVariableDecl("null_type", TypeNullType())));
CEL_RETURN_IF_ERROR(
builder.AddVariable(MakeVariableDecl(builtin::kInt, TypeIntType())));
CEL_RETURN_IF_ERROR(
builder.AddVariable(MakeVariableDecl(builtin::kUint, TypeUintType())));
CEL_RETURN_IF_ERROR(builder.AddVariable(
MakeVariableDecl(builtin::kDouble, TypeDoubleType())));
CEL_RETURN_IF_ERROR(builder.AddVariable(
MakeVariableDecl(builtin::kString, TypeStringType())));
CEL_RETURN_IF_ERROR(
builder.AddVariable(MakeVariableDecl(builtin::kBytes, TypeBytesType())));
CEL_RETURN_IF_ERROR(builder.AddVariable(
MakeVariableDecl(builtin::kDuration, TypeDurationType())));
CEL_RETURN_IF_ERROR(builder.AddVariable(
MakeVariableDecl(builtin::kTimestamp, TypeTimestampType())));
CEL_RETURN_IF_ERROR(
builder.AddVariable(MakeVariableDecl("list", TypeListType())));
CEL_RETURN_IF_ERROR(
builder.AddVariable(MakeVariableDecl("map", TypeMapType())));
CEL_RETURN_IF_ERROR(builder.AddVariable(MakeVariableDecl("type", TypeOfA())));
return absl::OkStatus();
}
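// Registers well-known protobuf enum constants; currently only
// google.protobuf.NullValue.NULL_VALUE.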
absl::Status AddEnumConstants(TypeCheckerBuilder& builder) {
VariableDecl pb_null;
pb_null.set_name("google.protobuf.NullValue.NULL_VALUE");
pb_null.set_type(NullType());
pb_null.set_value(Constant(nullptr));
CEL_RETURN_IF_ERROR(builder.AddVariable(std::move(pb_null)));
return absl::OkStatus();
}
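// Adds all standard library declarations to the builder, stopping at the
// first error.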
absl::Status AddStandardLibraryDecls(TypeCheckerBuilder& builder) {
CEL_RETURN_IF_ERROR(AddLogicalOps(builder));
CEL_RETURN_IF_ERROR(AddArithmeticOps(builder));
CEL_RETURN_IF_ERROR(AddTypeConversions(builder));
CEL_RETURN_IF_ERROR(AddEqualityOps(builder));
  CEL_RETURN_IF_ERROR(AddContainerOps(builder));
CEL_RETURN_IF_ERROR(AddRelationOps(builder));
CEL_RETURN_IF_ERROR(AddStringFunctions(builder));
CEL_RETURN_IF_ERROR(AddRegexFunctions(builder));
CEL_RETURN_IF_ERROR(AddTimeFunctions(builder));
CEL_RETURN_IF_ERROR(AddTypeConstantVariables(builder));
CEL_RETURN_IF_ERROR(AddEnumConstants(builder));
return absl::OkStatus();
}
}
CheckerLibrary StandardLibrary() { return {"stdlib", AddStandardLibraryDecls}; }
} | #include "checker/standard_library.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "base/ast_internal/ast_impl.h"
#include "base/ast_internal/expr.h"
#include "checker/internal/test_ast_helpers.h"
#include "checker/type_checker.h"
#include "checker/type_checker_builder.h"
#include "checker/validation_result.h"
#include "common/ast.h"
#include "common/constant.h"
#include "internal/testing.h"
namespace cel {
namespace {
using ::absl_testing::IsOk;
using ::absl_testing::StatusIs;
using ::cel::ast_internal::AstImpl;
using ::cel::ast_internal::Reference;
using ::testing::IsEmpty;
using ::testing::Pointee;
using ::testing::Property;
TEST(StandardLibraryTest, StandardLibraryAddsDecls) {
TypeCheckerBuilder builder;
EXPECT_THAT(builder.AddLibrary(StandardLibrary()), IsOk());
EXPECT_THAT(std::move(builder).Build(), IsOk());
}
TEST(StandardLibraryTest, StandardLibraryErrorsIfAddedTwice) {
TypeCheckerBuilder builder;
EXPECT_THAT(builder.AddLibrary(StandardLibrary()), IsOk());
EXPECT_THAT(builder.AddLibrary(StandardLibrary()),
StatusIs(absl::StatusCode::kAlreadyExists));
}
class StandardLibraryDefinitionsTest : public ::testing::Test {
public:
void SetUp() override {
TypeCheckerBuilder builder;
ASSERT_THAT(builder.AddLibrary(StandardLibrary()), IsOk());
ASSERT_OK_AND_ASSIGN(stdlib_type_checker_, std::move(builder).Build());
}
protected:
std::unique_ptr<TypeChecker> stdlib_type_checker_;
};
class StdlibTypeVarDefinitionTest
: public StandardLibraryDefinitionsTest,
public testing::WithParamInterface<std::string> {};
TEST_P(StdlibTypeVarDefinitionTest, DefinesTypeConstants) {
auto ast = std::make_unique<AstImpl>();
ast->root_expr().mutable_ident_expr().set_name(GetParam());
ast->root_expr().set_id(1);
ASSERT_OK_AND_ASSIGN(ValidationResult result,
stdlib_type_checker_->Check(std::move(ast)));
EXPECT_THAT(result.GetIssues(), IsEmpty());
ASSERT_OK_AND_ASSIGN(std::unique_ptr<Ast> checked_ast, result.ReleaseAst());
const auto& checked_impl = AstImpl::CastFromPublicAst(*checked_ast);
EXPECT_THAT(checked_impl.GetReference(1),
Pointee(Property(&Reference::name, GetParam())));
}
INSTANTIATE_TEST_SUITE_P(
StdlibTypeVarDefinitions, StdlibTypeVarDefinitionTest,
::testing::Values("bool", "int", "uint", "double", "string", "bytes",
"list", "map", "duration", "timestamp", "null_type"),
[](const auto& info) -> std::string { return info.param; });
TEST_F(StandardLibraryDefinitionsTest, DefinesProtoStructNull) {
auto ast = std::make_unique<AstImpl>();
auto& enumerator = ast->root_expr();
enumerator.set_id(4);
enumerator.mutable_select_expr().set_field("NULL_VALUE");
auto& enumeration = enumerator.mutable_select_expr().mutable_operand();
enumeration.set_id(3);
enumeration.mutable_select_expr().set_field("NullValue");
auto& protobuf = enumeration.mutable_select_expr().mutable_operand();
protobuf.set_id(2);
protobuf.mutable_select_expr().set_field("protobuf");
auto& google = protobuf.mutable_select_expr().mutable_operand();
google.set_id(1);
google.mutable_ident_expr().set_name("google");
ASSERT_OK_AND_ASSIGN(ValidationResult result,
stdlib_type_checker_->Check(std::move(ast)));
EXPECT_THAT(result.GetIssues(), IsEmpty());
ASSERT_OK_AND_ASSIGN(std::unique_ptr<Ast> checked_ast, result.ReleaseAst());
const auto& checked_impl = AstImpl::CastFromPublicAst(*checked_ast);
EXPECT_THAT(checked_impl.GetReference(4),
Pointee(Property(&Reference::name,
"google.protobuf.NullValue.NULL_VALUE")));
}
struct DefinitionsTestCase {
std::string expr;
bool type_check_success = true;
CheckerOptions options;
};
class StdLibDefinitionsTest
: public ::testing::TestWithParam<DefinitionsTestCase> {
public:
void SetUp() override {
TypeCheckerBuilder builder;
ASSERT_THAT(builder.AddLibrary(StandardLibrary()), IsOk());
ASSERT_OK_AND_ASSIGN(stdlib_type_checker_, std::move(builder).Build());
}
protected:
std::unique_ptr<TypeChecker> stdlib_type_checker_;
};
TEST_P(StdLibDefinitionsTest, Runner) {
TypeCheckerBuilder builder(GetParam().options);
ASSERT_THAT(builder.AddLibrary(StandardLibrary()), IsOk());
ASSERT_OK_AND_ASSIGN(std::unique_ptr<TypeChecker> type_checker,
std::move(builder).Build());
ASSERT_OK_AND_ASSIGN(std::unique_ptr<Ast> ast,
checker_internal::MakeTestParsedAst(GetParam().expr));
ASSERT_OK_AND_ASSIGN(auto result, type_checker->Check(std::move(ast)));
EXPECT_EQ(result.IsValid(), GetParam().type_check_success);
}
INSTANTIATE_TEST_SUITE_P(
Strings, StdLibDefinitionsTest,
::testing::Values(DefinitionsTestCase{
"'123'.size()",
},
DefinitionsTestCase{
"size('123')",
},
DefinitionsTestCase{
"'123' + '123'",
},
DefinitionsTestCase{
"'123'.endsWith('123')",
},
DefinitionsTestCase{
"'123'.startsWith('123')",
},
DefinitionsTestCase{
"'123'.contains('123')",
},
DefinitionsTestCase{
"'123'.matches(r'123')",
},
DefinitionsTestCase{
"matches('123', r'123')",
}));
INSTANTIATE_TEST_SUITE_P(TypeCasts, StdLibDefinitionsTest,
::testing::Values(DefinitionsTestCase{
"int(1)",
},
DefinitionsTestCase{
"uint(1)",
},
DefinitionsTestCase{
"double(1)",
},
DefinitionsTestCase{
"string(1)",
},
DefinitionsTestCase{
"bool('true')",
},
DefinitionsTestCase{
"timestamp(0)",
},
DefinitionsTestCase{
"duration('1s')",
}));
INSTANTIATE_TEST_SUITE_P(Arithmetic, StdLibDefinitionsTest,
::testing::Values(DefinitionsTestCase{
"1 + 2",
},
DefinitionsTestCase{
"1 - 2",
},
DefinitionsTestCase{
"1 / 2",
},
DefinitionsTestCase{
"1 * 2",
},
DefinitionsTestCase{
"2 % 1",
},
DefinitionsTestCase{
"-1",
}));
INSTANTIATE_TEST_SUITE_P(
TimeArithmetic, StdLibDefinitionsTest,
::testing::Values(DefinitionsTestCase{
"timestamp(0) + duration('1s')",
},
DefinitionsTestCase{
"timestamp(0) - duration('1s')",
},
DefinitionsTestCase{
"timestamp(0) - timestamp(0)",
},
DefinitionsTestCase{
"duration('1s') + duration('1s')",
},
DefinitionsTestCase{
"duration('1s') - duration('1s')",
}));
INSTANTIATE_TEST_SUITE_P(NumericComparisons, StdLibDefinitionsTest,
::testing::Values(DefinitionsTestCase{
"1 > 2",
},
DefinitionsTestCase{
"1 < 2",
},
DefinitionsTestCase{
"1 >= 2",
},
DefinitionsTestCase{
"1 <= 2",
}));
INSTANTIATE_TEST_SUITE_P(
CrossNumericComparisons, StdLibDefinitionsTest,
::testing::Values(
DefinitionsTestCase{
"1u < 2",
true,
{.enable_cross_numeric_comparisons = true}},
DefinitionsTestCase{
"1u > 2",
true,
{.enable_cross_numeric_comparisons = true}},
DefinitionsTestCase{
"1u <= 2",
true,
{.enable_cross_numeric_comparisons = true}},
DefinitionsTestCase{
"1u >= 2",
true,
{.enable_cross_numeric_comparisons = true}}));
INSTANTIATE_TEST_SUITE_P(
TimeComparisons, StdLibDefinitionsTest,
::testing::Values(DefinitionsTestCase{
"duration('1s') < duration('1s')",
},
DefinitionsTestCase{
"duration('1s') > duration('1s')",
},
DefinitionsTestCase{
"duration('1s') <= duration('1s')",
},
DefinitionsTestCase{
"duration('1s') >= duration('1s')",
},
DefinitionsTestCase{
"timestamp(0) < timestamp(0)",
},
DefinitionsTestCase{
"timestamp(0) > timestamp(0)",
},
DefinitionsTestCase{
"timestamp(0) <= timestamp(0)",
},
DefinitionsTestCase{
"timestamp(0) >= timestamp(0)",
}));
INSTANTIATE_TEST_SUITE_P(
TimeAccessors, StdLibDefinitionsTest,
::testing::Values(
DefinitionsTestCase{
"timestamp(0).getFullYear()",
},
DefinitionsTestCase{
"timestamp(0).getFullYear('-08:00')",
},
DefinitionsTestCase{
"timestamp(0).getMonth()",
},
DefinitionsTestCase{
"timestamp(0).getMonth('-08:00')",
},
DefinitionsTestCase{
"timestamp(0).getDayOfYear()",
},
DefinitionsTestCase{
"timestamp(0).getDayOfYear('-08:00')",
},
DefinitionsTestCase{
"timestamp(0).getDate()",
},
DefinitionsTestCase{
"timestamp(0).getDate('-08:00')",
},
DefinitionsTestCase{
"timestamp(0).getDayOfWeek()",
},
DefinitionsTestCase{
"timestamp(0).getDayOfWeek('-08:00')",
},
DefinitionsTestCase{
"timestamp(0).getHours()",
},
DefinitionsTestCase{
"duration('1s').getHours()",
},
DefinitionsTestCase{
"timestamp(0).getHours('-08:00')",
},
DefinitionsTestCase{
"timestamp(0).getMinutes()",
},
DefinitionsTestCase{
"duration('1s').getMinutes()",
},
DefinitionsTestCase{
"timestamp(0).getMinutes('-08:00')",
},
DefinitionsTestCase{
"timestamp(0).getSeconds()",
},
DefinitionsTestCase{
"duration('1s').getSeconds()",
},
DefinitionsTestCase{
"timestamp(0).getSeconds('-08:00')",
},
DefinitionsTestCase{
"timestamp(0).getMilliseconds()",
},
DefinitionsTestCase{
"duration('1s').getMilliseconds()",
},
DefinitionsTestCase{
"timestamp(0).getMilliseconds('-08:00')",
}));
INSTANTIATE_TEST_SUITE_P(Logic, StdLibDefinitionsTest,
::testing::Values(DefinitionsTestCase{
"true || false",
},
DefinitionsTestCase{
"true && false",
},
DefinitionsTestCase{
"!true",
}));
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/checker/standard_library.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/checker/standard_library_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
c51d0205-c7bd-4898-90c7-d1f12f00a9d1 | cpp | tensorflow/tensorflow | non_max_suppression_op | tensorflow/core/kernels/image/non_max_suppression_op.cc | tensorflow/core/kernels/image/non_max_suppression_op_test.cc | #define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/image/non_max_suppression_op.h"
#include <cmath>
#include <functional>
#include <limits>
#include <queue>
#include <vector>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
namespace {
typedef Eigen::ThreadPoolDevice CPUDevice;
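// Shape-validation helpers for the inputs to the non-max-suppression kernels.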
static inline void CheckScoreSizes(OpKernelContext* context, int num_boxes,
const Tensor& scores) {
OP_REQUIRES(context, scores.dims() == 1,
errors::InvalidArgument(
"scores must be 1-D", scores.shape().DebugString(),
" (Shape must be rank 1 but is rank ", scores.dims(), ")"));
OP_REQUIRES(
context, scores.dim_size(0) == num_boxes,
errors::InvalidArgument("scores has incompatible shape (Dimensions must "
"be equal, but are ",
num_boxes, " and ", scores.dim_size(0), ")"));
}
static inline void ParseAndCheckOverlapSizes(OpKernelContext* context,
const Tensor& overlaps,
int* num_boxes) {
OP_REQUIRES(context, overlaps.dims() == 2,
errors::InvalidArgument("overlaps must be 2-D",
overlaps.shape().DebugString()));
*num_boxes = overlaps.dim_size(0);
OP_REQUIRES(context, overlaps.dim_size(1) == *num_boxes,
errors::InvalidArgument("overlaps must be square",
overlaps.shape().DebugString()));
}
static inline void ParseAndCheckBoxSizes(OpKernelContext* context,
const Tensor& boxes, int* num_boxes) {
OP_REQUIRES(context, boxes.dims() == 2,
errors::InvalidArgument(
"boxes must be 2-D", boxes.shape().DebugString(),
" (Shape must be rank 2 but is rank ", boxes.dims(), ")"));
*num_boxes = boxes.dim_size(0);
OP_REQUIRES(context, boxes.dim_size(1) == 4,
errors::InvalidArgument("boxes must have 4 columns (Dimension "
"must be 4 but is ",
boxes.dim_size(1), ")"));
}
static inline void CheckCombinedNMSScoreSizes(OpKernelContext* context,
int num_boxes,
const Tensor& scores) {
OP_REQUIRES(context, scores.dims() == 3,
errors::InvalidArgument("scores must be 3-D",
scores.shape().DebugString()));
OP_REQUIRES(context, scores.dim_size(1) == num_boxes,
errors::InvalidArgument("scores has incompatible shape"));
}
static inline void ParseAndCheckCombinedNMSBoxSizes(OpKernelContext* context,
const Tensor& boxes,
int* num_boxes,
const int num_classes) {
OP_REQUIRES(context, boxes.dims() == 4,
errors::InvalidArgument("boxes must be 4-D",
boxes.shape().DebugString()));
bool box_check = boxes.dim_size(2) == 1 || boxes.dim_size(2) == num_classes;
OP_REQUIRES(context, box_check,
errors::InvalidArgument(
"third dimension of boxes must be either 1 or num classes"));
*num_boxes = boxes.dim_size(1);
OP_REQUIRES(context, boxes.dim_size(3) == 4,
errors::InvalidArgument("boxes must have 4 columns"));
}
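// Computes intersection-over-union between boxes i and j. Corner coordinates
// may be given in either order; min/max normalizes them first. Boxes with
// non-positive area yield an IOU of 0.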
template <typename T>
static inline float IOU(typename TTypes<T, 2>::ConstTensor boxes, int i,
int j) {
const float ymin_i = Eigen::numext::mini<float>(
static_cast<float>(boxes(i, 0)), static_cast<float>(boxes(i, 2)));
const float xmin_i = Eigen::numext::mini<float>(
static_cast<float>(boxes(i, 1)), static_cast<float>(boxes(i, 3)));
const float ymax_i = Eigen::numext::maxi<float>(
static_cast<float>(boxes(i, 0)), static_cast<float>(boxes(i, 2)));
const float xmax_i = Eigen::numext::maxi<float>(
static_cast<float>(boxes(i, 1)), static_cast<float>(boxes(i, 3)));
const float ymin_j = Eigen::numext::mini<float>(
static_cast<float>(boxes(j, 0)), static_cast<float>(boxes(j, 2)));
const float xmin_j = Eigen::numext::mini<float>(
static_cast<float>(boxes(j, 1)), static_cast<float>(boxes(j, 3)));
const float ymax_j = Eigen::numext::maxi<float>(
static_cast<float>(boxes(j, 0)), static_cast<float>(boxes(j, 2)));
const float xmax_j = Eigen::numext::maxi<float>(
static_cast<float>(boxes(j, 1)), static_cast<float>(boxes(j, 3)));
const float area_i = (ymax_i - ymin_i) * (xmax_i - xmin_i);
const float area_j = (ymax_j - ymin_j) * (xmax_j - xmin_j);
if (area_i <= 0 || area_j <= 0) {
return 0.0;
}
const float intersection_ymin = Eigen::numext::maxi<float>(ymin_i, ymin_j);
const float intersection_xmin = Eigen::numext::maxi<float>(xmin_i, xmin_j);
const float intersection_ymax = Eigen::numext::mini<float>(ymax_i, ymax_j);
const float intersection_xmax = Eigen::numext::mini<float>(xmax_i, xmax_j);
const float intersection_area =
Eigen::numext::maxi<float>(intersection_ymax - intersection_ymin, 0.0) *
Eigen::numext::maxi<float>(intersection_xmax - intersection_xmin, 0.0);
return intersection_area / (area_i + area_j - intersection_area);
}
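// Raw-pointer variant of IOU: i and j index directly into a flat array of
// box coordinates, each pointing at the first of a box's four values.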
static inline float IOU(const float* boxes, int i, int j) {
const float ymin_i = Eigen::numext::mini<float>(boxes[i], boxes[i + 2]);
const float xmin_i = Eigen::numext::mini<float>(boxes[i + 1], boxes[i + 3]);
const float ymax_i = Eigen::numext::maxi<float>(boxes[i], boxes[i + 2]);
const float xmax_i = Eigen::numext::maxi<float>(boxes[i + 1], boxes[i + 3]);
const float ymin_j = Eigen::numext::mini<float>(boxes[j], boxes[j + 2]);
const float xmin_j = Eigen::numext::mini<float>(boxes[j + 1], boxes[j + 3]);
const float ymax_j = Eigen::numext::maxi<float>(boxes[j], boxes[j + 2]);
const float xmax_j = Eigen::numext::maxi<float>(boxes[j + 1], boxes[j + 3]);
const float area_i = (ymax_i - ymin_i) * (xmax_i - xmin_i);
const float area_j = (ymax_j - ymin_j) * (xmax_j - xmin_j);
if (area_i <= 0 || area_j <= 0) {
return 0.0;
}
const float intersection_ymin = Eigen::numext::maxi<float>(ymin_i, ymin_j);
const float intersection_xmin = Eigen::numext::maxi<float>(xmin_i, xmin_j);
const float intersection_ymax = Eigen::numext::mini<float>(ymax_i, ymax_j);
const float intersection_xmax = Eigen::numext::mini<float>(xmax_i, xmax_j);
const float intersection_area =
Eigen::numext::maxi<float>(intersection_ymax - intersection_ymin, 0.0) *
Eigen::numext::maxi<float>(intersection_xmax - intersection_xmin, 0.0);
return intersection_area / (area_i + area_j - intersection_area);
}
template <typename T>
static inline T Overlap(typename TTypes<T, 2>::ConstTensor overlaps, int i,
int j) {
return overlaps(i, j);
}
template <typename T>
static inline std::function<float(int, int)> CreateIOUSimilarityFn(
const Tensor& boxes) {
typename TTypes<T, 2>::ConstTensor boxes_data = boxes.tensor<T, 2>();
return std::bind(&IOU<T>, boxes_data, std::placeholders::_1,
std::placeholders::_2);
}
template <typename T>
static inline std::function<T(int, int)> CreateOverlapSimilarityFn(
const Tensor& overlaps) {
  typename TTypes<T, 2>::ConstTensor overlaps_data =
      overlaps.tensor<T, 2>();
return std::bind(&Overlap<T>, overlaps_data, std::placeholders::_1,
std::placeholders::_2);
}
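// Greedy non-max suppression over a single set of boxes. Candidates are
// visited in descending score order; with soft_nms_sigma > 0 this performs
// Soft-NMS (https://arxiv.org/abs/1704.04503), decaying the scores of
// overlapping boxes instead of discarding them outright.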
template <typename T>
void DoNonMaxSuppressionOp(OpKernelContext* context, const Tensor& scores,
int num_boxes, const Tensor& max_output_size,
const T similarity_threshold,
const T score_threshold, const T soft_nms_sigma,
const std::function<float(int, int)>& similarity_fn,
bool return_scores_tensor = false,
bool pad_to_max_output_size = false,
int* ptr_num_valid_outputs = nullptr) {
const int output_size = max_output_size.scalar<int>()();
OP_REQUIRES(context, output_size >= 0,
errors::InvalidArgument("output size must be non-negative"));
std::vector<T> scores_data(num_boxes);
std::copy_n(scores.flat<T>().data(), num_boxes, scores_data.begin());
struct Candidate {
int box_index;
T score;
int suppress_begin_index;
};
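  // Orders candidates by descending score; ties are broken in favor of the
  // lower box index.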
auto cmp = [](const Candidate bs_i, const Candidate bs_j) {
return ((bs_i.score == bs_j.score) && (bs_i.box_index > bs_j.box_index)) ||
bs_i.score < bs_j.score;
};
std::priority_queue<Candidate, std::deque<Candidate>, decltype(cmp)>
candidate_priority_queue(cmp);
  for (int i = 0; i < static_cast<int>(scores_data.size()); ++i) {
if (scores_data[i] > score_threshold) {
candidate_priority_queue.emplace(Candidate({i, scores_data[i], 0}));
}
}
T scale = static_cast<T>(0.0);
bool is_soft_nms = soft_nms_sigma > static_cast<T>(0.0);
if (is_soft_nms) {
scale = static_cast<T>(-0.5) / soft_nms_sigma;
}
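  // Rescaling weight applied to a candidate's score given its similarity
  // `sim` to a selected box: exp(scale * sim^2) under Soft-NMS, otherwise a
  // hard 0/1 cutoff at similarity_threshold.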
auto suppress_weight = [similarity_threshold, scale,
is_soft_nms](const T sim) {
const T weight = Eigen::numext::exp<T>(scale * sim * sim);
return is_soft_nms || sim <= similarity_threshold ? weight
: static_cast<T>(0.0);
};
std::vector<int> selected;
std::vector<T> selected_scores;
float similarity;
T original_score;
Candidate next_candidate;
  while (static_cast<int>(selected.size()) < output_size &&
         !candidate_priority_queue.empty()) {
next_candidate = candidate_priority_queue.top();
original_score = next_candidate.score;
candidate_priority_queue.pop();
bool should_hard_suppress = false;
for (int j = static_cast<int>(selected.size()) - 1;
j >= next_candidate.suppress_begin_index; --j) {
similarity = similarity_fn(next_candidate.box_index, selected[j]);
next_candidate.score *= suppress_weight(static_cast<T>(similarity));
if (!is_soft_nms && static_cast<T>(similarity) > similarity_threshold) {
should_hard_suppress = true;
break;
}
if (next_candidate.score <= score_threshold) break;
}
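    // Record how many selected boxes this candidate has already been compared
    // against, so a re-queued candidate (Soft-NMS) only checks boxes selected
    // after it was last popped. If its score was decayed but still beats
    // score_threshold, push it back so the heap re-sorts it.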
next_candidate.suppress_begin_index = selected.size();
if (!should_hard_suppress) {
if (next_candidate.score == original_score) {
selected.push_back(next_candidate.box_index);
selected_scores.push_back(next_candidate.score);
continue;
}
if (next_candidate.score > score_threshold) {
candidate_priority_queue.push(next_candidate);
}
}
}
int num_valid_outputs = selected.size();
if (pad_to_max_output_size) {
selected.resize(output_size, 0);
selected_scores.resize(output_size, static_cast<T>(0));
}
if (ptr_num_valid_outputs) {
*ptr_num_valid_outputs = num_valid_outputs;
}
Tensor* output_indices = nullptr;
TensorShape output_shape({static_cast<int>(selected.size())});
OP_REQUIRES_OK(context,
context->allocate_output(0, output_shape, &output_indices));
TTypes<int, 1>::Tensor output_indices_data = output_indices->tensor<int, 1>();
std::copy_n(selected.begin(), selected.size(), output_indices_data.data());
if (return_scores_tensor) {
Tensor* output_scores = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(1, output_shape, &output_scores));
typename TTypes<T, 1>::Tensor output_scores_data =
output_scores->tensor<T, 1>();
std::copy_n(selected_scores.begin(), selected_scores.size(),
output_scores_data.data());
}
}
struct ResultCandidate {
int box_index;
float score;
int class_idx;
float box_coord[4];
};
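// Runs classic (hard) NMS for a single (batch, class) pair, keeping boxes
// above score_threshold and below iou_threshold overlap, and writes up to
// size_per_class survivors into the slice of result_candidate_vec reserved
// for class_idx. When q == 1 all classes share the same box coordinates.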
void DoNMSPerClass(int batch_idx, int class_idx, const float* boxes_data,
const float* scores_data, int num_boxes, int q,
int num_classes, const int size_per_class,
const float score_threshold, const float iou_threshold,
std::vector<ResultCandidate>& result_candidate_vec) {
struct Candidate {
int box_index;
float score;
};
auto cmp = [](const Candidate bs_i, const Candidate bs_j) {
return bs_i.score < bs_j.score;
};
std::priority_queue<Candidate, std::vector<Candidate>, decltype(cmp)>
candidate_priority_queue(cmp);
float temp_score;
for (int i = 0; i < num_boxes; ++i) {
temp_score = scores_data[i * num_classes + class_idx];
if (temp_score > score_threshold) {
candidate_priority_queue.emplace(Candidate({i, temp_score}));
}
}
std::vector<int> selected;
Candidate next_candidate;
int candidate_box_data_idx, selected_box_data_idx, class_box_idx;
class_box_idx = (q > 1) ? class_idx : 0;
float iou;
  while (static_cast<int>(selected.size()) < size_per_class &&
         !candidate_priority_queue.empty()) {
next_candidate = candidate_priority_queue.top();
candidate_priority_queue.pop();
candidate_box_data_idx = (next_candidate.box_index * q + class_box_idx) * 4;
bool should_select = true;
for (int j = selected.size() - 1; j >= 0; --j) {
selected_box_data_idx = (selected[j] * q + class_box_idx) * 4;
iou = IOU(boxes_data, candidate_box_data_idx, selected_box_data_idx);
if (iou > iou_threshold) {
should_select = false;
break;
}
}
if (should_select) {
result_candidate_vec[selected.size() + size_per_class * class_idx] = {
next_candidate.box_index,
next_candidate.score,
class_idx,
{boxes_data[candidate_box_data_idx],
boxes_data[candidate_box_data_idx + 1],
boxes_data[candidate_box_data_idx + 2],
boxes_data[candidate_box_data_idx + 3]}};
selected.push_back(next_candidate.box_index);
}
}
}
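// Merges the per-class NMS survivors of one batch element: sorts them by
// descending score, keeps the top max_detections (recorded in
// final_valid_detections), optionally clips coordinates to [0, 1], and
// zero-pads the outputs up to per_batch_size entries.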
void SelectResultPerBatch(std::vector<float>& nmsed_boxes,
std::vector<float>& nmsed_scores,
std::vector<float>& nmsed_classes,
std::vector<ResultCandidate>& result_candidate_vec,
std::vector<int>& final_valid_detections,
const int batch_idx, int total_size_per_batch,
bool pad_per_class, int max_size_per_batch,
bool clip_boxes, int per_batch_size) {
auto rc_cmp = [](const ResultCandidate rc_i, const ResultCandidate rc_j) {
return rc_i.score > rc_j.score;
};
std::sort(result_candidate_vec.begin(), result_candidate_vec.end(), rc_cmp);
int max_detections = 0;
int result_candidate_size =
std::count_if(result_candidate_vec.begin(), result_candidate_vec.end(),
[](ResultCandidate rc) { return rc.box_index > -1; });
if (!pad_per_class) {
max_detections = std::min(result_candidate_size, total_size_per_batch);
} else {
max_detections = std::min(per_batch_size, result_candidate_size);
}
final_valid_detections[batch_idx] = max_detections;
int curr_total_size = max_detections;
int result_idx = 0;
  while (curr_total_size > 0 &&
         result_idx < static_cast<int>(result_candidate_vec.size())) {
ResultCandidate next_candidate = result_candidate_vec[result_idx++];
if (clip_boxes) {
const float box_min = 0.0;
const float box_max = 1.0;
nmsed_boxes.push_back(
std::max(std::min(next_candidate.box_coord[0], box_max), box_min));
nmsed_boxes.push_back(
std::max(std::min(next_candidate.box_coord[1], box_max), box_min));
nmsed_boxes.push_back(
std::max(std::min(next_candidate.box_coord[2], box_max), box_min));
nmsed_boxes.push_back(
std::max(std::min(next_candidate.box_coord[3], box_max), box_min));
} else {
nmsed_boxes.push_back(next_candidate.box_coord[0]);
nmsed_boxes.push_back(next_candidate.box_coord[1]);
nmsed_boxes.push_back(next_candidate.box_coord[2]);
nmsed_boxes.push_back(next_candidate.box_coord[3]);
}
nmsed_scores.push_back(next_candidate.score);
nmsed_classes.push_back(next_candidate.class_idx);
curr_total_size--;
}
nmsed_boxes.resize(per_batch_size * 4, 0);
nmsed_scores.resize(per_batch_size, 0);
nmsed_classes.resize(per_batch_size, 0);
}
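// CPU implementation backing CombinedNonMaxSuppression: per-(batch, class)
// hard NMS followed by per-batch top-k selection, both sharded across the
// Eigen thread pool. Produces four outputs: nmsed boxes, scores, classes, and
// the number of valid detections per batch element.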
void BatchedNonMaxSuppressionOp(
OpKernelContext* context, const Tensor& inp_boxes, const Tensor& inp_scores,
int num_boxes, const int max_size_per_class, const int total_size_per_batch,
const float score_threshold, const float iou_threshold,
bool pad_per_class = false, bool clip_boxes = true) {
const int num_batches = inp_boxes.dim_size(0);
int num_classes = inp_scores.dim_size(2);
int q = inp_boxes.dim_size(2);
  const float* scores_data = inp_scores.flat<float>().data();
  const float* boxes_data = inp_boxes.flat<float>().data();
int boxes_per_batch = num_boxes * q * 4;
int scores_per_batch = num_boxes * num_classes;
const int size_per_class = std::min(max_size_per_class, num_boxes);
std::vector<std::vector<ResultCandidate>> result_candidate_vec(
num_batches,
std::vector<ResultCandidate>(size_per_class * num_classes,
{-1, -1.0, -1, {0.0, 0.0, 0.0, 0.0}}));
std::vector<std::vector<float>> nmsed_boxes(num_batches);
std::vector<std::vector<float>> nmsed_scores(num_batches);
std::vector<std::vector<float>> nmsed_classes(num_batches);
std::vector<int> final_valid_detections(num_batches);
auto shard_nms = [&](int begin, int end) {
for (int idx = begin; idx < end; ++idx) {
int batch_idx = idx / num_classes;
int class_idx = idx % num_classes;
DoNMSPerClass(batch_idx, class_idx,
boxes_data + boxes_per_batch * batch_idx,
scores_data + scores_per_batch * batch_idx, num_boxes, q,
num_classes, size_per_class, score_threshold, iou_threshold,
result_candidate_vec[batch_idx]);
}
};
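  // Rough, heuristic per-(batch, class) cost estimate so the Eigen thread
  // pool can choose a sensible work partition for the NMS shards.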
int length = num_batches * num_classes;
int input_bytes = num_boxes * 10 * sizeof(float);
int output_bytes = num_boxes * 10 * sizeof(float);
int compute_cycles = Eigen::TensorOpCost::AddCost<int>() * num_boxes * 14 +
Eigen::TensorOpCost::MulCost<int>() * num_boxes * 9 +
Eigen::TensorOpCost::MulCost<float>() * num_boxes * 9 +
Eigen::TensorOpCost::AddCost<float>() * num_boxes * 8;
const Eigen::TensorOpCost cost(input_bytes, output_bytes, compute_cycles);
const CPUDevice& d = context->eigen_device<CPUDevice>();
d.parallelFor(length, cost, shard_nms);
int per_batch_size = total_size_per_batch;
int max_total_size = static_cast<int>(
std::min(static_cast<int64_t>(std::numeric_limits<int>::max()),
static_cast<int64_t>(max_size_per_class) * num_classes));
if (pad_per_class) {
per_batch_size = std::min(total_size_per_batch, max_total_size);
}
Tensor* valid_detections_t = nullptr;
TensorShape valid_detections_shape({num_batches});
OP_REQUIRES_OK(context, context->allocate_output(3, valid_detections_shape,
&valid_detections_t));
auto valid_detections_flat = valid_detections_t->template flat<int>();
auto shard_result = [&](int begin, int end) {
for (int batch_idx = begin; batch_idx < end; ++batch_idx) {
SelectResultPerBatch(
nmsed_boxes[batch_idx], nmsed_scores[batch_idx],
nmsed_classes[batch_idx], result_candidate_vec[batch_idx],
final_valid_detections, batch_idx, total_size_per_batch,
pad_per_class, max_total_size, clip_boxes, per_batch_size);
valid_detections_flat(batch_idx) = final_valid_detections[batch_idx];
}
};
length = num_batches;
input_bytes =
num_boxes * 10 * sizeof(float) + per_batch_size * 6 * sizeof(float);
output_bytes =
num_boxes * 5 * sizeof(float) + per_batch_size * 6 * sizeof(float);
compute_cycles = Eigen::TensorOpCost::AddCost<int>() * num_boxes * 5 +
Eigen::TensorOpCost::AddCost<float>() * num_boxes * 5;
const Eigen::TensorOpCost cost_result(input_bytes, output_bytes,
compute_cycles);
d.parallelFor(length, cost_result, shard_result);
Tensor* nmsed_boxes_t = nullptr;
TensorShape boxes_shape({num_batches, per_batch_size, 4});
OP_REQUIRES_OK(context,
context->allocate_output(0, boxes_shape, &nmsed_boxes_t));
auto nmsed_boxes_flat = nmsed_boxes_t->template flat<float>();
Tensor* nmsed_scores_t = nullptr;
TensorShape scores_shape({num_batches, per_batch_size});
OP_REQUIRES_OK(context,
context->allocate_output(1, scores_shape, &nmsed_scores_t));
auto nmsed_scores_flat = nmsed_scores_t->template flat<float>();
Tensor* nmsed_classes_t = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(2, scores_shape, &nmsed_classes_t));
auto nmsed_classes_flat = nmsed_classes_t->template flat<float>();
auto shard_copy_result = [&](int begin, int end) {
for (int idx = begin; idx < end; ++idx) {
int batch_idx = idx / per_batch_size;
int j = idx % per_batch_size;
nmsed_scores_flat(idx) = nmsed_scores[batch_idx][j];
nmsed_classes_flat(idx) = nmsed_classes[batch_idx][j];
for (int k = 0; k < 4; ++k) {
nmsed_boxes_flat(idx * 4 + k) = nmsed_boxes[batch_idx][j * 4 + k];
}
}
};
length = num_batches * per_batch_size;
input_bytes = 6 * sizeof(float);
output_bytes = 6 * sizeof(float);
compute_cycles = Eigen::TensorOpCost::AddCost<int>() * 2 +
Eigen::TensorOpCost::MulCost<int>() * 2 +
Eigen::TensorOpCost::DivCost<float>() * 2;
const Eigen::TensorOpCost cost_copy_result(input_bytes, output_bytes,
compute_cycles);
d.parallelFor(length, cost_copy_result, shard_copy_result);
}
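// Reads a 0-D tensor of float, double, bfloat16, or half dtype and casts the
// value to T; DCHECK-fails on any other dtype.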
template <typename T>
T GetScalar(const Tensor& tensor) {
switch (tensor.dtype()) {
case DT_FLOAT:
return static_cast<T>(tensor.scalar<float>()());
case DT_DOUBLE:
return static_cast<T>(tensor.scalar<double>()());
case DT_BFLOAT16:
return static_cast<T>(tensor.scalar<Eigen::bfloat16>()());
case DT_HALF:
return static_cast<T>(tensor.scalar<Eigen::half>()());
default:
DCHECK(false) << "Unsupported type " << tensor.dtype();
break;
}
return static_cast<T>(0);
}
}
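// V1 kernel: iou_threshold is a node attribute rather than an input, and no
// score threshold is applied (it is pinned to the lowest float, so every box
// is a candidate).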
template <typename Device>
class NonMaxSuppressionOp : public OpKernel {
public:
explicit NonMaxSuppressionOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("iou_threshold", &iou_threshold_));
}
void Compute(OpKernelContext* context) override {
const Tensor& boxes = context->input(0);
const Tensor& scores = context->input(1);
const Tensor& max_output_size = context->input(2);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(max_output_size.shape()),
errors::InvalidArgument("max_output_size must be 0-D, got shape ",
max_output_size.shape().DebugString()));
OP_REQUIRES(context, iou_threshold_ >= 0 && iou_threshold_ <= 1,
errors::InvalidArgument("iou_threshold must be in [0, 1]"));
int num_boxes = 0;
ParseAndCheckBoxSizes(context, boxes, &num_boxes);
CheckScoreSizes(context, num_boxes, scores);
if (!context->status().ok()) {
return;
}
auto similarity_fn = CreateIOUSimilarityFn<float>(boxes);
const float score_threshold_val = std::numeric_limits<float>::lowest();
const float dummy_soft_nms_sigma = static_cast<float>(0.0);
DoNonMaxSuppressionOp<float>(context, scores, num_boxes, max_output_size,
iou_threshold_, score_threshold_val,
dummy_soft_nms_sigma, similarity_fn);
}
private:
float iou_threshold_;
};
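// V2 kernel: iou_threshold becomes a scalar input, and boxes/scores may be
// float or half (type parameter T).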
template <typename Device, typename T>
class NonMaxSuppressionV2Op : public OpKernel {
public:
explicit NonMaxSuppressionV2Op(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& boxes = context->input(0);
const Tensor& scores = context->input(1);
const Tensor& max_output_size = context->input(2);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(max_output_size.shape()),
errors::InvalidArgument("max_output_size must be 0-D, got shape ",
max_output_size.shape().DebugString()));
const Tensor& iou_threshold = context->input(3);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(iou_threshold.shape()),
errors::InvalidArgument("iou_threshold must be 0-D, got shape ",
iou_threshold.shape().DebugString()));
const T iou_threshold_val = GetScalar<T>(iou_threshold);
OP_REQUIRES(context,
iou_threshold_val >= static_cast<T>(0.0) &&
iou_threshold_val <= static_cast<T>(1.0),
errors::InvalidArgument("iou_threshold must be in [0, 1]"));
int num_boxes = 0;
ParseAndCheckBoxSizes(context, boxes, &num_boxes);
CheckScoreSizes(context, num_boxes, scores);
if (!context->status().ok()) {
return;
}
auto similarity_fn = CreateIOUSimilarityFn<T>(boxes);
const T score_threshold_val = std::numeric_limits<T>::lowest();
const T dummy_soft_nms_sigma = static_cast<T>(0.0);
DoNonMaxSuppressionOp<T>(context, scores, num_boxes, max_output_size,
iou_threshold_val, score_threshold_val,
dummy_soft_nms_sigma, similarity_fn);
}
};
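// V3 kernel: adds a scalar score_threshold input for pre-filtering
// candidates before the greedy selection loop.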
template <typename Device, typename T>
class NonMaxSuppressionV3Op : public OpKernel {
public:
explicit NonMaxSuppressionV3Op(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& boxes = context->input(0);
const Tensor& scores = context->input(1);
const Tensor& max_output_size = context->input(2);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(max_output_size.shape()),
errors::InvalidArgument("max_output_size must be 0-D, got shape ",
max_output_size.shape().DebugString(),
" (Shape must be rank 0 but is ", "rank ",
max_output_size.dims(), ")"));
const Tensor& iou_threshold = context->input(3);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(iou_threshold.shape()),
errors::InvalidArgument("iou_threshold must be 0-D, got shape ",
iou_threshold.shape().DebugString(),
" (Shape must be rank 0 but is rank ",
iou_threshold.dims(), ")"));
const T iou_threshold_val = GetScalar<T>(iou_threshold);
OP_REQUIRES(context,
iou_threshold_val >= static_cast<T>(0.0) &&
iou_threshold_val <= static_cast<T>(1.0),
errors::InvalidArgument("iou_threshold must be in [0, 1]"));
const Tensor& score_threshold = context->input(4);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(score_threshold.shape()),
errors::InvalidArgument("score_threshold must be 0-D, got shape ",
score_threshold.shape().DebugString()));
const T score_threshold_val = GetScalar<T>(score_threshold);
int num_boxes = 0;
ParseAndCheckBoxSizes(context, boxes, &num_boxes);
CheckScoreSizes(context, num_boxes, scores);
if (!context->status().ok()) {
return;
}
auto similarity_fn = CreateIOUSimilarityFn<T>(boxes);
const T dummy_soft_nms_sigma = static_cast<T>(0.0);
DoNonMaxSuppressionOp<T>(context, scores, num_boxes, max_output_size,
iou_threshold_val, score_threshold_val,
dummy_soft_nms_sigma, similarity_fn);
}
};
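// V4 kernel: adds the pad_to_max_output_size attribute and a second output
// holding the number of valid (non-padding) indices.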
template <typename Device, typename T>
class NonMaxSuppressionV4Op : public OpKernel {
public:
explicit NonMaxSuppressionV4Op(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("pad_to_max_output_size",
&pad_to_max_output_size_));
}
void Compute(OpKernelContext* context) override {
const Tensor& boxes = context->input(0);
const Tensor& scores = context->input(1);
const Tensor& max_output_size = context->input(2);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(max_output_size.shape()),
errors::InvalidArgument("max_output_size must be 0-D, got shape ",
max_output_size.shape().DebugString()));
const Tensor& iou_threshold = context->input(3);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(iou_threshold.shape()),
errors::InvalidArgument("iou_threshold must be 0-D, got shape ",
iou_threshold.shape().DebugString()));
const T iou_threshold_val = GetScalar<T>(iou_threshold);
OP_REQUIRES(context,
iou_threshold_val >= static_cast<T>(0.0) &&
iou_threshold_val <= static_cast<T>(1.0),
errors::InvalidArgument("iou_threshold must be in [0, 1]"));
const Tensor& score_threshold = context->input(4);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(score_threshold.shape()),
errors::InvalidArgument("score_threshold must be 0-D, got shape ",
score_threshold.shape().DebugString()));
const T score_threshold_val = GetScalar<T>(score_threshold);
int num_boxes = 0;
ParseAndCheckBoxSizes(context, boxes, &num_boxes);
CheckScoreSizes(context, num_boxes, scores);
if (!context->status().ok()) {
return;
}
auto similarity_fn = CreateIOUSimilarityFn<T>(boxes);
int num_valid_outputs;
bool return_scores_tensor_ = false;
const T dummy_soft_nms_sigma = static_cast<T>(0.0);
DoNonMaxSuppressionOp<T>(
context, scores, num_boxes, max_output_size, iou_threshold_val,
score_threshold_val, dummy_soft_nms_sigma, similarity_fn,
return_scores_tensor_, pad_to_max_output_size_, &num_valid_outputs);
if (!context->status().ok()) {
return;
}
Tensor* num_outputs_t = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(
1, tensorflow::TensorShape{}, &num_outputs_t));
num_outputs_t->scalar<int32>().setConstant(num_valid_outputs);
}
private:
bool pad_to_max_output_size_;
};
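// V5 kernel: adds a scalar soft_nms_sigma input (Soft-NMS) and returns the
// possibly-decayed scores of the selected boxes alongside their indices,
// plus the count of valid outputs.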
template <typename Device, typename T>
class NonMaxSuppressionV5Op : public OpKernel {
public:
explicit NonMaxSuppressionV5Op(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("pad_to_max_output_size",
&pad_to_max_output_size_));
}
void Compute(OpKernelContext* context) override {
const Tensor& boxes = context->input(0);
const Tensor& scores = context->input(1);
const Tensor& max_output_size = context->input(2);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(max_output_size.shape()),
errors::InvalidArgument("max_output_size must be 0-D, got shape ",
max_output_size.shape().DebugString()));
const Tensor& iou_threshold = context->input(3);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(iou_threshold.shape()),
errors::InvalidArgument("iou_threshold must be 0-D, got shape ",
iou_threshold.shape().DebugString()));
const T iou_threshold_val = iou_threshold.scalar<T>()();
OP_REQUIRES(context,
iou_threshold_val >= static_cast<T>(0.0) &&
iou_threshold_val <= static_cast<T>(1.0),
errors::InvalidArgument("iou_threshold must be in [0, 1]"));
const Tensor& score_threshold = context->input(4);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(score_threshold.shape()),
errors::InvalidArgument("score_threshold must be 0-D, got shape ",
score_threshold.shape().DebugString()));
const T score_threshold_val = score_threshold.scalar<T>()();
const Tensor& soft_nms_sigma = context->input(5);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(soft_nms_sigma.shape()),
errors::InvalidArgument("soft_nms_sigma must be 0-D, got shape ",
soft_nms_sigma.shape().DebugString()));
const T soft_nms_sigma_val = soft_nms_sigma.scalar<T>()();
OP_REQUIRES(context, soft_nms_sigma_val >= static_cast<T>(0.0),
errors::InvalidArgument("soft_nms_sigma_val must be >= 0"));
int num_boxes = 0;
ParseAndCheckBoxSizes(context, boxes, &num_boxes);
CheckScoreSizes(context, num_boxes, scores);
if (!context->status().ok()) {
return;
}
auto similarity_fn = CreateIOUSimilarityFn<T>(boxes);
int num_valid_outputs;
const bool return_scores_tensor_ = true;
DoNonMaxSuppressionOp<T>(
context, scores, num_boxes, max_output_size, iou_threshold_val,
score_threshold_val, soft_nms_sigma_val, similarity_fn,
return_scores_tensor_, pad_to_max_output_size_, &num_valid_outputs);
if (!context->status().ok()) {
return;
}
Tensor* num_outputs_t = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(
2, tensorflow::TensorShape{}, &num_outputs_t));
num_outputs_t->scalar<int32>().setConstant(num_valid_outputs);
}
private:
bool pad_to_max_output_size_;
};
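// Variant that consumes a precomputed [num_boxes, num_boxes] pairwise
// overlap matrix and an overlap_threshold instead of box coordinates.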
template <typename Device>
class NonMaxSuppressionWithOverlapsOp : public OpKernel {
public:
explicit NonMaxSuppressionWithOverlapsOp(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& overlaps = context->input(0);
const Tensor& scores = context->input(1);
const Tensor& max_output_size = context->input(2);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(max_output_size.shape()),
errors::InvalidArgument("max_output_size must be 0-D, got shape ",
max_output_size.shape().DebugString()));
const Tensor& overlap_threshold = context->input(3);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(overlap_threshold.shape()),
errors::InvalidArgument("overlap_threshold must be 0-D, got shape ",
overlap_threshold.shape().DebugString()));
const float overlap_threshold_val = overlap_threshold.scalar<float>()();
const Tensor& score_threshold = context->input(4);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(score_threshold.shape()),
errors::InvalidArgument("score_threshold must be 0-D, got shape ",
score_threshold.shape().DebugString()));
const float score_threshold_val = score_threshold.scalar<float>()();
int num_boxes = 0;
ParseAndCheckOverlapSizes(context, overlaps, &num_boxes);
CheckScoreSizes(context, num_boxes, scores);
if (!context->status().ok()) {
return;
}
auto similarity_fn = CreateOverlapSimilarityFn<float>(overlaps);
const float dummy_soft_nms_sigma = static_cast<float>(0.0);
DoNonMaxSuppressionOp<float>(context, scores, num_boxes, max_output_size,
overlap_threshold_val, score_threshold_val,
dummy_soft_nms_sigma, similarity_fn);
}
};
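// Batched, multi-class NMS over [batch, num_boxes, q, 4] boxes and
// [batch, num_boxes, num_classes] scores; validation here, heavy lifting in
// BatchedNonMaxSuppressionOp above.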
template <typename Device>
class CombinedNonMaxSuppressionOp : public OpKernel {
public:
explicit CombinedNonMaxSuppressionOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("pad_per_class", &pad_per_class_));
OP_REQUIRES_OK(context, context->GetAttr("clip_boxes", &clip_boxes_));
}
void Compute(OpKernelContext* context) override {
const Tensor& boxes = context->input(0);
const Tensor& scores = context->input(1);
OP_REQUIRES(
context, (boxes.dim_size(0) == scores.dim_size(0)),
errors::InvalidArgument("boxes and scores must have same batch size"));
const Tensor& max_output_size = context->input(2);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(max_output_size.shape()),
errors::InvalidArgument("max_size_per_class must be 0-D, got shape ",
max_output_size.shape().DebugString()));
const int max_size_per_class = max_output_size.scalar<int>()();
OP_REQUIRES(context, max_size_per_class > 0,
errors::InvalidArgument("max_size_per_class must be positive"));
const Tensor& max_total_size = context->input(3);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(max_total_size.shape()),
errors::InvalidArgument("max_total_size must be 0-D, got shape ",
max_total_size.shape().DebugString()));
const int max_total_size_per_batch = max_total_size.scalar<int>()();
OP_REQUIRES(context, max_total_size_per_batch > 0,
errors::InvalidArgument("max_total_size must be > 0"));
    if (max_total_size_per_batch > 1000000) {
LOG(WARNING) << "Detected a large value for `max_total_size`. This may "
<< "cause OOM error. (max_total_size: "
<< max_total_size.scalar<int>()() << ")";
}
const Tensor& iou_threshold = context->input(4);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(iou_threshold.shape()),
errors::InvalidArgument("iou_threshold must be 0-D, got shape ",
iou_threshold.shape().DebugString()));
const float iou_threshold_val = iou_threshold.scalar<float>()();
const Tensor& score_threshold = context->input(5);
OP_REQUIRES(
context, TensorShapeUtils::IsScalar(score_threshold.shape()),
errors::InvalidArgument("score_threshold must be 0-D, got shape ",
score_threshold.shape().DebugString()));
const float score_threshold_val = score_threshold.scalar<float>()();
OP_REQUIRES(context, iou_threshold_val >= 0 && iou_threshold_val <= 1,
errors::InvalidArgument("iou_threshold must be in [0, 1]"));
int num_boxes = 0;
const int num_classes = scores.dim_size(2);
ParseAndCheckCombinedNMSBoxSizes(context, boxes, &num_boxes, num_classes);
CheckCombinedNMSScoreSizes(context, num_boxes, scores);
if (!context->status().ok()) {
return;
}
BatchedNonMaxSuppressionOp(context, boxes, scores, num_boxes,
max_size_per_class, max_total_size_per_batch,
score_threshold_val, iou_threshold_val,
pad_per_class_, clip_boxes_);
}
private:
bool pad_per_class_;
bool clip_boxes_;
};
REGISTER_KERNEL_BUILDER(Name("NonMaxSuppression").Device(DEVICE_CPU),
NonMaxSuppressionOp<CPUDevice>);
REGISTER_KERNEL_BUILDER(
Name("NonMaxSuppressionV2").TypeConstraint<float>("T").Device(DEVICE_CPU),
NonMaxSuppressionV2Op<CPUDevice, float>);
REGISTER_KERNEL_BUILDER(Name("NonMaxSuppressionV2")
.TypeConstraint<Eigen::half>("T")
.Device(DEVICE_CPU),
NonMaxSuppressionV2Op<CPUDevice, Eigen::half>);
REGISTER_KERNEL_BUILDER(
Name("NonMaxSuppressionV3").TypeConstraint<float>("T").Device(DEVICE_CPU),
NonMaxSuppressionV3Op<CPUDevice, float>);
REGISTER_KERNEL_BUILDER(Name("NonMaxSuppressionV3")
.TypeConstraint<Eigen::half>("T")
.Device(DEVICE_CPU),
NonMaxSuppressionV3Op<CPUDevice, Eigen::half>);
REGISTER_KERNEL_BUILDER(
Name("NonMaxSuppressionV4").TypeConstraint<float>("T").Device(DEVICE_CPU),
NonMaxSuppressionV4Op<CPUDevice, float>);
REGISTER_KERNEL_BUILDER(Name("NonMaxSuppressionV4")
.TypeConstraint<Eigen::half>("T")
.Device(DEVICE_CPU),
NonMaxSuppressionV4Op<CPUDevice, Eigen::half>);
REGISTER_KERNEL_BUILDER(
Name("NonMaxSuppressionV5").TypeConstraint<float>("T").Device(DEVICE_CPU),
NonMaxSuppressionV5Op<CPUDevice, float>);
REGISTER_KERNEL_BUILDER(Name("NonMaxSuppressionV5")
.TypeConstraint<Eigen::half>("T")
.Device(DEVICE_CPU),
NonMaxSuppressionV5Op<CPUDevice, Eigen::half>);
REGISTER_KERNEL_BUILDER(
Name("NonMaxSuppressionWithOverlaps").Device(DEVICE_CPU),
NonMaxSuppressionWithOverlapsOp<CPUDevice>);
REGISTER_KERNEL_BUILDER(Name("CombinedNonMaxSuppression").Device(DEVICE_CPU),
CombinedNonMaxSuppressionOp<CPUDevice>);
} | #include "tensorflow/core/framework/allocator.h"
#include "absl/strings/match.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
class NonMaxSuppressionOpTest : public OpsTestBase {
protected:
void MakeOp(float iou_threshold) {
TF_EXPECT_OK(NodeDefBuilder("non_max_suppression_op", "NonMaxSuppression")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("iou_threshold", iou_threshold)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
TEST_F(NonMaxSuppressionOpTest, TestSelectFromThreeClusters) {
MakeOp(.5);
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionOpTest, TestSelectFromThreeClustersFlippedCoordinates) {
MakeOp(.5);
AddInputFromArray<float>(TensorShape({6, 4}),
{1, 1, 0, 0, 0, 0.1f, 1, 1.1f, 0, .9f, 1, -0.1f,
0, 10, 1, 11, 1, 10.1f, 0, 11.1f, 1, 101, 0, 100});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionOpTest, TestSelectAtMostTwoBoxesFromThreeClusters) {
MakeOp(.5);
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected, {3, 0});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionOpTest, TestSelectWithNegativeScores) {
MakeOp(.5);
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(
TensorShape({6}), {.9f - 10.0f, .75f - 10.0f, .6f - 10.0f, .95f - 10.0f,
.5f - 10.0f, .3f - 10.0f});
AddInputFromArray<int>(TensorShape({}), {6});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionOpTest, TestFirstBoxDegenerate) {
MakeOp(.5);
AddInputFromArray<float>(TensorShape({3, 4}),
{0, 0, 0, 0, 1, 1, 2, 2, 2, 2, 3, 3});
AddInputFromArray<float>(TensorShape({3}), {.9f, .75f, .6f});
AddInputFromArray<int>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {0, 1, 2});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionOpTest, TestSelectAtMostThirtyBoxesFromThreeClusters) {
MakeOp(.5);
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {30});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionOpTest, TestSelectSingleBox) {
MakeOp(.5);
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
AddInputFromArray<float>(TensorShape({1}), {.9f});
AddInputFromArray<int>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected, {0});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionOpTest, TestSelectFromTenIdenticalBoxes) {
MakeOp(.5);
int num_boxes = 10;
std::vector<float> corners(num_boxes * 4);
std::vector<float> scores(num_boxes);
for (int i = 0; i < num_boxes; ++i) {
corners[i * 4 + 0] = 0;
corners[i * 4 + 1] = 0;
corners[i * 4 + 2] = 1;
corners[i * 4 + 3] = 1;
scores[i] = .9;
}
AddInputFromArray<float>(TensorShape({num_boxes, 4}), corners);
AddInputFromArray<float>(TensorShape({num_boxes}), scores);
AddInputFromArray<int>(TensorShape({}), {3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected, {0});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionOpTest, TestInconsistentBoxAndScoreShapes) {
MakeOp(.5);
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({5}), {.9f, .75f, .6f, .95f, .5f});
AddInputFromArray<int>(TensorShape({}), {30});
Status s = RunOpKernel();
ASSERT_FALSE(s.ok());
EXPECT_TRUE(absl::StrContains(s.ToString(), "scores has incompatible shape"))
<< s;
}
TEST_F(NonMaxSuppressionOpTest, TestInvalidIOUThreshold) {
MakeOp(1.2);
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
AddInputFromArray<float>(TensorShape({1}), {.9f});
AddInputFromArray<int>(TensorShape({}), {3});
Status s = RunOpKernel();
ASSERT_FALSE(s.ok());
EXPECT_TRUE(
absl::StrContains(s.ToString(), "iou_threshold must be in [0, 1]"))
<< s;
}
TEST_F(NonMaxSuppressionOpTest, TestEmptyInput) {
MakeOp(.5);
AddInputFromArray<float>(TensorShape({0, 4}), {});
AddInputFromArray<float>(TensorShape({0}), {});
AddInputFromArray<int>(TensorShape({}), {30});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({0}));
test::FillValues<int>(&expected, {});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
class NonMaxSuppressionV2OpTest : public OpsTestBase {
protected:
void MakeOp() {
TF_EXPECT_OK(NodeDefBuilder("non_max_suppression_op", "NonMaxSuppressionV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
TEST_F(NonMaxSuppressionV2OpTest, TestSelectFromThreeClusters) {
MakeOp();
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionV2OpTest,
TestSelectFromThreeClustersFlippedCoordinates) {
MakeOp();
AddInputFromArray<float>(TensorShape({6, 4}),
{1, 1, 0, 0, 0, 0.1f, 1, 1.1f, 0, .9f, 1, -0.1f,
0, 10, 1, 11, 1, 10.1f, 0, 11.1f, 1, 101, 0, 100});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionV2OpTest, TestSelectAtMostTwoBoxesFromThreeClusters) {
MakeOp();
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {2});
AddInputFromArray<float>(TensorShape({}), {.5f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected, {3, 0});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionV2OpTest,
TestSelectAtMostThirtyBoxesFromThreeClusters) {
MakeOp();
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {30});
AddInputFromArray<float>(TensorShape({}), {.5f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionV2OpTest, TestSelectSingleBox) {
MakeOp();
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
AddInputFromArray<float>(TensorShape({1}), {.9f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected, {0});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionV2OpTest, TestSelectFromTenIdenticalBoxes) {
MakeOp();
int num_boxes = 10;
std::vector<float> corners(num_boxes * 4);
std::vector<float> scores(num_boxes);
for (int i = 0; i < num_boxes; ++i) {
corners[i * 4 + 0] = 0;
corners[i * 4 + 1] = 0;
corners[i * 4 + 2] = 1;
corners[i * 4 + 3] = 1;
scores[i] = .9;
}
AddInputFromArray<float>(TensorShape({num_boxes, 4}), corners);
AddInputFromArray<float>(TensorShape({num_boxes}), scores);
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected, {0});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionV2OpTest, TestInconsistentBoxAndScoreShapes) {
MakeOp();
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({5}), {.9f, .75f, .6f, .95f, .5f});
AddInputFromArray<int>(TensorShape({}), {30});
AddInputFromArray<float>(TensorShape({}), {.5f});
Status s = RunOpKernel();
ASSERT_FALSE(s.ok());
EXPECT_TRUE(absl::StrContains(s.ToString(), "scores has incompatible shape"))
<< s;
}
TEST_F(NonMaxSuppressionV2OpTest, TestInvalidIOUThreshold) {
MakeOp();
AddInputFromArray<float>(TensorShape({1, 4}), {0, 0, 1, 1});
AddInputFromArray<float>(TensorShape({1}), {.9f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {1.2f});
Status s = RunOpKernel();
ASSERT_FALSE(s.ok());
EXPECT_TRUE(
absl::StrContains(s.ToString(), "iou_threshold must be in [0, 1]"))
<< s;
}
TEST_F(NonMaxSuppressionV2OpTest, TestEmptyInput) {
MakeOp();
AddInputFromArray<float>(TensorShape({0, 4}), {});
AddInputFromArray<float>(TensorShape({0}), {});
AddInputFromArray<int>(TensorShape({}), {30});
AddInputFromArray<float>(TensorShape({}), {.5f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({0}));
test::FillValues<int>(&expected, {});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
using NmsValidTypes =
::testing::Types<std::pair<float, float>, std::pair<float, Eigen::half>,
std::pair<Eigen::half, Eigen::half>,
std::pair<Eigen::half, float> >;
template <typename InputAndThresholdTypes>
class NonMaxSuppressionV3OpTest : public OpsTestBase {
protected:
using InputType = typename InputAndThresholdTypes::first_type;
using ThresholdType = typename InputAndThresholdTypes::second_type;
void MakeOp() {
constexpr DataType kInputDataType = DataTypeToEnum<InputType>::value;
constexpr DataType kThresholdDataType =
DataTypeToEnum<ThresholdType>::value;
TF_EXPECT_OK(NodeDefBuilder("non_max_suppression_op", "NonMaxSuppressionV3")
.Input(FakeInput(kInputDataType))
.Input(FakeInput(kInputDataType))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(kThresholdDataType))
.Input(FakeInput(kThresholdDataType))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
TYPED_TEST_SUITE(NonMaxSuppressionV3OpTest, NmsValidTypes);
TYPED_TEST(NonMaxSuppressionV3OpTest, TestSelectFromThreeClusters) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType, float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
this->template AddInputFromList<InputType>(TensorShape({6}),
{.9f, .75f, .6f, .95f, .5f, .3f});
this->template AddInputFromList<int>(TensorShape({}), {3});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
TF_ASSERT_OK(this->RunOpKernel());
Tensor expected(this->allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
TYPED_TEST(NonMaxSuppressionV3OpTest,
TestSelectFromThreeClustersWithScoreThreshold) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType, float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
this->template AddInputFromList<InputType>(TensorShape({6}),
{.9f, .75f, .6f, .95f, .5f, .3f});
this->template AddInputFromList<int>(TensorShape({}), {3});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.4f});
TF_ASSERT_OK(this->RunOpKernel());
Tensor expected(this->allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected, {3, 0});
test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
TYPED_TEST(NonMaxSuppressionV3OpTest,
TestSelectFromThreeClustersWithScoreThresholdZeroScores) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType, float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
this->template AddInputFromList<InputType, float>(TensorShape({6}),
{.1, 0, 0, .3, .2, -5.0});
this->template AddInputFromList<int>(TensorShape({}), {6});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {-3.0f});
TF_ASSERT_OK(this->RunOpKernel());
Tensor expected(this->allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected, {3, 0});
test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
TYPED_TEST(NonMaxSuppressionV3OpTest,
TestSelectFromThreeClustersFlippedCoordinates) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType, float>(
TensorShape({6, 4}), {1, 1, 0, 0, 0, 0.1f, 1, 1.1f, 0, .9f, 1, -0.1f,
0, 10, 1, 11, 1, 10.1f, 0, 11.1f, 1, 101, 0, 100});
this->template AddInputFromList<InputType>(TensorShape({6}),
{.9f, .75f, .6f, .95f, .5f, .3f});
this->template AddInputFromList<int>(TensorShape({}), {3});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
TF_ASSERT_OK(this->RunOpKernel());
Tensor expected(this->allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
TYPED_TEST(NonMaxSuppressionV3OpTest,
TestSelectAtMostTwoBoxesFromThreeClusters) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType, float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
this->template AddInputFromList<InputType>(TensorShape({6}),
{.9f, .75f, .6f, .95f, .5f, .3f});
this->template AddInputFromList<int>(TensorShape({}), {2});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
TF_ASSERT_OK(this->RunOpKernel());
Tensor expected(this->allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected, {3, 0});
test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
TYPED_TEST(NonMaxSuppressionV3OpTest,
TestSelectAtMostThirtyBoxesFromThreeClusters) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType, float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
this->template AddInputFromList<InputType>(TensorShape({6}),
{.9f, .75f, .6f, .95f, .5f, .3f});
this->template AddInputFromList<int>(TensorShape({}), {30});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
TF_ASSERT_OK(this->RunOpKernel());
Tensor expected(this->allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
TYPED_TEST(NonMaxSuppressionV3OpTest, TestSelectSingleBox) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType>(TensorShape({1, 4}), {0, 0, 1, 1});
this->template AddInputFromList<InputType>(TensorShape({1}), {.9f});
this->template AddInputFromList<int>(TensorShape({}), {3});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.5});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0});
TF_ASSERT_OK(this->RunOpKernel());
Tensor expected(this->allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected, {0});
test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
TYPED_TEST(NonMaxSuppressionV3OpTest, TestSelectFromTenIdenticalBoxes) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
int num_boxes = 10;
std::vector<InputType> corners(num_boxes * 4);
std::vector<InputType> scores(num_boxes);
for (int i = 0; i < num_boxes; ++i) {
corners[i * 4 + 0] = static_cast<InputType>(0);
corners[i * 4 + 1] = static_cast<InputType>(0);
corners[i * 4 + 2] = static_cast<InputType>(1);
corners[i * 4 + 3] = static_cast<InputType>(1);
scores[i] = static_cast<InputType>(.9);
}
this->template AddInputFromArray<InputType>(TensorShape({num_boxes, 4}),
corners);
this->template AddInputFromArray<InputType>(TensorShape({num_boxes}), scores);
this->template AddInputFromList<int>(TensorShape({}), {3});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
TF_ASSERT_OK(this->RunOpKernel());
Tensor expected(this->allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected, {0});
test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
TYPED_TEST(NonMaxSuppressionV3OpTest, TestInconsistentBoxAndScoreShapes) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType, float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
this->template AddInputFromList<InputType>(TensorShape({5}),
{.9f, .75f, .6f, .95f, .5f});
this->template AddInputFromList<int>(TensorShape({}), {30});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.5});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0});
Status s = this->RunOpKernel();
ASSERT_FALSE(s.ok());
EXPECT_TRUE(absl::StrContains(s.ToString(), "scores has incompatible shape"))
<< s;
}
TYPED_TEST(NonMaxSuppressionV3OpTest, TestInvalidIOUThreshold) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType>(TensorShape({1, 4}), {0, 0, 1, 1});
this->template AddInputFromList<InputType>(TensorShape({1}), {.9f});
this->template AddInputFromList<int>(TensorShape({}), {3});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {1.2f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0});
Status s = this->RunOpKernel();
ASSERT_FALSE(s.ok());
EXPECT_TRUE(
absl::StrContains(s.ToString(), "iou_threshold must be in [0, 1]"))
<< s;
}
TYPED_TEST(NonMaxSuppressionV3OpTest, TestEmptyInput) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromArray<InputType>(TensorShape({0, 4}), {});
this->template AddInputFromArray<InputType>(TensorShape({0}), {});
this->template AddInputFromList<int>(TensorShape({}), {30});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
TF_ASSERT_OK(this->RunOpKernel());
Tensor expected(this->allocator(), DT_INT32, TensorShape({0}));
test::FillValues<int>(&expected, {});
test::ExpectTensorEqual<int>(expected, *(this->GetOutput(0)));
}
template <typename InputAndThresholdTypes>
class NonMaxSuppressionV4OpTest : public OpsTestBase {
protected:
using InputType = typename InputAndThresholdTypes::first_type;
using ThresholdType = typename InputAndThresholdTypes::second_type;
void MakeOp() {
constexpr DataType kInputDataType = DataTypeToEnum<InputType>::value;
constexpr DataType kThresholdDataType =
DataTypeToEnum<ThresholdType>::value;
TF_EXPECT_OK(NodeDefBuilder("non_max_suppression_op", "NonMaxSuppressionV4")
.Input(FakeInput(kInputDataType))
.Input(FakeInput(kInputDataType))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(kThresholdDataType))
.Input(FakeInput(kThresholdDataType))
.Attr("pad_to_max_output_size", true)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
TYPED_TEST_SUITE(NonMaxSuppressionV4OpTest, NmsValidTypes);
TYPED_TEST(NonMaxSuppressionV4OpTest, TestSelectFromThreeClustersPadFive) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType, float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
this->template AddInputFromList<InputType>(TensorShape({6}),
{.9f, .75f, .6f, .95f, .5f, .3f});
this->template AddInputFromList<int>(TensorShape({}), {5});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.0f});
TF_ASSERT_OK(this->RunOpKernel());
const auto expected_indices = test::AsTensor<int>({3, 0, 5, 0, 0});
test::ExpectTensorEqual<int>(expected_indices, *(this->GetOutput(0)));
Tensor expected_num_valid = test::AsScalar<int>(3);
test::ExpectTensorEqual<int>(expected_num_valid, *(this->GetOutput(1)));
}
TYPED_TEST(NonMaxSuppressionV4OpTest,
TestSelectFromThreeClustersPadFiveScoreThr) {
using InputType = typename TestFixture::InputType;
using ThresholdType = typename TestFixture::ThresholdType;
this->MakeOp();
this->template AddInputFromList<InputType, float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
this->template AddInputFromList<InputType>(TensorShape({6}),
{.9f, .75f, .6f, .95f, .5f, .3f});
this->template AddInputFromList<int>(TensorShape({}), {6});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {.5f});
this->template AddInputFromList<ThresholdType>(TensorShape({}), {0.4f});
TF_ASSERT_OK(this->RunOpKernel());
const auto expected_indices = test::AsTensor<int>({3, 0, 0, 0, 0, 0});
test::ExpectTensorEqual<int>(expected_indices, *(this->GetOutput(0)));
Tensor expected_num_valid = test::AsScalar<int>(2);
test::ExpectTensorEqual<int>(expected_num_valid, *(this->GetOutput(1)));
}
class NonMaxSuppressionV5OpTest : public OpsTestBase {
protected:
void MakeOp() {
TF_EXPECT_OK(NodeDefBuilder("non_max_suppression_op", "NonMaxSuppressionV5")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("pad_to_max_output_size", true)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
TEST_F(NonMaxSuppressionV5OpTest, TestSelectFromThreeClustersPadFive) {
MakeOp();
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {5});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
const auto expected_indices = test::AsTensor<int>({3, 0, 5, 0, 0});
test::ExpectTensorEqual<int>(expected_indices, *GetOutput(0));
const auto expected_scores =
test::AsTensor<float>({.95f, .9f, .3f, 0.0f, 0.0f});
test::ExpectTensorNear<float>(expected_scores, *GetOutput(1), 1e-2);
Tensor expected_num_valid = test::AsScalar<int>(3);
test::ExpectTensorEqual<int>(expected_num_valid, *GetOutput(2));
}
TEST_F(NonMaxSuppressionV5OpTest, TestSelectFromThreeClustersWithSoftNMS) {
MakeOp();
AddInputFromArray<float>(
TensorShape({6, 4}),
{0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {6});
AddInputFromArray<float>(TensorShape({}), {0.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
AddInputFromArray<float>(TensorShape({}), {0.5f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int>(&expected, {3, 0, 1, 5, 4, 2});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({6}));
test::FillValues<float>(&expected_scores,
{0.95, 0.9, 0.384, 0.3, 0.256, 0.197});
test::ExpectTensorNear<float>(expected_scores, *GetOutput(1), 1e-2);
Tensor expected_num_valid = test::AsScalar<int>(6);
test::ExpectTensorEqual<int>(expected_num_valid, *GetOutput(2));
}
class NonMaxSuppressionWithOverlapsOpTest : public OpsTestBase {
protected:
void MakeOp() {
TF_EXPECT_OK(NodeDefBuilder("non_max_suppression_op",
"NonMaxSuppressionWithOverlaps")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
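  // Builds the dense pairwise IoU matrix for the given corner boxes and feeds
  // it as the overlaps input, mirroring the kernel's own IOU arithmetic.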
void AddIoUInput(const std::vector<float>& boxes) {
ASSERT_EQ((boxes.size() % 4), 0);
size_t num_boxes = boxes.size() / 4;
std::vector<float> iou_overlaps(num_boxes * num_boxes);
auto corner_access = [&boxes](size_t box_idx, size_t corner_idx) {
return boxes[box_idx * 4 + corner_idx];
};
for (size_t i = 0; i < num_boxes; ++i) {
for (size_t j = 0; j < num_boxes; ++j) {
const float ymin_i =
std::min<float>(corner_access(i, 0), corner_access(i, 2));
const float xmin_i =
std::min<float>(corner_access(i, 1), corner_access(i, 3));
const float ymax_i =
std::max<float>(corner_access(i, 0), corner_access(i, 2));
const float xmax_i =
std::max<float>(corner_access(i, 1), corner_access(i, 3));
const float ymin_j =
std::min<float>(corner_access(j, 0), corner_access(j, 2));
const float xmin_j =
std::min<float>(corner_access(j, 1), corner_access(j, 3));
const float ymax_j =
std::max<float>(corner_access(j, 0), corner_access(j, 2));
const float xmax_j =
std::max<float>(corner_access(j, 1), corner_access(j, 3));
const float area_i = (ymax_i - ymin_i) * (xmax_i - xmin_i);
const float area_j = (ymax_j - ymin_j) * (xmax_j - xmin_j);
float iou;
if (area_i <= 0 || area_j <= 0) {
iou = 0.0;
} else {
const float intersection_ymin = std::max<float>(ymin_i, ymin_j);
const float intersection_xmin = std::max<float>(xmin_i, xmin_j);
const float intersection_ymax = std::min<float>(ymax_i, ymax_j);
const float intersection_xmax = std::min<float>(xmax_i, xmax_j);
const float intersection_area =
std::max<float>(intersection_ymax - intersection_ymin, 0.0) *
std::max<float>(intersection_xmax - intersection_xmin, 0.0);
iou = intersection_area / (area_i + area_j - intersection_area);
}
iou_overlaps[i * num_boxes + j] = iou;
}
}
AddInputFromArray<float>(TensorShape({static_cast<signed>(num_boxes),
static_cast<signed>(num_boxes)}),
iou_overlaps);
}
};
TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestSelectFromThreeClusters) {
MakeOp();
AddIoUInput({0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionWithOverlapsOpTest,
TestSelectFromThreeClustersFlippedCoordinates) {
MakeOp();
AddIoUInput({1, 1, 0, 0, 0, 0.1f, 1, 1.1f, 0, .9f, 1, -0.1f,
0, 10, 1, 11, 1, 10.1f, 0, 11.1f, 1, 101, 0, 100});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionWithOverlapsOpTest,
TestSelectAtMostTwoBoxesFromThreeClusters) {
MakeOp();
AddIoUInput({0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {2});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected, {3, 0});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionWithOverlapsOpTest,
TestSelectAtMostThirtyBoxesFromThreeClusters) {
MakeOp();
AddIoUInput({0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({6}), {.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {30});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({3}));
test::FillValues<int>(&expected, {3, 0, 5});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestSelectSingleBox) {
MakeOp();
AddIoUInput({0, 0, 1, 1});
AddInputFromArray<float>(TensorShape({1}), {.9f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected, {0});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestSelectFromTenIdenticalBoxes) {
MakeOp();
int num_boxes = 10;
std::vector<float> corners(num_boxes * 4);
std::vector<float> scores(num_boxes);
for (int i = 0; i < num_boxes; ++i) {
corners[i * 4 + 0] = 0;
corners[i * 4 + 1] = 0;
corners[i * 4 + 2] = 1;
corners[i * 4 + 3] = 1;
scores[i] = .9;
}
AddIoUInput(corners);
AddInputFromArray<float>(TensorShape({num_boxes}), scores);
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected, {0});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestInconsistentBoxAndScoreShapes) {
MakeOp();
AddIoUInput({0, 0, 1, 1, 0, 0.1f, 1, 1.1f, 0, -0.1f, 1, 0.9f,
0, 10, 1, 11, 0, 10.1f, 1, 11.1f, 0, 100, 1, 101});
AddInputFromArray<float>(TensorShape({5}), {.9f, .75f, .6f, .95f, .5f});
AddInputFromArray<int>(TensorShape({}), {30});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
Status s = RunOpKernel();
ASSERT_FALSE(s.ok());
EXPECT_TRUE(absl::StrContains(s.ToString(), "scores has incompatible shape"))
<< s;
}
TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestInvalidOverlapsShape) {
MakeOp();
AddInputFromArray<float>(TensorShape({2, 3}), {0, 0, 0, 0, 0, 0});
AddInputFromArray<float>(TensorShape({2}), {0.5f, 0.5f});
AddInputFromArray<int>(TensorShape({}), {30});
AddInputFromArray<float>(TensorShape({}), {0.f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
Status s = RunOpKernel();
ASSERT_FALSE(s.ok());
EXPECT_TRUE(absl::StrContains(s.ToString(), "overlaps must be square")) << s;
}
TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestThresholdGreaterOne) {
MakeOp();
AddIoUInput({0, 0, 1, 1});
AddInputFromArray<float>(TensorShape({1}), {.9f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {1.2f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
}
TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestThresholdSmallerZero) {
MakeOp();
AddIoUInput({0, 0, 1, 1});
AddInputFromArray<float>(TensorShape({1}), {.9f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {-0.2f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
}
TEST_F(NonMaxSuppressionWithOverlapsOpTest, TestEmptyInput) {
MakeOp();
AddIoUInput({});
AddInputFromArray<float>(TensorShape({0}), {});
AddInputFromArray<int>(TensorShape({}), {30});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({0}));
test::FillValues<int>(&expected, {});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
class CombinedNonMaxSuppressionOpTest : public OpsTestBase {
protected:
void MakeOp(bool pad_per_class = false, bool clip_boxes = true) {
TF_EXPECT_OK(NodeDefBuilder("combined_non_max_suppression_op",
"CombinedNonMaxSuppression")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("pad_per_class", pad_per_class)
.Attr("clip_boxes", clip_boxes)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
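// Editorial note on the expected shapes in the tests below (inferred from the
// expectations, not stated in this file): per batch, the padded row count is
//
//   rows = pad_per_class ? min(max_total_size,
//                              max_output_size_per_class * num_classes)
//                        : max_total_size;
//
// e.g. the "...PaddedPerClass" test uses per-class size 2 with 2 classes and
// total size 50, so min(50, 4) == 4 rows, while "...PaddedTotalSize" keeps
// min(3, 20) == 3.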
TEST_F(CombinedNonMaxSuppressionOpTest, TestEmptyInput) {
MakeOp();
AddInputFromArray<float>(TensorShape({0, 0, 0, 4}), {});
AddInputFromArray<float>(TensorShape({0, 0, 0}), {});
AddInputFromArray<int>(TensorShape({}), {30});
AddInputFromArray<int>(TensorShape({}), {10});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({0, 10, 4}));
test::FillValues<float>(&expected_boxes, {});
test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({0, 10}));
test::FillValues<float>(&expected_scores, {});
test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({0, 10}));
test::FillValues<float>(&expected_classes, {});
test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({0}));
test::FillValues<int>(&expected_valid_d, {});
test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}
TEST_F(CombinedNonMaxSuppressionOpTest, TestSelectFromThreeClusters) {
MakeOp();
AddInputFromArray<float>(
TensorShape({1, 6, 1, 4}),
{0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,
0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4});
AddInputFromArray<float>(TensorShape({1, 6, 1}),
{.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({1, 3, 4}));
test::FillValues<float>(&expected_boxes,
{0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0.3, 1, 0.4});
test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({1, 3}));
test::FillValues<float>(&expected_scores, {0.95, 0.9, 0.3});
test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({1, 3}));
test::FillValues<float>(&expected_classes, {0, 0, 0});
test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected_valid_d, {3});
test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}
TEST_F(CombinedNonMaxSuppressionOpTest,
TestSelectFromThreeClustersNoBoxClipping) {
MakeOp(false, false);
AddInputFromArray<float>(TensorShape({1, 6, 1, 4}),
{0, 0, 10, 10, 0, 1, 10, 11, 0, 1, 10, 9,
0, 11, 10, 20, 0, 12, 10, 21, 0, 30, 100, 40});
AddInputFromArray<float>(TensorShape({1, 6, 1}),
{.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({1, 3, 4}));
test::FillValues<float>(&expected_boxes,
{0, 11, 10, 20, 0, 0, 10, 10, 0, 30, 100, 40});
test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({1, 3}));
test::FillValues<float>(&expected_scores, {0.95, 0.9, 0.3});
test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({1, 3}));
test::FillValues<float>(&expected_classes, {0, 0, 0});
test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected_valid_d, {3});
test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}
TEST_F(CombinedNonMaxSuppressionOpTest,
TestSelectFromThreeClustersWithScoreThreshold) {
MakeOp();
AddInputFromArray<float>(
TensorShape({1, 6, 1, 4}),
{0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,
0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4});
AddInputFromArray<float>(TensorShape({1, 6, 1}),
{.9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.4f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({1, 3, 4}));
test::FillValues<float>(&expected_boxes,
{0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0, 0, 0});
test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({1, 3}));
test::FillValues<float>(&expected_scores, {0.95, 0.9, 0});
test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({1, 3}));
test::FillValues<float>(&expected_classes, {0, 0, 0});
test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected_valid_d, {2});
test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}
TEST_F(CombinedNonMaxSuppressionOpTest,
TestSelectFromThreeClustersWithScoreThresholdZeroScores) {
MakeOp();
AddInputFromArray<float>(
TensorShape({1, 6, 1, 4}),
{0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,
0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4});
AddInputFromArray<float>(TensorShape({1, 6, 1}),
{.1f, 0, 0, .3f, .2f, -5.0f});
AddInputFromArray<int>(TensorShape({}), {4});
AddInputFromArray<int>(TensorShape({}), {5});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {-3.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({1, 5, 4}));
test::FillValues<float>(
&expected_boxes,
{
0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
});
test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({1, 5}));
test::FillValues<float>(&expected_scores, {0.3, 0.1, 0, 0, 0});
test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({1, 5}));
test::FillValues<float>(&expected_classes, {0, 0, 0, 0, 0});
test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected_valid_d, {2});
test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}
TEST_F(CombinedNonMaxSuppressionOpTest, TestSelectSingleBox) {
MakeOp();
AddInputFromArray<float>(TensorShape({1, 1, 1, 4}), {0, 0, 1, 1});
AddInputFromArray<float>(TensorShape({1, 1, 1}), {.9f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<int>(TensorShape({}), {1});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({1, 1, 4}));
test::FillValues<float>(&expected_boxes, {0, 0, 1, 1});
test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({1, 1}));
test::FillValues<float>(&expected_scores, {0.9});
test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({1, 1}));
test::FillValues<float>(&expected_classes, {0});
test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({1}));
test::FillValues<int>(&expected_valid_d, {1});
test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}
TEST_F(CombinedNonMaxSuppressionOpTest,
TestSelectFromTwoBatchesWithScoreThreshold) {
MakeOp();
AddInputFromArray<float>(
TensorShape({2, 6, 1, 4}),
{0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,
0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4,
0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f, 0, -0.02, 0.2, 0.19f,
0, 0.21, 0.2, 0.3, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1, 0.5});
AddInputFromArray<float>(
TensorShape({2, 6, 1}),
{.9f, .75f, .6f, .95f, .5f, .3f, .9f, .75f, .6f, .95f, .5f, .3f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.4f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({2, 3, 4}));
test::FillValues<float>(&expected_boxes,
{0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0, 0, 0,
0, 0.21, 0.2, 0.3, 0, 0, 0.2, 0.2, 0, 0, 0, 0});
test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({2, 3}));
test::FillValues<float>(&expected_scores, {0.95, 0.9, 0, 0.95, 0.9, 0});
test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({2, 3}));
test::FillValues<float>(&expected_classes, {0, 0, 0, 0, 0, 0});
test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected_valid_d, {2, 2});
test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}
TEST_F(CombinedNonMaxSuppressionOpTest, TestSelectFromTwoBatchesTwoClasses) {
MakeOp();
AddInputFromArray<float>(
TensorShape({2, 6, 1, 4}),
{0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,
0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4,
0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f, 0, -0.02, 0.2, 0.19f,
0, 0.21, 0.2, 0.3, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1, 0.5});
AddInputFromArray<float>(TensorShape({2, 6, 2}),
{0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f,
0.5f, 0.5f, 0.3f, 0.1f, 0.1f, 0.9f, 0.75f, 0.8f,
0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({2, 3, 4}));
test::FillValues<float>(
&expected_boxes,
{0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f,
0, 0.21, 0.2, 0.3, 0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f});
test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({2, 3}));
test::FillValues<float>(&expected_scores, {0.95, 0.9, 0.75, 0.95, 0.9, 0.75});
test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({2, 3}));
test::FillValues<float>(&expected_classes, {0, 1, 0, 0, 1, 0});
test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected_valid_d, {3, 3});
test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}
TEST_F(CombinedNonMaxSuppressionOpTest,
TestSelectFromTwoBatchesTwoClassesWithScoreThreshold) {
MakeOp();
AddInputFromArray<float>(
TensorShape({2, 6, 1, 4}),
{0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,
0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4,
0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f, 0, -0.02, 0.2, 0.19f,
0, 0.21, 0.2, 0.3, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1, 0.5});
AddInputFromArray<float>(TensorShape({2, 6, 2}),
{0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f,
0.5f, 0.5f, 0.3f, 0.1f, 0.1f, 0.9f, 0.75f, 0.8f,
0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.8f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({2, 3, 4}));
test::FillValues<float>(&expected_boxes,
{0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0, 0, 0,
0, 0.21, 0.2, 0.3, 0, 0, 0.2, 0.2, 0, 0, 0, 0});
test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({2, 3}));
test::FillValues<float>(&expected_scores, {0.95, 0.9, 0, 0.95, 0.9, 0});
test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({2, 3}));
test::FillValues<float>(&expected_classes, {0, 1, 0, 0, 1, 0});
test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected_valid_d, {2, 2});
test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}
TEST_F(CombinedNonMaxSuppressionOpTest,
TestSelectFromTwoBatchesTwoClassesWithScoreThresholdPaddedTotalSize) {
MakeOp(true);
AddInputFromArray<float>(
TensorShape({2, 6, 1, 4}),
{0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,
0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4,
0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f, 0, -0.02, 0.2, 0.19f,
0, 0.21, 0.2, 0.3, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1, 0.5});
AddInputFromArray<float>(TensorShape({2, 6, 2}),
{0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f,
0.5f, 0.5f, 0.3f, 0.1f, 0.1f, 0.9f, 0.75f, 0.8f,
0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f});
AddInputFromArray<int>(TensorShape({}), {10});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.8f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({2, 3, 4}));
test::FillValues<float>(&expected_boxes,
{0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0, 0, 0,
0, 0.21, 0.2, 0.3, 0, 0, 0.2, 0.2, 0, 0, 0, 0});
test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({2, 3}));
test::FillValues<float>(&expected_scores, {0.95, 0.9, 0, 0.95, 0.9, 0});
test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({2, 3}));
test::FillValues<float>(&expected_classes, {0, 1, 0, 0, 1, 0});
test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected_valid_d, {2, 2});
test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}
TEST_F(CombinedNonMaxSuppressionOpTest,
TestSelectFromTwoBatchesTwoClassesWithScoreThresholdPaddedPerClass) {
MakeOp(true);
AddInputFromArray<float>(
TensorShape({2, 6, 1, 4}),
{0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,
0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4,
0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f, 0, -0.02, 0.2, 0.19f,
0, 0.21, 0.2, 0.3, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1, 0.5});
AddInputFromArray<float>(TensorShape({2, 6, 2}),
{0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f,
0.5f, 0.5f, 0.3f, 0.1f, 0.1f, 0.9f, 0.75f, 0.8f,
0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f});
AddInputFromArray<int>(TensorShape({}), {2});
AddInputFromArray<int>(TensorShape({}), {50});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.8f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({2, 4, 4}));
test::FillValues<float>(
&expected_boxes,
{0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0.21, 0.2, 0.3, 0, 0, 0.2, 0.2, 0, 0, 0, 0, 0, 0, 0, 0});
test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({2, 4}));
test::FillValues<float>(&expected_scores, {0.95, 0.9, 0, 0, 0.95, 0.9, 0, 0});
test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({2, 4}));
test::FillValues<float>(&expected_classes, {0, 1, 0, 0, 0, 1, 0, 0});
test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected_valid_d, {2, 2});
test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}
TEST_F(CombinedNonMaxSuppressionOpTest,
TestSelectFromTwoBatchesTwoClassesTotalSize) {
MakeOp();
AddInputFromArray<float>(
TensorShape({2, 6, 1, 4}),
{0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, -0.01, 0.1, 0.09f,
0, 0.11, 0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4,
0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f, 0, -0.02, 0.2, 0.19f,
0, 0.21, 0.2, 0.3, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1, 0.5});
AddInputFromArray<float>(TensorShape({2, 6, 2}),
{0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f,
0.5f, 0.5f, 0.3f, 0.1f, 0.1f, 0.9f, 0.75f, 0.8f,
0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<int>(TensorShape({}), {5});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.1f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({2, 5, 4}));
test::FillValues<float>(
&expected_boxes, {0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0.01f,
0.1, 0.11f, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4,
0, 0.21, 0.2, 0.3, 0, 0, 0.2, 0.2, 0, 0.02f,
0.2, 0.22f, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1, 0.5});
test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({2, 5}));
test::FillValues<float>(
&expected_scores, {0.95, 0.9, 0.75, 0.5, 0.3, 0.95, 0.9, 0.75, 0.5, 0.3});
test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({2, 5}));
test::FillValues<float>(&expected_classes, {0, 1, 0, 1, 0, 0, 1, 0, 1, 0});
test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected_valid_d, {5, 5});
test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}
TEST_F(CombinedNonMaxSuppressionOpTest,
TestSelectFromTwoBatchesTwoClassesForBoxesAndScores) {
MakeOp();
AddInputFromArray<float>(
TensorShape({2, 6, 2, 4}),
{0, 0, 0.1, 0.1, 0, 0, 0.1, 0.1, 0, 0.01f, 0.1, 0.11f, 0, 0.6f, 0.1, 0.7f,
0, -0.01, 0.1, 0.09f, 0, -0.01, 0.1, 0.09f, 0, 0.11, 0.1, 0.2, 0, 0.11,
0.1, 0.2, 0, 0.12f, 0.1, 0.21f, 0, 0.12f, 0.1, 0.21f, 0, 0.3, 1, 0.4, 0,
0.3, 1, 0.4,
0, 0, 0.2, 0.2, 0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f, 0, 0.02f, 0.2,
0.22f, 0, -0.02, 0.2, 0.19f, 0, -0.02, 0.2, 0.19f, 0, 0.21, 0.2, 0.3, 0,
0.21, 0.2, 0.3, 0, 0.22f, 0.2, 0.31f, 0, 0.22f, 0.2, 0.31f, 0, 0.4, 1,
0.5, 0, 0.4, 1, 0.5});
AddInputFromArray<float>(TensorShape({2, 6, 2}),
{0.1f, 0.9f, 0.75f, 0.8f, 0.6f, 0.3f, 0.95f, 0.1f,
0.5f, 0.5f, 0.3f, 0.1f, 0.1f, 0.9f, 0.75f, 0.8f,
0.6f, 0.3f, 0.95f, 0.1f, 0.5f, 0.5f, 0.3f, 0.1f});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<int>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {.5f});
AddInputFromArray<float>(TensorShape({}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_boxes(allocator(), DT_FLOAT, TensorShape({2, 3, 4}));
test::FillValues<float>(
&expected_boxes,
{0, 0.11, 0.1, 0.2, 0, 0, 0.1, 0.1, 0, 0.6f, 0.1, 0.7f,
0, 0.21, 0.2, 0.3, 0, 0, 0.2, 0.2, 0, 0.02f, 0.2, 0.22f});
test::ExpectTensorEqual<float>(expected_boxes, *GetOutput(0));
Tensor expected_scores(allocator(), DT_FLOAT, TensorShape({2, 3}));
test::FillValues<float>(&expected_scores, {0.95, 0.9, 0.8, 0.95, 0.9, 0.75});
test::ExpectTensorEqual<float>(expected_scores, *GetOutput(1));
Tensor expected_classes(allocator(), DT_FLOAT, TensorShape({2, 3}));
test::FillValues<float>(&expected_classes, {0, 1, 1, 0, 1, 0});
test::ExpectTensorEqual<float>(expected_classes, *GetOutput(2));
Tensor expected_valid_d(allocator(), DT_INT32, TensorShape({2}));
test::FillValues<int>(&expected_valid_d, {3, 3});
test::ExpectTensorEqual<int>(expected_valid_d, *GetOutput(3));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/non_max_suppression_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/non_max_suppression_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c3e707eb-aa99-42f0-8beb-c5644b98862f | cpp | tensorflow/tensorflow | device_propagation | tensorflow/core/common_runtime/device_propagation.cc | tensorflow/core/common_runtime/device_propagation_test.cc | #include "tensorflow/core/common_runtime/device_propagation.h"
#include <string>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
namespace tensorflow {
namespace {
// Returns the node's assigned device if set, otherwise its requested device.
const std::string& AssignedOrRequestedDevice(const Node& node) {
if (!node.assigned_device_name().empty()) {
return node.assigned_device_name();
}
return node.requested_device();
}
// Propagates a device onto `node` when (a) it has no device yet, (b) it
// passes `node_filter`, and (c) all of its relevant data inputs agree on a
// single device accepted by `device_filter`. Returns true iff the node was
// updated.
bool UpdateDeviceFromInputs(
    const device_propagation::NodeFilter& node_filter,
    const device_propagation::DeviceFilter& device_filter, Node* node) {
if (!AssignedOrRequestedDevice(*node).empty() || !node_filter(*node)) {
return false;
}
string proposed_device = "";
Node* proposed_src = nullptr;
for (const Edge* e : node->in_edges()) {
if (e->IsControlEdge()) {
continue;
}
Node* src = e->src();
const string& src_device = AssignedOrRequestedDevice(*src);
    // The predicate input of a Switch and the Enter input of a loop Merge
    // carry control-flow state, not the placement we want to propagate, so
    // they are excluded from the device vote.
    if ((node->IsSwitch() && src->IsLoopCond()) ||
        (node->IsMerge() && src->IsEnter())) {
      continue;
    }
if (!device_filter(src_device)) return false;
if (proposed_src == nullptr) {
proposed_device = src_device;
proposed_src = src;
} else if (proposed_device != src_device) {
return false;
}
}
if (proposed_src) {
node->set_assigned_device_name(proposed_src->assigned_device_name());
node->set_requested_device(proposed_src->requested_device());
return true;
} else {
return false;
}
}
}
void PropagateDevices(const device_propagation::NodeFilter& node_filter,
const device_propagation::DeviceFilter& device_filter,
Graph* graph) {
  // Iterate to a fixed point: each pass can unlock further propagation once
  // upstream nodes acquire devices.
  bool nodes_changed = true;
while (nodes_changed) {
nodes_changed = false;
BreadthFirstTraversal(
*graph, {}, [&nodes_changed, &node_filter, &device_filter](Node* node) {
nodes_changed |=
UpdateDeviceFromInputs(node_filter, device_filter, node);
});
}
}
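// Usage sketch (editorial; it mirrors the unit test below rather than a known
// production call site, and the filter bodies are assumptions):
//
//   PropagateDevices(
//       /*node_filter=*/[](const Node& n) {
//         return n.type_string() == "Identity" || n.IsMerge();
//       },
//       /*device_filter=*/[](StringPiece device) {
//         return absl::StrContains(device, "device:TPU:");
//       },
//       &graph);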
} | #include "tensorflow/core/common_runtime/device_propagation.h"
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/match.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/control_flow_ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/status_test_util.h"
using ::testing::UnorderedElementsAreArray;
namespace tensorflow {
namespace {
const char kTpu0[] = "/job:localhost/replica:0/task:0/device:TPU:0";
const char kTpu1[] = "/job:localhost/replica:0/task:0/device:TPU:1";
const char kTpu2[] = "/job:localhost/replica:0/task:0/device:TPU:2";
const char kGpu0[] = "/job:localhost/replica:0/task:0/device:GPU:0";
bool IsTPUDevice(StringPiece device_name) {
return absl::StrContains(device_name, "device:TPU:");
}
// Builds a NodeFilter matching the given op type names. Note that the lambda
// captures `ops` by reference, so the returned filter must be consumed while
// the argument is alive (true for the single-expression call sites below).
device_propagation::NodeFilter TargetOps(
    const absl::flat_hash_set<std::string>& ops) {
  return [&ops](const Node& n) { return ops.contains(n.type_string()); };
}
absl::flat_hash_map<std::string, std::string> GetNodeNameDevices(
const Graph& graph) {
absl::flat_hash_map<std::string, std::string> node_name_devices;
for (const Node* node : graph.nodes()) {
if (node->IsSource() || node->IsSink()) {
continue;
}
const string& device = node->assigned_device_name().empty()
? node->requested_device()
: node->assigned_device_name();
node_name_devices[node->name()] = device;
}
return node_name_devices;
}
TEST(DevicePropagationTest, PropagateTPUDevices) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::Placeholder(scope.WithOpName("A"), DT_FLOAT);
a.node()->set_assigned_device_name(kTpu0);
auto b = ops::Placeholder(scope.WithOpName("B"), DT_FLOAT);
b.node()->set_assigned_device_name(kTpu1);
auto c = ops::Identity(scope.WithOpName("C"), a);
auto d =
ops::Merge(scope.WithOpName("D"), std::initializer_list<Input>{a, c});
auto e =
ops::Merge(scope.WithOpName("E"), std::initializer_list<Input>{b, c});
auto f = ops::Identity(scope.WithOpName("F"), a);
f.node()->set_assigned_device_name(kTpu2);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
PropagateDevices(TargetOps({"Identity", "Merge"}), IsTPUDevice, &graph);
EXPECT_THAT(
GetNodeNameDevices(graph),
UnorderedElementsAreArray(
std::vector<std::pair<std::string, std::string>>{
{"A", kTpu0},
{"B", kTpu1},
{"C", kTpu0},
{"D", kTpu0},
{"E", ""},
{"F", kTpu2},
}));
}
TEST(DevicePropagationTest, DoNotPropagateToUnsupportedOps) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::Placeholder(scope.WithOpName("A"), DT_FLOAT);
a.node()->set_assigned_device_name(kTpu0);
auto b = ops::Identity(scope.WithOpName("B"), a);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
PropagateDevices(TargetOps({"Merge"}), IsTPUDevice, &graph);
EXPECT_THAT(GetNodeNameDevices(graph),
UnorderedElementsAreArray(
std::vector<std::pair<std::string, std::string>>{
{"A", kTpu0},
{"B", ""},
}));
}
TEST(DevicePropagationTest, DoNotPropagateUnmatchedDevices) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::Placeholder(scope.WithOpName("A"), DT_FLOAT);
a.node()->set_assigned_device_name(kGpu0);
auto b = ops::Identity(scope.WithOpName("B"), a);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
PropagateDevices(TargetOps({"Identity"}), IsTPUDevice, &graph);
EXPECT_THAT(GetNodeNameDevices(graph),
UnorderedElementsAreArray(
std::vector<std::pair<std::string, std::string>>{
{"A", kGpu0},
{"B", ""},
}));
}
TEST(DevicePropagationTest, SwitchOpShouldIgnoreLoopCondOp) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::Placeholder(scope.WithOpName("A"), DT_BOOL);
auto b = ops::LoopCond(scope.WithOpName("B"), a);
auto c = ops::Placeholder(scope.WithOpName("C"), DT_FLOAT);
c.node()->set_assigned_device_name(kTpu2);
auto d = ops::Switch(scope.WithOpName("D"), c, b);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
PropagateDevices(TargetOps({"Switch", "LoopCond"}), IsTPUDevice, &graph);
EXPECT_THAT(
GetNodeNameDevices(graph),
UnorderedElementsAreArray(std::vector<
std::pair<std::string, std::string>>{
{"A", ""},
{"B", ""},
{"C", kTpu2},
{"D", kTpu2},
}));
}
TEST(DevicePropagationTest, MergeOpShouldIgnoreEnterOp) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::Placeholder(scope.WithOpName("A"), DT_FLOAT);
auto b = ops::Placeholder(scope.WithOpName("B"), DT_FLOAT);
b.node()->set_assigned_device_name(kTpu2);
auto c = ops::internal::Enter(scope.WithOpName("C"), a, "Enter");
auto d = ops::NextIteration(scope.WithOpName("D"), b);
auto e =
ops::Merge(scope.WithOpName("E"), std::initializer_list<Input>{c, d});
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
PropagateDevices(TargetOps({"Enter", "Merge", "NextIteration"}), IsTPUDevice,
&graph);
EXPECT_THAT(
GetNodeNameDevices(graph),
UnorderedElementsAreArray(std::vector<
std::pair<std::string, std::string>>{
{"A", ""},
{"B", kTpu2},
{"C", ""},
{"D", kTpu2},
{"E", kTpu2},
}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/device_propagation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/device_propagation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7a493795-3ee7-4cb2-a7b7-639d43aba311 | cpp | tensorflow/tensorflow | softmax | tensorflow/compiler/tf2tensorrt/convert/ops/softmax.cc | tensorflow/lite/delegates/xnnpack/softmax_test.cc | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.h"
#include "tensorflow/compiler/tf2tensorrt/convert/ops/layer_utils.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
class ConvertSoftmax : public OpConverterBase<ConvertSoftmax> {
public:
explicit ConvertSoftmax(const OpConverterParams *params)
: OpConverterBase<ConvertSoftmax>(params) {}
  // TensorRT softmax supports float and half logits only. The array size
  // matches the initializer count so no DT_INVALID slot is value-initialized.
  static constexpr std::array<DataType, 2> AllowedDataTypes() {
    return {DataType::DT_FLOAT, DataType::DT_HALF};
  }
static constexpr std::array<InputArgSpec, 1> InputSpec() {
return std::array<InputArgSpec, 1>{
InputArgSpec::Create("logits", TrtInputArg::kTensor)};
}
Status Validate() {
const auto ¶ms = *this->params_;
const auto &inputs = params.inputs;
ITensorProxyPtr logits_tensor = inputs.at(0).tensor();
const int num_trt_dims = logits_tensor->getDimensions().nbDims;
    // In implicit-batch mode a rank-0 TRT tensor means the only dimension is
    // the implicit batch dimension, which softmax must not reduce over.
    if (!num_trt_dims && params.use_implicit_batch) {
return errors::InvalidArgument(
"TensorRT Softmax cannot apply on the batch dimension");
}
return OkStatus();
}
Status Convert() {
const auto ¶ms = *this->params_;
const auto &inputs = params.inputs;
const auto &node_def = params.node_def;
ITensorProxyPtr logits_tensor = inputs.at(0).tensor();
const int num_trt_dims = logits_tensor->getDimensions().nbDims;
nvinfer1::ISoftMaxLayer *layer =
params.converter->network()->addSoftMax(*logits_tensor->trt_tensor());
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params.converter->SetLayerName(layer, node_def);
    // setAxes takes a bitmask of reduction axes; select the last dimension.
    layer->setAxes(1 << (num_trt_dims - 1));
ITensorProxyPtr output_tensor = layer->getOutput(0);
params.outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
};
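// Editorial helper (not part of the upstream converter): the axes argument
// computed in Convert() above, isolated for clarity. TensorRT expects a
// bitmask of dimensions, so a 4-D tensor yields 1 << 3 == 0b1000, i.e.
// softmax reduces over the innermost dimension only.
inline unsigned SoftmaxAxisMask(int num_trt_dims) {
  return 1u << (num_trt_dims - 1);
}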
REGISTER_DEFAULT_TRT_OP_CONVERTER(MakeConverterFunction<ConvertSoftmax>(),
"Softmax");
}
}
}
#endif | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/softmax_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
TEST(Softmax, 4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
SoftmaxTester()
.Shape({batch, height, width, channels})
.Test(xnnpack_delegate.get());
}
TEST(Softmax, 3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
SoftmaxTester().Shape({batch, width, channels}).Test(xnnpack_delegate.get());
}
TEST(Softmax, 2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
SoftmaxTester().Shape({batch, channels}).Test(xnnpack_delegate.get());
}
TEST(Softmax, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
SoftmaxTester().Shape({batch}).Test(xnnpack_delegate.get());
}
TEST(Softmax, DISABLED_Beta) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
SoftmaxTester()
.Shape({batch, height, width, channels})
.Beta(0.1f)
.Test(xnnpack_delegate.get());
SoftmaxTester()
.Shape({batch, height, width, channels})
.Beta(10.0f)
.Test(xnnpack_delegate.get());
}
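// Editorial note (an assumption, not stated in this file): XNNPACK's softmax
// path targets the default beta of 1.0, which is presumably why the Beta test
// above is DISABLED_: with a non-unit beta the node would not be delegated.
// If support lands, re-enabling is just dropping the prefix; the tester call
// keeps the same shape, e.g.:
//
//   SoftmaxTester().Shape({2, 3, 4, 5}).Beta(0.1f).Test(xnnpack_delegate.get());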
TEST(Softmax, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
SoftmaxTester()
.Shape({batch, height, width, channels})
.Test(xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/ops/softmax.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/softmax_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f03a3830-3544-489a-837c-11b8e6cc9e8f | cpp | google/quiche | load_balancer_server_id | quiche/quic/load_balancer/load_balancer_server_id.cc | quiche/quic/load_balancer/load_balancer_server_id_test.cc | #include "quiche/quic/load_balancer/load_balancer_server_id.h"
#include <array>
#include <cstdint>
#include <cstring>
#include <string>
#include "absl/strings/escaping.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
namespace quic {
LoadBalancerServerId::LoadBalancerServerId(absl::string_view data)
: LoadBalancerServerId(absl::MakeSpan(
reinterpret_cast<const uint8_t*>(data.data()), data.length())) {}
// Copies up to kLoadBalancerMaxServerIdLen bytes. Out-of-range lengths raise
// a QUIC_BUG and leave the object invalid (zero length).
LoadBalancerServerId::LoadBalancerServerId(absl::Span<const uint8_t> data)
    : length_(data.length()) {
if (length_ == 0 || length_ > kLoadBalancerMaxServerIdLen) {
QUIC_BUG(quic_bug_433312504_02)
<< "Attempted to create LoadBalancerServerId with length "
<< static_cast<int>(length_);
length_ = 0;
return;
}
memcpy(data_.data(), data.data(), data.length());
}
void LoadBalancerServerId::set_length(uint8_t length) {
QUIC_BUG_IF(quic_bug_599862571_01,
length == 0 || length > kLoadBalancerMaxServerIdLen)
<< "Attempted to set LoadBalancerServerId length to "
<< static_cast<int>(length);
length_ = length;
}
std::string LoadBalancerServerId::ToString() const {
return absl::BytesToHexString(
absl::string_view(reinterpret_cast<const char*>(data_.data()), length_));
}
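// Usage sketch (editorial, not part of this file; it follows the same
// construction pattern as the unit tests): build a 4-byte server id from raw
// bytes and render it as lowercase hex.
//
//   LoadBalancerServerId id({0xde, 0xad, 0xbe, 0xef});
//   QUICHE_DCHECK(id.IsValid());
//   // id.ToString() == "deadbeef"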
} | #include "quiche/quic/load_balancer/load_balancer_server_id.h"
#include <cstdint>
#include <cstring>
#include "absl/hash/hash_testing.h"
#include "absl/types/span.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace test {
namespace {
class LoadBalancerServerIdTest : public QuicTest {};
constexpr uint8_t kRawServerId[] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
0x0c, 0x0d, 0x0e, 0x0f};
TEST_F(LoadBalancerServerIdTest, CreateReturnsNullIfTooLong) {
EXPECT_QUIC_BUG(EXPECT_FALSE(LoadBalancerServerId(
absl::Span<const uint8_t>(kRawServerId, 16))
.IsValid()),
"Attempted to create LoadBalancerServerId with length 16");
EXPECT_QUIC_BUG(
EXPECT_FALSE(LoadBalancerServerId(absl::Span<const uint8_t>()).IsValid()),
"Attempted to create LoadBalancerServerId with length 0");
}
TEST_F(LoadBalancerServerIdTest, CompareIdenticalExceptLength) {
LoadBalancerServerId server_id(absl::Span<const uint8_t>(kRawServerId, 15));
ASSERT_TRUE(server_id.IsValid());
EXPECT_EQ(server_id.length(), 15);
LoadBalancerServerId shorter_server_id(
absl::Span<const uint8_t>(kRawServerId, 5));
ASSERT_TRUE(shorter_server_id.IsValid());
EXPECT_EQ(shorter_server_id.length(), 5);
EXPECT_TRUE(shorter_server_id < server_id);
EXPECT_FALSE(server_id < shorter_server_id);
EXPECT_FALSE(shorter_server_id == server_id);
}
TEST_F(LoadBalancerServerIdTest, AccessorFunctions) {
LoadBalancerServerId server_id(absl::Span<const uint8_t>(kRawServerId, 5));
EXPECT_TRUE(server_id.IsValid());
EXPECT_EQ(server_id.length(), 5);
EXPECT_EQ(memcmp(server_id.data().data(), kRawServerId, 5), 0);
EXPECT_EQ(server_id.ToString(), "0001020304");
}
TEST_F(LoadBalancerServerIdTest, CompareDifferentServerIds) {
LoadBalancerServerId server_id(absl::Span<const uint8_t>(kRawServerId, 5));
ASSERT_TRUE(server_id.IsValid());
LoadBalancerServerId reverse({0x0f, 0x0e, 0x0d, 0x0c, 0x0b});
ASSERT_TRUE(reverse.IsValid());
EXPECT_TRUE(server_id < reverse);
LoadBalancerServerId long_server_id(
absl::Span<const uint8_t>(kRawServerId, 15));
EXPECT_TRUE(long_server_id < reverse);
}
TEST_F(LoadBalancerServerIdTest, EqualityOperators) {
LoadBalancerServerId server_id(absl::Span<const uint8_t>(kRawServerId, 15));
ASSERT_TRUE(server_id.IsValid());
LoadBalancerServerId shorter_server_id(
absl::Span<const uint8_t>(kRawServerId, 5));
ASSERT_TRUE(shorter_server_id.IsValid());
EXPECT_FALSE(server_id == shorter_server_id);
LoadBalancerServerId server_id2 = server_id;
EXPECT_TRUE(server_id == server_id2);
}
TEST_F(LoadBalancerServerIdTest, SupportsHash) {
LoadBalancerServerId server_id(absl::Span<const uint8_t>(kRawServerId, 15));
ASSERT_TRUE(server_id.IsValid());
LoadBalancerServerId shorter_server_id(
absl::Span<const uint8_t>(kRawServerId, 5));
ASSERT_TRUE(shorter_server_id.IsValid());
LoadBalancerServerId different_server_id({0x0f, 0x0e, 0x0d, 0x0c, 0x0b});
ASSERT_TRUE(different_server_id.IsValid());
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({
server_id,
shorter_server_id,
different_server_id,
}));
}
TEST_F(LoadBalancerServerIdTest, SetLengthInvalid) {
LoadBalancerServerId server_id;
EXPECT_QUIC_BUG(server_id.set_length(16),
"Attempted to set LoadBalancerServerId length to 16");
EXPECT_QUIC_BUG(server_id.set_length(0),
"Attempted to set LoadBalancerServerId length to 0");
server_id.set_length(1);
EXPECT_EQ(server_id.length(), 1);
server_id.set_length(15);
EXPECT_EQ(server_id.length(), 15);
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/load_balancer/load_balancer_server_id.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/load_balancer/load_balancer_server_id_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
642de5a1-8571-42ec-86b3-0c039e9b3c62 | cpp | tensorflow/tensorflow | device_compilation_profiler | tensorflow/compiler/jit/device_compilation_profiler.cc | tensorflow/compiler/jit/device_compilation_profiler_test.cc | #include "tensorflow/compiler/jit/device_compilation_profiler.h"
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/jit/xla_activity.pb.h"
#include "tensorflow/compiler/jit/xla_activity_listener.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/mutex.h"
namespace tensorflow {
namespace {
// A cluster is "megamorphic" when it has been compiled many times relative to
// how often it runs; such compilations are unlikely to ever amortize.
bool ShouldBeMegamorphic(int64_t compile_count, int64_t execution_count) {
const int64_t kCompileThreshold = 10;
const int64_t kMinExecutionsPerCompile = 50;
return compile_count > kCompileThreshold &&
execution_count < kMinExecutionsPerCompile * compile_count;
}
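// Worked example of the heuristic above (editorial, not upstream code):
//
//   ShouldBeMegamorphic(/*compile_count=*/11, /*execution_count=*/100);   // true
//   ShouldBeMegamorphic(/*compile_count=*/11, /*execution_count=*/1000);  // false
//
// 11 > 10 compilations, and 100 < 50 * 11 = 550 executions, so the first
// cluster is megamorphic; 1000 >= 550 keeps the second one normal.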
// Bumps the cluster's execution count and, if the megamorphic heuristic now
// trips, latches is_megamorphic (the flag is never reset).
void RegisterExecutionForCluster(
    const NameAttrList& function,
    DeviceCompilationProfiler::ClusterCompileStats* stats) {
++stats->execution_count;
if (!stats->is_megamorphic &&
ShouldBeMegamorphic(stats->compile_count, stats->execution_count)) {
VLOG(1) << "Marking " << function.name()
<< " as megamorphic, compile_count=" << stats->compile_count
<< " execution_count=" << stats->execution_count;
stats->is_megamorphic = true;
}
}
// Number of requests after which a kLazy cluster becomes eligible to compile.
constexpr int64_t kDefaultCompilationThreshold = 2;
// Cap on concurrent kAsync compilations, sized to the compiler thread pool.
constexpr int64_t kMaxNumOngoingCompilations = kNumAsyncDeviceCompilerThreads;
}
DeviceCompilationProfiler::~DeviceCompilationProfiler() {
mutex_lock lock(mu_);
cluster_compile_stats_.clear();
}
absl::StatusOr<DeviceCompilationProfiler::ClusterCompileStats>
DeviceCompilationProfiler::GetCompileStats(const NameAttrList& function) const {
mutex_lock lock(mu_);
if (auto it = cluster_compile_stats_.find(function.name());
it != cluster_compile_stats_.end()) {
return it->second;
}
return errors::NotFound("Couldn't find compilation stats for cluster: ",
function.name());
}
void DeviceCompilationProfiler::RegisterExecution(
const NameAttrList& function) {
mutex_lock lock(mu_);
auto it =
cluster_compile_stats_.emplace(function.name(), ClusterCompileStats{})
.first;
RegisterExecutionForCluster(function, &it->second);
}
Status DeviceCompilationProfiler::RegisterCompilation(
const NameAttrList& function, int64_t compile_time_us,
bool used_persistent_cache) {
metrics::UpdateXlaCompilationTime(compile_time_us);
const std::string& function_name = function.name();
mutex_lock lock(mu_);
auto it =
cluster_compile_stats_.emplace(function.name(), ClusterCompileStats{})
.first;
  // Whole seconds, used only for the human-readable log line below.
  const uint64 compile_time_s = compile_time_us / 1.0e6;
it->second.compile_count++;
it->second.cumulative_compile_time_us += compile_time_us;
VLOG(1) << "Compiled " << function_name << " " << it->second.compile_count
<< " times, compile time: " << compile_time_us
<< " us, cumulative: " << it->second.cumulative_compile_time_us
<< " us ("
<< tensorflow::strings::HumanReadableElapsedTime(compile_time_s)
<< " / "
<< tensorflow::strings::HumanReadableElapsedTime(
it->second.cumulative_compile_time_us / 1.0e6)
<< ")";
XlaJitCompilationActivity jit_compilation_activity;
jit_compilation_activity.set_cluster_name(function_name);
jit_compilation_activity.set_compile_count(it->second.compile_count);
jit_compilation_activity.set_compile_time_us(compile_time_us);
jit_compilation_activity.set_cumulative_compile_time_us(
it->second.cumulative_compile_time_us);
jit_compilation_activity.set_used_persistent_cache(used_persistent_cache);
return BroadcastXlaActivity(std::move(jit_compilation_activity));
}
bool DeviceCompilationProfiler::ShouldCompileCluster(
const NameAttrList& function, DeviceCompileMode compile_mode,
int64_t current_request_count) {
std::optional<int64_t> compile_threshold;
  if (compile_mode == DeviceCompileMode::kLazy) {
    compile_threshold = kDefaultCompilationThreshold;
  } else if (compile_mode == DeviceCompileMode::kAsync) {
    compile_threshold = 0;  // compile whenever capacity allows
  }
  if (compile_mode == DeviceCompileMode::kStrict) {
    // Strict mode always compiles, with no profiling-based gating.
    return true;
  }
mutex_lock lock(mu_);
auto [it, cluster_not_found] =
cluster_compile_stats_.emplace(function.name(), ClusterCompileStats{});
if (cluster_not_found) {
RegisterExecutionForCluster(function, &it->second);
}
if (it->second.is_megamorphic) {
BroadcastOptimizationRemark(XlaOptimizationRemark::MEGAMORPHIC_FUNCTION,
function.name())
.IgnoreError();
VLOG(2) << "Not compiling cluster " << function.name()
<< " because it is megamorphic.";
return false;
}
  // Always compile a cluster on its very first execution.
  if (it->second.execution_count == 1) {
    return true;
  }
if (compile_mode == DeviceCompileMode::kAsync) {
if (num_ongoing_compilations_ >= kMaxNumOngoingCompilations) {
VLOG(2) << "Not asynchronously compiling cluster " << function.name()
<< " because of too many ongoing compilations.";
return false;
}
}
bool reached_compile_threshold = current_request_count >= *compile_threshold;
if (!reached_compile_threshold) {
VLOG(2) << "Not compiling cluster " << function.name()
<< " because it has not reached compile threshold; threshold is "
<< *compile_threshold << " execution count "
<< current_request_count << ".";
}
return reached_compile_threshold;
}
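// Usage sketch (editorial; it mirrors the unit tests rather than production
// call sites, and assumes a Status-returning caller): gate a lazy compilation
// on the profiler's decision, then record the result.
//
//   DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler();
//   core::ScopedUnref profiler_ref(profiler);
//   profiler->RegisterExecution(function);
//   if (profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy,
//                                      request_count)) {
//     // ... run the compiler, then:
//     TF_RETURN_IF_ERROR(profiler->RegisterCompilation(
//         function, /*compile_time_us=*/elapsed_us,
//         /*used_persistent_cache=*/false));
//   }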
void DeviceCompilationProfiler::IncrementOngoingAsyncCompilations() {
mutex_lock lock(mu_);
num_ongoing_compilations_++;
}
void DeviceCompilationProfiler::DecrementOngoingAsyncCompilations() {
mutex_lock lock(mu_);
num_ongoing_compilations_--;
}
int64_t DeviceCompilationProfiler::GetNumOngoingAsyncCompilations() const {
mutex_lock lock(mu_);
return num_ongoing_compilations_;
}
std::string DeviceCompilationProfiler::DebugString() const {
std::string debug_string =
"DeviceCompilationProfiler {\ncluster_compile_stats: {\n";
{
mutex_lock lock(mu_);
for (const auto& [key, stats] : cluster_compile_stats_) {
absl::StrAppend(&debug_string, key, ": ", stats.DebugString(), "\n");
}
}
absl::StrAppend(&debug_string, "}\nnum_ongoing_compilations=",
GetNumOngoingAsyncCompilations(), "\n}\n");
return debug_string;
}
} | #include "tensorflow/compiler/jit/device_compilation_profiler.h"
#include <memory>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/compiler/jit/tests/device_compiler_test_helper.h"
#include "tensorflow/compiler/jit/xla_activity.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
namespace tensorflow {
namespace {
TEST(DeviceCompilationProfilerTest, RegisterExecution) {
DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler();
core::ScopedUnref profiler_ref(profiler);
NameAttrList function;
function.set_name("TestFunc");
for (int i = 0; i < 5; ++i) {
profiler->RegisterExecution(function);
}
TF_ASSERT_OK_AND_ASSIGN(auto stats, profiler->GetCompileStats(function));
EXPECT_EQ(stats.execution_count, 5);
}
TEST(DeviceCompilationProfilerTest, RegisterCompilation) {
DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler();
core::ScopedUnref profiler_ref(profiler);
auto listener = std::make_unique<JitCompilationListener>();
auto listener_ptr = listener.get();
RegisterXlaActivityListener(std::move(listener));
NameAttrList function;
function.set_name("TestFunc");
std::vector<XlaJitCompilationActivity> expected_activities;
for (int i = 0; i < 5; ++i) {
EXPECT_TRUE(profiler->RegisterCompilation(function, 4, false).ok());
TF_ASSERT_OK_AND_ASSIGN(auto stats, profiler->GetCompileStats(function));
XlaJitCompilationActivity expected_activity;
expected_activity.set_cluster_name(function.name());
expected_activity.set_compile_count(stats.compile_count);
expected_activity.set_compile_time_us(4);
expected_activity.set_cumulative_compile_time_us(
stats.cumulative_compile_time_us);
expected_activity.set_used_persistent_cache(false);
expected_activities.push_back(expected_activity);
}
TF_ASSERT_OK_AND_ASSIGN(auto stats, profiler->GetCompileStats(function));
EXPECT_EQ(stats.compile_count, 5);
EXPECT_EQ(stats.cumulative_compile_time_us, 5 * 4);
const auto& actual_activities = listener_ptr->GetListenerHistory();
EXPECT_EQ(actual_activities.size(), expected_activities.size());
for (size_t i = 0; i < actual_activities.size(); ++i) {
EXPECT_EQ(actual_activities[i].SerializeAsString(),
expected_activities[i].SerializeAsString());
}
}
TEST(DeviceCompilationProfilerTest, OngoingAsyncCompilations) {
DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler();
core::ScopedUnref profiler_ref(profiler);
for (int i = 0; i < 5; ++i) {
profiler->IncrementOngoingAsyncCompilations();
}
EXPECT_EQ(profiler->GetNumOngoingAsyncCompilations(), 5);
for (int i = 0; i < 5; ++i) {
profiler->DecrementOngoingAsyncCompilations();
}
EXPECT_EQ(profiler->GetNumOngoingAsyncCompilations(), 0);
for (int i = 0; i < 5; ++i) {
profiler->IncrementOngoingAsyncCompilations();
profiler->DecrementOngoingAsyncCompilations();
}
EXPECT_EQ(profiler->GetNumOngoingAsyncCompilations(), 0);
}
TEST(DeviceCompilationProfilerTest, ShouldCompileClusterNotFound) {
DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler();
core::ScopedUnref profiler_ref(profiler);
NameAttrList function;
function.set_name("TestFunc");
EXPECT_TRUE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0));
EXPECT_TRUE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy, 0));
EXPECT_TRUE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kStrict, 0));
}
TEST(DeviceCompilationProfilerTest, ShouldCompileClusterFirstExecution) {
DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler();
core::ScopedUnref profiler_ref(profiler);
NameAttrList function;
function.set_name("TestFunc");
profiler->RegisterExecution(function);
EXPECT_TRUE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0));
EXPECT_TRUE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy, 0));
}
TEST(DeviceCompilationProfilerTest, ShouldCompileClusterMegamorphic) {
DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler();
core::ScopedUnref profiler_ref(profiler);
NameAttrList function;
function.set_name("TestFunc");
const int64_t kCompileThreshold = 10;
const int64_t kMinExecutionsPerCompile = 50;
for (int i = 0; i < kCompileThreshold + 1; ++i) {
EXPECT_TRUE(profiler->RegisterCompilation(function, 1, false).ok());
}
profiler->RegisterExecution(function);
EXPECT_FALSE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0));
EXPECT_FALSE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy, 0));
TF_ASSERT_OK_AND_ASSIGN(auto stats, profiler->GetCompileStats(function));
EXPECT_TRUE(stats.is_megamorphic);
EXPECT_TRUE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kStrict, 0));
for (int i = 0; i < kCompileThreshold * kMinExecutionsPerCompile + 1; ++i) {
profiler->RegisterExecution(function);
}
EXPECT_FALSE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0));
EXPECT_FALSE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy, 0));
TF_ASSERT_OK_AND_ASSIGN(stats, profiler->GetCompileStats(function));
EXPECT_TRUE(stats.is_megamorphic);
EXPECT_TRUE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kStrict, 0));
}
TEST(DeviceCompilationProfilerTest, ShouldCompileClusterAsync) {
DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler();
core::ScopedUnref profiler_ref(profiler);
NameAttrList function;
function.set_name("TestFunc");
const int64_t kMaxNumOngoingCompilations = 10;
for (int i = 0; i < kMaxNumOngoingCompilations; ++i) {
profiler->IncrementOngoingAsyncCompilations();
}
profiler->RegisterExecution(function);
EXPECT_TRUE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0));
profiler->RegisterExecution(function);
EXPECT_FALSE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0));
profiler->DecrementOngoingAsyncCompilations();
EXPECT_TRUE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0));
}
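// Lazy compilation defers until the per-cluster request count reaches the
// default compilation threshold, except for the very first execution.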
TEST(DeviceCompilationProfilerTest, ShouldCompileClusterLazy) {
DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler();
core::ScopedUnref profiler_ref(profiler);
NameAttrList function;
function.set_name("TestFunc");
constexpr int64_t kDefaultCompilationThreshold = 2;
profiler->RegisterExecution(function);
EXPECT_TRUE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy, 0));
profiler->RegisterExecution(function);
for (int current_request_count = 0;
current_request_count < kDefaultCompilationThreshold;
++current_request_count) {
EXPECT_FALSE(profiler->ShouldCompileCluster(
function, DeviceCompileMode::kLazy, current_request_count));
}
EXPECT_TRUE(profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy,
kDefaultCompilationThreshold));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_compilation_profiler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_compilation_profiler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
24bb0006-b05f-484b-99d7-4f82b5dc15ee | cpp | google/tensorstore | executor | tensorstore/util/executor.h | tensorstore/util/executor_test.cc | #ifndef TENSORSTORE_UTIL_EXECUTOR_H_
#define TENSORSTORE_UTIL_EXECUTOR_H_
#include <functional>
#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/functional/any_invocable.h"
#include "absl/meta/type_traits.h"
#include "tensorstore/internal/poly/poly.h"
#include "tensorstore/internal/type_traits.h"
namespace tensorstore {
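// A task is any move-invocable void() callable; an Executor is a type-erased
// callable that accepts and runs such tasks.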
using ExecutorTask = absl::AnyInvocable<void() &&>;
using Executor = poly::Poly<0, true, void(ExecutorTask) const>;
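// Executor that runs each task immediately on the calling thread, e.g.:
//   Executor executor = InlineExecutor{};
//   executor([] { /* runs inline */ });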
class InlineExecutor {
public:
template <typename Func>
void operator()(Func&& func) const {
std::forward<Func>(func)();
}
};
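// Wraps a function so that each invocation binds its arguments and submits
// the resulting task to the executor instead of running it directly.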
template <typename ExecutorType, typename FunctionType>
class ExecutorBoundFunction {
public:
using Executor = ExecutorType;
using Function = FunctionType;
template <typename... T>
std::enable_if_t<std::is_invocable_v<Function&, T...>>
operator()(T&&... arg) {
executor(std::bind(std::move(function), std::forward<T>(arg)...));
}
template <typename... T>
std::enable_if_t<std::is_invocable_v<const Function&, T...>> operator()(
T&&... arg) const {
executor(std::bind(function, std::forward<T>(arg)...));
}
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS Executor executor;
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS Function function;
};
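// Binds `function` to `executor`. The InlineExecutor overload forwards the
// function unchanged, since wrapping it would add no value.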
template <typename Executor, typename Function>
std::enable_if_t<
!std::is_same_v<absl::remove_cvref_t<Executor>, InlineExecutor>,
ExecutorBoundFunction<absl::remove_cvref_t<Executor>,
absl::remove_cvref_t<Function>>>
WithExecutor(Executor&& executor, Function&& function) {
return {std::forward<Executor>(executor), std::forward<Function>(function)};
}
template <typename Executor, typename Function>
std::enable_if_t<std::is_same_v<absl::remove_cvref_t<Executor>, InlineExecutor>,
Function&&>
WithExecutor(Executor&& executor, Function&& function) {
return std::forward<Function>(function);
}
}
#endif | #include "tensorstore/util/executor.h"
#include <functional>
#include <memory>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::Executor;
using ::tensorstore::InlineExecutor;
using ::tensorstore::WithExecutor;
TEST(InlineExecutorTest, Basic) {
Executor executor = InlineExecutor{};
bool invoked = false;
executor([&] { invoked = true; });
EXPECT_TRUE(invoked);
}
TEST(WithExecutorTest, NonConst) {
InlineExecutor executor;
bool invoked = false;
struct Func {
void operator()(bool* x) const = delete;
void operator()(bool* x) { *x = true; }
};
auto with_executor = WithExecutor(executor, Func{});
with_executor(&invoked);
EXPECT_TRUE(invoked);
}
TEST(WithExecutorTest, Const) {
InlineExecutor executor;
bool invoked = false;
struct Func {
void operator()(bool* x) const { *x = true; }
void operator()(bool*) = delete;
};
const auto with_executor = WithExecutor(executor, Func{});
with_executor(&invoked);
EXPECT_TRUE(invoked);
}
TEST(ExecutorTest, MoveOnly) {
Executor executor = InlineExecutor{};
int value = 0;
executor(std::bind([&](const std::unique_ptr<int>& ptr) { value = *ptr; },
std::make_unique<int>(3)));
EXPECT_EQ(3, value);
}
TEST(WithExecutorTest, MoveOnly) {
Executor executor = InlineExecutor{};
int value = 0;
auto with_executor = WithExecutor(
executor,
std::bind([&](const std::unique_ptr<int>& ptr) { value = *ptr; },
std::make_unique<int>(3)));
with_executor();
EXPECT_EQ(3, value);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/executor.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/executor_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
e5eb671c-fe51-4224-9402-beab750cbaee | cpp | tensorflow/tensorflow | float8 | tensorflow/core/platform/float8.h | third_party/xla/xla/tests/float8_test.cc | #ifndef TENSORFLOW_CORE_PLATFORM_FLOAT8_H_
#define TENSORFLOW_CORE_PLATFORM_FLOAT8_H_
#include "tsl/platform/ml_dtypes.h"
namespace tensorflow {
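// Aliases for the 8-bit floating point types provided by tsl/ml_dtypes.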
typedef tsl::float8_e4m3fn float8_e4m3fn;
typedef tsl::float8_e5m2 float8_e5m2;
}
#endif | #include <cmath>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "xla/hlo/builder/xla_builder.h"
#include "xla/test.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
#include "tsl/platform/ml_dtypes.h"
namespace xla {
namespace {
template <typename T>
class Float8Test : public ClientLibraryTestBase {};
using DataTypes = ::testing::Types<tsl::float8_e5m2, tsl::float8_e4m3,
tsl::float8_e4m3fn, tsl::float8_e3m4>;
TYPED_TEST_SUITE(Float8Test, DataTypes);
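// Each test below runs once per 8-bit float format listed above.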
XLA_TYPED_TEST(Float8Test, ScalarOperation) {
XlaBuilder builder(this->TestName());
auto x = ConstantR0<TypeParam>(&builder, static_cast<TypeParam>(2.0f));
auto y = ConstantR0<TypeParam>(&builder, static_cast<TypeParam>(1.0f));
Add(x, y);
this->template ComputeAndCompareR0<TypeParam>(
&builder, static_cast<TypeParam>(3.0f), {});
}
XLA_TYPED_TEST(Float8Test, LogOperation) {
XlaBuilder builder(this->TestName());
auto x = ConstantR0<TypeParam>(&builder, static_cast<TypeParam>(4.0f));
Log(x);
this->template ComputeAndCompareR0<TypeParam>(
&builder, static_cast<TypeParam>(1.387f), {});
}
XLA_TYPED_TEST(Float8Test, CompareOperation) {
XlaBuilder builder(this->TestName());
auto x = ConstantR1<TypeParam>(&builder, {TypeParam{1.0}, TypeParam{2.0}});
auto y = ConstantR1<TypeParam>(&builder, {TypeParam{1.0}, TypeParam{3.0}});
Eq(x, y);
this->template ComputeAndCompareR1<bool>(&builder, {true, false}, {});
}
XLA_TYPED_TEST(Float8Test, DotOperation) {
XlaBuilder builder(this->TestName());
auto x = ConstantR2<TypeParam>(&builder, {{TypeParam{0.0}, TypeParam{1.0}},
{TypeParam{2.0}, TypeParam{3.0}}});
auto y = ConstantR2<TypeParam>(&builder, {{TypeParam{3.0}, TypeParam{2.0}},
{TypeParam{1.0}, TypeParam{0.0}}});
Dot(x, y);
this->template ComputeAndCompareR2<TypeParam>(
&builder,
{{TypeParam{1.0}, TypeParam{0.0}}, {TypeParam{9.0}, TypeParam{4.0}}}, {});
}
XLA_TYPED_TEST(Float8Test, NegateScalar) {
XlaBuilder builder(this->TestName());
Neg(ConstantR0<TypeParam>(&builder, static_cast<TypeParam>(2.0f)));
this->template ComputeAndCompareR0<TypeParam>(
&builder, static_cast<TypeParam>(-2.0f), {});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/float8.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/float8_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2c0d47c7-2213-4e44-90ad-237902404512 | cpp | google/cel-cpp | timestamp_type | common/types/timestamp_type.h | common/types/timestamp_type_test.cc | #ifndef THIRD_PARTY_CEL_CPP_COMMON_TYPES_TIMESTAMP_TYPE_H_
#define THIRD_PARTY_CEL_CPP_COMMON_TYPES_TIMESTAMP_TYPE_H_
#include <ostream>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "common/type_kind.h"
namespace cel {
class Type;
class TypeParameters;
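// Monostate type representing the CEL `timestamp` type,
// i.e. `google.protobuf.Timestamp`. All instances compare equal and hash
// identically.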
class TimestampType final {
public:
static constexpr TypeKind kKind = TypeKind::kTimestamp;
static constexpr absl::string_view kName = "google.protobuf.Timestamp";
TimestampType() = default;
TimestampType(const TimestampType&) = default;
TimestampType(TimestampType&&) = default;
TimestampType& operator=(const TimestampType&) = default;
TimestampType& operator=(TimestampType&&) = default;
static TypeKind kind() { return kKind; }
static absl::string_view name() { return kName; }
static TypeParameters GetParameters();
static std::string DebugString() { return std::string(name()); }
constexpr void swap(TimestampType&) noexcept {}
};
inline constexpr void swap(TimestampType& lhs, TimestampType& rhs) noexcept {
lhs.swap(rhs);
}
inline constexpr bool operator==(TimestampType, TimestampType) { return true; }
inline constexpr bool operator!=(TimestampType lhs, TimestampType rhs) {
return !operator==(lhs, rhs);
}
template <typename H>
H AbslHashValue(H state, TimestampType) {
return std::move(state);
}
inline std::ostream& operator<<(std::ostream& out, const TimestampType& type) {
return out << type.DebugString();
}
}
#endif | #include <sstream>
#include "absl/hash/hash.h"
#include "common/type.h"
#include "internal/testing.h"
namespace cel {
namespace {
TEST(TimestampType, Kind) {
EXPECT_EQ(TimestampType().kind(), TimestampType::kKind);
EXPECT_EQ(Type(TimestampType()).kind(), TimestampType::kKind);
}
TEST(TimestampType, Name) {
EXPECT_EQ(TimestampType().name(), TimestampType::kName);
EXPECT_EQ(Type(TimestampType()).name(), TimestampType::kName);
}
TEST(TimestampType, DebugString) {
{
std::ostringstream out;
out << TimestampType();
EXPECT_EQ(out.str(), TimestampType::kName);
}
{
std::ostringstream out;
out << Type(TimestampType());
EXPECT_EQ(out.str(), TimestampType::kName);
}
}
TEST(TimestampType, Hash) {
EXPECT_EQ(absl::HashOf(TimestampType()), absl::HashOf(TimestampType()));
}
TEST(TimestampType, Equal) {
EXPECT_EQ(TimestampType(), TimestampType());
EXPECT_EQ(Type(TimestampType()), TimestampType());
EXPECT_EQ(TimestampType(), Type(TimestampType()));
EXPECT_EQ(Type(TimestampType()), Type(TimestampType()));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/timestamp_type.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/timestamp_type_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
34e66ece-9ff8-4b9c-9545-dcb0094f4b53 | cpp | tensorflow/tensorflow | partitioning_utils | tensorflow/core/common_runtime/partitioning_utils.cc | tensorflow/core/common_runtime/partitioning_utils_test.cc | #include "tensorflow/core/common_runtime/partitioning_utils.h"
#include <algorithm>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <unordered_map>
#include <utility>
#include "tensorflow/core/common_runtime/arg_ret_placement.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_partition.h"
namespace tensorflow {
namespace {
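// Shared implementation: partitions `graph` into one GraphDef per location,
// using `node_to_loc` when provided and the assigned device name otherwise.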
Status PartitionFunctionGraph(
const DeviceSet& device_set, Graph* graph,
std::unordered_map<string, GraphDef>* partitions,
std::function<string(const Node*)> node_to_loc,
std::function<string(const Edge*)> get_tensor_name_attr) {
PartitionOptions partition_options;
if (node_to_loc != nullptr) {
partition_options.node_to_loc = node_to_loc;
} else {
partition_options.node_to_loc = [](const Node* node) {
return node->assigned_device_name();
};
}
int64_t edge_name_counter = 0;
partition_options.new_name = [&edge_name_counter](const string& prefix) {
return strings::StrCat(prefix, "/_", ++edge_name_counter);
};
partition_options.get_incarnation =
[&device_set](const string& name) -> int64 {
const Device* d = device_set.FindDeviceByName(name);
if (d == nullptr) {
return PartitionOptions::kIllegalIncarnation;
} else {
return d->attributes().incarnation();
}
};
partition_options.control_flow_added = false;
partition_options.get_tensor_name_attr = get_tensor_name_attr;
partition_options.can_make_destructive_changes = true;
return Partition(partition_options, graph, partitions);
}
struct SendRecvPair {
Node* send_node = nullptr;
Node* recv_node = nullptr;
};
constexpr char kTensorNameAttr[] = "tensor_name";
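// Pairs up _Send/_Recv nodes by their "tensor_name" attribute and adds a
// control edge from each Send to its matching Recv.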
Status MakeSendRecvDependencyExplicit(Graph* graph) {
absl::flat_hash_map<std::string, SendRecvPair> send_recv_pairs;
for (Node* node : graph->op_nodes()) {
if (node->IsSend() || node->IsRecv()) {
auto tensor_name_it = node->def().attr().find(kTensorNameAttr);
if (tensor_name_it == node->def().attr().end()) {
return errors::Internal(
"'", kTensorNameAttr,
"' attribute is not found from node: ", node->DebugString());
}
if (node->IsSend()) {
send_recv_pairs[tensor_name_it->second.s()].send_node = node;
} else {
send_recv_pairs[tensor_name_it->second.s()].recv_node = node;
}
}
}
for (const auto& [tensor_name, send_recv_pair] : send_recv_pairs) {
if (send_recv_pair.send_node == nullptr ||
send_recv_pair.recv_node == nullptr) {
return errors::Internal(
"No matching Send/Recv nodes found for tensor_name = ", tensor_name);
}
graph->AddControlEdge(send_recv_pair.send_node, send_recv_pair.recv_node);
}
return absl::OkStatus();
}
}
Status PartitionFunctionGraph(
const DeviceSet& device_set, std::unique_ptr<Graph> graph,
std::unordered_map<string, std::unique_ptr<Graph>>* subgraphs,
std::function<string(const Edge*)> get_tensor_name_attr) {
std::unordered_map<string, GraphDef> partitions;
TF_RETURN_IF_ERROR(
PartitionFunctionGraph(device_set, graph.get(), &partitions,
                             /*node_to_loc=*/nullptr, get_tensor_name_attr));
const OpRegistryInterface* default_registry =
graph->flib_def().default_registry();
graph.reset();
for (auto& partition : partitions) {
const string& device = partition.first;
GraphDef& graph_def = partition.second;
auto subgraph = std::make_unique<Graph>(default_registry);
GraphConstructorOptions opts;
opts.allow_internal_ops = true;
opts.expect_device_spec = true;
TF_RETURN_IF_ERROR(
ConvertGraphDefToGraph(opts, std::move(graph_def), subgraph.get()));
subgraphs->emplace(device, std::move(subgraph));
}
return absl::OkStatus();
}
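// If the graph spans multiple devices, partitions it, merges the partitions
// back into a single graph with Send/Recv pairs inserted, and makes the
// send/recv dependencies explicit; single-device graphs pass through
// unchanged.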
absl::StatusOr<std::unique_ptr<Graph>> InsertTransferOps(
const DeviceSet& device_set, std::unique_ptr<Graph> graph) {
auto node_to_loc = [](const Node* node) {
return node->assigned_device_name();
};
bool has_multiple_devices = false;
absl::optional<std::string> location;
for (const Node* node : graph->op_nodes()) {
if (location) {
if (*location != node_to_loc(node)) {
has_multiple_devices = true;
break;
}
} else {
location = node_to_loc(node);
}
}
if (!has_multiple_devices) {
return graph;
}
auto new_graph = std::make_unique<Graph>(graph->flib_def());
std::unordered_map<string, GraphDef> partitions;
TF_RETURN_IF_ERROR(PartitionFunctionGraph(device_set, graph.get(),
&partitions, node_to_loc,
                                             /*get_tensor_name_attr=*/nullptr));
GraphDef merged_graph_def;
if (!partitions.empty()) {
auto iter = partitions.begin();
merged_graph_def = std::move(iter->second);
while (++iter != partitions.end()) {
merged_graph_def.MergeFrom(iter->second);
}
}
GraphConstructorOptions opts;
opts.allow_internal_ops = true;
opts.expect_device_spec = true;
TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(opts, std::move(merged_graph_def),
new_graph.get()));
TF_RETURN_IF_ERROR(MakeSendRecvDependencyExplicit(new_graph.get()));
return std::move(new_graph);
}
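// Collects _Arg/_Retval nodes, sorts them by (index, sub_index), rewrites
// their "index" attributes to be dense, and fills in the per-argument and
// per-return-value allocator attributes.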
Status UpdateArgAndRetvalMetadata(
Graph* graph, std::vector<FunctionArgIndex>* arg_indices,
std::vector<int>* ret_indices,
std::vector<AllocatorAttributes>* arg_alloc_attrs,
std::vector<AllocatorAttributes>* ret_alloc_attrs, bool ints_on_device) {
std::vector<std::pair<Node*, FunctionArgIndex>> arg_nodes;
std::vector<std::pair<Node*, int>> ret_nodes;
const AttrValue* attr_value;
for (Node* node : graph->op_nodes()) {
if (node->IsArg()) {
TF_RETURN_IF_ERROR(node->attrs().Find("index", &attr_value));
int index = static_cast<int>(attr_value->i());
int sub_index = -1;
if (node->attrs().Find("sub_index", &attr_value).ok()) {
sub_index = static_cast<int>(attr_value->i());
}
arg_nodes.emplace_back(node, FunctionArgIndex(index, sub_index));
} else if (node->IsRetval()) {
TF_RETURN_IF_ERROR(node->attrs().Find("index", &attr_value));
int index = static_cast<int>(attr_value->i());
ret_nodes.emplace_back(node, index);
}
}
auto arg_comparator = [](std::pair<Node*, FunctionArgIndex> a,
std::pair<Node*, FunctionArgIndex> b) {
return std::tie(a.second.index, a.second.sub_index) <
std::tie(b.second.index, b.second.sub_index);
};
std::sort(arg_nodes.begin(), arg_nodes.end(), arg_comparator);
auto ret_comparator = [](std::pair<Node*, int> a, std::pair<Node*, int> b) {
return a.second < b.second;
};
std::sort(ret_nodes.begin(), ret_nodes.end(), ret_comparator);
arg_indices->reserve(arg_nodes.size());
for (const auto& pair : arg_nodes) arg_indices->push_back(pair.second);
ret_indices->reserve(ret_nodes.size());
for (const auto& pair : ret_nodes) ret_indices->push_back(pair.second);
for (int i = 0; i < arg_nodes.size(); ++i) {
Node* arg = arg_nodes[i].first;
arg->AddAttr("index", i);
}
if (arg_alloc_attrs != nullptr) {
TF_RETURN_IF_ERROR(full_type::SingleDeviceSetAllocAttrsForArgs(
arg_nodes, ints_on_device, *arg_alloc_attrs));
}
for (int i = 0; i < ret_nodes.size(); ++i) {
Node* ret = ret_nodes[i].first;
ret->AddAttr("index", i);
}
if (ret_alloc_attrs) {
TF_RETURN_IF_ERROR(full_type::SingleDeviceSetAllocAttrsForRets(
ret_nodes, ints_on_device, *ret_alloc_attrs));
}
return absl::OkStatus();
}
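// Generates "<name>_<counter>" candidates until one is not already present in
// the function library.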
string FunctionNameGenerator::GetName() {
while (true) {
const string candidate = strings::StrCat(name_, "_", counter_++);
if (flib_def_->Find(candidate) == nullptr) {
return candidate;
}
}
}
} | #include "tensorflow/core/common_runtime/partitioning_utils.h"
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/function_testlib.h"
#include "tensorflow/core/common_runtime/int32_fulltype.h"
#include "tensorflow/core/common_runtime/placer.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
using ::testing::SizeIs;
class PartitioningUtilsTest : public ::testing::Test {
public:
void SetUp() override {
SessionOptions options;
auto* device_count = options.config.mutable_device_count();
device_count->insert({"CPU", 2});
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::AddDevices(options, "/job:a/replica:0/task:0",
&devices));
device0_ = devices[0].get();
device1_ = devices[1].get();
device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices));
for (auto d : device_mgr_->ListDevices()) {
device_set_.AddDevice(d);
}
}
void SwapGraph(Graph* graph, bool assign_device = false) {
Scope s = Scope::NewRootScope();
if (assign_device) {
s = s.WithDevice(device0_->name());
}
auto x = ops::_Arg(s.WithOpName("x"), DT_FLOAT, 0);
auto y = ops::_Arg(s.WithOpName("y"), DT_FLOAT, 1);
auto id_x = ops::Identity(s.WithOpName("id_x"), x);
auto id_y = ops::Identity(s.WithOpName("id_y"), y);
auto dx_retval = ops::_Retval(s.WithOpName("retval1"), id_y, 0);
auto dy_retval = ops::_Retval(s.WithOpName("retval2"), id_x, 1);
TF_ASSERT_OK(s.ToGraph(graph));
if (assign_device) {
FunctionLibraryDefinition flib_def(OpRegistry::Global());
Placer placer(graph, "", &flib_def, &device_set_, device0_);
TF_ASSERT_OK(placer.Run());
}
}
void TwoDeviceSwapGraph(Graph* graph) {
Scope s = Scope::NewRootScope();
Scope s1 = s.WithDevice("/job:a/replica:0/task:0/device:CPU:0");
Scope s2 = s.WithDevice("/job:a/replica:0/task:0/device:CPU:1");
auto x = ops::_Arg(s1.WithOpName("x"), DT_FLOAT, 0);
auto y = ops::_Arg(s2.WithOpName("y"), DT_FLOAT, 1);
auto id_x = ops::Identity(s1.WithOpName("id_x"), x);
auto id_y = ops::Identity(s2.WithOpName("id_y"), y);
auto dx_retval = ops::_Retval(s2.WithOpName("retval1"), id_y, 0);
auto dy_retval = ops::_Retval(s1.WithOpName("retval2"), id_x, 1);
TF_ASSERT_OK(s.ToGraph(graph));
FunctionLibraryDefinition flib_def(OpRegistry::Global());
Placer placer(graph, "", &flib_def, &device_set_, device0_);
TF_ASSERT_OK(placer.Run());
}
void SubGraph(Graph* subgraph, DataType dtype,
absl::Span<const int> arg_indices,
absl::Span<const int> ret_indices) {
Scope s = Scope::NewRootScope();
Scope s1 = s.WithDevice("/job:a/replica:0/task:0/device:CPU:0");
CHECK_EQ(arg_indices.size(), ret_indices.size());
for (size_t i = 0; i < arg_indices.size(); ++i) {
auto x = ops::_Arg(s1.WithOpName("x"), dtype, arg_indices[i]);
auto id_x = ops::Identity(s1.WithOpName("id_x"), x);
auto dx_retval =
ops::_Retval(s1.WithOpName("retval1"), id_x, ret_indices[i]);
}
TF_ASSERT_OK(s.ToGraph(subgraph));
FunctionLibraryDefinition flib_def(OpRegistry::Global());
Placer placer(subgraph, "", &flib_def, &device_set_, device0_);
TF_ASSERT_OK(placer.Run());
}
std::unique_ptr<DeviceMgr> device_mgr_;
Device* device0_ = nullptr;
Device* device1_ = nullptr;
DeviceSet device_set_;
};
TEST_F(PartitioningUtilsTest, GraphWithoutAssignedDevicesFails) {
std::unique_ptr<Graph> graph = std::make_unique<Graph>(OpRegistry::Global());
SwapGraph(graph.get());
std::unordered_map<string, std::unique_ptr<Graph>> subgraphs;
Status status =
PartitionFunctionGraph(device_set_, std::move(graph), &subgraphs);
ASSERT_TRUE(errors::IsInvalidArgument(status)) << status.ToString();
}
TEST_F(PartitioningUtilsTest, OneDevice) {
std::unique_ptr<Graph> graph = std::make_unique<Graph>(OpRegistry::Global());
SwapGraph(graph.get(), true);
int num_nodes = graph->num_op_nodes();
std::unordered_map<string, std::unique_ptr<Graph>> subgraphs;
Status status =
PartitionFunctionGraph(device_set_, std::move(graph), &subgraphs);
ASSERT_TRUE(status.ok()) << status.ToString();
ASSERT_EQ(1, subgraphs.size());
const auto& pair = *subgraphs.begin();
ASSERT_EQ("/job:a/replica:0/task:0/device:CPU:0", pair.first);
ASSERT_EQ(num_nodes, pair.second->num_op_nodes());
}
TEST_F(PartitioningUtilsTest, TwoDevices) {
std::unique_ptr<Graph> graph = std::make_unique<Graph>(OpRegistry::Global());
TwoDeviceSwapGraph(graph.get());
std::unordered_map<string, std::unique_ptr<Graph>> subgraphs;
Status status =
PartitionFunctionGraph(device_set_, std::move(graph), &subgraphs);
ASSERT_TRUE(status.ok()) << status.ToString();
ASSERT_EQ(2, subgraphs.size());
const auto& part1 = subgraphs["/job:a/replica:0/task:0/device:CPU:0"];
ASSERT_EQ(3, part1->num_op_nodes());
const auto& part2 = subgraphs["/job:a/replica:0/task:0/device:CPU:1"];
ASSERT_EQ(3, part2->num_op_nodes());
}
TEST_F(PartitioningUtilsTest, InsertTransferOpsWithOneDevice) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
Scope scope = Scope::NewRootScope().WithDevice(device0_->name());
auto x = ops::_Arg(scope.WithOpName("x"), DT_FLOAT, 0);
auto id_x = ops::Identity(scope.WithOpName("id_x"), x);
auto ret_x = ops::_Retval(scope.WithOpName("ret_x"), id_x, 0);
TF_ASSERT_OK(scope.ToGraph(graph.get()));
FunctionLibraryDefinition flib_def(OpRegistry::Global());
Placer placer(graph.get(), "", &flib_def, &device_set_, device0_);
TF_ASSERT_OK(placer.Run());
EXPECT_EQ(graph->num_op_nodes(), 3);
int send_count = 0, recv_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->IsSend())
++send_count;
else if (op->IsRecv())
++recv_count;
}
ASSERT_EQ(send_count, 0);
ASSERT_EQ(recv_count, 0);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<Graph> new_graph,
InsertTransferOps(device_set_, std::move(graph)));
EXPECT_EQ(new_graph->num_op_nodes(), 3);
send_count = recv_count = 0;
for (const auto* op : new_graph->op_nodes()) {
if (op->IsSend())
++send_count;
else if (op->IsRecv())
++recv_count;
}
EXPECT_EQ(send_count, 0);
EXPECT_EQ(recv_count, 0);
}
TEST_F(PartitioningUtilsTest, InsertTransferOpsWithTwoDevices) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
Scope scope = Scope::NewRootScope();
Scope scope1 = scope.WithDevice(device0_->name());
Scope scope2 = scope.WithDevice(device1_->name());
auto x = ops::_Arg(scope1.WithOpName("x"), DT_FLOAT, 0);
auto id_x = ops::Identity(scope2.WithOpName("id_x"), x);
auto ret_x = ops::_Retval(scope1.WithOpName("ret_x"), id_x, 0);
TF_ASSERT_OK(scope.ToGraph(graph.get()));
FunctionLibraryDefinition flib_def(OpRegistry::Global());
Placer placer(graph.get(), "", &flib_def, &device_set_, device0_);
TF_ASSERT_OK(placer.Run());
EXPECT_EQ(graph->num_op_nodes(), 3);
int send_count = 0, recv_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->IsSend())
++send_count;
else if (op->IsRecv())
++recv_count;
}
ASSERT_EQ(send_count, 0);
ASSERT_EQ(recv_count, 0);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<Graph> new_graph,
InsertTransferOps(device_set_, std::move(graph)));
EXPECT_EQ(new_graph->num_op_nodes(), 7);
send_count = recv_count = 0;
auto get_tensor_name_attr = [](const Node* node) -> std::string {
auto tensor_name_it = node->def().attr().find("tensor_name");
return tensor_name_it->second.s();
};
absl::flat_hash_map<std::string, std::pair<Node*, Node*>> send_recv_pairs;
for (auto* op : new_graph->op_nodes()) {
if (op->IsSend()) {
++send_count;
send_recv_pairs[get_tensor_name_attr(op)].first = op;
} else if (op->IsRecv()) {
++recv_count;
send_recv_pairs[get_tensor_name_attr(op)].second = op;
}
}
EXPECT_EQ(send_count, 2);
EXPECT_EQ(recv_count, 2);
for (const auto& [tensor_name, send_recv_pair] : send_recv_pairs) {
ASSERT_TRUE(send_recv_pair.first != nullptr &&
send_recv_pair.second != nullptr);
std::vector<const Edge*> out_edges(
send_recv_pair.first->out_edges().begin(),
send_recv_pair.first->out_edges().end());
ASSERT_THAT(out_edges, SizeIs(2));
for (const Edge* out_edge : out_edges) {
if (out_edge->dst() != new_graph->sink_node()) {
EXPECT_TRUE(out_edge->IsControlEdge());
EXPECT_EQ(out_edge->dst(), send_recv_pair.second);
}
}
}
}
void CheckRetIndices(const std::vector<int>& expected,
const std::vector<int>& actual) {
ASSERT_EQ(expected.size(), actual.size());
for (int i = 0; i < expected.size(); ++i) {
ASSERT_EQ(expected[i], actual[i]) << " at index " << i;
}
}
void CheckArgIndices(const std::vector<FunctionArgIndex>& expected,
const std::vector<FunctionArgIndex>& actual) {
ASSERT_EQ(expected.size(), actual.size());
for (int i = 0; i < expected.size(); ++i) {
ASSERT_EQ(expected[i].index, actual[i].index) << " at index " << i;
ASSERT_EQ(expected[i].sub_index, actual[i].sub_index) << " at index " << i;
}
}
void CheckAlloc(const std::vector<bool>& expected,
const std::vector<AllocatorAttributes>& actual) {
ASSERT_EQ(expected.size(), actual.size());
for (int i = 0; i < expected.size(); ++i) {
ASSERT_EQ(expected[i], actual[i].on_host()) << " at index " << i;
}
}
void CheckIndex(const Node& node, int expected_index) {
const AttrValue* attr_value;
TF_ASSERT_OK(node.attrs().Find("index", &attr_value));
int index = static_cast<int>(attr_value->i());
ASSERT_EQ(expected_index, index);
}
TEST_F(PartitioningUtilsTest, UpdateArgsAndRets) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
SubGraph(graph.get(), DT_FLOAT, {3}, {5});
std::vector<FunctionArgIndex> arg_indices;
std::vector<int> ret_indices;
std::vector<AllocatorAttributes> arg_alloc_attrs;
std::vector<AllocatorAttributes> ret_alloc_attrs;
Status status = UpdateArgAndRetvalMetadata(
graph.get(), &arg_indices, &ret_indices, &arg_alloc_attrs,
      &ret_alloc_attrs, /*ints_on_device=*/false);
ASSERT_TRUE(status.ok()) << status.ToString();
CheckArgIndices({{3, -1}}, arg_indices);
CheckRetIndices({5}, ret_indices);
CheckAlloc({false}, arg_alloc_attrs);
CheckAlloc({false}, ret_alloc_attrs);
std::unordered_map<string, Node*> nodes = graph->BuildNodeNameIndex();
ASSERT_EQ(1, nodes.count("x"));
CheckIndex(*nodes["x"], 0);
ASSERT_EQ(1, nodes.count("retval1"));
CheckIndex(*nodes["retval1"], 0);
}
TEST_F(PartitioningUtilsTest, UpdateArgsAndRetsIntsNotOnDevice) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
SubGraph(graph.get(), DT_INT32, {3}, {5});
std::vector<FunctionArgIndex> arg_indices;
std::vector<int> ret_indices;
std::vector<AllocatorAttributes> arg_alloc_attrs;
std::vector<AllocatorAttributes> ret_alloc_attrs;
Int32FulltypePass int32_fulltype;
TF_ASSERT_OK(
      int32_fulltype.ProcessGraph(graph.get(), /*ints_on_device=*/false));
Status status = UpdateArgAndRetvalMetadata(
graph.get(), &arg_indices, &ret_indices, &arg_alloc_attrs,
      &ret_alloc_attrs, /*ints_on_device=*/false);
ASSERT_TRUE(status.ok()) << status.ToString();
CheckAlloc({true}, arg_alloc_attrs);
CheckAlloc({true}, ret_alloc_attrs);
}
TEST_F(PartitioningUtilsTest, UpdateArgsAndRetsIntsOnDevice) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
SubGraph(graph.get(), DT_INT32, {3}, {5});
std::vector<FunctionArgIndex> arg_indices;
std::vector<int> ret_indices;
std::vector<AllocatorAttributes> arg_alloc_attrs;
std::vector<AllocatorAttributes> ret_alloc_attrs;
Status status = UpdateArgAndRetvalMetadata(
graph.get(), &arg_indices, &ret_indices, &arg_alloc_attrs,
      &ret_alloc_attrs, /*ints_on_device=*/true);
ASSERT_TRUE(status.ok()) << status.ToString();
CheckAlloc({false}, arg_alloc_attrs);
CheckAlloc({false}, ret_alloc_attrs);
}
TEST_F(PartitioningUtilsTest, UpdateArgsAndRets_Order) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
SubGraph(graph.get(), DT_FLOAT, {9, 7, 5, 3, 1}, {2, 4, 6, 8, 10});
const std::map<int, int> sub_indices = {
{7, 2}, {3, 1}, {1, 0}, {5, 2}, {9, 0}};
const AttrValue* attr_value;
for (Node* n : graph->op_nodes()) {
if (n->IsArg()) {
TF_ASSERT_OK(n->attrs().Find("index", &attr_value));
n->AddAttr("sub_index",
sub_indices.at(static_cast<int>(attr_value->i())));
}
}
std::vector<FunctionArgIndex> arg_indices;
std::vector<int> ret_indices;
std::vector<AllocatorAttributes> arg_alloc_attrs;
std::vector<AllocatorAttributes> ret_alloc_attrs;
Status status = UpdateArgAndRetvalMetadata(
graph.get(), &arg_indices, &ret_indices, &arg_alloc_attrs,
      &ret_alloc_attrs, /*ints_on_device=*/false);
ASSERT_TRUE(status.ok()) << status.ToString();
CheckArgIndices({{1, 0}, {3, 1}, {5, 2}, {7, 2}, {9, 0}}, arg_indices);
CheckRetIndices({2, 4, 6, 8, 10}, ret_indices);
CheckAlloc({false, false, false, false, false}, arg_alloc_attrs);
CheckAlloc({false, false, false, false, false}, ret_alloc_attrs);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/partitioning_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/partitioning_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
908bf121-508e-4c4a-9d0d-8904854bb57f | cpp | tensorflow/tensorflow | ifrt_ops_kernel | tensorflow/core/tfrt/mlrt/kernel/ifrt_ops_kernel.cc | tensorflow/core/tfrt/mlrt/kernel/ifrt_ops_kernel_test.cc | #include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/python/ifrt/future.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_handle.h"
#include "tensorflow/core/framework/resource_var.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/tfrt/ifrt/checkpoint_loader.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_config.pb.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_utils.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_model_context.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_model_restore_context.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/context.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/future.h"
#include "tensorflow/core/tfrt/mlrt/kernel/context.h"
#include "tensorflow/core/tfrt/mlrt/kernel/kernel.h"
#include "tensorflow/core/tfrt/utils/fallback_tensor.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/tstring.h"
using tensorflow::ifrt_serving::IfrtModelContext;
namespace tensorflow {
namespace tf_mlrt {
namespace {
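// Kernel frame for tf_mlrt.ifrt_restore_variable: the arguments are the
// checkpoint prefix, tensor names, shape-and-slices, and one variable handle
// per restored tensor.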
struct MlrtIfrtRestoreVariableKernel : mlrt::KernelFrame {
using KernelFrame::KernelFrame;
static constexpr char kName[] = "tf_mlrt.ifrt_restore_variable";
tensorflow::tfrt_stub::FallbackTensor prefix() const {
DCHECK_GT(arguments().size(), 3);
return arguments()[0].Get<tensorflow::tfrt_stub::FallbackTensor>();
}
tensorflow::tfrt_stub::FallbackTensor tensor_names() const {
DCHECK_GT(arguments().size(), 3);
return arguments()[1].Get<tensorflow::tfrt_stub::FallbackTensor>();
}
tensorflow::tfrt_stub::FallbackTensor shape_and_slices() const {
DCHECK_GT(arguments().size(), 3);
return arguments()[2].Get<tensorflow::tfrt_stub::FallbackTensor>();
}
mlrt::bc::Vector<tensorflow::DataType> restored_dtypes() const {
return attributes().GetAs<mlrt::bc::Vector<tensorflow::DataType>>(0);
}
mlrt::bc::Vector<bool> truncate_in_cast() const {
return attributes().GetAs<mlrt::bc::Vector<bool>>(1);
}
std::vector<tensorflow::tfrt_stub::FallbackTensor> var_handles() const {
DCHECK_GT(arguments().size(), 3);
std::vector<tensorflow::tfrt_stub::FallbackTensor> result;
result.reserve(arguments().size() - 3);
for (int i = 3; i < arguments().size(); ++i) {
result.push_back(
arguments()[i].Get<tensorflow::tfrt_stub::FallbackTensor>());
}
return result;
}
Context& context() { return execution_context().GetUserContext<Context>(); }
void Invoke();
private:
static constexpr int kNumRestoreClusters = 4;
absl::Status InvokeHelper();
absl::Status ValidateInput();
};
void MlrtIfrtRestoreVariableKernel::Invoke() {
absl::Status status = InvokeHelper();
if (!status.ok()) {
execution_context().Fail(std::move(status));
return;
}
}
absl::Status MlrtIfrtRestoreVariableKernel::ValidateInput() {
if (prefix().tensor().NumElements() != 1) {
return absl::InvalidArgumentError(
"The prefix tensor must be a scalar tensor.");
}
if (!TensorShapeUtils::IsVector(tensor_names().tensor().shape()) ||
!TensorShapeUtils::IsVector(shape_and_slices().tensor().shape())) {
return absl::InvalidArgumentError(
absl::StrCat("Input tensor_names and shape_and_slices "
"should be an 1-D tensors, got ",
tensor_names().tensor().shape().DebugString(), " and ",
shape_and_slices().tensor().shape().DebugString()));
}
if (tensor_names().tensor().NumElements() !=
shape_and_slices().tensor().NumElements()) {
return absl::InvalidArgumentError(
"The tensor_names and shape_and_slices tensors must have the same "
"number of elements.");
}
if (tensor_names().tensor().NumElements() != var_handles().size()) {
return absl::InvalidArgumentError(
"The tensor_names and var_handles must have the same number of "
"elements.");
}
if (tensor_names().tensor().NumElements() != restored_dtypes().size()) {
return absl::InvalidArgumentError(
"The tensor_names and restored_dtypes must have the same number of "
"elements.");
}
if (tensor_names().tensor().NumElements() != truncate_in_cast().size()) {
return absl::InvalidArgumentError(
"The tensor_names and truncate_in_cast must have the same number of "
"elements.");
}
return absl::OkStatus();
}
absl::Status MlrtIfrtRestoreVariableKernel::InvokeHelper() {
std::optional<ifrt_serving::IfrtModelRestoreContext*> model_restore_context =
context()
.resource_context()
.GetResource<ifrt_serving::IfrtModelRestoreContext>(
ifrt_serving::kIfrtModelRestoreContextName);
if (!model_restore_context.has_value()) {
return absl::InternalError(
"Did not find IfrtModelRestoreContext resource.");
}
if (*model_restore_context == nullptr) {
return absl::InternalError("IfrtModelRestoreContext must not be null.");
}
ifrt_serving::CheckpointLoader* checkpoint_loader =
(*model_restore_context)->checkpoint_loader();
if (!checkpoint_loader) {
return absl::InternalError("CheckpointLoader must not be null.");
}
TF_RETURN_IF_ERROR(ValidateInput());
std::vector<tensorflow::DataType> restored_dtypes_vec(
restored_dtypes().begin(), restored_dtypes().end());
std::vector<bool> truncate_in_cast_vec(truncate_in_cast().begin(),
truncate_in_cast().end());
return checkpoint_loader->Load(prefix(), var_handles(), tensor_names(),
shape_and_slices(), restored_dtypes_vec,
truncate_in_cast_vec, context());
}
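// Kernel frame for tf_mlrt.ifrt_load_variable: produces the variable's IFRT
// runtime name plus a future for the host tensor, which is only populated
// when the used_by_host attribute is set.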
class MlrtIfrtLoadVariableKernel : public mlrt::KernelFrame {
public:
using KernelFrame::KernelFrame;
static constexpr char kName[] = "tf_mlrt.ifrt_load_variable";
const tensorflow::Tensor& variable_handler_tensor() const {
DCHECK_GE(arguments().size(), 1);
const tensorflow::Tensor& ret =
arguments()[0].Get<tensorflow::tfrt_stub::FallbackTensor>().tensor();
DCHECK_EQ(ret.NumElements(), 1);
return ret;
}
bool used_by_host() const {
DCHECK_EQ(attributes().size(), 1);
return attributes().GetAs<bool>(0);
}
Context& context() { return execution_context().GetUserContext<Context>(); }
void Invoke();
private:
absl::Status InvokeHelper();
};
void MlrtIfrtLoadVariableKernel::Invoke() {
absl::Status status = InvokeHelper();
if (!status.ok()) {
execution_context().Fail(std::move(status));
return;
}
}
absl::Status MlrtIfrtLoadVariableKernel::InvokeHelper() {
DCHECK_EQ(2, results().size());
std::optional<IfrtModelContext*> ifrt_model_context =
context().resource_context().GetResource<IfrtModelContext>(
"IfrtModelContext");
if (!ifrt_model_context.has_value()) {
return absl::FailedPreconditionError(
"LoadVariableOp: failed to fetch IfrtModelContext: ");
}
auto tensor_promise =
mlrt::Promise::Allocate<tensorflow::tfrt_stub::FallbackTensor>();
auto tensor_future = tensor_promise.GetFuture();
ifrt_serving::IfrtRestoreTensorRegistry& ifrt_restore_tensor_registry =
(*ifrt_model_context)->GetRestoreTensorRegistry();
auto& resource_handle = variable_handler_tensor().scalar<ResourceHandle>()();
std::string runtime_name =
ifrt_serving::GetRuntimeNameFromVarHandle(resource_handle);
if (used_by_host()) {
if (ifrt_restore_tensor_registry.SetUsedByHost(runtime_name).ok()) {
xla::ifrt::Future<tensorflow::Tensor> restored_tensor_future =
ifrt_restore_tensor_registry.GetRestoredTensor(runtime_name);
restored_tensor_future.OnReady(
[tensor_promise = std::move(tensor_promise)](
absl::StatusOr<tensorflow::Tensor> restored_tensor) mutable {
if (!restored_tensor.ok()) {
std::move(tensor_promise).SetError(restored_tensor.status());
return;
}
std::move(tensor_promise)
.Set<tensorflow::tfrt_stub::FallbackTensor>(
tensorflow::tfrt_stub::FallbackTensor(*restored_tensor));
});
} else {
auto resource_manager = context()
.fallback_request_state()
.device_manager()
.HostCPU()
->resource_manager();
DCHECK(resource_manager);
Var* variable;
TF_RETURN_IF_ERROR(resource_manager->Lookup(
resource_handle.container(), resource_handle.name(), &variable));
if (tensorflow::Tensor* t = variable->tensor(); t != nullptr) {
std::move(tensor_promise)
.Set<tensorflow::tfrt_stub::FallbackTensor>(
tensorflow::tfrt_stub::FallbackTensor(*t));
} else {
std::move(tensor_promise)
.SetError(absl::InternalError(
absl::StrCat("Variable ", resource_handle.name(),
" is not found in either "
"IfrtRestoreTensorRegistry or ResourceManager")));
}
}
} else {
std::move(tensor_promise)
.Set<tensorflow::tfrt_stub::FallbackTensor>(
tensorflow::tfrt_stub::FallbackTensor());
}
tensorflow::Tensor key_tensor(tensorflow::DT_STRING, {});
key_tensor.scalar<tsl::tstring>()() = runtime_name;
results()[0].Set(tensorflow::tfrt_stub::FallbackTensor(key_tensor));
results()[1].Set(std::move(tensor_future));
return absl::OkStatus();
}
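// Registers the IFRT kernels into the optional tf_mlrt kernel registry at
// static-initialization time.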
void RegisterTfMlrtIfrtKernels(mlrt::KernelRegistry& registry) {
registry.Register<MlrtIfrtLoadVariableKernel>();
registry.Register<MlrtIfrtRestoreVariableKernel>();
}
}
const bool kUnused = [] {
RegisterTfMlrtIfrtKernels(GetTfMlrtOptionalKernelRegistry());
return true;
}();
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "absl/synchronization/notification.h"
#include "absl/types/span.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/test_util.h"
#include "xla/tsl/framework/test_util/mock_serving_device_selector.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/resource_var.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_matcher.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.h"
#include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include "tensorflow/core/tfrt/fallback/op_kernel_runner.h"
#include "tensorflow/core/tfrt/ifrt/checkpoint_loader.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_config.pb.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_model_context.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_model_restore_context.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/executable.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/builtin_kernels.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/context.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/execute.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/interpreter_testutil.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/value.h"
#include "tensorflow/core/tfrt/mlrt/kernel/context.h"
#include "tensorflow/core/tfrt/mlrt/kernel/kernel.h"
#include "tensorflow/core/tfrt/utils/fallback_tensor.h"
#include "tsl/platform/env.h"
#include "tsl/platform/refcount.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tsl/platform/tstring.h"
#include "tfrt/host_context/concurrent_work_queue.h"
#include "tfrt/host_context/resource_context.h"
namespace tensorflow {
namespace tf_mlrt {
namespace {
using tensorflow::test::AsScalar;
using tensorflow::test::AsTensor;
using tensorflow::test::ExpectEqual;
using tensorflow::test::TensorEq;
constexpr absl::string_view kContainer = "test";
constexpr absl::string_view kSharedName = "y";
constexpr absl::string_view kVariableRuntimeName = "test__y";
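// Shared thread pool handed to the IfrtModelContext created in the fixture.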
tsl::thread::ThreadPool& GetThreadPool() {
constexpr int kMaxParallelism = 16;
static tsl::thread::ThreadPool* thread_pool =
new tsl::thread::ThreadPool(tsl::Env::Default(), tsl::ThreadOptions(),
"IfrtSharding", kMaxParallelism);
return *thread_pool;
}
std::string EncodeRestoreDtypesInt32(int num_outputs) {
mlrt::bc::Buffer buffer;
mlrt::bc::Allocator allocator(&buffer);
auto ctor = mlrt::bc::New<mlrt::bc::Vector<tensorflow::DataType>>(
&allocator, num_outputs);
for (int i = 0; i < num_outputs; ++i) {
ctor.ConstructAt(i, tensorflow::DT_INT32);
}
return std::string(buffer.data(), buffer.size());
}
std::string EncodeTruncateInCast(int num_outputs) {
mlrt::bc::Buffer buffer;
mlrt::bc::Allocator allocator(&buffer);
auto ctor = mlrt::bc::New<mlrt::bc::Vector<bool>>(&allocator, num_outputs);
for (int i = 0; i < num_outputs; ++i) {
ctor.ConstructAt(i, false);
}
return std::string(buffer.data(), buffer.size());
}
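// Builds an MLRT bytecode executable that creates `num_variables` VarHandleOps
// and feeds them, together with the prefix/name/slice tensors, into a single
// tf_mlrt.ifrt_restore_variable kernel.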
mlrt::bc::Buffer CreateExecutableForIfrtRestoreVariableOp(
int num_variables = 1) {
mlrt::bc::Buffer buffer;
mlrt::bc::Allocator allocator(&buffer);
auto executable_ctor = mlrt::bc::New<mlrt::bc::Executable>(&allocator);
mlrt::testing::SymbolTable kernels;
std::vector<std::string> kernel_names = {
"tf_mlrt.createop", "tf_mlrt.executeop", "tf_mlrt.ifrt_restore_variable",
"return"};
executable_ctor.construct_kernel_names(kernel_names.size())
.Assign(kernel_names);
kernels.Def(kernel_names);
  static constexpr int kNumAttributes = 5;
mlrt::testing::AttributeTable attributes(executable_ctor.construct_attributes(
kNumAttributes + 2 * (num_variables - 1)));
std::string restore_dtypes = EncodeRestoreDtypesInt32(num_variables);
attributes.Add("restore_dtypes", restore_dtypes);
std::vector<bool> truncate_in_cast(num_variables, false);
attributes.Add("truncate_in_cast", EncodeTruncateInCast(num_variables));
for (int i = 0; i < num_variables; ++i) {
attributes.Add(
absl::StrCat("var_handle_op_node_def", i),
absl::Substitute(
R"pb(name: "$0"
op: "VarHandleOp"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "container"
value { s: "$1" }
}
attr {
key: "shared_name"
value { s: "$2" }
}
attr {
key: "dtype"
value { type: DT_INT16 }
}
attr {
key: "shape"
value { shape { dim { size: 3 } } }
}
)pb",
absl::StrCat("VarHandleOp", i), kContainer,
absl::StrCat(kSharedName, i)));
attributes.Add(absl::StrCat("var_handle_op_key", i), i);
}
auto functions_ctor = executable_ctor.construct_functions(1);
{
auto function_ctor = functions_ctor.ConstructAt(0);
function_ctor.construct_name("main");
mlrt::testing::SymbolTable regs;
function_ctor.construct_input_regs(3).Assign(
regs.Def({"prefix_tensor", "name_tensor", "slice_tensor"}));
const int kNumKernels = 4;
auto kernels_ctor =
function_ctor.construct_kernels(kNumKernels + 2 * (num_variables - 1));
int kernel_index = 0;
std::vector<std::string> variable_handle_names;
variable_handle_names.reserve(num_variables);
for (int i = 0; i < num_variables; ++i) {
variable_handle_names.push_back(absl::StrCat("variable_handle", i));
std::string variable_handle_op_node_def =
absl::StrCat("var_handle_op_node_def", i);
std::string variable_handle_op_key = absl::StrCat("var_handle_op_key", i);
{
auto createop_ctor = kernels_ctor.ConstructAt(kernel_index);
createop_ctor.set_code(kernels.Use("tf_mlrt.createop"));
createop_ctor.construct_arguments(0);
createop_ctor.construct_results(0);
createop_ctor.construct_attributes(2).Assign(
{attributes.GetHandle(variable_handle_op_node_def),
attributes.GetHandle(variable_handle_op_key)});
kernel_index++;
}
{
auto executeop_ctor = kernels_ctor.ConstructAt(kernel_index);
executeop_ctor.set_code(kernels.Use("tf_mlrt.executeop"));
executeop_ctor.construct_arguments(0);
executeop_ctor.construct_results(1).Assign(
{regs.Def(variable_handle_names.back())});
executeop_ctor.construct_attributes(2).Assign(
{attributes.GetHandle(variable_handle_op_node_def),
attributes.GetHandle(variable_handle_op_key)});
executeop_ctor.construct_last_uses(1).Assign({0});
kernel_index++;
}
}
{
std::vector<std::string> args;
args.reserve(3 + num_variables);
args.push_back("prefix_tensor");
args.push_back("name_tensor");
args.push_back("slice_tensor");
for (int i = 0; i < num_variables; ++i) {
args.push_back(variable_handle_names[i]);
}
auto restore_ctor = kernels_ctor.ConstructAt(kernel_index);
restore_ctor.set_code(kernels.Use("tf_mlrt.ifrt_restore_variable"));
restore_ctor.construct_arguments(args.size()).Assign(regs.Use(args));
restore_ctor.construct_results(0);
restore_ctor.construct_attributes(2).Assign(
{attributes.GetHandle("restore_dtypes"),
attributes.GetHandle("truncate_in_cast")});
kernel_index++;
}
{
auto return_ctor = kernels_ctor.ConstructAt(kernel_index);
return_ctor.set_code(kernels.Use("return"));
return_ctor.construct_arguments(0);
kernel_index++;
}
function_ctor.set_num_regs(regs.size());
}
return buffer;
}
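// Builds an MLRT bytecode executable that materializes one variable handle and
// runs tf_mlrt.ifrt_load_variable on it, optionally twice to exercise
// redundant loads.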
mlrt::bc::Buffer CreateExecutableForIfrtLoadVariableOp(
bool redundant_ifrt_load_variable_op = false, bool used_by_host = false) {
mlrt::bc::Buffer buffer;
mlrt::bc::Allocator allocator(&buffer);
auto executable_ctor = mlrt::bc::New<mlrt::bc::Executable>(&allocator);
mlrt::testing::SymbolTable kernels;
std::vector<std::string> kernel_names = {
"tf_mlrt.createop", "tf_mlrt.executeop", "tf_mlrt.ifrt_load_variable",
"return"};
executable_ctor.construct_kernel_names(kernel_names.size())
.Assign(kernel_names);
kernels.Def(kernel_names);
mlrt::testing::AttributeTable attributes(
executable_ctor.construct_attributes(3));
attributes.Add("var_handle_op_node_def",
absl::Substitute(
R"pb(name: "VarHandleOp"
op: "VarHandleOp"
device: "/job:localhost/replica:0/task:0/device:CPU:0"
attr {
key: "container"
value { s: "$0" }
}
attr {
key: "shared_name"
value { s: "$1" }
}
attr {
key: "dtype"
value { type: DT_INT32 }
}
attr {
key: "shape"
value { shape { dim { size: 1 } } }
}
)pb",
kContainer, kSharedName));
attributes.Add("var_handle_op_key", 0);
attributes.Add("used_by_host", used_by_host);
auto functions_ctor = executable_ctor.construct_functions(1);
{
auto function_ctor = functions_ctor.ConstructAt(0);
function_ctor.construct_name("main");
mlrt::testing::SymbolTable regs;
function_ctor.construct_output_regs(2).Assign(
{regs.Def("output_tensor"), regs.Def("output_future")});
const int kNumKernels = 4 + (redundant_ifrt_load_variable_op ? 1 : 0);
auto kernels_ctor = function_ctor.construct_kernels(kNumKernels);
int kernel_index = 0;
{
auto createop_ctor = kernels_ctor.ConstructAt(kernel_index);
createop_ctor.set_code(kernels.Use("tf_mlrt.createop"));
createop_ctor.construct_arguments(0);
createop_ctor.construct_results(0);
createop_ctor.construct_attributes(2).Assign(
{attributes.GetHandle("var_handle_op_node_def"),
attributes.GetHandle("var_handle_op_key")});
kernel_index++;
}
{
auto executeop_ctor = kernels_ctor.ConstructAt(kernel_index);
executeop_ctor.set_code(kernels.Use("tf_mlrt.executeop"));
executeop_ctor.construct_arguments(0);
executeop_ctor.construct_results(1).Assign({regs.Def("variable_handle")});
executeop_ctor.construct_attributes(2).Assign(
{attributes.GetHandle("var_handle_op_node_def"),
attributes.GetHandle("var_handle_op_key")});
kernel_index++;
}
{
auto kernel_ctor = kernels_ctor.ConstructAt(kernel_index);
kernel_ctor.set_code(kernels.Use("tf_mlrt.ifrt_load_variable"));
kernel_ctor.construct_results(2).Assign(
{regs.Use("output_tensor"), regs.Use("output_future")});
kernel_ctor.construct_arguments(1).Assign({regs.Use("variable_handle")});
kernel_ctor.construct_attributes(1).Assign(
{attributes.GetHandle("used_by_host")});
kernel_ctor.construct_last_uses(1).Assign(
{redundant_ifrt_load_variable_op ? 0 : 1});
kernel_index++;
}
if (redundant_ifrt_load_variable_op) {
auto kernel_ctor = kernels_ctor.ConstructAt(kernel_index);
kernel_ctor.set_code(kernels.Use("tf_mlrt.ifrt_load_variable"));
kernel_ctor.construct_results(2).Assign(
{regs.Def("dummy"), regs.Def("dummy_future2")});
kernel_ctor.construct_attributes(1).Assign(
{attributes.GetHandle("used_by_host")});
kernel_ctor.construct_arguments(1).Assign({regs.Use("variable_handle")});
kernel_ctor.construct_last_uses(1).Assign({1});
kernel_index++;
}
{
auto kernel_ctor = kernels_ctor.ConstructAt(kernel_index);
kernel_ctor.set_code(kernels.Use("return"));
kernel_ctor.construct_arguments(2).Assign(
{regs.Use("output_tensor"), regs.Use("output_future")});
kernel_index++;
}
DCHECK_EQ(kernel_index, kNumKernels);
function_ctor.set_num_regs(regs.size());
}
return buffer;
}
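// Fixture wiring up the fallback state, IFRT model context, and
// checkpoint-restore machinery required by the kernels under test.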
class KernelTest : public ::testing::Test {
protected:
void SetUp() override {
mlrt::RegisterBuiltinKernels(registry_);
RegisterTfMlrtKernels(registry_);
    execution_work_queue_ = tfrt::CreateMultiThreadedWorkQueue(
        /*num_threads=*/4, /*num_blocking_threads=*/4);
    restore_work_queue_ = tfrt::CreateMultiThreadedWorkQueue(
        /*num_threads=*/4, /*num_blocking_threads=*/4);
TF_ASSERT_OK_AND_ASSIGN(fallback_state_, tfrt_stub::FallbackState::Create(
session_options_, fdef_lib_));
runner_ = [](const std::function<void()>& f) { f(); };
fallback_request_state_ =
std::make_unique<tfd::KernelFallbackCompatRequestState>(
            &runner_, &fallback_state_->device_manager(), /*step_id=*/0,
            &runner_table_, &resource_array_,
            /*user_intra_op_threadpool=*/nullptr,
            /*model_metadata=*/std::nullopt,
            &fallback_state_->process_function_library_runtime());
TF_ASSERT_OK_AND_ASSIGN(client_, xla::ifrt::test_util::GetClient());
resource_context_
.CreateResource<tensorflow::ifrt_serving::IfrtModelContext>(
"IfrtModelContext", client_, ifrt_core_selector_.get(),
&GetThreadPool(), nullptr);
tf_context_ = std::make_unique<Context>(fallback_request_state_.get(),
&resource_context_);
ifrt_model_context_ =
resource_context_
.GetResource<tensorflow::ifrt_serving::IfrtModelContext>(
"IfrtModelContext")
.value();
ifrt_model_context_->set_checkpoint_loader_queue(restore_work_queue_.get());
resource_context_
.CreateResource<tensorflow::ifrt_serving::IfrtModelRestoreContext>(
ifrt_serving::kIfrtModelRestoreContextName,
std::make_unique<tensorflow::ifrt_serving::CheckpointLoader>(
&ifrt_model_context_->GetRestoreTensorRegistry(),
ifrt_model_context_->checkpoint_loader_queue()));
serving_device_selector_ =
std::make_unique<tsl::test_util::MockServingDeviceSelector>();
ifrt_core_selector_ =
std::make_unique<ifrt_serving::IfrtServingCoreSelector>(
serving_device_selector_.get(),
client_->addressable_device_count());
}
std::unique_ptr<tsl::test_util::MockServingDeviceSelector>
serving_device_selector_;
std::unique_ptr<ifrt_serving::IfrtServingCoreSelector> ifrt_core_selector_;
mlrt::KernelRegistry registry_;
std::unique_ptr<tfrt::ConcurrentWorkQueue> execution_work_queue_;
std::unique_ptr<tfrt::ConcurrentWorkQueue> restore_work_queue_;
tensorflow::SessionOptions session_options_;
tensorflow::FunctionDefLibrary fdef_lib_;
std::function<void(std::function<void()>)> runner_;
tfrt_stub::OpKernelRunnerTable runner_table_;
tfd::FallbackResourceArray resource_array_;
std::unique_ptr<tfrt_stub::FallbackState> fallback_state_;
tfrt::ResourceContext resource_context_;
std::shared_ptr<xla::ifrt::Client> client_;
std::unique_ptr<tfd::KernelFallbackCompatRequestState>
fallback_request_state_;
std::unique_ptr<Context> tf_context_;
tensorflow::ifrt_serving::IfrtModelContext* ifrt_model_context_;
};
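// When the tensor is absent from the restore registry, the kernel falls back
// to looking the variable up in the host device's ResourceManager.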
TEST_F(KernelTest, IfrtLoadVariableOpCanGetTensorFromResourceManager) {
auto buffer = CreateExecutableForIfrtLoadVariableOp(
      /*redundant_ifrt_load_variable_op=*/false, /*used_by_host=*/true);
mlrt::bc::Executable executable(buffer.data());
mlrt::LoadedExecutable loaded_executable(executable, registry_);
mlrt::ExecutionContext execution_context(&loaded_executable);
execution_context.set_work_queue(execution_work_queue_.get());
execution_context.AddUserContext(std::move(tf_context_));
tensorflow::Tensor input_tensor;
TF_CHECK_OK(tensorflow::Tensor::BuildTensor(DT_INT32, {}, &input_tensor));
input_tensor.scalar<int32_t>()() = 1234;
tsl::core::RefCountPtr<Var> variable(new Var(DT_INT32));
*variable->tensor() = input_tensor;
variable->is_initialized = true;
ASSERT_OK(
fallback_state_->device_manager().HostCPU()->resource_manager()->Create(
std::string(kContainer), std::string(kSharedName), &(*variable)));
std::vector<mlrt::Value> args;
std::vector<uint8_t> last_uses;
std::vector<mlrt::Value> results;
results.resize(2);
absl::Notification notification;
execution_context.set_exit_handler(
      [&notification]() { notification.Notify(); });
execution_context.Call(executable.functions()[0], last_uses,
absl::MakeSpan(args), absl::MakeSpan(results));
mlrt::Execute(execution_context);
notification.WaitForNotification();
TF_ASSERT_OK(execution_context.status());
ExpectEqual(results[0].Get<tfrt_stub::FallbackTensor>().tensor(),
AsScalar(tsl::tstring(kVariableRuntimeName)));
auto returned_future = results[1].Get<mlrt::Future>();
ASSERT_TRUE(returned_future.IsReady());
EXPECT_THAT(returned_future.Get<tfrt_stub::FallbackTensor>().tensor(),
TensorEq(input_tensor));
}
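// With the tensor pre-registered for restore and used_by_host left false, the
// kernel returns the runtime name and an empty host tensor.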
TEST_F(KernelTest, IfrtLoadVariableOp) {
auto buffer = CreateExecutableForIfrtLoadVariableOp();
mlrt::bc::Executable executable(buffer.data());
mlrt::LoadedExecutable loaded_executable(executable, registry_);
mlrt::ExecutionContext execution_context(&loaded_executable);
execution_context.set_work_queue(execution_work_queue_.get());
execution_context.AddUserContext(std::move(tf_context_));
tensorflow::Tensor input_tensor;
TF_CHECK_OK(tensorflow::Tensor::BuildTensor(DT_INT32, {}, &input_tensor));
input_tensor.scalar<int32_t>()() = 1234;
auto input_tensor_promise =
xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto input_tensor_future =
xla::ifrt::Future<tensorflow::Tensor>(input_tensor_promise);
ifrt_serving::IfrtRestoreTensorRegistry::RestoredTensorInfo
restore_tensor_info{.dtype_and_shape = {.dtype = input_tensor.dtype(),
.shape = input_tensor.shape()},
.tensor_future = input_tensor_future};
input_tensor_promise.Set(input_tensor);
TF_ASSERT_OK(ifrt_model_context_->GetRestoreTensorRegistry().TryRegister(
kVariableRuntimeName, restore_tensor_info));
std::vector<mlrt::Value> args;
std::vector<uint8_t> last_uses;
std::vector<mlrt::Value> results;
results.resize(2);
absl::Notification notification;
execution_context.set_exit_handler(
      [&notification]() { notification.Notify(); });
execution_context.Call(executable.functions()[0], last_uses,
absl::MakeSpan(args), absl::MakeSpan(results));
mlrt::Execute(execution_context);
notification.WaitForNotification();
TF_ASSERT_OK(execution_context.status());
ExpectEqual(results[0].Get<tfrt_stub::FallbackTensor>().tensor(),
AsScalar(tsl::tstring(kVariableRuntimeName)));
auto returned_future = results[1].Get<mlrt::Future>();
ASSERT_TRUE(returned_future.IsReady());
EXPECT_THAT(returned_future.Get<tfrt_stub::FallbackTensor>().tensor(),
TensorEq(tensorflow::Tensor()));
}
TEST_F(KernelTest, DuplicateIfrtLoadVariableOpShallSucceed) {
auto buffer = CreateExecutableForIfrtLoadVariableOp(
true);
mlrt::bc::Executable executable(buffer.data());
mlrt::LoadedExecutable loaded_executable(executable, registry_);
mlrt::ExecutionContext execution_context(&loaded_executable);
execution_context.set_work_queue(execution_work_queue_.get());
execution_context.AddUserContext(std::move(tf_context_));
tensorflow::Tensor input_tensor;
TF_CHECK_OK(tensorflow::Tensor::BuildTensor(DT_INT32, {}, &input_tensor));
input_tensor.scalar<int32_t>()() = 1234;
auto input_tensor_promise =
xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto input_tensor_future =
xla::ifrt::Future<tensorflow::Tensor>(input_tensor_promise);
ifrt_serving::IfrtRestoreTensorRegistry::RestoredTensorInfo
restore_tensor_info{.dtype_and_shape = {.dtype = input_tensor.dtype(),
.shape = input_tensor.shape()},
.tensor_future = input_tensor_future};
input_tensor_promise.Set(input_tensor);
TF_ASSERT_OK(ifrt_model_context_->GetRestoreTensorRegistry().TryRegister(
kVariableRuntimeName, restore_tensor_info));
std::vector<mlrt::Value> args;
std::vector<uint8_t> last_uses;
std::vector<mlrt::Value> results;
results.resize(2);
absl::Notification notification;
execution_context.set_exit_handler(
      [&notification]() { notification.Notify(); });
execution_context.Call(executable.functions()[0], last_uses,
absl::MakeSpan(args), absl::MakeSpan(results));
mlrt::Execute(execution_context);
notification.WaitForNotification();
TF_ASSERT_OK(execution_context.status());
ExpectEqual(results[0].Get<tfrt_stub::FallbackTensor>().tensor(),
AsScalar(tsl::tstring(kVariableRuntimeName)));
auto returned_future = results[1].Get<mlrt::Future>();
ASSERT_TRUE(returned_future.IsReady());
EXPECT_THAT(returned_future.Get<tfrt_stub::FallbackTensor>().tensor(),
TensorEq(tensorflow::Tensor()));
}
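// Restores a single variable from an on-disk checkpoint via
// tf_mlrt.ifrt_restore_variable and verifies that the restored tensor becomes
// available in the IFRT restore tensor registry under the expected key.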
TEST_F(KernelTest, IfrtRestoreVariableOp) {
std::string checkpoint_prefix =
tensorflow::GetDataDependencyFilepath(
"tensorflow/core/tfrt/mlrt/kernel/testdata/"
"gen_checkpoint_data/variables") +
"/variables";
auto buffer = CreateExecutableForIfrtRestoreVariableOp();
mlrt::bc::Executable executable(buffer.data());
mlrt::LoadedExecutable loaded_executable(executable, registry_);
mlrt::ExecutionContext execution_context(&loaded_executable);
execution_context.set_work_queue(execution_work_queue_.get());
execution_context.AddUserContext(std::move(tf_context_));
xla::ifrt::Future<tensorflow::Tensor> uninitialized_entry =
ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
kVariableRuntimeName);
ASSERT_TRUE(uninitialized_entry.IsReady());
EXPECT_THAT(uninitialized_entry.Await().status(),
::tsl::testing::StatusIs(absl::StatusCode::kNotFound));
std::vector<mlrt::Value> args;
args.resize(3);
tensorflow::Tensor prefix_tensor =
AsTensor<tsl::tstring>({tsl::tstring(checkpoint_prefix)});
args.at(0).Set(tfrt_stub::FallbackTensor(std::move(prefix_tensor)));
tensorflow::Tensor name_tensor =
AsTensor<tsl::tstring>({tsl::tstring("w/.ATTRIBUTES/VARIABLE_VALUE")});
args.at(1).Set(tfrt_stub::FallbackTensor(std::move(name_tensor)));
tensorflow::Tensor slice_tensor = AsTensor<tsl::tstring>({tsl::tstring("")});
args.at(2).Set(tfrt_stub::FallbackTensor(std::move(slice_tensor)));
std::vector<uint8_t> last_uses = {true, true, true};
std::vector<mlrt::Value> results;
absl::Notification notification;
execution_context.set_exit_handler(
      [&notification]() { notification.Notify(); });
execution_context.Call(executable.functions()[0], last_uses,
absl::MakeSpan(args), absl::MakeSpan(results));
mlrt::Execute(execution_context);
notification.WaitForNotification();
TF_ASSERT_OK(execution_context.status());
xla::ifrt::Future<tensorflow::Tensor> restored_future =
ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
absl::StrCat(kVariableRuntimeName, 0));
absl::StatusOr<tensorflow::Tensor> restored_tensor = restored_future.Await();
TF_ASSERT_OK(restored_tensor.status());
EXPECT_THAT(*restored_tensor, TensorEq(AsTensor<int16_t>({1, 2, 3}, {3})));
}
TEST_F(KernelTest, IfrtRestoreVariableOp4Variables) {
std::string checkpoint_prefix =
tensorflow::GetDataDependencyFilepath(
"tensorflow/core/tfrt/mlrt/kernel/testdata/"
"gen_checkpoint_data/variables") +
"/variables";
static constexpr int kNumVariables = 4;
auto buffer = CreateExecutableForIfrtRestoreVariableOp(kNumVariables);
mlrt::bc::Executable executable(buffer.data());
mlrt::LoadedExecutable loaded_executable(executable, registry_);
mlrt::ExecutionContext execution_context(&loaded_executable);
execution_context.set_work_queue(execution_work_queue_.get());
execution_context.AddUserContext(std::move(tf_context_));
xla::ifrt::Future<tensorflow::Tensor> uninitialized_entry =
ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
kVariableRuntimeName);
ASSERT_TRUE(uninitialized_entry.IsReady());
EXPECT_THAT(uninitialized_entry.Await().status(),
::tsl::testing::StatusIs(absl::StatusCode::kNotFound));
std::vector<mlrt::Value> args;
args.resize(3);
tensorflow::Tensor prefix_tensor =
AsTensor<tsl::tstring>({tsl::tstring(checkpoint_prefix)});
args.at(0).Set(tfrt_stub::FallbackTensor(std::move(prefix_tensor)));
tensorflow::Tensor name_tensor =
AsTensor<tsl::tstring>({tsl::tstring("w/.ATTRIBUTES/VARIABLE_VALUE"),
tsl::tstring("w1/.ATTRIBUTES/VARIABLE_VALUE"),
tsl::tstring("w2/.ATTRIBUTES/VARIABLE_VALUE"),
tsl::tstring("w3/.ATTRIBUTES/VARIABLE_VALUE")});
args.at(1).Set(tfrt_stub::FallbackTensor(std::move(name_tensor)));
tensorflow::Tensor slice_tensor = AsTensor<tsl::tstring>(
{tsl::tstring(""), tsl::tstring(""), tsl::tstring(""), tsl::tstring("")});
args.at(2).Set(tfrt_stub::FallbackTensor(std::move(slice_tensor)));
std::vector<uint8_t> last_uses = {true, true, true};
std::vector<mlrt::Value> results;
absl::Notification notification;
execution_context.set_exit_handler(
      [&notification]() { notification.Notify(); });
execution_context.Call(executable.functions()[0], last_uses,
absl::MakeSpan(args), absl::MakeSpan(results));
mlrt::Execute(execution_context);
notification.WaitForNotification();
TF_ASSERT_OK(execution_context.status());
xla::ifrt::Future<tensorflow::Tensor> restored_future =
ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
absl::StrCat(kVariableRuntimeName, 0));
absl::StatusOr<tensorflow::Tensor> restored_tensor = restored_future.Await();
TF_ASSERT_OK(restored_tensor.status());
EXPECT_THAT(*restored_tensor, TensorEq(AsTensor<int16_t>({1, 2, 3}, {3})));
xla::ifrt::Future<tensorflow::Tensor> restored_future1 =
ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
absl::StrCat(kVariableRuntimeName, 1));
absl::StatusOr<tensorflow::Tensor> restored_tensor1 =
restored_future1.Await();
TF_ASSERT_OK(restored_tensor1.status());
EXPECT_THAT(*restored_tensor1, TensorEq(AsTensor<int16_t>({4, 5, 6}, {3})));
xla::ifrt::Future<tensorflow::Tensor> restored_future2 =
ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
absl::StrCat(kVariableRuntimeName, 2));
absl::StatusOr<tensorflow::Tensor> restored_tensor2 =
restored_future2.Await();
TF_ASSERT_OK(restored_tensor2.status());
EXPECT_THAT(*restored_tensor2, TensorEq(AsTensor<int16_t>({7, 8, 9}, {3})));
xla::ifrt::Future<tensorflow::Tensor> restored_future3 =
ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
absl::StrCat(kVariableRuntimeName, 3));
absl::StatusOr<tensorflow::Tensor> restored_tensor3 =
restored_future3.Await();
TF_ASSERT_OK(restored_tensor3.status());
EXPECT_THAT(*restored_tensor3,
TensorEq(AsTensor<int16_t>({10, 11, 12}, {3})));
}
TEST_F(KernelTest, IfrtRestoreVariableOpInValidInput) {
std::string checkpoint_prefix =
tensorflow::GetDataDependencyFilepath(
"tensorflow/core/tfrt/mlrt/kernel/testdata/"
"gen_checkpoint_data/variables") +
"/variables";
static constexpr int kNumVariables = 4;
auto buffer = CreateExecutableForIfrtRestoreVariableOp(kNumVariables);
mlrt::bc::Executable executable(buffer.data());
mlrt::LoadedExecutable loaded_executable(executable, registry_);
mlrt::ExecutionContext execution_context(&loaded_executable);
execution_context.set_work_queue(execution_work_queue_.get());
execution_context.AddUserContext(std::move(tf_context_));
xla::ifrt::Future<tensorflow::Tensor> uninitialized_entry =
ifrt_model_context_->GetRestoreTensorRegistry().GetRestoredTensor(
kVariableRuntimeName);
ASSERT_TRUE(uninitialized_entry.IsReady());
EXPECT_THAT(uninitialized_entry.Await().status(),
::tsl::testing::StatusIs(absl::StatusCode::kNotFound));
std::vector<mlrt::Value> args;
args.resize(3);
tensorflow::Tensor prefix_tensor =
AsTensor<tsl::tstring>({tsl::tstring(checkpoint_prefix)});
args.at(0).Set(tfrt_stub::FallbackTensor(std::move(prefix_tensor)));
tensorflow::Tensor name_tensor =
AsTensor<tsl::tstring>({tsl::tstring("w/.ATTRIBUTES/VARIABLE_VALUE"),
tsl::tstring("w1/.ATTRIBUTES/VARIABLE_VALUE"),
tsl::tstring("w2/.ATTRIBUTES/VARIABLE_VALUE"),
tsl::tstring("w3/.ATTRIBUTES/VARIABLE_VALUE")});
args.at(1).Set(tfrt_stub::FallbackTensor(std::move(name_tensor)));
tensorflow::Tensor slice_tensor = AsTensor<tsl::tstring>(
{tsl::tstring(""), tsl::tstring(""), tsl::tstring("")});
args.at(2).Set(tfrt_stub::FallbackTensor(std::move(slice_tensor)));
std::vector<uint8_t> last_uses = {true, true, true};
std::vector<mlrt::Value> results;
absl::Notification notification;
execution_context.set_exit_handler(
      [&notification]() { notification.Notify(); });
execution_context.Call(executable.functions()[0], last_uses,
absl::MakeSpan(args), absl::MakeSpan(results));
mlrt::Execute(execution_context);
notification.WaitForNotification();
EXPECT_THAT(execution_context.status(),
::tsl::testing::StatusIs(absl::StatusCode::kInvalidArgument));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/kernel/ifrt_ops_kernel.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/kernel/ifrt_ops_kernel_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9fb23443-70bb-4a1a-b176-8ffde412eac7 | cpp | tensorflow/tensorflow | clamp | tensorflow/lite/experimental/shlo/legacy/src/clamp.cc | tensorflow/lite/experimental/shlo/legacy/test/clamp_test.cc | #include <algorithm>
#include <cstddef>
#include <type_traits>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/legacy/include/shlo.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/dispatch.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/storage.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/util.h"
namespace stablehlo {
namespace {
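// Validates the StableHLO clamp constraints: min and max must either be
// scalars (rank 0) or match the operand's shape, all operands must share a
// baseline element type, the operand and result must share a baseline type,
// and quantized tensors must use per-tensor quantization. Strided layouts
// are rejected as unimplemented.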
template <typename Value>
absl::Status CheckParameters(const Value& min, const Value& operand,
const Value& max, Value& result) {
if (!(min.rank() == 0 or min.shape() == operand.shape())) {
return absl::InvalidArgumentError(
"Constraint violation: rank(min) = 0 or shape(min) = shape(operand)");
} else if (!(max.rank() == 0 or max.shape() == operand.shape())) {
return absl::InvalidArgumentError(
"Constraint violation: rank(max) = 0 or shape(max) = shape(operand)");
} else if (!(min.baseline_element_type() ==
operand.baseline_element_type() and
min.baseline_element_type() == max.baseline_element_type())) {
return absl::InvalidArgumentError(
"Constraint violation: baseline_element_type(min) = "
"baseline_element_type(operand) = baseline_element_type(max)");
} else if (!(operand.baseline_type() == result.baseline_type())) {
return absl::InvalidArgumentError(
"Constraint violation: baseline_type(operand) = baseline_type(result)");
}
if constexpr (std::is_same_v<Value, QuantizedTensor>) {
if (!(min.is_per_tensor_quantized() and max.is_per_tensor_quantized() and
operand.is_per_tensor_quantized() and
result.is_per_tensor_quantized())) {
return absl::InvalidArgumentError("Expected per-tensor quantization");
}
}
if (operand.layout().has_strides() || result.layout().has_strides()) {
    return absl::InvalidArgumentError("Strides not supported yet");
}
return absl::OkStatus();
}
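// Element-wise clamp kernel. For plain tensors each element is computed as
// std::min(max, std::max(min, x)); for quantized tensors each element is
// dequantized to the expressed type, clamped there, and requantized with the
// result's quantization parameters. Scalar min/max values are read once
// (at i == 0) and reused across the whole tensor.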
template <ElementType storage_type, ElementType expressed_type, typename Value>
absl::Status Clamp(const Value& min, const Value& operand, const Value& max,
Value& result) {
if (auto check = CheckParameters(min, operand, max, result); !check.ok()) {
return check;
}
using S = Storage<storage_type>;
const bool min_is_tensor = (min.rank() > 0);
const bool max_is_tensor = (max.rank() > 0);
const size_t n = result.num_elements();
auto operand_buffer = operand.buffer();
auto min_buffer = min.buffer();
auto max_buffer = max.buffer();
auto result_buffer = result.buffer();
if constexpr (std::is_same_v<Value, Tensor>) {
if (storage_type != result.element_type()) {
return absl::InvalidArgumentError("Unexpected tensor element type");
}
typename S::Type min_value;
typename S::Type max_value;
for (size_t i = 0; i < n; ++i) {
if (min_is_tensor || (i == 0)) {
min_value = S::Get(min_buffer, i);
}
if (max_is_tensor || (i == 0)) {
max_value = S::Get(max_buffer, i);
}
auto operand_value = S::Get(operand_buffer, i);
auto result_value =
std::min(max_value, std::max(min_value, operand_value));
S::Set(result_buffer, i, result_value);
}
} else {
static_assert(std::is_same_v<Value, QuantizedTensor>);
if (storage_type != result.storage_type()) {
return absl::InvalidArgumentError("Unexpected storage type");
} else if (expressed_type != result.expressed_type()) {
return absl::InvalidArgumentError("Unexpected expressed type");
}
using ET = typename Storage<expressed_type>::Type;
const QuantizedParameter& min_quant_param =
min.type().element_type().parameters(0);
const QuantizedParameter& max_quant_param =
max.type().element_type().parameters(0);
const QuantizedParameter& operand_quant_param =
operand.type().element_type().parameters(0);
const QuantizedParameter& result_quant_param =
result.type().element_type().parameters(0);
ET result_scale_inv = ET(1.0) / static_cast<ET>(result_quant_param.scale);
ET min_expressed;
ET max_expressed;
for (size_t i = 0; i < n; ++i) {
if (min_is_tensor || (i == 0)) {
auto min_storage = S::Get(min_buffer, i);
min_expressed = Dequantize<storage_type, expressed_type>(
min_storage, min_quant_param);
}
if (max_is_tensor || (i == 0)) {
auto max_storage = S::Get(max_buffer, i);
max_expressed = Dequantize<storage_type, expressed_type>(
max_storage, max_quant_param);
}
auto operand_storage = S::Get(operand_buffer, i);
auto result_storage =
DequantizeOpQuantizePartial<storage_type, expressed_type>(
operand_storage, operand_quant_param, result_scale_inv,
result_quant_param.zero_point, [=](auto x) {
return std::min(max_expressed, std::max(min_expressed, x));
});
S::Set(result_buffer, i, result_storage);
}
if (auto status = CompleteQuantization<storage_type>(result);
!status.ok()) {
return status;
}
}
return absl::OkStatus();
}
}
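// Public entry points, dispatched on the result's element/storage type. A
// minimal usage sketch (mirrors the test harness below; the exact Shape and
// TensorType constructor forms are assumptions drawn from that harness):
//
//   float min_v = 0.0f, max_v = 1.0f;
//   float data[3] = {-2.0f, 0.0f, 2.0f}, out[3];
//   Tensor min(TensorType(Shape(), ElementType::kF32), &min_v);
//   Tensor max(TensorType(Shape(), ElementType::kF32), &max_v);
//   Tensor operand(TensorType(Shape({3}), ElementType::kF32), data);
//   Tensor result(TensorType(Shape({3}), ElementType::kF32), out);
//   absl::Status status = Clamp(min, operand, max, result);  // out = {0,0,1}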
absl::Status Clamp(const Tensor& min, const Tensor& operand, const Tensor& max,
Tensor& result) {
DISPATCH_INT_FLOAT(Clamp, result.element_type(), min, operand, max, result);
}
absl::Status Clamp(const QuantizedTensor& min, const QuantizedTensor& operand,
const QuantizedTensor& max, QuantizedTensor& result) {
DISPATCH_QUANTIZED(Clamp, result.storage_type(), result.expressed_type(), min,
operand, max, result);
}
} | #include <initializer_list>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/legacy/include/shlo.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/debug.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/storage.h"
#include "tensorflow/lite/experimental/shlo/legacy/test/util.h"
namespace stablehlo {
namespace testing {
template <ElementType element_type>
void test(std::initializer_list<DimensionSize>&& shape,
std::vector<typename Storage<element_type>::Type>&& min_values,
std::vector<typename Storage<element_type>::Type>&& operand_values,
std::vector<typename Storage<element_type>::Type>&& max_values,
std::vector<typename Storage<element_type>::Type>&& expected_values) {
Shape min_shape = (min_values.size() > 1) ? Shape(shape) : Shape();
Tensor min(TensorType(std::move(min_shape), element_type), min_values.data());
Shape max_shape = (max_values.size() > 1) ? Shape(shape) : Shape();
Tensor max(TensorType(std::move(max_shape), element_type), max_values.data());
Tensor operand(TensorType(Shape(shape), element_type), operand_values.data());
Tensor expected(TensorType(Shape(shape), element_type),
expected_values.data());
std::vector<typename Storage<element_type>::Type> result_values(
expected_values.size());
Tensor result(TensorType(Shape(shape), element_type), result_values.data());
ASSERT_OK(Clamp(min, operand, max, result));
EXPECT_EQ(result, expected)
<< "min: " << min << "\nmax: " << max << "\noperand: " << operand;
}
template <ElementType storage_type, ElementType expressed_type>
void test(
QuantizedParameter&& quantized_parameter,
std::initializer_list<DimensionSize>&& shape,
std::vector<typename Storage<expressed_type>::Type>&& min_values,
std::vector<typename Storage<expressed_type>::Type>&& operand_values,
std::vector<typename Storage<expressed_type>::Type>&& max_values,
std::vector<typename Storage<expressed_type>::Type>&& expected_values) {
auto min_quant_values = QuantizeVector<storage_type, expressed_type>(
min_values, quantized_parameter);
auto operand_quant_values = QuantizeVector<storage_type, expressed_type>(
operand_values, quantized_parameter);
auto max_quant_values = QuantizeVector<storage_type, expressed_type>(
max_values, quantized_parameter);
auto expected_quant_values = QuantizeVector<storage_type, expressed_type>(
expected_values, quantized_parameter);
std::vector<typename Storage<storage_type>::Type> result_quant_values(
expected_quant_values.size());
QuantizedTensorElementType element_type(storage_type, expressed_type,
std::move(quantized_parameter));
Shape min_shape = (min_values.size() > 1) ? Shape(shape) : Shape();
QuantizedTensor min(
QuantizedTensorType(std::move(min_shape),
QuantizedTensorElementType(element_type)),
min_quant_values.data());
Shape max_shape = (max_values.size() > 1) ? Shape(shape) : Shape();
QuantizedTensor max(
QuantizedTensorType(std::move(max_shape),
QuantizedTensorElementType(element_type)),
max_quant_values.data());
QuantizedTensor operand(
QuantizedTensorType(Shape(shape),
QuantizedTensorElementType(element_type)),
operand_quant_values.data());
QuantizedTensor expected(
QuantizedTensorType(Shape(shape),
QuantizedTensorElementType(element_type)),
expected_quant_values.data());
QuantizedTensor result(
QuantizedTensorType(Shape(shape),
QuantizedTensorElementType(element_type)),
result_quant_values.data());
ASSERT_OK(Clamp(min, operand, max, result));
EXPECT_EQ(result, expected)
<< "min: " << min << "\nmax: " << max << "\noperand: " << operand;
}
TEST(Clamp, Unquantized) {
test<ElementType::kSI8>({3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kSI16>({3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kSI32>({3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kBF16>({3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kF16>({3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kF32>({3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kSI8>({3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2});
test<ElementType::kSI16>({3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2});
test<ElementType::kSI32>({3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2});
test<ElementType::kBF16>({3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2});
test<ElementType::kF16>({3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2});
test<ElementType::kF32>({3}, {0, 1, 1}, {-3, 0, 3}, {1, 1, 2}, {0, 1, 2});
}
TEST(Clamp, Quantized) {
test<ElementType::kSI8, ElementType::kBF16>(
{.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kSI8, ElementType::kF16>(
{.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kSI8, ElementType::kF32>(
{.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kSI16, ElementType::kBF16>(
{.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kSI16, ElementType::kF16>(
{.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kSI16, ElementType::kF32>(
{.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kSI32, ElementType::kBF16>(
{.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kSI32, ElementType::kF16>(
{.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kSI32, ElementType::kF32>(
{.scale = 0.1, .zero_point = 0}, {3}, {0}, {-2, 0, 2}, {1}, {0, 0, 1});
test<ElementType::kSI8, ElementType::kBF16>({.scale = 0.1, .zero_point = 0},
{3}, {0, 1, 1}, {-3, 0, 3},
{1, 1, 2}, {0, 1, 2});
test<ElementType::kSI8, ElementType::kF16>({.scale = 0.1, .zero_point = 0},
{3}, {0, 1, 1}, {-3, 0, 3},
{1, 1, 2}, {0, 1, 2});
test<ElementType::kSI8, ElementType::kF32>({.scale = 0.1, .zero_point = 0},
{3}, {0, 1, 1}, {-3, 0, 3},
{1, 1, 2}, {0, 1, 2});
test<ElementType::kSI16, ElementType::kBF16>({.scale = 0.1, .zero_point = 0},
{3}, {0, 1, 1}, {-3, 0, 3},
{1, 1, 2}, {0, 1, 2});
test<ElementType::kSI16, ElementType::kF16>({.scale = 0.1, .zero_point = 0},
{3}, {0, 1, 1}, {-3, 0, 3},
{1, 1, 2}, {0, 1, 2});
test<ElementType::kSI16, ElementType::kF32>({.scale = 0.1, .zero_point = 0},
{3}, {0, 1, 1}, {-3, 0, 3},
{1, 1, 2}, {0, 1, 2});
test<ElementType::kSI32, ElementType::kBF16>({.scale = 0.1, .zero_point = 0},
{3}, {0, 1, 1}, {-3, 0, 3},
{1, 1, 2}, {0, 1, 2});
test<ElementType::kSI32, ElementType::kF16>({.scale = 0.1, .zero_point = 0},
{3}, {0, 1, 1}, {-3, 0, 3},
{1, 1, 2}, {0, 1, 2});
test<ElementType::kSI32, ElementType::kF32>({.scale = 0.1, .zero_point = 0},
{3}, {0, 1, 1}, {-3, 0, 3},
{1, 1, 2}, {0, 1, 2});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/src/clamp.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/test/clamp_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d760ed97-1225-41c7-86ae-6346991c7cf1 | cpp | tensorflow/tensorflow | validator | tensorflow/lite/experimental/acceleration/mini_benchmark/validator.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/validator_test.cc | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator.h"
#include <stdint.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <functional>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/acceleration/configuration/delegate_registry.h"
#include "tensorflow/lite/core/acceleration/configuration/stable_delegate_registry.h"
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/core/c/c_api.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/call_register.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/constants.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/decode_jpeg_register.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/tools/benchmark/register_custom_op.h"
#include "tensorflow/lite/tools/model_loader.h"
#ifndef TEMP_FAILURE_RETRY
#ifdef __ANDROID__
#error "TEMP_FAILURE_RETRY not set although on Android"
#else
#define TEMP_FAILURE_RETRY(exp) exp
#endif
#endif
namespace tflite {
namespace acceleration {
namespace {
std::unique_ptr<tflite::delegates::DelegatePluginInterface> LoadDelegatePlugin(
const std::string& name, const tflite::TFLiteSettings& tflite_settings) {
return tflite::delegates::DelegatePluginRegistry::CreateByName(
name + "Plugin", tflite_settings);
}
void AppendTensorDataToVector(const TfLiteTensor* tensor,
std::vector<std::vector<char>>& output_vector) {
std::vector<char> char_output(TfLiteTensorByteSize(tensor));
memcpy(char_output.data(), TfLiteTensorData(tensor),
TfLiteTensorByteSize(tensor));
output_vector.emplace_back(std::move(char_output));
}
inline bool HasTensorData(tools::ModelLoader* model_loader,
const Subgraph& graph, int index) {
const TfLiteTensor* tensor = graph.tensor(index);
return tensor->allocation != nullptr ||
(model_loader->type() == tools::ModelLoader::Type::kPipeModelLoader &&
tensor->data.data != nullptr);
}
constexpr int64_t kMicrosInSecond = 1000 * 1000;
constexpr int64_t kNanosInMicro = 1000;
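// Returns elapsed time in microseconds from a monotonic clock (CLOCK_BOOTTIME
// on Android, so that time spent in suspend is included), or -1 on platforms
// without clock_gettime support.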
int64_t ElapsedTimeMicros() {
struct timespec ts;
#if defined(__ANDROID__)
int err = clock_gettime(CLOCK_BOOTTIME, &ts);
#elif defined(_WIN32)
int err = 1;
#else
int err = clock_gettime(CLOCK_MONOTONIC, &ts);
#endif
if (err) {
return -1;
}
return ts.tv_sec * kMicrosInSecond + ts.tv_nsec / kNanosInMicro;
}
class ValidatorProfiler : public ::tflite::Profiler {
public:
struct EventData {
std::string tag;
int64_t start_time_us = -1;
int64_t end_time_us = -1;
};
const std::vector<EventData>& events() { return events_; }
uint32_t BeginEvent(const char* tag, EventType event_type,
int64_t event_metadata1,
int64_t event_metadata2) override {
if (event_type != EventType::DEFAULT) {
return 0;
}
events_.push_back({tag, ElapsedTimeMicros(), -1});
return events_.size();
}
void EndEvent(uint32_t event_handle) override {
if (event_handle == 0) {
return;
}
events_[event_handle - 1].end_time_us = ElapsedTimeMicros();
}
private:
std::vector<EventData> events_;
};
}
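// Produces the golden (reference) inputs for accuracy validation. If the
// validation subgraph's inputs are not already embedded in the model, the
// model is run once on CPU with zero-filled inputs and the CPU outputs are
// copied into the delegated validation entrypoint's inputs for comparison.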
MinibenchmarkStatus Validator::CheckGoldenOutput(Results* results_out) {
if (!interpreter_ || !model_loader_->GetModel()) {
return kMinibenchmarkPreconditionNotMet;
}
if (validation_entrypoint_->inputs().size() <= 1) {
return kMinibenchmarkValidationSubgraphHasTooFewInputs;
}
if (validation_entrypoint_->inputs().size() >
validation_entrypoint_->outputs().size()) {
return kMinibenchmarkValidationSubgraphHasTooFewOutputs;
}
if (HasTensorData(model_loader_.get(), *validation_entrypoint_,
validation_entrypoint_->inputs()[0])) {
return kMinibenchmarkSuccess;
}
TFLITE_LOG_PROD(TFLITE_LOG_INFO,
"Running on CPU to get golden output for comparison.");
tflite::InterpreterBuilder(*model_loader_->GetModel(),
*resolver_)(&golden_interpreter_);
if (!golden_interpreter_) {
return kMinibenchmarkInterpreterBuilderFailed;
}
Subgraph* golden_validation_entrypoint =
golden_interpreter_->subgraph(validation_entrypoint_index_);
if (golden_validation_entrypoint->AllocateTensors() != kTfLiteOk) {
return kMinibenchmarkAllocateTensorsFailed;
}
for (int i = 0; i < golden_validation_entrypoint->inputs().size() - 1; i++) {
TfLiteTensor* input_tensor = golden_validation_entrypoint->tensor(
golden_validation_entrypoint->inputs()[i]);
memset(input_tensor->data.data, 0, input_tensor->bytes);
}
if (golden_validation_entrypoint->Invoke() != kTfLiteOk) {
return kMinibenchmarkInvokeFailed;
}
for (int i = 0; i < validation_entrypoint_->inputs().size() - 1; i++) {
TfLiteTensor* input_tensor =
validation_entrypoint_->tensor(validation_entrypoint_->inputs()[i]);
TfLiteTensor* golden_output_tensor = golden_validation_entrypoint->tensor(
golden_validation_entrypoint->outputs()[i]);
if (input_tensor->bytes != golden_output_tensor->bytes) {
return kMinibenchmarkValidationSubgraphInputsDontMatchOutputs;
}
memcpy(input_tensor->data.data, golden_output_tensor->data.data,
golden_output_tensor->bytes);
}
return kMinibenchmarkSuccess;
}
MinibenchmarkStatus Validator::LoadDelegate() {
if (!compute_settings_) {
return kMinibenchmarkPreconditionNotMet;
}
if (opaque_delegate_) {
return kMinibenchmarkSuccess;
}
Delegate which_delegate = Delegate_NONE;
bool is_stable_delegate_path_provided = false;
auto tflite_settings = compute_settings_->tflite_settings();
if (tflite_settings) {
which_delegate = compute_settings_->tflite_settings()->delegate();
if (tflite_settings->stable_delegate_loader_settings()) {
is_stable_delegate_path_provided =
tflite_settings->stable_delegate_loader_settings()->delegate_path() &&
!tflite_settings->stable_delegate_loader_settings()
->delegate_path()
->str()
.empty();
}
}
std::string delegate_name;
if (is_stable_delegate_path_provided && which_delegate == Delegate_GPU) {
delegate_name = "GpuModule";
} else if (is_stable_delegate_path_provided) {
delegate_name = "StableDelegate";
} else {
switch (which_delegate) {
case Delegate_NONE:
return kMinibenchmarkSuccess;
case Delegate_NNAPI:
delegate_name = "Nnapi";
break;
case Delegate_GPU:
delegate_name = "Gpu";
break;
case Delegate_XNNPACK:
delegate_name = "XNNPack";
break;
case Delegate_EDGETPU:
delegate_name = "EdgeTpu";
break;
default:
return kMinibenchmarkDelegateNotSupported;
}
}
TFLITE_LOG_PROD(TFLITE_LOG_INFO, "Running mini-benchmark on %s",
delegate_name.c_str());
if (!(delegate_plugin_ = LoadDelegatePlugin(
delegate_name, *compute_settings_->tflite_settings()))) {
return kMinibenchmarkDelegatePluginNotFound;
}
if (!(delegate_ = delegate_plugin_->Create())) {
return kMinibenchmarkDelegateCreateFailed;
}
return kMinibenchmarkSuccess;
}
MinibenchmarkStatus Validator::LoadOpaqueDelegate() {
if (!compute_settings_) {
return kMinibenchmarkPreconditionNotMet;
}
bool is_stable_delegate_name_provided = false;
auto tflite_settings = compute_settings_->tflite_settings();
if (!tflite_settings) {
return kMinibenchmarkSuccess;
}
auto stable_delegate_settings =
tflite_settings->stable_delegate_loader_settings();
is_stable_delegate_name_provided =
stable_delegate_settings && stable_delegate_settings->delegate_name() &&
!stable_delegate_settings->delegate_name()->str().empty();
if (!is_stable_delegate_name_provided) {
return kMinibenchmarkSuccess;
}
std::string delegate_name = stable_delegate_settings->delegate_name()->str();
TFLITE_LOG_PROD(TFLITE_LOG_INFO, "Running mini-benchmark on %s",
delegate_name.c_str());
const TfLiteStableDelegate* stable_delegate =
delegates::StableDelegateRegistry::RetrieveStableDelegate(delegate_name);
if (!stable_delegate) {
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Failed to load stable delegate plugin %s",
delegate_name.c_str());
return kMinibenchmarkDelegatePluginNotFound;
}
const TfLiteOpaqueDelegatePlugin* delegate_plugin =
stable_delegate->delegate_plugin;
opaque_delegate_ = TfLiteOpaqueDelegatePtr(
delegate_plugin->create(tflite_settings), delegate_plugin->destroy);
return kMinibenchmarkSuccess;
}
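// Builds the interpreter with the configured (opaque) delegate applied,
// registers the mini-benchmark custom ops, locates the validation entrypoint
// subgraph, and reports how many kernels were actually delegated so callers
// can distinguish full, partial, and failed delegation.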
MinibenchmarkStatus Validator::CreateInterpreter(int* delegate_error_out,
int* delegated_kernels_out) {
if (!delegate_error_out || !delegated_kernels_out ||
!model_loader_->GetModel()) {
return kMinibenchmarkPreconditionNotMet;
}
if (interpreter_) {
return kMinibenchmarkSuccess;
}
*delegate_error_out = 0;
if (compute_settings_->tflite_settings() &&
compute_settings_->tflite_settings()->disable_default_delegates()) {
resolver_ = std::make_unique<
::tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates>();
} else {
resolver_ = std::make_unique<::tflite::ops::builtin::BuiltinOpResolver>();
}
resolver_->AddCustom("validation/call",
::tflite::acceleration::ops::Register_CALL(), 1);
resolver_->AddCustom(
"validation/decode_jpeg",
::tflite::acceleration::decode_jpeg_kernel::Register_DECODE_JPEG(), 1);
RegisterSelectedOps(resolver_.get());
tflite::InterpreterBuilder builder(*model_loader_->GetModel(), *resolver_);
if (delegate_ != nullptr) {
builder.AddDelegate(delegate_.get());
}
if (opaque_delegate_ != nullptr) {
builder.AddDelegate(opaque_delegate_.get());
}
TfLiteStatus status = builder(&interpreter_);
if (!interpreter_) {
*delegate_error_out =
delegate_plugin_ ? delegate_plugin_->GetDelegateErrno(delegate_.get())
: 0;
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Creating Interpreter failed with error code %d.", status);
return kMinibenchmarkInterpreterBuilderFailed;
}
main_model_ = interpreter_->subgraph(0);
validation_entrypoint_index_ = -1;
for (int i = 0; i < interpreter_->subgraphs_size(); i++) {
Subgraph* subgraph = interpreter_->subgraph(i);
if (subgraph->GetName() == kValidationGraphName) {
validation_entrypoint_index_ = i;
validation_entrypoint_ = subgraph;
} else if (subgraph->GetName() == "VALIDATION:metrics") {
has_accuracy_validation_ = true;
}
}
if (!validation_entrypoint_) {
return kMinibenchmarkValidationSubgraphNotFound;
}
if (validation_entrypoint_->inputs().empty()) {
return kMinibenchmarkValidationSubgraphHasTooFewInputs;
}
if (!HasTensorData(model_loader_.get(), *validation_entrypoint_,
validation_entrypoint_->inputs().back())) {
return kMinibenchmarkValidationInputMissing;
}
if (validation_entrypoint_->AllocateTensors() != kTfLiteOk) {
return kMinibenchmarkAllocateTensorsFailed;
}
absl::flat_hash_set<int> checked_node_ids;
int num_delegated_kernels = 0;
for (int i = 0; i < interpreter_->execution_plan().size(); ++i) {
int node_id = interpreter_->execution_plan()[i];
if (checked_node_ids.find(node_id) != checked_node_ids.end()) {
continue;
}
const TfLiteNode& node =
interpreter_->node_and_registration(node_id)->first;
if (node.delegate != nullptr) {
num_delegated_kernels++;
checked_node_ids.insert(node_id);
}
}
*delegated_kernels_out = num_delegated_kernels;
bool fully_delegated = (num_delegated_kernels == 1 &&
interpreter_->execution_plan().size() == 1);
if (!fully_delegated) {
TFLITE_LOG_PROD(TFLITE_LOG_WARNING,
"The model will be %s executed by the delegate.",
num_delegated_kernels > 0 ? "partially" : "not");
}
return kMinibenchmarkSuccess;
}
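// End-to-end validation: loads the delegate, builds the interpreter, runs the
// validation entrypoint (computing golden CPU outputs first when the model
// embeds accuracy metrics), and fills `results_out` with accuracy metrics or
// raw inference outputs plus delegate-preparation and inference latencies.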
Validator::Status Validator::RunValidation(Results* results_out) {
BenchmarkStage stage = BenchmarkStage_INITIALIZATION;
if (!results_out) {
return Validator::Status{kMinibenchmarkPreconditionNotMet, stage};
}
if (!model_loader_) {
return Validator::Status{kMinibenchmarkModelReadFailed, stage};
}
if (!model_loader_->Init()) {
return Validator::Status{kMinibenchmarkModelInitFailed, stage};
}
#define MB_RETURN_IF_ERROR(s, bs) \
{ \
MinibenchmarkStatus c = (s); \
if (c != kMinibenchmarkSuccess) return Validator::Status{c, (bs)}; \
}
int64_t delegate_load_start_time_us = ElapsedTimeMicros();
MB_RETURN_IF_ERROR(LoadOpaqueDelegate(), stage);
MB_RETURN_IF_ERROR(LoadDelegate(), stage);
MB_RETURN_IF_ERROR(CreateInterpreter(&results_out->delegate_error,
&results_out->delegated_kernels),
stage);
int64_t delegate_load_end_time_us = ElapsedTimeMicros();
ValidatorProfiler profiler;
stage = BenchmarkStage_INFERENCE;
if (has_accuracy_validation_) {
MB_RETURN_IF_ERROR(CheckGoldenOutput(results_out), stage);
}
main_model_->SetProfiler(&profiler, 0);
TfLiteStatus status = validation_entrypoint_->Invoke();
main_model_->SetProfiler(nullptr, 0);
if (status != kTfLiteOk) {
MB_RETURN_IF_ERROR(kMinibenchmarkInvokeFailed, stage);
}
int model_output_size = main_model_->outputs().size();
if (has_accuracy_validation_) {
const std::string kMetricPrefix = "metrics/";
const std::string kOk("ok");
for (int i = model_output_size;
i < validation_entrypoint_->outputs().size(); i++) {
TfLiteTensor* tensor =
validation_entrypoint_->tensor(validation_entrypoint_->outputs()[i]);
std::string name = tensor->name;
if (name.find(kMetricPrefix) != 0) {
continue;
}
name = name.substr(kMetricPrefix.size());
if (kOk == name) {
results_out->ok = *(tensor->data.b);
} else {
std::vector<float> values;
int count = 1;
for (int j = 0; j < tensor->dims->size; j++) {
count *= tensor->dims->data[j];
}
values.reserve(count);
for (int j = 0; j < count; j++) {
values.push_back(tensor->data.f[j]);
TFLITE_LOG_PROD(TFLITE_LOG_INFO, " %s %.4f", name.c_str(),
tensor->data.f[j]);
}
results_out->metrics[name] = values;
}
}
TFLITE_LOG_PROD(TFLITE_LOG_INFO, " accuracy: %s",
results_out->ok ? "ok" : "not ok");
} else {
results_out->actual_inference_output.clear();
results_out->actual_inference_output.reserve(model_output_size);
for (int i = 0; i < model_output_size; i++) {
AppendTensorDataToVector(
validation_entrypoint_->tensor(validation_entrypoint_->outputs()[i]),
results_out->actual_inference_output);
}
}
results_out->delegate_prep_time_us =
(delegate_load_end_time_us == -1 || delegate_load_start_time_us == -1)
? -1
: delegate_load_end_time_us - delegate_load_start_time_us;
TFLITE_LOG_PROD(TFLITE_LOG_INFO, " Delegate preparation took %d us",
static_cast<int>(results_out->delegate_prep_time_us));
for (const auto& e : profiler.events()) {
if (e.tag == "Invoke" && e.start_time_us != -1 && e.end_time_us != -1) {
results_out->execution_time_us.push_back(e.end_time_us - e.start_time_us);
TFLITE_LOG_PROD(TFLITE_LOG_INFO, " Inference took %d us",
static_cast<int>(e.end_time_us - e.start_time_us));
}
}
#undef MB_RETURN_IF_ERROR
return Validator::Status{kMinibenchmarkSuccess};
}
int64_t Validator::BootTimeMicros() { return ElapsedTimeMicros(); }
int64_t Validator::WallTimeMicros() {
struct timespec ts;
#ifndef _WIN32
int err = clock_gettime(CLOCK_REALTIME, &ts);
#else
int err = 1;
#endif
if (err) {
return -1;
}
return ts.tv_sec * kMicrosInSecond + ts.tv_nsec / kNanosInMicro;
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator.h"
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#if FLATBUFFERS_LITTLEENDIAN == 0
#include "tensorflow/lite/core/model_builder.h"
#endif
#include "tensorflow/compiler/mlir/lite/schema/mutable/schema_generated.h"
#include "tensorflow/lite/acceleration/configuration/configuration.pb.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/acceleration/configuration/proto_to_flatbuffer.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_validation_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test_helper.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/tools/model_loader.h"
namespace tflite {
namespace acceleration {
namespace {
using flatbuffers::FlatBufferBuilder;
constexpr int kOutputTensorSize = 1001;
class ValidatorTest : public ::testing::Test {
protected:
void SetUp() override {
std::string validation_model_path = MiniBenchmarkTestHelper::DumpToTempFile(
"mobilenet_quant_with_validation.tflite",
g_tflite_acceleration_embedded_mobilenet_validation_model,
g_tflite_acceleration_embedded_mobilenet_validation_model_len);
ASSERT_TRUE(!validation_model_path.empty());
validation_model_loader_ =
std::make_unique<tools::PathModelLoader>(validation_model_path);
std::string plain_model_path = MiniBenchmarkTestHelper::DumpToTempFile(
"mobilenet_quant.tflite",
g_tflite_acceleration_embedded_mobilenet_model,
g_tflite_acceleration_embedded_mobilenet_model_len);
ASSERT_TRUE(!plain_model_path.empty());
plain_model_loader_ =
std::make_unique<tools::PathModelLoader>(plain_model_path);
compute_settings_fbb_.Finish(CreateComputeSettings(compute_settings_fbb_));
default_compute_settings_ = flatbuffers::GetRoot<ComputeSettings>(
compute_settings_fbb_.GetBufferPointer());
}
std::unique_ptr<tools::ModelLoader> validation_model_loader_;
std::unique_ptr<tools::ModelLoader> plain_model_loader_;
FlatBufferBuilder compute_settings_fbb_;
const ComputeSettings* default_compute_settings_;
};
TEST_F(ValidatorTest, HappyPathOnCpuWithEmbeddedValidation) {
ASSERT_TRUE(validation_model_loader_->Init());
Validator validator(std::move(validation_model_loader_),
default_compute_settings_);
Validator::Results results;
Validator::Status validation_run = validator.RunValidation(&results);
EXPECT_EQ(validation_run.status, kMinibenchmarkSuccess);
EXPECT_EQ(validation_run.stage, BenchmarkStage_UNKNOWN);
EXPECT_TRUE(results.ok);
EXPECT_GE(results.metrics.size(), 0);
EXPECT_EQ(results.delegate_error, 0);
EXPECT_TRUE(results.actual_inference_output.empty());
}
TEST_F(ValidatorTest, HappyPathOnCpuWithCustomValidation) {
ASSERT_TRUE(plain_model_loader_->Init());
ASSERT_TRUE(validation_model_loader_->Init());
const SubGraph* main_model =
plain_model_loader_->GetModel()->GetModel()->subgraphs()->Get(0);
const int model_output_size = main_model->outputs()->size();
int model_input_byte_size = 1;
for (int shape_i :
*main_model->tensors()->Get(main_model->inputs()->Get(0))->shape()) {
model_input_byte_size *= shape_i;
}
int batch_size = 5;
FlatBufferBuilder model_with_input;
CustomValidationEmbedder embedder(
batch_size,
{std::vector<uint8_t>(batch_size * model_input_byte_size, 1)});
EXPECT_EQ(embedder.BuildModel(*plain_model_loader_->GetModel()->GetModel(),
model_with_input),
kMinibenchmarkSuccess);
std::string serialized_str(
reinterpret_cast<const char*>(model_with_input.GetBufferPointer()),
model_with_input.GetSize());
#if FLATBUFFERS_LITTLEENDIAN == 0
tflite::FlatBufferModel::ByteSwapSerializedModel(&serialized_str, true);
#endif
std::string model_path = MiniBenchmarkTestHelper::DumpToTempFile(
"mobilenet_quant_with_input.tflite",
reinterpret_cast<const unsigned char*>(serialized_str.c_str()),
serialized_str.size());
ASSERT_TRUE(!model_path.empty());
auto model_loader = std::make_unique<tools::PathModelLoader>(model_path);
Validator validator(std::move(model_loader), default_compute_settings_);
Validator::Results results;
Validator::Status validation_run = validator.RunValidation(&results);
EXPECT_EQ(validation_run.status, kMinibenchmarkSuccess);
EXPECT_EQ(validation_run.stage, BenchmarkStage_UNKNOWN);
EXPECT_FALSE(results.ok);
EXPECT_EQ(results.metrics.size(), 0);
EXPECT_EQ(results.delegate_error, 0);
EXPECT_EQ(results.actual_inference_output.size(), model_output_size);
EXPECT_EQ(results.actual_inference_output[0].size(),
batch_size * kOutputTensorSize);
}
TEST_F(ValidatorTest, DelegateNotSupported) {
proto::ComputeSettings settings_proto;
settings_proto.mutable_tflite_settings()->set_delegate(proto::CORE_ML);
flatbuffers::FlatBufferBuilder fbb;
const ComputeSettings* settings = ConvertFromProto(settings_proto, &fbb);
Validator validator(std::move(validation_model_loader_), settings);
Validator::Results results;
Validator::Status validation_run = validator.RunValidation(&results);
EXPECT_EQ(validation_run.status, kMinibenchmarkDelegateNotSupported);
EXPECT_EQ(validation_run.stage, BenchmarkStage_INITIALIZATION);
}
TEST_F(ValidatorTest, NoValidationSubgraph) {
Validator validator(std::move(plain_model_loader_),
default_compute_settings_);
Validator::Results results;
Validator::Status validation_run = validator.RunValidation(&results);
EXPECT_EQ(validation_run.status, kMinibenchmarkValidationSubgraphNotFound);
EXPECT_EQ(validation_run.stage, BenchmarkStage_INITIALIZATION);
}
TEST_F(ValidatorTest, NoValidationInputData) {
ASSERT_TRUE(plain_model_loader_->Init());
FlatBufferBuilder model_with_input;
CustomValidationEmbedder embedder(1, {{}});
EXPECT_EQ(embedder.BuildModel(*plain_model_loader_->GetModel()->GetModel(),
model_with_input),
kMinibenchmarkSuccess);
std::string model_path = MiniBenchmarkTestHelper::DumpToTempFile(
"mobilenet_quant_with_input.tflite", model_with_input.GetBufferPointer(),
model_with_input.GetSize());
ASSERT_TRUE(!model_path.empty());
auto model_loader = std::make_unique<tools::PathModelLoader>(model_path);
Validator validator(std::move(model_loader), default_compute_settings_);
Validator::Results results;
Validator::Status validation_run = validator.RunValidation(&results);
EXPECT_EQ(validation_run.status, kMinibenchmarkValidationInputMissing);
EXPECT_EQ(validation_run.stage, BenchmarkStage_INITIALIZATION);
}
TEST_F(ValidatorTest, InvalidModel) {
const std::string dump_path = MiniBenchmarkTestHelper::DumpToTempFile(
"foo.tflite", g_tflite_acceleration_embedded_mobilenet_validation_model,
g_tflite_acceleration_embedded_mobilenet_validation_model_len - 12000);
ASSERT_TRUE(!dump_path.empty());
Validator validator(std::make_unique<tools::PathModelLoader>(dump_path),
default_compute_settings_);
Validator::Results results;
Validator::Status validation_run = validator.RunValidation(&results);
EXPECT_EQ(validation_run.status, kMinibenchmarkModelInitFailed);
EXPECT_EQ(validation_run.stage, BenchmarkStage_INITIALIZATION);
}
TEST_F(ValidatorTest, EmptyModelLoader) {
Validator validator(nullptr, default_compute_settings_);
Validator::Results results;
Validator::Status validation_run = validator.RunValidation(&results);
EXPECT_EQ(validation_run.status, kMinibenchmarkModelReadFailed);
EXPECT_EQ(validation_run.stage, BenchmarkStage_INITIALIZATION);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/validator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/validator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ab1f52e5-859f-4781-a52a-c1677ca42465 | cpp | tensorflow/tensorflow | parallel_map_dataset_op | tensorflow/core/kernels/data/parallel_map_dataset_op.cc | tensorflow/core/kernels/data/parallel_map_dataset_op_test.cc | #include "tensorflow/core/kernels/data/parallel_map_dataset_op.h"
#include <cstddef>
#include <deque>
#include <functional>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/call_once.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/stats_utils.h"
#include "tensorflow/core/data/unbounded_thread_pool.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/stats_aggregator.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tsl/platform/logging.h"
namespace tensorflow {
namespace data {
constexpr const char* const ParallelMapDatasetOp::kDatasetType;
constexpr const char* const ParallelMapDatasetOp::kInputDataset;
constexpr const char* const ParallelMapDatasetOp::kOtherArguments;
constexpr const char* const
ParallelMapDatasetOp::kNumParallelCalls;
constexpr const char* const ParallelMapDatasetOp::kFunc;
constexpr const char* const ParallelMapDatasetOp::kTarguments;
constexpr const char* const ParallelMapDatasetOp::kOutputTypes;
constexpr const char* const ParallelMapDatasetOp::kOutputShapes;
constexpr const char* const
ParallelMapDatasetOp::kUseInterOpParallelism;
constexpr const char* const ParallelMapDatasetOp::kDeterministic;
constexpr const char* const ParallelMapDatasetOp::kSloppy;
constexpr const char* const
ParallelMapDatasetOp::kPreserveCardinality;
namespace {
constexpr char kParallelMapDatasetV1[] = "ParallelMapDataset";
constexpr char kParallelMapDatasetV2[] = "ParallelMapDatasetV2";
constexpr char kInvocationResults[] = "invocation_results";
constexpr char kSize[] = "size";
constexpr char kEndOfInput[] = "end_of_input";
constexpr char kErrorCode[] = "code";
constexpr char kErrorMessage[] = "error_message";
constexpr int kStatsReportingPeriodMillis = 1000;
constexpr int kUnboundedThreadpoolAutotuningFactor = 10;
}
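// Dataset that applies `captured_func_` to the elements of `input_` using up
// to `num_parallel_calls_` concurrent invocations (model::kAutotune lets the
// autotuner choose the parallelism).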
class ParallelMapDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input,
int64_t num_parallel_calls, const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes,
DeterminismPolicy deterministic,
std::unique_ptr<CapturedFunction> captured_func,
bool preserve_cardinality, bool use_unbounded_threadpool,
int op_version)
: Dataset(DatasetContext(ctx), input, num_parallel_calls, output_types,
output_shapes, deterministic, std::move(captured_func),
preserve_cardinality, use_unbounded_threadpool, op_version) {}
Dataset(DatasetContext dataset_context, const DatasetBase* input,
int64_t num_parallel_calls, const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes,
DeterminismPolicy deterministic,
std::unique_ptr<CapturedFunction> captured_func,
bool preserve_cardinality, bool use_unbounded_threadpool,
int op_version)
: DatasetBase(std::move(dataset_context)),
input_(input),
num_parallel_calls_(num_parallel_calls),
output_types_(output_types),
output_shapes_(output_shapes),
deterministic_(deterministic),
preserve_cardinality_(preserve_cardinality),
use_unbounded_threadpool_(use_unbounded_threadpool),
captured_func_(std::move(captured_func)),
op_version_(op_version) {
input_->Ref();
random_indexing_compatible_ = absl::OkStatus();
if (input_ != nullptr) {
random_indexing_compatible_ = input_->RandomIndexingCompatible();
}
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
name_utils::IteratorPrefixParams params;
params.op_version = op_version_;
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix, params)});
}
const DataTypeVector& output_dtypes() const override { return output_types_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.op_version = op_version_;
return name_utils::DatasetDebugString(ParallelMapDatasetOp::kDatasetType,
params);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
if (preserve_cardinality_) {
return input_->Cardinality(options);
} else {
return kUnknownCardinality;
}
}
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
absl::call_once(instantiated_captured_func_once_, [this, ctx] {
instantiated_captured_func_status_ = captured_func_->Instantiate(
InstantiateCapturedFunctionParams(ctx), &instantiated_captured_func_);
});
TF_RETURN_IF_ERROR(instantiated_captured_func_status_);
std::vector<Tensor> args;
TF_RETURN_IF_ERROR(input_->Get(ctx, index, &args));
return instantiated_captured_func_->RunInstantiated(args, out_tensors);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
TF_RETURN_IF_ERROR(captured_func_->CheckExternalState());
return input_->CheckExternalState();
}
absl::Status RandomIndexingCompatible() const override {
return random_indexing_compatible_;
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
std::vector<Node*> other_arguments;
DataTypeVector other_arguments_types;
TF_RETURN_IF_ERROR(captured_func_->AddToGraph(ctx, b, &other_arguments,
&other_arguments_types));
Node* num_parallel_calls = nullptr;
if (op_version_ == 1) {
TF_RETURN_IF_ERROR(b->AddScalar(static_cast<int32>(num_parallel_calls_),
&num_parallel_calls));
} else {
TF_RETURN_IF_ERROR(
b->AddScalar(num_parallel_calls_, &num_parallel_calls));
}
std::vector<std::pair<StringPiece, AttrValue>> attrs;
AttrValue f_attr;
b->BuildAttrValue(captured_func_->func(), &f_attr);
attrs.emplace_back(kFunc, f_attr);
AttrValue other_arguments_types_attr;
b->BuildAttrValue(other_arguments_types, &other_arguments_types_attr);
attrs.emplace_back(kTarguments, other_arguments_types_attr);
AttrValue use_inter_op_parallelism_attr;
b->BuildAttrValue(captured_func_->use_inter_op_parallelism(),
&use_inter_op_parallelism_attr);
attrs.emplace_back(kUseInterOpParallelism, use_inter_op_parallelism_attr);
if (op_version_ == 1) {
AttrValue sloppy_attr;
b->BuildAttrValue(deterministic_.IsNondeterministic(), &sloppy_attr);
attrs.emplace_back(kSloppy, sloppy_attr);
}
if (op_version_ == 2) {
AttrValue deterministic_attr;
b->BuildAttrValue(deterministic_.String(), &deterministic_attr);
attrs.emplace_back(kDeterministic, deterministic_attr);
}
AttrValue preserve_cardinality_attr;
b->BuildAttrValue(preserve_cardinality_, &preserve_cardinality_attr);
attrs.emplace_back(kPreserveCardinality, preserve_cardinality_attr);
AttrValue use_unbounded_threadpool_attr;
b->BuildAttrValue(use_unbounded_threadpool_,
&use_unbounded_threadpool_attr);
attrs.emplace_back(kUseUnboundedThreadpool, use_unbounded_threadpool_attr);
TF_RETURN_IF_ERROR(b->AddDataset(
this,
{std::make_pair(0, input_graph_node),
std::make_pair(2, num_parallel_calls)},
{std::make_pair(1, other_arguments)},
attrs, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
mu_(std::make_shared<mutex>()),
cond_var_(std::make_shared<condition_variable>()),
num_parallel_calls_(std::make_shared<model::SharedState>(
params.dataset->num_parallel_calls_, mu_, cond_var_)),
deterministic_(params.dataset->deterministic_.IsDeterministic() ||
params.dataset->deterministic_.IsDefault()),
preserve_cardinality_(params.dataset->preserve_cardinality_),
use_unbounded_threadpool_(params.dataset->use_unbounded_threadpool_),
autotune_(params.dataset->num_parallel_calls_ == model::kAutotune) {}
~Iterator() override {
CancelThreads(true);
input_impl_.reset();
if (deregister_fn_) deregister_fn_();
}
bool SymbolicCheckpointCompatible() const override {
return deterministic_;
}
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(*mu_);
interleave_depth_ = ctx->interleave_depth();
if (use_unbounded_threadpool_) {
unbounded_thread_pool_ = std::make_unique<UnboundedThreadPool>(
ctx->env(), "tf_data_map_unbounded_thread_pool");
}
if (num_parallel_calls_->value == model::kAutotune) {
num_parallel_calls_->value = GetAutotuneDefaultParallelism(ctx);
}
cancellation_manager_ = std::make_unique<CancellationManager>();
TF_RETURN_IF_ERROR(RegisterCancellationCallback(
ctx->cancellation_manager(),
[this]() { CancelThreads(false); }, &deregister_fn_));
auto params = std::make_unique<IteratorContext::Params>(ctx);
params->cancellation_manager = cancellation_manager_.get();
auto iter_ctx = std::make_unique<IteratorContext>(*params);
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
iter_ctx.get(), this, prefix(), &input_impl_));
ctx->MergeCheckpoint(iter_ctx->checkpoint());
TF_RETURN_IF_ERROR(dataset()->captured_func_->Instantiate(
ctx, &instantiated_captured_func_));
if (ctx->warm_start() && !ctx->is_restoring()) {
EnsureThreadsStarted(ctx);
}
return absl::OkStatus();
}
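    // Returns the next invocation result, blocking until one is available in
    // the required order (FIFO when deterministic, first-completed otherwise).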
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
std::shared_ptr<InvocationResult> result;
{
mutex_lock l(*mu_);
EnsureThreadsStarted(ctx);
while (ShouldWait(&result)) {
RecordStop(ctx);
cond_var_->wait(l);
RecordStart(ctx);
}
if (cancelled_) {
return errors::Cancelled("Iterator was cancelled");
}
}
RecordStop(ctx);
result->notification.WaitForNotification();
RecordStart(ctx);
tsl::profiler::TraceMe traceme([&] {
return tsl::profiler::TraceMeEncode("ParallelMapConsume",
{{"element_id", result->uid}});
});
return ProcessResult(ctx, result, out_tensors, end_of_sequence);
}
protected:
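  // Models this iterator as an async known-ratio node whose "parallelism"
  // parameter is tunable between 1 and the runner threadpool size, so the
  // autotuner can choose num_parallel_calls when it is set to kAutotune.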
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
std::shared_ptr<model::Parameter> parameter;
double max_parallelism_value = ctx->runner_threadpool_size();
if (use_unbounded_threadpool_) {
max_parallelism_value *= kUnboundedThreadpoolAutotuningFactor;
}
if (num_parallel_calls_ &&
dataset()->num_parallel_calls_ == model::kAutotune) {
parameter = model::MakeParameter(
"parallelism", num_parallel_calls_, 1,
max_parallelism_value,
GetAutotuneDefaultParallelism(ctx));
} else {
parameter =
model::MakeParameter("parallelism", num_parallel_calls_, 1,
max_parallelism_value);
}
std::optional<int64_t> estimated_element_size =
dataset()->GetEstimatedElementSize();
if (!estimated_element_size) {
VLOG(2) << absl::StrFormat(
"Cannot estimate the size of the output tensor because the "
"output shape of node %s(id:%d) is only partially known.",
args.name, args.id);
}
return model::MakeAsyncKnownRatioNode(
std::move(args),
1, {std::move(parameter)},
false, estimated_element_size);
}
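  // Serializes iterator state: waits for all in-flight calls to drain, then
  // writes out each buffered invocation result (status, return tensors,
  // end-of-input flag). Symbolic checkpoints skip this entirely.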
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(ctx->HandleCheckExternalStateStatus(
dataset()->captured_func_->CheckExternalState()));
if (ctx->symbolic_checkpoint()) {
return absl::OkStatus();
}
mutex_lock l(*mu_);
while (num_calls_ > 0) {
cond_var_->wait(l);
}
if (num_calls_ != 0) {
return errors::FailedPrecondition(
"Unexpected outstanding calls encountered.");
}
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), absl::StrCat(kInvocationResults, "_", kSize),
invocation_results_.size()));
for (size_t i = 0; i < invocation_results_.size(); i++) {
const auto& result = *(invocation_results_[i]);
std::string element_prefix =
absl::StrCat(prefix(), "_", kInvocationResults, "[", i, "]");
TF_RETURN_IF_ERROR(
WriteStatusLocked(writer, element_prefix, result.status));
TF_RETURN_IF_ERROR(writer->WriteScalar(element_prefix, kSize,
result.return_values.size()));
for (size_t j = 0; j < result.return_values.size(); j++) {
TF_RETURN_IF_ERROR(writer->WriteTensor(element_prefix,
absl::StrCat("[", j, "]"),
result.return_values[j]));
}
TF_RETURN_IF_ERROR(
writer->WriteScalar(element_prefix, kEndOfInput,
static_cast<int64_t>(result.end_of_input)));
}
return absl::OkStatus();
}
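  // Mirror of SaveInternal: rebuilds the buffered invocation results and
  // notifies each one so consumers see them as already completed.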
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(*mu_);
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
DCHECK(invocation_results_.empty());
if (ctx->symbolic_checkpoint()) {
return absl::OkStatus();
}
int64_t invocation_results_size;
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix(), absl::StrCat(kInvocationResults, "_", kSize),
&invocation_results_size));
for (size_t i = 0; i < invocation_results_size; i++) {
invocation_results_.push_back(std::make_shared<InvocationResult>(ctx));
auto& result = *invocation_results_.back();
std::string element_prefix =
absl::StrCat(prefix(), "_", kInvocationResults, "[", i, "]");
TF_RETURN_IF_ERROR(
ReadStatusLocked(reader, element_prefix, &result.status));
size_t num_return_values;
{
int64_t size;
TF_RETURN_IF_ERROR(reader->ReadScalar(element_prefix, kSize, &size));
num_return_values = static_cast<size_t>(size);
if (num_return_values != size) {
return errors::InvalidArgument(
element_prefix, ",", kSize, ": ", size,
" is not a valid value of type size_t.");
}
}
result.return_values.reserve(num_return_values);
for (size_t j = 0; j < num_return_values; j++) {
result.return_values.emplace_back();
TF_RETURN_IF_ERROR(reader->ReadTensor(ctx->flr(), element_prefix,
absl::StrCat("[", j, "]"),
&result.return_values.back()));
}
int64_t end_of_input;
TF_RETURN_IF_ERROR(
reader->ReadScalar(element_prefix, kEndOfInput, &end_of_input));
result.end_of_input = static_cast<bool>(end_of_input);
RecordBufferEnqueue(ctx, result.return_values);
result.notification.Notify();
}
return absl::OkStatus();
}
TraceMeMetadata GetTraceMeMetadata() const override {
int64_t parallelism = -1;
if (mu_->try_lock()) {
parallelism = num_parallel_calls_->value;
mu_->unlock();
}
data::TraceMeMetadata result;
result.push_back(
std::make_pair("autotune", autotune_ ? "true" : "false"));
result.push_back(
std::make_pair("deterministic", deterministic_ ? "true" : "false"));
result.push_back(
std::make_pair("use_unbounded_threadpool",
use_unbounded_threadpool_ ? "true" : "false"));
result.push_back(std::make_pair(
"parallelism",
parallelism == -1
? kTraceInfoUnavailable
: strings::Printf("%lld", static_cast<long long>(parallelism))));
result.push_back(std::make_pair(
"interleave_depth",
strings::Printf("%lld", static_cast<long long>(interleave_depth_))));
return result;
}
private:
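  // Per-element bookkeeping for one invocation of the captured function.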
struct InvocationResult {
explicit InvocationResult(IteratorContext* ctx)
: uid(tensorflow::EnvTime::NowNanos()),
checkpoint(MemoryCheckpoint{ctx->id_registry()}) {}
Notification notification;
Status status;
std::vector<Tensor> return_values;
bool end_of_input = false;
const int64_t uid;
MemoryCheckpoint checkpoint;
};
void CancelThreads(bool wait) TF_LOCKS_EXCLUDED(mu_) {
cancellation_manager_->StartCancel();
mutex_lock l(*mu_);
cancelled_ = true;
cond_var_->notify_all();
while (wait && num_calls_ > 0) {
cond_var_->wait(l);
}
}
void EnsureThreadsStarted(IteratorContext* ctx)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
if (!runner_thread_) {
auto ctx_copy = std::make_shared<IteratorContext>(*ctx);
runner_thread_ = ctx->StartThread(
"tf_data_parallel_map",
std::bind(&Iterator::RunnerThread, this, ctx_copy));
if (ctx->stats_aggregator()) {
stats_thread_ = ctx->StartThread(
"tf_data_parallel_map_stats",
std::bind(&Iterator::StatsThread, this, ctx_copy));
}
}
}
void CallCompleted(const std::shared_ptr<IteratorContext>& ctx,
const std::shared_ptr<InvocationResult>& result)
TF_LOCKS_EXCLUDED(*mu_) {
mutex_lock l(*mu_);
num_calls_--;
result->notification.Notify();
cond_var_->notify_all();
}
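  // Fetches one input element and dispatches the captured function through
  // one of three paths: the unbounded threadpool, the function's own
  // inter-op parallelism, or the iterator context's runner.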
void CallFunction(const std::shared_ptr<IteratorContext>& ctx,
const std::shared_ptr<InvocationResult>& result)
TF_LOCKS_EXCLUDED(*mu_) {
tsl::profiler::TraceMe traceme([&] {
return tsl::profiler::TraceMeEncode("ParallelMapProduce",
{{"element_id", result->uid}});
});
std::vector<Tensor> input_element;
result->status = input_impl_->GetNext(ctx.get(), &input_element,
&result->end_of_input);
result->checkpoint.Merge(ctx->checkpoint());
if (result->end_of_input || !result->status.ok()) {
CallCompleted(ctx, result);
return;
}
auto done = [this, ctx, result](Status status) {
if (!status.ok()) {
result->status = AddErrorContext(status);
}
RecordBufferEnqueue(ctx.get(), result->return_values);
CallCompleted(ctx, result);
};
if (use_unbounded_threadpool_) {
auto runner_fn = [this](std::function<void()> fn) {
this->unbounded_thread_pool_->Schedule(fn);
};
instantiated_captured_func_->RunAsync(
runner_fn, ctx->cancellation_manager(), ctx->collective_executor(),
std::move(input_element), &result->return_values, done,
model_node());
} else if (dataset()->captured_func_->use_inter_op_parallelism()) {
instantiated_captured_func_->RunAsync(
ctx.get(), std::move(input_element), &result->return_values,
std::move(done), model_node());
} else {
auto fn = std::bind(
[this, ctx, result](std::vector<Tensor> input_element) {
return instantiated_captured_func_->Run(
ctx.get(), std::move(input_element), &result->return_values,
model_node());
},
std::move(input_element));
(*ctx->runner())(
[this, ctx, fn = std::move(fn), done = std::move(done)]() {
Status s;
if (IsRecording(ctx.get())) {
s = fn();
} else {
RecordStart(ctx.get());
s = fn();
RecordStop(ctx.get());
}
done(s);
});
}
}
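  // Converts a completed invocation into GetNext output. An OutOfRange
  // error raised by the user function becomes InvalidArgument when
  // preserve_cardinality is set, since it would otherwise silently shorten
  // the dataset.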
Status ProcessResult(IteratorContext* ctx,
const std::shared_ptr<InvocationResult>& result,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) TF_LOCKS_EXCLUDED(*mu_) {
ctx->MergeCheckpoint(&result->checkpoint);
if (!result->end_of_input && result->status.ok()) {
*out_tensors = std::move(result->return_values);
RecordBufferDequeue(ctx, *out_tensors);
*end_of_sequence = false;
return absl::OkStatus();
}
if (errors::IsOutOfRange(result->status)) {
if (preserve_cardinality_) {
return errors::InvalidArgument(
"Function invocation produced OutOfRangeError: ",
result->status.message());
} else {
*end_of_sequence = true;
return absl::OkStatus();
}
}
*end_of_sequence = result->end_of_input;
return result->status;
}
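  // Background thread that keeps up to num_parallel_calls invocations in
  // flight, blocking while the result buffer is full or the call budget is
  // exhausted.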
void RunnerThread(const std::shared_ptr<IteratorContext>& ctx)
TF_LOCKS_EXCLUDED(*mu_) {
RecordStart(ctx.get());
auto cleanup = gtl::MakeCleanup([this, ctx] { RecordStop(ctx.get()); });
std::vector<std::shared_ptr<InvocationResult>> new_calls;
{
tf_shared_lock l(*mu_);
new_calls.reserve(num_parallel_calls_->value);
}
auto busy = [this]() TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) -> bool {
int64_t num_parallel_calls = num_parallel_calls_->value;
return num_calls_ >= num_parallel_calls ||
invocation_results_.size() >= num_parallel_calls;
};
while (true) {
{
mutex_lock l(*mu_);
while (!cancelled_ && busy()) {
RecordStop(ctx.get());
cond_var_->wait(l);
RecordStart(ctx.get());
}
if (cancelled_) {
return;
}
while (!busy()) {
invocation_results_.push_back(
std::make_shared<InvocationResult>(ctx.get()));
new_calls.push_back(invocation_results_.back());
num_calls_++;
}
cond_var_->notify_all();
}
for (const auto& call : new_calls) {
CallFunction(ctx, call);
}
new_calls.clear();
}
}
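  // Returns true if GetNext must keep waiting. In non-deterministic mode
  // any notified result (other than a non-leading end-of-input) may be
  // handed out; in deterministic mode results are consumed strictly in
  // order.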
bool ShouldWait(std::shared_ptr<InvocationResult>* result)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
if (cancelled_) {
return false;
}
if (!deterministic_) {
for (auto it = invocation_results_.begin();
it != invocation_results_.end(); ++it) {
if ((*it)->notification.HasBeenNotified() &&
(it == invocation_results_.begin() || !(*it)->end_of_input)) {
std::swap(*result, *it);
invocation_results_.erase(it);
cond_var_->notify_all();
return false;
}
}
} else if (!invocation_results_.empty()) {
std::swap(*result, invocation_results_.front());
invocation_results_.pop_front();
cond_var_->notify_all();
return false;
}
return true;
}
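  // Periodically publishes thread utilization (num_calls / parallelism) to
  // the stats aggregator.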
void StatsThread(const std::shared_ptr<IteratorContext>& ctx)
TF_LOCKS_EXCLUDED(*mu_) {
for (int64_t step = 0;; ++step) {
int num_calls;
int num_parallel_calls;
{
mutex_lock l(*mu_);
if (step != 0 && !cancelled_) {
cond_var_->wait_for(
l, std::chrono::milliseconds(kStatsReportingPeriodMillis));
}
if (cancelled_) {
return;
}
num_calls = num_calls_;
num_parallel_calls = num_parallel_calls_->value;
}
if (num_parallel_calls == 0) {
num_parallel_calls = 1;
}
ctx->stats_aggregator()->AddScalar(
stats_utils::ThreadUtilizationScalarName(dataset()->node_name()),
static_cast<float>(num_calls) /
static_cast<float>(num_parallel_calls),
step);
}
}
Status WriteStatusLocked(IteratorStateWriter* writer,
const std::string& prefix, const Status& status)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix, absl::StrCat("_", kErrorCode),
static_cast<int64_t>(status.code())));
if (!status.ok()) {
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix,
absl::StrCat("_", kErrorMessage),
std::string(status.message())));
}
return absl::OkStatus();
}
Status ReadStatusLocked(IteratorStateReader* reader,
const std::string& prefix, Status* status)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
int64_t code_int;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix, absl::StrCat("_", kErrorCode), &code_int));
absl::StatusCode code = static_cast<absl::StatusCode>(code_int);
if (code != absl::StatusCode::kOk) {
tstring error_message;
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix, absl::StrCat("_", kErrorMessage), &error_message));
*status = Status(code, error_message);
} else {
*status = absl::OkStatus();
}
return absl::OkStatus();
}
const std::shared_ptr<mutex> mu_;
const std::shared_ptr<condition_variable> cond_var_;
const std::shared_ptr<model::SharedState> num_parallel_calls_;
const bool deterministic_;
const bool preserve_cardinality_;
const bool use_unbounded_threadpool_;
const bool autotune_;
int64_t num_calls_ TF_GUARDED_BY(*mu_) = 0;
std::unique_ptr<CancellationManager> cancellation_manager_;
std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func_;
std::unique_ptr<IteratorBase> input_impl_;
std::deque<std::shared_ptr<InvocationResult>> invocation_results_
TF_GUARDED_BY(*mu_);
bool cancelled_ TF_GUARDED_BY(*mu_) = false;
std::unique_ptr<Thread> runner_thread_ TF_GUARDED_BY(*mu_);
std::unique_ptr<Thread> stats_thread_ TF_GUARDED_BY(*mu_);
std::unique_ptr<UnboundedThreadPool> unbounded_thread_pool_;
std::function<void()> deregister_fn_;
    int64_t interleave_depth_ = -1;
};
const DatasetBase* const input_;
const int64_t num_parallel_calls_;
const DataTypeVector output_types_;
const std::vector<PartialTensorShape> output_shapes_;
const DeterminismPolicy deterministic_;
const bool preserve_cardinality_;
const bool use_unbounded_threadpool_;
const std::unique_ptr<CapturedFunction> captured_func_;
const int op_version_;
mutable absl::once_flag instantiated_captured_func_once_;
mutable absl::Status instantiated_captured_func_status_;
mutable std::unique_ptr<InstantiatedCapturedFunction>
instantiated_captured_func_;
absl::Status random_indexing_compatible_;
};
ParallelMapDatasetOp::ParallelMapDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx), op_version_(ctx->HasAttr(kSloppy) ? 1 : 2) {
FunctionMetadata::Params params;
OP_REQUIRES_OK(ctx, ctx->GetAttr(kUseInterOpParallelism,
¶ms.use_inter_op_parallelism));
OP_REQUIRES_OK(ctx,
FunctionMetadata::Create(ctx, kFunc, params, &func_metadata_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
if (op_version_ == 1) {
bool sloppy;
OP_REQUIRES_OK(ctx, ctx->GetAttr(kSloppy, &sloppy));
if (sloppy) {
deterministic_ =
DeterminismPolicy(DeterminismPolicy::Type::kNondeterministic);
} else {
deterministic_ = DeterminismPolicy(DeterminismPolicy::Type::kDefault);
}
use_unbounded_threadpool_ = false;
}
if (op_version_ == 2) {
std::string deterministic;
OP_REQUIRES_OK(ctx, ctx->GetAttr(kDeterministic, &deterministic));
OP_REQUIRES_OK(
ctx, DeterminismPolicy::FromString(deterministic, &deterministic_));
OP_REQUIRES_OK(
ctx, ctx->GetAttr(kUseUnboundedThreadpool, &use_unbounded_threadpool_));
}
OP_REQUIRES_OK(ctx,
ctx->GetAttr(kPreserveCardinality, &preserve_cardinality_));
}
void ParallelMapDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t num_parallel_calls;
if (op_version_ == 1) {
int32_t parallel_calls;
OP_REQUIRES_OK(
ctx, ParseScalarArgument(ctx, kNumParallelCalls, ¶llel_calls));
num_parallel_calls = parallel_calls;
}
if (op_version_ == 2) {
OP_REQUIRES_OK(
ctx, ParseScalarArgument(ctx, kNumParallelCalls, &num_parallel_calls));
}
OP_REQUIRES(
ctx, num_parallel_calls > 0 || num_parallel_calls == model::kAutotune,
errors::InvalidArgument("num_parallel_calls must be greater than zero."));
std::unique_ptr<CapturedFunction> captured_func;
OP_REQUIRES_OK(ctx,
CapturedFunction::Create(ctx, func_metadata_, kOtherArguments,
&captured_func));
if (num_parallel_calls == model::kAutotune) {
metrics::RecordTFDataAutotune(kDatasetType);
}
*output = new Dataset(ctx, input, num_parallel_calls, output_types_,
output_shapes_, deterministic_,
std::move(captured_func), preserve_cardinality_,
use_unbounded_threadpool_, op_version_);
}
std::unique_ptr<DatasetBase> MakeDataServiceUncompressDataset(
DatasetBase* input, std::unique_ptr<CapturedFunction> captured_function,
const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes) {
DatasetContext::Params param;
param.type_string = kParallelMapDatasetV2;
param.node_name = kParallelMapDatasetV2;
return std::make_unique<ParallelMapDatasetOp::Dataset>(
DatasetContext(std::move(param)), input,
model::kAutotune, output_types, output_shapes,
DeterminismPolicy(DeterminismPolicy::Type::kDefault),
      std::move(captured_function),
      /*preserve_cardinality=*/true,
      /*use_unbounded_threadpool=*/false, /*op_version=*/2);
}
namespace {
REGISTER_KERNEL_BUILDER(Name(kParallelMapDatasetV1).Device(DEVICE_CPU),
ParallelMapDatasetOp);
REGISTER_KERNEL_BUILDER(Name(kParallelMapDatasetV2).Device(DEVICE_CPU),
ParallelMapDatasetOp);
REGISTER_INPUT_COLOCATION_EXEMPTION(kParallelMapDatasetV1);
REGISTER_INPUT_COLOCATION_EXEMPTION(kParallelMapDatasetV2);
}
}
} | #include "tensorflow/core/kernels/data/parallel_map_dataset_op.h"
#include <gtest/gtest.h>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/tensor_shape.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "parallel_map_dataset";
constexpr int kOpVersion = 2;
class ParallelMapDatasetParams : public DatasetParams {
public:
template <typename T>
ParallelMapDatasetParams(
T input_dataset_params, std::vector<Tensor> other_arguments,
int num_parallel_calls, FunctionDefHelper::AttrValueWrapper func,
std::vector<FunctionDef> func_lib, DataTypeVector type_arguments,
const DataTypeVector& output_dtypes,
const std::vector<PartialTensorShape>& output_shapes,
bool use_inter_op_parallelism, const std::string& deterministic,
bool preserve_cardinality, string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
other_arguments_(std::move(other_arguments)),
num_parallel_calls_(num_parallel_calls),
func_(std::move(func)),
func_lib_(std::move(func_lib)),
type_arguments_(std::move(type_arguments)),
use_inter_op_parallelism_(use_inter_op_parallelism),
deterministic_(deterministic),
preserve_cardinality_(preserve_cardinality) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
op_version_ = kOpVersion;
name_utils::IteratorPrefixParams params;
params.op_version = op_version_;
iterator_prefix_ = name_utils::IteratorPrefix(
input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix(), params);
}
std::vector<Tensor> GetInputTensors() const override {
auto input_tensors = other_arguments_;
input_tensors.emplace_back(
CreateTensor<int64_t>(TensorShape({}), {num_parallel_calls_}));
return input_tensors;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->emplace_back(ParallelMapDatasetOp::kInputDataset);
for (int i = 0; i < other_arguments_.size(); ++i) {
input_names->emplace_back(
absl::StrCat(ParallelMapDatasetOp::kOtherArguments, "_", i));
}
input_names->emplace_back(ParallelMapDatasetOp::kNumParallelCalls);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{"f", func_},
{"Targuments", type_arguments_},
{"output_shapes", output_shapes_},
{"output_types", output_dtypes_},
{"use_inter_op_parallelism", use_inter_op_parallelism_},
{"deterministic", deterministic_},
{"preserve_cardinality", preserve_cardinality_},
{"metadata", ""}};
return absl::OkStatus();
}
string dataset_type() const override {
return ParallelMapDatasetOp::kDatasetType;
}
std::vector<FunctionDef> func_lib() const override { return func_lib_; }
private:
std::vector<Tensor> other_arguments_;
int num_parallel_calls_;
FunctionDefHelper::AttrValueWrapper func_;
std::vector<FunctionDef> func_lib_;
DataTypeVector type_arguments_;
bool use_inter_op_parallelism_;
std::string deterministic_;
bool preserve_cardinality_;
};
class ParallelMapDatasetOpTest : public DatasetOpsTestBase {};
FunctionDefHelper::AttrValueWrapper MapFunc(const string& func_name,
const DataType& dtype) {
return FunctionDefHelper::FunctionRef(func_name, {{"T", dtype}});
}
ParallelMapDatasetParams ParallelMapDatasetParams1() {
return ParallelMapDatasetParams(
      RangeDatasetParams(0, 10, 3),
      /*other_arguments=*/{},
      /*num_parallel_calls=*/1,
      /*func=*/MapFunc("XTimesTwo", DT_INT64),
      /*func_lib=*/{test::function::XTimesTwo()},
      /*type_arguments=*/{},
      /*output_dtypes=*/{DT_INT64},
      /*output_shapes=*/{PartialTensorShape({})},
      /*use_inter_op_parallelism=*/false,
      /*deterministic=*/DeterminismPolicy::kDeterministic,
      /*preserve_cardinality=*/false,
kNodeName);
}
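// The factories below follow the same positional parameter order as
// ParallelMapDatasetParams1() above, varying the parallelism, the mapped
// function, determinism, and cardinality preservation.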
ParallelMapDatasetParams ParallelMapDatasetParams2() {
return ParallelMapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
2,
MapFunc("XTimesTwo", DT_INT64),
{test::function::XTimesTwo()},
{},
{DT_INT64},
{PartialTensorShape({})},
true,
DeterminismPolicy::kNondeterministic,
true,
kNodeName);
}
ParallelMapDatasetParams ParallelMapDatasetParams3() {
return ParallelMapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
3,
MapFunc("XTimesFour", DT_INT64),
{test::function::XTimesTwo(), test::function::XTimesFour()},
{},
{DT_INT64},
{PartialTensorShape({})},
true,
DeterminismPolicy::kDeterministic,
false,
kNodeName);
}
ParallelMapDatasetParams ParallelMapDatasetParams4() {
return ParallelMapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
4,
MapFunc("XTimesTwo", DT_INT64),
{test::function::XTimesTwo()},
{},
{DT_INT64},
{PartialTensorShape({})},
false,
DeterminismPolicy::kDeterministic,
false,
kNodeName);
}
ParallelMapDatasetParams ParallelMapDatasetParams5() {
return ParallelMapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
model::kAutotune,
MapFunc("XTimesFour", DT_INT64),
{test::function::XTimesTwo(), test::function::XTimesFour()},
{},
{DT_INT64},
{PartialTensorShape({})},
true,
DeterminismPolicy::kNondeterministic,
true,
kNodeName);
}
ParallelMapDatasetParams ParallelMapDatasetParams6() {
return ParallelMapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
4,
MapFunc("XTimesFour", DT_INT64),
{test::function::XTimesTwo(), test::function::XTimesFour()},
{},
{DT_INT64},
{PartialTensorShape({})},
true,
DeterminismPolicy::kDeterministic,
false,
kNodeName);
}
ParallelMapDatasetParams ParallelMapDatasetParams7() {
return ParallelMapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
2,
MapFunc("XTimesFour", DT_INT64),
{test::function::XTimesTwo(), test::function::XTimesFour()},
{},
{DT_INT64},
{PartialTensorShape({})},
false,
DeterminismPolicy::kDeterministic,
false,
kNodeName);
}
ParallelMapDatasetParams ParallelMapDatasetParams8() {
return ParallelMapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
model::kAutotune,
MapFunc("XTimesFour", DT_INT64),
{test::function::XTimesTwo(), test::function::XTimesFour()},
{},
{DT_INT64},
{PartialTensorShape({})},
false,
DeterminismPolicy::kNondeterministic,
true,
kNodeName);
}
ParallelMapDatasetParams ParallelMapDatasetParams9() {
return ParallelMapDatasetParams(
BatchDatasetParams(RangeDatasetParams(0, 4, 1),
3,
false,
false,
{DT_INT64},
{PartialTensorShape({-1})},
"batch_dataset"),
{},
1,
MapFunc("XTimesTwo", DT_INT64),
{test::function::XTimesTwo()},
{},
{DT_INT64},
{PartialTensorShape({-1})},
false,
DeterminismPolicy::kDeterministic,
false,
kNodeName);
}
ParallelMapDatasetParams ParallelMapDatasetParamsWithInvalidNumParallelCalls() {
return ParallelMapDatasetParams(
RangeDatasetParams(0, 10, 3),
{},
      /*num_parallel_calls=*/-4,
MapFunc("XTimesTwo", DT_INT64),
{test::function::XTimesTwo()},
{},
{DT_INT64},
{PartialTensorShape({})},
true,
DeterminismPolicy::kNondeterministic,
true,
kNodeName);
}
std::vector<GetNextTestCase<ParallelMapDatasetParams>> GetNextTestCases() {
return {{ParallelMapDatasetParams1(),
CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}),
true},
{ParallelMapDatasetParams2(),
CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}),
false},
{ParallelMapDatasetParams3(),
CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}),
true},
{ParallelMapDatasetParams4(),
CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}),
true},
{ParallelMapDatasetParams5(),
CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}),
false},
{
ParallelMapDatasetParams6(),
CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}),
true},
{
ParallelMapDatasetParams9(),
{CreateTensor<int64_t>(TensorShape{3}, {0, 2, 4}),
CreateTensor<int64_t>(TensorShape{1}, {6})},
true}};
}
ITERATOR_GET_NEXT_TEST_P(ParallelMapDatasetOpTest, ParallelMapDatasetParams,
GetNextTestCases())
TEST_F(ParallelMapDatasetOpTest, DatasetNodeName) {
auto dataset_params = ParallelMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(ParallelMapDatasetOpTest, DatasetTypeString) {
auto dataset_params = ParallelMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::OpNameParams params;
params.op_version = dataset_params.op_version();
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(ParallelMapDatasetOp::kDatasetType, params)));
}
TEST_F(ParallelMapDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = ParallelMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(ParallelMapDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = ParallelMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
TEST_F(ParallelMapDatasetOpTest, DatasetElementSizeHasValue) {
auto dataset_params = ParallelMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
auto element_size = dataset_->GetEstimatedElementSize();
ASSERT_TRUE(element_size.has_value());
EXPECT_GT(element_size.value(), 0);
}
TEST_F(ParallelMapDatasetOpTest, DatasetElementSizeNoValue) {
auto dataset_params = ParallelMapDatasetParams9();
TF_ASSERT_OK(Initialize(dataset_params));
EXPECT_FALSE(dataset_->GetEstimatedElementSize().has_value());
}
std::vector<CardinalityTestCase<ParallelMapDatasetParams>>
CardinalityTestCases() {
return {{ParallelMapDatasetParams1(),
kUnknownCardinality},
{ParallelMapDatasetParams2(),
4},
{ParallelMapDatasetParams3(),
kUnknownCardinality},
{ParallelMapDatasetParams4(),
kUnknownCardinality},
{ParallelMapDatasetParams5(),
4},
{ParallelMapDatasetParams6(),
kUnknownCardinality}};
}
DATASET_CARDINALITY_TEST_P(ParallelMapDatasetOpTest, ParallelMapDatasetParams,
CardinalityTestCases())
TEST_F(ParallelMapDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = ParallelMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(ParallelMapDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = ParallelMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
TEST_F(ParallelMapDatasetOpTest, IteratorPrefix) {
auto dataset_params = ParallelMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::IteratorPrefixParams params;
params.op_version = dataset_params.op_version();
TF_ASSERT_OK(CheckIteratorPrefix(
name_utils::IteratorPrefix(ParallelMapDatasetOp::kDatasetType,
dataset_params.iterator_prefix(), params)));
}
std::vector<IteratorSaveAndRestoreTestCase<ParallelMapDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{ParallelMapDatasetParams1(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}),
true},
{ParallelMapDatasetParams2(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}),
false},
{ParallelMapDatasetParams3(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}),
true},
{ParallelMapDatasetParams4(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{0}, {6}, {12}, {18}}),
true},
{ParallelMapDatasetParams5(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}),
false},
{
ParallelMapDatasetParams6(),
{0, 1, 5},
CreateTensors<int64_t>(TensorShape{}, {{0}, {12}, {24}, {36}}),
true}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(ParallelMapDatasetOpTest,
ParallelMapDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(ParallelMapDatasetOpTest, InvalidNumParallelCalls) {
auto dataset_params = ParallelMapDatasetParamsWithInvalidNumParallelCalls();
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/parallel_map_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/parallel_map_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1da53164-e7c9-403c-8625-eaec6fdf5874 | cpp | tensorflow/tensorflow | wav_to_spectrogram | tensorflow/examples/wav_to_spectrogram/wav_to_spectrogram.cc | tensorflow/examples/wav_to_spectrogram/wav_to_spectrogram_test.cc | #include "tensorflow/examples/wav_to_spectrogram/wav_to_spectrogram.h"
#include <vector>
#include "tensorflow/cc/ops/audio_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/default_device.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
using tensorflow::DT_FLOAT;
using tensorflow::DT_UINT8;
using tensorflow::Output;
using tensorflow::TensorShape;
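// Builds and runs a small TensorFlow graph that reads a WAV file, computes
// its spectrogram, scales it by `brightness`, clamps values to [0, 255],
// casts to uint8, and writes the result out as a PNG image.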
tensorflow::Status WavToSpectrogram(const tensorflow::string& input_wav,
int32_t window_size, int32_t stride,
float brightness,
const tensorflow::string& output_image) {
auto root = tensorflow::Scope::NewRootScope();
using namespace tensorflow::ops;
Output file_reader =
tensorflow::ops::ReadFile(root.WithOpName("input_wav"), input_wav);
DecodeWav wav_decoder =
DecodeWav(root.WithOpName("wav_decoder"), file_reader);
Output spectrogram = AudioSpectrogram(root.WithOpName("spectrogram"),
wav_decoder.audio, window_size, stride);
Output brightness_placeholder =
Placeholder(root.WithOpName("brightness_placeholder"), DT_FLOAT,
Placeholder::Attrs().Shape(TensorShape({})));
Output mul = Mul(root.WithOpName("mul"), spectrogram, brightness_placeholder);
Output min_const = Const(root.WithOpName("min_const"), 255.0f);
Output min = Minimum(root.WithOpName("min"), mul, min_const);
Output cast = Cast(root.WithOpName("cast"), min, DT_UINT8);
Output expand_dims_const = Const(root.WithOpName("expand_dims_const"), -1);
Output expand_dims =
ExpandDims(root.WithOpName("expand_dims"), cast, expand_dims_const);
Output squeeze = Squeeze(root.WithOpName("squeeze"), expand_dims,
Squeeze::Attrs().Axis({0}));
Output png_encoder = EncodePng(root.WithOpName("png_encoder"), squeeze);
tensorflow::ops::WriteFile file_writer = tensorflow::ops::WriteFile(
root.WithOpName("output_image"), output_image, png_encoder);
tensorflow::GraphDef graph;
TF_RETURN_IF_ERROR(root.ToGraphDef(&graph));
std::unique_ptr<tensorflow::Session> session(
tensorflow::NewSession(tensorflow::SessionOptions()));
TF_RETURN_IF_ERROR(session->Create(graph));
tensorflow::Tensor brightness_tensor(DT_FLOAT, TensorShape({}));
brightness_tensor.scalar<float>()() = brightness;
TF_RETURN_IF_ERROR(
session->Run({{"brightness_placeholder", brightness_tensor}}, {},
{"output_image"}, nullptr));
return absl::OkStatus();
} | #include "tensorflow/examples/wav_to_spectrogram/wav_to_spectrogram.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/wav/wav_io.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
TEST(WavToSpectrogramTest, WavToSpectrogramTest) {
const tensorflow::string input_wav =
tensorflow::io::JoinPath(tensorflow::testing::TmpDir(), "input_wav.wav");
const tensorflow::string output_image = tensorflow::io::JoinPath(
tensorflow::testing::TmpDir(), "output_image.png");
float audio[8] = {-1.0f, 0.0f, 1.0f, 0.0f, -1.0f, 0.0f, 1.0f, 0.0f};
tensorflow::string wav_string;
TF_ASSERT_OK(
tensorflow::wav::EncodeAudioAsS16LEWav(audio, 44100, 1, 8, &wav_string));
TF_ASSERT_OK(tensorflow::WriteStringToFile(tensorflow::Env::Default(),
input_wav, wav_string));
TF_ASSERT_OK(WavToSpectrogram(input_wav, 4, 4, 64.0f, output_image));
TF_EXPECT_OK(tensorflow::Env::Default()->FileExists(output_image));
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/examples/wav_to_spectrogram/wav_to_spectrogram.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/examples/wav_to_spectrogram/wav_to_spectrogram_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
52916fe4-bde7-4bfd-a579-9931b007d93f | cpp | tensorflow/tensorflow | math | third_party/xla/xla/hlo/builder/lib/math.cc | third_party/xla/xla/hlo/builder/lib/math_test.cc | #include "xla/hlo/builder/lib/math.h"
#include <algorithm>
#include <array>
#include <cmath>
#include <functional>
#include <limits>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/builder/lib/arithmetic.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/loops.h"
#include "xla/hlo/builder/lib/math_impl.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/primitive_util.h"
#include "xla/shape.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
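// Evaluates a polynomial with Horner's scheme; coefficients are given from
// the highest-degree term down to the constant term.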
template <typename FP>
XlaOp EvaluatePolynomial(XlaOp x, absl::Span<const FP> coefficients) {
static_assert(std::is_floating_point<FP>::value,
"Template-argument 'FP' must be a floating-point type");
if (coefficients.empty()) {
return ScalarLike(x, FP(0.0));
}
XlaOp poly = ScalarLike(x, coefficients[0]);
for (int i = 1; i < coefficients.size(); ++i) {
FP c = coefficients[i];
poly = poly * x + ScalarLike(x, c);
}
return poly;
}
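// Evaluates a Chebyshev series with a Clenshaw-style recurrence (the
// convention matches Cephes' chbevl, where the caller pre-maps the
// argument into the expected interval).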
template <typename FP>
XlaOp EvaluateChebyshevPolynomial(XlaOp x, absl::Span<const FP> coefficients) {
static_assert(std::is_floating_point<FP>::value,
"Template-argument 'FP' must be a floating-point type");
XlaOp b0 = ScalarLike(x, 0.0);
XlaOp b1 = ScalarLike(x, 0.0);
XlaOp b2 = ScalarLike(x, 0.0);
for (FP c : coefficients) {
b2 = b1;
b1 = b0;
b0 = x * b1 - b2 + ScalarLike(x, c);
}
return ScalarLike(x, 0.5) * (b0 - b2);
}
}
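// Runs `operation` in F32 for narrow floating-point inputs (by default any
// element type of 16 bits or fewer, otherwise the explicit `upcast_types`
// list) and converts the result back to the original element type.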
static XlaOp DoWithUpcastToF32(XlaOp operand,
absl::Span<const PrimitiveType> upcast_types,
const std::function<XlaOp(XlaOp)>& operation) {
auto& b = *operand.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(operand));
PrimitiveType elem_ty = shape.element_type();
bool needs_upcast =
upcast_types.empty()
? primitive_util::BitWidth(shape.element_type()) <= 16
: absl::c_linear_search(upcast_types, elem_ty);
if (needs_upcast) {
operand = ConvertElementType(operand, F32);
}
XlaOp result = operation(operand);
if (needs_upcast) {
result = ConvertElementType(result, elem_ty);
}
return result;
});
}
static absl::Status EnsureOperandIsRealFp(absl::string_view op_name,
XlaOp operand) {
auto& b = *operand.builder();
TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(operand));
auto elem_ty = shape.element_type();
if (!primitive_util::IsFloatingPointType(elem_ty)) {
return InvalidArgument(
"Operands to %s must be real-valued floating-point, but got %s",
op_name, PrimitiveType_Name(elem_ty));
}
return absl::OkStatus();
}
XlaOp IsPosInf(XlaOp operand) {
auto& b = *operand.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("IsPosInf", operand));
TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(operand));
return Eq(operand, MaxValue(&b, shape.element_type()));
});
}
XlaOp IsNegInf(XlaOp operand) {
auto& b = *operand.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("IsNegInf", operand));
TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(operand));
return Eq(operand, MinValue(&b, shape.element_type()));
});
}
XlaOp IsInf(XlaOp operand) {
auto& b = *operand.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("IsInf", operand));
return IsPosInf(Abs(operand));
});
}
XlaOp IsNan(XlaOp operand) {
auto& b = *operand.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("IsNan", operand));
return Ne(operand, operand);
});
}
XlaOp IsNegZero(XlaOp operand) {
auto& b = *operand.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("IsNegZero", operand));
TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(operand));
switch (shape.element_type()) {
case F64:
return Eq(BitcastConvertType(operand, U64),
ConstantR0WithType(&b, U64, uint64_t{1} << 63));
case F32:
return Eq(BitcastConvertType(operand, U32),
ConstantR0WithType(&b, U32, uint32_t{1} << 31));
case F8E3M4:
case F8E4M3:
case F8E5M2:
case F8E4M3FN:
case F8E4M3B11FNUZ:
case F8E5M2FNUZ:
case F8E4M3FNUZ:
case F16:
case BF16:
return Eq(BitcastConvertType(ConvertElementType(operand, F32), U32),
ConstantR0WithType(&b, U32, uint32_t{1} << 31));
default:
LOG(FATAL) << "Expected real fp type.";
}
});
}
XlaOp Square(XlaOp operand) { return operand * operand; }
XlaOp Reciprocal(XlaOp operand) { return ScalarLike(operand, 1.0) / operand; }
static XlaOp ErfcImpl32(XlaOp x) {
const double kMaxlog = 88.72283905206835;
static const std::array<float, 9> kErfcPCoefficient{
+2.326819970068386E-2, -1.387039388740657E-1, +3.687424674597105E-1,
-5.824733027278666E-1, +6.210004621745983E-1, -4.944515323274145E-1,
+3.404879937665872E-1, -2.741127028184656E-1, +5.638259427386472E-1,
};
static const std::array<float, 8> kErfcRCoefficient{
-1.047766399936249E+1, +1.297719955372516E+1, -7.495518717768503E+0,
+2.921019019210786E+0, -1.015265279202700E+0, +4.218463358204948E-1,
-2.820767439740514E-1, +5.641895067754075E-1,
};
XlaOp abs_x = Abs(x);
XlaOp z = Exp(-x * x);
XlaOp q = ScalarLike(x, 1) / abs_x;
XlaOp y = q * q;
XlaOp p = Select(Lt(abs_x, ScalarLike(x, 2.0)),
EvaluatePolynomial<float>(y, kErfcPCoefficient),
EvaluatePolynomial<float>(y, kErfcRCoefficient));
y = z * q * p;
XlaOp y_clamp = Select(Lt(z, ScalarLike(x, -kMaxlog)), ScalarLike(x, 0), y);
return Select(Lt(x, ScalarLike(x, 0)), ScalarLike(x, 2.0) - y_clamp, y_clamp);
}
static XlaOp ErfImpl32Cephes(XlaOp x) {
static const std::array<float, 7> kErfTCoefficient{
+7.853861353153693E-5, -8.010193625184903E-4, +5.188327685732524E-3,
-2.685381193529856E-2, +1.128358514861418E-1, -3.761262582423300E-1,
+1.128379165726710E+0,
};
return x * EvaluatePolynomial<float>(x * x, kErfTCoefficient);
}
static XlaOp ErfcImpl64(XlaOp x) {
const double kMaxlog = 7.09782712893383996843E2;
static const std::array<double, 9> kErfcPCoefficient{
2.46196981473530512524E-10, 5.64189564831068821977E-1,
7.46321056442269912687E0, 4.86371970985681366614E1,
1.96520832956077098242E2, 5.26445194995477358631E2,
9.34528527171957607540E2, 1.02755188689515710272E3,
5.57535335369399327526E2};
static const std::array<double, 9> kErfcQCoefficient{
1.00000000000000000000E0, 1.32281951154744992508E1,
8.67072140885989742329E1, 3.54937778887819891062E2,
9.75708501743205489753E2, 1.82390916687909736289E3,
2.24633760818710981792E3, 1.65666309194161350182E3,
5.57535340817727675546E2};
static const std::array<double, 6> kErfcRCoefficient{
5.64189583547755073984E-1, 1.27536670759978104416E0,
5.01905042251180477414E0, 6.16021097993053585195E0,
7.40974269950448939160E0, 2.97886665372100240670E0};
static const std::array<double, 7> kErfcSCoefficient{
1.00000000000000000000E0, 2.26052863220117276590E0,
9.39603524938001434673E0, 1.20489539808096656605E1,
1.70814450747565897222E1, 9.60896809063285878198E0,
3.36907645100081516050E0};
XlaOp z = -x * x;
XlaOp abs_x = Abs(x);
XlaOp y =
Select(Lt(abs_x, ScalarLike(x, 8.0)),
Exp(z) * EvaluatePolynomial<double>(abs_x, kErfcPCoefficient) /
EvaluatePolynomial<double>(abs_x, kErfcQCoefficient),
Exp(z) * EvaluatePolynomial<double>(abs_x, kErfcRCoefficient) /
EvaluatePolynomial<double>(abs_x, kErfcSCoefficient));
XlaOp y_clamp = Select(Lt(z, ScalarLike(x, -kMaxlog)), ScalarLike(x, 0), y);
return Select(Lt(x, ScalarLike(x, 0)), ScalarLike(x, 2.0) - y_clamp, y_clamp);
}
static XlaOp ErfImpl64(XlaOp x) {
  static const std::array<double, 5> kErfTCoefficient{
      9.60497373987051638749E0, 9.00260197203842689217E1,
      2.23200534594684319226E3, 7.00332514112805075473E3,
      5.55923013010394962768E4};
  static const std::array<double, 6> kErfUCoefficient{
      1.00000000000000000000E0, 3.35617141647503099647E1,
      5.21357949780152679795E2, 4.59432382970980127987E3,
      2.26290000613890934246E4, 4.92673942608635921086E4};
XlaOp z = x * x;
return x * EvaluatePolynomial<double>(z, kErfTCoefficient) /
EvaluatePolynomial<double>(z, kErfUCoefficient);
}
XlaOp Erfc(XlaOp x) {
auto& b = *x.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("Erfc", x));
TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(x));
if (shape.element_type() == F64) {
return Select(Gt(Abs(x), ScalarLike(x, 1)), ErfcImpl64(x),
ScalarLike(x, 1) - ErfImpl64(x));
}
return DoWithUpcastToF32(x, {}, [](XlaOp x) {
return Select(Gt(Abs(x), ScalarLike(x, 1)), ErfcImpl32(x),
ScalarLike(x, 1) - ErfImpl32Cephes(x));
});
});
}
static XlaOp ErfImpl32(XlaOp x) {
static const std::array<float, 5> kAlpha{
0.00022905065861350646f, 0.0034082910107109506f, 0.050955695062380861f,
0.18520832239976145f, 1.128379143519084f};
static const std::array<float, 7> kBeta{-1.1791602954361697e-7,
0.000023547966471313185f,
0.0010179625278914885f,
0.014070470171167667f,
0.11098505178285362f,
0.49746925110067538f,
1.0f};
constexpr float kErfInvOneMinusHalfULP = 3.7439211627767994f;
x = Clamp(ScalarLike(x, -kErfInvOneMinusHalfULP), x,
ScalarLike(x, kErfInvOneMinusHalfULP));
auto x2 = x * x;
return (x * EvaluatePolynomial<float>(x2, kAlpha)) /
EvaluatePolynomial<float>(x2, kBeta);
}
namespace {
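// Single-precision inverse-erf approximation: a polynomial in
// w = -log(1 - x^2) with separate coefficient sets for w < 5 and w >= 5.
// The coefficients appear to follow M. Giles' erfinv scheme.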
XlaOp ErfInv32(XlaOp x) {
constexpr int kDegree = 9;
constexpr std::array<float, 9> w_less_than_5_constants = {
2.81022636e-08f, 3.43273939e-07f, -3.5233877e-06f,
-4.39150654e-06f, 0.00021858087f, -0.00125372503f,
-0.00417768164f, 0.246640727f, 1.50140941f};
constexpr std::array<float, 9> w_greater_than_5_constants = {
-0.000200214257f, 0.000100950558f, 0.00134934322f,
-0.00367342844f, 0.00573950773f, -0.0076224613f,
0.00943887047f, 1.00167406f, 2.83297682f};
auto w = -Log1p(-x * x);
auto lt = Lt(w, ScalarLike(x, 5.0));
auto coefficient = [&](int i) {
return Select(lt, FullLike(x, w_less_than_5_constants[i]),
FullLike(x, w_greater_than_5_constants[i]));
};
w = Select(lt, w - ScalarLike(x, 2.5), Sqrt(w) - ScalarLike(x, 3.0));
auto p = coefficient(0);
for (int i = 1; i < kDegree; ++i) {
p = coefficient(i) + p * w;
}
XlaOp result = p * x;
auto& b = *x.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape shape, b.GetShape(x));
return Select(Eq(Abs(x), ScalarLike(x, 1)),
x * MaxValue(&b, shape.element_type()), result);
});
}
XlaOp ErfInv64(XlaOp x) {
constexpr std::array<double, 23> w_less_than_6_25_constants = {
-3.6444120640178196996e-21, -1.685059138182016589e-19,
1.2858480715256400167e-18, 1.115787767802518096e-17,
-1.333171662854620906e-16, 2.0972767875968561637e-17,
6.6376381343583238325e-15, -4.0545662729752068639e-14,
-8.1519341976054721522e-14, 2.6335093153082322977e-12,
-1.2975133253453532498e-11, -5.4154120542946279317e-11,
1.051212273321532285e-09, -4.1126339803469836976e-09,
-2.9070369957882005086e-08, 4.2347877827932403518e-07,
-1.3654692000834678645e-06, -1.3882523362786468719e-05,
0.0001867342080340571352, -0.00074070253416626697512,
-0.0060336708714301490533, 0.24015818242558961693,
1.6536545626831027356};
constexpr std::array<double, 19> w_less_than_16_constants = {
2.2137376921775787049e-09, 9.0756561938885390979e-08,
-2.7517406297064545428e-07, 1.8239629214389227755e-08,
1.5027403968909827627e-06, -4.013867526981545969e-06,
2.9234449089955446044e-06, 1.2475304481671778723e-05,
-4.7318229009055733981e-05, 6.8284851459573175448e-05,
2.4031110387097893999e-05, -0.0003550375203628474796,
0.00095328937973738049703, -0.0016882755560235047313,
0.0024914420961078508066, -0.0037512085075692412107,
0.005370914553590063617, 1.0052589676941592334,
3.0838856104922207635,
};
constexpr std::array<double, 17> w_greater_than_16_constants = {
-2.7109920616438573243e-11, -2.5556418169965252055e-10,
1.5076572693500548083e-09, -3.7894654401267369937e-09,
7.6157012080783393804e-09, -1.4960026627149240478e-08,
2.9147953450901080826e-08, -6.7711997758452339498e-08,
2.2900482228026654717e-07, -9.9298272942317002539e-07,
4.5260625972231537039e-06, -1.9681778105531670567e-05,
7.5995277030017761139e-05, -0.00021503011930044477347,
-0.00013871931833623122026, 1.0103004648645343977,
4.8499064014085844221,
};
auto w = -Log1p(-x * x);
auto lt_6_25 = Lt(w, ScalarLike(x, 6.25));
auto lt_16 = Lt(w, ScalarLike(x, 16));
auto coefficient = [&](int i) {
auto c = FullLike(x, w_less_than_6_25_constants[i]);
if (i < 19) {
c = Select(lt_6_25, c, FullLike(x, w_less_than_16_constants[i]));
}
if (i < 17) {
c = Select(lt_16, c, FullLike(x, w_greater_than_16_constants[i]));
}
return c;
};
auto sqrt_w = Sqrt(w);
w = Select(lt_6_25, w - ScalarLike(x, 3.125),
sqrt_w - Select(lt_16, ScalarLike(x, 3.25), ScalarLike(x, 5.0)));
auto p = coefficient(0);
for (int i = 1; i < 17; ++i) {
p = coefficient(i) + p * w;
}
for (int i = 17; i < 19; ++i) {
p = Select(lt_16, coefficient(i) + p * w, p);
}
for (int i = 19; i < 23; ++i) {
p = Select(lt_6_25, coefficient(i) + p * w, p);
}
XlaOp result = p * x;
auto& b = *x.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape shape, b.GetShape(x));
return Select(Eq(Abs(x), ScalarLike(x, 1)),
x * MaxValue(&b, shape.element_type()), result);
});
}
}
XlaOp ErfInv(XlaOp x) {
auto& b = *x.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("ErfInv", x));
TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(x));
if (shape.element_type() == F64) {
return ErfInv64(x);
}
return DoWithUpcastToF32(x, {}, [](XlaOp x) { return ErfInv32(x); });
});
}
namespace {
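// Coefficients for the Lanczos approximation of the gamma function with
// g = 7, shared by Lgamma and Digamma below.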
static constexpr double kLanczosGamma = 7;
static constexpr double kBaseLanczosCoeff = 0.99999999999980993227684700473478;
static constexpr std::array<double, 8> kLanczosCoefficients = {
676.520368121885098567009190444019, -1259.13921672240287047156078755283,
771.3234287776530788486528258894, -176.61502916214059906584551354,
12.507343278686904814458936853, -0.13857109526572011689554707,
9.984369578019570859563e-6, 1.50563273514931155834e-7};
}
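// Computes log(|Gamma(input)|) via the Lanczos approximation, using the
// reflection formula for inputs below 0.5 and propagating infinity for
// infinite inputs.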
XlaOp Lgamma(XlaOp input) {
auto do_it = [](XlaOp input) {
XlaOp one_half = ScalarLike(input, 0.5);
XlaOp one = ScalarLike(input, 1);
XlaOp pi = ScalarLike(input, M_PI);
XlaOp log_pi = ScalarLike(input, std::log(M_PI));
XlaOp log_sqrt_two_pi =
ScalarLike(input, (std::log(2) + std::log(M_PI)) / 2);
XlaOp lanczos_gamma_plus_one_half = ScalarLike(input, kLanczosGamma + 0.5);
XlaOp log_lanczos_gamma_plus_one_half =
ScalarLike(input, std::log(kLanczosGamma + 0.5));
XlaOp base_lanczos_coeff = ScalarLike(input, kBaseLanczosCoeff);
XlaOp need_to_reflect = Lt(input, one_half);
XlaOp z = Select(need_to_reflect, -input, input - one);
XlaOp x = base_lanczos_coeff;
for (int i = 0, end = kLanczosCoefficients.size(); i < end; ++i) {
XlaOp lanczos_coefficient = ScalarLike(input, kLanczosCoefficients[i]);
XlaOp index = ScalarLike(input, i);
x = x + lanczos_coefficient / (z + index + one);
}
XlaOp t = lanczos_gamma_plus_one_half + z;
XlaOp log_t = log_lanczos_gamma_plus_one_half +
Log1p(z / lanczos_gamma_plus_one_half);
XlaOp log_y = log_sqrt_two_pi + (z + one_half - t / log_t) * log_t + Log(x);
XlaOp abs_input = Abs(input);
XlaOp abs_frac_input = abs_input - Floor(abs_input);
XlaOp reduced_frac_input =
Select(Gt(abs_frac_input, ScalarLike(abs_frac_input, 0.5)),
ScalarLike(abs_frac_input, 1) - abs_frac_input, abs_frac_input);
XlaOp reflection_denom = Log(Sin(pi * reduced_frac_input));
XlaOp reflection =
Select(IsFinite(reflection_denom), log_pi - reflection_denom - log_y,
-reflection_denom);
XlaOp result = Select(need_to_reflect, reflection, log_y);
XlaOp inf_bcast = FullLike(input, std::numeric_limits<float>::infinity());
return Select(IsInf(input), inf_bcast, result);
};
auto& b = *input.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("Lgamma", input));
return DoWithUpcastToF32(input, {}, do_it);
});
}
static XlaOp Lbeta(XlaOp a, XlaOp b) {
return Lgamma(a) + Lgamma(b) - Lgamma(a + b);
}
XlaOp Digamma(XlaOp input) {
auto do_it = [](XlaOp input) {
XlaOp zero = ScalarLike(input, 0);
XlaOp one_half = ScalarLike(input, 0.5);
XlaOp one = ScalarLike(input, 1);
XlaOp pi = ScalarLike(input, M_PI);
XlaOp lanczos_gamma = ScalarLike(input, kLanczosGamma);
XlaOp lanczos_gamma_plus_one_half = ScalarLike(input, kLanczosGamma + 0.5);
XlaOp log_lanczos_gamma_plus_one_half =
ScalarLike(input, std::log(kLanczosGamma + 0.5));
XlaOp base_lanczos_coeff = ScalarLike(input, kBaseLanczosCoeff);
XlaOp need_to_reflect = Lt(input, one_half);
XlaOp z = Select(need_to_reflect, -input, input - one);
XlaOp num = zero;
XlaOp denom = base_lanczos_coeff;
for (int i = 0, end = kLanczosCoefficients.size(); i < end; ++i) {
XlaOp lanczos_coefficient = ScalarLike(input, kLanczosCoefficients[i]);
XlaOp index = ScalarLike(input, i);
num = num - lanczos_coefficient / ((z + index + one) * (z + index + one));
denom = denom + lanczos_coefficient / (z + index + one);
}
XlaOp t = lanczos_gamma_plus_one_half + z;
XlaOp log_t = log_lanczos_gamma_plus_one_half +
Log1p(z / lanczos_gamma_plus_one_half);
XlaOp y = log_t + num / denom - lanczos_gamma / t;
XlaOp reduced_input = input + Abs(Floor(input + ScalarLike(input, 0.5)));
XlaOp reflection =
y - pi * Cos(pi * reduced_input) / Sin(pi * reduced_input);
XlaOp real_result = Select(need_to_reflect, reflection, y);
return Select(And(Le(input, zero), Eq(input, Floor(input))),
FullLike(input, std::numeric_limits<float>::quiet_NaN()),
real_result);
};
auto& b = *input.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("Digamma", input));
return DoWithUpcastToF32(input, {}, do_it);
});
}
namespace {
enum kIgammaMode { VALUE, DERIVATIVE, SAMPLE_DERIVATIVE };
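// Power-series expansion of the lower incomplete gamma function. `mode`
// selects the value itself, its derivative with respect to `a`, or the
// gradient used for reparameterized gamma sampling.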
template <kIgammaMode mode>
XlaOp IgammaSeries(XlaOp ax, XlaOp x, XlaOp a, XlaOp enabled,
xla::PrimitiveType type) {
auto cond = [&](absl::Span<const XlaOp> vals,
XlaBuilder* builder) -> absl::StatusOr<XlaOp> {
XlaOp enabled = vals[0];
return Any(enabled);
};
auto body = [&](absl::Span<const XlaOp> vals,
XlaBuilder* builder) -> absl::StatusOr<std::vector<XlaOp>> {
XlaOp enabled = vals[0];
XlaOp r = vals[1];
XlaOp c = vals[2];
XlaOp ans = vals[3];
XlaOp x = vals[4];
XlaOp dc_da = vals[5];
XlaOp dans_da = vals[6];
r = r + ScalarLike(r, 1);
dc_da = dc_da * (x / r) + (ScalarLike(r, -1) * c * x) / (r * r);
dans_da = dans_da + dc_da;
c = c * (x / r);
ans = ans + c;
XlaOp conditional;
if (mode == VALUE) {
conditional = And(enabled, Gt(c / ans, Epsilon(builder, type)));
} else {
conditional =
And(enabled, Gt(Abs(dc_da / dans_da), Epsilon(builder, type)));
}
return std::vector<XlaOp>{
conditional,
Select(enabled, r, vals[1]),
Select(enabled, c, vals[2]),
Select(enabled, ans, vals[3]),
Select(enabled, x, vals[4]),
Select(enabled, dc_da, vals[5]),
Select(enabled, dans_da, vals[6]),
};
};
auto& b = *ax.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
std::vector<XlaOp> vals = {
enabled, a, FullLike(a, 1), FullLike(a, 1), x, FullLike(a, 0),
FullLike(a, 0),
};
TF_ASSIGN_OR_RETURN(vals, WhileLoopHelper(cond, body, vals, "igamma", &b));
XlaOp ans = vals[3];
XlaOp dans_da = vals[6];
if (mode == VALUE) {
return (ans * ax) / a;
}
XlaOp dlogax_da = Log(x) - Digamma(a + ScalarLike(a, 1));
switch (mode) {
case DERIVATIVE:
return ax * (ans * dlogax_da + dans_da) / a;
case SAMPLE_DERIVATIVE:
default:
return -(dans_da + ans * dlogax_da) * x / a;
}
});
}
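// Continued-fraction expansion of the upper incomplete gamma function,
// iterated until convergence (or at most 2000 terms), with periodic
// rescaling to avoid overflow. Supports the same three modes as
// IgammaSeries.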
template <kIgammaMode mode>
XlaOp IgammacContinuedFraction(XlaOp ax, XlaOp x, XlaOp a, XlaOp enabled,
xla::PrimitiveType type) {
auto cond = [&](absl::Span<const XlaOp> vals,
XlaBuilder* builder) -> absl::StatusOr<XlaOp> {
XlaOp enabled = vals[0];
XlaOp c = vals[5];
return And(Lt(c, ScalarLike(c, 2000)), Any(enabled));
};
auto body = [&](absl::Span<const XlaOp> vals,
XlaBuilder* builder) -> absl::StatusOr<std::vector<XlaOp>> {
XlaOp enabled = vals[0];
XlaOp ans = vals[1];
XlaOp t = vals[2];
XlaOp y = vals[3];
XlaOp z = vals[4];
XlaOp c = vals[5];
XlaOp pkm1 = vals[6];
XlaOp qkm1 = vals[7];
XlaOp pkm2 = vals[8];
XlaOp qkm2 = vals[9];
XlaOp dpkm2_da = vals[10];
XlaOp dqkm2_da = vals[11];
XlaOp dpkm1_da = vals[12];
XlaOp dqkm1_da = vals[13];
XlaOp dans_da = vals[14];
c = c + ScalarLike(c, 1);
y = y + ScalarLike(y, 1);
z = z + ScalarLike(z, 2);
XlaOp yc = y * c;
XlaOp pk = pkm1 * z - pkm2 * yc;
XlaOp qk = qkm1 * z - qkm2 * yc;
XlaOp qk_is_nonzero = Ne(qk, ScalarLike(qk, 0));
XlaOp r = pk / qk;
t = Select(qk_is_nonzero, Abs((ans - r) / r), FullLike(t, 1));
ans = Select(qk_is_nonzero, r, ans);
XlaOp dpk_da = dpkm1_da * z - pkm1 - dpkm2_da * yc + pkm2 * c;
XlaOp dqk_da = dqkm1_da * z - qkm1 - dqkm2_da * yc + qkm2 * c;
XlaOp dans_da_new =
Select(qk_is_nonzero, (dpk_da - ans * dqk_da) / qk, dans_da);
XlaOp grad_conditional =
Select(qk_is_nonzero, Abs(dans_da_new - dans_da), FullLike(dans_da, 1));
pkm2 = pkm1;
pkm1 = pk;
qkm2 = qkm1;
qkm1 = qk;
dpkm2_da = dpkm1_da;
dqkm2_da = dqkm1_da;
dpkm1_da = dpk_da;
dqkm1_da = dqk_da;
XlaOp rescale = Gt(Abs(pk), Reciprocal(Epsilon(builder, type)));
pkm2 = Select(rescale, pkm2 * Epsilon(builder, type), pkm2);
pkm1 = Select(rescale, pkm1 * Epsilon(builder, type), pkm1);
qkm2 = Select(rescale, qkm2 * Epsilon(builder, type), qkm2);
qkm1 = Select(rescale, qkm1 * Epsilon(builder, type), qkm1);
dpkm2_da = Select(rescale, dpkm2_da * Epsilon(builder, type), dpkm2_da);
dqkm2_da = Select(rescale, dqkm2_da * Epsilon(builder, type), dqkm2_da);
dpkm1_da = Select(rescale, dpkm1_da * Epsilon(builder, type), dpkm1_da);
dqkm1_da = Select(rescale, dqkm1_da * Epsilon(builder, type), dqkm1_da);
XlaOp conditional;
if (mode == VALUE) {
conditional = And(enabled, Gt(t, Epsilon(builder, type)));
} else {
conditional = And(enabled, Gt(grad_conditional, Epsilon(builder, type)));
}
return std::vector<XlaOp>{conditional,
Select(enabled, ans, vals[1]),
Select(enabled, t, vals[2]),
Select(enabled, y, vals[3]),
Select(enabled, z, vals[4]),
c,
Select(enabled, pkm1, vals[6]),
Select(enabled, qkm1, vals[7]),
Select(enabled, pkm2, vals[8]),
Select(enabled, qkm2, vals[9]),
Select(enabled, dpkm2_da, vals[10]),
Select(enabled, dqkm2_da, vals[11]),
Select(enabled, dpkm1_da, vals[12]),
Select(enabled, dqkm1_da, vals[13]),
Select(enabled, dans_da_new, vals[14])};
};
auto& b = *ax.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
XlaOp y = ScalarLike(a, 1) - a;
XlaOp z = x + y + ScalarLike(x, 1);
XlaOp c = ScalarLike(x, 0);
XlaOp pkm2 = FullLike(x, 1);
XlaOp qkm2 = x;
XlaOp pkm1 = x + ScalarLike(x, 1);
XlaOp qkm1 = z * x;
XlaOp ans = pkm1 / qkm1;
XlaOp t = FullLike(x, 1);
XlaOp dpkm2_da = FullLike(x, 0);
XlaOp dqkm2_da = FullLike(x, 0);
XlaOp dpkm1_da = FullLike(x, 0);
XlaOp dqkm1_da = -x;
XlaOp dans_da = (dpkm1_da - ans * dqkm1_da) / qkm1;
std::vector<XlaOp> vals = {enabled, ans, t, y, z,
c, pkm1, qkm1, pkm2, qkm2,
dpkm2_da, dqkm2_da, dpkm1_da, dqkm1_da, dans_da};
TF_ASSIGN_OR_RETURN(vals, WhileLoopHelper(cond, body, vals, "igammac", &b));
ans = vals[1];
if (mode == VALUE) {
return ans * ax;
}
dans_da = vals[14];
XlaOp dlogax_da = Log(x) - Digamma(a);
switch (mode) {
case DERIVATIVE:
return ax * (ans * dlogax_da + dans_da);
case SAMPLE_DERIVATIVE:
default:
return -(dans_da + ans * dlogax_da) * x;
}
});
}
}
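// Regularized lower incomplete gamma P(a, x). Uses the continued fraction
// (as 1 - Q) when x > 1 and x > a, and the power series otherwise, with
// special handling for x == 0, x == inf, NaNs, and domain errors.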
XlaOp Igamma(XlaOp a, XlaOp x) {
auto& b = *a.builder();
auto doit = [&b](XlaOp a, XlaOp x, PrimitiveType type) -> XlaOp {
XlaOp is_nan = Or(IsNan(a), IsNan(x));
XlaOp x_is_zero = Eq(x, ScalarLike(x, 0));
XlaOp x_is_infinity =
Eq(x, ScalarLike(x, std::numeric_limits<float>::infinity()));
XlaOp domain_error = Or(Lt(x, ScalarLike(x, 0)), Le(a, ScalarLike(a, 0)));
XlaOp use_igammac = And(Gt(x, ScalarLike(x, 1)), Gt(x, a));
XlaOp ax = a * Log(x) - x - Lgamma(a);
XlaOp underflow = Lt(ax, -Log(MaxFiniteValue(&b, type)));
ax = Exp(ax);
XlaOp enabled = Not(Or(Or(Or(x_is_zero, domain_error), underflow), is_nan));
const double nan = std::numeric_limits<double>::quiet_NaN();
XlaOp output = Select(
use_igammac,
ScalarLike(a, 1) - IgammacContinuedFraction<VALUE>(
ax, x, a, And(enabled, use_igammac), type),
IgammaSeries<VALUE>(ax, x, a, And(enabled, Not(use_igammac)), type));
output = Select(x_is_zero, ZerosLike(output), output);
output = Select(x_is_infinity, FullLike(output, 1), output);
output = Select(Or(domain_error, is_nan), FullLike(a, nan), output);
return output;
};
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto a_shape, b.GetShape(a));
TF_ASSIGN_OR_RETURN(auto x_shape, b.GetShape(x));
if (a_shape != x_shape) {
return InvalidArgument(
"Arguments to Igamma must have equal shapes and types; got %s and %s",
a_shape.ToString(), x_shape.ToString());
}
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("Igamma", a));
PrimitiveType a_x_type = a_shape.element_type();
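    // The series and continued fraction need more precision than the narrow
    // float types provide, so compute in F32 and downcast afterwards.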
bool needs_upcast = false;
for (PrimitiveType type : {BF16, F16, F8E3M4, F8E4M3, F8E5M2, F8E4M3FN,
F8E4M3B11FNUZ, F8E5M2FNUZ, F8E4M3FNUZ}) {
if (a_shape.element_type() == type) {
needs_upcast = true;
break;
}
}
if (needs_upcast) {
a = ConvertElementType(a, F32);
x = ConvertElementType(x, F32);
a_x_type = F32;
}
XlaOp result = doit(a, x, a_x_type);
if (needs_upcast) {
result = ConvertElementType(result, a_shape.element_type());
}
return result;
});
}
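// Computes the derivative of the regularized lower incomplete gamma function
// P(a, x) with respect to the parameter a.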
XlaOp IgammaGradA(XlaOp a, XlaOp x) {
auto& b = *a.builder();
auto doit = [&b](XlaOp a, XlaOp x, PrimitiveType type) -> XlaOp {
XlaOp is_nan = Or(IsNan(a), IsNan(x));
XlaOp x_is_zero = Eq(x, ScalarLike(x, 0));
XlaOp domain_error = Or(Lt(x, ScalarLike(x, 0)), Le(a, ScalarLike(a, 0)));
XlaOp use_igammac = And(Gt(x, ScalarLike(x, 1)), Gt(x, a));
XlaOp ax = a * Log(x) - x - Lgamma(a);
XlaOp underflow = Lt(ax, -Log(MaxFiniteValue(&b, type)));
ax = Exp(ax);
XlaOp enabled = Not(Or(Or(Or(x_is_zero, domain_error), underflow), is_nan));
const double nan = std::numeric_limits<double>::quiet_NaN();
XlaOp output = Select(use_igammac,
-IgammacContinuedFraction<DERIVATIVE>(
ax, x, a, And(enabled, use_igammac), type),
IgammaSeries<DERIVATIVE>(
ax, x, a, And(enabled, Not(use_igammac)), type));
output = Select(x_is_zero, ZerosLike(output), output);
output = Select(Or(domain_error, is_nan), FullLike(a, nan), output);
return output;
};
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto a_shape, b.GetShape(a));
TF_ASSIGN_OR_RETURN(auto x_shape, b.GetShape(x));
if (a_shape != x_shape) {
return InvalidArgument(
"Arguments to IgammaGradA must have equal shapes and types; got %s "
"and %s",
a_shape.ToString(), x_shape.ToString());
}
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("IgammaGradA", a));
bool needs_upcast = false;
for (PrimitiveType type : {BF16, F16, F8E3M4, F8E4M3, F8E5M2, F8E4M3FN,
F8E4M3B11FNUZ, F8E5M2FNUZ, F8E4M3FNUZ}) {
if (a_shape.element_type() == type) {
needs_upcast = true;
break;
}
}
if (needs_upcast) {
a = ConvertElementType(a, F32);
x = ConvertElementType(x, F32);
}
XlaOp result = doit(a, x, a_shape.element_type());
if (needs_upcast) {
result = ConvertElementType(result, a_shape.element_type());
}
return result;
});
}
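// Computes the gradient of a Gamma(alpha, 1) sample with respect to alpha via
// implicit differentiation of the incomplete gamma function (the
// SAMPLE_DERIVATIVE mode of the series/continued-fraction kernels).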
XlaOp RandomGammaGrad(XlaOp a, XlaOp x) {
auto& b = *a.builder();
auto doit = [&b](XlaOp a, XlaOp x, PrimitiveType type) -> XlaOp {
XlaOp is_nan = Or(IsNan(a), IsNan(x));
XlaOp x_is_zero = Eq(x, ScalarLike(x, 0));
XlaOp domain_error = Or(Lt(x, ScalarLike(x, 0)), Le(a, ScalarLike(a, 0)));
XlaOp use_igammac = And(Gt(x, ScalarLike(x, 1)), Gt(x, a));
XlaOp ax = a * Log(x) - x - Lgamma(a);
XlaOp underflow = Lt(ax, -Log(MaxFiniteValue(&b, type)));
ax = Exp(ax);
XlaOp enabled = Not(Or(Or(Or(x_is_zero, domain_error), underflow), is_nan));
const double nan = std::numeric_limits<double>::quiet_NaN();
XlaOp output = Select(use_igammac,
-IgammacContinuedFraction<SAMPLE_DERIVATIVE>(
ax, x, a, And(enabled, use_igammac), type),
IgammaSeries<SAMPLE_DERIVATIVE>(
ax, x, a, And(enabled, Not(use_igammac)), type));
output = Select(x_is_zero, ZerosLike(output), output);
output = Select(Or(domain_error, is_nan), FullLike(a, nan), output);
return output;
};
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto a_shape, b.GetShape(a));
TF_ASSIGN_OR_RETURN(auto x_shape, b.GetShape(x));
if (a_shape != x_shape) {
return InvalidArgument(
"Arguments to RandomGammaGrad must have equal shapes and types; got "
"%s and %s",
a_shape.ToString(), x_shape.ToString());
}
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("RandomGammaGrad", a));
bool needs_upcast =
a_shape.element_type() == F16 || a_shape.element_type() == BF16;
if (needs_upcast) {
a = ConvertElementType(a, F32);
x = ConvertElementType(x, F32);
}
XlaOp result = doit(a, x, a_shape.element_type());
if (needs_upcast) {
result = ConvertElementType(result, a_shape.element_type());
}
return result;
});
}
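// Computes the regularized upper incomplete gamma function
// Q(a, x) = 1 - P(a, x).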
XlaOp Igammac(XlaOp a, XlaOp x) {
auto& b = *a.builder();
auto doit = [&b](XlaOp a, XlaOp x, PrimitiveType type) -> XlaOp {
XlaOp out_of_range = Or(Le(x, ScalarLike(x, 0)), Le(a, ScalarLike(a, 0)));
XlaOp use_igamma = Or(Lt(x, ScalarLike(x, 1)), Lt(x, a));
XlaOp ax = a * Log(x) - x - Lgamma(a);
XlaOp underflow = Lt(ax, -Log(MaxFiniteValue(&b, type)));
XlaOp enabled = Not(Or(out_of_range, underflow));
ax = Exp(ax);
XlaOp result =
Select(use_igamma,
ScalarLike(a, 1) - IgammaSeries<VALUE>(
ax, x, a, And(enabled, use_igamma), type),
IgammacContinuedFraction<VALUE>(
ax, x, a, And(enabled, Not(use_igamma)), type));
XlaOp x_is_infinity =
Eq(x, ScalarLike(x, std::numeric_limits<float>::infinity()));
result = Select(x_is_infinity, ZerosLike(result), result);
return Select(out_of_range, FullLike(a, 1), result);
};
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto a_shape, b.GetShape(a));
TF_ASSIGN_OR_RETURN(auto x_shape, b.GetShape(x));
if (a_shape != x_shape) {
return InvalidArgument(
"Arguments to Igammac must have equal shapes and types; "
"got %s and %s",
a_shape.ToString(), x_shape.ToString());
}
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("Igammac", a));
PrimitiveType a_x_type = a_shape.element_type();
bool needs_upcast =
a_shape.element_type() == F16 || a_shape.element_type() == BF16;
if (needs_upcast) {
a = ConvertElementType(a, F32);
x = ConvertElementType(x, F32);
a_x_type = F32;
}
XlaOp result = doit(a, x, a_x_type);
if (needs_upcast) {
result = ConvertElementType(result, a_shape.element_type());
}
return result;
});
}
XlaOp RoundToEven(XlaOp x) {
auto& b = *x.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("RoundToEven", x));
return RoundNearestEven(x);
});
}
XlaOp Acos(XlaOp x) {
XlaBuilder* b = x.builder();
return b->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto shape, b->GetShape(x));
if (primitive_util::IsComplexType(shape.element_type())) {
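      // acos(z) = -i * log(z + i * sqrt((1 + z) * (1 - z))) for complex z.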
auto one = ScalarLike(x, 1);
auto imag_one = Complex(
Zero(b, primitive_util::ComplexComponentType(shape.element_type())),
One(b, primitive_util::ComplexComponentType(shape.element_type())));
auto result =
Neg(imag_one * Log(x + imag_one * Sqrt((one + x) * (one - x))));
return result;
}
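    // For real x, acos(x) = 2 * atan2(sqrt(1 - x^2), 1 + x); the atan2 form
    // is indeterminate at x == -1, so that point is patched to pi.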
return Select(Ne(x, FullLike(x, -1)),
ScalarLike(x, 2.0) * Atan2(Sqrt(ScalarLike(x, 1.0) - x * x),
ScalarLike(x, 1.0) + x),
FullLike(x, M_PI));
});
}
XlaOp Asin(XlaOp x) {
XlaBuilder* b = x.builder();
auto do_it = [&](XlaOp z) -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto shape, b->GetShape(z));
auto elem_ty = shape.element_type();
switch (elem_ty) {
case C128:
return math_impl::AsinComplex<double>(z);
case C64:
return math_impl::AsinComplex<float>(z);
case F64:
return math_impl::AsinReal<double>(z);
case F32:
return math_impl::AsinReal<float>(z);
default:
return InvalidArgument("Asin got unsupported element type %s",
PrimitiveType_Name(elem_ty));
}
};
return DoWithUpcastToF32(
x, {}, [&](XlaOp x) { return b->ReportErrorOrReturn(do_it(x)); });
}
XlaOp Atan(XlaOp x) { return Atan2(x, ScalarLike(x, 1.0)); }
XlaOp Acosh(XlaOp x) {
XlaBuilder* b = x.builder();
return b->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto shape, b->GetShape(x));
auto one = ScalarLike(x, 1);
auto neg_one = ScalarLike(x, -1);
auto nan = FullLike(x, std::numeric_limits<float>::quiet_NaN());
auto naive_result = Log(x + Sqrt((x + one) * (x - one)));
if (primitive_util::IsComplexType(shape.element_type())) {
return naive_result;
}
auto overflow_result = Log(x) + Log(ScalarLike(x, 2));
auto sqrt_max_value = Sqrt(MaxFiniteValue(b, shape.element_type()));
return Select(Lt(x, neg_one), nan,
Select(Ge(x, sqrt_max_value), overflow_result, naive_result));
});
}
XlaOp Asinh(XlaOp x) {
XlaBuilder* b = x.builder();
auto do_it = [&](XlaOp x) -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto shape, b->GetShape(x));
auto one = ScalarLike(x, 1);
if (primitive_util::IsComplexType(shape.element_type())) {
auto x_re = Real(x);
auto x_im = Imag(x);
auto z = Asin(Complex(x_im, -x_re));
auto z_im = Imag(z);
auto on_branch_cut = And(Eq(x_re, ScalarLike(x_re, 0)),
Gt(Abs(x_im), ScalarLike(x_im, 1)));
return Complex(Select(on_branch_cut, z_im, -z_im), Real(z));
}
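    // For real inputs pick among three formulations: a log1p-based form for
    // |x| <= 1 (avoids cancellation), log(a + sqrt(a^2 + 1)) for moderate
    // magnitudes, and log(2 * |x|) once a * a would overflow.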
auto a = Abs(x);
auto small_result = Log1p(a + a * a / (one + Sqrt(a * a + one)));
auto naive_result = Log(a + Sqrt(a * a + one));
auto overflow_result = Log(Abs(a)) + Log(ScalarLike(a, 2));
auto sqrt_max_value = Sqrt(MaxFiniteValue(b, shape.element_type()));
return Sign(x) * Select(Ge(a, sqrt_max_value), overflow_result,
Select(Le(a, one), small_result, naive_result));
};
return DoWithUpcastToF32(x, {BF16, F16}, [&](XlaOp x) {
return b->ReportErrorOrReturn(do_it(x));
});
}
XlaOp Atanh(XlaOp x) {
XlaBuilder* b = x.builder();
auto do_it = [&](XlaOp x) -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto shape, b->GetShape(x));
auto naive_result = (Log1p(x) - Log1p(-x)) * ScalarLike(x, 0.5);
if (primitive_util::IsComplexType(shape.element_type())) {
return naive_result;
}
auto nan = FullLike(x, std::numeric_limits<float>::quiet_NaN());
return Select(Gt(Abs(x), ScalarLike(x, 1)), nan, naive_result);
};
return DoWithUpcastToF32(x, {BF16}, [&](XlaOp x) {
return b->ReportErrorOrReturn(do_it(x));
});
}
XlaOp Cosh(XlaOp x) {
XlaBuilder* b = x.builder();
auto do_it = [&](XlaOp x) -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto shape, b->GetShape(x));
auto log_one_half = Log(ScalarLike(x, 0.5));
auto result = Exp(x + log_one_half) + Exp(-x + log_one_half);
if (primitive_util::IsComplexType(shape.element_type())) {
return result;
}
return Max(result, ScalarLike(result, 1.0));
};
return DoWithUpcastToF32(x, {BF16, F16}, [&](XlaOp x) {
return b->ReportErrorOrReturn(do_it(x));
});
}
XlaOp Sinh(XlaOp x) {
XlaBuilder* b = x.builder();
auto do_it = [&](XlaOp x) -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto shape, b->GetShape(x));
auto one_half = ScalarLike(x, 0.5);
auto log_one_half = Log(ScalarLike(x, 0.5));
auto large_sinh_result = Exp(x + log_one_half) - Exp(-x + log_one_half);
if (primitive_util::IsComplexType(shape.element_type())) {
return large_sinh_result;
}
auto expm1 = Expm1(x);
auto one = ScalarLike(x, 1.);
auto small_sinh_result = one_half * (expm1 + expm1 / (expm1 + one));
return Select(Lt(Abs(x), one), small_sinh_result, large_sinh_result);
};
return DoWithUpcastToF32(x, {BF16, F16}, [&](XlaOp x) {
return b->ReportErrorOrReturn(do_it(x));
});
}
XlaOp MaybeConjugate(XlaOp x, bool conjugate) {
XlaBuilder* builder = x.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(x));
auto perform_conj =
primitive_util::IsComplexType(shape.element_type()) && conjugate;
return perform_conj ? Conj(x) : x;
});
}
XlaOp NextAfter(XlaOp from, XlaOp to) {
auto builder = from.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
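    // nextafter via bit twiddling: reinterpret the floats as unsigned
    // integers and step the magnitude by one ulp toward `to`, with NaNs,
    // equal inputs, and signed zeros handled as special cases below.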
TF_ASSIGN_OR_RETURN(auto shape, builder->GetShape(from));
int bitwidth = primitive_util::BitWidth(shape.element_type());
auto int_type = primitive_util::UnsignedIntegralTypeForBitWidth(bitwidth);
auto from_as_int = BitcastConvertType(from, int_type);
auto to_as_int = BitcastConvertType(to, int_type);
auto from_is_nan = Ne(from, from);
auto to_is_nan = Ne(to, to);
auto nan_input = Or(from_is_nan, to_is_nan);
auto result_for_nan =
Broadcast(ScalarLike(from, std::numeric_limits<double>::quiet_NaN()),
shape.dimensions());
result_for_nan = BitcastConvertType(result_for_nan, int_type);
const int64_t sign_mask = int64_t{1} << (bitwidth - 1);
auto from_abs = And(from_as_int, ScalarLike(from_as_int, ~sign_mask));
auto to_abs = And(to_as_int, ScalarLike(to_as_int, ~sign_mask));
auto from_and_to_are_equal = Eq(from_as_int, to_as_int);
auto result_for_equal = to_as_int;
auto from_is_zero = Eq(from_abs, ZerosLike(from_abs));
auto to_is_zero = Eq(to_abs, ZerosLike(to_abs));
auto result_for_both_zero = to_as_int;
auto from_sign = And(from_as_int, ScalarLike(from_as_int, sign_mask));
auto to_sign = And(to_as_int, ScalarLike(to_as_int, sign_mask));
auto result_for_from_zero_to_non_zero =
Or(to_sign, ScalarLike(from_as_int, 1));
auto signs_disagree = Ne(from_sign, to_sign);
auto from_magnitude_larger_than_to = Gt(from_abs, to_abs);
auto result_has_smaller_magnitude =
Or(from_magnitude_larger_than_to, signs_disagree);
auto magnitude_adjustment =
Select(result_has_smaller_magnitude,
Broadcast(ScalarLike(from_as_int, -1), shape.dimensions()),
Broadcast(ScalarLike(from_as_int, 1), shape.dimensions()));
auto result = Add(from_as_int, magnitude_adjustment);
result = Select(from_is_zero,
Select(to_is_zero, result_for_both_zero,
result_for_from_zero_to_non_zero),
result);
result = Select(from_and_to_are_equal, result_for_equal, result);
result = Select(nan_input, result_for_nan, result);
return BitcastConvertType(result, shape.element_type());
});
}
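// Exponentially scaled modified Bessel function of the first kind, order 0:
// I0e(x) = exp(-|x|) * I0(x). Uses Chebyshev expansions on |x| <= 8 and
// |x| > 8 (coefficients as in Cephes i0.c); I0eImpl64 below mirrors this
// with higher-order expansions for F64.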
static XlaOp I0eImpl32(XlaOp x) {
static const std::array<float, 18> kI0eCoeffsA{
-1.30002500998624804212E-8f, 6.04699502254191894932E-8f,
-2.67079385394061173391E-7f, 1.11738753912010371815E-6f,
-4.41673835845875056359E-6f, 1.64484480707288970893E-5f,
-5.75419501008210370398E-5f, 1.88502885095841655729E-4f,
-5.76375574538582365885E-4f, 1.63947561694133579842E-3f,
-4.32430999505057594430E-3f, 1.05464603945949983183E-2f,
-2.37374148058994688156E-2f, 4.93052842396707084878E-2f,
-9.49010970480476444210E-2f, 1.71620901522208775349E-1f,
-3.04682672343198398683E-1f, 6.76795274409476084995E-1f};
static const std::array<float, 7> kI0eCoeffsB{
3.39623202570838634515E-9f, 2.26666899049817806459E-8f,
2.04891858946906374183E-7f, 2.89137052083475648297E-6f,
6.88975834691682398426E-5f, 3.36911647825569408990E-3f,
8.04490411014108831608E-1f};
x = Abs(x);
auto half = xla::ScalarLike(x, 0.5);
auto two = xla::ScalarLike(x, 2.0);
auto thirty_two = xla::ScalarLike(x, 32.0);
auto result_le_8 =
EvaluateChebyshevPolynomial<float>(half * x - two, kI0eCoeffsA);
auto result_gt_8 =
EvaluateChebyshevPolynomial<float>(thirty_two / x - two, kI0eCoeffsB) /
Sqrt(x);
return Select(Le(x, xla::ScalarLike(x, 8.0)), result_le_8, result_gt_8);
}
static XlaOp I0eImpl64(XlaOp x) {
static const std::array<double, 30> kI0eCoeffsA{
-4.41534164647933937950E-18, 3.33079451882223809783E-17,
-2.43127984654795469359E-16, 1.71539128555513303061E-15,
-1.16853328779934516808E-14, 7.67618549860493561688E-14,
-4.85644678311192946090E-13, 2.95505266312963983461E-12,
-1.72682629144155570723E-11, 9.67580903537323691224E-11,
-5.18979560163526290666E-10, 2.65982372468238665035E-9,
-1.30002500998624804212E-8, 6.04699502254191894932E-8,
-2.67079385394061173391E-7, 1.11738753912010371815E-6,
-4.41673835845875056359E-6, 1.64484480707288970893E-5,
-5.75419501008210370398E-5, 1.88502885095841655729E-4,
-5.76375574538582365885E-4, 1.63947561694133579842E-3,
-4.32430999505057594430E-3, 1.05464603945949983183E-2,
-2.37374148058994688156E-2, 4.93052842396707084878E-2,
-9.49010970480476444210E-2, 1.71620901522208775349E-1,
-3.04682672343198398683E-1, 6.76795274409476084995E-1};
static const std::array<double, 25> kI0eCoeffsB{
-7.23318048787475395456E-18, -4.83050448594418207126E-18,
4.46562142029675999901E-17, 3.46122286769746109310E-17,
-2.82762398051658348494E-16, -3.42548561967721913462E-16,
1.77256013305652638360E-15, 3.81168066935262242075E-15,
-9.55484669882830764870E-15, -4.15056934728722208663E-14,
1.54008621752140982691E-14, 3.85277838274214270114E-13,
7.18012445138366623367E-13, -1.79417853150680611778E-12,
-1.32158118404477131188E-11, -3.14991652796324136454E-11,
1.18891471078464383424E-11, 4.94060238822496958910E-10,
3.39623202570838634515E-9, 2.26666899049817806459E-8,
2.04891858946906374183E-7, 2.89137052083475648297E-6,
6.88975834691682398426E-5, 3.36911647825569408990E-3,
8.04490411014108831608E-1};
x = Abs(x);
auto half = xla::ScalarLike(x, 0.5);
auto two = xla::ScalarLike(x, 2.0);
auto thirty_two = xla::ScalarLike(x, 32.0);
auto result_le_8 =
EvaluateChebyshevPolynomial<double>(half * x - two, kI0eCoeffsA);
auto result_gt_8 =
EvaluateChebyshevPolynomial<double>(thirty_two / x - two, kI0eCoeffsB) /
Sqrt(x);
return Select(Le(x, xla::ScalarLike(x, 8.0)), result_le_8, result_gt_8);
}
XlaOp BesselI0e(XlaOp x) {
auto& b = *x.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("BesselI0e", x));
TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(x));
if (shape.element_type() == F64) {
return I0eImpl64(x);
}
return DoWithUpcastToF32(x, {BF16, F16},
[](XlaOp x) { return I0eImpl32(x); });
});
}
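// Exponentially scaled modified Bessel function of the first kind, order 1:
// I1e(x) = exp(-|x|) * I1(x), odd in x, again via two Chebyshev expansions
// split at |x| == 8.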
static XlaOp I1eImpl32(XlaOp x) {
static const std::array<float, 17> kI1eCoeffsA{
9.38153738649577178388E-9f, -4.44505912879632808065E-8f,
2.00329475355213526229E-7f, -8.56872026469545474066E-7f,
3.47025130813767847674E-6f, -1.32731636560394358279E-5f,
4.78156510755005422638E-5f, -1.61760815825896745588E-4f,
5.12285956168575772895E-4f, -1.51357245063125314899E-3f,
4.15642294431288815669E-3f, -1.05640848946261981558E-2f,
2.47264490306265168283E-2f, -5.29459812080949914269E-2f,
1.02643658689847095384E-1f, -1.76416518357834055153E-1f,
2.52587186443633654823E-1f};
static const std::array<float, 7> kI1eCoeffsB{
-3.83538038596423702205E-9f, -2.63146884688951950684E-8f,
-2.51223623787020892529E-7f, -3.88256480887769039346E-6f,
-1.10588938762623716291E-4f, -9.76109749136146840777E-3f,
7.78576235018280120474E-1f};
XlaOp z = Abs(x);
auto half = xla::ScalarLike(x, 0.5);
auto two = xla::ScalarLike(x, 2.0);
auto thirty_two = xla::ScalarLike(x, 32.0);
auto result_le_8 =
z * EvaluateChebyshevPolynomial<float>(half * z - two, kI1eCoeffsA);
auto result_gt_8 =
EvaluateChebyshevPolynomial<float>(thirty_two / z - two, kI1eCoeffsB) /
Sqrt(z);
return Sign(x) *
Select(Le(z, xla::ScalarLike(x, 8.0)), result_le_8, result_gt_8);
}
static XlaOp I1eImpl64(XlaOp x) {
static const std::array<double, 29> kI1eCoeffsA{
2.77791411276104639959E-18, -2.11142121435816608115E-17,
1.55363195773620046921E-16, -1.10559694773538630805E-15,
7.60068429473540693410E-15, -5.04218550472791168711E-14,
3.22379336594557470981E-13, -1.98397439776494371520E-12,
1.17361862988909016308E-11, -6.66348972350202774223E-11,
3.62559028155211703701E-10, -1.88724975172282928790E-9,
9.38153738649577178388E-9, -4.44505912879632808065E-8,
2.00329475355213526229E-7, -8.56872026469545474066E-7,
3.47025130813767847674E-6, -1.32731636560394358279E-5,
4.78156510755005422638E-5, -1.61760815825896745588E-4,
5.12285956168575772895E-4, -1.51357245063125314899E-3,
4.15642294431288815669E-3, -1.05640848946261981558E-2,
2.47264490306265168283E-2, -5.29459812080949914269E-2,
1.02643658689847095384E-1, -1.76416518357834055153E-1,
2.52587186443633654823E-1};
static const std::array<double, 25> kI1eCoeffsB{
7.51729631084210481353E-18, 4.41434832307170791151E-18,
-4.65030536848935832153E-17, -3.20952592199342395980E-17,
2.96262899764595013876E-16, 3.30820231092092828324E-16,
-1.88035477551078244854E-15, -3.81440307243700780478E-15,
1.04202769841288027642E-14, 4.27244001671195135429E-14,
-2.10154184277266431302E-14, -4.08355111109219731823E-13,
-7.19855177624590851209E-13, 2.03562854414708950722E-12,
1.41258074366137813316E-11, 3.25260358301548823856E-11,
-1.89749581235054123450E-11, -5.58974346219658380687E-10,
-3.83538038596423702205E-9, -2.63146884688951950684E-8,
-2.51223623787020892529E-7, -3.88256480887769039346E-6,
-1.10588938762623716291E-4, -9.76109749136146840777E-3,
7.78576235018280120474E-1};
XlaOp z = Abs(x);
auto half = xla::ScalarLike(x, 0.5);
auto two = xla::ScalarLike(x, 2.0);
auto thirty_two = xla::ScalarLike(x, 32.0);
auto result_le_8 =
z * EvaluateChebyshevPolynomial<double>(half * z - two, kI1eCoeffsA);
auto result_gt_8 =
EvaluateChebyshevPolynomial<double>(thirty_two / z - two, kI1eCoeffsB) /
Sqrt(z);
return Sign(x) *
Select(Le(z, xla::ScalarLike(x, 8.0)), result_le_8, result_gt_8);
}
XlaOp BesselI1e(XlaOp x) {
auto& b = *x.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("BesselI1e", x));
TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(x));
if (shape.element_type() == F64) {
return I1eImpl64(x);
}
return DoWithUpcastToF32(x, {BF16, F16},
[](XlaOp x) { return I1eImpl32(x); });
});
}
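// Evaluates a generalized continued fraction with the modified Lentz method
// (Thompson and Barnett): tracks the ratios C = h_n / h_{n-1} and
// D = h_{n-1} / h_n, clamps near-zero intermediates to `small` to avoid
// division blow-ups, and stops once |C * D - 1| drops below `threshold` for
// every element or `num_iterations` is reached.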
static XlaOp LentzThompsonBarnettAlgorithm(
int64_t num_iterations, double small, double threshold,
const ForEachIndexBodyFunction& nth_partial_numerator,
const ForEachIndexBodyFunction& nth_partial_denominator,
absl::Span<const XlaOp> inputs, absl::string_view name) {
auto& b = *inputs.front().builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_RET_CHECK(num_iterations < INT32_MAX);
enum {
kIterationIdx,
kValuesUnconvergedIdx,
kCIdx,
kDIdx,
kHIdx,
kFirstInputIdx,
};
auto while_cond_fn =
[num_iterations](absl::Span<const XlaOp> values,
XlaBuilder* cond_builder) -> absl::StatusOr<XlaOp> {
auto iteration = values[kIterationIdx];
auto iterations_remain_cond =
Lt(iteration, ScalarLike(iteration, num_iterations));
auto values_unconverged_cond = values[kValuesUnconvergedIdx];
return And(iterations_remain_cond, values_unconverged_cond);
};
auto while_body_fn =
[small, threshold, &nth_partial_numerator, &nth_partial_denominator](
absl::Span<const XlaOp> values,
XlaBuilder* body_builder) -> absl::StatusOr<std::vector<XlaOp>> {
XlaOp iteration = values[kIterationIdx];
TF_ASSIGN_OR_RETURN(
std::vector<XlaOp> partial_numerator,
nth_partial_numerator(iteration, values.subspan(kFirstInputIdx),
body_builder));
TF_RET_CHECK(partial_numerator.size() == 1);
TF_ASSIGN_OR_RETURN(
std::vector<XlaOp> partial_denominator,
nth_partial_denominator(iteration, values.subspan(kFirstInputIdx),
body_builder));
TF_RET_CHECK(partial_denominator.size() == 1);
auto c = partial_denominator[0] + partial_numerator[0] / values[kCIdx];
auto small_constant = FullLike(c, small);
c = Select(Lt(Abs(c), small_constant), small_constant, c);
auto d = partial_denominator[0] + partial_numerator[0] * values[kDIdx];
d = Select(Lt(Abs(d), small_constant), small_constant, d);
d = Reciprocal(d);
auto delta = c * d;
auto h = values[kHIdx] * delta;
std::vector<XlaOp> updated_values(values.size());
updated_values[kIterationIdx] = Add(iteration, ScalarLike(iteration, 1));
updated_values[kCIdx] = c;
updated_values[kDIdx] = d;
updated_values[kHIdx] = h;
std::copy(values.begin() + kFirstInputIdx, values.end(),
updated_values.begin() + kFirstInputIdx);
auto tolerance_comparison =
Ge(Abs(Sub(delta, FullLike(delta, 1.0))), FullLike(delta, threshold));
updated_values[kValuesUnconvergedIdx] =
ReduceAll(tolerance_comparison, ConstantR0<bool>(body_builder, false),
CreateScalarOrComputation(PRED, body_builder));
return updated_values;
};
TF_ASSIGN_OR_RETURN(std::vector<XlaOp> partial_denominator,
nth_partial_denominator(Zero(&b, U32), inputs, &b));
TF_RET_CHECK(partial_denominator.size() == 1);
auto h = partial_denominator[0];
auto small_constant = FullLike(h, small);
h = Select(Lt(Abs(h), small_constant), small_constant, h);
std::vector<XlaOp> values(kFirstInputIdx + inputs.size());
values[kIterationIdx] = One(&b, U32);
values[kValuesUnconvergedIdx] = ConstantR0<bool>(&b, true);
values[kCIdx] = h;
values[kDIdx] = FullLike(h, 0.0);
values[kHIdx] = h;
std::copy(inputs.begin(), inputs.end(), values.begin() + kFirstInputIdx);
TF_ASSIGN_OR_RETURN(values, WhileLoopHelper(while_cond_fn, while_body_fn,
values, name, &b));
return values[kHIdx];
});
}
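// Computes the regularized incomplete beta function I_x(a, b) from the Lentz
// continued fraction, applying the symmetry I_x(a, b) = 1 - I_{1-x}(b, a)
// when x >= (a + 1) / (a + b + 2), where the fraction converges slowly.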
XlaOp RegularizedIncompleteBeta(XlaOp a, XlaOp b, XlaOp x) {
auto& builder = *x.builder();
return builder.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape shape, builder.GetShape(a));
TF_ASSIGN_OR_RETURN(Shape b_shape, builder.GetShape(b));
TF_ASSIGN_OR_RETURN(Shape x_shape, builder.GetShape(x));
if (b_shape.element_type() != shape.element_type() ||
x_shape.element_type() != shape.element_type()) {
return InvalidArgument(
"Operands to RegularizedIncompleteBeta must have identical types, "
"got shapes %s, %s, and %s",
shape.ToString(), b_shape.ToString(), x_shape.ToString());
}
if (!primitive_util::IsFloatingPointType(shape.element_type())) {
return InvalidArgument(
"Operands to RegularizedIncompleteBeta must be real-valued "
"floating-point, but got %s",
PrimitiveType_Name(shape.element_type()));
}
PrimitiveType element_type = shape.element_type();
if (element_type == F16 || element_type == BF16) {
element_type = F32;
a = ConvertElementType(a, F32);
b = ConvertElementType(b, F32);
x = ConvertElementType(x, F32);
}
auto NthPartialBetaincNumerator =
[&](XlaOp iteration, absl::Span<const XlaOp> inputs,
XlaBuilder* builder) -> absl::StatusOr<std::vector<XlaOp>> {
auto a = inputs[0];
auto b = inputs[1];
auto x = inputs[2];
auto iteration_bcast = Broadcast(iteration, shape.dimensions());
auto iteration_is_even =
Eq(iteration_bcast % FullLike(iteration_bcast, 2),
FullLike(iteration_bcast, 0));
auto iteration_is_one = Eq(iteration_bcast, FullLike(iteration_bcast, 1));
auto iteration_minus_one = iteration_bcast - FullLike(iteration_bcast, 1);
auto m = iteration_minus_one / FullLike(iteration_minus_one, 2);
m = ConvertElementType(m, element_type);
auto one = FullLike(a, 1.0);
auto two = FullLike(a, 2.0);
auto even_numerator =
-(a + m) * (a + b + m) * x / ((a + two * m) * (a + two * m + one));
auto odd_numerator =
m * (b - m) * x / ((a + two * m - one) * (a + two * m));
auto one_numerator = ScalarLike(x, 1.0);
auto numerator = Select(iteration_is_even, even_numerator, odd_numerator);
return std::vector<XlaOp>{
Select(iteration_is_one, one_numerator, numerator)};
};
auto NthPartialBetaincDenominator =
[&shape](XlaOp iteration, absl::Span<const XlaOp> inputs,
XlaBuilder* builder) -> absl::StatusOr<std::vector<XlaOp>> {
auto x = inputs[2];
auto iteration_bcast = Broadcast(iteration, shape.dimensions());
return std::vector<XlaOp>{
Select(Eq(iteration_bcast, ScalarLike(iteration_bcast, 0)),
ScalarLike(x, 0.0), ScalarLike(x, 1.0))};
};
auto result_is_nan =
Or(Or(Or(Le(a, ScalarLike(a, 0.0)), Le(b, ScalarLike(b, 0.0))),
Lt(x, ScalarLike(x, 0.0))),
Gt(x, ScalarLike(x, 1.0)));
auto converges_rapidly =
Lt(x, (a + FullLike(a, 1.0)) / (a + b + FullLike(b, 2.0)));
auto a_orig = a;
a = Select(converges_rapidly, a, b);
b = Select(converges_rapidly, b, a_orig);
x = Select(converges_rapidly, x, Sub(FullLike(x, 1.0), x));
XlaOp continued_fraction;
if (element_type == F32) {
continued_fraction = LentzThompsonBarnettAlgorithm(
200,
std::numeric_limits<float>::epsilon() / 2.0f,
std::numeric_limits<float>::epsilon() / 2.0f,
NthPartialBetaincNumerator,
NthPartialBetaincDenominator, {a, b, x},
"Betainc");
} else {
TF_RET_CHECK(element_type == F64);
continued_fraction = LentzThompsonBarnettAlgorithm(
600,
          std::numeric_limits<double>::epsilon() / 2.0,
          std::numeric_limits<double>::epsilon() / 2.0,
NthPartialBetaincNumerator,
NthPartialBetaincDenominator, {a, b, x},
"Betainc");
}
auto lbeta = Lbeta(a, b);
auto result =
continued_fraction * Exp(Log(x) * a + Log1p(-x) * b - lbeta) / a;
result = Select(result_is_nan, NanValue(&builder, element_type), result);
auto out =
Select(converges_rapidly, result, Sub(FullLike(result, 1.0), result));
return shape.element_type() == element_type
? out
: ConvertElementType(out, shape.element_type());
});
}
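// Polygamma(n, x) = (-1)^(n + 1) * n! * Zeta(n + 1, x); n == 0 reduces to
// Digamma(x), and n must be a non-negative integer (NaN otherwise).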
XlaOp Polygamma(XlaOp n, XlaOp x) {
auto& builder = *x.builder();
auto doit = [](XlaOp n, XlaOp x, PrimitiveType type) -> XlaOp {
XlaOp n_plus_one = n + ScalarLike(n, 1.);
XlaOp sign =
(ScalarLike(n, 2.) * Rem(n, ScalarLike(n, 2.)) - ScalarLike(n, 1.));
const double nan = std::numeric_limits<double>::quiet_NaN();
XlaOp output = Select(Eq(n, ScalarLike(n, 0.)), Digamma(x),
sign * Exp(Lgamma(n_plus_one)) * Zeta(n_plus_one, x));
output = Select(Or(Ne(n, Floor(n)), Lt(n, ScalarLike(n, 0.))),
ScalarLike(n, nan), output);
return output;
};
return builder.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto n_shape, builder.GetShape(n));
TF_ASSIGN_OR_RETURN(auto x_shape, builder.GetShape(x));
if (n_shape != x_shape) {
return InvalidArgument(
"Arguments to Polygamma must have equal shapes and types; "
"got %s and %s",
n_shape.ToString(), x_shape.ToString());
}
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("Zeta", x));
    bool needs_upcast =
        n_shape.element_type() == F16 || n_shape.element_type() == BF16;
if (needs_upcast) {
n = ConvertElementType(n, F32);
x = ConvertElementType(x, F32);
}
XlaOp result = doit(n, x, n_shape.element_type());
if (needs_upcast) {
result = ConvertElementType(result, n_shape.element_type());
}
return result;
});
}
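// Hurwitz zeta(x, q) by Euler-Maclaurin summation: S accumulates the first
// N + 1 terms of sum_k (q + k)^-x, I approximates the tail integral, and T
// adds the Bernoulli-number correction terms (kZetaCoeffs, cf. Cephes
// zeta.c), with poles and domain errors patched at the end.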
XlaOp Zeta(XlaOp x, XlaOp q) {
auto& builder = *x.builder();
auto doit = [&builder](XlaOp x, XlaOp q, PrimitiveType type) -> XlaOp {
static constexpr int M = 12, N = 9;
static const std::array<double, M> kZetaCoeffs{
-7.1661652561756670113e18,
1.8152105401943546773e17,
-4.5979787224074726105e15,
1.1646782814350067249e14,
-2.950130727918164224e12,
7.47242496e10,
-1.8924375803183791606e9,
47900160.0,
-1209600.0,
30240.0,
-720.0,
12.0,
};
XlaOp acc = q, neg_power = ScalarLike(q, 0.);
XlaOp S = Pow(q, Neg(x));
for (int i = 0; i < N; ++i) {
acc = acc + ScalarLike(acc, 1.);
neg_power = Pow(acc, Neg(x));
S = S + neg_power;
}
acc = acc + ScalarLike(acc, 1.);
neg_power = Pow(acc, Neg(x));
XlaOp I = neg_power * acc / (x - ScalarLike(acc, 1.));
XlaOp a_inverse_square = Reciprocal(Square(acc));
XlaOp horner_sum = ScalarLike(acc, 0.);
XlaOp factor = ScalarLike(acc, 1.);
static constexpr int kTwoKMinusOne = 2 * M - 1;
for (int i = 0; i < M - 1; ++i) {
factor = (x + ScalarLike(x, kTwoKMinusOne - 1 - 2 * i)) *
(x + ScalarLike(x, kTwoKMinusOne - 2 - 2 * i));
horner_sum = factor * a_inverse_square *
(horner_sum + ScalarLike(acc, 1. / kZetaCoeffs[i]));
}
XlaOp T =
neg_power *
(ScalarLike(neg_power, 0.5) +
x / acc * (ScalarLike(acc, 1. / kZetaCoeffs[M - 1]) + horner_sum));
XlaOp accurate_result = S + I + T;
const double nan = std::numeric_limits<double>::quiet_NaN();
const double inf = std::numeric_limits<double>::infinity();
XlaOp output = Select(Lt(Abs(neg_power), Abs(S) * Epsilon(&builder, type)),
S, accurate_result);
output = Select(Eq(x, ScalarLike(x, 1.)), ScalarLike(x, inf), output);
output = Select(Lt(x, ScalarLike(x, 1.)), ScalarLike(x, nan), output);
XlaOp x_domain_error = And(Le(q, ScalarLike(x, 0.)), Ne(x, Floor(x)));
output = Select(x_domain_error, ScalarLike(x, nan), output);
XlaOp at_pole = And(Le(q, ScalarLike(x, 0.)), Eq(q, Floor(q)));
XlaOp x_is_even_int =
And(Eq(Rem(x, ScalarLike(x, 2.)), ScalarLike(x, 0.)), Eq(x, Floor(x)));
output = Select(
at_pole, Select(x_is_even_int, ScalarLike(x, inf), ScalarLike(x, nan)),
output);
return output;
};
return builder.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto x_shape, builder.GetShape(x));
TF_ASSIGN_OR_RETURN(auto q_shape, builder.GetShape(q));
if (x_shape != q_shape) {
return InvalidArgument(
"Arguments to Zeta must have equal shapes and types; got %s and %s",
x_shape.ToString(), q_shape.ToString());
}
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("Zeta", x));
bool needs_upcast =
x_shape.element_type() == F16 || x_shape.element_type() == BF16;
if (needs_upcast) {
x = ConvertElementType(x, F32);
q = ConvertElementType(q, F32);
}
XlaOp result = doit(x, q, x_shape.element_type());
if (needs_upcast) {
result = ConvertElementType(result, x_shape.element_type());
}
return result;
});
}
} | #include "xla/hlo/builder/lib/math.h"
#include <cmath>
#include <complex>
#include <functional>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "xla/array3d.h"
#include "xla/error_spec.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/service/service.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
class MathTest : public ClientLibraryTestBase {
public:
ErrorSpec error_spec_{0.0001};
};
template <typename T>
class MathTypedTest : public MathTest {
public:
void TestLogEdgeCases() {
SetFastMathDisabled(true);
XlaBuilder b(TestName());
Log(AddParam(LiteralUtil::CreateR1<T>({T{0.0}, T{-0.0}}), &b));
ComputeAndCompareR1<T>(&b,
{-std::numeric_limits<T>::infinity(),
-std::numeric_limits<T>::infinity()},
{}, error_spec_);
}
void TestLog1pEdgeCases() {
SetFastMathDisabled(true);
XlaBuilder b(TestName());
Log1p(AddParam(LiteralUtil::CreateR1<T>({T{0.0}, T{-0.0}, T{-1.0}}), &b));
ComputeAndCompareR1<T>(
&b, {T{0.0}, T{-0.0}, -std::numeric_limits<T>::infinity()}, {},
error_spec_);
}
void TestIsInfOrNan() {
SetFastMathDisabled(true);
XlaBuilder b(TestName());
auto x =
ConstantR1<T>(&b, {
T{0},
T{100},
T{-1000},
T{std::numeric_limits<T>::max()},
T{std::numeric_limits<T>::lowest()},
T{std::numeric_limits<float>::infinity()},
T{-std::numeric_limits<float>::infinity()},
T{std::numeric_limits<float>::quiet_NaN()},
T{std::numeric_limits<float>::signaling_NaN()},
});
Tuple(&b, {IsFinite(x), IsInf(x), IsPosInf(x), IsNegInf(x), IsNan(x)});
auto expected = LiteralUtil::MakeTupleOwned(
LiteralUtil::CreateR1<bool>(
{true, true, true, true, true, false, false, false, false}),
LiteralUtil::CreateR1<bool>(
{false, false, false, false, false, true, true, false, false}),
LiteralUtil::CreateR1<bool>(
{false, false, false, false, false, true, false, false, false}),
LiteralUtil::CreateR1<bool>(
{false, false, false, false, false, false, true, false, false}),
LiteralUtil::CreateR1<bool>(
{false, false, false, false, false, false, false, true, true}));
ComputeAndCompareLiteral(&b, expected, {});
}
void TestIsNegZero() {
SetFastMathDisabled(true);
XlaBuilder b(TestName());
T inf(std::numeric_limits<float>::infinity());
T nan(std::numeric_limits<float>::quiet_NaN());
IsNegZero(AddParam(
LiteralUtil::CreateR1<T>({T{-0.0}, T{0}, T{1}, T{-1}, inf, -inf, nan}),
&b));
ComputeAndCompareLiteral(
&b,
LiteralUtil::CreateR1<bool>(
{true, false, false, false, false, false, false}),
{}, error_spec_);
}
void TestSqrtPowInequivalence() {
SetFastMathDisabled(true);
mutable_debug_options()->clear_xla_disable_hlo_passes();
const T inf(std::numeric_limits<float>::infinity());
const T nan(std::numeric_limits<float>::quiet_NaN());
XlaBuilder b(TestName());
auto x = AddParam(LiteralUtil::CreateR1<T>({-inf}), &b);
ConcatInDim(
&b, {Sqrt(x), Pow(x, ScalarLike(x, 0.5)), Pow(x, ScalarLike(x, 0.3))},
0);
std::vector<T> expected = {nan, inf, inf};
ComputeAndCompareR1<T>(&b, expected, {}, error_spec_);
}
void TestErfInvEdgeCases() {
SetFastMathDisabled(true);
XlaBuilder b(TestName());
auto x = AddParam(LiteralUtil::CreateR1<T>({T{-1}, T{1}, T{0}}), &b);
ErfInv(x);
const T inf(std::numeric_limits<float>::infinity());
std::vector<T> expected = {-inf, inf, T{0}};
ComputeAndCompareR1<T>(&b, expected, {}, error_spec_);
}
void TestErfEdgeCases() {
SetFastMathDisabled(true);
const T kErfInvOneMinusHalfULP = T(3.832506856900711);
const T inf(std::numeric_limits<float>::infinity());
XlaBuilder b(TestName());
auto x = AddParam(LiteralUtil::CreateR1<T>({T{-inf}, T{inf}, T{-0}, T{0},
T{-kErfInvOneMinusHalfULP},
T{kErfInvOneMinusHalfULP}}),
&b);
Erf(x);
std::vector<T> expected = {T(-1), T(1), T(-0), T(0), T(-1), T(1)};
ComputeAndCompareR1<T>(&b, expected, {}, error_spec_);
}
};
using TestTypes = ::testing::Types<float
#ifndef XLA_BACKEND_DOES_NOT_SUPPORT_FLOAT16
,
Eigen::half
#endif
#ifndef XLA_BACKEND_DOES_NOT_SUPPORT_FLOAT64
,
double
#endif
>;
TYPED_TEST_SUITE(MathTypedTest, TestTypes);
XLA_TYPED_TEST(MathTypedTest, LogEdgeCases) { this->TestLogEdgeCases(); }
XLA_TYPED_TEST(MathTypedTest, Log1pEdgeCases) { this->TestLog1pEdgeCases(); }
XLA_TYPED_TEST(MathTypedTest, IsInfOrNan) { this->TestIsInfOrNan(); }
XLA_TYPED_TEST(MathTypedTest, IsNegZero) { this->TestIsNegZero(); }
XLA_TYPED_TEST(MathTypedTest, DISABLED_ON_TPU(SqrtPowInequivalence)) {
this->TestSqrtPowInequivalence();
}
XLA_TYPED_TEST(MathTypedTest, ErfInvEdgeCases) { this->TestErfInvEdgeCases(); }
XLA_TYPED_TEST(MathTypedTest, ErfEdgeCases) { this->TestErfEdgeCases(); }
XLA_TEST_F(MathTest, RealFpOnlyOps) {
for (int64_t i = PrimitiveType_MIN; i <= PrimitiveType_MAX; ++i) {
auto ty = static_cast<PrimitiveType>(i);
SCOPED_TRACE(PrimitiveType_Name(ty));
Shape shape;
if (ty == U4 || ty == S4) {
continue;
}
if (primitive_util::IsArrayType(ty)) {
shape = ShapeUtil::MakeShape(ty, {42});
} else if (ty == PrimitiveType::TUPLE) {
shape = ShapeUtil::MakeTupleShape({});
} else if (ty == PrimitiveType::OPAQUE_TYPE) {
shape = ShapeUtil::MakeOpaqueShape();
} else if (ty == PrimitiveType::TOKEN) {
shape = ShapeUtil::MakeTokenShape();
} else {
continue;
}
for (const auto& test :
std::vector<std::pair<std::function<XlaOp(XlaOp)>, std::string>>({
{IsFinite, "is_finite"},
{IsInf, "is_inf"},
{IsPosInf, "is_pos_inf"},
{IsNegInf, "is_neg_inf"},
{IsNan, "is_nan"},
{Erf, "erf"},
{Erfc, "erfc"},
{Lgamma, "lgamma"},
{Digamma, "digamma"},
{RoundToEven, "round_to_even"},
})) {
SCOPED_TRACE(test.second);
XlaBuilder b(TestName());
XlaOp p = Parameter(&b, 0, shape, "p0");
test.first(p);
if (primitive_util::IsFloatingPointType(ty)) {
TF_EXPECT_OK(b.first_error());
} else {
EXPECT_FALSE(b.first_error().ok());
}
}
}
}
XLA_TEST_F(MathTest, SqrtF32) {
XlaBuilder builder(TestName());
Literal zero_literal = LiteralUtil::Zero(PrimitiveType::F32);
std::unique_ptr<GlobalData> zero_data =
client_->TransferToServer(zero_literal).value();
XlaOp zero = Parameter(&builder, 0, zero_literal.shape(), "zero");
Sqrt(zero);
ComputeAndCompareR0<float>(&builder, 0.0f, {zero_data.get()}, error_spec_);
}
XLA_TEST_F(MathTest, SqrtF64) {
XlaBuilder builder(TestName());
Literal zero_literal = LiteralUtil::Zero(PrimitiveType::F64);
std::unique_ptr<GlobalData> zero_data =
client_->TransferToServer(zero_literal).value();
XlaOp zero = Parameter(&builder, 0, zero_literal.shape(), "zero");
Sqrt(zero);
  ComputeAndCompareR0<double>(&builder, 0.0, {zero_data.get()}, error_spec_);
}
#ifndef XLA_BACKEND_DOES_NOT_SUPPORT_FLOAT64
XLA_TEST_F(MathTest, ErfInvF64) {
XlaBuilder builder(TestName());
auto x = ConstantR1<double>(
&builder, {-0.9, -0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1, 0.0, 0.1,
0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9});
ErfInv(x);
std::vector<double> expected = {-1.163087153676674, -0.9061938024368231,
-0.732869077959217, -0.5951160814499948,
-0.4769362762044698, -0.37080715859355795,
-0.27246271472675443, -0.1791434546212916,
-0.08885599049425767, 0.,
0.08885599049425777, 0.1791434546212916,
0.27246271472675443, 0.37080715859355784,
0.4769362762044698, 0.5951160814499948,
0.732869077959217, 0.9061938024368231,
1.1630871536766736};
ComputeAndCompareR1<double>(&builder, expected, {}, ErrorSpec{1e-15});
}
#endif
XLA_TEST_F(MathTest, SquareTenValues) {
XlaBuilder builder(TestName());
auto x = ConstantR1<float>(
&builder, {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
Square(x);
std::vector<float> expected = {4.41, 6.76, 6.76, 16., 4.41,
5.29, 25., 0.81, 5.76, 2.56};
ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}
XLA_TEST_F(MathTest, ReciprocalTenValues) {
XlaBuilder builder(TestName());
auto x = ConstantR1<float>(
&builder, {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
Reciprocal(x);
std::vector<float> expected = {
0.47619048, -0.38461538, 0.38461538, -0.25, 0.47619048,
0.43478261, -0.2, -1.11111111, -0.41666667, 0.625};
ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}
XLA_TEST_F(MathTest, SqrtZeroes) {
XlaBuilder builder(TestName());
auto x = ConstantR1<float>(&builder, {0.0, -0.0});
Sqrt(x);
ComputeAndCompareR1<float>(&builder, {0, 0}, {}, error_spec_);
}
XLA_TEST_F(MathTest, SqrtSixValues) {
XlaBuilder builder(TestName());
auto x = ConstantR1<float>(&builder, {16.0, 1.0, 1024.0, 0.16, 0.2, 12345});
Sqrt(x);
std::vector<float> expected = {4, 1, 32, 0.4, 0.4472, 111.1080};
ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}
XLA_TEST_F(MathTest, CbrtSixF32Values) {
XlaBuilder builder(TestName());
auto x = ConstantR1<float>(&builder, {8.0, 1.0, 4096.0, -64.0, 1.728, 1331});
Cbrt(x);
std::vector<float> expected = {2, 1, 16, -4, 1.2, 11};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.001));
}
XLA_TEST_F(MathTest, CbrtSixF64Values) {
XlaBuilder builder(TestName());
auto x = ConstantR1<double>(&builder, {8.0, 1.0, 4096.0, -64.0, 1.728, 1331});
Cbrt(x);
std::vector<double> expected = {2, 1, 16, -4, 1.2, 11};
ComputeAndCompareR1<double>(&builder, expected, {}, ErrorSpec(0.001));
}
XLA_TEST_F(MathTest, SinhSmallValues) {
XlaBuilder builder(TestName());
auto x = ConstantR1<float>(&builder, {1e-3, 1e-5, 1e-7, 1e-9, 1e-11});
Sinh(x);
std::vector<float> expected = {1e-3, 1e-5, 1e-7, 1e-9, 1e-11};
ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}
XLA_TEST_F(MathTest, AsinhSmallValues) {
XlaBuilder builder(TestName());
auto x = ConstantR1<float>(&builder, {1e-3, 1e-5, 1e-7, 1e-9, 1e-11});
Asinh(x);
std::vector<float> expected = {1e-3, 1e-5, 1e-7, 1e-9, 1e-11};
ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}
XLA_TEST_F(MathTest, AtanhSmallValues) {
XlaBuilder builder(TestName());
auto x = ConstantR1<float>(&builder, {1e-8, 1e-9, 1e-10, 1e-11});
Atanh(x);
std::vector<float> expected = {1e-8, 1e-9, 1e-10, 1e-11};
ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}
XLA_TEST_F(MathTest, Lgamma) {
XlaBuilder builder(TestName());
auto x = ConstantR1<float>(&builder, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.5, 1.5,
2.5, -1.5, -3.5, -5.5});
Lgamma(x);
std::vector<float> expected = {
0,
0,
static_cast<float>(std::log(2)),
static_cast<float>(std::log(6)),
static_cast<float>(std::log(24)),
static_cast<float>(std::log(120)),
static_cast<float>(std::log(M_PI) / 2),
static_cast<float>(std::log(M_PI) / 2 - std::log(2)),
static_cast<float>(std::log(M_PI) / 2 - std::log(4) + std::log(3)),
static_cast<float>(std::log(M_PI) / 2 - std::log(3) + std::log(4)),
static_cast<float>(std::log(M_PI) / 2 - std::log(105) + std::log(16)),
static_cast<float>(std::log(M_PI) / 2 - std::log(10395) + std::log(64))};
error_spec_ = ErrorSpec{0.001};
ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}
#if !defined(XLA_BACKEND_DOES_NOT_SUPPORT_FLOAT16)
XLA_TEST_F(MathTest, LgammaF16) {
SetFastMathDisabled(true);
XlaBuilder b(TestName());
auto x = ConstantR1<half>(&b, {
half(-7360.0),
half(-4066.0),
half(-5.9605e-08),
});
Lgamma(x);
std::vector<half> expected = {
std::numeric_limits<half>::infinity(),
std::numeric_limits<half>::infinity(),
half(16.64),
};
ComputeAndCompareR1<half>(&b, expected, {}, ErrorSpec{0.1});
}
#endif
XLA_TEST_F(MathTest, Digamma) {
XlaBuilder builder(TestName());
auto x = ConstantR1<float>(&builder, {1.0, 0.5, 1 / 3.0, 0.25, 1 / 6.0, 0.125,
2.0, 3.0, 4.0, 6.0, 8.0, 9.0});
Digamma(x);
constexpr double euler_mascheroni =
0.57721566490153286060651209008240243104215933593992;
std::vector<float> expected = {
static_cast<float>(-euler_mascheroni),
static_cast<float>(-2 * std::log(2) - euler_mascheroni),
static_cast<float>(-M_PI / 2 / std::sqrt(3) - 3 * std::log(3) / 2 -
euler_mascheroni),
static_cast<float>(-M_PI / 2 - 3 * std::log(2) - euler_mascheroni),
static_cast<float>(-M_PI * std::sqrt(3) / 2 - 2 * std::log(2) -
3 * std::log(3) / 2 - euler_mascheroni),
static_cast<float>(
-M_PI / 2 - 4 * std::log(2) -
(M_PI + std::log(2 + std::sqrt(2)) - std::log(2 - std::sqrt(2))) /
std::sqrt(2) -
euler_mascheroni),
static_cast<float>(1 - euler_mascheroni),
static_cast<float>(1.5 - euler_mascheroni),
static_cast<float>(11 / 6.0 - euler_mascheroni),
static_cast<float>(137 / 60.0 - euler_mascheroni),
static_cast<float>(363 / 140.0 - euler_mascheroni),
static_cast<float>(761 / 280.0 - euler_mascheroni)};
ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}
XLA_TEST_F(MathTest, Igamma) {
XlaBuilder builder(TestName());
auto a = ConstantR3FromArray3D<float>(
&builder,
{{{0.3760359, 1.62685306, 0.53327996, 1.5111382, 0.3521143},
{1.79378175, 1.05317882, 0.85049253, 1.399534, 0.22073882},
{1.17725309, 0.90727209, 1.32418503, 1.53238533, 0.51984756}}});
auto x = ConstantR3FromArray3D<float>(
&builder,
{{{0.56420934, 8.97671773, 2.81068609, 4.50655124, 2.88178617},
{1.01795164, 8.86298411, 0.29232942, 8.17661015, 5.67652269},
{1.59959565, 0.54463897, 0.6585252, 9.83192283, 3.93372669}}});
Igamma(a, x);
Array3D<float> expected = {
{{0.78746926, 0.99940502, 0.98028261, 0.97033807, 0.99054696},
{0.33265522, 0.99983558, 0.32599159, 0.99923275, 0.99980893},
{0.74343963, 0.46703197, 0.33923541, 0.99978511, 0.99460685}}};
ComputeAndCompareR3<float>(&builder, expected, {}, error_spec_);
}
XLA_TEST_F(MathTest, IgammaSpecialValues) {
SetFastMathDisabled(true);
XlaBuilder builder(TestName());
const float nan = std::numeric_limits<float>::quiet_NaN();
auto a =
ConstantR1<float>(&builder, {nan, nan, 0.53327996, -6.00773744602e+37,
-1.3937809742e+31, -23.351348877});
auto x = ConstantR1<float>(
&builder, {nan, 8.97671773, nan, nan, 0.0, 6.02455484352e-39});
Igamma(a, x);
std::vector<float> expected = {nan, nan, nan, nan, nan, nan};
ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}
#if !defined(XLA_BACKEND_DOES_NOT_SUPPORT_FLOAT16)
XLA_TEST_F(MathTest, IgammaF16) {
SetFastMathDisabled(true);
XlaBuilder builder(TestName());
auto a = ConstantR3FromArray3D<half>(
&builder,
{{{half(0.37603), half(1.6268), half(0.53327), half(1.5111)},
{half(1.79378), half(1.05317), half(0.85049), half(1.3995)},
{half(1.17725), half(0.90727), half(1.32418), half(1.5323)}}});
Igamma(a, a);
Array3D<half> expected = {
{{half(0.7068214), half(0.6041154), half(0.67748886), half(0.60799426)},
{half(0.599202), half(0.6288743), half(0.64280254), half(0.6121421)},
{half(0.6220287), half(0.6384635), half(0.6152258), half(0.6072449)}}};
ComputeAndCompareR3<half>(&builder, expected, {}, ErrorSpec{1e-3});
}
#endif
XLA_TEST_F(MathTest, Igammac) {
XlaBuilder builder(TestName());
auto a = ConstantR3FromArray3D<float>(
&builder,
{{{0.3760359, 1.62685306, 0.53327996, 1.5111382, 0.3521143},
{1.79378175, 1.05317882, 0.85049253, 1.399534, 0.22073882},
{1.17725309, 0.90727209, 1.32418503, 1.53238533, 0.51984756}}});
auto x = ConstantR3FromArray3D<float>(
&builder,
{{{0.56420934, 8.97671773, 2.81068609, 4.50655124, 2.88178617},
{1.01795164, 8.86298411, 0.29232942, 8.17661015, 5.67652269},
{1.59959565, 0.54463897, 0.6585252, 9.83192283, 3.93372669}}});
Igammac(a, x);
Array3D<float> expected = {{{2.12530741e-01, 5.94977775e-04, 1.97173867e-02,
2.96619296e-02, 9.45303689e-03},
{6.67344782e-01, 1.64421996e-04, 6.74008406e-01,
7.67252602e-04, 1.91071108e-04},
{2.56560373e-01, 5.32968026e-01, 6.60764593e-01,
2.14889688e-04, 5.39314824e-03}}};
ComputeAndCompareR3<float>(&builder, expected, {}, error_spec_);
}
#if !defined(XLA_BACKEND_DOES_NOT_SUPPORT_FLOAT16)
XLA_TEST_F(MathTest, IgammacF16) {
SetFastMathDisabled(true);
XlaBuilder builder(TestName());
auto a = ConstantR3FromArray3D<half>(
&builder,
{{{half(0.37603), half(1.6268), half(0.53327), half(1.5111)},
{half(1.79378), half(1.05317), half(0.85049), half(1.3995)},
{half(1.17725), half(0.90727), half(1.32418), half(1.5323)}}});
Igammac(a, a);
Array3D<half> expected = {
{{half(0.29317862), half(0.39588454), half(0.32251117), half(0.39200574)},
{half(0.40079802), half(0.37112573), half(0.35719746), half(0.3878579)},
{half(0.3779713), half(0.36153653), half(0.38477424),
half(0.39275512)}}};
ComputeAndCompareR3<half>(&builder, expected, {}, ErrorSpec{1e-4});
}
#endif
XLA_TEST_F(MathTest, RoundToEven) {
XlaBuilder builder(TestName());
auto x = ConstantR1<float>(
&builder, {-1.4, -1.5, -2.5, -0.5, 0, 0.5, 1.5, 2.5, 3.5, 4.5});
RoundToEven(x);
std::vector<float> expected = {-1.0, -2.0, -2.0, -0.0, 0,
0.0, 2.0, 2.0, 4.0, 4.0};
ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}
XLA_TEST_F(MathTest, ErfRejectsComplexInputs) {
XlaBuilder b(TestName());
auto x = ConstantR1<std::complex<float>>(&b, {{0, 0}});
Erf(x);
EXPECT_FALSE(b.Build().status().ok());
}
XLA_TEST_F(MathTest, ErfcRejectsComplexInputs) {
XlaBuilder b(TestName());
auto x = ConstantR1<std::complex<float>>(&b, {{0, 0}});
Erfc(x);
EXPECT_FALSE(b.Build().status().ok());
}
XLA_TEST_F(MathTest, LgammaRejectsComplexInputs) {
XlaBuilder b(TestName());
auto x = ConstantR1<std::complex<float>>(&b, {{0, 0}});
Lgamma(x);
EXPECT_FALSE(b.Build().status().ok());
}
XLA_TEST_F(MathTest, DigammaRejectsComplexInputs) {
XlaBuilder b(TestName());
auto x = ConstantR1<std::complex<float>>(&b, {{0, 0}});
Digamma(x);
EXPECT_FALSE(b.Build().status().ok());
}
XLA_TEST_F(MathTest, RoundToEvenRejectsComplexInputs) {
XlaBuilder b(TestName());
auto x = ConstantR1<std::complex<float>>(&b, {{0, 0}});
RoundToEven(x);
EXPECT_FALSE(b.Build().status().ok());
}
XLA_TEST_F(MathTest, BesselI0eFloat) {
XlaBuilder builder(TestName());
auto x = ConstantR1<float>(
&builder,
{-20.0, -18.0, -16.0, -14.0, -12.0, -10.0, -8.0, -6.0, -4.0, -2.0, 0.0,
2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0});
BesselI0e(x);
std::vector<float> expected = {0.0897803118848,
0.0947062952128,
0.100544127361,
0.107615251671,
0.116426221213,
0.127833337163,
0.143431781857,
0.16665743264,
0.207001921224,
0.308508322554,
1.0,
0.308508322554,
0.207001921224,
0.16665743264,
0.143431781857,
0.127833337163,
0.116426221213,
0.107615251671,
0.100544127361,
0.0947062952128,
0.0897803118848};
ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}
XLA_TEST_F(MathTest, DISABLED_ON_TPU(BesselI0eDouble)) {
XlaBuilder builder(TestName());
auto x = ConstantR1<double>(
&builder,
{-20.0, -18.0, -16.0, -14.0, -12.0, -10.0, -8.0, -6.0, -4.0, -2.0, 0.0,
2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0});
BesselI0e(x);
std::vector<double> expected = {0.0897803118848,
0.0947062952128,
0.100544127361,
0.107615251671,
0.116426221213,
0.127833337163,
0.143431781857,
0.16665743264,
0.207001921224,
0.308508322554,
1.0,
0.308508322554,
0.207001921224,
0.16665743264,
0.143431781857,
0.127833337163,
0.116426221213,
0.107615251671,
0.100544127361,
0.0947062952128,
0.0897803118848};
ComputeAndCompareR1<double>(&builder, expected, {}, error_spec_);
}
XLA_TEST_F(MathTest, BesselI1eFloat) {
XlaBuilder builder(TestName());
auto x = ConstantR1<float>(
&builder,
{-20.0, -18.0, -16.0, -14.0, -12.0, -10.0, -8.0, -6.0, -4.0, -2.0, 0.0,
2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0});
BesselI1e(x);
std::vector<float> expected = {-0.0875062221833,
-0.092036796872,
-0.0973496147565,
-0.103697667463,
-0.11146429929,
-0.121262681384,
-0.134142493293,
-0.152051459309,
-0.178750839502,
-0.215269289249,
0.0,
0.215269289249,
0.178750839502,
0.152051459309,
0.134142493293,
0.121262681384,
0.11146429929,
0.103697667463,
0.0973496147565,
0.092036796872,
0.0875062221833};
ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}
XLA_TEST_F(MathTest, DISABLED_ON_TPU(BesselI1eDouble)) {
XlaBuilder builder(TestName());
auto x = ConstantR1<double>(
&builder,
{-20.0, -18.0, -16.0, -14.0, -12.0, -10.0, -8.0, -6.0, -4.0, -2.0, 0.0,
2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0});
BesselI1e(x);
std::vector<double> expected = {-0.0875062221833,
-0.092036796872,
-0.0973496147565,
-0.103697667463,
-0.11146429929,
-0.121262681384,
-0.134142493293,
-0.152051459309,
-0.178750839502,
-0.215269289249,
0.0,
0.215269289249,
0.178750839502,
0.152051459309,
0.134142493293,
0.121262681384,
0.11146429929,
0.103697667463,
0.0973496147565,
0.092036796872,
0.0875062221833};
ComputeAndCompareR1<double>(&builder, expected, {}, error_spec_);
}
XLA_TEST_F(MathTest, AcosComplexValues) {
XlaBuilder builder(TestName());
auto x = ConstantR1<std::complex<float>>(
&builder, {{0, 0}, {0, 1}, {1, 1}, {0.8, 0.2}});
Acos(x);
std::vector<std::complex<float>> expected = {
{1.5707963267948966, 0},
{1.5707963267948966, -0.881373587019543},
{0.9045568943023814, -1.0612750619050357},
{0.7011246914497526, -0.30527648462436596}};
ComputeAndCompareR1<std::complex<float>>(&builder, expected, {}, error_spec_);
}
XLA_TEST_F(MathTest, ZetaF64) {
XlaBuilder builder(TestName());
auto x = ConstantR1<double>(&builder, {2.0});
auto q = ConstantR1<double>(&builder, {1.0});
Zeta(x, q);
std::vector<double> expected = {1.64493406684823};
ComputeAndCompareR1<double>(&builder, expected, {},
ErrorSpec{0.00000000000001});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/math.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/math_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5ac1b344-4708-4ddf-93cf-5d8ed5df8d87 | cpp | tensorflow/tensorflow | reader | tensorflow/cc/saved_model/reader.cc | tensorflow/cc/saved_model/reader_test.cc | #include "tensorflow/cc/saved_model/reader.h"
#include <memory>
#include <string>
#include <unordered_set>
#include <utility>
#include "absl/memory/memory.h"
#include "absl/status/statusor.h"
#include "tensorflow/cc/saved_model/constants.h"
#include "tensorflow/cc/saved_model/metrics.h"
#include "tensorflow/cc/saved_model/util.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/file_system_helper.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saved_model.pb.h"
#include "tensorflow/core/util/tensor_bundle/byte_swap_tensor.h"
#define IS_OSS true
namespace tensorflow {
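// Returns the MetaGraphDef in `saved_model_proto` whose tag set matches
// `tags` exactly, byte-swapping tensor content first on big-endian hosts.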
absl::StatusOr<MetaGraphDef*> FindMetaGraphDef(
const std::unordered_set<string>& tags, SavedModel* saved_model_proto) {
LOG(INFO) << "Reading meta graph with tags { " << absl::StrJoin(tags, " ")
<< " }";
for (MetaGraphDef& graph_def : *saved_model_proto->mutable_meta_graphs()) {
std::unordered_set<string> graph_tags;
for (const string& tag : graph_def.meta_info_def().tags()) {
graph_tags.insert(tag);
}
if (graph_tags == tags) {
MetaGraphDef* meta_graph_def = &graph_def;
if (!port::kLittleEndian) {
TF_RETURN_IF_ERROR(ByteSwapTensorContentInMetaGraphDef(meta_graph_def));
}
return meta_graph_def;
}
}
return Status(
absl::StatusCode::kNotFound,
strings::StrCat(
"Could not find meta graph def matching supplied tags: { ",
absl::StrJoin(tags, " "),
" }. To inspect available tag-sets in the SavedModel, please "
"use the SavedModel CLI: `saved_model_cli`"));
}
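// Reads the SavedModel proto from `export_dir`, preferring the binary
// saved_model.pb and falling back to the text saved_model.pbtxt, and bumps
// the read-count metric on success.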
Status ReadSavedModel(absl::string_view export_dir,
SavedModel* saved_model_proto) {
LOG(INFO) << "Reading SavedModel from: " << export_dir;
if (IS_OSS) {
const std::string saved_model_pb_path =
io::JoinPath(export_dir, kSavedModelFilenamePb);
TF_ASSIGN_OR_RETURN(
bool saved_model_pb_exists,
internal::FileExists(Env::Default(), saved_model_pb_path));
if (saved_model_pb_exists) {
Status result = ReadBinaryProto(Env::Default(), saved_model_pb_path,
saved_model_proto);
if (result.ok()) {
metrics::SavedModelReadCount(
saved_model::GetWriteVersion(*saved_model_proto))
.IncrementBy(1);
}
return result;
}
}
const std::string saved_model_pbtxt_path =
io::JoinPath(export_dir, kSavedModelFilenamePbTxt);
auto saved_model_pbtxt_exists =
internal::FileExists(Env::Default(), saved_model_pbtxt_path);
if (saved_model_pbtxt_exists.value_or(false)) {
Status result = ReadTextProto(Env::Default(), saved_model_pbtxt_path,
saved_model_proto);
if (result.ok()) {
metrics::SavedModelReadCount(
saved_model::GetWriteVersion(*saved_model_proto))
.IncrementBy(1);
}
return result;
}
if (!IS_OSS) {
}
return Status(
absl::StatusCode::kNotFound,
strings::StrCat("Could not find SavedModel .pb or .pbtxt at supplied "
"export directory path: ",
export_dir,
". Check that "
"the directory exists and that you have the right "
"permissions for accessing it."));
}
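// Reads the SavedModel at `export_dir` and moves the MetaGraphDef matching
// `tags` into `*meta_graph_def`.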
Status ReadMetaGraphDefFromSavedModel(absl::string_view export_dir,
const std::unordered_set<string>& tags,
MetaGraphDef* const meta_graph_def) {
SavedModel saved_model_proto;
TF_RETURN_IF_ERROR(ReadSavedModel(export_dir, &saved_model_proto));
TF_ASSIGN_OR_RETURN(MetaGraphDef * m,
FindMetaGraphDef(tags, &saved_model_proto));
*meta_graph_def = std::move(*m);
return absl::OkStatus();
}
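// Loads debug/saved_model_debug_info.pb into `debug_info_proto` if the file
// exists; a missing debug-info file is not an error.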
Status ReadSavedModelDebugInfoIfPresent(
absl::string_view export_dir,
std::unique_ptr<GraphDebugInfo>* debug_info_proto) {
LOG(INFO) << "Reading SavedModel debug info (if present) from: "
<< export_dir;
const string debug_info_pb_path =
io::JoinPath(export_dir, "debug", "saved_model_debug_info.pb");
TF_ASSIGN_OR_RETURN(bool debug_info_pb_exists,
internal::FileExists(Env::Default(), debug_info_pb_path));
if (debug_info_pb_exists) {
GraphDebugInfo debug_info;
TF_RETURN_IF_ERROR(
ReadBinaryProto(Env::Default(), debug_info_pb_path, &debug_info));
*debug_info_proto = std::make_unique<GraphDebugInfo>(std::move(debug_info));
}
return absl::OkStatus();
}
} | #include "tensorflow/cc/saved_model/reader.h"
#include <gmock/gmock.h>
#include "tensorflow/cc/saved_model/constants.h"
#include "tensorflow/cc/saved_model/metrics.h"
#include "tensorflow/cc/saved_model/tag_constants.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/resource_loader.h"
namespace tensorflow {
namespace {
string TestDataPbTxt() {
return io::JoinPath("tensorflow", "cc", "saved_model", "testdata",
"half_plus_two_pbtxt", "00000123");
}
string TestDataSharded() {
return io::JoinPath("tensorflow", "cc", "saved_model", "testdata",
"half_plus_two", "00000123");
}
string ChunkedSavedModel() {
return io::JoinPath("tensorflow", "cc", "saved_model", "testdata",
"chunked_saved_model", "chunked_model");
}
string NonChunkedSavedModel() {
return io::JoinPath("tensorflow", "cc", "saved_model", "testdata",
"chunked_saved_model", "non_chunked_model");
}
class ReaderTest : public ::testing::Test {
protected:
ReaderTest() {}
void CheckMetaGraphDef(const MetaGraphDef& meta_graph_def) {
const auto& tags = meta_graph_def.meta_info_def().tags();
EXPECT_TRUE(std::find(tags.begin(), tags.end(), kSavedModelTagServe) !=
tags.end());
EXPECT_NE(meta_graph_def.meta_info_def().tensorflow_version(), "");
EXPECT_EQ(
meta_graph_def.signature_def().at("serving_default").method_name(),
"tensorflow/serving/predict");
}
};
TEST_F(ReaderTest, TagMatch) {
MetaGraphDef meta_graph_def;
const string export_dir = GetDataDependencyFilepath(TestDataSharded());
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(export_dir, {kSavedModelTagServe},
&meta_graph_def));
CheckMetaGraphDef(meta_graph_def);
}
TEST_F(ReaderTest, NoTagMatch) {
MetaGraphDef meta_graph_def;
const string export_dir = GetDataDependencyFilepath(TestDataSharded());
Status st = ReadMetaGraphDefFromSavedModel(export_dir, {"missing-tag"},
&meta_graph_def);
EXPECT_FALSE(st.ok());
EXPECT_TRUE(absl::StrContains(
st.message(),
"Could not find meta graph def matching supplied tags: { missing-tag }"))
<< st.message();
}
TEST_F(ReaderTest, NoTagMatchMultiple) {
MetaGraphDef meta_graph_def;
const string export_dir = GetDataDependencyFilepath(TestDataSharded());
Status st = ReadMetaGraphDefFromSavedModel(
export_dir, {kSavedModelTagServe, "missing-tag"}, &meta_graph_def);
EXPECT_FALSE(st.ok());
EXPECT_TRUE(absl::StrContains(
st.message(), "Could not find meta graph def matching supplied tags: "))
<< st.message();
}
TEST_F(ReaderTest, InvalidExportPath) {
MetaGraphDef meta_graph_def;
const string export_dir = GetDataDependencyFilepath("missing-path");
Status st = ReadMetaGraphDefFromSavedModel(export_dir, {kSavedModelTagServe},
&meta_graph_def);
EXPECT_FALSE(st.ok());
}
TEST_F(ReaderTest, ReadSavedModelDebugInfoIfPresent) {
const string export_dir = GetDataDependencyFilepath(TestDataSharded());
std::unique_ptr<GraphDebugInfo> debug_info_proto;
TF_ASSERT_OK(ReadSavedModelDebugInfoIfPresent(export_dir, &debug_info_proto));
}
TEST_F(ReaderTest, MetricsNotUpdatedFailedRead) {
MetaGraphDef meta_graph_def;
const int read_count_v1 = metrics::SavedModelReadCount("1").value();
const int read_count_v2 = metrics::SavedModelReadCount("2").value();
const string export_dir = GetDataDependencyFilepath("missing-path");
Status st =
ReadMetaGraphDefFromSavedModel(export_dir, {"serve"}, &meta_graph_def);
EXPECT_FALSE(st.ok());
EXPECT_EQ(metrics::SavedModelReadCount("1").value(), read_count_v1);
EXPECT_EQ(metrics::SavedModelReadCount("2").value(), read_count_v2);
}
TEST_F(ReaderTest, MetricsUpdatedSuccessfulRead) {
MetaGraphDef meta_graph_def;
const int read_count_v1 = metrics::SavedModelReadCount("1").value();
const string export_dir = GetDataDependencyFilepath(TestDataSharded());
Status st =
ReadMetaGraphDefFromSavedModel(export_dir, {"serve"}, &meta_graph_def);
EXPECT_EQ(metrics::SavedModelReadCount("1").value(), read_count_v1 + 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/saved_model/reader.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/saved_model/reader_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
60b3f1ce-5ea1-464a-9c6d-070957e277da | cpp | tensorflow/tensorflow | hlo_bisect_state | third_party/xla/xla/tools/hlo_bisect/hlo_bisect_state.cc | third_party/xla/xla/tools/hlo_bisect/hlo_bisect_state_test.cc | #include "xla/tools/hlo_bisect/hlo_bisect_state.h"
#include <iterator>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_dce.h"
#include "xla/tests/test_utils.h"
#include "xla/util.h"
namespace xla {
namespace bisect {
namespace {
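// Returns the instructions of `computation` in post order, with all
// parameters hoisted to the front so that every prefix of the result is a
// self-contained slice of the computation.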
std::vector<HloInstruction*> GetModifiedInstructionPostOrder(
HloComputation* computation) {
std::vector<HloInstruction*> instructions(
computation->parameter_instructions().begin(),
computation->parameter_instructions().end());
absl::c_copy_if(computation->MakeInstructionPostOrder(),
std::back_inserter(instructions),
[&](const HloInstruction* instr) {
return instr->opcode() != HloOpcode::kParameter;
});
return instructions;
}
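// Makes `outputs` (or a tuple of them) the new root of the entry computation
// and runs DCE to drop everything that became unreachable.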
absl::Status MorphModuleWithOutputs(HloModule* module,
absl::Span<HloInstruction* const> outputs) {
HloComputation* entry_computation = module->entry_computation();
HloInstruction* new_root = outputs.size() == 1
? outputs[0]
: entry_computation->AddInstruction(
HloInstruction::CreateTuple(outputs));
  entry_computation->set_root_instruction(new_root,
                                          /*accept_different_shape=*/true);
*module->mutable_entry_computation_layout() =
module->compute_computation_layout();
HloDCE dce;
absl::StatusOr<bool> dce_result = dce.Run(module);
return dce_result.status();
}
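// Restricts the entry computation to `instructions`: each kept instruction
// with no user inside the set becomes an output of the morphed module.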
absl::Status MorphModuleWithInstructions(
HloModule* module, absl::Span<HloInstruction* const> instructions) {
ConstHloInstructionSet in_range_instructions(instructions.begin(),
instructions.end());
auto keep_result = [&](const HloInstruction* instruction) {
return instruction->opcode() != HloOpcode::kParameter &&
!absl::c_any_of(instruction->users(),
[&](const HloInstruction* user) {
return in_range_instructions.count(user) != 0;
});
};
std::vector<HloInstruction*> outputs;
absl::c_copy_if(instructions, std::back_inserter(outputs), keep_result);
return MorphModuleWithOutputs(module, outputs);
}
absl::Status MorphModuleWithInstructions(HloModule* module,
size_t num_instructions) {
std::vector<HloInstruction*> ordered_instructions =
GetModifiedInstructionPostOrder(module->entry_computation());
HloInstruction* const* instructions_begin = &ordered_instructions.front();
return MorphModuleWithInstructions(
module, absl::MakeSpan(instructions_begin, num_instructions));
}
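// Replaces live instructions named in `literal_map` with constants holding
// the corresponding literals, then runs DCE on the module.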
absl::Status MorphModuleWithLiterals(
HloModule* module, absl::flat_hash_map<std::string, Literal> literal_map) {
HloComputation* entry_computation = module->entry_computation();
absl::flat_hash_map<HloInstruction*, Literal> replace_map;
for (HloInstruction* instruction : entry_computation->instructions()) {
auto it = literal_map.find(instruction->name());
if (it != literal_map.end()) {
replace_map.emplace(instruction, std::move(it->second));
}
}
for (auto& [instruction, literal] : replace_map) {
if (!instruction->IsDead()) {
HloInstruction* new_instruction = entry_computation->AddInstruction(
HloInstruction::CreateConstant(std::move(literal)));
absl::Status replace_status =
entry_computation->ReplaceInstruction(instruction, new_instruction);
TF_RETURN_IF_ERROR(replace_status);
}
}
xla::HloDCE dce;
absl::StatusOr<bool> dce_status = dce.Run(module);
return dce_status.status();
}
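// Dynamic shapes cannot be materialized as constants, and folding constants,
// tuples, or parameters would not shrink the module.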
bool InstructionNotReplaceableWithConstant(HloInstruction* instruction) {
return instruction->shape().is_dynamic() ||
instruction->opcode() == HloOpcode::kConstant ||
instruction->opcode() == HloOpcode::kTuple ||
instruction->opcode() == HloOpcode::kParameter;
}
}
absl::StatusOr<bool> HloBisectState::ShouldProcess() {
return RunModule(*module_);
}
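// Alternates trimming by outputs and by instructions (at least one round of
// each) until neither makes progress, then folds instructions to literals.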
absl::StatusOr<bool> HloBisectState::TrimEntryComputation() {
bool changed_in_loop = false;
bool changed = false;
for (int iter = 0; changed || iter < 2; iter++) {
if (iter % 2 == 0) {
VLOG(2) << "Trimming by outputs, iteration " << iter;
TF_ASSIGN_OR_RETURN(changed, TrimByOutputs());
} else {
VLOG(2) << "Trimming by instructions, iteration " << iter;
TF_ASSIGN_OR_RETURN(changed, TrimByInstructions());
}
changed_in_loop |= changed;
}
VLOG(2) << "Trimming by replacing instructions with literals";
TF_ASSIGN_OR_RETURN(changed, TrimByUsingConstants());
VLOG(2) << "Final module: " << module_->ToString();
return changed || changed_in_loop;
}
std::unique_ptr<xla::HloModule>&& HloBisectState::GetResult() {
return std::move(module_);
}
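// Runs the bug checker on `module`. When no bug is observed, records the
// module's instructions (and any checker-provided values) as candidates for
// constant folding.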
absl::StatusOr<bool> HloBisectState::RunModule(const HloModule& module) {
VLOG(3) << "Modified module: " << module.ToString();
absl::StatusOr<bool> bug_result = bug_checker_->Run(module);
TF_RETURN_IF_ERROR(bug_result.status());
VLOG(3) << "Bug checker result: " << bug_result.value();
if (!bug_result.value()) {
for (HloInstruction* instr : module.entry_computation()->instructions()) {
foldable_instructions_.emplace(instr->name());
}
for (auto& [key, value] : bug_checker_->GetResults()) {
foldable_instructions_values_[key] = std::move(value);
}
}
return bug_result;
}
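// For a tuple-shaped root, bisects over its operands to find a smaller
// contiguous range of outputs that still reproduces the bug.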
absl::StatusOr<bool> HloBisectState::TrimByOutputs() {
HloInstruction* root_instruction =
module_->entry_computation()->root_instruction();
if (root_instruction->opcode() != HloOpcode::kTuple ||
root_instruction->operand_count() < 2) {
return false;
}
auto run_modified = [&](int64_t start, int64_t end) -> absl::StatusOr<bool> {
std::unique_ptr<HloModule> new_module = module_->Clone("");
HloInstruction* const* new_operands =
new_module->entry_computation()->root_instruction()->operands().begin();
TF_RETURN_IF_ERROR(MorphModuleWithOutputs(
new_module.get(),
absl::MakeSpan(new_operands + start, end - start + 1)));
return RunModule(*new_module);
};
int64_t bisect_low = 0;
int64_t bisect_high = root_instruction->operand_count() - 1;
while (bisect_low < bisect_high) {
int64_t cur = bisect_low + (bisect_high - bisect_low) / 2;
VLOG(2) << "Number of outputs: " << (cur - bisect_low + 1) << " ["
<< bisect_low << ".." << cur << "]";
TF_ASSIGN_OR_RETURN(bool has_bug, run_modified(bisect_low, cur));
if (has_bug) {
bisect_high = cur;
} else {
TF_ASSIGN_OR_RETURN(has_bug, run_modified(cur + 1, bisect_high));
if (has_bug) {
bisect_low = cur + 1;
} else {
break;
}
}
}
bool changed =
(bisect_high - bisect_low) < (root_instruction->operand_count() - 1);
if (changed) {
TF_RETURN_IF_ERROR(MorphModuleWithOutputs(
module_.get(),
absl::MakeSpan(root_instruction->operands().begin() + bisect_low,
bisect_high - bisect_low + 1)));
TF_RETURN_IF_ERROR(ExpectModuleIsBuggy());
}
return changed;
}
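// Bisects over the instruction count (in modified post order) to find a
// shorter prefix of the entry computation that still reproduces the bug.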
absl::StatusOr<bool> HloBisectState::TrimByInstructions() {
HloComputation* computation = module_->entry_computation();
int64_t upper_bound = computation->instruction_count() -
computation->root_instruction()->shape().IsTuple();
int64_t bisect_low = computation->num_parameters() - 1;
int64_t bisect_high = upper_bound;
while (bisect_low + 1 < bisect_high) {
int64_t cur = bisect_low + (bisect_high - bisect_low) / 2;
VLOG(2) << "Number of instructions: " << cur << " (of "
<< computation->instruction_count() << ")";
std::unique_ptr<HloModule> new_module = module_->Clone("");
TF_RETURN_IF_ERROR(MorphModuleWithInstructions(new_module.get(), cur));
TF_ASSIGN_OR_RETURN(bool has_bug, RunModule(*new_module));
if (has_bug) {
bisect_high = cur;
} else {
bisect_low = cur;
}
}
if (bisect_high == computation->num_parameters()) {
return Internal(
"The checker fails on an empty computation! Something is not right. "
"Can't bisect.");
}
bool changed = bisect_high < upper_bound;
if (changed) {
TF_RETURN_IF_ERROR(MorphModuleWithInstructions(module_.get(), bisect_high));
TF_RETURN_IF_ERROR(ExpectModuleIsBuggy());
}
return changed;
}
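// Replaces foldable instructions with reference values from the bug checker
// when available, or with random literals otherwise, and keeps the smaller
// module only if the bug survives.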
absl::StatusOr<bool> HloBisectState::TrimByUsingConstants() {
absl::flat_hash_map<std::string, Literal> literal_map;
int64_t random_literals_count = 0;
for (HloInstruction* instr : module_->entry_computation()->instructions()) {
if (InstructionNotReplaceableWithConstant(instr)) {
continue;
}
if (foldable_instructions_values_.contains(instr->name())) {
auto it = foldable_instructions_values_.extract(instr->name());
literal_map.insert(std::move(it));
} else if (foldable_instructions_.contains(instr->name())) {
absl::StatusOr<Literal> literal_status = MakeFakeLiteral(instr->shape());
TF_RETURN_IF_ERROR(literal_status.status());
literal_map[instr->name()] = std::move(literal_status).value();
++random_literals_count;
}
}
VLOG(2) << "Number of literals: " << literal_map.size()
<< " (random: " << random_literals_count << ")";
std::unique_ptr<HloModule> new_module = module_->Clone("");
TF_RETURN_IF_ERROR(
MorphModuleWithLiterals(new_module.get(), std::move(literal_map)));
TF_ASSIGN_OR_RETURN(bool has_bug, RunModule(*new_module));
if (has_bug) {
std::swap(module_, new_module);
}
return has_bug;
}
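// Sanity check after each reduction: the kept module must still fail. Retries
// the checker a few times to distinguish a flaky checker from a lost bug.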
absl::Status HloBisectState::ExpectModuleIsBuggy() {
TF_ASSIGN_OR_RETURN(bool has_bug, RunModule(*module_));
if (has_bug) {
return absl::OkStatus();
}
const int retry_count = 5;
int bug_count = 0;
for (int i = 0; i < retry_count; i++) {
TF_ASSIGN_OR_RETURN(has_bug, bug_checker_->Run(*module_));
if (has_bug) {
bug_count++;
}
}
if (bug_count != 0) {
return InternalStrCat("The checker is non deterministic! (only ", bug_count,
" failures seen in ", (retry_count + 1), " runs)");
}
return Internal("We \"lost\" the bug while bisecting!");
}
}
} | #include "xla/tools/hlo_bisect/hlo_bisect_state.h"
#include <initializer_list>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace bisect {
namespace {
namespace m = match;
using HloBisectStateTest = HloTestBase;
class TestBugSearch : public BugCheckerInterface {
public:
TestBugSearch(std::initializer_list<HloOpcode> opcodes) : opcodes_(opcodes) {}
absl::StatusOr<bool> Run(const HloModule& module) override {
auto has_opcode = [&](HloOpcode opcode) {
return absl::c_any_of(module.entry_computation()->instructions(),
[opcode](const HloInstruction* instr) {
return instr->opcode() == opcode;
});
};
return absl::c_all_of(opcodes_, has_opcode);
}
absl::flat_hash_map<std::string, Literal> GetResults() override { return {}; }
private:
std::vector<HloOpcode> opcodes_;
};
Literal CreateLiteral(float value) {
Literal result = Literal::CreateFromShape(ShapeUtil::MakeShape(F32, {}));
result.PopulateWithValue(value);
return result;
}
TEST_F(HloBisectStateTest, TrimByOutputs) {
const char* kModuleStr = R"(
HloModule test_module
ENTRY test_computation {
p1 = s32[8] parameter(0)
p2 = s32[8] parameter(1)
a = s32[8] add(p1, p2)
b = s32[8] multiply(p1, p2)
c = s32[8] subtract(p1, p2)
ROOT sum = tuple(a, b, c)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
TestBugSearch bug_checker({HloOpcode::kMultiply});
HloBisectState bisect(std::move(module), &bug_checker);
TF_ASSERT_OK_AND_ASSIGN(bool changed, bisect.TrimEntryComputation());
EXPECT_TRUE(changed);
auto reduced_module = std::move(bisect).GetResult();
EXPECT_THAT(reduced_module->entry_computation()->root_instruction(),
GmockMatch(m::Multiply(m::Parameter(0), m::Parameter(1))));
}
TEST_F(HloBisectStateTest, TrimByInstructions) {
const char* kModuleStr = R"(
HloModule axpy_module
ENTRY axpy_computation {
alpha = f32[] parameter(0)
broadcast = f32[10] broadcast(alpha), dimensions={}
x = f32[10] parameter(1)
ax = f32[10] multiply(broadcast, x)
y = f32[10] parameter(2)
ROOT add = f32[10] add(ax, y)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
TestBugSearch bug_checker({HloOpcode::kMultiply, HloOpcode::kBroadcast});
HloBisectState bisect(std::move(module), &bug_checker);
TF_ASSERT_OK_AND_ASSIGN(bool changed, bisect.TrimEntryComputation());
EXPECT_TRUE(changed);
auto reduced_module = std::move(bisect).GetResult();
EXPECT_THAT(
reduced_module->entry_computation()->root_instruction(),
GmockMatch(m::Multiply(m::Broadcast(m::Parameter(0)), m::Parameter(1))));
}
TEST_F(HloBisectStateTest, TrimByUsingRandomConstants) {
const char* kModuleStr = R"(
HloModule test_module
ENTRY test_computation {
p1 = f32[4] parameter(0)
p2 = f32[4] parameter(1)
a = f32[4] multiply(p1, p2)
b = f32[4] add(p1, p2)
ROOT result = f32[4] power(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
TestBugSearch bug_checker({HloOpcode::kPower});
HloBisectState bisect(std::move(module), &bug_checker);
TF_ASSERT_OK_AND_ASSIGN(bool changed, bisect.TrimEntryComputation());
EXPECT_TRUE(changed);
auto reduced_module = std::move(bisect).GetResult();
EXPECT_THAT(reduced_module->entry_computation()->root_instruction(),
GmockMatch(m::Power(m::Constant(), m::Constant())));
}
TEST_F(HloBisectStateTest, TrimByUsingReferenceConstants) {
class TestBugSearchWithReferenceConstants : public TestBugSearch {
public:
TestBugSearchWithReferenceConstants()
: TestBugSearch({HloOpcode::kPower}) {}
absl::flat_hash_map<std::string, Literal> GetResults() override {
absl::flat_hash_map<std::string, Literal> results;
results["a"] = CreateLiteral(2.0f);
results["b"] = CreateLiteral(3.0f);
return results;
}
};
const char* kModuleStr = R"(
HloModule test_module
ENTRY test_computation {
p1 = f32[] parameter(0)
p2 = f32[] parameter(1)
a = f32[] multiply(p1, p2)
b = f32[] add(p1, p2)
ROOT result = f32[] power(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
TestBugSearchWithReferenceConstants bug_checker;
HloBisectState bisect(std::move(module), &bug_checker);
TF_ASSERT_OK_AND_ASSIGN(bool changed, bisect.TrimEntryComputation());
EXPECT_TRUE(changed);
auto reduced_module = std::move(bisect).GetResult();
EXPECT_THAT(reduced_module->entry_computation()->root_instruction(),
GmockMatch(m::Power(m::Constant(), m::Constant())));
}
TEST_F(HloBisectStateTest, TrimByOutputsLostBug) {
class CustomBugSearch : public TestBugSearch {
public:
CustomBugSearch() : TestBugSearch({HloOpcode::kConstant}) {}
absl::StatusOr<bool> Run(const HloModule& module) override {
TF_ASSIGN_OR_RETURN(bool has_constants, TestBugSearch::Run(module));
int program_size = module.entry_computation()->instruction_count();
return program_size == 5 && !has_constants;
}
};
const char* kModuleStr = R"(
HloModule test_module
ENTRY test_computation {
p1 = s32[8] parameter(0)
p2 = s32[8] parameter(1)
a = s32[8] add(p1, p2)
b = s32[8] multiply(p1, p2)
ROOT sum = tuple(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
CustomBugSearch bug_checker;
HloBisectState bisect(std::move(module), &bug_checker);
TF_ASSERT_OK_AND_ASSIGN(bool changed, bisect.TrimEntryComputation());
EXPECT_FALSE(changed);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/hlo_bisect/hlo_bisect_state.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/hlo_bisect/hlo_bisect_state_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
01655d2e-ef4c-487f-b26b-7a6ff14cd974 | cpp | tensorflow/tensorflow | hlo_expand | third_party/xla/xla/tools/hlo_expand.cc | third_party/xla/xla/tools/tests/hlo_expand_test.cc | #include "xla/tools/hlo_expand.h"
#include <vector>
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/service/batchnorm_expander.h"
#include "xla/service/cholesky_expander.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/rng_bit_generator_expander.h"
#include "xla/service/rng_expander.h"
#include "xla/service/sharding_propagation.h"
#include "xla/service/spmd/stateful_rng_spmd_partitioner.h"
#include "xla/service/triangular_solve_expander.h"
#include "xla/tsl/util/command_line_flags.h"
#include "xla/xla_data.pb.h"
namespace xla {
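// Appends the expander passes selected in `config` to `pipeline`. The SPMD
// option additionally runs sharding propagation and the stateful-RNG SPMD
// partitioner, and --verify_hlo appends a final HloVerifier pass.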
void AddPassesToPipeline(HloExpandConfig& config, HloPassPipeline& pipeline,
const HloModuleConfig& hlo_module_config) {
if (config.batch_norm_grad_expander || config.batch_norm_inference_expander ||
config.batch_norm_training_expander) {
pipeline.AddPass<xla::BatchNormExpander>(
config.batch_norm_training_expander,
config.batch_norm_inference_expander,
config.batch_norm_grad_expander);
}
if (config.cholesky_expander) {
pipeline.AddPass<xla::CholeskyExpander>();
}
if (config.rng_expander) {
pipeline.AddPass<xla::RngExpander>();
}
if (config.rng_bit_generator_philox_expander) {
pipeline.AddPass<xla::RngBitGeneratorExpander>(
xla::RandomAlgorithm::RNG_PHILOX);
}
if (config.rng_bit_generator_three_fry_expander) {
pipeline.AddPass<xla::RngBitGeneratorExpander>(
xla::RandomAlgorithm::RNG_THREE_FRY);
}
if (config.triangular_solve_expander) {
pipeline.AddPass<xla::TriangularSolveExpander>();
}
if (config.spmd_expander) {
pipeline.AddPass<ShardingPropagation>(
true, false,
hlo_module_config.allow_spmd_sharding_propagation_to_output(),
hlo_module_config.allow_spmd_sharding_propagation_to_parameters());
pipeline.AddPass<spmd::StatefulRngSpmdPartitioner>(
hlo_module_config.num_partitions(), hlo_module_config.replica_count(),
hlo_module_config.debug_options()
.xla_gpu_threshold_for_windowed_einsum_mib());
}
if (config.verify_hlo) {
    pipeline.AddPass<xla::HloVerifier>(/*layout_sensitive=*/false,
                                       /*allow_mixed_precision=*/false);
}
}
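// Command-line flags understood by the hlo-expand tool, each bound to the
// corresponding field of `config`.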
std::vector<tsl::Flag> GetFlags(HloExpandConfig& config) {
return {
tsl::Flag("h", &config.help, "Alias of --help"),
tsl::Flag("help", &config.help, "Display available options"),
tsl::Flag(
"input_format", &config.input_format,
"The format of the input file. If this flag is not specified, it's"
"inferred from the file extension instead. Valid values:\n "
"* hlo|txt : HLO textual format\n"
"* pb : xla::HloProto in binary proto format\n"
"* pbtxt : xla::HloProto in text proto format"),
tsl::Flag("o", &config.output_file, "Alias of --output_file="),
tsl::Flag("output_file", &config.output_file, "Full output file path"),
tsl::Flag("output_format", &config.output_format,
"The format of the output file. Defaults to input_format. "
"Valid values:\n"
"* hlo|txt : HLO textual format\n"
"* pb : xla::HloProto in binary proto format\n"
"* pbtxt : xla::HloProto in text proto format"),
tsl::Flag("batch_norm_expander", &config.batch_norm_expander,
"Overrides and expands batch_norm_grad, batch_norm_inference, "
"and batch_norm_training ops"),
tsl::Flag("batch_norm_grad_expander", &config.batch_norm_grad_expander,
"Expands batch_norm_grad op"),
tsl::Flag("batch_norm_inference_expander",
&config.batch_norm_inference_expander,
"Expands batch_norm_inference_grad op"),
tsl::Flag("batch_norm_training_expander",
&config.batch_norm_training_expander,
"Expands batch_norm_training_grad op"),
tsl::Flag("cholesky_expander", &config.cholesky_expander,
"Expands cholesky op"),
tsl::Flag("spmd_expander", &config.spmd_expander,
"Expands SPMD sharding"),
tsl::Flag("expand_all", &config.expand_all,
"Overrides and expands all supported passes below"),
tsl::Flag("rng_expander", &config.rng_expander, "Expands rng op"),
tsl::Flag(
"rng_bit_generator_expander", &config.rng_bit_generator_expander,
"Overrides and expands rng_bit_generator op on all prng algorithms"),
tsl::Flag("rng_bit_generator_philox_expander",
&config.rng_bit_generator_philox_expander,
"Expands rng_bit_generator op using philox prng algorithm"),
tsl::Flag("rng_bit_generator_three_fry_expander",
&config.rng_bit_generator_three_fry_expander,
"Expands rng_bit_generator op using three_fry prng algorithm"),
tsl::Flag("triangular_solve_expander", &config.triangular_solve_expander,
"Expands triangular_solve op"),
tsl::Flag("verify_hlo", &config.verify_hlo,
"Run HLO verifier after passes"),
};
}
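// Expands the umbrella flags: --expand_all enables every supported expander,
// and the batch_norm / rng_bit_generator group flags enable their members.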
void ParseCompoundFlags(HloExpandConfig& config) {
config.batch_norm_grad_expander |=
config.expand_all || config.batch_norm_expander;
config.batch_norm_inference_expander |=
config.expand_all || config.batch_norm_expander;
config.batch_norm_training_expander |=
config.expand_all || config.batch_norm_expander;
config.cholesky_expander |= config.expand_all;
config.rng_bit_generator_philox_expander |=
config.expand_all || config.rng_bit_generator_expander;
config.rng_bit_generator_three_fry_expander |=
config.expand_all || config.rng_bit_generator_expander;
config.rng_expander |= config.expand_all;
config.triangular_solve_expander |= config.expand_all;
}
} | #include <string>
#include <vector>
#include <gmock/gmock.h>
#include "tsl/platform/path.h"
#include "tsl/platform/subprocess.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class HloExpandTest : public ::testing::Test {
protected:
void HloOpt(std::vector<std::string>& additional_flags) {
std::string hlo_opt_bin =
tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools", "hlo-expand");
tsl::SubProcess proc;
std::vector<std::string> argv = {hlo_opt_bin};
argv.insert(argv.end(), additional_flags.begin(), additional_flags.end());
proc.SetProgram(hlo_opt_bin, argv);
proc.SetChannelAction(tsl::CHAN_STDOUT, tsl::ACTION_PIPE);
proc.SetChannelAction(tsl::CHAN_STDERR, tsl::ACTION_PIPE);
EXPECT_TRUE(proc.Start());
stdout_output_ = stderr_output_ = "";
int status = proc.Communicate(nullptr, &stdout_output_, &stderr_output_);
#if defined(_WIN32) || defined(_WIN64)
exited_normally_ = (status == 0);
exit_status_ = status;
#else
exited_normally_ = WIFEXITED(status);
exit_status_ = exited_normally_ ? WEXITSTATUS(status) : -1;
#endif
}
std::string stdout_output_;
std::string stderr_output_;
bool exited_normally_ = false;
int exit_status_ = -1;
};
TEST_F(HloExpandTest, CholeskyHlo) {
std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools",
"tests", "cholesky.hlo");
std::vector<std::string> additional_flags = {"--input_format=hlo", hlo_path};
HloOpt(additional_flags);
const std::string& expected_hlo_string =
R"(HloModule main, entry_computation_layout={()->f64[3,3]{1,0}}
ENTRY %main.3 () -> f64[3,3] {
%constant.1 = f64[3,3]{1,0} constant({ { 1, 2, 3 }, { 2, 20, 26 }, { 3, 26, 70 } })
ROOT %cholesky.2 = f64[3,3]{1,0} cholesky(f64[3,3]{1,0} %constant.1), lower=true
})";
EXPECT_TRUE(exited_normally_);
EXPECT_EQ(exit_status_, 0);
EXPECT_THAT(stdout_output_, testing::HasSubstr(expected_hlo_string));
}
TEST_F(HloExpandTest, SpmdHlo) {
std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools",
"tests", "spmd.hlo");
std::vector<std::string> additional_flags = {"--spmd_expander", hlo_path};
HloOpt(additional_flags);
const std::string& expected_hlo_string =
R"(HloModule module, entry_computation_layout={(f32[24,64]{1,0}, f32[39296,64]{1,0})->f32[24,19648]{1,0}}, num_partitions=2
ENTRY %entry_spmd (param: f32[24,64], param.1: f32[39296,64]) -> f32[24,19648] {
%param = f32[24,64]{1,0} parameter(0), sharding={replicated}
%lhs.copy.1 = f32[24,64]{1,0} copy(f32[24,64]{1,0} %param)
%param.1 = f32[39296,64]{1,0} parameter(1), sharding={replicated}
%constant = s32[2]{0} constant({0, 19648})
%partition-id = u32[] partition-id()
%dynamic-slice = s32[1]{0} dynamic-slice(s32[2]{0} %constant, u32[] %partition-id), dynamic_slice_sizes={1}
%reshape = s32[] reshape(s32[1]{0} %dynamic-slice)
%constant.1 = s32[] constant(0)
%dynamic-slice.1 = f32[19648,64]{1,0} dynamic-slice(f32[39296,64]{1,0} %param.1, s32[] %reshape, s32[] %constant.1), dynamic_slice_sizes={19648,64}
%rhs.copy.1 = f32[19648,64]{1,0} copy(f32[19648,64]{1,0} %dynamic-slice.1)
ROOT %dot.1 = f32[24,19648]{1,0} dot(f32[24,64]{1,0} %lhs.copy.1, f32[19648,64]{1,0} %rhs.copy.1), lhs_contracting_dims={1}, rhs_contracting_dims={1}
})";
EXPECT_TRUE(exited_normally_);
EXPECT_EQ(exit_status_, 0);
EXPECT_THAT(stdout_output_, testing::HasSubstr(expected_hlo_string));
}
TEST_F(HloExpandTest, CholeskyExpanderHlo) {
std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools",
"tests", "cholesky.hlo");
std::vector<std::string> additional_flags = {"--input_format=hlo", hlo_path,
"--expand_all"};
HloOpt(additional_flags);
const std::string& expected_hlo_string = "%xla.cholesky_f64";
EXPECT_TRUE(exited_normally_);
EXPECT_EQ(exit_status_, 0);
EXPECT_THAT(stdout_output_, testing::HasSubstr(expected_hlo_string));
}
TEST_F(HloExpandTest, InvalidArgc) {
std::vector<std::string> additional_flags = {"--input_format=hlo", "foo",
"bar", "baz"};
HloOpt(additional_flags);
const std::string& expected_string =
"Cannot parse more than one argument. See usage below:";
EXPECT_TRUE(exited_normally_);
EXPECT_EQ(exit_status_, 1);
EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));
}
TEST_F(HloExpandTest, InvalidInputFileExtension) {
std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools",
"tests", "foo.bar");
std::vector<std::string> additional_flags = {hlo_path};
HloOpt(additional_flags);
const std::string& expected_string =
"input_format must be specified as [hlo|pb|pbtxt|txt].";
EXPECT_TRUE(exited_normally_);
EXPECT_EQ(exit_status_, 1);
EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));
}
TEST_F(HloExpandTest, InvalidInputFormat) {
std::vector<std::string> additional_flags = {"--input_format=foo"};
HloOpt(additional_flags);
const std::string& expected_string =
"input_format must be specified as [hlo|pb|pbtxt|txt].";
EXPECT_TRUE(exited_normally_);
EXPECT_EQ(exit_status_, 1);
EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));
}
TEST_F(HloExpandTest, InvalidOutputFileExtension) {
std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools",
"tests", "cholesky.hlo");
std::string output_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(),
"tools", "tests", "foo.bar");
std::vector<std::string> additional_flags = {"--input_format=", hlo_path,
"--output_file=" + output_path};
HloOpt(additional_flags);
const std::string& expected_string =
"output_format must be specified as [hlo|pb|pbtxt].";
EXPECT_TRUE(exited_normally_);
EXPECT_EQ(exit_status_, 1);
EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));
}
TEST_F(HloExpandTest, InvalidOutputFormat) {
std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools",
"tests", "cholesky.hlo");
std::vector<std::string> additional_flags = {"--input_format=", hlo_path,
"--output_format=foo"};
HloOpt(additional_flags);
const std::string& expected_string =
"output_format must be specified as [hlo|pb|pbtxt].";
EXPECT_TRUE(exited_normally_);
EXPECT_EQ(exit_status_, 1);
EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));
}
TEST_F(HloExpandTest, InvalidFile) {
std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools",
"tests", "foo.bar");
std::vector<std::string> additional_flags = {"--input_format=hlo", hlo_path};
HloOpt(additional_flags);
const std::string& expected_string = "Try: hlo-expand --help";
EXPECT_TRUE(exited_normally_);
EXPECT_EQ(exit_status_, 1);
EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));
}
TEST_F(HloExpandTest, UnsupportedOutputFormat) {
std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools",
"tests", "cholesky.hlo");
std::vector<std::string> additional_flags = {"--input_format=hlo",
"--output_format=pb", hlo_path};
HloOpt(additional_flags);
const std::string& expected_string =
"Printing to stdout must specify supported "
"output_format=[hlo|pbtxt|txt].";
EXPECT_TRUE(exited_normally_);
EXPECT_EQ(exit_status_, 1);
EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));
}
TEST_F(HloExpandTest, VerificationFailure) {
std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools",
"tests", "invalid_concat.hlo");
std::vector<std::string> additional_flags = {"--verify_hlo", hlo_path};
HloOpt(additional_flags);
const std::string& expected_string =
"Cannot concatenate arrays that differ in dimensions";
EXPECT_TRUE(exited_normally_);
EXPECT_EQ(exit_status_, 1);
EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/hlo_expand.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/tests/hlo_expand_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
079d69b0-65c5-4dbf-b05a-cfe05100b0de | cpp | tensorflow/tensorflow | stable_delegate_plugin | tensorflow/lite/acceleration/configuration/stable_delegate_plugin.cc | tensorflow/lite/acceleration/configuration/stable_delegate_plugin_test.cc | #include "tensorflow/lite/acceleration/configuration/stable_delegate_plugin.h"
namespace tflite {
namespace delegates {
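// Registers the factory so the plugin can be instantiated by name
// ("StableDelegatePlugin") through the delegate plugin registry.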
TFLITE_REGISTER_DELEGATE_FACTORY_FUNCTION(StableDelegatePlugin,
StableDelegatePlugin::New);
}
} | #include <memory>
#include <gtest/gtest.h>
#include "pthreadpool.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/core/acceleration/configuration/delegate_registry.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
class StableDelegatePluginTest : public testing::Test {
public:
static constexpr int kNumThreadsForTest = 7;
static constexpr tflite::XNNPackFlags kFlagsForTest =
tflite::XNNPackFlags::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QS8_QU8;
static constexpr char kDelegateBinaryPath[] =
"tensorflow/lite/delegates/utils/experimental/"
"stable_delegate/libtensorflowlite_stable_xnnpack_delegate.so";
void SetUp() override {
flatbuffers::Offset<flatbuffers::String> stable_delegate_path_offset =
flatbuffer_builder_.CreateString(kDelegateBinaryPath);
StableDelegateLoaderSettingsBuilder stable_delegate_loader_settings_builder(
flatbuffer_builder_);
stable_delegate_loader_settings_builder.add_delegate_path(
stable_delegate_path_offset);
flatbuffers::Offset<StableDelegateLoaderSettings>
stable_delegate_loader_settings =
stable_delegate_loader_settings_builder.Finish();
XNNPackSettingsBuilder xnnpack_settings_builder(flatbuffer_builder_);
xnnpack_settings_builder.add_num_threads(kNumThreadsForTest);
xnnpack_settings_builder.add_flags(kFlagsForTest);
flatbuffers::Offset<XNNPackSettings> xnnpack_settings =
xnnpack_settings_builder.Finish();
TFLiteSettingsBuilder tflite_settings_builder(flatbuffer_builder_);
tflite_settings_builder.add_stable_delegate_loader_settings(
stable_delegate_loader_settings);
tflite_settings_builder.add_xnnpack_settings(xnnpack_settings);
tflite_settings_builder.add_delegate(Delegate_XNNPACK);
flatbuffers::Offset<TFLiteSettings> tflite_settings =
tflite_settings_builder.Finish();
flatbuffer_builder_.Finish(tflite_settings);
tflite_settings_ = flatbuffers::GetRoot<TFLiteSettings>(
flatbuffer_builder_.GetBufferPointer());
delegate_plugin_ = delegates::DelegatePluginRegistry::CreateByName(
"StableDelegatePlugin", *tflite_settings_);
ASSERT_NE(delegate_plugin_, nullptr);
}
void TearDown() override { delegate_plugin_.reset(); }
protected:
flatbuffers::FlatBufferBuilder flatbuffer_builder_;
const TFLiteSettings *tflite_settings_;
std::unique_ptr<delegates::DelegatePluginInterface> delegate_plugin_;
};
TEST_F(StableDelegatePluginTest, CanCreateAndDestroyDelegate) {
delegates::TfLiteDelegatePtr delegate = delegate_plugin_->Create();
EXPECT_NE(delegate, nullptr);
}
TEST_F(StableDelegatePluginTest, CanGetDelegateErrno) {
delegates::TfLiteDelegatePtr delegate = delegate_plugin_->Create();
EXPECT_EQ(delegate_plugin_->GetDelegateErrno(delegate.get()), 0);
}
TEST_F(StableDelegatePluginTest, SetsCorrectThreadCount) {
delegates::TfLiteDelegatePtr delegate = delegate_plugin_->Create();
pthreadpool_t threadpool = static_cast<pthreadpool_t>(
TfLiteXNNPackDelegateGetThreadPool(delegate.get()));
EXPECT_EQ(pthreadpool_get_threads_count(threadpool), kNumThreadsForTest);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/acceleration/configuration/stable_delegate_plugin.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/acceleration/configuration/stable_delegate_plugin_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c396709c-ef47-4f4e-8cf4-224ba0257c00 | cpp | tensorflow/tensorflow | composite_device | tensorflow/core/common_runtime/composite_device.cc | tensorflow/core/common_runtime/composite_device_test.cc | #include "tensorflow/core/common_runtime/composite_device.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
const char* const kCompositeDeviceType = "COMPOSITE";
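// Builds the composite device name from `host_name`, the COMPOSITE device
// type, and `unique_device_id`, then delegates to the overload below.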
std::unique_ptr<CompositeDevice> CompositeDevice::MakeDevice(
const std::vector<string>& underlying_devices, const int unique_device_id,
const DeviceNameUtils::ParsedName& host_name, Status* status) {
DeviceNameUtils::ParsedName parsed_name = host_name;
parsed_name.type = kCompositeDeviceType;
parsed_name.id = unique_device_id;
const string device_name = DeviceNameUtils::ParsedNameToString(parsed_name);
return CompositeDevice::MakeDevice(underlying_devices, device_name, status);
}
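// Validates that `underlying_devices` is non-empty and homogeneous in device
// type before wrapping the devices in a CompositeDevice named `device_name`.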
std::unique_ptr<CompositeDevice> CompositeDevice::MakeDevice(
const std::vector<string>& underlying_devices, const string& device_name,
Status* status) {
if (underlying_devices.empty()) {
status->Update(
errors::InvalidArgument("underlying_devices should not be empty."));
return nullptr;
}
DeviceNameUtils::ParsedName parsed_name;
if (!DeviceNameUtils::ParseFullName(underlying_devices.at(0), &parsed_name)) {
status->Update(tensorflow::errors::InvalidArgument(
"Cannot parse device name ", underlying_devices.at(0),
" when creating CompositeDevice."));
return nullptr;
}
const string& underlying_type = parsed_name.type;
for (int i = 1; i < underlying_devices.size(); ++i) {
DeviceNameUtils::ParsedName name;
if (!DeviceNameUtils::ParseFullName(underlying_devices.at(i), &name)) {
status->Update(tensorflow::errors::InvalidArgument(
"Cannot parse device name ", underlying_devices.at(i),
" when creating CompositeDevice."));
return nullptr;
}
if (name.type != underlying_type) {
status->Update(tensorflow::errors::InvalidArgument(
"Expect device type ", parsed_name.type, "; but got type ", name.type,
" from device: ", underlying_devices.at(i),
" when creating CompositeDevice."));
return nullptr;
}
}
DeviceAttributes device_attributes;
device_attributes.set_name(device_name);
device_attributes.set_device_type(kCompositeDeviceType);
return absl::WrapUnique(
new CompositeDevice(device_attributes, underlying_devices));
}
} | #include "tensorflow/core/common_runtime/composite_device.h"
#include "tensorflow/core/lib/core/status_test_util.h"
namespace tensorflow {
TEST(CompositeDeviceTest, Basic) {
const string host_name = "/job:localhost/replica:0/task:0/device:CPU:0";
DeviceNameUtils::ParsedName parsed_host_name;
EXPECT_TRUE(DeviceNameUtils::ParseFullName(host_name, &parsed_host_name));
std::vector<string> underlying_devices;
{
Status status;
std::unique_ptr<CompositeDevice> composite_device =
        CompositeDevice::MakeDevice(underlying_devices, /*unique_device_id=*/0,
                                    parsed_host_name, &status);
EXPECT_EQ(composite_device, nullptr);
EXPECT_EQ(error::INVALID_ARGUMENT, status.code());
EXPECT_TRUE(absl::StrContains(status.message(),
"underlying_devices should not be empty"))
<< status.ToString();
}
{
Status status;
underlying_devices.push_back(
"/job:localhost/replica:0/task:0/device:CPU:0");
underlying_devices.push_back(
"/job:localhost/replica:0/task:0/device:CPU:1");
std::unique_ptr<CompositeDevice> composite_device =
        CompositeDevice::MakeDevice(underlying_devices, /*unique_device_id=*/0,
                                    parsed_host_name, &status);
TF_ASSERT_OK(status);
EXPECT_EQ(composite_device->device_type(), kCompositeDeviceType);
EXPECT_EQ(underlying_devices, *composite_device->underlying_devices());
}
{
Status status;
underlying_devices.push_back(
"/job:localhost/replica:0/task:0/device:GPU:0");
std::unique_ptr<CompositeDevice> composite_device =
        CompositeDevice::MakeDevice(underlying_devices, /*unique_device_id=*/1,
                                    parsed_host_name, &status);
EXPECT_EQ(composite_device, nullptr);
EXPECT_EQ(error::INVALID_ARGUMENT, status.code());
EXPECT_TRUE(absl::StrContains(status.message(),
"Expect device type CPU; but got type GPU"))
<< status.ToString();
}
}
TEST(CompositeDeviceTest, DeviceName) {
const string composite_device_name =
"/job:localhost/replica:0/task:0/device:CPU:10";
std::vector<string> underlying_devices;
underlying_devices.push_back("/job:worker/replica:0/task:0/device:CPU:0");
underlying_devices.push_back("/job:worker/replica:0/task:0/device:CPU:1");
Status status;
std::unique_ptr<CompositeDevice> composite_device =
CompositeDevice::MakeDevice(underlying_devices, composite_device_name,
&status);
TF_ASSERT_OK(status);
EXPECT_EQ(composite_device->name(), composite_device_name);
EXPECT_EQ(composite_device->device_type(), kCompositeDeviceType);
EXPECT_EQ(underlying_devices, *composite_device->underlying_devices());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/composite_device.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/composite_device_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
53602328-c7cf-4c6d-8ee9-b9779b3bed8b | cpp | tensorflow/tensorflow | ar_crs_combiner | third_party/xla/xla/service/ar_crs_combiner.cc | third_party/xla/xla/service/ar_crs_combiner_test.cc | #include "xla/service/ar_crs_combiner.h"
#include <algorithm>
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo_replication_analysis.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
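// Rewrites cross-replica all-reduces (no channel id, at most one replica
// group, F32 array shape) whose results are provably identical across
// partitions into cross-module all-reduces followed by a division by the
// partition count. Applied only when replica_count >= 8 * partition_count.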
absl::StatusOr<bool> ReplaceReplicatedAllReduce(HloModule* module,
int64_t partition_count) {
TF_ASSIGN_OR_RETURN(
auto replication_analysis,
      HloReplicationAnalysis::Run(module, /*cross_partition_spmd=*/true));
bool changed = false;
int64_t next_channel = hlo_query::NextChannelId(*module);
for (auto computation : module->computations()) {
for (auto instruction : computation->instructions()) {
if (auto ar = DynCast<HloAllReduceInstruction>(instruction)) {
const Shape& shape = ar->shape();
if (ar->channel_id()) {
continue;
}
if (ar->replica_groups().size() > 1) {
continue;
}
if (shape.IsTuple() || shape.element_type() != F32) {
continue;
}
if (module->config().replica_count() < 8 * partition_count) {
continue;
}
if (replication_analysis->HloInstructionIsReplicatedAt(ar, {})) {
VLOG(2) << "Replaced replicated all-reduce:" << ar->ToString();
ar->set_channel_id(next_channel++);
auto divisor =
computation->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<float>(partition_count)));
auto bcast = computation->AddInstruction(
HloInstruction::CreateBroadcast(shape, divisor, {}));
auto div = computation->AddInstruction(HloInstruction::CreateBinary(
ar->shape(), HloOpcode::kDivide, ar, bcast));
TF_RETURN_IF_ERROR(ar->ReplaceAllUsesWith(div));
changed = true;
}
}
}
}
return changed;
}
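// Returns true if the cross-module all-reduce `hlo` has replica groups this
// pass can combine: with global device ids, one group per replica covering
// exactly the partitions of that replica; otherwise, one group per replica.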
bool HasCombinableReplicaGroup(HloInstruction* hlo, int64_t num_partitions) {
auto all_reduce = Cast<HloAllReduceInstruction>(hlo);
auto replica_groups = all_reduce->replica_groups();
const int64_t replica_count = hlo->GetModule()->config().replica_count();
CHECK(all_reduce->IsCrossModuleAllReduce());
if (all_reduce->use_global_device_ids()) {
if (replica_groups.size() != replica_count) {
return false;
}
for (const auto& group : replica_groups) {
if (group.replica_ids_size() != num_partitions) {
return false;
}
absl::flat_hash_set<int64_t> partition_ids;
int64_t replica_id = group.replica_ids(0) / num_partitions;
for (int64_t i = 0; i < num_partitions; ++i) {
if (group.replica_ids(i) / num_partitions != replica_id) {
return false;
}
partition_ids.insert(group.replica_ids(i) % num_partitions);
}
if (partition_ids.size() != num_partitions) {
return false;
}
}
return true;
}
return replica_groups.size() == replica_count;
}
}
namespace m = match;
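// Matches the AR(CRS) pattern: a combinable cross-module all-reduce with an
// addition reduction whose single-user chain of shape ops and element-wise
// float ops ends in a cross-replica all-reduce that also reduces by addition.
// Returns the pair together with the distance between the two collectives.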
std::optional<ArCrsCombiner::ArCrsPair> ArCrsCombiner::MatchesArCrsPattern(
HloInstruction* instruction) {
auto can_ar_move_past_instruction = [](HloInstruction* instruction) -> bool {
if (instruction->user_count() != 1) {
return false;
}
switch (instruction->opcode()) {
case HloOpcode::kBitcast:
case HloOpcode::kTranspose:
case HloOpcode::kReshape:
return true;
case HloOpcode::kConvert:
return ShapeUtil::ElementIsFloating(instruction->shape()) ==
ShapeUtil::ElementIsFloating(instruction->operand(0)->shape());
case HloOpcode::kAdd:
case HloOpcode::kSubtract:
case HloOpcode::kMultiply:
return ShapeUtil::ElementIsFloating(instruction->shape());
default:
return false;
}
};
auto computation_is_addition = [](HloComputation* c) {
return c->instruction_count() == 3 &&
Match(c->root_instruction(), m::Add(m::Parameter(), m::Parameter()));
};
if (instruction->IsCrossModuleAllReduce() &&
HasCombinableReplicaGroup(instruction, num_spatial_partitions_) &&
computation_is_addition(instruction->called_computations()[0]) &&
instruction->user_count() == 1) {
auto next = instruction->users()[0];
int64_t distance = 1;
while (!next->IsCrossReplicaAllReduce()) {
if (can_ar_move_past_instruction(next)) {
next = next->users()[0];
} else {
return std::nullopt;
}
++distance;
}
if (!Cast<HloAllReduceInstruction>(next)->IsNoop() &&
computation_is_addition(next->called_computations()[0])) {
ArCrsPair pair(instruction, next, distance);
VLOG(2) << "ArCrsPair matching pattern: " << pair.ToString();
return pair;
}
}
return std::nullopt;
}
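// If `instruction` is the parameter of a computation whose unique caller is a
// while op, returns that while instruction.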
std::optional<HloInstruction*> ArCrsCombiner::WhileFromBodyParameter(
HloInstruction* instruction) {
CHECK_EQ(HloOpcode::kParameter, instruction->opcode());
HloComputation* computation = instruction->parent();
auto caller_instructions = call_graph_->GetComputationCallers(computation);
if (caller_instructions.size() == 1) {
auto caller_instruction = caller_instructions[0];
if (caller_instruction->opcode() == HloOpcode::kWhile) {
return caller_instruction;
}
}
return std::nullopt;
}
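// If `instruction` is the parameter of a computation whose unique caller is a
// conditional, returns that conditional instruction.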
std::optional<HloInstruction*> ArCrsCombiner::ConditionalFromBodyParameter(
HloInstruction* instruction) {
CHECK_EQ(HloOpcode::kParameter, instruction->opcode());
HloComputation* computation = instruction->parent();
auto caller_instructions = call_graph_->GetComputationCallers(computation);
if (caller_instructions.size() == 1) {
auto caller_instruction = caller_instructions[0];
if (caller_instruction->opcode() == HloOpcode::kConditional) {
return caller_instruction;
}
}
return std::nullopt;
}
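// Traces `instruction` back through domains, get-tuple-elements, parameters,
// whiles, and conditionals to the set of kTuple instructions that may define
// its value; returns nullopt when the provenance cannot be fully resolved.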
std::optional<std::vector<HloInstruction*>> ArCrsCombiner::GetAllTuples(
HloInstruction* instruction,
absl::flat_hash_set<HloInstruction*>* visited) {
if (visited->find(instruction) != visited->end()) {
return std::vector<HloInstruction*>();
}
visited->insert(instruction);
switch (instruction->opcode()) {
case HloOpcode::kTuple: {
return std::vector<HloInstruction*>({instruction});
}
case HloOpcode::kDomain: {
return GetAllTuples(instruction->operands()[0], visited);
}
case HloOpcode::kParameter: {
auto maybe_while = WhileFromBodyParameter(instruction);
if (maybe_while) {
auto while_instr = *maybe_while;
auto init_tuples = GetAllTuples(while_instr->while_init(), visited);
auto body_tuples = GetAllTuples(
while_instr->while_body()->root_instruction(), visited);
if (!init_tuples || !body_tuples) {
return std::nullopt;
}
auto result = *init_tuples;
result.insert(result.end(), body_tuples->begin(), body_tuples->end());
return result;
}
auto maybe_conditional = ConditionalFromBodyParameter(instruction);
if (maybe_conditional) {
auto cond_instr = *maybe_conditional;
std::vector<HloInstruction*> tuples;
for (int64_t i = 0; i < cond_instr->branch_computations().size(); ++i) {
if (cond_instr->branch_computation(i)->parameter_instruction(0) ==
instruction) {
auto branch_tuples =
GetAllTuples(cond_instr->mutable_operand(i + 1), visited);
if (!branch_tuples) {
return std::nullopt;
}
tuples.insert(tuples.end(), branch_tuples->begin(),
branch_tuples->end());
}
}
return tuples;
}
return std::nullopt;
}
case HloOpcode::kGetTupleElement: {
std::vector<HloInstruction*> result_tuples;
auto tuples = GetAllTuples(instruction->operands()[0], visited);
if (!tuples) {
return std::nullopt;
}
for (auto tuple : *tuples) {
auto tmp_tuples = GetAllTuples(
tuple->mutable_operand(instruction->tuple_index()), visited);
if (!tmp_tuples) {
return std::nullopt;
}
result_tuples.insert(result_tuples.end(), tmp_tuples->begin(),
tmp_tuples->end());
}
return result_tuples;
}
case HloOpcode::kConditional: {
std::vector<HloInstruction*> result_tuples;
const auto& branch_computations = instruction->branch_computations();
result_tuples.reserve(branch_computations.size());
for (HloComputation* body : branch_computations) {
if (body->root_instruction()->opcode() != HloOpcode::kTuple) {
return std::nullopt;
}
result_tuples.push_back(body->root_instruction());
}
return result_tuples;
}
case HloOpcode::kWhile: {
auto init_tuples = GetAllTuples(instruction->while_init(), visited);
auto body_tuples =
GetAllTuples(instruction->while_body()->root_instruction(), visited);
if (!init_tuples || !body_tuples) {
return std::nullopt;
}
auto result = *init_tuples;
result.insert(result.end(), body_tuples->begin(), body_tuples->end());
return result;
}
default:
return std::nullopt;
}
}
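// True if elements i1 and i2 of every tuple that can flow into
// `tuple_shaped_instruction` provably compute the same value.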
bool ArCrsCombiner::TupleElementsComputeSameValue(
HloInstruction* tuple_shaped_instruction, int64_t i1, int64_t i2,
absl::flat_hash_map<int64_t, int64_t>* visited_pairs) {
absl::flat_hash_set<HloInstruction*> visited;
auto tuples = GetAllTuples(tuple_shaped_instruction, &visited);
if (!tuples) {
return false;
}
for (auto tuple : *tuples) {
CHECK_EQ(tuple->opcode(), HloOpcode::kTuple);
if (!InstructionsComputeSameValue(tuple->mutable_operand(i1),
tuple->mutable_operand(i2),
visited_pairs)) {
return false;
}
}
return true;
}
bool ArCrsCombiner::TestInstructionsComputeSameValue(HloInstruction* i1,
HloInstruction* i2) {
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/false);
auto module = i1->GetModule();
CHECK_EQ(module, i2->GetModule());
combiner.call_graph_ = CallGraph::Build(module);
absl::flat_hash_map<int64_t, int64_t> visited_pairs;
return combiner.InstructionsComputeSameValue(i1, i2, &visited_pairs);
}
bool ArCrsCombiner::InstructionsComputeSameValue(
HloInstruction* i1, HloInstruction* i2,
absl::flat_hash_map<int64_t, int64_t>* visited_pairs) {
if (i1 == i2) {
return true;
}
auto uid1 = i1->unique_id();
auto uid2 = i2->unique_id();
auto min_uid = std::min(uid1, uid2);
auto max_uid = std::max(uid1, uid2);
auto it = visited_pairs->find(min_uid);
if (it != visited_pairs->end() && max_uid == it->second) {
return true;
}
auto opcode1 = i1->opcode();
auto operands1 = i1->operands();
if (opcode1 != i2->opcode() || operands1.size() != i2->operands().size()) {
return false;
}
auto eq_computations = [](const HloComputation* a, const HloComputation* b) {
return *a == *b;
};
auto eq_operands = [](const HloInstruction*, const HloInstruction*) {
return true;
};
if (i1->IsCrossModuleAllReduce()) {
return i1->Identical(*i2, eq_operands, eq_computations,
                         /*layout_sensitive=*/false);
}
visited_pairs->emplace(min_uid, max_uid);
for (int i = 0; i < operands1.size(); ++i) {
auto operand1 = operands1[i];
auto operand2 = i2->operands()[i];
if (!InstructionsComputeSameValue(operand1, operand2, visited_pairs)) {
return false;
}
}
if (opcode1 == HloOpcode::kParameter) {
return false;
}
if (opcode1 == HloOpcode::kGetTupleElement) {
return i1->tuple_index() == i2->tuple_index() ||
TupleElementsComputeSameValue(operands1[0], i1->tuple_index(),
i2->tuple_index(), visited_pairs);
}
auto eq_instructions = [](const HloInstruction* i1,
const HloInstruction* i2) -> bool { return true; };
return i1->Identical(*i2, eq_instructions, eq_computations,
                       /*layout_sensitive=*/false);
}
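// Collects matched AR/CRS pairs keyed by all-reduce channel id, keeping at
// most one all-reduce per CRS and preferring the pair with the longer AR->CRS
// distance when two all-reduces reach the same CRS.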
void ArCrsCombiner::GroupAllReducesById(HloModule* module) {
absl::flat_hash_set<int64_t> discarded_ar_ids;
for (HloComputation* computation : module->MakeNonfusionComputations()) {
for (HloInstruction* instruction : computation->instructions()) {
auto maybe_pair = MatchesArCrsPattern(instruction);
if (maybe_pair) {
auto pair = *maybe_pair;
int64_t ar_id = *(instruction->channel_id());
if (discarded_ar_ids.find(ar_id) != discarded_ar_ids.end()) {
continue;
}
auto it = crs_reserved_map_.find(pair.crs);
if (it != crs_reserved_map_.end()) {
auto prev_ar_id = it->second;
CHECK(all_reduce_map_.find(ar_id) == all_reduce_map_.end());
CHECK_NE(prev_ar_id, ar_id);
auto prev_pair = all_reduce_map_[prev_ar_id].back();
int64_t prev_distance = prev_pair.distance;
if (prev_distance < pair.distance) {
VLOG(2) << "Replacing ArCrsPair: " << prev_pair.ToString()
<< " with ArCrsPair: " << pair.ToString();
all_reduce_map_.erase(prev_ar_id);
discarded_ar_ids.insert(prev_ar_id);
all_reduce_map_[ar_id].push_back(pair);
crs_reserved_map_[pair.crs] = ar_id;
} else {
discarded_ar_ids.insert(ar_id);
}
} else {
if (all_reduce_map_.find(ar_id) != all_reduce_map_.end()) {
int64_t prev_distance = all_reduce_map_[ar_id].back().distance;
CHECK_EQ(prev_distance, pair.distance)
<< "All ARs with the same AR ID must have the same distance "
"from the corresponding CRSs. Found: "
<< prev_distance << " and " << pair.distance;
}
all_reduce_map_[ar_id].push_back(pair);
crs_reserved_map_[pair.crs] = ar_id;
}
}
}
}
}
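// MPMD mode: for each channel id, checks that the per-partition paths from
// the all-reduces to the CRS compute the same value, erasing groups that
// cannot be proven equal.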
absl::Status ArCrsCombiner::KeepProvablyEqualInstructionGroupsMPMD() {
for (auto it = all_reduce_map_.begin(); it != all_reduce_map_.end();) {
auto copy_it = it++;
auto channel_id = copy_it->first;
VLOG(2)
<< "KeepProvablyEqualInstructionGroups. Checking AllReduce channel id: "
<< channel_id << "\n";
auto pairs_vec = copy_it->second;
TF_RET_CHECK(pairs_vec.size() == num_spatial_partitions_);
auto instr_0 = pairs_vec[0].ar;
for (int i = 1; i < pairs_vec.size(); ++i) {
auto instr_i = pairs_vec[i].ar;
auto next_0 = instr_0->users()[0];
auto next_i = instr_i->users()[0];
absl::flat_hash_map<int64_t, int64_t> visited_pairs;
while (true) {
if (!InstructionsComputeSameValue(next_0, next_i, &visited_pairs)) {
all_reduce_map_.erase(copy_it);
VLOG(2) << "KeepProvablyEqualInstructionGroups. Erased AllReduce "
"channel id: "
<< channel_id << "\n";
break;
}
if (next_0->IsCrossReplicaAllReduce()) {
break;
}
next_0 = next_0->users()[0];
next_i = next_i->users()[0];
}
}
}
return absl::OkStatus();
}
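// SPMD mode: uses replication analysis to check that every instruction on the
// AR->CRS path is replicated across partitions, erasing groups that are not.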
absl::Status ArCrsCombiner::KeepProvablyEqualInstructionGroupsSPMD(
HloModule* module) {
TF_ASSIGN_OR_RETURN(
auto replication_analysis,
      HloReplicationAnalysis::Run(module, /*cross_partition_spmd=*/true));
for (auto it = all_reduce_map_.begin(); it != all_reduce_map_.end();) {
auto copy_it = it++;
auto channel_id = copy_it->first;
VLOG(2)
<< "KeepProvablyEqualInstructionGroups. Checking AllReduce channel id: "
<< channel_id << "\n";
auto pairs_vec = copy_it->second;
TF_RET_CHECK(pairs_vec.size() == 1);
auto instr = pairs_vec[0].ar;
auto next = instr->users()[0];
while (true) {
TF_RET_CHECK(next->shape().IsArray());
if (!replication_analysis->HloInstructionIsReplicatedAt(next, {})) {
all_reduce_map_.erase(copy_it);
VLOG(2) << "KeepProvablyEqualInstructionGroups. Erased AllReduce "
"channel id: "
<< channel_id << "\n";
break;
}
if (next->IsCrossReplicaAllReduce()) {
break;
}
next = next->users()[0];
}
}
return absl::OkStatus();
}
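// Rewrites each surviving group: the cross-module all-reduce is deleted, and
// the walk down to the CRS patches every add/subtract so the sum stays
// correct. If the other operand is itself a single-user cross-module AR, that
// AR is deleted as well; otherwise the operand is divided by the number of
// spatial partitions, because the combined all-reduce would otherwise sum it
// once per partition. Finally the CRS inherits the AR's channel id, becoming
// a combined cross-module/cross-replica all-reduce.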
absl::StatusOr<bool> ArCrsCombiner::RewriteGraph() {
if (all_reduce_map_.empty()) {
return false;
}
for (const auto& it : all_reduce_map_) {
auto pairs_vec = it.second;
for (auto pair : pairs_vec) {
auto all_reduce = pair.ar;
auto parent_computation = all_reduce->parent();
auto channel_id = all_reduce->channel_id();
auto prev = all_reduce->mutable_operand(0);
auto next = all_reduce->users()[0];
TF_CHECK_OK(all_reduce->ReplaceUseWith(next, prev));
TF_CHECK_OK(parent_computation->RemoveInstruction(all_reduce));
while (!next->IsCrossReplicaAllReduce()) {
switch (next->opcode()) {
case HloOpcode::kBitcast:
case HloOpcode::kTranspose:
case HloOpcode::kReshape:
case HloOpcode::kConvert:
case HloOpcode::kMultiply:
break;
case HloOpcode::kAdd:
case HloOpcode::kSubtract: {
auto other_operand = (next->operands()[0] == prev)
? next->operands()[1]
: next->operands()[0];
if (other_operand->IsCrossModuleAllReduce() &&
other_operand->user_count() == 1) {
TF_CHECK_OK(other_operand->ReplaceAllUsesWith(
other_operand->mutable_operand(0)));
} else {
auto shape = other_operand->shape();
Literal lit(shape);
lit.PopulateWithValue<float>(num_spatial_partitions_);
auto divisor = parent_computation->AddInstruction(
HloInstruction::CreateConstant(lit.Clone()));
auto division = parent_computation->AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kDivide,
other_operand, divisor));
TF_CHECK_OK(other_operand->ReplaceUseWith(next, division));
}
break;
}
default:
LOG(FATAL) << "Unexpected instruction: " << next->ToShortString();
}
prev = next;
next = next->users()[0];
}
next->set_channel_id(channel_id);
}
}
return true;
}
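// Pass entry point: build the call graph, group candidate AR/CRS pairs, prune
// groups that cannot be proven safe (MPMD or SPMD variant), then rewrite. In
// SPMD mode with more than one replica, replicated all-reduces are also
// rewritten via ReplaceReplicatedAllReduce.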
absl::StatusOr<bool> ArCrsCombiner::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
call_graph_ = CallGraph::Build(module);
GroupAllReducesById(module);
if (spmd_partition_) {
TF_RETURN_IF_ERROR(KeepProvablyEqualInstructionGroupsSPMD(module));
} else {
TF_RETURN_IF_ERROR(KeepProvablyEqualInstructionGroupsMPMD());
}
TF_ASSIGN_OR_RETURN(auto changed, RewriteGraph());
if (module->config().replica_count() > 1 && spmd_partition_) {
TF_ASSIGN_OR_RETURN(auto replaced, ReplaceReplicatedAllReduce(
module, num_spatial_partitions_));
changed |= replaced;
}
return changed;
}
} | #include "xla/service/ar_crs_combiner.h"
#include <cstdint>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
class ArCrsCombinerTest : public HloTestBase {};
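// The SameValueTest* cases below exercise
// ArCrsCombiner::TestInstructionsComputeSameValue on instruction pairs pulled
// out of small HLO modules.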
TEST_F(ArCrsCombinerTest, SameValueTestBasecase) {
const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (p: f32[2,2]) -> (f32[2,2], f32[2,2]) {
%p = f32[2,2] parameter(0)
%constant.f32.1 = f32[2,2] constant({{1, 2}, {3, 4}})
%constant.f32.2 = f32[2,2] constant({{1, 2}, {3, 4}})
ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32.1, %constant.f32.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_tuple = module->entry_computation()->root_instruction();
auto i1 = root_tuple->operands()[0];
auto i2 = root_tuple->operands()[1];
EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(
i1, module->entry_computation()->parameter_instruction(0)));
EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestBasecase2) {
const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (x: f32[]) -> (f32[], f32[]) {
%x = f32[] parameter(0)
ROOT %tuple = (f32[], f32[]) tuple(%x, %x)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_tuple = module->entry_computation()->root_instruction();
auto i1 = root_tuple->operands()[0];
auto i2 = root_tuple->operands()[1];
EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestBasecase3) {
const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (x: f32[], y: f32[]) -> (f32[], f32[]) {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %tuple = (f32[], f32[]) tuple(%x, %y)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_tuple = module->entry_computation()->root_instruction();
auto i1 = root_tuple->operands()[0];
auto i2 = root_tuple->operands()[1];
EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestNumOperands) {
const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (p: f32[2,2]) -> ((f32[2,2]), (f32[2,2], f32[2,2])) {
%p = f32[2,2] parameter(0)
%constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})
%tuple1 = (f32[2,2]) tuple(%constant.f32)
%tuple2 = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)
ROOT %tuple = ((f32[2,2]), (f32[2,2], f32[2,2])) tuple(%tuple1, %tuple2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_tuple = module->entry_computation()->root_instruction();
auto i1 = root_tuple->operands()[0];
auto i2 = root_tuple->operands()[1];
EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestSliceIndicesMatch) {
const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (p: f32[2]) -> (f32[1], f32[1]) {
%p = f32[2] parameter(0)
%slice.1 = f32[1] slice(f32[2] %p), slice={[0:1]}
%slice.2 = f32[1] slice(f32[2] %p), slice={[0:1]}
ROOT %tuple = (f32[1], f32[1]) tuple(%slice.1, %slice.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_tuple = module->entry_computation()->root_instruction();
auto i1 = root_tuple->operands()[0];
auto i2 = root_tuple->operands()[1];
EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestSliceIndicesDontMatch) {
const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (p: f32[2]) -> (f32[1], f32[1]) {
%p = f32[2] parameter(0)
%slice.1 = f32[1] slice(f32[2] %p), slice={[0:1]}
%slice.2 = f32[1] slice(f32[2] %p), slice={[1:2]}
ROOT %tuple = (f32[1], f32[1]) tuple(%slice.1, %slice.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_tuple = module->entry_computation()->root_instruction();
auto i1 = root_tuple->operands()[0];
auto i2 = root_tuple->operands()[1];
EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestTupleElementSameIndex) {
const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (p: f32[2,2]) -> (f32[2,2], f32[2,2]) {
%p = f32[2,2] parameter(0)
%constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})
%tuple.1 = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)
%get-tuple-element.1 = f32[2,2] get-tuple-element(%tuple.1), index=0
%get-tuple-element.2 = f32[2,2] get-tuple-element(%tuple.1), index=0
ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%get-tuple-element.1, %get-tuple-element.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_tuple = module->entry_computation()->root_instruction();
auto i1 = root_tuple->operands()[0];
auto i2 = root_tuple->operands()[1];
EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestTupleElementDifferentIndex1) {
const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (p: f32[2,2]) -> (f32[2,2], f32[2,2]) {
%p = f32[2,2] parameter(0)
%constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})
%tuple.1 = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)
%get-tuple-element.1 = f32[2,2] get-tuple-element(%tuple.1), index=0
%get-tuple-element.2 = f32[2,2] get-tuple-element(%tuple.1), index=1
ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%get-tuple-element.1, %get-tuple-element.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_tuple = module->entry_computation()->root_instruction();
auto i1 = root_tuple->operands()[0];
auto i2 = root_tuple->operands()[1];
EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestTupleElementDifferentIndex2) {
const char* module_str = R"(
HloModule foobar
ENTRY %entrycomp (p: f32[2,2]) -> (f32[2,2], f32[2,2]) {
%p = f32[2,2] parameter(0)
%constant.f32.1 = f32[2,2] constant({{1, 2}, {3, 4}})
%constant.f32.2 = f32[2,2] constant({{2, 3}, {4, 5}})
%tuple.1 = (f32[2,2], f32[2,2]) tuple(%constant.f32.1, %constant.f32.2)
%get-tuple-element.1 = f32[2,2] get-tuple-element(%tuple.1), index=0
%get-tuple-element.2 = f32[2,2] get-tuple-element(%tuple.1), index=1
ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%get-tuple-element.1, %get-tuple-element.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_tuple = module->entry_computation()->root_instruction();
auto i1 = root_tuple->operands()[0];
auto i2 = root_tuple->operands()[1];
EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestWhile1) {
const char* module_str = R"(
HloModule foobar
%condition (x: (f32[2,2], f32[2,2])) -> pred[] {
%x = (f32[2,2], f32[2,2]) parameter(0)
%constant.0 = s32[] constant(0)
%constant.1 = s32[] constant(1)
ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %constant.0), direction=GT
}
%body (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) {
%x = (f32[2,2], f32[2,2]) parameter(0)
%constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})
%get-tuple-element.1 = f32[2,2] get-tuple-element(%x), index=0
%get-tuple-element.2 = f32[2,2] get-tuple-element(%x), index=1
%add.1 = f32[2,2] add(%get-tuple-element.1, %constant.f32)
%add.2 = f32[2,2] add(%get-tuple-element.2, %constant.f32)
ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%add.1, %add.2)
}
ENTRY %WhileLoop () -> (f32[2,2], f32[2,2]) {
%constant.f32 = f32[2,2] constant({{3, 4}, {5, 6}})
%init.tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)
ROOT %while = (f32[2,2], f32[2,2]) while(%init.tuple), condition=%condition, body=%body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_while = module->entry_computation()->root_instruction();
auto body_tuple = root_while->while_body()->root_instruction();
auto i1 = body_tuple->operands()[0];
auto i2 = body_tuple->operands()[1];
EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestWhile2) {
const char* module_str = R"(
HloModule foobar
%condition (x: (f32[2,2], f32[2,2])) -> pred[] {
%x = (f32[2,2], f32[2,2]) parameter(0)
%constant.0 = s32[] constant(0)
%constant.1 = s32[] constant(1)
ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %constant.0), direction=GT
}
%body (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) {
%x = (f32[2,2], f32[2,2]) parameter(0)
%constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})
%get-tuple-element.1 = f32[2,2] get-tuple-element(%x), index=0
%get-tuple-element.2 = f32[2,2] get-tuple-element(%x), index=1
%add.1 = f32[2,2] add(%get-tuple-element.1, %constant.f32)
%add.2 = f32[2,2] add(%get-tuple-element.2, %constant.f32)
ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%add.1, %add.2)
}
ENTRY %WhileLoop () -> (f32[2,2], f32[2,2]) {
%constant.f32.1 = f32[2,2] constant({{3, 4}, {5, 6}})
%constant.f32.2 = f32[2,2] constant({{3, 4}, {7, 8}})
%init.tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32.1, %constant.f32.2)
ROOT %while = (f32[2,2], f32[2,2]) while(%init.tuple), condition=%condition, body=%body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_while = module->entry_computation()->root_instruction();
auto body_tuple = root_while->while_body()->root_instruction();
auto i1 = body_tuple->operands()[0];
auto i2 = body_tuple->operands()[1];
EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestWhile3) {
const char* module_str = R"(
HloModule foobar
%condition (x: (f32[2,2], f32[2,2])) -> pred[] {
%x = (f32[2,2], f32[2,2]) parameter(0)
%constant.0 = s32[] constant(0)
%constant.1 = s32[] constant(1)
ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %constant.0), direction=GT
}
%body (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) {
%x = (f32[2,2], f32[2,2]) parameter(0)
%constant.f32.1 = f32[2,2] constant({{1, 2}, {3, 4}})
%constant.f32.2 = f32[2,2] constant({{3, 4}, {1, 2}})
%get-tuple-element.1 = f32[2,2] get-tuple-element(%x), index=0
%get-tuple-element.2 = f32[2,2] get-tuple-element(%x), index=1
%add.1 = f32[2,2] add(%get-tuple-element.1, %constant.f32.1)
%add.2 = f32[2,2] add(%get-tuple-element.2, %constant.f32.2)
ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%add.1, %add.2)
}
ENTRY %WhileLoop () -> (f32[2,2], f32[2,2]) {
%constant.f32 = f32[2,2] constant({{3, 4}, {5, 6}})
%init.tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)
ROOT %while = (f32[2,2], f32[2,2]) while(%init.tuple), condition=%condition, body=%body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_while = module->entry_computation()->root_instruction();
auto body_tuple = root_while->while_body()->root_instruction();
auto i1 = body_tuple->operands()[0]->operands()[0];
auto i2 = body_tuple->operands()[1]->operands()[0];
EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
TEST_F(ArCrsCombinerTest, SameValueTestNestedWhile) {
const char* module_str = R"(
HloModule foobar
%condition (x: (f32[2,2], f32[2,2])) -> pred[] {
%x = (f32[2,2], f32[2,2]) parameter(0)
ROOT %t = pred[] constant(true)
}
%body_inner (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) {
%x = (f32[2,2], f32[2,2]) parameter(0)
%constant.f32 = f32[2,2] constant({{1, 2}, {3, 4}})
%gte.1 = f32[2,2] get-tuple-element(%x), index=0
%gte.2 = f32[2,2] get-tuple-element(%x), index=1
%add.1 = f32[2,2] add(%gte.1, %constant.f32)
%add.2 = f32[2,2] add(%gte.2, %constant.f32)
ROOT %tuple = (f32[2,2], f32[2,2]) tuple(%add.1, %add.2)
}
%body_outer (x: (f32[2,2], f32[2,2])) -> (f32[2,2], f32[2,2]) {
%x = (f32[2,2], f32[2,2]) parameter(0)
%gte.1 = f32[2,2] get-tuple-element(%x), index=0
%gte.2 = f32[2,2] get-tuple-element(%x), index=1
%init = (f32[2,2], f32[2,2]) tuple(%gte.1, %gte.2)
ROOT %while.1 = (f32[2,2], f32[2,2]) while(%init), condition=%condition,
body=%body_inner
}
ENTRY %WhileLoop () -> (f32[2,2], f32[2,2]) {
%constant.f32 = f32[2,2] constant({{3, 4}, {5, 6}})
%init.tuple = (f32[2,2], f32[2,2]) tuple(%constant.f32, %constant.f32)
ROOT %while = (f32[2,2], f32[2,2]) while(%init.tuple), condition=%condition,
body=%body_outer
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto root_while = module->entry_computation()->root_instruction();
auto inner_while = root_while->while_body()->root_instruction();
auto i1 = inner_while->while_body()->root_instruction()->operands()[0];
auto i2 = inner_while->while_body()->root_instruction()->operands()[1];
EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(i1, i2));
}
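// Helper for the rewrite tests: the combiner must leave the replica groups of
// the surviving all-reduce untouched.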
void CompareReplicaGroups(absl::Span<const ReplicaGroup> groups_before,
absl::Span<const ReplicaGroup> groups_after) {
ASSERT_EQ(groups_before.size(), groups_after.size());
for (int i = 0; i < groups_before.size(); ++i) {
auto group_before = groups_before[i];
std::vector<int64_t> ids_before(group_before.replica_ids().begin(),
group_before.replica_ids().end());
auto group_after = groups_after[i];
std::vector<int64_t> ids_after(group_after.replica_ids().begin(),
group_after.replica_ids().end());
EXPECT_EQ(ids_before, ids_after);
}
}
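// The Rewrite* tests below run the pass over MPMD modules (two sharded copies
// of the AR->op->CRS pattern) or their SPMD counterparts (a single copy) and
// check the rewritten graph with opcode matchers.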
TEST_F(ArCrsCombinerTest, RewriteArConvertCrs) {
const char* module_str = R"(
HloModule foobar
%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {
%a = bf16[] parameter(0)
%b = bf16[] parameter(1)
ROOT %add = bf16[] add(%a, %b)
}
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: bf16[]) -> (f32[], f32[]) {
%p = bf16[] parameter(0)
%constant.bf16 = bf16[] constant(1)
%all-reduce.ar.1 = bf16[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.bf16,
sharding={maximal device=0}
%convert.1 = f32[]
convert(%all-reduce.ar.1),
sharding={maximal device=0}
%all-reduce.1 = f32[]
all-reduce(%convert.1),
replica_groups={{0,1}},
to_apply=%sum.f32,
sharding={maximal device=0}
%all-reduce.ar.2 = bf16[]
all-reduce(%constant.bf16),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.bf16,
sharding={maximal device=1}
%convert.2 = f32[]
convert(%all-reduce.ar.2),
sharding={maximal device=1}
%all-reduce.2 = f32[]
all-reduce(%convert.2),
replica_groups={{0,1}},
to_apply=%sum.f32,
sharding={maximal device=1}
ROOT %tuple = (f32[], f32[])
tuple(%all-reduce.1, %all-reduce.2),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/false);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Convert(op::Parameter())),
op::AllReduce(op::Convert(op::Constant()))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteArConvertCrsSPMD) {
const char* module_str = R"(
HloModule foobar
%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {
%a = bf16[] parameter(0)
%b = bf16[] parameter(1)
ROOT %add = bf16[] add(%a, %b)
}
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: bf16[]) -> (f32[]) {
%p = bf16[] parameter(0)
%all-reduce.ar.1 = bf16[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.bf16
%convert.1 = f32[] convert(%all-reduce.ar.1)
%all-reduce.1 = f32[]
all-reduce(%convert.1),
replica_groups={{0,1}},
to_apply=%sum.f32
ROOT %tuple = (f32[]) tuple(%all-reduce.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/true);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Convert(op::Parameter()))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteArBitcastCrs) {
const char* module_str = R"(
HloModule foobar
%sum.1 (a: f32[2,1], b: f32[2,1]) -> f32[2,1] {
%a = f32[2,1] parameter(0)
%b = f32[2,1] parameter(1)
ROOT %add = f32[2,1] add(%a, %b)
}
%sum.2 (x: f32[2], y: f32[2]) -> f32[2] {
%x = f32[2] parameter(0)
%y = f32[2] parameter(1)
ROOT %add = f32[2] add(%x, %y)
}
ENTRY %entrycomp (p: f32[2,1]) -> (f32[2], f32[2]) {
%p = f32[2,1] parameter(0)
%all-reduce.ar.1 = f32[2,1]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.1,
sharding={maximal device=0}
%bitcast.1 = f32[2]{0} bitcast(f32[2,1]{1,0} %all-reduce.ar.1)
%all-reduce.1 = f32[2]
all-reduce(%bitcast.1),
replica_groups={{0,1}},
to_apply=%sum.2,
sharding={maximal device=0}
%all-reduce.ar.2 = f32[2,1]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.1,
sharding={maximal device=1}
%bitcast.2 = f32[2]{0} bitcast(f32[2,1]{1,0} %all-reduce.ar.2)
%all-reduce.2 = f32[2]
all-reduce(%bitcast.2),
replica_groups={{0,1}},
to_apply=%sum.2,
sharding={maximal device=1}
ROOT %tuple = (f32[2], f32[2])
tuple(%all-reduce.1, %all-reduce.2),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/false);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Bitcast(op::Parameter())),
op::AllReduce(op::Bitcast(op::Parameter()))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteArMultiplyCrs) {
const char* module_str = R"(
HloModule foobar
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {
%p = f32[] parameter(0)
%constant.f32 = f32[] constant(123)
%all-reduce.ar.1 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.f32,
sharding={maximal device=0}
%multiply.1 = f32[]
multiply(%all-reduce.ar.1, %constant.f32),
sharding={maximal device=0}
%all-reduce.1 = f32[]
all-reduce(%multiply.1),
replica_groups={{0,1}},
to_apply=%sum.f32,
sharding={maximal device=0}
%all-reduce.ar.2 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.f32,
sharding={maximal device=1}
%multiply.2 = f32[]
multiply(%all-reduce.ar.2, %constant.f32),
sharding={maximal device=1}
%all-reduce.2 = f32[]
all-reduce(%multiply.2),
replica_groups={{0,1}},
to_apply=%sum.f32,
sharding={maximal device=1}
ROOT %tuple = (f32[], f32[])
tuple(%all-reduce.1, %all-reduce.2),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/false);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Multiply(op::Parameter(), op::Constant())),
op::AllReduce(op::Multiply(op::Parameter(), op::Constant()))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteArMultiplyCrsSPMD) {
const char* module_str = R"(
HloModule foobar
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[]) {
%p = f32[] parameter(0)
%constant.f32 = f32[] constant(123)
%all-reduce.ar.1 = f32[] all-reduce(%p), replica_groups={{0},{1}},
channel_id=1, to_apply=%sum.f32
%multiply.1 = f32[] multiply(%all-reduce.ar.1, %constant.f32)
%all-reduce.1 = f32[] all-reduce(%multiply.1), replica_groups={{0,1}},
to_apply=%sum.f32, sharding={maximal device=0}
ROOT %tuple = (f32[]) tuple(%all-reduce.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/true);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Multiply(op::Parameter(), op::Constant()))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteArConvertAddCrs) {
const char* module_str = R"(
HloModule foobar
%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {
%a = bf16[] parameter(0)
%b = bf16[] parameter(1)
ROOT %add = bf16[] add(%a, %b)
}
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {
%p = f32[] parameter(0)
%constant.bf16 = bf16[] constant(1)
%constant.f32 = f32[] constant(2)
%all-reduce.ar.1 = bf16[]
all-reduce(%constant.bf16),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.bf16,
sharding={maximal device=0}
%convert.1 = f32[]
convert(%all-reduce.ar.1),
sharding={maximal device=0}
%add.1 = f32[]
add(%constant.f32, %convert.1),
sharding={maximal device=0}
%all-reduce.1 = f32[]
all-reduce(%add.1),
replica_groups={{0,1}},
to_apply=%sum.f32,
sharding={maximal device=0}
%all-reduce.ar.2 = bf16[]
all-reduce(%constant.bf16),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.bf16,
sharding={maximal device=1}
%convert.2 = f32[]
convert(%all-reduce.ar.2),
sharding={maximal device=1}
%add.2 = f32[]
add(%constant.f32, %convert.2),
sharding={maximal device=1}
%all-reduce.2 = f32[]
all-reduce(%add.2),
replica_groups={{0,1}},
to_apply=%sum.f32,
sharding={maximal device=1}
ROOT %tuple = (f32[], f32[])
tuple(%all-reduce.1, %all-reduce.2),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/false);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(
op::AllReduce(op::Add(op::Divide(op::Constant(), op::Constant()),
op::Convert())),
op::AllReduce(op::Add(op::Divide(op::Constant(), op::Constant()),
op::Convert()))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteArConvertAddCrsSPMD) {
const char* module_str = R"(
HloModule foobar
%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {
%a = bf16[] parameter(0)
%b = bf16[] parameter(1)
ROOT %add = bf16[] add(%a, %b)
}
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[]) {
%p = f32[] parameter(0)
%constant.bf16 = bf16[] constant(1)
%constant.f32 = f32[] constant(2)
%all-reduce.ar.1 = bf16[] all-reduce(%constant.bf16), replica_groups={{0},{1}},
channel_id=1, to_apply=%sum.bf16
%convert.1 = f32[] convert(%all-reduce.ar.1), sharding={maximal device=0}
%add.1 = f32[] add(%constant.f32, %convert.1)
%all-reduce.1 = f32[] all-reduce(%add.1), replica_groups={{0,1}},
to_apply=%sum.f32
ROOT %tuple = (f32[]) tuple(%all-reduce.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/true);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Add(
op::Divide(op::Constant(), op::Constant()), op::Convert()))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, OtherSummandNotTheSameDontRewrite) {
const char* module_str = R"(
HloModule foobar
%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {
%a = bf16[] parameter(0)
%b = bf16[] parameter(1)
ROOT %add = bf16[] add(%a, %b)
}
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {
%p = f32[] parameter(0)
%constant.bf16 = bf16[] constant(1)
%constant.f32.1 = f32[] constant(2)
%constant.f32.2 = f32[] constant(3)
%all-reduce.ar.1 = bf16[]
all-reduce(%constant.bf16),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.bf16,
sharding={maximal device=0}
%convert.1 = f32[]
convert(%all-reduce.ar.1),
sharding={maximal device=0}
%add.1 = f32[]
add(%constant.f32.1, %convert.1),
sharding={maximal device=0}
%all-reduce.1 = f32[]
all-reduce(%add.1),
replica_groups={{0,1}},
to_apply=%sum.f32,
sharding={maximal device=0}
%all-reduce.ar.2 = bf16[]
all-reduce(%constant.bf16),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.bf16,
sharding={maximal device=1}
%convert.2 = f32[]
convert(%all-reduce.ar.2),
sharding={maximal device=1}
%add.2 = f32[]
add(%constant.f32.2, %convert.2),
sharding={maximal device=1}
%all-reduce.2 = f32[]
all-reduce(%add.2),
replica_groups={{0,1}},
to_apply=%sum.f32,
sharding={maximal device=1}
ROOT %tuple = (f32[], f32[])
tuple(%all-reduce.1, %all-reduce.2),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/false);
auto changed = combiner.Run(module.get()).value();
EXPECT_FALSE(changed);
}
TEST_F(ArCrsCombinerTest, OtherSummandNotTheSameDontRewriteSPMD) {
const char* module_str = R"(
HloModule foobar
%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {
%a = bf16[] parameter(0)
%b = bf16[] parameter(1)
ROOT %add = bf16[] add(%a, %b)
}
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[]) {
%p = f32[] parameter(0)
%constant.bf16 = bf16[] constant(1)
%constant.f32.1 = f32[] constant(2)
%all-reduce.ar.1 = bf16[] all-reduce(%constant.bf16), replica_groups={{0},{1}},
channel_id=1, to_apply=%sum.bf16
%convert.1 = f32[] convert(%all-reduce.ar.1)
%add.1 = f32[] add(%p, %convert.1)
%all-reduce.1 = f32[] all-reduce(%add.1), replica_groups={{0,1}}, to_apply=%sum.f32
ROOT %tuple = (f32[]) tuple(%all-reduce.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/true);
auto changed = combiner.Run(module.get()).value();
EXPECT_FALSE(changed);
}
TEST_F(ArCrsCombinerTest, ArThenCrsDontCrash) {
const char* module_str = R"(
HloModule foobar
%sum.1 (a: f32[], b: f32[]) -> f32[] {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {
%p = f32[] parameter(0)
%constant.f32 = f32[] constant(123)
%all-reduce.ar.1 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.1,
sharding={maximal device=0}
%all-reduce.1 = f32[]
all-reduce(%all-reduce.ar.1),
replica_groups={{0,1}},
to_apply=%sum.1,
sharding={maximal device=0}
%multiply.1 = f32[]
multiply(%all-reduce.1, %constant.f32),
sharding={maximal device=0}
%all-reduce.ar.2 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.1,
sharding={maximal device=1}
%all-reduce.2 = f32[]
all-reduce(%all-reduce.ar.2),
replica_groups={{0,1}},
to_apply=%sum.1,
sharding={maximal device=1}
%multiply.2 = f32[]
multiply(%all-reduce.2, %constant.f32),
sharding={maximal device=1}
ROOT %tuple = (f32[], f32[])
tuple(%all-reduce.1, %all-reduce.2),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/false);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Parameter()),
op::AllReduce(op::Parameter())));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteMultipleAdds) {
const char* module_str = R"(
HloModule foobar
%sum (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {
%p = f32[] parameter(0)
%constant.1 = f32[] constant(1)
%constant.2 = f32[] constant(2)
%all-reduce.ar.1 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum,
sharding={maximal device=0}
%add.11 = f32[]
add(%constant.1, %all-reduce.ar.1),
sharding={maximal device=0}
%add.12 = f32[]
add(%constant.2, %add.11),
sharding={maximal device=0}
%all-reduce.1 = f32[]
all-reduce(%add.12),
replica_groups={{0,1}},
to_apply=%sum,
sharding={maximal device=0}
%all-reduce.ar.2 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum,
sharding={maximal device=0}
%add.21 = f32[]
add(%constant.1, %all-reduce.ar.2),
sharding={maximal device=0}
%add.22 = f32[]
add(%constant.2, %add.21),
sharding={maximal device=0}
%all-reduce.2 = f32[]
all-reduce(%add.22),
replica_groups={{0,1}},
to_apply=%sum,
sharding={maximal device=0}
ROOT %tuple = (f32[], f32[])
tuple(%all-reduce.1, %all-reduce.2),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/false);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Add(
op::Divide(op::Constant(), op::Constant()),
op::Add(op::Divide(op::Constant(), op::Constant()),
op::Parameter()))),
op::AllReduce(op::Add(
op::Divide(op::Constant(), op::Constant()),
op::Add(op::Divide(op::Constant(), op::Constant()),
op::Parameter())))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteMultipleAddsSPMD) {
const char* module_str = R"(
HloModule foobar
%sum (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[]) {
%p = f32[] parameter(0)
%constant.1 = f32[] constant(1)
%constant.2 = f32[] constant(2)
%all-reduce.ar.1 = f32[] all-reduce(%p), replica_groups={{0},{1}},
channel_id=1, to_apply=%sum
%add.11 = f32[] add(%constant.1, %all-reduce.ar.1)
%add.12 = f32[] add(%constant.2, %add.11)
%all-reduce.1 = f32[] all-reduce(%add.12), replica_groups={{0,1}}, to_apply=%sum
ROOT %tuple = (f32[]) tuple(%all-reduce.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/true);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(
op::Add(op::Divide(op::Constant(), op::Constant()),
op::Add(op::Divide(op::Constant(), op::Constant()),
op::Parameter())))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteArSubtractCrs) {
const char* module_str = R"(
HloModule foobar
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {
%p = f32[] parameter(0)
%constant.f32 = f32[] constant(123)
%all-reduce.ar.1 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.f32,
sharding={maximal device=0}
%sub.1 = f32[]
subtract(%constant.f32, %all-reduce.ar.1),
sharding={maximal device=0}
%all-reduce.1 = f32[]
all-reduce(%sub.1),
replica_groups={{0,1}},
to_apply=%sum.f32,
sharding={maximal device=0}
%all-reduce.ar.2 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum.f32,
sharding={maximal device=1}
%sub.2 = f32[]
subtract(%constant.f32, %all-reduce.ar.2),
sharding={maximal device=1}
%all-reduce.2 = f32[]
all-reduce(%sub.2),
replica_groups={{0,1}},
to_apply=%sum.f32,
sharding={maximal device=1}
ROOT %tuple = (f32[], f32[])
tuple(%all-reduce.1, %all-reduce.2),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/false);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(
op::AllReduce(op::Subtract(op::Divide(op::Constant(), op::Constant()),
op::Parameter())),
op::AllReduce(op::Subtract(op::Divide(op::Constant(), op::Constant()),
op::Parameter()))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteArSubtractCrsSPMD) {
const char* module_str = R"(
HloModule foobar
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[]) {
%p = f32[] parameter(0)
%constant.f32 = f32[] constant(123)
%all-reduce.ar.1 = f32[] all-reduce(%p), replica_groups={{0},{1}},
channel_id=1, to_apply=%sum.f32
%sub.1 = f32[] subtract(%constant.f32, %all-reduce.ar.1)
%all-reduce.1 = f32[] all-reduce(%sub.1), replica_groups={{0,1}},
to_apply=%sum.f32
ROOT %tuple = (f32[]) tuple(%all-reduce.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/true);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Subtract(
op::Divide(op::Constant(), op::Constant()), op::Parameter()))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteMultipleARsLeft) {
const char* module_str = R"(
HloModule foobar
%sum (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {
%p = f32[] parameter(0)
%const1 = f32[] constant(1)
%const2 = f32[] constant(2)
%ar11 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum,
sharding={maximal device=0}
%add11 = f32[]
add(%ar11, %const1),
sharding={maximal device=0}
%ar12 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=2,
to_apply=%sum,
sharding={maximal device=0}
%add12 = f32[]
add(%add11, %ar12),
sharding={maximal device=0}
%crs1 = f32[]
all-reduce(%add12),
replica_groups={{0,1}},
to_apply=%sum,
sharding={maximal device=0}
%ar21 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum,
sharding={maximal device=1}
%add21 = f32[]
add(%ar21, %const1),
sharding={maximal device=1}
%ar22 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=2,
to_apply=%sum,
sharding={maximal device=1}
%add22 = f32[]
add(%add21, %ar22),
sharding={maximal device=1}
%crs2 = f32[]
all-reduce(%add22),
replica_groups={{0,1}},
to_apply=%sum,
sharding={maximal device=1}
ROOT %tuple = (f32[], f32[])
tuple(%crs1, %crs2),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/false);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Add(
op::Add(op::Parameter(),
op::Divide(op::Constant(), op::Constant())),
op::Parameter())),
op::AllReduce(op::Add(
op::Add(op::Parameter(),
op::Divide(op::Constant(), op::Constant())),
op::Parameter()))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteMultipleARsLeftSPMD) {
const char* module_str = R"(
HloModule foobar
%sum (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[]) {
%p = f32[] parameter(0)
%const1 = f32[] constant(1)
%const2 = f32[] constant(2)
%ar11 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1,
to_apply=%sum
%add11 = f32[] add(%ar11, %const1)
%ar12 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=2,
to_apply=%sum
%add12 = f32[] add(%add11, %ar12)
%crs1 = f32[] all-reduce(%add12), replica_groups={{0,1}},
to_apply=%sum
ROOT %tuple = (f32[]) tuple(%crs1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/true);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Add(
op::Add(op::Parameter(), op::Divide(op::Constant(), op::Constant())),
op::Parameter()))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteMultipleARsRight) {
const char* module_str = R"(
HloModule foobar
%sum (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[], f32[]) {
%p = f32[] parameter(0)
%const1 = f32[] constant(1)
%const2 = f32[] constant(2)
%ar11 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum,
sharding={maximal device=0}
%ar12 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=2,
to_apply=%sum,
sharding={maximal device=0}
%add11 = f32[]
add(%ar12, %const1),
sharding={maximal device=0}
%add12 = f32[]
add(%ar11, %add11),
sharding={maximal device=0}
%crs1 = f32[]
all-reduce(%add12),
replica_groups={{0,1}},
to_apply=%sum,
sharding={maximal device=0}
%ar21 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=1,
to_apply=%sum,
sharding={maximal device=1}
%ar22 = f32[]
all-reduce(%p),
replica_groups={{0},{1}},
channel_id=2,
to_apply=%sum,
sharding={maximal device=1}
%add21 = f32[]
add(%ar22, %const1),
sharding={maximal device=1}
%add22 = f32[]
add(%ar21, %add21),
sharding={maximal device=1}
%crs2 = f32[]
all-reduce(%add22),
replica_groups={{0,1}},
to_apply=%sum,
sharding={maximal device=1}
ROOT %tuple = (f32[], f32[])
tuple(%crs1, %crs2),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/false);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Add(
op::Parameter(),
op::Add(op::Parameter(),
op::Divide(op::Constant(), op::Constant())))),
op::AllReduce(op::Add(
op::Parameter(),
op::Add(op::Parameter(),
op::Divide(op::Constant(), op::Constant()))))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, RewriteMultipleARsRightSPMD) {
const char* module_str = R"(
HloModule foobar
%sum (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[]) -> (f32[]) {
%p = f32[] parameter(0)
%const1 = f32[] constant(1)
%const2 = f32[] constant(2)
%ar11 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=1, to_apply=%sum
%ar12 = f32[] all-reduce(%p), replica_groups={{0},{1}}, channel_id=2, to_apply=%sum
%add11 = f32[] add(%ar12, %const1)
%add12 = f32[] add(%ar11, %add11)
%crs1 = f32[] all-reduce(%add12), replica_groups={{0,1}}, to_apply=%sum
ROOT %tuple = (f32[]) tuple(%crs1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
auto crs_before =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_before = crs_before->replica_groups();
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/true);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::AllReduce(op::Add(
op::Parameter(),
op::Add(op::Parameter(),
op::Divide(op::Constant(), op::Constant()))))));
auto crs_after =
module->entry_computation()->root_instruction()->operands()[0];
auto replica_groups_after = crs_after->replica_groups();
CompareReplicaGroups(replica_groups_before, replica_groups_after);
}
TEST_F(ArCrsCombinerTest, OneReplicaDontRewrite) {
const char* module_str = R"(
HloModule foobar
%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {
%a = bf16[] parameter(0)
%b = bf16[] parameter(1)
ROOT %add = bf16[] add(%a, %b)
}
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: bf16[]) -> (f32[], f32[]) {
%p = bf16[] parameter(0)
%constant.bf16 = bf16[] constant(1)
%all-reduce.ar.1 = bf16[]
all-reduce(%p),
replica_groups={{0}},
channel_id=1,
to_apply=%sum.bf16,
sharding={maximal device=0}
%convert.1 = f32[]
convert(%all-reduce.ar.1),
sharding={maximal device=0}
%all-reduce.1 = f32[]
all-reduce(%convert.1),
replica_groups={{0}},
to_apply=%sum.f32,
sharding={maximal device=0}
%all-reduce.ar.2 = bf16[]
all-reduce(%constant.bf16),
replica_groups={{0}},
channel_id=1,
to_apply=%sum.bf16,
sharding={maximal device=1}
%convert.2 = f32[]
convert(%all-reduce.ar.2),
sharding={maximal device=1}
%all-reduce.2 = f32[]
all-reduce(%convert.2),
replica_groups={{0}},
to_apply=%sum.f32,
sharding={maximal device=1}
ROOT %tuple = (f32[], f32[])
tuple(%all-reduce.1, %all-reduce.2),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 1));
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/false);
auto changed = combiner.Run(module.get()).value();
EXPECT_FALSE(changed);
}
TEST_F(ArCrsCombinerTest, OneReplicaDontRewriteSPMD) {
const char* module_str = R"(
HloModule foobar
%sum.bf16 (a: bf16[], b: bf16[]) -> bf16[] {
%a = bf16[] parameter(0)
%b = bf16[] parameter(1)
ROOT %add = bf16[] add(%a, %b)
}
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: bf16[]) -> (f32[]) {
%p = bf16[] parameter(0)
%constant.bf16 = bf16[] constant(1)
%all-reduce.ar.1 = bf16[] all-reduce(%p), replica_groups={{0}},
channel_id=1, to_apply=%sum.bf16
%convert.1 = f32[] convert(%all-reduce.ar.1)
%all-reduce.1 = f32[] all-reduce(%convert.1),
replica_groups={{0}}, to_apply=%sum.f32
ROOT %tuple = (f32[]) tuple(%all-reduce.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 1));
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/true);
auto changed = combiner.Run(module.get()).value();
EXPECT_FALSE(changed);
}
TEST_F(ArCrsCombinerTest, SameValueTestConditional) {
const char* module_str = R"(
HloModule foobar
branch_true {
pt = (f32[2,4], f32[2,4]) parameter(0)
gte.0 = f32[2,4] get-tuple-element(pt), index=0
gte.1 = f32[2,4] get-tuple-element(pt), index=1
ROOT tuple.t = (f32[2,4], f32[2,4]) tuple(gte.1, gte.0)
}
branch_false {
pf = (f32[2,4], f32[2,4]) parameter(0)
gte.0 = f32[2,4] get-tuple-element(pf), index=0
gte.1 = f32[2,4] get-tuple-element(pf), index=1
add = f32[2,4] add(gte.1, gte.1)
ROOT tuple.f = (f32[2,4], f32[2,4]) tuple(gte.0, add)
}
ENTRY Parameters1.v4 {
constant = pred[] constant(true)
p = f32[2,4] parameter(0)
tuple = (f32[2,4], f32[2,4]) tuple(p, p)
ROOT conditional = (f32[2,4], f32[2,4]) conditional(constant, tuple, tuple), true_computation=branch_true, false_computation=branch_false
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
auto cond = module->entry_computation()->root_instruction();
auto branch_true = cond->branch_computation(0)->root_instruction();
auto t0 = branch_true->mutable_operand(0);
auto t1 = branch_true->mutable_operand(1);
EXPECT_TRUE(ArCrsCombiner::TestInstructionsComputeSameValue(t0, t1));
auto branch_false = cond->branch_computation(1)->root_instruction();
auto f0 = branch_false->mutable_operand(0);
auto f1 = branch_false->mutable_operand(1);
EXPECT_FALSE(ArCrsCombiner::TestInstructionsComputeSameValue(f0, f1));
}
TEST_F(ArCrsCombinerTest, AllReduceWithReplicas) {
const char* module_str = R"(
HloModule foobar
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: bf16[]) -> (f32[], f32[]) {
%p = bf16[] parameter(0)
%all-reduce.0 = f32[] all-reduce(%p), channel_id=1, replica_groups={{0,1}},
to_apply=%sum.f32, sharding={maximal device=0}
%all-reduce.1 = f32[] all-reduce(%p), channel_id=1, replica_groups={{0,1}},
to_apply=%sum.f32, sharding={maximal device=1}
%all-reduce.2 = f32[] all-reduce(%all-reduce.0), replica_groups={{0,1}},
to_apply=%sum.f32, sharding={maximal device=0}
%all-reduce.3 = f32[] all-reduce(%all-reduce.1), replica_groups={{0,1}},
to_apply=%sum.f32, sharding={maximal device=1}
ROOT %tuple = (f32[], f32[]) tuple(%all-reduce.2, %all-reduce.3),
sharding={{maximal device=0}, {maximal device=1}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/false);
auto changed = combiner.Run(module.get()).value();
EXPECT_FALSE(changed);
}
TEST_F(ArCrsCombinerTest, AllReduceWithReplicasSPMD) {
const char* module_str = R"(
HloModule foobar
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: bf16[]) -> (f32[]) {
%p = bf16[] parameter(0)
%all-reduce.0 = f32[] all-reduce(%p), channel_id=1, replica_groups={{0},{1}},
to_apply=%sum.f32
%all-reduce.2 = f32[] all-reduce(%all-reduce.0), replica_groups={{0},{1}},
to_apply=%sum.f32
ROOT %tuple = (f32[]) tuple(%all-reduce.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 2));
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/true);
auto changed = combiner.Run(module.get()).value();
EXPECT_FALSE(changed);
}
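// Unlike the rewrite tests above, this exercises the SPMD-only
// ReplaceReplicatedAllReduce path in Run(): a fully replicated all-reduce is
// replaced by a channel-tagged all-reduce divided by a broadcast constant.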
TEST_F(ArCrsCombinerTest, ReplaceReplicatedAllReduceSPMD) {
const char* module_str = R"(
HloModule foobar
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: f32[2,4]) -> f32[2,4] {
%p = f32[2,4] parameter(0), sharding={replicated}
ROOT %all-reduce = f32[2,4] all-reduce(%p), to_apply=%sum.f32,
replica_groups={{0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str, 32));
  ArCrsCombiner combiner(/*num_spatial_partitions=*/2,
                         /*spmd_partition=*/true);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Divide(op::AllReduce(op::Parameter()),
op::Broadcast(op::Constant())));
auto ar = root->operand(0);
auto divisor = root->operand(1)->operand(0);
EXPECT_TRUE(ar->channel_id());
EXPECT_TRUE(divisor->literal().IsAllFloat(2));
}
TEST_F(ArCrsCombinerTest, AllReduceWithGlobalIdReplicaGroups) {
const char* module_str = R"(
HloModule foobar
%sum.f32 (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
ENTRY %entrycomp (p: bf16[]) -> (f32[]) {
%p = bf16[] parameter(0)
%all-reduce.0 = f32[] all-reduce(%p), channel_id=1,
replica_groups={{0,1,2,3},{4,5,6,7}}, use_global_device_ids=true,
to_apply=%sum.f32
%all-reduce.2 = f32[] all-reduce(%all-reduce.0), replica_groups={{0,1}},
to_apply=%sum.f32
ROOT %tuple = (f32[]) tuple(%all-reduce.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
      ParseAndReturnVerifiedModule(module_str, /*replica_count=*/2,
                                   /*num_partitions=*/4));
  ArCrsCombiner combiner(/*num_spatial_partitions=*/4,
                         /*spmd_partition=*/true);
auto changed = combiner.Run(module.get()).value();
EXPECT_TRUE(changed);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/ar_crs_combiner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/ar_crs_combiner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ff6e614c-d7f4-4756-bc9e-cd60d9819967 | cpp | tensorflow/tensorflow | data_transfer | tensorflow/core/data/service/data_transfer.cc | tensorflow/core/data/service/data_transfer_test.cc | #include "tensorflow/core/data/service/data_transfer.h"
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/strings/str_join.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
namespace data {
namespace {
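// Process-wide registries mapping a transfer protocol name to its server and
// client factories; a single linker-initialized mutex guards both maps.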
mutex* get_lock() {
static mutex lock(LINKER_INITIALIZED);
return &lock;
}
using DataTransferServerFactories =
std::unordered_map<std::string, DataTransferServer::ServerFactoryT>;
DataTransferServerFactories& transfer_server_factories() {
static auto& factories = *new DataTransferServerFactories();
return factories;
}
using DataTransferClientFactories =
std::unordered_map<std::string, DataTransferClient::ClientFactoryT>;
DataTransferClientFactories& transfer_client_factories() {
static auto& factories = *new DataTransferClientFactories();
return factories;
}
}
GetElementResult GetElementResult::Copy() const {
GetElementResult copy;
copy.components = components;
copy.element_index = element_index;
copy.end_of_sequence = end_of_sequence;
copy.skip = skip;
return copy;
}
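// Estimates the result's footprint: the fixed-size fields plus every tensor's
// buffer and, for DT_VARIANT tensors holding a CompressedElement, the space
// used by the compressed payload.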
size_t GetElementResult::EstimatedMemoryUsageBytes() const {
size_t size_bytes = components.size() * sizeof(Tensor) +
sizeof(element_index) + sizeof(end_of_sequence) +
sizeof(skip);
for (const Tensor& tensor : components) {
size_bytes += tensor.TotalBytes();
if (tensor.dtype() != DT_VARIANT) {
continue;
}
const Variant& variant = tensor.scalar<Variant>()();
const CompressedElement* compressed = variant.get<CompressedElement>();
if (compressed) {
size_bytes += compressed->SpaceUsedLong();
}
}
return size_bytes;
}
void DataTransferServer::Register(std::string name, ServerFactoryT factory) {
mutex_lock l(*get_lock());
if (!transfer_server_factories().insert({name, factory}).second) {
LOG(ERROR)
<< "Two data transfer server factories are being registered with name "
<< name << ". Which one gets used is undefined.";
}
}
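// Builds a server using the factory registered under `name`; returns NotFound
// listing the registered names if there is no such factory.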
Status DataTransferServer::Build(std::string name, GetElementT get_element,
std::shared_ptr<DataTransferServer>* out) {
mutex_lock l(*get_lock());
auto it = transfer_server_factories().find(name);
if (it != transfer_server_factories().end()) {
return it->second(get_element, out);
}
std::vector<std::string> available_names;
for (const auto& factory : transfer_server_factories()) {
available_names.push_back(factory.first);
}
return errors::NotFound(
"No data transfer server factory has been registered for name ", name,
". The available names are: [ ", absl::StrJoin(available_names, ", "),
" ]");
}
void DataTransferClient::Register(std::string name, ClientFactoryT factory) {
mutex_lock l(*get_lock());
if (!transfer_client_factories().insert({name, factory}).second) {
LOG(ERROR)
<< "Two data transfer client factories are being registered with name "
<< name << ". Which one gets used is undefined.";
}
}
Status DataTransferClient::Build(std::string name, Config config,
std::unique_ptr<DataTransferClient>* out) {
mutex_lock l(*get_lock());
auto it = transfer_client_factories().find(name);
if (it != transfer_client_factories().end()) {
return it->second(config, out);
}
  std::vector<std::string> available_names;
for (const auto& factory : transfer_client_factories()) {
available_names.push_back(factory.first);
}
return errors::NotFound(
"No data transfer client factory has been registered for name ", name,
". The available names are: [ ", absl::StrJoin(available_names, ", "),
" ]");
}
}
} | #include "tensorflow/core/data/service/data_transfer.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
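// Minimal server implementation that records whether Start() was called.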
class TestDataTransferServer : public DataTransferServer {
public:
explicit TestDataTransferServer(bool* called) : called_(called) {}
Status Start(const experimental::WorkerConfig& unused_config) override {
*called_ = true;
return absl::OkStatus();
}
int Port() const override { return 0; }
private:
bool* called_;
};
template <class T>
GetElementResult MakeElementResult(T value) {
GetElementResult result;
result.components.push_back(Tensor(std::move(value)));
result.element_index = 0;
result.end_of_sequence = false;
return result;
}
TEST(DataTransferTest, RegisterDataTransferServerBuilder) {
bool called = false;
DataTransferServer::Register("test", [&called](auto ignore, auto* server) {
*server = std::make_shared<TestDataTransferServer>(&called);
return absl::OkStatus();
});
std::shared_ptr<DataTransferServer> server;
TF_ASSERT_OK(DataTransferServer::Build("test", {}, &server));
EXPECT_FALSE(called);
TF_ASSERT_OK(server->Start({}));
EXPECT_TRUE(called);
}
TEST(DataTransferTest, EstimateMemoryUsageBytes) {
GetElementResult empty;
EXPECT_GT(empty.EstimatedMemoryUsageBytes(), 0);
Tensor tensor(DT_INT64, TensorShape({10, 100}));
GetElementResult int64_result = MakeElementResult(tensor);
EXPECT_GT(int64_result.EstimatedMemoryUsageBytes(), 1000 * sizeof(int64_t));
EXPECT_GT(int64_result.EstimatedMemoryUsageBytes(),
int64_result.components[0].AllocatedBytes());
EXPECT_GE(int64_result.EstimatedMemoryUsageBytes(), sizeof(int64_result));
}
TEST(DataTransferTest, EstimateVariantMemoryUsageBytes) {
const size_t data_size = 1000;
std::unique_ptr<CompressedElement> compressed{
      protobuf::Arena::Create<CompressedElement>(/*arena=*/nullptr)};
compressed->set_data(std::string(data_size, 'a'));
Tensor tensor(DT_VARIANT, TensorShape({}));
tensor.scalar<Variant>()() = *compressed;
GetElementResult variant_result = MakeElementResult(tensor);
EXPECT_GT(variant_result.EstimatedMemoryUsageBytes(), data_size);
EXPECT_GT(variant_result.EstimatedMemoryUsageBytes(),
compressed->ByteSizeLong());
EXPECT_GT(variant_result.EstimatedMemoryUsageBytes(),
compressed->SpaceUsedLong());
}
TEST(DataTransferTest, CopyGetElementResult) {
std::string hello_world = "hello, world!";
GetElementResult result = MakeElementResult(hello_world);
ASSERT_EQ(result.components.size(), 1);
EXPECT_GT(result.EstimatedMemoryUsageBytes(), hello_world.size());
GetElementResult copy = result.Copy();
ASSERT_EQ(copy.components.size(), 1);
test::ExpectEqual(result.components[0], copy.components[0]);
EXPECT_EQ(copy.EstimatedMemoryUsageBytes(),
result.EstimatedMemoryUsageBytes());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/data_transfer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/data_transfer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fca111e8-154f-4c10-bf20-b92bba5d67bb | cpp | google/arolla | while_loop_impl | arolla/expr/operators/while_loop/while_loop_impl.cc | arolla/expr/operators/while_loop/while_loop_impl_test.cc | #include "arolla/expr/operators/while_loop/while_loop_impl.h"
#include <algorithm>
#include <functional>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_visitor.h"
#include "arolla/expr/operators/while_loop/while_loop.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr_operators::while_loop_impl {
using ::arolla::expr::ExprNodePtr;
using ::arolla::expr::ExprOperatorPtr;
using ::arolla::expr::Placeholder;
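// Rewrites `expr` so that each maximal placeholder-free subexpression that
// touches a leaf is replaced by a placeholder; the extracted subexpressions
// are returned keyed by the names from `immutable_naming_function`. An
// expression containing no placeholders at all is extracted whole.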
absl::StatusOr<std::pair<ExprNodePtr, NamedExpressions>> ExtractImmutables(
const ExprNodePtr& expr, std::function<std::string(const ExprNodePtr& node)>
immutable_naming_function) {
NamedExpressions immutables;
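  // Per-node traversal state: the (possibly rewritten) node plus whether it
  // transitively depends on a placeholder or on a leaf.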
struct Visit {
ExprNodePtr expr;
bool has_placeholder_dep;
bool has_leaf_dep;
};
ASSIGN_OR_RETURN(
(auto [converted_expr, has_placeholder_dep, has_leaf_dep]),
expr::PostOrderTraverse(
expr,
[&](const ExprNodePtr& node,
absl::Span<const Visit* const> visits) -> absl::StatusOr<Visit> {
if (node->is_placeholder()) {
return Visit{.expr = node,
.has_placeholder_dep = true,
.has_leaf_dep = false};
}
if (node->is_leaf()) {
return Visit{.expr = node,
.has_placeholder_dep = false,
.has_leaf_dep = true};
}
bool has_placeholder_dep = std::any_of(
visits.begin(), visits.end(),
[](const auto& v) { return v->has_placeholder_dep; });
bool has_leaf_dep =
std::any_of(visits.begin(), visits.end(),
[](const auto& v) { return v->has_leaf_dep; });
if (!has_placeholder_dep) {
return Visit{.expr = node,
.has_placeholder_dep = false,
.has_leaf_dep = has_leaf_dep};
}
std::vector<ExprNodePtr> new_deps;
new_deps.reserve(visits.size());
for (const auto& visit : visits) {
if (visit->has_placeholder_dep || !visit->has_leaf_dep) {
new_deps.push_back(visit->expr);
} else {
auto placeholder_key = immutable_naming_function(visit->expr);
new_deps.emplace_back(Placeholder(placeholder_key));
immutables.emplace(std::move(placeholder_key), visit->expr);
}
}
ASSIGN_OR_RETURN(auto new_node, expr::WithNewDependencies(
node, std::move(new_deps)));
return Visit{.expr = new_node,
.has_placeholder_dep = true,
.has_leaf_dep = has_leaf_dep};
}));
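  // The whole expression was placeholder-free: extract it as one immutable.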
if (!has_placeholder_dep) {
DCHECK(immutables.empty());
auto placeholder_key = immutable_naming_function(converted_expr);
immutables.emplace(placeholder_key, converted_expr);
converted_expr = Placeholder(placeholder_key);
}
return {{std::move(converted_expr), std::move(immutables)}};
}
} | #include "arolla/expr/operators/while_loop/while_loop_impl.h"
#include <cstdint>
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status_matchers.h"
#include "absl/strings/str_format.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/util/fingerprint.h"
namespace arolla::expr_operators::while_loop_impl {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::arolla::expr::CallOp;
using ::arolla::expr::ExprNodePtr;
using ::arolla::expr::Leaf;
using ::arolla::expr::Literal;
using ::arolla::expr::Placeholder;
using ::arolla::testing::EqualsExpr;
using ::testing::IsEmpty;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
TEST(WhileLoopImplTest, ExtractImmutables) {
absl::flat_hash_map<Fingerprint, std::string> immutable_names;
auto immutable_naming_function = [&](const ExprNodePtr& node) -> std::string {
if (auto it = immutable_names.find(node->fingerprint());
it != immutable_names.end()) {
return it->second;
}
std::string name = absl::StrFormat("_immutable_%d", immutable_names.size());
immutable_names.emplace(node->fingerprint(), name);
return name;
};
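  // A lone literal is extracted as a single immutable.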
{
auto expr = Literal(int64_t{1});
EXPECT_THAT(ExtractImmutables(expr, immutable_naming_function),
IsOkAndHolds(Pair(
EqualsExpr(Placeholder("_immutable_0")),
UnorderedElementsAre(Pair(
"_immutable_0", EqualsExpr(Literal<int64_t>(1)))))));
}
{
auto expr = Leaf("fifty");
EXPECT_THAT(
ExtractImmutables(expr, immutable_naming_function),
IsOkAndHolds(Pair(EqualsExpr(Placeholder("_immutable_1")),
UnorderedElementsAre(Pair(
"_immutable_1", EqualsExpr(Leaf("fifty")))))));
}
{
auto expr = Placeholder("seven");
EXPECT_THAT(ExtractImmutables(expr, immutable_naming_function),
IsOkAndHolds(Pair(EqualsExpr(expr), IsEmpty())));
}
{
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp("math.add",
{Leaf("two"),
CallOp("math.add", {Placeholder("fifty"), Leaf("seven")})}));
EXPECT_THAT(ExtractImmutables(expr, immutable_naming_function),
IsOkAndHolds(Pair(
EqualsExpr(CallOp(
"math.add",
{Placeholder("_immutable_3"),
CallOp("math.add", {Placeholder("fifty"),
Placeholder("_immutable_2")})})),
UnorderedElementsAre(
Pair("_immutable_3", EqualsExpr(Leaf("two"))),
Pair("_immutable_2", EqualsExpr(Leaf("seven")))))));
}
{
ASSERT_OK_AND_ASSIGN(auto expr, CallOp("math.add", {Placeholder("fifty"),
Literal<int64_t>(7)}));
EXPECT_THAT(
ExtractImmutables(expr, immutable_naming_function),
IsOkAndHolds(Pair(EqualsExpr(CallOp("math.add", {Placeholder("fifty"),
Literal<int64_t>(7)})),
IsEmpty())));
}
{
ASSERT_OK_AND_ASSIGN(
auto expr57, CallOp("math.add", {Leaf("fifty"), Literal<int64_t>(7)}));
ASSERT_OK_AND_ASSIGN(auto expr,
CallOp("math.add", {expr57, Placeholder("two")}));
EXPECT_THAT(
ExtractImmutables(expr, immutable_naming_function),
IsOkAndHolds(Pair(
EqualsExpr(CallOp(
"math.add", {Placeholder("_immutable_4"), Placeholder("two")})),
UnorderedElementsAre(Pair("_immutable_4", EqualsExpr(expr57))))));
}
{
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp("math.add",
{CallOp("math.add", {Placeholder("fifty"), Leaf("seven")}),
Leaf("seven")}));
EXPECT_THAT(
ExtractImmutables(expr, immutable_naming_function),
IsOkAndHolds(Pair(
EqualsExpr(CallOp(
"math.add", {CallOp("math.add", {Placeholder("fifty"),
Placeholder("_immutable_2")}),
Placeholder("_immutable_2")})),
UnorderedElementsAre(
Pair("_immutable_2", EqualsExpr(Leaf("seven")))))));
}
{
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp("math.add",
{CallOp("math.add", {Literal<int64_t>(1), Leaf("fifty")}),
Placeholder("seven")}));
EXPECT_THAT(ExtractImmutables(expr, immutable_naming_function),
IsOkAndHolds(Pair(
EqualsExpr(CallOp("math.add", {Placeholder("_immutable_5"),
Placeholder("seven")})),
UnorderedElementsAre(Pair(
"_immutable_5",
EqualsExpr(CallOp("math.add", {Literal<int64_t>(1),
Leaf("fifty")})))))));
}
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operators/while_loop/while_loop_impl.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operators/while_loop/while_loop_impl_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
8e9734e8-2cbe-4ac0-bf10-2ea388d9cbf8 | cpp | tensorflow/tensorflow | gather_nd | tensorflow/lite/kernels/gather_nd.cc | tensorflow/lite/kernels/gather_nd_test.cc | #include <stdint.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace gather_nd {
constexpr int kParams = 0;
constexpr int kIndices = 1;
constexpr int kOutputTensor = 0;
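// Validates the params/indices types and ranks, then resizes the output to
// indices.shape[:-1] + params.shape[indices_nd:], where indices_nd is the
// size of the innermost indices dimension.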
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* params;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kParams, ¶ms));
const TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kIndices, &indices));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
switch (params->type) {
case kTfLiteFloat32:
case kTfLiteUInt8:
case kTfLiteInt8:
case kTfLiteInt16:
case kTfLiteInt64:
case kTfLiteInt32:
case kTfLiteString:
case kTfLiteBool:
break;
default:
TF_LITE_KERNEL_LOG(context,
"Params of type '%s' are not supported by gather_nd.",
TfLiteTypeGetName(params->type));
return kTfLiteError;
}
switch (indices->type) {
case kTfLiteInt64:
case kTfLiteInt32:
case kTfLiteInt16:
break;
default:
TF_LITE_KERNEL_LOG(context,
"Indices of type '%s' are not supported by gather_nd.",
TfLiteTypeGetName(indices->type));
return kTfLiteError;
}
const int params_rank = NumDimensions(params);
const int indices_rank = NumDimensions(indices);
const int indices_nd = SizeOfDimension(indices, indices_rank - 1);
if (params_rank < 1) {
TF_LITE_KERNEL_LOG(context, "Params must be at least a vector.");
return kTfLiteError;
}
if (indices_rank < 1) {
TF_LITE_KERNEL_LOG(context, "Indices must be at least a vector.");
return kTfLiteError;
}
if (indices_nd > params_rank) {
TF_LITE_KERNEL_LOG(
context, "Index innermost dimension length must be <= params rank.");
return kTfLiteError;
}
output->type = params->type;
const int output_rank = indices_rank + params_rank - indices_nd - 1;
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(output_rank);
int output_index = 0;
for (int i = 0; i < indices_rank - 1; ++i) {
output_shape->data[output_index++] = indices->dims->data[i];
}
for (int i = indices_nd; i < params_rank; ++i) {
output_shape->data[output_index++] = params->dims->data[i];
}
return context->ResizeTensor(context, output, output_shape);
}
template <typename ParamsT, typename IndicesT>
TfLiteStatus GatherNd(const TfLiteTensor* params, const TfLiteTensor* indices,
TfLiteTensor* output) {
return reference_ops::GatherNd(
GetTensorShape(params), GetTensorData<ParamsT>(params),
GetTensorShape(indices), GetTensorData<IndicesT>(indices),
GetTensorShape(output), GetTensorData<ParamsT>(output));
}
template <typename IndicesT>
TfLiteStatus GatherNdString(const TfLiteTensor* params,
const TfLiteTensor* indices, TfLiteTensor* output) {
return reference_ops::GatherNdString(
GetTensorShape(params), params, GetTensorShape(indices),
GetTensorData<IndicesT>(indices), GetTensorShape(output), output);
}
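// Rejects negative indices up front, then dispatches on the params dtype.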
template <typename IndicesT>
TfLiteStatus EvalGatherNd(TfLiteContext* context, const TfLiteTensor* params,
const TfLiteTensor* indices, TfLiteTensor* output) {
bool indices_has_only_positive_elements = true;
const auto* indices_values = GetTensorData<IndicesT>(indices);
const size_t num_indices = indices->bytes / sizeof(IndicesT);
for (size_t i = 0; i < num_indices; i++) {
if (indices_values[i] < 0) {
indices_has_only_positive_elements = false;
break;
}
}
TF_LITE_ENSURE(context, indices_has_only_positive_elements);
TfLiteStatus status = kTfLiteError;
switch (params->type) {
case kTfLiteFloat32:
status = GatherNd<float, IndicesT>(params, indices, output);
break;
case kTfLiteUInt8:
status = GatherNd<uint8_t, IndicesT>(params, indices, output);
break;
case kTfLiteInt8:
status = GatherNd<int8_t, IndicesT>(params, indices, output);
break;
case kTfLiteInt16:
status = GatherNd<int16_t, IndicesT>(params, indices, output);
break;
case kTfLiteInt32:
status = GatherNd<int32_t, IndicesT>(params, indices, output);
break;
case kTfLiteInt64:
status = GatherNd<int64_t, IndicesT>(params, indices, output);
break;
case kTfLiteString:
status = GatherNdString<IndicesT>(params, indices, output);
break;
case kTfLiteBool:
status = GatherNd<bool, IndicesT>(params, indices, output);
break;
default:
TF_LITE_KERNEL_LOG(context,
"Params type '%s' are not supported by gather_nd.",
TfLiteTypeGetName(params->type));
return kTfLiteError;
}
if (status != kTfLiteOk) {
TF_LITE_KERNEL_LOG(context, "gather_nd index out of bounds");
}
return status;
}
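// Dispatches on the indices dtype. An empty params tensor is only legal when
// the indices tensor is empty as well.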
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* params;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kParams, ¶ms));
const TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kIndices, &indices));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE(context,
(NumElements(params) == 0 && NumElements(indices) == 0) ||
NumElements(params) > 0);
switch (indices->type) {
case kTfLiteInt16:
return EvalGatherNd<int16_t>(context, params, indices, output);
case kTfLiteInt32:
return EvalGatherNd<int32_t>(context, params, indices, output);
case kTfLiteInt64:
return EvalGatherNd<int64_t>(context, params, indices, output);
default:
TF_LITE_KERNEL_LOG(context,
"Indices of type '%s' are not supported by gather_nd.",
TfLiteTypeGetName(indices->type));
return kTfLiteError;
}
}
}
TfLiteRegistration* Register_GATHER_ND() {
  static TfLiteRegistration r = {/*init=*/nullptr, /*free=*/nullptr,
                                 gather_nd::Prepare, gather_nd::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <initializer_list>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
using ::testing::Pointwise;
class GatherNdOpModel : public SingleOpModel {
public:
GatherNdOpModel(const TensorData& params, const TensorData& indices) {
params_ = AddInput(params);
indices_ = AddInput(indices);
output_ = AddOutput(params.type);
SetBuiltinOp(BuiltinOperator_GATHER_ND, BuiltinOptions_GatherNdOptions,
CreateGatherNdOptions(builder_).Union());
BuildInterpreter({GetShape(params_), GetShape(indices_)});
}
template <typename T>
void SetInput(std::initializer_list<T> data) {
PopulateTensor<T>(params_, data);
}
template <typename T>
void SetPositions(std::initializer_list<T> data) {
PopulateTensor<T>(indices_, data);
}
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int params_;
int indices_;
int output_;
};
TEST(GatherNdOpTest, ElementIndexingIntoMatrix) {
GatherNdOpModel m({TensorType_FLOAT32, {2, 2}}, {TensorType_INT32, {2, 2}});
m.SetInput<float>({1.1, 1.2, 2.1, 2.2});
m.SetPositions<int32_t>({0, 0, 1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(), Pointwise(FloatingPointEq(), {1.1, 2.2}));
}
TEST(GatherNdOpTest, ErrorOnOutOfBoundsTooLarge) {
GatherNdOpModel m({TensorType_FLOAT32, {2, 2}}, {TensorType_INT32, {2, 2}});
m.SetInput<float>({1.1, 1.2, 2.1, 2.2});
m.SetPositions<int32_t>({0, 0, 2, 0});
EXPECT_EQ(m.Invoke(), kTfLiteError);
m.SetPositions<int32_t>({0, 0, 1, 2});
EXPECT_EQ(m.Invoke(), kTfLiteError);
}
TEST(GatherNdOpTest, ErrorOnOutOfBoundsNegative) {
GatherNdOpModel m({TensorType_FLOAT32, {2, 2}}, {TensorType_INT32, {2, 2}});
m.SetInput<float>({1.1, 1.2, 2.1, 2.2});
m.SetPositions<int32_t>({1, -1, 1, 1});
EXPECT_EQ(m.Invoke(), kTfLiteError);
}
TEST(GatherNdOpTest, SliceIndexingIntoMatrix) {
GatherNdOpModel m({TensorType_FLOAT32, {2, 2}}, {TensorType_INT32, {2, 1}});
m.SetInput<float>({1.1, 1.2, 2.1, 2.2});
m.SetPositions<int32_t>({1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
Pointwise(FloatingPointEq(), {2.1, 2.2, 1.1, 1.2}));
}
TEST(GatherNdOpTest, BatchedIndexingIntoMatrix1) {
GatherNdOpModel m({TensorType_FLOAT32, {2, 2}},
{TensorType_INT32, {2, 1, 1}});
m.SetInput<float>({1.1, 1.2, 2.1, 2.2});
m.SetPositions<int32_t>({1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
Pointwise(FloatingPointEq(), {2.1, 2.2, 1.1, 1.2}));
}
TEST(GatherNdOpTest, BatchedIndexingIntoMatrix2) {
GatherNdOpModel m({TensorType_FLOAT32, {2, 2}},
{TensorType_INT32, {2, 1, 2}});
m.SetInput<float>({1.1, 1.2, 2.1, 2.2});
m.SetPositions<int32_t>({0, 0, 1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(), Pointwise(FloatingPointEq(), {1.1, 2.2}));
}
TEST(GatherNdOpTest, DuplicateIndexingIntoMatrix) {
GatherNdOpModel m({TensorType_FLOAT32, {2, 2}}, {TensorType_INT32, {2, 2}});
m.SetInput<float>({1.1, 1.2, 2.1, 2.2});
m.SetPositions<int32_t>({0, 0, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(), Pointwise(FloatingPointEq(), {1.1, 1.1}));
}
TEST(GatherNdOpTest, ElementIndexingIntoRank3Tensor) {
GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
{TensorType_INT32, {1, 2, 3}});
m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,
3.1, 3.2, -3.3, -4.1, -4.2, 4.3,
5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
m.SetPositions<int32_t>({0, 0, 1, 1, 1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(), Pointwise(FloatingPointEq(), {-1.2, -4.1}));
}
TEST(GatherNdOpTest, SliceIndexingIntoRank3Tensor) {
GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
{TensorType_INT32, {2, 1}});
m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,
3.1, 3.2, -3.3, -4.1, -4.2, 4.3,
5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
m.SetPositions<int32_t>({0, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
Pointwise(FloatingPointEq(), {1.1, -1.2, 1.3, -2.1, 2.2, 2.3, 5.1,
-5.2, 5.3, 6.1, -6.2, 6.3}));
}
TEST(GatherNdOpTest, BatchedIndexingIntoRank3Tensor1) {
GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
{TensorType_INT32, {2, 1, 3}});
m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,
3.1, 3.2, -3.3, -4.1, -4.2, 4.3,
5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
m.SetPositions<int32_t>({0, 0, 1, 1, 1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(), Pointwise(FloatingPointEq(), {-1.2, -4.1}));
}
TEST(GatherNdOpTest, BatchedIndexingIntoRank3Tensor2) {
GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
{TensorType_INT32, {2, 1, 1}});
m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,
3.1, 3.2, -3.3, -4.1, -4.2, 4.3,
5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
m.SetPositions<int32_t>({1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
Pointwise(FloatingPointEq(), {3.1, 3.2, -3.3, -4.1, -4.2, 4.3,
1.1, -1.2, 1.3, -2.1, 2.2, 2.3}));
}
TEST(GatherNdOpTest, BatchedIndexingIntoRank3Tensor3) {
GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
{TensorType_INT32, {2, 2, 2}});
m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,
3.1, 3.2, -3.3, -4.1, -4.2, 4.3,
5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
m.SetPositions<int32_t>({0, 1, 1, 0, 0, 0, 2, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
Pointwise(FloatingPointEq(), {-2.1, 2.2, 2.3, 3.1, 3.2, -3.3, 1.1,
-1.2, 1.3, 6.1, -6.2, 6.3}));
}
TEST(GatherNdOpTest, BatchedIndexingIntoRank3Tensor4) {
GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
{TensorType_INT32, {2, 2, 3}});
m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,
3.1, 3.2, -3.3, -4.1, -4.2, 4.3,
5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
m.SetPositions<int32_t>({0, 0, 1, 1, 0, 1, 1, 1, 2, 2, 1, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
Pointwise(FloatingPointEq(), {-1.2, 3.2, 4.3, 6.3}));
}
TEST(GatherNdOpTest, DuplicateIndexingIntoRank3Tensor) {
GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
{TensorType_INT32, {2, 2}});
m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,
3.1, 3.2, -3.3, -4.1, -4.2, 4.3,
5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
m.SetPositions<int32_t>({0, 1, 0, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
Pointwise(FloatingPointEq(), {-2.1, 2.2, 2.3, -2.1, 2.2, 2.3}));
}
TEST(GatherNdOpTest, Float32Int32) {
GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
{TensorType_INT32, {2, 2}});
m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,
3.1, 3.2, -3.3, -4.1, -4.2, 4.3,
5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
m.SetPositions<int32_t>({0, 1, 1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
Pointwise(FloatingPointEq(), {-2.1, 2.2, 2.3, 3.1, 3.2, -3.3}));
}
TEST(GatherNdOpTest, Float32Int64) {
GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
{TensorType_INT64, {2, 2}});
m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,
3.1, 3.2, -3.3, -4.1, -4.2, 4.3,
5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
m.SetPositions<int64_t>({0LL, 1LL, 1LL, 0LL});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
Pointwise(FloatingPointEq(), {-2.1, 2.2, 2.3, 3.1, 3.2, -3.3}));
}
TEST(GatherNdOpTest, Int32Int32) {
GatherNdOpModel m({TensorType_INT32, {3, 2, 3}}, {TensorType_INT32, {2, 2}});
m.SetInput<int32_t>({1, -1, 1, -2, 2, 2,
3, 3, -3, -4, -4, 4,
5, -5, 5, 6, -6, 6});
m.SetPositions<int32_t>({0, 1, 1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int32_t>(), ElementsAreArray({-2, 2, 2, 3, 3, -3}));
}
TEST(GatherNdOpTest, Int32Int64) {
GatherNdOpModel m({TensorType_INT32, {3, 2, 3}}, {TensorType_INT64, {2, 2}});
m.SetInput<int32_t>({1, -1, 1, -2, 2, 2,
3, 3, -3, -4, -4, 4,
5, -5, 5, 6, -6, 6});
m.SetPositions<int64_t>({0LL, 1LL, 1LL, 0LL});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int32_t>(), ElementsAreArray({-2, 2, 2, 3, 3, -3}));
}
TEST(GatherNdOpTest, Uint8Int32) {
GatherNdOpModel m({TensorType_UINT8, {3, 2, 3}}, {TensorType_INT32, {2, 2}});
m.SetInput<uint8_t>({1, 1, 1, 2, 2, 2,
3, 3, 3, 4, 4, 4,
5, 5, 5, 6, 6, 6});
m.SetPositions<int32_t>({0, 1, 1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<uint8_t>(), ElementsAreArray({2, 2, 2, 3, 3, 3}));
}
TEST(GatherNdOpTest, Uint8Int64) {
GatherNdOpModel m({TensorType_UINT8, {3, 2, 3}}, {TensorType_INT64, {2, 2}});
m.SetInput<uint8_t>({1, 1, 1, 2, 2, 2,
3, 3, 3, 4, 4, 4,
5, 5, 5, 6, 6, 6});
m.SetPositions<int64_t>({0, 1, 1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<uint8_t>(), ElementsAreArray({2, 2, 2, 3, 3, 3}));
}
TEST(GatherNdOpTest, Int8Int32) {
GatherNdOpModel m({TensorType_INT8, {3, 2, 3}}, {TensorType_INT32, {2, 2}});
m.SetInput<int8_t>({1, -1, 1, -2, 2, 2,
3, 3, -3, -4, -4, 4,
5, -5, 5, 6, -6, 6});
m.SetPositions<int32_t>({0, 1, 1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int8_t>(), ElementsAreArray({-2, 2, 2, 3, 3, -3}));
}
TEST(GatherNdOpTest, Int8Int64) {
GatherNdOpModel m({TensorType_INT8, {3, 2, 3}}, {TensorType_INT64, {2, 2}});
m.SetInput<int8_t>({1, -1, 1, -2, 2, 2,
3, 3, -3, -4, -4, 4,
5, -5, 5, 6, -6, 6});
m.SetPositions<int64_t>({0LL, 1LL, 1LL, 0LL});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int8_t>(), ElementsAreArray({-2, 2, 2, 3, 3, -3}));
}
TEST(GatherNdOpTest, Int16Int32) {
GatherNdOpModel m({TensorType_INT16, {3, 2, 3}}, {TensorType_INT32, {2, 2}});
m.SetInput<int16_t>({1, -1, 1, -2, 2, 2,
3, 3, -3, -4, -4, 4,
5, -5, 5, 6, -6, 6});
m.SetPositions<int32_t>({0, 1, 1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int16_t>(), ElementsAreArray({-2, 2, 2, 3, 3, -3}));
}
TEST(GatherNdOpTest, Int16Int64) {
GatherNdOpModel m({TensorType_INT16, {3, 2, 3}}, {TensorType_INT64, {2, 2}});
m.SetInput<int16_t>({1, -1, 1, -2, 2, 2,
3, 3, -3, -4, -4, 4,
5, -5, 5, 6, -6, 6});
m.SetPositions<int64_t>({0LL, 1LL, 1LL, 0LL});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int16_t>(), ElementsAreArray({-2, 2, 2, 3, 3, -3}));
}
TEST(GatherNdOpTest, Int64Int32) {
GatherNdOpModel m({TensorType_INT64, {3, 2, 3}}, {TensorType_INT32, {2, 2}});
m.SetInput<int64_t>({1LL, -1LL, 1LL, -2LL, 2LL, 2LL,
3LL, 3LL, -3LL, -4LL, -4LL, 4LL,
5LL, -5LL, 5LL, 6LL, -6LL, 6LL});
m.SetPositions<int32_t>({0, 1, 1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int64_t>(),
ElementsAreArray({-2LL, 2LL, 2LL, 3LL, 3LL, -3LL}));
}
TEST(GatherNdOpTest, Int64Int64) {
GatherNdOpModel m({TensorType_INT64, {3, 2, 3}}, {TensorType_INT64, {2, 2}});
m.SetInput<int64_t>({1LL, -1LL, 1LL, -2LL, 2LL, 2LL,
3LL, 3LL, -3LL, -4LL, -4LL, 4LL,
5LL, -5LL, 5LL, 6LL, -6LL, 6LL});
m.SetPositions<int64_t>({0LL, 1LL, 1LL, 0LL});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<int64_t>(),
ElementsAreArray({-2LL, 2LL, 2LL, 3LL, 3LL, -3LL}));
}
TEST(GatherNdOpTest, Float32Int16) {
GatherNdOpModel m({TensorType_FLOAT32, {3, 2, 3}},
{TensorType_INT16, {2, 2}});
m.SetInput<float>({1.1, -1.2, 1.3, -2.1, 2.2, 2.3,
3.1, 3.2, -3.3, -4.1, -4.2, 4.3,
5.1, -5.2, 5.3, 6.1, -6.2, 6.3});
m.SetPositions<int16_t>({0, 1, 1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
Pointwise(FloatingPointEq(), {-2.1, 2.2, 2.3, 3.1, 3.2, -3.3}));
}
TEST(GatherNdOpTest, StringInt32) {
GatherNdOpModel m({TensorType_STRING, {3, 2, 3}}, {TensorType_INT32, {2, 2}});
m.SetInput<std::string>({"A", "B", "C",
"D", "E", "F",
"G", "H", "I",
"J", "K", "L",
"M", "N", "O",
"P", "Q", "R"});
m.SetPositions<int32_t>({0, 1, 1, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<std::string>(),
ElementsAreArray({"D", "E", "F", "G", "H", "I"}));
}
TEST(GatherNdOpTest, StringInt64) {
GatherNdOpModel m({TensorType_STRING, {3, 2, 3}}, {TensorType_INT64, {2, 2}});
m.SetInput<std::string>({"A", "B", "C",
"D", "E", "F",
"G", "H", "I",
"J", "K", "L",
"M", "N", "O",
"P", "Q", "R"});
m.SetPositions<int64_t>({0LL, 1LL, 1LL, 0LL});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<std::string>(),
ElementsAreArray({"D", "E", "F", "G", "H", "I"}));
}
TEST(GatherNdOpTest, StringOutOfBoundsTooLarge) {
GatherNdOpModel m({TensorType_STRING, {3, 2, 3}}, {TensorType_INT32, {2, 2}});
m.SetInput<std::string>({"A", "B", "C",
"D", "E", "F",
"G", "H", "I",
"J", "K", "L",
"M", "N", "O",
"P", "Q", "R"});
m.SetPositions<int32_t>({0, 0, 3, 0});
ASSERT_EQ(m.Invoke(), kTfLiteError);
m.SetPositions<int32_t>({0, 0, 2, 2});
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
TEST(GatherNdOpTest, StringOutOfBoundsNegative) {
GatherNdOpModel m({TensorType_STRING, {3, 2, 3}}, {TensorType_INT32, {2, 2}});
m.SetInput<std::string>({"A", "B", "C",
"D", "E", "F",
"G", "H", "I",
"J", "K", "L",
"M", "N", "O",
"P", "Q", "R"});
m.SetPositions<int32_t>({1, -1, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
TEST(GatherNdOpTest, EmptyParamsAndIndex) {
GatherNdOpModel m({TensorType_FLOAT32, {1, 0}}, {TensorType_INT32, {0, 2}});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({0}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/gather_nd.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/gather_nd_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e580d97c-eae2-4a33-ad5b-e764f270b769 | cpp | abseil/abseil-cpp | raw_logging | absl/base/internal/raw_logging.cc | absl/base/raw_logging_test.cc | #include "absl/base/internal/raw_logging.h"
#include <cstdarg>
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <string>
#ifdef __EMSCRIPTEN__
#include <emscripten/console.h>
#endif
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/atomic_hook.h"
#include "absl/base/internal/errno_saver.h"
#include "absl/base/log_severity.h"
#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
defined(__hexagon__) || defined(__Fuchsia__) || \
defined(__native_client__) || defined(__OpenBSD__) || \
defined(__EMSCRIPTEN__) || defined(__ASYLO__)
#include <unistd.h>
#define ABSL_HAVE_POSIX_WRITE 1
#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
#else
#undef ABSL_HAVE_POSIX_WRITE
#endif
#if (defined(__linux__) || defined(__FreeBSD__)) && !defined(__ANDROID__)
#include <sys/syscall.h>
#define ABSL_HAVE_SYSCALL_WRITE 1
#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
#else
#undef ABSL_HAVE_SYSCALL_WRITE
#endif
#ifdef _WIN32
#include <io.h>
#define ABSL_HAVE_RAW_IO 1
#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
#else
#undef ABSL_HAVE_RAW_IO
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace raw_log_internal {
namespace {
#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
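// Spliced in place of the tail of a message that overflows the buffer.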
constexpr char kTruncated[] = " ... (message truncated)\n";
bool VADoRawLog(char** buf, int* size, const char* format, va_list ap)
ABSL_PRINTF_ATTRIBUTE(3, 0);
bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) {
if (*size < 0) return false;
int n = vsnprintf(*buf, static_cast<size_t>(*size), format, ap);
bool result = true;
if (n < 0 || n > *size) {
result = false;
if (static_cast<size_t>(*size) > sizeof(kTruncated)) {
n = *size - static_cast<int>(sizeof(kTruncated));
} else {
n = 0;
}
}
*size -= n;
*buf += n;
return result;
}
#endif
constexpr int kLogBufSize = 3000;
bool DoRawLog(char** buf, int* size, const char* format, ...)
ABSL_PRINTF_ATTRIBUTE(3, 4);
bool DoRawLog(char** buf, int* size, const char* format, ...) {
if (*size < 0) return false;
va_list ap;
va_start(ap, format);
int n = vsnprintf(*buf, static_cast<size_t>(*size), format, ap);
va_end(ap);
if (n < 0 || n > *size) return false;
*size -= n;
*buf += n;
return true;
}
bool DefaultLogFilterAndPrefix(absl::LogSeverity, const char* file, int line,
char** buf, int* buf_size) {
DoRawLog(buf, buf_size, "[%s : %d] RAW: ", file, line);
return true;
}
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
absl::base_internal::AtomicHook<LogFilterAndPrefixHook>
log_filter_and_prefix_hook(DefaultLogFilterAndPrefix);
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
absl::base_internal::AtomicHook<AbortHook> abort_hook;
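// Formats the message into a fixed-size stack buffer (no heap allocation),
// emits it via AsyncSignalSafeWriteError, and aborts on kFatal severity.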
void RawLogVA(absl::LogSeverity severity, const char* file, int line,
const char* format, va_list ap) ABSL_PRINTF_ATTRIBUTE(4, 0);
void RawLogVA(absl::LogSeverity severity, const char* file, int line,
const char* format, va_list ap) {
char buffer[kLogBufSize];
char* buf = buffer;
int size = sizeof(buffer);
#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
bool enabled = true;
#else
bool enabled = false;
#endif
#ifdef ABSL_MIN_LOG_LEVEL
if (severity < static_cast<absl::LogSeverity>(ABSL_MIN_LOG_LEVEL) &&
severity < absl::LogSeverity::kFatal) {
enabled = false;
}
#endif
enabled = log_filter_and_prefix_hook(severity, file, line, &buf, &size);
const char* const prefix_end = buf;
#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
if (enabled) {
bool no_chop = VADoRawLog(&buf, &size, format, ap);
if (no_chop) {
DoRawLog(&buf, &size, "\n");
} else {
DoRawLog(&buf, &size, "%s", kTruncated);
}
AsyncSignalSafeWriteError(buffer, static_cast<size_t>(buf - buffer));
}
#else
static_cast<void>(format);
static_cast<void>(ap);
static_cast<void>(enabled);
#endif
if (severity == absl::LogSeverity::kFatal) {
abort_hook(file, line, buffer, prefix_end, buffer + kLogBufSize);
abort();
}
}
void DefaultInternalLog(absl::LogSeverity severity, const char* file, int line,
const std::string& message) {
RawLog(severity, file, line, "%.*s", static_cast<int>(message.size()),
message.data());
}
}
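// Writes directly to stderr using the lowest-level primitive available on
// this platform, preserving errno for the caller.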
void AsyncSignalSafeWriteError(const char* s, size_t len) {
if (!len) return;
absl::base_internal::ErrnoSaver errno_saver;
#if defined(__EMSCRIPTEN__)
if (s[len - 1] == '\n') {
len--;
}
#if ABSL_INTERNAL_EMSCRIPTEN_VERSION >= 3001043
emscripten_errn(s, len);
#else
char buf[kLogBufSize];
if (len >= kLogBufSize) {
len = kLogBufSize - 1;
constexpr size_t trunc_len = sizeof(kTruncated) - 2;
memcpy(buf + len - trunc_len, kTruncated, trunc_len);
buf[len] = '\0';
len -= trunc_len;
} else {
buf[len] = '\0';
}
memcpy(buf, s, len);
_emscripten_err(buf);
#endif
#elif defined(ABSL_HAVE_SYSCALL_WRITE)
syscall(SYS_write, STDERR_FILENO, s, len);
#elif defined(ABSL_HAVE_POSIX_WRITE)
write(STDERR_FILENO, s, len);
#elif defined(ABSL_HAVE_RAW_IO)
  _write(/*fd=*/2, s, static_cast<unsigned>(len));
#else
(void)s;
(void)len;
#endif
}
void RawLog(absl::LogSeverity severity, const char* file, int line,
const char* format, ...) {
va_list ap;
va_start(ap, format);
RawLogVA(severity, file, line, format, ap);
va_end(ap);
}
bool RawLoggingFullySupported() {
#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
return true;
#else
return false;
#endif
}
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL
absl::base_internal::AtomicHook<InternalLogFunction>
internal_log_function(DefaultInternalLog);
void RegisterLogFilterAndPrefixHook(LogFilterAndPrefixHook func) {
log_filter_and_prefix_hook.Store(func);
}
void RegisterAbortHook(AbortHook func) { abort_hook.Store(func); }
void RegisterInternalLogFunction(InternalLogFunction func) {
internal_log_function.Store(func);
}
}
ABSL_NAMESPACE_END
} | #include "absl/base/internal/raw_logging.h"
#include <tuple>
#include "gtest/gtest.h"
#include "absl/strings/str_cat.h"
namespace {
TEST(RawLoggingCompilationTest, Log) {
ABSL_RAW_LOG(INFO, "RAW INFO: %d", 1);
ABSL_RAW_LOG(INFO, "RAW INFO: %d %d", 1, 2);
ABSL_RAW_LOG(INFO, "RAW INFO: %d %d %d", 1, 2, 3);
ABSL_RAW_LOG(INFO, "RAW INFO: %d %d %d %d", 1, 2, 3, 4);
ABSL_RAW_LOG(INFO, "RAW INFO: %d %d %d %d %d", 1, 2, 3, 4, 5);
ABSL_RAW_LOG(WARNING, "RAW WARNING: %d", 1);
ABSL_RAW_LOG(ERROR, "RAW ERROR: %d", 1);
}
TEST(RawLoggingCompilationTest, LogWithNulls) {
ABSL_RAW_LOG(INFO, "RAW INFO: %s%c%s", "Hello", 0, "World");
}
TEST(RawLoggingCompilationTest, PassingCheck) {
ABSL_RAW_CHECK(true, "RAW CHECK");
}
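// Empty pattern: the death tests below only assert that the process dies.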
const char kExpectedDeathOutput[] = "";
TEST(RawLoggingDeathTest, FailingCheck) {
EXPECT_DEATH_IF_SUPPORTED(ABSL_RAW_CHECK(1 == 0, "explanation"),
kExpectedDeathOutput);
}
TEST(RawLoggingDeathTest, LogFatal) {
EXPECT_DEATH_IF_SUPPORTED(ABSL_RAW_LOG(FATAL, "my dog has fleas"),
kExpectedDeathOutput);
}
TEST(InternalLog, CompilationTest) {
ABSL_INTERNAL_LOG(INFO, "Internal Log");
std::string log_msg = "Internal Log";
ABSL_INTERNAL_LOG(INFO, log_msg);
ABSL_INTERNAL_LOG(INFO, log_msg + " 2");
float d = 1.1f;
ABSL_INTERNAL_LOG(INFO, absl::StrCat("Internal log ", 3, " + ", d));
}
TEST(InternalLogDeathTest, FailingCheck) {
EXPECT_DEATH_IF_SUPPORTED(ABSL_INTERNAL_CHECK(1 == 0, "explanation"),
kExpectedDeathOutput);
}
TEST(InternalLogDeathTest, LogFatal) {
EXPECT_DEATH_IF_SUPPORTED(ABSL_INTERNAL_LOG(FATAL, "my dog has fleas"),
kExpectedDeathOutput);
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/internal/raw_logging.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/raw_logging_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
18f87e2e-9b1a-444a-9ef9-eca87df828f3 | cpp | google/tensorstore | stop_token | tensorstore/util/stop_token.h | tensorstore/util/stop_token_test.cc | #ifndef TENSORSTORE_UTIL_STOP_TOKEN_H_
#define TENSORSTORE_UTIL_STOP_TOKEN_H_
#include <atomic>
#include <cstddef>
#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/util/stop_token_impl.h"
namespace tensorstore {
class StopSource;
template <typename Callback>
class StopCallback;
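// Observes the stop state of an associated StopSource; copies are cheap and
// share the same underlying state. A default-constructed token can never
// report a stop.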
class StopToken {
public:
StopToken() noexcept = default;
~StopToken() noexcept = default;
StopToken(const StopToken&) noexcept = default;
StopToken(StopToken&&) noexcept = default;
StopToken& operator=(const StopToken&) noexcept = default;
StopToken& operator=(StopToken&&) noexcept = default;
[[nodiscard]] bool stop_possible() const noexcept {
return state_ != nullptr;
}
[[nodiscard]] bool stop_requested() const noexcept {
return state_ != nullptr && state_->stop_requested();
}
friend bool operator==(const StopToken& a, const StopToken& b) {
return a.state_ == b.state_;
}
friend bool operator!=(const StopToken& a, const StopToken& b) {
return !(a == b);
}
private:
friend class StopSource;
template <typename Callback>
friend class StopCallback;
StopToken(internal::IntrusivePtr<internal_stop_token::StopState> state)
: state_(std::move(state)) {}
internal::IntrusivePtr<internal_stop_token::StopState> state_{nullptr};
};
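// Owns the shared stop state; request_stop() notifies all associated tokens
// and runs any registered callbacks.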
class StopSource {
public:
StopSource() noexcept
: state_(internal::MakeIntrusivePtr<internal_stop_token::StopState>()) {}
explicit StopSource(std::nullptr_t) noexcept : state_(nullptr) {}
~StopSource() noexcept = default;
StopSource(const StopSource& b) noexcept = default;
StopSource(StopSource&&) noexcept = default;
StopSource& operator=(const StopSource& b) noexcept = default;
StopSource& operator=(StopSource&&) noexcept = default;
[[nodiscard]] bool stop_possible() const noexcept {
return state_ != nullptr;
}
[[nodiscard]] bool stop_requested() const noexcept {
return state_ != nullptr && state_->stop_requested();
}
bool request_stop() const noexcept {
if (state_ != nullptr) {
return state_->RequestStop();
}
return false;
}
[[nodiscard]] StopToken get_token() const noexcept {
return StopToken(state_);
}
private:
internal::IntrusivePtr<internal_stop_token::StopState> state_;
};
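// Scoped callback registration: the callback is invoked when a stop is
// requested (or immediately, if one was already requested), and it is
// unregistered on destruction. Not copyable or movable, since the stop state
// holds a pointer to the registration.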
template <typename Callback>
class StopCallback : private internal_stop_token::StopCallbackBase {
static_assert(std::is_invocable_v<Callback>);
public:
using callback_type = Callback;
StopCallback(const StopCallback&) = delete;
StopCallback& operator=(const StopCallback&) = delete;
StopCallback(StopCallback&&) = delete;
StopCallback& operator=(StopCallback&&) = delete;
template <
typename... Args,
std::enable_if_t<std::is_constructible_v<Callback, Args...>, int> = 0>
explicit StopCallback(const StopToken& token, Args&&... args)
: callback_(std::forward<Args>(args)...) {
internal_stop_token::StopState* state = token.state_.get();
if (state) {
invoker_ = &StopCallback::Invoker;
state->RegisterImpl(*this);
}
}
~StopCallback() {
internal_stop_token::StopState* state =
state_.exchange(nullptr, std::memory_order_acq_rel);
if (state != nullptr) {
state->UnregisterImpl(*this);
}
}
private:
static void Invoker(internal_stop_token::StopCallbackBase& self) noexcept {
static_cast<Callback&&>(static_cast<StopCallback&&>(self).callback_)();
}
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS Callback callback_;
};
template <typename Callback>
StopCallback(StopToken token, Callback callback) -> StopCallback<Callback>;
}
#endif | #include "tensorstore/util/stop_token.h"
#include <functional>
#include <optional>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/internal/testing/concurrent.h"
namespace {
TEST(StopTokenTest, Invariants) {
tensorstore::StopSource source;
EXPECT_TRUE(source.stop_possible());
EXPECT_FALSE(source.stop_requested());
tensorstore::StopToken token = source.get_token();
EXPECT_TRUE(source.stop_possible());
EXPECT_FALSE(source.stop_requested());
EXPECT_EQ(token, source.get_token());
EXPECT_TRUE(source.request_stop());
EXPECT_TRUE(source.stop_possible());
EXPECT_TRUE(source.stop_requested());
EXPECT_TRUE(token.stop_requested());
{
tensorstore::StopSource source2;
EXPECT_NE(token, source2.get_token());
}
}
TEST(StopTokenTest, Invariants_Null) {
tensorstore::StopSource source(nullptr);
EXPECT_FALSE(source.stop_possible());
EXPECT_FALSE(source.stop_requested());
tensorstore::StopToken token = source.get_token();
EXPECT_FALSE(source.stop_possible());
EXPECT_FALSE(source.stop_requested());
EXPECT_EQ(token, source.get_token());
EXPECT_FALSE(source.request_stop());
EXPECT_FALSE(source.stop_possible());
EXPECT_FALSE(source.stop_requested());
EXPECT_FALSE(token.stop_requested());
{
tensorstore::StopSource source2;
EXPECT_NE(token, source2.get_token());
}
}
TEST(StopTokenTest, Basic_InScope) {
tensorstore::StopSource source;
bool called = false;
{
tensorstore::StopCallback callback(source.get_token(),
[&]() { called = true; });
EXPECT_FALSE(called);
EXPECT_TRUE(source.request_stop());
}
EXPECT_TRUE(called);
}
TEST(StopTokenTest, Basic_NotInScope) {
tensorstore::StopSource source;
bool called = false;
{
tensorstore::StopCallback callback(source.get_token(),
[&]() { called = true; });
EXPECT_FALSE(called);
}
EXPECT_TRUE(source.request_stop());
EXPECT_FALSE(called);
}
TEST(StopTokenTest, Basic_Null) {
tensorstore::StopSource source(nullptr);
bool called = false;
{
tensorstore::StopCallback callback(source.get_token(),
[&]() { called = true; });
EXPECT_FALSE(called);
EXPECT_FALSE(source.request_stop());
}
EXPECT_FALSE(called);
}
TEST(StopTokenTest, StopAlreadyRequested) {
tensorstore::StopSource source;
EXPECT_TRUE(source.request_stop());
bool called = false;
tensorstore::StopCallback callback(source.get_token(),
[&]() { called = true; });
EXPECT_TRUE(called);
}
TEST(StopTokenTest, CallbackOrder) {
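  // Callbacks are invoked in LIFO registration order.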
bool called[3] = {};
auto do_nothing = []() {};
using DoNothingCallback = tensorstore::StopCallback<decltype(do_nothing)>;
tensorstore::StopSource source;
auto x = std::make_unique<DoNothingCallback>(source.get_token(), do_nothing);
tensorstore::StopCallback callback0(source.get_token(), [&]() {
EXPECT_TRUE(called[1]);
called[0] = true;
});
tensorstore::StopCallback callback1(source.get_token(), [&]() {
EXPECT_TRUE(called[2]);
called[1] = true;
});
tensorstore::StopCallback callback2(source.get_token(), [&]() {
EXPECT_FALSE(called[0]);
called[2] = true;
});
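  // A callback registered and destroyed before the stop must never run.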
{ DoNothingCallback tmp(source.get_token(), do_nothing); }
x = nullptr;
EXPECT_TRUE(source.request_stop());
EXPECT_TRUE(called[2]);
}
TEST(StopCallbackTest, InvokeValueCategory) {
struct Callback {
void operator()() const& { value += 1; }
void operator()() && { value += 100; }
int& value;
};
tensorstore::StopSource source;
int counts[3] = {};
tensorstore::StopCallback stop_callback0(source.get_token(),
Callback{counts[0]});
Callback callback1{counts[1]};
tensorstore::StopCallback<Callback&> stop_callback1(source.get_token(),
callback1);
tensorstore::StopCallback<const Callback> stop_callback2(source.get_token(),
Callback{counts[2]});
source.request_stop();
EXPECT_THAT(counts, ::testing::ElementsAre(100, 1, 1));
}
TEST(StopTokenTest, SelfDeregister) {
tensorstore::StopSource source;
std::optional<tensorstore::StopCallback<std::function<void()>>> callback{
std::in_place, source.get_token(), [&] { callback = std::nullopt; }};
EXPECT_TRUE(source.request_stop());
EXPECT_FALSE(callback.has_value());
}
TEST(StopTokenTest, Concurrent) {
tensorstore::StopSource source;
bool called = false;
std::optional<tensorstore::StopCallback<std::function<void()>>> callback;
::tensorstore::internal_testing::TestConcurrent(
100,
[&] {
tensorstore::StopSource new_source;
source = std::move(new_source);
called = false;
},
[&] {
EXPECT_TRUE(source.stop_requested());
callback = std::nullopt;
EXPECT_TRUE(called);
},
[&] { callback.emplace(source.get_token(), [&]() { called = true; }); },
[&] { source.request_stop(); },
[&] {
tensorstore::StopCallback callback(source.get_token(), []() {});
}
);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/stop_token.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/stop_token_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
1c5201c2-01d4-4a96-b235-4a452e3fa598 | cpp | tensorflow/tensorflow | semantic_version | third_party/xla/xla/stream_executor/semantic_version.cc | third_party/xla/xla/stream_executor/semantic_version_test.cc | #include "xla/stream_executor/semantic_version.h"
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "tsl/platform/statusor.h"
namespace stream_executor {
std::string SemanticVersion::ToString() const {
return absl::StrFormat("%d.%d.%d", major_, minor_, patch_);
}
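// Parses a single dotted version component as an unsigned integer.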
static absl::StatusOr<unsigned> ParseUnsignedNumber(
absl::string_view component) {
unsigned number;
if (!absl::SimpleAtoi(component, &number)) {
return absl::InvalidArgumentError(
absl::StrFormat("'%s' is not an unsigned number.", component));
}
return number;
}
absl::StatusOr<SemanticVersion> SemanticVersion::ParseFromString(
absl::string_view str) {
std::vector<absl::string_view> components = absl::StrSplit(str, '.');
if (components.size() != 3) {
return absl::InvalidArgumentError(
"Version does not match the format X.Y.Z");
}
SemanticVersion result{0, 0, 0};
TF_ASSIGN_OR_RETURN(result.major(), ParseUnsignedNumber(components[0]));
TF_ASSIGN_OR_RETURN(result.minor(), ParseUnsignedNumber(components[1]));
TF_ASSIGN_OR_RETURN(result.patch(), ParseUnsignedNumber(components[2]));
return result;
}
} | #include "xla/stream_executor/semantic_version.h"
#include <algorithm>
#include <array>
#include <sstream>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/hash/hash_testing.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
namespace stream_executor {
namespace {
TEST(SemanticVersion, Construction) {
SemanticVersion version{1, 2, 3};
EXPECT_EQ(version.major(), 1);
EXPECT_EQ(version.minor(), 2);
EXPECT_EQ(version.patch(), 3);
}
TEST(SemanticVersion, ConstructionFromArray) {
SemanticVersion version{std::array<unsigned, 3>{1, 2, 3}};
EXPECT_EQ(version.major(), 1);
EXPECT_EQ(version.minor(), 2);
EXPECT_EQ(version.patch(), 3);
}
TEST(SemanticVersion, Mutation) {
SemanticVersion version{0, 0, 0};
version.major() = 1;
version.minor() = 2;
version.patch() = 3;
EXPECT_EQ(version.major(), 1);
EXPECT_EQ(version.minor(), 2);
EXPECT_EQ(version.patch(), 3);
}
TEST(SemanticVersion, ParseFromStringSuccess) {
absl::StatusOr<SemanticVersion> version =
SemanticVersion::ParseFromString("1.2.3");
ASSERT_THAT(version, tsl::testing::IsOk());
EXPECT_EQ(version->major(), 1);
EXPECT_EQ(version->minor(), 2);
EXPECT_EQ(version->patch(), 3);
}
TEST(SemanticVersion, ParseFromStringInvalid) {
auto test = [](absl::string_view str) {
absl::StatusOr<SemanticVersion> version =
SemanticVersion::ParseFromString(str);
EXPECT_THAT(version,
tsl::testing::StatusIs(absl::StatusCode::kInvalidArgument));
};
test("1.2");
test("1.2.3dev5");
}
TEST(SemanticVersion, ToString) {
SemanticVersion version{1, 2, 3};
EXPECT_EQ(version.ToString(), "1.2.3");
}
TEST(SemanticVersion, AbslStringify) {
SemanticVersion version{1, 2, 3};
EXPECT_EQ(absl::StrCat(version), version.ToString());
}
TEST(SemanticVersion, OStream) {
SemanticVersion version{1, 2, 3};
std::ostringstream os;
os << version;
EXPECT_EQ(os.str(), version.ToString());
}
TEST(SemanticVersion, Equality) {
SemanticVersion version{1, 2, 3};
SemanticVersion other{1, 2, 4};
EXPECT_EQ(version, version);
EXPECT_FALSE(version != version);
EXPECT_NE(version, other);
EXPECT_FALSE(version == other);
}
TEST(SemanticVersion, Ordering) {
std::array<SemanticVersion, 5> versions = {
SemanticVersion{3, 3, 3}, SemanticVersion{0, 0, 0},
SemanticVersion{1, 2, 3}, SemanticVersion{1, 2, 4},
SemanticVersion{1, 3, 0}};
std::sort(versions.begin(), versions.end());
EXPECT_THAT(versions, testing::ElementsAre(
SemanticVersion{0, 0, 0}, SemanticVersion{1, 2, 3},
SemanticVersion{1, 2, 4}, SemanticVersion{1, 3, 0},
SemanticVersion{3, 3, 3}));
}
TEST(SemanticVersion, Hash) {
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({
SemanticVersion{0, 0, 0},
SemanticVersion{1, 2, 3},
SemanticVersion{1, 2, 4},
SemanticVersion{1, 3, 0},
SemanticVersion{3, 3, 3},
}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/semantic_version.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/semantic_version_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8ff2a112-c796-41ea-bcf0-13b123b8378b | cpp | tensorflow/tensorflow | device_name_utils | third_party/xla/xla/tsl/util/device_name_utils.cc | third_party/xla/xla/tsl/util/device_name_utils_test.cc | #include "xla/tsl/util/device_name_utils.h"
#include <algorithm>
#include "tsl/platform/errors.h"
namespace tsl {
static bool IsAlpha(char c) {
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z');
}
static bool IsAlphaNumOrUnderscore(char c) {
return IsAlpha(c) || (c >= '0' && c <= '9') || c == '_';
}
static bool IsJobName(absl::string_view in) {
return !in.empty() && IsAlpha(in.front()) &&
std::all_of(in.begin(), in.end(), IsAlphaNumOrUnderscore);
}
static bool ConsumePrefix(absl::string_view* in, string* out,
absl::string_view prefix_terminators) {
if (in->empty() || !IsAlpha(in->front())) return false;
const auto end_it =
std::find_first_of(in->begin(), in->end(), prefix_terminators.begin(),
prefix_terminators.end());
if (!std::all_of(in->begin(), end_it, IsAlphaNumOrUnderscore)) {
return false;
}
out->assign(in->begin(), end_it);
in->remove_prefix(end_it - in->begin());
return true;
}
static bool ConsumeJobName(absl::string_view* in, string* job) {
return ConsumePrefix(in, job, "/");
}
static bool ConsumeDeviceType(absl::string_view* in, string* device_type) {
return ConsumePrefix(in, device_type, "/:");
}
static bool ConsumeNumber(absl::string_view* in, int* val) {
uint64 tmp;
if (str_util::ConsumeLeadingDigits(in, &tmp)) {
*val = tmp;
return true;
} else {
return false;
}
}
static string DeviceName(const string& job, int replica, int task,
const string& device_prefix, const string& device_type,
int id) {
CHECK(IsJobName(job)) << job;
CHECK_LE(0, replica);
CHECK_LE(0, task);
CHECK(!device_type.empty());
CHECK_LE(0, id);
return strings::StrCat("/job:", job, "/replica:", replica, "/task:", task,
device_prefix, device_type, ":", id);
}
string DeviceNameUtils::FullName(const string& job, int replica, int task,
const string& type, int id) {
return DeviceName(job, replica, task, "/device:", type, id);
}
namespace {
string LegacyName(const string& job, int replica, int task, const string& type,
int id) {
return DeviceName(job, replica, task, "/", absl::AsciiStrToLower(type), id);
}
}
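// Accepts the modern "/device:TYPE:id" form as well as the legacy lowercase
// "/cpu:id" and "/gpu:id" forms; a '*' leaves the corresponding field unset.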
bool DeviceNameUtils::ParseFullName(absl::string_view fullname, ParsedName* p) {
p->Clear();
if (fullname == "/") {
return true;
}
while (!fullname.empty()) {
bool progress = false;
if (absl::ConsumePrefix(&fullname, "/job:")) {
p->has_job = !absl::ConsumePrefix(&fullname, "*");
if (p->has_job && !ConsumeJobName(&fullname, &p->job)) {
return false;
}
progress = true;
}
if (absl::ConsumePrefix(&fullname, "/replica:")) {
p->has_replica = !absl::ConsumePrefix(&fullname, "*");
if (p->has_replica && !ConsumeNumber(&fullname, &p->replica)) {
return false;
}
progress = true;
}
if (absl::ConsumePrefix(&fullname, "/task:")) {
p->has_task = !absl::ConsumePrefix(&fullname, "*");
if (p->has_task && !ConsumeNumber(&fullname, &p->task)) {
return false;
}
progress = true;
}
if (absl::ConsumePrefix(&fullname, "/device:")) {
p->has_type = !absl::ConsumePrefix(&fullname, "*");
if (p->has_type && !ConsumeDeviceType(&fullname, &p->type)) {
return false;
}
if (!absl::ConsumePrefix(&fullname, ":")) {
p->has_id = false;
} else {
p->has_id = !absl::ConsumePrefix(&fullname, "*");
if (p->has_id && !ConsumeNumber(&fullname, &p->id)) {
return false;
}
}
progress = true;
}
if (absl::ConsumePrefix(&fullname, "/cpu:") ||
absl::ConsumePrefix(&fullname, "/CPU:")) {
p->has_type = true;
p->type = "CPU";
p->has_id = !absl::ConsumePrefix(&fullname, "*");
if (p->has_id && !ConsumeNumber(&fullname, &p->id)) {
return false;
}
progress = true;
}
if (absl::ConsumePrefix(&fullname, "/gpu:") ||
absl::ConsumePrefix(&fullname, "/GPU:")) {
p->has_type = true;
p->type = "GPU";
p->has_id = !absl::ConsumePrefix(&fullname, "*");
if (p->has_id && !ConsumeNumber(&fullname, &p->id)) {
return false;
}
progress = true;
}
if (!progress) {
return false;
}
}
return true;
}
bool DeviceNameUtils::ParseFullOrLocalName(absl::string_view fullname,
ParsedName* p) {
return ParseFullName(fullname, p) || ParseLocalName(fullname, p);
}
namespace {
void CompleteName(const DeviceNameUtils::ParsedName& parsed_basename,
DeviceNameUtils::ParsedName* parsed_name) {
if (!parsed_name->has_job) {
parsed_name->job = parsed_basename.job;
parsed_name->has_job = true;
}
if (!parsed_name->has_replica) {
parsed_name->replica = parsed_basename.replica;
parsed_name->has_replica = true;
}
if (!parsed_name->has_task) {
parsed_name->task = parsed_basename.task;
parsed_name->has_task = true;
}
if (!parsed_name->has_type) {
parsed_name->type = parsed_basename.type;
parsed_name->has_type = true;
}
if (!parsed_name->has_id) {
parsed_name->id = parsed_basename.id;
parsed_name->has_id = true;
}
}
}
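// Canonicalizes `fullname` (a full or local device name, possibly partial)
// by filling any missing fields from `basename`, which must itself be fully
// specified.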
absl::Status DeviceNameUtils::CanonicalizeDeviceName(absl::string_view fullname,
absl::string_view basename,
string* canonical_name) {
*canonical_name = "";
ParsedName parsed_basename;
if (!ParseFullName(basename, &parsed_basename)) {
return errors::InvalidArgument("Could not parse basename: ", basename,
" into a device specification.");
}
if (!(parsed_basename.has_job && parsed_basename.has_replica &&
parsed_basename.has_task && parsed_basename.has_type &&
parsed_basename.has_id)) {
return errors::InvalidArgument("Basename: ", basename,
" should be fully "
"specified.");
}
ParsedName parsed_name;
if (ParseLocalName(fullname, &parsed_name)) {
CompleteName(parsed_basename, &parsed_name);
*canonical_name = ParsedNameToString(parsed_name);
return absl::OkStatus();
}
if (ParseFullName(fullname, &parsed_name)) {
CompleteName(parsed_basename, &parsed_name);
*canonical_name = ParsedNameToString(parsed_name);
return absl::OkStatus();
}
return errors::InvalidArgument("Could not parse ", fullname,
" into a device "
"specification.");
}
string DeviceNameUtils::ParsedNameToString(const ParsedName& pn) {
string buf;
if (pn.has_job) strings::StrAppend(&buf, "/job:", pn.job);
if (pn.has_replica) strings::StrAppend(&buf, "/replica:", pn.replica);
if (pn.has_task) strings::StrAppend(&buf, "/task:", pn.task);
if (pn.has_type) {
strings::StrAppend(&buf, "/device:", pn.type, ":");
if (pn.has_id) {
strings::StrAppend(&buf, pn.id);
} else {
strings::StrAppend(&buf, "*");
}
}
return buf;
}
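// Returns true if every field set in `less_specific` has the same value in
// `more_specific`, i.e. `less_specific` is a (possibly partial)
// specification of `more_specific`.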
bool DeviceNameUtils::IsSpecification(const ParsedName& less_specific,
const ParsedName& more_specific) {
if (less_specific.has_job &&
(!more_specific.has_job || (less_specific.job != more_specific.job))) {
return false;
}
if (less_specific.has_replica &&
(!more_specific.has_replica ||
(less_specific.replica != more_specific.replica))) {
return false;
}
if (less_specific.has_task &&
(!more_specific.has_task || (less_specific.task != more_specific.task))) {
return false;
}
if (less_specific.has_type &&
(!more_specific.has_type || (less_specific.type != more_specific.type))) {
return false;
}
if (less_specific.has_id &&
(!more_specific.has_id || (less_specific.id != more_specific.id))) {
return false;
}
return true;
}
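// Two parsed names are compatible when no field that is set in both
// disagrees; fields set in only one name are ignored.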
bool DeviceNameUtils::AreCompatibleDevNames(const ParsedName& a,
const ParsedName& b) {
if (a.has_job && b.has_job && (a.job != b.job)) {
return false;
}
if (a.has_replica && b.has_replica && (a.replica != b.replica)) {
return false;
}
if (a.has_task && b.has_task && (a.task != b.task)) {
return false;
}
if (a.has_type && b.has_type && (a.type != b.type)) {
return false;
}
if (a.has_id && b.has_id && (a.id != b.id)) {
return false;
}
return true;
}
void DeviceNameUtils::EnsureSpecification(ParsedName* more_specific,
const ParsedName& less_specific) {
if (less_specific.has_job) {
more_specific->has_job = true;
more_specific->job = less_specific.job;
}
if (less_specific.has_replica) {
more_specific->has_replica = true;
more_specific->replica = less_specific.replica;
}
if (less_specific.has_task) {
more_specific->has_task = true;
more_specific->task = less_specific.task;
}
if (less_specific.has_type) {
more_specific->has_type = true;
more_specific->type = less_specific.type;
}
if (less_specific.has_id) {
more_specific->has_id = true;
more_specific->id = less_specific.id;
}
}
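// Matches a fully specified `name` against `pattern`; CHECK-fails if `name`
// is not fully specified.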
bool DeviceNameUtils::IsCompleteSpecification(const ParsedName& pattern,
const ParsedName& name) {
CHECK(name.has_job && name.has_replica && name.has_task && name.has_type &&
name.has_id);
if (pattern.has_job && (pattern.job != name.job)) return false;
if (pattern.has_replica && (pattern.replica != name.replica)) return false;
if (pattern.has_task && (pattern.task != name.task)) return false;
if (pattern.has_type && (pattern.type != name.type)) return false;
if (pattern.has_id && (pattern.id != name.id)) return false;
return true;
}
namespace {
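// Merges `other` into `target`. A conflicting field is an error unless
// `allow_soft_placement` is set, in which case the field is either
// overwritten from `other` (when `override_conflicts` is set) or cleared so
// that placement can choose later.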
absl::Status MergeDevNamesImpl(DeviceNameUtils::ParsedName* target,
const DeviceNameUtils::ParsedName& other,
bool allow_soft_placement,
bool override_conflicts) {
const auto& ParsedNameToString = DeviceNameUtils::ParsedNameToString;
if (other.has_job) {
if (target->has_job && target->job != other.job) {
return errors::InvalidArgument(
"Cannot merge devices with incompatible jobs: '",
ParsedNameToString(*target), "' and '", ParsedNameToString(other),
"'");
} else {
target->has_job = other.has_job;
target->job = other.job;
}
}
if (other.has_replica) {
if (target->has_replica && target->replica != other.replica) {
return errors::InvalidArgument(
"Cannot merge devices with incompatible replicas: '",
ParsedNameToString(*target), "' and '", ParsedNameToString(other),
"'");
} else {
target->has_replica = other.has_replica;
target->replica = other.replica;
}
}
if (other.has_task) {
if (target->has_task && target->task != other.task) {
return errors::InvalidArgument(
"Cannot merge devices with incompatible tasks: '",
ParsedNameToString(*target), "' and '", ParsedNameToString(other),
"'");
} else {
target->has_task = other.has_task;
target->task = other.task;
}
}
if (other.has_type) {
if (target->has_type && target->type != other.type) {
if (!allow_soft_placement) {
return errors::InvalidArgument(
"Cannot merge devices with incompatible types: '",
ParsedNameToString(*target), "' and '", ParsedNameToString(other),
"'");
} else if (override_conflicts) {
target->type = other.type;
} else {
target->has_id = false;
target->has_type = false;
return absl::OkStatus();
}
} else {
target->has_type = other.has_type;
target->type = other.type;
}
}
if (other.has_id) {
if (target->has_id && target->id != other.id) {
if (!allow_soft_placement) {
return errors::InvalidArgument(
"Cannot merge devices with incompatible ids: '",
ParsedNameToString(*target), "' and '", ParsedNameToString(other),
"'");
} else if (override_conflicts) {
target->id = other.id;
} else {
target->has_id = false;
return absl::OkStatus();
}
} else {
target->has_id = other.has_id;
target->id = other.id;
}
}
return absl::OkStatus();
}
}
absl::Status DeviceNameUtils::MergeDevNames(ParsedName* target,
const ParsedName& other,
bool allow_soft_placement) {
  return MergeDevNamesImpl(target, other, allow_soft_placement,
                           /*override_conflicts=*/false);
}
absl::Status DeviceNameUtils::MergeOverrideDevNames(ParsedName* target,
const ParsedName& other) {
  return MergeDevNamesImpl(target, other, /*allow_soft_placement=*/true,
                           /*override_conflicts=*/true);
}
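// Copies into `target` only the fields of `other` that `target` leaves
// unset; fields already set in `target` are never overwritten.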
void DeviceNameUtils::MergeUnsetDevNames(ParsedName* target,
const ParsedName& other) {
if (other.has_job && !target->has_job) {
target->has_job = other.has_job;
target->job = other.job;
}
if (other.has_replica && !target->has_replica) {
target->has_replica = other.has_replica;
target->replica = other.replica;
}
if (other.has_task && !target->has_task) {
target->has_task = other.has_task;
target->task = other.task;
}
if (other.has_type && !target->has_type) {
target->has_type = other.has_type;
target->type = other.type;
}
if (other.has_id && !target->has_id) {
target->has_id = other.has_id;
target->id = other.id;
}
}
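// Two devices share an address space when job, replica, and task are all
// set and pairwise equal.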
bool DeviceNameUtils::IsSameAddressSpace(const ParsedName& a,
const ParsedName& b) {
return (a.has_job && b.has_job && (a.job == b.job)) &&
(a.has_replica && b.has_replica && (a.replica == b.replica)) &&
(a.has_task && b.has_task && (a.task == b.task));
}
bool DeviceNameUtils::IsSameAddressSpace(absl::string_view src,
absl::string_view dst) {
ParsedName x;
ParsedName y;
return ParseFullName(src, &x) && ParseFullName(dst, &y) &&
IsSameAddressSpace(x, y);
}
bool DeviceNameUtils::IsDifferentAddressSpace(const ParsedName& a,
const ParsedName& b) {
return (a.has_job && b.has_job && (a.job != b.job)) ||
(a.has_replica && b.has_replica && (a.replica != b.replica)) ||
(a.has_task && b.has_task && (a.task != b.task));
}
const DeviceNameUtils::ParsedName DeviceNameUtils::AddressSpace(
const ParsedName& name) {
ParsedName address_space;
address_space.has_job = name.has_job;
address_space.has_replica = name.has_replica;
address_space.has_task = name.has_task;
address_space.job = name.job;
address_space.replica = name.replica;
address_space.task = name.task;
return address_space;
}
string DeviceNameUtils::LocalName(absl::string_view type, int id) {
return strings::StrCat("/device:", type, ":", id);
}
namespace {
string LegacyLocalName(absl::string_view type, int id) {
return strings::StrCat(type, ":", id);
}
}
string DeviceNameUtils::LocalName(absl::string_view fullname) {
ParsedName x;
CHECK(ParseFullName(fullname, &x)) << fullname;
return LocalName(x.type, x.id);
}
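// Parses a local device name of the form "<type>:<id>", e.g. "CPU:0".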
bool DeviceNameUtils::ParseLocalName(absl::string_view name, ParsedName* p) {
if (!ConsumeDeviceType(&name, &p->type)) {
return false;
}
p->has_type = true;
if (!absl::ConsumePrefix(&name, ":")) {
return false;
}
if (!ConsumeNumber(&name, &p->id)) {
return false;
}
p->has_id = true;
return name.empty();
}
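// Splits a full device name into a "/job:<j>/replica:<r>/task:<t>" prefix
// and a "<type>:<id>" suffix; the type and id must be present.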
bool DeviceNameUtils::SplitDeviceName(absl::string_view name, string* task,
string* device) {
ParsedName pn;
if (ParseFullName(name, &pn) && pn.has_type && pn.has_id) {
task->clear();
    task->reserve(
        (pn.has_job ? (5 + pn.job.size()) : 0) +
        (pn.has_replica ? (9 + 4 /*upper bound on replica digits*/) : 0) +
        (pn.has_task ? (6 + 4 /*upper bound on task digits*/) : 0));
if (pn.has_job) {
strings::StrAppend(task, "/job:", pn.job);
}
if (pn.has_replica) {
strings::StrAppend(task, "/replica:", pn.replica);
}
if (pn.has_task) {
strings::StrAppend(task, "/task:", pn.task);
}
device->clear();
strings::StrAppend(device, pn.type, ":", pn.id);
return true;
}
return false;
}
bool DeviceNameUtils::GetTaskName(const ParsedName& pn, string* task) {
if (pn.has_job && pn.has_replica && pn.has_task) {
task->clear();
    task->reserve((5 + pn.job.size()) +
                  (9 + 4 /*upper bound on replica digits*/) +
                  (6 + 4 /*upper bound on task digits*/));
strings::StrAppend(task, "/job:", pn.job);
strings::StrAppend(task, "/replica:", pn.replica);
strings::StrAppend(task, "/task:", pn.task);
return true;
}
return false;
}
std::vector<string> DeviceNameUtils::GetNamesForDeviceMappings(
const ParsedName& pn) {
if (pn.has_job && pn.has_replica && pn.has_task && pn.has_type && pn.has_id) {
return {
DeviceNameUtils::FullName(pn.job, pn.replica, pn.task, pn.type, pn.id),
LegacyName(pn.job, pn.replica, pn.task, pn.type, pn.id)};
} else {
return {};
}
}
std::vector<string> DeviceNameUtils::GetLocalNamesForDeviceMappings(
const ParsedName& pn) {
if (pn.has_type && pn.has_id) {
return {DeviceNameUtils::LocalName(pn.type, pn.id),
LegacyLocalName(pn.type, pn.id)};
} else {
return {};
}
}
absl::Status DeviceNameUtils::DeviceNameToCpuDeviceName(
const string& device_name, string* host_device_name) {
DeviceNameUtils::ParsedName device;
if (!DeviceNameUtils::ParseFullName(device_name, &device)) {
return errors::Internal("Could not parse device name ", device_name);
}
device.type = "CPU";
device.has_type = true;
device.id = 0;
device.has_id = true;
*host_device_name = DeviceNameUtils::ParsedNameToString(device);
return absl::OkStatus();
}
std::ostream& operator<<(std::ostream& os,
const DeviceNameUtils::ParsedName& x) {
os << DeviceNameUtils::ParsedNameToString(x);
return os;
}
} | #include "xla/tsl/util/device_name_utils.h"
#include <vector>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/strcat.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace tsl {
namespace {
bool RoundTripParsedName(const string& original, const string& expected) {
DeviceNameUtils::ParsedName p;
if (!DeviceNameUtils::ParseFullName(original, &p)) {
return false;
}
string round_tripped = DeviceNameUtils::ParsedNameToString(p);
return (round_tripped == expected);
}
enum NamePart { kJob = 0x01, kReplica = 0x02, kTask = 0x04, kDevice = 0x08 };
bool RoundTripPartialName(int parts_to_test, const std::vector<string>& parts,
bool explicitDevice) {
string original, expected;
if (parts_to_test & kJob) {
strings::StrAppend(&original, "/job:", parts[0]);
strings::StrAppend(&expected, "/job:", parts[0]);
}
if (parts_to_test & kReplica) {
strings::StrAppend(&original, "/replica:", parts[1]);
strings::StrAppend(&expected, "/replica:", parts[1]);
}
if (parts_to_test & kTask) {
strings::StrAppend(&original, "/task:", parts[2]);
strings::StrAppend(&expected, "/task:", parts[2]);
}
if (parts_to_test & kDevice) {
if (explicitDevice) {
strings::StrAppend(&original, "/device:", parts[3]);
strings::StrAppend(&expected, "/device:", parts[3]);
} else {
strings::StrAppend(&original, "/", parts[3]);
strings::StrAppend(&expected,
"/device:", absl::AsciiStrToUpper(parts[3]));
}
}
return RoundTripParsedName(original, expected);
}
}
TEST(DeviceNameUtilsTest, Basic) {
EXPECT_EQ(DeviceNameUtils::FullName("hello", 1, 2, "CPU", 3),
"/job:hello/replica:1/task:2/device:CPU:3");
{
DeviceNameUtils::ParsedName p;
EXPECT_FALSE(DeviceNameUtils::ParseFullName("foobar", &p));
EXPECT_FALSE(DeviceNameUtils::ParseFullName(
"/job:123/replica:1/task:2/device:GPU:3", &p));
EXPECT_FALSE(
DeviceNameUtils::ParseFullName("/job:123/replica:1/task:2/gpu:", &p));
EXPECT_FALSE(DeviceNameUtils::ParseFullName(
"/job:123/replica:1/task:2/device:gpu:", &p));
EXPECT_FALSE(DeviceNameUtils::ParseFullName(
"/job:foo/replica:-1/task:2/device:GPU:3", &p));
EXPECT_FALSE(DeviceNameUtils::ParseFullName(
"/job:foo/replica:1/task:-2/device:GPU:3", &p));
EXPECT_FALSE(
DeviceNameUtils::ParseFullName("/job:foo/replica:1/task:2/bar:3", &p));
EXPECT_FALSE(DeviceNameUtils::ParseFullName(
"/job:foo/replica:1/task:2/device:GPU:3/extra", &p));
EXPECT_TRUE(DeviceNameUtils::ParseFullName(
"/job:foo/replica:1/task:2/device:GPU:3", &p));
EXPECT_TRUE(p.has_job);
EXPECT_TRUE(p.has_replica);
EXPECT_TRUE(p.has_task);
EXPECT_TRUE(p.has_type);
EXPECT_TRUE(p.has_id);
EXPECT_EQ(p.job, "foo");
EXPECT_EQ(p.replica, 1);
EXPECT_EQ(p.task, 2);
EXPECT_EQ(p.type, "GPU");
EXPECT_EQ(p.id, 3);
}
{
DeviceNameUtils::ParsedName p;
EXPECT_TRUE(DeviceNameUtils::ParseFullName(
"/job:foo_bar/replica:1/task:2/device:GPU:3", &p));
EXPECT_TRUE(DeviceNameUtils::ParseFullOrLocalName(
"/job:foo_bar/replica:1/task:2/device:GPU:3", &p));
EXPECT_TRUE(p.has_job);
EXPECT_TRUE(p.has_replica);
EXPECT_TRUE(p.has_task);
EXPECT_TRUE(p.has_type);
EXPECT_TRUE(p.has_id);
EXPECT_EQ(p.job, "foo_bar");
EXPECT_EQ(p.replica, 1);
EXPECT_EQ(p.task, 2);
EXPECT_EQ(p.type, "GPU");
EXPECT_EQ(p.id, 3);
}
{
DeviceNameUtils::ParsedName p;
EXPECT_TRUE(DeviceNameUtils::ParseFullName(
"/job:foo_bar/replica:1/task:2/device:GPU:3", &p));
EXPECT_TRUE(p.has_job);
EXPECT_TRUE(p.has_replica);
EXPECT_TRUE(p.has_task);
EXPECT_TRUE(p.has_type);
EXPECT_TRUE(p.has_id);
EXPECT_EQ(p.job, "foo_bar");
EXPECT_EQ(p.replica, 1);
EXPECT_EQ(p.task, 2);
EXPECT_EQ(p.type, "GPU");
EXPECT_EQ(p.id, 3);
}
{
DeviceNameUtils::ParsedName p;
EXPECT_TRUE(DeviceNameUtils::ParseFullName("/job:*/replica:4/gpu:*", &p));
EXPECT_FALSE(p.has_job);
EXPECT_TRUE(p.has_replica);
EXPECT_FALSE(p.has_task);
EXPECT_TRUE(p.has_type);
EXPECT_FALSE(p.has_id);
EXPECT_EQ(p.replica, 4);
EXPECT_EQ(p.type, "GPU");
}
{
DeviceNameUtils::ParsedName p;
EXPECT_TRUE(
DeviceNameUtils::ParseFullName("/job:*/replica:4/device:GPU:*", &p));
EXPECT_FALSE(p.has_job);
EXPECT_TRUE(p.has_replica);
EXPECT_FALSE(p.has_task);
EXPECT_TRUE(p.has_type);
EXPECT_FALSE(p.has_id);
EXPECT_EQ(p.replica, 4);
EXPECT_EQ(p.type, "GPU");
}
{
DeviceNameUtils::ParsedName p;
EXPECT_TRUE(
DeviceNameUtils::ParseFullName("/job:*/device:GPU/replica:4", &p));
EXPECT_FALSE(p.has_job);
EXPECT_TRUE(p.has_replica);
EXPECT_FALSE(p.has_task);
EXPECT_TRUE(p.has_type);
EXPECT_FALSE(p.has_id);
EXPECT_EQ(p.replica, 4);
EXPECT_EQ(p.type, "GPU");
}
{
DeviceNameUtils::ParsedName p;
EXPECT_TRUE(DeviceNameUtils::ParseFullName(
"/job:*/replica:4/device:myspecialdevice:13", &p));
EXPECT_FALSE(p.has_job);
EXPECT_TRUE(p.has_replica);
EXPECT_FALSE(p.has_task);
EXPECT_TRUE(p.has_type);
EXPECT_TRUE(p.has_id);
EXPECT_EQ(p.replica, 4);
EXPECT_EQ(p.type, "myspecialdevice");
EXPECT_EQ(p.id, 13);
}
{
DeviceNameUtils::ParsedName p;
EXPECT_TRUE(DeviceNameUtils::ParseFullName("/", &p));
EXPECT_FALSE(p.has_job);
EXPECT_FALSE(p.has_replica);
EXPECT_FALSE(p.has_task);
EXPECT_FALSE(p.has_type);
EXPECT_FALSE(p.has_id);
}
{
DeviceNameUtils::ParsedName p;
EXPECT_TRUE(
DeviceNameUtils::ParseFullName("/job:*/replica:4/device:GPU:5", &p));
EXPECT_FALSE(p.has_job);
EXPECT_TRUE(p.has_replica);
EXPECT_FALSE(p.has_task);
EXPECT_TRUE(p.has_type);
EXPECT_TRUE(p.has_id);
EXPECT_EQ(p.replica, 4);
EXPECT_EQ(p.type, "GPU");
EXPECT_EQ(p.id, 5);
}
{
DeviceNameUtils::ParsedName p;
EXPECT_TRUE(DeviceNameUtils::ParseFullName("/gpu:*/job:*/replica:4", &p));
EXPECT_FALSE(p.has_job);
EXPECT_TRUE(p.has_replica);
EXPECT_FALSE(p.has_task);
EXPECT_TRUE(p.has_type);
EXPECT_FALSE(p.has_id);
EXPECT_EQ(p.replica, 4);
EXPECT_EQ(p.type, "GPU");
}
EXPECT_TRUE(DeviceNameUtils::IsSameAddressSpace(
"/job:foo/replica:1/task:2/cpu:3",
"/job:foo/replica:1/task:2/device:GPU:4"));
EXPECT_FALSE(DeviceNameUtils::IsSameAddressSpace(
"/job:foo/replica:1/task:2/cpu:3",
"/job:foo/replica:1/task:3/device:GPU:4"));
EXPECT_FALSE(DeviceNameUtils::IsSameAddressSpace(
"/job:foo/replica:1/task:2/cpu:3",
"/job:foo/replica:10/task:2/device:GPU:4"));
EXPECT_FALSE(DeviceNameUtils::IsSameAddressSpace(
"/job:foo/replica:1/task:2/cpu:3",
"/job:bar/replica:1/task:2/device:GPU:4"));
EXPECT_EQ(DeviceNameUtils::LocalName("CPU", 1), "/device:CPU:1");
EXPECT_EQ(DeviceNameUtils::LocalName("GPU", 2), "/device:GPU:2");
EXPECT_EQ(DeviceNameUtils::LocalName("MySpecialDevice", 13),
"/device:MySpecialDevice:13");
EXPECT_EQ(
DeviceNameUtils::LocalName("/job:foo/replica:1/task:2/device:CPU:3"),
"/device:CPU:3");
EXPECT_EQ(DeviceNameUtils::LocalName("/job:foo/replica:1/task:2/cpu:3"),
"/device:CPU:3");
EXPECT_EQ(
DeviceNameUtils::LocalName("/job:foo/replica:1/task:2/device:abc:73"),
"/device:abc:73");
{
DeviceNameUtils::ParsedName p;
EXPECT_TRUE(DeviceNameUtils::ParseLocalName("CPU:10", &p));
EXPECT_TRUE(DeviceNameUtils::ParseFullOrLocalName("CPU:10", &p));
EXPECT_EQ(p.type, "CPU");
EXPECT_EQ(p.id, 10);
EXPECT_FALSE(DeviceNameUtils::ParseLocalName("cpu:abc", &p));
EXPECT_FALSE(DeviceNameUtils::ParseLocalName("abc:", &p));
EXPECT_FALSE(DeviceNameUtils::ParseLocalName("abc", &p));
EXPECT_FALSE(DeviceNameUtils::ParseLocalName("myspecialdevice", &p));
EXPECT_FALSE(DeviceNameUtils::ParseFullOrLocalName("myspecialdevice", &p));
}
{
for (int i = 0; i < 0x10; ++i) {
EXPECT_TRUE(RoundTripPartialName(i, {"foo", "3", "2", "CPU:3"},
false));
EXPECT_TRUE(RoundTripPartialName(i, {"foo", "3", "2", "GPU:3"},
false));
EXPECT_TRUE(RoundTripPartialName(i, {"foo", "3", "2", "cpu:3"},
false));
EXPECT_TRUE(RoundTripPartialName(i, {"foo", "3", "2", "gpu:3"},
false));
EXPECT_TRUE(RoundTripPartialName(i, {"foo", "3", "2", "CPU:3"},
true));
EXPECT_TRUE(RoundTripPartialName(i, {"foo", "3", "2", "GPU:3"},
true));
EXPECT_TRUE(RoundTripPartialName(i, {"foo", "3", "2", "cpu:3"},
true));
EXPECT_TRUE(RoundTripPartialName(i, {"foo", "3", "2", "gpu:3"},
true));
EXPECT_TRUE(RoundTripPartialName(i, {"foo", "3", "2", "someDevice:3"},
true));
}
}
{
DeviceNameUtils::ParsedName x, y;
DeviceNameUtils::ParseFullName("/job:work/replica:1/task:3/device:GPU:*",
&x);
DeviceNameUtils::ParseFullName("/device:CPU:*", &y);
EXPECT_FALSE(DeviceNameUtils::AreCompatibleDevNames(x, y));
}
{
DeviceNameUtils::ParsedName x, y;
DeviceNameUtils::ParseFullName("/job:work/replica:1/task:3", &x);
DeviceNameUtils::ParseFullName("/device:CPU:*", &y);
EXPECT_TRUE(DeviceNameUtils::AreCompatibleDevNames(x, y));
}
}
static bool IsCSHelper(absl::string_view pattern, absl::string_view actual) {
DeviceNameUtils::ParsedName p, a;
EXPECT_TRUE(DeviceNameUtils::ParseFullName(pattern, &p));
EXPECT_TRUE(DeviceNameUtils::ParseFullName(actual, &a));
return DeviceNameUtils::IsCompleteSpecification(p, a);
}
TEST(DeviceNameUtilsTest, IsCompleteSpecification) {
EXPECT_TRUE(IsCSHelper("/job:*", "/job:work/replica:1/task:2/device:GPU:3"));
EXPECT_TRUE(IsCSHelper("/job:*/replica:*",
"/job:work/replica:1/task:2/device:GPU:3"));
EXPECT_TRUE(
IsCSHelper("/job:*/task:*", "/job:work/replica:1/task:2/device:GPU:3"));
EXPECT_TRUE(IsCSHelper("/job:*/replica:*/task:*",
"/job:work/replica:1/task:2/device:GPU:3"));
EXPECT_TRUE(IsCSHelper("/job:*/replica:*/gpu:*",
"/job:work/replica:1/task:2/device:GPU:3"));
EXPECT_FALSE(
IsCSHelper("/cpu:*", "/job:worker/replica:1/task:2/device:GPU:3"));
EXPECT_FALSE(
IsCSHelper("/device:GPU:2", "/job:worker/replica:1/task:2/device:GPU:1"));
EXPECT_TRUE(
IsCSHelper("/gpu:*", "/job:worker/replica:1/task:2/device:GPU:3"));
}
static bool IsSpecHelper(absl::string_view pattern, absl::string_view actual) {
DeviceNameUtils::ParsedName p, a;
EXPECT_TRUE(DeviceNameUtils::ParseFullName(pattern, &p));
EXPECT_TRUE(DeviceNameUtils::ParseFullName(actual, &a));
return DeviceNameUtils::IsSpecification(p, a);
}
TEST(DeviceNameUtilsTest, IsSpecification) {
EXPECT_TRUE(
IsSpecHelper("/job:*", "/job:work/replica:1/task:2/device:GPU:3"));
EXPECT_TRUE(IsSpecHelper("/job:*", "/job:work/replica:1/device:GPU:3"));
EXPECT_TRUE(IsSpecHelper("/job:*", "/job:work/replica:1"));
EXPECT_TRUE(IsSpecHelper("/job:*", "/replica:1"));
EXPECT_TRUE(IsSpecHelper("/job:*", "/job:work"));
EXPECT_TRUE(IsSpecHelper("/job:*/replica:*",
"/job:work/replica:1/task:2/device:GPU:3"));
EXPECT_TRUE(IsSpecHelper("/job:work/replica:1/gpu:*",
"/job:work/replica:1/task:2/device:GPU:3"));
EXPECT_TRUE(IsSpecHelper("/job:work/replica:1/device:GPU:3",
"/job:work/replica:1/task:2/device:GPU:3"));
EXPECT_TRUE(IsSpecHelper("/job:work/replica:1/task:2",
"/job:work/replica:1/task:2/device:GPU:3"));
EXPECT_TRUE(IsSpecHelper("/job:work/replica:*/task:2",
"/job:work/replica:1/task:2/device:GPU:3"));
EXPECT_TRUE(IsSpecHelper("/task:*", "/job:*/replica:1/task:2/device:GPU:3"));
EXPECT_TRUE(IsSpecHelper("/task:2", "/job:*/replica:1/task:2/device:GPU:3"));
EXPECT_TRUE(IsSpecHelper("/cpu:*", "/job:*/replica:1/task:2/cpu:1"));
EXPECT_TRUE(IsSpecHelper("/cpu:0", "/cpu:0"));
EXPECT_TRUE(
IsSpecHelper("/gpu:*", "/job:worker/replica:1/task:2/device:GPU:3"));
EXPECT_FALSE(
IsSpecHelper("/job:worker/replica:1/task:2/device:GPU:3", "/gpu:*"));
EXPECT_FALSE(IsSpecHelper("/cpu:*", "/job:*/replica:1/task:2"));
EXPECT_FALSE(IsSpecHelper("/cpu:*", "/job:*/replica:1/task:2/device:GPU:1"));
EXPECT_FALSE(
IsSpecHelper("/cpu:*", "/job:worker/replica:1/task:2/device:GPU:3"));
EXPECT_FALSE(IsSpecHelper("/device:GPU:2",
"/job:worker/replica:1/task:2/device:GPU:1"));
EXPECT_FALSE(IsSpecHelper("/job:work/replica:*/task:0",
"/job:work/replica:1/task:2/device:GPU:3"));
EXPECT_FALSE(IsSpecHelper("/job:work/replica:0/task:2",
"/job:work/replica:*/task:2/device:GPU:3"));
}
TEST(DeviceNameUtilsTest, SplitDeviceName) {
string task;
string device;
EXPECT_TRUE(DeviceNameUtils::SplitDeviceName(
"/job:foo/replica:1/task:2/cpu:1", &task, &device));
EXPECT_EQ("/job:foo/replica:1/task:2", task);
EXPECT_EQ("CPU:1", device);
EXPECT_TRUE(DeviceNameUtils::SplitDeviceName(
"/job:foo/cpu:1/task:2/replica:1", &task, &device));
EXPECT_EQ("/job:foo/replica:1/task:2", task);
EXPECT_EQ("CPU:1", device);
EXPECT_TRUE(
DeviceNameUtils::SplitDeviceName("/device:GPU:3", &task, &device));
EXPECT_EQ("", task);
EXPECT_EQ("GPU:3", device);
EXPECT_FALSE(DeviceNameUtils::SplitDeviceName("gpu:3", &task, &device));
EXPECT_FALSE(DeviceNameUtils::SplitDeviceName("/job:foo/task:2/replica:1",
&task, &device));
EXPECT_TRUE(DeviceNameUtils::SplitDeviceName("/device:myspecialdevice:3",
&task, &device));
EXPECT_EQ("", task);
EXPECT_EQ("myspecialdevice:3", device);
}
static DeviceNameUtils::ParsedName Name(const string& str) {
DeviceNameUtils::ParsedName ret;
CHECK(DeviceNameUtils::ParseFullName(str, &ret)) << "Invalid name: " << str;
return ret;
}
static void MergeDevNamesHelperImpl(const string& name_a, const string& name_b,
const string& expected_merge_name,
bool allow_soft_placement) {
DeviceNameUtils::ParsedName target_a = Name(name_a);
TF_EXPECT_OK(DeviceNameUtils::MergeDevNames(&target_a, Name(name_b),
allow_soft_placement));
DeviceNameUtils::ParsedName target_b = Name(name_b);
TF_EXPECT_OK(DeviceNameUtils::MergeDevNames(&target_b, Name(name_a),
allow_soft_placement));
EXPECT_EQ(target_a, target_b);
EXPECT_EQ(target_a, Name(expected_merge_name));
EXPECT_EQ(target_b, Name(expected_merge_name));
}
static void MergeDevNamesHelper(const string& name_a, const string& name_b,
const string& expected_merge_name) {
MergeDevNamesHelperImpl(name_a, name_b, expected_merge_name, false);
}
static void MergeDevNamesHelperAllowSoftPlacement(
const string& name_a, const string& name_b,
const string& expected_merge_name) {
MergeDevNamesHelperImpl(name_a, name_b, expected_merge_name, true);
}
static void MergeDevNamesError(const string& name_a, const string& name_b,
const string& expected_error_substr) {
DeviceNameUtils::ParsedName target_a = Name(name_a);
absl::Status s = DeviceNameUtils::MergeDevNames(&target_a, Name(name_b));
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(absl::StrContains(s.message(), expected_error_substr)) << s;
}
static void MergeOverrideHelper(const string& target, const string& name,
const string& expected_merge_name) {
DeviceNameUtils::ParsedName parsed_target = Name(target);
TF_EXPECT_OK(
DeviceNameUtils::MergeOverrideDevNames(&parsed_target, Name(name)));
DeviceNameUtils::ParsedName parsed_expected = Name(expected_merge_name);
EXPECT_EQ(parsed_target, parsed_expected)
<< "parsed_target: " << DeviceNameUtils::ParsedNameToString(parsed_target)
<< " expected_name: "
<< DeviceNameUtils::ParsedNameToString(parsed_expected);
}
static void MergeUnsetDevNamesHelper(const string& name_a, const string& name_b,
const string& expected_merge_name_ab,
const string& expected_merge_name_ba) {
DeviceNameUtils::ParsedName target_a = Name(name_a);
DeviceNameUtils::MergeUnsetDevNames(&target_a, Name(name_b));
EXPECT_EQ(target_a, Name(expected_merge_name_ab));
DeviceNameUtils::ParsedName target_b = Name(name_b);
DeviceNameUtils::MergeUnsetDevNames(&target_b, Name(name_a));
EXPECT_EQ(target_b, Name(expected_merge_name_ba));
}
TEST(DeviceNameUtilsTest, MergeDevNames) {
MergeDevNamesHelper("", "", "");
MergeDevNamesHelper("/job:foo/replica:1/task:2/cpu:1",
"/job:foo/replica:1/task:2/cpu:1",
"/job:foo/replica:1/task:2/cpu:1");
MergeDevNamesHelper("", "/job:foo", "/job:foo");
MergeDevNamesHelper("", "/replica:2", "/replica:2");
MergeDevNamesHelper("", "/task:7", "/task:7");
MergeDevNamesHelper("", "/device:GPU:1", "/device:GPU:1");
MergeDevNamesHelper("/job:foo", "/task:7", "/job:foo/task:7");
MergeDevNamesHelper("/job:foo", "/device:GPU:1", "/job:foo/device:GPU:1");
MergeDevNamesHelper("/job:foo/replica:0", "/replica:0/task:1",
"/job:foo/replica:0/task:1");
MergeDevNamesHelper("", "/gpu:*", "/gpu:*");
MergeDevNamesHelper("/gpu:*", "/gpu:*", "/gpu:*");
MergeDevNamesHelper("/device:GPU:1", "/gpu:*", "/device:GPU:1");
MergeDevNamesError("/job:foo", "/job:bar", "incompatible jobs");
MergeDevNamesError("/replica:0", "/replica:1", "incompatible replicas");
MergeDevNamesError("/task:0", "/task:1", "incompatible tasks");
MergeDevNamesError("/gpu:*", "/cpu:*", "incompatible types");
MergeDevNamesError("/device:GPU:0", "/device:GPU:1", "incompatible ids");
}
TEST(DeviceNameUtilsTest, MergeDevNamesAllowSoftPlacement) {
MergeDevNamesHelperAllowSoftPlacement("/gpu:*", "/cpu:1", "");
MergeDevNamesHelperAllowSoftPlacement("/cpu:*", "/device:GPU:1", "");
MergeDevNamesHelperAllowSoftPlacement("/device:GPU:1", "/device:GPU:2",
"/device:GPU:*");
}
TEST(DeviceNameUtilsTest, MergeOverrideDevNames) {
MergeOverrideHelper("", "", "");
MergeOverrideHelper("/job:foo/replica:1/task:2/cpu:1",
"/job:foo/replica:1/task:2/cpu:1",
"/job:foo/replica:1/task:2/cpu:1");
MergeOverrideHelper("", "/job:foo", "/job:foo");
MergeOverrideHelper("", "/replica:2", "/replica:2");
MergeOverrideHelper("", "/task:7", "/task:7");
MergeOverrideHelper("", "/device:GPU:1", "/device:GPU:1");
MergeOverrideHelper("/job:foo", "/task:7", "/job:foo/task:7");
MergeOverrideHelper("/job:foo", "/device:GPU:1", "/job:foo/device:GPU:1");
MergeOverrideHelper("/job:foo/replica:0", "/replica:0/task:1",
"/job:foo/replica:0/task:1");
MergeOverrideHelper("", "/gpu:*", "/gpu:*");
MergeOverrideHelper("/gpu:*", "/gpu:*", "/gpu:*");
MergeOverrideHelper("/device:GPU:1", "/gpu:*", "/device:GPU:1");
MergeOverrideHelper("/gpu:0", "/cpu:1", "/cpu:1");
MergeOverrideHelper("/gpu:*", "/cpu:1", "/cpu:1");
MergeOverrideHelper("/cpu:*", "/device:GPU:1", "/gpu:1");
MergeOverrideHelper("/device:GPU:1", "/device:GPU:2", "/device:GPU:2");
MergeOverrideHelper("/job:foo/CPU:*", "/device:GPU:1", "/job:foo/GPU:1");
MergeOverrideHelper("/cpu:*", "/job:foo/device:GPU:1", "/job:foo/GPU:1");
MergeOverrideHelper("/task:0/cpu:*", "/device:GPU:1", "/task:0/GPU:1");
MergeOverrideHelper("/cpu:*", "/task:0/device:GPU:1", "/task:0/GPU:1");
}
TEST(DeviceNameUtilsTest, MergeUnsetDevNames) {
MergeUnsetDevNamesHelper("", "", "", "");
MergeUnsetDevNamesHelper(
"/job:foo/replica:1/task:2/cpu:1", "/job:foo/replica:1/task:2/cpu:1",
"/job:foo/replica:1/task:2/cpu:1", "/job:foo/replica:1/task:2/cpu:1");
MergeUnsetDevNamesHelper("", "/job:foo", "/job:foo", "/job:foo");
MergeUnsetDevNamesHelper("", "/replica:2", "/replica:2", "/replica:2");
MergeUnsetDevNamesHelper("", "/task:7", "/task:7", "/task:7");
MergeUnsetDevNamesHelper("", "/device:GPU:1", "/device:GPU:1",
"/device:GPU:1");
MergeUnsetDevNamesHelper("/job:foo", "/task:7", "/job:foo/task:7",
"/job:foo/task:7");
MergeUnsetDevNamesHelper("/job:foo", "/device:GPU:1", "/job:foo/device:GPU:1",
"/job:foo/device:GPU:1");
MergeUnsetDevNamesHelper("/job:foo/replica:0", "/replica:0/task:1",
"/job:foo/replica:0/task:1",
"/job:foo/replica:0/task:1");
MergeUnsetDevNamesHelper("", "/gpu:*", "/gpu:*", "/gpu:*");
MergeUnsetDevNamesHelper("/gpu:*", "/gpu:*", "/gpu:*", "/gpu:*");
MergeUnsetDevNamesHelper("/device:GPU:1", "/gpu:*", "/device:GPU:1",
"/device:GPU:1");
MergeUnsetDevNamesHelper("/job:foo", "/job:bar", "/job:foo", "/job:bar");
MergeUnsetDevNamesHelper("/replica:0", "/replica:1", "/replica:0",
"/replica:1");
MergeUnsetDevNamesHelper("/task:0", "/task:1", "/task:0", "/task:1");
MergeUnsetDevNamesHelper("/gpu:*", "/cpu:*", "/gpu:*", "/cpu:*");
MergeUnsetDevNamesHelper("/device:GPU:0", "/device:GPU:1", "/device:GPU:0",
"/device:GPU:1");
MergeUnsetDevNamesHelper("/job:foo/device:GPU", "/job:bar",
"/job:foo/device:GPU", "/job:bar/device:GPU");
}
TEST(DeviceNameUtilsTest, GetNamesForDeviceMappings) {
DeviceNameUtils::ParsedName p =
Name("/job:foo/replica:10/task:0/device:GPU:1");
EXPECT_EQ(absl::StrJoin(DeviceNameUtils::GetNamesForDeviceMappings(p), ","),
"/job:foo/replica:10/task:0/device:GPU:1,"
"/job:foo/replica:10/task:0/gpu:1");
p.has_task = false;
EXPECT_EQ(absl::StrJoin(DeviceNameUtils::GetNamesForDeviceMappings(p), ","),
"");
}
TEST(DeviceNameUtilsTest, CanonicalizeDeviceName) {
string canonical_name;
{
string basename = "/job:foo/replica:10/task:0/device:CPU:0";
TF_EXPECT_OK(DeviceNameUtils::CanonicalizeDeviceName(
"/job:foo/replica:10/task:0/device:CPU:1", basename, &canonical_name));
EXPECT_EQ("/job:foo/replica:10/task:0/device:CPU:1", canonical_name);
TF_EXPECT_OK(DeviceNameUtils::CanonicalizeDeviceName(
"/job:foo/task:0/replica:10/device:CPU:1", basename, &canonical_name));
EXPECT_EQ("/job:foo/replica:10/task:0/device:CPU:1", canonical_name);
TF_EXPECT_OK(DeviceNameUtils::CanonicalizeDeviceName(
"/job:foo/task:0/replica:10/cpu:1", basename, &canonical_name));
EXPECT_EQ("/job:foo/replica:10/task:0/device:CPU:1", canonical_name);
TF_EXPECT_OK(DeviceNameUtils::CanonicalizeDeviceName("CPU:0", basename,
&canonical_name));
EXPECT_EQ("/job:foo/replica:10/task:0/device:CPU:0", canonical_name);
absl::Status s = DeviceNameUtils::CanonicalizeDeviceName(
"/job:foo/task:0/replica/cpu:1", basename, &canonical_name);
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_EQ("", canonical_name);
}
{
string fullname = "/device:CPU:0";
absl::Status s = DeviceNameUtils::CanonicalizeDeviceName(
fullname, "/device:CPU:0", &canonical_name);
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_EQ("", canonical_name);
s = DeviceNameUtils::CanonicalizeDeviceName(
fullname, "/job:foo/task:0/replica/cpu:1", &canonical_name);
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_EQ("", canonical_name);
}
}
TEST(DeviceNameUtilsTest, CompareFullNames) {
EXPECT_FALSE(DeviceNameUtils::CompareFullNames(
"/job:foo/replica:0/task:0/cpu:0", "/job:foo/replica:0/task:0/cpu:0"));
EXPECT_FALSE(DeviceNameUtils::CompareFullNames(
"/job:foo/replica:0/task:0/device:CPU:1",
"/job:foo/replica:0/task:0/device:CPU:0"));
EXPECT_FALSE(DeviceNameUtils::CompareFullNames(
"/job:foo/replica:0/task:1/device:CPU:0",
"/job:foo/replica:0/task:0/device:CPU:0"));
EXPECT_FALSE(DeviceNameUtils::CompareFullNames(
"/job:foo/replica:1/task:0/device:CPU:0",
"/job:foo/replica:0/task:0/device:CPU:0"));
EXPECT_FALSE(DeviceNameUtils::CompareFullNames(
"/job:goo/replica:0/task:0/device:CPU:0",
"/job:foo/replica:0/task:0/device:CPU:0"));
EXPECT_FALSE(DeviceNameUtils::CompareFullNames(
"/job:foo/replica:0/task:0/device:GPU:0",
"/job:foo/replica:0/task:0/device:CPU:0"));
EXPECT_TRUE(DeviceNameUtils::CompareFullNames(
"/job:foo/replica:0/task:0/device:CPU:0",
"/job:foo/replica:0/task:0/device:CPU:1"));
EXPECT_TRUE(DeviceNameUtils::CompareFullNames(
"/job:foo/replica:0/task:0/device:CPU:0",
"/job:foo/replica:0/task:1/device:CPU:0"));
EXPECT_TRUE(DeviceNameUtils::CompareFullNames(
"/job:foo/replica:0/task:0/device:CPU:0",
"/job:foo/replica:1/task:0/device:CPU:0"));
EXPECT_TRUE(DeviceNameUtils::CompareFullNames(
"/job:foo/replica:0/task:0/device:CPU:0",
"/job:goo/replica:0/task:0/device:CPU:0"));
EXPECT_TRUE(DeviceNameUtils::CompareFullNames(
"/job:foo/replica:0/task:0/device:CPU:0",
"/job:foo/replica:0/task:0/device:GPU:0"));
EXPECT_FALSE(
DeviceNameUtils::CompareFullNames("/device:CPU:1", "unparseablename"));
EXPECT_TRUE(
DeviceNameUtils::CompareFullNames("unparseablename", "/device:CPU:1"));
EXPECT_TRUE(DeviceNameUtils::CompareFullNames(
"/replica:0/task:0/device:CPU:1",
"/job:foo/replica:0/task:0/device:CPU:0"));
EXPECT_FALSE(DeviceNameUtils::CompareFullNames(
"/job:foo/replica:0/task:0/device:CPU:0",
"/replica:0/task:0/device:CPU:0"));
EXPECT_TRUE(DeviceNameUtils::CompareFullNames(
"/replica:0/task:0/device:CPU:0", "/replica:0/task:0/device:CPU:1"));
EXPECT_TRUE(DeviceNameUtils::CompareFullNames("/task:0/device:CPU:0",
"/task:0/device:CPU:1"));
EXPECT_TRUE(
DeviceNameUtils::CompareFullNames("/device:CPU:0", "/device:CPU:1"));
}
static void BM_ParseFullName(::testing::benchmark::State& state) {
DeviceNameUtils::ParsedName p;
for (auto s : state) {
DeviceNameUtils::ParseFullName("/job:worker/replica:3/task:0/cpu:0", &p);
}
}
BENCHMARK(BM_ParseFullName);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/util/device_name_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/util/device_name_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2cd5293f-5da0-4198-a703-16efdd40591a | cpp | tensorflow/tensorflow | collective_permute_motion | third_party/xla/xla/service/spmd/collective_permute_motion.cc | third_party/xla/xla/service/spmd/collective_permute_motion_test.cc | #include "xla/service/spmd/collective_permute_motion.h"
#include <cstdint>
#include <deque>
#include <optional>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/while_loop_analysis.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
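// Returns the instructions in the while-loop body whose values are invariant
// across iterations: tuple elements passed straight through to the same
// output index, constants, iota/replica-id/partition-id, and side-effect-free
// elementwise/broadcast/reduce/reshape/dynamic-slice/transpose instructions
// whose operands are themselves loop constants.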
absl::flat_hash_set<HloInstruction*> FindLoopConsts(HloComputation* body) {
HloInstruction* root = body->root_instruction();
CHECK_EQ(root->opcode(), HloOpcode::kTuple);
absl::flat_hash_set<HloInstruction*> loop_consts;
for (int64_t i = 0; i < root->operand_count(); ++i) {
HloInstruction* output = root->mutable_operand(i);
while (output->opcode() == HloOpcode::kReshape ||
output->opcode() == HloOpcode::kCopy) {
output = output->mutable_operand(0);
}
if (output->opcode() == HloOpcode::kGetTupleElement &&
output->tuple_index() == i &&
output->operand(0) == body->parameter_instruction(0)) {
loop_consts.insert(output);
}
}
for (HloInstruction* inst : body->MakeInstructionPostOrder()) {
if (inst->IsConstant() || inst->opcode() == HloOpcode::kIota ||
inst->opcode() == HloOpcode::kReplicaId ||
inst->opcode() == HloOpcode::kPartitionId) {
loop_consts.insert(inst);
continue;
}
if (!inst->IsElementwise() && inst->opcode() != HloOpcode::kBroadcast &&
inst->opcode() != HloOpcode::kReduce &&
inst->opcode() != HloOpcode::kReshape &&
inst->opcode() != HloOpcode::kDynamicSlice &&
inst->opcode() != HloOpcode::kTranspose) {
continue;
}
if (inst->HasSideEffectNoRecurse()) {
continue;
}
if (absl::c_all_of(inst->operands(), [&](const HloInstruction* operand) {
return loop_consts.contains(operand);
})) {
loop_consts.insert(inst);
}
}
return loop_consts;
}
constexpr int64_t kMaxMovableClusterSize = 8;
struct MovableCluster {
int64_t root_tuple_index;
std::vector<HloInstruction*> reverse_order_instructions;
HloInstruction* collective_permute = nullptr;
};
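// Starting from the body root's operand at `root_tuple_index`, walks the
// use-def chain collecting a small, single-use, elementwise cluster that
// contains exactly one collective-permute. Returns nullopt if the cluster is
// too large or otherwise not movable.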
std::optional<MovableCluster> FindMovableClusterAtBodyRoot(
HloComputation* body, int64_t root_tuple_index,
const absl::flat_hash_set<HloInstruction*>& loop_consts) {
HloInstruction* root = body->root_instruction();
CHECK_EQ(root->opcode(), HloOpcode::kTuple);
MovableCluster cluster;
cluster.root_tuple_index = root_tuple_index;
std::deque<HloInstruction*> queue;
queue.push_back(root->mutable_operand(root_tuple_index));
while (!queue.empty()) {
HloInstruction* visiting = queue.front();
queue.pop_front();
if (cluster.reverse_order_instructions.size() >= kMaxMovableClusterSize) {
VLOG(2) << "Cannot move: too many instructions to move";
return std::nullopt;
}
if (visiting->user_count() > 1) {
VLOG(2) << "Cannot move: " << visiting->name() << " used multiple times";
return std::nullopt;
}
cluster.reverse_order_instructions.push_back(visiting);
if (visiting->opcode() == HloOpcode::kCollectivePermute) {
if (cluster.collective_permute != nullptr) {
VLOG(2) << "Cannot move: " << visiting->name()
<< " multiple collective permutes";
return std::nullopt;
}
cluster.collective_permute = visiting;
continue;
}
if (!visiting->IsElementwise() || visiting->HasSideEffectNoRecurse()) {
VLOG(2) << "Cannot move: " << visiting->name() << " unsupported op";
return std::nullopt;
}
for (HloInstruction* operand : visiting->mutable_operands()) {
if (!loop_consts.contains(operand)) {
queue.push_back(operand);
}
}
}
if (cluster.collective_permute == nullptr) {
return std::nullopt;
}
return cluster;
}
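// Returns the tuple indices of `loop` whose outputs are never read after the
// loop; conservatively returns the empty set if any user of the loop is not
// a get-tuple-element.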
absl::flat_hash_set<int64_t> FindIndicesUnusedAfterLoop(HloInstruction* loop) {
absl::flat_hash_set<int64_t> indices;
int64_t count = loop->shape().tuple_shapes_size();
for (int64_t i = 0; i < count; ++i) {
indices.insert(i);
}
for (HloInstruction* user : loop->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
indices.clear();
break;
}
indices.erase(user->tuple_index());
}
return indices;
}
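// For every loop output that is dead after the loop and produced by a
// movable cluster, rebuilds the cluster at the top of the loop body (reading
// the collective-permute's input through the loop-carried value) and uses a
// select to fall back to the original input on the first iteration.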
absl::StatusOr<bool> MoveCollectivePermutes(HloComputation* computation,
HloInstruction* loop) {
HloComputation* body = loop->while_body();
HloInstruction* root = body->root_instruction();
if (root->opcode() != HloOpcode::kTuple ||
loop->operand(0)->opcode() != HloOpcode::kTuple) {
return false;
}
auto maybe_induction_var_idx = GetLoopInductionVarTupleIdx(loop);
if (!maybe_induction_var_idx.has_value()) {
VLOG(2) << "Skip " << loop->name() << ", no induction var";
return false;
}
absl::flat_hash_map<const HloInstruction*, int64_t> output_appear_counts;
for (const HloInstruction* operand : root->operands()) {
auto res = output_appear_counts.emplace(operand, 1);
if (!res.second) {
res.first->second++;
}
}
absl::flat_hash_set<int64_t> unused_indices_after_loop =
FindIndicesUnusedAfterLoop(loop);
const absl::flat_hash_set<HloInstruction*> loop_consts = FindLoopConsts(body);
int64_t induction_var_idx = *maybe_induction_var_idx;
std::vector<HloInstruction*> input_gtes(root->operand_count(), nullptr);
absl::flat_hash_set<int64_t> multi_use_indices;
for (HloInstruction* user : body->parameter_instruction(0)->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
VLOG(2) << "Skip " << loop->name() << ", non-GTE input use";
return false;
}
if (multi_use_indices.contains(user->tuple_index())) {
continue;
}
if (input_gtes[user->tuple_index()] != nullptr) {
multi_use_indices.insert(user->tuple_index());
input_gtes[user->tuple_index()] = nullptr;
} else {
input_gtes[user->tuple_index()] = user;
}
}
HloInstruction* ind_var = input_gtes[induction_var_idx];
if (ind_var == nullptr || ind_var->shape().rank() > 0) {
VLOG(2) << "Skip " << loop->name() << ", non-scalar induction var";
return false;
}
if (root->operand(induction_var_idx)->opcode() != HloOpcode::kAdd &&
root->operand(induction_var_idx)->opcode() != HloOpcode::kSubtract) {
VLOG(2) << "Skip " << loop->name() << ", non-add/sub induction var";
return false;
}
if (root->operand(induction_var_idx)->operand(0) == ind_var) {
if (!root->operand(induction_var_idx)->operand(1)->IsConstant()) {
VLOG(2) << "Skip " << loop->name() << ", non-add/sub const induction var";
return false;
}
} else if (root->operand(induction_var_idx)->operand(1) == ind_var) {
if (!root->operand(induction_var_idx)->operand(0)->IsConstant()) {
VLOG(2) << "Skip " << loop->name() << ", non-add/sub const induction var";
return false;
}
} else {
return false;
}
HloInstruction* ind_var_orig =
loop->mutable_operand(0)->mutable_operand(induction_var_idx);
if (!ind_var_orig->IsConstant()) {
VLOG(2) << "Skip " << loop->name()
<< ", non-constant initial induction var";
return false;
}
bool changed = false;
std::vector<MovableCluster> movable_outputs;
for (int64_t i = 0; i < root->operand_count(); ++i) {
if (output_appear_counts[root->operand(i)] > 1) {
VLOG(2) << "Skip " << loop->name() << " index " << i
<< " appears multiple times in output.";
continue;
}
if (!unused_indices_after_loop.contains(i)) {
VLOG(2) << "Skip " << loop->name() << " index " << i
<< " used after loop.";
continue;
}
auto cluster = FindMovableClusterAtBodyRoot(body, i, loop_consts);
if (!cluster.has_value()) {
VLOG(2) << "Skip " << loop->name() << " index " << i
<< " did not find a movable cluster.";
continue;
}
HloInstruction* input = input_gtes[cluster->root_tuple_index];
HloInstruction* cp = cluster->collective_permute;
if (input == nullptr || cp->operand(0) == input) {
VLOG(2) << "Skip " << loop->name() << " index " << i
<< " collective-permute already at top.";
continue;
}
const std::vector<HloInstruction*> original_input_users = input->users();
absl::flat_hash_map<const HloInstruction*, HloInstruction*> replacement;
replacement[cp->operand(0)] = input;
for (auto it = cluster->reverse_order_instructions.rbegin();
it != cluster->reverse_order_instructions.rend(); ++it) {
HloInstruction* inst = *it;
std::vector<HloInstruction*> new_operands;
for (HloInstruction* operand : inst->mutable_operands()) {
auto rit = replacement.find(operand);
if (rit != replacement.end()) {
new_operands.push_back(rit->second);
} else {
new_operands.push_back(operand);
}
}
HloInstruction* clone = body->AddInstruction(
inst->CloneWithNewOperands(inst->shape(), new_operands));
replacement[inst] = clone;
}
HloInstruction* new_input =
replacement[cluster->reverse_order_instructions[0]];
if (ind_var_orig->parent() != body) {
ind_var_orig = body->AddInstruction(ind_var_orig->Clone());
}
HloInstruction* is_first_iter =
body->AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::ChangeElementType(new_input->shape(), PRED),
body->AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeScalarShape(PRED), ind_var, ind_var_orig,
Comparison::Direction::kEq)),
{}));
new_input = body->AddInstruction(
HloInstruction::CreateTernary(new_input->shape(), HloOpcode::kSelect,
is_first_iter, input, new_input));
for (HloInstruction* user : original_input_users) {
TF_RETURN_IF_ERROR(input->ReplaceUseWith(user, new_input));
}
TF_RETURN_IF_ERROR(root->ReplaceOperandWith(cluster->root_tuple_index,
cp->mutable_operand(0)));
TF_RETURN_IF_ERROR(body->RemoveInstructionAndUnusedOperands(
cluster->reverse_order_instructions[0]));
VLOG(2) << "Moved " << loop->name() << " index " << i;
changed = true;
}
return changed;
}
absl::StatusOr<bool> CollectivePermuteMotion::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instr : computation->MakeInstructionPostOrder()) {
if (instr->opcode() == HloOpcode::kWhile) {
TF_ASSIGN_OR_RETURN(bool moved,
MoveCollectivePermutes(computation, instr));
changed |= moved;
}
}
}
return changed;
}
} | #include "xla/service/spmd/collective_permute_motion.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
using CollectivePermuteMotionTest = HloTestBase;
namespace op = xla::testing::opcode_matchers;
TEST_F(CollectivePermuteMotionTest, SimpleMove) {
absl::string_view hlo_string = R"(
HloModule test
body {
loop_var = (s32[], f32[4,4]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=0
add = s32[] add(gte0, constant.1)
gte1 = f32[4,4] get-tuple-element(loop_var), index=1
mul = f32[4,4] multiply(gte1, gte1)
cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}
ROOT tuple = (s32[], f32[4,4]) tuple(add, cp)
}
cond {
loop_var = (s32[], f32[4,4]) parameter(0)
gte.cond = s32[] get-tuple-element(loop_var), index=0
constant.3 = s32[] constant(5)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY main {
constant.2 = s32[] constant(0)
param = f32[4,4] parameter(0)
tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param)
while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body
ROOT result = s32[] get-tuple-element(while), index=0
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CollectivePermuteMotion pass;
ASSERT_TRUE(pass.Run(&*module).value());
VLOG(1) << module->ToString();
const HloInstruction* loop = FindInstruction(module.get(), "while");
const HloInstruction* output =
loop->while_body()->root_instruction()->operand(1);
auto input =
AllOf(op::Shape("f32[4,4]"), op::GetTupleElement(op::Parameter(0)));
auto cp = op::CollectivePermute(input);
auto select = op::Select(op::Broadcast(op::Compare()), input, cp);
EXPECT_THAT(output, op::Multiply(select, select));
}
TEST_F(CollectivePermuteMotionTest, NoCollectivePermute) {
absl::string_view hlo_string = R"(
HloModule test
body {
loop_var = (s32[], f32[], f32[]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=0
add = s32[] add(gte0, constant.1)
gte1 = f32[] get-tuple-element(loop_var), index=1
constant.4 = f32[] constant(4.0)
ROOT tuple = (s32[], f32[], f32[]) tuple(add, constant.4, gte1)
}
cond {
loop_var = (s32[], f32[], f32[]) parameter(0)
gte.cond = s32[] get-tuple-element(loop_var), index=0
constant.3 = s32[] constant(5)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY main {
constant.2 = s32[] constant(0)
param = f32[] parameter(0)
param.1 = f32[] parameter(1)
tuple.1 = (s32[], f32[], f32[]) tuple(constant.2, param, param.1)
while = (s32[], f32[], f32[]) while(tuple.1), condition=cond, body=body
ROOT result = s32[] get-tuple-element(while), index=0
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CollectivePermuteMotion pass;
ASSERT_FALSE(pass.Run(&*module).value());
}
TEST_F(CollectivePermuteMotionTest, MoveWithElementwise) {
absl::string_view hlo_string = R"(
HloModule test
body {
loop_var = (s32[], f32[4,4]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=0
add = s32[] add(gte0, constant.1)
gte1 = f32[4,4] get-tuple-element(loop_var), index=1
mul = f32[4,4] multiply(gte1, gte1)
cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}
constant.4 = f32[] constant(1)
broadcast = f32[4,4] broadcast(constant.4), dimensions={}
add1 = f32[4,4] add(cp, broadcast)
ROOT tuple = (s32[], f32[4,4]) tuple(add, add1)
}
cond {
loop_var = (s32[], f32[4,4]) parameter(0)
gte.cond = s32[] get-tuple-element(loop_var), index=0
constant.3 = s32[] constant(5)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY main {
constant.2 = s32[] constant(0)
param = f32[4,4] parameter(0)
tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param)
while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body
ROOT result = s32[] get-tuple-element(while), index=0
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CollectivePermuteMotion pass;
ASSERT_TRUE(pass.Run(&*module).value());
VLOG(1) << module->ToString();
const HloInstruction* loop = FindInstruction(module.get(), "while");
const HloInstruction* output =
loop->while_body()->root_instruction()->operand(1);
auto input =
AllOf(op::Shape("f32[4,4]"), op::GetTupleElement(op::Parameter(0)));
auto moved =
op::Add(op::CollectivePermute(input), op::Broadcast(op::Constant()));
auto select = op::Select(op::Broadcast(op::Compare()), input, moved);
EXPECT_THAT(output, op::Multiply(select, select));
}
TEST_F(CollectivePermuteMotionTest, DoNotMoveWithNonConstElementwise) {
absl::string_view hlo_string = R"(
HloModule test
body {
loop_var = (s32[], f32[4,4]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=0
add = s32[] add(gte0, constant.1)
gte1 = f32[4,4] get-tuple-element(loop_var), index=1
mul = f32[4,4] multiply(gte1, gte1)
cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}
constant.4 = f32[] constant(1)
nonconst = f32[4,4] custom-call(), custom_call_target="unknown"
add1 = f32[4,4] add(cp, nonconst)
ROOT tuple = (s32[], f32[4,4]) tuple(add, add1)
}
cond {
loop_var = (s32[], f32[4,4]) parameter(0)
gte.cond = s32[] get-tuple-element(loop_var), index=0
constant.3 = s32[] constant(5)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY main {
constant.2 = s32[] constant(0)
param = f32[4,4] parameter(0)
tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param)
while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body
ROOT result = s32[] get-tuple-element(while), index=0
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CollectivePermuteMotion pass;
ASSERT_FALSE(pass.Run(&*module).value());
}
TEST_F(CollectivePermuteMotionTest, DoNotMoveIfOutputUsed) {
absl::string_view hlo_string = R"(
HloModule test
body {
loop_var = (s32[], f32[4,4]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=0
add = s32[] add(gte0, constant.1)
gte1 = f32[4,4] get-tuple-element(loop_var), index=1
mul = f32[4,4] multiply(gte1, gte1)
cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}
ROOT tuple = (s32[], f32[4,4]) tuple(add, cp)
}
cond {
loop_var = (s32[], f32[4,4]) parameter(0)
gte.cond = s32[] get-tuple-element(loop_var), index=0
constant.3 = s32[] constant(5)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY main {
constant.2 = s32[] constant(0)
param = f32[4,4] parameter(0)
tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param)
while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body
ROOT result = f32[4,4] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CollectivePermuteMotion pass;
ASSERT_FALSE(pass.Run(&*module).value());
}
TEST_F(CollectivePermuteMotionTest, DoNotMoveIfInductionVarUnknown) {
absl::string_view hlo_string = R"(
HloModule test
body {
loop_var = (s32[], f32[4,4]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=0
custom = s32[] custom-call(gte0, constant.1), custom_call_target="unknown"
gte1 = f32[4,4] get-tuple-element(loop_var), index=1
mul = f32[4,4] multiply(gte1, gte1)
cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}
ROOT tuple = (s32[], f32[4,4]) tuple(custom, cp)
}
cond {
loop_var = (s32[], f32[4,4]) parameter(0)
gte.cond = s32[] get-tuple-element(loop_var), index=0
constant.3 = s32[] constant(5)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY main {
constant.2 = s32[] constant(0)
param = f32[4,4] parameter(0)
tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param)
while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body
ROOT result = s32[] get-tuple-element(while), index=0
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CollectivePermuteMotion pass;
ASSERT_FALSE(pass.Run(&*module).value());
}
TEST_F(CollectivePermuteMotionTest, DoNotMoveIfMultiOutput) {
absl::string_view hlo_string = R"(
HloModule test
body {
loop_var = (s32[], f32[4,4], f32[4,4]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=0
add = s32[] add(gte0, constant.1)
gte1 = f32[4,4] get-tuple-element(loop_var), index=1
mul = f32[4,4] multiply(gte1, gte1)
cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}
ROOT tuple = (s32[], f32[4,4], f32[4,4]) tuple(add, cp, cp)
}
cond {
loop_var = (s32[], f32[4,4], f32[4,4]) parameter(0)
gte.cond = s32[] get-tuple-element(loop_var), index=0
constant.3 = s32[] constant(5)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY main {
constant.2 = s32[] constant(0)
param = f32[4,4] parameter(0)
tuple.1 = (s32[], f32[4,4], f32[4,4]) tuple(constant.2, param, param)
while = (s32[], f32[4,4], f32[4,4]) while(tuple.1),
condition=cond, body=body
ROOT result = s32[] get-tuple-element(while), index=0
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CollectivePermuteMotion pass;
ASSERT_FALSE(pass.Run(&*module).value());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/collective_permute_motion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/collective_permute_motion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ed71d17b-19eb-4298-88f0-ac7a9381d1f5 | cpp | abseil/abseil-cpp | str_split | absl/strings/str_split.cc | absl/strings/str_split_test.cc | #include "absl/strings/str_split.h"
#include <algorithm>
#include <cstddef>
#include <cstdlib>
#include <cstring>
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace {
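// Returns the first occurrence of `delimiter` at or after `pos` as a view
// into `text`; if there is none, returns an empty view positioned at the end
// of `text`. An empty delimiter yields a zero-length match after each
// character, splitting `text` into its individual characters.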
template <typename FindPolicy>
absl::string_view GenericFind(absl::string_view text,
absl::string_view delimiter, size_t pos,
FindPolicy find_policy) {
if (delimiter.empty() && text.length() > 0) {
return absl::string_view(text.data() + pos + 1, 0);
}
  size_t found_pos = absl::string_view::npos;
  absl::string_view found(text.data() + text.size(), 0);  // Not found.
found_pos = find_policy.Find(text, delimiter, pos);
if (found_pos != absl::string_view::npos) {
found = absl::string_view(text.data() + found_pos,
find_policy.Length(delimiter));
}
return found;
}
struct LiteralPolicy {
static size_t Find(absl::string_view text, absl::string_view delimiter,
size_t pos) {
return text.find(delimiter, pos);
}
static size_t Length(absl::string_view delimiter) {
return delimiter.length();
}
};
struct AnyOfPolicy {
static size_t Find(absl::string_view text, absl::string_view delimiter,
size_t pos) {
return text.find_first_of(delimiter, pos);
}
  static size_t Length(absl::string_view /*delimiter*/) { return 1; }
};
}
ByString::ByString(absl::string_view sp) : delimiter_(sp) {}
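// Fast path for a single-character delimiter, which avoids the generic
// search machinery.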
absl::string_view ByString::Find(absl::string_view text, size_t pos) const {
if (delimiter_.length() == 1) {
size_t found_pos = text.find(delimiter_[0], pos);
if (found_pos == absl::string_view::npos)
return absl::string_view(text.data() + text.size(), 0);
return text.substr(found_pos, 1);
}
return GenericFind(text, delimiter_, pos, LiteralPolicy());
}
absl::string_view ByAsciiWhitespace::Find(absl::string_view text,
size_t pos) const {
return GenericFind(text, " \t\v\f\r\n", pos, AnyOfPolicy());
}
absl::string_view ByChar::Find(absl::string_view text, size_t pos) const {
size_t found_pos = text.find(c_, pos);
if (found_pos == absl::string_view::npos)
return absl::string_view(text.data() + text.size(), 0);
return text.substr(found_pos, 1);
}
ByAnyChar::ByAnyChar(absl::string_view sp) : delimiters_(sp) {}
absl::string_view ByAnyChar::Find(absl::string_view text, size_t pos) const {
return GenericFind(text, delimiters_, pos, AnyOfPolicy());
}
ByLength::ByLength(ptrdiff_t length) : length_(length) {
ABSL_RAW_CHECK(length > 0, "");
}
absl::string_view ByLength::Find(absl::string_view text, size_t pos) const {
pos = std::min(pos, text.size());
absl::string_view substr = text.substr(pos);
if (substr.length() <= static_cast<size_t>(length_))
return absl::string_view(text.data() + text.size(), 0);
return absl::string_view(substr.data() + length_, 0);
}
ABSL_NAMESPACE_END
} | #include "absl/strings/str_split.h"
#include <cstddef>
#include <cstdint>
#include <deque>
#include <initializer_list>
#include <list>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/macros.h"
#include "absl/container/btree_map.h"
#include "absl/container/btree_set.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/node_hash_map.h"
#include "absl/strings/string_view.h"
namespace {
using ::testing::ElementsAre;
using ::testing::IsEmpty;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
TEST(Split, TraitsTest) {
static_assert(!absl::strings_internal::SplitterIsConvertibleTo<int>::value,
"");
static_assert(
!absl::strings_internal::SplitterIsConvertibleTo<std::string>::value, "");
static_assert(absl::strings_internal::SplitterIsConvertibleTo<
std::vector<std::string>>::value,
"");
static_assert(
!absl::strings_internal::SplitterIsConvertibleTo<std::vector<int>>::value,
"");
static_assert(absl::strings_internal::SplitterIsConvertibleTo<
std::vector<absl::string_view>>::value,
"");
static_assert(absl::strings_internal::SplitterIsConvertibleTo<
std::map<std::string, std::string>>::value,
"");
static_assert(absl::strings_internal::SplitterIsConvertibleTo<
std::map<absl::string_view, absl::string_view>>::value,
"");
static_assert(!absl::strings_internal::SplitterIsConvertibleTo<
std::map<int, std::string>>::value,
"");
static_assert(!absl::strings_internal::SplitterIsConvertibleTo<
std::map<std::string, int>>::value,
"");
}
TEST(Split, APIExamples) {
{
std::vector<std::string> v = absl::StrSplit("a,b,c", ",");
EXPECT_THAT(v, ElementsAre("a", "b", "c"));
using absl::ByString;
v = absl::StrSplit("a,b,c", ByString(","));
EXPECT_THAT(v, ElementsAre("a", "b", "c"));
EXPECT_THAT(absl::StrSplit("a,b,c", ByString(",")),
ElementsAre("a", "b", "c"));
}
{
std::vector<std::string> v = absl::StrSplit("a,b,c", ',');
EXPECT_THAT(v, ElementsAre("a", "b", "c"));
using absl::ByChar;
v = absl::StrSplit("a,b,c", ByChar(','));
EXPECT_THAT(v, ElementsAre("a", "b", "c"));
}
{
const std::vector<std::string> v = absl::StrSplit("a=>b=>c", "=>");
EXPECT_THAT(v, ElementsAre("a", "b", "c"));
}
{
std::vector<absl::string_view> v = absl::StrSplit("a,b,c", ',');
EXPECT_THAT(v, ElementsAre("a", "b", "c"));
}
{
std::vector<std::string> v = absl::StrSplit(",a,b,c,", ',');
EXPECT_THAT(v, ElementsAre("", "a", "b", "c", ""));
}
{
std::vector<std::string> v = absl::StrSplit("abc", ',');
EXPECT_THAT(v, ElementsAre("abc"));
}
{
std::vector<std::string> v = absl::StrSplit("abc", "");
EXPECT_THAT(v, ElementsAre("a", "b", "c"));
}
{
std::string embedded_nulls("a\0b\0c", 5);
std::string null_delim("\0", 1);
std::vector<std::string> v = absl::StrSplit(embedded_nulls, null_delim);
EXPECT_THAT(v, ElementsAre("a", "b", "c"));
}
{
std::pair<std::string, std::string> p = absl::StrSplit("a,b,c", ',');
EXPECT_EQ("a", p.first);
EXPECT_EQ("b", p.second);
}
{
std::set<std::string> v = absl::StrSplit("a,b,c,a,b,c,a,b,c", ',');
EXPECT_THAT(v, ElementsAre("a", "b", "c"));
}
{
char a[] = ",";
char* d = a + 0;
std::vector<std::string> v = absl::StrSplit("a,b,c", d);
EXPECT_THAT(v, ElementsAre("a", "b", "c"));
}
{
using absl::ByAnyChar;
std::vector<std::string> v = absl::StrSplit("a,b;c", ByAnyChar(",;"));
EXPECT_THAT(v, ElementsAre("a", "b", "c"));
}
{
using absl::SkipWhitespace;
std::vector<std::string> v =
absl::StrSplit(" a , ,,b,", ',', SkipWhitespace());
EXPECT_THAT(v, ElementsAre(" a ", "b"));
}
{
using absl::ByLength;
std::vector<std::string> v = absl::StrSplit("abcdefg", ByLength(3));
EXPECT_THAT(v, ElementsAre("abc", "def", "g"));
}
{
std::vector<std::string> v1 = absl::StrSplit("a,b,c", ',');
EXPECT_THAT(v1, ElementsAre("a", "b", "c"));
std::vector<std::string> v2(absl::StrSplit("a,b,c", ','));
EXPECT_THAT(v2, ElementsAre("a", "b", "c"));
auto v3 = std::vector<std::string>(absl::StrSplit("a,b,c", ','));
EXPECT_THAT(v3, ElementsAre("a", "b", "c"));
v3 = absl::StrSplit("a,b,c", ',');
EXPECT_THAT(v3, ElementsAre("a", "b", "c"));
}
{
std::map<std::string, std::string> m = absl::StrSplit("a,1,b,2,a,3", ',');
EXPECT_EQ(2, m.size());
EXPECT_EQ("3", m["a"]);
EXPECT_EQ("2", m["b"]);
}
{
std::multimap<std::string, std::string> m =
absl::StrSplit("a,1,b,2,a,3", ',');
EXPECT_EQ(3, m.size());
auto it = m.find("a");
EXPECT_EQ("1", it->second);
++it;
EXPECT_EQ("3", it->second);
it = m.find("b");
EXPECT_EQ("2", it->second);
}
{
std::string s = "x,x,x,x,x,x,x";
for (absl::string_view sp : absl::StrSplit(s, ',')) {
EXPECT_EQ("x", sp);
}
}
{
using absl::SkipWhitespace;
std::string s = " ,x,,x,,x,x,x,,";
for (absl::string_view sp : absl::StrSplit(s, ',', SkipWhitespace())) {
EXPECT_EQ("x", sp);
}
}
{
std::map<std::string, std::string> m;
for (absl::string_view sp : absl::StrSplit("a=b=c,d=e,f=,g", ',')) {
m.insert(absl::StrSplit(sp, absl::MaxSplits('=', 1)));
}
EXPECT_EQ("b=c", m.find("a")->second);
EXPECT_EQ("e", m.find("d")->second);
EXPECT_EQ("", m.find("f")->second);
EXPECT_EQ("", m.find("g")->second);
}
}
TEST(SplitIterator, Basics) {
auto splitter = absl::StrSplit("a,b", ',');
auto it = splitter.begin();
auto end = splitter.end();
EXPECT_NE(it, end);
EXPECT_EQ("a", *it);
++it;
EXPECT_NE(it, end);
EXPECT_EQ("b",
std::string(it->data(), it->size()));
it++;
EXPECT_EQ(it, end);
}
class Skip {
public:
explicit Skip(const std::string& s) : s_(s) {}
bool operator()(absl::string_view sp) { return sp != s_; }
private:
std::string s_;
};
TEST(SplitIterator, Predicate) {
auto splitter = absl::StrSplit("a,b,c", ',', Skip("b"));
auto it = splitter.begin();
auto end = splitter.end();
EXPECT_NE(it, end);
EXPECT_EQ("a", *it);
++it;
EXPECT_NE(it, end);
EXPECT_EQ("c",
std::string(it->data(), it->size()));
it++;
EXPECT_EQ(it, end);
}
TEST(SplitIterator, EdgeCases) {
struct {
std::string in;
std::vector<std::string> expect;
} specs[] = {
{"", {""}},
{"foo", {"foo"}},
{",", {"", ""}},
{",foo", {"", "foo"}},
{"foo,", {"foo", ""}},
{",foo,", {"", "foo", ""}},
{"foo,bar", {"foo", "bar"}},
};
for (const auto& spec : specs) {
SCOPED_TRACE(spec.in);
auto splitter = absl::StrSplit(spec.in, ',');
auto it = splitter.begin();
auto end = splitter.end();
for (const auto& expected : spec.expect) {
EXPECT_NE(it, end);
EXPECT_EQ(expected, *it++);
}
EXPECT_EQ(it, end);
}
}
TEST(Splitter, Const) {
const auto splitter = absl::StrSplit("a,b,c", ',');
EXPECT_THAT(splitter, ElementsAre("a", "b", "c"));
}
TEST(Split, EmptyAndNull) {
EXPECT_THAT(absl::StrSplit(absl::string_view(""), '-'), ElementsAre(""));
EXPECT_THAT(absl::StrSplit(absl::string_view(), '-'), ElementsAre());
}
TEST(SplitIterator, EqualityAsEndCondition) {
auto splitter = absl::StrSplit("a,b,c", ',');
auto it = splitter.begin();
auto it2 = it;
++it2;
++it2;
EXPECT_EQ("c", *it2);
std::vector<absl::string_view> v;
for (; it != it2; ++it) {
v.push_back(*it);
}
EXPECT_THAT(v, ElementsAre("a", "b"));
}
TEST(Splitter, RangeIterators) {
auto splitter = absl::StrSplit("a,b,c", ',');
std::vector<absl::string_view> output;
for (absl::string_view p : splitter) {
output.push_back(p);
}
EXPECT_THAT(output, ElementsAre("a", "b", "c"));
}
template <typename ContainerType, typename Splitter>
void TestConversionOperator(const Splitter& splitter) {
ContainerType output = splitter;
EXPECT_THAT(output, UnorderedElementsAre("a", "b", "c", "d"));
}
template <typename MapType, typename Splitter>
void TestMapConversionOperator(const Splitter& splitter) {
MapType m = splitter;
EXPECT_THAT(m, UnorderedElementsAre(Pair("a", "b"), Pair("c", "d")));
}
template <typename FirstType, typename SecondType, typename Splitter>
void TestPairConversionOperator(const Splitter& splitter) {
std::pair<FirstType, SecondType> p = splitter;
EXPECT_EQ(p, (std::pair<FirstType, SecondType>("a", "b")));
}
TEST(Splitter, ConversionOperator) {
auto splitter = absl::StrSplit("a,b,c,d", ',');
TestConversionOperator<std::vector<absl::string_view>>(splitter);
TestConversionOperator<std::vector<std::string>>(splitter);
TestConversionOperator<std::list<absl::string_view>>(splitter);
TestConversionOperator<std::list<std::string>>(splitter);
TestConversionOperator<std::deque<absl::string_view>>(splitter);
TestConversionOperator<std::deque<std::string>>(splitter);
TestConversionOperator<std::set<absl::string_view>>(splitter);
TestConversionOperator<std::set<std::string>>(splitter);
TestConversionOperator<std::multiset<absl::string_view>>(splitter);
TestConversionOperator<std::multiset<std::string>>(splitter);
TestConversionOperator<absl::btree_set<absl::string_view>>(splitter);
TestConversionOperator<absl::btree_set<std::string>>(splitter);
TestConversionOperator<absl::btree_multiset<absl::string_view>>(splitter);
TestConversionOperator<absl::btree_multiset<std::string>>(splitter);
TestConversionOperator<std::unordered_set<std::string>>(splitter);
TestMapConversionOperator<std::map<absl::string_view, absl::string_view>>(
splitter);
TestMapConversionOperator<std::map<absl::string_view, std::string>>(splitter);
TestMapConversionOperator<std::map<std::string, absl::string_view>>(splitter);
TestMapConversionOperator<std::map<std::string, std::string>>(splitter);
TestMapConversionOperator<
std::multimap<absl::string_view, absl::string_view>>(splitter);
TestMapConversionOperator<std::multimap<absl::string_view, std::string>>(
splitter);
TestMapConversionOperator<std::multimap<std::string, absl::string_view>>(
splitter);
TestMapConversionOperator<std::multimap<std::string, std::string>>(splitter);
TestMapConversionOperator<
absl::btree_map<absl::string_view, absl::string_view>>(splitter);
TestMapConversionOperator<absl::btree_map<absl::string_view, std::string>>(
splitter);
TestMapConversionOperator<absl::btree_map<std::string, absl::string_view>>(
splitter);
TestMapConversionOperator<absl::btree_map<std::string, std::string>>(
splitter);
TestMapConversionOperator<
absl::btree_multimap<absl::string_view, absl::string_view>>(splitter);
TestMapConversionOperator<
absl::btree_multimap<absl::string_view, std::string>>(splitter);
TestMapConversionOperator<
absl::btree_multimap<std::string, absl::string_view>>(splitter);
TestMapConversionOperator<absl::btree_multimap<std::string, std::string>>(
splitter);
TestMapConversionOperator<std::unordered_map<std::string, std::string>>(
splitter);
TestMapConversionOperator<
absl::node_hash_map<absl::string_view, absl::string_view>>(splitter);
TestMapConversionOperator<
absl::node_hash_map<absl::string_view, std::string>>(splitter);
TestMapConversionOperator<
absl::node_hash_map<std::string, absl::string_view>>(splitter);
TestMapConversionOperator<
absl::flat_hash_map<absl::string_view, absl::string_view>>(splitter);
TestMapConversionOperator<
absl::flat_hash_map<absl::string_view, std::string>>(splitter);
TestMapConversionOperator<
absl::flat_hash_map<std::string, absl::string_view>>(splitter);
TestPairConversionOperator<absl::string_view, absl::string_view>(splitter);
TestPairConversionOperator<absl::string_view, std::string>(splitter);
TestPairConversionOperator<std::string, absl::string_view>(splitter);
TestPairConversionOperator<std::string, std::string>(splitter);
}
TEST(Splitter, ToPair) {
{
std::pair<std::string, std::string> p = absl::StrSplit("", ',');
EXPECT_EQ("", p.first);
EXPECT_EQ("", p.second);
}
{
std::pair<std::string, std::string> p = absl::StrSplit("a", ',');
EXPECT_EQ("a", p.first);
EXPECT_EQ("", p.second);
}
{
std::pair<std::string, std::string> p = absl::StrSplit(",b", ',');
EXPECT_EQ("", p.first);
EXPECT_EQ("b", p.second);
}
{
std::pair<std::string, std::string> p = absl::StrSplit("a,b", ',');
EXPECT_EQ("a", p.first);
EXPECT_EQ("b", p.second);
}
{
std::pair<std::string, std::string> p = absl::StrSplit("a,b,c", ',');
EXPECT_EQ("a", p.first);
EXPECT_EQ("b", p.second);
}
}
TEST(Splitter, Predicates) {
static const char kTestChars[] = ",a, ,b,";
using absl::AllowEmpty;
using absl::SkipEmpty;
using absl::SkipWhitespace;
{
auto splitter = absl::StrSplit(kTestChars, ',');
std::vector<std::string> v = splitter;
EXPECT_THAT(v, ElementsAre("", "a", " ", "b", ""));
}
{
auto splitter = absl::StrSplit(kTestChars, ',', AllowEmpty());
std::vector<std::string> v_allowempty = splitter;
EXPECT_THAT(v_allowempty, ElementsAre("", "a", " ", "b", ""));
auto splitter_nopredicate = absl::StrSplit(kTestChars, ',');
std::vector<std::string> v_nopredicate = splitter_nopredicate;
EXPECT_EQ(v_allowempty, v_nopredicate);
}
{
auto splitter = absl::StrSplit(kTestChars, ',', SkipEmpty());
std::vector<std::string> v = splitter;
EXPECT_THAT(v, ElementsAre("a", " ", "b"));
}
{
auto splitter = absl::StrSplit(kTestChars, ',', SkipWhitespace());
std::vector<std::string> v = splitter;
EXPECT_THAT(v, ElementsAre("a", "b"));
}
}
TEST(Split, Basics) {
{
absl::StrSplit("a,b,c", ',');
}
{
std::vector<absl::string_view> v = absl::StrSplit("a,b,c", ',');
EXPECT_THAT(v, ElementsAre("a", "b", "c"));
}
{
std::vector<std::string> v = absl::StrSplit("a,b,c", ',');
EXPECT_THAT(v, ElementsAre("a", "b", "c"));
}
{
std::vector<std::string> v;
v = absl::StrSplit("a,b,c", ',');
EXPECT_THAT(v, ElementsAre("a", "b", "c"));
std::map<std::string, std::string> m;
m = absl::StrSplit("a,b,c", ',');
EXPECT_EQ(2, m.size());
std::unordered_map<std::string, std::string> hm;
hm = absl::StrSplit("a,b,c", ',');
EXPECT_EQ(2, hm.size());
}
}
absl::string_view ReturnStringView() { return "Hello World"; }
const char* ReturnConstCharP() { return "Hello World"; }
char* ReturnCharP() { return const_cast<char*>("Hello World"); }
TEST(Split, AcceptsCertainTemporaries) {
std::vector<std::string> v;
v = absl::StrSplit(ReturnStringView(), ' ');
EXPECT_THAT(v, ElementsAre("Hello", "World"));
v = absl::StrSplit(ReturnConstCharP(), ' ');
EXPECT_THAT(v, ElementsAre("Hello", "World"));
v = absl::StrSplit(ReturnCharP(), ' ');
EXPECT_THAT(v, ElementsAre("Hello", "World"));
}
TEST(Split, Temporary) {
const char input[] = "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u";
EXPECT_LT(sizeof(std::string), ABSL_ARRAYSIZE(input))
<< "Input should be larger than fits on the stack.";
auto splitter = absl::StrSplit(std::string(input), ',');
std::string expected = "a";
for (absl::string_view letter : splitter) {
EXPECT_EQ(expected, letter);
++expected[0];
}
EXPECT_EQ("v", expected);
auto std_splitter = absl::StrSplit(std::string(input), ',');
expected = "a";
for (absl::string_view letter : std_splitter) {
EXPECT_EQ(expected, letter);
++expected[0];
}
EXPECT_EQ("v", expected);
}
template <typename T>
static std::unique_ptr<T> CopyToHeap(const T& value) {
return std::unique_ptr<T>(new T(value));
}
TEST(Split, LvalueCaptureIsCopyable) {
std::string input = "a,b";
auto heap_splitter = CopyToHeap(absl::StrSplit(input, ','));
auto stack_splitter = *heap_splitter;
heap_splitter.reset();
std::vector<std::string> result = stack_splitter;
EXPECT_THAT(result, testing::ElementsAre("a", "b"));
}
TEST(Split, TemporaryCaptureIsCopyable) {
auto heap_splitter = CopyToHeap(absl::StrSplit(std::string("a,b"), ','));
auto stack_splitter = *heap_splitter;
heap_splitter.reset();
std::vector<std::string> result = stack_splitter;
EXPECT_THAT(result, testing::ElementsAre("a", "b"));
}
TEST(Split, SplitterIsCopyableAndMoveable) {
auto a = absl::StrSplit("foo", '-');
auto b = a;
auto c = std::move(a);
b = c;
c = std::move(b);
EXPECT_THAT(c, ElementsAre("foo"));
}
TEST(Split, StringDelimiter) {
{
std::vector<absl::string_view> v = absl::StrSplit("a,b", ',');
EXPECT_THAT(v, ElementsAre("a", "b"));
}
{
std::vector<absl::string_view> v = absl::StrSplit("a,b", std::string(","));
EXPECT_THAT(v, ElementsAre("a", "b"));
}
{
std::vector<absl::string_view> v =
absl::StrSplit("a,b", absl::string_view(","));
EXPECT_THAT(v, ElementsAre("a", "b"));
}
}
#if !defined(__cpp_char8_t)
#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++2a-compat"
#endif
TEST(Split, UTF8) {
std::string utf8_string = u8"\u03BA\u1F79\u03C3\u03BC\u03B5";
{
std::string to_split = "a," + utf8_string;
std::vector<absl::string_view> v = absl::StrSplit(to_split, ',');
EXPECT_THAT(v, ElementsAre("a", utf8_string));
}
{
std::string to_split = "a," + utf8_string + ",b";
std::string unicode_delimiter = "," + utf8_string + ",";
std::vector<absl::string_view> v =
absl::StrSplit(to_split, unicode_delimiter);
EXPECT_THAT(v, ElementsAre("a", "b"));
}
{
std::vector<absl::string_view> v =
absl::StrSplit(u8"Foo h\u00E4llo th\u4E1Ere", absl::ByAnyChar(" \t"));
EXPECT_THAT(v, ElementsAre("Foo", u8"h\u00E4llo", u8"th\u4E1Ere"));
}
}
#if defined(__clang__)
#pragma clang diagnostic pop
#endif
#endif
TEST(Split, EmptyStringDelimiter) {
{
std::vector<std::string> v = absl::StrSplit("", "");
EXPECT_THAT(v, ElementsAre(""));
}
{
std::vector<std::string> v = absl::StrSplit("a", "");
EXPECT_THAT(v, ElementsAre("a"));
}
{
std::vector<std::string> v = absl::StrSplit("ab", "");
EXPECT_THAT(v, ElementsAre("a", "b"));
}
{
std::vector<std::string> v = absl::StrSplit("a b", "");
EXPECT_THAT(v, ElementsAre("a", " ", "b"));
}
}
TEST(Split, SubstrDelimiter) {
std::vector<absl::string_view> results;
absl::string_view delim("
results = absl::StrSplit("", delim);
EXPECT_THAT(results, ElementsAre(""));
results = absl::StrSplit("
EXPECT_THAT(results, ElementsAre("", ""));
results = absl::StrSplit("ab", delim);
EXPECT_THAT(results, ElementsAre("ab"));
results = absl::StrSplit("ab
EXPECT_THAT(results, ElementsAre("ab", ""));
results = absl::StrSplit("ab/", delim);
EXPECT_THAT(results, ElementsAre("ab/"));
results = absl::StrSplit("a/b", delim);
EXPECT_THAT(results, ElementsAre("a/b"));
results = absl::StrSplit("a
EXPECT_THAT(results, ElementsAre("a", "b"));
results = absl::StrSplit("a
EXPECT_THAT(results, ElementsAre("a", "/b"));
results = absl::StrSplit("a
EXPECT_THAT(results, ElementsAre("a", "", "b"));
}
TEST(Split, EmptyResults) {
std::vector<absl::string_view> results;
results = absl::StrSplit("", '#');
EXPECT_THAT(results, ElementsAre(""));
results = absl::StrSplit("#", '#');
EXPECT_THAT(results, ElementsAre("", ""));
results = absl::StrSplit("#cd", '#');
EXPECT_THAT(results, ElementsAre("", "cd"));
results = absl::StrSplit("ab#cd#", '#');
EXPECT_THAT(results, ElementsAre("ab", "cd", ""));
results = absl::StrSplit("ab##cd", '#');
EXPECT_THAT(results, ElementsAre("ab", "", "cd"));
results = absl::StrSplit("ab##", '#');
EXPECT_THAT(results, ElementsAre("ab", "", ""));
results = absl::StrSplit("ab#ab#", '#');
EXPECT_THAT(results, ElementsAre("ab", "ab", ""));
results = absl::StrSplit("aaaa", 'a');
EXPECT_THAT(results, ElementsAre("", "", "", "", ""));
results = absl::StrSplit("", '#', absl::SkipEmpty());
EXPECT_THAT(results, ElementsAre());
}
template <typename Delimiter>
static bool IsFoundAtStartingPos(absl::string_view text, Delimiter d,
size_t starting_pos, int expected_pos) {
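// Returns true iff `d` reports a match whose start lies exactly at
// `expected_pos` when searching `text` from `starting_pos`.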
absl::string_view found = d.Find(text, starting_pos);
return found.data() != text.data() + text.size() &&
expected_pos == found.data() - text.data();
}
template <typename Delimiter>
static bool IsFoundAt(absl::string_view text, Delimiter d, int expected_pos) {
const std::string leading_text = ",x,y,z,";
return IsFoundAtStartingPos(text, d, 0, expected_pos) &&
IsFoundAtStartingPos(leading_text + std::string(text), d,
leading_text.length(),
expected_pos + leading_text.length());
}
template <typename Delimiter>
void TestComma(Delimiter d) {
EXPECT_TRUE(IsFoundAt(",", d, 0));
EXPECT_TRUE(IsFoundAt("a,", d, 1));
EXPECT_TRUE(IsFoundAt(",b", d, 0));
EXPECT_TRUE(IsFoundAt("a,b", d, 1));
EXPECT_TRUE(IsFoundAt("a,b,", d, 1));
EXPECT_TRUE(IsFoundAt("a,b,c", d, 1));
EXPECT_FALSE(IsFoundAt("", d, -1));
EXPECT_FALSE(IsFoundAt(" ", d, -1));
EXPECT_FALSE(IsFoundAt("a", d, -1));
EXPECT_FALSE(IsFoundAt("a b c", d, -1));
EXPECT_FALSE(IsFoundAt("a;b;c", d, -1));
EXPECT_FALSE(IsFoundAt(";", d, -1));
}
TEST(Delimiter, ByString) {
using absl::ByString;
TestComma(ByString(","));
ByString comma_string(",");
TestComma(comma_string);
absl::string_view abc("abc");
EXPECT_EQ(0, abc.find(""));
ByString empty("");
EXPECT_FALSE(IsFoundAt("", empty, 0));
EXPECT_FALSE(IsFoundAt("a", empty, 0));
EXPECT_TRUE(IsFoundAt("ab", empty, 1));
EXPECT_TRUE(IsFoundAt("abc", empty, 1));
}
TEST(Split, ByChar) {
using absl::ByChar;
TestComma(ByChar(','));
ByChar comma_char(',');
TestComma(comma_char);
}
TEST(Delimiter, ByAnyChar) {
using absl::ByAnyChar;
ByAnyChar one_delim(",");
EXPECT_TRUE(IsFoundAt(",", one_delim, 0));
EXPECT_TRUE(IsFoundAt("a,", one_delim, 1));
EXPECT_TRUE(IsFoundAt("a,b", one_delim, 1));
EXPECT_TRUE(IsFoundAt(",b", one_delim, 0));
EXPECT_FALSE(IsFoundAt("", one_delim, -1));
EXPECT_FALSE(IsFoundAt(" ", one_delim, -1));
EXPECT_FALSE(IsFoundAt("a", one_delim, -1));
EXPECT_FALSE(IsFoundAt("a;b;c", one_delim, -1));
EXPECT_FALSE(IsFoundAt(";", one_delim, -1));
ByAnyChar two_delims(",;");
EXPECT_TRUE(IsFoundAt(",", two_delims, 0));
EXPECT_TRUE(IsFoundAt(";", two_delims, 0));
EXPECT_TRUE(IsFoundAt(",;", two_delims, 0));
EXPECT_TRUE(IsFoundAt(";,", two_delims, 0));
EXPECT_TRUE(IsFoundAt(",;b", two_delims, 0));
EXPECT_TRUE(IsFoundAt(";,b", two_delims, 0));
EXPECT_TRUE(IsFoundAt("a;,", two_delims, 1));
EXPECT_TRUE(IsFoundAt("a,;", two_delims, 1));
EXPECT_TRUE(IsFoundAt("a;,b", two_delims, 1));
EXPECT_TRUE(IsFoundAt("a,;b", two_delims, 1));
EXPECT_FALSE(IsFoundAt("", two_delims, -1));
EXPECT_FALSE(IsFoundAt(" ", two_delims, -1));
EXPECT_FALSE(IsFoundAt("a", two_delims, -1));
EXPECT_FALSE(IsFoundAt("a=b=c", two_delims, -1));
EXPECT_FALSE(IsFoundAt("=", two_delims, -1));
ByAnyChar empty("");
EXPECT_FALSE(IsFoundAt("", empty, 0));
EXPECT_FALSE(IsFoundAt("a", empty, 0));
EXPECT_TRUE(IsFoundAt("ab", empty, 1));
EXPECT_TRUE(IsFoundAt("abc", empty, 1));
}
TEST(Split, ByAsciiWhitespace) {
using absl::ByAsciiWhitespace;
using absl::SkipEmpty;
std::vector<absl::string_view> results;
results = absl::StrSplit("aaaa\n", ByAsciiWhitespace());
EXPECT_THAT(results, ElementsAre("aaaa", ""));
results = absl::StrSplit("aaaa\n", ByAsciiWhitespace(), SkipEmpty());
EXPECT_THAT(results, ElementsAre("aaaa"));
results = absl::StrSplit(" ", ByAsciiWhitespace());
EXPECT_THAT(results, ElementsAre("", ""));
results = absl::StrSplit(" ", ByAsciiWhitespace(), SkipEmpty());
EXPECT_THAT(results, IsEmpty());
results = absl::StrSplit("a", ByAsciiWhitespace());
EXPECT_THAT(results, ElementsAre("a"));
results = absl::StrSplit("", ByAsciiWhitespace());
EXPECT_THAT(results, ElementsAre(""));
results = absl::StrSplit("", ByAsciiWhitespace(), SkipEmpty());
EXPECT_THAT(results, IsEmpty());
results = absl::StrSplit("a b\tc\n d\n", ByAsciiWhitespace());
EXPECT_THAT(results, ElementsAre("a", "b", "c", "", "", "d", ""));
results = absl::StrSplit("a b\tc\n d \n", ByAsciiWhitespace(), SkipEmpty());
EXPECT_THAT(results, ElementsAre("a", "b", "c", "d"));
results = absl::StrSplit("a\t\n\v\f\r b", ByAsciiWhitespace(), SkipEmpty());
EXPECT_THAT(results, ElementsAre("a", "b"));
}
TEST(Delimiter, ByLength) {
using absl::ByLength;
ByLength four_char_delim(4);
EXPECT_TRUE(IsFoundAt("abcde", four_char_delim, 4));
EXPECT_TRUE(IsFoundAt("abcdefghijklmnopqrstuvwxyz", four_char_delim, 4));
EXPECT_TRUE(IsFoundAt("a b,c\nd", four_char_delim, 4));
EXPECT_FALSE(IsFoundAt("", four_char_delim, 0));
EXPECT_FALSE(IsFoundAt("a", four_char_delim, 0));
EXPECT_FALSE(IsFoundAt("ab", four_char_delim, 0));
EXPECT_FALSE(IsFoundAt("abc", four_char_delim, 0));
EXPECT_FALSE(IsFoundAt("abcd", four_char_delim, 0));
}
TEST(Split, WorksWithLargeStrings) {
#if defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
defined(ABSL_HAVE_MEMORY_SANITIZER) || defined(ABSL_HAVE_THREAD_SANITIZER)
constexpr size_t kSize = (uint32_t{1} << 26) + 1;
#else
constexpr size_t kSize = (uint32_t{1} << 31) + 1;
#endif
if (sizeof(size_t) > 4) {
std::string s(kSize, 'x');
s.back() = '-';
std::vector<absl::string_view> v = absl::StrSplit(s, '-');
EXPECT_EQ(2, v.size());
EXPECT_EQ('x', v[0][0]);
EXPECT_EQ('x', v[0][1]);
EXPECT_EQ('x', v[0][3]);
EXPECT_EQ("", v[1]);
}
}
TEST(SplitInternalTest, TypeTraits) {
EXPECT_FALSE(absl::strings_internal::HasMappedType<int>::value);
EXPECT_TRUE(
(absl::strings_internal::HasMappedType<std::map<int, int>>::value));
EXPECT_FALSE(absl::strings_internal::HasValueType<int>::value);
EXPECT_TRUE(
(absl::strings_internal::HasValueType<std::map<int, int>>::value));
EXPECT_FALSE(absl::strings_internal::HasConstIterator<int>::value);
EXPECT_TRUE(
(absl::strings_internal::HasConstIterator<std::map<int, int>>::value));
EXPECT_FALSE(absl::strings_internal::IsInitializerList<int>::value);
EXPECT_TRUE((absl::strings_internal::IsInitializerList<
std::initializer_list<int>>::value));
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/str_split.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/str_split_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
22062a52-3ab1-495e-b0d6-3265e90401e0 | cpp | abseil/abseil-cpp | memutil | absl/strings/internal/memutil.cc | absl/strings/internal/memutil_test.cc | #include "absl/strings/internal/memutil.h"
#include <cstdlib>
#include "absl/strings/ascii.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace strings_internal {
int memcasecmp(const char* s1, const char* s2, size_t len) {
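// Case-insensitive byte-wise comparison; only ASCII 'A'-'Z' are folded to
// lower case, so the result is locale-independent.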
const unsigned char* us1 = reinterpret_cast<const unsigned char*>(s1);
const unsigned char* us2 = reinterpret_cast<const unsigned char*>(s2);
for (size_t i = 0; i < len; i++) {
unsigned char c1 = us1[i];
unsigned char c2 = us2[i];
if (c1 != c2) {
c1 = c1 >= 'A' && c1 <= 'Z' ? c1 - 'A' + 'a' : c1;
c2 = c2 >= 'A' && c2 <= 'Z' ? c2 - 'A' + 'a' : c2;
const int diff = int{c1} - int{c2};
if (diff != 0) return diff;
}
}
return 0;
}
}
ABSL_NAMESPACE_END
} | #include "absl/strings/internal/memutil.h"
#include <cstdlib>
#include "gtest/gtest.h"
namespace {
TEST(MemUtil, memcasecmp) {
const char a[] = "hello there";
EXPECT_EQ(absl::strings_internal::memcasecmp(a, "heLLO there",
sizeof("hello there") - 1),
0);
EXPECT_EQ(absl::strings_internal::memcasecmp(a, "heLLO therf",
sizeof("hello there") - 1),
-1);
EXPECT_EQ(absl::strings_internal::memcasecmp(a, "heLLO therf",
sizeof("hello there") - 2),
0);
EXPECT_EQ(absl::strings_internal::memcasecmp(a, "whatever", 0), 0);
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/memutil.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/memutil_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
fcc9cb62-4002-4ced-aa88-1c3431b4b183 | cpp | tensorflow/tensorflow | unbounded_thread_pool | tensorflow/core/data/unbounded_thread_pool.cc | tensorflow/core/data/unbounded_thread_pool_test.cc | #include "tensorflow/core/data/unbounded_thread_pool.h"
#include <functional>
#include <memory>
#include <utility>
#include "absl/memory/memory.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/resource.h"
#include "tensorflow/core/platform/unbounded_work_queue.h"
namespace tensorflow {
namespace data {
class UnboundedThreadPool::LogicalThreadWrapper : public Thread {
public:
explicit LogicalThreadWrapper(std::shared_ptr<Notification> done)
: done_(std::move(done)) {}
~LogicalThreadWrapper() override {
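// Joining the logical thread blocks until its work item has run to
// completion, regardless of which physical thread executed it.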
done_->WaitForNotification();
}
private:
std::shared_ptr<Notification> done_;
};
class UnboundedThreadPool::LogicalThreadFactory : public ThreadFactory {
public:
explicit LogicalThreadFactory(UnboundedThreadPool* pool) : pool_(pool) {}
std::unique_ptr<Thread> StartThread(const string& name,
std::function<void()> fn) override {
auto done = std::make_shared<Notification>();
pool_->ScheduleOnWorkQueue(std::move(fn), done);
return std::make_unique<LogicalThreadWrapper>(std::move(done));
}
private:
UnboundedThreadPool* const pool_;
};
std::shared_ptr<ThreadFactory> UnboundedThreadPool::get_thread_factory() {
return std::make_shared<LogicalThreadFactory>(this);
}
void UnboundedThreadPool::Schedule(std::function<void()> fn) {
auto tagged_fn = [fn = std::move(fn)]() {
tensorflow::ResourceTagger tag(kTFDataResourceTag, "ThreadPool");
fn();
};
ScheduleOnWorkQueue(std::move(tagged_fn), nullptr);
}
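// The pool is unbounded, so it reports no fixed thread count and no stable
// per-thread id.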
int UnboundedThreadPool::NumThreads() const { return -1; }
int UnboundedThreadPool::CurrentThreadId() const { return -1; }
namespace {
void WorkQueueFunc(const std::function<void()>& fn,
std::shared_ptr<Notification> done) {
fn();
if (done) {
done->Notify();
}
}
}
void UnboundedThreadPool::ScheduleOnWorkQueue(
std::function<void()> fn, std::shared_ptr<Notification> done) {
unbounded_work_queue_.Schedule(
std::bind(&WorkQueueFunc, std::move(fn), std::move(done)));
}
}
} | #include "tensorflow/core/data/unbounded_thread_pool.h"
#include <atomic>
#include <memory>
#include <vector>
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
TEST(UnboundedThreadPool, ConcurrentThreadCreation) {
UnboundedThreadPool pool(Env::Default(), "test");
auto thread_factory = pool.get_thread_factory();
std::vector<std::unique_ptr<Thread>> threads;
const int kNumThreadsToCreate = 10;
std::atomic<int> i(0);
for (int j = 0; j < kNumThreadsToCreate; ++j) {
threads.push_back(thread_factory->StartThread("", [=, &i,
&thread_factory]() {
std::vector<std::unique_ptr<Thread>> nested_threads;
for (int k = 0; k < kNumThreadsToCreate; ++k) {
nested_threads.push_back(
thread_factory->StartThread("", [&i]() { ++i; }));
}
nested_threads.clear();
}));
}
threads.clear();
EXPECT_EQ(i, kNumThreadsToCreate * kNumThreadsToCreate);
}
TEST(UnboundedThreadPool, MultipleBlockingThreads) {
UnboundedThreadPool pool(Env::Default(), "test");
auto thread_factory = pool.get_thread_factory();
std::vector<std::unique_ptr<Thread>> threads;
std::vector<int> round_sizes = {5, 10, 15, 20};
for (const int round_size : round_sizes) {
Notification n;
BlockingCounter bc(round_size);
for (int j = 0; j < round_size; ++j) {
threads.push_back(thread_factory->StartThread("", [&bc, &n]() {
bc.DecrementCount();
n.WaitForNotification();
}));
}
bc.Wait();
n.Notify();
threads.clear();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/unbounded_thread_pool.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/unbounded_thread_pool_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3bb57b45-0742-480e-8a24-200b67ed49fa | cpp | tensorflow/tensorflow | hlo_element_type_converter | third_party/xla/xla/service/hlo_element_type_converter.cc | third_party/xla/xla/service/hlo_element_type_converter_test.cc | #include "xla/service/hlo_element_type_converter.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/shape_util.h"
#include "xla/types.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
HloInstruction* ToElementType(HloInstruction* hlo, PrimitiveType type) {
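// Inserts a convert instruction only if `hlo` does not already produce the
// requested element type.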
if (hlo->shape().element_type() != type) {
Shape shape = ShapeUtil::ChangeElementType(hlo->shape(), type);
hlo = hlo->parent()->AddInstruction(
HloInstruction::CreateConvert(shape, hlo));
}
CHECK_EQ(hlo->shape().element_type(), type);
return hlo;
}
bool HasOperandType(HloInstruction* hlo, PrimitiveType type) {
for (HloInstruction* operand : hlo->operands()) {
if (operand->shape().element_type() == type) {
return true;
}
}
return false;
}
Shape GetConvertedTupleShape(const Shape& shape, PrimitiveType from_type,
PrimitiveType to_type) {
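// Rewrites each leaf subshape whose element type matches `from_type`;
// nested tuples are not supported here.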
std::vector<Shape> new_tuple_subshapes;
const int64_t n = ShapeUtil::TupleElementCount(shape);
new_tuple_subshapes.reserve(n);
for (int64_t i = 0; i < n; ++i) {
Shape subshape = ShapeUtil::GetTupleElementShape(shape, i);
CHECK(!subshape.IsTuple());
if (subshape.element_type() == from_type) {
subshape = ShapeUtil::ChangeElementType(subshape, to_type);
}
new_tuple_subshapes.push_back(subshape);
}
return ShapeUtil::MakeTupleShape(new_tuple_subshapes);
}
HloInstruction* ConvertTupleElements(HloInstruction* hlo,
const Shape& to_shape) {
const Shape& shape = hlo->shape();
HloComputation* computation = hlo->parent();
std::vector<HloInstruction*> tuple_elements;
for (int64_t i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
const Shape& ele_shape = ShapeUtil::GetTupleElementShape(shape, i);
HloInstruction* element = computation->AddInstruction(
HloInstruction::CreateGetTupleElement(ele_shape, hlo, i));
const Shape& to_ele_shape = ShapeUtil::GetTupleElementShape(to_shape, i);
CHECK(!ele_shape.IsTuple());
if (ele_shape.element_type() != to_ele_shape.element_type()) {
element = computation->AddInstruction(
HloInstruction::CreateConvert(to_ele_shape, element));
}
tuple_elements.push_back(element);
}
return computation->AddInstruction(
HloInstruction::CreateTuple(tuple_elements));
}
}
HloElementTypeConverter::HloElementTypeConverter(
PrimitiveType eliminate_type, PrimitiveType replace_with_type)
: eliminate_type_(eliminate_type), replace_with_type_(replace_with_type) {}
absl::StatusOr<bool> HloElementTypeConverter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(
3, "HloElementTypeConverter::Run(), before:\n" + module->ToString());
if (eliminate_type_ == replace_with_type_) {
return false;
}
HloCloneContext context(module);
bool changed = false;
for (auto* computation : module->computations(execution_threads)) {
for (auto* hlo : computation->MakeInstructionPostOrder()) {
const auto opcode = hlo->opcode();
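// Leave alone ops that pass element types through verbatim, custom calls,
// and any op that carries a nested computation; those are handled (or must
// be handled) elsewhere.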
if (opcode == HloOpcode::kParameter || opcode == HloOpcode::kConstant ||
opcode == HloOpcode::kTuple || opcode == HloOpcode::kConvert ||
opcode == HloOpcode::kBitcastConvert ||
opcode == HloOpcode::kGetTupleElement ||
opcode == HloOpcode::kInfeed || opcode == HloOpcode::kOutfeed) {
continue;
}
if (opcode == HloOpcode::kCustomCall) {
continue;
}
if (opcode == HloOpcode::kWhile || opcode == HloOpcode::kCall ||
opcode == HloOpcode::kAllReduce ||
opcode == HloOpcode::kReduceScatter ||
opcode == HloOpcode::kAllReduceStart ||
opcode == HloOpcode::kFusion || opcode == HloOpcode::kMap ||
opcode == HloOpcode::kReduce || opcode == HloOpcode::kReduceWindow ||
opcode == HloOpcode::kScatter ||
opcode == HloOpcode::kSelectAndScatter ||
opcode == HloOpcode::kSort || opcode == HloOpcode::kConditional) {
continue;
}
TF_RET_CHECK(hlo->called_computations().empty()) << hlo->ToString();
bool nullary = hlo->operands().empty();
bool wrong_element_type = hlo->shape().element_type() == eliminate_type_;
bool should_eliminate_type = (nullary && wrong_element_type) ||
HasOperandType(hlo, eliminate_type_);
if (!should_eliminate_type) {
TF_RET_CHECK(hlo->shape().element_type() != eliminate_type_);
continue;
}
std::vector<HloInstruction*> new_operands;
const auto& operands = hlo->operands();
new_operands.reserve(operands.size());
for (HloInstruction* operand : operands) {
if (operand->shape().element_type() == eliminate_type_) {
operand = ToElementType(operand, replace_with_type_);
}
new_operands.push_back(operand);
}
HloInstruction* new_hlo;
if (hlo->shape().element_type() == eliminate_type_) {
Shape shape =
ShapeUtil::ChangeElementType(hlo->shape(), replace_with_type_);
new_hlo = computation->AddInstruction(
hlo->CloneWithNewOperands(shape, new_operands, &context));
TF_RETURN_IF_ERROR(new_hlo->CopyAllControlDepsFrom(hlo));
new_hlo = ToElementType(new_hlo, eliminate_type_);
} else if (hlo->shape().IsTuple()) {
Shape old_shape = hlo->shape();
Shape new_shape = GetConvertedTupleShape(hlo->shape(), eliminate_type_,
replace_with_type_);
new_hlo = computation->AddInstruction(
hlo->CloneWithNewOperands(new_shape, new_operands, &context));
TF_RETURN_IF_ERROR(new_hlo->CopyAllControlDepsFrom(hlo));
new_hlo = ConvertTupleElements(new_hlo, old_shape);
} else {
new_hlo = computation->AddInstruction(
hlo->CloneWithNewOperands(hlo->shape(), new_operands, &context));
TF_RETURN_IF_ERROR(new_hlo->CopyAllControlDepsFrom(hlo));
}
TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWith(new_hlo));
TF_RETURN_IF_ERROR(hlo->DropAllControlDeps());
TF_RETURN_IF_ERROR(computation->RemoveInstruction(hlo));
changed = true;
}
}
XLA_VLOG_LINES(
2, "HloElementTypeConverter::Run(), after:\n" + module->ToString());
return changed;
}
} | #include "xla/service/hlo_element_type_converter.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
using ::testing::Contains;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::Not;
using ::testing::ResultOf;
using HloElementTypeConverterTest = HloTestBase;
TEST_F(HloElementTypeConverterTest, CustomCallsNotConverted) {
const std::string& hlo_string = R"(
HloModule custom_call
ENTRY CustomCall {
constant = bf16[1]{0} constant({12345})
ROOT custom-call = bf16[1,2,3]{0,2,1} custom-call(constant),
custom_call_target="foo"
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
HloElementTypeConverter type_converter(BF16, F32);
TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get()));
EXPECT_FALSE(converted);
}
TEST_F(HloElementTypeConverterTest, InfeedsOutfeedsNotConverted) {
const std::string& hlo_string = R"(
HloModule InfeedOutfeed
ENTRY RoundTrip16MiBR1.v2 {
token0 = token[] after-all()
infeed = (bf16[4]{0}, token[]) infeed(token0)
ROOT infeed.data = bf16[4]{0} get-tuple-element(infeed), index=0
outfeed = token[] outfeed(infeed.data, token0)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
HloElementTypeConverter type_converter(BF16, F32);
TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get()));
EXPECT_FALSE(converted);
}
TEST_F(HloElementTypeConverterTest, OperationsInNestedTuplesConverted) {
const std::string& hlo_string = R"(
HloModule NestedTuples
ENTRY NestedTuples.v5 {
constant.2 = f32[2]{0} constant({1, 2})
constant.3 = bf16[2]{0} constant({42, 42})
add = bf16[2]{0} add(constant.2, constant.3)
tuple = (f32[2]{0}, bf16[2]{0}) tuple(constant.2, add)
constant.5 = bf16[2]{0} constant({22, 44})
ROOT tuple.1 = ((f32[2]{0}, bf16[2]{0}), bf16[2]{0}) tuple(tuple, constant.5)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
HloElementTypeConverter type_converter(BF16, F32);
TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get()));
EXPECT_TRUE(converted);
const HloInstruction* bf16_op =
module->entry_computation()->root_instruction()->operand(0)->operand(1);
EXPECT_THAT(bf16_op, op::Convert(op::Add(op::Constant(), op::Convert())));
}
TEST_F(HloElementTypeConverterTest, BatchNormGradBF16Converted) {
const std::string& hlo_string = R"(
HloModule BatchNormGrad
ENTRY BatchNormGrad.v6 {
constant.4 = bf16[2,2,2,1]{3,2,1,0} constant({ {
{ {0}, {0} }, { {0}, {0} } }, { { {0},
{0} }, { {0}, {0} } } })
constant.5 = bf16[2]{0} constant({1, 1})
constant.6 = bf16[2]{0} constant({0, 0})
constant.7 = bf16[2]{0} constant({1, 1})
constant.8 = bf16[2,2,2,1]{3,2,1,0} constant({ {
{ {1}, {2} }, { {3}, {4} } }, { {
{5}, {6} }, { {7}, {8} } } })
ROOT batch-norm-grad = (bf16[2,2,2,1]{3,2,1,0}, bf16[2]{0}, bf16[2]{0})
batch-norm-grad(constant.4, constant.5, constant.6, constant.7,
constant.8), epsilon=0, feature_index=2
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
HloElementTypeConverter type_converter(BF16, F32);
TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get()));
EXPECT_TRUE(converted);
const HloInstruction* tuple_instr =
module->entry_computation()->root_instruction();
::testing::Matcher<const ::xla::HloInstruction*> batch_norm =
op::BatchNormGrad();
EXPECT_THAT(tuple_instr,
op::Tuple(op::Convert(op::GetTupleElement(batch_norm, 0)),
op::Convert(op::GetTupleElement(batch_norm, 1)),
op::Convert(op::GetTupleElement(batch_norm, 2))));
}
TEST_F(HloElementTypeConverterTest, RngIsRemoved) {
const std::string& hlo_string = R"(
HloModule RngIsRemoved
ENTRY main {
constant.3 = bf16[] constant(0)
constant.4 = bf16[] constant(1)
ROOT rng = bf16[1,1000,20]{2,1,0} rng(constant.3, constant.4), distribution=rng_uniform
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
HloElementTypeConverter type_converter(BF16, F32);
TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get()));
EXPECT_TRUE(converted);
HloPredicate is_bf16_rng = [](const HloInstruction* inst) {
return inst->shape().element_type() == BF16 &&
inst->opcode() == HloOpcode::kRng;
};
EXPECT_THAT(module->entry_computation()->instructions(),
Not(Contains(ResultOf(is_bf16_rng, Eq(true)))));
}
TEST_F(HloElementTypeConverterTest, RngCtrlDep) {
const std::string& hlo_string = R"(
HloModule RngIsRemoved
ENTRY main {
constant.3 = bf16[] constant(0)
constant.4 = bf16[] constant(1)
rng0 = bf16[1,2000,20]{2,1,0} rng(constant.3, constant.4), distribution=rng_uniform
ROOT rng1 = bf16[1,1000,20]{2,1,0} rng(constant.3, constant.4), control-predecessors={%rng0}, distribution=rng_uniform
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
HloElementTypeConverter type_converter(BF16, F32);
TF_ASSERT_OK_AND_ASSIGN(bool converted, type_converter.Run(module.get()));
EXPECT_TRUE(converted);
HloInstruction *rng0, *rng1;
for (auto* inst : module->entry_computation()->instructions()) {
if (inst->opcode() == HloOpcode::kRng) {
const Shape& shape = inst->shape();
ASSERT_EQ(shape.dimensions_size(), 3);
ASSERT_TRUE(shape.dimensions(1) == 2000 || shape.dimensions(1) == 1000);
if (shape.dimensions(1) == 2000) {
rng0 = inst;
} else {
rng1 = inst;
}
}
}
EXPECT_THAT(rng0->control_successors(), ElementsAre(rng1));
EXPECT_THAT(rng1->control_predecessors(), ElementsAre(rng0));
}
TEST_F(HloElementTypeConverterTest, BitcastConvertIsUnmodified) {
const std::string& hlo_string = R"(
HloModule test
ENTRY test {
p = bf16[] parameter(0)
ROOT c = u16[] bitcast-convert(p)
})";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
HloElementTypeConverter converter(BF16, F32);
TF_ASSERT_OK_AND_ASSIGN(bool converted, RunHloPass(&converter, module.get()));
EXPECT_FALSE(converted);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_element_type_converter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_element_type_converter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
59953132-5105-4aa4-93e8-9e703673d57a | cpp | tensorflow/tensorflow | flat_map_dataset_op | tensorflow/core/kernels/data/flat_map_dataset_op.cc | tensorflow/core/kernels/data/flat_map_dataset_op_test.cc | #include "tensorflow/core/kernels/data/flat_map_dataset_op.h"
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/data/captured_function.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/flat_map_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/dataset_options.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/random/random.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
constexpr const char* const FlatMapDatasetOp::kDatasetType;
constexpr const char* const FlatMapDatasetOp::kInputDataset;
constexpr const char* const FlatMapDatasetOp::kOtherArguments;
constexpr const char* const FlatMapDatasetOp::kFunc;
constexpr const char* const FlatMapDatasetOp::kTarguments;
constexpr const char* const FlatMapDatasetOp::kOutputTypes;
constexpr const char* const FlatMapDatasetOp::kOutputShapes;
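// Upper bound on the input cardinality for which this op supports global
// shuffling via random indexing.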
constexpr int64_t kMaxRandomIndexingCardinality = 100;
constexpr char kCycleLength[] = "cycle_length";
constexpr char kElementIndex[] = "element_index";
constexpr char kInputsSize[] = "inputs_size";
constexpr char kInputs[] = "inputs";
constexpr char kCurrentElementIteratorUninitialized[] =
"current_element_iterator_uninitialized";
constexpr char kExhausted[] = "exhausted";
class FlatMapDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input,
std::unique_ptr<CapturedFunction> captured_func,
const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes)
: DatasetBase(DatasetContext(ctx)),
input_(input),
captured_func_(std::move(captured_func)),
output_types_(output_types),
output_shapes_(output_shapes),
random_access_handler_(ctx, input, *captured_func_) {
input_->Ref();
random_indexing_compatible_ = input_->RandomIndexingCompatible();
if (random_indexing_compatible_.ok() &&
input_->Cardinality() > kMaxRandomIndexingCardinality) {
random_indexing_compatible_ = absl::FailedPreconditionError(
absl::StrCat("The cardinality of the input to ", type_string(),
" is too large to support global shuffling. It is ",
input_->Cardinality(), ", which is greater than ",
kMaxRandomIndexingCardinality));
}
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override { return output_types_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
if (options.compute_level() <
CardinalityOptions::CARDINALITY_COMPUTE_MODERATE) {
return kUnknownCardinality;
}
absl::StatusOr<int64_t> cardinality = random_access_handler_.Cardinality();
if (!cardinality.ok()) {
LOG(ERROR) << "Unable to compute cardinality for dataset "
<< DebugString() << " due to error: " << cardinality.status();
return kUnknownCardinality;
}
return *cardinality;
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
TF_RETURN_IF_ERROR(captured_func_->CheckExternalState());
return input_->CheckExternalState();
}
absl::Status RandomIndexingCompatible() const override {
return absl::UnimplementedError(
"Please consider applying maps on each dataset, concatenating them "
"into "
"one dataset and apply global shuffle dataset op onto the "
"dataset to achieve the same result as flat map with global "
"shuffling.");
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
std::vector<Node*> other_arguments;
DataTypeVector other_arguments_types;
TF_RETURN_IF_ERROR(captured_func_->AddToGraph(ctx, b, &other_arguments,
&other_arguments_types));
AttrValue f;
b->BuildAttrValue(captured_func_->func(), &f);
AttrValue other_arguments_types_attr;
b->BuildAttrValue(other_arguments_types, &other_arguments_types_attr);
TF_RETURN_IF_ERROR(b->AddDataset(
this, {std::make_pair(0, input_graph_node)},
{std::make_pair(1, other_arguments)},
{std::make_pair(kFunc, f),
std::make_pair(kTarguments, other_arguments_types_attr)},
output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
input_ckpt_ = std::make_unique<MemoryCheckpoint>(ctx->id_registry());
TF_RETURN_IF_ERROR(
dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_));
return dataset()->captured_func_->Instantiate(
ctx, &instantiated_captured_func_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
if (ctx->index_mapper()) {
return Get(ctx, out_tensors, end_of_sequence);
}
mutex_lock l(mu_);
do {
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
if (current_element_iterator_) {
bool end_of_element;
auto nested_ctx = MakeNestedIteratorContext(ctx);
TF_RETURN_IF_ERROR(current_element_iterator_->GetNext(
&nested_ctx, out_tensors, &end_of_element));
ctx->MergeCheckpoint(nested_ctx.checkpoint());
if (!end_of_element) {
*end_of_sequence = false;
return absl::OkStatus();
}
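// The current element is exhausted: commit the pending input checkpoint
// and discard the nested iterator's state.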
ctx->MergeCheckpoint(input_ckpt_.get());
ctx->PurgeCheckpoint(current_element_iterator_->prefix());
current_element_iterator_.reset();
}
inputs_.clear();
auto input_ctx = std::make_unique<IteratorContext>(*ctx);
TF_RETURN_IF_ERROR(
input_impl_->GetNext(input_ctx.get(), &inputs_, end_of_sequence));
input_ckpt_->Merge(input_ctx->checkpoint());
if (*end_of_sequence) {
input_impl_.reset();
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(
BuildCurrentElementIteratorLocked(ctx, /*is_get_next=*/true));
} while (true);
}
Status SkipInternal(IteratorContext* ctx, int num_to_skip,
bool* end_of_sequence, int* num_skipped) override {
mutex_lock l(mu_);
*num_skipped = 0;
while (*num_skipped < num_to_skip) {
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
if (current_element_iterator_) {
bool end_of_element;
auto nested_ctx = MakeNestedIteratorContext(ctx);
int last_num_skipped;
TF_RETURN_IF_ERROR(current_element_iterator_->Skip(
&nested_ctx, num_to_skip - *num_skipped, &end_of_element,
&last_num_skipped));
*num_skipped += last_num_skipped;
ctx->MergeCheckpoint(nested_ctx.checkpoint());
if (!end_of_element) {
if (*num_skipped != num_to_skip) {
return absl::InternalError(absl::StrFormat(
"Expected `num_skipped` and `num_to_skip` to be the same. Got"
" %d(num_skipped) and %d(num_to_skip)",
*num_skipped, num_to_skip));
}
continue;
}
ctx->MergeCheckpoint(input_ckpt_.get());
ctx->PurgeCheckpoint(current_element_iterator_->prefix());
current_element_iterator_.reset();
}
inputs_.clear();
auto input_ctx = std::make_unique<IteratorContext>(*ctx);
TF_RETURN_IF_ERROR(
input_impl_->GetNext(input_ctx.get(), &inputs_, end_of_sequence));
input_ckpt_->Merge(input_ctx->checkpoint());
if (*end_of_sequence) {
input_impl_.reset();
*end_of_sequence = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(
BuildCurrentElementIteratorLocked(ctx, /*is_get_next=*/false));
}
*end_of_sequence = false;
return absl::OkStatus();
}
absl::Status Get(IteratorContext* ctx, std::vector<Tensor>* out_tensors,
bool* end_of_sequence) TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
TF_ASSIGN_OR_RETURN(size_t parent_index,
ctx->index_mapper()(element_count_));
FlatMapRandomAccessHandler& random_access =
dataset()->random_access_handler_;
absl::StatusOr<int64_t> dataset_index =
random_access.GetDatasetIndex(parent_index);
if (absl::IsOutOfRange(dataset_index.status())) {
*end_of_sequence = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(dataset_index.status());
if (dataset_iterators_.empty()) {
TF_ASSIGN_OR_RETURN(
dataset_iterators_,
random_access.MakeInputIterators(ctx, this, prefix()));
next_positions_.resize(dataset_iterators_.size(), 0);
input_element_counts_.resize(dataset_iterators_.size(), 0);
}
IteratorContext::Params params(ctx);
params.index_mapper =
GetFlatMapIndexMapper(ctx->index_mapper(), *dataset_index);
IteratorContext global_shuffle_ctx(std::move(params));
TF_RETURN_IF_ERROR(dataset_iterators_[*dataset_index]->GetNext(
&global_shuffle_ctx, out_tensors, end_of_sequence));
ctx->MergeCheckpoint(global_shuffle_ctx.checkpoint());
++element_count_;
++input_element_counts_[*dataset_index];
return absl::OkStatus();
}
IndexMapperFn GetFlatMapIndexMapper(IndexMapperFn parent_index_mapper,
size_t input_dataset_index)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
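// Maps a position in the globally shuffled output back to an element
// offset within the input dataset at `input_dataset_index` by scanning
// shuffled parent indices until one falls into that dataset.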
absl::StatusOr<int64_t> cardinality =
dataset()->random_access_handler_.Cardinality();
return [this, parent_index_mapper = std::move(parent_index_mapper),
input_dataset_index, cardinality = std::move(cardinality)](
size_t element_position) -> absl::StatusOr<size_t> {
if (!cardinality.ok() || *cardinality < 0) {
return absl::FailedPreconditionError(
"Global shuffling requires finite cardinalities.");
}
FlatMapRandomAccessHandler& random_access =
dataset()->random_access_handler_;
while (next_positions_[input_dataset_index] < *cardinality) {
size_t index = next_positions_[input_dataset_index];
if (parent_index_mapper != nullptr) {
TF_ASSIGN_OR_RETURN(index, parent_index_mapper(index));
}
++next_positions_[input_dataset_index];
TF_ASSIGN_OR_RETURN(int64_t shuffled_dataset_index,
random_access.GetDatasetIndex(index));
if (input_dataset_index == shuffled_dataset_index) {
if (input_dataset_index > 0) {
TF_ASSIGN_OR_RETURN(
int64_t cumulative_cardinality,
random_access.CumulativeCardinality(input_dataset_index - 1));
index -= cumulative_cardinality;
}
return index;
}
}
return *cardinality;
};
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeInterleaveManyNode(
std::move(args),
{model::MakeNonTunableParameter(kCycleLength, 1)});
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override
TF_LOCKS_EXCLUDED(mu_) {
TF_RETURN_IF_ERROR(ctx->HandleCheckExternalStateStatus(
dataset()->captured_func_->CheckExternalState()));
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kExhausted, static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kElementIndex, element_index_));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kCurrentElementIteratorUninitialized,
static_cast<int64_t>(!current_element_iterator_)));
if (current_element_iterator_ && !ctx->symbolic_checkpoint()) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kInputsSize, inputs_.size()));
for (int i = 0; i < inputs_.size(); i++) {
TF_RETURN_IF_ERROR(writer->WriteTensor(
prefix(), strings::StrCat(kInputs, "[", i, "]"), inputs_[i]));
}
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, current_element_iterator_));
}
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override
TF_LOCKS_EXCLUDED(mu_) {
if (ctx->restored_element_count().has_value()) {
return RestoreForGlobalShuffle(ctx, reader);
}
mutex_lock l(mu_);
input_impl_.reset();
element_index_ = 0;
current_element_iterator_.reset();
inputs_.clear();
int64_t input_exhausted;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kExhausted, &input_exhausted));
if (!static_cast<bool>(input_exhausted)) {
TF_RETURN_IF_ERROR(
dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_));
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
{
int64_t temp;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kElementIndex, &temp));
element_index_ = temp;
}
int64_t current_element_iterator_uninitialized;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kCurrentElementIteratorUninitialized,
&current_element_iterator_uninitialized));
if (!static_cast<bool>(current_element_iterator_uninitialized)) {
TF_RETURN_IF_ERROR(RestoreCurrentElementIterator(ctx, reader));
}
}
return absl::OkStatus();
}
Status RestoreForGlobalShuffle(IteratorContext* ctx,
IteratorStateReader* reader)
TF_LOCKS_EXCLUDED(mu_) {
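// Replay the index mapping for the first `element_count_` positions to
// reconstruct how many elements each input iterator has produced, then
// restore every nested iterator with its own restored element count.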
mutex_lock l(mu_);
element_count_ = *ctx->restored_element_count();
FlatMapRandomAccessHandler& random_access =
dataset()->random_access_handler_;
TF_ASSIGN_OR_RETURN(int64_t cardinality, random_access.Cardinality());
if (dataset_iterators_.empty()) {
TF_ASSIGN_OR_RETURN(
dataset_iterators_,
random_access.MakeInputIterators(ctx, this, prefix()));
}
input_element_counts_.resize(dataset_iterators_.size(), 0);
next_positions_.resize(dataset_iterators_.size(), 0);
std::fill(input_element_counts_.begin(), input_element_counts_.end(), 0);
std::fill(next_positions_.begin(), next_positions_.end(), 0);
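// Replay the index mapper for every element produced so far to reconstruct
// how many elements each input dataset has contributed and where each
// per-input scan should resume.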
for (size_t count = 0; count < element_count_ && count < cardinality;
++count) {
TF_ASSIGN_OR_RETURN(size_t parent_index, ctx->index_mapper()(count));
absl::StatusOr<size_t> dataset_index =
random_access.GetDatasetIndex(parent_index);
if (absl::IsOutOfRange(dataset_index.status())) {
break;
}
TF_RETURN_IF_ERROR(dataset_index.status());
++input_element_counts_[*dataset_index];
next_positions_[*dataset_index] = count + 1;
}
for (size_t i = 0; i < dataset_iterators_.size(); ++i) {
IteratorContext::Params params(ctx);
params.restored_element_count = input_element_counts_[i];
IteratorContext ctx_copy(std::move(params));
TF_RETURN_IF_ERROR(
RestoreInput(&ctx_copy, reader, dataset_iterators_[i]));
ctx->MergeCheckpoint(ctx_copy.checkpoint());
}
return absl::OkStatus();
}
private:
Status BuildCurrentElementIteratorLocked(IteratorContext* ctx,
bool is_get_next)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
std::shared_ptr<model::Node> node = is_get_next ? model_node() : nullptr;
return MakeIteratorFromInputElement(
ctx, this, inputs_, element_index_++, *instantiated_captured_func_,
prefix(), &current_element_iterator_, node);
}
Status RestoreCurrentElementIterator(IteratorContext* ctx,
IteratorStateReader* reader)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (ctx->symbolic_checkpoint()) {
return RestoreCurrentElementIteratorSymbolic(ctx, reader);
}
size_t inputs_size;
{
int64_t temp;
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kInputsSize, &temp));
inputs_size = static_cast<size_t>(temp);
}
inputs_.reserve(inputs_size);
for (int i = 0; i < inputs_size; i++) {
inputs_.emplace_back();
TF_RETURN_IF_ERROR(reader->ReadTensor(
ctx->flr(), prefix(), strings::StrCat(kInputs, "[", i, "]"),
&inputs_.back()));
}
element_index_--;
TF_RETURN_IF_ERROR(
BuildCurrentElementIteratorLocked(ctx, false));
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, current_element_iterator_));
return absl::OkStatus();
}
Status RestoreCurrentElementIteratorSymbolic(IteratorContext* ctx,
IteratorStateReader* reader)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
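// The inputs_ tensors were not written to the checkpoint; pull the current
// element again from the (assumed deterministic) input pipeline and rebuild
// the inner iterator from it.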
bool end_of_sequence;
auto input_ctx = std::make_unique<IteratorContext>(*ctx);
TF_RETURN_IF_ERROR(
input_impl_->GetNext(input_ctx.get(), &inputs_, &end_of_sequence));
if (end_of_sequence) {
return absl::FailedPreconditionError(
"Unexpected end of sequence while symbolically restoring "
"FlatMapDataset. Please verify that the input produces data "
"deterministically.");
}
input_ckpt_->Merge(input_ctx->checkpoint());
element_index_--;
TF_RETURN_IF_ERROR(
BuildCurrentElementIteratorLocked(ctx, false));
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, current_element_iterator_));
return absl::OkStatus();
}
mutex mu_;
size_t element_index_ TF_GUARDED_BY(mu_) = 0;
std::unique_ptr<MemoryCheckpoint> input_ckpt_ TF_GUARDED_BY(mu_);
std::vector<Tensor> inputs_ TF_GUARDED_BY(mu_);
std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func_;
size_t element_count_ TF_GUARDED_BY(mu_) = 0;
std::vector<int64_t> input_element_counts_ TF_GUARDED_BY(mu_);
std::vector<size_t> next_positions_;
std::vector<std::unique_ptr<IteratorBase>> dataset_iterators_
TF_GUARDED_BY(mu_);
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
std::unique_ptr<IteratorBase> current_element_iterator_ TF_GUARDED_BY(mu_);
};
const DatasetBase* const input_;
const std::unique_ptr<CapturedFunction> captured_func_;
const DataTypeVector output_types_;
const std::vector<PartialTensorShape> output_shapes_;
absl::Status random_indexing_compatible_ = absl::OkStatus();
mutable FlatMapRandomAccessHandler random_access_handler_;
};
FlatMapDatasetOp::FlatMapDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx), graph_def_version_(ctx->graph_def_version()) {
OP_REQUIRES_OK(ctx, FunctionMetadata::Create(ctx, kFunc, {},
&func_metadata_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
}
void FlatMapDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
std::unique_ptr<CapturedFunction> captured_func;
OP_REQUIRES_OK(ctx,
CapturedFunction::Create(ctx, func_metadata_, kOtherArguments,
&captured_func));
*output = new Dataset(ctx, input, std::move(captured_func), output_types_,
output_shapes_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("FlatMapDataset").Device(DEVICE_CPU),
FlatMapDatasetOp);
REGISTER_INPUT_COLOCATION_EXEMPTION("FlatMapDataset");
}
}
} | #include "tensorflow/core/kernels/data/flat_map_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "flat_map_dataset";
class FlatMapDatasetParams : public DatasetParams {
public:
template <typename T>
FlatMapDatasetParams(T input_dataset_params,
std::vector<Tensor> other_arguments,
FunctionDefHelper::AttrValueWrapper func,
std::vector<FunctionDef> func_lib,
DataTypeVector type_arguments,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
other_arguments_(std::move(other_arguments)),
func_(std::move(func)),
func_lib_(std::move(func_lib)),
type_arguments_(std::move(type_arguments)) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
return other_arguments_;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->emplace_back(FlatMapDatasetOp::kInputDataset);
for (int i = 0; i < other_arguments_.size(); ++i) {
input_names->emplace_back(
absl::StrCat(FlatMapDatasetOp::kOtherArguments, "_", i));
}
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{"f", func_},
{"Targuments", type_arguments_},
{"output_shapes", output_shapes_},
{"output_types", output_dtypes_},
{"metadata", ""}};
return absl::OkStatus();
}
string dataset_type() const override {
return FlatMapDatasetOp::kDatasetType;
}
std::vector<FunctionDef> func_lib() const override { return func_lib_; }
private:
std::vector<Tensor> other_arguments_;
FunctionDefHelper::AttrValueWrapper func_;
std::vector<FunctionDef> func_lib_;
DataTypeVector type_arguments_;
};
class FlatMapDatasetOpTest : public DatasetOpsTestBase {};
FlatMapDatasetParams FlatMapDatasetParams1() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
auto func = FunctionDefHelper::FunctionRef(
"MakeTensorSliceDataset",
{{"Toutput_types", DataTypeVector({DT_INT64})},
{"output_shapes",
std::vector<PartialTensorShape>({PartialTensorShape({1})})}});
return FlatMapDatasetParams(
std::move(tensor_slice_dataset_params),
{},
func,
{test::function::MakeTensorSliceDataset()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
FlatMapDatasetParams InvalidFlatMapDatasetParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8})},
"tensor_slice");
auto func = FunctionDefHelper::FunctionRef( "NonZero",
{{"T", DT_INT64}});
return FlatMapDatasetParams(std::move(tensor_slice_dataset_params),
{},
func,
{test::function::NonZero()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
std::vector<GetNextTestCase<FlatMapDatasetParams>> GetNextTestCases() {
return {
{FlatMapDatasetParams1(),
CreateTensors<int64_t>(TensorShape({1}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}})}};
}
ITERATOR_GET_NEXT_TEST_P(FlatMapDatasetOpTest, FlatMapDatasetParams,
GetNextTestCases())
std::vector<SkipTestCase<FlatMapDatasetParams>> SkipTestCases() {
return {{FlatMapDatasetParams1(),
2, 2, true,
CreateTensors<int64_t>(TensorShape({1}), {{2}})},
{FlatMapDatasetParams1(),
4, 4, true,
CreateTensors<int64_t>(TensorShape({1}), {{4}})},
{FlatMapDatasetParams1(),
9, 9, false},
{FlatMapDatasetParams1(),
10, 9, false}};
}
ITERATOR_SKIP_TEST_P(FlatMapDatasetOpTest, FlatMapDatasetParams,
SkipTestCases())
TEST_F(FlatMapDatasetOpTest, DatasetNodeName) {
auto dataset_params = FlatMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(FlatMapDatasetOpTest, DatasetTypeString) {
auto dataset_params = FlatMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(FlatMapDatasetOp::kDatasetType)));
}
TEST_F(FlatMapDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = FlatMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes(dataset_params.output_dtypes()));
}
TEST_F(FlatMapDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = FlatMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes(dataset_params.output_shapes()));
}
TEST_F(FlatMapDatasetOpTest, Cardinality) {
auto dataset_params = FlatMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetCardinality(kUnknownCardinality));
}
TEST_F(FlatMapDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = FlatMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes(dataset_params.output_dtypes()));
}
TEST_F(FlatMapDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = FlatMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes(dataset_params.output_shapes()));
}
TEST_F(FlatMapDatasetOpTest, IteratorPrefix) {
auto dataset_params = FlatMapDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
FlatMapDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<FlatMapDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {
{FlatMapDatasetParams1(),
{0, 4, 11},
CreateTensors<int64_t>(TensorShape({1}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(FlatMapDatasetOpTest, FlatMapDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(FlatMapDatasetOpTest, InvalidMapFunc) {
auto dataset_params = InvalidFlatMapDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
EXPECT_EQ(
iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence)
.code(),
absl::StatusCode::kInvalidArgument);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/flat_map_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/flat_map_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2f0470b6-f9db-44fe-a6aa-ed8d231c295a | cpp | tensorflow/tensorflow | validate | tensorflow/core/graph/validate.cc | tensorflow/core/graph/validate_test.cc | #include "tensorflow/core/graph/validate.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace graph {
Status ValidateGraphDef(const GraphDef& graph_def,
const OpRegistryInterface& op_registry) {
Status s;
const int version = graph_def.versions().producer();
for (const NodeDef& node_def : graph_def.node()) {
const OpDef* op_def;
TF_RETURN_IF_ERROR(op_registry.LookUpOpDef(node_def.op(), &op_def));
TF_RETURN_IF_ERROR(ValidateNodeDef(node_def, *op_def));
TF_RETURN_IF_ERROR(CheckOpDeprecation(*op_def, version));
}
return s;
}
Status ValidateGraphDefAgainstOpRegistry(
const GraphDef& graph_def, const OpRegistryInterface& op_registry) {
GraphDef copy(graph_def);
TF_RETURN_IF_ERROR(AddDefaultAttrsToGraphDef(&copy, op_registry, 0));
return ValidateGraphDef(copy, op_registry);
}
Status ValidateGraphDefAgainstOpList(const GraphDef& graph_def,
const OpList& op_list) {
OpListOpRegistry registry(&op_list);
return ValidateGraphDefAgainstOpRegistry(graph_def, registry);
}
void GetOpListForValidation(OpList* op_list, const OpRegistry& op_registry) {
op_registry.Export(false, op_list);
RemoveDescriptionsFromOpList(op_list);
}
Status ValidateGraphHasNoCycle(const Graph& graph) {
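// Kahn's algorithm: repeatedly retire nodes whose inputs are all satisfied.
// Merge nodes do not wait on NextIteration back-edges, so well-formed
// while-loop graphs are not reported as cycles.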
std::vector<const Node*> ready;
std::vector<int> pending_count(graph.num_node_ids(), 0);
for (int i = 0; i < graph.num_node_ids(); ++i) {
const Node* n = graph.FindNodeId(i);
if (n == nullptr) continue;
pending_count[i] = n->in_edges().size();
if (n->IsMerge()) {
for (const Edge* e : n->in_edges()) {
if (!e->IsControlEdge() && e->src()->IsNextIteration()) {
pending_count[i]--;
}
}
}
if (pending_count[i] == 0) {
ready.push_back(n);
}
}
int processed = 0;
while (!ready.empty()) {
const Node* node = ready.back();
ready.pop_back();
++processed;
for (const Edge* out : node->out_edges()) {
const int output_id = out->dst()->id();
pending_count[output_id]--;
if (pending_count[output_id] == 0) {
ready.push_back(out->dst());
}
}
}
if (processed < graph.num_nodes()) {
std::vector<string> nodes_in_cycle;
for (int i = 0; i < pending_count.size() && nodes_in_cycle.size() < 3;
++i) {
if (pending_count[i] != 0) {
nodes_in_cycle.push_back(graph.FindNodeId(i)->name());
}
}
return errors::InvalidArgument(
"Graph is invalid, contains a cycle with ",
graph.num_nodes() - processed,
" nodes, including: ", absl::StrJoin(nodes_in_cycle, ", "));
}
return absl::OkStatus();
}
Status VerifyNoDuplicateNodeNames(const GraphDef& graph) {
absl::flat_hash_set<absl::string_view> nodes;
for (const auto& node : graph.node()) {
if (nodes.contains(node.name())) {
return errors::AlreadyExists("Node already exists: ", node.name());
}
nodes.insert(node.name());
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/graph/validate.h"
#include <string>
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
REGISTER_OP("FloatInput").Output("o: float");
REGISTER_OP("Int32Input").Output("o: int32");
TEST(ValidateGraphDefTest, TestValidGraph) {
const string graph_def_str =
"node { name: 'A' op: 'FloatInput' }"
"node { name: 'B' op: 'FloatInput' }"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }";
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;
TF_ASSERT_OK(graph::ValidateGraphDef(graph_def, *OpRegistry::Global()));
}
TEST(ValidateGraphDefTest, GraphWithUnspecifiedDefaultAttr) {
const string graph_def_str =
"node { name: 'A' op: 'FloatInput' }"
"node { name: 'B' op: 'Int32Input' }"
"node { "
" name: 'C' op: 'Sum' "
" attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] "
"}";
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;
Status s = graph::ValidateGraphDef(graph_def, *OpRegistry::Global());
EXPECT_FALSE(s.ok());
EXPECT_TRUE(absl::StrContains(s.ToString(), "NodeDef missing attr"));
TF_ASSERT_OK(AddDefaultAttrsToGraphDef(&graph_def, *OpRegistry::Global(), 0));
TF_ASSERT_OK(graph::ValidateGraphDef(graph_def, *OpRegistry::Global()));
}
TEST(ValidateGraphDefTest, GraphWithUnspecifiedRequiredAttr) {
const string graph_def_str =
"node { name: 'A' op: 'FloatInput' }"
"node { "
" name: 'B' op: 'Cast' "
" attr { key: 'SrcT' value { type: DT_FLOAT } }"
" input: ['A'] "
"}";
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;
Status s = graph::ValidateGraphDef(graph_def, *OpRegistry::Global());
EXPECT_FALSE(s.ok());
EXPECT_TRUE(absl::StrContains(s.ToString(), "NodeDef missing attr"));
TF_ASSERT_OK(AddDefaultAttrsToGraphDef(&graph_def, *OpRegistry::Global(), 0));
s = graph::ValidateGraphDef(graph_def, *OpRegistry::Global());
EXPECT_FALSE(s.ok());
EXPECT_TRUE(absl::StrContains(s.ToString(), "NodeDef missing attr"));
}
TEST(ValidateGraphDefAgainstOpListTest, GraphWithOpOnlyInOpList) {
OpRegistrationData op_reg_data;
TF_ASSERT_OK(OpDefBuilder("UniqueSnowflake").Finalize(&op_reg_data));
OpList op_list;
*op_list.add_op() = op_reg_data.op_def;
const string graph_def_str = "node { name: 'A' op: 'UniqueSnowflake' }";
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;
TF_ASSERT_OK(graph::ValidateGraphDefAgainstOpList(graph_def, op_list));
}
TEST(ValidateGraphDefAgainstOpListTest, GraphWithGlobalOpNotInOpList) {
OpRegistrationData op_reg_data;
TF_ASSERT_OK(OpDefBuilder("NotAnywhere").Finalize(&op_reg_data));
OpList op_list;
*op_list.add_op() = op_reg_data.op_def;
const string graph_def_str = "node { name: 'A' op: 'FloatInput' }";
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;
ASSERT_FALSE(graph::ValidateGraphDefAgainstOpList(graph_def, op_list).ok());
}
REGISTER_OP("HasDocs").Doc("This is in the summary.");
TEST(GetOpListForValidationTest, ShouldStripDocs) {
bool found_float = false;
bool found_int32 = false;
bool found_has_docs = false;
OpList op_list;
graph::GetOpListForValidation(&op_list);
for (const OpDef& op_def : op_list.op()) {
if (op_def.name() == "FloatInput") {
EXPECT_FALSE(found_float);
found_float = true;
}
if (op_def.name() == "Int32Input") {
EXPECT_FALSE(found_int32);
found_int32 = true;
}
if (op_def.name() == "HasDocs") {
EXPECT_FALSE(found_has_docs);
found_has_docs = true;
EXPECT_TRUE(op_def.summary().empty());
}
}
EXPECT_TRUE(found_float);
EXPECT_TRUE(found_int32);
EXPECT_TRUE(found_has_docs);
}
TEST(VerifyNoDuplicateNodeNames, NoDuplicateNodeNames) {
const string graph_def_str =
"node { name: 'A' op: 'FloatInput' }"
"node { name: 'B' op: 'Int32Input' }"
"node { "
" name: 'C' op: 'Sum' "
" attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] "
"}";
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;
TF_ASSERT_OK(graph::VerifyNoDuplicateNodeNames(graph_def));
}
TEST(VerifyNoDuplicateNodeNames, DuplicateNodeNames) {
const string graph_def_str =
"node { name: 'A' op: 'FloatInput' }"
"node { name: 'A' op: 'Int32Input' }"
"node { "
" name: 'C' op: 'Sum' "
" attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'A'] "
"}";
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;
EXPECT_EQ(graph::VerifyNoDuplicateNodeNames(graph_def).code(),
tensorflow::error::ALREADY_EXISTS);
}
TEST(ValidateGraphHasNoCycleTest, NoCyclePasses) {
const string graph_def_str =
"node { name: 'A' op: 'FloatInput' }"
"node { name: 'B' op: 'FloatInput' }"
"node { name: 'C' op: 'Mul' attr { key: 'T' value { type: DT_FLOAT } }"
" input: ['A', 'B'] }";
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;
Graph graph(OpRegistry::Global());
GraphConstructorOptions opts;
TF_ASSERT_OK(ConvertGraphDefToGraph(opts, graph_def, &graph));
TF_EXPECT_OK(graph::ValidateGraphHasNoCycle(graph));
}
TEST(ValidateGraphHasNoCycleTest, NoCycleWithMergePasses) {
const string graph_def_str =
R"EOF(
node { name: 'A' op: 'FloatInput' }
node { name: 'merge' op: 'Merge' input: [ 'A:0', 'next:0' ]
attr { key: "N" value: { i: 2 } }
attr { key: "T" value: { type: DT_FLOAT } } }
node { name: 'B' op: 'Mul'
attr { key: 'T' value { type: DT_FLOAT } }
input: [ 'merge:0', 'merge:0' ] }
node { name: 'next' op: 'NextIteration' input: ['B:0']
attr { key: "T" value: { type: DT_FLOAT } } }
)EOF";
GraphDef graph_def;
auto parser = protobuf::TextFormat::Parser();
CHECK(parser.MergeFromString(graph_def_str, &graph_def)) << graph_def_str;
Graph graph(OpRegistry::Global());
GraphConstructorOptions opts;
TF_ASSERT_OK(ConvertGraphDefToGraph(opts, graph_def, &graph));
TF_EXPECT_OK(graph::ValidateGraphHasNoCycle(graph));
}
Node* AddNodeFromNodeDef(Graph& graph, const string& name,
const string& node_type, int num_inputs) {
auto builder = NodeDefBuilder(name, node_type);
for (int i = 0; i < num_inputs; ++i) {
builder = builder.Input(strings::StrCat("node_", i), i, DT_FLOAT);
}
NodeDef node_def;
TF_CHECK_OK(builder.Finalize(&node_def));
Status s;
Node* node = graph.AddNode(node_def, &s);
TF_CHECK_OK(s);
return node;
}
TEST(ValidateGraphHasNoCycleTest, CycleFails) {
Graph graph(OpRegistry::Global());
Node* a = AddNodeFromNodeDef(graph, "A", "FloatInput", 0);
Node* c = AddNodeFromNodeDef(graph, "B", "Mul", 2);
graph.AddEdge(a, 0, c, 0);
graph.AddEdge(c, 0, c, 1);
EXPECT_THAT(
graph::ValidateGraphHasNoCycle(graph),
tsl::testing::StatusIs(
tsl::error::Code::INVALID_ARGUMENT,
::testing::ContainsRegex("Graph is invalid, contains a cycle")));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/validate.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/validate_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4df1949d-aa72-4d9a-8547-771218cf3f33 | cpp | google/cel-cpp | resolver | eval/compiler/resolver.cc | eval/compiler/resolver_test.cc | #include "eval/compiler/resolver.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/nullability.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "absl/types/optional.h"
#include "base/kind.h"
#include "common/memory.h"
#include "common/type.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "internal/status_macros.h"
#include "runtime/function_overload_reference.h"
#include "runtime/function_registry.h"
#include "runtime/type_registry.h"
namespace google::api::expr::runtime {
using ::cel::Value;
Resolver::Resolver(
absl::string_view container, const cel::FunctionRegistry& function_registry,
const cel::TypeRegistry&, cel::ValueManager& value_factory,
const absl::flat_hash_map<std::string, cel::TypeRegistry::Enumeration>&
resolveable_enums,
bool resolve_qualified_type_identifiers)
: namespace_prefixes_(),
enum_value_map_(),
function_registry_(function_registry),
value_factory_(value_factory),
resolveable_enums_(resolveable_enums),
resolve_qualified_type_identifiers_(resolve_qualified_type_identifiers) {
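// Build candidate namespace prefixes from most to least specific: a
// container of "a.b" yields {"a.b.", "a.", ""}, which is the order tried
// during name resolution.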
auto container_elements = absl::StrSplit(container, '.');
std::string prefix = "";
namespace_prefixes_.push_back(prefix);
for (const auto& elem : container_elements) {
if (elem.empty()) {
continue;
}
absl::StrAppend(&prefix, elem, ".");
namespace_prefixes_.insert(namespace_prefixes_.begin(), prefix);
}
for (const auto& prefix : namespace_prefixes_) {
for (auto iter = resolveable_enums_.begin();
iter != resolveable_enums_.end(); ++iter) {
absl::string_view enum_name = iter->first;
if (!absl::StartsWith(enum_name, prefix)) {
continue;
}
auto remainder = absl::StripPrefix(enum_name, prefix);
const auto& enum_type = iter->second;
for (const auto& enumerator : enum_type.enumerators) {
auto key = absl::StrCat(remainder, !remainder.empty() ? "." : "",
enumerator.name);
enum_value_map_[key] = value_factory.CreateIntValue(enumerator.number);
}
}
}
}
std::vector<std::string> Resolver::FullyQualifiedNames(absl::string_view name,
int64_t expr_id) const {
std::vector<std::string> names;
if (absl::StartsWith(name, ".")) {
std::string fully_qualified_name = std::string(name.substr(1));
names.push_back(fully_qualified_name);
return names;
}
for (const auto& prefix : namespace_prefixes_) {
std::string fully_qualified_name = absl::StrCat(prefix, name);
names.push_back(fully_qualified_name);
}
return names;
}
absl::optional<cel::Value> Resolver::FindConstant(absl::string_view name,
int64_t expr_id) const {
auto names = FullyQualifiedNames(name, expr_id);
for (const auto& name : names) {
auto enum_entry = enum_value_map_.find(name);
if (enum_entry != enum_value_map_.end()) {
return enum_entry->second;
}
if (resolve_qualified_type_identifiers_ || !absl::StrContains(name, ".")) {
auto type_value = value_factory_.FindType(name);
if (type_value.ok() && type_value->has_value()) {
return value_factory_.CreateTypeValue(**type_value);
}
}
}
return absl::nullopt;
}
std::vector<cel::FunctionOverloadReference> Resolver::FindOverloads(
absl::string_view name, bool receiver_style,
const std::vector<cel::Kind>& types, int64_t expr_id) const {
std::vector<cel::FunctionOverloadReference> funcs;
auto names = FullyQualifiedNames(name, expr_id);
for (auto it = names.begin(); it != names.end(); it++) {
funcs = function_registry_.FindStaticOverloads(*it, receiver_style, types);
if (!funcs.empty()) {
return funcs;
}
}
return funcs;
}
std::vector<cel::FunctionRegistry::LazyOverload> Resolver::FindLazyOverloads(
absl::string_view name, bool receiver_style,
const std::vector<cel::Kind>& types, int64_t expr_id) const {
std::vector<cel::FunctionRegistry::LazyOverload> funcs;
auto names = FullyQualifiedNames(name, expr_id);
for (const auto& name : names) {
funcs = function_registry_.FindLazyOverloads(name, receiver_style, types);
if (!funcs.empty()) {
return funcs;
}
}
return funcs;
}
absl::StatusOr<absl::optional<std::pair<std::string, cel::Type>>>
Resolver::FindType(absl::string_view name, int64_t expr_id) const {
auto qualified_names = FullyQualifiedNames(name, expr_id);
for (auto& qualified_name : qualified_names) {
CEL_ASSIGN_OR_RETURN(auto maybe_type,
value_factory_.FindType(qualified_name));
if (maybe_type.has_value()) {
return std::make_pair(std::move(qualified_name), std::move(*maybe_type));
}
}
return absl::nullopt;
}
} | #include "eval/compiler/resolver.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/types/optional.h"
#include "base/type_provider.h"
#include "common/memory.h"
#include "common/type_factory.h"
#include "common/type_manager.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "common/values/legacy_value_manager.h"
#include "eval/public/cel_function.h"
#include "eval/public/cel_function_registry.h"
#include "eval/public/cel_type_registry.h"
#include "eval/public/cel_value.h"
#include "eval/public/structs/protobuf_descriptor_type_provider.h"
#include "eval/testutil/test_message.pb.h"
#include "internal/testing.h"
namespace google::api::expr::runtime {
namespace {
using ::cel::IntValue;
using ::cel::TypeFactory;
using ::cel::TypeManager;
using ::cel::TypeValue;
using ::cel::ValueManager;
using ::testing::Eq;
class FakeFunction : public CelFunction {
public:
explicit FakeFunction(const std::string& name)
: CelFunction(CelFunctionDescriptor{name, false, {}}) {}
absl::Status Evaluate(absl::Span<const CelValue> args, CelValue* result,
google::protobuf::Arena* arena) const override {
return absl::OkStatus();
}
};
class ResolverTest : public testing::Test {
public:
ResolverTest()
: value_factory_(cel::MemoryManagerRef::ReferenceCounting(),
type_registry_.GetTypeProvider()) {}
protected:
CelTypeRegistry type_registry_;
cel::common_internal::LegacyValueManager value_factory_;
};
TEST_F(ResolverTest, TestFullyQualifiedNames) {
CelFunctionRegistry func_registry;
Resolver resolver("google.api.expr", func_registry.InternalGetRegistry(),
type_registry_.InternalGetModernRegistry(), value_factory_,
type_registry_.resolveable_enums());
auto names = resolver.FullyQualifiedNames("simple_name");
std::vector<std::string> expected_names(
{"google.api.expr.simple_name", "google.api.simple_name",
"google.simple_name", "simple_name"});
EXPECT_THAT(names, Eq(expected_names));
}
TEST_F(ResolverTest, TestFullyQualifiedNamesPartiallyQualifiedName) {
CelFunctionRegistry func_registry;
Resolver resolver("google.api.expr", func_registry.InternalGetRegistry(),
type_registry_.InternalGetModernRegistry(), value_factory_,
type_registry_.resolveable_enums());
auto names = resolver.FullyQualifiedNames("expr.simple_name");
std::vector<std::string> expected_names(
{"google.api.expr.expr.simple_name", "google.api.expr.simple_name",
"google.expr.simple_name", "expr.simple_name"});
EXPECT_THAT(names, Eq(expected_names));
}
TEST_F(ResolverTest, TestFullyQualifiedNamesAbsoluteName) {
CelFunctionRegistry func_registry;
Resolver resolver("google.api.expr", func_registry.InternalGetRegistry(),
type_registry_.InternalGetModernRegistry(), value_factory_,
type_registry_.resolveable_enums());
auto names = resolver.FullyQualifiedNames(".google.api.expr.absolute_name");
EXPECT_THAT(names.size(), Eq(1));
EXPECT_THAT(names[0], Eq("google.api.expr.absolute_name"));
}
TEST_F(ResolverTest, TestFindConstantEnum) {
CelFunctionRegistry func_registry;
type_registry_.Register(TestMessage::TestEnum_descriptor());
Resolver resolver("google.api.expr.runtime.TestMessage",
func_registry.InternalGetRegistry(),
type_registry_.InternalGetModernRegistry(), value_factory_,
type_registry_.resolveable_enums());
auto enum_value = resolver.FindConstant("TestEnum.TEST_ENUM_1", -1);
ASSERT_TRUE(enum_value);
ASSERT_TRUE(enum_value->Is<IntValue>());
EXPECT_THAT(enum_value->GetInt().NativeValue(), Eq(1L));
enum_value = resolver.FindConstant(
".google.api.expr.runtime.TestMessage.TestEnum.TEST_ENUM_2", -1);
ASSERT_TRUE(enum_value);
ASSERT_TRUE(enum_value->Is<IntValue>());
EXPECT_THAT(enum_value->GetInt().NativeValue(), Eq(2L));
}
TEST_F(ResolverTest, TestFindConstantUnqualifiedType) {
CelFunctionRegistry func_registry;
Resolver resolver("cel", func_registry.InternalGetRegistry(),
type_registry_.InternalGetModernRegistry(), value_factory_,
type_registry_.resolveable_enums());
auto type_value = resolver.FindConstant("int", -1);
EXPECT_TRUE(type_value);
EXPECT_TRUE(type_value->Is<TypeValue>());
EXPECT_THAT(type_value->GetType().name(), Eq("int"));
}
TEST_F(ResolverTest, TestFindConstantFullyQualifiedType) {
google::protobuf::LinkMessageReflection<TestMessage>();
CelFunctionRegistry func_registry;
type_registry_.RegisterTypeProvider(
std::make_unique<ProtobufDescriptorProvider>(
google::protobuf::DescriptorPool::generated_pool(),
google::protobuf::MessageFactory::generated_factory()));
Resolver resolver("cel", func_registry.InternalGetRegistry(),
type_registry_.InternalGetModernRegistry(), value_factory_,
type_registry_.resolveable_enums());
auto type_value =
resolver.FindConstant(".google.api.expr.runtime.TestMessage", -1);
ASSERT_TRUE(type_value);
ASSERT_TRUE(type_value->Is<TypeValue>());
EXPECT_THAT(type_value->GetType().name(),
Eq("google.api.expr.runtime.TestMessage"));
}
TEST_F(ResolverTest, TestFindConstantQualifiedTypeDisabled) {
CelFunctionRegistry func_registry;
type_registry_.RegisterTypeProvider(
std::make_unique<ProtobufDescriptorProvider>(
google::protobuf::DescriptorPool::generated_pool(),
google::protobuf::MessageFactory::generated_factory()));
Resolver resolver("", func_registry.InternalGetRegistry(),
type_registry_.InternalGetModernRegistry(), value_factory_,
type_registry_.resolveable_enums(), false);
auto type_value =
resolver.FindConstant(".google.api.expr.runtime.TestMessage", -1);
EXPECT_FALSE(type_value);
}
TEST_F(ResolverTest, FindTypeBySimpleName) {
CelFunctionRegistry func_registry;
Resolver resolver("google.api.expr.runtime",
func_registry.InternalGetRegistry(),
type_registry_.InternalGetModernRegistry(), value_factory_,
type_registry_.resolveable_enums());
type_registry_.RegisterTypeProvider(
std::make_unique<ProtobufDescriptorProvider>(
google::protobuf::DescriptorPool::generated_pool(),
google::protobuf::MessageFactory::generated_factory()));
ASSERT_OK_AND_ASSIGN(auto type, resolver.FindType("TestMessage", -1));
EXPECT_TRUE(type.has_value());
EXPECT_EQ(type->second.name(), "google.api.expr.runtime.TestMessage");
}
TEST_F(ResolverTest, FindTypeByQualifiedName) {
CelFunctionRegistry func_registry;
type_registry_.RegisterTypeProvider(
std::make_unique<ProtobufDescriptorProvider>(
google::protobuf::DescriptorPool::generated_pool(),
google::protobuf::MessageFactory::generated_factory()));
Resolver resolver("google.api.expr.runtime",
func_registry.InternalGetRegistry(),
type_registry_.InternalGetModernRegistry(), value_factory_,
type_registry_.resolveable_enums());
ASSERT_OK_AND_ASSIGN(
auto type, resolver.FindType(".google.api.expr.runtime.TestMessage", -1));
ASSERT_TRUE(type.has_value());
EXPECT_EQ(type->second.name(), "google.api.expr.runtime.TestMessage");
}
TEST_F(ResolverTest, TestFindDescriptorNotFound) {
CelFunctionRegistry func_registry;
type_registry_.RegisterTypeProvider(
std::make_unique<ProtobufDescriptorProvider>(
google::protobuf::DescriptorPool::generated_pool(),
google::protobuf::MessageFactory::generated_factory()));
Resolver resolver("google.api.expr.runtime",
func_registry.InternalGetRegistry(),
type_registry_.InternalGetModernRegistry(), value_factory_,
type_registry_.resolveable_enums());
ASSERT_OK_AND_ASSIGN(auto type, resolver.FindType("UndefinedMessage", -1));
EXPECT_FALSE(type.has_value()) << type->second;
}
TEST_F(ResolverTest, TestFindOverloads) {
CelFunctionRegistry func_registry;
auto status =
func_registry.Register(std::make_unique<FakeFunction>("fake_func"));
ASSERT_OK(status);
status = func_registry.Register(
std::make_unique<FakeFunction>("cel.fake_ns_func"));
ASSERT_OK(status);
Resolver resolver("cel", func_registry.InternalGetRegistry(),
type_registry_.InternalGetModernRegistry(), value_factory_,
type_registry_.resolveable_enums());
auto overloads =
resolver.FindOverloads("fake_func", false, ArgumentsMatcher(0));
EXPECT_THAT(overloads.size(), Eq(1));
EXPECT_THAT(overloads[0].descriptor.name(), Eq("fake_func"));
overloads =
resolver.FindOverloads("fake_ns_func", false, ArgumentsMatcher(0));
EXPECT_THAT(overloads.size(), Eq(1));
EXPECT_THAT(overloads[0].descriptor.name(), Eq("cel.fake_ns_func"));
}
TEST_F(ResolverTest, TestFindLazyOverloads) {
CelFunctionRegistry func_registry;
auto status = func_registry.RegisterLazyFunction(
CelFunctionDescriptor{"fake_lazy_func", false, {}});
ASSERT_OK(status);
status = func_registry.RegisterLazyFunction(
CelFunctionDescriptor{"cel.fake_lazy_ns_func", false, {}});
ASSERT_OK(status);
Resolver resolver("cel", func_registry.InternalGetRegistry(),
type_registry_.InternalGetModernRegistry(), value_factory_,
type_registry_.resolveable_enums());
auto overloads =
resolver.FindLazyOverloads("fake_lazy_func", false, ArgumentsMatcher(0));
EXPECT_THAT(overloads.size(), Eq(1));
overloads = resolver.FindLazyOverloads("fake_lazy_ns_func", false,
ArgumentsMatcher(0));
EXPECT_THAT(overloads.size(), Eq(1));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/compiler/resolver.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/compiler/resolver_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
9f47e9a6-3306-4f59-a171-c8655f57841a | cpp | tensorflow/tensorflow | exponential_minus_one | tensorflow/lite/experimental/shlo/ops/exponential_minus_one.cc | tensorflow/lite/experimental/shlo/ops/exponential_minus_one_test.cc | #include "tensorflow/lite/experimental/shlo/ops/exponential_minus_one.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
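// Element-wise e^x - 1. std::expm1 preserves precision for small |x|, where
// computing exp(x) - 1 directly would lose digits to cancellation.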
struct ExponentialMinusOne {
template <class T>
T operator()(T v) const {
return std::expm1(v);
}
};
template <>
F16 ExponentialMinusOne::operator()(F16 v) const {
return F16(operator()(static_cast<float>(v)));
}
template <>
BF16 ExponentialMinusOne::operator()(BF16 v) const {
return BF16(operator()(static_cast<float>(v)));
}
ExponentialMinusOneOp Create(ExponentialMinusOneOp::Attributes) { return {}; }
absl::Status Prepare(ExponentialMinusOneOp& op, const Tensor& input,
Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("exponential_minus_one"), input,
IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("exponential_minus_one"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(ExponentialMinusOneOp& op, const Tensor& input,
Tensor& output) {
ExponentialMinusOne exponential_minus_one;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(),
exponential_minus_one, input, output)
} else if (IsFloatTensor(input)) {
DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
exponential_minus_one, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.exponential_minus_one: Unsupported tensor type.");
}
}; | #include "tensorflow/lite/experimental/shlo/ops/exponential_minus_one.h"
#include <cmath>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::ElementsAreArray;
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<ExponentialMinusOneOp> {
static std::string Get() { return "ExponentialMinusOne"; }
};
namespace {
struct ExponentialMinusOne {
template <class T>
T operator()(T v) const {
return std::expm1(v);
}
} exponential_minus_one_ref;
template <>
F16 ExponentialMinusOne::operator()(F16 v) const {
return F16(operator()(static_cast<float>(v)));
}
template <>
BF16 ExponentialMinusOne::operator()(BF16 v) const {
return BF16(operator()(static_cast<float>(v)));
}
INSTANTIATE_TYPED_TEST_SUITE_P(ExponentialMinusOne,
UnaryElementwiseOpShapePropagationTest,
ExponentialMinusOneOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
ExponentialMinusOne, UnaryElementwiseSameBaselineElementTypeConstraintTest,
UnaryElementwiseConstraint1Types<ExponentialMinusOneOp>, TestParamNames);
using UnsupportedTypes =
WithOpTypes<ExponentialMinusOneOp, ConcatTypes<BoolTestType, IntTestTypes,
PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(ExponentialMinusOneOp,
UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct ExponentialMinusOneTest : ::testing::Test {};
TYPED_TEST_SUITE(ExponentialMinusOneTest, FloatTestTypes, TestParamNames);
TYPED_TEST(ExponentialMinusOneTest, FloatTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(),
exponential_minus_one_ref);
auto op = Create(ExponentialMinusOneOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
template <class T>
struct QuantizedExponentialMinusOneTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedExponentialMinusOneTest, QuantizedTestTypes,
TestParamNames);
TYPED_TEST(QuantizedExponentialMinusOneTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(5);
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor input_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = input_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
input_data, expected_data.begin(), [zero_point, scale](auto v) {
const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);
const ExpressedT dequantized_res =
exponential_minus_one_ref(dequantized_input);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(ExponentialMinusOneOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/exponential_minus_one.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/exponential_minus_one_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
535afe38-1169-49f7-af7c-0d9f4af30529 | cpp | google/quiche | chacha20_poly1305_decrypter | quiche/quic/core/crypto/chacha20_poly1305_decrypter.cc | quiche/quic/core/crypto/chacha20_poly1305_decrypter_test.cc | #include "quiche/quic/core/crypto/chacha20_poly1305_decrypter.h"
#include "openssl/aead.h"
#include "openssl/tls1.h"
namespace quic {
namespace {
const size_t kKeySize = 32;
const size_t kNonceSize = 12;
}
ChaCha20Poly1305Decrypter::ChaCha20Poly1305Decrypter()
: ChaChaBaseDecrypter(EVP_aead_chacha20_poly1305, kKeySize, kAuthTagSize,
kNonceSize,
false) {
static_assert(kKeySize <= kMaxKeySize, "key size too big");
static_assert(kNonceSize <= kMaxNonceSize, "nonce size too big");
}
ChaCha20Poly1305Decrypter::~ChaCha20Poly1305Decrypter() {}
uint32_t ChaCha20Poly1305Decrypter::cipher_id() const {
return TLS1_CK_CHACHA20_POLY1305_SHA256;
}
QuicPacketCount ChaCha20Poly1305Decrypter::GetIntegrityLimit() const {
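// 2^36 packets, the ChaCha20-Poly1305 AEAD integrity limit given in
// RFC 9001 (Section 6.6), which assumes packets smaller than 2^14 bytes as
// the static_assert below enforces.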
static_assert(kMaxIncomingPacketSize < 16384,
"This key limit requires limits on decryption payload sizes");
return 68719476736U;
}
} | #include "quiche/quic/core/crypto/chacha20_poly1305_decrypter.h"
#include <memory>
#include <string>
#include "absl/strings/escaping.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace {
struct TestVector {
const char* key;
const char* iv;
const char* fixed;
const char* aad;
const char* ct;
const char* pt;
};
const TestVector test_vectors[] = {
{"808182838485868788898a8b8c8d8e8f"
"909192939495969798999a9b9c9d9e9f",
"4041424344454647",
"07000000",
"50515253c0c1c2c3c4c5c6c7",
"d31a8d34648e60db7b86afbc53ef7ec2"
"a4aded51296e08fea9e2b5a736ee62d6"
"3dbea45e8ca9671282fafb69da92728b"
"1a71de0a9e060b2905d6a5b67ecd3b36"
"92ddbd7f2d778b8c9803aee328091b58"
"fab324e4fad675945585808b4831d7bc"
"3ff4def08e4b7a9de576d26586cec64b"
"6116"
"1ae10b594f09e26a7e902ecb",
"4c616469657320616e642047656e746c"
"656d656e206f662074686520636c6173"
"73206f66202739393a20496620492063"
"6f756c64206f6666657220796f75206f"
"6e6c79206f6e652074697020666f7220"
"746865206675747572652c2073756e73"
"637265656e20776f756c642062652069"
"742e"},
{"808182838485868788898a8b8c8d8e8f"
"909192939495969798999a9b9c9d9e9f",
"4041424344454647",
"07000000",
"50515253c0c1c2c3c4c5c6c7",
"d31a8d34648e60db7b86afbc53ef7ec2"
"a4aded51296e08fea9e2b5a736ee62d6"
"3dbea45e8ca9671282fafb69da92728b"
"1a71de0a9e060b2905d6a5b67ecd3b36"
"92ddbd7f2d778b8c9803aee328091b58"
"fab324e4fad675945585808b4831d7bc"
"3ff4def08e4b7a9de576d26586cec64b"
"6116"
"1ae10b594f09e26a7e902ecc",
nullptr},
{"808182838485868788898a8b8c8d8e8f"
"909192939495969798999a9b9c9d9e9f",
"4041424344454647",
"07000000",
"60515253c0c1c2c3c4c5c6c7",
"d31a8d34648e60db7b86afbc53ef7ec2"
"a4aded51296e08fea9e2b5a736ee62d6"
"3dbea45e8ca9671282fafb69da92728b"
"1a71de0a9e060b2905d6a5b67ecd3b36"
"92ddbd7f2d778b8c9803aee328091b58"
"fab324e4fad675945585808b4831d7bc"
"3ff4def08e4b7a9de576d26586cec64b"
"6116"
"1ae10b594f09e26a7e902ecb",
nullptr},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
}
namespace quic {
namespace test {
QuicData* DecryptWithNonce(ChaCha20Poly1305Decrypter* decrypter,
absl::string_view nonce,
absl::string_view associated_data,
absl::string_view ciphertext) {
uint64_t packet_number;
absl::string_view nonce_prefix(nonce.data(),
nonce.size() - sizeof(packet_number));
decrypter->SetNoncePrefix(nonce_prefix);
memcpy(&packet_number, nonce.data() + nonce_prefix.size(),
sizeof(packet_number));
std::unique_ptr<char[]> output(new char[ciphertext.length()]);
size_t output_length = 0;
const bool success = decrypter->DecryptPacket(
packet_number, associated_data, ciphertext, output.get(), &output_length,
ciphertext.length());
if (!success) {
return nullptr;
}
return new QuicData(output.release(), output_length, true);
}
class ChaCha20Poly1305DecrypterTest : public QuicTest {};
TEST_F(ChaCha20Poly1305DecrypterTest, Decrypt) {
for (size_t i = 0; test_vectors[i].key != nullptr; i++) {
bool has_pt = test_vectors[i].pt;
std::string key;
std::string iv;
std::string fixed;
std::string aad;
std::string ct;
std::string pt;
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].key, &key));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].iv, &iv));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].fixed, &fixed));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].aad, &aad));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].ct, &ct));
if (has_pt) {
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[i].pt, &pt));
}
ChaCha20Poly1305Decrypter decrypter;
ASSERT_TRUE(decrypter.SetKey(key));
std::unique_ptr<QuicData> decrypted(DecryptWithNonce(
&decrypter, fixed + iv,
absl::string_view(aad.length() ? aad.data() : nullptr, aad.length()),
ct));
if (!decrypted) {
EXPECT_FALSE(has_pt);
continue;
}
EXPECT_TRUE(has_pt);
EXPECT_EQ(12u, ct.size() - decrypted->length());
ASSERT_EQ(pt.length(), decrypted->length());
quiche::test::CompareCharArraysWithHexError(
"plaintext", decrypted->data(), pt.length(), pt.data(), pt.length());
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/chacha20_poly1305_decrypter.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/chacha20_poly1305_decrypter_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
13bd3235-e3de-4413-be34-c4c9024651b4 | cpp | google/libaddressinput | address_problem | cpp/src/address_problem.cc | cpp/test/address_problem_test.cc | #include <libaddressinput/address_problem.h>
#include <cstddef>
#include <ostream>
#include "util/size.h"
using i18n::addressinput::AddressProblem;
using i18n::addressinput::size;
using i18n::addressinput::UNEXPECTED_FIELD;
using i18n::addressinput::UNSUPPORTED_FIELD;
std::ostream& operator<<(std::ostream& o, AddressProblem problem) {
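// Indexed by the AddressProblem enum; the static_asserts below check that
// the first and last entries line up with the enum's bounds.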
static const char* const kProblemNames[] = {
"UNEXPECTED_FIELD", "MISSING_REQUIRED_FIELD", "UNKNOWN_VALUE",
"INVALID_FORMAT", "MISMATCHING_VALUE", "USES_P_O_BOX",
"UNSUPPORTED_FIELD",
};
static_assert(UNEXPECTED_FIELD == 0, "bad_base");
static_assert(UNSUPPORTED_FIELD == size(kProblemNames) - 1, "bad_length");
if (problem < 0 || static_cast<size_t>(problem) >= size(kProblemNames)) {
o << "[INVALID ENUM VALUE " << static_cast<int>(problem) << "]";
} else {
o << kProblemNames[problem];
}
return o;
} | #include <libaddressinput/address_problem.h>
#include <sstream>
#include <gtest/gtest.h>
namespace {
using i18n::addressinput::UNKNOWN_VALUE;
TEST(AddressProblemTest, ValidEnumValue) {
std::ostringstream oss;
oss << UNKNOWN_VALUE;
EXPECT_EQ("UNKNOWN_VALUE", oss.str());
}
} | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/address_problem.cc | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/address_problem_test.cc | 2610f7b1043d6784ada41392fc9392d1ea09ea07 |
54ac5196-c078-4979-b2bd-62baf3bb4e06 | cpp | tensorflow/tensorflow | fake_clock_env | tensorflow/core/util/fake_clock_env.cc | tensorflow/core/util/fake_clock_env_test.cc | #include "tensorflow/core/util/fake_clock_env.h"
#include <string>
namespace tensorflow {
FakeClockEnv::FakeClockEnv(Env* wrapped) : EnvWrapper(wrapped) {}
void FakeClockEnv::AdvanceByMicroseconds(int64_t micros) {
{
mutex_lock l(mu_);
current_time_ += micros;
}
}
uint64 FakeClockEnv::NowMicros() const {
{
mutex_lock l(mu_);
return current_time_;
}
}
} | #include "tensorflow/core/util/fake_clock_env.h"
#include <memory>
#include <gtest/gtest.h>
#include "tensorflow/core/platform/env.h"
namespace tensorflow {
namespace {
class FakeClockEnvTest : public ::testing::Test {
protected:
void SetUp() override {
fake_clock_env_ = std::make_unique<FakeClockEnv>(Env::Default());
}
void TearDown() override { fake_clock_env_.reset(); }
std::unique_ptr<FakeClockEnv> fake_clock_env_;
};
TEST_F(FakeClockEnvTest, TimeInitializedToZero) {
EXPECT_EQ(0, fake_clock_env_->NowMicros());
}
TEST_F(FakeClockEnvTest, AdvanceTimeByMicroseconds) {
int current_time = fake_clock_env_->NowMicros();
int64_t duration = 100;
current_time += duration;
fake_clock_env_->AdvanceByMicroseconds(duration);
EXPECT_EQ(current_time, fake_clock_env_->NowMicros());
for (int i = 0; i < 5; ++i) {
fake_clock_env_->AdvanceByMicroseconds(100);
current_time += 100;
}
EXPECT_EQ(current_time, fake_clock_env_->NowMicros());
current_time += duration;
duration = 200;
fake_clock_env_->AdvanceByMicroseconds(duration);
EXPECT_NE(current_time, fake_clock_env_->NowMicros());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/fake_clock_env.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/fake_clock_env_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
eef8dca3-7df6-482a-8512-d6ab41154110 | cpp | tensorflow/tensorflow | tensor_testutil | tensorflow/core/framework/tensor_testutil.cc | tensorflow/core/framework/tensor_testutil_test.cc | #include "tensorflow/core/framework/tensor_testutil.h"
#include <cmath>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace test {
::testing::AssertionResult IsSameType(const Tensor& x, const Tensor& y) {
if (x.dtype() != y.dtype()) {
return ::testing::AssertionFailure()
<< "Tensors have different dtypes (" << x.dtype() << " vs "
<< y.dtype() << ")";
}
return ::testing::AssertionSuccess();
}
::testing::AssertionResult IsSameShape(const Tensor& x, const Tensor& y) {
if (!x.IsSameSize(y)) {
return ::testing::AssertionFailure()
<< "Tensors have different shapes (" << x.shape().DebugString()
<< " vs " << y.shape().DebugString() << ")";
}
return ::testing::AssertionSuccess();
}
template <typename T>
static ::testing::AssertionResult EqualFailure(const T& x, const T& y) {
return ::testing::AssertionFailure()
<< std::setprecision(std::numeric_limits<T>::digits10 + 2) << x
<< " not equal to " << y;
}
template <>
::testing::AssertionResult EqualFailure<int8>(const int8& x, const int8& y) {
return EqualFailure(static_cast<int>(x), static_cast<int>(y));
}
static ::testing::AssertionResult IsEqual(float x, float y, Tolerance t) {
if (Eigen::numext::isnan(x) && Eigen::numext::isnan(y))
return ::testing::AssertionSuccess();
if (t == Tolerance::kNone) {
if (x == y) return ::testing::AssertionSuccess();
} else {
if (::testing::internal::CmpHelperFloatingPointEQ<float>("", "", x, y))
return ::testing::AssertionSuccess();
}
return EqualFailure(x, y);
}
static ::testing::AssertionResult IsEqual(double x, double y, Tolerance t) {
if (Eigen::numext::isnan(x) && Eigen::numext::isnan(y))
return ::testing::AssertionSuccess();
if (t == Tolerance::kNone) {
if (x == y) return ::testing::AssertionSuccess();
} else {
if (::testing::internal::CmpHelperFloatingPointEQ<double>("", "", x, y))
return ::testing::AssertionSuccess();
}
return EqualFailure(x, y);
}
static ::testing::AssertionResult IsEqual(Eigen::half x, Eigen::half y,
Tolerance t) {
if (Eigen::numext::isnan(x) && Eigen::numext::isnan(y))
return ::testing::AssertionSuccess();
if (Eigen::numext::isnan(x) || Eigen::numext::isnan(y))
return EqualFailure(x, y);
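// Compare by ULP distance: map each sign-magnitude bit pattern to a biased
// unsigned value that is monotonic in the represented number, so the
// unsigned difference equals the number of representable values between x
// and y.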
auto sign_and_magnitude_to_biased = [](uint16_t sam) {
const uint16_t kSignBitMask = 0x8000;
if (kSignBitMask & sam) return ~sam + 1;
return kSignBitMask | sam;
};
auto xb = sign_and_magnitude_to_biased(Eigen::numext::bit_cast<uint16_t>(x));
auto yb = sign_and_magnitude_to_biased(Eigen::numext::bit_cast<uint16_t>(y));
if (t == Tolerance::kNone) {
if (xb == yb) return ::testing::AssertionSuccess();
} else {
auto distance = xb >= yb ? xb - yb : yb - xb;
const uint16_t kMaxUlps = 4;
if (distance <= kMaxUlps) return ::testing::AssertionSuccess();
}
return EqualFailure(x, y);
}
static ::testing::AssertionResult IsEqual(tsl::bfloat16 x, tsl::bfloat16 y,
Tolerance t) {
if (Eigen::numext::isnan(x) && Eigen::numext::isnan(y))
return ::testing::AssertionSuccess();
if (Eigen::numext::isnan(x) || Eigen::numext::isnan(y))
return EqualFailure(x, y);
auto sign_and_magnitude_to_biased = [](uint16_t sam) {
const uint16_t kSignBitMask = 0x8000;
if (kSignBitMask & sam) return ~sam + 1;
return kSignBitMask | sam;
};
auto xb = sign_and_magnitude_to_biased(Eigen::numext::bit_cast<uint16_t>(x));
auto yb = sign_and_magnitude_to_biased(Eigen::numext::bit_cast<uint16_t>(y));
if (t == Tolerance::kNone) {
if (xb == yb) return ::testing::AssertionSuccess();
} else {
auto distance = xb >= yb ? xb - yb : yb - xb;
const uint16_t kMaxUlps = 4;
if (distance <= kMaxUlps) return ::testing::AssertionSuccess();
}
return EqualFailure(x, y);
}
template <typename T>
static ::testing::AssertionResult IsEqual(const T& x, const T& y, Tolerance t) {
if (::testing::internal::CmpHelperEQ<T>("", "", x, y))
return ::testing::AssertionSuccess();
return EqualFailure(x, y);
}
template <typename T>
static ::testing::AssertionResult IsEqual(const std::complex<T>& x,
const std::complex<T>& y,
Tolerance t) {
if (IsEqual(x.real(), y.real(), t) && IsEqual(x.imag(), y.imag(), t))
return ::testing::AssertionSuccess();
return EqualFailure(x, y);
}
template <typename T>
static void ExpectEqual(const Tensor& x, const Tensor& y,
Tolerance t = Tolerance::kDefault) {
const T* Tx = x.unaligned_flat<T>().data();
const T* Ty = y.unaligned_flat<T>().data();
auto size = x.NumElements();
int max_failures = 10;
int num_failures = 0;
for (decltype(size) i = 0; i < size; ++i) {
EXPECT_TRUE(IsEqual(Tx[i], Ty[i], t)) << "i = " << (++num_failures, i);
ASSERT_LT(num_failures, max_failures) << "Too many mismatches, giving up.";
}
}
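// "Close" means |x - y| <= atol + rtol * |x|. Matching NaNs are accepted,
// and the early x == y check also covers equal infinities, for which the
// subtraction would produce NaN.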
template <typename T>
static ::testing::AssertionResult IsClose(const T& x, const T& y, const T& atol,
const T& rtol) {
if (Eigen::numext::isnan(x) && Eigen::numext::isnan(y))
return ::testing::AssertionSuccess();
if (x == y) return ::testing::AssertionSuccess();
auto tolerance = atol + rtol * Eigen::numext::abs(x);
if (Eigen::numext::abs(x - y) <= tolerance)
return ::testing::AssertionSuccess();
return ::testing::AssertionFailure() << x << " not close to " << y;
}
template <typename T>
static ::testing::AssertionResult IsClose(const std::complex<T>& x,
const std::complex<T>& y,
const T& atol, const T& rtol) {
if (IsClose(x.real(), y.real(), atol, rtol) &&
IsClose(x.imag(), y.imag(), atol, rtol))
return ::testing::AssertionSuccess();
return ::testing::AssertionFailure() << x << " not close to " << y;
}
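// A negative tolerance requests the default of 5 * epsilon for the type's
// real part; e.g. GetTolerance<float>(-1.0) is about 5.96e-7 and
// GetTolerance<Eigen::half>(-1.0) about 4.88e-3.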
template <typename T>
static auto GetTolerance(double tolerance) {
using Real = typename Eigen::NumTraits<T>::Real;
auto default_tol = static_cast<Real>(5.0) * Eigen::NumTraits<T>::epsilon();
auto result = tolerance < 0.0 ? default_tol : static_cast<Real>(tolerance);
EXPECT_GE(result, static_cast<Real>(0));
return result;
}
template <typename T>
static void ExpectClose(const Tensor& x, const Tensor& y, double atol,
double rtol) {
auto typed_atol = GetTolerance<T>(atol);
auto typed_rtol = GetTolerance<T>(rtol);
const T* Tx = x.unaligned_flat<T>().data();
const T* Ty = y.unaligned_flat<T>().data();
auto size = x.NumElements();
int max_failures = 10;
int num_failures = 0;
for (decltype(size) i = 0; i < size; ++i) {
EXPECT_TRUE(IsClose(Tx[i], Ty[i], typed_atol, typed_rtol))
<< "i = " << (++num_failures, i) << " Tx[i] = " << Tx[i]
<< " Ty[i] = " << Ty[i];
ASSERT_LT(num_failures, max_failures)
<< "Too many mismatches (atol = " << atol << " rtol = " << rtol
<< "), giving up.";
}
EXPECT_EQ(num_failures, 0)
<< "Mismatches detected (atol = " << atol << " rtol = " << rtol << ").";
}
void ExpectEqual(const Tensor& x, const Tensor& y, Tolerance t) {
ASSERT_TRUE(IsSameType(x, y));
ASSERT_TRUE(IsSameShape(x, y));
switch (x.dtype()) {
case DT_FLOAT:
return ExpectEqual<float>(x, y, t);
case DT_DOUBLE:
return ExpectEqual<double>(x, y, t);
case DT_INT32:
return ExpectEqual<int32>(x, y);
case DT_UINT32:
return ExpectEqual<uint32>(x, y);
case DT_UINT16:
return ExpectEqual<uint16>(x, y);
case DT_UINT8:
return ExpectEqual<uint8>(x, y);
case DT_INT16:
return ExpectEqual<int16>(x, y);
case DT_INT8:
return ExpectEqual<int8>(x, y);
case DT_STRING:
return ExpectEqual<tstring>(x, y);
case DT_COMPLEX64:
return ExpectEqual<complex64>(x, y, t);
case DT_COMPLEX128:
return ExpectEqual<complex128>(x, y, t);
case DT_INT64:
return ExpectEqual<int64_t>(x, y);
case DT_UINT64:
return ExpectEqual<uint64>(x, y);
case DT_BOOL:
return ExpectEqual<bool>(x, y);
case DT_QINT8:
return ExpectEqual<qint8>(x, y);
case DT_QUINT8:
return ExpectEqual<quint8>(x, y);
case DT_QINT16:
return ExpectEqual<qint16>(x, y);
case DT_QUINT16:
return ExpectEqual<quint16>(x, y);
case DT_QINT32:
return ExpectEqual<qint32>(x, y);
case DT_BFLOAT16:
return ExpectEqual<bfloat16>(x, y, t);
case DT_HALF:
return ExpectEqual<Eigen::half>(x, y, t);
case DT_FLOAT8_E5M2:
return ExpectEqual<float8_e5m2>(x, y, t);
case DT_FLOAT8_E4M3FN:
return ExpectEqual<float8_e4m3fn>(x, y, t);
case DT_INT4:
return ExpectEqual<int4>(x, y, t);
case DT_UINT4:
return ExpectEqual<uint4>(x, y, t);
default:
EXPECT_TRUE(false) << "Unsupported type : " << DataTypeString(x.dtype());
}
}
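// ExpectClose only makes sense for inexact element types; calling it on an
// integer or string tensor fails the test via the default branch below.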
void ExpectClose(const Tensor& x, const Tensor& y, double atol, double rtol) {
ASSERT_TRUE(IsSameType(x, y));
ASSERT_TRUE(IsSameShape(x, y));
switch (x.dtype()) {
case DT_HALF:
return ExpectClose<Eigen::half>(x, y, atol, rtol);
case DT_BFLOAT16:
return ExpectClose<Eigen::bfloat16>(x, y, atol, rtol);
case DT_FLOAT:
return ExpectClose<float>(x, y, atol, rtol);
case DT_DOUBLE:
return ExpectClose<double>(x, y, atol, rtol);
case DT_COMPLEX64:
return ExpectClose<complex64>(x, y, atol, rtol);
case DT_COMPLEX128:
return ExpectClose<complex128>(x, y, atol, rtol);
default:
EXPECT_TRUE(false) << "Unsupported type : " << DataTypeString(x.dtype());
}
}
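// Thin wrappers that expose the tolerance plumbing to the unit tests
// without widening the public testing API.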
::testing::AssertionResult internal_test::IsClose(Eigen::half x, Eigen::half y,
double atol, double rtol) {
return test::IsClose(x, y, GetTolerance<Eigen::half>(atol),
GetTolerance<Eigen::half>(rtol));
}
::testing::AssertionResult internal_test::IsClose(float x, float y, double atol,
double rtol) {
return test::IsClose(x, y, GetTolerance<float>(atol),
GetTolerance<float>(rtol));
}
::testing::AssertionResult internal_test::IsClose(double x, double y,
double atol, double rtol) {
return test::IsClose(x, y, GetTolerance<double>(atol),
GetTolerance<double>(rtol));
}
}
} | #include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace test {
namespace {
using internal_test::IsClose;
template <typename T>
void TestEdgeCasesNear() {
EXPECT_TRUE(IsClose(Eigen::NumTraits<T>::infinity(),
Eigen::NumTraits<T>::infinity(), 0.0, 0.0));
EXPECT_TRUE(IsClose(Eigen::NumTraits<T>::lowest(),
Eigen::NumTraits<T>::highest(),
Eigen::NumTraits<double>::infinity(), 0.0));
EXPECT_FALSE(
IsClose(Eigen::NumTraits<T>::lowest(), Eigen::NumTraits<T>::highest(),
static_cast<double>(Eigen::NumTraits<T>::highest()), 0.0));
EXPECT_FALSE(IsClose(Eigen::NumTraits<T>::quiet_NaN(), T(0.0), 0.0, 0.0));
EXPECT_TRUE(IsClose(Eigen::NumTraits<T>::quiet_NaN(),
Eigen::NumTraits<T>::quiet_NaN(), 0.0, 0.0));
EXPECT_FALSE(IsClose(Eigen::NumTraits<T>::quiet_NaN(), T(0.0),
Eigen::NumTraits<double>::infinity(), 0.0));
EXPECT_TRUE(IsClose(Eigen::NumTraits<T>::quiet_NaN(),
Eigen::NumTraits<T>::quiet_NaN(),
Eigen::NumTraits<double>::infinity(), 0.0));
}
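// Debug helper: prints a float's storage as sign / exponent / mantissa bit
// groups followed by its decimal value. The exponent width falls out of the
// storage size: 2 + 3 * log2(bytes) gives 8 bits for float and 11 for
// double.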
template <typename T, typename U>
void dumpFloatingPointStorage(T value) {
U* integral = reinterpret_cast<U*>(&value);
int shift_amount = (sizeof(U) << 3) - 1;
int exponent_bits = 2 + (log2(sizeof(U)) * 3);
U mask = static_cast<U>(1) << shift_amount;
for (int bits = 0; bits <= shift_amount; ++bits) {
std::cout << ((*integral & mask) > 0);
if (bits == 0 || bits == exponent_bits) std::cout << " ";
mask >>= 1;
}
std::cout << std::endl;
printf("%.20lf\n", static_cast<double>(value));
}
TEST(TensorTestUtilTest, ExpectTensorNearHalf) {
typedef Eigen::half T;
EXPECT_TRUE(IsClose(static_cast<T>(1.0f), static_cast<T>(1.0f), 0.0, 0.0));
EXPECT_TRUE(IsClose(static_cast<T>(0.0f), static_cast<T>(-0.0f), 0.0, 0.0));
EXPECT_TRUE(
IsClose(static_cast<T>(3.141592f), static_cast<T>(3.141592f), 0.0, 0.0));
EXPECT_TRUE(
IsClose(static_cast<T>(8.9875f), static_cast<T>(8.99f), 0.0078125, 0.0));
EXPECT_FALSE(
IsClose(static_cast<T>(8.9875f), static_cast<T>(8.99f), 0.007, 0.0));
EXPECT_TRUE(
IsClose(static_cast<T>(720.2f), static_cast<T>(720.3f), 0.5, 0.0));
EXPECT_FALSE(
IsClose(static_cast<T>(720.2f), static_cast<T>(720.3f), 0.4, 0.0));
EXPECT_TRUE(
IsClose(static_cast<T>(1234.f), static_cast<T>(1235.f), 1.0, 0.0));
EXPECT_FALSE(
IsClose(static_cast<T>(1234.5f), static_cast<T>(1235.f), 0.5, 0.0));
EXPECT_TRUE(
IsClose(static_cast<T>(1234.5f), static_cast<T>(1235.f), 1.0, 0.0));
EXPECT_TRUE(
IsClose(static_cast<T>(-2.71f), static_cast<T>(-2.72f), 0.01, 0.0));
TestEdgeCasesNear<T>();
}
TEST(TensorTestUtilTest, ExpectTensorNearFloat) {
typedef float T;
EXPECT_TRUE(IsClose(1.0f, 1.0f, 0.0f, 0.0f));
EXPECT_TRUE(IsClose(0.0f, -0.0f, 0.0f, 0.0f));
EXPECT_TRUE(IsClose(3.14159265359f, 3.14159265359f, 0.0f, 0.0f));
EXPECT_TRUE(IsClose(8.9875f, 8.9876f, 0.0001002f, 0.0f));
EXPECT_FALSE(IsClose(8.9875f, 8.9876f, 0.0001f, 0.0f));
EXPECT_TRUE(IsClose(720.2017f, 720.2018f, 0.0001f, 0.0f));
EXPECT_FALSE(IsClose(720.20175f, 720.20185f, 0.0001f, 0.0f));
EXPECT_TRUE(IsClose(720.20175f, 720.20185f, 0.00013f, 0.0f));
EXPECT_FALSE(IsClose(123456788.f, 123456789.f, 4.0f, 0.0f));
EXPECT_TRUE(IsClose(123456788.f, 123456789.f, 8.0f, 0.0f));
EXPECT_TRUE(IsClose(-2.718281f, -2.718282f, 0.1f, 0.0f));
TestEdgeCasesNear<T>();
}
TEST(TensorTestUtilTest, ExpectTensorNearDouble) {
typedef double T;
EXPECT_TRUE(IsClose(1.0, 1.0, 0.0, 0.0));
EXPECT_TRUE(IsClose(0.0, -0.0, 0.0, 0.0));
EXPECT_TRUE(IsClose(3.14159265359, 3.14159265359, 0.0, 0.0));
EXPECT_TRUE(IsClose(8.9875, 8.9876, 0.0001, 0.0));
EXPECT_FALSE(IsClose(100720.2018, 100720.2019, 0.0001, 0.0));
EXPECT_TRUE(IsClose(100720.2018, 100720.2019, 1.00000005e-4, 0.0));
EXPECT_FALSE(IsClose(12345678901234567., 12345678901234566., 1.0, 0.0));
EXPECT_TRUE(IsClose(12345678901234567., 12345678901234566., 2.0, 0.0));
EXPECT_FALSE(IsClose(-2.71828182846, -2.71828182847, 1.0e-11, 0.0));
EXPECT_TRUE(IsClose(-2.71828182846, -2.71828182847, 1.00000009e-11, 0.0));
TestEdgeCasesNear<T>();
}
TEST(TensorTestUtilTest, ExpectTensorNearSlice) {
Tensor x(DT_FLOAT, TensorShape({7, 3}));
test::FillFn<float>(&x, [](int i) { return 1.0f; });
test::ExpectTensorNear<float>(
x.SubSlice(3), test::AsTensor<float>({1.0, 1.0, 1.0}, TensorShape({3})),
1e-10);
}
template <typename T>
void TestEdgeCasesClose() {
EXPECT_TRUE(IsClose(Eigen::NumTraits<T>::infinity(),
Eigen::NumTraits<T>::infinity(), 0.0, 0.0));
EXPECT_TRUE(IsClose(Eigen::NumTraits<T>::lowest(),
Eigen::NumTraits<T>::highest(),
Eigen::NumTraits<double>::infinity(),
Eigen::NumTraits<double>::infinity()));
EXPECT_TRUE(IsClose(Eigen::NumTraits<T>::lowest(),
Eigen::NumTraits<T>::highest(),
static_cast<double>(Eigen::NumTraits<T>::highest()),
static_cast<double>(Eigen::NumTraits<T>::highest())));
EXPECT_FALSE(IsClose(Eigen::NumTraits<T>::quiet_NaN(), T(0.0), 0.0, 0.0));
EXPECT_TRUE(IsClose(Eigen::NumTraits<T>::quiet_NaN(),
Eigen::NumTraits<T>::quiet_NaN(), 0.0, 0.0));
EXPECT_FALSE(IsClose(Eigen::NumTraits<T>::quiet_NaN(), T(0.0),
Eigen::NumTraits<double>::infinity(), 0.0));
EXPECT_TRUE(IsClose(Eigen::NumTraits<T>::quiet_NaN(),
Eigen::NumTraits<T>::quiet_NaN(),
Eigen::NumTraits<double>::infinity(), 0.0));
}
TEST(TensorTestUtilTest, ExpectTensorCloseHalf) {
typedef Eigen::half T;
EXPECT_TRUE(IsClose(static_cast<T>(1.0f), static_cast<T>(1.1f), 0.1, 0.1));
EXPECT_TRUE(IsClose(static_cast<T>(1.0f), static_cast<T>(1.0f), 0.0, 0.0));
EXPECT_FALSE(IsClose(static_cast<T>(1.0f), static_cast<T>(1.1f), 0.0, 0.0));
EXPECT_TRUE(IsClose(static_cast<T>(1.234f), static_cast<T>(1.234f)));
EXPECT_TRUE(IsClose(static_cast<T>(1.234f), static_cast<T>(1.233f)));
EXPECT_TRUE(IsClose(static_cast<T>(1.234f), static_cast<T>(1.235f)));
EXPECT_FALSE(IsClose(static_cast<T>(1.234f), static_cast<T>(1.232f)));
EXPECT_FALSE(IsClose(static_cast<T>(1.234f), static_cast<T>(1.236f)));
EXPECT_TRUE(
IsClose(static_cast<T>(1.234f), static_cast<T>(1.232f), 8e-4f, 1e-3f));
EXPECT_TRUE(
IsClose(static_cast<T>(1.234f), static_cast<T>(1.236f), 1.4e-3f, 5e-4f));
EXPECT_TRUE(
IsClose(static_cast<T>(3.141592f), static_cast<T>(3.141593f), 0.0, 0.0));
EXPECT_FALSE(IsClose(static_cast<T>(1e4f), static_cast<T>(1e-4f)));
TestEdgeCasesClose<T>();
}
TEST(TensorTestUtilTest, ExpectTensorCloseFloat) {
typedef float T;
EXPECT_TRUE(IsClose(1.0f, 1.1f, 0.1f, 0.1f));
EXPECT_TRUE(IsClose(1.0f, 1.0f, 0.0f, 0.0f));
EXPECT_FALSE(IsClose(1.0f, 1.1f, 0.0f, 0.0f));
EXPECT_TRUE(IsClose(1.234567f, 1.234567f));
EXPECT_TRUE(IsClose(1.234567f, 1.234568f));
EXPECT_TRUE(IsClose(1.234567f, 1.234566f));
EXPECT_FALSE(IsClose(1.234567f, 1.234569f));
EXPECT_FALSE(IsClose(1.234567f, 1.234565f));
EXPECT_TRUE(IsClose(1.234567f, 1.234569f, 8e-7f, 1e-6f));
EXPECT_TRUE(IsClose(1.234567f, 1.234565f, 3e-7f, 1.5e-6f));
EXPECT_TRUE(IsClose(3.14159265f, 3.14159266f, 0.0f, 0.0f));
EXPECT_FALSE(IsClose(1e8f, 1e-8f));
EXPECT_FALSE(IsClose(1e15f, 1e-15f));
TestEdgeCasesClose<T>();
}
TEST(TensorTestUtilTest, ExpectTensorCloseDouble) {
typedef double T;
EXPECT_TRUE(IsClose(1.0, 1.1, 0.1, 0.1));
EXPECT_TRUE(IsClose(1.0, 1.0, 0.0, 0.0));
EXPECT_FALSE(IsClose(1.0, 1.1, 0.0, 0.0));
EXPECT_TRUE(IsClose(1.234567890123456, 1.234567890123456));
EXPECT_TRUE(IsClose(1.234567890123456, 1.234567890123457));
EXPECT_TRUE(IsClose(1.234567890123456, 1.234567890123455));
EXPECT_TRUE(IsClose(1.234567890123456, 1.234567890123458));
EXPECT_TRUE(IsClose(1.234567890123456, 1.234567890123454));
EXPECT_FALSE(IsClose(1.234567890123456, 1.234567890123459));
EXPECT_FALSE(IsClose(1.234567890123456, 1.234567890123453));
EXPECT_TRUE(IsClose(1.234567890123456, 1.234567890123459, 9.5e-16, 1.6e-15));
EXPECT_TRUE(IsClose(1.234567890123456, 1.234567890123453, 7e-16, 2e-15));
EXPECT_TRUE(IsClose(3.141592653589793238, 3.141592653589793239, 0.0, 0.0));
EXPECT_FALSE(IsClose(1e15, 1e-15));
EXPECT_FALSE(IsClose(1e30, 1e-30));
TestEdgeCasesClose<T>();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/tensor_testutil.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/tensor_testutil_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5c396182-72ac-466f-b33e-1ce61dd04742 | cpp | google/quiche | nghttp2 | quiche/http2/adapter/nghttp2.h | quiche/http2/adapter/nghttp2_test.cc | #ifndef QUICHE_HTTP2_ADAPTER_NGHTTP2_H_
#define QUICHE_HTTP2_ADAPTER_NGHTTP2_H_
#include <cstddef>
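// nghttp2's API is declared in terms of ssize_t, which is POSIX rather than
// standard C++; aliasing it to ptrdiff_t keeps this wrapper usable on
// toolchains (notably MSVC) that do not define it.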
using ssize_t = ptrdiff_t;
#include "nghttp2/nghttp2.h"
#endif | #include "quiche/http2/adapter/nghttp2.h"
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "quiche/http2/adapter/mock_nghttp2_callbacks.h"
#include "quiche/http2/adapter/nghttp2_test_utils.h"
#include "quiche/http2/adapter/nghttp2_util.h"
#include "quiche/http2/adapter/test_frame_sequence.h"
#include "quiche/http2/adapter/test_utils.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace adapter {
namespace test {
namespace {
using testing::_;
enum FrameType {
DATA,
HEADERS,
PRIORITY,
RST_STREAM,
SETTINGS,
PUSH_PROMISE,
PING,
GOAWAY,
WINDOW_UPDATE,
};
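// Session options shared by all tests: closed streams are not retained,
// window updates must be issued manually, outgoing header blocks may be up
// to 32 MiB, and at most 10000 outgoing SETTINGS/PING ACK frames may be
// queued.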
nghttp2_option* GetOptions() {
nghttp2_option* options;
nghttp2_option_new(&options);
nghttp2_option_set_no_closed_streams(options, 1);
nghttp2_option_set_no_auto_window_update(options, 1);
nghttp2_option_set_max_send_header_block_length(options, 0x2000000);
nghttp2_option_set_max_outbound_ack(options, 10000);
return options;
}
class Nghttp2Test : public quiche::test::QuicheTest {
public:
Nghttp2Test() : session_(MakeSessionPtr(nullptr)) {}
void SetUp() override { InitializeSession(); }
virtual Perspective GetPerspective() = 0;
void InitializeSession() {
auto nghttp2_callbacks = MockNghttp2Callbacks::GetCallbacks();
nghttp2_option* options = GetOptions();
nghttp2_session* ptr;
if (GetPerspective() == Perspective::kClient) {
nghttp2_session_client_new2(&ptr, nghttp2_callbacks.get(),
&mock_callbacks_, options);
} else {
nghttp2_session_server_new2(&ptr, nghttp2_callbacks.get(),
&mock_callbacks_, options);
}
nghttp2_option_del(options);
EXPECT_CALL(mock_callbacks_, Send(_, _, _))
.WillRepeatedly(
            [this](const uint8_t* data, size_t length, int /*flags*/) {
absl::StrAppend(&serialized_, ToStringView(data, length));
return length;
});
EXPECT_CALL(mock_callbacks_, SendData(_, _, _, _))
        .WillRepeatedly([this](nghttp2_frame* /*frame*/, const uint8_t* framehd,
size_t length, nghttp2_data_source* source) {
QUICHE_LOG(INFO) << "Appending frame header and " << length
<< " bytes of data";
auto* s = static_cast<TestDataSource*>(source->ptr);
absl::StrAppend(&serialized_, ToStringView(framehd, 9),
s->ReadNext(length));
return 0;
});
session_ = MakeSessionPtr(ptr);
}
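  // The Send/SendData expectations installed in InitializeSession capture
  // every byte nghttp2 emits into serialized_; SendData additionally writes
  // the 9-byte frame header and pulls the payload straight from the
  // TestDataSource, so tests can assert on the exact serialized frames.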
testing::StrictMock<MockNghttp2Callbacks> mock_callbacks_;
nghttp2_session_unique_ptr session_;
std::string serialized_;
};
class Nghttp2ClientTest : public Nghttp2Test {
public:
Perspective GetPerspective() override { return Perspective::kClient; }
};
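// The client never opened stream 1, so a server-initiated HEADERS frame on
// it is invalid; processing is expected to stop at OnInvalidFrameRecv, and
// the strict mock would flag any callbacks for the trailing DATA,
// RST_STREAM, or GOAWAY frames.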
TEST_F(Nghttp2ClientTest, ClientReceivesUnexpectedHeaders) {
const std::string initial_frames = TestFrameSequence()
.ServerPreface()
.Ping(42)
.WindowUpdate(0, 1000)
.Serialize();
testing::InSequence seq;
EXPECT_CALL(mock_callbacks_, OnBeginFrame(HasFrameHeader(0, SETTINGS, 0)));
EXPECT_CALL(mock_callbacks_, OnFrameRecv(IsSettings(testing::IsEmpty())));
EXPECT_CALL(mock_callbacks_, OnBeginFrame(HasFrameHeader(0, PING, 0)));
EXPECT_CALL(mock_callbacks_, OnFrameRecv(IsPing(42)));
EXPECT_CALL(mock_callbacks_,
OnBeginFrame(HasFrameHeader(0, WINDOW_UPDATE, 0)));
EXPECT_CALL(mock_callbacks_, OnFrameRecv(IsWindowUpdate(1000)));
ssize_t result = nghttp2_session_mem_recv(
session_.get(), ToUint8Ptr(initial_frames.data()), initial_frames.size());
ASSERT_EQ(result, initial_frames.size());
const std::string unexpected_stream_frames =
TestFrameSequence()
.Headers(1,
{{":status", "200"},
{"server", "my-fake-server"},
{"date", "Tue, 6 Apr 2021 12:54:01 GMT"}},
                   /*fin=*/false)
.Data(1, "This is the response body.")
.RstStream(3, Http2ErrorCode::INTERNAL_ERROR)
.GoAway(5, Http2ErrorCode::ENHANCE_YOUR_CALM, "calm down!!")
.Serialize();
EXPECT_CALL(mock_callbacks_, OnBeginFrame(HasFrameHeader(1, HEADERS, _)));
EXPECT_CALL(mock_callbacks_, OnInvalidFrameRecv(IsHeaders(1, _, _), _));
nghttp2_session_mem_recv(session_.get(),
ToUint8Ptr(unexpected_stream_frames.data()),
unexpected_stream_frames.size());
}
TEST_F(Nghttp2ClientTest, ClientSendsRequest) {
int result = nghttp2_session_send(session_.get());
ASSERT_EQ(result, 0);
EXPECT_THAT(serialized_, testing::StrEq(spdy::kHttp2ConnectionHeaderPrefix));
serialized_.clear();
const std::string initial_frames =
TestFrameSequence().ServerPreface().Serialize();
testing::InSequence s;
EXPECT_CALL(mock_callbacks_, OnBeginFrame(HasFrameHeader(0, SETTINGS, 0)));
EXPECT_CALL(mock_callbacks_, OnFrameRecv(IsSettings(testing::IsEmpty())));
ssize_t recv_result = nghttp2_session_mem_recv(
session_.get(), ToUint8Ptr(initial_frames.data()), initial_frames.size());
EXPECT_EQ(initial_frames.size(), recv_result);
EXPECT_CALL(mock_callbacks_, BeforeFrameSend(IsSettings(testing::IsEmpty())));
EXPECT_CALL(mock_callbacks_, OnFrameSend(IsSettings(testing::IsEmpty())));
EXPECT_TRUE(nghttp2_session_want_write(session_.get()));
result = nghttp2_session_send(session_.get());
EXPECT_THAT(serialized_, EqualsFrames({spdy::SpdyFrameType::SETTINGS}));
serialized_.clear();
EXPECT_FALSE(nghttp2_session_want_write(session_.get()));
std::vector<std::pair<absl::string_view, absl::string_view>> headers = {
{":method", "POST"},
{":scheme", "http"},
{":authority", "example.com"},
{":path", "/this/is/request/one"}};
std::vector<nghttp2_nv> nvs;
for (const auto& h : headers) {
nvs.push_back({.name = ToUint8Ptr(h.first.data()),
.value = ToUint8Ptr(h.second.data()),
.namelen = h.first.size(),
.valuelen = h.second.size(),
.flags = NGHTTP2_NV_FLAG_NONE});
}
const absl::string_view kBody = "This is an example request body.";
TestDataSource source{kBody};
nghttp2_data_provider provider = source.MakeDataProvider();
int stream_id =
      nghttp2_submit_request(session_.get(), /*pri_spec=*/nullptr, nvs.data(),
                             nvs.size(), &provider,
                             /*stream_user_data=*/nullptr);
EXPECT_GT(stream_id, 0);
EXPECT_TRUE(nghttp2_session_want_write(session_.get()));
EXPECT_CALL(mock_callbacks_, BeforeFrameSend(IsHeaders(stream_id, _, _)));
EXPECT_CALL(mock_callbacks_, OnFrameSend(IsHeaders(stream_id, _, _)));
EXPECT_CALL(mock_callbacks_, OnFrameSend(IsData(stream_id, kBody.size(), _)));
nghttp2_session_send(session_.get());
EXPECT_THAT(serialized_, EqualsFrames({spdy::SpdyFrameType::HEADERS,
spdy::SpdyFrameType::DATA}));
EXPECT_THAT(serialized_, testing::HasSubstr(kBody));
EXPECT_FALSE(nghttp2_session_want_write(session_.get()));
}
class Nghttp2ServerTest : public Nghttp2Test {
public:
Perspective GetPerspective() override { return Perspective::kServer; }
};
TEST_F(Nghttp2ServerTest, MismatchedContentLength) {
const std::string initial_frames =
TestFrameSequence()
.ClientPreface()
.Headers(1,
{{":method", "POST"},
{":scheme", "https"},
{":authority", "example.com"},
{":path", "/"},
{"content-length", "50"}},
                   /*fin=*/false)
.Data(1, "Less than 50 bytes.", true)
.Serialize();
testing::InSequence seq;
EXPECT_CALL(mock_callbacks_, OnBeginFrame(HasFrameHeader(0, SETTINGS, _)));
EXPECT_CALL(mock_callbacks_, OnFrameRecv(IsSettings(testing::IsEmpty())));
EXPECT_CALL(mock_callbacks_, OnBeginFrame(HasFrameHeader(
1, HEADERS, NGHTTP2_FLAG_END_HEADERS)));
EXPECT_CALL(mock_callbacks_,
OnBeginHeaders(IsHeaders(1, NGHTTP2_FLAG_END_HEADERS,
NGHTTP2_HCAT_REQUEST)));
EXPECT_CALL(mock_callbacks_, OnHeader(_, ":method", "POST", _));
EXPECT_CALL(mock_callbacks_, OnHeader(_, ":scheme", "https", _));
EXPECT_CALL(mock_callbacks_, OnHeader(_, ":authority", "example.com", _));
EXPECT_CALL(mock_callbacks_, OnHeader(_, ":path", "/", _));
EXPECT_CALL(mock_callbacks_, OnHeader(_, "content-length", "50", _));
EXPECT_CALL(mock_callbacks_,
OnFrameRecv(IsHeaders(1, NGHTTP2_FLAG_END_HEADERS,
NGHTTP2_HCAT_REQUEST)));
EXPECT_CALL(mock_callbacks_,
OnBeginFrame(HasFrameHeader(1, DATA, NGHTTP2_FLAG_END_STREAM)));
EXPECT_CALL(mock_callbacks_, OnDataChunkRecv(NGHTTP2_FLAG_END_STREAM, 1,
"Less than 50 bytes."));
ssize_t result = nghttp2_session_mem_recv(
session_.get(), ToUint8Ptr(initial_frames.data()), initial_frames.size());
ASSERT_EQ(result, initial_frames.size());
}
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/adapter/nghttp2.h | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/adapter/nghttp2_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
17e0abbc-0854-4ec0-a080-901466566b64 | cpp | tensorflow/tensorflow | snapshot_manager | tensorflow/core/data/service/snapshot/snapshot_manager.cc | tensorflow/core/data/service/snapshot/snapshot_manager_test.cc | #include "tensorflow/core/data/service/snapshot/snapshot_manager.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/btree_map.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "xla/tsl/lib/io/compression.h"
#include "xla/tsl/protobuf/status.pb.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/dispatcher.pb.h"
#include "tensorflow/core/data/service/snapshot/file_utils.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/data/service/snapshot/prefetched_split_provider.h"
#include "tensorflow/core/data/service/split_provider.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status_to_from_proto.h"
#include "tsl/platform/thread_annotations.h"
#include "tsl/platform/threadpool.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
const absl::Duration kProgressLoggingInterval = absl::Minutes(1);
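// Counts the splits of a provider with unknown cardinality by draining it.
// The loop increments once per GetNext call, including the final call that
// sets end_of_splits, hence the decrement afterwards; the provider is Reset
// so the caller can still consume it from the beginning.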
absl::StatusOr<int64_t> CountSplits(SplitProvider& split_provider) {
if (split_provider.Cardinality() != kUnknownCardinality) {
return split_provider.Cardinality();
}
int64_t num_splits = 0;
Tensor tensor;
for (bool end_of_splits = false; !end_of_splits; ++num_splits) {
TF_RETURN_IF_ERROR(split_provider.GetNext(&tensor, &end_of_splits));
}
--num_splits;
TF_RETURN_IF_ERROR(split_provider.Reset());
return num_splits;
}
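// Skips a single split during recovery, resetting the provider and bumping
// repetition_index whenever the current repetition runs out.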
absl::Status SkipSplit(SplitProvider& split_provider,
int64_t& repetition_index) {
Tensor tensor;
bool end_of_splits = false;
TF_RETURN_IF_ERROR(split_provider.GetNext(&tensor, &end_of_splits));
while (end_of_splits) {
++repetition_index;
TF_RETURN_IF_ERROR(split_provider.Reset());
TF_RETURN_IF_ERROR(split_provider.GetNext(&tensor, &end_of_splits));
}
return absl::OkStatus();
}
std::string PrefetchedSplitDir(const std::string& snapshot_path,
int64_t source_index) {
return tsl::io::JoinPath(snapshot_path, "prefetched_splits",
absl::StrCat("source_", source_index));
}
}
absl::StatusOr<bool> SnapshotAssignmentManager::TryAddAssignment(
absl::string_view snapshot_path, absl::string_view worker_address,
int64_t stream_index) TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
if (assignments_[worker_address].size() >=
worker_max_concurrent_snapshots()) {
return false;
}
Assignment assignment{std::string(snapshot_path), stream_index};
auto [unused, success] = assignments_[worker_address].insert(assignment);
if (!success) {
return absl::InternalError(absl::StrCat("Worker ", worker_address,
" already had an assignment for ",
assignment.DebugString()));
}
++snapshot_assignment_counts_[snapshot_path];
return true;
}
void SnapshotAssignmentManager::RemoveAssignment(
absl::string_view snapshot_path, absl::string_view worker_address,
int64_t stream_index) TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
auto num_erased = assignments_[worker_address].erase(
{std::string(snapshot_path), stream_index});
if ((snapshot_assignment_counts_[snapshot_path] -= num_erased) <= 0) {
snapshot_assignment_counts_.erase(snapshot_path);
}
}
void SnapshotAssignmentManager::AddSnapshot(absl::string_view snapshot_path)
TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
if (!snapshot_assignment_counts_.contains(snapshot_path)) {
snapshot_assignment_counts_[snapshot_path] = 0;
}
}
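// Produces the ordered list of snapshots a worker should process: its
// existing assignments come first, and if it still has capacity the
// unassigned snapshot with the fewest assigned streams is appended, so idle
// workers gravitate toward the least-served snapshots.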
std::vector<std::string> SnapshotAssignmentManager::LoadBalanceSnapshots(
absl::string_view worker_address) TF_LOCKS_EXCLUDED(mu_) {
std::vector<std::string> result;
tsl::mutex_lock l(mu_);
result.reserve(snapshot_assignment_counts_.size());
const auto it = assignments_.find(worker_address);
if (it != assignments_.end()) {
for (const Assignment& assignment : it->second) {
result.push_back(assignment.snapshot_path);
}
}
if (result.size() >= worker_max_concurrent_snapshots()) {
return result;
}
absl::btree_multimap<size_t, std::string> snapshots_by_count;
for (const auto& [snapshot, count] : snapshot_assignment_counts_) {
snapshots_by_count.emplace(count, snapshot);
}
for (const auto& [_, snapshot] : snapshots_by_count) {
if (absl::c_find(result, snapshot) == result.end()) {
result.push_back(snapshot);
return result;
}
}
return result;
}
absl::StatusOr<std::unique_ptr<SnapshotManager>> SnapshotManager::Start(
const SnapshotRequest& request,
SnapshotAssignmentManager& assignment_manager, Env* env) {
std::unique_ptr<SnapshotManager> snapshot_manager{
new SnapshotManager{request.path(), assignment_manager, env}};
TF_RETURN_IF_ERROR(snapshot_manager->Start(request));
return snapshot_manager;
}
absl::Status SnapshotManager::Start(const SnapshotRequest& request)
TF_LOCKS_EXCLUDED(mu_) {
LOG(INFO) << "Starting to write tf.data snapshot at " << request.path();
if (env_->FileExists(request.path()).ok()) {
return errors::AlreadyExists("tf.data snapshot at ", request.path(),
" already exists.");
}
tsl::mutex_lock l(mu_);
TF_RETURN_IF_ERROR(WriteOnDiskSkeleton());
TF_RETURN_IF_ERROR(WriteOnDiskMetadata(request));
TF_ASSIGN_OR_RETURN(sources_, CreateSources(request.dataset()));
TF_ASSIGN_OR_RETURN(num_total_splits_, GetSplitsCardinality());
metadata_ = request.metadata();
LOG(INFO) << "Started writing tf.data distributed snapshot at " << path_;
return absl::OkStatus();
}
absl::StatusOr<std::vector<SnapshotManager::Source>>
SnapshotManager::CreateSources(const DatasetDef& dataset_def) const
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
std::vector<std::unique_ptr<SplitProvider>> split_providers;
TF_RETURN_IF_ERROR(CreateSplitProviders(dataset_def, split_providers));
std::vector<SnapshotManager::Source> sources;
sources.reserve(split_providers.size());
for (size_t i = 0; i < split_providers.size(); ++i) {
TF_ASSIGN_OR_RETURN(size_t cardinality, CountSplits(*split_providers[i]));
sources.emplace_back(
std::make_unique<PrefetchedSplitProvider>(
std::move(split_providers[i]), PrefetchedSplitDir(path_, i), env_),
0, cardinality);
}
return sources;
}
absl::StatusOr<int64_t> SnapshotManager::GetSplitsCardinality()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return absl::c_accumulate(sources_, 0,
[](size_t cardinality, const Source& source) {
return cardinality + source.cardinality;
});
}
absl::Status SnapshotManager::WriteOnDiskSkeleton()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
TF_RETURN_IF_ERROR(
env_->RecursivelyCreateDir(CommittedChunksDirectory(path_)));
TF_RETURN_IF_ERROR(env_->RecursivelyCreateDir(StreamsDirectory(path_)));
return absl::OkStatus();
}
absl::Status SnapshotManager::WriteOnDiskMetadata(
const SnapshotRequest& request) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
TF_RETURN_IF_ERROR(AtomicallyWriteTextProto(SnapshotMetadataFilePath(path_),
request.metadata(), env_));
TF_RETURN_IF_ERROR(AtomicallyWriteStringToFile(
DatasetSpecFilePath(path_), request.metadata().element_spec(), env_));
TF_RETURN_IF_ERROR(AtomicallyWriteBinaryProto(DatasetDefFilePath(path_),
request.dataset(), env_));
return absl::OkStatus();
}
absl::StatusOr<std::unique_ptr<SnapshotManager>> SnapshotManager::Resume(
absl::string_view path, SnapshotAssignmentManager& assignment_manager,
Env* env) {
SnapshotManager* snapshot_manager =
new SnapshotManager(path, assignment_manager, env);
TF_RETURN_IF_ERROR(snapshot_manager->Resume());
return absl::WrapUnique(snapshot_manager);
}
absl::Status SnapshotManager::Resume() TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
if (!env_->FileExists(path_).ok()) {
return absl::InternalError(
absl::StrCat("Failed to recover tf.data snapshot at ", path_,
": the snapshot path doesn't exist."));
}
if (env_->FileExists(SnapshotDoneFilePath(path_)).ok()) {
mode_ = Mode::kDone;
LOG(INFO) << "Recovered finished tf.data snapshot at " << path_;
return absl::OkStatus();
}
if (env_->FileExists(SnapshotErrorFilePath(path_)).ok()) {
mode_ = Mode::kError;
StatusProto status_proto;
TF_RETURN_IF_ERROR(
ReadTextProto(env_, SnapshotErrorFilePath(path_), &status_proto));
status_ = tsl::StatusFromProto(status_proto);
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(ReadOnDiskMetadata());
TF_RETURN_IF_ERROR(ReadOnDiskStreams());
LOG(INFO) << "Resumed writing tf.data distributed snapshot at " << path_;
return absl::OkStatus();
}
absl::Status SnapshotManager::ReadOnDiskMetadata()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!env_->FileExists(SnapshotMetadataFilePath(path_)).ok()) {
return absl::InternalError(
absl::StrCat("Failed to recover snapshot at ", path_,
": snapshot has no snapshot.metadata"));
}
TF_RETURN_IF_ERROR(
ReadTextProto(env_, SnapshotMetadataFilePath(path_), &metadata_));
if (!env_->FileExists(DatasetDefFilePath(path_)).ok()) {
return absl::InternalError(
absl::StrCat("Failed to recovery snapshot at ", path_,
": snapshot has no dataset_def.proto"));
}
return absl::OkStatus();
}
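// Restores per-stream state. Each stream_<index> directory is replayed on
// its own thread; afterwards every split provider is fast-forwarded past
// the splits that were already assigned, and the collected global split
// indices must form a gapless 0..N-1 range or recovery fails.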
absl::Status SnapshotManager::ReadOnDiskStreams()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
std::string streams_path = StreamsDirectory(path_);
TF_ASSIGN_OR_RETURN(const std::vector<std::string> stream_directories,
GetChildren(streams_path, env_));
DatasetDef dataset_def;
TF_RETURN_IF_ERROR(
tsl::ReadBinaryProto(env_, DatasetDefFilePath(path_), &dataset_def));
std::vector<std::unique_ptr<SplitProvider>> split_providers;
TF_RETURN_IF_ERROR(CreateSplitProviders(dataset_def, split_providers));
std::vector<int64_t> repetition_indices(split_providers.size(), 0);
std::vector<int64_t> cardinalities;
for (size_t i = 0; i < split_providers.size(); ++i) {
TF_ASSIGN_OR_RETURN(int64_t cardinality, CountSplits(*split_providers[i]));
cardinalities.push_back(cardinality);
}
tsl::mutex mu;
absl::Status resume_status;
absl::flat_hash_set<int64_t> global_split_indices;
auto thread_pool = std::make_unique<tsl::thread::ThreadPool>(
env_, tsl::ThreadOptions{}, "restore_snapshot_stream_thread",
std::max(size_t{1}, stream_directories.size()));
for (const auto& stream_directory : stream_directories) {
std::string stream_path = tsl::io::JoinPath(streams_path, stream_directory);
std::vector<std::string> tokens = absl::StrSplit(stream_directory, '_');
int64_t stream_index;
if (tokens.size() != 2 || !absl::SimpleAtoi(tokens[1], &stream_index) ||
stream_index < 0) {
return absl::InternalError(absl::StrCat(
"Can't parse tf.data snapshot stream directory ", stream_path,
": filename must have the format stream_<stream_index>."));
}
thread_pool->Schedule([this, &stream_directories, stream_index,
&split_providers, &repetition_indices,
&global_split_indices, &resume_status,
&mu]() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
StreamRestorer stream_restorer(env_, path_, stream_index,
split_providers.size(),
assignment_manager_);
absl::Status s = stream_restorer.ReadOnDiskStream();
tsl::mutex_lock l(mu);
resume_status.Update(s);
resume_status.Update(RestoreFrom(stream_restorer, stream_directories,
split_providers, repetition_indices,
global_split_indices));
});
}
thread_pool.reset();
TF_RETURN_IF_ERROR(resume_status);
for (int64_t i = 0; i < split_providers.size(); ++i) {
sources_.emplace_back(
std::make_unique<PrefetchedSplitProvider>(
std::move(split_providers[i]), PrefetchedSplitDir(path_, i), env_),
repetition_indices[i], cardinalities[i]);
}
TF_ASSIGN_OR_RETURN(num_total_splits_, GetSplitsCardinality());
for (int64_t i = 0; i < global_split_indices.size(); ++i) {
if (!global_split_indices.contains(i)) {
return absl::InternalError(
absl::StrCat("Failed to restore tf.data snapshot at ", path_,
": Found missing global split index ", i, "."));
}
}
num_assigned_splits_ = global_split_indices.size();
if (!streams_.empty() && absl::c_all_of(streams_, [](const auto& stream) {
return stream.second.state == Stream::State::kDone;
})) {
mode_ = Mode::kDone;
TF_RETURN_IF_ERROR(AtomicallyWriteStringToFile(SnapshotDoneFilePath(path_),
std::string(), env_));
LOG(INFO) << "Finished writing tf.data distributed snapshot at " << path_;
}
return absl::OkStatus();
}
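// StreamRestorer reads a single stream's on-disk layout. A stream directory
// without a recorded owner worker was never fully initialized and is
// skipped; otherwise its splits are counted per source, its done-file
// determines the stream state, and the worker's assignment is re-registered
// with the assignment manager.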
absl::StatusOr<std::string>
SnapshotManager::StreamRestorer::OwnerWorkerAddress() const {
std::string worker_address;
TF_RETURN_IF_ERROR(
env_->FileExists(StreamWorkerFilePath(path_, stream_index_)));
TF_RETURN_IF_ERROR(tsl::ReadFileToString(
env_, StreamWorkerFilePath(path_, stream_index_), &worker_address));
return worker_address;
}
absl::Status SnapshotManager::StreamRestorer::ReadOnDiskStream() {
absl::StatusOr<std::string> worker_address = OwnerWorkerAddress();
if (!worker_address.ok()) {
return absl::OkStatus();
}
worker_address_ = *worker_address;
restored_stream_.emplace(num_sources_);
std::string splits_path = SplitsDirectory(path_, stream_index_);
TF_ASSIGN_OR_RETURN(std::vector<std::string> source_directories,
GetChildren(splits_path, env_));
for (const auto& source_directory : source_directories) {
std::string source_path = tsl::io::JoinPath(splits_path, source_directory);
std::vector<std::string> tokens = absl::StrSplit(source_directory, '_');
int64_t source_index = 0;
if (tokens.size() != 2 || !absl::SimpleAtoi(tokens[1], &source_index) ||
source_index < 0) {
return absl::InternalError(absl::StrCat(
"Can't parse tf.data snapshot source directory ", source_path,
": filename must have the format source_<source_index>."));
}
if (source_index >= num_sources_) {
return absl::InternalError(
absl::StrCat("Found conflict between the number of sources, ",
num_sources_, ", and the filename of ", source_path));
}
TF_RETURN_IF_ERROR(ReadOnDiskSource(source_index));
}
if (env_->FileExists(StreamDoneFilePath(path_, stream_index_)).ok()) {
restored_stream_->state = Stream::State::kDone;
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(bool assignment_added,
assignment_manager_.TryAddAssignment(
path_, *worker_address, stream_index_));
if (!assignment_added) {
return absl::InternalError(absl::StrCat(
"Failed to recover tf.data snapshot dispatcher: Worker ",
*worker_address, " was assigned too many streams. At most ",
assignment_manager_.worker_max_concurrent_snapshots(),
" streams are allowed."));
}
return absl::OkStatus();
}
absl::Status SnapshotManager::StreamRestorer::ReadOnDiskSource(
int64_t source_index) {
std::string source_directory =
SourceDirectory(path_, stream_index_, source_index);
TF_ASSIGN_OR_RETURN(std::vector<std::string> repetition_directories,
GetChildren(source_directory, env_));
for (const std::string& repetition : repetition_directories) {
std::string repetition_dir =
tsl::io::JoinPath(source_directory, repetition);
TF_ASSIGN_OR_RETURN(std::vector<std::string> split_files,
GetChildren(repetition_dir, env_));
for (const std::string& split_file : split_files) {
std::string split_path = tsl::io::JoinPath(repetition_dir, split_file);
TF_RETURN_IF_ERROR(
ReadOnDiskSplit(source_index, split_files, split_path));
}
restored_stream_->num_assigned_splits_per_source[source_index] +=
split_files.size();
}
return absl::OkStatus();
}
absl::Status SnapshotManager::StreamRestorer::ReadOnDiskSplit(
int64_t source_index, const std::vector<std::string>& split_files,
const std::string& split_file) {
TF_ASSIGN_OR_RETURN(auto split_indices, ParseSplitFilename(split_file));
auto [local_split_index, global_split_index] = split_indices;
if (global_split_indices_.contains(global_split_index)) {
return absl::InternalError(absl::StrCat(
"Failed to restore tf.data snapshot at ", path_,
": Found duplicate global split index in split ", split_file, "."));
}
global_split_indices_.insert(global_split_index);
return absl::OkStatus();
}
absl::Status SnapshotManager::RestoreFrom(
const StreamRestorer& stream_restorer,
const std::vector<std::string>& stream_directories,
std::vector<std::unique_ptr<SplitProvider>>& split_providers,
std::vector<std::int64_t>& repetition_indices,
absl::flat_hash_set<int64_t>& global_split_indices)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!stream_restorer.GetStream().has_value()) {
return absl::OkStatus();
}
streams_.insert(
{stream_restorer.StreamIndex(), *stream_restorer.GetStream()});
auto [it, success] = assignments_.insert(
{stream_restorer.WorkerAddress(), stream_restorer.StreamIndex()});
if (!success) {
return absl::InternalError(absl::StrCat(
"tf.data dispatcher failed to assign stream ",
stream_restorer.StreamIndex(), " to snapshot worker ",
stream_restorer.WorkerAddress(),
": The worker is already assigned stream ", it->second, "."));
}
for (int64_t source_index = 0; source_index < repetition_indices.size();
++source_index) {
int64_t skip_splits = GetStream(stream_restorer.StreamIndex())
.num_assigned_splits_per_source[source_index];
for (int64_t i = 0; i < skip_splits; ++i) {
TF_RETURN_IF_ERROR(SkipSplit(*split_providers[source_index],
repetition_indices[source_index]));
}
}
for (int64_t global_split_index : stream_restorer.GlobalSplitIndices()) {
if (global_split_indices.contains(global_split_index)) {
return absl::InternalError(
absl::StrCat("Failed to restore tf.data snapshot at ", path_,
": Found ", "duplicate global split index in stream ",
stream_restorer.StreamIndex(), "."));
}
global_split_indices.insert(global_split_index);
}
return absl::OkStatus();
}
SnapshotManager::Stream& SnapshotManager::GetStream(int64_t stream_index)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
auto [it, _] = streams_.try_emplace(stream_index, num_sources());
return it->second;
}
absl::Status SnapshotManager::HandleStreamCompletion(
int64_t stream_index, absl::string_view worker_address)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
GetStream(stream_index).state = Stream::State::kDone;
assignment_manager_.RemoveAssignment(path_, worker_address, stream_index);
++num_completed_streams_;
if (absl::c_all_of(streams_, [](const auto& stream) {
return stream.second.state == Stream::State::kDone;
})) {
mode_ = Mode::kDone;
TF_RETURN_IF_ERROR(AtomicallyWriteStringToFile(SnapshotDoneFilePath(path_),
std::string(), env_));
LOG(INFO) << "Finished writing tf.data distributed snapshot at " << path_;
}
return absl::OkStatus();
}
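// Stream errors are sticky: only the first reported failure is recorded.
// The status is persisted to the snapshot's error file so that a restarted
// dispatcher resumes straight into the error state instead of redoing work.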
absl::Status SnapshotManager::HandleStreamError(
absl::string_view worker_address, const StatusProto& status_proto)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!status_.ok()) {
return absl::OkStatus();
}
mode_ = Mode::kError;
status_ = tsl::StatusFromProto(status_proto);
TF_RETURN_IF_ERROR(AtomicallyWriteTextProto(SnapshotErrorFilePath(path_),
status_proto, env_));
LOG(ERROR) << "Failed to write tf.data distributed snapshot at " << path_
<< ". Worker " << worker_address << " reported error: " << status_;
return absl::OkStatus();
}
absl::StatusOr<std::optional<int64_t>>
SnapshotManager::MaybeCreateAndAssignNewStream(absl::string_view worker_address)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
int64_t new_stream_index =
streams_.empty() ? 0 : streams_.rbegin()->first + 1;
TF_ASSIGN_OR_RETURN(bool assignment_added,
assignment_manager_.TryAddAssignment(
path_, worker_address, new_stream_index));
if (!assignment_added) {
return std::optional<int64_t>();
}
streams_.insert({new_stream_index, Stream(num_sources())});
assignments_[worker_address] = new_stream_index;
return new_stream_index;
}
absl::StatusOr<std::optional<std::pair<int64_t, bool>>>
SnapshotManager::MaybeGetOrCreateStreamAssignment(
absl::string_view worker_address,
const SnapshotTaskProgress* snapshot_progress)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
std::optional<int64_t> assigned_stream_index;
if (auto it = assignments_.find(worker_address); it != assignments_.end()) {
assigned_stream_index = it->second;
}
if (snapshot_progress) {
if (assigned_stream_index.has_value() &&
*assigned_stream_index !=
snapshot_progress->snapshot_task().stream_index()) {
return absl::InternalError(absl::StrCat(
"tf.data snapshot worker ", worker_address, " was assigned stream ",
snapshot_progress->snapshot_task().stream_index(),
", but is now assigned a different stream ", *assigned_stream_index));
}
if (assigned_stream_index.has_value() && snapshot_progress->completed()) {
TF_RETURN_IF_ERROR(HandleStreamCompletion(
snapshot_progress->snapshot_task().stream_index(), worker_address));
return std::nullopt;
}
if (snapshot_progress->status().code() != error::OK) {
TF_RETURN_IF_ERROR(
HandleStreamError(worker_address, snapshot_progress->status()));
return std::nullopt;
}
}
if (!assigned_stream_index) {
if (mode_ != Mode::kActive) {
return std::nullopt;
}
TF_ASSIGN_OR_RETURN(assigned_stream_index,
MaybeCreateAndAssignNewStream(worker_address));
if (!assigned_stream_index.has_value()) {
return std::nullopt;
}
return std::make_pair(*assigned_stream_index, true);
}
if (!assigned_stream_index.has_value() ||
GetStream(*assigned_stream_index).state == Stream::State::kDone) {
return std::nullopt;
}
return std::make_pair(*assigned_stream_index, false);
}
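// Heartbeat handling. A progress report can complete a stream or fail the
// whole snapshot; the first completed stream also flips the snapshot from
// kActive to kWindingDown so no further streams are created. If the worker
// still holds (or is newly given) an assignment, it is echoed back as a
// SnapshotTaskDef along with the current repetition index of each source.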
absl::Status SnapshotManager::WorkerHeartbeat(
const WorkerHeartbeatRequest& request, WorkerHeartbeatResponse& response)
TF_LOCKS_EXCLUDED(mu_) {
std::optional<std::pair<int64_t, bool>> assigned_stream_index;
std::vector<int64_t> repetitions_per_source;
{
tsl::mutex_lock l(mu_);
dead_workers_.erase(request.worker_address());
if (mode_ == Mode::kDone || mode_ == Mode::kError) {
return absl::OkStatus();
}
if (absl::Time now = absl::FromUnixMicros(env_->NowMicros());
now - last_progress_log_time_ > kProgressLoggingInterval) {
LOG(INFO) << "tf.data snapshot progress [" << path_
<< "]: " << num_completed_streams_ << "/" << streams_.size()
<< " streams completed; " << num_assigned_splits_ << "/"
<< num_total_splits_ << " splits assigned or completed.";
last_progress_log_time_ = now;
}
const SnapshotTaskProgress* snapshot_progress = nullptr;
if (auto it = request.snapshot_task_progress().find(path_);
it != request.snapshot_task_progress().end()) {
snapshot_progress = &it->second;
}
if (snapshot_progress && snapshot_progress->completed() &&
mode_ == Mode::kActive) {
mode_ = Mode::kWindingDown;
}
TF_ASSIGN_OR_RETURN(assigned_stream_index,
MaybeGetOrCreateStreamAssignment(
request.worker_address(), snapshot_progress));
if (!assigned_stream_index.has_value()) {
return absl::OkStatus();
}
SnapshotTaskDef* snapshot_task = response.add_snapshot_tasks();
snapshot_task->set_base_path(path_);
snapshot_task->set_num_sources(num_sources());
*snapshot_task->mutable_metadata() = metadata_;
snapshot_task->set_stream_index(assigned_stream_index->first);
for (int64_t source_index = 0; source_index < num_sources();
++source_index) {
repetitions_per_source.push_back(sources_[source_index].repetition_index);
}
}
const auto [stream_index, is_new_stream] = *assigned_stream_index;
if (is_new_stream) {
TF_RETURN_IF_ERROR(InitStreamDirectory(
stream_index, request.worker_address(), repetitions_per_source));
LOG(INFO) << "For snapshot at " << path_ << ", created stream_"
<< stream_index << " and assigned to "
<< request.worker_address();
}
return absl::OkStatus();
}
absl::Status SnapshotManager::InitStreamDirectory(
int64_t stream_index, const std::string& worker_address,
const std::vector<int64_t>& repetitions_per_source) {
for (int64_t source_index = 0; source_index < repetitions_per_source.size();
++source_index) {
for (int64_t repetition_index = 0;
repetition_index <= repetitions_per_source[source_index];
++repetition_index) {
TF_RETURN_IF_ERROR(env_->RecursivelyCreateDir(RepetitionDirectory(
path_, stream_index, source_index, repetition_index)));
}
}
return AtomicallyWriteStringToFile(StreamWorkerFilePath(path_, stream_index),
worker_address, env_);
}
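// Hands out the next split for a stream/source pair. get_split_mu_
// serializes split distribution across concurrent RPCs while mu_ guards the
// manager state; a request for an older repetition simply reports
// end_of_splits, and the assignment counters are only advanced once the
// prefetched provider has materialized the split file at split_path.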
absl::Status SnapshotManager::GetSnapshotSplit(
const GetSnapshotSplitRequest& request, GetSnapshotSplitResponse& response)
TF_LOCKS_EXCLUDED(get_split_mu_, mu_) {
int64_t local_split_index = 0;
int64_t global_split_index = 0;
PrefetchedSplitProvider* split_provider = nullptr;
tsl::mutex_lock get_split_lock(get_split_mu_);
{
tsl::mutex_lock l(mu_);
if (auto it = assignments_.find(request.worker_address());
it == assignments_.end()) {
return absl::InternalError(
absl::StrCat("tf.data snapshot worker ", request.worker_address(),
" was assigned stream ", request.stream_index(),
", but the assignment is no longer available."));
} else if (it->second != request.stream_index()) {
return absl::InternalError(
absl::StrCat("tf.data snapshot worker ", request.worker_address(),
" was assigned stream ", request.stream_index(),
" but is now assigned a different stream ", it->second));
}
Stream& stream = GetStream(request.stream_index());
local_split_index =
stream.num_assigned_splits_per_source[request.source_index()];
global_split_index = num_assigned_splits_;
response.set_local_split_index(local_split_index);
Source& source = sources_[request.source_index()];
if (request.repetition_index() < source.repetition_index) {
response.set_end_of_splits(true);
return absl::OkStatus();
}
while (request.repetition_index() > source.repetition_index) {
TF_RETURN_IF_ERROR(ResetSource(source, request.source_index()));
}
split_provider = source.split_provider.get();
}
std::string split_path = SplitPath(
path_, request.stream_index(), request.source_index(),
request.repetition_index(), local_split_index, global_split_index);
TF_ASSIGN_OR_RETURN(std::optional<Tensor> split,
split_provider->GetNext(split_path));
if (!split.has_value()) {
response.set_end_of_splits(true);
return absl::OkStatus();
}
split->AsProtoTensorContent(response.mutable_split());
tsl::mutex_lock l(mu_);
++GetStream(request.stream_index())
.num_assigned_splits_per_source[request.source_index()];
++num_assigned_splits_;
return absl::OkStatus();
}
absl::Status SnapshotManager::ResetSource(Source& source, int64_t source_index)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
TF_RETURN_IF_ERROR(source.split_provider->Reset());
++source.repetition_index;
LOG(INFO) << "Starting repetition_" << source.repetition_index << " "
<< "for snapshot " << path_ << ", source " << source_index;
for (const auto& [stream_index, _] : streams_) {
TF_RETURN_IF_ERROR(env_->RecursivelyCreateDir(RepetitionDirectory(
path_, stream_index, source_index, source.repetition_index)));
}
return absl::OkStatus();
}
absl::Status SnapshotManager::GetSnapshotStreams(
GetSnapshotStreamsResponse& response) TF_LOCKS_EXCLUDED(mu_) {
tsl::tf_shared_lock l(mu_);
for (const auto& [stream_index, stream] : streams_) {
SnapshotStreamInfo* stream_info = response.add_streams();
stream_info->set_index(stream_index);
stream_info->set_state(stream.state == Stream::State::kDone
? SnapshotStreamInfo::DONE
: SnapshotStreamInfo::ASSIGNED);
}
return absl::OkStatus();
}
void SnapshotManager::Cancel() {
std::vector<PrefetchedSplitProvider*> split_providers_to_cancel;
{
tsl::mutex_lock l(mu_);
for (Source& source : sources_) {
split_providers_to_cancel.push_back(source.split_provider.get());
}
}
for (PrefetchedSplitProvider* split_provider : split_providers_to_cancel) {
split_provider->Cancel();
}
}
}
} | #include "tensorflow/core/data/service/snapshot/snapshot_manager.h"
#include <memory>
#include <string>
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/protobuf/status.pb.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/dispatcher.pb.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/status_to_from_proto.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::_;
using ::testing::ElementsAre;
using ::testing::IsEmpty;
using ::testing::Not;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAre;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
template <class T>
T GetValue(const Tensor& tensor) {
return tensor.unaligned_flat<T>().data()[0];
}
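// The tests below drive the dispatcher-side protocol end to end: a snapshot
// of RangeDataset(10) is started, workers acquire streams via
// WorkerHeartbeat, and GetSnapshotSplit hands out the elements 0..9 in
// order.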
TEST(SnapshotManagerTest, CreateStreamAssignment) {
std::string snapshot_path = testing::LocalTempFilename();
SnapshotRequest request;
*request.mutable_dataset() = testing::RangeDataset(10);
request.set_path(snapshot_path);
*request.mutable_metadata() =
testing::CreateDummyDistributedSnapshotMetadata();
SnapshotAssignmentManager snapshot_assignment_manager(
      /*worker_max_concurrent_snapshots=*/2);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<SnapshotManager> snapshot_manager,
SnapshotManager::Start(request, snapshot_assignment_manager,
Env::Default()));
WorkerHeartbeatRequest heartbeat_request;
WorkerHeartbeatResponse heartbeat_response;
heartbeat_request.set_worker_address("localhost");
TF_ASSERT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
ASSERT_EQ(heartbeat_response.snapshot_tasks().size(), 1);
EXPECT_EQ(heartbeat_response.snapshot_tasks(0).base_path(), snapshot_path);
EXPECT_EQ(heartbeat_response.snapshot_tasks(0).stream_index(), 0);
EXPECT_EQ(heartbeat_response.snapshot_tasks(0).num_sources(), 1);
}
TEST(SnapshotManagerTest, GetSnapshotSplit) {
std::string snapshot_path = testing::LocalTempFilename();
SnapshotRequest request;
*request.mutable_dataset() = testing::RangeDataset(10);
request.set_path(snapshot_path);
*request.mutable_metadata() =
testing::CreateDummyDistributedSnapshotMetadata();
SnapshotAssignmentManager snapshot_assignment_manager(
      /*worker_max_concurrent_snapshots=*/2);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<SnapshotManager> snapshot_manager,
SnapshotManager::Start(request, snapshot_assignment_manager,
Env::Default()));
WorkerHeartbeatRequest heartbeat_request;
WorkerHeartbeatResponse heartbeat_response;
heartbeat_request.set_worker_address("localhost");
TF_ASSERT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
const SnapshotTaskDef& task = heartbeat_response.snapshot_tasks(0);
GetSnapshotSplitRequest get_split_request;
GetSnapshotSplitResponse get_split_response;
get_split_request.set_worker_address("localhost");
get_split_request.set_base_path(task.base_path());
get_split_request.set_stream_index(task.stream_index());
get_split_request.set_source_index(0);
for (int64_t i = 0; i < 10; ++i) {
TF_ASSERT_OK(snapshot_manager->GetSnapshotSplit(get_split_request,
get_split_response));
Tensor tensor;
ASSERT_TRUE(tensor.FromProto(get_split_response.split()));
EXPECT_EQ(GetValue<int64_t>(tensor), i);
}
}
TEST(SnapshotManagerTest, HandleStreamCompletion) {
std::string snapshot_path = testing::LocalTempFilename();
SnapshotRequest request;
*request.mutable_dataset() = testing::RangeDataset(10);
request.set_path(snapshot_path);
*request.mutable_metadata() =
testing::CreateDummyDistributedSnapshotMetadata();
SnapshotAssignmentManager snapshot_assignment_manager(
      /*worker_max_concurrent_snapshots=*/2);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<SnapshotManager> snapshot_manager,
SnapshotManager::Start(request, snapshot_assignment_manager,
Env::Default()));
WorkerHeartbeatRequest heartbeat_request;
WorkerHeartbeatResponse heartbeat_response;
heartbeat_request.set_worker_address("localhost:1");
TF_ASSERT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
heartbeat_request.Clear();
heartbeat_response.Clear();
heartbeat_request.set_worker_address("localhost:2");
TF_ASSERT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
ASSERT_EQ(heartbeat_response.snapshot_tasks().size(), 1);
const SnapshotTaskDef& snapshot_task = heartbeat_response.snapshot_tasks(0);
EXPECT_EQ(snapshot_task.base_path(), snapshot_path);
EXPECT_EQ(snapshot_task.stream_index(), 1);
EXPECT_EQ(snapshot_task.num_sources(), 1);
heartbeat_request.Clear();
heartbeat_response.Clear();
heartbeat_request.set_worker_address("localhost:1");
SnapshotTaskProgress progress;
*progress.mutable_snapshot_task() = snapshot_task;
progress.set_completed(true);
(*heartbeat_request.mutable_snapshot_task_progress())[snapshot_path] =
progress;
TF_ASSERT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
EXPECT_TRUE(heartbeat_response.snapshot_tasks().empty());
heartbeat_request.Clear();
heartbeat_response.Clear();
heartbeat_request.set_worker_address("localhost:1");
TF_ASSERT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
EXPECT_TRUE(heartbeat_response.snapshot_tasks().empty());
}
TEST(SnapshotManagerTest, Resume) {
std::string snapshot_path = testing::LocalTempFilename();
SnapshotRequest request;
*request.mutable_dataset() = testing::RangeDataset(10);
request.set_path(snapshot_path);
*request.mutable_metadata() =
testing::CreateDummyDistributedSnapshotMetadata();
SnapshotAssignmentManager snapshot_assignment_manager_1(
      /*worker_max_concurrent_snapshots=*/2);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<SnapshotManager> snapshot_manager,
SnapshotManager::Start(request, snapshot_assignment_manager_1,
Env::Default()));
WorkerHeartbeatRequest heartbeat_request;
WorkerHeartbeatResponse heartbeat_response;
heartbeat_request.set_worker_address("localhost");
TF_ASSERT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
EXPECT_THAT(heartbeat_response.snapshot_tasks(), SizeIs(1));
heartbeat_response.Clear();
SnapshotAssignmentManager snapshot_assignment_manager_2(
      /*worker_max_concurrent_snapshots=*/2);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<SnapshotManager> resumed_manager,
SnapshotManager::Resume(snapshot_path, snapshot_assignment_manager_2,
Env::Default()));
TF_EXPECT_OK(
resumed_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
EXPECT_THAT(heartbeat_response.snapshot_tasks(), SizeIs(1));
}
TEST(SnapshotManagerTest, SnapshotStreamError) {
std::string snapshot_path = testing::LocalTempFilename();
SnapshotRequest snapshot_request;
*snapshot_request.mutable_dataset() = testing::RangeDataset(10);
snapshot_request.set_path(snapshot_path);
*snapshot_request.mutable_metadata() =
testing::CreateDummyDistributedSnapshotMetadata();
SnapshotAssignmentManager snapshot_assignment_manager(
      /*worker_max_concurrent_snapshots=*/2);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<SnapshotManager> snapshot_manager,
SnapshotManager::Start(snapshot_request, snapshot_assignment_manager,
Env::Default()));
WorkerHeartbeatRequest heartbeat_request;
WorkerHeartbeatResponse heartbeat_response;
heartbeat_request.set_worker_address("localhost");
TF_ASSERT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
const SnapshotTaskDef& task = heartbeat_response.snapshot_tasks(0);
heartbeat_response.Clear();
SnapshotTaskProgress snapshot_task_progress;
*snapshot_task_progress.mutable_snapshot_task() = task;
*snapshot_task_progress.mutable_status() =
tsl::StatusToProto(errors::NotFound("Not found"));
(*heartbeat_request.mutable_snapshot_task_progress())[snapshot_path] =
snapshot_task_progress;
TF_EXPECT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
EXPECT_THAT(heartbeat_response.snapshot_tasks(), IsEmpty());
TF_ASSERT_OK(
Env::Default()->FileExists(SnapshotErrorFilePath(snapshot_path)));
StatusProto status_proto;
TF_ASSERT_OK(ReadTextProto(
Env::Default(), SnapshotErrorFilePath(snapshot_path), &status_proto));
EXPECT_THAT(tsl::StatusFromProto(status_proto),
StatusIs(error::NOT_FOUND, "Not found"));
}
TEST(SnapshotManagerTest, ResumeFromError) {
std::string snapshot_path = testing::LocalTempFilename();
SnapshotRequest request;
*request.mutable_dataset() = testing::RangeDataset(10);
request.set_path(snapshot_path);
*request.mutable_metadata() =
testing::CreateDummyDistributedSnapshotMetadata();
  SnapshotAssignmentManager snapshot_assignment_manager_1(2);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<SnapshotManager> snapshot_manager,
SnapshotManager::Start(request, snapshot_assignment_manager_1,
Env::Default()));
WorkerHeartbeatRequest heartbeat_request;
WorkerHeartbeatResponse heartbeat_response;
heartbeat_request.set_worker_address("localhost");
TF_ASSERT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
ASSERT_THAT(heartbeat_response.snapshot_tasks(), SizeIs(1));
const SnapshotTaskDef& task = heartbeat_response.snapshot_tasks(0);
heartbeat_response.Clear();
SnapshotTaskProgress snapshot_task_progress;
*snapshot_task_progress.mutable_snapshot_task() = task;
*snapshot_task_progress.mutable_status() =
tsl::StatusToProto(errors::NotFound("Not found"));
(*heartbeat_request.mutable_snapshot_task_progress())[snapshot_path] =
snapshot_task_progress;
TF_EXPECT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
EXPECT_THAT(heartbeat_response.snapshot_tasks(), IsEmpty());
heartbeat_response.Clear();
  SnapshotAssignmentManager snapshot_assignment_manager_2(2);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<SnapshotManager> resumed_manager,
SnapshotManager::Resume(snapshot_path, snapshot_assignment_manager_2,
Env::Default()));
TF_EXPECT_OK(
resumed_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
EXPECT_THAT(heartbeat_response.snapshot_tasks(), IsEmpty());
}
TEST(SnapshotAssignmentManagerTest, LoadBalanceSnapshots) {
  SnapshotAssignmentManager snapshot_assignment_manager(2);
snapshot_assignment_manager.AddSnapshot("snapshot_1");
snapshot_assignment_manager.AddSnapshot("snapshot_2");
snapshot_assignment_manager.AddSnapshot("snapshot_3");
EXPECT_THAT(snapshot_assignment_manager.TryAddAssignment(
"snapshot_3", "worker_1", 0),
IsOkAndHolds(true));
EXPECT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_1"),
ElementsAre("snapshot_3", _));
ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_2"),
ElementsAre(Not("snapshot_3")));
EXPECT_THAT(snapshot_assignment_manager.TryAddAssignment(
"snapshot_2", "worker_1", 0),
IsOkAndHolds(true));
ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_1"),
UnorderedElementsAre("snapshot_2", "snapshot_3"));
EXPECT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_2"),
ElementsAre("snapshot_1"));
EXPECT_THAT(snapshot_assignment_manager.TryAddAssignment(
"snapshot_1", "worker_1", 0),
IsOkAndHolds(false));
EXPECT_THAT(snapshot_assignment_manager.TryAddAssignment(
"snapshot_2", "worker_2", 0),
IsOkAndHolds(true));
ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_1"),
UnorderedElementsAre("snapshot_2", "snapshot_3"));
EXPECT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_2"),
ElementsAre("snapshot_2", "snapshot_1"));
snapshot_assignment_manager.RemoveAssignment("snapshot_2", "worker_1",
0);
EXPECT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_1"),
ElementsAre("snapshot_3", "snapshot_1"));
ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_2"),
ElementsAre("snapshot_2", "snapshot_1"));
snapshot_assignment_manager.RemoveAssignment("snapshot_3", "worker_1",
0);
ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_1"),
ElementsAre("snapshot_1"));
ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_2"),
ElementsAre("snapshot_2", "snapshot_1"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/snapshot_manager.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/snapshot_manager_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6fdff665-ed54-4a1b-8aa2-c0280a997a05 | cpp | tensorflow/tensorflow | tf_threadpool_concurrent_work_queue | tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue.cc | tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue_test.cc | #include "tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue.h"
#include <memory>
#include <optional>
#include <utility>
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/threadpool.h"
#include "tensorflow/core/platform/threadpool_interface.h"
#include "tensorflow/core/tfrt/utils/thread_pool.h"
#include "tfrt/host_context/async_value.h"
#include "tfrt/host_context/execution_context.h"
#include "tfrt/host_context/task_function.h"
#include "tfrt/support/forward_decls.h"
#include "tfrt/support/latch.h"
namespace tensorflow {
namespace tfrt_stub {
using ::tensorflow::thread::ThreadPoolInterface;
absl::StatusOr<std::unique_ptr<WorkQueueInterface>>
TfThreadPoolWorkQueue::InitializeRequest(int64_t request_id) const {
return {std::make_unique<TfThreadPoolWorkQueue>(
request_id, intra_op_threadpool_, inter_op_threadpool_)};
}
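// Schedules `work` on the inter-op thread pool after wrapping it (via
// WrapWork) with this queue's request id and the "inter" label. The
// move-only TaskFunction is boxed on the heap because the thread pool's
// Schedule() requires a copyable callable; the box is deleted once it runs.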
void TfThreadPoolWorkQueue::AddTask(tfrt::TaskFunction work) {
auto* copy = new tfrt::TaskFunction(
tensorflow::tfrt_stub::WrapWork(id(), "inter", std::move(work)));
inter_op_threadpool_->Schedule([copy] {
(*copy)();
delete copy;
});
}
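// Blocking tasks are forwarded to the same inter-op pool. Returning
// std::nullopt signals that the task was accepted and the caller has
// nothing to re-queue, regardless of `allow_queuing`.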
std::optional<tfrt::TaskFunction> TfThreadPoolWorkQueue::AddBlockingTask(
tfrt::TaskFunction work, bool allow_queuing) {
AddTask(std::move(work));
return std::nullopt;
}
void TfThreadPoolWorkQueue::Quiesce() {
}
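// Blocks until all `values` become available: each value's AndThen()
// callback counts down a latch that the calling thread waits on.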
void TfThreadPoolWorkQueue::Await(
tfrt::ArrayRef<tfrt::RCReference<tfrt::AsyncValue>> values) {
tfrt::latch values_remaining(values.size());
for (auto& value : values) {
value->AndThen([&values_remaining]() { values_remaining.count_down(); });
}
values_remaining.wait();
}
bool TfThreadPoolWorkQueue::IsInWorkerThread() const {
return true;
}
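// Builds a work queue that owns its own inter-op and intra-op thread pools;
// the Wrapper defined inside keeps the pools alive as long as the queue
// exists.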
std::unique_ptr<TfThreadPoolWorkQueue> CreateDefaultTfThreadPoolWorkQueue(
int num_inter_op_threads, int num_intra_op_threads) {
struct ThreadPools {
TfThreadPool inter_op_threadpool;
TfThreadPool intra_op_threadpool;
ThreadPools(int num_inter_op_threads, int num_intra_op_threads)
: inter_op_threadpool("default_work_queue_inter", num_inter_op_threads),
intra_op_threadpool("default_work_queue_intra",
num_intra_op_threads) {}
};
class Wrapper : public TfThreadPoolWorkQueue {
public:
explicit Wrapper(std::unique_ptr<ThreadPools> thread_pools)
: TfThreadPoolWorkQueue(
&thread_pools->intra_op_threadpool,
&thread_pools->inter_op_threadpool),
thread_pools_(std::move(thread_pools)) {}
~Wrapper() override = default;
private:
std::unique_ptr<ThreadPools> thread_pools_;
};
return std::make_unique<Wrapper>(std::make_unique<ThreadPools>(
num_inter_op_threads, num_intra_op_threads));
}
}
} | #include "tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue.h"
#include <cstdint>  // int32_t
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/tfrt/utils/thread_pool.h"
#include "tfrt/host_context/host_allocator.h"
#include "tfrt/host_context/host_context.h"
#include "tfrt/support/latch.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
const int32_t kNumThreads = 2;
class TfThreadpoolWorkQueueTest : public ::testing::Test {
protected:
TfThreadpoolWorkQueueTest()
      : tf_threadpool_cwq_(CreateDefaultTfThreadPoolWorkQueue(
            kNumThreads, kNumThreads)) {}
std::unique_ptr<TfThreadPoolWorkQueue> tf_threadpool_cwq_;
};
TEST_F(TfThreadpoolWorkQueueTest, GetParallelismLevelOk) {
EXPECT_GT(tf_threadpool_cwq_->GetParallelismLevel(), 0);
}
TEST_F(TfThreadpoolWorkQueueTest, GetNameOk) {
EXPECT_EQ(tf_threadpool_cwq_->name(), "TfThreadPoolWorkQueue");
}
TEST_F(TfThreadpoolWorkQueueTest, InitializeRequestOk) {
  tfrt::RequestContextBuilder ctx_builder(nullptr, nullptr);
auto queue = tf_threadpool_cwq_->InitializeRequest(0);
TF_ASSERT_OK(queue.status());
EXPECT_NE(*queue, nullptr);
EXPECT_NE((*queue)->GetIntraOpThreadPool(), nullptr);
}
TEST_F(TfThreadpoolWorkQueueTest, IsInWorkerThreadOk) {
EXPECT_TRUE(tf_threadpool_cwq_->IsInWorkerThread());
}
TEST_F(TfThreadpoolWorkQueueTest, RunningBlockingTask) {
tfrt::latch latch(10);
int n = 0;
tensorflow::mutex m;
for (int i = 0; i < 10; ++i) {
tf_threadpool_cwq_->AddBlockingTask(tfrt::TaskFunction([&n, &m, &latch] {
{
tensorflow::mutex_lock lock(m);
++n;
}
latch.count_down();
}),
true);
}
latch.wait();
EXPECT_EQ(n, 10);
}
TEST_F(TfThreadpoolWorkQueueTest, RunningNonBlockingTask) {
tfrt::latch latch(10);
int n = 0;
tensorflow::mutex m;
for (int i = 0; i < 10; ++i) {
tf_threadpool_cwq_->AddTask(tfrt::TaskFunction([&n, &m, &latch] {
{
tensorflow::mutex_lock lock(m);
++n;
}
latch.count_down();
}));
}
latch.wait();
EXPECT_EQ(n, 10);
}
TEST_F(TfThreadpoolWorkQueueTest, RunningMixedTask) {
tfrt::latch latch(20);
int n = 0;
tensorflow::mutex m;
for (int i = 0; i < 10; ++i) {
tf_threadpool_cwq_->AddTask(tfrt::TaskFunction([&n, &m, &latch] {
{
tensorflow::mutex_lock lock(m);
++n;
}
latch.count_down();
}));
tf_threadpool_cwq_->AddBlockingTask(tfrt::TaskFunction([&n, &m, &latch] {
{
tensorflow::mutex_lock lock(m);
++n;
}
latch.count_down();
}),
true);
}
latch.wait();
EXPECT_EQ(n, 20);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/runtime/tf_threadpool_concurrent_work_queue_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0ceab836-2f6f-4d6c-85b5-579dbed57ba0 | cpp | tensorflow/tensorflow | dot_dimension_merger | third_party/xla/xla/service/dot_dimension_merger.cc | third_party/xla/xla/service/dot_dimension_merger_test.cc | #include "xla/service/dot_dimension_merger.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/layout_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
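// Returns `dimensions` with every entry >= `start` decreased by `shift`,
// renumbering dimension indices after batch dimensions are collapsed.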
std::vector<int64_t> ShiftDimensions(absl::Span<const int64_t> dimensions,
const int64_t start, const int64_t shift) {
std::vector<int64_t> new_dimensions;
new_dimensions.reserve(dimensions.size());
for (const int64_t i : dimensions) {
if (i < start) {
new_dimensions.push_back(i);
} else {
new_dimensions.push_back(i - shift);
}
}
return new_dimensions;
}
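// Rewrites dot ops with two or more batch dimensions that are sorted,
// logically consecutive, and physically consecutive in the operand layouts:
// the operands are reshaped to fold those dimensions into a single batch
// dimension, the dot is re-emitted, and its result is reshaped back to the
// original shape. Sparsity descriptors and metadata operands, if present,
// are renumbered and reshaped to match.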
class BatchDimensionMerger : public DfsHloRewriteVisitor {
public:
absl::Status HandleDot(HloInstruction* dot) override {
const DotDimensionNumbers& dnums = dot->dot_dimension_numbers();
const Shape& lhs_shape = dot->operand(0)->shape();
const Shape& rhs_shape = dot->operand(1)->shape();
CHECK_EQ(dnums.lhs_batch_dimensions_size(),
dnums.rhs_batch_dimensions_size());
const int64_t batch_dimension_count = dnums.lhs_batch_dimensions_size();
if (batch_dimension_count < 2 ||
!DistinctNumbersAreConsecutiveIfSorted(dnums.lhs_batch_dimensions()) ||
!DistinctNumbersAreConsecutiveIfSorted(dnums.rhs_batch_dimensions()) ||
!absl::c_is_sorted(dnums.lhs_batch_dimensions()) ||
!absl::c_is_sorted(dnums.rhs_batch_dimensions()) ||
!LayoutUtil::AreDimensionsConsecutive(lhs_shape.layout(),
dnums.lhs_batch_dimensions()) ||
!LayoutUtil::AreDimensionsConsecutive(rhs_shape.layout(),
dnums.rhs_batch_dimensions())) {
return absl::OkStatus();
}
const int64_t lhs_batch_dimension =
*absl::c_min_element(dnums.lhs_batch_dimensions());
const int64_t rhs_batch_dimension =
*absl::c_min_element(dnums.rhs_batch_dimensions());
int64_t batch_size = 1;
for (const int64_t dimension_number : dnums.lhs_batch_dimensions()) {
batch_size *= lhs_shape.dimensions(dimension_number);
}
auto merge_batch_dims = [&](Shape old_shape, int64_t batch_dim) {
Shape new_shape = old_shape;
for (int64_t i = 1; i < batch_dimension_count; ++i) {
new_shape.DeleteDimension(batch_dim + 1);
}
new_shape.set_dimensions(batch_dim, batch_size);
return new_shape;
};
Shape new_lhs_shape = merge_batch_dims(lhs_shape, lhs_batch_dimension);
Shape new_rhs_shape = merge_batch_dims(rhs_shape, rhs_batch_dimension);
DotDimensionNumbers new_dot_dimension_numbers;
new_dot_dimension_numbers.add_lhs_batch_dimensions(lhs_batch_dimension);
new_dot_dimension_numbers.add_rhs_batch_dimensions(rhs_batch_dimension);
{
const std::vector<int64_t> shifted_contracting_dimensions =
ShiftDimensions(dnums.lhs_contracting_dimensions(),
lhs_batch_dimension, batch_dimension_count - 1);
new_dot_dimension_numbers.mutable_lhs_contracting_dimensions()->Assign(
shifted_contracting_dimensions.begin(),
shifted_contracting_dimensions.end());
}
{
const std::vector<int64_t> shifted_contracting_dimensions =
ShiftDimensions(dnums.rhs_contracting_dimensions(),
rhs_batch_dimension, batch_dimension_count - 1);
new_dot_dimension_numbers.mutable_rhs_contracting_dimensions()->Assign(
shifted_contracting_dimensions.begin(),
shifted_contracting_dimensions.end());
}
auto sparsity = Cast<HloDotInstruction>(dot)->sparsity();
std::vector<SparsityDescriptor> new_sparsity(sparsity.begin(),
sparsity.end());
std::vector<HloInstruction*> sparse_meta(sparsity.size());
for (int i = 0; i < sparsity.size(); ++i) {
SparsityDescriptor& descriptor = new_sparsity[i];
int64_t sparse_batch_dim =
descriptor.index() == 0 ? lhs_batch_dimension : rhs_batch_dimension;
if (descriptor.dimension() > sparse_batch_dim)
descriptor.set_dimension(descriptor.dimension() -
(batch_dimension_count - 1));
HloInstruction* meta =
dot->mutable_operand(HloDotInstruction::kOperands + i);
Shape new_meta_shape = merge_batch_dims(meta->shape(), sparse_batch_dim);
TF_ASSIGN_OR_RETURN(sparse_meta[i], MakeReshapeHlo(new_meta_shape, meta));
}
TF_ASSIGN_OR_RETURN(HloInstruction * reshaped_lhs,
MakeReshapeHlo(new_lhs_shape, dot->mutable_operand(0)));
TF_ASSIGN_OR_RETURN(HloInstruction * reshaped_rhs,
MakeReshapeHlo(new_rhs_shape, dot->mutable_operand(1)));
Shape new_dot_shape = merge_batch_dims(dot->shape(), 0);
HloInstruction* new_dot = dot->parent()->AddInstruction(
HloInstruction::CreateDot(new_dot_shape, reshaped_lhs, reshaped_rhs,
new_dot_dimension_numbers,
dot->precision_config(), new_sparsity,
sparse_meta),
&dot->metadata());
dot->SetupDerivedInstruction(new_dot);
std::unique_ptr<HloInstruction> out_reshape =
HloInstruction::CreateReshape(dot->shape(), new_dot);
return ReplaceWithNewInstruction(dot, std::move(out_reshape));
}
};
}
absl::StatusOr<bool> DotDimensionMerger::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
return BatchDimensionMerger().RunOnModule(module, execution_threads);
}
} | #include "xla/service/dot_dimension_merger.h"
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using DotDimensionMergerTest = HloTestBase;
TEST_F(DotDimensionMergerTest, MergeConsecutiveBatchDimensions) {
const std::string kHloText = R"(
HloModule m
ENTRY e {
p0 = bf16[79,2,4,12,11] parameter(0)
p1 = bf16[79,2,4,11,44] parameter(1)
ROOT d = bf16[2,4,12,44] dot(p0, p1),
lhs_batch_dims={1,2}, lhs_contracting_dims={0,4},
rhs_batch_dims={1,2}, rhs_contracting_dims={0,3},
metadata={op_name="testname"}
})";
RunAndFilecheckHloRewrite(kHloText, DotDimensionMerger(), R"(
; CHECK: %[[R0:.*]] = bf16[79,8,12,11]{3,2,1,0} reshape(%p0)
; CHECK: %[[R1:.*]] = bf16[79,8,11,44]{3,2,1,0} reshape(%p1)
; CHECK: %[[DOT:.*]] = bf16[8,12,44]{2,1,0} dot(%[[R0]], %[[R1]])
; CHECK-SAME: lhs_batch_dims={1}
; CHECK-SAME: lhs_contracting_dims={0,3}
; CHECK-SAME: rhs_batch_dims={1}
; CHECK-SAME: rhs_contracting_dims={0,2}
; CHECK-NEXT: ROOT {{[^ ]+}} = bf16[2,4,12,44]{3,2,1,0} reshape(%[[DOT]])
; CHECK-SAME: metadata={op_name="testname"}
)");
}
TEST_F(DotDimensionMergerTest,
MergeConsecutiveBatchDimensionsNonDefaultLayouts) {
const std::string kHloText = R"(
HloModule m
ENTRY e {
p0 = bf16[79,2,4,12,11]{4,0,3,2,1} parameter(0)
p1 = bf16[79,2,4,11,44]{3,0,4,2,1} parameter(1)
ROOT d = bf16[2,4,12,44]{3,1,0,2} dot(p0, p1),
lhs_batch_dims={1,2}, lhs_contracting_dims={0,4},
rhs_batch_dims={1,2}, rhs_contracting_dims={0,3},
metadata={op_name="testname"}
})";
RunAndFilecheckHloRewrite(kHloText, DotDimensionMerger(), R"(
; CHECK: %[[R0:.*]] = bf16[79,8,12,11]{3,0,2,1} reshape(%p0)
; CHECK: %[[R1:.*]] = bf16[79,8,11,44]{2,0,3,1} reshape(%p1)
; CHECK: %[[DOT:.*]] = bf16[8,12,44]{2,0,1} dot(%[[R0]], %[[R1]])
; CHECK-SAME: lhs_batch_dims={1}
; CHECK-SAME: lhs_contracting_dims={0,3}
; CHECK-SAME: rhs_batch_dims={1}
; CHECK-SAME: rhs_contracting_dims={0,2}
; CHECK-NEXT: ROOT {{[^ ]+}} = bf16[2,4,12,44]{3,1,0,2} reshape(%[[DOT]])
; CHECK-SAME: metadata={op_name="testname"}
)");
}
TEST_F(DotDimensionMergerTest, SkipPhysicallyNonConsecutiveBatchDimensions) {
const std::string kHloText = R"(
HloModule m
ENTRY e {
p0 = bf16[2,4,12,13]{3,1,2,0} parameter(0)
p1 = bf16[2,4,13,55]{3,2,1,0} parameter(1)
ROOT d = bf16[2,4,12,55]{3,2,1,0} dot(p0, p1),
lhs_batch_dims={0,1}, lhs_contracting_dims={3},
rhs_batch_dims={0,1}, rhs_contracting_dims={2}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TF_ASSERT_OK_AND_ASSIGN(bool modified,
DotDimensionMerger().Run(module.get()));
EXPECT_FALSE(modified);
}
TEST_F(DotDimensionMergerTest, SkipUnsortedBatchDimensions) {
const std::string kHloText = R"(
HloModule m
ENTRY e {
p0 = bf16[4,2,12,13] parameter(0)
p1 = bf16[2,4,13,55] parameter(1)
ROOT d = bf16[2,4,12,55] dot(p0, p1),
lhs_batch_dims={1,0}, lhs_contracting_dims={3},
rhs_batch_dims={0,1}, rhs_contracting_dims={2}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TF_ASSERT_OK_AND_ASSIGN(bool modified,
DotDimensionMerger().Run(module.get()));
EXPECT_FALSE(modified);
}
TEST_F(DotDimensionMergerTest, SkipLogicallyNonConsecutiveBatchDimensions) {
const std::string kHloText = R"(
HloModule m
ENTRY e {
p0 = bf16[2,12,4,13] parameter(0)
p1 = bf16[2,4,13,55] parameter(1)
ROOT d = bf16[2,4,12,55] dot(p0, p1),
lhs_batch_dims={0,2}, lhs_contracting_dims={3},
rhs_batch_dims={0,1}, rhs_contracting_dims={2}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
TF_ASSERT_OK_AND_ASSIGN(bool modified,
DotDimensionMerger().Run(module.get()));
EXPECT_FALSE(modified);
}
TEST_F(DotDimensionMergerTest, SparseDotUpdatesDescriptor) {
const std::string kHloText = R"(
HloModule m
ENTRY e {
p0 = bf16[3,4,5,6,16] parameter(0)
p1 = bf16[3,4,5,32,6] parameter(1)
meta = u16[3,4,5,6,2] parameter(2)
ROOT d = bf16[4,5,6,6] dot(p0, p1, meta), sparsity=L.4@2:4,
lhs_batch_dims={1,2}, lhs_contracting_dims={0,4},
rhs_batch_dims={1,2}, rhs_contracting_dims={0,3}
})";
RunAndFilecheckHloRewrite(kHloText, DotDimensionMerger(), R"(
; CHECK: %[[R0:.*]] = bf16[3,20,6,16]{3,2,1,0} reshape(%p0)
; CHECK: %[[R1:.*]] = bf16[3,20,32,6]{3,2,1,0} reshape(%p1)
; CHECK: %[[R2:.*]] = u16[3,20,6,2]{3,2,1,0} reshape(%meta)
; CHECK: %[[DOT:.*]] = bf16[20,6,6]{2,1,0} dot(%[[R0]], %[[R1]], %[[R2]])
; CHECK-SAME: lhs_batch_dims={1}
; CHECK-SAME: lhs_contracting_dims={0,3}
; CHECK-SAME: rhs_batch_dims={1}
; CHECK-SAME: rhs_contracting_dims={0,2}
; CHECK-SAME: sparsity=L.3@2:4
; CHECK-NEXT: ROOT {{.+}} = bf16[4,5,6,6]{3,2,1,0} reshape(%[[DOT]])
)");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dot_dimension_merger.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dot_dimension_merger_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
974b7765-e143-439c-bdc2-2ea618027569 | cpp | tensorflow/tensorflow | cache | third_party/xla/xla/tsl/lib/io/cache.cc | third_party/xla/xla/tsl/lib/io/cache_test.cc | #include "xla/tsl/lib/io/cache.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "absl/base/attributes.h"  // ABSL_FALLTHROUGH_INTENDED
#include "tsl/platform/mutex.h"
#include "tsl/platform/raw_coding.h"
namespace tsl {
namespace table {
Cache::~Cache() {}
namespace {
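// Cache entries are variable-length, heap-allocated structures kept in a
// hash table for lookup and in one of two circular doubly-linked lists for
// LRU ordering (see LRUCache below); this follows the LevelDB cache design.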
struct LRUHandle {
void* value;
void (*deleter)(const Slice&, void* value);
LRUHandle* next_hash;
LRUHandle* next;
LRUHandle* prev;
size_t charge;
size_t key_length;
bool in_cache;
uint32_t refs;
uint32_t hash;
char key_data[1];
Slice key() const {
assert(next != this);
return Slice(key_data, key_length);
}
};
class HandleTable {
public:
HandleTable() : length_(0), elems_(0), list_(nullptr) { Resize(); }
~HandleTable() { delete[] list_; }
LRUHandle* Lookup(const Slice& key, uint32_t hash) {
return *FindPointer(key, hash);
}
LRUHandle* Insert(LRUHandle* h) {
LRUHandle** ptr = FindPointer(h->key(), h->hash);
LRUHandle* old = *ptr;
h->next_hash = (old == nullptr ? nullptr : old->next_hash);
*ptr = h;
if (old == nullptr) {
++elems_;
if (elems_ > length_) {
Resize();
}
}
return old;
}
LRUHandle* Remove(const Slice& key, uint32_t hash) {
LRUHandle** ptr = FindPointer(key, hash);
LRUHandle* result = *ptr;
if (result != nullptr) {
*ptr = result->next_hash;
--elems_;
}
return result;
}
private:
uint32_t length_;
uint32_t elems_;
LRUHandle** list_;
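  // Returns a pointer to the slot that points at the entry matching
  // key/hash, or, if there is no such entry, a pointer to the trailing slot
  // of the corresponding bucket chain (where a new entry would be linked).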
LRUHandle** FindPointer(const Slice& key, uint32_t hash) {
LRUHandle** ptr = &list_[hash & (length_ - 1)];
while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) {
ptr = &(*ptr)->next_hash;
}
return ptr;
}
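  // Grows the bucket array to the next power of two >= the element count,
  // keeping bucket chains short on average, and rehashes every entry.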
void Resize() {
uint32_t new_length = 4;
while (new_length < elems_) {
new_length *= 2;
}
LRUHandle** new_list = new LRUHandle*[new_length];
memset(new_list, 0, sizeof(new_list[0]) * new_length);
uint32_t count = 0;
for (uint32_t i = 0; i < length_; i++) {
LRUHandle* h = list_[i];
while (h != nullptr) {
LRUHandle* next = h->next_hash;
uint32_t hash = h->hash;
LRUHandle** ptr = &new_list[hash & (new_length - 1)];
h->next_hash = *ptr;
*ptr = h;
h = next;
count++;
}
}
assert(elems_ == count);
delete[] list_;
list_ = new_list;
length_ = new_length;
}
};
class LRUCache {
public:
LRUCache();
~LRUCache();
void SetCapacity(size_t capacity) { capacity_ = capacity; }
Cache::Handle* Insert(const Slice& key, uint32_t hash, void* value,
size_t charge,
void (*deleter)(const Slice& key, void* value));
Cache::Handle* Lookup(const Slice& key, uint32_t hash);
void Release(Cache::Handle* handle);
void Erase(const Slice& key, uint32_t hash);
void Prune();
size_t TotalCharge() const {
mutex_lock l(mutex_);
return usage_;
}
private:
void LRU_Remove(LRUHandle* e);
void LRU_Append(LRUHandle* list, LRUHandle* e);
void Ref(LRUHandle* e);
void Unref(LRUHandle* e);
bool FinishErase(LRUHandle* e) TF_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
size_t capacity_;
mutable mutex mutex_;
size_t usage_ TF_GUARDED_BY(mutex_);
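  // Dummy heads of circular doubly-linked lists. lru_ holds entries with
  // refs == 1 and in_cache == true, oldest first (eviction candidates);
  // in_use_ holds entries currently referenced by clients.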
LRUHandle lru_ TF_GUARDED_BY(mutex_);
LRUHandle in_use_ TF_GUARDED_BY(mutex_);
HandleTable table_ TF_GUARDED_BY(mutex_);
};
LRUCache::LRUCache() : capacity_(0), usage_(0) {
lru_.next = &lru_;
lru_.prev = &lru_;
in_use_.next = &in_use_;
in_use_.prev = &in_use_;
}
LRUCache::~LRUCache() {
assert(in_use_.next == &in_use_);
for (LRUHandle* e = lru_.next; e != &lru_;) {
LRUHandle* next = e->next;
assert(e->in_cache);
e->in_cache = false;
assert(e->refs == 1);
Unref(e);
e = next;
}
}
void LRUCache::Ref(LRUHandle* e) {
if (e->refs == 1 && e->in_cache) {
LRU_Remove(e);
LRU_Append(&in_use_, e);
}
e->refs++;
}
void LRUCache::Unref(LRUHandle* e) {
assert(e->refs > 0);
e->refs--;
if (e->refs == 0) {
assert(!e->in_cache);
(*e->deleter)(e->key(), e->value);
free(e);
} else if (e->in_cache && e->refs == 1) {
LRU_Remove(e);
LRU_Append(&lru_, e);
}
}
void LRUCache::LRU_Remove(LRUHandle* e) {
e->next->prev = e->prev;
e->prev->next = e->next;
}
void LRUCache::LRU_Append(LRUHandle* list, LRUHandle* e) {
e->next = list;
e->prev = list->prev;
e->prev->next = e;
e->next->prev = e;
}
Cache::Handle* LRUCache::Lookup(const Slice& key, uint32_t hash) {
mutex_lock l(mutex_);
LRUHandle* e = table_.Lookup(key, hash);
if (e != nullptr) {
Ref(e);
}
return reinterpret_cast<Cache::Handle*>(e);
}
void LRUCache::Release(Cache::Handle* handle) {
mutex_lock l(mutex_);
Unref(reinterpret_cast<LRUHandle*>(handle));
}
Cache::Handle* LRUCache::Insert(const Slice& key, uint32_t hash, void* value,
size_t charge,
void (*deleter)(const Slice& key,
void* value)) {
mutex_lock l(mutex_);
LRUHandle* e =
reinterpret_cast<LRUHandle*>(malloc(sizeof(LRUHandle) - 1 + key.size()));
e->value = value;
e->deleter = deleter;
e->charge = charge;
e->key_length = key.size();
e->hash = hash;
e->in_cache = false;
e->refs = 1;
memcpy(e->key_data, key.data(), key.size());
if (capacity_ > 0) {
e->refs++;
e->in_cache = true;
LRU_Append(&in_use_, e);
usage_ += charge;
FinishErase(table_.Insert(e));
} else {
e->next = nullptr;
}
while (usage_ > capacity_ && lru_.next != &lru_) {
LRUHandle* old = lru_.next;
assert(old->refs == 1);
bool erased = FinishErase(table_.Remove(old->key(), old->hash));
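    // FinishErase() cannot fail here; routing the assert through `erased`
    // avoids an unused-variable warning when NDEBUG compiles assert() away.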
if (!erased) {
assert(erased);
}
}
return reinterpret_cast<Cache::Handle*>(e);
}
bool LRUCache::FinishErase(LRUHandle* e) {
if (e != nullptr) {
assert(e->in_cache);
LRU_Remove(e);
e->in_cache = false;
usage_ -= e->charge;
Unref(e);
}
return e != nullptr;
}
void LRUCache::Erase(const Slice& key, uint32_t hash) {
mutex_lock l(mutex_);
FinishErase(table_.Remove(key, hash));
}
void LRUCache::Prune() {
mutex_lock l(mutex_);
while (lru_.next != &lru_) {
LRUHandle* e = lru_.next;
assert(e->refs == 1);
bool erased = FinishErase(table_.Remove(e->key(), e->hash));
if (!erased) {
assert(erased);
}
}
}
static const int kNumShardBits = 4;
static const int kNumShards = 1 << kNumShardBits;
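// Distributes keys over kNumShards independent LRUCaches, selected by the
// top kNumShardBits bits of the key hash, to reduce mutex contention.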
class ShardedLRUCache : public Cache {
private:
LRUCache shard_[kNumShards];
mutex id_mutex_;
uint64_t last_id_;
static inline uint32_t HashSlice(const Slice& s) {
return Hash(s.data(), s.size(), 0);
}
static uint32_t Shard(uint32_t hash) { return hash >> (32 - kNumShardBits); }
public:
explicit ShardedLRUCache(size_t capacity) : last_id_(0) {
const size_t per_shard = (capacity + (kNumShards - 1)) / kNumShards;
for (int s = 0; s < kNumShards; s++) {
shard_[s].SetCapacity(per_shard);
}
}
~ShardedLRUCache() override {}
Handle* Insert(const Slice& key, void* value, size_t charge,
void (*deleter)(const Slice& key, void* value)) override {
const uint32_t hash = HashSlice(key);
return shard_[Shard(hash)].Insert(key, hash, value, charge, deleter);
}
Handle* Lookup(const Slice& key) override {
const uint32_t hash = HashSlice(key);
return shard_[Shard(hash)].Lookup(key, hash);
}
void Release(Handle* handle) override {
LRUHandle* h = reinterpret_cast<LRUHandle*>(handle);
shard_[Shard(h->hash)].Release(handle);
}
void Erase(const Slice& key) override {
const uint32_t hash = HashSlice(key);
shard_[Shard(hash)].Erase(key, hash);
}
void* Value(Handle* handle) override {
return reinterpret_cast<LRUHandle*>(handle)->value;
}
uint64_t NewId() override {
mutex_lock l(id_mutex_);
return ++(last_id_);
}
void Prune() override {
for (int s = 0; s < kNumShards; s++) {
shard_[s].Prune();
}
}
size_t TotalCharge() const override {
size_t total = 0;
for (int s = 0; s < kNumShards; s++) {
total += shard_[s].TotalCharge();
}
return total;
}
private:
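  // Murmur-inspired 32-bit hash, the same scheme LevelDB uses.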
static uint32_t Hash(const char* data, size_t n, uint32_t seed) {
const uint32_t m = 0xc6a4a793;
const uint32_t r = 24;
const char* limit = data + n;
uint32_t h = seed ^ (n * m);
while (data + 4 <= limit) {
uint32_t w = core::DecodeFixed32(data);
data += 4;
h += w;
h *= m;
h ^= (h >> 16);
}
switch (limit - data) {
case 3:
h += static_cast<uint8_t>(data[2]) << 16;
ABSL_FALLTHROUGH_INTENDED;
case 2:
h += static_cast<uint8_t>(data[1]) << 8;
ABSL_FALLTHROUGH_INTENDED;
case 1:
h += static_cast<uint8_t>(data[0]);
h *= m;
h ^= (h >> r);
break;
}
return h;
}
};
}
Cache* NewLRUCache(size_t capacity) { return new ShardedLRUCache(capacity); }
}
} | #include "xla/tsl/lib/io/cache.h"
#include <cstdint>  // uintptr_t
#include <string>
#include <vector>
#include "tsl/platform/coding.h"
#include "tsl/platform/raw_coding.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace table {
static std::string EncodeKey(int k) {
std::string result;
core::PutFixed32(&result, k);
return result;
}
static int DecodeKey(const Slice& k) {
assert(k.size() == 4);
return core::DecodeFixed32(k.data());
}
static void* EncodeValue(uintptr_t v) { return reinterpret_cast<void*>(v); }
static int DecodeValue(void* v) { return reinterpret_cast<uintptr_t>(v); }
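// The fixture records every (key, value) handed to the deleter so tests can
// assert exactly when entries are destroyed; `current_` points at the live
// fixture because the cache's deleter is a plain function pointer with no
// closure state.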
class CacheTest : public ::testing::Test {
public:
static void Deleter(const Slice& key, void* v) {
current_->deleted_keys_.push_back(DecodeKey(key));
current_->deleted_values_.push_back(DecodeValue(v));
}
static constexpr int kCacheSize = 1000;
std::vector<int> deleted_keys_;
std::vector<int> deleted_values_;
Cache* cache_;
CacheTest() : cache_(NewLRUCache(kCacheSize)) { current_ = this; }
~CacheTest() { delete cache_; }
int Lookup(int key) {
Cache::Handle* handle = cache_->Lookup(EncodeKey(key));
const int r = (handle == nullptr) ? -1 : DecodeValue(cache_->Value(handle));
if (handle != nullptr) {
cache_->Release(handle);
}
return r;
}
void Insert(int key, int value, int charge = 1) {
cache_->Release(cache_->Insert(EncodeKey(key), EncodeValue(value), charge,
&CacheTest::Deleter));
}
Cache::Handle* InsertAndReturnHandle(int key, int value, int charge = 1) {
return cache_->Insert(EncodeKey(key), EncodeValue(value), charge,
&CacheTest::Deleter);
}
void Erase(int key) { cache_->Erase(EncodeKey(key)); }
static CacheTest* current_;
};
CacheTest* CacheTest::current_;
TEST_F(CacheTest, HitAndMiss) {
ASSERT_EQ(-1, Lookup(100));
Insert(100, 101);
ASSERT_EQ(101, Lookup(100));
ASSERT_EQ(-1, Lookup(200));
ASSERT_EQ(-1, Lookup(300));
Insert(200, 201);
ASSERT_EQ(101, Lookup(100));
ASSERT_EQ(201, Lookup(200));
ASSERT_EQ(-1, Lookup(300));
Insert(100, 102);
ASSERT_EQ(102, Lookup(100));
ASSERT_EQ(201, Lookup(200));
ASSERT_EQ(-1, Lookup(300));
ASSERT_EQ(1, deleted_keys_.size());
ASSERT_EQ(100, deleted_keys_[0]);
ASSERT_EQ(101, deleted_values_[0]);
}
TEST_F(CacheTest, Erase) {
Erase(200);
ASSERT_EQ(0, deleted_keys_.size());
Insert(100, 101);
Insert(200, 201);
Erase(100);
ASSERT_EQ(-1, Lookup(100));
ASSERT_EQ(201, Lookup(200));
ASSERT_EQ(1, deleted_keys_.size());
ASSERT_EQ(100, deleted_keys_[0]);
ASSERT_EQ(101, deleted_values_[0]);
Erase(100);
ASSERT_EQ(-1, Lookup(100));
ASSERT_EQ(201, Lookup(200));
ASSERT_EQ(1, deleted_keys_.size());
}
TEST_F(CacheTest, EntriesArePinned) {
Insert(100, 101);
Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
ASSERT_EQ(101, DecodeValue(cache_->Value(h1)));
Insert(100, 102);
Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
ASSERT_EQ(102, DecodeValue(cache_->Value(h2)));
ASSERT_EQ(0, deleted_keys_.size());
cache_->Release(h1);
ASSERT_EQ(1, deleted_keys_.size());
ASSERT_EQ(100, deleted_keys_[0]);
ASSERT_EQ(101, deleted_values_[0]);
Erase(100);
ASSERT_EQ(-1, Lookup(100));
ASSERT_EQ(1, deleted_keys_.size());
cache_->Release(h2);
ASSERT_EQ(2, deleted_keys_.size());
ASSERT_EQ(100, deleted_keys_[1]);
ASSERT_EQ(102, deleted_values_[1]);
}
TEST_F(CacheTest, EvictionPolicy) {
Insert(100, 101);
Insert(200, 201);
Insert(300, 301);
Cache::Handle* h = cache_->Lookup(EncodeKey(300));
for (int i = 0; i < kCacheSize + 100; i++) {
Insert(1000 + i, 2000 + i);
ASSERT_EQ(2000 + i, Lookup(1000 + i));
ASSERT_EQ(101, Lookup(100));
}
ASSERT_EQ(101, Lookup(100));
ASSERT_EQ(-1, Lookup(200));
ASSERT_EQ(301, Lookup(300));
cache_->Release(h);
}
TEST_F(CacheTest, UseExceedsCacheSize) {
std::vector<Cache::Handle*> h;
for (int i = 0; i < kCacheSize + 100; i++) {
h.push_back(InsertAndReturnHandle(1000 + i, 2000 + i));
}
for (int i = 0; i < h.size(); i++) {
ASSERT_EQ(2000 + i, Lookup(1000 + i));
}
for (int i = 0; i < h.size(); i++) {
cache_->Release(h[i]);
}
}
TEST_F(CacheTest, HeavyEntries) {
const int kLight = 1;
const int kHeavy = 10;
int added = 0;
int index = 0;
while (added < 2 * kCacheSize) {
const int weight = (index & 1) ? kLight : kHeavy;
Insert(index, 1000 + index, weight);
added += weight;
index++;
}
int cached_weight = 0;
for (int i = 0; i < index; i++) {
const int weight = (i & 1 ? kLight : kHeavy);
int r = Lookup(i);
if (r >= 0) {
cached_weight += weight;
ASSERT_EQ(1000 + i, r);
}
}
ASSERT_LE(cached_weight, kCacheSize + kCacheSize / 10);
}
TEST_F(CacheTest, NewId) {
uint64_t a = cache_->NewId();
uint64_t b = cache_->NewId();
ASSERT_NE(a, b);
}
TEST_F(CacheTest, Prune) {
Insert(1, 100);
Insert(2, 200);
Cache::Handle* handle = cache_->Lookup(EncodeKey(1));
ASSERT_TRUE(handle);
cache_->Prune();
cache_->Release(handle);
ASSERT_EQ(100, Lookup(1));
ASSERT_EQ(-1, Lookup(2));
}
TEST_F(CacheTest, ZeroSizeCache) {
delete cache_;
cache_ = NewLRUCache(0);
Insert(1, 100);
ASSERT_EQ(-1, Lookup(1));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/io/cache.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/io/cache_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0f77a7a9-b07e-42da-9f2a-f6b620f364f6 | cpp | google/arolla | frame | arolla/memory/frame.cc | arolla/memory/frame_test.cc | #include "arolla/memory/frame.h"
#include <algorithm>
#include <cstddef>
#include <cstring>
#include <tuple>
#include <typeindex>
#include <typeinfo>
#include <utility>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "arolla/util/algorithms.h"
#include "arolla/util/memory.h"
namespace arolla {
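// FieldFactory records the byte offsets of all fields of a single C++ type
// within a layout; FieldInitializers indexes these factories by
// std::type_index so fields of the same type can be handled together.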
std::type_index FrameLayout::FieldFactory::type_index() const { return type_; }
void FrameLayout::FieldFactory::Add(size_t offset) {
offsets_.push_back(offset);
}
void FrameLayout::FieldFactory::AddDerived(
const FieldFactory& derived_factory) {
DCHECK(type_index() == derived_factory.type_index());
for (size_t cur_offset : derived_factory.offsets_) {
offsets_.push_back(cur_offset);
}
}
FrameLayout::FieldFactory FrameLayout::FieldFactory::Derive(
size_t offset) const {
FieldFactory res = *this;
for (size_t& cur_offset : res.offsets_) {
cur_offset += offset;
}
return res;
}
void FrameLayout::FieldInitializers::AddOffsetToFactory(
size_t offset, FieldFactory empty_factory) {
auto it = type2factory.find(empty_factory.type_index());
if (it == type2factory.end()) {
bool inserted;
std::tie(it, inserted) =
type2factory.emplace(empty_factory.type_index(), factories.size());
factories.push_back(std::move(empty_factory));
}
DCHECK_LT(it->second, factories.size());
if (it->second < factories.size()) {
factories[it->second].Add(offset);
}
}
void FrameLayout::FieldInitializers::AddDerived(
size_t offset, const FieldInitializers& derived_initializers) {
for (const auto& [derived_tpe, derived_id] :
derived_initializers.type2factory) {
const auto& derived_factory = derived_initializers.factories[derived_id];
if (auto it = type2factory.find(derived_tpe); it != type2factory.end()) {
factories[it->second].AddDerived(derived_factory.Derive(offset));
} else {
type2factory.emplace(derived_tpe, factories.size());
factories.push_back(derived_factory.Derive(offset));
}
}
}
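// Embeds a complete sub-layout at the next suitably aligned offset, merging
// its field initializers (and, in debug builds, its registered fields) into
// this layout.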
FrameLayout::Slot<void> FrameLayout::Builder::AddSubFrame(
const FrameLayout& subframe) {
alloc_size_ = RoundUp(alloc_size_, subframe.AllocAlignment().value);
size_t offset = alloc_size_;
alloc_size_ += subframe.AllocSize();
alloc_alignment_ =
std::max(alloc_alignment_, subframe.AllocAlignment().value);
initializers_.AddDerived(offset, subframe.initializers_);
#ifndef NDEBUG
for (const auto& [field_offset, field_type] : subframe.registered_fields_) {
registered_fields_.emplace(offset + field_offset, field_type);
}
#endif
return FrameLayout::Slot<void>(offset);
}
absl::Status FrameLayout::Builder::RegisterUnsafeSlot(
size_t byte_offset, size_t byte_size, const std::type_info& type) {
return RegisterSlot(byte_offset, byte_size, type);
}
absl::Status FrameLayout::Builder::RegisterSlot(size_t byte_offset,
size_t byte_size,
const std::type_info& type,
bool allow_duplicates) {
if (byte_offset == FrameLayout::Slot<float>::kUninitializedOffset) {
return absl::FailedPreconditionError(
"unable to register uninitialized slot");
}
if (byte_offset > alloc_size_ || byte_size > alloc_size_ - byte_offset) {
return absl::FailedPreconditionError(absl::StrCat(
"unable to register slot after the end of alloc, offset: ", byte_offset,
", size: ", byte_size, ", alloc size: ", alloc_size_));
}
#ifndef NDEBUG
if (!registered_fields_.emplace(byte_offset, std::type_index(type)).second &&
!allow_duplicates) {
return absl::FailedPreconditionError(absl::StrCat(
"slot is already registered ", byte_offset, " ", type.name()));
}
#endif
return absl::OkStatus();
}
} | #include "arolla/memory/frame.h"
#include <array>
#include <cmath>  // M_PI (POSIX math constant used in the SimpleFields test)
#include <cstddef>
#include <cstdint>
#include <memory>
#include <sstream>
#include <string>
#include <type_traits>
#include <utility>
#include <variant>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/util/demangle.h"
#include "arolla/util/is_bzero_constructible.h"
#include "arolla/util/memory.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::testing {
namespace {
using ::absl_testing::IsOk;
using ::absl_testing::StatusIs;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
struct SimpleStruct {
int a;
float b;
};
struct InitializedStruct {
int a = 1;
float b = 2.0;
};
TEST(FrameLayoutTest, SlotOutput) {
FrameLayout::Builder builder;
auto slot = builder.AddSlot<int>();
std::ostringstream ss;
ss << slot;
EXPECT_EQ(ss.str(), std::string("Slot<") + TypeName<int>() + ">(0)");
}
TEST(FrameLayoutTest, SimpleFields) {
FrameLayout::Builder builder;
auto slot1 = builder.AddSlot<int>();
auto slot2 = builder.AddSlot<float>();
auto slot3 = builder.AddSlot<double>();
auto layout = std::move(builder).Build();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
EXPECT_THAT(frame.Get(slot1), Eq(0));
EXPECT_THAT(frame.Get(slot2), Eq(0.0f));
EXPECT_THAT(frame.Get(slot3), Eq(0.0));
frame.Set(slot1, 1);
frame.Set(slot2, 2.0f);
frame.Set(slot3, M_PI);
EXPECT_THAT(frame.Get(slot1), Eq(1));
EXPECT_THAT(frame.Get(slot2), Eq(2.0f));
EXPECT_THAT(frame.Get(slot3), Eq(M_PI));
}
TEST(FrameLayoutTest, SimpleArrays) {
FrameLayout::Builder builder;
auto slot1 = builder.AddSlot<std::array<int, 4>>();
auto slot2 = builder.AddSlot<std::array<float, 4>>();
auto slot3 = builder.AddSlot<std::array<char, 4>>();
auto layout = std::move(builder).Build();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
EXPECT_THAT(frame.Get(slot1), ElementsAre(0, 0, 0, 0));
EXPECT_THAT(frame.Get(slot2), ElementsAre(0.0f, 0.0f, 0.0f, 0.0f));
EXPECT_THAT(frame.Get(slot3), ElementsAre(0, 0, 0, 0));
frame.Set(slot1, std::array<int, 4>{1, 2, 3, 4});
frame.Set(slot2, std::array<float, 4>{1.0f, 2.0f, 3.0f, 4.0f});
frame.Set(slot3, std::array<char, 4>{'a', 'b', 'c', 'd'});
EXPECT_THAT(frame.Get(slot1), ElementsAre(1, 2, 3, 4));
EXPECT_THAT(frame.Get(slot2), ElementsAre(1.0f, 2.0f, 3.0f, 4.0f));
EXPECT_THAT(frame.Get(slot3), ElementsAre('a', 'b', 'c', 'd'));
}
TEST(FrameLayoutTest, SimplePointers) {
FrameLayout::Builder builder;
auto slot1 = builder.AddSlot<int*>();
auto slot2 = builder.AddSlot<char*>();
auto layout = std::move(builder).Build();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
EXPECT_THAT(frame.Get(slot1), Eq(nullptr));
EXPECT_THAT(frame.Get(slot2), Eq(nullptr));
int int_values[] = {1, 2, 3, 4};
char text[] = "It was a dark and stormy night.";
frame.Set(slot1, int_values);
frame.Set(slot2, text);
EXPECT_THAT(frame.Get(slot1), Eq(int_values));
EXPECT_THAT(frame.Get(slot2), Eq(text));
}
TEST(FrameLayoutTest, SmartPointers) {
FrameLayout::Builder builder;
auto slot1 = builder.AddSlot<std::unique_ptr<int>>();
auto slot2 = builder.AddSlot<std::unique_ptr<std::string>>();
auto layout = std::move(builder).Build();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
EXPECT_THAT(frame.Get(slot1), Eq(nullptr));
EXPECT_THAT(frame.Get(slot2), Eq(nullptr));
frame.Set(slot1, std::make_unique<int>(12));
frame.Set(slot2,
std::make_unique<std::string>("It was a dark and stormy night."));
EXPECT_THAT(*frame.Get(slot1), Eq(12));
EXPECT_THAT(*frame.Get(slot2), Eq("It was a dark and stormy night."));
}
TEST(FrameLayoutTest, Vector) {
FrameLayout::Builder builder;
auto slot1 = builder.AddSlot<std::vector<int>>();
auto slot2 = builder.AddSlot<std::vector<std::string>>();
auto layout = std::move(builder).Build();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
EXPECT_THAT(frame.Get(slot1), IsEmpty());
EXPECT_THAT(frame.Get(slot2), IsEmpty());
auto* int_vector = frame.GetMutable(slot1);
int_vector->push_back(1);
int_vector->push_back(2);
int_vector->push_back(3);
auto* string_vector = frame.GetMutable(slot2);
string_vector->push_back("How");
string_vector->push_back("now");
string_vector->push_back("brown");
string_vector->push_back("cow?");
EXPECT_THAT(frame.Get(slot1), ElementsAre(1, 2, 3));
EXPECT_THAT(frame.Get(slot2), ElementsAre("How", "now", "brown", "cow?"));
}
TEST(FrameLayoutTest, Structs) {
FrameLayout::Builder builder;
auto slot1 = builder.AddSlot<SimpleStruct>();
auto slot2 = builder.AddSlot<InitializedStruct>();
auto layout = std::move(builder).Build();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
const SimpleStruct& s1 = frame.Get(slot1);
EXPECT_THAT(s1.a, Eq(0));
EXPECT_THAT(s1.b, Eq(0.0f));
const InitializedStruct& s2 = frame.Get(slot2);
EXPECT_THAT(s2.a, Eq(1));
EXPECT_THAT(s2.b, Eq(2.0f));
}
TEST(FrameLayoutTest, AFewDifferentTypesWellInitialized) {
FrameLayout::Builder builder;
auto slot1 = builder.AddSlot<std::vector<int>>();
auto slot2 = builder.AddSlot<std::vector<std::string>>();
auto slot3 = builder.AddSlot<std::vector<int>>();
auto slot4 = builder.AddSlot<SimpleStruct>();
auto slot5 = builder.AddSlot<InitializedStruct>();
auto slot6 = builder.AddSlot<std::vector<int>>();
auto slot7 = builder.AddSlot<std::vector<std::string>>();
auto slot8 = builder.AddSlot<std::vector<double>>();
auto slot9 = builder.AddSlot<InitializedStruct>();
auto layout = std::move(builder).Build();
MemoryAllocation alloc(&layout);
FramePtr frame = alloc.frame();
EXPECT_THAT(frame.Get(slot1), IsEmpty());
EXPECT_THAT(frame.Get(slot2), IsEmpty());
EXPECT_THAT(frame.Get(slot3), IsEmpty());
EXPECT_THAT(frame.Get(slot6), IsEmpty());
EXPECT_THAT(frame.Get(slot7), IsEmpty());
EXPECT_THAT(frame.Get(slot8), IsEmpty());
const SimpleStruct& simple = frame.Get(slot4);
EXPECT_THAT(simple.a, Eq(0));
EXPECT_THAT(simple.b, Eq(0.0f));
for (const InitializedStruct& init : {frame.Get(slot5), frame.Get(slot9)}) {
EXPECT_THAT(init.a, Eq(1));
EXPECT_THAT(init.b, Eq(2.0f));
}
}
TEST(FrameLayoutTest, HasField) {
FrameLayout::Builder builder;
auto slot1 = builder.AddSlot<int>();
auto slot2 = builder.AddSlot<std::vector<int>>();
auto slot3 = builder.AddSlot<SimpleStruct>();
auto slot4 = builder.AddSlot<std::array<SimpleStruct, 4>>();
auto slot5 = builder.AddSlot<InitializedStruct>();
auto slot6 = builder.AddSlot<std::array<InitializedStruct, 4>>();
auto layout = std::move(builder).Build();
EXPECT_TRUE(layout.HasField(slot1.byte_offset(), typeid(int)));
EXPECT_TRUE(layout.HasField(slot2.byte_offset(), typeid(std::vector<int>)));
EXPECT_TRUE(layout.HasField(slot3.byte_offset(), typeid(SimpleStruct)));
EXPECT_TRUE(layout.HasField(slot4.byte_offset(),
typeid(std::array<SimpleStruct, 4>)));
EXPECT_TRUE(layout.HasField(slot5.byte_offset(), typeid(InitializedStruct)));
EXPECT_TRUE(layout.HasField(slot6.byte_offset(),
typeid(std::array<InitializedStruct, 4>)));
}
TEST(FrameLayoutTest, RegisterUnsafeSlotWithEmptyField) {
FrameLayout::Builder builder;
ASSERT_TRUE(builder.RegisterUnsafeSlot(0, 0, typeid(std::monostate())).ok());
auto layout = std::move(builder).Build();
EXPECT_TRUE(layout.HasField(0, typeid(std::monostate())));
}
TEST(FrameLayoutTest, FieldDescriptorsRegisterUnsafe) {
FrameLayout::Builder builder;
auto slot = builder.AddSlot<int32_t>();
auto slot_1part =
FrameLayout::Slot<int16_t>::UnsafeSlotFromOffset(slot.byte_offset());
auto slot_2part =
FrameLayout::Slot<int16_t>::UnsafeSlotFromOffset(slot.byte_offset() + 2);
ASSERT_THAT(builder.RegisterUnsafeSlot(slot_1part), IsOk());
ASSERT_THAT(builder.RegisterUnsafeSlot(slot_2part), IsOk());
ASSERT_THAT(builder.RegisterUnsafeSlot(slot.byte_offset() + 2, sizeof(int8_t),
typeid(int8_t)),
IsOk());
#ifndef NDEBUG
EXPECT_THAT(builder.RegisterUnsafeSlot(slot_2part),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("slot is already registered")));
EXPECT_THAT(builder.RegisterUnsafeSlot(slot_2part, true),
IsOk());
#endif
auto layout = std::move(builder).Build();
EXPECT_TRUE(layout.HasField(slot.byte_offset(), typeid(int32_t)));
EXPECT_TRUE(layout.HasField(slot.byte_offset(), typeid(int16_t)));
EXPECT_TRUE(layout.HasField(slot.byte_offset() + 2, typeid(int16_t)));
EXPECT_TRUE(layout.HasField(slot.byte_offset() + 2, typeid(int8_t)));
#ifndef NDEBUG
EXPECT_FALSE(layout.HasField(slot.byte_offset() + 2, typeid(float)));
EXPECT_FALSE(layout.HasField(slot.byte_offset() + 1, typeid(int8_t)));
#endif
}
TEST(FrameLayoutTest, FieldDescriptorsRegisterUnsafeErrors) {
FrameLayout::Builder builder;
auto slot = builder.AddSlot<int32_t>();
auto slot_1part =
FrameLayout::Slot<int16_t>::UnsafeSlotFromOffset(slot.byte_offset());
auto slot_after_end =
FrameLayout::Slot<int16_t>::UnsafeSlotFromOffset(slot.byte_offset() + 4);
auto uninitialized_slot =
FrameLayout::Slot<int16_t>::UnsafeUninitializedSlot();
auto status = builder.RegisterUnsafeSlot(slot_1part);
ASSERT_OK(status);
#ifndef NDEBUG
status = builder.RegisterUnsafeSlot(slot);
ASSERT_FALSE(status.ok());
ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition);
EXPECT_THAT(status.message(), HasSubstr("slot is already registered"));
status = builder.RegisterUnsafeSlot(slot_1part);
ASSERT_FALSE(status.ok());
ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition);
EXPECT_THAT(status.message(), HasSubstr("slot is already registered"));
#endif
status = builder.RegisterUnsafeSlot(slot_after_end);
ASSERT_FALSE(status.ok());
ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition);
EXPECT_THAT(status.message(),
HasSubstr("unable to register slot after the end of alloc"));
status = builder.RegisterUnsafeSlot(100, sizeof(int), typeid(int));
ASSERT_FALSE(status.ok());
ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition);
EXPECT_THAT(status.message(),
HasSubstr("unable to register slot after the end of alloc, "
"offset: 100, size: 4, alloc size: 4"));
status = builder.RegisterUnsafeSlot(uninitialized_slot);
ASSERT_FALSE(status.ok());
ASSERT_EQ(status.code(), absl::StatusCode::kFailedPrecondition);
EXPECT_THAT(status.message(),
HasSubstr("unable to register uninitialized slot"));
}
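// Lets tests observe construction and destruction inside a frame: the
// constructor stores `this`, and the destructor clears the pointer through
// a volatile alias so the final write is not optimized away.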
struct SelfReference {
const SelfReference* self;
SelfReference() : self(this) {}
SelfReference(const SelfReference&) = delete;
SelfReference& operator=(const SelfReference&) = delete;
~SelfReference() {
volatile auto secure_ptr = &self;
*secure_ptr = nullptr;
}
};
TEST(FrameLayoutTest, AddSubFrame) {
FrameLayout subframe_layout;
std::vector<FrameLayout::Slot<SelfReference>> field_slots;
{
FrameLayout::Builder builder;
for (int i = 0; i < 2; ++i) {
field_slots.push_back(builder.AddSlot<SelfReference>());
}
subframe_layout = std::move(builder).Build();
}
FrameLayout frame_layout;
std::vector<FrameLayout::Slot<void>> subframe_slots;
{
FrameLayout::Builder builder;
builder.AddSlot<float>();
for (int j = 0; j < 3; ++j) {
subframe_slots.push_back(builder.AddSubFrame(subframe_layout));
builder.AddSlot<double>();
}
frame_layout = std::move(builder).Build();
}
for (const auto& subframe_slot : subframe_slots) {
for (const auto& field_slot : field_slots) {
EXPECT_TRUE(frame_layout.HasField(
subframe_slot.byte_offset() + field_slot.byte_offset(),
typeid(SelfReference)));
}
}
const auto alloc =
AlignedAlloc(frame_layout.AllocAlignment(), frame_layout.AllocSize());
frame_layout.InitializeAlignedAlloc(alloc.get());
FramePtr frame(alloc.get(), &frame_layout);
for (const auto& subframe_slot : subframe_slots) {
for (const auto& field_slot : field_slots) {
const void* subframe_ptr =
frame.GetRawPointer(subframe_slot.byte_offset());
ConstFramePtr subframe(subframe_ptr, &subframe_layout);
const SelfReference& field = subframe.Get(field_slot);
EXPECT_TRUE(field.self == &field);
}
}
frame_layout.DestroyAlloc(alloc.get());
ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(alloc.get(), frame_layout.AllocSize());
for (const auto& subframe_slot : subframe_slots) {
for (const auto& field_slot : field_slots) {
const void* subframe_ptr =
frame.GetRawPointer(subframe_slot.byte_offset());
ConstFramePtr subframe(subframe_ptr, &subframe_layout);
const SelfReference& field = subframe.Get(field_slot);
EXPECT_TRUE(field.self == nullptr);
}
}
}
TEST(FrameLayoutTest, AddSubFrameAllocAlignment) {
FrameLayout::Builder builder;
builder.AddSubFrame(MakeTypeLayout<std::aligned_storage_t<16, 16>>());
builder.AddSubFrame(MakeTypeLayout<std::aligned_storage_t<16, 16>>());
auto frame_layout = std::move(builder).Build();
EXPECT_EQ(frame_layout.AllocSize(), 32);
EXPECT_EQ(frame_layout.AllocAlignment().value, 16);
}
TEST(FrameLayoutTest, ArrayCompatibility) {
FrameLayout::Builder builder;
builder.AddSlot<std::aligned_storage_t<16, 16>>();
builder.AddSlot<std::aligned_storage_t<1, 1>>();
auto frame_layout = std::move(builder).Build();
EXPECT_EQ(frame_layout.AllocSize(), 32);
EXPECT_EQ(frame_layout.AllocAlignment().value, 16);
}
TEST(FrameLayoutTest, InitDestroyAllocN) {
static int instance_counter = 0;
struct InstanceCounted {
InstanceCounted() { ++instance_counter; }
~InstanceCounted() { --instance_counter; }
};
struct SelfReferenced {
SelfReferenced() : self(this) {}
SelfReferenced* self;
};
FrameLayout::Builder builder;
auto int_slot = builder.AddSlot<int>();
auto self_ref_slot = builder.AddSlot<SelfReferenced>();
builder.AddSlot<InstanceCounted>();
auto layout = std::move(builder).Build();
const int n = 10;
const auto alloc =
AlignedAlloc(layout.AllocAlignment(), layout.AllocSize() * n);
layout.InitializeAlignedAllocN(alloc.get(), n);
EXPECT_EQ(instance_counter, n);
for (int i = 0; i < n; ++i) {
ConstFramePtr ith_frame(
static_cast<const std::byte*>(alloc.get()) + i * layout.AllocSize(),
&layout);
EXPECT_EQ(ith_frame.Get(int_slot), 0);
EXPECT_EQ(ith_frame.Get(self_ref_slot).self, &ith_frame.Get(self_ref_slot));
}
layout.DestroyAllocN(alloc.get(), n);
EXPECT_EQ(instance_counter, 0);
}
struct IsBZeroConstructible {
static bool ctor_called;
static bool dtor_called;
IsBZeroConstructible() { ctor_called = true; }
~IsBZeroConstructible() { dtor_called = true; }
};
bool IsBZeroConstructible::ctor_called;
bool IsBZeroConstructible::dtor_called;
}
}
namespace arolla {
template <>
struct is_bzero_constructible<::arolla::testing::IsBZeroConstructible>
: std::true_type {};
}
namespace arolla::testing {
namespace {
TEST(FrameLayoutTest, IsBZeroConstructibleHandling) {
ASSERT_FALSE(IsBZeroConstructible::ctor_called);
ASSERT_FALSE(IsBZeroConstructible::dtor_called);
{
auto layout = MakeTypeLayout<IsBZeroConstructible>();
MemoryAllocation alloc(&layout);
}
EXPECT_FALSE(IsBZeroConstructible::ctor_called);
EXPECT_TRUE(IsBZeroConstructible::dtor_called);
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/memory/frame.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/memory/frame_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
93ddf3a3-54f7-4d52-9e60-c7a46d275e27 | cpp | google/tensorstore | codec_chain_spec | tensorstore/driver/zarr3/codec/codec_chain_spec.cc | tensorstore/driver/zarr3/codec/codec_chain_spec_test.cc | #include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"
#include <stddef.h>
#include <cassert>
#include <optional>
#include <string>
#include <string_view>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/container/fixed_array.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include <nlohmann/json.hpp>
#include "tensorstore/codec_spec.h"
#include "tensorstore/codec_spec_registry.h"
#include "tensorstore/driver/zarr3/codec/bytes.h"
#include "tensorstore/driver/zarr3/codec/codec.h"
#include "tensorstore/driver/zarr3/codec/codec_spec.h"
#include "tensorstore/driver/zarr3/codec/registry.h"
#include "tensorstore/driver/zarr3/codec/transpose.h"
#include "tensorstore/driver/zarr3/name_configuration_json_binder.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/cache_key/cache_key.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/internal/json_binding/std_optional.h"
#include "tensorstore/internal/unaligned_data_type_functions.h"
#include "tensorstore/rank.h"
#include "tensorstore/serialization/fwd.h"
#include "tensorstore/serialization/json_bindable.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_zarr3 {
namespace jb = ::tensorstore::internal_json_binding;
namespace {
struct ZarrCodecJsonBinderImpl {
static absl::Status FromJson(const ZarrCodecSpec::FromJsonOptions& options,
ZarrCodecSpec::Ptr* obj, ::nlohmann::json* j);
static absl::Status ToJson(const ZarrCodecSpec::ToJsonOptions& options,
const ZarrCodecSpec* const* obj,
::nlohmann::json* j);
absl::Status operator()(std::true_type is_loading,
const ZarrCodecSpec::FromJsonOptions& options,
ZarrCodecSpec::Ptr* obj, ::nlohmann::json* j) const {
return FromJson(options, obj, j);
}
template <typename T>
absl::Status operator()(std::false_type is_loading,
const ZarrCodecSpec::ToJsonOptions& options, T* obj,
::nlohmann::json* j) const {
static_assert(
std::is_convertible_v<decltype(&**obj), const ZarrCodecSpec*>);
const ZarrCodecSpec* ptr = &**obj;
return ToJson(options, &ptr, j);
}
};
constexpr inline ZarrCodecJsonBinderImpl ZarrCodecJsonBinder{};
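// When loading with `constraints` set, a bare string such as "gzip" is
// accepted as shorthand for {"name": "gzip"}; otherwise a full
// name/configuration object is required.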
constexpr auto ZarrCodecJsonBinderImplBase =
[](auto is_loading, const auto& options, auto* obj, auto* j) {
const auto& registry = GetCodecRegistry();
if constexpr (is_loading) {
if (options.constraints && j->is_string()) {
::nlohmann::json::object_t j_obj;
j_obj.emplace("name", std::move(*j));
*j = std::move(j_obj);
}
}
return jb::Object(NameConfigurationJsonBinder(
registry.KeyBinder(), registry.RegisteredObjectBinder()))
(is_loading, options, obj, j);
};
absl::Status ZarrCodecJsonBinderImpl::FromJson(
const ZarrCodecSpec::FromJsonOptions& options, ZarrCodecSpec::Ptr* obj,
::nlohmann::json* j) {
return ZarrCodecJsonBinderImplBase(std::true_type{}, options, obj, j);
}
absl::Status ZarrCodecJsonBinderImpl::ToJson(
const ZarrCodecSpec::ToJsonOptions& options,
const ZarrCodecSpec* const* obj, ::nlohmann::json* j) {
return ZarrCodecJsonBinderImplBase(std::false_type{}, options, obj, j);
}
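// The codec array must list zero or more array -> array codecs, then
// exactly one array -> bytes codec (optional when only constraints are
// being parsed), then zero or more bytes -> bytes codecs; serialization
// emits the same order.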
constexpr auto ZarrCodecChainSpecJsonBinderImpl = jb::Compose<
std::vector<ZarrCodecSpec::Ptr>>(
[](auto is_loading, const auto& options, auto* obj, auto* j) {
if constexpr (is_loading) {
auto it = j->begin(), end = j->end();
for (; it != end && (*it)->kind() == ZarrCodecKind::kArrayToArray;
++it) {
obj->array_to_array.push_back(
internal::static_pointer_cast<const ZarrArrayToArrayCodecSpec>(
std::move(*it)));
}
if (it != end && (*it)->kind() == ZarrCodecKind::kArrayToBytes) {
obj->array_to_bytes =
internal::static_pointer_cast<const ZarrArrayToBytesCodecSpec>(
std::move(*it));
++it;
} else if (!options.constraints) {
return absl::InvalidArgumentError(
"array -> bytes codec must be specified");
}
for (; it != end; ++it) {
if ((*it)->kind() != ZarrCodecKind::kBytesToBytes) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Expected bytes -> bytes codec, but received: ",
jb::ToJson(*it, ZarrCodecJsonBinder).value().dump()));
}
obj->bytes_to_bytes.push_back(
internal::static_pointer_cast<const ZarrBytesToBytesCodecSpec>(
std::move(*it)));
}
} else {
j->insert(j->end(), obj->array_to_array.begin(),
obj->array_to_array.end());
if (obj->array_to_bytes) {
j->push_back(obj->array_to_bytes);
}
j->insert(j->end(), obj->bytes_to_bytes.begin(),
obj->bytes_to_bytes.end());
}
return absl::OkStatus();
},
jb::Array(ZarrCodecJsonBinder));
}
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(ZarrCodecChainSpec,
ZarrCodecChainSpecJsonBinderImpl);
namespace {
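// The default array -> bytes codec is the plain "bytes" codec, which is
// only defined for trivial (fixed-size) data types; other data types must
// specify a codec explicitly.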
Result<ZarrArrayToBytesCodecSpec::Ptr> GetDefaultArrayToBytesCodecSpec(
const ArrayCodecResolveParameters& decoded) {
if (internal::IsTrivialDataType(decoded.dtype)) {
return DefaultBytesCodec();
}
return absl::InternalError(tensorstore::StrCat(
"No default codec defined for data type ", decoded.dtype));
}
absl::Status CodecResolveError(const ZarrCodecSpec& codec_spec,
std::string_view message,
const absl::Status& status) {
return tensorstore::MaybeAnnotateStatus(
status, tensorstore::StrCat(
"Error ", message, " through ",
jb::ToJson(&codec_spec, ZarrCodecJsonBinder).value().dump()));
}
}  // namespace
size_t ZarrCodecChainSpec::sharding_height() const {
return array_to_bytes ? array_to_bytes->sharding_height() : 0;
}
absl::Status ZarrCodecChainSpec::GetDecodedChunkLayout(
const ArrayDataTypeAndShapeInfo& array_info,
ArrayCodecChunkLayoutInfo& decoded) const {
absl::FixedArray<ArrayDataTypeAndShapeInfo, 2> array_infos(
array_to_array.size());
const ArrayDataTypeAndShapeInfo* decoded_array_info = &array_info;
for (size_t i = 0; i < array_to_array.size(); ++i) {
const auto& codec_spec = *array_to_array[i];
auto& encoded_array_info = array_infos[i];
TENSORSTORE_RETURN_IF_ERROR(
codec_spec.PropagateDataTypeAndShape(*decoded_array_info,
encoded_array_info),
CodecResolveError(codec_spec, "propagating data type and shape", _));
decoded_array_info = &encoded_array_info;
}
std::optional<ArrayCodecChunkLayoutInfo> temp_info[2];
const ArrayCodecChunkLayoutInfo* encoded_info;
if (array_to_bytes) {
auto& decoded_info = array_infos.empty() ? decoded : temp_info[0].emplace();
TENSORSTORE_RETURN_IF_ERROR(
array_to_bytes->GetDecodedChunkLayout(
array_infos.empty() ? array_info : array_infos.back(),
decoded_info),
CodecResolveError(*array_to_bytes, "propagating chunk layout", _));
encoded_info = &decoded_info;
} else if (!array_to_array.empty()) {
encoded_info = &temp_info[0].emplace();
}
for (size_t i = array_to_array.size(); i--;) {
auto& decoded_info =
i == 0 ? decoded : temp_info[(array_to_array.size() - i) % 2].emplace();
const auto& codec_spec = *array_to_array[i];
TENSORSTORE_RETURN_IF_ERROR(
codec_spec.GetDecodedChunkLayout(
array_infos[i], *encoded_info,
i == 0 ? array_info : array_infos[i - 1], decoded_info),
CodecResolveError(codec_spec, "propagating chunk layout", _));
encoded_info = &decoded_info;
}
return absl::OkStatus();
}
Result<internal::IntrusivePtr<const ZarrCodecChain>>
ZarrCodecChainSpec::Resolve(ArrayCodecResolveParameters&& decoded,
BytesCodecResolveParameters& encoded,
ZarrCodecChainSpec* resolved_spec) const {
auto chain = internal::MakeIntrusivePtr<ZarrCodecChain>();
std::optional<ArrayCodecResolveParameters> temp_array_resolve_params[2];
chain->array_to_array.reserve(array_to_array.size());
chain->bytes_to_bytes.reserve(bytes_to_bytes.size());
if (resolved_spec) {
assert(resolved_spec != this);
assert(resolved_spec->array_to_array.empty());
resolved_spec->array_to_array.reserve(array_to_array.size());
assert(!resolved_spec->array_to_bytes);
assert(resolved_spec->bytes_to_bytes.empty());
resolved_spec->bytes_to_bytes.reserve(bytes_to_bytes.size());
}
ArrayCodecResolveParameters* decoded_params = &decoded;
size_t temp_i = 0;
const auto resolve_array_to_array =
[&](const ZarrArrayToArrayCodecSpec& codec_spec) -> absl::Status {
auto& encoded_params = temp_array_resolve_params[(temp_i++) % 2].emplace();
TENSORSTORE_ASSIGN_OR_RETURN(
auto codec,
codec_spec.Resolve(std::move(*decoded_params), encoded_params,
resolved_spec
? &resolved_spec->array_to_array.emplace_back()
: nullptr),
CodecResolveError(codec_spec, "resolving codec spec", _));
chain->array_to_array.push_back(std::move(codec));
decoded_params = &encoded_params;
return absl::OkStatus();
};
for (size_t i = 0; i < array_to_array.size(); ++i) {
TENSORSTORE_RETURN_IF_ERROR(resolve_array_to_array(*array_to_array[i]));
}
std::optional<BytesCodecResolveParameters> temp_bytes_resolve_params[2];
auto* bytes_decoded_params = &temp_bytes_resolve_params[0].emplace();
ZarrArrayToBytesCodecSpec::Ptr temp_array_to_bytes_codec;
auto* array_to_bytes_codec_ptr = this->array_to_bytes.get();
if (!array_to_bytes_codec_ptr) {
TENSORSTORE_ASSIGN_OR_RETURN(
temp_array_to_bytes_codec,
GetDefaultArrayToBytesCodecSpec(*decoded_params));
array_to_bytes_codec_ptr = temp_array_to_bytes_codec.get();
}
DimensionIndex preferred_order[kMaxRank];
if (DimensionIndex rank = decoded_params->rank;
decoded_params->inner_order &&
!array_to_bytes_codec_ptr->SupportsInnerOrder(
*decoded_params, span<DimensionIndex>(&preferred_order[0], rank))) {
const auto& existing_inner_order = *decoded_params->inner_order;
std::vector<DimensionIndex> new_order(rank);
for (DimensionIndex i = 0; i < rank; ++i) {
new_order[preferred_order[i]] = existing_inner_order[i];
}
TENSORSTORE_RETURN_IF_ERROR(
resolve_array_to_array(*internal::MakeIntrusivePtr<TransposeCodecSpec>(
TransposeCodecSpec::Options{std::move(new_order)})));
}
TENSORSTORE_ASSIGN_OR_RETURN(
chain->array_to_bytes,
array_to_bytes_codec_ptr->Resolve(
std::move(*decoded_params), *bytes_decoded_params,
resolved_spec ? &resolved_spec->array_to_bytes : nullptr),
CodecResolveError(*array_to_bytes, "resolving codec spec", _));
if (chain->array_to_bytes->is_sharding_codec() && !bytes_to_bytes.empty()) {
return absl::InvalidArgumentError(absl::StrFormat(
"Sharding codec %s is not compatible with subsequent bytes -> "
"bytes codecs %s that apply to the entire shard. Instead, "
"bytes -> bytes codecs may be specified as inner codecs that apply "
"to each sub-chunk individually.",
jb::ToJson(array_to_bytes_codec_ptr, ZarrCodecJsonBinder)
.value()
.dump(),
jb::ToJson(bytes_to_bytes, jb::Array(ZarrCodecJsonBinder))
.value()
.dump()));
}
for (size_t i = 0; i < bytes_to_bytes.size(); ++i) {
auto& encoded_params = temp_bytes_resolve_params[(i + 1) % 2].emplace();
const auto& codec_spec = *bytes_to_bytes[i];
TENSORSTORE_ASSIGN_OR_RETURN(
auto codec,
codec_spec.Resolve(std::move(*bytes_decoded_params), encoded_params,
resolved_spec
? &resolved_spec->bytes_to_bytes.emplace_back()
: nullptr),
CodecResolveError(codec_spec, "resolving codec spec", _));
bytes_decoded_params = &encoded_params;
chain->bytes_to_bytes.push_back(std::move(codec));
}
encoded = std::move(*bytes_decoded_params);
return chain;
}
namespace {
template <typename T, typename Binder>
std::string MergeErrorMessage(const T& a, const T& b, const Binder& binder) {
return absl::StrFormat("Cannot merge zarr codec constraints %s and %s",
jb::ToJson(a, binder).value().dump(),
jb::ToJson(b, binder).value().dump());
}
std::string MergeErrorMessage(const ZarrCodecSpec& a, const ZarrCodecSpec& b) {
return MergeErrorMessage(ZarrCodecSpec::Ptr(&a), ZarrCodecSpec::Ptr(&b),
ZarrCodecJsonBinder);
}
template <typename T>
void EnsureMutableCodecSpec(internal::IntrusivePtr<const T>& ptr) {
static_assert(std::is_base_of_v<ZarrCodecSpec, T>);
assert(ptr);
if (ptr->use_count() > 1) {
ptr = internal::static_pointer_cast<const T>(ptr->Clone());
}
}
absl::Status MergeZarrCodecSpecs(ZarrCodecSpec::Ptr& target,
const ZarrCodecSpec* source, bool strict) {
if (!source) {
return absl::OkStatus();
}
if (!target) {
target.reset(source);
return absl::OkStatus();
}
absl::Status status;
const auto& target_ref = *target;
const auto& source_ref = *source;
if (typeid(target_ref) != typeid(source_ref)) {
status = absl::FailedPreconditionError("");
} else {
EnsureMutableCodecSpec(target);
status = const_cast<ZarrCodecSpec&>(*target).MergeFrom(*source, strict);
}
if (status.ok()) return absl::OkStatus();
return tensorstore::MaybeAnnotateStatus(status,
MergeErrorMessage(*target, *source));
}
template <typename T>
absl::Status MergeZarrCodecSpecs(typename T::Ptr& target, const T* source,
bool strict) {
static_assert(std::is_base_of_v<ZarrCodecSpec, T>);
ZarrCodecSpec::Ptr target_base = std::move(target);
auto status = MergeZarrCodecSpecs(target_base, source, strict);
target = internal::static_pointer_cast<const T>(std::move(target_base));
TENSORSTORE_RETURN_IF_ERROR(status);
return absl::OkStatus();
}
template <typename T>
absl::Status MergeZarrCodecSpecs(std::vector<T>& targets,
const std::vector<T>& sources, bool strict) {
constexpr bool kIsArrayToArray =
std::is_same_v<ZarrArrayToArrayCodecSpec::Ptr, T>;
size_t merge_count = targets.size();
bool size_mismatch = targets.size() != sources.size();
if constexpr (kIsArrayToArray) {
if (!strict) {
if (sources.size() == targets.size() + 1 &&
typeid(*sources.back()) == typeid(TransposeCodecSpec)) {
targets.push_back(sources.back());
size_mismatch = false;
} else if (sources.size() + 1 == targets.size() &&
typeid(*targets.back()) == typeid(TransposeCodecSpec)) {
--merge_count;
size_mismatch = false;
}
}
}
if (size_mismatch) {
return tensorstore::MaybeAnnotateStatus(
absl::FailedPreconditionError(absl::StrFormat(
"Mismatch in number of %s codecs (%d vs %d)",
kIsArrayToArray ? "array -> array" : "bytes -> bytes",
targets.size(), sources.size())),
MergeErrorMessage(targets, sources, jb::Array(ZarrCodecJsonBinder)));
}
for (size_t i = 0; i < merge_count; ++i) {
TENSORSTORE_RETURN_IF_ERROR(
MergeZarrCodecSpecs(targets[i], sources[i].get(), strict));
}
return absl::OkStatus();
}
}  // namespace
absl::Status ZarrCodecChainSpec::MergeFrom(const ZarrCodecChainSpec& other,
bool strict) {
if (!strict) {
size_t self_sharding_height = sharding_height();
size_t other_sharding_height = other.sharding_height();
if (self_sharding_height > other_sharding_height &&
array_to_array.empty() && bytes_to_bytes.empty()) {
EnsureMutableCodecSpec(array_to_bytes);
return static_cast<ZarrShardingCodecSpec&>(
const_cast<ZarrArrayToBytesCodecSpec&>(*array_to_bytes))
.MergeSubChunkCodecsFrom(other, strict);
}
if (self_sharding_height < other_sharding_height &&
other.array_to_array.empty() && other.bytes_to_bytes.empty()) {
auto new_array_to_bytes_codec =
internal::static_pointer_cast<const ZarrShardingCodecSpec>(
other.array_to_bytes->Clone());
TENSORSTORE_RETURN_IF_ERROR(
const_cast<ZarrShardingCodecSpec&>(*new_array_to_bytes_codec)
.MergeSubChunkCodecsFrom(*this, strict));
array_to_array.clear();
bytes_to_bytes.clear();
array_to_bytes = std::move(new_array_to_bytes_codec);
return absl::OkStatus();
}
}
TENSORSTORE_RETURN_IF_ERROR(
MergeZarrCodecSpecs(array_to_array, other.array_to_array, strict));
TENSORSTORE_RETURN_IF_ERROR(
MergeZarrCodecSpecs(array_to_bytes, other.array_to_bytes.get(), strict));
TENSORSTORE_RETURN_IF_ERROR(
MergeZarrCodecSpecs(bytes_to_bytes, other.bytes_to_bytes, strict));
return absl::OkStatus();
}
absl::Status MergeZarrCodecSpecs(
std::optional<ZarrCodecChainSpec>& target,
const std::optional<ZarrCodecChainSpec>& source, bool strict) {
if (!target) {
if (source) {
target = *source;
}
return absl::OkStatus();
}
if (!source) {
return absl::OkStatus();
}
return target->MergeFrom(*source, strict);
}
bool ZarrShardingCodecSpec::SupportsInnerOrder(
const ArrayCodecResolveParameters& decoded,
span<DimensionIndex> preferred_inner_order) const {
return true;
}
size_t ZarrShardingCodecSpec::sharding_height() const {
auto* sub_chunk_codecs = this->GetSubChunkCodecs();
return 1 + (sub_chunk_codecs ? sub_chunk_codecs->sharding_height() : 0);
}
CodecSpec TensorStoreCodecSpec::Clone() const {
return internal::CodecDriverSpec::Make<TensorStoreCodecSpec>(*this);
}
absl::Status TensorStoreCodecSpec::DoMergeFrom(
const internal::CodecDriverSpec& other_base) {
if (typeid(other_base) != typeid(TensorStoreCodecSpec)) {
return absl::InvalidArgumentError("");
}
auto& other = static_cast<const TensorStoreCodecSpec&>(other_base);
return MergeZarrCodecSpecs(codecs, other.codecs, false);
}
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(
TensorStoreCodecSpec,
jb::Sequence(
jb::Member("codecs",
jb::Projection<&TensorStoreCodecSpec::codecs>(jb::Optional(
ZarrCodecChainJsonBinder<true>)))
))
namespace {
const internal::CodecSpecRegistration<TensorStoreCodecSpec>
encoding_registration;
}  // namespace
}  // namespace internal_zarr3
namespace internal {
void CacheKeyEncoder<internal_zarr3::ZarrCodecChainSpec>::Encode(
std::string* out, const internal_zarr3::ZarrCodecChainSpec& value) {
internal::EncodeCacheKey(out, value.ToJson().value().dump());
}
}  // namespace internal
}  // namespace tensorstore
TENSORSTORE_DEFINE_SERIALIZER_SPECIALIZATION(
tensorstore::internal_zarr3::ZarrCodecChainSpec,
tensorstore::serialization::JsonBindableSerializer<
tensorstore::internal_zarr3::ZarrCodecChainSpec>()) | #include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/codec_spec.h"
#include "tensorstore/driver/zarr3/codec/codec_test_util.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::CodecSpec;
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_zarr3::GetDefaultBytesCodecJson;
using ::tensorstore::internal_zarr3::TestCodecMerge;
using ::tensorstore::internal_zarr3::ZarrCodecChainSpec;
TEST(CodecMergeTest, Basic) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto a,
CodecSpec::FromJson({
{"driver", "zarr3"},
{"codecs",
{{
{"name", "sharding_indexed"},
{"configuration",
{
{"chunk_shape", {30, 40, 50}},
{"index_codecs",
{GetDefaultBytesCodecJson(), {{"name", "crc32c"}}}},
{"codecs",
{
{{"name", "transpose"},
{"configuration", {{"order", {2, 0, 1}}}}},
GetDefaultBytesCodecJson(),
{{"name", "gzip"}, {"configuration", {{"level", 6}}}},
}},
}},
}}},
}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto b, CodecSpec::FromJson(
{{"driver", "zarr3"},
{"codecs",
{{{"name", "gzip"}, {"configuration", {{"level", 5}}}}}}}));
EXPECT_THAT(a.MergeFrom(b),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
".*: Incompatible \"level\": 6 vs 5"));
}
TEST(CodecChainSpecTest, MissingArrayToBytes) {
EXPECT_THAT(ZarrCodecChainSpec::FromJson(::nlohmann::json::array_t()),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"array -> bytes codec must be specified"));
}
TEST(CodecChainSpecTest, MergeCodecNameMismatch) {
EXPECT_THAT(
TestCodecMerge({"gzip"}, {"crc32c"}, true),
MatchesStatus(absl::StatusCode::kFailedPrecondition, "Cannot merge .*"));
}
TEST(CodecChainSpecTest, MergeArrayToBytes) {
EXPECT_THAT(
TestCodecMerge(
{{{"name", "bytes"}, {"configuration", {{"endian", "little"}}}}},
::nlohmann::json::array_t(), true),
::testing::Optional(MatchesJson(
{{{"name", "bytes"}, {"configuration", {{"endian", "little"}}}}})));
}
TEST(CodecChainSpecTest, ExtraTranspose) {
::nlohmann::json a = {
{{"name", "transpose"}, {"configuration", {{"order", {0, 2, 1}}}}},
{{"name", "bytes"}, {"configuration", {{"endian", "little"}}}},
};
::nlohmann::json b = {
{{"name", "bytes"}, {"configuration", {{"endian", "little"}}}},
};
EXPECT_THAT(TestCodecMerge(a, b, false),
::testing::Optional(MatchesJson(a)));
EXPECT_THAT(
TestCodecMerge(a, b, true),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
".*: Mismatch in number of array -> array codecs.*"));
}
TEST(CodecChainSpecTest, ExtraSharding) {
::nlohmann::json a = {{
{"name", "sharding_indexed"},
{"configuration",
{
{"chunk_shape", {30, 40, 50}},
{"index_codecs", {GetDefaultBytesCodecJson(), {{"name", "crc32c"}}}},
{"codecs",
{
{{"name", "transpose"},
{"configuration", {{"order", {2, 0, 1}}}}},
GetDefaultBytesCodecJson(),
{{"name", "gzip"}, {"configuration", {{"level", 6}}}},
}},
}},
}};
::nlohmann::json b = {
{{"name", "transpose"}, {"configuration", {{"order", {2, 0, 1}}}}},
GetDefaultBytesCodecJson(),
{{"name", "gzip"}, {"configuration", {{"level", 6}}}},
};
::nlohmann::json c = {
GetDefaultBytesCodecJson(),
{{"name", "gzip"}, {"configuration", {{"level", 6}}}},
};
EXPECT_THAT(TestCodecMerge(a, b, false),
::testing::Optional(MatchesJson(a)));
EXPECT_THAT(TestCodecMerge(a, c, false),
::testing::Optional(MatchesJson(a)));
EXPECT_THAT(
TestCodecMerge(a, b, true),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
".*: Mismatch in number of array -> array codecs.*"));
EXPECT_THAT(TestCodecMerge(a, c, true),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Cannot merge zarr codec constraints .*"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr3/codec/codec_chain_spec.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr3/codec/codec_chain_spec_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
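// Illustrative sketch, not part of either record above or below: a minimal
// use of ZarrCodecChainSpec::FromJson, assuming only what the unit test above
// exercises (the "bytes" and "gzip" codec names, and the error raised when
// the array -> bytes codec is missing); the nlohmann include path is assumed.
#include <cassert>
#include <nlohmann/json.hpp>
#include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"

inline void ParseExampleCodecChain() {
  // A valid chain: the required array -> bytes "bytes" codec followed by a
  // bytes -> bytes "gzip" codec.
  ::nlohmann::json codecs = {
      {{"name", "bytes"}, {"configuration", {{"endian", "little"}}}},
      {{"name", "gzip"}, {"configuration", {{"level", 6}}}},
  };
  auto spec = tensorstore::internal_zarr3::ZarrCodecChainSpec::FromJson(codecs);
  assert(spec.ok());
  // An empty codec list is rejected: "array -> bytes codec must be specified".
  auto missing = tensorstore::internal_zarr3::ZarrCodecChainSpec::FromJson(
      ::nlohmann::json::array_t());
  assert(!missing.ok());
}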
b0a5d668-8561-4789-820f-0d628bfa1730 | cpp | tensorflow/tensorflow | journal | tensorflow/core/data/service/journal.cc | tensorflow/core/data/service/journal_test.cc | #include "tensorflow/core/data/service/journal.h"
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "tensorflow/core/data/service/journal.pb.h"
#include "tensorflow/core/lib/io/record_reader.h"
#include "tensorflow/core/lib/io/record_writer.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/regexp.h"
namespace tensorflow {
namespace data {
namespace {
constexpr StringPiece kJournal = "journal";
Status ParseSequenceNumber(const std::string& journal_file,
int64_t* sequence_number) {
if (!RE2::FullMatch(journal_file, ".*_(\\d+)", sequence_number)) {
return errors::InvalidArgument("Failed to parse journal file name: ",
journal_file);
}
return absl::OkStatus();
}
}  // namespace
std::string DataServiceJournalFile(const std::string& journal_dir,
int64_t sequence_number) {
return io::JoinPath(journal_dir,
absl::StrCat(kJournal, "_", sequence_number));
}
FileJournalWriter::FileJournalWriter(Env* env, const std::string& journal_dir)
: env_(env), journal_dir_(journal_dir) {}
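// Lazily creates the journal directory if needed and opens an appendable
// file named "journal_<n+1>", where n is the highest sequence number already
// present in the directory; all subsequent writes append to this file.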
Status FileJournalWriter::EnsureInitialized() {
if (writer_) {
return absl::OkStatus();
}
std::vector<std::string> journal_files;
TF_RETURN_IF_ERROR(env_->RecursivelyCreateDir(journal_dir_));
TF_RETURN_IF_ERROR(env_->GetChildren(journal_dir_, &journal_files));
int64_t latest_sequence_number = -1;
for (const auto& file : journal_files) {
int64_t sequence_number;
TF_RETURN_IF_ERROR(ParseSequenceNumber(file, &sequence_number));
latest_sequence_number = std::max(latest_sequence_number, sequence_number);
}
std::string journal_file =
DataServiceJournalFile(journal_dir_, latest_sequence_number + 1);
TF_RETURN_IF_ERROR(env_->NewAppendableFile(journal_file, &file_));
writer_ = std::make_unique<io::RecordWriter>(file_.get());
VLOG(1) << "Created journal writer to write to " << journal_file;
return absl::OkStatus();
}
Status FileJournalWriter::Write(const Update& update) {
TF_RETURN_IF_ERROR(EnsureInitialized());
std::string s = update.SerializeAsString();
if (s.empty()) {
return errors::Internal("Failed to serialize update ", update.DebugString(),
" to string");
}
TF_RETURN_IF_ERROR(writer_->WriteRecord(s));
TF_RETURN_IF_ERROR(writer_->Flush());
TF_RETURN_IF_ERROR(file_->Sync());
if (VLOG_IS_ON(4)) {
VLOG(4) << "Wrote journal entry: " << update.DebugString();
}
return absl::OkStatus();
}
FileJournalReader::FileJournalReader(Env* env, StringPiece journal_dir)
: env_(env), journal_dir_(journal_dir) {}
Status FileJournalReader::EnsureInitialized() {
if (reader_) {
return absl::OkStatus();
}
return UpdateFile(DataServiceJournalFile(journal_dir_, 0));
}
Status FileJournalReader::Read(Update& update, bool& end_of_journal) {
TF_RETURN_IF_ERROR(EnsureInitialized());
while (true) {
tstring record;
Status s = reader_->ReadRecord(&record);
if (absl::IsOutOfRange(s)) {
sequence_number_++;
std::string next_journal_file =
DataServiceJournalFile(journal_dir_, sequence_number_);
if (absl::IsNotFound(env_->FileExists(next_journal_file))) {
VLOG(3) << "Next journal file " << next_journal_file
<< " does not exist. End of journal reached.";
end_of_journal = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(UpdateFile(next_journal_file));
continue;
}
TF_RETURN_IF_ERROR(s);
if (!update.ParseFromString(record)) {
return errors::DataLoss("Failed to parse journal record.");
}
if (VLOG_IS_ON(4)) {
VLOG(4) << "Read journal entry: " << update.DebugString();
}
end_of_journal = false;
return absl::OkStatus();
}
}
Status FileJournalReader::UpdateFile(const std::string& filename) {
VLOG(1) << "Reading from journal file " << filename;
TF_RETURN_IF_ERROR(env_->NewRandomAccessFile(filename, &file_));
io::RecordReaderOptions opts;
opts.buffer_size = 2 << 20;
reader_ = std::make_unique<io::SequentialRecordReader>(file_.get(), opts);
return absl::OkStatus();
}
}  // namespace data
} | #include "tensorflow/core/data/service/journal.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/journal.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::HasSubstr;
bool NewJournalDir(std::string& journal_dir) {
std::string filename = testing::TmpDir();
if (!Env::Default()->CreateUniqueFileName(&filename, "journal_dir")) {
return false;
}
journal_dir = filename;
return true;
}
Update MakeCreateIterationUpdate() {
Update update;
CreateIterationUpdate* create_iteration = update.mutable_create_iteration();
create_iteration->set_job_id(3);
create_iteration->set_iteration_id(8);
create_iteration->set_repetition(5);
return update;
}
Update MakeFinishTaskUpdate() {
Update update;
FinishTaskUpdate* finish_task = update.mutable_finish_task();
finish_task->set_task_id(8);
return update;
}
Update MakeRegisterDatasetUpdate() {
Update update;
RegisterDatasetUpdate* register_dataset = update.mutable_register_dataset();
register_dataset->set_dataset_id("dataset_id");
register_dataset->set_fingerprint(3);
return update;
}
Status CheckJournalContent(StringPiece journal_dir,
const std::vector<Update>& expected) {
FileJournalReader reader(Env::Default(), journal_dir);
for (const auto& update : expected) {
Update result;
bool end_of_journal = true;
TF_RETURN_IF_ERROR(reader.Read(result, end_of_journal));
EXPECT_FALSE(end_of_journal);
EXPECT_EQ(result.SerializeAsString(), update.SerializeAsString());
}
Update result;
bool end_of_journal = false;
TF_RETURN_IF_ERROR(reader.Read(result, end_of_journal));
EXPECT_TRUE(end_of_journal);
return absl::OkStatus();
}
}  // namespace
TEST(Journal, RoundTripMultiple) {
std::string journal_dir;
EXPECT_TRUE(NewJournalDir(journal_dir));
std::vector<Update> updates = {MakeCreateIterationUpdate(),
MakeRegisterDatasetUpdate(),
MakeFinishTaskUpdate()};
FileJournalWriter writer(Env::Default(), journal_dir);
for (const auto& update : updates) {
TF_EXPECT_OK(writer.Write(update));
}
TF_EXPECT_OK(CheckJournalContent(journal_dir, updates));
}
TEST(Journal, AppendExistingJournal) {
std::string journal_dir;
EXPECT_TRUE(NewJournalDir(journal_dir));
std::vector<Update> updates = {MakeCreateIterationUpdate(),
MakeRegisterDatasetUpdate(),
MakeFinishTaskUpdate()};
for (const auto& update : updates) {
FileJournalWriter writer(Env::Default(), journal_dir);
TF_EXPECT_OK(writer.Write(update));
}
TF_EXPECT_OK(CheckJournalContent(journal_dir, updates));
}
TEST(Journal, MissingFile) {
std::string journal_dir;
EXPECT_TRUE(NewJournalDir(journal_dir));
FileJournalReader reader(Env::Default(), journal_dir);
Update result;
bool end_of_journal = true;
Status s = reader.Read(result, end_of_journal);
EXPECT_TRUE(absl::IsNotFound(s));
}
TEST(Journal, NonRecordData) {
std::string journal_dir;
EXPECT_TRUE(NewJournalDir(journal_dir));
TF_ASSERT_OK(Env::Default()->RecursivelyCreateDir(journal_dir));
{
std::unique_ptr<WritableFile> file;
TF_ASSERT_OK(Env::Default()->NewAppendableFile(
DataServiceJournalFile(journal_dir, 0), &file));
TF_ASSERT_OK(file->Append("not record data"));
}
FileJournalReader reader(Env::Default(), journal_dir);
Update result;
bool end_of_journal = true;
Status s = reader.Read(result, end_of_journal);
EXPECT_THAT(s.message(), HasSubstr("corrupted record"));
EXPECT_EQ(s.code(), error::DATA_LOSS);
}
TEST(Journal, InvalidRecordData) {
std::string journal_dir;
EXPECT_TRUE(NewJournalDir(journal_dir));
TF_ASSERT_OK(Env::Default()->RecursivelyCreateDir(journal_dir));
{
std::unique_ptr<WritableFile> file;
TF_ASSERT_OK(Env::Default()->NewAppendableFile(
DataServiceJournalFile(journal_dir, 0), &file));
auto writer = std::make_unique<io::RecordWriter>(file.get());
TF_ASSERT_OK(writer->WriteRecord("not serialized proto"));
}
FileJournalReader reader(Env::Default(), journal_dir);
Update result;
bool end_of_journal = true;
Status s = reader.Read(result, end_of_journal);
EXPECT_THAT(s.message(), HasSubstr("Failed to parse journal record"));
EXPECT_EQ(s.code(), error::DATA_LOSS);
}
}  // namespace data
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/journal.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/journal_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
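// Illustrative sketch, not part of either record above or below; it mirrors
// Journal.RoundTripMultiple above and assumes only the reader API shown
// there: replay every update recorded under `journal_dir`, in order.
#include <string>
#include "absl/status/status.h"
#include "tensorflow/core/data/service/journal.h"
#include "tensorflow/core/data/service/journal.pb.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"

inline absl::Status ReplayJournal(const std::string& journal_dir) {
  tensorflow::data::FileJournalReader reader(tensorflow::Env::Default(),
                                             journal_dir);
  tensorflow::data::Update update;
  bool end_of_journal = false;
  while (true) {
    TF_RETURN_IF_ERROR(reader.Read(update, end_of_journal));
    if (end_of_journal) {
      return absl::OkStatus();  // The next journal_<n> file does not exist.
    }
    // Apply `update` (create_iteration, register_dataset, ...) to state here.
  }
}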
9023f77c-6d2f-4bb9-b69d-4e2e666b234d | cpp | tensorflow/tensorflow | stream | tensorflow/core/tfrt/runtime/stream.cc | tensorflow/core/tfrt/runtime/stream_test.cc | #include "tensorflow/core/tfrt/runtime/stream.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/functional/any_invocable.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "absl/utility/utility.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tsl/platform/random.h"
#include "tsl/platform/threadpool_interface.h"
#include "tsl/profiler/lib/traceme.h"
namespace tensorflow {
namespace tfrt_stub {
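// Walks `module` for tf.PwStreamResults ops; if any exist, stamps each with
// the stream controller address, the model name, and one freshly generated
// callback id, and returns that id. Returns std::nullopt otherwise.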
absl::StatusOr<std::optional<StreamCallbackId>> CreateStreamCallbackId(
absl::string_view model_name, mlir::ModuleOp module) {
mlir::Builder builder(module.getContext());
std::vector<mlir::TF::PwStreamResultsOp> ops;
module->walk([&](mlir::TF::PwStreamResultsOp op) { ops.push_back(op); });
if (ops.empty()) {
return std::nullopt;
}
auto& stream_interface = GetGlobalStreamCallbackRegistry().stream_interface();
auto controller_address = stream_interface.controller_address();
auto controller_address_attr = builder.getStringAttr(controller_address);
auto model_name_attr = builder.getStringAttr(model_name);
const StreamCallbackId callback_id(
static_cast<int64_t>(tsl::random::New64()));
auto callback_id_attr = builder.getI64IntegerAttr(callback_id.id);
for (auto op : ops) {
op->setAttr("_controller_address", controller_address_attr);
op->setAttr("_model_name", model_name_attr);
op->setAttr("_callback_id", callback_id_attr);
}
return callback_id;
}
absl::Status StreamCallbackRegistry::CallbackState::Invoke(
tsl::thread::ThreadPoolInterface* thread_pool, StreamedResult result) {
{
absl::MutexLock lock(&mu_);
if (closed_) {
      return absl::InternalError(
          "Failed to invoke a callback that has been closed.");
}
++num_outstanding_;
}
thread_pool->Schedule([this, result = std::move(result)]() mutable {
InvokeCallback(std::move(result));
absl::MutexLock lock(&mu_);
--num_outstanding_;
});
return absl::OkStatus();
}
void StreamCallbackRegistry::CallbackState::Close() {
{
absl::MutexLock lock(&mu_);
closed_ = true;
auto not_running = [this]() ABSL_SHARED_LOCKS_REQUIRED(mu_) {
return num_outstanding_ == 0;
};
mu_.Await(absl::Condition(¬_running));
}
}
void StreamCallbackRegistry::CallbackState::InvokeCallback(
StreamedResult result) {
absl::Duration dequeue_latency = absl::Now() - result.enqueued_time;
interface().RecordDequeueLatency(model_name_, dequeue_latency);
tsl::profiler::TraceMe trace_me("StreamCallbackInvocation");
trace_me.AppendMetadata([&]() {
return tsl::profiler::TraceMeEncode({
{"callback_id", callback_id_.id},
{"step_id", step_id_.id},
});
});
absl::Time start_time = absl::Now();
callback_(std::move(result.tensors));
interface().RecordCallbackLatency(model_name_, absl::Now() - start_time);
}
absl::StatusOr<ScopedStreamCallback> StreamCallbackRegistry::Register(
absl::string_view model_name, StreamCallbackId callback_id, StepId step_id,
absl::AnyInvocable<
void(absl::flat_hash_map<std::string, tensorflow::Tensor>)>
callback) {
absl::MutexLock l(&mu_);
const auto [it, inserted] =
stream_callbacks_.insert({std::make_pair(callback_id, step_id), nullptr});
if (!inserted) {
return absl::AlreadyExistsError(absl::StrCat(
"Stream callback ", callback_id, " @ ", step_id, " already exists"));
}
it->second = std::make_unique<CallbackState>(this, model_name, callback_id,
step_id, std::move(callback));
return ScopedStreamCallback(this, callback_id, step_id);
}
absl::Status StreamCallbackRegistry::Invoke(
tsl::thread::ThreadPoolInterface* thread_pool, StreamCallbackId callback_id,
StepId step_id, StreamedResult result) {
absl::MutexLock lock(&mu_);
auto iter = stream_callbacks_.find({callback_id, step_id});
if (iter == stream_callbacks_.end()) {
return absl::NotFoundError(absl::StrCat(
"Stream callback ", callback_id, " @ ", step_id,
" does not exist; this usually indicates that a streaming signature "
"was called by a non-streaming request"));
}
auto* state = iter->second.get();
DCHECK(state);
return state->Invoke(thread_pool, std::move(result));
}
std::unique_ptr<StreamCallbackRegistry::CallbackState>
StreamCallbackRegistry::Unregister(StreamCallbackId callback_id,
StepId step_id) {
absl::MutexLock l(&mu_);
const auto it = stream_callbacks_.find({callback_id, step_id});
if (it == stream_callbacks_.end()) {
return nullptr;
}
auto state = std::move(it->second);
stream_callbacks_.erase(it);
return state;
}
ScopedStreamCallback::ScopedStreamCallback(ScopedStreamCallback&& other)
: registry_(other.registry_),
callback_id_(other.callback_id_),
step_id_(other.step_id_) {
other.callback_id_ = std::nullopt;
other.step_id_ = StepId::GetInvalidStepId();
}
ScopedStreamCallback& ScopedStreamCallback::operator=(
ScopedStreamCallback&& other) {
Unregister();
registry_ = other.registry_;
callback_id_ = other.callback_id_;
step_id_ = other.step_id_;
other.callback_id_ = std::nullopt;
other.step_id_ = StepId::GetInvalidStepId();
return *this;
}
void ScopedStreamCallback::Unregister() {
if (!callback_id_.has_value()) {
return;
}
tsl::profiler::TraceMe trace_me("ScopedStreamCallback::Unregister");
trace_me.AppendMetadata([&]() {
return tsl::profiler::TraceMeEncode({
{"callback_id", callback_id_->id},
{"step_id", step_id_.id},
});
});
DCHECK(registry_);
auto state = registry_->Unregister(*callback_id_, step_id_);
DCHECK(state);
state->Close();
callback_id_.reset();
}
StreamInterfaceFactory& GetGlobalStreamInterfaceFactory() {
static auto* stream_interface_factory = new StreamInterfaceFactory;
return *stream_interface_factory;
}
StreamCallbackRegistry& GetGlobalStreamCallbackRegistry() {
static auto* stream_callback_registry =
new StreamCallbackRegistry(GetGlobalStreamInterfaceFactory()
.CreateControllerStreamInterface()
.value());
return *stream_callback_registry;
}
}  // namespace tfrt_stub
} | #include "tensorflow/core/tfrt/runtime/stream.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/tfrt/runtime/step_id.h"
#include "tensorflow/core/tfrt/saved_model/saved_model_testutil.h"
#include "tensorflow/core/tfrt/utils/thread_pool.h"
#include "tsl/platform/env.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
using ::tensorflow::test::AsTensor;
using ::testing::AnyOf;
using ::testing::ElementsAreArray;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
using ::testing::status::StatusIs;
TEST(StreamTest, Simple) {
StreamCallbackId callback_id(1234);
StepId step_id(5678);
std::vector<absl::flat_hash_map<std::string, tensorflow::Tensor>> outputs;
ScopedStreamCallback scoped_stream_callback;
{
TF_ASSERT_OK_AND_ASSIGN(
scoped_stream_callback,
GetGlobalStreamCallbackRegistry().Register(
"test_model", callback_id, step_id,
[&](absl::flat_hash_map<std::string, tensorflow::Tensor> arg) {
outputs.push_back(std::move(arg));
}));
std::vector<absl::flat_hash_map<std::string, tensorflow::Tensor>> expected =
{{{"a", AsTensor<int32_t>({100})}, {"b", AsTensor<int32_t>({200})}},
{{"c", AsTensor<int32_t>({300})}}};
auto thread = absl::WrapUnique(tsl::Env::Default()->StartThread(
tsl::ThreadOptions(), "fake_stream_client", [&]() {
for (const auto& map : expected) {
TfThreadPool thread_pool("test", 4);
CHECK_OK(GetGlobalStreamCallbackRegistry().Invoke(
&thread_pool, callback_id, step_id, {map, absl::Now()}));
}
}));
}
EXPECT_EQ(outputs.size(), 2);
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]["a"]),
ElementsAreArray({100}));
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[0]["b"]),
ElementsAreArray({200}));
EXPECT_THAT(GetTfTensorData<int32_t>(outputs[1]["c"]),
ElementsAreArray({300}));
ScopedStreamCallback scoped_stream_callback_copy;
scoped_stream_callback_copy = std::move(scoped_stream_callback);
auto status = GetGlobalStreamCallbackRegistry().Register(
"test_model", callback_id, step_id,
[&](absl::flat_hash_map<std::string, tensorflow::Tensor> arg) {
outputs.push_back(std::move(arg));
});
EXPECT_THAT(status, StatusIs(absl::StatusCode::kAlreadyExists));
}
TEST(StreamTest, MultipleWriters) {
StreamCallbackId callback_id(1234);
StepId step_id(5678);
std::vector<absl::flat_hash_map<std::string, std::vector<int32_t>>> outputs;
{
TfThreadPool thread_pool("test", 4);
TF_ASSERT_OK_AND_ASSIGN(
auto scoped_stream_callback,
GetGlobalStreamCallbackRegistry().Register(
"test_model", callback_id, step_id,
[&](absl::flat_hash_map<std::string, tensorflow::Tensor> arg) {
absl::flat_hash_map<std::string, std::vector<int32_t>> out;
for (const auto& p : arg) {
out[p.first] = GetTfTensorData<int32_t>(p.second);
}
outputs.push_back(std::move(out));
}));
std::vector<absl::flat_hash_map<std::string, tensorflow::Tensor>> expected =
{{{"a", AsTensor<int32_t>({100})}, {"b", AsTensor<int32_t>({200})}},
{{"c", AsTensor<int32_t>({300})}}};
for (const auto& p : expected) {
tsl::Env::Default()->SchedClosure([&, callback_id, step_id, p]() {
TfThreadPool thread_pool("test", 4);
GetGlobalStreamCallbackRegistry()
.Invoke(&thread_pool, callback_id, step_id, {p, absl::Now()})
.IgnoreError();
});
}
absl::SleepFor(absl::Microseconds(100));
}
LOG(INFO) << "StreamCallback receives " << outputs.size() << " outputs.";
for (const auto& output : outputs) {
EXPECT_THAT(
output,
AnyOf(UnorderedElementsAre(Pair("a", ElementsAreArray({100})),
Pair("b", ElementsAreArray({200}))),
UnorderedElementsAre(Pair("c", ElementsAreArray({300})))));
}
}
class TestStreamControllerInterface : public StreamControllerInterface {
public:
TestStreamControllerInterface()
: StreamControllerInterface("test_controller_address") {}
};
TEST(StreamControllerInterface, Initialize) {
GetGlobalStreamInterfaceFactory().RegisterController(
[]() { return std::make_unique<TestStreamControllerInterface>(); });
TF_ASSERT_OK_AND_ASSIGN(
auto controller_interface,
GetGlobalStreamInterfaceFactory().CreateControllerStreamInterface());
EXPECT_EQ(controller_interface->controller_address(),
"test_controller_address");
}
class TestStreamWorkerInterface : public StreamWorkerInterface {
public:
explicit TestStreamWorkerInterface(std::string worker_address)
: StreamWorkerInterface(worker_address) {}
absl::Status InvokeStreamCallback(
const StreamCallbackId& callback_id,
const std::vector<std::string>& names,
const std::vector<std::pair<int64_t, std::vector<tensorflow::Tensor>>>&
responses) override {
return absl::OkStatus();
}
};
TEST(StreamWorkerInterface, Initialize) {
GetGlobalStreamInterfaceFactory().RegisterWorker(
[](absl::string_view address)
-> absl::StatusOr<std::unique_ptr<TestStreamWorkerInterface>> {
return std::make_unique<TestStreamWorkerInterface>(
"test_worker_address");
});
TF_ASSERT_OK_AND_ASSIGN(
auto worker_interface,
GetGlobalStreamInterfaceFactory().CreateWorkerStreamInterface()(
"test_worker_address"));
EXPECT_EQ(worker_interface->controller_address(), "test_worker_address");
}
TEST(StepId, Generate) {
StepId step_id(1234);
EXPECT_EQ(step_id.id, 1234);
StepIdGenerator step_id_generator;
EXPECT_EQ(step_id_generator.GetNextStepId(), StepId(1));
EXPECT_EQ(step_id_generator.GetNextStepId(), StepId(2));
EXPECT_EQ(step_id_generator.GetNextStepId(), StepId(3));
}
TEST(StepId, GlobalInitial) {
EXPECT_EQ(GetGlobalInitialStepId(), 0);
TEST_ScopedInitialStepId test_id(127);
EXPECT_EQ(GetGlobalInitialStepId(), 127);
}
}  // namespace
}  // namespace tfrt_stub
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/runtime/stream.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/runtime/stream_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
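// Illustrative sketch, not part of either record above or below; it mirrors
// StreamTest.Simple above and assumes only the APIs exercised there:
// Register, Invoke with a {tensors, enqueued_time} result, and the draining
// behavior of ScopedStreamCallback's destructor.
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/time/clock.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/tfrt/runtime/step_id.h"
#include "tensorflow/core/tfrt/runtime/stream.h"
#include "tensorflow/core/tfrt/utils/thread_pool.h"

inline void StreamOneResult() {
  namespace tfs = tensorflow::tfrt_stub;
  tfs::StreamCallbackId callback_id(1);
  tfs::StepId step_id(1);
  // Registration fails with AlreadyExists if (callback_id, step_id) is taken.
  auto scoped = tfs::GetGlobalStreamCallbackRegistry().Register(
      "example_model", callback_id, step_id,
      [](absl::flat_hash_map<std::string, tensorflow::Tensor> tensors) {
        // Consume one streamed batch of named tensors here.
      });
  if (!scoped.ok()) return;
  tfs::TfThreadPool pool("stream_example", /*num_threads=*/4);
  // Invoke schedules the callback on `pool`; an empty tensor map suffices
  // for the sketch.
  tfs::GetGlobalStreamCallbackRegistry()
      .Invoke(&pool, callback_id, step_id, {{}, absl::Now()})
      .IgnoreError();
}  // ScopedStreamCallback drains outstanding invocations, then unregisters.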
748cb136-f55b-4f82-b4cb-af329ea655c6 | cpp | tensorflow/tensorflow | sharding_propagation | third_party/xla/xla/service/sharding_propagation.cc | third_party/xla/xla/service/sharding_propagation_test.cc | #include "xla/service/sharding_propagation.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iterator>
#include <list>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/base/call_once.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/ir/hlo_sharding_metadata.h"
#include "xla/hlo/utils/hlo_sharding_util.h"
#include "xla/protobuf_util.h"
#include "xla/service/dot_as_convolution_util.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/service/spmd/shard_barrier_partitioner.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/sharding_op_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
std::optional<HloSharding> ReturnImprovedSharding(
HloSharding sharding, HloInstruction* instruction,
bool may_combine_partial_sharding,
bool allow_aggressive_resharding = false) {
return hlo_sharding_util::ReturnImprovedShardingImpl(
std::move(sharding),
instruction->has_sharding() ? &instruction->sharding() : nullptr,
instruction->shape(), may_combine_partial_sharding,
allow_aggressive_resharding);
}
std::optional<HloSharding> ReturnImprovedSubSharding(
HloSharding sharding, HloInstruction* instruction, const ShapeIndex& index,
bool may_combine_partial_sharding,
bool allow_aggressive_resharding = false) {
if (instruction->has_sharding()) {
const HloSharding to_improved =
instruction->sharding().GetSubSharding(instruction->shape(), index);
return hlo_sharding_util::ReturnImprovedShardingImpl(
std::move(sharding), &to_improved,
ShapeUtil::GetSubshape(instruction->shape(), index),
may_combine_partial_sharding, allow_aggressive_resharding);
} else {
return hlo_sharding_util::ReturnImprovedShardingImpl(
std::move(sharding), nullptr,
ShapeUtil::GetSubshape(instruction->shape(), index),
may_combine_partial_sharding, allow_aggressive_resharding);
}
}
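// Overwrites `instruction`'s sharding when `sharding` is an improvement
// (strictly more specific, or mergeable under partial sharding). Returns
// true iff the sharding was changed.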
bool MaybeImproveInstructionSharding(HloSharding sharding,
HloInstruction* instruction,
bool may_combine_partial_sharding,
bool allow_aggressive_resharding = false) {
if (auto new_sharding = ReturnImprovedSharding(
std::move(sharding), instruction, may_combine_partial_sharding,
allow_aggressive_resharding)) {
instruction->set_sharding(std::move(*new_sharding));
return true;
}
return false;
}
bool MaybeImproveInstructionSubSharding(
HloSharding sharding, HloInstruction* instruction, const ShapeIndex& index,
bool may_combine_partial_sharding,
bool allow_aggressive_resharding = false) {
if (instruction->shape().IsTuple()) {
if (auto new_sub_sharding = ReturnImprovedSubSharding(
std::move(sharding), instruction, index,
may_combine_partial_sharding, allow_aggressive_resharding)) {
HloSharding new_sharding =
instruction->has_sharding()
? instruction->sharding()
: HloSharding::Single(instruction->shape(),
HloSharding::Replicate());
ShapeTree<HloSharding> sharding_shape_tree =
new_sharding.GetAsShapeTree(instruction->shape());
*sharding_shape_tree.mutable_element(index) = new_sub_sharding.value();
instruction->set_sharding(HloSharding::Tuple(sharding_shape_tree));
return true;
} else {
return false;
}
}
CHECK(index.size() == 1 && index[0] == 0);
return MaybeImproveInstructionSharding(std::move(sharding), instruction,
may_combine_partial_sharding,
allow_aggressive_resharding);
}
bool IsConvolutionKernelSmall(const HloInstruction* instruction) {
CHECK_EQ(instruction->opcode(), HloOpcode::kConvolution);
const HloInstruction* rhs = instruction->operand(1);
const auto& dnums = instruction->convolution_dimension_numbers();
int64_t kernel_dim_prod = 1;
int64_t output_dim_prod = 1;
for (int64_t i = 0; i < dnums.input_spatial_dimensions().size(); ++i) {
int64_t kernel_dim =
rhs->shape().dimensions(dnums.kernel_spatial_dimensions(i));
kernel_dim_prod *= kernel_dim;
int64_t output_dim =
instruction->shape().dimensions(dnums.output_spatial_dimensions(i));
output_dim_prod *= output_dim;
if (kernel_dim >= output_dim &&
(i < 2 || kernel_dim > 3 || kernel_dim_prod >= output_dim_prod)) {
return false;
}
}
return true;
}
bool IsPassthroughCustomOps(const HloInstruction* hlo) {
if (hlo->IsCustomCall({"Sharding", "X64Combine", "LayoutConstraint"})) {
return true;
}
if (hlo->operand_count() != 1 || !hlo->shape().IsArray() ||
!hlo->operand(0)->shape().IsArray() ||
hlo->operand(0)->shape().rank() != hlo->shape().rank()) {
return false;
}
return hlo->IsCustomCall(
{"ResizeNearest", "ResizeBilinear", "ResizeNearestGrad",
"ResizeBilinearGrad", "Cholesky",
host_memory_offload_annotations::kMoveToDeviceCustomCallTarget,
host_memory_offload_annotations::kMoveToHostCustomCallTarget});
}
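// For ops that can forward an operand's sharding to their output, returns
// the operand whose sharding is most specific; returns nullptr when no
// operand sharding should be forwarded.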
const HloInstruction* PickRepresentativeOperand(
const HloInstruction* instruction) {
switch (instruction->opcode()) {
case HloOpcode::kMap:
case HloOpcode::kPad:
case HloOpcode::kPower:
case HloOpcode::kOptimizationBarrier:
case HloOpcode::kReverse:
case HloOpcode::kSlice:
case HloOpcode::kShiftLeft:
case HloOpcode::kShiftRightArithmetic:
case HloOpcode::kShiftRightLogical:
if (instruction->operand(0)->has_sharding()) {
return instruction->operand(0);
}
return nullptr;
case HloOpcode::kAbs:
case HloOpcode::kAdd:
case HloOpcode::kAnd:
case HloOpcode::kAtan2:
case HloOpcode::kBitcastConvert:
case HloOpcode::kCeil:
case HloOpcode::kClamp:
case HloOpcode::kClz:
case HloOpcode::kCompare:
case HloOpcode::kComplex:
case HloOpcode::kConcatenate:
case HloOpcode::kConvert:
case HloOpcode::kCopy:
case HloOpcode::kCos:
case HloOpcode::kAllGather:
case HloOpcode::kAllReduce:
case HloOpcode::kReduceScatter:
case HloOpcode::kAllToAll:
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kCollectivePermute:
case HloOpcode::kDivide:
case HloOpcode::kErf:
case HloOpcode::kExp:
case HloOpcode::kExpm1:
case HloOpcode::kFloor:
case HloOpcode::kImag:
case HloOpcode::kIsFinite:
case HloOpcode::kLog:
case HloOpcode::kLog1p:
case HloOpcode::kLogistic:
case HloOpcode::kMaximum:
case HloOpcode::kMinimum:
case HloOpcode::kMultiply:
case HloOpcode::kNegate:
case HloOpcode::kNot:
case HloOpcode::kOr:
case HloOpcode::kPopulationCount:
case HloOpcode::kReal:
case HloOpcode::kReducePrecision:
case HloOpcode::kRemainder:
case HloOpcode::kRoundNearestAfz:
case HloOpcode::kRoundNearestEven:
case HloOpcode::kRsqrt:
case HloOpcode::kSelect:
case HloOpcode::kSign:
case HloOpcode::kSin:
case HloOpcode::kTopK:
case HloOpcode::kSort:
case HloOpcode::kSqrt:
case HloOpcode::kCbrt:
case HloOpcode::kSubtract:
case HloOpcode::kStochasticConvert:
case HloOpcode::kTan:
case HloOpcode::kTanh:
case HloOpcode::kWhile:
case HloOpcode::kXor: {
const HloInstruction* best_operand = nullptr;
for (const HloInstruction* operand : instruction->operands()) {
if (operand->has_sharding() &&
(best_operand == nullptr ||
hlo_sharding_util::IsShardingMoreSpecific(
operand->sharding(), best_operand->sharding()))) {
best_operand = operand;
}
}
return best_operand;
}
case HloOpcode::kCustomCall: {
if (IsPassthroughCustomOps(instruction)) {
return instruction->operand(0);
}
return nullptr;
}
case HloOpcode::kAddDependency:
case HloOpcode::kAfterAll:
case HloOpcode::kAsyncStart:
case HloOpcode::kAsyncUpdate:
case HloOpcode::kAsyncDone:
case HloOpcode::kAllGatherStart:
case HloOpcode::kAllGatherDone:
case HloOpcode::kAllReduceStart:
case HloOpcode::kAllReduceDone:
case HloOpcode::kBatchNormGrad:
case HloOpcode::kBatchNormInference:
case HloOpcode::kBatchNormTraining:
case HloOpcode::kBitcast:
case HloOpcode::kBroadcast:
case HloOpcode::kCall:
case HloOpcode::kCholesky:
case HloOpcode::kCollectivePermuteDone:
case HloOpcode::kCollectivePermuteStart:
case HloOpcode::kConditional:
case HloOpcode::kConstant:
case HloOpcode::kConvolution:
case HloOpcode::kCopyDone:
case HloOpcode::kCopyStart:
case HloOpcode::kDomain:
case HloOpcode::kDot:
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice:
case HloOpcode::kDynamicReshape:
case HloOpcode::kFft:
case HloOpcode::kFusion:
case HloOpcode::kGather:
case HloOpcode::kGetTupleElement:
case HloOpcode::kInfeed:
case HloOpcode::kIota:
case HloOpcode::kOutfeed:
case HloOpcode::kParameter:
case HloOpcode::kPartitionId:
case HloOpcode::kRecv:
case HloOpcode::kRecvDone:
case HloOpcode::kReduce:
case HloOpcode::kReduceWindow:
case HloOpcode::kReplicaId:
case HloOpcode::kReshape:
case HloOpcode::kRng:
case HloOpcode::kRngGetAndUpdateState:
case HloOpcode::kRngBitGenerator:
case HloOpcode::kScatter:
case HloOpcode::kSelectAndScatter:
case HloOpcode::kSend:
case HloOpcode::kSendDone:
case HloOpcode::kTranspose:
case HloOpcode::kTriangularSolve:
case HloOpcode::kTuple:
case HloOpcode::kGetDimensionSize:
case HloOpcode::kSetDimensionSize:
return nullptr;
}
}
bool SupportSpatialPartitioning(
const HloInstruction* instruction,
const ShardingPropagation::ComputationMap& computation_map, bool is_spmd,
bool allow_spmd_sharding_propagation_to_output,
bool allow_spmd_sharding_propagation_to_parameters,
const CustomCallShardingHelper* sharding_helper) {
const bool is_entry_root = instruction->parent()
->parent()
->entry_computation()
->root_instruction() == instruction;
if (instruction->parent()->root_instruction() == instruction &&
computation_map.find(instruction->parent()) == computation_map.end() &&
!(is_entry_root && allow_spmd_sharding_propagation_to_output)) {
return false;
}
if (instruction->IsElementwise() &&
(instruction->opcode() != HloOpcode::kRng || is_spmd)) {
return true;
}
switch (instruction->opcode()) {
case HloOpcode::kBroadcast:
case HloOpcode::kConcatenate:
case HloOpcode::kConditional:
case HloOpcode::kConstant:
case HloOpcode::kConvolution:
case HloOpcode::kOptimizationBarrier:
case HloOpcode::kDot:
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice:
case HloOpcode::kGather:
case HloOpcode::kGetTupleElement:
case HloOpcode::kInfeed:
case HloOpcode::kIota:
case HloOpcode::kPad:
case HloOpcode::kReduceWindow:
case HloOpcode::kReshape:
case HloOpcode::kScatter:
case HloOpcode::kSelectAndScatter:
case HloOpcode::kSlice:
case HloOpcode::kSort:
case HloOpcode::kTranspose:
case HloOpcode::kTuple:
case HloOpcode::kWhile:
case HloOpcode::kReduce:
case HloOpcode::kRngBitGenerator:
case HloOpcode::kAllReduce:
case HloOpcode::kReduceScatter:
return true;
case HloOpcode::kParameter:
return allow_spmd_sharding_propagation_to_parameters ||
computation_map.find(instruction->parent()) !=
computation_map.end();
case HloOpcode::kReverse:
return is_spmd;
case HloOpcode::kCustomCall:
if (!is_spmd) {
return false;
}
if (auto* partitioner =
GetCustomCallPartitioner(instruction->custom_call_target())) {
return partitioner->IsCustomCallShardable(instruction);
}
return (IsPassthroughCustomOps(instruction) ||
sharding_helper->IsCustomCallShardable(instruction));
default:
return false;
}
}
std::optional<HloSharding> LookaheadUserSharding(HloInstruction* instr,
bool is_spmd,
const CallGraph& call_graph) {
if (instr->user_count() != 1) {
return std::nullopt;
}
HloInstruction* current_user = instr->users()[0];
std::optional<HloSharding> sharding;
std::vector<HloInstruction*> users_chain = {instr, current_user};
while (!current_user->has_sharding()) {
if (current_user->users().size() != 1) {
users_chain.clear();
break;
}
current_user = current_user->users()[0];
users_chain.push_back(current_user);
}
if (users_chain.empty()) {
return std::nullopt;
}
for (int i = users_chain.size() - 1; i >= 1; --i) {
HloInstruction* user = users_chain[i];
HloInstruction* current = users_chain[i - 1];
CHECK(user->has_sharding());
sharding = ShardingPropagation::GetShardingFromUser(
*current, *user, INT64_MAX, is_spmd, call_graph,
nullptr);
if (sharding.has_value() && i != 1) {
current->set_sharding(*sharding);
continue;
}
break;
}
for (int i = 1; i < users_chain.size() - 1; ++i) {
users_chain[i]->clear_sharding();
}
return sharding;
}
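// Propagates sharding onto a gather along its parallel (implicit batch)
// dimensions, mapping shardings from the operand and from the indices onto
// the corresponding output dimensions.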
bool InferGatherParallelShardingFromOperands(
HloInstruction* instruction,
const hlo_sharding_util::GatherScatterParallelDims& parallel_dims,
bool may_combine_partial_sharding) {
CHECK(DynCast<HloGatherInstruction>(instruction));
bool changed = false;
auto output_parallel_dims = hlo_sharding_util::GetGatherParallelOutputDims(
*instruction, parallel_dims);
if (hlo_sharding_util::IsSpatiallyPartitioned(instruction->operand(0))) {
changed |= MaybeImproveInstructionSharding(
hlo_sharding_util::
InferGatherScatterParallelShardingFromOperandSharding(
instruction->operand(0)->sharding(), instruction->shape(),
absl::MakeConstSpan(parallel_dims.operand_parallel_dims),
absl::MakeConstSpan(output_parallel_dims)),
instruction, may_combine_partial_sharding);
}
if (hlo_sharding_util::IsSpatiallyPartitioned(instruction->operand(1))) {
changed |= MaybeImproveInstructionSharding(
hlo_sharding_util::
InferGatherScatterParallelShardingFromOperandSharding(
instruction->operand(1)->sharding(), instruction->shape(),
absl::MakeConstSpan(parallel_dims.indices_parallel_dims),
absl::MakeConstSpan(output_parallel_dims)),
instruction, may_combine_partial_sharding);
}
return changed;
}
bool InferScatterParallelShardingFromOperands(
HloInstruction* instruction,
const hlo_sharding_util::GatherScatterParallelDims& parallel_dims,
bool may_combine_partial_sharding) {
HloScatterInstruction* scatter = DynCast<HloScatterInstruction>(instruction);
CHECK(scatter);
const int64_t operand_count = scatter->scatter_operand_count();
auto scatter_operands = scatter->scatter_operands();
auto scatter_indices = scatter->scatter_indices();
auto scatter_updates = scatter->scatter_updates();
bool changed = false;
auto update_parallel_dims = hlo_sharding_util::GetScatterParallelUpdateDims(
*instruction, parallel_dims);
Shape shape = operand_count == 1
? instruction->shape()
: ShapeUtil::GetSubshape(instruction->shape(), {0});
for (int64_t i = 0; i != operand_count; ++i) {
if (hlo_sharding_util::IsSpatiallyPartitioned(scatter_operands[i])) {
changed |= MaybeImproveInstructionSubSharding(
hlo_sharding_util::
InferGatherScatterParallelShardingFromOperandSharding(
scatter_operands[i]->sharding(), shape,
absl::MakeConstSpan(parallel_dims.operand_parallel_dims),
absl::MakeConstSpan(parallel_dims.operand_parallel_dims)),
instruction, {i}, may_combine_partial_sharding);
}
}
if (hlo_sharding_util::IsSpatiallyPartitioned(scatter_indices)) {
auto parallel_sharding_from_indices = hlo_sharding_util::
InferGatherScatterParallelShardingFromOperandSharding(
scatter_indices->sharding(), shape,
absl::MakeConstSpan(parallel_dims.indices_parallel_dims),
absl::MakeConstSpan(parallel_dims.operand_parallel_dims));
for (int64_t i = 0; i != operand_count; ++i) {
changed |= MaybeImproveInstructionSubSharding(
parallel_sharding_from_indices, instruction, {i},
may_combine_partial_sharding);
}
}
for (int64_t i = 0; i != operand_count; ++i) {
if (hlo_sharding_util::IsSpatiallyPartitioned(scatter_updates[i])) {
changed |= MaybeImproveInstructionSubSharding(
hlo_sharding_util::
InferGatherScatterParallelShardingFromOperandSharding(
scatter_updates[i]->sharding(), shape,
absl::MakeConstSpan(update_parallel_dims),
absl::MakeConstSpan(parallel_dims.operand_parallel_dims)),
instruction, {i}, may_combine_partial_sharding);
}
}
return changed;
}
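// Gates propagation by aggressiveness level: at level 0 only elementwise
// ops, "Sharding" custom-calls, and a fixed set of data-movement ops
// (e.g. transpose, reshape, tuple, while, dynamic-(update-)slice) may
// propagate; broadcast additionally requires level >= 2.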
bool CanPropagateThroughAtAggressiveLevel(const HloInstruction& inst,
int64_t aggressiveness) {
if (aggressiveness < 1 &&
!(inst.IsElementwise() || inst.IsCustomCall("Sharding")) &&
inst.opcode() != HloOpcode::kTranspose &&
inst.opcode() != HloOpcode::kReshape &&
inst.opcode() != HloOpcode::kTuple &&
inst.opcode() != HloOpcode::kGetTupleElement &&
inst.opcode() != HloOpcode::kWhile &&
inst.opcode() != HloOpcode::kDynamicSlice &&
inst.opcode() != HloOpcode::kDynamicUpdateSlice &&
inst.opcode() != HloOpcode::kOptimizationBarrier &&
inst.opcode() != HloOpcode::kConcatenate &&
inst.opcode() != HloOpcode::kCall && inst.opcode() != HloOpcode::kCopy) {
return false;
}
if (aggressiveness < 2 && inst.opcode() == HloOpcode::kBroadcast) {
return false;
}
return true;
}
bool SameShardingMetadata(const HloSharding& a, const HloSharding& b) {
DCHECK_EQ(a, b);
auto same_metadata = [](absl::Span<const OpMetadata> a,
absl::Span<const OpMetadata> b) {
if (a.size() != b.size()) return false;
for (int i = 0, e = a.size(); i < e; ++i) {
if (!protobuf_util::ProtobufEquals(a[i], b[i])) {
return false;
}
}
return true;
};
if (a.IsTuple()) {
for (int i = 0, e = a.tuple_elements().size(); i < e; ++i) {
if (!same_metadata(a.tuple_elements()[i].metadata(),
b.tuple_elements()[i].metadata())) {
return false;
}
}
return true;
} else {
return same_metadata(a.metadata(), b.metadata());
}
}
bool AssignShardingMetadata(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* computation : module->computations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
const auto& metadata = instruction->metadata();
if (!instruction->has_sharding() || metadata.ByteSizeLong() == 0) {
continue;
}
HloSharding sharding_with_metadata =
instruction->sharding().WithMetadata({metadata},
false);
if (!SameShardingMetadata(instruction->sharding(),
sharding_with_metadata)) {
instruction->set_sharding(std::move(sharding_with_metadata));
changed = true;
}
}
}
return changed;
}
bool RemoveShardingMetadata(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* computation : module->computations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
if (!instruction->has_sharding()) {
continue;
}
HloSharding sharding_no_metadata =
instruction->sharding().WithoutMetadata();
if (!SameShardingMetadata(instruction->sharding(),
sharding_no_metadata)) {
instruction->set_sharding(std::move(sharding_no_metadata));
changed = true;
}
}
}
return changed;
}
absl::Status CheckAndUpdateDeviceAssignmentsInWhileBody(
HloInstruction* while_instruction) {
auto bad_status = [](HloInstruction* instruction, int64_t device,
HloInstruction* channel_instruction,
int64_t correct_device) {
return FailedPrecondition(
"Instruction: %s is on device: %d, which conflicts with device: %d "
"of channel instruction: %s",
instruction->name(), device, correct_device,
channel_instruction->name());
};
CHECK_EQ(while_instruction->opcode(), HloOpcode::kWhile);
HloComputation* while_body = while_instruction->while_body();
std::map<int64_t, HloInstruction*> devices_to_instructions;
std::optional<int64_t> unique_device = std::nullopt;
HloInstruction* channel_instruction = nullptr;
for (HloInstruction* instruction : while_body->instructions()) {
if (instruction->sharding_unique_device()) {
auto opcode = instruction->opcode();
int64_t device = *instruction->sharding_unique_device();
if (unique_device.has_value()) {
if (*unique_device != device) {
return bad_status(instruction, device, channel_instruction,
*unique_device);
}
      } else if (((opcode == HloOpcode::kSend || opcode == HloOpcode::kRecv) &&
                  !Cast<HloSendRecvInstruction>(instruction)
                       ->is_host_transfer()) ||
                 ((opcode == HloOpcode::kAllReduce ||
                   opcode == HloOpcode::kReduceScatter) &&
                  instruction->channel_id())) {
channel_instruction = instruction;
unique_device = device;
if (!devices_to_instructions.empty()) {
for (auto it = devices_to_instructions.begin();
it != devices_to_instructions.end(); ++it) {
if (*unique_device != it->first) {
return bad_status(it->second, it->first, channel_instruction,
*unique_device);
}
}
}
} else {
devices_to_instructions[device] = instruction;
}
}
}
if (unique_device.has_value()) {
auto while_device = while_instruction->sharding_unique_device();
if (while_device.has_value() && *unique_device != *while_device) {
return bad_status(while_instruction, *while_device, channel_instruction,
*unique_device);
}
auto body_root = while_body->root_instruction();
auto root_device = body_root->sharding_unique_device();
if (!root_device.has_value()) {
body_root->set_device_sharding(*unique_device);
} else if (*unique_device != *root_device) {
return bad_status(body_root, *root_device, channel_instruction,
*unique_device);
}
}
return absl::OkStatus();
}
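// Refines an (auto, manual) sharding pair using the auto sharding `to_merge`,
// keeping tiling only on the dimensions listed in `unspecified_dims`. Returns
// true if both shardings were successfully refined.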
bool RefineManualAutoShardingFromAuto(
const HloSharding& to_merge, absl::Span<const int64_t> unspecified_dims,
HloSharding* auto_sharding, HloSharding* manual_sharding) {
if (!manual_sharding->IsManualSubgroup() ||
auto_sharding->IsManualSubgroup() ||
!manual_sharding->HasPartialReplication() ||
manual_sharding->subgroup_types().size() != 2) {
return false;
}
HloSharding partial_rep =
hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept(
to_merge, unspecified_dims);
if (partial_rep.IsTileMaximal()) {
return false;
}
if (!hlo_sharding_util::MergeShardingIfCompatible(partial_rep,
auto_sharding)) {
return false;
}
const int64_t data_rank = partial_rep.TiledDataRank();
std::vector<int64_t> partial_manual_shape(
partial_rep.tile_assignment().dimensions().begin(),
partial_rep.tile_assignment().dimensions().end());
partial_manual_shape.insert(partial_manual_shape.begin() + data_rank, 1);
auto partial_tiling_for_manual =
partial_rep.tile_assignment().Reshape(partial_manual_shape);
HloSharding partial_rep_for_manual = HloSharding::PartialTile(
partial_tiling_for_manual, partial_rep.metadata());
auto man_tiling = manual_sharding->tile_assignment();
if (manual_sharding->subgroup_types().back() != OpSharding::REPLICATED) {
std::vector<int> transposed_dims(man_tiling.num_dimensions());
absl::c_iota(transposed_dims, 0);
std::swap(transposed_dims.back(), transposed_dims[data_rank]);
man_tiling = man_tiling.Transpose(transposed_dims);
}
HloSharding tmp_sharding_for_merging = HloSharding::PartialTile(
std::move(man_tiling), manual_sharding->metadata());
if (!hlo_sharding_util::MergeShardingIfCompatible(
partial_rep_for_manual, &tmp_sharding_for_merging)) {
return false;
}
std::vector<OpSharding::Type> subgroup_types;
subgroup_types.push_back(OpSharding::MANUAL);
if (tmp_sharding_for_merging.HasPartialReplication()) {
subgroup_types.push_back(OpSharding::REPLICATED);
}
*manual_sharding = HloSharding::Subgroup(
tmp_sharding_for_merging.tile_assignment(), subgroup_types,
tmp_sharding_for_merging.metadata());
return true;
}
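// Refines an (auto, manual) sharding pair using the manual sharding
// `to_merge`, restricted to `unspecified_dims`. Returns true if both
// shardings were successfully refined.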
bool RefineManualAutoShardingFromManual(
const HloSharding& to_merge, absl::Span<const int64_t> unspecified_dims,
HloSharding* auto_sharding, HloSharding* manual_sharding) {
if (!to_merge.IsManualSubgroup() || !manual_sharding->IsManualSubgroup() ||
!manual_sharding->HasPartialReplication() ||
auto_sharding->IsManualSubgroup() ||
manual_sharding->subgroup_types().size() != 2) {
return false;
}
HloSharding partial_rep =
hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept(
to_merge, unspecified_dims);
if (partial_rep.IsTileMaximal()) {
return false;
}
if (!hlo_sharding_util::MergeShardingIfCompatible(partial_rep,
manual_sharding)) {
return false;
}
HloSharding partial_rep_for_auto = HloSharding::Subgroup(
partial_rep.tile_assignment(),
std::vector<OpSharding::Type>(partial_rep.subgroup_types().size(),
OpSharding::REPLICATED),
partial_rep.metadata());
if (!hlo_sharding_util::MergeShardingIfCompatible(partial_rep_for_auto,
auto_sharding)) {
return false;
}
return true;
}
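// Infers the sharding of the unspecified dims of a Sharding custom-call (or
// copy) from its operand. If the annotation is followed by a single
// SPMDFullToShardShape/SPMDShardToFullShape conversion with matching
// unspecified dims, both sides of the conversion are refined and
// `man_conversion_op_after` is set to the conversion op.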
bool InferUnspecifiedDimsFromOperand(HloInstruction* annotate_op,
absl::Span<const int64_t> unspecified_dims,
HloInstruction** man_conversion_op_after) {
CHECK(annotate_op->IsCustomCall("Sharding") ||
annotate_op->opcode() == HloOpcode::kCopy);
if (!hlo_sharding_util::IsSpatiallyPartitioned(annotate_op->operand(0))) {
return false;
}
const HloSharding& operand_sharding = annotate_op->operand(0)->sharding();
if (!operand_sharding.IsTiled()) {
return false;
}
HloInstruction* man_conversion_op = nullptr;
if (annotate_op->user_count() == 1) {
HloInstruction* user = annotate_op->users()[0];
if (user->IsCustomCall("SPMDFullToShardShape") ||
user->IsCustomCall("SPMDShardToFullShape")) {
std::vector<int64_t> user_unspec_dims;
if (!sharding_op_util::ParseAttributes(
Cast<HloCustomCallInstruction>(user)->opaque(),
&user_unspec_dims)
.ok()) {
return false;
}
absl::c_sort(user_unspec_dims);
if (unspecified_dims != user_unspec_dims) {
return false;
}
man_conversion_op = user;
}
}
*man_conversion_op_after = man_conversion_op;
if (man_conversion_op == nullptr) {
HloSharding partial_replicated =
hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept(
operand_sharding, unspecified_dims);
HloSharding sharding = annotate_op->sharding();
if (!hlo_sharding_util::MergeShardingIfCompatible(partial_replicated,
&sharding)) {
return false;
}
annotate_op->set_sharding(sharding);
return true;
}
if (man_conversion_op->IsCustomCall("SPMDFullToShardShape")) {
HloSharding auto_sharding = annotate_op->sharding();
HloSharding manual_sharding = man_conversion_op->sharding();
if (!RefineManualAutoShardingFromAuto(operand_sharding, unspecified_dims,
&auto_sharding, &manual_sharding)) {
return false;
}
annotate_op->set_sharding(auto_sharding);
man_conversion_op->set_sharding(manual_sharding);
return true;
}
CHECK(man_conversion_op->IsCustomCall("SPMDShardToFullShape"));
HloSharding manual_sharding = annotate_op->sharding();
HloSharding auto_sharding = man_conversion_op->sharding();
if (!RefineManualAutoShardingFromManual(operand_sharding, unspecified_dims,
&auto_sharding, &manual_sharding)) {
return false;
}
annotate_op->set_sharding(manual_sharding);
man_conversion_op->set_sharding(auto_sharding);
return true;
}
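// Infers the sharding of the unspecified dims of `annotate_op` from a single
// user, going through `man_conversion_op` when the annotation is followed by
// a manual conversion.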
bool InferUnspecifiedDimsFromOneUser(HloInstruction* annotate_op,
const HloInstruction* user,
int64_t aggressiveness, bool is_spmd,
absl::Span<const int64_t> unspecified_dims,
HloInstruction* man_conversion_op,
const CallGraph& call_graph) {
CHECK(annotate_op->IsCustomCall("Sharding") ||
annotate_op->opcode() == HloOpcode::kCopy);
if (!user->has_sharding() || !user->sharding().IsTiled()) {
return false;
}
std::optional<HloSharding> user_sharding =
ShardingPropagation::GetShardingFromUser(
man_conversion_op == nullptr ? *annotate_op : *man_conversion_op,
*user, aggressiveness, is_spmd, call_graph,
            /*sharding_helper=*/nullptr);
if (!user_sharding.has_value() || user_sharding->IsTileMaximal()) {
return false;
}
if (man_conversion_op == nullptr) {
HloSharding partial_replicated =
hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept(
*user_sharding, unspecified_dims);
HloSharding sharding = annotate_op->sharding();
if (!hlo_sharding_util::MergeShardingIfCompatible(partial_replicated,
&sharding)) {
return false;
}
annotate_op->set_sharding(sharding);
return true;
}
if (man_conversion_op->IsCustomCall("SPMDFullToShardShape")) {
HloSharding auto_sharding = annotate_op->sharding();
HloSharding manual_sharding = man_conversion_op->sharding();
if (!RefineManualAutoShardingFromManual(*user_sharding, unspecified_dims,
&auto_sharding, &manual_sharding)) {
return false;
}
annotate_op->set_sharding(auto_sharding);
man_conversion_op->set_sharding(manual_sharding);
return true;
}
CHECK(man_conversion_op->IsCustomCall("SPMDShardToFullShape"));
HloSharding manual_sharding = annotate_op->sharding();
HloSharding auto_sharding = man_conversion_op->sharding();
if (!RefineManualAutoShardingFromAuto(*user_sharding, unspecified_dims,
&auto_sharding, &manual_sharding)) {
return false;
}
annotate_op->set_sharding(manual_sharding);
man_conversion_op->set_sharding(auto_sharding);
return true;
}
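// Infers the sharding of the unspecified dims of `annotate_op` from all of
// its users (or from the users of its trailing manual conversion op).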
bool InferUnspecifiedDimsFromUsers(HloInstruction* annotate_op,
absl::Span<const int64_t> unspecified_dims,
int64_t aggressiveness, bool is_spmd,
HloInstruction** man_conversion_op_after,
const CallGraph& call_graph) {
HloInstruction* man_conversion_op = nullptr;
if (annotate_op->user_count() == 1) {
HloInstruction* user = annotate_op->users()[0];
if (user->IsCustomCall("SPMDFullToShardShape") ||
user->IsCustomCall("SPMDShardToFullShape")) {
      std::vector<int64_t> user_unspec_dims;
      if (!sharding_op_util::ParseAttributes(
               Cast<HloCustomCallInstruction>(user)->opaque(),
               &user_unspec_dims)
               .ok()) {
        return false;
      }
      // Sort the parsed dims before comparing; `unspecified_dims` is sorted.
      absl::c_sort(user_unspec_dims);
      if (unspecified_dims != user_unspec_dims) {
return false;
}
man_conversion_op = user;
}
}
*man_conversion_op_after = man_conversion_op;
HloInstruction* op_for_users =
man_conversion_op == nullptr ? annotate_op : man_conversion_op;
bool changed = false;
for (HloInstruction* user : op_for_users->users()) {
changed |= InferUnspecifiedDimsFromOneUser(
annotate_op, user, aggressiveness, is_spmd, unspecified_dims,
man_conversion_op, call_graph);
}
return changed;
}
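// Infers the sharding of the unspecified dims of `annotate_op` from the other
// members of its shard group.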
bool InferUnspecifiedDimsFromShardGroup(
HloInstruction* annotate_op, absl::Span<const int64_t> unspecified_dims,
const absl::flat_hash_set<HloInstruction*>& shard_group) {
CHECK(annotate_op->IsCustomCall("Sharding") ||
annotate_op->opcode() == HloOpcode::kCopy);
if (annotate_op->IsCustomCall(spmd::kShardBarrierTo)) {
return false;
}
bool changed = false;
for (const HloInstruction* member : shard_group) {
if (member == annotate_op) {
continue;
}
if (member->IsCustomCall(spmd::kShardBarrierFrom)) {
continue;
}
if (!hlo_sharding_util::IsSpatiallyPartitioned(member)) {
continue;
}
const HloSharding& member_sharding = member->sharding();
if (!member_sharding.IsTiled()) {
continue;
}
HloSharding partial_replicated =
hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept(
member_sharding, unspecified_dims);
HloSharding sharding = annotate_op->sharding();
if (!hlo_sharding_util::MergeShardingIfCompatible(partial_replicated,
&sharding)) {
continue;
}
annotate_op->set_sharding(sharding);
    changed = true;
}
return changed;
}
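// Returns whether an instruction is a target for CSE-prevention shardings;
// currently these are broadcasts of scalars.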
bool IsCSEPreventionTarget(const HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kBroadcast &&
instruction->operand(0)->shape().rank() == 0;
}
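// Marks a sharding as CSE prevention by overwriting its metadata with a
// sentinel op_name.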
HloSharding SetCSEPreventionSharding(const HloSharding& sharding) {
OpMetadata metadata;
metadata.set_op_name("_sharding_propagation_cse_prevention");
  return sharding.WithMetadata({metadata}, /*overwrite=*/true);
}
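// Returns whether a sharding carries the CSE-prevention sentinel metadata.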
bool IsCSEPreventionSharding(const HloSharding& sharding) {
if (sharding.metadata().size() != 1) {
return false;
}
return sharding.metadata()[0].op_name() ==
"_sharding_propagation_cse_prevention";
}
}
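// Infers the sharding of a dot-like instruction from its operands: each
// operand's sharding is partially replicated on its contracting dims and
// transposed into the output dimension order. When both operands propose a
// sharding, a lookahead at the user's sharding (falling back to operand byte
// size) decides which proposal takes priority.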
bool InferDotShardingFromOperands(
HloInstruction* instruction, const CallGraph& call_graph,
const dot_as_convolution_util::DotConvolutionDimsInfo& dnums,
bool may_combine_partial_sharding, bool is_spmd) {
auto from_operand = [&](int64_t operand_index) {
auto operand = instruction->operand(operand_index);
const HloSharding& operand_sharding = operand->sharding();
if (operand_sharding.IsTileMaximal()) {
return operand_sharding;
}
std::vector<int64_t> contracting_dims;
contracting_dims.reserve(dnums.contracting_dims.size());
for (const auto& dim : dnums.contracting_dims) {
contracting_dims.push_back(operand_index == 0 ? dim.lhs : dim.rhs);
}
for (const auto& dim : operand_index == 0
? dnums.rhs_non_contracting_dims
: dnums.lhs_non_contracting_dims) {
int64_t d = operand_index == 0 ? dim.lhs : dim.rhs;
if (d >= 0) {
contracting_dims.push_back(d);
}
}
auto replicate_contracting_dims =
hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(
operand_sharding, contracting_dims);
std::vector<int64_t> out_dims_to_op_perm(instruction->shape().rank(), -1);
std::vector<int64_t> op_dims_to_output_perm(operand->shape().rank(), -1);
for (const auto& dim : dnums.batch_dims) {
out_dims_to_op_perm[dim.output] = operand_index == 0 ? dim.lhs : dim.rhs;
op_dims_to_output_perm[operand_index == 0 ? dim.lhs : dim.rhs] =
dim.output;
}
for (const auto& dim : operand_index == 0
? dnums.lhs_non_contracting_dims
: dnums.rhs_non_contracting_dims) {
out_dims_to_op_perm[dim.output] = operand_index == 0 ? dim.lhs : dim.rhs;
op_dims_to_output_perm[operand_index == 0 ? dim.lhs : dim.rhs] =
dim.output;
}
return *hlo_sharding_util::TransposeShardingWithCollapsedDims(
replicate_contracting_dims, op_dims_to_output_perm,
out_dims_to_op_perm);
};
std::optional<HloSharding> improved_operand_0;
std::optional<HloSharding> improved_operand_1;
if (hlo_sharding_util::IsSpatiallyPartitioned(instruction->operand(0))) {
improved_operand_0 = ReturnImprovedSharding(
from_operand(0), instruction, may_combine_partial_sharding,
        /*allow_aggressive_resharding=*/false);
}
if (hlo_sharding_util::IsSpatiallyPartitioned(instruction->operand(1))) {
improved_operand_1 = ReturnImprovedSharding(
from_operand(1), instruction, may_combine_partial_sharding,
        /*allow_aggressive_resharding=*/false);
}
if (!improved_operand_0.has_value() && !improved_operand_1.has_value()) {
return false;
}
if (improved_operand_0.has_value() && !improved_operand_1.has_value()) {
instruction->set_sharding(*improved_operand_0);
return true;
}
if (!improved_operand_0.has_value() && improved_operand_1.has_value()) {
instruction->set_sharding(*improved_operand_1);
return true;
}
CHECK(improved_operand_0.has_value() && improved_operand_1.has_value());
std::optional<HloSharding> lookahead_sharding =
LookaheadUserSharding(instruction, is_spmd, call_graph);
std::array<HloSharding, 2> sharding_priority = {*improved_operand_0,
*improved_operand_1};
bool priority_defined_with_lookahead = false;
if (lookahead_sharding.has_value()) {
const bool operand_0_is_lookahead_subtiling =
hlo_sharding_util::IsSubTilingOrEqualSharding(
instruction->shape(), *lookahead_sharding, *improved_operand_0);
const bool operand_1_is_lookahead_subtiling =
hlo_sharding_util::IsSubTilingOrEqualSharding(
instruction->shape(), *lookahead_sharding, *improved_operand_1);
if (operand_0_is_lookahead_subtiling && !operand_1_is_lookahead_subtiling) {
priority_defined_with_lookahead = true;
}
if (!operand_0_is_lookahead_subtiling && operand_1_is_lookahead_subtiling) {
instruction->set_sharding(*improved_operand_1);
std::swap(sharding_priority[0], sharding_priority[1]);
priority_defined_with_lookahead = true;
}
}
if (!priority_defined_with_lookahead &&
ShapeUtil::ByteSizeOf(instruction->operand(0)->shape()) <
ShapeUtil::ByteSizeOf(instruction->operand(1)->shape())) {
std::swap(sharding_priority[0], sharding_priority[1]);
}
instruction->set_sharding(sharding_priority[0]);
MaybeImproveInstructionSharding(sharding_priority[1], instruction,
may_combine_partial_sharding);
return true;
}
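// Infers the sharding of a convolution from its operands. Convolutions that
// are effectively dots are delegated to InferDotShardingFromOperands;
// otherwise the LHS sharding is propagated, transposed to the output
// dimensions for small kernels or restricted to the batch dimension.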
bool InferConvolutionShardingFromOperands(HloInstruction* instruction,
const CallGraph& call_graph,
int64_t aggressiveness,
bool may_combine_partial_sharding,
bool is_spmd) {
auto get_partitions_for_dims =
[&](const HloInstruction* inst,
absl::Span<
const dot_as_convolution_util::DotConvolutionDimsInfo::DimNums>
dims,
int lhs_or_rhs) {
int64_t partitions = 1;
if (!inst->has_sharding()) {
return partitions;
}
const auto& sharding = inst->sharding();
if (sharding.IsTileMaximal()) {
return partitions;
}
for (const auto& dim : dims) {
if (lhs_or_rhs == 0) {
partitions *= sharding.tile_assignment().dim(dim.lhs);
} else {
CHECK_EQ(lhs_or_rhs, 1);
partitions *= sharding.tile_assignment().dim(dim.rhs);
}
}
return partitions;
};
auto dot_dims =
dot_as_convolution_util::ParseConvolutionDimsInfo(instruction);
const int64_t lhs_conv_spatial_partitions = get_partitions_for_dims(
instruction->operand(0), dot_dims.conv_spatial_dims, 0);
const int64_t rhs_conv_spatial_partitions = get_partitions_for_dims(
instruction->operand(1), dot_dims.conv_spatial_dims, 1);
if (dot_dims.conv_spatial_dims.empty() ||
(lhs_conv_spatial_partitions == 1 && rhs_conv_spatial_partitions == 1 &&
instruction->batch_group_count() == 1 &&
instruction->feature_group_count() == 1)) {
return InferDotShardingFromOperands(instruction, call_graph, dot_dims,
may_combine_partial_sharding, is_spmd);
}
const auto& dnums = instruction->convolution_dimension_numbers();
const HloInstruction* lhs = instruction->operand(0);
auto get_tiled_sharding_based_on_lhs = [&] {
CHECK(!lhs->sharding().IsTileMaximal());
std::vector<int64_t> output_to_lhs_indices(instruction->shape().rank());
output_to_lhs_indices[dnums.output_batch_dimension()] =
dnums.input_batch_dimension();
output_to_lhs_indices[dnums.output_feature_dimension()] =
dnums.input_feature_dimension();
for (int64_t i = 0; i < dnums.input_spatial_dimensions_size(); ++i) {
output_to_lhs_indices[dnums.output_spatial_dimensions(i)] =
dnums.input_spatial_dimensions(i);
}
return hlo_sharding_util::TransposeSharding(lhs->sharding(),
output_to_lhs_indices);
};
if (!hlo_sharding_util::IsSpatiallyPartitioned(lhs)) {
return false;
}
if (lhs->sharding().IsTileMaximal()) {
return MaybeImproveInstructionSharding(lhs->sharding(), instruction,
may_combine_partial_sharding);
}
if (IsConvolutionKernelSmall(instruction)) {
const auto& tile_assignment = lhs->sharding().tile_assignment();
if (tile_assignment.dim(dnums.input_feature_dimension()) > 1) {
return false;
}
return MaybeImproveInstructionSharding(get_tiled_sharding_based_on_lhs(),
instruction,
may_combine_partial_sharding);
}
return MaybeImproveInstructionSharding(
hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept(
lhs->sharding(), {dnums.input_batch_dimension()}),
instruction, may_combine_partial_sharding);
}
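// Infers the operand sharding of a broadcast from the broadcast's own
// sharding by replicating and then removing the broadcasted dimensions.
// Returns nullopt if doing so would require replication outside SPMD mode.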
std::optional<HloSharding> InferBroadcastOperandSharding(
const HloInstruction& instruction, bool is_spmd) {
if (instruction.sharding().IsReplicated() ||
instruction.sharding().IsManual()) {
return instruction.sharding();
}
std::vector<int64_t> dims_to_replicate;
bool needs_replication = false;
for (int64_t i = 0; i < instruction.shape().rank(); ++i) {
if (absl::c_count(instruction.dimensions(), i) == 0) {
dims_to_replicate.push_back(i);
if (instruction.sharding().tile_assignment().dim(i) > 1) {
needs_replication = true;
}
}
}
if (!is_spmd && needs_replication) {
return std::nullopt;
}
return hlo_sharding_util::RemoveShapeDimensions(
hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(
instruction.sharding(), dims_to_replicate),
dims_to_replicate);
}
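// Infers the sharding of a reduce from its inputs by partially replicating
// the reduced dimensions and then removing them from the input sharding.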
bool InferReduceShardingFromOperand(HloInstruction* instruction,
bool may_combine_partial_sharding,
bool is_spmd) {
auto get_maybe_tuple_sharding = [&](HloSharding sharding) {
if (instruction->shape().IsArray()) {
return sharding;
}
std::vector<HloSharding> tuple(instruction->shape().tuple_shapes_size(),
std::move(sharding));
return HloSharding::Tuple(instruction->shape(), tuple);
};
auto* reduce = Cast<HloReduceInstruction>(instruction);
bool changed = false;
for (int64_t i = 0; i != reduce->inputs().size(); ++i) {
HloInstruction* operand = reduce->inputs()[i];
if (!hlo_sharding_util::IsSpatiallyPartitioned(operand)) {
continue;
}
if (operand->sharding().IsManual()) {
changed |= MaybeImproveInstructionSubSharding(
operand->sharding(), reduce, {i}, may_combine_partial_sharding,
ComputeNonRootUsers(instruction) == 1);
continue;
}
if (operand->sharding().IsReplicated() ||
(!is_spmd &&
absl::c_any_of(instruction->dimensions(), [operand](int64_t dim) {
return operand->sharding().tile_assignment().dim(dim) > 1;
}))) {
changed |= MaybeImproveInstructionSharding(
get_maybe_tuple_sharding(
hlo_sharding_util::ReplicateAllDataDims(operand->sharding())),
reduce, may_combine_partial_sharding,
ComputeNonRootUsers(instruction) == 1);
continue;
}
auto after_partial_replication =
operand->sharding().IsReplicated()
? operand->sharding()
: hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(
operand->sharding(), reduce->dimensions());
if (after_partial_replication.IsReplicated()) {
changed |= MaybeImproveInstructionSharding(
get_maybe_tuple_sharding(after_partial_replication), reduce,
may_combine_partial_sharding,
ComputeNonRootUsers(instruction) == 1);
continue;
}
HloSharding new_sharding =
get_maybe_tuple_sharding(hlo_sharding_util::RemoveShapeDimensions(
after_partial_replication, reduce->dimensions()));
changed |= MaybeImproveInstructionSharding(
std::move(new_sharding), reduce, may_combine_partial_sharding,
ComputeNonRootUsers(reduce) == 1);
}
return changed;
}
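// Canonicalizes the sharding annotations in a module: optionally replaces
// Sharding custom-calls with copies, records their unspecified dims, collects
// shard_as/shard_like groups, and saves the entry root and parameter
// shardings. Returns true if the module changed.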
absl::StatusOr<bool> ProcessShardingInstruction(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads,
bool replace_sharding_with_copy,
absl::flat_hash_map<const HloInstruction*, std::vector<int64_t>>*
unspecified_dims,
std::vector<HloSharding>* saved_root_shardings,
absl::flat_hash_map<int64_t, HloSharding>* saved_parameter_shardings,
absl::flat_hash_map<HloInstruction*, int64_t>*
instruction_to_shard_group_id,
absl::flat_hash_map<int64_t, absl::flat_hash_set<HloInstruction*>>*
shard_group_id_to_shard_as_group,
absl::flat_hash_map<int64_t, absl::flat_hash_set<HloInstruction*>>*
shard_group_id_to_shard_like_group,
const std::vector<bool>*
allow_spmd_sharding_propagation_to_parameters_vector,
bool remove_unknown_shardings) {
bool changed = false;
const bool use_shard_group = instruction_to_shard_group_id &&
shard_group_id_to_shard_as_group &&
shard_group_id_to_shard_like_group;
auto process_shard_group_instruction =
[&](HloInstruction* instruction,
bool replaced_with_copy) -> absl::StatusOr<bool> {
if (replace_sharding_with_copy) {
if (use_shard_group && instruction->has_sharding() &&
instruction->sharding().IsShardGroup()) {
if (instruction->IsCustomCall("Sharding")) {
CHECK(instruction->operand(0)->opcode() != HloOpcode::kParameter ||
(allow_spmd_sharding_propagation_to_parameters_vector &&
allow_spmd_sharding_propagation_to_parameters_vector->size() ==
module->entry_computation()->num_parameters() &&
allow_spmd_sharding_propagation_to_parameters_vector->at(
instruction->operand(0)->parameter_number())));
}
if (instruction->IsCustomCall("Sharding") && !replaced_with_copy) {
HloSharding operand_sharding =
instruction->operand(0)->has_sharding()
? instruction->operand(0)->sharding()
: HloSharding::Unknown();
operand_sharding.SetShardGroup(
instruction->sharding().GetShardGroup());
instruction->mutable_operand(0)->set_sharding(
std::move(operand_sharding));
return true;
} else {
const int64_t shard_group_id =
instruction->sharding().GetShardGroup().shard_group_id;
(*instruction_to_shard_group_id)[instruction] = shard_group_id;
if (instruction->sharding().IsShardAs()) {
auto& shard_as_group =
(*shard_group_id_to_shard_as_group)[shard_group_id];
if (!shard_as_group.empty()) {
CHECK(ShapeUtil::SameDimensions(
instruction->shape(), (*shard_as_group.begin())->shape()))
<< "Instruction: " << instruction->ToString()
<< " has different shape from the shapes of the other "
"instructions within the same shard_as group: "
<< (*shard_as_group.begin())->shape().ToString();
}
shard_as_group.insert(instruction);
} else {
auto& shard_like_group =
(*shard_group_id_to_shard_like_group)[shard_group_id];
if (!shard_like_group.empty()) {
CHECK(ShapeUtil::SameDimensions(
instruction->shape(), (*shard_like_group.begin())->shape()))
<< "Instruction: " << instruction->ToString()
<< " has different shape from the shapes of the other "
"instructions within the same shard_like group: "
<< (*shard_like_group.begin())->shape().ToString();
}
shard_like_group.insert(instruction);
}
HloSharding sharding = instruction->sharding();
sharding.ClearShardGroup();
instruction->set_sharding(std::move(sharding));
}
}
}
return false;
};
for (HloComputation* computation : module->computations(execution_threads)) {
auto instructions = computation->MakeInstructionPostOrder();
for (auto it = instructions.rbegin(); it != instructions.rend(); ++it) {
HloInstruction* instruction = *it;
if (instruction->IsCustomCall("Sharding")) {
TF_RET_CHECK(instruction->has_sharding())
<< "Sharding instruction must have a sharding attribute";
VLOG(3) << "ProcessShardingInstruction: " << instruction->ToString();
HloSharding original_sharding = instruction->sharding();
std::vector<int64_t> unspec_dims;
TF_RETURN_IF_ERROR(sharding_op_util::ParseAttributes(
Cast<HloCustomCallInstruction>(instruction)->opaque(),
&unspec_dims));
bool replaced_with_copy =
replace_sharding_with_copy &&
(!original_sharding.IsUnknown() || remove_unknown_shardings ||
instruction->operand(0)->opcode() == HloOpcode::kParameter);
if (replaced_with_copy) {
auto copy = computation->AddInstruction(HloInstruction::CreateUnary(
instruction->shape(), HloOpcode::kCopy,
instruction->mutable_operand(0)));
TF_ASSIGN_OR_RETURN(
std::ignore, computation->ReplaceInstruction(
                               instruction, copy, /*preserve_sharding=*/false,
                               /*relay_control_dependency=*/false,
                               /*remove_unused_operands=*/false));
copy->set_sharding(std::move(original_sharding));
instruction = copy;
changed = true;
}
TF_ASSIGN_OR_RETURN(
bool shard_group_remove_instruction,
process_shard_group_instruction(instruction, replaced_with_copy));
if (!unspec_dims.empty()) {
absl::c_sort(unspec_dims);
unspecified_dims->emplace(instruction, std::move(unspec_dims));
} else if (!instruction->operand(0)->has_sharding()) {
instruction->mutable_operand(0)->set_sharding(
instruction->sharding());
}
if (shard_group_remove_instruction) {
TF_ASSIGN_OR_RETURN(std::ignore,
computation->ReplaceInstruction(
instruction, instruction->mutable_operand(0),
                                  /*preserve_sharding=*/false,
                                  /*relay_control_dependency=*/false,
                                  /*remove_unused_operands=*/false));
}
} else {
TF_ASSIGN_OR_RETURN(std::ignore,
process_shard_group_instruction(
                                instruction, /*replaced_with_copy=*/false));
}
}
}
HloInstruction* root_instr = module->entry_computation()->root_instruction();
if (saved_root_shardings != nullptr && root_instr->shape().IsTuple() &&
root_instr->has_sharding()) {
saved_root_shardings->reserve(
root_instr->sharding().tuple_elements().size());
for (const HloSharding& sharding :
root_instr->sharding().tuple_elements()) {
saved_root_shardings->push_back(sharding);
}
}
if (saved_parameter_shardings != nullptr) {
auto params = module->entry_computation()->parameter_instructions();
for (int64_t i = 0; i < params.size(); ++i) {
if (params[i]->has_sharding()) {
saved_parameter_shardings->insert({i, params[i]->sharding()});
}
}
}
return changed;
}
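// Counts the users of `instr` that are not the root of its computation.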
int64_t ComputeNonRootUsers(const HloInstruction* instr) {
int64_t non_root_users = instr->users().size();
for (int i = 0; i < instr->users().size(); ++i) {
if (instr->users()[i] == instr->parent()->root_instruction()) {
--non_root_users;
}
}
return non_root_users;
}
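// If the domain metadata carries a spatially partitioned sharding, pushes it
// onto the operands of the domain's exit instructions; otherwise falls back
// to the default sharding-domain normalization.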
absl::Status ShardingPropagation::NormalizeDomain(
const DomainMetadata::Domain& domain, const DomainMetadata* metadata) {
if (metadata != nullptr) {
TF_ASSIGN_OR_RETURN(const auto& sharding_metadata,
ShardingMetadata::ToShardingMetadata(metadata));
const auto& sharding = sharding_metadata->sharding();
if (sharding != nullptr) {
bool is_spatially_partitioned = !sharding->HasUniqueDevice();
if (sharding->IsTuple()) {
is_spatially_partitioned = absl::c_any_of(
sharding->tuple_elements(),
[](const HloSharding& s) { return !s.HasUniqueDevice(); });
}
if (is_spatially_partitioned) {
for (HloInstruction* d : domain.exit_domains) {
HloInstruction* operand = d->mutable_operand(0);
if (!operand->has_sharding() || operand->sharding() != *sharding) {
HloSharding operand_sharding = *sharding;
if (operand->shape().IsTuple() && !sharding->IsTuple()) {
operand_sharding =
HloSharding::SingleTuple(operand->shape(), *sharding);
}
operand->set_sharding(std::move(operand_sharding));
}
}
return absl::OkStatus();
}
}
}
return ShardingMetadata::NormalizeShardingDomain(domain, metadata);
}
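// Computes the sharding `instruction` should receive from one of its users,
// dispatching on the user's opcode. Returns nullopt when nothing can be
// inferred at the given aggressiveness level.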
std::optional<HloSharding> ShardingPropagation::GetShardingFromUser(
const HloInstruction& instruction, const HloInstruction& user,
int64_t aggressiveness, bool is_spmd, const CallGraph& call_graph,
const CustomCallShardingHelper* sharding_helper) {
if (!CanPropagateThroughAtAggressiveLevel(user, aggressiveness)) {
return std::nullopt;
}
if (!hlo_sharding_util::IsSpatiallyPartitioned(&user)) {
return std::nullopt;
}
const bool may_combine_partial_sharding = is_spmd && aggressiveness > 0;
switch (user.opcode()) {
case HloOpcode::kBroadcast: {
return InferBroadcastOperandSharding(user, is_spmd);
}
case HloOpcode::kConcatenate: {
if (aggressiveness == 0) {
return std::nullopt;
}
if (user.sharding().IsReplicated()) {
return user.sharding();
}
const int64_t cdim = user.concatenate_dimension();
auto& tile_assignment = user.sharding().tile_assignment();
if (tile_assignment.dim(cdim) == 1) {
return user.sharding();
}
if (is_spmd) {
return user.sharding();
}
int64_t start_offset = 0;
for (HloInstruction* op : user.operands()) {
if (op == &instruction) {
break;
}
start_offset += op->shape().dimensions(cdim);
}
const int64_t tile_shape = CeilOfRatio(
user.shape().dimensions(cdim), tile_assignment.dimensions()[cdim]);
std::vector<int64_t> start_indices(tile_assignment.num_dimensions());
std::vector<int64_t> end_indices(tile_assignment.dimensions().begin(),
tile_assignment.dimensions().end());
start_indices[cdim] = start_offset / tile_shape;
end_indices[cdim] = CeilOfRatio(
start_offset + instruction.shape().dimensions(cdim), tile_shape);
auto new_tile_assignment =
tile_assignment.array().Slice(start_indices, end_indices);
if (new_tile_assignment.num_elements() == 1) {
return HloSharding::AssignDevice(*new_tile_assignment.begin(),
user.sharding().metadata());
}
return HloSharding::Tile(std::move(new_tile_assignment),
user.sharding().metadata());
}
case HloOpcode::kConvolution: {
auto dot_dims = dot_as_convolution_util::ParseConvolutionDimsInfo(&user);
if (dot_dims.conv_spatial_dims.empty()) {
int64_t op_idx = user.operand_index(&instruction);
return hlo_sharding_util::InferDotOperandSharding(
            &user, op_idx, dot_dims, /*consider_other_operand=*/true,
may_combine_partial_sharding);
}
return std::nullopt;
}
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice: {
if (aggressiveness == 0) {
return std::nullopt;
}
if (user.sharding().IsReplicated()) {
return user.sharding();
}
if (user.opcode() == HloOpcode::kDynamicUpdateSlice &&
&instruction == user.operand(0)) {
return user.sharding();
}
const HloInstruction* operand = user.opcode() == HloOpcode::kDynamicSlice
? user.operand(0)
: user.operand(1);
if (&instruction != operand) {
return std::nullopt;
}
std::vector<int64_t> slice_dims;
for (int64_t i = 0; i < user.shape().rank(); ++i) {
if (user.shape().dimensions(i) != operand->shape().dimensions(i)) {
slice_dims.push_back(i);
}
}
return hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(
user.sharding(), slice_dims);
}
case HloOpcode::kReduceWindow: {
auto* reduce_window = Cast<HloReduceWindowInstruction>(&user);
if (!absl::c_linear_search(reduce_window->inputs(), &instruction)) {
return std::nullopt;
}
if (reduce_window->shape().IsTuple()) {
auto sub_sharding = reduce_window->sharding().GetSubSharding(
reduce_window->shape(),
{reduce_window->operand_index(&instruction)});
return sub_sharding;
}
return reduce_window->sharding();
}
case HloOpcode::kReshape: {
return hlo_sharding_util::PropagateShardingThroughReshape(
user.shape(), instruction.shape(), user.sharding());
}
case HloOpcode::kPad: {
if (&instruction != user.operand(0)) {
return std::nullopt;
}
return user.sharding();
}
case HloOpcode::kSlice: {
return user.sharding();
}
case HloOpcode::kTranspose: {
std::vector<int64_t> reverse_dimensions(user.dimensions().size());
for (int64_t i = 0; i < user.dimensions().size(); ++i) {
reverse_dimensions[user.dimensions(i)] = i;
}
return hlo_sharding_util::TransposeSharding(user.sharding(),
reverse_dimensions);
}
case HloOpcode::kTuple: {
auto sub_sharding = user.sharding().GetSubSharding(
user.shape(), {user.operand_index(&instruction)});
for (int64_t i = 0; i < user.shape().tuple_shapes_size(); ++i) {
if (user.operand(i) == &instruction) {
HloSharding alternative_sub_sharding =
user.sharding().GetSubSharding(user.shape(), {i});
if (hlo_sharding_util::IsShardingMoreSpecific(
alternative_sub_sharding, sub_sharding)) {
sub_sharding = alternative_sub_sharding;
}
}
}
return sub_sharding;
}
case HloOpcode::kGetTupleElement: {
int64_t sharding_index = 0;
for (int i = 0; i < instruction.shape().tuple_shapes_size(); ++i) {
if (i == user.tuple_index()) {
break;
}
if (instruction.shape().tuple_shapes(i).IsArray()) {
sharding_index += 1;
} else {
sharding_index +=
ShapeUtil::GetLeafCount(instruction.shape().tuple_shapes(i));
}
}
auto base_instruction_sharding = [&](const HloSharding& user_sharding) {
if (instruction.has_sharding()) {
return instruction.sharding();
} else {
std::vector<HloSharding> shardings;
ShapeUtil::ForEachSubshape(
instruction.shape(),
[&](const Shape& sub_shape, const ShapeIndex& index) {
if (ShapeUtil::IsLeafIndex(instruction.shape(), index)) {
shardings.push_back(hlo_sharding_util::ReplicateAllDataDims(
user_sharding, sub_shape.dimensions_size()));
}
});
return HloSharding::Tuple(instruction.shape(), shardings);
}
};
if (user.shape().IsArray()) {
HloSharding new_sharding = base_instruction_sharding(user.sharding());
new_sharding.tuple_elements()[sharding_index] = user.sharding();
return new_sharding;
} else {
if (user.sharding().tuple_elements().empty()) {
return std::nullopt;
}
HloSharding new_sharding =
base_instruction_sharding(user.sharding().tuple_elements()[0]);
for (int64_t i = 0; i < user.sharding().tuple_elements().size(); ++i) {
new_sharding.tuple_elements()[sharding_index + i] =
user.sharding().tuple_elements()[i];
}
return new_sharding;
}
}
case HloOpcode::kDot: {
int64_t op_idx = user.operand_index(&instruction);
auto dnums = dot_as_convolution_util::ParseDotGeneralFromDot(&user);
return hlo_sharding_util::InferDotOperandSharding(
          &user, op_idx, dnums, /*consider_other_operand=*/true,
may_combine_partial_sharding);
}
case HloOpcode::kReduce: {
if (instruction.shape().rank() == 0) {
return std::nullopt;
}
auto user_sharding =
user.shape().IsTuple()
? user.sharding().GetSubSharding(
user.shape(), {user.operand_index(&instruction)})
: user.sharding();
if (!user_sharding.IsTileMaximal()) {
std::vector<int64_t> target_tile_assignment_dimensions(
instruction.shape().rank() +
(user_sharding.ReplicateOnLastTileDim() ? 1 : 0) +
user_sharding.subgroup_types().size());
const auto& dimensions = user.dimensions();
int64_t next_output_dim = 0;
for (int64_t i = 0; i < target_tile_assignment_dimensions.size(); ++i) {
if (absl::c_find(dimensions, i) == dimensions.end()) {
target_tile_assignment_dimensions[i] =
user_sharding.tile_assignment().dim(next_output_dim++);
} else {
target_tile_assignment_dimensions[i] = 1;
}
}
auto tile_assignment = user_sharding.tile_assignment().Reshape(
target_tile_assignment_dimensions);
user_sharding =
user_sharding.ReplicateOnLastTileDim()
? HloSharding::PartialTile(tile_assignment,
user_sharding.metadata())
: HloSharding::Subgroup(tile_assignment,
user_sharding.subgroup_types(),
user_sharding.metadata());
}
const auto* reduce = Cast<const HloReduceInstruction>(&user);
for (const HloInstruction* operand : reduce->inputs()) {
if (operand != &instruction && operand->has_sharding()) {
hlo_sharding_util::MergeShardingIfCompatible(operand->sharding(),
&user_sharding);
}
}
return user_sharding;
}
case HloOpcode::kSort: {
HloSharding user_sharding = user.sharding();
if (user_sharding.IsTuple()) {
return user_sharding.GetSubSharding(user.shape(),
{user.operand_index(&instruction)});
}
return user_sharding;
}
case HloOpcode::kReverse: {
return hlo_sharding_util::ReverseSharding(user.sharding(),
user.dimensions());
}
case HloOpcode::kOutfeed: {
if (&instruction != user.operand(0)) {
return std::nullopt;
}
std::vector<Shape> operand_shapes(user.operand_count());
for (int i = 0; i < user.operand_count(); ++i) {
operand_shapes[i] = user.operand(i)->shape();
}
return user.sharding().GetSubSharding(
ShapeUtil::MakeTupleShape(operand_shapes), {0});
}
case HloOpcode::kGather: {
if (&instruction == user.operand(1)) {
return hlo_sharding_util::
GatherIndexShardingFromOutputIndexPassthroughDimensions(
user.sharding(), &user);
}
if (is_spmd) {
return hlo_sharding_util::GatherOperandShardingFromOutput(
user.sharding(), user, call_graph);
}
return std::nullopt;
}
case HloOpcode::kScatter: {
auto& scatter_user = *Cast<HloScatterInstruction>(&user);
const int64_t operand_count = scatter_user.scatter_operand_count();
auto scatter_operands = scatter_user.scatter_operands();
auto scatter_indices = scatter_user.scatter_indices();
auto scatter_updates = scatter_user.scatter_updates();
const int64_t operand_index =
absl::c_find(scatter_operands, &instruction) -
scatter_operands.cbegin();
if (operand_index < operand_count) {
return user.sharding().IsTuple() ? user.sharding().GetSubSharding(
user.shape(), {operand_index})
: user.sharding();
}
if (&instruction == scatter_indices) {
std::vector<const HloInstruction*> partitioned_updates;
for (const HloInstruction* update : scatter_updates) {
if (hlo_sharding_util::IsSpatiallyPartitioned(update)) {
partitioned_updates.push_back(update);
}
}
if (partitioned_updates.empty()) {
return std::nullopt;
}
std::vector<HloSharding> shardings;
absl::c_transform(
partitioned_updates, std::back_inserter(shardings),
[&scatter_user](const HloInstruction* update) {
return hlo_sharding_util::
ScatterIndexShardingFromUpdateIndexPassthroughDimensions(
update->sharding(), &scatter_user);
});
return hlo_sharding_util::FindCommonSharding(shardings);
}
const int64_t update_index = absl::c_find(scatter_updates, &instruction) -
scatter_updates.cbegin();
      // At this point &instruction must be one of the scatter updates.
      CHECK_LT(update_index, operand_count);
auto from_indices =
hlo_sharding_util::IsSpatiallyPartitioned(scatter_indices)
? hlo_sharding_util::
ScatterUpdateShardingFromIndexIndexPassthroughDimensions(
scatter_indices->sharding(), &scatter_user)
: HloSharding::Replicate();
if (is_spmd) {
auto from_output = hlo_sharding_util::ScatterUpdateShardingFromOutput(
user.sharding().IsTuple()
? user.sharding().GetSubSharding(user.shape(), {update_index})
: user.sharding(),
scatter_user, call_graph);
if (from_output.has_value()) {
hlo_sharding_util::MergeShardingIfCompatible(from_indices,
&*from_output);
if (!from_output->IsTileMaximal()) {
return from_output;
}
}
}
if (!from_indices.IsTileMaximal()) {
return from_indices;
}
return std::nullopt;
}
case HloOpcode::kCustomCall: {
bool compatible_shapes = ShapeUtil::CompatibleIgnoringElementType(
instruction.shape(), user.shape());
if (!compatible_shapes) {
return std::nullopt;
}
if (!sharding_helper) {
return user.sharding();
}
if (sharding_helper->CanPropagateShardingToOperands(&user)) {
return user.sharding();
}
return std::nullopt;
}
default: {
if (ShapeUtil::CompatibleIgnoringElementType(instruction.shape(),
user.shape())) {
return user.sharding();
}
return std::nullopt;
}
}
}
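// Checks whether a concatenate operand's sharding may pass through at
// aggressiveness 0: it must be a tiled reshape of a parameter or a
// get-tuple-element.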
bool AggressiveConcatOperandShardingCanPassThrough(
const HloInstruction* concat_operand) {
return (
hlo_sharding_util::IsSpatiallyPartitioned(concat_operand) &&
(concat_operand->has_sharding() &&
concat_operand->sharding().NumTiles() > 1) &&
concat_operand->opcode() == HloOpcode::kReshape &&
(concat_operand->operand(0)->opcode() == HloOpcode::kParameter ||
concat_operand->operand(0)->opcode() == HloOpcode::kGetTupleElement));
}
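// Infers the sharding of a dynamic-slice from its base operand, or of a
// dynamic-update-slice from its update operand, partially replicating the
// dimensions being sliced.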
bool InferDynamicUpdateSliceShardingFromOperand1(
HloInstruction* instruction, bool may_combine_partial_sharding) {
CHECK(instruction->opcode() == HloOpcode::kDynamicSlice ||
instruction->opcode() == HloOpcode::kDynamicUpdateSlice);
const HloInstruction* operand =
instruction->opcode() == HloOpcode::kDynamicSlice
? instruction->operand(0)
: instruction->operand(1);
if (!hlo_sharding_util::IsSpatiallyPartitioned(operand)) {
return false;
}
CHECK(!operand->sharding().IsManual());
std::vector<int64_t> slice_dims;
for (int64_t i = 0; i < instruction->shape().rank(); ++i) {
if (instruction->shape().dimensions(i) != operand->shape().dimensions(i)) {
slice_dims.push_back(i);
}
}
return MaybeImproveInstructionSharding(
hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(
operand->sharding(), slice_dims),
instruction, may_combine_partial_sharding,
ComputeNonRootUsers(instruction) == 1);
}
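// Infers the sharding of a dynamic-update-slice from its base operand.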
bool InferDynamicUpdateSliceShardingFromOperand0(
HloInstruction* instruction, bool may_combine_partial_sharding) {
CHECK_EQ(instruction->opcode(), HloOpcode::kDynamicUpdateSlice);
if (!hlo_sharding_util::IsSpatiallyPartitioned(instruction->operand(0))) {
return false;
}
return MaybeImproveInstructionSharding(instruction->operand(0)->sharding(),
instruction,
may_combine_partial_sharding);
}
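// Infers the sharding of `instruction` from the other members of its shard
// group, adopting a member's manual sharding outright when the instruction
// has no (or only a tile-maximal) sharding.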
bool ShardingPropagation::InferShardingFromShardGroup(
HloInstruction* instruction, int64_t aggressiveness,
const absl::flat_hash_set<HloInstruction*>& shard_group) {
if (!CanPropagateThroughAtAggressiveLevel(*instruction, aggressiveness)) {
return false;
}
if (instruction->has_sharding() && instruction->sharding().IsManual()) {
return false;
}
if (instruction->IsCustomCall(spmd::kShardBarrierTo)) {
return false;
}
if (!instruction->has_sharding() || instruction->sharding().IsTileMaximal()) {
for (const HloInstruction* member : shard_group) {
if (!member->has_sharding() || !member->sharding().IsManual() ||
member == instruction) {
continue;
}
instruction->set_sharding(member->sharding());
return true;
}
}
const bool may_combine_partial_sharding = is_spmd_ && aggressiveness > 0;
bool changed = false;
for (const HloInstruction* member : shard_group) {
if (member == instruction ||
member->IsCustomCall(spmd::kShardBarrierFrom)) {
continue;
}
changed |= MaybeImproveInstructionSharding(member->sharding(), instruction,
may_combine_partial_sharding);
}
return changed;
}
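// Infers the sharding of `instruction` from its operands, dispatching on the
// instruction's opcode. Returns true if a sharding was set or improved.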
bool ShardingPropagation::InferShardingFromOperands(
HloInstruction* instruction, const ComputationMap& computation_map,
int64_t aggressiveness, const CallGraph& call_graph,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
if (!CanPropagateThroughAtAggressiveLevel(*instruction, aggressiveness)) {
return false;
}
if (instruction->has_sharding() && instruction->sharding().IsManual()) {
return false;
}
const bool custom_call_condition =
instruction->opcode() == HloOpcode::kCustomCall &&
instruction->shape().IsTuple();
const bool async_instr_condition =
instruction->IsAsynchronous() &&
!HloInstruction::IsThreadIncluded(instruction->async_execution_thread(),
execution_threads);
if ((!instruction->has_sharding() ||
instruction->sharding().IsTileMaximal()) &&
(instruction->shape().IsArray() ||
instruction->opcode() == HloOpcode::kReduce ||
instruction->opcode() == HloOpcode::kSort ||
instruction->opcode() == HloOpcode::kReduceWindow ||
custom_call_condition || async_instr_condition)) {
for (const HloInstruction* op : instruction->operands()) {
if (!op->has_sharding() || !op->sharding().IsManual()) continue;
if (instruction->IsCustomCall("SPMDShardToFullShape")) {
return false;
}
if (aggressiveness == 0 &&
(instruction->opcode() == HloOpcode::kConcatenate ||
instruction->opcode() == HloOpcode::kDynamicSlice)) {
return false;
}
instruction->set_sharding(
HloSharding::Manual(op->sharding().metadata())
.NormalizeTupleSharding(instruction->shape()));
return true;
}
}
const bool may_combine_partial_sharding = is_spmd_ && aggressiveness > 0;
if (!SupportSpatialPartitioning(
instruction, computation_map, is_spmd_,
allow_spmd_sharding_propagation_to_output_,
          /*allow_spmd_sharding_propagation_to_parameters=*/false,
sharding_helper_.get())) {
if (instruction->shape().IsTuple() || instruction->operand_count() == 0 ||
instruction == instruction->parent()->root_instruction() ||
instruction->HasSideEffect()) {
return false;
}
for (const HloInstruction* op : instruction->operands()) {
if (op->has_sharding() && op->sharding().IsTileMaximal() &&
!op->sharding().HasUniqueDevice()) {
return MaybeImproveInstructionSharding(op->sharding(), instruction,
may_combine_partial_sharding);
}
}
return false;
}
auto get_maybe_tuple_sharding = [&](HloSharding sharding) {
if (instruction->shape().IsArray()) {
return sharding;
}
std::vector<HloSharding> tuple(instruction->shape().tuple_shapes_size(),
std::move(sharding));
return HloSharding::Tuple(instruction->shape(), tuple);
};
switch (instruction->opcode()) {
case HloOpcode::kGetTupleElement: {
const HloInstruction* operand = instruction->operand(0);
if (!hlo_sharding_util::IsSpatiallyPartitioned(operand)) {
return false;
}
HloSharding new_sharding = operand->sharding().GetSubSharding(
operand->shape(), {instruction->tuple_index()});
if (new_sharding.IsManual()) {
instruction->set_sharding(std::move(new_sharding));
return true;
}
return MaybeImproveInstructionSharding(
std::move(new_sharding), instruction, may_combine_partial_sharding,
ComputeNonRootUsers(instruction) == 1);
}
case HloOpcode::kTuple: {
if (absl::c_none_of(
instruction->operands(), [](const HloInstruction* hlo) {
return hlo_sharding_util::IsSpatiallyPartitioned(hlo);
})) {
return false;
}
const Shape& shape = instruction->shape();
std::vector<HloSharding> sub_shardings;
if (instruction->has_sharding()) {
sub_shardings = instruction->sharding().tuple_elements();
} else {
sub_shardings.assign(HloSharding::RequiredLeaves(shape),
HloSharding::Replicate());
}
auto is_more_specific = [instruction](const HloSharding& operand_sharding,
const HloSharding& existing) {
return !instruction->has_sharding() ||
hlo_sharding_util::IsShardingMoreSpecific(operand_sharding,
existing);
};
int64_t sub_sharding_index = 0;
for (int64_t i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
const HloInstruction* operand = instruction->operand(i);
if (operand->has_sharding()) {
if (operand->shape().IsTuple()) {
for (int64_t j = 0, e = ShapeUtil::GetLeafCount(operand->shape());
j < e; ++j) {
if (is_more_specific(operand->sharding().tuple_elements()[j],
sub_shardings[sub_sharding_index + j])) {
sub_shardings[sub_sharding_index + j] =
operand->sharding().tuple_elements()[j];
}
}
} else {
std::optional<HloSharding> op_sharding =
hlo_sharding_util::GetOutputSharding(operand);
CHECK(op_sharding.has_value())
<< "Expected sharding for " << operand->ToString();
if (is_more_specific(op_sharding.value(),
sub_shardings[sub_sharding_index])) {
sub_shardings[sub_sharding_index] = op_sharding.value();
}
}
}
sub_sharding_index += ShapeUtil::GetLeafCount(operand->shape());
}
HloSharding new_sharding = HloSharding::Tuple(shape, sub_shardings);
if (!instruction->has_sharding() ||
new_sharding != instruction->sharding()) {
instruction->set_sharding(std::move(new_sharding));
return true;
}
return false;
}
case HloOpcode::kReduce: {
return InferReduceShardingFromOperand(
instruction, may_combine_partial_sharding, is_spmd_);
}
case HloOpcode::kBroadcast: {
if (aggressiveness < 3) {
return false;
}
const HloInstruction* op = instruction->operand(0);
if (!hlo_sharding_util::IsSpatiallyPartitioned(op) ||
op->sharding().IsReplicated()) {
return false;
}
std::vector<int64_t> target_tile_assignment_dimensions;
const auto& dimensions = instruction->dimensions();
for (int64_t i = 0; i < instruction->shape().rank(); ++i) {
auto it = absl::c_find(dimensions, i);
if (it == dimensions.end()) {
target_tile_assignment_dimensions.push_back(1);
} else {
const int64_t source_dim = std::distance(dimensions.begin(), it);
target_tile_assignment_dimensions.push_back(
op->sharding().tile_assignment().dim(source_dim));
}
}
for (int64_t i = op->sharding().TiledDataRank();
i < op->sharding().tile_assignment().num_dimensions(); ++i) {
target_tile_assignment_dimensions.push_back(
op->sharding().tile_assignment().dim(i));
}
auto new_tile_assignment = op->sharding().tile_assignment().Reshape(
target_tile_assignment_dimensions);
HloSharding new_sharding =
op->sharding().ReplicateOnLastTileDim()
? HloSharding::PartialTile(new_tile_assignment,
op->sharding().metadata())
: HloSharding::Subgroup(new_tile_assignment,
op->sharding().subgroup_types(),
op->sharding().metadata());
return MaybeImproveInstructionSharding(
std::move(new_sharding), instruction, may_combine_partial_sharding,
          /*allow_aggressive_resharding=*/ComputeNonRootUsers(instruction) ==
              1);
}
case HloOpcode::kConcatenate: {
const HloInstruction* operand = PickRepresentativeOperand(instruction);
if (!operand || !hlo_sharding_util::IsSpatiallyPartitioned(operand)) {
return false;
}
if (aggressiveness == 0) {
for (const HloInstruction* concat_operand : instruction->operands()) {
if (!AggressiveConcatOperandShardingCanPassThrough(concat_operand)) {
return false;
}
const auto& tile_assignment =
concat_operand->sharding().tile_assignment();
for (int64_t i = 0; i < instruction->shape().rank(); ++i) {
if (absl::c_linear_search(instruction->dimensions(), i) &&
tile_assignment.dim(i) > 1) {
return false;
}
}
}
}
return MaybeImproveInstructionSharding(
operand->sharding(), instruction, may_combine_partial_sharding,
          /*allow_aggressive_resharding=*/ComputeNonRootUsers(instruction) ==
              1);
}
case HloOpcode::kConvolution:
return InferConvolutionShardingFromOperands(
instruction, call_graph, aggressiveness, may_combine_partial_sharding,
is_spmd_);
case HloOpcode::kTranspose: {
const HloInstruction* input = instruction->operand(0);
if (!hlo_sharding_util::IsSpatiallyPartitioned(input)) {
return false;
}
HloSharding sharding = hlo_sharding_util::TransposeSharding(
input->sharding(), instruction->dimensions());
return MaybeImproveInstructionSharding(
std::move(sharding), instruction, may_combine_partial_sharding,
          /*allow_aggressive_resharding=*/ComputeNonRootUsers(instruction) ==
              1);
}
case HloOpcode::kReduceWindow: {
auto* reduce_window = Cast<HloReduceWindowInstruction>(instruction);
auto has_dilation = [](const WindowDimension& dimensions) {
return dimensions.base_dilation() > 1 ||
dimensions.window_dilation() > 1;
};
if (absl::c_any_of(instruction->window().dimensions(), has_dilation)) {
VLOG(2) << "Not applying sharding to reduce window because dilatation "
"isn't supported yet: "
<< reduce_window->ToString();
return false;
}
bool changed = false;
for (HloInstruction* operand : reduce_window->inputs()) {
if (!hlo_sharding_util::IsSpatiallyPartitioned(operand)) {
continue;
}
changed |= MaybeImproveInstructionSharding(
get_maybe_tuple_sharding(operand->sharding()), reduce_window,
may_combine_partial_sharding,
ComputeNonRootUsers(instruction) == 1);
}
return changed;
}
case HloOpcode::kSelectAndScatter: {
const HloInstruction* lhs = instruction->operand(0);
if (!hlo_sharding_util::IsSpatiallyPartitioned(lhs)) {
return false;
}
auto has_base_dilation = [](const WindowDimension& dimensions) {
return dimensions.base_dilation() > 1;
};
if (absl::c_any_of(instruction->window().dimensions(),
has_base_dilation)) {
VLOG(2) << "Not applying sharding to select-and-scatter because "
"base dilation isn't supported yet: "
<< instruction->ToString();
return false;
}
return MaybeImproveInstructionSharding(
lhs->sharding(), instruction, may_combine_partial_sharding,
          /*allow_aggressive_resharding=*/ComputeNonRootUsers(instruction) ==
              1);
}
case HloOpcode::kReshape: {
if (!hlo_sharding_util::IsSpatiallyPartitioned(instruction->operand(0))) {
return false;
}
HloSharding new_sharding =
hlo_sharding_util::PropagateShardingThroughReshape(
instruction->operand(0)->shape(), instruction->shape(),
instruction->operand(0)->sharding());
return MaybeImproveInstructionSharding(
std::move(new_sharding), instruction, may_combine_partial_sharding,
          /*allow_aggressive_resharding=*/ComputeNonRootUsers(instruction) ==
              1);
}
case HloOpcode::kReverse: {
const HloInstruction* operand = instruction->operand(0);
if (!hlo_sharding_util::IsSpatiallyPartitioned(operand)) {
return false;
}
return MaybeImproveInstructionSharding(
hlo_sharding_util::ReverseSharding(operand->sharding(),
instruction->dimensions()),
instruction, may_combine_partial_sharding,
          /*allow_aggressive_resharding=*/ComputeNonRootUsers(instruction) ==
              1);
}
case HloOpcode::kDot: {
const auto& dnums =
dot_as_convolution_util::ParseDotGeneralFromDot(instruction);
return InferDotShardingFromOperands(instruction, call_graph, dnums,
may_combine_partial_sharding,
is_spmd_);
}
case HloOpcode::kParameter: {
auto parent_it = computation_map.find(instruction->parent());
if (parent_it == computation_map.end()) {
return false;
}
const HloInstruction* parent = parent_it->second;
switch (parent->opcode()) {
case HloOpcode::kConditional: {
for (int64_t i = 1; i < parent->operand_count(); ++i) {
if (parent->called_computations()[i - 1] == instruction->parent()) {
if (parent->operand(i)->has_sharding()) {
return MaybeImproveInstructionSharding(
parent->operand(i)->sharding(), instruction,
may_combine_partial_sharding);
}
return false;
}
}
return false;
}
case HloOpcode::kCall: {
int64_t i = instruction->parameter_number();
if (parent->operand(i)->has_sharding()) {
return MaybeImproveInstructionSharding(
parent->operand(i)->sharding(), instruction,
may_combine_partial_sharding);
}
return false;
}
default:
return false;
}
}
case HloOpcode::kSort: {
const HloInstruction* operand = PickRepresentativeOperand(instruction);
if (!operand || !hlo_sharding_util::IsSpatiallyPartitioned(operand)) {
return false;
}
HloSortInstruction* sort = DynCast<HloSortInstruction>(instruction);
CHECK(sort);
const int64_t sort_dim = sort->sort_dimension();
if (!operand->sharding().IsTileMaximal() &&
operand->sharding().tile_assignment().dim(sort_dim) != 1) {
if (!hlo_sharding_util::IsSortOperandShardingMovable(operand, sort_dim))
return false;
}
if (instruction->shape().IsTuple()) {
return MaybeImproveInstructionSharding(
HloSharding::SingleTuple(instruction->shape(), operand->sharding()),
instruction, may_combine_partial_sharding,
ComputeNonRootUsers(instruction) == 1);
} else {
return MaybeImproveInstructionSharding(
operand->sharding(), instruction, may_combine_partial_sharding,
ComputeNonRootUsers(instruction) == 1);
}
}
case HloOpcode::kDynamicSlice: {
return InferDynamicUpdateSliceShardingFromOperand1(
instruction, may_combine_partial_sharding);
}
case HloOpcode::kDynamicUpdateSlice: {
bool changed = InferDynamicUpdateSliceShardingFromOperand1(
instruction, may_combine_partial_sharding);
changed |= InferDynamicUpdateSliceShardingFromOperand0(
instruction, may_combine_partial_sharding);
return changed;
}
case HloOpcode::kGather: {
bool changed = false;
const GatherDimensionNumbers& dnums =
instruction->gather_dimension_numbers();
if (!dnums.operand_batching_dims().empty()) {
        hlo_sharding_util::GatherScatterParallelDims explicit_batch_dims;
        explicit_batch_dims.operand_parallel_dims.assign(
            dnums.operand_batching_dims().begin(),
            dnums.operand_batching_dims().end());
        explicit_batch_dims.indices_parallel_dims.assign(
            dnums.start_indices_batching_dims().begin(),
            dnums.start_indices_batching_dims().end());
        changed |= InferGatherParallelShardingFromOperands(
            instruction, explicit_batch_dims, may_combine_partial_sharding);
}
if (hlo_sharding_util::IsSpatiallyPartitioned(instruction->operand(1))) {
HloSharding new_sharding = hlo_sharding_util::
GatherOutputShardingFromIndexIndexPassthroughDimensions(
instruction->operand(1)->sharding(), instruction);
changed |= MaybeImproveInstructionSharding(
std::move(new_sharding), instruction, may_combine_partial_sharding);
}
if (is_spmd_) {
auto gather_parallel_dims =
hlo_sharding_util::GetGatherParallelBatchDims(*instruction,
call_graph);
if (gather_parallel_dims) {
changed |= InferGatherParallelShardingFromOperands(
instruction, *gather_parallel_dims, may_combine_partial_sharding);
}
if (hlo_sharding_util::IsSpatiallyPartitioned(
instruction->operand(0))) {
absl::Span<const int64_t> operand_parallel_dims;
if (gather_parallel_dims) {
operand_parallel_dims = absl::MakeConstSpan(
gather_parallel_dims->operand_parallel_dims);
}
HloSharding filtered_operand_sharding =
hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(
instruction->operand(0)->sharding(), operand_parallel_dims);
auto maybe_from_data = hlo_sharding_util::
GatherOutputShardingFromOperandOperandPassthroughDimensions(
filtered_operand_sharding, *instruction);
if (maybe_from_data) {
changed |= MaybeImproveInstructionSharding(
std::move(*maybe_from_data), instruction,
may_combine_partial_sharding);
}
}
}
return changed;
}
case HloOpcode::kScatter: {
auto& scatter = *Cast<HloScatterInstruction>(instruction);
bool changed = false;
const ScatterDimensionNumbers& dnums =
instruction->scatter_dimension_numbers();
if (!dnums.input_batching_dims().empty()) {
        hlo_sharding_util::GatherScatterParallelDims explicit_batch_dims;
        explicit_batch_dims.operand_parallel_dims.assign(
            dnums.input_batching_dims().begin(),
            dnums.input_batching_dims().end());
        explicit_batch_dims.indices_parallel_dims.assign(
            dnums.scatter_indices_batching_dims().begin(),
            dnums.scatter_indices_batching_dims().end());
        changed |= InferScatterParallelShardingFromOperands(
            instruction, explicit_batch_dims, may_combine_partial_sharding);
}
const int64_t operand_count = scatter.scatter_operand_count();
auto scatter_operands = scatter.scatter_operands();
auto scatter_indices = scatter.scatter_indices();
auto scatter_updates = scatter.scatter_updates();
if (is_spmd_) {
for (int64_t i = 0; i != operand_count; ++i) {
if (hlo_sharding_util::IsSpatiallyPartitioned(scatter_operands[i])) {
changed |= MaybeImproveInstructionSubSharding(
scatter_operands[i]->sharding(), instruction, {i},
may_combine_partial_sharding);
}
}
if (!hlo_sharding_util::IsSpatiallyPartitioned(scatter_indices) &&
absl::c_none_of(scatter_updates, [](const HloInstruction* update) {
return hlo_sharding_util::IsSpatiallyPartitioned(update);
})) {
return changed;
}
if (auto scatter_parallel_dims =
hlo_sharding_util::GetScatterParallelBatchDims(*instruction,
call_graph)) {
changed |= InferScatterParallelShardingFromOperands(
instruction, *scatter_parallel_dims,
may_combine_partial_sharding);
}
for (int64_t i = 0; i != operand_count; ++i) {
if (hlo_sharding_util::IsSpatiallyPartitioned(scatter_updates[i])) {
auto maybe_from_update =
hlo_sharding_util::ScatterOutputShardingFromUpdate(
scatter_updates[i]->sharding(), scatter);
if (maybe_from_update) {
changed |= MaybeImproveInstructionSubSharding(
std::move(*maybe_from_update), instruction, {i},
may_combine_partial_sharding);
}
}
}
} else {
for (int64_t i = 0; i != operand_count; ++i) {
changed |= MaybeImproveInstructionSubSharding(
HloSharding::Replicate(), instruction, {i},
may_combine_partial_sharding);
}
}
return changed;
}
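    // While: merge the init operand's sharding with any sharding already on
    // the while instruction itself.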
case HloOpcode::kWhile: {
if (!instruction->operand(0)->has_sharding()) {
return false;
}
auto sharding = instruction->operand(0)->sharding();
if (instruction->has_sharding()) {
hlo_sharding_util::MergeSharding(instruction->sharding(), &sharding,
may_combine_partial_sharding);
}
return MaybeImproveInstructionSharding(std::move(sharding), instruction,
may_combine_partial_sharding);
}
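    // Custom-call: prefer a registered custom-call partitioner, then the
    // generic sharding helper; otherwise fall back to the sharding of a
    // representative operand.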
case HloOpcode::kCustomCall: {
HloSharding inferred_operand_sharding = HloSharding::Replicate();
if (auto* partitioner =
GetCustomCallPartitioner(instruction->custom_call_target());
partitioner && partitioner->IsCustomCallShardable(instruction)) {
if (auto sharding =
partitioner->InferShardingFromOperands(instruction)) {
inferred_operand_sharding = *sharding;
} else {
return false;
}
} else if (sharding_helper_->IsCustomCallShardable(instruction)) {
if (auto sharding =
sharding_helper_->InferShardingFromOperands(instruction)) {
inferred_operand_sharding = *sharding;
} else {
return false;
}
} else {
const HloInstruction* operand = PickRepresentativeOperand(instruction);
if (!operand || !hlo_sharding_util::IsSpatiallyPartitioned(operand)) {
return false;
}
inferred_operand_sharding = operand->sharding();
}
      return MaybeImproveInstructionSharding(
          inferred_operand_sharding, instruction, may_combine_partial_sharding,
          ComputeNonRootUsers(instruction) == 1);
}
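    // Remaining ops: elementwise ops may merge partial shardings from every
    // spatially partitioned operand (rng replicates data dims instead);
    // everything else copies a representative operand's sharding.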
default: {
if (instruction->IsElementwise() && may_combine_partial_sharding) {
bool changed = false;
for (auto operand : instruction->operands()) {
if (hlo_sharding_util::IsSpatiallyPartitioned(operand)) {
if (instruction->opcode() == HloOpcode::kRng) {
changed |= MaybeImproveInstructionSharding(
hlo_sharding_util::ReplicateAllDataDims(
operand->sharding(), instruction->shape().rank()),
instruction, may_combine_partial_sharding,
ComputeNonRootUsers(instruction) == 1);
continue;
}
changed |= MaybeImproveInstructionSharding(
operand->sharding(), instruction, may_combine_partial_sharding,
instruction->operands().size() == 1 &&
ComputeNonRootUsers(instruction) == 1);
}
}
return changed;
}
const HloInstruction* operand = PickRepresentativeOperand(instruction);
if (!operand || !hlo_sharding_util::IsSpatiallyPartitioned(operand)) {
return false;
}
      return MaybeImproveInstructionSharding(
          operand->sharding(), instruction, may_combine_partial_sharding,
          ComputeNonRootUsers(instruction) == 1);
}
}
return false;
}
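// Tries to improve the sharding of `instruction` from its users (the
// backward pass). Manual shardings propagate eagerly onto unsharded or
// tile-maximal instructions; otherwise each user's inferred sharding is
// merged in through MaybeImproveInstructionSharding.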
bool ShardingPropagation::InferShardingFromUsers(
HloInstruction* instruction,
const ShardingPropagation::ComputationMap& computation_map,
int64_t aggressiveness, bool is_spmd,
const CustomCallShardingHelper* sharding_helper,
const CallGraph& call_graph) {
if (aggressiveness < 2 && instruction->opcode() == HloOpcode::kBroadcast) {
return false;
}
if (instruction->has_sharding() && instruction->sharding().IsManual()) {
return false;
}
if (!instruction->has_sharding() || instruction->sharding().IsTileMaximal()) {
for (const HloInstruction* user : instruction->users()) {
if (!user->has_sharding() || user->IsCustomCall("SPMDFullToShardShape"))
continue;
if (instruction->shape().IsArray() && user->sharding().IsManual()) {
instruction->set_sharding(
HloSharding::Manual(user->sharding().metadata()));
return true;
} else {
std::optional<HloSharding> user_sharding =
ShardingPropagation::GetShardingFromUser(
*instruction, *user, aggressiveness, is_spmd, call_graph,
sharding_helper);
if (user_sharding && user_sharding->IsManual()) {
instruction->set_sharding(std::move(*user_sharding));
return true;
}
}
}
}
  if (!SupportSpatialPartitioning(
          instruction, computation_map, is_spmd,
          /*allow_spmd_sharding_propagation_to_output=*/false,
          allow_spmd_sharding_propagation_to_parameters_, sharding_helper)) {
return false;
}
bool improved_sharding = false;
const bool may_combine_partial_sharding = is_spmd && aggressiveness > 0;
for (const HloInstruction* user : instruction->users()) {
if (user->opcode() == HloOpcode::kRngBitGenerator) {
instruction->set_sharding(HloSharding::Replicate());
return true;
}
std::optional<HloSharding> user_sharding =
ShardingPropagation::GetShardingFromUser(*instruction, *user,
aggressiveness, is_spmd,
call_graph, sharding_helper);
if (user_sharding && instruction->opcode() == HloOpcode::kCustomCall) {
if (auto* partitioner =
GetCustomCallPartitioner(instruction->custom_call_target())) {
if (partitioner->IsCustomCallShardable(instruction)) {
user_sharding = partitioner->PropagateUserSharding(instruction, user,
*user_sharding);
}
} else if (sharding_helper->IsCustomCallShardable(instruction)) {
user_sharding = sharding_helper->PropagateUserSharding(
instruction, user, *user_sharding);
}
}
if (user_sharding) {
improved_sharding |= MaybeImproveInstructionSharding(
std::move(*user_sharding), instruction, may_combine_partial_sharding);
}
}
return improved_sharding;
}
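// Propagates a freshly updated sharding between `instruction` and the
// related instructions of any computation it participates in (while bodies
// and conditions, conditional branches, calls, and their parameters),
// recording every instruction it updates in `changed`.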
void ShardingPropagation::MaybeComputationPropagation(
const ComputationMap& computation_map,
const absl::flat_hash_set<const HloInstruction*>& provided_shardings,
HloInstruction* instruction,
absl::flat_hash_set<HloInstruction*>* changed) {
auto propagate_to_instruction = [&](HloInstruction* search_inst) {
auto related_instructions =
GetRelatedInstructions(search_inst, computation_map);
if (absl::c_count(related_instructions, instruction)) {
for (HloInstruction* inst : related_instructions) {
if ((!inst->has_sharding() ||
inst->sharding() != instruction->sharding()) &&
!provided_shardings.contains(inst)) {
VLOG(2) << "Add computation sharding: " << inst->name() << " "
<< instruction->sharding().ToString();
inst->copy_sharding(instruction);
changed->insert(inst);
MaybeComputationPropagation(computation_map, provided_shardings, inst,
changed);
}
}
}
};
if (instruction->opcode() == HloOpcode::kConditional ||
instruction->opcode() == HloOpcode::kWhile ||
instruction->opcode() == HloOpcode::kCustomCall ||
instruction->opcode() == HloOpcode::kCall) {
propagate_to_instruction(instruction);
}
if (instruction->opcode() == HloOpcode::kParameter ||
instruction->parent()->root_instruction() == instruction) {
auto it = computation_map.find(instruction->parent());
if (it != computation_map.end()) {
propagate_to_instruction(it->second);
if (instruction->opcode() == HloOpcode::kParameter &&
(it->second->opcode() == HloOpcode::kConditional ||
it->second->opcode() == HloOpcode::kCall)) {
propagate_to_instruction(instruction);
}
}
}
}
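// Runs one aggressiveness level of propagation to a fixed point. Each
// iteration optionally propagates within shard groups, then infers
// shardings from operands in post order and from users in reverse post
// order; the per-instruction "already inferred" caches are invalidated
// through clear_cache whenever a sharding changes.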
absl::StatusOr<bool> ShardingPropagation::RunToFixPoint(
int64_t aggressiveness, bool propagate_shard_group,
const ComputationMap& computation_map,
const absl::flat_hash_set<const HloInstruction*>& provided_shardings,
const CallGraph& call_graph, HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads,
absl::flat_hash_map<const HloInstruction*, std::vector<int64_t>>&
unspecified_dims,
absl::flat_hash_map<HloInstruction*, int64_t>&
instruction_to_shard_group_id,
absl::flat_hash_map<int64_t, absl::flat_hash_set<HloInstruction*>>&
shard_group_id_to_shard_as_group,
absl::flat_hash_map<int64_t, absl::flat_hash_set<HloInstruction*>>&
shard_group_id_to_shard_like_group,
int64_t& iterations) {
bool changed = false;
absl::flat_hash_set<const HloInstruction*> already_inferred_from_shard_group;
absl::flat_hash_set<const HloInstruction*> already_inferred_from_operands;
absl::flat_hash_set<const HloInstruction*> already_inferred_from_users;
bool changed_last_iter = true;
const bool may_merge_partial = is_spmd_ && aggressiveness > 0;
while (changed_last_iter) {
changed_last_iter = false;
int64_t inferred_from_shard_group_counter = 0;
int64_t inferred_from_operand_counter = 0;
int64_t inferred_from_user_counter = 0;
int64_t instruction_counter = 0;
int64_t already_sharded_counter = 0;
for (const HloComputation* computation :
module->computations(execution_threads)) {
VLOG(2) << "Consider computation: " << computation->name();
std::vector<HloInstruction*> instructions =
computation->MakeInstructionPostOrder();
instruction_counter += instructions.size();
already_sharded_counter += absl::c_count_if(
instructions,
[](const HloInstruction* inst) { return inst->has_sharding(); });
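      // After `hlo`'s sharding changes, re-enable inference for its
      // neighborhood: operands (user-pass cache), users and their called
      // computations' parameters (operand-pass cache), and the remaining
      // members of its shard group, if any.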
auto clear_cache = [&](HloInstruction* hlo,
HloInstruction* hlo_for_users = nullptr) {
for (auto operand : hlo->operands()) {
already_inferred_from_users.erase(operand);
}
if (hlo_for_users == nullptr) {
hlo_for_users = hlo;
}
for (auto user : hlo_for_users->users()) {
already_inferred_from_operands.erase(user);
for (auto c : user->called_computations()) {
for (auto parameter : c->parameter_instructions()) {
already_inferred_from_operands.erase(parameter);
}
}
}
if (instruction_to_shard_group_id.contains(hlo)) {
const int64_t shard_group_id = instruction_to_shard_group_id.at(hlo);
const absl::flat_hash_set<HloInstruction*>& shard_group =
shard_group_id_to_shard_as_group.contains(shard_group_id)
? shard_group_id_to_shard_as_group.at(shard_group_id)
: shard_group_id_to_shard_like_group.at(shard_group_id);
for (HloInstruction* member : shard_group) {
if (member != hlo) {
already_inferred_from_shard_group.erase(member);
}
}
}
};
if (propagate_shard_group) {
for (HloInstruction* instruction : instructions) {
if (already_inferred_from_shard_group.contains(instruction)) {
continue;
}
if (!instruction_to_shard_group_id.contains(instruction)) {
continue;
}
const int64_t shard_group_id =
instruction_to_shard_group_id.at(instruction);
const absl::flat_hash_set<HloInstruction*>& shard_group =
shard_group_id_to_shard_as_group.contains(shard_group_id)
? shard_group_id_to_shard_as_group.at(shard_group_id)
: shard_group_id_to_shard_like_group.at(shard_group_id);
if (provided_shardings.contains(instruction)) {
if (!may_merge_partial) {
continue;
}
auto it = unspecified_dims.find(instruction);
if (it != unspecified_dims.end() &&
InferUnspecifiedDimsFromShardGroup(instruction, it->second,
shard_group)) {
++inferred_from_shard_group_counter;
VLOG(2) << "Refined partial sharding (shard group): "
<< instruction->ToString();
clear_cache(instruction);
already_inferred_from_shard_group.insert(instruction);
changed_last_iter = true;
}
continue;
}
already_inferred_from_shard_group.insert(instruction);
if (InferShardingFromShardGroup(instruction, aggressiveness,
shard_group)) {
++inferred_from_shard_group_counter;
changed = true;
VLOG(2) << "Add sharding (shard group): "
<< instruction->ToString();
absl::flat_hash_set<HloInstruction*> changed_in_comp_prop;
MaybeComputationPropagation(computation_map, provided_shardings,
instruction, &changed_in_comp_prop);
clear_cache(instruction);
for (auto hlo : changed_in_comp_prop) {
clear_cache(hlo);
}
changed_last_iter = true;
}
}
}
for (HloInstruction* instruction : instructions) {
if (already_inferred_from_operands.contains(instruction)) {
continue;
}
if (provided_shardings.contains(instruction)) {
if (!may_merge_partial) {
continue;
}
auto it = unspecified_dims.find(instruction);
HloInstruction* man_conversion_op_after;
if (it != unspecified_dims.end() &&
InferUnspecifiedDimsFromOperand(instruction, it->second,
&man_conversion_op_after)) {
++inferred_from_operand_counter;
VLOG(2) << "Refined partial sharding (forward-pass): "
<< instruction->ToString();
clear_cache(instruction, man_conversion_op_after);
already_inferred_from_operands.insert(instruction);
changed_last_iter = true;
}
continue;
}
already_inferred_from_operands.insert(instruction);
if (InferShardingFromOperands(instruction, computation_map,
aggressiveness, call_graph,
execution_threads)) {
++inferred_from_operand_counter;
changed = true;
VLOG(2) << "Add sharding (forward-pass): " << instruction->ToString();
absl::flat_hash_set<HloInstruction*> changed_in_comp_prop;
MaybeComputationPropagation(computation_map, provided_shardings,
instruction, &changed_in_comp_prop);
clear_cache(instruction);
for (auto hlo : changed_in_comp_prop) {
clear_cache(hlo);
}
changed_last_iter = true;
}
}
for (auto it = instructions.rbegin(); it != instructions.rend(); ++it) {
if ((*it)->IsCustomCall("SPMDFullToShardShape") ||
(*it)->IsCustomCall("SPMDShardToFullShape")) {
if (!already_inferred_from_users.contains(*it)) {
already_inferred_from_users.erase((*it)->operand(0));
}
}
if (already_inferred_from_users.contains(*it)) {
continue;
}
if (provided_shardings.contains(*it)) {
if (!may_merge_partial) {
continue;
}
auto uit = unspecified_dims.find(*it);
HloInstruction* man_conversion_op_after;
if (uit != unspecified_dims.end() &&
InferUnspecifiedDimsFromUsers(*it, uit->second, aggressiveness,
is_spmd_, &man_conversion_op_after,
call_graph)) {
++inferred_from_user_counter;
VLOG(2) << "Refined partial sharding (backward-pass): "
<< (*it)->ToString();
clear_cache(*it, man_conversion_op_after);
already_inferred_from_users.insert(*it);
if (man_conversion_op_after != nullptr) {
already_inferred_from_users.insert(man_conversion_op_after);
}
changed_last_iter = true;
}
continue;
}
already_inferred_from_users.insert(*it);
if (InferShardingFromUsers(*it, computation_map, aggressiveness,
is_spmd_, sharding_helper_.get(),
call_graph)) {
++inferred_from_user_counter;
changed = true;
VLOG(2) << "Add sharding (backward-pass): " << (*it)->ToString();
absl::flat_hash_set<HloInstruction*> changed_in_comp_prop;
MaybeComputationPropagation(computation_map, provided_shardings, *it,
&changed_in_comp_prop);
clear_cache(*it);
for (auto hlo : changed_in_comp_prop) {
clear_cache(hlo);
}
changed_last_iter = true;
}
}
}
VLOG(1) << "Sharding propagation iteration " << iterations << ";"
<< "\n total instructions: " << instruction_counter
<< "\n instructions already sharded: " << already_sharded_counter
<< "\n shardings inferred from shard group: "
<< inferred_from_shard_group_counter
<< "\n shardings inferred from operands: "
<< inferred_from_operand_counter
<< "\n shardings inferred from users: "
<< inferred_from_user_counter
<< "\n aggressiveness: " << aggressiveness;
++iterations;
}
return changed;
}
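// Returns the instructions whose shardings must stay in sync with `inst`:
// a while op with its body root and body/condition parameters, a
// conditional or call with its branch roots, or a parameter with the
// matching caller operand.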
std::vector<HloInstruction*> ShardingPropagation::GetRelatedInstructions(
HloInstruction* inst, const ComputationMap& computation_map) {
if (inst->opcode() == HloOpcode::kWhile) {
return std::vector<HloInstruction*>{
inst, inst->while_body()->root_instruction(),
inst->while_body()->parameter_instruction(0),
inst->while_condition()->parameter_instruction(0)};
} else if (inst->opcode() == HloOpcode::kConditional) {
const auto& called_computations = inst->called_computations();
std::vector<HloInstruction*> comps;
comps.reserve(called_computations.size() + 1);
comps.push_back(inst);
for (HloComputation* c : called_computations) {
comps.push_back(c->root_instruction());
}
return comps;
} else if (inst->opcode() == HloOpcode::kCustomCall) {
if (sharding_helper_ && sharding_helper_->IsCustomCallShardable(inst)) {
return sharding_helper_->GetRelatedInstructions(inst);
} else {
return std::vector<HloInstruction*>{};
}
} else if (inst->opcode() == HloOpcode::kCall) {
HloComputation* callee = inst->called_computations().front();
return std::vector<HloInstruction*>{inst, callee->root_instruction()};
} else if (inst->opcode() == HloOpcode::kParameter) {
auto it = computation_map.find(inst->parent());
if (it != computation_map.end()) {
if (it->second->opcode() == HloOpcode::kConditional) {
HloInstruction* cond = it->second;
for (int64_t i = 1; i < cond->operand_count(); ++i) {
if (cond->called_computations()[i - 1] == inst->parent()) {
return std::vector<HloInstruction*>{inst, cond->mutable_operand(i)};
}
}
}
if (it->second->opcode() == HloOpcode::kCall) {
HloInstruction* call = it->second;
int64_t operand_index = inst->parameter_number();
CHECK_LT(operand_index, call->operand_count());
return std::vector<HloInstruction*>{
inst, call->mutable_operand(operand_index)};
}
}
return std::vector<HloInstruction*>{};
} else {
CHECK(false);
}
}
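// Pass entry point: registers the shard-barrier partitioners, collects
// shard groups and the computation map, seeds user-provided shardings, runs
// RunToFixPoint at increasing aggressiveness, aligns shard-as groups,
// removes the shard-barrier custom-calls, and finally applies the
// output/parameter propagation policies.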
absl::StatusOr<bool> ShardingPropagation::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
ABSL_CONST_INIT static absl::once_flag did_registration;
absl::call_once(did_registration, [] {
RegisterCustomCallPartitioner(
spmd::kShardBarrierFrom,
std::make_unique<spmd::ShardBarrierFromPartitioner>());
RegisterCustomCallPartitioner(
spmd::kShardBarrierTo,
std::make_unique<spmd::ShardBarrierToPartitioner>());
});
std::optional<absl::flat_hash_map<const HloInstruction*, HloSharding>>
original_sharding;
bool any_changed = false;
if (cse_prevention_only_) {
original_sharding.emplace();
for (auto computation : module->computations(execution_threads)) {
for (auto instruction : computation->instructions()) {
if (instruction->has_sharding()) {
original_sharding->emplace(instruction, instruction->sharding());
}
}
}
} else {
for (auto computation : module->computations(execution_threads)) {
for (auto instruction : computation->instructions()) {
if (instruction->has_sharding() &&
IsCSEPreventionSharding(instruction->sharding())) {
instruction->clear_sharding();
any_changed = true;
}
}
}
}
any_changed |= propagate_metadata_
? AssignShardingMetadata(module, execution_threads)
: RemoveShardingMetadata(module, execution_threads);
absl::flat_hash_map<const HloInstruction*, std::vector<int64_t>>
unspecified_dims;
std::vector<HloSharding> saved_root_shardings;
absl::flat_hash_map<int64_t, HloSharding> saved_parameter_shardings;
absl::flat_hash_map<HloInstruction*, int64_t> instruction_to_shard_group_id;
absl::flat_hash_map<int64_t, absl::flat_hash_set<HloInstruction*>>
shard_group_id_to_shard_as_group;
absl::flat_hash_map<int64_t, absl::flat_hash_set<HloInstruction*>>
shard_group_id_to_shard_like_group;
TF_ASSIGN_OR_RETURN(
bool changed,
ProcessShardingInstruction(
module, execution_threads, !cse_prevention_only_, &unspecified_dims,
allow_spmd_sharding_propagation_to_output_ ? &saved_root_shardings
: nullptr,
allow_spmd_sharding_propagation_to_parameters_
? &saved_parameter_shardings
: nullptr,
&instruction_to_shard_group_id, &shard_group_id_to_shard_as_group,
&shard_group_id_to_shard_like_group,
&allow_spmd_sharding_propagation_to_parameters_vector_));
any_changed |= changed;
for (const auto& [shard_group_id, shard_as_group] :
shard_group_id_to_shard_as_group) {
VLOG(5) << "Shard-As group " << shard_group_id << " contains:";
for (auto instruction : shard_as_group) {
VLOG(5) << " " << instruction->ToString();
}
}
for (const auto& [shard_group_id, shard_like_group] :
shard_group_id_to_shard_like_group) {
VLOG(5) << "Shard-Like group " << shard_group_id << " contains:";
for (auto instruction : shard_like_group) {
VLOG(5) << " " << instruction->ToString();
}
}
if (allow_spmd_sharding_propagation_to_output_) {
CHECK(!module->entry_computation()->root_instruction()->has_sharding() ||
allow_spmd_sharding_propagation_to_output_vector_.size() == 1 ||
module->entry_computation()
->root_instruction()
->sharding()
.tuple_elements()
.size() ==
allow_spmd_sharding_propagation_to_output_vector_.size())
<< "allow-spmd-sharding-propagation-to-output-vector's size can be "
"either 1 or the number of elements in the root tuple of entry "
"computation.";
}
if (allow_spmd_sharding_propagation_to_parameters_) {
auto is_same_sized_tuple = [](HloModule* module, int64_t size) {
if (module->entry_computation()->num_parameters() != 1) {
return false;
}
HloInstruction* param =
module->entry_computation()->parameter_instruction(0);
return param->shape().IsTuple() &&
size == param->shape().tuple_shapes_size();
};
auto size = allow_spmd_sharding_propagation_to_parameters_vector_.size();
CHECK(size == 1 || size == module->entry_computation()->num_parameters() ||
is_same_sized_tuple(module, size))
<< "allow-spmd-sharding-propagation-to-parameters-vector's size can be "
"either 1 or the number of parameters in the entry computation.";
}
ComputationMap computation_map;
absl::flat_hash_set<const HloInstruction*> provided_shardings;
for (auto computation : module->computations(execution_threads)) {
for (auto instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kWhile) {
TF_RETURN_IF_ERROR(
CheckAndUpdateDeviceAssignmentsInWhileBody(instruction));
}
}
}
for (auto computation : module->computations(execution_threads)) {
for (auto instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kWhile ||
instruction->opcode() == HloOpcode::kConditional ||
instruction->opcode() == HloOpcode::kCall) {
const HloInstruction* sharded_inst = nullptr;
auto related_instructions =
GetRelatedInstructions(instruction, computation_map);
for (auto inst : related_instructions) {
if (inst->has_sharding()) {
sharded_inst = inst;
break;
}
}
if (sharded_inst != nullptr) {
for (auto inst : related_instructions) {
inst->copy_sharding(sharded_inst);
}
}
if (instruction->opcode() == HloOpcode::kWhile) {
computation_map[instruction->while_body()] = instruction;
computation_map[instruction->while_condition()] = instruction;
} else {
for (HloComputation* c : instruction->called_computations()) {
computation_map[c] = instruction;
}
}
}
}
}
for (const HloComputation* computation :
module->computations(execution_threads)) {
for (const HloInstruction* inst : computation->instructions()) {
if (inst->has_sharding() &&
inst != module->entry_computation()->root_instruction() &&
inst->opcode() != HloOpcode::kParameter &&
!inst->sharding().IsUnknown()) {
provided_shardings.insert(inst);
}
}
}
HloInstruction* entry_root = module->entry_computation()->root_instruction();
if (!allow_spmd_sharding_propagation_to_output_ &&
(!entry_root->has_sharding() || !entry_root->sharding().IsUnknown())) {
if (entry_root->opcode() == HloOpcode::kWhile) {
HloInstruction* copy = module->entry_computation()->AddInstruction(
HloInstruction::CreateUnary(entry_root->shape(), HloOpcode::kCopy,
entry_root));
if (entry_root->has_sharding()) {
copy->set_sharding(entry_root->sharding());
}
module->entry_computation()->set_root_instruction(copy);
entry_root = copy;
any_changed = true;
}
provided_shardings.insert(entry_root);
}
if (!allow_spmd_sharding_propagation_to_parameters_) {
for (auto param : module->entry_computation()->parameter_instructions()) {
if (param->has_sharding() && !param->sharding().IsUnknown()) {
provided_shardings.insert(param);
}
}
}
for (HloComputation* computation : module->computations(execution_threads)) {
auto instructions = computation->MakeInstructionPostOrder();
for (auto it = instructions.rbegin(); it != instructions.rend(); ++it) {
HloInstruction* instruction = *it;
if (instruction->has_sharding() && instruction->sharding().IsUnknown()) {
instruction->set_sharding(
HloSharding::Replicate(instruction->sharding().metadata()));
}
}
}
int64_t iterations = 0;
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
for (int64_t aggressiveness = 0; aggressiveness < 4; ++aggressiveness) {
TF_ASSIGN_OR_RETURN(
bool changed,
        RunToFixPoint(aggressiveness, /*propagate_shard_group=*/true,
computation_map, provided_shardings, *call_graph, module,
execution_threads, unspecified_dims,
instruction_to_shard_group_id,
shard_group_id_to_shard_as_group,
shard_group_id_to_shard_like_group, iterations));
any_changed = any_changed || changed;
}
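  // Align every member of each shard-as group to one common sharding,
  // chosen by FindCommonSharding over the members' shardings (with the
  // first non-barrier sharding, or replication, as the default).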
for (const auto& [shard_as_group_id, shard_as_group] :
shard_group_id_to_shard_as_group) {
HloSharding default_sharding = HloSharding::Replicate();
std::vector<HloSharding> shardings;
for (HloInstruction* instruction : shard_as_group) {
if (instruction->has_sharding()) {
shardings.push_back(instruction->sharding());
if (!instruction->IsCustomCall(spmd::kShardBarrierFrom) &&
default_sharding.IsReplicated()) {
default_sharding = instruction->sharding();
}
}
}
HloSharding common_sharding = shardings.empty()
? default_sharding
: hlo_sharding_util::FindCommonSharding(
shardings, default_sharding);
VLOG(2) << "Aligning shard group: " << shard_as_group_id
<< " to sharding:" << common_sharding.ToString();
for (HloInstruction* member : shard_as_group) {
if (member->IsCustomCall(spmd::kShardBarrierTo)) {
continue;
}
if (provided_shardings.contains(member)) {
auto it = unspecified_dims.find(member);
if (it != unspecified_dims.end()) {
HloSharding partial_replicated =
hlo_sharding_util::PartiallyReplicateTiledShardingOnAllDimsExcept(
common_sharding, it->second);
HloSharding sharding = member->sharding();
if (hlo_sharding_util::MergeShardingIfCompatible(partial_replicated,
&sharding)) {
member->set_sharding(sharding);
}
}
        continue;
      }
      member->set_sharding(common_sharding);
}
}
for (HloComputation* computation : module->computations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->IsCustomCall(spmd::kShardBarrierFrom) &&
instruction_to_shard_group_id.contains(instruction) &&
shard_group_id_to_shard_as_group.contains(
instruction_to_shard_group_id.at(instruction))) {
HloSharding sharding = instruction->sharding();
hlo_sharding_util::MergeShardingIfCompatible(
instruction->mutable_operand(0)->sharding(), sharding.NumTiles(),
&sharding);
instruction->mutable_operand(0)->set_sharding(std::move(sharding));
}
}
}
{
TF_ASSIGN_OR_RETURN(
bool changed,
        RunToFixPoint(/*aggressiveness=*/3, /*propagate_shard_group=*/true,
computation_map, provided_shardings, *call_graph, module,
execution_threads, unspecified_dims,
instruction_to_shard_group_id,
shard_group_id_to_shard_as_group,
shard_group_id_to_shard_like_group, iterations));
any_changed = any_changed || changed;
}
for (HloComputation* computation : module->computations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->IsCustomCall(spmd::kShardBarrierFrom) &&
instruction_to_shard_group_id.contains(instruction) &&
shard_group_id_to_shard_as_group.contains(
instruction_to_shard_group_id.at(instruction))) {
HloSharding sharding = instruction->sharding();
hlo_sharding_util::MergeShardingIfCompatible(
instruction->mutable_operand(0)->sharding(), sharding.NumTiles(),
&sharding);
instruction->mutable_operand(0)->set_sharding(std::move(sharding));
}
if (instruction->IsCustomCall(spmd::kShardBarrierFrom) ||
instruction->IsCustomCall(spmd::kShardBarrierTo)) {
        TF_ASSIGN_OR_RETURN(std::ignore,
                            computation->ReplaceInstruction(
                                instruction, instruction->mutable_operand(0),
                                /*preserve_sharding=*/false,
                                /*relay_control_dependency=*/false,
                                /*remove_unused_operands=*/false));
}
}
}
if (cse_prevention_only_) {
for (auto computation : module->computations(execution_threads)) {
for (auto instruction : computation->instructions()) {
if (!instruction->has_sharding()) {
continue;
}
        if (IsCSEPreventionTarget(instruction)) {
if (!(*original_sharding).contains(instruction)) {
instruction->set_sharding(
SetCSEPreventionSharding(instruction->sharding()));
}
continue;
}
auto it = (*original_sharding).find(instruction);
if (it != (*original_sharding).end()) {
instruction->set_sharding(it->second);
} else {
instruction->clear_sharding();
}
}
}
}
HloInstruction* root_instruction =
module->entry_computation()->root_instruction();
if (saved_root_shardings.size() ==
allow_spmd_sharding_propagation_to_output_vector_.size() &&
root_instruction->has_sharding()) {
HloSharding root_sharding = root_instruction->sharding();
for (int i = 0; i < saved_root_shardings.size(); ++i) {
if (!allow_spmd_sharding_propagation_to_output_vector_[i] &&
!saved_root_shardings[i].IsUnknown()) {
root_sharding.tuple_elements()[i] = saved_root_shardings[i];
}
}
root_instruction->set_sharding(std::move(root_sharding));
}
auto params = module->entry_computation()->parameter_instructions();
if (allow_spmd_sharding_propagation_to_parameters_) {
if (allow_spmd_sharding_propagation_to_parameters_vector_.size() ==
params.size()) {
for (int64_t i = 0; i < params.size(); ++i) {
if (!allow_spmd_sharding_propagation_to_parameters_vector_[i]) {
if (saved_parameter_shardings.contains(i) &&
!saved_parameter_shardings.at(i).IsUnknown()) {
params[i]->set_sharding(saved_parameter_shardings.at(i));
} else {
params[i]->clear_sharding();
}
}
}
} else if (params.size() == 1 && saved_parameter_shardings.size() == 1 &&
params[0]->shape().IsTuple() &&
params[0]->shape().tuple_shapes_size() ==
allow_spmd_sharding_propagation_to_parameters_vector_
.size()) {
HloSharding param_sharding = params[0]->sharding();
for (int64_t i = 0; i < params[0]->shape().tuple_shapes_size(); ++i) {
HloSharding saved_subsharding =
saved_parameter_shardings.at(0).GetSubSharding(params[0]->shape(),
{i});
if (!allow_spmd_sharding_propagation_to_parameters_vector_[i] &&
!saved_subsharding.IsUnknown()) {
param_sharding.tuple_elements()[i] = saved_subsharding;
}
}
params[0]->set_sharding(std::move(param_sharding));
}
}
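  // Returns true if `sharding` tiles every dimension of `shape` evenly,
  // recursing into tuples; used below to decide whether a propagated
  // output or parameter sharding has to be reset to replicated.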
std::function<bool(const Shape&, const HloSharding&)> evenly_partitions =
[&evenly_partitions](const Shape& shape,
const HloSharding& sharding) -> bool {
if (!sharding.IsTiled()) {
return true;
}
if (sharding.IsTileMaximal()) {
return sharding.IsReplicated();
}
if (sharding.IsTuple()) {
for (int64_t i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
if (!evenly_partitions(ShapeUtil::GetTupleElementShape(shape, i),
sharding.GetSubSharding(shape, {i}))) {
return false;
}
}
}
for (int64_t i = 0; i < shape.dimensions_size(); ++i) {
if (shape.dimensions(i) % sharding.tile_assignment().dim(i) != 0) {
return false;
}
}
return true;
};
if (allow_spmd_sharding_propagation_to_output_ &&
root_instruction->has_sharding()) {
if (root_instruction->shape().IsTuple() &&
allow_spmd_sharding_propagation_to_output_vector_.size() ==
root_instruction->shape().tuple_shapes_size()) {
HloSharding root_sharding = root_instruction->sharding();
for (int64_t i = 0; i < root_instruction->shape().tuple_shapes_size();
++i) {
if (allow_spmd_sharding_propagation_to_output_vector_[i] &&
!evenly_partitions(root_instruction->shape().tuple_shapes(i),
root_sharding.tuple_elements()[i])) {
root_sharding.tuple_elements()[i] = HloSharding::Replicate();
}
}
root_instruction->set_sharding(std::move(root_sharding));
} else if (!root_instruction->shape().IsTuple()) {
if (!evenly_partitions(root_instruction->shape(),
root_instruction->sharding())) {
root_instruction->set_sharding(HloSharding::Replicate());
}
}
}
if (allow_spmd_sharding_propagation_to_parameters_) {
if (allow_spmd_sharding_propagation_to_parameters_vector_.size() ==
params.size()) {
for (int64_t i = 0; i < params.size(); ++i) {
if (params[i]->has_sharding() &&
allow_spmd_sharding_propagation_to_parameters_vector_[i] &&
!evenly_partitions(params[i]->shape(), params[i]->sharding())) {
params[i]->set_sharding(HloSharding::Replicate());
}
}
} else if (params.size() == 1 && params[0]->shape().IsTuple() &&
params[0]->has_sharding() &&
params[0]->shape().tuple_shapes_size() ==
allow_spmd_sharding_propagation_to_parameters_vector_
.size()) {
HloSharding param_sharding = params[0]->sharding();
for (int64_t i = 0; i < params[0]->shape().tuple_shapes_size(); ++i) {
if (allow_spmd_sharding_propagation_to_parameters_vector_[i] &&
!evenly_partitions(
ShapeUtil::GetSubshapeOneIndex(params[0]->shape(), i),
params[0]->sharding().GetSubSharding(params[0]->shape(),
{i}))) {
param_sharding.tuple_elements()[i] = HloSharding::Replicate();
}
}
params[0]->set_sharding(std::move(param_sharding));
}
}
TF_RETURN_IF_ERROR(
hlo_sharding_util::CanonicalizeLayoutAfterShardingPropagation(
module, allow_spmd_sharding_propagation_to_output_,
allow_spmd_sharding_propagation_to_parameters_));
VLOG(1) << "Sharding propagation completed after " << iterations
<< " iterations";
return any_changed;
}
}
#include "xla/service/sharding_propagation.h"
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_op_metadata.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/transforms/hlo_constant_splitter.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/protobuf_util.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
using ShardingPropagationTest = HloTestBase;
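// Strips op metadata from every instruction and from every sharding in the
// module, letting tests simulate modules that carry no metadata.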
void ClearMetadata(HloModule* module) {
for (HloComputation* computation : module->computations()) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->metadata().ByteSizeLong() != 0) {
instruction->set_metadata(OpMetadata());
}
if (!instruction->has_sharding()) {
continue;
}
instruction->set_sharding(instruction->sharding().WithoutMetadata());
}
}
}
struct MetadataTestParameter {
explicit MetadataTestParameter(bool propagate_metadata, bool clear_metadata)
: propagate_metadata(propagate_metadata),
clear_metadata(clear_metadata) {}
bool propagate_metadata = false;
bool clear_metadata = false;
};
struct MetadataTestParameterWithOutput {
explicit MetadataTestParameterWithOutput(bool propagate_metadata,
bool clear_metadata,
bool allow_root_sharding_propagation)
: propagate_metadata(propagate_metadata),
clear_metadata(clear_metadata),
allow_root_sharding_propagation(allow_root_sharding_propagation) {}
bool propagate_metadata = false;
bool clear_metadata = false;
bool allow_root_sharding_propagation = false;
};
class ParameterizedMetadataTest
: public HloTestBase,
public ::testing::WithParamInterface<MetadataTestParameter> {};
class ParameterizedMetadataTestWithOutput
: public HloTestBase,
public ::testing::WithParamInterface<MetadataTestParameterWithOutput> {};
std::string OpMetadataListToString(absl::Span<const OpMetadata> metadata) {
std::vector<std::string> metadata_strings;
metadata_strings.reserve(metadata.size());
for (const OpMetadata& element : metadata) {
metadata_strings.push_back(
absl::StrCat("{", OpMetadataToString(element), "}"));
}
return absl::StrCat("{", absl::StrJoin(metadata_strings, ", "), "}");
}
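// gMock matcher that compares an HloSharding's metadata element-wise
// against the expected OpMetadata protos.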
class HloShardingMetadataMatcher
: public ::testing::MatcherInterface<const HloSharding&> {
public:
explicit HloShardingMetadataMatcher(absl::Span<const OpMetadata> metadata)
: metadata_(metadata.begin(), metadata.end()) {}
bool MatchAndExplain(
const HloSharding& sharding,
::testing::MatchResultListener* listener) const override {
if (sharding.metadata().size() != metadata_.size()) {
*listener << sharding.ToString(true)
<< " has incorrect sharding metadata (expected: "
<< OpMetadataListToString(metadata_) << ")";
return false;
}
for (int i = 0, e = metadata_.size(); i < e; ++i) {
if (!protobuf_util::ProtobufEquals(sharding.metadata()[i],
metadata_[i])) {
*listener << sharding.ToString(true)
<< " has incorrect sharding metadata (expected: "
<< OpMetadataListToString(metadata_) << ")";
return false;
}
}
return true;
}
void DescribeTo(std::ostream* os) const override {
*os << OpMetadataListToString(metadata_);
}
private:
std::vector<OpMetadata> metadata_;
};
::testing::Matcher<const HloSharding&> ShardingMetadata(
absl::Span<const OpMetadata> metadata) {
return ::testing::MakeMatcher(new HloShardingMetadataMatcher(metadata));
}
OpMetadata CreateMetadata(const std::string& op_name) {
OpMetadata metadata;
metadata.set_op_name(op_name);
return metadata;
}
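// Instantiate the parameterized tests over every combination of
// {propagate_metadata, clear_metadata} and, for the WithOutput variant,
// allow_root_sharding_propagation.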
INSTANTIATE_TEST_SUITE_P(
ShardingPropagation, ParameterizedMetadataTest,
    ::testing::Values(MetadataTestParameter(/*propagate_metadata=*/false,
                                            /*clear_metadata=*/false),
                      MetadataTestParameter(/*propagate_metadata=*/false,
                                            /*clear_metadata=*/true),
                      MetadataTestParameter(/*propagate_metadata=*/true,
                                            /*clear_metadata=*/false),
                      MetadataTestParameter(/*propagate_metadata=*/true,
                                            /*clear_metadata=*/true)),
[](const ::testing::TestParamInfo<MetadataTestParameter>& info) {
return absl::StrCat(info.param.propagate_metadata
? "MetadataPropagation"
: "NoMetadataPropagation",
"_",
info.param.clear_metadata ? "NoMetadataInModule"
: "MetadataInModule");
});
INSTANTIATE_TEST_SUITE_P(
ShardingPropagation, ParameterizedMetadataTestWithOutput,
    ::testing::Values(MetadataTestParameterWithOutput(
                          /*propagate_metadata=*/false,
                          /*clear_metadata=*/false,
                          /*allow_root_sharding_propagation=*/false),
                      MetadataTestParameterWithOutput(
                          /*propagate_metadata=*/false,
                          /*clear_metadata=*/true,
                          /*allow_root_sharding_propagation=*/false),
                      MetadataTestParameterWithOutput(
                          /*propagate_metadata=*/true,
                          /*clear_metadata=*/false,
                          /*allow_root_sharding_propagation=*/false),
                      MetadataTestParameterWithOutput(
                          /*propagate_metadata=*/true,
                          /*clear_metadata=*/true,
                          /*allow_root_sharding_propagation=*/false),
                      MetadataTestParameterWithOutput(
                          /*propagate_metadata=*/false,
                          /*clear_metadata=*/false,
                          /*allow_root_sharding_propagation=*/true),
                      MetadataTestParameterWithOutput(
                          /*propagate_metadata=*/false,
                          /*clear_metadata=*/true,
                          /*allow_root_sharding_propagation=*/true),
                      MetadataTestParameterWithOutput(
                          /*propagate_metadata=*/true,
                          /*clear_metadata=*/false,
                          /*allow_root_sharding_propagation=*/true),
                      MetadataTestParameterWithOutput(
                          /*propagate_metadata=*/true,
                          /*clear_metadata=*/true,
                          /*allow_root_sharding_propagation=*/true)),
[](const ::testing::TestParamInfo<MetadataTestParameterWithOutput>& info) {
return absl::StrCat(
info.param.propagate_metadata ? "MetadataPropagation"
: "NoMetadataPropagation",
"_",
info.param.clear_metadata ? "NoMetadataInModule" : "MetadataInModule",
"_",
info.param.allow_root_sharding_propagation ? "PropagateToRoot"
: "NoPropagateToRoot");
});
TEST_P(ParameterizedMetadataTest, ShardingMetadataFromInstruction) {
const char* const hlo_string = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
sharding={devices=[1,2,2,1]0,1,2,3},
metadata={op_name="test"}
ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%param0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
EXPECT_EQ(changed,
GetParam().propagate_metadata && !GetParam().clear_metadata);
auto* instruction = FindInstruction(module.get(), "param0");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("test")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_F(ShardingPropagationTest, ShardingMetadataFromInstructionNoOverwrite) {
const char* const hlo_string = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="name"}},
metadata={op_name="test"}
ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%param0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          ShardingPropagation(/*is_spmd=*/false,
                                              /*propagate_metadata=*/true)
                              .Run(module.get()));
EXPECT_FALSE(changed);
auto* instruction = FindInstruction(module.get(), "param0");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("name")}));
}
TEST_F(ShardingPropagationTest, ShardingMetadataFromInstructionNoMetadata) {
const char* const hlo_string = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="name"}}
ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%param0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          ShardingPropagation(/*is_spmd=*/false,
                                              /*propagate_metadata=*/true)
                              .Run(module.get()));
EXPECT_FALSE(changed);
auto* instruction = FindInstruction(module.get(), "param0");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("name")}));
}
TEST_F(ShardingPropagationTest, ShardingNoMetadataAndInstructionNoMetadata) {
const char* const hlo_string = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
sharding={devices=[1,2,2,1]0,1,2,3}
ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%param0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          ShardingPropagation(/*is_spmd=*/false,
                                              /*propagate_metadata=*/true)
                              .Run(module.get()));
EXPECT_FALSE(changed);
auto* instruction = FindInstruction(module.get(), "param0");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
TEST_P(ParameterizedMetadataTest, ElementwiseOperationForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}}
%param1 = f32[5,7,11,13]{3,2,1,0} parameter(1)
%add = f32[5,7,11,13]{3,2,1,0} add(%param0, %param1)
ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "add");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, ElementwiseOperationBackwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0)
%param1 = f32[5,7,11,13]{3,2,1,0} parameter(1)
%add = f32[5,7,11,13]{3,2,1,0} add(%param0, %param1)
ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%add),
sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "add");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTestWithOutput, BroadcastForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
%param0 = f32[3,2048,2048]{2,1,0} parameter(0),
sharding={devices=[1,2,2]0,1,2,3 metadata={op_name="a"}}
%broadcast = f32[3,2048,2048,3]{3,2,1,0} broadcast(%param0), dimensions={0,1,2}
ROOT %copy = f32[3,2048,2048,3]{3,2,1,0} copy(%broadcast)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata,
{GetParam().allow_root_sharding_propagation})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "broadcast");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
if (GetParam().allow_root_sharding_propagation) {
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
}
}
TEST_P(ParameterizedMetadataTestWithOutput, BroadcastForwardPassWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
%param0 = f32[3,2048,2048]{2,1,0} parameter(0),
sharding={devices=[1,2,2]0,1,2,3 metadata={op_name="a"}}
%shard-barrier-from = f32[3,2048,2048]{2,1,0} custom-call(%param0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%broadcast = f32[3,2048,2048,3]{3,2,1,0} broadcast(%shard-barrier-from), dimensions={0,1,2}
ROOT %copy = f32[3,2048,2048,3]{3,2,1,0} copy(%broadcast)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata,
{GetParam().allow_root_sharding_propagation})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "broadcast");
ASSERT_NE(instruction, nullptr);
EXPECT_FALSE(instruction->has_sharding());
}
TEST_P(ParameterizedMetadataTest, BroadcastBackwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
%param0 = f32[13]{0} parameter(0)
%broadcast = f32[5,7,11,13]{3,2,1,0} broadcast(%param0), dimensions={3}
ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%broadcast),
sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "broadcast");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, BroadcastBackwardPassWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
%param0 = f32[13]{0} parameter(0)
%param0_copy = f32[13]{0} copy(param0)
%shard-barrier-to = f32[13]{0} custom-call(%param0_copy), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
%broadcast = f32[5,7,11,13]{3,2,1,0} broadcast(%shard-barrier-to), dimensions={3}
ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%broadcast),
sharding={devices=[1,1,2,2]0,1,2,3 metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "param0_copy");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{replicated}"));
}
TEST_P(ParameterizedMetadataTest, Broadcast1DBackwardNoChange) {
const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
%param0 = s32[128]{0} parameter(0)
%constant0 = s32[] constant(0), sharding={replicated}
%broadcast = s32[128]{0} broadcast(%constant0), dimensions={}, sharding={replicated}
ROOT %compare = pred[128]{0} compare(s32[128]{0} %param0, s32[128]{0} %broadcast),
direction=NE, sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
EXPECT_FALSE(changed);
auto* instruction = FindInstruction(module.get(), "broadcast");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{replicated}"));
}
TEST_P(ParameterizedMetadataTestWithOutput, BroadcastForwardPartial) {
const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
%param0 = f32[3,2048]parameter(0),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
%broadcast = f32[3,2048,3] broadcast(%param0), dimensions={0,1}
ROOT %copy = f32[3,2048,3] copy(%broadcast)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata,
{GetParam().allow_root_sharding_propagation})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "broadcast");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding("{devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
if (GetParam().allow_root_sharding_propagation) {
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Sharding("{devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}"));
}
}
TEST_P(ParameterizedMetadataTest, BroadcastMerge) {
const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
%param0 = f32[3,2048]parameter(0),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
%broadcast = f32[3,2048,3] broadcast(%param0), dimensions={0,1}
ROOT %copy = f32[3,2048,3] copy(%broadcast),
sharding={devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate metadata={op_name="b"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "broadcast");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a"), CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, BroadcastUser) {
const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
%param0 = f32[24,8]{0,1} parameter(0)
%copy = f32[24,8]{0,1} copy(%param0)
ROOT %broadcast = f32[4,24,6,8]{3,2,1,0} broadcast(%copy), dimensions={1,3},
sharding={devices=[1,2,1,4]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "copy");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,4]0,1,2,3,4,5,6,7}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTestWithOutput, BroadcastUserPartial) {
const char* const hlo_string = R"(
HloModule module
ENTRY %broadcast {
%param0 = f32[24,8]{0,1} parameter(0)
%copy = f32[24,8]{0,1} copy(%param0)
ROOT %broadcast = f32[4,24,6,8] broadcast(%copy), dimensions={1,3},
sharding={devices=[4,2,1,1]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata,
{GetParam().allow_root_sharding_propagation})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "copy");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding("{devices=[2,1,4]0,2,4,6,1,3,5,7 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
if (GetParam().allow_root_sharding_propagation) {
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{devices=[4,2,1,1]0,1,2,3,4,5,6,7}"));
}
}
TEST_P(ParameterizedMetadataTest, MaximalReduceForwardPass) {
const char* const hlo_string = R"(
HloModule module
%add {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}}
%init = f32[] parameter(1)
%reduce = f32[5,7]{1,0} reduce(%param0, %init), dimensions={2,3}, to_apply=%add
ROOT %copy = f32[5,7]{0,1} copy(%reduce)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "reduce");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{replicated}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_F(ShardingPropagationTest, ManualTupleReduceForwardPass) {
const char* const hlo_string = R"(
HloModule module
%minmax_func {
%lhs_value = f32[] parameter(0)
%rhs_value = f32[] parameter(2)
%compare.2 = pred[] compare(%lhs_value, %rhs_value), direction=GT
%select.4 = f32[] select(%compare.2, %lhs_value, %rhs_value)
%lhs_index = s32[] parameter(1)
%rhs_index = s32[] parameter(3)
%select.5 = s32[] select(%compare.2, %lhs_index, %rhs_index)
ROOT %tuple.2 = (f32[], s32[]) tuple(%select.4, %select.5)
}
ENTRY %reduce {
get-tuple-element.416 = f32[2,1,128]{2,1,0} parameter(0), sharding={manual}
get-tuple-element.417 = s32[2,1,128]{2,1,0} parameter(1), sharding={manual}
constant.3793 = f32[] constant(0)
constant.3795 = s32[] constant(0)
reduce.418 = (f32[2,1]{1,0}, s32[2,1]{1,0}) reduce(
get-tuple-element.416, get-tuple-element.417, constant.3793, constant.3795),
dimensions={2}, to_apply=minmax_func
ROOT %copy = (f32[2,1]{1,0}, s32[2,1]{1,0}) copy(%reduce.418)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
          .Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "reduce.418");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{{manual}, {manual}}"));
}
TEST_P(ParameterizedMetadataTest, ShardedReduceForwardPass) {
const char* const hlo_string = R"(
HloModule module
%add {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}}
%init = f32[] parameter(1)
%reduce = f32[7,11]{1,0} reduce(%param0, %init), dimensions={0,3}, to_apply=%add
ROOT %copy = f32[7,11]{0,1} copy(f32[7,11]{1,0} %reduce)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "reduce");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
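
// A ShardBarrierFrom custom call between the sharded operand and the reduce
// blocks forward propagation, so the reduce must remain unsharded.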
TEST_P(ParameterizedMetadataTest, ReduceForwardPassWithBarrier) {
const char* const hlo_string = R"(
HloModule module
%add {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}}
%init = f32[] parameter(1)
%shard-barrier-from = f32[5,7,11,13]{3,2,1,0} custom-call(%param0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%reduce = f32[7,11]{1,0} reduce(%shard-barrier-from, %init), dimensions={0,3}, to_apply=%add
ROOT %copy = f32[7,11]{0,1} copy(f32[7,11]{1,0} %reduce)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "reduce");
ASSERT_NE(instruction, nullptr);
EXPECT_FALSE(instruction->has_sharding());
}
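
// Reducing the first of two tiled dimensions: the surviving dimension keeps
// its tiling and the reduced mesh dimension becomes partial replication.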
TEST_P(ParameterizedMetadataTest, ReducePartiallyOnTiledDims) {
const char* const hlo_string = R"(
HloModule module
%add {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce {
%param0 = f32[8,8] parameter(0),
sharding={devices=[2,2]0,1,2,3 metadata={op_name="a"}}
%init = f32[] parameter(1)
%reduce = f32[8] reduce(%param0, %init), dimensions={0}, to_apply=%add
ROOT %copy = f32[8] copy(%reduce)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "reduce");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[2,2]0,2,1,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
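
// Like the previous test, but the operand is already partially replicated;
// the reduced mesh dimension is merged into the replicated tile dimension.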
TEST_P(ParameterizedMetadataTest, ReducePartiallyOnTiledDims2) {
const char* const hlo_string = R"(
HloModule module
%add {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce {
%param0 = f32[8,8] parameter(0),
sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="a"}}
%init = f32[] parameter(1)
%reduce = f32[8] reduce(%param0, %init), dimensions={0}, to_apply=%add
ROOT %copy = f32[8] copy(%reduce)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "reduce");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding("{devices=[2,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
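
// The partially replicated sharding on the reduce propagates backward to
// %input, tiling only the non-reduced dimension.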
TEST_P(ParameterizedMetadataTest, ReducePartiallyBackward) {
const char* const hlo_string = R"(
HloModule module
%add {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce {
%param0 = f32[8,8] parameter(0)
%input = f32[8,8] copy(%param0)
%init = f32[] parameter(1)
%reduce = f32[8] reduce(%input, %init), dimensions={0}, to_apply=%add,
sharding={devices=[2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
ROOT %copy = f32[8] copy(%reduce)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "input");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
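
// A ShardBarrierTo custom call blocks backward propagation from the reduce,
// so %input must remain unsharded.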
TEST_P(ParameterizedMetadataTest, ReduceBackwardWithBarrier) {
const char* const hlo_string = R"(
HloModule module
%add {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce {
%param0 = f32[8,8] parameter(0)
%input = f32[8,8] copy(%param0)
%init = f32[] parameter(1)
%shard-barrier-to = f32[8,8] custom-call(%input), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
%reduce = f32[8] reduce(%shard-barrier-to, %init), dimensions={0}, to_apply=%add,
sharding={devices=[2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
ROOT %copy = f32[8] copy(%reduce)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "input");
ASSERT_NE(instruction, nullptr);
EXPECT_FALSE(instruction->has_sharding());
}
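
// The tiling on the non-reduced dimension of %param1 propagates forward to
// both elements of the tuple-shaped reduce and backward to %copy_param0.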
TEST_P(ParameterizedMetadataTestWithOutput,
ShardedOnNonReduceDimTupleReduceForwardAndBackwardPass) {
const char* const hlo_string = R"(
HloModule module
%minmax_func {
%lhs_value = f32[] parameter(0)
%rhs_value = f32[] parameter(2)
%compare.2 = pred[] compare(%lhs_value, %rhs_value), direction=GT
%select.4 = f32[] select(%compare.2, %lhs_value, %rhs_value)
%lhs_index = s32[] parameter(1)
%rhs_index = s32[] parameter(3)
%select.5 = s32[] select(%compare.2, %lhs_index, %rhs_index)
ROOT %tuple.2 = (f32[], s32[]) tuple(%select.4, %select.5)
}
ENTRY %main {
%param0 = f32[28,10] parameter(0)
%param1 = s32[28,10] parameter(1), sharding={devices=[2,1]0,1 metadata={op_name="a"}}
%copy_param0 = f32[28,10] copy(%param0)
%init0 = f32[] parameter(2)
%init1 = s32[] parameter(3)
%reduce = (f32[28], s32[28]) reduce(%copy_param0, %param1, %init0, %init1),
dimensions={1}, to_apply=%minmax_func
%gte0 = f32[28] get-tuple-element(%reduce), index=0
%gte1 = s32[28] get-tuple-element(%reduce), index=1
%copy0 = f32[28] copy(%gte0)
%copy1 = s32[28] copy(%gte1)
ROOT %tuple = (f32[28], s32[28]) tuple(%copy0, %copy1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata,
{GetParam().allow_root_sharding_propagation})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* reduce = FindInstruction(module.get(), "reduce");
ASSERT_NE(reduce, nullptr);
EXPECT_THAT(reduce, op::Sharding("{{devices=[2]0,1},{devices=[2]0,1}}"));
auto* copy_param0 = FindInstruction(module.get(), "copy_param0");
ASSERT_NE(copy_param0, nullptr);
EXPECT_THAT(copy_param0, op::Sharding("{devices=[2,1]0,1}"));
for (const HloSharding& sharding :
{copy_param0->sharding(), reduce->sharding().tuple_elements()[0],
reduce->sharding().tuple_elements()[1]}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(sharding, ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(sharding, ShardingMetadata({}));
}
}
if (GetParam().allow_root_sharding_propagation) {
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{{devices=[2]0,1},{devices=[2]0,1}}"));
}
}
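
// %param1 is tiled on both dimensions; the tiling of the reduced dimension
// turns into partial replication on the tuple-shaped reduce result.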
TEST_P(ParameterizedMetadataTestWithOutput,
ShardedOnReduceDimTupleReduceForwardAndBackwardPass) {
const char* const hlo_string = R"(
HloModule module
%minmax_func {
%lhs_value = f32[] parameter(0)
%rhs_value = f32[] parameter(2)
%compare.2 = pred[] compare(%lhs_value, %rhs_value), direction=GT
%select.4 = f32[] select(%compare.2, %lhs_value, %rhs_value)
%lhs_index = s32[] parameter(1)
%rhs_index = s32[] parameter(3)
%select.5 = s32[] select(%compare.2, %lhs_index, %rhs_index)
ROOT %tuple.2 = (f32[], s32[]) tuple(%select.4, %select.5)
}
ENTRY %main {
%param0 = f32[28,10] parameter(0)
%param1 = s32[28,10] parameter(1), sharding={devices=[2,2]0,1,2,3 metadata={op_name="a"}}
%copy_param0 = f32[28,10] copy(%param0)
%init0 = f32[] parameter(2)
%init1 = s32[] parameter(3)
%reduce = (f32[28], s32[28]) reduce(%copy_param0, %param1, %init0, %init1),
dimensions={1}, to_apply=%minmax_func
%gte0 = f32[28] get-tuple-element(%reduce), index=0
%gte1 = s32[28] get-tuple-element(%reduce), index=1
%copy0 = f32[28] copy(%gte0)
%copy1 = s32[28] copy(%gte1)
ROOT %tuple = (f32[28], s32[28]) tuple(%copy0, %copy1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata,
{GetParam().allow_root_sharding_propagation})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* reduce = FindInstruction(module.get(), "reduce");
ASSERT_NE(reduce, nullptr);
EXPECT_THAT(reduce, op::Sharding("{{devices=[2,2]0,1,2,3 "
"last_tile_dim_replicate},{devices=[2,2]0,1,"
"2,3 last_tile_dim_replicate}}"));
auto* copy_param0 = FindInstruction(module.get(), "copy_param0");
ASSERT_NE(copy_param0, nullptr);
EXPECT_THAT(copy_param0, op::Sharding("{devices=[2,2]0,1,2,3}"));
for (const HloSharding& sharding :
{copy_param0->sharding(), reduce->sharding().tuple_elements()[0],
reduce->sharding().tuple_elements()[1]}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(sharding, ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(sharding, ShardingMetadata({}));
}
}
if (GetParam().allow_root_sharding_propagation) {
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{{devices=[2,2]0,1,2,3 "
"last_tile_dim_replicate},{devices=[2,2]0,1,2,3 "
"last_tile_dim_replicate}}"));
}
}
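
// Each get-tuple-element picks up the sharding of the matching (possibly
// nested) element of %tuple.1's sharding.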
TEST_P(ParameterizedMetadataTestWithOutput, GetTupleElementForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %gte {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0)
%tuple = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) tuple(
%param0, %param0)
%tuple.1 = (f32[5,7,11,13]{3,2,1,0},
(f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0})) tuple(
%param0, %tuple),
sharding={{devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}},
{replicated metadata={op_name="b"}},
{devices=[1,2,2,1]0,1,2,3 metadata={op_name="c"}}}
%gte = f32[5,7,11,13]{3,2,1,0} get-tuple-element(%tuple.1), index=0
%gte.1 = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) get-tuple-element(
%tuple.1), index=1
%gte.2 = f32[5,7,11,13]{3,2,1,0} get-tuple-element(%gte.1), index=0
ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%gte.2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata,
{GetParam().allow_root_sharding_propagation})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* gte = FindInstruction(module.get(), "gte");
ASSERT_NE(gte, nullptr);
EXPECT_THAT(gte, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
auto* gte1 = FindInstruction(module.get(), "gte.1");
ASSERT_NE(gte1, nullptr);
EXPECT_THAT(gte1, op::Sharding("{{replicated}, {devices=[1,2,2,1]0,1,2,3}}"));
auto* gte2 = FindInstruction(module.get(), "gte.2");
ASSERT_NE(gte2, nullptr);
EXPECT_THAT(gte2, op::Sharding("{replicated}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(gte->sharding(), ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(gte1->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("b")}));
EXPECT_THAT(gte1->sharding().tuple_elements()[1],
ShardingMetadata({CreateMetadata("c")}));
EXPECT_THAT(gte2->sharding(), ShardingMetadata({CreateMetadata("b")}));
} else {
for (const HloSharding& sharding :
{gte->sharding(), gte1->sharding().tuple_elements()[0],
gte1->sharding().tuple_elements()[1], gte2->sharding()}) {
EXPECT_THAT(sharding, ShardingMetadata({}));
}
}
if (GetParam().allow_root_sharding_propagation) {
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{replicated}"));
}
}
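
// A ShardBarrierFrom between the tuple and the get-tuple-element blocks
// forward propagation, leaving %gte unsharded.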
TEST_P(ParameterizedMetadataTestWithOutput,
GetTupleElementForwardPassWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %gte {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0)
%tuple = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) tuple(
%param0, %param0), sharding={{devices=[1,2,2,1]0,1,2,3 metadata={op_name="a"}},
{replicated metadata={op_name="b"}}}
%shard-barrier-from = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) custom-call(%tuple), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%gte = f32[5,7,11,13]{3,2,1,0} get-tuple-element(%shard-barrier-from), index=0
ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%gte)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(false, GetParam().propagate_metadata,
{GetParam().allow_root_sharding_propagation})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* gte = FindInstruction(module.get(), "gte");
ASSERT_NE(gte, nullptr);
EXPECT_FALSE(gte->has_sharding());
}
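
// A tuple assembles its operands' shardings element-wise; operands without
// a sharding contribute {replicated} elements.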
TEST_P(ParameterizedMetadataTest, TupleForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %tuple {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
sharding={replicated metadata={op_name="a"}}
%param1 = f32[5,7,11,13]{3,2,1,0} parameter(1),
sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="b"}}
%param2 = f32[5,7,11,13]{3,2,1,0} parameter(2)
%tuple = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) tuple(
%param1, %param2)
%tuple.1 = (f32[5,7,11,13]{3,2,1,0},
(f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0})) tuple(
%param0, %tuple)
ROOT %copy = (f32[5,7,11,13]{3,2,1,0},
(f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0})) copy(
%tuple.1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* tuple = FindInstruction(module.get(), "tuple");
ASSERT_NE(tuple, nullptr);
EXPECT_THAT(tuple, op::Sharding("{{devices=[1,2,2,1]0,1,2,3},"
" {replicated}}"));
auto* tuple1 = FindInstruction(module.get(), "tuple.1");
ASSERT_NE(tuple1, nullptr);
EXPECT_THAT(tuple1, op::Sharding("{{replicated},"
" {devices=[1,2,2,1]0,1,2,3},"
" {replicated}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(tuple->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("b")}));
EXPECT_THAT(tuple->sharding().tuple_elements()[1], ShardingMetadata({}));
EXPECT_THAT(tuple1->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(tuple1->sharding().tuple_elements()[1],
ShardingMetadata({CreateMetadata("b")}));
EXPECT_THAT(tuple1->sharding().tuple_elements()[2], ShardingMetadata({}));
} else {
for (const HloSharding& tuple_sharding :
{tuple->sharding(), tuple1->sharding()}) {
for (const HloSharding& sub_sharding : tuple_sharding.tuple_elements()) {
EXPECT_THAT(sub_sharding, ShardingMetadata({}));
}
}
}
}
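
// Regression test: a sharding with subgroup manual dimensions
// (last_tile_dims={manual}) must survive tuple assembly unchanged.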
TEST_P(ParameterizedMetadataTest, TupleForwardPass_SplatBug) {
const char* const hlo_string = R"(
HloModule module
ENTRY %tuple {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
sharding={replicated metadata={op_name="a"}}
%param1 = f32[5,7,11,13]{3,2,1,0} parameter(1),
sharding={devices=[1,2,2,1,2]0,1,2,3,4,5,6,7 last_tile_dims={manual} metadata={op_name="b"}}
%param2 = f32[5,7,11,13]{3,2,1,0} parameter(2)
%tuple = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) tuple(
%param1, %param2)
ROOT %copy = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) copy(%tuple)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* tuple = FindInstruction(module.get(), "tuple");
ASSERT_NE(tuple, nullptr);
EXPECT_THAT(tuple, op::Sharding("{{devices=[1,2,2,1,2]0,1,2,3,4,5,6,7 "
"last_tile_dims={manual}}, {replicated}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(tuple->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("b")}));
EXPECT_THAT(tuple->sharding().tuple_elements()[1], ShardingMetadata({}));
} else {
for (const HloSharding& sub_sharding : tuple->sharding().tuple_elements()) {
EXPECT_THAT(sub_sharding, ShardingMetadata({}));
}
}
}
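
// The {manual} sharding reaches the tuple through the gather and is
// combined with the tiled sharding of %param1.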
TEST_P(ParameterizedMetadataTest, TupleForwardPassAndBackwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %tuple {
%param0 = f32[256,2]{1,0} parameter(0),
sharding={manual metadata={op_name="a"}}
%param1 = f32[256,2]{1,0} parameter(1),
sharding={devices=[1,2]0,1 metadata={op_name="b"}}
%constant = s32[1,2]{1,0} constant({{0,1}})
  %gather = f32[1,32,2]{2,1,0} gather(%param0, %constant), offset_dims={1,2}, collapsed_slice_dims={}, start_index_map={0,1}, index_vector_dim=1, slice_sizes={32,2}
%tuple = (f32[1,32,2]{2,1,0}, f32[256,2]{1,0}) tuple(
%gather, %param1)
ROOT %copy = (f32[1,32,2]{2,1,0}, f32[256,2]{1,0}) copy(%tuple)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* tuple = FindInstruction(module.get(), "tuple");
ASSERT_NE(tuple, nullptr);
EXPECT_THAT(tuple, op::Sharding("{{manual}, {devices=[1,2]0,1}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(tuple->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(tuple->sharding().tuple_elements()[1],
ShardingMetadata({CreateMetadata("b")}));
} else {
for (const HloSharding& sub_sharding : tuple->sharding().tuple_elements()) {
EXPECT_THAT(sub_sharding, ShardingMetadata({}));
}
}
}
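
// The tuple-shaped {manual} sharding on the while propagates backward to
// the tuple that feeds it.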
TEST_P(ParameterizedMetadataTest, TupleShapedBackwardPass) {
const char* const hlo_string = R"(
HloModule module
%cond {
%vars.cond = (u32[], f32[]) parameter(0)
%count.cond = u32[] get-tuple-element(%vars.cond), index=0
%limit = u32[] constant(10)
ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
%param = (u32[], f32[]) parameter(0)
%count = u32[] get-tuple-element(%param), index=0
%after-all = token[] after-all()
%recv = (f32[], u32[], token[]) recv(%after-all), channel_id=1
%recv-done = (f32[], token[]) recv-done(%recv), channel_id=1
%data = f32[] get-tuple-element(%recv-done), index=0
ROOT %tuple = (u32[], f32[]) tuple(%count, %data)
}
ENTRY %entry {
%zero = u32[] constant(0), sharding={replicated metadata={op_name="a"}}
%p0 = f32[] parameter(0), sharding={manual metadata={op_name="b"}}
%tuple = (u32[], f32[]) tuple(%zero, %p0)
%while = (u32[], f32[]) while(%tuple), body=%body, condition=%cond,
sharding={{manual metadata={op_name="c"}},
{manual metadata={op_name="d"}}}
ROOT %result = f32[] get-tuple-element(%while), index=1
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* tuple = FindInstruction(module.get(), "tuple");
ASSERT_NE(tuple, nullptr);
EXPECT_THAT(tuple, op::Sharding("{{manual}, {manual}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(tuple->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("c")}));
EXPECT_THAT(tuple->sharding().tuple_elements()[1],
ShardingMetadata({CreateMetadata("d")}));
} else {
for (const HloSharding& sub_sharding : tuple->sharding().tuple_elements()) {
EXPECT_THAT(sub_sharding, ShardingMetadata({}));
}
}
}
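
// The {manual} sharding of %p0 spreads to the other elements of the while
// tuple even though the %zero operand is used twice.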
TEST_P(ParameterizedMetadataTest,
       PartiallyManualTupleWithRepeatedOperandsBackwardPass) {
const char* const hlo_string = R"(
HloModule module
%cond {
%vars.cond = (s32[], s32[], s32[]) parameter(0)
%count.cond = s32[] get-tuple-element(%vars.cond), index=0
%limit = s32[] constant(10)
ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
%param = (s32[], s32[], s32[]) parameter(0)
%count = s32[] get-tuple-element(%param), index=0
%lhs = s32[] get-tuple-element(%param), index=1
%rhs = s32[] get-tuple-element(%param), index=2
%add = s32[] add(%lhs, %rhs)
ROOT %tuple = (s32[], s32[], s32[]) tuple(%count, %lhs, %add)
}
ENTRY %entry {
%zero = s32[] constant(0)
%p0 = s32[] parameter(0), sharding={manual metadata={op_name="a"}}
%tuple = (s32[], s32[], s32[]) tuple(%zero, %zero, %p0)
%while = (s32[], s32[], s32[]) while(%tuple), body=%body, condition=%cond
ROOT %copy = (s32[], s32[], s32[]) copy(%while)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* tuple = module->entry_computation()->root_instruction()->operand(0);
ASSERT_NE(tuple, nullptr);
EXPECT_THAT(tuple, op::Sharding("{{manual}, {manual}, {manual}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(tuple->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(tuple->sharding().tuple_elements()[1],
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(tuple->sharding().tuple_elements()[2],
ShardingMetadata({CreateMetadata("a")}));
} else {
for (const HloSharding& sub_sharding : tuple->sharding().tuple_elements()) {
EXPECT_THAT(sub_sharding, ShardingMetadata({}));
}
}
}
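
// With dim_labels=b01f_01io->b01f, the batch and spatial tiling of the LHS
// carries straight through to the convolution output.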
TEST_P(ParameterizedMetadataTest, ForwardConvolutionForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%lhs = f32[5,7,11,13]{3,2,1,0} parameter(0),
sharding={devices=[2,2,2,1]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
%rhs = f32[3,3,13,17]{3,2,1,0} parameter(1)
%convolution = f32[5,7,11,17]{3,2,1,0} convolution(%lhs, %rhs),
window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f
ROOT %copy = f32[5,7,11,17]{3,2,1,0} copy(%convolution)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "convolution");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,2,2,1]0,1,2,3,4,5,6,7}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
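
// The spatial tiling of the LHS still propagates to the output despite the
// large rhs dilation.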
TEST_P(ParameterizedMetadataTest, ForwardConvolutionLargeDilationForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%lhs = f32[8,64,2]{2,1,0} parameter(0),
sharding={devices=[1,4,1]0,1,2,3 metadata={op_name="a"}}
%rhs = f32[3,2,2]{2,1,0} parameter(1)
%convolution = f32[8,32,2]{2,1,0} convolution(%lhs, %rhs),
window={size=3 rhs_dilate=16}, dim_labels=b0f_0io->b0f
ROOT %copy = f32[8,32,2]{2,1,0} copy(%convolution)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "convolution");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,4,1]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
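
// 3D convolution with a small kernel: the tiling of a spatial dimension of
// the LHS propagates to the output.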
TEST_P(ParameterizedMetadataTest, ForwardConvolution3DSmallKernel) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%lhs = bf16[32,32,8,7,128]{4,3,2,1,0} parameter(0),
sharding={devices=[1,4,1,1,1]0,1,2,3 metadata={op_name="a"}}
%rhs = bf16[3,3,3,128,256]{4,3,2,1,0} parameter(1)
%convolution = bf16[16,16,8,3,256]{4,3,2,1,0}
convolution(bf16[32,32,8,7,128]{4,3,2,1,0} %lhs,
bf16[3,3,3,128,256]{4,3,2,1,0} %rhs),
window={size=3x3x3 stride=2x2x2 pad=1_1x1_1x0_0},
dim_labels=01b2f_012io->01b2f
ROOT %copy = bf16[16,16,8,3,256]{4,3,2,1,0} copy(%convolution)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "convolution");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,4,1,1,1]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
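
// Transpose permutes the tile-assignment dimensions along with the data
// dimensions.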
TEST_P(ParameterizedMetadataTest, TransposeForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %transpose {
%param = f32[7,11,13]{2,1,0} parameter(0),
sharding={devices=[2,1,2]0,1,2,3 metadata={op_name="a"}}
%transpose = f32[11,13,7]{2,1,0} transpose(%param), dimensions={1,2,0}
ROOT %copy = f32[11,13,7]{2,1,0} copy(%transpose)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "transpose");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2]0,2,1,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
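
// ShardBarrierFrom blocks forward propagation through the transpose.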
TEST_P(ParameterizedMetadataTest, TransposeForwardPassWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %transpose {
%param = f32[7,11,13]{2,1,0} parameter(0),
sharding={devices=[2,1,2]0,1,2,3 metadata={op_name="a"}}
%shard-barrier-from = f32[7,11,13]{2,1,0} custom-call(%param), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%transpose = f32[11,13,7]{2,1,0} transpose(%shard-barrier-from), dimensions={1,2,0}
ROOT %copy = f32[11,13,7]{2,1,0} copy(%transpose)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "transpose");
ASSERT_NE(instruction, nullptr);
EXPECT_FALSE(instruction->has_sharding());
}
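
// Backward propagation applies the inverse permutation to the transpose's
// sharding before assigning it to %copy.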
TEST_P(ParameterizedMetadataTest, TransposeBackwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %transpose {
%param = f32[7,11,13]{2,1,0} parameter(0)
%copy = f32[7,11,13]{2,1,0} copy(%param)
ROOT %transpose = f32[11,13,7]{2,1,0} transpose(%copy), dimensions={1,2,0},
sharding={devices=[1,2,2]0,1,2,3 metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "copy");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,1,2]0,2,1,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
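
// ShardBarrierTo blocks backward propagation from the transpose into %copy.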
TEST_P(ParameterizedMetadataTest, TransposeBackwardPassWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %transpose {
%param = f32[7,11,13]{2,1,0} parameter(0)
%copy = f32[7,11,13]{2,1,0} copy(%param)
%shard-barrier-to = f32[7,11,13]{2,1,0} custom-call(%copy), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
ROOT %transpose = f32[11,13,7]{2,1,0} transpose(%shard-barrier-to), dimensions={1,2,0},
sharding={devices=[1,2,2]0,1,2,3 metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "copy");
ASSERT_NE(instruction, nullptr);
EXPECT_FALSE(instruction->has_sharding());
}
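
// The tiling of the flattened major dimension maps onto the leading
// dimension of the reshaped result.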
TEST_P(ParameterizedMetadataTest, ReshapeForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
%param0 = f32[1430,1]{1,0} parameter(0),
sharding={devices=[2,1]0,1 metadata={op_name="a"}}
%reshape = f32[10,11,13]{2,1,0} reshape(%param0)
ROOT %copy = f32[10,11,13]{2,1,0} copy(%reshape)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "reshape");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,1,1]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
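
// ShardBarrierFrom blocks forward propagation through the reshape.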
TEST_P(ParameterizedMetadataTest, ReshapeForwardPassWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
%param0 = f32[1430,1]{1,0} parameter(0),
sharding={devices=[2,1]0,1 metadata={op_name="a"}}
%shard-barrier-from = f32[1430,1]{1,0} custom-call(%param0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%reshape = f32[10,11,13]{2,1,0} reshape(%shard-barrier-from)
ROOT %copy = f32[10,11,13]{2,1,0} copy(%reshape)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "reshape");
ASSERT_NE(instruction, nullptr);
EXPECT_FALSE(instruction->has_sharding());
}
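
// Only part of the [4,4] input tiling maps cleanly through the reshape;
// the unmatched part becomes a replicated tile dimension.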
TEST_P(ParameterizedMetadataTest, ReshapeForwardPassPartialMatch) {
const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
%param0 = f32[14,32] parameter(0),
sharding={devices=[4,4]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 metadata={op_name="a"}}
%reshape = f32[7,2,2,16] reshape(%param0)
ROOT %copy = f32[7,2,2,16] copy(%reshape)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "reshape");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,1,2,2,4]0,4,8,12,1,5,9,13,2,6,10,14,3,"
"7,11,15 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
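
// Reshaping [12,8] to [8,12]: only the 2-way tiling of the major dimension
// survives; the 4-way tiling falls back to partial replication.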
TEST_P(ParameterizedMetadataTest, ReshapeForwardPassPartialMatch2) {
const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
%param0 = f32[12,8] parameter(0),
sharding={devices=[2,4]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
%reshape = f32[8,12] reshape(%param0)
ROOT %copy = f32[8,12] copy(%reshape)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "reshape");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding("{devices=[2,1,4]0,1,2,3,4,5,6,7 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
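
// Exercises several reshapes of one operand tiled {devices=[6,2,1]<=[12]};
// each result keeps whatever part of the tiling maps through cleanly and
// replicates the rest.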
TEST_P(ParameterizedMetadataTest, ReshapeForwardPassTranspose) {
const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
%param0 = f32[6,4,5] parameter(0), sharding={devices=[6,2,1]<=[12] metadata={op_name="a"}}
%reshape.1 = f32[2,3,20] reshape(%param0)
%reshape.2 = f32[2,4,3,5] reshape(%param0)
%reshape.3 = f32[20,6] reshape(%param0)
%reshape.4 = f32[3,5,8] reshape(%param0)
%reshape.5 = f32[10,4,3] reshape(%param0)
%reshape.6 = f32[5,8,3] reshape(%param0)
ROOT %tuple = tuple(%reshape.1, %reshape.2, %reshape.3, %reshape.4, %reshape.5, %reshape.6)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
std::vector<std::pair<std::string, std::string>> instruction_and_sharding = {
{"reshape.1", "{devices=[2,3,2]<=[12]}"},
{"reshape.2", "{devices=[2,1,1,1,6]<=[12] last_tile_dim_replicate}"},
{"reshape.3", "{devices=[2,1,6]<=[12] last_tile_dim_replicate}"},
{"reshape.4", "{devices=[3,1,1,4]<=[12] last_tile_dim_replicate}"},
{"reshape.5", "{devices=[2,1,1,6]<=[12] last_tile_dim_replicate}"},
{"reshape.6", "{replicated}"}};
for (const auto& [name, sharding] : instruction_and_sharding) {
auto* instruction = FindInstruction(module.get(), name);
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding(sharding));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
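
// The reshape's leading-dimension tiling maps backward onto the flat
// [2002,1] operand.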
TEST_P(ParameterizedMetadataTest, ReshapeBackwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
%param0 = f32[2002,1]{1,0} parameter(0)
%copy = f32[2002,1]{1,0} copy(f32[2002,1]{1,0} %param0)
ROOT %reshape = f32[14,11,13]{2,1,0} reshape(%copy),
sharding={devices=[2,1,1]0,1 metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "copy");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,1]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
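
// ShardBarrierTo blocks backward propagation from the reshape into %copy.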
TEST_P(ParameterizedMetadataTest, ReshapeBackwardPassWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
%param0 = f32[2002,1]{1,0} parameter(0)
%copy = f32[2002,1]{1,0} copy(f32[2002,1]{1,0} %param0)
%shard-barrier-to = f32[2002,1]{1,0} custom-call(%copy), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
ROOT %reshape = f32[14,11,13]{2,1,0} reshape(%shard-barrier-to),
sharding={devices=[2,1,1]0,1 metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "copy");
ASSERT_NE(instruction, nullptr);
EXPECT_FALSE(instruction->has_sharding());
}
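
// Pad keeps the operand tiling on the padded result.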
TEST_P(ParameterizedMetadataTest, PadForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %pad {
%input = f32[11,17]{1,0} parameter(0),
sharding={devices=[2,2]0,1,2,3 metadata={op_name="a"}}
%pad_value = f32[] parameter(1)
%pad = f32[27,51]{1,0} pad(%input, %pad_value), padding=2_4_1x1_1_2
ROOT %copy = f32[27,51]{1,0} copy(%pad)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "pad");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
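
// The pad's sharding propagates backward to the operand copy.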
TEST_P(ParameterizedMetadataTest, PadBackwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %pad {
%input = f32[11,17]{1,0} parameter(0)
%copy = f32[11,17]{1,0} copy(%input)
%pad_value = f32[] parameter(1)
%pad = f32[27,51]{1,0} pad(%copy, %pad_value), padding=2_4_1x1_1_2,
sharding={devices=[2,2]0,1,2,3 metadata={op_name="a"}}
ROOT %result = f32[27,51]{1,0} copy(%pad)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "copy");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
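
// Partial replication on the pad operand is preserved on the result.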
TEST_P(ParameterizedMetadataTest, PartialReplicatedPadForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %pad {
%input = f32[11,17]{1,0} parameter(0),
sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="a"}}
%pad_value = f32[] parameter(1)
%pad = f32[27,51]{1,0} pad(%input, %pad_value), padding=2_4_1x1_1_2
ROOT %copy = f32[27,51]{1,0} copy(%pad)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "pad");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding("{devices=[2,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
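
// When an elementwise add sees one replicated and one tiled operand, the
// tiled sharding wins and also propagates back to the replicated side.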
TEST_P(ParameterizedMetadataTest, ShardedPreferredOverReplicated) {
const char* const hlo_string = R"(
HloModule module
ENTRY %replicated {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
sharding={replicated metadata={op_name="a"}}
%copy = f32[5,7,11,13]{3,2,1,0} copy(%param0)
%param1 = f32[5,7,11,13]{3,2,1,0} parameter(1),
sharding={devices=[1,2,2,1]0,1,2,3 metadata={op_name="b"}}
%copy.1 = f32[5,7,11,13]{3,2,1,0} copy(%param1)
%add = f32[5,7,11,13]{3,2,1,0} add(%copy, %copy.1)
ROOT %copy.2 = f32[5,7,11,13]{3,2,1,0} copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* copy = FindInstruction(module.get(), "copy");
ASSERT_NE(copy, nullptr);
EXPECT_THAT(copy, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
auto* copy1 = FindInstruction(module.get(), "copy.1");
ASSERT_NE(copy1, nullptr);
EXPECT_THAT(copy1, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
auto* add = FindInstruction(module.get(), "add");
ASSERT_NE(add, nullptr);
EXPECT_THAT(add, op::Sharding("{devices=[1,2,2,1]0,1,2,3}"));
for (const HloSharding& sharding :
{copy->sharding(), copy1->sharding(), add->sharding()}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(sharding, ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(sharding, ShardingMetadata({}));
}
}
}
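
// The partially replicated tiling of the flattened dimension maps onto the
// leading dimension of the reshape.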
TEST_P(ParameterizedMetadataTest, PartialReplicateReshapeForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
%param0 = f32[1430,1]{1,0} parameter(0),
sharding={devices=[2,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
%reshape = f32[10,11,13]{2,1,0} reshape(%param0)
ROOT %copy = f32[10,11,13]{2,1,0} copy(%reshape)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "reshape");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding("{devices=[2,1,1,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
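
// The partially replicated reshape sharding maps backward onto the flat
// operand shape.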
TEST_P(ParameterizedMetadataTest, PartialReplicateReshapeBackwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
%param0 = f32[2002,1]{1,0} parameter(0)
%copy = f32[2002,1]{1,0} copy(f32[2002,1]{1,0} %param0)
ROOT %reshape = f32[14,11,13]{2,1,0} reshape(%copy),
sharding={devices=[2,1,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "copy");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
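
// Maximal (single-device) operand shardings should not be combined into a
// tuple sharding.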
TEST_P(ParameterizedMetadataTest, DontShardTuplesIfAllInputIsMaximal) {
const char* const hlo_string = R"(
HloModule module
ENTRY %tuple {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0),
sharding={maximal device=0 metadata={op_name="a"}}
%param1 = f32[5,7,11,13]{3,2,1,0} parameter(1),
sharding={maximal device=1 metadata={op_name="b"}}
%tuple = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) tuple(
%param0, %param1)
ROOT %copy = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) copy(%tuple)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
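  // The pass reports a change only because it strips metadata from the
  // existing shardings; with metadata already cleared or being propagated,
  // nothing needs rewriting.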
EXPECT_EQ(changed,
!GetParam().propagate_metadata && !GetParam().clear_metadata);
auto* instruction = FindInstruction(module.get(), "tuple");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::NoSharding());
}
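
// The spatial tiling of the LHS propagates to the convolution output.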
TEST_P(ParameterizedMetadataTest, ValidConvolution) {
const char* const hlo_string = R"(
HloModule module
ENTRY conv {
%lhs = f32[13,17,19]{2,1,0} parameter(0),
sharding={devices=[1,2,1]0,1 metadata={op_name="a"}}
%rhs = f32[19,5,19]{2,1,0} parameter(1)
%conv = f32[13,13,19]{2,1,0} convolution(%lhs, %rhs),
window={size=5}, dim_labels=b0f_i0o->b0f
ROOT %tuple = (f32[13,13,19]{2,1,0}) tuple(%conv)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "conv");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,1]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
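
// A strided slice keeps the tiling of the sliced dimensions.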
TEST_P(ParameterizedMetadataTest, StridedSlice) {
const char* const hlo_string = R"(
HloModule module
ENTRY %slice {
%param = f32[17,13]{1,0} parameter(0),
sharding={devices=[2,1]0,1 metadata={op_name="a"}}
%slice = f32[7,5]{1,0} slice(%param), slice={[1:15:2], [5:10:1]}
ROOT %tuple = (f32[7,5]{1,0}) tuple(%slice)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "slice");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,1]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
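
// As above, with the partial replication preserved on the slice.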
TEST_P(ParameterizedMetadataTest, PartialReplicatedStridedSlice) {
const char* const hlo_string = R"(
HloModule module
ENTRY %slice {
%param = f32[17,13]{1,0} parameter(0),
sharding={devices=[2,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
%slice = f32[7,5]{1,0} slice(%param), slice={[1:15:2], [5:10:1]}
ROOT %tuple = (f32[7,5]{1,0}) tuple(%slice)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "slice");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
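
// The reduce-window sharding propagates backward to the data operand copy.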
TEST_P(ParameterizedMetadataTest, ReduceWindowBackwardPass) {
const char* const hlo_string = R"(
HloModule module
%add (lhs: f32[], rhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce_window {
%param = f32[13,17]{1,0} parameter(0)
%param.copy = f32[13,17]{1,0} copy(%param)
%init = f32[] parameter(1)
ROOT %reduce-window = f32[7,17]{1,0} reduce-window(%param.copy, %init),
window={size=3x2 stride=2x1 pad=1_1x0_1}, to_apply=%add,
sharding={devices=[2,1]0,1 metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* param_copy = FindInstruction(module.get(), "param.copy");
ASSERT_NE(param_copy, nullptr);
EXPECT_THAT(param_copy, op::Sharding("{devices=[2,1]0,1}"));
auto* reduce_window = FindInstruction(module.get(), "reduce-window");
ASSERT_NE(reduce_window, nullptr);
EXPECT_THAT(reduce_window, op::Sharding("{devices=[2,1]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(param_copy->sharding(),
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(reduce_window->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(param_copy->sharding(), ShardingMetadata({}));
EXPECT_THAT(reduce_window->sharding(), ShardingMetadata({}));
}
}
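
// ShardBarrierTo blocks backward propagation from the reduce-window.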
TEST_P(ParameterizedMetadataTest, ReduceWindowBackwardPassWithBarrier) {
const char* const hlo_string = R"(
HloModule module
%add (lhs: f32[], rhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %reduce_window {
%param = f32[13,17]{1,0} parameter(0)
%param.copy = f32[13,17]{1,0} copy(%param)
%init = f32[] parameter(1)
%shard-barrier-to = f32[13,17]{1,0} custom-call(%param.copy), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
ROOT %reduce-window = f32[7,17]{1,0} reduce-window(%shard-barrier-to, %init),
window={size=3x2 stride=2x1 pad=1_1x0_1}, to_apply=%add,
sharding={devices=[2,1]0,1 metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* param_copy = FindInstruction(module.get(), "param.copy");
ASSERT_NE(param_copy, nullptr);
EXPECT_FALSE(param_copy->has_sharding());
}
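
// Each element of the tuple-shaped reduce-window sharding propagates
// backward to the corresponding data operand.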
TEST_P(ParameterizedMetadataTest, VariadicReduceWindowBackwardPass) {
const char* const hlo_string = R"(
HloModule module
%add (a: f32[], b: s32[], c: f32[], d: s32[]) -> (f32[], s32[]) {
%a = f32[] parameter(0)
%b = s32[] parameter(1)
%c = f32[] parameter(2)
%d = s32[] parameter(3)
%add.0 = f32[] add(%a, %c)
%add.1 = s32[] add(%b, %d)
ROOT %t = tuple(%add.0, %add.1)
}
ENTRY %reduce_window {
%param.0 = f32[13,17]{1,0} parameter(0)
%param.0.copy = f32[13,17]{1,0} copy(%param.0)
%param.1 = s32[13,17]{1,0} parameter(1)
%param.1.copy = s32[13,17]{1,0} copy(%param.1)
%init.0 = f32[] parameter(2)
%init.1 = s32[] parameter(3)
ROOT %reduce-window = (f32[7,17]{1,0}, s32[7,17]{1,0}) reduce-window(%param.0.copy, %param.1.copy, %init.0, %init.1),
window={size=3x2 stride=2x1 pad=1_1x0_1}, to_apply=%add,
sharding={{devices=[2,1]0,1 metadata={op_name="a"}}, {devices=[2,1]0,1 metadata={op_name="b"}}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* param_0_copy = FindInstruction(module.get(), "param.0.copy");
ASSERT_NE(param_0_copy, nullptr);
EXPECT_THAT(param_0_copy, op::Sharding("{devices=[2,1]0,1}"));
auto* param_1_copy = FindInstruction(module.get(), "param.1.copy");
ASSERT_NE(param_1_copy, nullptr);
EXPECT_THAT(param_1_copy, op::Sharding("{devices=[2,1]0,1}"));
auto* reduce_window = FindInstruction(module.get(), "reduce-window");
ASSERT_NE(reduce_window, nullptr);
EXPECT_THAT(reduce_window,
op::Sharding("{{devices=[2,1]0,1}, {devices=[2,1]0,1}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(param_0_copy->sharding(),
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(param_1_copy->sharding(),
ShardingMetadata({CreateMetadata("b")}));
EXPECT_THAT(reduce_window->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(reduce_window->sharding().tuple_elements()[1],
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(param_0_copy->sharding(), ShardingMetadata({}));
EXPECT_THAT(param_1_copy->sharding(), ShardingMetadata({}));
EXPECT_THAT(reduce_window->sharding(), ShardingMetadata({}));
}
}
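
// A replicated LHS yields a replicated convolution result.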
TEST_P(ParameterizedMetadataTest, ReplicatedConvolutionLhs) {
const char* const hlo_string = R"(
HloModule module
ENTRY conv {
%lhs = f32[3,2,3]{2,1,0} parameter(0),
sharding={replicated metadata={op_name="a"}}
%rhs = f32[2,2,1]{2,1,0} parameter(1)
%conv = f32[3,2,3]{2,1,0} convolution(%lhs, %rhs),
window={size=1}, dim_labels=bf0_oi0->bf0
ROOT %tuple = (f32[3,2,3]{2,1,0}) tuple(%conv)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* lhs = FindInstruction(module.get(), "lhs");
ASSERT_NE(lhs, nullptr);
EXPECT_THAT(lhs, op::Sharding("{replicated}"));
auto* conv = FindInstruction(module.get(), "conv");
ASSERT_NE(conv, nullptr);
EXPECT_THAT(conv, op::Sharding("{replicated}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(lhs->sharding(), ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(conv->sharding(), ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(lhs->sharding(), ShardingMetadata({}));
EXPECT_THAT(conv->sharding(), ShardingMetadata({}));
}
}
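
// Tiling the contracted feature dimension cannot be kept on the output, so
// the convolution falls back to {replicated} (still carrying the metadata).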
TEST_P(ParameterizedMetadataTest, ConvolutionShardedFeature) {
const char* const hlo_string = R"(
HloModule module
ENTRY conv {
%lhs = f32[3,2,3]{2,1,0} parameter(0),
sharding={devices=[1,2,1]0,1 metadata={op_name="a"}}
%rhs = f32[2,2,1]{2,1,0} parameter(1)
%conv = f32[3,2,3]{2,1,0} convolution(%lhs, %rhs),
window={size=1}, dim_labels=bf0_oi0->bf0
ROOT %tuple = (f32[3,2,3]{2,1,0}) tuple(%conv)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "conv");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{replicated}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
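// Non-default dimension labels (f0b_i0o->0bf): the sharded LHS dimension maps
// onto the corresponding output dimension.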
TEST_P(ParameterizedMetadataTest, ConvolutionDifferentDimensionNumbers) {
const char* const hlo_string = R"(
HloModule module
ENTRY conv {
%lhs = f32[8,16,512] parameter(0),
sharding={devices=[1,2,1]0,1 metadata={op_name="a"}}
%rhs = f32[8,2,512] parameter(1)
%conv = f32[3,512,512] convolution(%lhs, %rhs),
window={size=2 stride=5},
dim_labels=f0b_i0o->0bf
ROOT %tuple = (f32[3,512,512]) tuple(%conv)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "conv");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,1,1]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
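// Operand shardings on the non-concatenated dimension propagate forward to the
// concatenate.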
TEST_P(ParameterizedMetadataTest, Concatenate) {
const char* const hlo_string = R"(
HloModule module
ENTRY %concat {
%param.0 = f32[5,7] parameter(0),
sharding={devices=[2,1]0,1 metadata={op_name="a"}}
%param.1 = f32[5,9] parameter(1),
sharding={devices=[2,1]0,1 metadata={op_name="b"}}
%concat = f32[5,16] concatenate(%param.0, %param.1),
dimensions={1}
ROOT %tuple = (f32[5,16]) tuple(%concat)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "concat");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,1]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
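// A ShardBarrierFrom custom call blocks forward propagation, so the concatenate
// stays unsharded.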
TEST_P(ParameterizedMetadataTest, ConcatenateForwardWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %concat {
%param.0 = f32[5,7] parameter(0),
sharding={devices=[2,1]0,1 metadata={op_name="a"}}
%param.1 = f32[5,9] parameter(1),
sharding={devices=[2,1]0,1 metadata={op_name="b"}}
%shard-barrier-from.0 = f32[5,7] custom-call(%param.0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%shard-barrier-from.1 = f32[5,9] custom-call(%param.1), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%concat = f32[5,16] concatenate(%shard-barrier-from.0, %shard-barrier-from.1),
dimensions={1}
ROOT %tuple = (f32[5,16]) tuple(%concat)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "concat");
ASSERT_NE(instruction, nullptr);
EXPECT_FALSE(instruction->has_sharding());
}
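// A ShardBarrierTo custom call blocks backward propagation from the concatenate
// to the copy behind it.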
TEST_P(ParameterizedMetadataTest, ConcatenateBackwardWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %concat {
%param.0 = f32[5,7] parameter(0)
%copy.0 = f32[5,7] copy(%param.0)
%param.1 = f32[5,9] parameter(1)
%copy.1 = f32[5,9] copy(%param.1)
%shard-barrier-to = f32[5,9] custom-call(%copy.1), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
%concat = f32[5,16] concatenate(%copy.0, %shard-barrier-to),
dimensions={1}, sharding={devices=[2,1]0,1 metadata={op_name="a"}}
ROOT %tuple = (f32[5,16]) tuple(%concat)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "copy.1");
ASSERT_NE(instruction, nullptr);
EXPECT_FALSE(instruction->has_sharding());
}
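// Tuple element shardings propagate backward onto the corresponding tuple
// operands.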
TEST_P(ParameterizedMetadataTest, TupleBackwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %tuple {
%param.0 = f32[1] parameter(0)
%param.1 = f32[3] parameter(1)
%copy.0 = f32[1] copy(%param.0)
%copy.1 = f32[3] copy(%param.1)
ROOT %tuple = (f32[1], f32[3]) tuple(%copy.0, %copy.1),
sharding={{replicated metadata={op_name="a"}},
{devices=[2]0,1 metadata={op_name="b"}}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* copy0 = FindInstruction(module.get(), "copy.0");
ASSERT_NE(copy0, nullptr);
EXPECT_THAT(copy0, op::Sharding("{replicated}"));
auto* copy1 = FindInstruction(module.get(), "copy.1");
ASSERT_NE(copy1, nullptr);
EXPECT_THAT(copy1, op::Sharding("{devices=[2]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(copy0->sharding(), ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(copy1->sharding(), ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(copy0->sharding(), ShardingMetadata({}));
EXPECT_THAT(copy1->sharding(), ShardingMetadata({}));
}
}
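// All-reduce is sharding-transparent: tiled shardings pass through it forward
// and replicated shardings pass through it backward.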
TEST_P(ParameterizedMetadataTest, AllReduce) {
const char* const hlo_string = R"(
HloModule module
%add (lhs: f32[], rhs: f32[]) -> f32[] {
%add_lhs = f32[] parameter(0)
%add_rhs = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %add_lhs, f32[] %add_rhs)
}
ENTRY %entry {
%param.0 = f32[3] parameter(0)
%param.1 = f32[3] parameter(1)
%copy_f_t = f32[3] copy(%param.1),
sharding={devices=[2]0,1 metadata={op_name="a"}}
%crs_f.tiled = f32[3] all-reduce(%copy_f_t), to_apply=%add
%crs_f.none = f32[3] all-reduce(%copy_f_t), to_apply=%add,
channel_id=1
%crs_b.replicated = f32[3] all-reduce(%param.0), to_apply=%add
%copy_b_r = f32[3] copy(%crs_b.replicated),
sharding={replicated metadata={op_name="b"}}
ROOT %tuple = (f32[3], f32[3], f32[3]) tuple(
%crs_f.tiled, %crs_f.none, %copy_b_r)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* crs_f_tiled = FindInstruction(module.get(), "crs_f.tiled");
ASSERT_NE(crs_f_tiled, nullptr);
EXPECT_THAT(crs_f_tiled, op::Sharding("{devices=[2]0,1}"));
auto* crs_f_none = FindInstruction(module.get(), "crs_f.none");
ASSERT_NE(crs_f_none, nullptr);
EXPECT_THAT(crs_f_none, op::Sharding("{devices=[2]0,1}"));
auto* crs_b_replicated = FindInstruction(module.get(), "crs_b.replicated");
ASSERT_NE(crs_b_replicated, nullptr);
EXPECT_THAT(crs_b_replicated, op::Sharding("{replicated}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(crs_f_tiled->sharding(),
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(crs_b_replicated->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(crs_f_tiled->sharding(), ShardingMetadata({}));
EXPECT_THAT(crs_b_replicated->sharding(), ShardingMetadata({}));
}
}
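// A sharding introduced on the body root, a body instruction, or an entry
// parameter must converge to one consistent sharding on the while, its body
// root, and both computation parameters.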
TEST_P(ParameterizedMetadataTest, While) {
const char* const hlo_string = R"(
HloModule module
%cond {
%vars.cond = (u32[], f32[10,10]) parameter(0)
%count.cond = u32[] get-tuple-element((u32[], f32[10,10]) %vars.cond), index=0
%limit = u32[] constant(10)
ROOT %lt = pred[] compare(u32[] %count.cond, u32[] %limit), direction=LT
}
%body {
%vars = (u32[], f32[10,10]) parameter(0)
%count = u32[] get-tuple-element(%vars), index=0
%acc = f32[10,10] get-tuple-element((u32[], f32[10,10]) %vars), index=1
%one = u32[] constant(1)
%count.1 = u32[] add(u32[] %count, u32[] %one), sharding={replicated}
%acc.1 = f32[10,10] add(f32[10,10] %acc, f32[10,10] %acc)
ROOT %tuple = (u32[], f32[10,10]) tuple(u32[] %count.1, f32[10,10] %acc.1)
}
ENTRY %entry {
%p0 = f32[10,10] parameter(0)
%p0.copy = f32[10,10] copy(f32[10,10] %p0)
%p1 = f32[10,10] parameter(1)
%zero = u32[] constant(0)
%init = (u32[], f32[10,10]) tuple(u32[] %zero, f32[10,10] %p0.copy)
%while = (u32[], f32[10,10]) while((u32[], f32[10,10]) %init),
body=%body, condition=%cond
%res = f32[10,10] get-tuple-element((u32[], f32[10,10]) %while), index=1
%prev = f32[10,10] get-tuple-element((u32[], f32[10,10]) %init), index=1
%res.1 = f32[10,10] multiply(f32[10,10] %res, %prev)
ROOT %res_tuple = (f32[10,10]) tuple(f32[10,10] %res.1)
})";
auto while_is_sharded =
[this](HloModule* module, const HloSharding& sharding,
absl::Span<const absl::Span<const OpMetadata>> sharding_metadata) {
if (GetParam().clear_metadata) {
ClearMetadata(module);
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module));
EXPECT_TRUE(changed);
auto while_instr = FindInstruction(module, "while");
ASSERT_NE(nullptr, while_instr);
std::vector<const HloInstruction*> instructions{
while_instr, while_instr->while_body()->root_instruction(),
while_instr->while_body()->parameter_instruction(0),
while_instr->while_condition()->parameter_instruction(0)};
for (auto instr : instructions) {
ASSERT_TRUE(instr->has_sharding());
EXPECT_EQ(sharding, instr->sharding());
ASSERT_EQ(instr->sharding().tuple_elements().size(),
sharding_metadata.size());
for (int i = 0, e = sharding_metadata.size(); i < e; ++i) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instr->sharding().tuple_elements()[i],
ShardingMetadata(sharding_metadata[i]));
} else {
EXPECT_THAT(instr->sharding().tuple_elements()[i],
ShardingMetadata({}));
}
}
}
};
{
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto body_root = FindInstruction(module.get(), "tuple");
ASSERT_NE(nullptr, body_root);
auto sharding = ParseSharding(
"{{replicated metadata={op_name=\"b\"}}, "
"{devices=[2,1]0,1 metadata={op_name=\"c\"}}}")
.value();
body_root->set_sharding(sharding);
while_is_sharded(module.get(), sharding.WithoutMetadata(),
{{CreateMetadata("b")}, {CreateMetadata("c")}});
}
{
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto acc_1 = FindInstruction(module.get(), "acc.1");
ASSERT_NE(nullptr, acc_1);
acc_1->set_sharding(
ParseSharding("{devices=[2,1]0,1 metadata={op_name=\"b\"}}").value());
while_is_sharded(
module.get(),
ParseSharding("{{replicated}, {devices=[2,1]0,1}}").value(),
{{}, {CreateMetadata("b")}});
}
{
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto acc_1 = FindInstruction(module.get(), "acc.1");
ASSERT_NE(nullptr, acc_1);
acc_1->set_sharding(
ParseSharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate "
"metadata={op_name=\"b\"}}")
.value());
auto p0 = FindInstruction(module.get(), "p0");
ASSERT_NE(nullptr, p0);
p0->set_sharding(
ParseSharding("{devices=[1,2,2]0,2,1,3 last_tile_dim_replicate "
"metadata={op_name=\"c\"}}")
.value());
while_is_sharded(module.get(),
ParseSharding("{{replicated}, "
"{devices=[2,2]0,1,2,3}}")
.value(),
{{}, {CreateMetadata("c"), CreateMetadata("b")}});
}
}
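// With allow_spmd_sharding_propagation_to_output enabled, the manual+replicated
// input sharding reaches every instruction, including those in the while
// condition.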
TEST_F(ShardingPropagationTest, PropagateShardingInWhileCondition) {
const char* const hlo_string = R"(
HloModule module
%cond {
%vars.cond = (u32[], f32[]) parameter(0)
%count.cond = u32[] get-tuple-element(%vars.cond), index=0
%limit = u32[] constant(10)
ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
%vars = (u32[], f32[]) parameter(0)
%count = u32[] get-tuple-element(%vars), index=0
%acc = f32[] get-tuple-element(%vars), index=1
%one = u32[] constant(1)
%count.1 = u32[] add(u32[] %count, u32[] %one)
%acc.1 = f32[] add(f32[] %acc, f32[] %acc)
ROOT %tuple = (u32[], f32[]) tuple(%count.1, %acc.1)
}
ENTRY %entry {
%p0 = f32[] parameter(0), sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}}
%zero = u32[] constant(0), sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}}
%init = (u32[], f32[]) tuple(%zero, %p0)
ROOT %while = (u32[], f32[]) while(%init), body=%body, condition=%cond
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/false, /*propagate_metadata=*/false,
                    /*allow_spmd_sharding_propagation_to_output=*/{true})
.Run(module.get()));
EXPECT_TRUE(changed);
HloSharding single_sharding =
ParseSharding("{devices=[2,2]<=[4] last_tile_dims={manual, replicated}}")
.value();
HloSharding tuple_sharding = HloSharding::SingleTuple(
module->entry_computation()->root_instruction()->shape(),
single_sharding);
for (const HloComputation* computation : module->computations()) {
for (const HloInstruction* instruction : computation->instructions()) {
EXPECT_TRUE(instruction->has_sharding());
EXPECT_EQ(instruction->sharding(), instruction->shape().IsTuple()
? tuple_sharding
: single_sharding);
}
}
}
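// A maximal-device sharding on the recv inside the body fixes the sharding of
// the whole loop; metadata from channel instructions is not propagated.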
TEST_P(ParameterizedMetadataTest, WhileGetShardingFromRecvInBody) {
const char* const hlo_string = R"(
HloModule module
%cond {
%vars.cond = (u32[], f32[]) parameter(0)
%count.cond = u32[] get-tuple-element(%vars.cond), index=0
%limit = u32[] constant(10)
ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
%param = (u32[], f32[]) parameter(0)
%count = u32[] get-tuple-element(%param), index=0
%after-all = token[] after-all()
%recv = (f32[], u32[], token[]) recv(%after-all), channel_id=1,
sharding={{maximal device=1 metadata={op_name="a"}},
{maximal device=1}, {maximal device=1}}
%recv-done = (f32[], token[]) recv-done(%recv), channel_id=1
%data = f32[] get-tuple-element(%recv-done), index=0
ROOT %tuple = (u32[], f32[]) tuple(%count, %data)
}
ENTRY %entry {
%p0 = f32[] parameter(0)
%zero = u32[] constant(0)
%init = (u32[], f32[]) tuple(%zero, %p0)
%while = (u32[], f32[]) while(%init), body=%body, condition=%cond
ROOT %result = f32[] get-tuple-element(%while), index=1
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
EXPECT_EQ(changed,
!GetParam().propagate_metadata && !GetParam().clear_metadata);
auto sharding =
ParseSharding("{{maximal device=1}, {maximal device=1}}").value();
auto while_instr = FindInstruction(module.get(), "while");
ASSERT_NE(nullptr, while_instr);
std::vector<const HloInstruction*> instructions{
while_instr, while_instr->while_body()->root_instruction(),
while_instr->while_body()->parameter_instruction(0),
while_instr->while_condition()->parameter_instruction(0)};
for (auto instr : instructions) {
ASSERT_TRUE(instr->has_sharding());
EXPECT_EQ(sharding, instr->sharding());
for (const HloSharding& sub_sharding : instr->sharding().tuple_elements()) {
EXPECT_THAT(sub_sharding, ShardingMetadata({}));
}
}
}
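// The next three tests verify that a device assignment conflicting with a
// channel instruction is reported as an error instead of being propagated.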
TEST_P(ParameterizedMetadataTest, WhileConflictingShardingInBodyBeforeRecv) {
const char* const hlo_string = R"(
HloModule module
%cond {
%vars.cond = (u32[], f32[]) parameter(0)
%count.cond = u32[] get-tuple-element(%vars.cond), index=0
%limit = u32[] constant(10)
ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
%param = (u32[], f32[]) parameter(0)
%count = u32[] get-tuple-element(%param), index=0,
sharding={maximal device=0 metadata={op_name="a"}}
%after-all = token[] after-all()
%recv = (f32[], u32[], token[]) recv(%after-all), channel_id=1,
sharding={{maximal device=1 metadata={op_name="b"}},
{maximal device=1}, {maximal device=1}}
%recv-done = (f32[], token[]) recv-done(%recv), channel_id=1
%data = f32[] get-tuple-element(%recv-done), index=0
ROOT %tuple = (u32[], f32[]) tuple(%count, %data)
}
ENTRY %entry {
%p0 = f32[] parameter(0)
%zero = u32[] constant(0)
%init = (u32[], f32[]) tuple(%zero, %p0)
%while = (u32[], f32[]) while(%init), body=%body, condition=%cond
ROOT %result = f32[] get-tuple-element(%while), index=1
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
auto result =
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get());
EXPECT_THAT(result.status().message(),
::testing::HasSubstr(
"Instruction: count is on device: 0, which conflicts with "
"device: 1 of channel instruction: recv"));
}
TEST_P(ParameterizedMetadataTest, WhileConflictingShardingInBodyAfterRecv) {
const char* const hlo_string = R"(
HloModule module
%cond {
%vars.cond = (u32[], f32[]) parameter(0)
%count.cond = u32[] get-tuple-element(%vars.cond), index=0
%limit = u32[] constant(10)
ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
%param = (u32[], f32[]) parameter(0)
%count = u32[] get-tuple-element(%param), index=0
%after-all = token[] after-all()
%recv = (f32[], u32[], token[]) recv(%after-all), channel_id=1,
sharding={{maximal device=1 metadata={op_name="a"}},
{maximal device=1}, {maximal device=1}}
%recv-done = (f32[], token[]) recv-done(%recv), channel_id=1
%data = f32[] get-tuple-element(%recv-done), index=0,
sharding={maximal device=0 metadata={op_name="b"}}
ROOT %tuple = (u32[], f32[]) tuple(%count, %data)
}
ENTRY %entry {
%p0 = f32[] parameter(0)
%zero = u32[] constant(0)
%init = (u32[], f32[]) tuple(%zero, %p0)
%while = (u32[], f32[]) while(%init), body=%body, condition=%cond
ROOT %result = f32[] get-tuple-element(%while), index=1
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
auto result =
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get());
EXPECT_THAT(result.status().message(),
::testing::HasSubstr(
"Instruction: data is on device: 0, which conflicts with "
"device: 1 of channel instruction: recv"));
}
TEST_P(ParameterizedMetadataTest, WhileConflictingShardingOnWhileInstruction) {
const char* const hlo_string = R"(
HloModule module
%cond {
%vars.cond = (u32[], f32[]) parameter(0)
%count.cond = u32[] get-tuple-element(%vars.cond), index=0
%limit = u32[] constant(10)
ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
%param = (u32[], f32[]) parameter(0)
%count = u32[] get-tuple-element(%param), index=0
%after-all = token[] after-all()
%recv = (f32[], u32[], token[]) recv(%after-all), channel_id=1,
sharding={{maximal device=1 metadata={op_name="a"}},
{maximal device=1}, {maximal device=1}}
%recv-done = (f32[], token[]) recv-done(%recv), channel_id=1
%data = f32[] get-tuple-element(%recv-done), index=0
ROOT %tuple = (u32[], f32[]) tuple(%count, %data)
}
ENTRY %entry {
%p0 = f32[] parameter(0)
%zero = u32[] constant(0)
%init = (u32[], f32[]) tuple(%zero, %p0)
%while = (u32[], f32[]) while(%init), body=%body, condition=%cond,
sharding={{maximal device=0 metadata={op_name="b"}},{maximal device=0}}
ROOT %result = f32[] get-tuple-element(%while), index=1
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
auto result =
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get());
EXPECT_THAT(result.status().message(),
::testing::HasSubstr(
"Instruction: while is on device: 0, which conflicts with "
"device: 1 of channel instruction: recv"));
}
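// Propagation through dynamic-slice and reshape inside the while body recovers
// a partially replicated sharding for the convolution kernel.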
TEST_P(ParameterizedMetadataTest, WhileConv) {
const char* const hlo_string = R"(
HloModule module
%cond {
%vars.cond = (u32[], bf16[2, 2048, 768], bf16[128,512,2048], bf16[128,512,768], s32[]) parameter(0)
%count.cond = u32[] get-tuple-element(%vars.cond), index=0
%limit = u32[] constant(2)
ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
%param = (u32[], bf16[2, 2048, 768], bf16[128,512,2048], bf16[128,512,768], s32[]) parameter(0)
%i0 = s32[] constant(0)
%count = u32[] get-tuple-element(%param), index=0
%gte0 = bf16[2,2048,768]{2,1,0}
get-tuple-element(%param), index=1
%index = s32[] get-tuple-element(%param), index=4
%dys = bf16[1,2048,768]{2,1,0} dynamic-slice(%gte0, s32[] %index, s32[] %i0, s32[] %i0),
dynamic_slice_sizes={1,2048,768}
%kernel = bf16[2048, 768]{1,0}
reshape(%dys)
%lhs = bf16[128,512,2048]{2,1,0}
get-tuple-element(%param), index=2,
sharding={devices=[8,1,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
%reshape = bf16[2048,768,1]{2,1,0} reshape(bf16[2048,768]{1,0} %kernel)
%convolution = bf16[128,512,768]{2,1,0}
convolution(bf16[128,512,2048]{2,1,0} %lhs,
bf16[2048,768,1]{2,1,0} %reshape), window={size=1},
dim_labels=0bf_io0->0bf, sharding={devices=[8,1,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
ROOT %tuple = (u32[], bf16[2,2048,768], bf16[128,512,2048], bf16[128,512,768], s32[]) tuple(%count, %gte0, %lhs, %convolution, %index)
}
ENTRY %entry {
%p0 = bf16[2048,768] parameter(0),
sharding={devices=[2,1,8]0,2,4,6,8,10,12,14,1,3,5,7,9,11,13,15 last_tile_dim_replicate}
%p1 = bf16[128,512,2048] parameter(1),
sharding={devices=[8,1,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
%p2 = bf16[128,512,768] parameter(2)
%reshape0 = bf16[1,2048,768] reshape(%p0)
%concat0 = bf16[2,2048,768] concatenate(%reshape0, %reshape0), dimensions={0}
%zero = u32[] constant(0)
%p3 = s32[] parameter(3)
%init = (u32[], bf16[2, 2048, 768], bf16[128,512,2048], bf16[128,512,768], s32[]) tuple(%zero, %concat0, %p1, %p2, %p3)
%while = (u32[], bf16[2, 2048, 768], bf16[128,512,2048], bf16[128,512,768], s32[]) while(%init), body=%body, condition=%cond
ROOT %result = bf16[128,512,768] get-tuple-element(%while), index=3, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* kernel = FindInstruction(module.get(), "kernel");
ASSERT_NE(kernel, nullptr);
EXPECT_THAT(kernel, op::Sharding("{devices=[2,1,8]0,2,4,6,8,10,12,14,1,3,5,"
"7,9,11,13,15 last_tile_dim_replicate}"));
}
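// The next three tests verify that the user's fully tiled sharding, rather than
// the operand's partially replicated one, is chosen for the concatenate or
// dynamic-slice.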
TEST_P(ParameterizedMetadataTest, DoNotPassThroughConcatAtFirstIteration) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%p0 = bf16[16,2048,768] parameter(0),
sharding={devices=[2,1,1,8]0,2,4,6,8,10,12,14,1,3,5,7,9,11,13,15 last_tile_dim_replicate}
%concat = bf16[32,2048,768] concatenate(%p0, %p0), dimensions={0}
%add = bf16[32,2048,768] add(%concat, %concat),
sharding={devices=[8,1,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
ROOT %result = bf16[32,2048,768] copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* kernel = FindInstruction(module.get(), "concat");
ASSERT_NE(kernel, nullptr);
EXPECT_THAT(kernel, op::Sharding("{devices=[8,1,2]0,1,2,3,4,5,6,7,8,"
"9,10,11,12,13,14,15}"));
}
TEST_P(ParameterizedMetadataTest, DoNotPassThroughConcatAtFirstIteration2) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%p0 = bf16[16,2048,768] parameter(0),
sharding={devices=[1,2,1,8]0,2,4,6,8,10,12,14,1,3,5,7,9,11,13,15 last_tile_dim_replicate}
%concat = bf16[32,2048,768] concatenate(%p0, %p0), dimensions={0}
%add = bf16[32,2048,768] add(%concat, %concat),
sharding={devices=[8,1,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
ROOT %result = bf16[32,2048,768] copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* kernel = FindInstruction(module.get(), "concat");
ASSERT_NE(kernel, nullptr);
EXPECT_THAT(kernel, op::Sharding("{devices=[8,1,2]0,1,2,3,4,5,6,7,8,"
"9,10,11,12,13,14,15}"));
}
TEST_P(ParameterizedMetadataTest,
DoNotPassThroughDynamicSliceAtFirstIteration) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%p0 = bf16[64,2048,768] parameter(0),
sharding={devices=[2,1,1,8]0,2,4,6,8,10,12,14,1,3,5,7,9,11,13,15 last_tile_dim_replicate}
%p1 = s32[] parameter(1)
%i0 = s32[] constant(0)
%dys = bf16[32,2048,768] dynamic-slice(%p0, s32[] %p1, s32[] %i0, s32[] %i0),
dynamic_slice_sizes={32,2048,768}
%add = bf16[32,2048,768] add(%dys, %dys),
sharding={devices=[8,1,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}
ROOT %result = bf16[32,2048,768] copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* kernel = FindInstruction(module.get(), "dys");
ASSERT_NE(kernel, nullptr);
EXPECT_THAT(kernel, op::Sharding("{devices=[8,1,2]0,1,2,3,4,5,6,7,8,"
"9,10,11,12,13,14,15}"));
}
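// Dot propagation in both directions: operand shardings on non-contracting
// dimensions flow forward, and a user sharding flows backward through both
// operands.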
TEST_P(ParameterizedMetadataTest, Dot) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%param.0 = f32[8,256,128] parameter(0)
%param.1 = f32[8,128,512] parameter(1)
%param.2 = f32[8,128] parameter(2)
%p0_copy_0 = f32[8,256,128] copy(%param.0),
sharding={devices=[1,4,1]0,1,2,3 metadata={op_name="a"}}
%p1_copy_0 = f32[8,128,512] copy(%param.1),
sharding={devices=[1,1,4]0,1,2,3 metadata={op_name="b"}}
%p2_copy = f32[8,128] copy(%param.2)
%dot_prop_rhs = f32[8,256,512] dot(%p0_copy_0, %p1_copy_0),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}
%dot_prop_lhs = f32[8,512,256] dot(%p1_copy_0, %p0_copy_0),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_contracting_dims={2}
%dot_mat_vec = f32[8,256] dot(%p0_copy_0, %p2_copy),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}
%p0_copy_1 = f32[8,256,128] copy(%param.0)
%p1_copy_1 = f32[8,128,512] copy(%param.1)
%dot_back_prop_rhs = f32[8,256,512] dot(%p0_copy_1, %p1_copy_1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}
%copy_back_prop_rhs = f32[8,256,512] copy(%dot_back_prop_rhs),
sharding={devices=[1,2,2]0,1,2,3 metadata={op_name="c"}}
ROOT %tuple = (f32[8,512,256], f32[8,256,512], f32[8,256], f32[8,256,512])
tuple(%dot_prop_lhs, %dot_prop_rhs, %dot_mat_vec, %copy_back_prop_rhs)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* dot_prop_rhs = FindInstruction(module.get(), "dot_prop_rhs");
ASSERT_NE(dot_prop_rhs, nullptr);
EXPECT_THAT(dot_prop_rhs, op::Sharding("{devices=[1,1,4]0,1,2,3}"));
auto* dot_prop_lhs = FindInstruction(module.get(), "dot_prop_lhs");
ASSERT_NE(dot_prop_lhs, nullptr);
EXPECT_THAT(dot_prop_lhs, op::Sharding("{devices=[1,4,1]0,1,2,3}"));
auto* dot_mat_vec = FindInstruction(module.get(), "dot_mat_vec");
ASSERT_NE(dot_mat_vec, nullptr);
EXPECT_THAT(dot_mat_vec, op::Sharding("{devices=[1,4]0,1,2,3}"));
auto* p0_copy_1 = FindInstruction(module.get(), "p0_copy_1");
ASSERT_NE(p0_copy_1, nullptr);
EXPECT_THAT(
p0_copy_1,
op::Sharding("{devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}"));
auto* p1_copy_1 = FindInstruction(module.get(), "p1_copy_1");
ASSERT_NE(p1_copy_1, nullptr);
EXPECT_THAT(
p1_copy_1,
op::Sharding("{devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}"));
auto* dot_back_prop_rhs = FindInstruction(module.get(), "dot_back_prop_rhs");
ASSERT_NE(dot_back_prop_rhs, nullptr);
EXPECT_THAT(dot_back_prop_rhs, op::Sharding("{devices=[1,2,2]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(dot_prop_rhs->sharding(),
ShardingMetadata({CreateMetadata("b")}));
EXPECT_THAT(dot_prop_lhs->sharding(),
ShardingMetadata({CreateMetadata("b")}));
EXPECT_THAT(dot_mat_vec->sharding(),
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(p0_copy_1->sharding(), ShardingMetadata({CreateMetadata("c")}));
EXPECT_THAT(p1_copy_1->sharding(), ShardingMetadata({CreateMetadata("c")}));
EXPECT_THAT(dot_back_prop_rhs->sharding(),
ShardingMetadata({CreateMetadata("c")}));
} else {
for (HloInstruction* instruction :
{dot_prop_rhs, dot_prop_lhs, dot_mat_vec, p0_copy_1, p1_copy_1,
dot_back_prop_rhs}) {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
TEST_P(ParameterizedMetadataTest, DotTiledBatchDim) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%p0 = f32[8,256,512] parameter(0)
%p1 = f32[8,512,128] parameter(1)
%add = f32[8,256,512] add(%p0, %p0)
%dot = f32[8,256,128] dot(%add, %p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}
%res = f32[8,32768] reshape(%dot),
sharding={devices=[2,2]0,1,2,3 metadata={op_name="a"}}
ROOT %tuple = (f32[8,32768]) tuple(%res)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "add");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,2,1]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
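// The next three tests merge compatible partial shardings from both dot
// operands into a single sharding on the dot.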
TEST_P(ParameterizedMetadataTest, DotMergeOperands) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%p0 = f32[8,256,512] parameter(0),
sharding={devices=[2,2,1,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="a"}}
%p1 = f32[8,128,512] parameter(1),
sharding={devices=[2,2,1,2]0,2,1,3,4,6,5,7 last_tile_dim_replicate metadata={op_name="b"}}
%dot = f32[8,256,128] dot(%p0, %p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2}
ROOT %copy = f32[8,256,128] copy(%dot)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "dot");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,2,2]0,1,2,3,4,5,6,7}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b"), CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, DotMergeOperands2) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%p0 = f32[8,256,512] parameter(0),
sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
%p1 = f32[8,128,512] parameter(1),
sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 metadata={op_name="b"}}
%dot = f32[8,256,128] dot(%p0, %p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2}
ROOT %copy = f32[8,256,128] copy(%dot)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "dot");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding(
"{devices=[2,2,1,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, DotMergeOperands3) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%p0 = f32[256,512] parameter(0),
sharding={devices=[2,4]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
%p1 = f32[128,512] parameter(1),
sharding={devices=[4,2]0,4,2,6,3,7,1,5 metadata={op_name="b"}}
%dot = f32[256,128] dot(%p0, %p1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
ROOT %copy = f32[256,128] copy(%dot)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "dot");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,4]0,2,3,1,4,6,7,5}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b"), CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
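// ShardBarrierFrom on the LHS stops forward propagation; the dot must stay
// unsharded.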
TEST_P(ParameterizedMetadataTest, ForwardDotWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%p0 = f32[8,256,512] parameter(0),
sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
%p1 = f32[8,128,512] parameter(1)
%shard-barrier-from = f32[8,256,512] custom-call(%p0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%dot = f32[8,256,128] dot(%shard-barrier-from, %p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2}
ROOT %copy = f32[8,256,128] copy(%dot)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "dot");
ASSERT_NE(instruction, nullptr);
EXPECT_FALSE(instruction->has_sharding());
}
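// ShardBarrierTo blocks the dot's tiled sharding from propagating backward to
// %copy1, which only becomes replicated.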
TEST_P(ParameterizedMetadataTest, BackwardDotWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%p0 = f32[8,256,512] parameter(0),
sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
%p1 = f32[8,128,512] parameter(1)
%copy1 = f32[8,128,512] copy(%p1)
%shard-barrier-to = f32[8,128,512] custom-call(%copy1), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
%dot = f32[8,256,128] dot(%p0, %shard-barrier-to),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2},
sharding={devices=[2,1,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="b"}}
ROOT %copy = f32[8,256,128] copy(%dot)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "copy1");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{replicated}"));
}
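// Without a barrier, the dot's sharding combines with the LHS
// contracting-dimension sharding and propagates backward onto the RHS copy.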
TEST_P(ParameterizedMetadataTest, BackwardDotFromContracting) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%p0 = f32[8,256,512] parameter(0),
sharding={devices=[2,2,2]0,1,2,3,4,5,6,7 metadata={op_name="a"}}
%p1 = f32[8,128,512] parameter(1)
%copy1 = f32[8,128,512] copy(%p1)
%dot = f32[8,256,128] dot(%p0, %copy1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2},
sharding={devices=[2,1,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="b"}}
ROOT %copy = f32[8,256,128] copy(%dot)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "copy1");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,2,2]0,1,2,3,4,5,6,7}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a"), CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, BackwardDotFromContractingWithManual) {
const char* const hlo_string = R"(
HloModule module
ENTRY %dot {
%p0 = f32[8,512] parameter(0),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}}
%p1 = f32[512,128] parameter(1)
%copy1 = f32[512,128] copy(%p1)
%dot = f32[8,128] dot(%p0, %copy1),
lhs_batch_dims={}, rhs_batch_dims={},
lhs_contracting_dims={1}, rhs_contracting_dims={0},
sharding={devices=[1,1,2,2]0,1,2,3 last_tile_dims={replicated, manual} metadata={op_name="b"}}
ROOT %copy = f32[8,128] copy(%dot)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "copy1");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dims={manual}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
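// A convolution equivalent to a dot: the RHS sharding propagates forward onto
// the matching output dimension.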
TEST_P(ParameterizedMetadataTest, ConvAsDotOnTrivialDimsForward) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%lhs = f32[128,1,1,1001] parameter(0),
sharding={devices=[1,2,1,1]0,1 metadata={op_name="a"}}
%rhs = f32[1,1,1024,1001] parameter(1),
sharding={devices=[1,2,1,1]0,1 metadata={op_name="b"}}
%convolution = f32[128,1,1,1024] convolution(%lhs, %rhs),
window={size=1x1 rhs_reversal=1x1}, dim_labels=b01f_01oi->b01f
ROOT %copy = f32[128,1,1,1024] copy(%convolution)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "convolution");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,1,2,1]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, ConvAsDotForwardWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%lhs = f32[128,1,1,1001] parameter(0),
sharding={devices=[1,2,1,1]0,1 metadata={op_name="a"}}
%rhs = f32[1,1,1024,1001] parameter(1),
sharding={devices=[1,2,1,1]0,1 metadata={op_name="b"}}
%shard-barrier-from = f32[1,1,1024,1001] custom-call(%rhs), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%convolution = f32[128,1,1,1024] convolution(%lhs, %shard-barrier-from),
window={size=1x1 rhs_reversal=1x1}, dim_labels=b01f_01oi->b01f
ROOT %copy = f32[128,1,1,1024] copy(%convolution)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "convolution");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{replicated}"));
}
TEST_P(ParameterizedMetadataTest, ConvAsDotOnTrivialDimsBackward) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%p0 = f32[128,5,5,128] parameter(0)
%lhs = f32[128,5,5,128] copy(%p0)
%p1 = f32[5,5,128,768] parameter(1)
%rhs = f32[5,5,128,768] copy(%p1)
%convolution = f32[128,1,1,768] convolution(%lhs, %rhs), window={size=5x5},
dim_labels=b01f_01io->b01f,
sharding={devices=[1,2,1,1]0,1 metadata={op_name="a"}}
ROOT %copy = f32[128,1,1,768] copy(%convolution)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* lhs = FindInstruction(module.get(), "lhs");
ASSERT_NE(lhs, nullptr);
auto* rhs = FindInstruction(module.get(), "rhs");
ASSERT_NE(rhs, nullptr);
for (HloInstruction* instruction : {lhs, rhs}) {
EXPECT_THAT(instruction, op::Sharding("{replicated}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
TEST_P(ParameterizedMetadataTest, ConvAsDotBackwardWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%p0 = f32[128,5,5,128] parameter(0)
%lhs = f32[128,5,5,128] copy(%p0)
%p1 = f32[5,5,128,768] parameter(1)
%rhs = f32[5,5,128,768] copy(%p1)
%shard-barrier-from = f32[128,5,5,128] custom-call(%lhs), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%convolution = f32[128,1,1,768] convolution(%shard-barrier-from, %rhs), window={size=5x5},
dim_labels=b01f_01io->b01f,
sharding={devices=[1,2,1,1]0,1 metadata={op_name="a"}}
ROOT %copy = f32[128,1,1,768] copy(%convolution)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* lhs = FindInstruction(module.get(), "lhs");
ASSERT_NE(lhs, nullptr);
EXPECT_THAT(lhs, op::Sharding("{replicated}"));
}
TEST_P(ParameterizedMetadataTest,
ConvolutionFilterIFOFPartitionedInputPartialReplicate) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[128,112,112,12] parameter(0)
%lhs.copy = f32[128,112,112,12] copy(f32[128,112,112,12] %lhs),
sharding={devices=[1,1,1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
%rhs = f32[7,7,12,64] parameter(1)
%rhs.copy = f32[7,7,12,64] copy(f32[7,7,12,64] %rhs),
sharding={devices=[1,1,2,2]0,1,2,3 metadata={op_name="b"}}
%conv = f32[128,56,56,64] convolution(
f32[128,112,112,12] %lhs.copy,
f32[7,7,12,64] %rhs.copy),
window={size=7x7 stride=2x2 pad=3_3x3_3},
dim_labels=b01f_01io->b01f
ROOT %copy = f32[128,56,56,64] copy(conv)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "conv");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding("{devices=[1,1,1,2,2]0,2,1,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
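// For a feature-grouped convolution only the batch component of the LHS
// sharding is kept; the remaining dimensions become replicated.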
TEST_P(ParameterizedMetadataTest, ConvolutionDataParallelism) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
p0 = f32[256,512,16,32] parameter(0), sharding={devices=[2,2,2,2]<=[16] metadata={op_name="lhs_sharding"}}
p1 = f32[512,1,12,28] parameter(1), sharding={replicated metadata={op_name="rhs_sharding"}}
conv = f32[256,512,5,5] convolution(p0, p1), window={size=12x28}, dim_labels=bf01_oi01->bf01, feature_group_count=512
ROOT copy = f32[256,512,5,5] copy(conv)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "conv");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding("{devices=[2,1,1,1,8]<=[16] last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("lhs_sharding")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
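// Backward pass: the user's sharding on the non-concatenated dimension applies
// to each concatenate operand as-is.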
TEST_P(ParameterizedMetadataTest, ConcatFromUserUnshardedDim) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%p0 = f32[8,128] parameter(0)
%p1 = f32[8,128] parameter(1)
%c0 = f32[8,128] copy(%p0)
%c1 = f32[8,128] copy(%p1)
%concat = f32[16,128] concatenate(%c0, %c1),
dimensions={0},
sharding={devices=[1,2]0,1 metadata={op_name="a"}}
ROOT %tuple = (f32[16,128]) tuple(%concat)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* c0 = FindInstruction(module.get(), "c0");
ASSERT_NE(c0, nullptr);
auto* c1 = FindInstruction(module.get(), "c1");
ASSERT_NE(c1, nullptr);
for (HloInstruction* instruction : {c0, c1}) {
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
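// When the concatenated dimension itself is sharded, each operand receives only
// the part of the tiling that covers its slice of the result.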
TEST_P(ParameterizedMetadataTest, ConcatFromUserShardedDim) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%p0 = f32[8,128] parameter(0)
%p1 = f32[8,128] parameter(1)
%c0 = f32[8,128] copy(%p0)
%c1 = f32[8,128] copy(%p1)
%concat = f32[16,128] concatenate(%c0, %c1),
dimensions={0},
sharding={devices=[3,1]0,1,2 metadata={op_name="a"}}
ROOT %tuple = (f32[16,128]) tuple(%concat)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* c0 = FindInstruction(module.get(), "c0");
ASSERT_NE(c0, nullptr);
EXPECT_THAT(c0, op::Sharding("{devices=[2,1]0,1}"));
auto* c1 = FindInstruction(module.get(), "c1");
ASSERT_NE(c1, nullptr);
EXPECT_THAT(c1, op::Sharding("{devices=[2,1]1,2}"));
for (HloInstruction* instruction : {c0, c1}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
TEST_P(ParameterizedMetadataTest, ConcatFromUserShardedDimMaximalOperand) {
const char* const hlo_string = R"(
HloModule module
ENTRY %conv {
%p0 = f32[8,128] parameter(0)
%p1 = f32[24,128] parameter(1)
%c0 = f32[8,128] copy(%p0)
%c1 = f32[24,128] copy(%p1)
%concat = f32[32,128] concatenate(%c0, %c1),
dimensions={0},
sharding={devices=[4,1]0,1,2,3 metadata={op_name="a"}}
ROOT %tuple = (f32[32,128]) tuple(%concat)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* c0 = FindInstruction(module.get(), "c0");
ASSERT_NE(c0, nullptr);
EXPECT_THAT(c0, op::NoSharding());
auto* c1 = FindInstruction(module.get(), "c1");
ASSERT_NE(c1, nullptr);
EXPECT_THAT(c1, op::Sharding("{devices=[3,1]1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(c1->sharding(), ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(c1->sharding(), ShardingMetadata({}));
}
}
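// Replicated operand shardings must not be forced onto a side-effecting rng
// instruction.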
TEST_P(ParameterizedMetadataTest, ReplicatedToSideEffecting) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry_computation {
%const.0 = s32[] constant(0),
sharding={replicated metadata={op_name="a"}}
%const.1 = s32[] constant(2147483647),
sharding={replicated metadata={op_name="b"}}
%rng = s32[4]{0} rng(%const.0, %const.1),
distribution=rng_uniform
ROOT %root = (s32[4]{0}) tuple(%rng)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
EXPECT_EQ(changed,
!GetParam().propagate_metadata && !GetParam().clear_metadata);
auto* instruction = FindInstruction(module.get(), "rng");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::NoSharding());
}
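// The matching prefix of a nested tuple user's sharding propagates backward
// onto the inner tuple.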
TEST_P(ParameterizedMetadataTest, PartReplicatedTupleUser) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry_computation {
%param.0 = f32[5] parameter(0)
%param.1 = f32[7] parameter(1)
%param.2 = f32[9] parameter(2)
%tuple.0 = (f32[5], f32[7]) tuple(%param.0, %param.1)
ROOT %tuple.1 = ((f32[5], f32[7]), f32[9]) tuple(%tuple.0, %param.2),
sharding={{maximal device=0 metadata={op_name="a"}},
{replicated metadata={op_name="b"}},
{maximal device=1 metadata={op_name="c"}}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "tuple.0");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{{maximal device=0}, {replicated}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(instruction->sharding().tuple_elements()[1],
ShardingMetadata({CreateMetadata("b")}));
} else {
for (const HloSharding& sub_sharding :
instruction->sharding().tuple_elements()) {
EXPECT_THAT(sub_sharding, ShardingMetadata({}));
}
}
}
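// Shardings flow through a conditional in both directions, into the branch
// computations and the subcomputations they call.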
TEST_P(ParameterizedMetadataTest, Conditional) {
const char* const hlo_string = R"(
HloModule module
%add-call {
%x = f32[4,4] parameter(0)
ROOT %add = f32[4,4] add(%x, %x)
}
%true_comp {
%tp = (f32[3,5], f32[4,4]) parameter(0)
%tgte.0 = f32[3,5] get-tuple-element(%tp), index=0
%ttr = f32[5,3] transpose(%tgte.0), dimensions={1,0}
%tgte.1 = f32[4,4] get-tuple-element(%tp), index=1
%tadd = f32[4,4] call(%tgte.1), to_apply=%add-call
ROOT %tr = (f32[5,3], f32[4,4]) tuple(%ttr, %tadd)
}
%mul-call {
%y = f32[4,4] parameter(0)
ROOT %mul = f32[4,4] multiply(%y, %y)
}
%false_comp {
%fp = (f32[5,3], f32[4,4]) parameter(0)
%fgte.0 = f32[5,3] get-tuple-element(%fp), index=0
%fgte.1 = f32[4,4] get-tuple-element(%fp), index=1
%fmul = f32[4,4] call(%fgte.1), to_apply=%mul-call
ROOT %fr = (f32[5,3], f32[4,4]) tuple(%fgte.0, %fmul)
}
ENTRY entry {
%cond = pred[] parameter(0)
%tp.0 = f32[3,5] parameter(1), sharding={devices=[1,2]0,1 metadata={op_name="a"}}
%fp.0 = f32[5,3] parameter(2), sharding={devices=[1,3]0,1,2 metadata={op_name="b"}}
%constant = f32[4] constant({1,2,3,4}), sharding={devices=[4]0,1,2,3 metadata={op_name="c"}}
%broadcast = f32[4,4] broadcast(%constant), dimensions={1}
%add = f32[4,4] add(%broadcast, %broadcast)
%true_param = (f32[3,5], f32[4,4]) tuple(%tp.0, %add)
%false_param = (f32[5,3], f32[4,4]) tuple(%fp.0, %add)
%conditional = (f32[5,3], f32[4,4]) conditional(
%cond, %true_param, %false_param),
true_computation=%true_comp,
false_computation=%false_comp
ROOT %root = f32[5,3] get-tuple-element(%conditional), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* tp = FindInstruction(module.get(), "tp");
auto* tgte_0 = FindInstruction(module.get(), "tgte.0");
auto* ttr = FindInstruction(module.get(), "ttr");
auto* tgte_1 = FindInstruction(module.get(), "tgte.1");
auto* tadd = FindInstruction(module.get(), "tadd");
auto* tr = FindInstruction(module.get(), "tr");
auto* fp = FindInstruction(module.get(), "fp");
auto* fgte_0 = FindInstruction(module.get(), "fgte.0");
auto* fgte_1 = FindInstruction(module.get(), "fgte.1");
auto* fmul = FindInstruction(module.get(), "fmul");
auto* fr = FindInstruction(module.get(), "fr");
auto* x = FindInstruction(module.get(), "x");
auto* add = FindInstruction(module.get(), "add");
auto* y = FindInstruction(module.get(), "y");
auto* mul = FindInstruction(module.get(), "mul");
auto* conditional = FindInstruction(module.get(), "conditional");
const std::vector<HloInstruction*> instructions(
{tp, tgte_0, ttr, tgte_1, tadd, tr, fp, fgte_0, fgte_1, fmul, fr, x, add,
y, mul, conditional});
for (HloInstruction* instruction : instructions) {
EXPECT_NE(instruction, nullptr);
EXPECT_TRUE(instruction->has_sharding());
}
for (HloInstruction* instruction :
{tgte_1, tadd, fgte_1, fmul, x, add, y, mul}) {
EXPECT_THAT(instruction, op::Sharding("{devices=[1,4]0,1,2,3}"));
}
for (HloInstruction* instruction : {tr, fr, conditional, fp}) {
EXPECT_THAT(instruction,
op::Sharding("{{devices=[1,3]0,1,2}, {devices=[1,4]0,1,2,3}}"));
}
EXPECT_THAT(tp, op::Sharding("{{devices=[1,2]0,1}, {devices=[1,4]0,1,2,3}}"));
EXPECT_THAT(tgte_0, op::Sharding("{devices=[1,2]0,1}"));
EXPECT_THAT(ttr, op::Sharding("{devices=[2,1]0,1}"));
EXPECT_THAT(fgte_0, op::Sharding("{devices=[1,3]0,1,2}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
for (HloInstruction* instruction :
{tgte_1, tadd, fgte_1, fmul, x, add, y, mul}) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("c")}));
}
for (HloInstruction* instruction : {tr, fr, conditional, fp}) {
const std::vector<HloSharding>& shardings =
instruction->sharding().tuple_elements();
EXPECT_THAT(shardings[0], ShardingMetadata({CreateMetadata("b")}));
EXPECT_THAT(shardings[1], ShardingMetadata({CreateMetadata("c")}));
}
for (HloInstruction* instruction : {tgte_0, ttr}) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
}
EXPECT_THAT(fgte_0->sharding(), ShardingMetadata({CreateMetadata("b")}));
} else {
for (HloInstruction* instruction : instructions) {
if (instruction->sharding().IsTuple()) {
for (const HloSharding& tuple_element :
instruction->sharding().tuple_elements()) {
EXPECT_THAT(tuple_element, ShardingMetadata({}));
}
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
}
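// The root tuple's element shardings propagate backward through the
// get-tuple-element chain onto the tuples that feed it.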
TEST_P(ParameterizedMetadataTest, TupleFromUser) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%p0 = f32[13] parameter(0)
%p1 = f32[15] parameter(1)
%p2 = f32[17] parameter(2)
%t0 = (f32[13], f32[15]) tuple(%p0, %p1)
%t1 = ((f32[13], f32[15]), f32[17]) tuple(%t0, %p2)
%gte.0 = (f32[13], f32[15]) get-tuple-element(%t1), index=0
%gte.1 = f32[13] get-tuple-element(%gte.0), index=0
%gte.2 = f32[15] get-tuple-element(%gte.0), index=1
%gte.3 = f32[17] get-tuple-element(%t1), index=1
ROOT %t2 = (f32[13], f32[15], f32[17]) tuple(%gte.1, %gte.2, %gte.3),
sharding={{replicated metadata={op_name="a"}},
{devices=[2]0,1 metadata={op_name="b"}},
{devices=[3]1,2,3 metadata={op_name="c"}}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* t0 = FindInstruction(module.get(), "t0");
ASSERT_NE(t0, nullptr);
EXPECT_THAT(t0, op::Sharding("{{replicated}, {devices=[2]0,1}}"));
auto* t1 = FindInstruction(module.get(), "t1");
ASSERT_NE(t1, nullptr);
EXPECT_THAT(
t1, op::Sharding("{{replicated}, {devices=[2]0,1}, {devices=[3]1,2,3}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(t0->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(t0->sharding().tuple_elements()[1],
ShardingMetadata({CreateMetadata("b")}));
EXPECT_THAT(t1->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(t1->sharding().tuple_elements()[1],
ShardingMetadata({CreateMetadata("b")}));
EXPECT_THAT(t1->sharding().tuple_elements()[2],
ShardingMetadata({CreateMetadata("c")}));
} else {
for (HloInstruction* instruction : {t0, t1}) {
for (const HloSharding& sub_sharding :
instruction->sharding().tuple_elements()) {
EXPECT_THAT(sub_sharding, ShardingMetadata({}));
}
}
}
}
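// A "ShardBarrierFrom" custom call between the sharded copy and the
// dynamic-slice should block forward propagation, so %ds stays unsharded.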
TEST_P(ParameterizedMetadataTest, DynamicSliceForwardPassWithBarrier) {
const char* hlo_string = R"(
HloModule module
ENTRY %entry {
%p0 = f32[11,13,15] parameter(0)
%c0 = f32[11,13,15] copy(%p0),
sharding={devices=[1,1,2]0,1 metadata={op_name="a"}}
%p1 = s32[] parameter(1)
%i0 = s32[] constant(0)
%shard-barrier-from = f32[11,13,15] custom-call(%c0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%ds = f32[11,1,15] dynamic-slice(%shard-barrier-from, %i0, %p1, %i0),
dynamic_slice_sizes={11,1,15}
ROOT %root = (f32[11,1,15]) tuple(%ds)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "ds");
ASSERT_NE(instruction, nullptr);
EXPECT_FALSE(instruction->has_sharding());
}
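// Forward propagation through dynamic-slice: the operand sharding carries
// over to the slice result, with the sliced dimension replicated.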
TEST_P(ParameterizedMetadataTest, DynamicSliceForwardPass) {
const char* hlo_string = R"(
HloModule module
ENTRY %entry {
%p0 = f32[11,13,15] parameter(0)
%c0 = f32[11,13,15] copy(%p0),
sharding={devices=[2,2,2]<=[8] metadata={op_name="a"}}
%p1 = s32[] parameter(1)
%i0 = s32[] constant(0)
%ds = f32[11,1,15] dynamic-slice(%c0, %i0, %p1, %i0),
dynamic_slice_sizes={11,1,15}
ROOT %root = (f32[11,1,15]) tuple(%ds)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "ds");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding(
"{devices=[2,1,2,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
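// Backward propagation through dynamic-slice: the sharding annotated on the
// slice maps back onto its data operand, again with the sliced dimension
// replicated.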
TEST_P(ParameterizedMetadataTest, DynamicSliceBackwardPass) {
const char* hlo_string = R"(
HloModule module
ENTRY %entry {
%p0 = f32[11,13,15] parameter(0)
%c0 = f32[11,13,15] copy(%p0)
%p1 = s32[] parameter(1)
%i0 = s32[] constant(0)
%ds = f32[11,1,15] dynamic-slice(%c0, %i0, %p1, %i0),
dynamic_slice_sizes={11,1,15},
sharding={devices=[2,2,2]<=[8] metadata={op_name="a"}}
ROOT %root = (f32[11,1,15]) tuple(%ds)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "c0");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding(
"{devices=[2,1,2,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
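// A "ShardBarrierTo" custom call in front of the dynamic-slice should block
// backward propagation, so the copy %c0 stays unsharded.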
TEST_P(ParameterizedMetadataTest, DynamicSliceBackwardPassWithBarrier) {
const char* hlo_string = R"(
HloModule module
ENTRY %entry {
%p0 = f32[11,13,15] parameter(0)
%c0 = f32[11,13,15] copy(%p0)
%p1 = s32[] parameter(1)
%i0 = s32[] constant(0)
%shard-barrier-to = f32[11,13,15] custom-call(%c0), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
%ds = f32[11,1,15] dynamic-slice(%shard-barrier-to, %i0, %p1, %i0),
dynamic_slice_sizes={11,1,15},
sharding={devices=[1,1,2]0,1 metadata={op_name="a"}}
ROOT %root = (f32[11,1,15]) tuple(%ds)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "c0");
ASSERT_NE(instruction, nullptr);
EXPECT_FALSE(instruction->has_sharding());
}
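// Forward propagation through dynamic-update-slice from the base operand:
// the result inherits the base sharding, and the update operand picks it up
// with the updated dimension replicated.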
TEST_P(ParameterizedMetadataTest, DynamicUpdateSliceForwardPassBase) {
const char* hlo_string = R"(
HloModule module
ENTRY %entry {
%p0 = f32[11,13,15] parameter(0)
%c0 = f32[11,13,15] copy(%p0),
sharding={devices=[2,2,2]<=[8] metadata={op_name="a"}}
%p1 = f32[11,1,15] parameter(1)
%c1 = f32[11,1,15] copy(%p1)
%p2 = s32[] parameter(2)
%i0 = s32[] constant(0)
%dus = f32[11,13,15] dynamic-update-slice(%c0, %c1, %i0, %p2, %i0)
ROOT %root = (f32[11,13,15]) tuple(%dus)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* dus = FindInstruction(module.get(), "dus");
ASSERT_NE(dus, nullptr);
EXPECT_THAT(dus, op::Sharding("{devices=[2,2,2]<=[8]}"));
auto* c1 = FindInstruction(module.get(), "c1");
ASSERT_NE(c1, nullptr);
EXPECT_THAT(
c1, op::Sharding(
"{devices=[2,1,2,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
for (HloInstruction* instruction : {dus, c1}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
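// A "ShardBarrierFrom" custom call on the base operand should block forward
// propagation, so the dynamic-update-slice stays unsharded.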
TEST_P(ParameterizedMetadataTest, DynamicUpdateSliceForwardPassWithBarrier) {
const char* hlo_string = R"(
HloModule module
ENTRY %entry {
%p0 = f32[11,13,15] parameter(0)
%c0 = f32[11,13,15] copy(%p0),
sharding={devices=[1,1,2]0,1 metadata={op_name="a"}}
%p1 = f32[11,1,15] parameter(1)
%c1 = f32[11,1,15] copy(%p1)
%p2 = s32[] parameter(2)
%i0 = s32[] constant(0)
%shard-barrier-from = f32[11,13,15] custom-call(%c0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%dus = f32[11,13,15] dynamic-update-slice(%shard-barrier-from, %c1, %i0, %p2, %i0)
ROOT %root = (f32[11,13,15]) tuple(%dus)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* dus = FindInstruction(module.get(), "dus");
ASSERT_NE(dus, nullptr);
EXPECT_FALSE(dus->has_sharding());
}
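// Forward propagation through dynamic-update-slice from the update operand:
// both the result and the base operand pick up the update sharding with the
// updated dimension replicated.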
TEST_P(ParameterizedMetadataTest, DynamicUpdateSliceForwardPassUpdate) {
const char* hlo_string = R"(
HloModule module
ENTRY %entry {
%p0 = f32[11,13,15] parameter(0)
%c0 = f32[11,13,15] copy(%p0)
%p1 = f32[11,1,15] parameter(1)
%c1 = f32[11,1,15] copy(%p1),
sharding={devices=[2,2,2]<=[8] metadata={op_name="a"}}
%p2 = s32[] parameter(2)
%i0 = s32[] constant(0)
%dus = f32[11,13,15] dynamic-update-slice(%c0, %c1, %i0, %p2, %i0)
ROOT %root = (f32[11,13,15]) tuple(%dus)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* dus = FindInstruction(module.get(), "dus");
ASSERT_NE(dus, nullptr);
EXPECT_THAT(
dus, op::Sharding(
"{devices=[2,1,2,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
auto* c0 = FindInstruction(module.get(), "c0");
ASSERT_NE(c0, nullptr);
EXPECT_THAT(
c0, op::Sharding(
"{devices=[2,1,2,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
for (HloInstruction* instruction : {dus, c0}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
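// Backward propagation through dynamic-update-slice: the base operand
// inherits the annotated sharding directly, while the update operand gets it
// with the updated dimension replicated.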
TEST_P(ParameterizedMetadataTest, DynamicUpdateSliceBackwardPass) {
const char* hlo_string = R"(
HloModule module
ENTRY %entry {
%p0 = f32[11,13,15] parameter(0)
%c0 = f32[11,13,15] copy(%p0)
%p1 = f32[11,1,15] parameter(1)
%c1 = f32[11,1,15] copy(%p1)
%p2 = s32[] parameter(2)
%i0 = s32[] constant(0)
%dus = f32[11,13,15] dynamic-update-slice(%c0, %c1, %i0, %p2, %i0),
sharding={devices=[2,2,2]<=[8] metadata={op_name="a"}}
ROOT %root = (f32[11,13,15]) tuple(%dus)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* c0 = FindInstruction(module.get(), "c0");
ASSERT_NE(c0, nullptr);
EXPECT_THAT(c0, op::Sharding("{devices=[2,2,2]<=[8]}"));
auto* c1 = FindInstruction(module.get(), "c1");
ASSERT_NE(c1, nullptr);
EXPECT_THAT(
c1, op::Sharding(
"{devices=[2,1,2,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
for (HloInstruction* instruction : {c0, c1}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
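// A "ShardBarrierTo" custom call on the base operand should block backward
// propagation from the annotated dynamic-update-slice.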
TEST_P(ParameterizedMetadataTest, DynamicUpdateSliceBackwardPassWithBarrier) {
const char* hlo_string = R"(
HloModule module
ENTRY %entry {
%p0 = f32[11,13,15] parameter(0)
%c0 = f32[11,13,15] copy(%p0)
%p1 = f32[11,1,15] parameter(1)
%c1 = f32[11,1,15] copy(%p1)
%p2 = s32[] parameter(2)
%i0 = s32[] constant(0)
%shard-barrier-to = f32[11,13,15] custom-call(%c0), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
%dus = f32[11,13,15] dynamic-update-slice(%shard-barrier-to, %c1, %i0, %p2, %i0),
sharding={devices=[1,1,2]0,1 metadata={op_name="a"}}
ROOT %root = (f32[11,13,15]) tuple(%dus)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* c0 = FindInstruction(module.get(), "c0");
ASSERT_NE(c0, nullptr);
EXPECT_FALSE(c0->has_sharding());
}
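// The convolutions in the einsum tests below encode dot-like contractions.
// Here the LHS batch dimension is partitioned, so the sharding should
// propagate to the RHS copy and the convolution result (and, when allowed,
// to the root).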
TEST_P(ParameterizedMetadataTestWithOutput, EinsumLHSBatchPartitioned) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64] parameter(0)
%lhs.copy = f32[32,24,64] copy(%lhs),
sharding={devices=[2,1,1]0,1 metadata={op_name="a"}}
%rhs = f32[32,39296,64] parameter(1)
%rhs.copy = f32[32,39296,64] copy(%rhs)
%conv = f32[32,24,39296] convolution(%lhs.copy, %rhs.copy),
dim_labels=0bf_0oi->0bf, window={size=32 stride=31 lhs_dilate=32}
ROOT %copy = f32[32,24,39296] copy(%conv)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata,
{GetParam().allow_root_sharding_propagation})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* rhs_copy = FindInstruction(module.get(), "rhs.copy");
ASSERT_NE(rhs_copy, nullptr);
EXPECT_THAT(rhs_copy, op::Sharding("{devices=[2,1,1]0,1}"));
auto* conv = FindInstruction(module.get(), "conv");
ASSERT_NE(conv, nullptr);
EXPECT_THAT(conv, op::Sharding("{devices=[2,1,1]0,1}"));
for (HloInstruction* instruction : {rhs_copy, conv}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
if (GetParam().allow_root_sharding_propagation) {
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{devices=[2,1,1]0,1}"));
}
}
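// With the einsum output partitioned along the batch dimension, the sharding
// should propagate backward to both operand copies.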
TEST_P(ParameterizedMetadataTest, EinsumOutputBatchPartitioned) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64] parameter(0)
%lhs.copy = f32[32,24,64] copy(%lhs)
%rhs = f32[32,39296,64] parameter(1)
%rhs.copy = f32[32,39296,64] copy(%rhs)
%conv = f32[32,24,39296] convolution(%lhs.copy, %rhs.copy),
dim_labels=0bf_0oi->0bf, window={size=32 stride=31 lhs_dilate=32},
sharding={devices=[2,1,1]0,1 metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* lhs_copy = FindInstruction(module.get(), "lhs.copy");
ASSERT_NE(lhs_copy, nullptr);
EXPECT_THAT(lhs_copy, op::Sharding("{devices=[2,1,1]0,1}"));
auto* rhs_copy = FindInstruction(module.get(), "rhs.copy");
ASSERT_NE(rhs_copy, nullptr);
EXPECT_THAT(rhs_copy, op::Sharding("{devices=[2,1,1]0,1}"));
for (HloInstruction* instruction : {lhs_copy, rhs_copy}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
TEST_P(ParameterizedMetadataTest, EinsumLHSNonContractingPartitioned) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs),
sharding={devices=[1,2,1,2]0,1,2,3 metadata={op_name="a"}}
%rhs = f32[32,39296,64,1] parameter(1)
%rhs.copy = f32[32,39296,64,1] copy(%rhs)
%conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy),
dim_labels=0bf1_0oi1->0bf1, window={size=32x1 stride=31x1 lhs_dilate=32x1}
ROOT %copy = f32[32,24,39296,128] copy(%conv)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "conv");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,1,2]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, EinsumOutputLHSNonContractingPartitioned) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64,128] parameter(0)
%lhs.copy = f32[32,24,64,128] copy(%lhs)
%rhs = f32[32,39296,64,1] parameter(1)
%rhs.copy = f32[32,39296,64,1] copy(%rhs)
ROOT %conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy),
dim_labels=0bf1_0oi1->0bf1, window={size=32x1 stride=31x1 lhs_dilate=32x1},
sharding={devices=[1,2,1,2]0,1,2,3 metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "lhs.copy");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,1,2]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, EinsumRHSNonContractingPartitioned) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64,1] parameter(0)
%lhs.copy = f32[32,24,64,1] copy(%lhs)
%rhs = f32[32,39296,64,128] parameter(1)
%rhs.copy = f32[32,39296,64,128] copy(%rhs),
sharding={devices=[1,2,1,2]0,1,2,3 metadata={op_name="a"}}
%conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy),
dim_labels=0bf1_0oi1->0bf1,
window={size=32x128 stride=31x1 pad=0_0x127_127 lhs_dilate=32x1 rhs_reversal=0x1}
ROOT %copy = f32[32,24,39296,128] copy(%conv)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "conv");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,1,2,2]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, EinsumOutputRHSNonContractingPartitioned) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64,1] parameter(0)
%lhs.copy = f32[32,24,64,1] copy(%lhs)
%rhs = f32[32,39296,64,128] parameter(1)
%rhs.copy = f32[32,39296,64,128] copy(%rhs)
ROOT %conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy),
dim_labels=0bf1_0oi1->0bf1,
window={size=32x128 stride=31x1 pad=0_0x127_127 lhs_dilate=32x1 rhs_reversal=0x1},
sharding={devices=[1,1,2,2]0,1,2,3 metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "rhs.copy");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,1,2]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
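// When both operands carry conflicting shardings, forward propagation should
// pick the sharding of the larger operand, here the RHS (op_name "b").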
TEST_P(ParameterizedMetadataTest, EinsumChooseLargerOperand) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64,1] parameter(0)
%lhs.copy = f32[32,24,64,1] copy(%lhs),
sharding={devices=[1,4,1,1]0,1,2,3 metadata={op_name="a"}}
%rhs = f32[32,39296,64,128] parameter(1)
%rhs.copy = f32[32,39296,64,128] copy(%rhs),
sharding={devices=[1,2,1,2]0,1,2,3 metadata={op_name="b"}}
%conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy),
dim_labels=0bf1_0oi1->0bf1,
window={size=32x128 stride=31x1 pad=0_0x127_127 lhs_dilate=32x1 rhs_reversal=0x1}
ROOT %copy = f32[32,24,39296,128] copy(%conv)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "conv");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,1,2,2]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
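// When both operands are sharded, a batch-dimension sharding should win over
// a non-batch one, so the RHS sharding (op_name "b") is chosen.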
TEST_P(ParameterizedMetadataTest, EinsumChooseBatchFirst) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%lhs = f32[32,24,64,1] parameter(0)
%lhs.copy = f32[32,24,64,1] copy(%lhs),
sharding={devices=[1,2,1,1]0,1 metadata={op_name="a"}}
%rhs = f32[32,39296,64,128] parameter(1)
%rhs.copy = f32[32,39296,64,128] copy(%rhs),
sharding={devices=[2,1,1,1]0,1 metadata={op_name="b"}}
%conv = f32[32,24,39296,128] convolution(%lhs.copy, %rhs.copy),
dim_labels=0bf1_0oi1->0bf1,
window={size=32x128 stride=31x1 pad=0_0x127_127 lhs_dilate=32x1 rhs_reversal=0x1}
ROOT %copy = f32[32,24,39296,128] copy(%conv)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "conv");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,1,1,1]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
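// Forward propagation through gather from the indices operand: the index
// sharding maps onto the corresponding batch dimensions of the gather
// output.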
TEST_P(ParameterizedMetadataTest, GatherFromIndex) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[2,2,9] parameter(0),
sharding={replicated metadata={op_name="a"}}
%indices = s32[2,3,4] parameter(1),
sharding={devices=[1,2,1]0,1 metadata={op_name="b"}}
%gather = f32[3,4,9] gather(%input, %indices), offset_dims={2},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,9}
ROOT %copy = f32[3,4,9] copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "gather");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,1,1]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, GatherFromIndex_PartialReplicate) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={replicated metadata={op_name="a"}}
%indices = s32[3] parameter(1),
sharding={devices=[2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}}
%gather = f32[3,9] gather(%input, %indices), offset_dims={1},
collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
slice_sizes={1,9}
ROOT %copy = f32[3,9] copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "gather");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
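// Forward propagation through gather from the data operand: the sharding of
// the offset dimension carries over to the gather output. Unlike most cases
// above, this one constructs the pass with its first argument set to true
// (SPMD mode, assuming the upstream ShardingPropagation signature).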
TEST_P(ParameterizedMetadataTest, GatherFromDataOperand) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={devices=[1,2]0,1 metadata={op_name="a"}}
%indices = s32[3] parameter(1),
sharding={replicated metadata={op_name="b"}}
%gather = f32[3,9] gather(%input, %indices), offset_dims={1},
collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
slice_sizes={1,9}
ROOT %copy = f32[3,9] copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "gather");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, GatherFromDataOperand_PartialReplicate) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
%indices = s32[3] parameter(1),
sharding={replicated metadata={op_name="b"}}
%gather = f32[3,9] gather(%input, %indices), offset_dims={1},
collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
slice_sizes={1,9}
ROOT %copy = f32[3,9] copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "gather");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
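// Backward propagation through gather: the output sharding maps back onto
// the indices operand.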
TEST_P(ParameterizedMetadataTest, GatherToIndex) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={replicated metadata={op_name="a"}}
%p1 = s32[3] parameter(1)
%indices = s32[3] copy(%p1)
ROOT %gather = f32[3,9] gather(%input, %indices), offset_dims={1},
collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
slice_sizes={1,9},
sharding={devices=[2,1]0,1 metadata={op_name="b"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "indices");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, GatherToIndex_PartialReplicate) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={replicated metadata={op_name="a"}}
%p1 = s32[3] parameter(1)
%indices = s32[3] copy(%p1)
ROOT %gather = f32[3,9] gather(%input, %indices), offset_dims={1},
collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
slice_sizes={1,9},
sharding={devices=[2,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "indices");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[2,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, GatherToIndex2) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%input = bf16[2,4819,4] parameter(0),
sharding={replicated metadata={op_name="a"}}
%p1 = s32[2,1000,2] parameter(1)
%indices = s32[2,1000,2] copy(%p1)
ROOT %gather = bf16[2,1000,4]
gather(bf16[2,4819,4] %input, s32[2,1000,2] %indices),
offset_dims={2}, collapsed_slice_dims={0,1},
start_index_map={0,1}, index_vector_dim=2, slice_sizes={1,1,4},
sharding={devices=[1,2,1]0,1 metadata={op_name="b"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "indices");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,1]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, GatherToIndex2_PartialReplicate) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%input = bf16[2,4819,4] parameter(0),
sharding={replicated metadata={op_name="a"}}
%p1 = s32[2,1000,2] parameter(1)
%indices = s32[2,1000,2] copy(%p1)
ROOT %gather = bf16[2,1000,4]
gather(bf16[2,4819,4] %input, s32[2,1000,2] %indices),
offset_dims={2}, collapsed_slice_dims={0,1},
start_index_map={0,1}, index_vector_dim=2, slice_sizes={1,1,4},
sharding={devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "indices");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding("{devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, GatherToIndex3) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%input = bf16[2,4819,4] parameter(0),
sharding={replicated metadata={op_name="a"}}
%p1 = s32[2,2,1000] parameter(1)
%indices = s32[2,2,1000] copy(%p1)
ROOT %gather = bf16[2,1000,4]
gather(bf16[2,4819,4] %input, s32[2,2,1000] %indices),
offset_dims={2}, collapsed_slice_dims={0,1},
start_index_map={0,1}, index_vector_dim=1, slice_sizes={1,1,4},
sharding={devices=[1,2,1]0,1 metadata={op_name="b"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "indices");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,1,2]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
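// Backward propagation through gather onto the data operand: the offset
// dimension of the output sharding maps back to the operand.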
TEST_P(ParameterizedMetadataTest, GatherToDataOperand) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%p0 = f32[2,9] parameter(0)
%input = f32[2,9] copy(%p0)
%indices = s32[3] parameter(1),
sharding={replicated metadata={op_name="a"}}
ROOT %gather = f32[3,9] gather(%input, %indices), offset_dims={1},
collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
slice_sizes={1,9},
sharding={devices=[1,2]0,1 metadata={op_name="b"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "input");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, GatherToDataOperand_PartialReplicate) {
const char* hlo_string = R"(
HloModule module
ENTRY entry {
%p0 = f32[2,9] parameter(0)
%input = f32[2,9] copy(%p0)
%indices = s32[3] parameter(1),
sharding={replicated metadata={op_name="a"}}
ROOT %gather = f32[3,9] gather(%input, %indices), offset_dims={1},
collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1,
slice_sizes={1,9},
sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "input");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
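// The scatter tests below mirror the gather ones. Here the sharding of the
// scatter data operand should propagate forward to the scatter result.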
TEST_P(ParameterizedMetadataTest, DataOperandToScatter) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={devices=[1,2]0,1 metadata={op_name="a"}}
%indices = s32[3] parameter(1),
sharding={replicated metadata={op_name="b"}}
%updates = f32[3,9] parameter(2),
sharding={replicated metadata={op_name="c"}}
%scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
ROOT %copy = f32[2,9] copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, DataOperandToScatter_PartialReplicate) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
%indices = s32[3] parameter(1),
sharding={replicated metadata={op_name="b"}}
%updates = f32[3,9] parameter(2),
sharding={replicated metadata={op_name="c"}}
%scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
ROOT %copy = f32[2,9] copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, DataOperandToScatter_Variadic) {
const char* const hlo_string = R"(
HloModule module
add (lhs.0: f32[], lhs.1: f32[], rhs.0: f32[], rhs.1: f32[]) -> (f32[], f32[]) {
lhs.0 = f32[] parameter(0)
lhs.1 = f32[] parameter(1)
rhs.0 = f32[] parameter(2)
rhs.1 = f32[] parameter(3)
sum.0 = f32[] add(lhs.0, rhs.0)
sum.1 = f32[] add(lhs.1, rhs.1)
ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY entry {
%input.0 = f32[2,9] parameter(0),
sharding={devices=[1,4]0,1,2,3 metadata={op_name="a"}}
%input.1 = f32[2,9] parameter(1),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}}
%indices = s32[3] parameter(2),
sharding={replicated metadata={op_name="c"}}
%updates.0 = f32[3,9] parameter(3),
sharding={replicated metadata={op_name="d"}}
%updates.1 = f32[3,9] parameter(4),
sharding={replicated metadata={op_name="e"}}
%scatter = (f32[2,9],f32[2,9]) scatter(%input.0, %input.1, %indices, %updates.0, %updates.1),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
ROOT %copy = (f32[2,9],f32[2,9]) copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{{devices=[1,4]0,1,2,3}, {devices=[1,2,2]0,1,2,3 "
"last_tile_dim_replicate}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(instruction->sharding().tuple_elements()[1],
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
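// The sharding of the scatter updates operand should propagate forward to
// the scatter result.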
TEST_P(ParameterizedMetadataTest, UpdateOperandToScatter) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={replicated metadata={op_name="a"}}
%indices = s32[3] parameter(1),
sharding={replicated metadata={op_name="b"}}
%updates = f32[3,9] parameter(2),
sharding={devices=[1,2]0,1 metadata={op_name="c"}}
%scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
ROOT %copy = f32[2,9] copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("c")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, UpdateOperandToScatter_PartialReplicate) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={replicated metadata={op_name="a"}}
%indices = s32[3] parameter(1),
sharding={replicated metadata={op_name="b"}}
%updates = f32[3,9] parameter(2),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="c"}}
%scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
ROOT %copy = f32[2,9] copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("c")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, UpdateOperandToScatter_Variadic) {
const char* const hlo_string = R"(
HloModule module
add (lhs.0: f32[], lhs.1: f32[], rhs.0: f32[], rhs.1: f32[]) -> (f32[], f32[]) {
lhs.0 = f32[] parameter(0)
lhs.1 = f32[] parameter(1)
rhs.0 = f32[] parameter(2)
rhs.1 = f32[] parameter(3)
sum.0 = f32[] add(lhs.0, rhs.0)
sum.1 = f32[] add(lhs.1, rhs.1)
ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY entry {
%input.0 = f32[2,9] parameter(0),
sharding={replicated metadata={op_name="a"}}
%input.1 = f32[2,9] parameter(1),
sharding={replicated metadata={op_name="b"}}
%indices = s32[3] parameter(2),
sharding={replicated metadata={op_name="c"}}
%updates.0 = f32[3,9] parameter(3),
sharding={devices=[1,4]0,1,2,3 metadata={op_name="d"}}
%updates.1 = f32[3,9] parameter(4),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="e"}}
%scatter = (f32[2,9],f32[2,9]) scatter(%input.0, %input.1, %indices, %updates.0, %updates.1),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
ROOT %copy = (f32[2,9],f32[2,9]) copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{{devices=[1,4] 0,1,2,3}, {devices=[1,2,2]0,1,2,3 "
"last_tile_dim_replicate}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("d")}));
EXPECT_THAT(instruction->sharding().tuple_elements()[1],
ShardingMetadata({CreateMetadata("e")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
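// Backward propagation through scatter: the annotated result sharding maps
// back onto the data operand.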
TEST_P(ParameterizedMetadataTest, ScatterToDataOperand_PartialReplicate) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%p0 = f32[2,9] parameter(0)
%input = f32[2,9] copy(%p0)
%indices = s32[3] parameter(1),
sharding={replicated metadata={op_name="a"}}
%updates = f32[3,9] parameter(2),
sharding={replicated metadata={op_name="b"}}
ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="c"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "input");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("c")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, ScatterToDataOperand) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%p0 = f32[2,9] parameter(0)
%input = f32[2,9] copy(%p0)
%indices = s32[3] parameter(1),
sharding={replicated metadata={op_name="a"}}
%updates = f32[3,9] parameter(2),
sharding={replicated metadata={op_name="b"}}
ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
sharding={devices=[1,2]0,1 metadata={op_name="c"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "input");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("c")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, ScatterToDataOperand_Variadic) {
const char* const hlo_string = R"(
HloModule module
add (lhs.0: f32[], lhs.1: f32[], rhs.0: f32[], rhs.1: f32[]) -> (f32[], f32[]) {
lhs.0 = f32[] parameter(0)
lhs.1 = f32[] parameter(1)
rhs.0 = f32[] parameter(2)
rhs.1 = f32[] parameter(3)
sum.0 = f32[] add(lhs.0, rhs.0)
sum.1 = f32[] add(lhs.1, rhs.1)
ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY entry {
%p0 = f32[2,9] parameter(0)
%input.0 = f32[2,9] copy(%p0)
%p1 = f32[2,9] parameter(1)
%input.1 = f32[2,9] copy(%p1)
%indices = s32[3] parameter(2),
sharding={replicated metadata={op_name="a"}}
%updates.0 = f32[3,9] parameter(3),
sharding={replicated metadata={op_name="b"}}
%updates.1 = f32[3,9] parameter(4),
sharding={replicated metadata={op_name="c"}}
ROOT %scatter = (f32[2,9],f32[2,9]) scatter(%input.0, %input.1, %indices, %updates.0, %updates.1),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
sharding={{devices=[1,4]0,1,2,3 metadata={op_name="d"}}, {devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="e"}}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "input.0");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,4]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("d")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
instruction = FindInstruction(module.get(), "input.1");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("e")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
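// Backward propagation through scatter: the annotated result sharding maps
// back onto the updates operand.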
TEST_P(ParameterizedMetadataTest, ScatterToUpdateOperand_PartialReplicate) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9] parameter(0)
%indices = s32[3] parameter(1),
sharding={replicated metadata={op_name="a"}}
%p2 = f32[3,9] parameter(2)
%updates = f32[3,9] copy(%p2)
ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "updates");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, ScatterToUpdateOperand) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9] parameter(0)
%indices = s32[3] parameter(1),
sharding={replicated metadata={op_name="a"}}
%p2 = f32[3,9] parameter(2)
%updates = f32[3,9] copy(%p2)
ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
sharding={devices=[1,2]0,1 metadata={op_name="b"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "updates");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, ScatterToUpdateOperand_Variadic) {
const char* const hlo_string = R"(
HloModule module
add (lhs.0: f32[], lhs.1: f32[], rhs.0: f32[], rhs.1: f32[]) -> (f32[], f32[]) {
lhs.0 = f32[] parameter(0)
lhs.1 = f32[] parameter(1)
rhs.0 = f32[] parameter(2)
rhs.1 = f32[] parameter(3)
sum.0 = f32[] add(lhs.0, rhs.0)
sum.1 = f32[] add(lhs.1, rhs.1)
ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY entry {
%input.0 = f32[2,9] parameter(0)
%input.1 = f32[2,9] parameter(1)
%indices = s32[3] parameter(2),
sharding={replicated metadata={op_name="a"}}
%p3 = f32[3,9] parameter(3)
%updates.0 = f32[3,9] copy(%p3)
%p4 = f32[3,9] parameter(4)
%updates.1 = f32[3,9] copy(%p4)
ROOT %scatter = (f32[2,9],f32[2,9]) scatter(%input.0, %input.1, %indices, %updates.0, %updates.1),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
sharding={{devices=[1,4]0,1,2,3 metadata={op_name="b"}}, {devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="c"}}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "updates.0");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,4]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
instruction = FindInstruction(module.get(), "updates.1");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("c")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
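// The sharding of the scatter updates should also reach the indices operand
// via the scatter's update/index dimension mapping.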
TEST_P(ParameterizedMetadataTest, ScatterUpdateToIndex) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={replicated metadata={op_name="a"}}
%p1 = s32[3] parameter(1),
sharding={replicated metadata={op_name="b"}}
%indices = s32[3] copy(%p1)
%updates = f32[3,9] parameter(2),
sharding={devices=[2,1]0,1 metadata={op_name="c"}}
ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
sharding={replicated metadata={op_name="d"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "indices");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("c")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, ScatterUpdateToIndex2) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={replicated metadata={op_name="a"}}
%p1 = s32[1,3] parameter(1),
sharding={replicated metadata={op_name="b"}}
%indices = s32[1,3] copy(%p1)
%updates = f32[3,9] parameter(2),
sharding={devices=[2,1]0,1 metadata={op_name="c"}}
ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=0,
sharding={replicated metadata={op_name="d"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "indices");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("c")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
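// Same as ScatterUpdateToIndex, but with a partially replicated updates
// sharding that should carry over to the indices.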
TEST_P(ParameterizedMetadataTest, ScatterUpdateToIndex_PartialReplicate) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={replicated metadata={op_name="a"}}
%p1 = s32[3] parameter(1),
sharding={replicated metadata={op_name="b"}}
%indices = s32[3] copy(%p1)
%updates = f32[3,9] parameter(2),
sharding={devices=[2,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="c"}}
ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
sharding={replicated metadata={op_name="d"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "indices");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[2,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("c")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
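// Updates-to-indices propagation in a case where the shapes involved have
// mismatched ranks; the update sharding should land on the matching index
// dimensions.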
TEST_P(ParameterizedMetadataTest, ScatterUpdateToIndex_RankMismatch) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[1,24,24,24,3,3] parameter(0),
sharding={replicated metadata={op_name="a"}}
%p1 = s32[1,24,24,24,5] parameter(1),
sharding={replicated metadata={op_name="b"}}
%indices = s32[1,24,24,24,5] copy(%p1)
%updates = f32[1,24,24,24,3] parameter(2),
sharding={devices=[1,2,2,2,1]0,1,2,3,4,5,6,7 metadata={op_name="c"}}
%scatter = f32[1,24,24,24,3,3] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={4},
inserted_window_dims={0,1,2,3,4},
scatter_dims_to_operand_dims={0,1,2,3,4},
index_vector_dim=4,
sharding={replicated metadata={op_name="d"}}
ROOT %copy = f32[1,24,24,24,3,3] copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "indices");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,2,2,2,1]0,1,2,3,4,5,6,7}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("c")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
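// With a variadic scatter, the shardings of both update operands should be
// merged when propagating to the shared indices, combining their metadata.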
TEST_P(ParameterizedMetadataTest, ScatterUpdateToIndex_Variadic) {
const char* const hlo_string = R"(
HloModule module
add (lhs.0: f32[], lhs.1: f32[], rhs.0: f32[], rhs.1: f32[]) -> (f32[], f32[]) {
lhs.0 = f32[] parameter(0)
lhs.1 = f32[] parameter(1)
rhs.0 = f32[] parameter(2)
rhs.1 = f32[] parameter(3)
sum.0 = f32[] add(lhs.0, rhs.0)
sum.1 = f32[] add(lhs.1, rhs.1)
ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY entry {
%input.0 = f32[2,9] parameter(0),
sharding={replicated metadata={op_name="a"}}
%input.1 = f32[2,9] parameter(1),
sharding={replicated metadata={op_name="b"}}
%p2 = s32[3,3] parameter(2),
sharding={replicated metadata={op_name="c"}}
%indices = s32[3,3] copy(%p2)
%updates.0 = f32[3,3,9] parameter(3),
sharding={devices=[2,1,1,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="d"}}
%updates.1 = f32[3,3,9] parameter(4),
sharding={devices=[1,2,1,2]0,2,1,3 last_tile_dim_replicate metadata={op_name="e"}}
ROOT %scatter = (f32[2,9],f32[2,9]) scatter(%input.0, %input.1, %indices, %updates.0, %updates.1),
to_apply=add,
update_window_dims={2},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=2,
sharding={{replicated metadata={op_name="d"}}, {replicated metadata={op_name="e"}}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "indices");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("d"), CreateMetadata("e")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
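// The sharding of the scatter indices should propagate to the scatter
// updates along the shared dimension.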
TEST_P(ParameterizedMetadataTest, ScatterIndexToUpdate) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={replicated metadata={op_name="a"}}
%indices = s32[3] parameter(1),
sharding={devices=[2]0,1 metadata={op_name="b"}}
%p2 = f32[3,9] parameter(2),
sharding={replicated metadata={op_name="c"}}
%updates = f32[3,9] copy(%p2)
ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
sharding={replicated metadata={op_name="d"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "updates");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,1]0,1}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, ScatterIndexToUpdate_PartialReplicate) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = f32[2,9] parameter(0),
sharding={replicated metadata={op_name="a"}}
%indices = s32[3] parameter(1),
sharding={devices=[2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="b"}}
%p2 = f32[3,9] parameter(2),
sharding={replicated metadata={op_name="c"}}
%updates = f32[3,9] copy(%p2)
ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
sharding={replicated metadata={op_name="d"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "updates");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, ScatterIndexToUpdate2_PartialReplicate) {
const char* const hlo_string = R"(
HloModule module
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT sum = f32[] add(lhs, rhs)
}
ENTRY entry {
%input = bf16[15,8] parameter(0),
sharding={replicated metadata={op_name="a"}}
%indices = s32[8,1,1] parameter(1),
sharding={devices=[2,1,1,4]0,1,2,3,4,5,6,7
last_tile_dim_replicate metadata={op_name="b"}}
%p2 = bf16[8,1,8] parameter(2),
sharding={replicated metadata={op_name="c"}}
%updates = bf16[8,1,8] copy(%p2)
ROOT %scatter = bf16[15,8]{1,0} scatter(bf16[15,8] %input,
s32[8,1,1] %indices, bf16[8,1,8] %updates),
update_window_dims={2},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0}, index_vector_dim=2, to_apply=%add,
sharding={replicated metadata={op_name="d"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "updates");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding(
"{devices=[2,1,1,4]0,1,2,3,4,5,6,7 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, ScatterIndexToUpdate_Variadic) {
const char* const hlo_string = R"(
HloModule module
add (lhs.0: f32[], lhs.1: f32[], rhs.0: f32[], rhs.1: f32[]) -> (f32[], f32[]) {
lhs.0 = f32[] parameter(0)
lhs.1 = f32[] parameter(1)
rhs.0 = f32[] parameter(2)
rhs.1 = f32[] parameter(3)
sum.0 = f32[] add(lhs.0, rhs.0)
sum.1 = f32[] add(lhs.1, rhs.1)
ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY entry {
%input.0 = f32[2,9] parameter(0),
sharding={replicated metadata={op_name="a"}}
%input.1 = f32[2,9] parameter(1),
sharding={replicated metadata={op_name="b"}}
%indices = s32[3,3] parameter(2),
sharding={devices=[2,2]0,1,2,3 metadata={op_name="c"}}
%p3 = f32[3,3,9] parameter(3),
sharding={replicated metadata={op_name="d"}}
%updates.0 = f32[3,3,9] copy(%p3)
%p4 = f32[3,3,9] parameter(4),
sharding={replicated metadata={op_name="e"}}
%updates.1 = f32[3,3,9] copy(%p4)
  ROOT %scatter = (f32[2,9],f32[2,9]) scatter(%input.0, %input.1, %indices, %updates.0, %updates.1),
to_apply=add,
update_window_dims={2},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=2,
sharding={replicated metadata={op_name="d"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "updates.0");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,2,1]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("c")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
instruction = FindInstruction(module.get(), "updates.1");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,2,1]0,1,2,3}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("c")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
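// Partially replicated shardings on the two operands of an elementwise op
// should be merged into a combined tiling across both dimensions.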
TEST_P(ParameterizedMetadataTest, PartialShardingOnElementwise) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%p0 = f32[2,9] parameter(0),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate metadata={op_name="a"}}
%p1 = f32[2,9] parameter(1),
sharding={devices=[2,1,2]0,2,1,3 last_tile_dim_replicate metadata={op_name="b"}}
%lhs = f32[2,9] copy(%p0)
%rhs = f32[2,9] copy(%p1)
%add = f32[2,9] add(%lhs, %rhs)
ROOT %copy = f32[2,9] copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* lhs = FindInstruction(module.get(), "lhs");
ASSERT_NE(lhs, nullptr);
EXPECT_THAT(lhs, op::Sharding("{devices=[2,2]0,2,1,3}"));
auto* rhs = FindInstruction(module.get(), "rhs");
ASSERT_NE(rhs, nullptr);
EXPECT_THAT(rhs, op::Sharding("{devices=[2,2]0,2,1,3}"));
auto* add = FindInstruction(module.get(), "add");
ASSERT_NE(add, nullptr);
EXPECT_THAT(add, op::Sharding("{devices=[2,2]0,2,1,3}"));
for (HloInstruction* instruction : {lhs, rhs, add}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b"), CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
TEST_P(ParameterizedMetadataTest, PartialShardingOnElementwise2) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%p0 = f32[2,9] parameter(0),
sharding={devices=[1,2,4]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="a"}}
%p1 = f32[2,9] parameter(1),
sharding={devices=[2,1,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="b"}}
%lhs = f32[2,9] copy(%p0)
%rhs = f32[2,9] copy(%p1)
%add = f32[2,9] add(%lhs, %rhs)
ROOT %copy = f32[2,9] copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* lhs = FindInstruction(module.get(), "lhs");
ASSERT_NE(lhs, nullptr);
EXPECT_THAT(
lhs,
op::Sharding("{devices=[2,2,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
auto* rhs = FindInstruction(module.get(), "rhs");
ASSERT_NE(rhs, nullptr);
EXPECT_THAT(
rhs,
op::Sharding("{devices=[2,2,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
auto* add = FindInstruction(module.get(), "add");
ASSERT_NE(add, nullptr);
EXPECT_THAT(
add,
op::Sharding("{devices=[2,2,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(lhs->sharding(),
ShardingMetadata({CreateMetadata("b"), CreateMetadata("a")}));
EXPECT_THAT(rhs->sharding(),
ShardingMetadata({CreateMetadata("b"), CreateMetadata("a")}));
EXPECT_THAT(add->sharding(),
ShardingMetadata({CreateMetadata("b"), CreateMetadata("a")}));
} else {
    for (HloInstruction* instruction : {lhs, rhs, add}) {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
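// A partially replicated operand sharding should be permuted through a
// transpose according to its dimension mapping.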
TEST_P(ParameterizedMetadataTest, PartialShardingTransposeForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %transpose {
%param = f32[7,11,13]{2,1,0} parameter(0),
sharding={devices=[2,1,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="a"}}
%transpose = f32[11,13,7]{2,1,0} transpose(%param), dimensions={1,2,0}
ROOT %copy = f32[11,13,7]{2,1,0} copy(%transpose)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "transpose");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding(
"{devices=[1,2,2,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, PartialShardingTransposeBackwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %transpose {
%param = f32[7,11,13]{2,1,0} parameter(0)
%copy = f32[7,11,13]{2,1,0} copy(%param)
ROOT %transpose = f32[11,13,7]{2,1,0} transpose(%copy), dimensions={1,2,0},
sharding={devices=[1,2,2,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate metadata={op_name="a"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "copy");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding(
"{devices=[2,1,2,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
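// ShardBarrierFrom custom calls on the gather operands should stop forward
// propagation, leaving the gather unsharded.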
TEST_P(ParameterizedMetadataTest, GatherForwardPassWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%shard-barrier-from.0 = s32[8,4,2,2]{3,2,1,0} custom-call(%parameter.0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%shard-barrier-from.1 = s32[2,8,4]{2,1,0} custom-call(%concatenate.19), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %shard-barrier-from.0,
s32[2,8,4]{2,1,0} %shard-barrier-from.1), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "gather");
ASSERT_NE(instruction, nullptr);
EXPECT_FALSE(instruction->has_sharding());
}
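// A ShardBarrierTo custom call should stop backward propagation into the
// gather operand, which ends up replicated, while the indices still receive
// a sharding.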
TEST_P(ParameterizedMetadataTest, GatherBackwardPassWithBarrier) {
const char* const hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%copy.p = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%shard-barrier-to = s32[8,4,2,2]{3,2,1,0} custom-call(%copy.p), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %shard-barrier-to,
s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2},
sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* concatenate = FindInstruction(module.get(), "concatenate");
ASSERT_NE(concatenate, nullptr);
EXPECT_THAT(concatenate, op::Sharding("{devices=[1,8,1]0,1,4,5,2,3,6,7}"));
auto* copy_p = FindInstruction(module.get(), "copy.p");
ASSERT_NE(copy_p, nullptr);
EXPECT_THAT(copy_p, op::Sharding("{replicated}"));
}
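// Explicit batching dims: the operand sharding of the gather should
// propagate to the result across the batch and offset dimensions.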
TEST_F(ShardingPropagationTest, GatherExplicitBatchDimsFromOperandToResult) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[10,3,14,4] parameter(0), sharding={devices=[2,2,2,2]<=[16]}
%indices = s32[14,10,6,2] parameter(1)
ROOT %gather = f32[14,10,6,4] gather(%input, %indices), offset_dims={3},
collapsed_slice_dims={1}, operand_batching_dims={0,2},
start_indices_batching_dims={1,0}, start_index_map={1,3},
index_vector_dim=3, slice_sizes={1,1,1,4}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{devices=[2,2,1,2,2]<=[2,2,2,2]T(2,0,"
"3,1) last_tile_dim_replicate}"));
}
TEST_F(ShardingPropagationTest, GatherExplicitBatchDimsFromIndicesToResult) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[10,3,14,4] parameter(0)
%indices = s32[14,10,6,2] parameter(1), sharding={devices=[2,2,2,2]<=[16]}
ROOT %gather = f32[14,10,6,4] gather(%input, %indices), offset_dims={3},
collapsed_slice_dims={1}, operand_batching_dims={0,2},
start_indices_batching_dims={1,0}, start_index_map={1,3},
index_vector_dim=3, slice_sizes={1,1,1,4}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Sharding("{devices=[2,2,2,1,2]<=[16] last_tile_dim_replicate}"));
}
TEST_F(ShardingPropagationTest, GatherBackwardWithExplicitBatchDims) {
const char* const hlo_string = R"(
HloModule module
ENTRY entry {
%input = f32[10,3,14,4] parameter(0)
%indices = s32[14,10,6,2] parameter(1)
ROOT %gather = f32[14,10,6,4] gather(%input, %indices), offset_dims={3},
collapsed_slice_dims={1}, operand_batching_dims={0,2},
start_indices_batching_dims={1,0}, start_index_map={1,3},
index_vector_dim=3, slice_sizes={1,1,1,4},
sharding={devices=[2,2,2,2]<=[16]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true},
          /*allow_spmd_sharding_propagation_to_parameters=*/{true, true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{devices=[2,1,2,2,2]<=[2,2,2,2]T(1,0,3,2) "
"last_tile_dim_replicate}"));
EXPECT_THAT(
module->entry_computation()->parameter_instruction(1),
op::Sharding("{devices=[2,2,2,1,2]<=[16] last_tile_dim_replicate}"));
}
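// Explicit batching dims on a scatter: the input sharding should propagate
// unchanged to the scatter result.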
TEST_F(ShardingPropagationTest, ScatterExplicitBatchDimsFromOperandToResult) {
const char* const hlo_string = R"(
HloModule module
min (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT min = f32[] minimum(lhs, rhs)
}
ENTRY entry {
%input = f32[10,6,14,4] parameter(0), sharding={devices=[2,2,2,2]<=[16]}
%indices = s32[14,10,6,2] parameter(1)
%updates = f32[14,10,6,2] parameter(2)
ROOT %scatter = f32[10,6,14,4] scatter(%input, %indices, %updates),
to_apply=min, update_window_dims={3}, inserted_window_dims={1},
scatter_dims_to_operand_dims={1,3}, input_batching_dims={0,2},
scatter_indices_batching_dims={1,0}, index_vector_dim=3
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{devices=[2,2,2,2]<=[16]}"));
}
TEST_F(ShardingPropagationTest, ScatterExplicitBatchDimsFromIndicesToResult) {
const char* const hlo_string = R"(
HloModule module
min (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT min = f32[] minimum(lhs, rhs)
}
ENTRY entry {
%input = f32[10,6,14,4] parameter(0)
%indices = s32[14,10,6,2] parameter(1), sharding={devices=[2,2,2,2]<=[16]}
%updates = f32[14,10,6,2] parameter(2)
ROOT %scatter = f32[10,6,14,4] scatter(%input, %indices, %updates),
to_apply=min, update_window_dims={3}, inserted_window_dims={1},
scatter_dims_to_operand_dims={1,3}, input_batching_dims={0,2},
scatter_indices_batching_dims={1,0}, index_vector_dim=3
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Sharding(
"{devices=[2,1,2,1,4]<=[2,2,4]T(1,0,2) last_tile_dim_replicate}"));
}
TEST_F(ShardingPropagationTest, ScatterExplicitBatchDimsFromUpdatesToResult) {
const char* const hlo_string = R"(
HloModule module
min (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT min = f32[] minimum(lhs, rhs)
}
ENTRY entry {
%input = f32[10,6,14,4] parameter(0)
%indices = s32[14,10,6,2] parameter(1)
%updates = f32[14,10,6,4] parameter(2), sharding={devices=[2,2,2,2]<=[16]}
ROOT %scatter = f32[10,6,14,4] scatter(%input, %indices, %updates),
to_apply=min, update_window_dims={3}, inserted_window_dims={1},
scatter_dims_to_operand_dims={1,3}, input_batching_dims={0,2},
scatter_indices_batching_dims={1,0}, index_vector_dim=3
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{devices=[2,1,2,2,2]<=[2,2,2,2]T(1,0,3,2) "
"last_tile_dim_replicate}"));
}
TEST_F(ShardingPropagationTest, ScatterBackwardWithExplicitBatchDims) {
const char* const hlo_string = R"(
HloModule module
min (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT min = f32[] minimum(lhs, rhs)
}
ENTRY entry {
%input = f32[10,6,14,4] parameter(0)
%indices = s32[14,10,6,2] parameter(1)
%updates = f32[14,10,6,4] parameter(2)
ROOT %scatter = f32[10,6,14,4] scatter(%input, %indices, %updates),
to_apply=min, update_window_dims={3}, inserted_window_dims={1},
scatter_dims_to_operand_dims={1,3}, input_batching_dims={0,2},
scatter_indices_batching_dims={1,0}, index_vector_dim=3, sharding={devices=[2,2,2,2]<=[16]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true},
          /*allow_spmd_sharding_propagation_to_parameters=*/{true, true, true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{devices=[2,2,2,2]<=[16]}"));
EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
op::Sharding("{devices=[2,2,1,1,4]<=[2,2,2,2]T(2,0,1,3) "
"last_tile_dim_replicate}"));
EXPECT_THAT(module->entry_computation()->parameter_instruction(2),
op::Sharding("{devices=[2,2,1,2,2]<=[2,2,2,2]T(2,0,3,1) "
"last_tile_dim_replicate}"));
}
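// Iota-based indices make the first gather dimension a parallel dimension,
// so the operand sharding should propagate to the gather result.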
TEST_P(ParameterizedMetadataTest, ParallelGatherFromOperandForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "gather");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, ParallelGatherFromIndexForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,8,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "gather");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, ParallelGatherBackwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%copy.p = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %copy.p,
s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2},
sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* concatenate = FindInstruction(module.get(), "concatenate");
ASSERT_NE(concatenate, nullptr);
EXPECT_THAT(concatenate, op::Sharding("{devices=[1,8,1]0,1,4,5,2,3,6,7}"));
auto* copy_p = FindInstruction(module.get(), "copy.p");
ASSERT_NE(copy_p, nullptr);
EXPECT_THAT(copy_p, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}"));
for (HloInstruction* instruction : {concatenate, copy_p}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
TEST_P(ParameterizedMetadataTest, ParallelGatherBackwardPass2) {
const char* const hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[4,8,2,2]{3,2,1,0} parameter(0)
%copy.p = s32[4,8,2,2]{3,2,1,0} copy(%parameter.0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[4,8,2,2]{3,2,1,0} %copy.p,
s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={1,0}, index_vector_dim=0,
slice_sizes={1,1,2,2},
sharding={devices=[1,4,1,1]0,1,4,5 metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* concatenate = FindInstruction(module.get(), "concatenate");
ASSERT_NE(concatenate, nullptr);
EXPECT_THAT(concatenate, op::Sharding("{devices=[1,1,4]0,1,4,5}"));
auto* copy_p = FindInstruction(module.get(), "copy.p");
ASSERT_NE(copy_p, nullptr);
EXPECT_THAT(copy_p, op::Sharding("{devices=[4,1,1,1]0,1,4,5}"));
for (HloInstruction* instruction : {concatenate, copy_p}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
TEST_P(ParameterizedMetadataTest,
PartialShardingParallelGatherFromOperandForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "gather");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding(
"{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest,
PartialShardingParallelGatherFromIndexForwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,4,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate.19 = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate.19), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "gather");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding(
"{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, PartialShardingParallelGatherBackwardPass) {
const char* const hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%copy.p = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %copy.p,
s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2},
sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* concatenate = FindInstruction(module.get(), "concatenate");
ASSERT_NE(concatenate, nullptr);
EXPECT_THAT(
concatenate,
op::Sharding(
"{devices=[1,4,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
auto* copy_p = FindInstruction(module.get(), "copy.p");
ASSERT_NE(copy_p, nullptr);
EXPECT_THAT(
copy_p,
op::Sharding(
"{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
for (HloInstruction* instruction : {concatenate, copy_p}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
TEST_P(ParameterizedMetadataTest, PartialShardingParallelGatherBackwardPass2) {
const char* const hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[4,8,2,2]{3,2,1,0} parameter(0)
%copy.p = s32[4,8,2,2]{3,2,1,0} copy(%parameter.0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[4,8,2,2]{3,2,1,0} %copy.p,
s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={1,0}, index_vector_dim=0,
slice_sizes={1,1,2,2},
sharding={devices=[1,2,1,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* concatenate = FindInstruction(module.get(), "concatenate");
ASSERT_NE(concatenate, nullptr);
EXPECT_THAT(
concatenate,
op::Sharding("{devices=[1,1,2,2]0,1,4,5 last_tile_dim_replicate}"));
auto* copy_p = FindInstruction(module.get(), "copy.p");
ASSERT_NE(copy_p, nullptr);
EXPECT_THAT(
copy_p,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
for (HloInstruction* instruction : {concatenate, copy_p}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
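// ShardBarrierFrom custom calls on all scatter operands should stop forward
// propagation, leaving the scatter unsharded.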
TEST_P(ParameterizedMetadataTest, ScatterForwardPassWithBarrier) {
const char* const hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
%shard-barrier-from.0 = s32[8,4,2,2]{3,2,1,0} custom-call(%parameter.0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%shard-barrier-from.1 = s32[2,8,4]{2,1,0} custom-call(%concatenate), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%shard-barrier-from.2 = s32[8,4,2,2]{3,2,1,0} custom-call(%parameter.1), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %shard-barrier-from.0,
s32[2,8,4]{2,1,0} %shard-barrier-from.1,
s32[8,4,2,2]{3,2,1,0} %shard-barrier-from.2),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_FALSE(instruction->has_sharding());
}
TEST_P(ParameterizedMetadataTest, ScatterBackwardPassWithBarrier) {
const char* const hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%copy.p0 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
%copy.p1 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.1)
%shard-barrier-to.0 = s32[8,4,2,2]{3,2,1,0} custom-call(%copy.p0), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %shard-barrier-to.0,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %copy.p1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0,
sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* concatenate = FindInstruction(module.get(), "concatenate");
ASSERT_NE(concatenate, nullptr);
EXPECT_THAT(concatenate, op::Sharding("{devices=[1,8,1]0,1,4,5,2,3,6,7}"));
auto* copy_p0 = FindInstruction(module.get(), "copy.p0");
ASSERT_NE(copy_p0, nullptr);
EXPECT_THAT(copy_p0, op::Sharding("{replicated}"));
auto* copy_p1 = FindInstruction(module.get(), "copy.p1");
ASSERT_NE(copy_p1, nullptr);
EXPECT_THAT(copy_p1, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}"));
}
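// With iota-derived indices the scatter has a parallel dimension, so the
// operand sharding should propagate forward to the scatter result.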
TEST_P(ParameterizedMetadataTest, ParallelScatterFromOperandForwardPass) {
const char* const hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %parameter.1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, ParallelScatterFromIndexForwardPass) {
const char* const hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,8,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %parameter.1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
TEST_P(ParameterizedMetadataTest, ParallelScatterFromUpdateForwardPass) {
const char* const hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1),
sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %parameter.1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
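// Backward pass through a parallel scatter: the result sharding should
// propagate to the operand, the indices, and the updates.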
TEST_P(ParameterizedMetadataTest, ParallelScatterBackwardPass) {
const char* const hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%copy.p0 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
%copy.p1 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.1)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %copy.p0,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %copy.p1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0,
sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* concatenate = FindInstruction(module.get(), "concatenate");
ASSERT_NE(concatenate, nullptr);
EXPECT_THAT(concatenate, op::Sharding("{devices=[1,8,1]0,1,4,5,2,3,6,7}"));
auto* copy_p0 = FindInstruction(module.get(), "copy.p0");
ASSERT_NE(copy_p0, nullptr);
EXPECT_THAT(copy_p0, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}"));
auto* copy_p1 = FindInstruction(module.get(), "copy.p1");
ASSERT_NE(copy_p1, nullptr);
EXPECT_THAT(copy_p1, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}"));
for (HloInstruction* instruction : {concatenate, copy_p0, copy_p1}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
TEST_P(ParameterizedMetadataTest, ParallelScatterBackwardPass2) {
const char* const hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[4,8,2,2]{3,2,1,0} parameter(0)
%copy.p0 = s32[4,8,2,2]{3,2,1,0} copy(%parameter.0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
%copy.p1 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.1)
%scatter = s32[4,8,2,2]{3,2,1,0} scatter(
s32[4,8,2,2]{3,2,1,0} %copy.p0,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %copy.p1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={1,0},
index_vector_dim=0,
sharding={devices=[4,1,1,1]0,1,4,5 metadata={op_name="a"}}
ROOT %copy = s32[4,8,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* concatenate = FindInstruction(module.get(), "concatenate");
ASSERT_NE(concatenate, nullptr);
EXPECT_THAT(concatenate, op::Sharding("{devices=[1,1,4]0,1,4,5}"));
auto* copy_p0 = FindInstruction(module.get(), "copy.p0");
ASSERT_NE(copy_p0, nullptr);
EXPECT_THAT(copy_p0, op::Sharding("{devices=[4,1,1,1]0,1,4,5}"));
auto* copy_p1 = FindInstruction(module.get(), "copy.p1");
ASSERT_NE(copy_p1, nullptr);
EXPECT_THAT(copy_p1, op::Sharding("{devices=[1,4,1,1]0,1,4,5}"));
for (HloInstruction* instruction : {concatenate, copy_p0, copy_p1}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
TEST_P(ParameterizedMetadataTest,
PartialShardingParallelScatterFromOperandForwardPass) {
const char* const hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %parameter.1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding(
"{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
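// Forward propagation of a partially replicated sharding from the scatter
// indices (annotated on the iota component of the concatenated index).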
TEST_P(ParameterizedMetadataTest,
PartialShardingParallelScatterFromIndexForwardPass) {
const char* const hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,4,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %parameter.1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding(
"{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
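// Forward propagation of a partially replicated sharding from the scatter
// updates.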
TEST_P(ParameterizedMetadataTest,
PartialShardingParallelScatterFromUpdateForwardPass) {
const char* const hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1),
sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}}
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %parameter.1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding(
"{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
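// Backward propagation of a partially replicated sharding from the scatter
// output to the operand, indices, and updates.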
TEST_P(ParameterizedMetadataTest, PartialShardingParallelScatterBackwardPass) {
const char* const hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%copy.p0 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
%copy.p1 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.1)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %copy.p0,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %copy.p1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0,
sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* concatenate = FindInstruction(module.get(), "concatenate");
ASSERT_NE(concatenate, nullptr);
EXPECT_THAT(
concatenate,
op::Sharding(
"{devices=[1,4,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
auto* copy_p0 = FindInstruction(module.get(), "copy.p0");
ASSERT_NE(copy_p0, nullptr);
EXPECT_THAT(
copy_p0,
op::Sharding(
"{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
auto* copy_p1 = FindInstruction(module.get(), "copy.p1");
ASSERT_NE(copy_p1, nullptr);
EXPECT_THAT(
copy_p1,
op::Sharding(
"{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
for (HloInstruction* instruction : {concatenate, copy_p0, copy_p1}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
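// Backward propagation of a partially replicated sharding with permuted
// parallel dimensions (scatter_dims_to_operand_dims={1,0}).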
TEST_P(ParameterizedMetadataTest, PartialShardingParallelScatterBackwardPass2) {
const char* const hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%parameter.0 = s32[4,8,2,2]{3,2,1,0} parameter(0)
%copy.p0 = s32[4,8,2,2]{3,2,1,0} copy(%parameter.0)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
%copy.p1 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.1)
%scatter = s32[4,8,2,2]{3,2,1,0} scatter(
s32[4,8,2,2]{3,2,1,0} %copy.p0,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %copy.p1),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={1,0},
index_vector_dim=0,
sharding={devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
ROOT %copy = s32[4,8,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* concatenate = FindInstruction(module.get(), "concatenate");
ASSERT_NE(concatenate, nullptr);
EXPECT_THAT(
concatenate,
op::Sharding("{devices=[1,1,2,2]0,1,4,5 last_tile_dim_replicate}"));
auto* copy_p0 = FindInstruction(module.get(), "copy.p0");
ASSERT_NE(copy_p0, nullptr);
EXPECT_THAT(
copy_p0,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
auto* copy_p1 = FindInstruction(module.get(), "copy.p1");
ASSERT_NE(copy_p1, nullptr);
EXPECT_THAT(
copy_p1,
op::Sharding("{devices=[1,2,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
for (HloInstruction* instruction : {concatenate, copy_p0, copy_p1}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
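// Variadic scatter: each operand's sharding should propagate forward to the
// corresponding element of the result tuple.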
TEST_P(ParameterizedMetadataTest,
ParallelScatterFromOperandForwardPass_Variadic) {
const char* const hlo_string = R"(
HloModule module
add (lhs.0: s32[], lhs.1: s32[], rhs.0: s32[], rhs.1: s32[]) -> (s32[], s32[]) {
lhs.0 = s32[] parameter(0)
lhs.1 = s32[] parameter(1)
rhs.0 = s32[] parameter(2)
rhs.1 = s32[] parameter(3)
sum.0 = s32[] add(lhs.0, rhs.0)
sum.1 = s32[] add(lhs.1, rhs.1)
ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1),
sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="b"}}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%parameter.3 = s32[8,4,2,2]{3,2,1,0} parameter(3)
%scatter = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[8,4,2,2]{3,2,1,0} %parameter.1,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %parameter.2,
s32[8,4,2,2]{3,2,1,0} %parameter.3),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{{devices=[8,1,1,1]0,1,4,5,2,3,6,7},{devices=[4,1,"
"1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(instruction->sharding().tuple_elements()[1],
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
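// Variadic scatter: a sharding on the indices should propagate forward to
// both elements of the result tuple.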
TEST_P(ParameterizedMetadataTest,
ParallelScatterFromIndexForwardPass_Variadic) {
const char* const hlo_string = R"(
HloModule module
add (lhs.0: s32[], lhs.1: s32[], rhs.0: s32[], rhs.1: s32[]) -> (s32[], s32[]) {
lhs.0 = s32[] parameter(0)
lhs.1 = s32[] parameter(1)
rhs.0 = s32[] parameter(2)
rhs.1 = s32[] parameter(3)
sum.0 = s32[] add(lhs.0, rhs.0)
sum.1 = s32[] add(lhs.1, rhs.1)
ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1,
sharding={devices=[1,4,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="a"}}
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%parameter.3 = s32[8,4,2,2]{3,2,1,0} parameter(3)
%scatter = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[8,4,2,2]{3,2,1,0} %parameter.1,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %parameter.2,
s32[8,4,2,2]{3,2,1,0} %parameter.3),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 "
"last_tile_dim_replicate},{devices=[4,1,1,1,2]0,1,4,"
"5,2,3,6,7 last_tile_dim_replicate}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(instruction->sharding().tuple_elements()[1],
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
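// Variadic scatter: each update's sharding should propagate forward to the
// corresponding element of the result tuple.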
TEST_P(ParameterizedMetadataTest,
ParallelScatterFromUpdateForwardPass_Variadic) {
const char* const hlo_string = R"(
HloModule module
add (lhs.0: s32[], lhs.1: s32[], rhs.0: s32[], rhs.1: s32[]) -> (s32[], s32[]) {
lhs.0 = s32[] parameter(0)
lhs.1 = s32[] parameter(1)
rhs.0 = s32[] parameter(2)
rhs.1 = s32[] parameter(3)
sum.0 = s32[] add(lhs.0, rhs.0)
sum.1 = s32[] add(lhs.1, rhs.1)
ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2),
sharding={devices=[1,8,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}}
%parameter.3 = s32[8,4,2,2]{3,2,1,0} parameter(3),
sharding={devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="b"}}
%scatter = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) scatter(
s32[8,4,2,2]{3,2,1,0} %parameter.0,
s32[8,4,2,2]{3,2,1,0} %parameter.1,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %parameter.2,
s32[8,4,2,2]{3,2,1,0} %parameter.3),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "scatter");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{{devices=[1,8,1,1]0,1,4,5,2,3,6,7},{devices=[4,1,"
"1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding().tuple_elements()[0],
ShardingMetadata({CreateMetadata("a")}));
EXPECT_THAT(instruction->sharding().tuple_elements()[1],
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
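// Variadic scatter: the per-element tuple sharding on the result should
// propagate backward to the matching operand/update pairs, and its parallel
// component to the indices.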
TEST_P(ParameterizedMetadataTest, ParallelScatterBackwardPass_Variadic) {
const char* const hlo_string = R"(
HloModule module
add (lhs.0: s32[], lhs.1: s32[], rhs.0: s32[], rhs.1: s32[]) -> (s32[], s32[]) {
lhs.0 = s32[] parameter(0)
lhs.1 = s32[] parameter(1)
rhs.0 = s32[] parameter(2)
rhs.1 = s32[] parameter(3)
sum.0 = s32[] add(lhs.0, rhs.0)
sum.1 = s32[] add(lhs.1, rhs.1)
ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY %module {
%parameter.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%copy.p0 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.0)
%parameter.1 = s32[8,4,2,2]{3,2,1,0} parameter(1)
%copy.p1 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%copy.p2 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.2)
%parameter.3 = s32[8,4,2,2]{3,2,1,0} parameter(3)
%copy.p3 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.3)
%scatter = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) scatter(
s32[8,4,2,2]{3,2,1,0} %copy.p0,
s32[8,4,2,2]{3,2,1,0} %copy.p1,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %copy.p2,
s32[8,4,2,2]{3,2,1,0} %copy.p3),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0,
sharding={{devices=[8,1,1,1]0,1,4,5,2,3,6,7 metadata={op_name="a"}},
{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate metadata={op_name="b"}}}
ROOT %copy = (s32[8,4,2,2]{3,2,1,0},s32[8,4,2,2]{3,2,1,0}) copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* concatenate = FindInstruction(module.get(), "concatenate");
ASSERT_NE(concatenate, nullptr);
EXPECT_THAT(concatenate, op::Sharding("{devices=[1,8,1]0,1,4,5,2,3,6,7}"));
auto* copy_p0 = FindInstruction(module.get(), "copy.p0");
ASSERT_NE(copy_p0, nullptr);
EXPECT_THAT(copy_p0, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}"));
auto* copy_p1 = FindInstruction(module.get(), "copy.p1");
ASSERT_NE(copy_p1, nullptr);
EXPECT_THAT(
copy_p1,
op::Sharding(
"{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
auto* copy_p2 = FindInstruction(module.get(), "copy.p2");
ASSERT_NE(copy_p2, nullptr);
EXPECT_THAT(copy_p2, op::Sharding("{devices=[8,1,1,1]0,1,4,5,2,3,6,7}"));
auto* copy_p3 = FindInstruction(module.get(), "copy.p3");
ASSERT_NE(copy_p3, nullptr);
EXPECT_THAT(
copy_p3,
op::Sharding(
"{devices=[4,1,1,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
for (HloInstruction* instruction : {concatenate, copy_p0, copy_p2}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
for (HloInstruction* instruction : {copy_p1, copy_p3}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
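// As above, with permuted parallel dimensions and partially replicated
// tuple shardings.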
TEST_P(ParameterizedMetadataTest, ParallelScatterBackwardPass2_Variadic) {
const char* const hlo_string = R"(
HloModule module
add (lhs.0: s32[], lhs.1: s32[], rhs.0: s32[], rhs.1: s32[]) -> (s32[], s32[]) {
lhs.0 = s32[] parameter(0)
lhs.1 = s32[] parameter(1)
rhs.0 = s32[] parameter(2)
rhs.1 = s32[] parameter(3)
sum.0 = s32[] add(lhs.0, rhs.0)
sum.1 = s32[] add(lhs.1, rhs.1)
ROOT tuple = tuple(sum.0, sum.1)
}
ENTRY %module {
%parameter.0 = s32[4,8,2,2]{3,2,1,0} parameter(0)
%copy.p0 = s32[4,8,2,2]{3,2,1,0} copy(%parameter.0)
%parameter.1 = s32[4,8,2,2]{3,2,1,0} parameter(1)
%copy.p1 = s32[4,8,2,2]{3,2,1,0} copy(%parameter.1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%iota2 = s32[1,8,4]{2,1,0} iota(), iota_dimension=2
%concatenate = s32[2,8,4]{2,1,0} concatenate(s32[1,8,4]{2,1,0} %iota,
s32[1,8,4]{2,1,0} %iota2), dimensions={0}
%parameter.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%copy.p2 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.2)
%parameter.3 = s32[8,4,2,2]{3,2,1,0} parameter(3)
%copy.p3 = s32[8,4,2,2]{3,2,1,0} copy(%parameter.3)
%scatter = (s32[4,8,2,2]{3,2,1,0},s32[4,8,2,2]{3,2,1,0}) scatter(
s32[4,8,2,2]{3,2,1,0} %copy.p0,
s32[4,8,2,2]{3,2,1,0} %copy.p1,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %copy.p2,
s32[8,4,2,2]{3,2,1,0} %copy.p3),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={1,0},
index_vector_dim=0,
sharding={{devices=[4,1,1,1]0,1,4,5 metadata={op_name="a"}},
{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="b"}}}
ROOT %copy = (s32[4,8,2,2]{3,2,1,0},s32[4,8,2,2]{3,2,1,0}) copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* concatenate = FindInstruction(module.get(), "concatenate");
ASSERT_NE(concatenate, nullptr);
EXPECT_THAT(concatenate, op::Sharding("{devices=[1,1,4]0,1,4,5}"));
auto* copy_p0 = FindInstruction(module.get(), "copy.p0");
ASSERT_NE(copy_p0, nullptr);
EXPECT_THAT(copy_p0, op::Sharding("{devices=[4,1,1,1]0,1,4,5}"));
auto* copy_p1 = FindInstruction(module.get(), "copy.p1");
ASSERT_NE(copy_p1, nullptr);
EXPECT_THAT(
copy_p1,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
auto* copy_p2 = FindInstruction(module.get(), "copy.p2");
ASSERT_NE(copy_p2, nullptr);
EXPECT_THAT(copy_p2, op::Sharding("{devices=[1,4,1,1]0,1,4,5}"));
auto* copy_p3 = FindInstruction(module.get(), "copy.p3");
ASSERT_NE(copy_p3, nullptr);
EXPECT_THAT(
copy_p3,
op::Sharding("{devices=[1,2,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
for (HloInstruction* instruction : {concatenate, copy_p0, copy_p2}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
for (HloInstruction* instruction : {copy_p1, copy_p3}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("b")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
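// The gather tests below exercise shardings that merge several propagation
// mechanisms (index-parallel, operand-passthrough, trivially sliced operand,
// and index-passthrough dimensions). Here a sharding that tiles both a
// parallel dimension and a passthrough (offset) dimension of the operand
// flows forward to the indices and the gather output.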
TEST_P(ParameterizedMetadataTest,
GatherMergedIndexParallelAndOperandPassthroughFromOperandForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
sharding={devices=[2,1,2,1]0,1,4,5 metadata={op_name="a"}}
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%concatenate = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(operand, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* gather = FindInstruction(module.get(), "gather");
ASSERT_NE(gather, nullptr);
EXPECT_THAT(gather, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
for (const HloInstruction* instruction : {operand, indices, gather}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
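// Backward counterpart: the merged index-parallel/operand-passthrough
// sharding is annotated on the gather output instead.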
TEST_P(ParameterizedMetadataTest,
GatherMergedIndexParallelAndOperandPassthroughBackwardPass) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%concatenate = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2},
sharding={devices=[2,1,2,1]0,1,4,5 metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(operand, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* gather = FindInstruction(module.get(), "gather");
ASSERT_NE(gather, nullptr);
EXPECT_THAT(gather, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
for (const HloInstruction* instruction : {operand, indices, gather}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
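// A sharding on the indices that tiles both the parallel batch dimension and
// an index-passthrough dimension flows forward to the operand (parallel
// component only) and the gather output.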
TEST_P(ParameterizedMetadataTest,
GatherMergedIndexParallelAndIndexPassthroughFromIndicesForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1),
sharding={devices=[1,2,2]0,1,4,5 metadata={op_name="a"}}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%concatenate = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(
operand,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(indices, op::Sharding("{devices=[1,2,2]0,1,4,5}"));
const HloInstruction* gather = FindInstruction(module.get(), "gather");
ASSERT_NE(gather, nullptr);
EXPECT_THAT(gather, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
for (const HloInstruction* instruction : {operand, indices, gather}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
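// Backward counterpart: the merged index-parallel/index-passthrough sharding
// on the gather output splits into a parallel component for the operand and a
// passthrough component for the indices.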
TEST_P(ParameterizedMetadataTest,
GatherMergedIndexParallelAndIndexPassthroughBackwardPass) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%concatenate = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2},
sharding={devices=[2,2,1,1]0,1,4,5 metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(
operand,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(indices, op::Sharding("{devices=[1,2,2]0,1,4,5}"));
const HloInstruction* gather = FindInstruction(module.get(), "gather");
ASSERT_NE(gather, nullptr);
EXPECT_THAT(gather, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
for (const HloInstruction* instruction : {operand, indices, gather}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
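// A sharding on the operand that tiles the parallel dimension and a trivially
// sliced dimension (slice size 1) flows forward; the trivially sliced
// dimension surfaces as partial replication on the gather output.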
TEST_P(ParameterizedMetadataTest,
GatherMergedIndexParallelAndTrivialSlicedOperandFromOperandForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
sharding={devices=[2,2,1,1]0,1,4,5 metadata={op_name="a"}}
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%concatenate = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(operand, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* gather = FindInstruction(module.get(), "gather");
ASSERT_NE(gather, nullptr);
EXPECT_THAT(
gather,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction : {operand, indices, gather}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
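// Backward pass: the gather output's parallel-dimension tiling merges with
// the trivially sliced annotation already present on the operand.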
TEST_P(ParameterizedMetadataTest,
GatherMergedIndexParallelAndTrivialSlicedOperandBackwardPass) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%concatenate = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2},
sharding={devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(operand, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* gather = FindInstruction(module.get(), "gather");
ASSERT_NE(gather, nullptr);
EXPECT_THAT(
gather,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction : {operand, indices, gather}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
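// A sharding on the operand that tiles a passthrough (offset) dimension and a
// trivially sliced dimension flows forward to the gather output.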
TEST_P(
ParameterizedMetadataTest,
GatherMergedOperandPassthroughAndTrivialSlicedOperandFromOperandForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[2,8,4]{2,1,0} parameter(1)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
sharding={devices=[1,2,2,1]0,4,1,5 metadata={op_name="a"}}
%indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1)
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %indices), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(operand, op::Sharding("{devices=[1,2,2,1]0,4,1,5}"));
const HloInstruction* indices = FindInstruction(module.get(), "indices");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(indices, op::Sharding("{replicated}"));
const HloInstruction* gather = FindInstruction(module.get(), "gather");
ASSERT_NE(gather, nullptr);
EXPECT_THAT(
gather,
op::Sharding("{devices=[1,1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction : {operand, indices, gather}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
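// Backward pass: the output's operand-passthrough tiling merges with the
// trivially sliced annotation already present on the operand.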
TEST_P(ParameterizedMetadataTest,
GatherMergedOperandPassthroughAndTrivialSlicedOperandBackwardPass) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
%arg.1 = s32[2,8,4]{2,1,0} parameter(1)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1)
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %indices), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2},
sharding={devices=[1,1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(operand, op::Sharding("{devices=[1,2,2,1]0,4,1,5}"));
const HloInstruction* indices = FindInstruction(module.get(), "indices");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(indices, op::Sharding("{replicated}"));
const HloInstruction* gather = FindInstruction(module.get(), "gather");
ASSERT_NE(gather, nullptr);
EXPECT_THAT(
gather,
op::Sharding("{devices=[1,1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction : {operand, indices, gather}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
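// Operand-passthrough and index-passthrough shardings originating on the
// operand and the indices, respectively, combine on the gather output.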
TEST_P(ParameterizedMetadataTest,
GatherMergedOperandAndIndexPassthroughFromOperandAndIndexForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[2,8,4]{2,1,0} parameter(1)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
sharding={devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
%indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1),
sharding={devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %indices), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(
operand,
op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
const HloInstruction* indices = FindInstruction(module.get(), "indices");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* gather = FindInstruction(module.get(), "gather");
ASSERT_NE(gather, nullptr);
EXPECT_THAT(gather, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
for (const HloInstruction* instruction : {operand, indices, gather}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
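// Backward counterpart: a gather output tiled on both a batch
// (index-passthrough) and an offset (operand-passthrough) dimension.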
TEST_P(ParameterizedMetadataTest,
GatherMergedOperandPassthroughAndIndexPassthroughBackwardPass) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[2,8,4]{2,1,0} parameter(1)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1)
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %indices), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2},
sharding={devices=[2,1,2,1]0,1,4,5 metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(
operand,
op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
const HloInstruction* indices = FindInstruction(module.get(), "indices");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* gather = FindInstruction(module.get(), "gather");
ASSERT_NE(gather, nullptr);
EXPECT_THAT(gather, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
for (const HloInstruction* instruction : {operand, indices, gather}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
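// Trivially-sliced-operand and index-passthrough shardings originating on the
// operand and the indices combine on the gather output, with the trivially
// sliced dimension contributing partial replication.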
TEST_P(
ParameterizedMetadataTest,
GatherMergedTrivialSlicedOperandAndIndexPassthroughFromOperandAndIndexForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[2,8,4]{2,1,0} parameter(1)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
sharding={devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
%indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1),
sharding={devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %indices), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(
operand,
op::Sharding("{devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate}"));
const HloInstruction* indices = FindInstruction(module.get(), "indices");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* gather = FindInstruction(module.get(), "gather");
ASSERT_NE(gather, nullptr);
EXPECT_THAT(
gather,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction : {operand, indices, gather}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
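// Backward pass: the gather output's passthrough tiling merges with the
// trivially sliced annotation already present on the operand; the indices
// stay replicated.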
TEST_P(ParameterizedMetadataTest,
GatherMergedTrivialSlicedOperandAndIndexPassthroughBackwardPass) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0),
sharding={devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
%arg.1 = s32[2,8,4]{2,1,0} parameter(1)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1)
%gather = s32[8,4,2,2]{3,2,1,0} gather(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %indices), offset_dims={2,3},
collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=0,
slice_sizes={1,1,2,2},
sharding={devices=[1,1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(operand, op::Sharding("{devices=[1,2,2,1]0,4,1,5}"));
const HloInstruction* indices = FindInstruction(module.get(), "indices");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(indices, op::Sharding("{replicated}"));
const HloInstruction* gather = FindInstruction(module.get(), "gather");
ASSERT_NE(gather, nullptr);
EXPECT_THAT(
gather,
op::Sharding("{devices=[1,1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction : {operand, indices, gather}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
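// The scatter analogues of the merged-sharding gather tests above. First: an
// index-parallel plus operand-passthrough sharding originating on the
// scatter operand.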
TEST_P(ParameterizedMetadataTest,
ScatterMergedIndexParallelAndOperandPassthroughFromOperandForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
sharding={devices=[2,1,2,1]0,1,4,5 metadata={op_name="a"}}
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%concatenate = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(operand, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
EXPECT_THAT(update, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(scatter, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
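// The same merged sharding, but originating on the scatter updates.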
TEST_P(ParameterizedMetadataTest,
ScatterMergedIndexParallelAndOperandPassthroughFromUpdateForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%concatenate = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2),
sharding={devices=[2,1,2,1]0,1,4,5 metadata={op_name="a"}}
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(operand, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
EXPECT_THAT(update, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(scatter, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
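// Backward counterpart: the merged index-parallel/operand-passthrough
// sharding is annotated on the scatter output.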
TEST_P(ParameterizedMetadataTest,
ScatterMergedIndexParallelAndOperandPassthroughBackwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%concatenate = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0,
sharding={devices=[2,1,2,1]0,1,4,5 metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(operand, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
EXPECT_THAT(update, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(scatter, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
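// An index-parallel plus trivially-sliced-operand sharding originating on the
// scatter operand; the trivially sliced dimension surfaces as partial
// replication on the updates.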
TEST_P(
ParameterizedMetadataTest,
ScatterMergedIndexParallelAndTrivialSlicedOperandFromOperandForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
sharding={devices=[2,2,1,1]0,1,4,5 metadata={op_name="a"}}
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%concatenate = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(operand, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
EXPECT_THAT(
update,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(scatter, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
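// Backward counterpart: the merged index-parallel/trivially-sliced-operand
// sharding is annotated on the scatter output.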
TEST_P(ParameterizedMetadataTest,
ScatterMergedIndexParallelAndTrivialSlicedOperandBackwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%concatenate = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0,
sharding={devices=[2,2,1,1]0,1,4,5 metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(operand, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
EXPECT_THAT(
update,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(scatter, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
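// Forward pass: indices sharded on both the index-parallel and the
// index-passthrough dimension drive the shardings of the operand, update,
// and scatter output.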
TEST_P(ParameterizedMetadataTest,
ScatterMergedIndexParallelAndIndexPassthroughFromIndexForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1),
sharding={devices=[1,2,2]0,1,4,5 metadata={op_name="a"}}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%concatenate = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(
operand,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(indices, op::Sharding("{devices=[1,2,2]0,1,4,5}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
EXPECT_THAT(update, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(
scatter,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
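// Forward pass: the update's sharding on the index-parallel and
// index-passthrough dimensions determines the other scatter operands.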
TEST_P(ParameterizedMetadataTest,
ScatterMergedIndexParallelAndIndexPassthroughFromUpdateForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1)
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%concatenate = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2),
sharding={devices=[2,2,1,1]0,1,4,5 metadata={op_name="a"}}
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(
operand,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(indices, op::Sharding("{devices=[1,2,2]0,1,4,5}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
EXPECT_THAT(update, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(
scatter,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
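// Backward pass: shardings on the indices and on the scatter output are
// merged and propagated to the operand and the update.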
TEST_P(ParameterizedMetadataTest,
ScatterMergedIndexParallelAndIndexPassthroughBackwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[1,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[1,8,4]{2,1,0} copy(s32[1,8,4]{2,1,0} %arg.1),
sharding={devices=[1,1,2,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
%iota = s32[1,8,4]{2,1,0} iota(), iota_dimension=1
%concatenate = s32[2,8,4]{2,1,0} concatenate(
s32[1,8,4]{2,1,0} %iota, s32[1,8,4]{2,1,0} %indices), dimensions={0}
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %concatenate,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0,
sharding={devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(
operand,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* indices = FindInstruction(module.get(), "concatenate");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(indices, op::Sharding("{devices=[1,2,2]0,1,4,5}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
EXPECT_THAT(update, op::Sharding("{devices=[2,2,1,1]0,1,4,5}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(
scatter,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
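// Forward pass: an operand sharded on a trivially sliced and an
// operand-passthrough dimension shards the update and scatter output, while
// the indices stay replicated.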
TEST_P(
ParameterizedMetadataTest,
ScatterMergedOperandPassthroughAndTrivialSlicedOperandFromOperandForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[2,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
sharding={devices=[1,2,2,1]0,1,4,5 metadata={op_name="a"}}
%indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1)
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %indices,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(operand, op::Sharding("{devices=[1,2,2,1]0,1,4,5}"));
const HloInstruction* indices = FindInstruction(module.get(), "indices");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(indices, op::Sharding("{replicated}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
EXPECT_THAT(
update,
op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(scatter, op::Sharding("{devices=[1,2,2,1]0,1,4,5}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
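// Backward pass: the merged operand-passthrough/trivially-sliced sharding on
// the scatter propagates back to the operand and the update.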
TEST_P(ParameterizedMetadataTest,
ScatterMergedOperandPassthroughAndTrivialSlicedOperandBackwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[2,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1)
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %indices,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0,
sharding={devices=[1,2,2,1]0,1,4,5 metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(operand, op::Sharding("{devices=[1,2,2,1]0,1,4,5}"));
const HloInstruction* indices = FindInstruction(module.get(), "indices");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(indices, op::Sharding("{replicated}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
EXPECT_THAT(
update,
op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(scatter, op::Sharding("{devices=[1,2,2,1]0,1,4,5}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
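// Forward pass: operand-passthrough sharding on the operand combines with
// index-passthrough sharding on the indices to shard the update.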
TEST_P(ParameterizedMetadataTest,
ScatterMergedOperandAndIndexPassthroughFromOperandAndIndexForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[2,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
sharding={devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
%indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1),
sharding={devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %indices,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(
operand,
op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
const HloInstruction* indices = FindInstruction(module.get(), "indices");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
EXPECT_THAT(update, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(
scatter,
op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
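// Forward pass: the update's sharding alone implies both the
// operand-passthrough and index-passthrough shardings of the other operands.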
TEST_P(
ParameterizedMetadataTest,
ScatterMergedOperandPassthroughAndIndexPassthroughFromUpdateForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[2,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1)
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2),
sharding={devices=[2,1,2,1]0,1,4,5 metadata={op_name="a"}}
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %indices,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(
operand,
op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
const HloInstruction* indices = FindInstruction(module.get(), "indices");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
EXPECT_THAT(update, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(
scatter,
op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
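// Backward pass: shardings on the indices and the scatter output propagate
// back to the operand and the update.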
TEST_P(ParameterizedMetadataTest,
ScatterMergedOperandPassthroughAndIndexPassthroughBackwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[2,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1),
sharding={devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %indices,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0,
sharding={devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(
operand,
op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
const HloInstruction* indices = FindInstruction(module.get(), "indices");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
EXPECT_THAT(update, op::Sharding("{devices=[2,1,2,1]0,1,4,5}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(
scatter,
op::Sharding("{devices=[1,1,2,1,2]0,4,1,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
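// Forward pass: a trivially sliced operand plus passthrough-sharded indices
// yield a merged sharding on the update.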
TEST_P(
ParameterizedMetadataTest,
ScatterMergedTrivialSlicedOperandAndIndexPassthroughFromOperandAndIndexForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[2,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
sharding={devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
%indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1),
sharding={devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %indices,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(
operand,
op::Sharding("{devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate}"));
const HloInstruction* indices = FindInstruction(module.get(), "indices");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
EXPECT_THAT(
update,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(
scatter,
op::Sharding("{devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
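// Forward pass: the operand and update shardings together imply the
// index-passthrough sharding of the indices.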
TEST_P(
ParameterizedMetadataTest,
ScatterMergedTrivialSlicedOperandAndIndexPassthroughFromOperandAndUpdateForwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[2,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0),
sharding={devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
%indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1)
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2),
sharding={devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %indices,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(
operand,
op::Sharding("{devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate}"));
const HloInstruction* indices = FindInstruction(module.get(), "indices");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
EXPECT_THAT(
update,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(
scatter,
op::Sharding("{devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
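// Backward pass: shardings on the indices and the scatter output propagate
// back through the trivially sliced and passthrough dimensions.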
TEST_P(ParameterizedMetadataTest,
ScatterMergedTrivialSlicedOperandAndIndexPassthroughBackwardPass) {
absl::string_view hlo_string = R"(
HloModule module
add (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT sum = s32[] add(lhs, rhs)
}
ENTRY %module {
%arg.0 = s32[8,4,2,2]{3,2,1,0} parameter(0)
%arg.1 = s32[2,8,4]{2,1,0} parameter(1)
%arg.2 = s32[8,4,2,2]{3,2,1,0} parameter(2)
%operand = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.0)
%indices = s32[2,8,4]{2,1,0} copy(s32[2,8,4]{2,1,0} %arg.1),
sharding={devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate metadata={op_name="a"}}
%update = s32[8,4,2,2]{3,2,1,0} copy(s32[8,4,2,2]{3,2,1,0} %arg.2)
%scatter = s32[8,4,2,2]{3,2,1,0} scatter(
s32[8,4,2,2]{3,2,1,0} %operand,
s32[2,8,4]{2,1,0} %indices,
s32[8,4,2,2]{3,2,1,0} %update),
to_apply=add,
update_window_dims={2,3},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0,
sharding={devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate metadata={op_name="a"}}
ROOT %copy = s32[8,4,2,2]{3,2,1,0} copy(%scatter)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(
operand,
op::Sharding("{devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate}"));
const HloInstruction* indices = FindInstruction(module.get(), "indices");
ASSERT_NE(indices, nullptr);
EXPECT_THAT(
indices,
op::Sharding("{devices=[1,2,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* update = FindInstruction(module.get(), "update");
ASSERT_NE(update, nullptr);
EXPECT_THAT(
update,
op::Sharding("{devices=[2,1,1,1,2]0,1,4,5 last_tile_dim_replicate}"));
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_THAT(
scatter,
op::Sharding("{devices=[1,2,1,1,2]0,4,1,5 last_tile_dim_replicate}"));
for (const HloInstruction* instruction :
{operand, indices, update, scatter}) {
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
}
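// The gather's output sharding offers nothing for the index dimensions, so
// the index operand must be replicated.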
TEST_P(ParameterizedMetadataTest, CorrectlyReplicateGatherIndex) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = bf16[1,2,2,2,8]{4,3,2,1,0} parameter(0)
%parameter.1 = s32[1,2,2]{2,1,0} parameter(1)
%index = s32[1,2,2]{2,1,0} copy(%parameter.1)
%gather = bf16[1,2,2,2,8]{4,3,2,1,0} gather(
bf16[1,2,2,2,8]{4,3,2,1,0} %parameter.0, s32[1,2,2]{2,1,0} %index),
offset_dims={2,3,4}, collapsed_slice_dims={0,1}, start_index_map={0,1},
index_vector_dim=2, slice_sizes={1,1,2,2,8},
sharding={devices=[1,1,2,1,1]0,1 metadata={op_name="a"}}
ROOT %copy = bf16[1,2,2,2,8]{4,3,2,1,0} copy(%gather)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* index = FindInstruction(module.get(), "index");
ASSERT_NE(index, nullptr);
EXPECT_THAT(index, op::Sharding("{replicated}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(index->sharding(), ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(index->sharding(), ShardingMetadata({}));
}
}
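// When the parallel dimension is not partitioned, no useful sharding can be
// inferred for the gather operand, which stays replicated.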
TEST_P(ParameterizedMetadataTest, GatherToOperand_ParallelDimIsNotPartitioned) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %module {
%parameter.0 = s32[2,1000,1]{2,1,0} parameter(0)
%parameter.1 = bf16[2,4819,4]{2,1,0} parameter(1)
%iota = s32[2,1000,1]{1,0,2} iota(), iota_dimension=0
%operand = bf16[2,4819,4]{2,1,0} copy(%parameter.1)
%index = s32[2,1000,2]{2,1,0} concatenate(s32[2,1000,1]{1,0,2} %iota,
s32[2,1000,1]{2,1,0} %parameter.0), dimensions={2},
sharding={devices=[1,4,1]0,1,2,3}
ROOT %gather = bf16[2,1000,4]{2,1,0} gather(bf16[2,4819,4]{2,1,0} %operand,
s32[2,1000,2]{2,1,0} %index), offset_dims={2},
collapsed_slice_dims={0,1}, start_index_map={0,1},
index_vector_dim=2, slice_sizes={1,1,4},
sharding={devices=[1,4,1]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* operand = FindInstruction(module.get(), "operand");
EXPECT_THAT(operand, op::Sharding("{replicated}"));
}
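// Subgroup-manual shardings on the parameters propagate forward to the add.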
TEST_P(ParameterizedMetadataTest, ManualSubgroupForward) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[6,3]{1,0} parameter(0),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}}
%copy = f32[6,3]{1,0} copy(%param0)
%param1 = f32[6,3]{1,0} parameter(1),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}}
%copy.1 = f32[6,3]{1,0} copy(%param1)
%add = f32[6,3]{1,0} add(%copy, %copy.1)
ROOT %copy.2 = f32[6,3]{1,0} copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "add");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
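// A subgroup-manual sharding on a single operand suffices to shard both the
// add and the other operand's copy.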
TEST_P(ParameterizedMetadataTest, ManualSubgroup_SingleOperandHasSharding) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[6,3]{1,0} parameter(0),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}}
%copy = f32[6,3]{1,0} copy(%param0)
%param1 = f32[6,3]{1,0} parameter(1)
%copy.1 = f32[6,3]{1,0} copy(%param1)
%add = f32[6,3]{1,0} add(%copy, %copy.1)
ROOT %copy.2 = f32[6,3]{1,0} copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "add");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
auto* operand = FindInstruction(module.get(), "copy");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(operand,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(operand->sharding(), ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(operand->sharding(), ShardingMetadata({}));
}
}
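// Merging a subgroup-manual operand with a {replicated, manual} operand
// still yields the tiled subgroup-manual sharding on the add.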
TEST_P(ParameterizedMetadataTest, ManualSubgroup_OneOperandReplicate) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[6,3]{1,0} parameter(0),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}}
%copy = f32[6,3]{1,0} copy(%param0)
%param1 = f32[6,3]{1,0} parameter(1),
sharding={devices=[1,1,2,2]0,1,2,3 last_tile_dims={replicated, manual} metadata={op_name="a"}}
%copy.1 = f32[6,3]{1,0} copy(%param1)
%add = f32[6,3]{1,0} add(%copy, %copy.1)
ROOT %copy.2 = f32[6,3]{1,0} copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "add");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
auto* operand = FindInstruction(module.get(), "copy");
ASSERT_NE(operand, nullptr);
EXPECT_THAT(operand,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(operand->sharding(), ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(operand->sharding(), ShardingMetadata({}));
}
}
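// Subgroup-manual sharding on the add propagates backward to its operands.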
TEST_P(ParameterizedMetadataTest, ManualSubgroupBackward) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[6,3]{1,0} parameter(0)
%copy = f32[6,3]{1,0} copy(%param0)
%param1 = f32[6,3]{1,0} parameter(1)
%copy.1 = f32[6,3]{1,0} copy(%param1)
%add = f32[6,3]{1,0} add(%copy, %copy.1),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dims={manual} metadata={op_name="a"}}
ROOT %copy.2 = f32[6,3]{1,0} copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/false, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "copy");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}"));
if (GetParam().propagate_metadata && !GetParam().clear_metadata) {
EXPECT_THAT(instruction->sharding(),
ShardingMetadata({CreateMetadata("a")}));
} else {
EXPECT_THAT(instruction->sharding(), ShardingMetadata({}));
}
}
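// Instructions between SPMDFullToShardShape and SPMDShardToFullShape are
// manually partitioned: the reduce picks up {manual}.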
TEST_F(ShardingPropagationTest, SimpleManual) {
const char* const hlo_string = R"(
HloModule module
%add {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %entry {
%param0 = f32[6,3] parameter(0)
%copy = f32[6,3] copy(%param0), sharding={devices=[2,1]0,1}
%annotate = f32[6,3] custom-call(%copy), custom_call_target="Sharding",
sharding={devices=[2,1]0,1}
%to_manual = f32[3,3] custom-call(%annotate),
custom_call_target="SPMDFullToShardShape", sharding={manual}
%zero = f32[] constant(0)
%reduce = f32[3] reduce(%to_manual, %zero), dimensions={1}, to_apply=%add
%annotate2 = f32[3] custom-call(%reduce), custom_call_target="Sharding",
sharding={manual}
%to_auto = f32[6] custom-call(%annotate2),
custom_call_target="SPMDShardToFullShape", sharding={devices=[2]0,1}
ROOT %copy.2 = f32[6] copy(%to_auto)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "reduce");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{manual}"));
}
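// Manual sharding flows through tuple and get-tuple-element inside the
// manual region.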
TEST_F(ShardingPropagationTest, SimpleManualTuple) {
const char* const hlo_string = R"(
HloModule module
%add {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(%lhs, %rhs)
}
ENTRY %entry {
%param0 = f32[6,3] parameter(0)
%copy = f32[6,3] copy(%param0), sharding={devices=[2,1]0,1}
%annotate = f32[6,3] custom-call(%copy), custom_call_target="Sharding",
sharding={devices=[2,1]0,1}
%to_manual = f32[3,3] custom-call(%annotate),
custom_call_target="SPMDFullToShardShape", sharding={manual}
%t = (f32[3,3]) tuple(%to_manual)
%gte = f32[3,3] get-tuple-element(%t), index=0
%to_auto = f32[3,3] custom-call(%gte),
custom_call_target="SPMDShardToFullShape", sharding={devices=[2,1]0,1}
ROOT %copy.2 = f32[3,3] copy(%to_auto)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "t");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{{manual}}"));
instruction = FindInstruction(module.get(), "gte");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{manual}"));
}
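// A manual operand propagates {manual} to every element of a tuple-shaped
// custom-call result.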
TEST_F(ShardingPropagationTest, DefaultManualCustomCallForward) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[6,3]{1,0} parameter(0),
sharding={manual metadata={op_name="a"}}
%copy = f32[6,3]{1,0} copy(%param0)
%param1 = f32[6,3]{1,0} parameter(1)
%copy.1 = f32[6,3]{1,0} copy(%param1)
%param2 = f32[6,3]{1,0} parameter(2)
%copy.2 = f32[6,3]{1,0} copy(%param2)
%custom-call = (f32[], f32[6,3]{1,0}) custom-call(%copy, %copy.1, %copy.2), custom_call_target="some_custom_call"
ROOT %copy.3 = (f32[], f32[6,3]{1,0}) copy(%custom-call)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "custom-call");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{{manual},{manual}}"));
}
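// A Sharding custom call with unspecified_dims merges its annotated sharding
// with the operand's sharding along the unspecified dimension.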
TEST_F(ShardingPropagationTest, RefineUnspecifiedDims) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[6,3] parameter(0)
%copy = f32[6,3] copy(%param0),
sharding={devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}
%annotate = f32[6,3] custom-call(%copy), custom_call_target="Sharding",
backend_config="unspecified_dims=[1]",
sharding={devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}
%copy.2 = f32[6,3] copy(%annotate)
ROOT %copy.3 = f32[6,3] copy(%copy.2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "copy.2");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,2,1,3}"));
}
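// Unspecified dims are refined across SPMDFullToShardShape/ShardToFullShape
// conversions, updating the manual subgroup shardings in between.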
TEST_F(ShardingPropagationTest, RefineUnspecifiedDimsWithManualConversion) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[6,3,8] parameter(0)
%copy = f32[6,3,8] copy(%param0),
sharding={devices=[1,2,1,4]0,1,2,3,4,5,6,7 last_tile_dim_replicate}
%annotate = f32[6,3,8] custom-call(%copy), custom_call_target="Sharding",
backend_config="unspecified_dims=[1,2]",
sharding={devices=[2,1,1,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate}
%to_manual = f32[3,3,8] custom-call(%annotate),
custom_call_target="SPMDFullToShardShape",
backend_config="unspecified_dims=[1,2]",
sharding={devices=[1,1,1,4,2]0,2,1,3,4,6,5,7 last_tile_dims={replicated,manual}}
%annotate2 = f32[3,3,8] custom-call(%to_manual), custom_call_target="Sharding",
backend_config="unspecified_dims=[1,2]",
sharding={devices=[1,1,1,4,2]0,2,1,3,4,6,5,7 last_tile_dims={replicated,manual}}
%to_auto = f32[6,3,8] custom-call(%annotate2),
custom_call_target="SPMDShardToFullShape",
backend_config="unspecified_dims=[1,2]",
sharding={devices=[2,1,1,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate}
%copy.2 = f32[6,3,8] copy(%to_auto)
ROOT %copy.3 = f32[6,3,8] copy(%copy.2),
sharding={devices=[1,1,2,4]0,2,4,6,1,3,5,7 last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* copy2 = FindInstruction(module.get(), "copy.2");
ASSERT_NE(copy2, nullptr);
EXPECT_THAT(copy2, op::Sharding("{devices=[2,2,2]0,1,4,5,2,3,6,7}"));
auto* to_manual = FindInstruction(module.get(), "to_manual");
ASSERT_NE(to_manual, nullptr);
EXPECT_THAT(
to_manual,
op::Sharding(
"{devices=[1,2,2,2]0,2,1,3,4,6,5,7 last_tile_dims={manual}}"));
auto* to_auto = FindInstruction(module.get(), "to_auto");
ASSERT_NE(to_auto, nullptr);
EXPECT_THAT(to_auto, op::Sharding("{devices=[2,2,2]0,1,4,5,2,3,6,7}"));
}
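// Refinement of unspecified dims also propagates backward through the manual
// region to the copy feeding it.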
TEST_F(ShardingPropagationTest, RefineUnspecifiedDimsWithManualConversion2) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[6,3,8] parameter(0)
%copy = f32[6,3,8] copy(%param0)
%annotate1 = f32[6,3,8] custom-call(%copy), custom_call_target="Sharding",
backend_config="unspecified_dims=[1,2]",
sharding={devices=[2,1,1,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate}
%to_manual = f32[3,3,8] custom-call(%annotate1),
custom_call_target="SPMDFullToShardShape",
backend_config="unspecified_dims=[1,2]",
sharding={devices=[1,1,1,4,2]0,2,1,3,4,6,5,7 last_tile_dims={replicated,manual}}
%annotate2 = f32[3,3,8] custom-call(%to_manual), custom_call_target="Sharding",
backend_config="unspecified_dims=[1,2]",
sharding={devices=[1,1,1,4,2]0,2,1,3,4,6,5,7 last_tile_dims={replicated,manual}}
%annotate3 = f32[3,3,8] custom-call(%annotate2), custom_call_target="Sharding",
backend_config="unspecified_dims=[1,2]",
sharding={devices=[1,1,1,4,2]0,2,1,3,4,6,5,7 last_tile_dims={replicated,manual}}
%to_auto = f32[6,3,8] custom-call(%annotate3),
custom_call_target="SPMDShardToFullShape",
backend_config="unspecified_dims=[1,2]",
sharding={devices=[2,1,1,4]0,1,4,5,2,3,6,7 last_tile_dim_replicate}
%copy.2 = f32[6,3,8] copy(%to_auto),
sharding={devices=[1,2,1,4]0,1,2,3,4,5,6,7 last_tile_dim_replicate}
ROOT %copy.3 = f32[6,3,8] copy(%copy.2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* copy = FindInstruction(module.get(), "copy");
ASSERT_NE(copy, nullptr);
EXPECT_THAT(
copy, op::Sharding(
"{devices=[2,2,1,2]0,1,4,5,2,3,6,7 last_tile_dim_replicate}"));
}
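// Fully manual shardings are left untouched even when unspecified_dims are
// present.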
TEST_F(ShardingPropagationTest, DoNotRefineUnspecifiedDimsOnManual) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[6,3] parameter(0), sharding={manual}
%annotate = f32[6,3] custom-call(%param0), custom_call_target="Sharding",
backend_config="unspecified_dims=[1]", sharding={manual}
ROOT %copy.2 = f32[6,3] copy(%annotate), sharding={manual}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
.Run(module.get()));
EXPECT_TRUE(changed);
for (auto* hlo : module->entry_computation()->instructions()) {
EXPECT_TRUE(hlo->sharding().IsManual());
}
}
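// SPMDShardToFullShape keeps its user-provided sharding instead of
// inheriting {manual} from its operand.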
TEST_F(ShardingPropagationTest, DoNotPassManualShardingToSPMDShardToFullShape) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
p.0 = f32[2,3]{1,0} parameter(0), sharding={replicated}
custom-call.2 = f32[2,3]{1,0} custom-call(p.0), custom_call_target="Sharding", sharding={replicated}
custom-call.3 = f32[2,3]{1,0} custom-call(custom-call.2), custom_call_target="SPMDFullToShardShape", sharding={manual}
custom-call.4 = f32[2,3]{1,0} custom-call(custom-call.3), custom_call_target="Sharding", sharding={manual}
ROOT custom-call.5 = f32[16,3]{1,0} custom-call(custom-call.4), custom_call_target="SPMDShardToFullShape", sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true,
/*allow_spmd_sharding_propagation_to_output=*/{true})
.Run(module.get()));
EXPECT_TRUE(changed);
auto spmd_shard_to_full = module->entry_computation()->root_instruction();
CHECK(spmd_shard_to_full->IsCustomCall("SPMDShardToFullShape"));
EXPECT_FALSE(spmd_shard_to_full->sharding().IsManual());
}
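// After constant splitting, the constant clone used inside the manual region
// becomes manual while the other clone stays replicated.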
TEST_F(ShardingPropagationTest, ManualShardingPassThroughSplitConstant) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
p.0 = f32[2,3]{1,0} parameter(0), sharding={replicated}
p.1 = f32[2,3]{1,0} parameter(1), sharding={replicated}
constant = f32[2,3]{1,0} constant({{0,1,2},{3,4,5}})
custom-call.0 = f32[2,3]{1,0} custom-call(p.0), custom_call_target="Sharding", sharding={replicated}
custom-call.1 = f32[2,3]{1,0} custom-call(custom-call.0), custom_call_target="SPMDFullToShardShape", sharding={manual}
add.0 = f32[2,3]{1,0} add(constant, custom-call.1)
custom-call.2 = f32[2,3]{1,0} custom-call(add.0), custom_call_target="SPMDShardToFullShape", sharding={replicated}
add.1 = f32[2,3]{1,0} add(constant, p.1)
ROOT tuple = (f32[2,3]{1,0}, f32[2,3]{1,0}) tuple(custom-call.2, add.1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool is_split,
HloConstantSplitter(/*split_expressions=*/true).Run(module.get()));
EXPECT_TRUE(is_split);
TF_ASSERT_OK_AND_ASSIGN(auto _, HloDCE().Run(module.get()));
(void)_;
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* add0 = FindInstruction(module.get(), "add.0");
const HloInstruction* manual_constant = add0->operand(0);
EXPECT_TRUE(manual_constant->IsConstant() &&
manual_constant->sharding().IsManual());
const HloInstruction* add1 = FindInstruction(module.get(), "add.1");
const HloInstruction* replicate_constant = add1->operand(0);
EXPECT_TRUE(replicate_constant->IsConstant() &&
replicate_constant->sharding().IsReplicated());
}
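// A reshape that cannot match the tiled dimensions keeps the manual subgroup
// and replicates the data dimensions.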
TEST_F(ShardingPropagationTest, ReshapeNoMatchSubgroupManual) {
const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
%param0 = f32[1,3,3] parameter(0),
sharding={devices=[2,1,1,2]0,1,2,3 last_tile_dims={manual}}
%reshape = f32[3,1,3,1] reshape(%param0)
ROOT %copy = f32[3,1,3,1] copy(%reshape)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "reshape");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(
instruction,
op::Sharding(
"{devices=[1,1,1,1,2,2]0,2,1,3 last_tile_dims={manual,replicated}}"));
}
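// The X64Combine custom call adopts the operands' tiled sharding rather than
// the weaker, partially replicated sharding of its user.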
TEST_F(ShardingPropagationTest, X64Combine) {
const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
%param0 = f32[102,192,192] parameter(0),
sharding={devices=[1,2,2]0,1,2,3}
%param1 = f32[102,192,192] parameter(1),
sharding={devices=[1,2,2]0,1,2,3}
%custom-call = f64[102,192,192] custom-call(f32[102,192,192] %param0, f32[102,192,192] %param1), custom_call_target="X64Combine"
ROOT %copy = f64[102,192,192] copy(%custom-call),
sharding={devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "custom-call");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2]0,1,2,3}"));
}
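// Sharding propagates through a LayoutConstraint custom call while the
// custom layout {0,1,2} is preserved.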
TEST_F(ShardingPropagationTest, LayoutConstraint) {
const char* const hlo_string = R"(
HloModule module
ENTRY %reshape {
%param0 = f32[102,192,192] parameter(0),
sharding={devices=[1,2,2]0,1,2,3}
%custom-call = f32[102,192,192]{0,1,2} custom-call(f32[102,192,192] %param0), custom_call_target="LayoutConstraint"
ROOT %copy = f32[102,192,192] copy(%custom-call),
sharding={devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "custom-call");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction->shape(), ShapeUtil::MakeShapeWithDenseLayout(
F32, {102, 192, 192}, {0, 1, 2}));
EXPECT_THAT(instruction, op::Sharding("{devices=[1,2,2]0,1,2,3}"));
}
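// Shardings propagate through the MoveToHost/MoveToDevice offloading custom
// calls.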
TEST_F(ShardingPropagationTest, OffloadingPropagation) {
const char* const hlo_string = R"(
HloModule module
ENTRY %offloading {
%param0 = f32[1,256,128] parameter(0), sharding={devices=[1,1,4]0,1,2,3}
%zero = f32[] constant(0.0)
%broadcast = f32[256,256,128] broadcast(%zero), dimensions={}
%izero = s32[] constant(0)
%custom-call.0 = f32[1,256,128] custom-call(f32[1,256,128] %param0), custom_call_target="MoveToHost"
%dynamic-update-slice = f32[256,256,128] dynamic-update-slice(%broadcast, %custom-call.0, %izero, %izero, %izero)
%dynamic-slice = f32[1,256,128] dynamic-slice(%dynamic-update-slice, %izero, %izero, %izero), dynamic_slice_sizes={1,256,128}
%custom-call.1 = f32[1,256,128] custom-call(f32[1,256,128] %dynamic-slice), custom_call_target="MoveToDevice"
ROOT %copy = f32[1,256,128] copy(%custom-call.1), sharding={devices=[1,4,1]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, /*propagate_metadata=*/true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* to_host = FindInstruction(module.get(), "custom-call.0");
EXPECT_THAT(to_host, op::Sharding("{devices=[1,1,4]0,1,2,3}"));
auto* from_host_input =
FindInstruction(module.get(), "custom-call.1")->operand(0);
EXPECT_THAT(from_host_input, op::Sharding("{devices=[1,1,4]0,1,2,3}"));
}
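// A sharding on the while input propagates through single-user chains inside
// the loop body, reaching the convert.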
TEST_P(ParameterizedMetadataTest, PropagateThroughSingleUsers) {
const char* const hlo_string = R"(
HloModule module
%cond {
%vars.cond = (u32[], f32[10,10], f32[10,10]) parameter(0)
%count.cond = u32[] get-tuple-element((u32[], f32[10,10], f32[10,10]) %vars.cond), index=0
%limit = u32[] constant(10)
ROOT %lt = pred[] compare(u32[] %count.cond, u32[] %limit), direction=LT
}
%body {
%vars = (u32[], f32[10,10], f32[10,10]) parameter(0)
%count = u32[] get-tuple-element(%vars), index=0
%acc = f32[10,10] get-tuple-element((u32[], f32[10,10],f32[10,10]) %vars), index=1
%cvt = s32[10,10] convert(acc)
%one = u32[] constant(1)
%count.1 = u32[] add(u32[] %count, u32[] %one)
%acc.i = s32[10,10] add(s32[10,10] %cvt, s32[10,10] %cvt), sharding={devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}
%acc.1 = f32[10,10] convert(acc.i)
ROOT %tuple = (u32[], f32[10,10], f32[10,10]) tuple(u32[] %count.1, f32[10,10] %acc, f32[10,10] %acc.1)
}
ENTRY %entry {
%p0 = f32[10,10] parameter(0)
%p0.copy = f32[10,10] copy(f32[10,10] %p0), sharding={devices=[4,1]0,1,2,3}
%p1 = f32[10,10] parameter(1)
%p2 = f32[10,10] parameter(2)
%p2.copy = f32[10,10] copy(f32[10,10] %p2)
%zero = u32[] constant(0)
%init = (u32[], f32[10,10], f32[10,10]) tuple(u32[] %zero, f32[10,10] %p0.copy, f32[10,10] %p2.copy)
%while = (u32[], f32[10,10], f32[10,10]) while((u32[], f32[10,10], f32[10,10]) %init),
body=%body, condition=%cond
%g1 = u32[] get-tuple-element((u32[], f32[10,10], f32[10,10]) %while), index=0
%g2 = f32[10,10] get-tuple-element((u32[], f32[10,10], f32[10,10]) %while), index=1
%g3 = f32[10,10] get-tuple-element((u32[], f32[10,10], f32[10,10]) %while), index=2
ROOT %t = (u32[], f32[10,10], f32[10,10]) tuple(%g1, %g2, %g3)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto body_root = FindInstruction(module.get(), "tuple");
EXPECT_NE(nullptr, body_root);
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(/*is_spmd=*/true, GetParam().propagate_metadata)
.Run(module.get()));
VLOG(1) << "Mod:";
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* convert_instr = FindInstruction(module.get(), "cvt");
EXPECT_THAT(convert_instr, op::Sharding("{devices=[4,1]0,1,2,3}"));
}
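// A user sharding on one leaf of a nested while tuple propagates back to the
// matching operand of the init tuple.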
TEST_P(ParameterizedMetadataTest, NestedTupleFromUserSharding) {
const char* const hlo_string = R"(
HloModule module
%cond {
%vars.cond = (u32[], ((f32[10,10], f32[10,10]), f32[]), f32[10,10]) parameter(0)
%count.cond = u32[] get-tuple-element(%vars.cond), index=0
%limit = u32[] constant(10)
ROOT %lt = pred[] compare(u32[] %count.cond, u32[] %limit), direction=LT
}
%body {
%vars = (u32[], ((f32[10,10], f32[10,10]), f32[]), f32[10,10]) parameter(0)
%count = u32[] get-tuple-element(%vars), index=0
%fwd = ((f32[10,10], f32[10,10]), f32[]) get-tuple-element(%vars), index=1
%acc = f32[10,10] get-tuple-element(%vars), index=2
%cvt = s32[10,10] convert(acc)
%one = u32[] constant(1)
%count.1 = u32[] add(u32[] %count, u32[] %one)
%acc.i = s32[10,10] add(s32[10,10] %cvt, s32[10,10] %cvt)
%acc.1 = f32[10,10] convert(acc.i)
ROOT %tuple = (u32[], ((f32[10,10], f32[10,10]), f32[]), f32[10,10]) tuple(%count.1, %fwd, %acc.1)
}
ENTRY %entry {
%p0 = f32[10,10] parameter(0)
%p0.copy = f32[10,10] copy(f32[10,10] %p0)
%p1 = f32[10,10] parameter(1)
%p1.copy = f32[10,10] copy(f32[10,10] %p1)
%p2 = f32[10,10] parameter(2)
%p2.copy = f32[10,10] copy(f32[10,10] %p2)
%zero = u32[] constant(0)
%zerof = f32[] constant(0)
%init0 = (f32[10,10], f32[10,10]) tuple(%p0.copy, %p1.copy)
%init1 = ((f32[10,10], f32[10,10]), f32[]) tuple(%init0, %zerof)
%init = (u32[], ((f32[10,10], f32[10,10]), f32[]), f32[10,10]) tuple(%zero, %init1, %p2.copy)
%while = (u32[], ((f32[10,10], f32[10,10]), f32[]), f32[10,10]) while(%init),
body=%body, condition=%cond
%g1 = u32[] get-tuple-element(%while), index=0
%g2 = ((f32[10,10], f32[10,10]), f32[]) get-tuple-element(%while), index=1
%g2.0 = (f32[10,10], f32[10,10]) get-tuple-element(%g2), index=0
%g2.0.0 = f32[10,10] get-tuple-element(%g2.0), index=0
%g3 = f32[10,10] get-tuple-element(%while), index=2
%copy.g3 = f32[10,10] copy(%g3), sharding={devices=[4,1]0,1,2,3}
ROOT %t = (u32[], f32[10,10], f32[10,10]) tuple(%g1, %g2.0.0, %g3)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto body_root = FindInstruction(module.get(), "tuple");
EXPECT_NE(nullptr, body_root);
if (GetParam().clear_metadata) {
ClearMetadata(module.get());
}
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, GetParam().propagate_metadata)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* convert_instr =
FindInstruction(module.get(), "p2.copy");
EXPECT_THAT(convert_instr, op::Sharding("{devices=[4,1]0,1,2,3}"));
}
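// In cse_prevention_only mode the pass only places temporary shardings
// (tagged _sharding_propagation_cse_prevention) so CSE cannot merge
// differently-sharded values; normal propagation (e.g. to %add) is skipped.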
TEST_F(ShardingPropagationTest, CSEPreventionOnly) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[] parameter(0), sharding={replicated}
%br = f32[4] broadcast(%param0), dimensions={}
%add = f32[4] add(%br, %br)
%annotate = f32[4] custom-call(%add), custom_call_target="Sharding",
backend_config="unspecified_dims=[0]", sharding={replicated}
ROOT %copy = f32[4] copy(%annotate), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false},
          /*cse_prevention_only=*/true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* br = FindInstruction(module.get(), "br");
EXPECT_THAT(br, op::Sharding("{devices=[4]0,1,2,3}"));
EXPECT_THAT(br->sharding(), ShardingMetadata({CreateMetadata(
"_sharding_propagation_cse_prevention")}));
EXPECT_THAT(FindInstruction(module.get(), "annotate"),
AllOf(op::Sharding("{replicated}"), op::CustomCall()));
EXPECT_FALSE(FindInstruction(module.get(), "add")->has_sharding());
}
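// A leftover CSE-prevention sharding must not win over real propagation: the
// output sharding {devices=[4]3,2,1,0} should replace it on %br and %add.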
TEST_F(ShardingPropagationTest, RemoveCSEPrevention) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[] parameter(0), sharding={replicated}
%br = f32[4] broadcast(%param0), dimensions={},
sharding={devices=[4]0,1,2,3 metadata={op_name="_sharding_propagation_cse_prevention"}}
%add = f32[4] add(%br, %br)
%annotate = f32[4] custom-call(%add), custom_call_target="Sharding",
backend_config="unspecified_dims=[0]", sharding={replicated}
ROOT %copy = f32[4] copy(%annotate), sharding={devices=[4]3,2,1,0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(FindInstruction(module.get(), "br"),
op::Sharding("{devices=[4]3,2,1,0}"));
EXPECT_THAT(FindInstruction(module.get(), "add"),
op::Sharding("{devices=[4]3,2,1,0}"));
}
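// Propagating backwards through a reshape that adds a trivial dimension: the
// tile on the size-1 dimension should become partial replication on %c.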
TEST_F(ShardingPropagationTest, ReshapeTrivialDimPartialReplicate) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[8,128] parameter(0), sharding={replicated}
%c = f32[8,128] copy(%param0)
%rsp = f32[8,1,128] reshape(%c),
sharding={devices=[1,2,4]0,1,2,3,4,5,6,7}
ROOT %copy = f32[8,1,128] copy(%rsp),
sharding={devices=[1,2,4]0,1,2,3,4,5,6,7}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(
FindInstruction(module.get(), "c"),
op::Sharding("{devices=[1,4,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate}"));
}
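// Regression test: propagation must handle empty tuples nested inside a
// tuple without crashing.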
TEST_F(ShardingPropagationTest, EmptyTupleWithinTuple) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[2] parameter(0), sharding={replicated}
%et = () tuple()
%tuple = (f32[2], (), (), f32[2]) tuple(%param0, %et, %et, %param0)
ROOT %copy = (f32[2], (), (), f32[2]) copy(%tuple)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
}
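// Regression test: propagating through this convolution used to crash when a
// contracting dimension was handled as a non-contracting one.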
TEST_F(ShardingPropagationTest, ContractingAsNonContractingCrash) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%p0 = f32[20,64,56,56]{3,2,1,0} parameter(0), sharding={replicated}
%p1 = f32[1,1,256,64]{2,3,1,0} parameter(1), sharding={devices=[4,2,1,1]0,1,2,3,4,5,6,7}
%convolution.4512 = f32[20,256,56,56]{3,2,1,0} convolution(%p0, %p1), window={size=1x1}, dim_labels=bf01_01oi->bf01
ROOT %copy = f32[20,256,56,56]{3,2,1,0} copy(%convolution.4512)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
}
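// Manual sharding on the inputs of a variadic reduce should propagate to the
// tuple-shaped reduce result as {{manual}, {manual}}.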
TEST_F(ShardingPropagationTest, PropagateReduceManualTuple) {
const char* const hlo_string = R"(
HloModule pjit
orclone {
lhs.1 = u32[] parameter(0)
rhs.1 = u32[] parameter(2)
or.2 = u32[] or(lhs.1, rhs.1)
lhs.0 = u32[] parameter(1)
rhs.0 = u32[] parameter(3)
or.3 = u32[] or(lhs.0, rhs.0)
ROOT tuple.4 = (u32[], u32[]) tuple(or.2, or.3)
}
ENTRY %main.21 {
select.104 = u32[2,2]{1,0} parameter(0), sharding={manual}
shift-left.5 = u32[2,2]{1,0} parameter(1), sharding={manual}
constant.4183 = u32[] constant(0), sharding={manual}
reduce.1 = (u32[2]{0}, u32[2]{0}) reduce(shift-left.5, select.104, constant.4183, constant.4183), dimensions={1}, to_apply=orclone
ROOT get-tuple-element.13 = u32[2]{0} get-tuple-element(reduce.1), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, true)
.Run(module.get()));
EXPECT_THAT(FindInstruction(module.get(), "reduce.1"),
op::Sharding("{{manual}, {manual}}"));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
}
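// Two compatible tiled shardings (8-way tiled, and 4-way tiled with partial
// replication) should merge so c1 picks up the finer 8-way tiling.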
TEST_F(ShardingPropagationTest, MergeCompatibleTiles) {
const char* const hlo_string = R"(
HloModule pjit
ENTRY %main.21 {
p = bf16[8,4,256,1024,12288]{4,3,2,1,0} parameter(0), sharding={devices=[8,1,1,1,1]0,1,2,3,4,5,6,7}
p2 = bf16[8,4,256,1024,12288]{4,3,2,1,0} parameter(1), sharding={devices=[4,1,1,1,1,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate}
c0 = bf16[8,4,256,1024,12288]{4,3,2,1,0} copy(p)
c1 = bf16[8,4,256,1024,12288]{4,3,2,1,0} copy(p2)
a = bf16[8,4,256,1024,12288]{4,3,2,1,0} add(c0, c1)
ROOT c2 = bf16[8,4,256,1024,12288]{4,3,2,1,0} copy(a), sharding={devices=[8,1,1,1,1]0,1,2,3,4,5,6,7}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(FindInstruction(module.get(), "c1"),
op::Sharding("{devices=[8,1,1,1,1]0,1,2,3,4,5,6,7}"));
}
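// The sharding attached to an outfeed propagates backwards to the copy that
// produces the outfed tuple element.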
TEST_F(ShardingPropagationTest, OutfeedUser) {
const char* const hlo_string = R"(
HloModule pjit
ENTRY %main.21 {
p = f32[10,128]{1,0} parameter(0)
c = f32[10,128]{1,0} copy(p)
t = (f32[10,128]{1,0}) tuple(c)
a = token[] after-all()
ROOT of = token[] outfeed((f32[10,128]{1,0}) %t, token[] %a), outfeed_shape=(f32[10,128]{1,0}), sharding={{devices=[2,1]0,1}, {maximal device=0}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(FindInstruction(module.get(), "c"),
op::Sharding("{devices=[2,1]0,1}"));
}
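// A ShardBarrierFrom custom call blocks forward propagation, so sort.0 must
// remain unsharded.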
TEST_F(ShardingPropagationTest, SortForwardWithBarrier) {
const char* const hlo_string = R"(
HloModule module
compare {
p.0.lhs = f32[] parameter(0), sharding={replicated}
p.0.rhs = f32[] parameter(1), sharding={replicated}
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
param.0 = f32[1024,1024]{1,0} parameter(0)
negate.0 = f32[1024,1024]{1,0} negate(param.0), sharding={devices=[1,8]0,1,2,3,4,5,6,7}
%shard-barrier-from = f32[1024,1024]{1,0} custom-call(%negate.0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
sort.0 = f32[1024,1024]{1,0} sort(shard-barrier-from), dimensions={1}, is_stable=true, to_apply=compare
ROOT copy.0 = f32[1024,1024]{1,0} copy(sort.0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(true, true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_FALSE(FindInstruction(module.get(), "sort.0")->has_sharding());
}
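// A ShardBarrierTo custom call blocks backward propagation from the sharded
// sort, so negate.0 ends up replicated instead of tiled.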
TEST_F(ShardingPropagationTest, SortBackwardWithBarrier) {
const char* const hlo_string = R"(
HloModule module
compare {
p.0.lhs = f32[] parameter(0), sharding={replicated}
p.0.rhs = f32[] parameter(1), sharding={replicated}
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
param.0 = f32[1024,1024]{1,0} parameter(0)
negate.0 = f32[1024,1024]{1,0} negate(param.0)
%shard-barrier-to = f32[1024,1024]{1,0} custom-call(%negate.0), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
sort.0 = f32[1024,1024]{1,0} sort(shard-barrier-to), dimensions={1}, is_stable=true, to_apply=compare,
sharding={devices=[1,8]0,1,2,3,4,5,6,7}
ROOT copy.0 = f32[1024,1024]{1,0} copy(sort.0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
std::ignore,
ShardingPropagation(true, true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_THAT(FindInstruction(module.get(), "negate.0"),
op::Sharding("{replicated}"));
}
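// A rank-1 sort operand sharded along its only (sort) dimension cannot be
// propagated, so the pass reports no change.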
TEST_F(ShardingPropagationTest, SortOperandShardedOnSortDim_RankOne) {
const char* const hlo_string = R"(
HloModule module, entry_computation_layout={(f32[1024]{0})->(f32[1024]{0}, s32[1024]{0})}
compare {
p.0.lhs = f32[] parameter(0), sharding={replicated}
p.0.rhs = f32[] parameter(1), sharding={replicated}
p.1.lhs = s32[] parameter(2), sharding={replicated}
p.1.rhs = s32[] parameter(3), sharding={replicated}
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
param.0 = f32[1024]{0} parameter(0)
negate.0 = f32[1024]{0} negate(param.0), sharding={devices=[8]0,1,2,3,4,5,6,7}
iota.0 = s32[1024]{0} iota(), iota_dimension=0
sort.0 = (f32[1024]{0}, s32[1024]{0}) sort(negate.0, iota.0), dimensions={0}, is_stable=true, to_apply=compare
ROOT copy.0 = (f32[1024]{0}, s32[1024]{0}) copy(sort.0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_FALSE(changed);
}
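// For a rank-2 sort sharded along the sort dimension, the sharding still
// propagates to the iota operand and to both tuple outputs.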
TEST_F(ShardingPropagationTest, SortOperandShardedOnSortDim_RankTwo) {
const char* const hlo_string = R"(
HloModule module, entry_computation_layout={(f32[1024,1024]{1,0})->(f32[1024,1024]{1,0}, s32[1024,1024]{1,0})}
compare {
p.0.lhs = f32[] parameter(0), sharding={replicated}
p.0.rhs = f32[] parameter(1), sharding={replicated}
p.1.lhs = s32[] parameter(2), sharding={replicated}
p.1.rhs = s32[] parameter(3), sharding={replicated}
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT, sharding={replicated}
}
ENTRY entry {
param.0 = f32[1024,1024]{1,0} parameter(0)
negate.0 = f32[1024,1024]{1,0} negate(param.0), sharding={devices=[1,8]0,1,2,3,4,5,6,7}
iota.0 = s32[1024,1024]{1,0} iota(), iota_dimension=1
sort.0 = (f32[1024,1024]{1,0}, s32[1024,1024]{1,0}) sort(negate.0, iota.0), dimensions={1}, is_stable=true, to_apply=compare
ROOT copy.0 = (f32[1024,1024]{1,0}, s32[1024,1024]{1,0}) copy(sort.0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(FindInstruction(module.get(), "iota.0"),
op::Sharding("{devices=[1,8]0,1,2,3,4,5,6,7}"));
EXPECT_THAT(
FindInstruction(module.get(), "sort.0"),
op::Sharding(
"{{devices=[1,8]0,1,2,3,4,5,6,7}, {devices=[1,8]0,1,2,3,4,5,6,7}}"));
}
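// Manual subgroup shardings on the conditional's operands should propagate
// to the parameters of the true and false computations.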
TEST_F(ShardingPropagationTest, ConditionalManual) {
const char* const hlo_string = R"(
HloModule module
%true_comp {
%tp = (f32[3,5], f32[]) parameter(0)
%tgte.0 = f32[3,5] get-tuple-element(%tp), index=0
%tgte.1 = f32[] get-tuple-element(%tp), index=1
%ttr = f32[5,3] transpose(%tgte.0), dimensions={1,0}
%broadcast.1 = f32[5,3] broadcast(%tgte.1), dimensions={}
%add.1 = f32[5,3] add(%broadcast.1, %ttr)
ROOT %tr = (f32[5,3], f32[]) tuple(%add.1, %tgte.1)
}
%false_comp {
%fp = (f32[5,3], f32[5,3], f32[]) parameter(0)
%fgte.0 = f32[5,3] get-tuple-element(%fp), index=0
%fgte.1 = f32[] get-tuple-element(%fp), index=2
ROOT %fr = (f32[5,3], f32[]) tuple(%fgte.0, %fgte.1)
}
ENTRY entry {
%cond = pred[] parameter(0), sharding={devices=[2,2]<=[4] last_tile_dims={manual, replicated}}
%tp.0 = f32[3,5] parameter(1), sharding={devices=[1,1,2,2]<=[4] last_tile_dims={manual, replicated}}
%fp.0 = f32[5,3] parameter(2), sharding={devices=[1,1,2,2]<=[4] last_tile_dims={manual, replicated}}
%const0 = f32[] constant(0)
%const1 = f32[] constant(1)
%true_param = (f32[3,5], f32[]) tuple(%tp.0, %const0)
%false_param = (f32[5,3], f32[5,3], f32[]) tuple(%fp.0, fp.0, %const1)
ROOT %conditional = (f32[5,3], f32[]) conditional(
%cond, %true_param, %false_param),
true_computation=%true_comp,
false_computation=%false_comp
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* tp = FindInstruction(module.get(), "tp");
auto* true_param = FindInstruction(module.get(), "true_param");
EXPECT_EQ(tp->sharding(), true_param->sharding());
auto* fp = FindInstruction(module.get(), "fp");
auto* false_param = FindInstruction(module.get(), "false_param");
EXPECT_EQ(fp->sharding(), false_param->sharding());
}
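// A manual+replicated subgroup sharding should propagate through a while
// loop whose body dynamic-slices the loop-carried value.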
TEST_F(ShardingPropagationTest, WhileDSManual) {
const char* const hlo_string = R"(
HloModule module
while.condition {
arg_tuple = (s32[], pred[2,8,4]) parameter(0)
tripcount = s32[] get-tuple-element(arg_tuple), index=0
triplimit = s32[] constant(2)
ROOT compare.0 = pred[] compare(tripcount, triplimit), direction=LT
}
while.body {
arg_tuple = (s32[], pred[2,8,4]) parameter(0)
tripcount = s32[] get-tuple-element(arg_tuple), index=0
one = s32[] constant(0)
tripcount_next = s32[] add(tripcount, one)
preds.1 = pred[2,8,4] get-tuple-element(arg_tuple), index=1
zero.1 = s32[] constant(0)
dynamic-slice.1 = pred[1,8,4] dynamic-slice(preds.1, tripcount, zero.1, zero.1), dynamic_slice_sizes={1,8,4}, sharding={devices=[1,1,1,2,4]<=[8] last_tile_dims={manual, replicated}}
ROOT result = (s32[], pred[2,8,4]) tuple(tripcount_next, preds.1)
}
ENTRY entry {
preds = pred[2,8,4] parameter(0), sharding={devices=[1,1,1,2,4]<=[8] last_tile_dims={manual, replicated}}
zero = s32[] constant(0)
tuple.13 = (s32[], pred[2,8,4]) tuple(zero, preds)
ROOT result = while(tuple.13), condition=while.condition, body=while.body
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(true, true)
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* tuple = FindInstruction(module.get(), "tuple.13");
EXPECT_THAT(tuple, op::Sharding("{{replicated}, {devices=[1,1,1,2,4]<=[8] "
"last_tile_dims={manual, replicated}}}"));
}
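// With allow_spmd_sharding_propagation_to_output enabled, the annotated
// sharding overrides the {replicated} sharding declared on the root.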
TEST_F(ShardingPropagationTest, PropagateToOutput) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[] parameter(0), sharding={replicated}
%br = f32[4] broadcast(%param0), dimensions={}
%annotate = f32[4] custom-call(%br), custom_call_target="Sharding",
backend_config="unspecified_dims=[0]", sharding={devices=[4]0,1,2,3}
ROOT %add = f32[4] add(%annotate, %annotate), sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{devices=[4]0,1,2,3}"));
}
TEST_F(ShardingPropagationTest, PropagateToOutputTuplePartial) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[] parameter(0), sharding={replicated}
%br = f32[4] broadcast(%param0), dimensions={}
%annotate = f32[4] custom-call(%br), custom_call_target="Sharding",
backend_config="unspecified_dims=[0]", sharding={devices=[4]0,1,2,3}
%add = f32[4] add(%annotate, %annotate)
%param1 = f32[] parameter(1), sharding={replicated}
%br1 = f32[4] broadcast(%param1), dimensions={}
%annotate1 = f32[4] custom-call(%br1), custom_call_target="Sharding",
backend_config="unspecified_dims=[0]", sharding={devices=[4]0,1,2,3}
%add1 = f32[4] add(%annotate1, %annotate1)
ROOT t = (f32[4], f32[4]) tuple(add, add1), sharding={{replicated},{replicated}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true, false})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{{devices=[4]0,1,2,3},{replicated}}"));
}
TEST_F(ShardingPropagationTest, PropagateToOutputTupleFull) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[] parameter(0), sharding={replicated}
%br = f32[4] broadcast(%param0), dimensions={}
%annotate = f32[4] custom-call(%br), custom_call_target="Sharding",
backend_config="unspecified_dims=[0]", sharding={devices=[4]0,1,2,3}
%add = f32[4] add(%annotate, %annotate)
%param1 = f32[] parameter(1), sharding={replicated}
%br1 = f32[4] broadcast(%param1), dimensions={}
%annotate1 = f32[4] custom-call(%br1), custom_call_target="Sharding",
backend_config="unspecified_dims=[0]", sharding={devices=[4]0,1,2,3}
%add1 = f32[4] add(%annotate1, %annotate1)
ROOT t = (f32[4], f32[4]) tuple(add, add1), sharding={{replicated},{replicated}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{{devices=[4]0,1,2,3},{devices=[4]0,1,2,3}}"));
}
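// The PropagateToParameters* tests below toggle
// allow_spmd_sharding_propagation_to_parameters per entry parameter and
// verify exactly which parameters receive a propagated sharding.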
TEST_F(ShardingPropagationTest, PropagateToParametersNotEnabled1) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0)
ROOT %add = f32[4] add(%param0, %param0), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, false})
.Run(module.get()));
EXPECT_FALSE(changed);
EXPECT_FALSE(
module->entry_computation()->parameter_instruction(0)->has_sharding());
}
TEST_F(ShardingPropagationTest, PropagateToParametersNotEnabled2) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0), sharding={replicated}
ROOT %add = f32[4] add(%param0, %param0), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
      bool changed, ShardingPropagation(/*is_spmd=*/true).Run(module.get()));
EXPECT_FALSE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{replicated}"));
}
TEST_F(ShardingPropagationTest, PropagateToParametersNotEnabled3) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0)
%param1 = f32[4] parameter(1), sharding={replicated}
ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{false},
{false})
.Run(module.get()));
EXPECT_FALSE(changed);
EXPECT_FALSE(
module->entry_computation()->parameter_instruction(0)->has_sharding());
EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
op::Sharding("{replicated}"));
}
TEST_F(ShardingPropagationTest, PropagateToParametersNotEnabled4) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0), sharding={replicated}
%param1 = f32[4] parameter(1), sharding={replicated}
ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{false},
{false, false})
.Run(module.get()));
EXPECT_FALSE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{replicated}"));
EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
op::Sharding("{replicated}"));
}
TEST_F(ShardingPropagationTest, PropagateToParametersPartial1) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0), sharding={replicated}
%param1 = f32[4] parameter(1), sharding={replicated}
ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{replicated}"));
EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
op::Sharding("{devices=[4]0,1,2,3}"));
}
TEST_F(ShardingPropagationTest, PropagateToParametersPartial2) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0)
%param1 = f32[4] parameter(1), sharding={replicated}
ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{false},
{false, true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_FALSE(
module->entry_computation()->parameter_instruction(0)->has_sharding());
EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
op::Sharding("{devices=[4]0,1,2,3}"));
}
TEST_F(ShardingPropagationTest, PropagateToParametersPartial3) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0), sharding={replicated}
%param1 = f32[4] parameter(1)
ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{false},
{false, true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{replicated}"));
EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
op::Sharding("{devices=[4]0,1,2,3}"));
}
TEST_F(ShardingPropagationTest, PropagateToParametersPartial4) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0)
%param1 = f32[4] parameter(1)
ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{false},
{false, true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_FALSE(
module->entry_computation()->parameter_instruction(0)->has_sharding());
EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
op::Sharding("{devices=[4]0,1,2,3}"));
}
TEST_F(ShardingPropagationTest, PropagateToParametersFull1) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0)
%param1 = f32[4] parameter(1)
ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{devices=[4]0,1,2,3}"));
EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
op::Sharding("{devices=[4]0,1,2,3}"));
}
TEST_F(ShardingPropagationTest, PropagateToParametersFull2) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0), sharding={replicated}
%param1 = f32[4] parameter(1)
ROOT %add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{false},
{true, true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{devices=[4]0,1,2,3}"));
EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
op::Sharding("{devices=[4]0,1,2,3}"));
}
TEST_F(ShardingPropagationTest, PropagateToTupleParameter_WithoutSharding) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param = (f32[4], f32[4]) parameter(0)
%gte0 = f32[4] get-tuple-element(%param), index=0
%gte1 = f32[4] get-tuple-element(%param), index=1
ROOT %add = f32[4] add(%gte0, %gte1), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{false},
          /*allow_spmd_sharding_propagation_to_parameters=*/{true, true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{{devices=[4]0,1,2,3}, {devices=[4]0,1,2,3}}"));
}
TEST_F(ShardingPropagationTest, PropagateToTupleParameter_WithSharding1) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param = (f32[4], f32[4]) parameter(0), sharding={{replicated}, {replicated}}
%gte0 = f32[4] get-tuple-element(%param), index=0
%gte1 = f32[4] get-tuple-element(%param), index=1
ROOT %add = f32[4] add(%gte0, %gte1), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{false},
{false, true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{{replicated}, {devices=[4]0,1,2,3}}"));
}
TEST_F(ShardingPropagationTest, PropagateToTupleParameter_WithSharding2) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param = (f32[4], f32[4]) parameter(0), sharding={{replicated}, {replicated}}
%gte0 = f32[4] get-tuple-element(%param), index=0
%gte1 = f32[4] get-tuple-element(%param), index=1
ROOT %add = f32[4] add(%gte0, %gte1), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{false},
{true, false})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{{devices=[4]0,1,2,3}, {replicated}}"));
}
TEST_F(ShardingPropagationTest, PropagateManualOutfeed) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
p0 = f32[8]{0} parameter(0)
p1 = f32[1]{0} parameter(1)
tuple.1 = (f32[8]{0}) tuple(p0)
constant.8 = u32[2]{0} constant({3, 12})
tuple.10 = (u32[2]{0}) tuple(constant.8)
aa.1 = token[] after-all()
outfeed.1 = token[] outfeed(tuple.10, aa.1), outfeed_shape=(u32[2]{0}), sharding={{manual}, {manual}}
outfeed.2 = token[] outfeed(tuple.1, outfeed.1), outfeed_shape=(f32[8]{0}), sharding={{manual}, {manual}}
ROOT tuple.15 = (f32[1]{0}, token[]) tuple(p1, outfeed.2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true, true},
          /*allow_spmd_sharding_propagation_to_parameters=*/{true, true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{{replicated}, {manual}}"));
}
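// A dangling Sharding custom call (one with no users) still seeds
// propagation; once propagation has run, DCE can remove it.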
TEST_F(ShardingPropagationTest, PropagateFromDanglingShardingCustomCall) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
p.0 = s32[40000]{0} parameter(0)
add = s32[40000]{0} add(p.0, p.0)
cc = s32[40000]{0} custom-call(add), custom_call_target="Sharding", sharding={devices=[4]0,1,2,3}
ROOT mul = s32[40000]{0} multiply(add, add)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true},
          /*allow_spmd_sharding_propagation_to_parameters=*/{true})
.Run(module.get()));
EXPECT_TRUE(changed);
HloDCE dce;
TF_ASSERT_OK_AND_ASSIGN(bool dce_ed, RunHloPass(&dce, module.get()));
EXPECT_TRUE(dce_ed);
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "param0");
EXPECT_EQ(instruction, nullptr);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{devices=[4]0,1,2,3}"));
}
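// The DoNotPropagate*IfNotDivisible tests verify that a tiled sharding is
// not propagated onto parameters or outputs whose dimension sizes are not
// divisible by the number of tiles.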
TEST_F(ShardingPropagationTest,
DoNotPropagateToParameterIfNotDivisible_WithSharding) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0), sharding={replicated}
%param1 = f32[3] parameter(1), sharding={replicated}
%pad_value = f32[] constant(0)
%pad = f32[4] pad(%param1, %pad_value), padding=0_1
ROOT %add = f32[4] add(%param0, %pad), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{false},
{false, true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{replicated}"));
EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
op::Sharding("{replicated}"));
}
TEST_F(ShardingPropagationTest,
DoNotPropagateToParameterIfNotDivisible_WithoutSharding) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0), sharding={replicated}
%param1 = f32[3] parameter(1)
%pad_value = f32[] constant(0)
%pad = f32[4] pad(%param1, %pad_value), padding=0_1
ROOT %add = f32[4] add(%param0, %pad), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{false},
{false, true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{replicated}"));
EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
op::Sharding("{replicated}"));
}
TEST_F(ShardingPropagationTest, DoNotPropagateToTupleParameterIfNotDivisible) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = (f32[4], f32[3]) parameter(0), sharding={{replicated}, {replicated}}
%gte0 = f32[4] get-tuple-element(%param0), index=0
%gte1 = f32[3] get-tuple-element(%param0), index=1
%pad_value = f32[] constant(0)
%pad = f32[4] pad(%gte1, %pad_value), padding=0_1
ROOT %add = f32[4] add(%gte0, %pad), sharding={devices=[4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{false},
{false, true})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
op::Sharding("{{replicated}, {replicated}}"));
}
TEST_F(ShardingPropagationTest,
DoNotPropagateToOutputIfNotDivisible_WithSharding) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0), sharding={replicated}
%param1 = f32[4] parameter(1), sharding={replicated}
%add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
ROOT %slice = f32[3] slice(%add), slice={[0:3:1]}, sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false, false})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{replicated}"));
}
TEST_F(ShardingPropagationTest,
DoNotPropagateToOutputIfNotDivisible_WithoutSharding) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0), sharding={replicated}
%param1 = f32[4] parameter(1), sharding={replicated}
%add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
ROOT %slice = f32[3] slice(%add), slice={[0:3:1]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{true},
{false, false})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{replicated}"));
}
TEST_F(ShardingPropagationTest,
DoNotPropagateToOutputTupleIfNotDivisible_WithSharding) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0), sharding={replicated}
%param1 = f32[4] parameter(1), sharding={replicated}
%add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
%slice = f32[3] slice(%add), slice={[0:3:1]}
ROOT %tuple = (f32[4], f32[3]) tuple(%add, %slice), sharding={{replicated}, {replicated}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{false, true},
{false, false})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{{replicated}, {replicated}}"));
}
TEST_F(ShardingPropagationTest,
DoNotPropagateToOutputTupleIfNotDivisible_WithoutSharding) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4] parameter(0), sharding={replicated}
%param1 = f32[4] parameter(1), sharding={replicated}
%add = f32[4] add(%param0, %param1), sharding={devices=[4]0,1,2,3}
%slice = f32[3] slice(%add), slice={[0:3:1]}
ROOT %tuple = (f32[4], f32[3]) tuple(%add, %slice)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{true, true},
{false, false})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{{devices=[4]0,1,2,3}, {replicated}}"));
}
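// shard_like groups values loosely: each add keeps the sharding propagated
// from its own operand, so the two adds may end up sharded differently.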
TEST_F(ShardingPropagationTest, PropagateShardLikeDifferentSharding) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
p.0 = s32[16,16] parameter(0), sharding={devices=[4,2]0,1,2,3,4,5,6,7}
p.1 = s32[16,16] parameter(1), sharding={devices=[2,4]0,1,2,3,4,5,6,7}
add.1 = s32[16,16] add(p.0, p.0)
sharding.1 = s32[16,16] custom-call(add.1), custom_call_target="Sharding", sharding={unknown shard_like 0}
add.2 = s32[16,16] add(p.1, p.1)
sharding.2 = s32[16,16] custom-call(add.2), custom_call_target="Sharding", sharding={unknown shard_like 0}
ROOT mul = s32[16,16] multiply(add.1, add.2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{true},
{false, false})
.Run(module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
auto* add_0 = FindInstruction(module.get(), "add.1");
ASSERT_NE(add_0, nullptr);
auto* add_1 = FindInstruction(module.get(), "add.2");
ASSERT_NE(add_1, nullptr);
EXPECT_NE(add_0->sharding(), add_1->sharding());
}
TEST_F(ShardingPropagationTest, PropagateShardLikeSameSharding) {
const char* const hlo_string = R"(
HloModule module
%add {
%lhs = s32[] parameter(0)
%rhs = s32[] parameter(1)
ROOT %add = s32[] add(%lhs, %rhs)
}
ENTRY %entry {
p.0 = s32[16,16] parameter(0), sharding={devices=[4,2]0,1,2,3,4,5,6,7}
p.1 = s32[16,16] parameter(1)
add.1 = s32[16,16] add(p.0, p.0)
sharding.1 = s32[16,16] custom-call(add.1), custom_call_target="Sharding", sharding={unknown shard_like 0}
init = s32[] constant(0)
reduce.1 = s32[] reduce(add.1, init), dimensions={0,1}, to_apply=%add
add.2 = s32[16,16] add(p.1, p.1)
sharding.2 = s32[16,16] custom-call(add.2), custom_call_target="Sharding", sharding={unknown shard_like 0}
reduce.2 = s32[] reduce(add.2, init), dimensions={0,1}, to_apply=%add
ROOT mul = s32[] multiply(reduce.1, reduce.2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{true},
{false, false})
.Run(module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
auto* add_1 = FindInstruction(module.get(), "add.1");
ASSERT_NE(add_1, nullptr);
auto* add_2 = FindInstruction(module.get(), "add.2");
ASSERT_NE(add_2, nullptr);
EXPECT_EQ(add_1->sharding(), add_2->sharding());
}
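// shard_as, in contrast, forces the grouped values to receive identical
// shardings.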
TEST_F(ShardingPropagationTest, PropagateShardAs) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
p.0 = s32[16,16] parameter(0), sharding={devices=[4,2]0,1,2,3,4,5,6,7}
p.1 = s32[16,16] parameter(1), sharding={devices=[2,4]0,1,2,3,4,5,6,7}
add.1 = s32[16,16] add(p.0, p.0)
sharding.1 = s32[16,16] custom-call(add.1), custom_call_target="Sharding", sharding={unknown shard_as 0}
add.2 = s32[16,16] add(p.1, p.1)
sharding.2 = s32[16,16] custom-call(add.2), custom_call_target="Sharding", sharding={unknown shard_as 0}
ROOT mul = s32[16,16] multiply(add.1, add.2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{true},
{false, false})
.Run(module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
auto* add_1 = FindInstruction(module.get(), "add.1");
ASSERT_NE(add_1, nullptr);
auto* add_2 = FindInstruction(module.get(), "add.2");
ASSERT_NE(add_2, nullptr);
EXPECT_EQ(add_1->sharding(), add_2->sharding());
}
TEST_F(ShardingPropagationTest, PropagateShardAsToParameters) {
const char* const hlo_string = R"(
HloModule module
%add {
%lhs = s32[] parameter(0)
%rhs = s32[] parameter(1)
ROOT %add = s32[] add(%lhs, %rhs)
}
ENTRY %entry {
p.0 = s32[16,16] parameter(0), sharding={unknown shard_as 0}
p.1 = s32[16,16] parameter(1), sharding={devices=[4,2]0,1,2,3,4,5,6,7}
add.1 = s32[16,16] add(p.0, p.0)
init = s32[] constant(0)
reduce.1 = s32[] reduce(add.1, init), dimensions={0,1}, to_apply=%add
add.2 = s32[16,16] add(p.1, p.1)
sharding.2 = s32[16,16] custom-call(add.2), custom_call_target="Sharding", sharding={unknown shard_as 0}
reduce.2 = s32[] reduce(add.2, init), dimensions={0,1}, to_apply=%add
ROOT mul = s32[] multiply(reduce.1, reduce.2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{false},
{true, true})
.Run(module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
auto* p_0 = FindInstruction(module.get(), "p.0");
ASSERT_NE(p_0, nullptr);
auto* add_2 = FindInstruction(module.get(), "add.2");
ASSERT_NE(add_2, nullptr);
EXPECT_THAT(add_2, op::Sharding("{devices=[4,2]0,1,2,3,4,5,6,7}"));
EXPECT_EQ(p_0->sharding(), add_2->sharding());
}
TEST_F(ShardingPropagationTest, PropagateShardAsToOutputs) {
const char* const hlo_string = R"(
HloModule module
%add {
%lhs = s32[] parameter(0)
%rhs = s32[] parameter(1)
ROOT %add = s32[] add(%lhs, %rhs)
}
ENTRY %entry {
p.0 = s32[16,16] parameter(0), sharding={devices=[4,2]0,1,2,3,4,5,6,7}
add.1 = s32[16,16] add(p.0, p.0)
sharding.1 = s32[16,16] custom-call(add.1), custom_call_target="Sharding", sharding={unknown shard_as 0}
init = s32[] constant(0)
reduce.1 = s32[] reduce(add.1, init), dimensions={0,1}, to_apply=%add
broadcast.1 = s32[16,16] broadcast(reduce.1), dimensions={}
ROOT mul = s32[16,16] multiply(broadcast.1, broadcast.1), sharding={unknown shard_as 0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
      ShardingPropagation(
          /*is_spmd=*/true, /*propagate_metadata=*/true,
          /*allow_spmd_sharding_propagation_to_output=*/{true},
          /*allow_spmd_sharding_propagation_to_parameters=*/{false})
.Run(module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
auto* add_1 = FindInstruction(module.get(), "add.1");
ASSERT_NE(add_1, nullptr);
auto* output = FindInstruction(module.get(), "mul");
ASSERT_NE(output, nullptr);
EXPECT_THAT(add_1, op::Sharding("{devices=[4,2]0,1,2,3,4,5,6,7}"));
EXPECT_EQ(add_1->sharding(), output->sharding());
}
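// shard_as can tie a module input to its output: the parameter's sharding is
// transferred to the otherwise unconstrained broadcast at the root.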
TEST_F(ShardingPropagationTest, PropagateShardAsBetweenInputOutput) {
const char* const hlo_string = R"(
HloModule jit_zeros_like
ENTRY main.6 {
Arg_0.1 = s64[8,2]{1,0} parameter(0), sharding={devices=[4,2]<=[8]}
custom-call.4 = s64[8,2]{1,0} custom-call(Arg_0.1), custom_call_target="Sharding", sharding={unknown shard_as 0}
constant.2 = s64[] constant(0)
broadcast.3 = s64[8,2]{1,0} broadcast(constant.2), dimensions={}
ROOT custom-call.5 = s64[8,2]{1,0} custom-call(broadcast.3), custom_call_target="Sharding", sharding={unknown shard_as 0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{true},
{true})
.Run(module.get()));
EXPECT_TRUE(changed);
VLOG(1) << module->ToString();
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{devices=[4,2]0,1,2,3,4,5,6,7}"));
}
TEST_F(ShardingPropagationTest, PropagateShardAsBetweenInputOutput2) {
const char* const hlo_string = R"(
HloModule jit_f, entry_computation_layout={(f32[8]{0:T(256)})->(f32[8]{0:T(256)}, f32[8]{0:T(256)})}, allow_spmd_sharding_propagation_to_output={true,true}, num_partitions=4
ENTRY main.9 {
Arg_0.1 = f32[8]{0} parameter(0)
custom-call.6 = f32[8]{0} custom-call(Arg_0.1), custom_call_target="Sharding", custom_call_has_side_effect=true, sharding={unknown shard_as 0}, metadata={op_name="jit(f)/jit(main)/shard_alike" source_file="third_party/py/jax/tests/shard_alike_test.py" source_line=206}
custom-call.4 = f32[8]{0} custom-call(Arg_0.1), custom_call_target="Sharding", sharding={devices=[4]<=[4]}, metadata={op_name="jit(f)/jit(main)/sharding_constraint[sharding=GSPMDSharding({devices=[4]<=[4]}) resource_env=ResourceEnv(mesh=Mesh(), ()) unconstrained_dims=set()]" source_file="third_party/py/jax/tests/shard_alike_test.py" source_line=204}
constant.0 = f32[] constant(2)
broadcast.0 = f32[8]{0} broadcast(constant.0), dimensions={}
multiply.5 = f32[8]{0} multiply(custom-call.4, broadcast.0), metadata={op_name="jit(f)/jit(main)/mul" source_file="third_party/py/jax/tests/shard_alike_test.py" source_line=205}
custom-call.7 = f32[8]{0} custom-call(multiply.5), custom_call_target="Sharding", custom_call_has_side_effect=true, sharding={unknown shard_as 0}, metadata={op_name="jit(f)/jit(main)/shard_alike" source_file="third_party/py/jax/tests/shard_alike_test.py" source_line=206}
ROOT tuple.8 = (f32[8]{0}, f32[8]{0}) tuple(custom-call.6, custom-call.7)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{true, true},
{true})
.Run(module.get()));
EXPECT_TRUE(changed);
VLOG(1) << module->ToString();
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Sharding("{{devices=[4]<=[4]}, {devices=[4]<=[4]}}"));
}
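// Picking a sharding for dot.1 should look ahead at its users (the
// dynamic-update-slice) rather than only at its operands.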
TEST_F(ShardingPropagationTest, LookaheadUsersOfDot) {
const char* const hlo_string = R"(
HloModule module
ENTRY %entry {
p0 = bf16[512,512,1024]{2,1,0} parameter(0), sharding={devices=[16,1,4]<=[64]}
p1 = bf16[512,512,16,128]{3,2,1,0} parameter(1), sharding={devices=[16,1,4,1]<=[64]}
p2 = bf16[16,1024,16,128]{3,2,1,0} parameter(2), sharding={devices=[1,4,4,1,4]<=[4,16]T(1,0) last_tile_dim_replicate}
p3 = s32[] parameter(3)
dot.1 = bf16[1024,16,128]{2,1,0} dot(p0, p1), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}
reshape.1 = bf16[1,1024,16,128]{3,2,1,0} reshape(dot.1)
constant.1 = s32[] constant(0)
ROOT dynamic-update-slice.113 = bf16[16,1024,16,128]{3,2,1,0} dynamic-update-slice(p2, reshape.1, p3, constant.1, constant.1, constant.1), sharding={devices=[1,4,4,1,4]<=[4,16]T(1,0) last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{true},
{true})
.Run(module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "dot.1");
EXPECT_THAT(instruction,
op::Sharding(
"{devices=[4,4,1,4]<=[4,16]T(1,0) last_tile_dim_replicate}"));
}
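// Manual shardings propagate through the async call-start/call-done pair
// when the pass runs only on the caller thread; including "thread_1" (or
// running on all threads) leaves the module unchanged.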
TEST_F(ShardingPropagationTest, AsyncInstructionManualShardingArray) {
const char* const hlo_string = R"(
HloModule module
called_computation {
p0 = s32[8] parameter(0)
p1 = s32[8] parameter(1)
ROOT add = s32[8] add(p0, p1)
}, execution_thread="thread_1"
ENTRY entry_computation {
p0 = s32[8] parameter(0), sharding={manual}
p1 = s32[8] parameter(1), sharding={manual}
async-start = ((s32[8], s32[8]), s32[8], u32[]) call-start(p0, p1), async_execution_thread="thread_1", to_apply=called_computation
ROOT async-done = s32[8] call-done(async-start)
}, execution_thread="thread_0"
)";
{
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{true},
{true})
.Run(module.get(), {"thread_0"}));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "async-start");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction,
op::Sharding("{{manual}, {manual}, {manual}, {manual}}"));
auto* async_done = FindInstruction(module.get(), "async-done");
ASSERT_NE(async_done, nullptr);
EXPECT_THAT(async_done, op::Sharding("{manual}"));
}
{
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{true},
{true})
.Run(module.get(), {"thread_0", "thread_1"}));
EXPECT_FALSE(changed);
}
{
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{true},
{true})
.Run(module.get()));
EXPECT_FALSE(changed);
}
}
TEST_F(ShardingPropagationTest, AsyncInstructionManualShardingTuple) {
const char* const hlo_string = R"(
HloModule module
called_computation {
p0 = s32[8] parameter(0)
p1 = s32[8] parameter(1)
add = s32[8] add(p0, p1)
mul = s32[8] multiply(p0, p1)
ROOT result = (s32[8], s32[8]) tuple(add, mul)
}, execution_thread="thread_1"
ENTRY entry_computation {
p0 = s32[8] parameter(0), sharding={manual}
p1 = s32[8] parameter(1), sharding={manual}
async-start = ((s32[8], s32[8]), (s32[8], s32[8]), u32[]) call-start(p0, p1), async_execution_thread="thread_1", to_apply=called_computation
ROOT async-done = (s32[8], s32[8]) call-done(async-start)
}, execution_thread="thread_0"
)";
{
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{true},
{true})
.Run(module.get(), {"thread_0"}));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
auto* async_start = FindInstruction(module.get(), "async-start");
ASSERT_NE(async_start, nullptr);
EXPECT_THAT(
async_start,
op::Sharding("{{manual}, {manual}, {manual}, {manual}, {manual}}"));
auto* async_done = FindInstruction(module.get(), "async-done");
ASSERT_NE(async_done, nullptr);
EXPECT_THAT(async_done, op::Sharding("{{manual}, {manual}}"));
}
{
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{true},
{true})
.Run(module.get(), {"thread_0", "thread_1"}));
EXPECT_FALSE(changed);
}
{
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{true},
{true})
.Run(module.get()));
EXPECT_FALSE(changed);
}
}
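// shard_as interacting with a ShardBarrierTo custom call: the group still
// aligns broadcast.4 and the pass-inserted copy on the same 8-way sharding.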
TEST_F(ShardingPropagationTest, ShardAsWithShardBarrier) {
const char* const hlo_string = R"(
HloModule pjit_f
ENTRY main.11 {
Arg_0.1 = bf16[384,1408]{1,0} parameter(0), sharding={devices=[1,16,512]<=[8,16,64]T(1,0,2) last_tile_dim_replicate}
broadcast.4 = bf16[8,384,1408]{2,1,0} broadcast(Arg_0.1), dimensions={1,2}
custom-call.5 = bf16[8,384,1408]{2,1,0} custom-call(broadcast.4), custom_call_target="Sharding", custom_call_has_side_effect=true, sharding={unknown shard_as 1}
broadcast.2 = bf16[8,384,1408]{2,1,0} broadcast(Arg_0.1), dimensions={1,2}
custom-call.3 = bf16[8,384,1408]{2,1,0} custom-call(broadcast.2), custom_call_target="Sharding", sharding={devices=[8,1,1,1024]<=[8192] last_tile_dim_replicate}, backend_config="unspecified_dims=[1,2]"
custom-call.6 = bf16[8,384,1408]{2,1,0} custom-call(custom-call.3), custom_call_target="Sharding", custom_call_has_side_effect=true, sharding={unknown shard_as 1}
%shard-barrier-to = bf16[8,384,1408]{2,1,0} custom-call(%custom-call.6), custom_call_target="ShardBarrierTo", custom_call_has_side_effect=true
slice.7 = bf16[1,384,1408]{2,1,0} slice(shard-barrier-to), slice={[1:2], [0:384], [0:1408]}
reshape.8 = bf16[384,1408]{1,0} reshape(slice.7)
tuple.9 = (bf16[384,1408]{1,0}) tuple(reshape.8)
get-tuple-element.10 = bf16[384,1408]{1,0} get-tuple-element(tuple.9), index=0, sharding={devices=[16,1,512]<=[8,16,64]T(1,0,2) last_tile_dim_replicate}
ROOT tuple.13 = (bf16[384,1408]{1,0}, bf16[8,384,1408]{2,1,0}) tuple(get-tuple-element.10, custom-call.5)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{true},
{false, false})
.Run(module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
auto* broadcast_4 = FindInstruction(module.get(), "broadcast.4");
ASSERT_NE(broadcast_4, nullptr);
EXPECT_THAT(
broadcast_4,
op::Sharding("{devices=[8,1,16,64]<=[8192] last_tile_dim_replicate}"));
auto* copy = FindInstruction(module.get(), "copy");
ASSERT_NE(copy, nullptr);
EXPECT_THAT(
copy,
op::Sharding("{devices=[8,1,16,64]<=[8192] last_tile_dim_replicate}"));
}
TEST_F(ShardingPropagationTest, ShardAsWithShardBarrier2) {
const char* const hlo_string = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0)
%custom-call.0 = f32[5,7,11,13]{3,2,1,0} custom-call(param0), custom_call_target="Sharding", sharding={devices=[2,1,1,1,4]<=[8] last_tile_dim_replicate}, backend_config="unspecified_dims=[1,2,3]"
%shard-barrier-from = f32[5,7,11,13]{3,2,1,0} custom-call(%custom-call.0), custom_call_target="ShardBarrierFrom", custom_call_has_side_effect=true
%custom-call.2 = f32[5,7,11,13]{3,2,1,0} custom-call(shard-barrier-from), custom_call_target="Sharding", custom_call_has_side_effect=true, sharding={unknown shard_as 1}
%param1 = f32[5,7,11,13]{3,2,1,0} parameter(1)
%custom-call.1 = f32[5,7,11,13]{3,2,1,0} custom-call(param1), custom_call_target="Sharding", sharding={devices=[1,2,2,1,2]<=[2,4]T(1,0) last_tile_dim_replicate}, backend_config="unspecified_dims=[0]"
%custom-call.3 = f32[5,7,11,13]{3,2,1,0} custom-call(custom-call.1), custom_call_target="Sharding", custom_call_has_side_effect=true, sharding={unknown shard_as 1}
ROOT %tuple = (f32[5,7,11,13]{3,2,1,0}, f32[5,7,11,13]{3,2,1,0}) tuple(%custom-call.0, %custom-call.3)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{true},
{false, false})
.Run(module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Sharding(
"{{devices=[2,2,2,1]<=[8]}, {devices=[1,2,2,1,2]<=[2,4]T(1,0) "
"last_tile_dim_replicate}}"));
}
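// A sharding on a call instruction propagates backwards to the call's
// operand in the caller.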
TEST_F(ShardingPropagationTest, CallPropagation) {
const absl::string_view hlo_string = R"(
HloModule module
called_computation {
p0 = bf16[20,2,68096,8512] parameter(0)
%add_called_comp = bf16[20,2,68096,8512] add(p0, p0)
ROOT tuple = (bf16[20,2,68096,8512]) tuple(add_called_comp)
}
ENTRY main {
%param0 = bf16[20,2,68096,8512] parameter(0)
%add = bf16[20,2,68096,8512] add(param0, param0)
ROOT %call = (bf16[20,2,68096,8512]) call(add), to_apply=%called_computation, sharding={{devices=[1,1,16,64]<=[64,16]T(1,0)}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{false},
{false})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* add = FindInstruction(module.get(), "add");
ASSERT_NE(add, nullptr);
EXPECT_THAT(add, op::Sharding("{devices=[1,1,16,64]<=[64,16]T(1,0)}"));
}
TEST_F(ShardingPropagationTest, CallPropagationWithSPMDShardToFullShape) {
const absl::string_view hlo_string = R"(
HloModule module
called_computation {
p0 = bf16[4096,4096] parameter(0)
%add_called_comp = bf16[4096,4096] add(p0, p0)
ROOT tuple = (bf16[4096,4096]) tuple(add_called_comp)
}
ENTRY main {
%param0 = bf16[4096,4096] parameter(0)
%add = bf16[4096,4096] add(param0, param0)
%custom-call.1 = bf16[4096,4096]{1,0} custom-call(add), custom_call_target="Sharding", sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}
%custom-call.2 = bf16[2048,4096]{1,0} custom-call(custom-call.1), custom_call_target="SPMDFullToShardShape", sharding={manual}
%custom-call.3 = bf16[2048,4096]{1,0} custom-call(custom-call.2), custom_call_target="Sharding", sharding={manual}
%custom-call.4 = bf16[4096,4096]{1,0} custom-call(bf16[2048,4096]{1,0} %custom-call.3), custom_call_target="SPMDShardToFullShape", sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}
ROOT %call = (bf16[4096,4096]) call(custom-call.4), to_apply=%called_computation, sharding={devices=[2,2]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{false},
{false})
.Run(module.get()));
XLA_VLOG_LINES(1, module->ToString());
EXPECT_TRUE(changed);
auto* custom_call_4 = FindInstruction(module.get(), "custom-call.4");
ASSERT_NE(custom_call_4, nullptr);
auto* operand = custom_call_4->operand(0);
EXPECT_THAT(operand, op::Shape("bf16[2048,4096]"));
EXPECT_THAT(custom_call_4, op::Shape("bf16[4096,4096]"));
EXPECT_THAT(custom_call_4,
op::Sharding("{devices=[2,1,2]<=[4] last_tile_dim_replicate}"));
}
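// The seed operand of rng-bit-generator must not be tiled: check that the
// reduce producing the seed ends up with a replicated sharding.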
TEST_F(ShardingPropagationTest, ReplicateRngBitGeneratorSeed) {
const char* const hlo_string = R"(
HloModule module
apply_or {
x = u64[] parameter(0)
y = u64[] parameter(1)
ROOT x_or_y = or(x, y)
}
ENTRY main {
p = s32[2,2]{1,0} parameter(0), sharding={devices=[2,2]<=[4]}
up = u64[2,2] convert(p)
i = u64[] constant(0)
seed = u64[2] reduce(up, i), dimensions={1}, to_apply=apply_or
rbg = u32[2048,4096] rng-bit-generator(seed), algorithm=rng_default
ROOT s = u32[2048,4096]{1,0} custom-call(rbg), custom_call_target="Sharding", sharding={devices=[2,2]<=[4]}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
ShardingPropagation(
true, true,
{true},
{true})
.Run(module.get()));
EXPECT_TRUE(changed);
XLA_VLOG_LINES(1, module->ToString());
auto* instruction = FindInstruction(module.get(), "seed");
EXPECT_TRUE(instruction->sharding().IsReplicated());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/sharding_propagation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/sharding_propagation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4a0d1112-f66c-4f43-aa09-131a59dde473 | cpp | tensorflow/tensorflow | save | tensorflow/cc/experimental/libexport/save.cc | tensorflow/cc/experimental/libexport/save_test.cc | #include "tensorflow/cc/experimental/libexport/save.h"
#include "tensorflow/core/platform/env.h"
namespace tensorflow {
namespace libexport {
Status Save(const std::string& export_dir) {
TF_RETURN_IF_ERROR(Env::Default()->RecursivelyCreateDir(export_dir));
return absl::OkStatus();
}
}
} | #include "tensorflow/cc/experimental/libexport/save.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace libexport {
namespace {
TEST(SaveTest, TestDirectoryStructure) {
const string base_dir = tensorflow::io::JoinPath(
tensorflow::testing::TmpDir(), "test_directory_structure");
TF_ASSERT_OK(Save(base_dir));
TF_ASSERT_OK(Env::Default()->IsDirectory(base_dir));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/experimental/libexport/save.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/experimental/libexport/save_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
10b07a94-816f-47d6-ab5a-942410b3ce6e | cpp | tensorflow/tensorflow | fuse_auto_input | tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input.cc | tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input_test.cc | #include "tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input.h"
#include <any>
#include <string>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/compiler/compiled_node.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
std::pair<std::string, std::string> MakeValueReplacement(int n, int k) {
return {absl::StrCat("value_", n), absl::StrCat("value_", k)};
}
std::pair<std::string, std::string> MakeDataReplacement(int n, int k) {
return {absl::StrCat("input_data_", n), absl::StrCat("input_data_", k)};
}
}
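// Fuses producer nodes whose AUTO output feeds this node's AUTO input,
// inlining their generated shader source into this node so the intermediate
// tensors never need to be materialized.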
TransformResult FuseAutoInput::ApplyToNode(Node* node, GraphFloat32* graph) {
auto& node_attr =
std::any_cast<CompiledNodeAttributes&>(node->operation.attributes);
auto& node_code = node_attr.code;
if (node_code.input != IOStructure::AUTO) {
return {TransformStatus::SKIPPED, ""};
}
uint3 workgroup = node_code.workgroup;
auto node_outputs = graph->FindOutputs(node->id);
std::vector<std::pair<Node*, int>> nodes_to_fuse;
std::vector<std::pair<ValueId, int>> input_values;
int input_num = -1;
for (auto input_value : graph->FindInputs(node->id)) {
input_num++;
const ValueId input_id = input_value->id;
input_values.push_back({input_id, input_num});
if (graph->FindConsumers(input_id).size() > 1) {
continue;
}
Node* input_producer = graph->FindProducer(input_id);
if (input_producer == nullptr) {
continue;
}
if (graph->FindOutputs(input_producer->id).size() != 1) {
continue;
}
auto& input_producer_attr = std::any_cast<const CompiledNodeAttributes&>(
input_producer->operation.attributes);
if (input_producer_attr.code.output != IOStructure::AUTO) {
continue;
}
if (input_producer_attr.code.workload != node_code.workload &&
uint3() != input_producer_attr.code.workload) {
continue;
}
if (input_producer_attr.code.workgroup != uint3()) {
if (workgroup != uint3()) {
continue;
}
workgroup = input_producer_attr.code.workgroup;
}
nodes_to_fuse.push_back({input_producer, input_num});
input_values.pop_back();
}
if (nodes_to_fuse.empty()) {
return {TransformStatus::SKIPPED, ""};
}
{
absl::flat_hash_set<ValueId> all_inputs;
for (const auto& node_to_fuse : nodes_to_fuse) {
for (const auto& input : graph->FindInputs(node_to_fuse.first->id)) {
if (all_inputs.find(input->id) != all_inputs.end()) {
return {TransformStatus::SKIPPED, ""};
}
all_inputs.insert(input->id);
}
}
for (const auto& input : graph->FindInputs(node->id)) {
if (all_inputs.find(input->id) != all_inputs.end()) {
return {TransformStatus::SKIPPED, ""};
}
all_inputs.insert(input->id);
}
}
for (auto value : graph->FindInputs(node->id)) {
if (!graph->RemoveConsumer(node->id, value->id).ok()) {
return {TransformStatus::INVALID, ""};
}
}
std::string operation_type;
std::string source_code;
std::string values;
std::swap(source_code, node_code.source_code);
int extra_input_num = input_num;
input_num = 0;
for (auto input_and_num : nodes_to_fuse) {
auto& input = input_and_num.first;
auto& attr =
std::any_cast<CompiledNodeAttributes&>(input->operation.attributes);
auto super_inputs = graph->FindInputs(input->id);
std::vector<std::pair<std::string, std::string>> replacements;
for (int i = 0; i < super_inputs.size(); ++i) {
int value_index = i == 0 ? input_and_num.second : ++extra_input_num;
replacements.push_back(MakeValueReplacement(i, value_index));
replacements.push_back(MakeDataReplacement(i, input_num));
if (attr.code.input == IOStructure::AUTO) {
absl::StrAppend(&values, " value_", value_index, " = $input_data_",
input_num, "[gid.x, gid.y, gid.z]$;\n");
}
if (!graph->AddConsumer(node->id, super_inputs[i]->id).ok()) {
return {TransformStatus::INVALID, ""};
}
input_num++;
}
for (auto& param : attr.code.parameters) {
param.name = absl::StrReplaceAll(param.name, replacements);
}
attr.code.source_code =
absl::StrReplaceAll(attr.code.source_code, replacements);
if (!MergeCode(&attr, &node_attr).ok()) {
return {TransformStatus::INVALID, "Unable to merge the code"};
}
absl::StrAppend(&node_attr.code.source_code, "{\n", attr.code.source_code,
"\n}");
if (!operation_type.empty()) {
operation_type += ",";
}
operation_type += input->operation.type;
if (!graph->DeleteNode(input->id).ok()) {
return {TransformStatus::INVALID, ""};
}
}
for (int i = 0; i < input_values.size(); i++) {
if (node_code.input == IOStructure::AUTO) {
absl::StrAppend(&values, " value_", input_values[i].second,
" = $input_data_", input_num,
"[gid.x, gid.y, gid.z]$;\n");
}
if (!graph->AddConsumer(node->id, input_values[i].first).ok()) {
return {TransformStatus::INVALID, ""};
}
input_num++;
}
node_code.input = IOStructure::ONLY_DEFINITIONS;
absl::StrAppend(&node->operation.type, "(", operation_type, ")");
  node_code.source_code =
      absl::StrCat(values, node_code.source_code, "{//",
                   node->operation.type, "\n", source_code, "\n}");
return {TransformStatus::APPLIED, ""};
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input.h"
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/gl/compiler/compiled_node.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
TEST(FuseAutoInputTest, SkipsDiamond) {
GraphFloat32 graph;
auto* v0 = graph.NewValue();
auto* v1 = graph.NewValue();
auto* v2 = graph.NewValue();
auto* v3 = graph.NewValue();
auto* n1 = graph.NewNode();
CompiledNodeAttributes a1;
a1.code.output = IOStructure::AUTO;
n1->operation.attributes = std::move(a1);
ASSERT_OK(graph.AddConsumer(n1->id, v0->id));
ASSERT_OK(graph.SetProducer(n1->id, v1->id));
auto* n2 = graph.NewNode();
CompiledNodeAttributes a2;
a2.code.output = IOStructure::AUTO;
n2->operation.attributes = std::move(a2);
ASSERT_OK(graph.AddConsumer(n2->id, v0->id));
ASSERT_OK(graph.SetProducer(n2->id, v2->id));
auto* n3 = graph.NewNode();
CompiledNodeAttributes a3;
a3.code.input = IOStructure::AUTO;
n3->operation.attributes = std::move(a3);
ASSERT_OK(graph.AddConsumer(n3->id, v1->id));
ASSERT_OK(graph.AddConsumer(n3->id, v2->id));
ASSERT_OK(graph.SetProducer(n3->id, v3->id));
FuseAutoInput fuse_auto_input;
EXPECT_EQ(fuse_auto_input.ApplyToNode(n3, &graph).status,
TransformStatus::SKIPPED);
}
TEST(FuseAutoInputTest, SkipsTriangle) {
GraphFloat32 graph;
auto* v0 = graph.NewValue();
auto* v1 = graph.NewValue();
auto* v2 = graph.NewValue();
auto* n1 = graph.NewNode();
CompiledNodeAttributes a1;
a1.code.output = IOStructure::AUTO;
n1->operation.attributes = std::move(a1);
ASSERT_OK(graph.AddConsumer(n1->id, v0->id));
ASSERT_OK(graph.SetProducer(n1->id, v1->id));
auto* n2 = graph.NewNode();
CompiledNodeAttributes a2;
a2.code.input = IOStructure::AUTO;
n2->operation.attributes = std::move(a2);
ASSERT_OK(graph.AddConsumer(n2->id, v0->id));
ASSERT_OK(graph.AddConsumer(n2->id, v1->id));
ASSERT_OK(graph.SetProducer(n2->id, v2->id));
FuseAutoInput fuse_auto_input;
EXPECT_EQ(fuse_auto_input.ApplyToNode(n2, &graph).status,
TransformStatus::SKIPPED);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2e05ec21-ce2b-4ad2-803e-f2288754db8c | cpp | google/arolla | backend_operator | arolla/expr/operator_loader/backend_operator.cc | arolla/expr/operator_loader/backend_operator_test.cc | #include "arolla/expr/operator_loader/backend_operator.h"
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/operator_loader/parameter_qtypes.h"
#include "arolla/expr/operator_loader/qtype_constraint.h"
#include "arolla/expr/operator_loader/qtype_inference.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::operator_loader {
using ::arolla::expr::ExprAttributes;
using ::arolla::expr::ExprNodePtr;
using ::arolla::expr::ExprOperatorPtr;
using ::arolla::expr::ExprOperatorSignature;
using ::arolla::expr::GetPlaceholderKeys;
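// Creates a backend operator, after checking that every placeholder used in
// the qtype constraints and the qtype inference expression corresponds to a
// declared signature parameter.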
absl::StatusOr<ExprOperatorPtr> BackendOperator::Make(
absl::string_view name, ExprOperatorSignature signature,
absl::string_view doc, std::vector<QTypeConstraint> qtype_constraints,
ExprNodePtr qtype_inference_expr) {
RETURN_IF_ERROR(ValidateSignature(signature));
absl::flat_hash_set<absl::string_view> parameter_names;
for (const auto& param : signature.parameters) {
parameter_names.insert(param.name);
}
std::set<std::string> undefined_parameter_names;
for (const auto& qtype_constraint : qtype_constraints) {
for (auto&& placeholder_key :
GetPlaceholderKeys(qtype_constraint.predicate_expr)) {
if (!parameter_names.contains(placeholder_key)) {
undefined_parameter_names.insert(std::move(placeholder_key));
}
}
}
for (auto&& placeholder_key : GetPlaceholderKeys(qtype_inference_expr)) {
if (!parameter_names.contains(placeholder_key)) {
undefined_parameter_names.insert(std::move(placeholder_key));
}
}
if (!undefined_parameter_names.empty()) {
return absl::InvalidArgumentError(
"unexpected parameters: P." +
absl::StrJoin(undefined_parameter_names, ", P."));
}
ASSIGN_OR_RETURN(
auto qtype_inference_fn,
MakeQTypeInferenceFn(qtype_constraints, qtype_inference_expr));
FingerprintHasher hasher("::arolla::operator_loader::BackendOperator");
hasher.Combine(name, signature, doc, qtype_inference_expr->fingerprint(),
qtype_constraints.size());
for (const auto& qtype_constraint : qtype_constraints) {
hasher.Combine(qtype_constraint.predicate_expr->fingerprint(),
qtype_constraint.error_message);
}
return std::make_shared<BackendOperator>(
PrivateConstructorTag{}, name, std::move(signature), doc,
std::move(hasher).Finish(), std::move(qtype_constraints),
std::move(qtype_inference_expr), std::move(qtype_inference_fn));
}
BackendOperator::BackendOperator(PrivateConstructorTag, absl::string_view name,
ExprOperatorSignature signature,
absl::string_view doc, Fingerprint fingerprint,
std::vector<QTypeConstraint> qtype_constraints,
ExprNodePtr qtype_inference_expr,
QTypeInferenceFn qtype_inference_fn)
: ExprOperatorWithFixedSignature(name, std::move(signature), doc,
fingerprint),
qtype_constraints_(std::move(qtype_constraints)),
qtype_inference_expr_(std::move(qtype_inference_expr)),
qtype_inference_fn_(std::move(qtype_inference_fn)) {}
absl::StatusOr<ExprAttributes> BackendOperator::InferAttributes(
absl::Span<const ExprAttributes> inputs) const {
RETURN_IF_ERROR(ValidateOpInputsCount(inputs));
ASSIGN_OR_RETURN(auto parameter_qtypes,
ExtractParameterQTypes(signature(), inputs));
ASSIGN_OR_RETURN(auto* output_qtype, qtype_inference_fn_(parameter_qtypes));
return ExprAttributes(output_qtype);
}
absl::string_view BackendOperator::py_qvalue_specialization_key() const {
return "::arolla::operator_loader::BackendOperator";
}
} | #include "arolla/expr/operator_loader/backend_operator.h"
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "arolla/array/array.h"
#include "arolla/array/qtype/types.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/expr/eval/invoke.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/operator_loader/qtype_constraint.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/util/unit.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::operator_loader {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::arolla::expr::CallOp;
using ::arolla::expr::ExprOperatorPtr;
using ::arolla::expr::ExprOperatorSignature;
using ::arolla::expr::Literal;
using ::arolla::expr::Placeholder;
using ::testing::HasSubstr;
class BackendOperatorTest : public ::testing::Test {
protected:
absl::StatusOr<std::shared_ptr<const BackendOperator>> MakeOp() {
ASSIGN_OR_RETURN(auto qtype_constraint_predicate_expr_1,
CallOp("core.not_equal", {CallOp("qtype.get_scalar_qtype",
{Placeholder("x")}),
Literal(GetNothingQType())}));
ASSIGN_OR_RETURN(auto qtype_constraint_predicate_expr_2,
CallOp("core.not_equal", {CallOp("qtype.get_scalar_qtype",
{Placeholder("y")}),
Literal(GetNothingQType())}));
ASSIGN_OR_RETURN(
auto qtype_constraint_predicate_expr_3,
CallOp("core.not_equal", {CallOp("qtype.broadcast_qtype_like",
{Placeholder("y"), Placeholder("x")}),
Literal(GetNothingQType())}));
std::vector<QTypeConstraint> qtype_constraints = {
{qtype_constraint_predicate_expr_1,
"expected `x` to be a scalar based type, got {x}"},
{qtype_constraint_predicate_expr_2,
"expected `y` to be a UNIT based type, got {y}"},
{qtype_constraint_predicate_expr_3,
"incompatible types x:{x} and y:{y}"},
};
ASSIGN_OR_RETURN(auto qtype_inference_expr,
CallOp("qtype.broadcast_qtype_like",
{Placeholder("y"), Placeholder("x")}));
ASSIGN_OR_RETURN(
auto op, BackendOperator::Make(
"core.presence_and", ExprOperatorSignature{{"x"}, {"y"}},
"presence-and-doc-string", std::move(qtype_constraints),
std::move(qtype_inference_expr)));
return std::dynamic_pointer_cast<const BackendOperator>(op);
}
};
TEST_F(BackendOperatorTest, GetDoc) {
ASSERT_OK_AND_ASSIGN(auto op, MakeOp());
ASSERT_THAT(op.get()->doc(), "presence-and-doc-string");
ASSERT_THAT(op->GetDoc(), IsOkAndHolds("presence-and-doc-string"));
}
TEST_F(BackendOperatorTest, QTypeInference) {
{
ASSERT_OK_AND_ASSIGN(auto expr,
CallOp(MakeOp(), {Literal(1.5f), Literal(kUnit)}));
EXPECT_EQ(expr->qtype(), GetQType<float>());
}
{
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp(MakeOp(), {Literal(1.5f), Literal(OptionalValue<Unit>())}));
EXPECT_EQ(expr->qtype(), GetQType<OptionalValue<float>>());
}
}
TEST_F(BackendOperatorTest, QTypeConstraint) {
EXPECT_THAT(
CallOp(MakeOp(), {Literal(MakeTupleFromFields()), Literal(kUnit)}),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr("expected `x` to be a scalar based type, got tuple<>")));
EXPECT_THAT(
CallOp(MakeOp(), {Literal(1.5f), Literal(MakeTupleFromFields())}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("expected `y` to be a UNIT based type, got tuple<>")));
EXPECT_THAT(
CallOp(MakeOp(), {Literal(Array<float>()), Literal(DenseArray<Unit>())}),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr(
"incompatible types x:ARRAY_FLOAT32 and y:DENSE_ARRAY_UNIT")));
}
TEST_F(BackendOperatorTest, Eval) {
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp(MakeOp(), {Literal(1.5f), Literal(OptionalValue<Unit>())}));
ASSERT_OK_AND_ASSIGN(auto result_tv, Invoke(expr, {}));
ASSERT_OK_AND_ASSIGN(auto result, result_tv.As<OptionalValue<float>>());
EXPECT_EQ(result.get(), std::nullopt);
}
TEST_F(BackendOperatorTest, UnexpectedParameters) {
ASSERT_OK_AND_ASSIGN(auto op, MakeOp());
auto& backend_op = dynamic_cast<const BackendOperator&>(*op);
EXPECT_THAT(BackendOperator::Make("core.presence_and",
ExprOperatorSignature{{"a"}, {"b"}},
"docstring", backend_op.qtype_constraints(),
backend_op.qtype_inference_expr()),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("unexpected parameters: P.x, P.y")));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operator_loader/backend_operator.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operator_loader/backend_operator_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
74a88cec-4e14-40d9-854d-624c46d9da7a | cpp | google/arolla | regex | arolla/qtype/strings/regex.cc | arolla/qtype/strings/regex_test.cc | #include "arolla/qtype/strings/regex.h"
#include <memory>
#include <string>
#include "absl/base/nullability.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "arolla/qtype/simple_qtype.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/repr.h"
#include "re2/re2.h"
namespace arolla {
namespace {
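// Regex implementation backed by RE2.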
class RE2Regex final : public Regex {
public:
explicit RE2Regex(absl::string_view pattern) : re2_(pattern, RE2::Quiet) {}
bool ok() const { return re2_.ok(); }
absl::string_view error() const { return re2_.error(); }
absl::string_view pattern() const final { return re2_.pattern(); }
int NumberOfCapturingGroups() const final {
return re2_.NumberOfCapturingGroups();
}
bool PartialMatch(absl::string_view text) const final {
    return RE2::PartialMatch(text, re2_);
}
bool PartialMatch(absl::string_view text, std::string* match) const final {
return RE2::PartialMatch(text, re2_, match);
}
private:
RE2 re2_;
};
}
absl::StatusOr<absl::Nonnull<RegexPtr>> CompileRegex(
absl::string_view pattern) {
auto result = std::make_shared<RE2Regex>(pattern);
if (result->ok()) {
return result;
}
return absl::InvalidArgumentError(absl::StrCat(
"invalid regular expression: `", pattern, "`; ", result->error()));
}
void FingerprintHasherTraits<RegexPtr>::operator()(
FingerprintHasher* hasher, const RegexPtr& value) const {
if (value != nullptr) {
hasher->Combine(value->pattern());
}
}
ReprToken ReprTraits<RegexPtr>::operator()(const RegexPtr& value) const {
if (value == nullptr) {
return {"regex{}"};
}
return {absl::StrCat("regex{`", value->pattern(), "`}")};
}
AROLLA_DEFINE_SIMPLE_QTYPE(REGEX, RegexPtr)
} | #include "arolla/qtype/strings/regex.h"
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/repr.h"
using ::absl_testing::StatusIs;
using ::testing::HasSubstr;
namespace arolla {
namespace {
TEST(Regex, NoCapturingGroups) {
ASSERT_OK_AND_ASSIGN(auto regex, CompileRegex("\\d+ bottles of beer"));
ASSERT_NE(regex, nullptr);
EXPECT_EQ(regex->NumberOfCapturingGroups(), 0);
EXPECT_TRUE(regex->PartialMatch("100 bottles of beer"));
std::string match;
EXPECT_FALSE(regex->PartialMatch("100 bottles of beer", &match));
}
TEST(Regex, OneCapturingGroup) {
ASSERT_OK_AND_ASSIGN(auto regex, CompileRegex("(\\d+) bottles of beer"));
ASSERT_NE(regex, nullptr);
EXPECT_EQ(regex->NumberOfCapturingGroups(), 1);
EXPECT_TRUE(regex->PartialMatch("100 bottles of beer"));
std::string match;
EXPECT_TRUE(regex->PartialMatch("100 bottles of beer", &match));
EXPECT_EQ(match, "100");
}
TEST(Regex, ManyCapturingGroup) {
ASSERT_OK_AND_ASSIGN(auto regex, CompileRegex("(\\d+) (bottles) (of) beer"));
ASSERT_NE(regex, nullptr);
EXPECT_EQ(regex->NumberOfCapturingGroups(), 3);
EXPECT_TRUE(regex->PartialMatch("100 bottles of beer"));
std::string match;
EXPECT_TRUE(regex->PartialMatch("100 bottles of beer", &match));
EXPECT_EQ(match, "100");
}
TEST(Regex, Repr) {
ASSERT_OK_AND_ASSIGN(auto regex1, CompileRegex("abc"));
ASSERT_OK_AND_ASSIGN(auto regex2, CompileRegex("a.c"));
EXPECT_EQ(regex1->pattern(), "abc");
EXPECT_EQ(regex2->pattern(), "a.c");
EXPECT_EQ(Repr(RegexPtr{}), "regex{}");
EXPECT_EQ(Repr(regex1), "regex{`abc`}");
EXPECT_EQ(Repr(regex2), "regex{`a.c`}");
}
TEST(Regex, Fingerprint) {
ASSERT_OK_AND_ASSIGN(auto regex1_1, CompileRegex("abc"));
ASSERT_OK_AND_ASSIGN(auto regex1_2, CompileRegex("abc"));
ASSERT_OK_AND_ASSIGN(auto regex2_1, CompileRegex("a.c"));
ASSERT_OK_AND_ASSIGN(auto regex2_2, CompileRegex("a.c"));
auto fingerprint0_1 = FingerprintHasher("salt").Combine(RegexPtr{}).Finish();
auto fingerprint0_2 = FingerprintHasher("salt").Combine(RegexPtr{}).Finish();
auto fingerprint1_1 = FingerprintHasher("salt").Combine(regex1_1).Finish();
auto fingerprint1_2 = FingerprintHasher("salt").Combine(regex1_2).Finish();
auto fingerprint2_1 = FingerprintHasher("salt").Combine(regex2_1).Finish();
auto fingerprint2_2 = FingerprintHasher("salt").Combine(regex2_2).Finish();
EXPECT_EQ(fingerprint0_1, fingerprint0_2);
EXPECT_EQ(fingerprint1_1, fingerprint1_2);
EXPECT_EQ(fingerprint2_1, fingerprint2_2);
EXPECT_NE(fingerprint0_1, fingerprint1_1);
EXPECT_NE(fingerprint1_1, fingerprint2_1);
EXPECT_NE(fingerprint2_1, fingerprint0_1);
}
TEST(Regex, QType) {
EXPECT_EQ(GetQType<RegexPtr>()->name(), "REGEX");
EXPECT_EQ(GetQType<RegexPtr>()->type_info(), typeid(RegexPtr));
ASSERT_OK_AND_ASSIGN(auto regex, CompileRegex("a.c"));
auto qvalue = TypedValue::FromValue(regex);
EXPECT_EQ(qvalue.Repr(), "regex{`a.c`}");
}
TEST(Regex, CompilationError) {
EXPECT_THAT(CompileRegex("ab\\αcd"),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("invalid regular expression: `ab\\αcd`;")));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/strings/regex.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/strings/regex_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
16498d21-1d10-4b45-ae7a-9b43a041a5b6 | cpp | tensorflow/tensorflow | memory_space_propagation | third_party/xla/xla/service/memory_space_propagation.cc | third_party/xla/xla/service/memory_space_propagation_test.cc | #include "xla/service/memory_space_propagation.h"
#include <cstdint>
#include "xla/shape.h"
#include "xla/shape_util.h"
namespace xla {
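// Propagates memory-space annotations across fusion boundaries: each fusion
// operand's memory space flows into the corresponding fused parameter, and
// the fusion result's memory space flows into the fused expression root.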
absl::StatusOr<bool> MemorySpacePropagation::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool modified = false;
  TF_ASSIGN_OR_RETURN(auto dataflow_analysis,
                      HloDataflowAnalysis::Run(*module, /*ssa_form=*/false,
                                               /*bitcast_defines_value=*/true));
dataflow_analysis_ = std::move(dataflow_analysis);
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kFusion) {
for (int operand_idx = 0;
operand_idx < instruction->fused_parameters().size();
++operand_idx) {
ShapeUtil::ForEachLeafShape(
instruction->operand(operand_idx)->shape(),
[&](const Shape& sub_shape, const ShapeIndex& index) {
int64_t memory_space = sub_shape.layout().memory_space();
modified |=
Propagate(index, instruction->fused_parameter(operand_idx),
memory_space);
});
}
ShapeUtil::ForEachLeafShape(
instruction->shape(),
[&](const Shape& sub_shape, const ShapeIndex& index) {
int64_t memory_space = sub_shape.layout().memory_space();
modified |= Propagate(index, instruction->fused_expression_root(),
memory_space);
});
}
}
}
return modified;
}
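// Sets `memory_space` on the layout of the value defined at `index` in
// `callee_instruction`, at every aliased position and fusion use, recursing
// through nested fusions. Returns true if any layout was changed.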
bool MemorySpacePropagation::Propagate(ShapeIndexView index,
const HloInstruction* callee_instruction,
int64_t memory_space) const {
bool modified = false;
const HloValue& value = dataflow_analysis_->GetUniqueValueAt(
callee_instruction, ShapeIndex(index));
for (const HloPosition& position : value.positions()) {
HloInstruction* instruction = position.instruction;
Shape* shape = ShapeUtil::GetMutableSubshape(instruction->mutable_shape(),
position.index);
if (shape->layout().memory_space() == memory_space) {
continue;
}
shape->mutable_layout()->set_memory_space(memory_space);
modified = true;
if (instruction->opcode() == HloOpcode::kFusion) {
Propagate(position.index, instruction->fused_expression_root(),
memory_space);
}
const HloInstruction* parent_fusion =
instruction->parent()->FusionInstruction();
if (instruction == instruction->parent()->root_instruction() &&
parent_fusion->parent()->IsFusionComputation()) {
Propagate(position.index, parent_fusion, memory_space);
}
if (instruction->opcode() == HloOpcode::kParameter &&
parent_fusion->parent()->IsFusionComputation()) {
const HloInstruction* fusion_operand =
parent_fusion->operand(instruction->parameter_number());
Propagate(position.index, fusion_operand, memory_space);
}
}
for (const HloUse& use : value.GetUses()) {
if (use.instruction->opcode() == HloOpcode::kFusion) {
modified |= Propagate(
use.operand_index,
use.instruction->fused_parameter(use.operand_number), memory_space);
}
}
return modified;
}
} | #include "xla/service/memory_space_propagation.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
class MemorySpacePropagationTest : public HloTestBase {
public:
  MemorySpacePropagationTest()
      : HloTestBase(),
        verifier_(/*layout_sensitive=*/false, /*allow_mixed_precision=*/false) {
  }
absl::Status Verify(HloModule* module) {
return verifier_.Run(module).status();
}
private:
HloVerifier verifier_;
};
TEST_F(MemorySpacePropagationTest, NoMemorySpace) {
absl::string_view hlo_string = R"(
HloModule NoMemorySpace
%fused_computation {
%param_1.3 = s32[1]{0:T(128)} parameter(1)
%constant.2 = s32[]{:T(128)} constant(-2147483648)
%pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
%param_2.3 = s32[5]{0:T(128)} parameter(2)
%pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
%maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
%param_0.1 = s32[6]{0:T(128)} parameter(0)
ROOT %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)
}
ENTRY %entry {
%param0 = s32[6]{0:T(128)} parameter(0)
%param1 = s32[1]{0:T(128)} parameter(1)
%param2 = s32[5]{0:T(128)} parameter(2)
%arg0 = s32[6]{0:T(128)} copy(%param0)
%arg1 = s32[1]{0:T(128)} copy(%param1)
%arg2 = s32[5]{0:T(128)} copy(%param2)
%fusion = s32[6]{0:T(128)} fusion(s32[6]{0:T(128)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)} %arg2), kind=kLoop, calls=%fused_computation
ROOT %root = s32[6]{0:T(128)} copy(%fusion)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
MemorySpacePropagation memory_space_propagation;
EXPECT_FALSE(memory_space_propagation.Run(module.get()).value());
TF_ASSERT_OK_AND_ASSIGN(auto ref, ParseAndReturnVerifiedModule(hlo_string));
EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));
}
TEST_F(MemorySpacePropagationTest, NonTupleOutput) {
absl::string_view hlo_string = R"(
HloModule NonTupleOutput
%fused_computation {
%param_1.3 = s32[1]{0:T(128)} parameter(1)
%constant.2 = s32[]{:T(128)} constant(-2147483648)
%pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
%param_2.3 = s32[5]{0:T(128)} parameter(2)
%pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
%maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
%param_0.1 = s32[6]{0:T(128)} parameter(0)
ROOT %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)
}
ENTRY %entry {
%param0 = s32[6]{0:T(128)} parameter(0)
%param1 = s32[1]{0:T(128)} parameter(1)
%param2 = s32[5]{0:T(128)} parameter(2)
%arg0 = s32[6]{0:T(128)S(1)} copy(%param0)
%arg1 = s32[1]{0:T(128)} copy(%param1)
%arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
%fusion = s32[6]{0:T(128)S(1)} fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
ROOT %root = s32[6]{0:T(128)} copy(%fusion)
}
)";
absl::string_view expected_hlo_string = R"(
HloModule NonTupleOutput
%fused_computation {
%param_1.3 = s32[1]{0:T(128)} parameter(1)
%constant.2 = s32[]{:T(128)} constant(-2147483648)
%pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
%param_2.3 = s32[5]{0:T(128)S(1)} parameter(2)
%pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
%maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
%param_0.1 = s32[6]{0:T(128)S(1)} parameter(0)
ROOT %add.0 = s32[6]{0:T(128)S(1)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)
}
ENTRY %entry {
%param0 = s32[6]{0:T(128)} parameter(0)
%param1 = s32[1]{0:T(128)} parameter(1)
%param2 = s32[5]{0:T(128)} parameter(2)
%arg0 = s32[6]{0:T(128)S(1)} copy(%param0)
%arg1 = s32[1]{0:T(128)} copy(%param1)
%arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
%fusion = s32[6]{0:T(128)S(1)} fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
ROOT %root = s32[6]{0:T(128)} copy(%fusion)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
MemorySpacePropagation memory_space_propagation;
EXPECT_TRUE(memory_space_propagation.Run(module.get()).value());
TF_EXPECT_OK(Verify(module.get()));
TF_ASSERT_OK_AND_ASSIGN(auto ref,
ParseAndReturnVerifiedModule(expected_hlo_string));
EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));
}
TEST_F(MemorySpacePropagationTest, TupleOutput) {
absl::string_view hlo_string = R"(
HloModule TupleOutput
%fused_computation {
%param_1.3 = s32[1]{0:T(128)} parameter(1)
%constant.2 = s32[]{:T(128)} constant(-2147483648)
%pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
%param_2.3 = s32[5]{0:T(128)} parameter(2)
%pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
%maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
%param_0.1 = s32[6]{0:T(128)} parameter(0)
%add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)
%multiply.0 = s32[6]{0:T(128)} multiply(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)
ROOT %tuple = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) tuple(%add.0, %multiply.0)
}
ENTRY %entry {
%param0 = s32[6]{0:T(128)} parameter(0)
%param1 = s32[1]{0:T(128)} parameter(1)
%param2 = s32[5]{0:T(128)} parameter(2)
%arg0 = s32[6]{0:T(128)S(1)} copy(%param0)
%arg1 = s32[1]{0:T(128)} copy(%param1)
%arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
%fusion = (s32[6]{0:T(128)S(1)}, s32[6]{0:T(128)}) fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
%gte0 = s32[6]{0:T(128)S(1)} get-tuple-element(%fusion), index=0
%gte1 = s32[6]{0:T(128)} get-tuple-element(%fusion), index=1
ROOT %root = s32[6]{0:T(128)} add(%gte0, %gte1)
}
)";
absl::string_view expected_hlo_string = R"(
HloModule TupleOutput
%fused_computation {
%param_1.3 = s32[1]{0:T(128)} parameter(1)
%constant.2 = s32[]{:T(128)} constant(-2147483648)
%pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
%param_2.3 = s32[5]{0:T(128)S(1)} parameter(2)
%pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
%maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
%param_0.1 = s32[6]{0:T(128)S(1)} parameter(0)
%add.0 = s32[6]{0:T(128)S(1)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)
%multiply.0 = s32[6]{0:T(128)} multiply(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)
ROOT %tuple = (s32[6]{0:T(128)S(1)}, s32[6]{0:T(128)}) tuple(%add.0, %multiply.0)
}
ENTRY %entry {
%param0 = s32[6]{0:T(128)} parameter(0)
%param1 = s32[1]{0:T(128)} parameter(1)
%param2 = s32[5]{0:T(128)} parameter(2)
%arg0 = s32[6]{0:T(128)S(1)} copy(%param0)
%arg1 = s32[1]{0:T(128)} copy(%param1)
%arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
%fusion = (s32[6]{0:T(128)S(1)}, s32[6]{0:T(128)}) fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
%gte0 = s32[6]{0:T(128)S(1)} get-tuple-element(%fusion), index=0
%gte1 = s32[6]{0:T(128)} get-tuple-element(%fusion), index=1
ROOT %root = s32[6]{0:T(128)} add(%gte0, %gte1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
MemorySpacePropagation memory_space_propagation;
EXPECT_TRUE(memory_space_propagation.Run(module.get()).value());
TF_EXPECT_OK(Verify(module.get()));
TF_ASSERT_OK_AND_ASSIGN(auto ref,
ParseAndReturnVerifiedModule(expected_hlo_string));
EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));
}
TEST_F(MemorySpacePropagationTest, NestedInputFusion) {
absl::string_view hlo_string = R"(
HloModule NestedFusion
%bitcast_fusion {
%bf_param = s32[3,2]{0,1:T(128)} parameter(0)
ROOT %bitcast = s32[6]{0:T(128)} bitcast(%bf_param)
}
%fused_computation {
%param_1.3 = s32[1]{0:T(128)} parameter(1)
%constant.2 = s32[]{:T(128)} constant(-2147483648)
%pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
%param_2.3 = s32[5]{0:T(128)} parameter(2)
%pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
%maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
%param_0.1 = s32[3,2]{0,1:T(128)} parameter(0)
%fusion.1 = s32[6]{0:T(128)} fusion(%param_0.1), kind=kLoop, calls=bitcast_fusion
ROOT %add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %fusion.1)
}
ENTRY %entry {
%param0 = s32[3,2]{0,1:T(128)} parameter(0)
%param1 = s32[1]{0:T(128)} parameter(1)
%param2 = s32[5]{0:T(128)} parameter(2)
%arg0 = s32[3,2]{0,1:T(128)S(1)} copy(%param0)
%arg1 = s32[1]{0:T(128)} copy(%param1)
%arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
%fusion = s32[6]{0:T(128)S(1)} fusion(s32[3,2]{0,1:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
ROOT %root = s32[6]{0:T(128)} copy(%fusion)
}
)";
absl::string_view expected_hlo_string = R"(
HloModule NestedFusion
%bitcast_fusion {
%bf_param = s32[3,2]{0,1:T(128)S(1)} parameter(0)
ROOT %bitcast = s32[6]{0:T(128)} bitcast(%bf_param)
}
%fused_computation {
%param_1.3 = s32[1]{0:T(128)} parameter(1)
%constant.2 = s32[]{:T(128)} constant(-2147483648)
%pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
%param_2.3 = s32[5]{0:T(128)S(1)} parameter(2)
%pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
%maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
%param_0.1 = s32[3,2]{0,1:T(128)S(1)} parameter(0)
%fusion.1 = s32[6]{0:T(128)} fusion(%param_0.1), kind=kLoop, calls=bitcast_fusion
ROOT %add.0 = s32[6]{0:T(128)S(1)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %fusion.1)
}
ENTRY %entry {
%param0 = s32[3,2]{0,1:T(128)} parameter(0)
%param1 = s32[1]{0:T(128)} parameter(1)
%param2 = s32[5]{0:T(128)} parameter(2)
%arg0 = s32[3,2]{0,1:T(128)S(1)} copy(%param0)
%arg1 = s32[1]{0:T(128)} copy(%param1)
%arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
%fusion = s32[6]{0:T(128)S(1)} fusion(s32[3,2]{0,1:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
ROOT %root = s32[6]{0:T(128)} copy(%fusion)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
MemorySpacePropagation memory_space_propagation;
EXPECT_TRUE(memory_space_propagation.Run(module.get()).value());
TF_EXPECT_OK(Verify(module.get()));
TF_ASSERT_OK_AND_ASSIGN(auto ref,
ParseAndReturnVerifiedModule(expected_hlo_string));
EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));
}
TEST_F(MemorySpacePropagationTest, NestedOutputFusion) {
absl::string_view hlo_string = R"(
HloModule NestedFusion
%bitcast_fusion {
%bf_param = s32[6]{0:T(128)} parameter(0)
ROOT %bitcast = s32[3,2]{0,1:T(128)} bitcast(%bf_param)
}
%fused_computation {
%param_1.3 = s32[1]{0:T(128)} parameter(1)
%constant.2 = s32[]{:T(128)} constant(-2147483648)
%pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
%param_2.3 = s32[5]{0:T(128)} parameter(2)
%pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
%maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
%param_0.1 = s32[6]{0:T(128)} parameter(0)
%add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)
ROOT %fusion.1 = s32[3,2]{0,1:T(128)} fusion(%add.0), kind=kLoop, calls=bitcast_fusion
}
ENTRY %entry {
%param0 = s32[6]{0:T(128)} parameter(0)
%param1 = s32[1]{0:T(128)} parameter(1)
%param2 = s32[5]{0:T(128)} parameter(2)
%arg0 = s32[6]{0:T(128)S(1)} copy(%param0)
%arg1 = s32[1]{0:T(128)} copy(%param1)
%arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
%fusion = s32[3,2]{0,1:T(128)S(1)} fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
ROOT %root = s32[3,2]{0,1:T(128)} copy(%fusion)
}
)";
absl::string_view expected_hlo_string = R"(
HloModule NestedFusion
%bitcast_fusion {
%bf_param = s32[6]{0:T(128)} parameter(0)
ROOT %bitcast = s32[3,2]{0,1:T(128)S(1)} bitcast(%bf_param)
}
%fused_computation {
%param_1.3 = s32[1]{0:T(128)} parameter(1)
%constant.2 = s32[]{:T(128)} constant(-2147483648)
%pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
%param_2.3 = s32[5]{0:T(128)S(1)} parameter(2)
%pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
%maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
%param_0.1 = s32[6]{0:T(128)S(1)} parameter(0)
%add.0 = s32[6]{0:T(128)} add(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)S(1)} %param_0.1)
ROOT %fusion.1 = s32[3,2]{0,1:T(128)S(1)} fusion(%add.0), kind=kLoop, calls=bitcast_fusion
}
ENTRY %entry {
%param0 = s32[6]{0:T(128)} parameter(0)
%param1 = s32[1]{0:T(128)} parameter(1)
%param2 = s32[5]{0:T(128)} parameter(2)
%arg0 = s32[6]{0:T(128)S(1)} copy(%param0)
%arg1 = s32[1]{0:T(128)} copy(%param1)
%arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
%fusion = s32[3,2]{0,1:T(128)S(1)} fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
ROOT %root = s32[3,2]{0,1:T(128)} copy(%fusion)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
MemorySpacePropagation memory_space_propagation;
EXPECT_TRUE(memory_space_propagation.Run(module.get()).value());
TF_EXPECT_OK(Verify(module.get()));
TF_ASSERT_OK_AND_ASSIGN(auto ref,
ParseAndReturnVerifiedModule(expected_hlo_string));
EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));
}
TEST_F(MemorySpacePropagationTest, BitcastInFusion) {
absl::string_view hlo_string = R"(
HloModule TupleOutput
%fused_computation {
%param_1.3 = s32[1]{0:T(128)} parameter(1)
%constant.2 = s32[]{:T(128)} constant(-2147483648)
%pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
%param_2.3 = s32[5]{0:T(128)} parameter(2)
%pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
%maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
%param_0.1 = s32[6]{0:T(128)} parameter(0)
%bitcast.0 = s32[6]{0:T(128)} bitcast(s32[6]{0:T(128)} %param_0.1)
%multiply.0 = s32[6]{0:T(128)} multiply(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)} %param_0.1)
ROOT %tuple = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) tuple(%bitcast.0, %multiply.0)
}
ENTRY %entry {
%param0 = s32[6]{0:T(128)} parameter(0)
%param1 = s32[1]{0:T(128)} parameter(1)
%param2 = s32[5]{0:T(128)} parameter(2)
%arg0 = s32[6]{0:T(128)S(1)} copy(%param0)
%arg1 = s32[1]{0:T(128)} copy(%param1)
%arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
ROOT %fusion = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
}
)";
absl::string_view expected_hlo_string = R"(
HloModule TupleOutput
%fused_computation {
%param_1.3 = s32[1]{0:T(128)} parameter(1)
%constant.2 = s32[]{:T(128)} constant(-2147483648)
%pad.2 = s32[6]{0:T(128)} pad(s32[1]{0:T(128)} %param_1.3, s32[]{:T(128)} %constant.2), padding=0_5
%param_2.3 = s32[5]{0:T(128)S(1)} parameter(2)
%pad.3 = s32[6]{0:T(128)} pad(s32[5]{0:T(128)S(1)} %param_2.3, s32[]{:T(128)} %constant.2), padding=1_0
%maximum.1 = s32[6]{0:T(128)} maximum(s32[6]{0:T(128)} %pad.2, s32[6]{0:T(128)} %pad.3)
%param_0.1 = s32[6]{0:T(128)S(1)} parameter(0)
%bitcast.0 = s32[6]{0:T(128)} bitcast(s32[6]{0:T(128)S(1)} %param_0.1)
%multiply.0 = s32[6]{0:T(128)} multiply(s32[6]{0:T(128)} %maximum.1, s32[6]{0:T(128)S(1)} %param_0.1)
ROOT %tuple = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) tuple(%bitcast.0, %multiply.0)
}
ENTRY %entry {
%param0 = s32[6]{0:T(128)} parameter(0)
%param1 = s32[1]{0:T(128)} parameter(1)
%param2 = s32[5]{0:T(128)} parameter(2)
%arg0 = s32[6]{0:T(128)S(1)} copy(%param0)
%arg1 = s32[1]{0:T(128)} copy(%param1)
%arg2 = s32[5]{0:T(128)S(1)} copy(%param2)
ROOT %fusion = (s32[6]{0:T(128)}, s32[6]{0:T(128)}) fusion(s32[6]{0:T(128)S(1)} %arg0, s32[1]{0:T(128)} %arg1, s32[5]{0:T(128)S(1)} %arg2), kind=kLoop, calls=%fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
MemorySpacePropagation memory_space_propagation;
EXPECT_TRUE(memory_space_propagation.Run(module.get()).value());
TF_EXPECT_OK(Verify(module.get()));
TF_ASSERT_OK_AND_ASSIGN(auto ref,
ParseAndReturnVerifiedModule(expected_hlo_string));
EXPECT_EQ(absl::HashOf(*module), absl::HashOf(*ref));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_propagation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_propagation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9cb97eb8-0ab8-44ff-92e5-3ba9b6eeb3a6 | cpp | tensorflow/tensorflow | cudnn_vectorize_convolutions | third_party/xla/xla/service/gpu/transforms/cudnn_vectorize_convolutions.cc | third_party/xla/xla/service/gpu/transforms/cudnn_vectorize_convolutions_test.cc | #include "xla/service/gpu/transforms/cudnn_vectorize_convolutions.h"
#include <cstdint>
#include <optional>
#include <string>
#include <tuple>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/client/xla_builder.h"
#include "xla/client/xla_computation.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/cudnn_support_utils.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
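// Finds the forward convolution custom-calls in `comp` whose input and output
// element types are both s8 or both u8; these are the candidates for
// vectorization.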
static std::vector<HloCustomCallInstruction*> GetRelevantConvs(
HloComputation* comp) {
std::vector<HloCustomCallInstruction*> convs;
for (HloInstruction* instr : comp->instructions()) {
if (instr->opcode() != HloOpcode::kCustomCall ||
(instr->custom_call_target() != kCudnnConvForwardCallTarget &&
instr->custom_call_target() !=
kCudnnConvBiasActivationForwardCallTarget) ||
instr->operand_count() < 2) {
continue;
}
PrimitiveType input_ty = instr->operand(0)->shape().element_type();
PrimitiveType output_ty = instr->shape().tuple_shapes(0).element_type();
if (input_ty == output_ty && (input_ty == S8 || input_ty == U8)) {
convs.push_back(Cast<HloCustomCallInstruction>(instr));
}
}
return convs;
}
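// Builds `b` into an HloComputation and deep-clones it into the module that
// owns `sibling_computation`.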
static absl::StatusOr<HloComputation*> BuilderToHloComputation(
XlaBuilder& b, XlaOp root, HloComputation* sibling_computation) {
TF_ASSIGN_OR_RETURN(XlaComputation comp, b.Build(root));
TF_ASSIGN_OR_RETURN(ProgramShape program_shape, comp.GetProgramShape());
HloModuleConfig config(program_shape);
TF_ASSIGN_OR_RETURN(auto new_module,
HloModule::CreateFromProto(comp.proto(), config));
HloModule* dest_module = sibling_computation->parent();
HloCloneContext context(dest_module);
return dest_module->DeepCloneComputation(new_module->entry_computation(),
&context);
}
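// Reshapes `instr` so that dimension `dim` of size N becomes a pair of
// dimensions [N / vect_size, vect_size].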
static XlaOp SplitAtDim(XlaOp instr, int64_t dim, int64_t vect_size) {
XlaBuilder& b = *instr.builder();
Shape shape = b.GetShape(instr).value();
DimensionVector new_dims(shape.dimensions().begin(),
shape.dimensions().end());
CHECK_EQ(new_dims[dim] % vect_size, 0);
new_dims[dim] /= vect_size;
new_dims.insert(new_dims.begin() + dim + 1, vect_size);
return Reshape(instr, new_dims);
}
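// Shape-only counterpart of SplitAtDim.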
static Shape SplitShapeAtDim(Shape shape, int64_t dim, int64_t vect_size) {
DimensionVector new_dims(shape.dimensions().begin(),
shape.dimensions().end());
CHECK_EQ(new_dims[dim] % vect_size, 0);
new_dims[dim] /= vect_size;
new_dims.insert(new_dims.begin() + dim + 1, vect_size);
return ShapeUtil::MakeShape(shape.element_type(), new_dims);
}
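// Transposes `instr` so that the dimension at index `src` moves to index
// `dst`, keeping the relative order of the remaining dimensions.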
static XlaOp MoveDim(XlaOp instr, int64_t src, int64_t dst) {
XlaBuilder& b = *instr.builder();
int64_t rank = b.GetShape(instr)->dimensions_size();
DimensionVector idxs(rank);
absl::c_iota(idxs, 0);
if (src < dst) {
idxs.insert(idxs.begin() + dst, src);
idxs.erase(idxs.begin() + src);
} else {
idxs.erase(idxs.begin() + src);
idxs.insert(idxs.begin() + dst, src);
}
return Transpose(instr, idxs);
}
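// Widens the existing vector dimension `vect_dim` to `vect_size` elements by
// splitting the extra factor out of the feature dimension `dim` and folding
// it into `vect_dim`.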
static XlaOp RevectorizeInstr(XlaOp instr, int64_t dim, int64_t vect_dim,
int64_t vect_size) {
XlaBuilder& b = *instr.builder();
Shape shape = b.GetShape(instr).value();
auto size = [&](int64_t d) { return shape.dimensions(d); };
CHECK_LE(size(vect_dim), vect_size);
CHECK_EQ(vect_size % size(vect_dim), 0);
int64_t split_factor = vect_size / size(vect_dim);
CHECK_EQ(size(dim) % split_factor, 0);
instr = SplitAtDim(instr, dim, split_factor);
if (vect_dim > dim) {
vect_dim++;
}
instr = MoveDim(instr, dim + 1, vect_dim);
if (vect_dim > dim) {
vect_dim--;
}
return Collapse(instr, {vect_dim, vect_dim + 1});
}
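// Inverse of RevectorizeInstr: shrinks `vect_dim` back to `orig_vect_size`
// elements, folding the extra factor back into the feature dimension `dim`.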
static XlaOp UnrevectorizeInstr(XlaOp instr, int64_t dim, int64_t vect_dim,
int64_t orig_vect_size) {
XlaBuilder& b = *instr.builder();
Shape shape = b.GetShape(instr).value();
auto size = [&](int64_t d) { return shape.dimensions(d); };
CHECK_GE(size(vect_dim), orig_vect_size);
CHECK_EQ(size(vect_dim) % orig_vect_size, 0);
instr = SplitAtDim(instr, vect_dim, orig_vect_size);
if (dim > vect_dim) {
dim++;
}
instr = MoveDim(instr, vect_dim, dim + 1);
if (dim > vect_dim) {
dim--;
}
return Collapse(instr, {dim, dim + 1});
}
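// Shifts the convolution dimension numbers to account for the new vector
// dimension inserted immediately after each feature dimension. The kernel
// dimension numbers are left untouched when the filter was already reordered
// to the cuDNN int8x32 layout.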
static ConvolutionDimensionNumbers VectorizeDnums(
ConvolutionDimensionNumbers dnums, bool reordered_filter) {
int64_t input_vect_dim = dnums.input_feature_dimension();
if (dnums.input_batch_dimension() > input_vect_dim) {
dnums.set_input_batch_dimension(dnums.input_batch_dimension() + 1);
}
for (int64_t& d : *dnums.mutable_input_spatial_dimensions()) {
if (d > input_vect_dim) {
++d;
}
}
if (!reordered_filter) {
int64_t kernel_vect_dim = dnums.kernel_input_feature_dimension();
if (dnums.kernel_output_feature_dimension() > kernel_vect_dim) {
dnums.set_kernel_output_feature_dimension(
dnums.kernel_output_feature_dimension() + 1);
}
for (int64_t& d : *dnums.mutable_kernel_spatial_dimensions()) {
if (d > kernel_vect_dim) {
++d;
}
}
}
int64_t output_vect_dim = dnums.output_feature_dimension();
if (dnums.output_batch_dimension() > output_vect_dim) {
dnums.set_output_batch_dimension(dnums.output_batch_dimension() + 1);
}
for (int64_t& d : *dnums.mutable_output_spatial_dimensions()) {
if (d > output_vect_dim) {
++d;
}
}
return dnums;
}
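// Permutes the filter (and bias, if present) into the layout cuDNN expects
// for int8x32 convolutions, replacing a runtime call to
// cudnnReorderFilterAndBias.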
absl::Status ReorderInt8NchwVect(HloCustomCallInstruction* conv,
XlaOp* operands) {
bool has_bias = conv->operand_count() > 2;
VLOG(1) << "Reordering filter" << (has_bias ? " and bias" : "")
<< " (replacement for cudnnReorderFilterAndBias)";
auto builder = operands->builder();
ConvolutionDimensionNumbers dnums = conv->convolution_dimension_numbers();
TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config,
conv->backend_config<GpuBackendConfig>());
CudnnConvBackendConfig& config =
*gpu_config.mutable_cudnn_conv_backend_config();
config.set_reordered_int8_nchw_vect(true);
TF_RETURN_IF_ERROR(conv->set_backend_config(gpu_config));
TF_ASSIGN_OR_RETURN(Shape filter_shape, builder->GetShape(operands[1]));
TF_ASSIGN_OR_RETURN(auto reorder, CudnnInferTransposeForFilterReordering(
filter_shape, dnums));
XlaOp reshape = Reshape(reorder.transpose_shape, operands[1]);
XlaOp transpose = Transpose(reshape, reorder.permutation);
operands[1] = Reshape(reorder.result_shape, transpose);
dnums.set_kernel_output_feature_dimension(0);
dnums.set_kernel_input_feature_dimension(1);
dnums.set_kernel_spatial_dimensions(0, 2);
dnums.set_kernel_spatial_dimensions(1, 3);
conv->set_convolution_dimension_numbers(dnums);
if (has_bias) {
TF_ASSIGN_OR_RETURN(Shape bias_shape, builder->GetShape(operands[2]));
TF_ASSIGN_OR_RETURN(reorder,
CudnnInferTransposeForBiasReordering(bias_shape));
reshape = Reshape(reorder.transpose_shape, operands[2]);
transpose = Transpose(reshape, reorder.permutation);
operands[2] = Reshape(reorder.result_shape, transpose);
}
return absl::OkStatus();
}
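// Attempts to rewrite an already-vectorized integer convolution (e.g. NCHW4)
// to the wider vector size `vect_size` (e.g. NCHW32). Returns true on
// success.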
static absl::StatusOr<bool> TryRevectorizeConv(
const se::CudaComputeCapability& compute_capability,
const se::dnn::VersionInfo& cudnn_version, HloCustomCallInstruction* conv,
int vect_size) {
const Shape& input_shape = conv->operand(0)->shape();
const Shape& kernel_shape = conv->operand(1)->shape();
const Shape& output_shape = conv->shape().tuple_shapes(0);
const ConvolutionDimensionNumbers* dnums =
&conv->convolution_dimension_numbers();
std::optional<int64_t> input_vect_dim;
std::optional<int64_t> kernel_vect_dim;
std::optional<int64_t> output_vect_dim;
std::tie(input_vect_dim, kernel_vect_dim, output_vect_dim) =
FindVectorizedFeatureDims(*dnums, input_shape, kernel_shape,
output_shape);
if (!input_vect_dim.has_value() || !kernel_vect_dim.has_value() ||
!output_vect_dim.has_value()) {
return false;
}
int64_t input_feat_size =
input_shape.dimensions(dnums->input_feature_dimension());
int64_t output_feat_size =
output_shape.dimensions(dnums->output_feature_dimension());
int64_t input_vect_size = input_shape.dimensions(*input_vect_dim);
int64_t output_vect_size = output_shape.dimensions(*output_vect_dim);
if (vect_size % input_vect_size != 0 || vect_size % output_vect_size != 0 ||
input_feat_size % (vect_size / input_vect_size) != 0 ||
output_feat_size % (vect_size / output_vect_size) != 0) {
return false;
}
if (primitive_util::IsIntegralType(input_shape.element_type())) {
TF_ASSIGN_OR_RETURN(bool supported_target_vectorization,
CudnnSupportsOptimizedIntegerConvolution(
compute_capability, *conv, vect_size));
if (!supported_target_vectorization) {
VLOG(3) << "Skipping re-vectorization of conv to vector size: "
<< vect_size << ": " << conv->ToString();
return false;
}
}
VLOG(1) << "Re-vectorizing conv channels from "
<< input_shape.dimensions(*input_vect_dim) << " to " << vect_size
<< ": " << conv->ToString();
XlaBuilder b(absl::StrCat(conv->name(), ".revectorized"));
b.SetOpMetadata(conv->metadata());
XlaOp filter = Parameter(&b, 1, conv->operand(1)->shape(), "filter");
absl::InlinedVector<XlaOp, 4> new_operands = {
RevectorizeInstr(Parameter(&b, 0, conv->operand(0)->shape(), "input"),
dnums->input_feature_dimension(), *input_vect_dim,
vect_size),
RevectorizeInstr(filter, dnums->kernel_input_feature_dimension(),
*kernel_vect_dim, vect_size),
};
if (conv->operand_count() > 2) {
new_operands.push_back(Parameter(&b, 2, conv->operand(2)->shape(), "bias"));
}
if (conv->operand_count() > 3) {
new_operands.push_back(RevectorizeInstr(
Parameter(&b, 3, conv->operand(3)->shape(), "side_input"),
dnums->input_feature_dimension(), *input_vect_dim, vect_size));
}
if (conv->operand_count() > 4) {
return InvalidArgument(
"Don't understand a conv with more than 4 arguments: %s",
conv->ToString());
}
const auto& debug_options = conv->GetModule()->config().debug_options();
bool use_reordering =
input_shape.element_type() == xla::S8 && vect_size == 32 &&
debug_options.xla_gpu_enable_cudnn_int8x32_convolution_reordering() &&
cudnn_version >= se::dnn::VersionInfo{8, 3, 0};
if (use_reordering) {
int64_t kernel_vect_size = kernel_shape.dimensions(*kernel_vect_dim);
if (kernel_vect_size == 4 || kernel_vect_size == 32) {
new_operands[1] = filter;
}
TF_RETURN_IF_ERROR(ReorderInt8NchwVect(conv, new_operands.data()));
dnums = &conv->convolution_dimension_numbers();
}
DimensionVector new_output_dims(output_shape.dimensions().begin(),
output_shape.dimensions().end());
new_output_dims[dnums->output_feature_dimension()] /=
(vect_size / output_vect_size);
new_output_dims[*output_vect_dim] = vect_size;
  XlaOp new_conv = CustomCallWithConvDnums(
      &b, conv->custom_call_target(), new_operands,
      ShapeUtil::MakeTupleShape(
          {ShapeUtil::MakeShape(output_shape.element_type(), new_output_dims),
           ShapeUtil::MakeShape(U8, {0})}),
      /*operand_shapes_with_layout=*/{},
      conv->raw_backend_config_string(), /*has_side_effect=*/false,
      /*output_operand_aliasing=*/{}, /*literal=*/nullptr,
      /*window=*/conv->window(),
      /*dnums=*/*dnums);
XlaOp new_conv_result = GetTupleElement(new_conv, 0);
XlaOp new_conv_scratch = GetTupleElement(new_conv, 1);
XlaOp new_conv_result_unrevectorized = UnrevectorizeInstr(
new_conv_result, dnums->output_feature_dimension(), *output_vect_dim,
output_shape.dimensions(*output_vect_dim));
TF_ASSIGN_OR_RETURN(
HloComputation * new_conv_comp,
BuilderToHloComputation(
b, Tuple(&b, {new_conv_result_unrevectorized, new_conv_scratch}),
conv->parent()));
auto new_conv_comp_instrs = new_conv_comp->instructions();
auto new_conv_it =
absl::c_find_if(new_conv_comp_instrs, [](HloInstruction* instr) {
return instr->opcode() == HloOpcode::kCustomCall;
});
if (new_conv_it != new_conv_comp_instrs.end()) {
new_conv_comp->parent()->SetAndUniquifyInstrName(*new_conv_it,
conv->name());
}
VLOG(1) << "Re-vectorized conv to " << new_conv_comp->ToString();
TF_RETURN_IF_ERROR(conv->parent()->ReplaceWithNewInstruction(
conv, HloInstruction::CreateCall(conv->shape(), conv->operands(),
new_conv_comp)));
return true;
}
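// Attempts to vectorize an unvectorized convolution by splitting its input
// and output feature dimensions into [C / vect_size, vect_size]. Returns true
// on success.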
static absl::StatusOr<bool> TryVectorizeConv(
const se::CudaComputeCapability& compute_capability,
const se::dnn::VersionInfo& cudnn_version, HloCustomCallInstruction* conv,
int64_t vect_size) {
const Shape& input_shape = conv->operand(0)->shape();
const Shape& output_shape = conv->shape().tuple_shapes(0);
const ConvolutionDimensionNumbers* dnums =
&conv->convolution_dimension_numbers();
int64_t in_channels =
input_shape.dimensions(dnums->input_feature_dimension());
int64_t out_channels =
output_shape.dimensions(dnums->output_feature_dimension());
if (in_channels % vect_size != 0 || out_channels % vect_size != 0) {
return false;
}
if (input_shape.dimensions_size() >
2 + dnums->input_spatial_dimensions_size()) {
return false;
}
if (primitive_util::IsIntegralType(input_shape.element_type())) {
TF_ASSIGN_OR_RETURN(bool supported_target_vectorization,
CudnnSupportsOptimizedIntegerConvolution(
compute_capability, *conv, vect_size));
if (!supported_target_vectorization) {
VLOG(3) << "Skipping vectorization of conv to vector size: " << vect_size
<< ": " << conv->ToString();
return false;
}
}
VLOG(1) << "Vectorizing conv channels by " << vect_size << ": "
<< conv->ToString();
XlaBuilder b(absl::StrCat(conv->name(), ".revectorized"));
b.SetOpMetadata(conv->metadata());
XlaOp filter = Parameter(&b, 1, conv->operand(1)->shape(), "filter");
absl::InlinedVector<XlaOp, 4> new_operands = {
SplitAtDim(Parameter(&b, 0, conv->operand(0)->shape(), "input"),
dnums->input_feature_dimension(), vect_size),
SplitAtDim(filter, dnums->kernel_input_feature_dimension(), vect_size),
};
if (conv->operand_count() > 2) {
new_operands.push_back(Parameter(&b, 2, conv->operand(2)->shape(), "bias"));
}
if (conv->operand_count() > 3) {
new_operands.push_back(
SplitAtDim(Parameter(&b, 3, conv->operand(3)->shape(), "side_input"),
dnums->output_feature_dimension(), vect_size));
}
if (conv->operand_count() > 4) {
return InvalidArgument(
"Don't understand a conv with more than 4 arguments: %s",
conv->ToString());
}
const auto& debug_options = conv->GetModule()->config().debug_options();
bool use_reordering =
input_shape.element_type() == xla::S8 && vect_size == 32 &&
debug_options.xla_gpu_enable_cudnn_int8x32_convolution_reordering() &&
cudnn_version >= se::dnn::VersionInfo{8, 3, 0};
if (use_reordering) {
new_operands[1] = filter;
TF_RETURN_IF_ERROR(ReorderInt8NchwVect(conv, new_operands.data()));
dnums = &conv->convolution_dimension_numbers();
}
Shape new_output_shape = SplitShapeAtDim(
output_shape, dnums->output_feature_dimension(), vect_size);
XlaOp new_conv = CustomCallWithConvDnums(
&b, conv->custom_call_target(), new_operands,
ShapeUtil::MakeTupleShape(
{new_output_shape, ShapeUtil::MakeShape(U8, {0})}),
      /*operand_shapes_with_layout=*/{},
      /*opaque=*/conv->raw_backend_config_string(), /*has_side_effect=*/false,
      /*output_operand_aliasing=*/{}, /*literal=*/nullptr,
      /*window=*/conv->window(),
      /*dnums=*/VectorizeDnums(*dnums, use_reordering));
XlaOp new_conv_result = GetTupleElement(new_conv, 0);
XlaOp new_conv_scratch = GetTupleElement(new_conv, 1);
XlaOp conv_result_collapsed =
Collapse(new_conv_result, {dnums->output_feature_dimension(),
dnums->output_feature_dimension() + 1});
TF_ASSIGN_OR_RETURN(
HloComputation * new_conv_comp,
BuilderToHloComputation(
b, Tuple(&b, {conv_result_collapsed, new_conv_scratch}),
conv->parent()));
VLOG(1) << "Vectorized conv to: " << new_conv_comp->ToString();
TF_RETURN_IF_ERROR(conv->parent()->ReplaceWithNewInstruction(
conv, HloInstruction::CreateCall(conv->shape(), conv->operands(),
new_conv_comp)));
return true;
}
}
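// Entry point: for every relevant cuDNN conv custom call in the module, prefer
// the wide int8x32 layout where supported and fall back to int8x4 otherwise.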
absl::StatusOr<bool> CudnnVectorizeConvolutions::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* comp :
module->MakeNonfusionComputations(execution_threads)) {
for (HloCustomCallInstruction* conv : GetRelevantConvs(comp)) {
bool local_changed = false;
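      // int8x32 needs compute capability 7.5 (Turing) or newer; try it first,
      // then fall back to the 4-wide layout if nothing changed.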
if (compute_capability_.IsAtLeast(7, 5)) {
TF_ASSIGN_OR_RETURN(
local_changed,
TryRevectorizeConv(compute_capability_, cudnn_version_, conv, 32));
if (!local_changed) {
TF_ASSIGN_OR_RETURN(
local_changed,
TryVectorizeConv(compute_capability_, cudnn_version_, conv, 32));
}
}
if (!local_changed) {
TF_ASSIGN_OR_RETURN(
local_changed,
TryVectorizeConv(compute_capability_, cudnn_version_, conv, 4));
}
changed |= local_changed;
}
}
return changed;
}
}
} | #include "xla/service/gpu/transforms/cudnn_vectorize_convolutions.h"
#include <cstdint>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "xla/service/call_inliner.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
class CudnnVectorizeConvolutionsTest : public HloTestBase {
protected:
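  // Runs the vectorization pass and then inlines the call wrapper it creates,
  // so the matchers in each test can see the rewritten conv directly.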
absl::StatusOr<bool> Run(std::pair<int, int> compute_capability,
HloModule* module) {
CudnnVectorizeConvolutions pass(
se::CudaComputeCapability{compute_capability.first,
compute_capability.second},
se::dnn::VersionInfo(8, 3, 0));
TF_ASSIGN_OR_RETURN(bool changed, RunHloPass(&pass, module));
CallInliner inliner;
TF_RETURN_IF_ERROR(RunHloPass(&inliner, module).status());
return changed;
}
};
TEST_F(CudnnVectorizeConvolutionsTest, VectorizeTo4) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,40] parameter(0)
filter = s8[2,2,40,44] parameter(1)
ROOT result = (s8[10,20,30,44], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward",
backend_config="{bar: 0}"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
ASSERT_THAT(
root,
GmockMatch(m::Tuple(
m::Reshape(m::GetTupleElement(
m::CustomCall(&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 20, 30, 10, 4}),
m::Reshape(m::Parameter(1))
.WithShape(S8, {2, 2, 10, 4, 44}))
.WithConvDnums("b01f?_01i?o->b01f?"))
.WithShape(S8, {10, 20, 30, 11, 4})),
m::Op())));
EXPECT_EQ(conv->raw_backend_config_string(), "{bar: 0}");
}
TEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo4UnsupportedFilterType) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,40] parameter(0)
filter = f32[2,2,40,44] parameter(1)
ROOT result = (s8[10,20,30,44], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward",
backend_config="{bar: 0}"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnVectorizeConvolutionsTest, VectorizeTo4NCHW) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,48,20,30] parameter(0)
filter = s8[48,44,2,2] parameter(1)
ROOT result = (s8[10,44,20,30], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=bf01_io01->bf01,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
ASSERT_THAT(
root,
GmockMatch(m::Tuple(
m::Reshape(m::GetTupleElement(
m::CustomCall(&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 12, 4, 20, 30}),
m::Reshape(m::Parameter(1))
.WithShape(S8, {12, 4, 44, 2, 2}))
.WithConvDnums("bf?01_i?o01->bf?01"))
.WithShape(S8, {10, 11, 4, 20, 30})),
m::Op())));
}
TEST_F(CudnnVectorizeConvolutionsTest, IncrementAllDnums) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[16,16,16,16] parameter(0)
filter = s8[16,16,3,3] parameter(1)
ROOT result = (s8[16,16,16,16], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=fb01_i01o->fb01,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
ASSERT_THAT(
root,
GmockMatch(m::Tuple(
m::Reshape(m::GetTupleElement(
m::CustomCall(&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Parameter(0))
.WithShape(S8, {4, 4, 16, 16, 16}),
m::Reshape(m::Parameter(1))
.WithShape(S8, {4, 4, 16, 3, 3}))
.WithConvDnums("f?b01_i?01o->f?b01"))
.WithShape(S8, {4, 4, 16, 16, 16})),
m::Op())));
}
TEST_F(CudnnVectorizeConvolutionsTest, FilterDnums) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[1,20,9,9] parameter(0)
filter = s8[3,3,20,32] parameter(1)
ROOT result = (s8[1,32,9,9], u8[0]) custom-call(s8[1,20,9,9] input, s8[3,3,20,32] filter),
window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
ASSERT_THAT(
root,
GmockMatch(m::Tuple(
m::Reshape(m::GetTupleElement(
m::CustomCall(&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Parameter(0))
.WithShape(S8, {1, 5, 4, 9, 9}),
m::Reshape(m::Parameter(1))
.WithShape(S8, {3, 3, 5, 4, 32}))
.WithConvDnums("bf?01_01i?o->bf?01"))
.WithShape(S8, {1, 8, 4, 9, 9})),
m::Op())));
}
TEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo4) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,41] parameter(0)
filter = s8[2,2,41,44] parameter(1)
ROOT result = (s8[10,20,30,44], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
CudnnVectorizeConvolutions pass(
{7, 5},
se::dnn::VersionInfo{8, 3, 0});
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_FALSE(changed);
}
TEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo4IfOutputIsS32) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,41] parameter(0)
filter = s8[2,2,41,44] parameter(1)
ROOT result = (s32[10,20,30,44], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_FALSE(changed);
}
TEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo4IfOutputIsF32) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,41] parameter(0)
filter = s8[2,2,41,44] parameter(1)
ROOT result = (f32[10,20,30,44], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_FALSE(changed);
}
TEST_F(CudnnVectorizeConvolutionsTest, VectorizeTo32) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
ROOT result = (s8[10,20,30,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
ASSERT_THAT(
root,
GmockMatch(m::Tuple(
m::Reshape(
m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 20, 30, 2, 32}),
m::Reshape(
m::Transpose(
m::Reshape(m::Parameter(1))
.WithShape(S8, {2, 2, 2, 8, 4, 16, 4, 2}))
.WithShape(S8, {2, 2, 2, 16, 2, 8, 4, 4})
.WithPredicate([](const HloInstruction* instr) {
return absl::c_equal(
instr->dimensions(),
std::vector<int64_t>{2, 0, 1, 5, 7, 3, 6,
4});
}))
.WithShape(S8, {128, 2, 2, 2, 32})))
.WithShape(S8, {10, 20, 30, 4, 32})),
m::Op())));
EXPECT_TRUE(conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.reordered_int8_nchw_vect());
}
TEST_F(CudnnVectorizeConvolutionsTest, BiasAndSideInput) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
bias = f32[128] parameter(2)
side_input = s8[10,20,30,64] parameter(3)
ROOT result = (s8[10,20,30,128], u8[0]) custom-call(input, filter, bias, side_input),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
ASSERT_THAT(
root,
GmockMatch(m::Tuple(
m::Reshape(
m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 20, 30, 2, 32}),
m::Reshape(m::Transpose(m::Reshape(m::Parameter(1))))
.WithShape(S8, {128, 2, 2, 2, 32}),
m::Reshape(
m::Transpose(m::Reshape(m::Parameter(2))
.WithShape(F32, {4, 4, 2, 4}))
.WithShape(F32, {4, 2, 4, 4})
.WithPredicate([](const HloInstruction* instr) {
return absl::c_equal(
instr->dimensions(),
std::vector<int64_t>{0, 2, 1, 3});
}))
.WithShape(F32, {128}),
m::Reshape(m::Parameter(3))
.WithShape(S8, {10, 20, 30, 2, 32})))
.WithShape(S8, {10, 20, 30, 4, 32})),
m::Op())));
EXPECT_TRUE(conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.reordered_int8_nchw_vect());
}
TEST_F(CudnnVectorizeConvolutionsTest, InputNHWC_OutputNCHW) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
bias = f32[128] parameter(2)
side_input = s8[10,128,20,30] parameter(3)
ROOT result = (s8[10,128,20,30], u8[0]) custom-call(input, filter, bias, side_input),
window={size=2x2}, dim_labels=b01f_01io->bf01,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
ASSERT_THAT(
root,
GmockMatch(m::Tuple(
m::Reshape(
m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 20, 30, 2, 32}),
m::Reshape(m::Transpose(m::Reshape(m::Parameter(1))))
.WithShape(S8, {128, 2, 2, 2, 32}),
m::Reshape(
m::Transpose(m::Reshape(m::Parameter(2))
.WithShape(F32, {4, 4, 2, 4}))
.WithShape(F32, {4, 2, 4, 4})
.WithPredicate([](const HloInstruction* instr) {
return absl::c_equal(
instr->dimensions(),
std::vector<int64_t>{0, 2, 1, 3});
}))
.WithShape(F32, {128}),
m::Reshape(m::Parameter(3))
.WithShape(S8, {10, 4, 32, 20, 30})))
.WithShape(S8, {10, 4, 32, 20, 30})),
m::Op())));
EXPECT_TRUE(conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.reordered_int8_nchw_vect());
}
TEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo32) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
ROOT result = (s8[10,20,30,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 0}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
ASSERT_THAT(
root,
GmockMatch(m::Tuple(
m::Reshape(m::GetTupleElement(
m::CustomCall(&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 20, 30, 16, 4}),
m::Reshape(m::Parameter(1))
.WithShape(S8, {2, 2, 16, 4, 128})))
.WithShape(S8, {10, 20, 30, 32, 4})),
m::Op())));
EXPECT_FALSE(conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.reordered_int8_nchw_vect());
}
TEST_F(CudnnVectorizeConvolutionsTest, Vectorize4To32) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,16,4] parameter(0)
filter = s8[3,5,16,192,4] parameter(1)
bias = f32[64] parameter(2)
side_input = s8[10,20,30,16,4] parameter(3)
ROOT result = (s8[10,20,30,48,4], u8[0]) custom-call(input, filter, bias, side_input),
window={size=3x5}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
auto conv_pat =
m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Transpose(m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 20, 30, 2, 8, 4}))
.WithShape(S8, {10, 20, 30, 2, 8, 4}))
.WithShape(S8, {10, 20, 30, 2, 32}),
m::Reshape(
m::Transpose(m::Reshape(m::Parameter(1))
.WithShape(S8, {3, 5, 2, 8, 24, 4, 2, 4}))
.WithShape(S8, {2, 3, 5, 24, 2, 8, 4, 4})
.WithPredicate([](const HloInstruction* instr) {
return absl::c_equal(
instr->dimensions(),
std::vector<int64_t>{2, 0, 1, 4, 6, 3, 5, 7});
}))
.WithShape(S8, {192, 2, 3, 5, 32}),
m::Reshape(m::Transpose(m::Reshape(m::Parameter(2)))),
m::Reshape(m::Transpose(m::Reshape(m::Parameter(3))
.WithShape(S8, {10, 20, 30, 2, 8, 4}))
.WithShape(S8, {10, 20, 30, 2, 8, 4}))
.WithShape(S8, {10, 20, 30, 2, 32}))
.WithConvDnums("b01f?_oi01?->b01f?"))
.WithShape(S8, {10, 20, 30, 6, 32});
ASSERT_THAT(root, GmockMatch(m::Tuple(
m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape(
S8, {10, 20, 30, 6, 8, 4}))
.WithShape(S8, {10, 20, 30, 6, 8, 4}))
.WithShape(S8, {10, 20, 30, 48, 4}),
m::Op())));
EXPECT_TRUE(conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.reordered_int8_nchw_vect());
}
TEST_F(CudnnVectorizeConvolutionsTest, Vectorize4To32NCHW) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,16,20,30,4] parameter(0)
filter = s8[16,128,2,2,4] parameter(1)
bias = f32[64] parameter(2)
side_input = s8[10,16,20,30,4] parameter(3)
ROOT result = (s8[10,32,20,30,4], u8[0]) custom-call(input, filter, bias, side_input),
window={size=2x2}, dim_labels=bf01_io01->bf01,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
auto conv_pat =
m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Transpose(m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 2, 8, 20, 30, 4}))
.WithShape(S8, {10, 2, 20, 30, 8, 4}))
.WithShape(S8, {10, 2, 20, 30, 32}),
m::Reshape(
m::Transpose(m::Reshape(m::Parameter(1))
.WithShape(S8, {2, 8, 16, 4, 2, 2, 2, 4}))
.WithShape(S8, {2, 2, 2, 16, 2, 8, 4, 4})
.WithPredicate([](const HloInstruction* instr) {
return absl::c_equal(
instr->dimensions(),
std::vector<int64_t>{0, 5, 6, 2, 4, 1, 3, 7});
}))
.WithShape(S8, {128, 2, 2, 2, 32}),
m::Reshape(m::Transpose(m::Reshape(m::Parameter(2)))),
m::Reshape(m::Transpose(m::Reshape(m::Parameter(3))
.WithShape(S8, {10, 2, 8, 20, 30, 4}))
.WithShape(S8, {10, 2, 20, 30, 8, 4}))
.WithShape(S8, {10, 2, 20, 30, 32}))
.WithConvDnums("bf01_oi01->bf01"))
.WithShape(S8, {10, 4, 20, 30, 32});
ASSERT_THAT(root, GmockMatch(m::Tuple(
m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape(
S8, {10, 4, 20, 30, 8, 4}))
.WithShape(S8, {10, 4, 8, 20, 30, 4}))
.WithShape(S8, {10, 32, 20, 30, 4}),
m::Op())));
EXPECT_TRUE(conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.reordered_int8_nchw_vect());
}
TEST_F(CudnnVectorizeConvolutionsTest, Vectorize4To32VectorDimFirst) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[4,10,20,30,16] parameter(0)
filter = s8[4,3,5,16,192] parameter(1)
bias = f32[64] parameter(2)
side_input = s8[4,10,20,30,16] parameter(3)
ROOT result = (s8[4,10,20,30,48], u8[0]) custom-call(input, filter, bias, side_input),
window={size=3x5}, dim_labels=?b01f_?01io->?b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
auto conv_pat =
m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Transpose(m::Reshape(m::Parameter(0))
.WithShape(S8, {4, 10, 20, 30, 2, 8}))
.WithShape(S8, {8, 4, 10, 20, 30, 2}))
.WithShape(S8, {32, 10, 20, 30, 2}),
m::Reshape(
m::Transpose(m::Reshape(m::Parameter(1))
.WithShape(S8, {4, 3, 5, 2, 8, 24, 4, 2}))
.WithShape(S8, {2, 3, 5, 24, 2, 8, 4, 4})
.WithPredicate([](const HloInstruction* instr) {
return absl::c_equal(
instr->dimensions(),
std::vector<int64_t>{3, 1, 2, 5, 7, 4, 6, 0});
}))
.WithShape(S8, {192, 2, 3, 5, 32}),
m::Reshape(m::Transpose(m::Reshape(m::Parameter(2)))),
m::Reshape(m::Transpose(m::Reshape(m::Parameter(3))
.WithShape(S8, {4, 10, 20, 30, 2, 8}))
.WithShape(S8, {8, 4, 10, 20, 30, 2}))
.WithShape(S8, {32, 10, 20, 30, 2}))
.WithConvDnums("?b01f_oi01->?b01f"))
.WithShape(S8, {32, 10, 20, 30, 6});
ASSERT_THAT(root, GmockMatch(m::Tuple(
m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape(
S8, {8, 4, 10, 20, 30, 6}))
.WithShape(S8, {4, 10, 20, 30, 6, 8}))
.WithShape(S8, {4, 10, 20, 30, 48}),
m::Op())));
EXPECT_TRUE(conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.reordered_int8_nchw_vect());
}
TEST_F(CudnnVectorizeConvolutionsTest, NoVectorize4To32) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,16,4] parameter(0)
filter = s8[2,2,16,128,4] parameter(1)
bias = f32[10] parameter(2)
side_input = s8[10,20,30,16,4] parameter(3)
ROOT result = (s8[10,20,30,32,4], u8[0]) custom-call(input, filter, bias, side_input),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 0}, module.get()));
EXPECT_FALSE(changed);
}
TEST_F(CudnnVectorizeConvolutionsTest, Vectorize16To32) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,4,16] parameter(0)
filter = s8[3,5,4,192,16] parameter(1)
ROOT result = (s8[10,20,30,12,16], u8[0]) custom-call(input, filter),
window={size=3x5}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
auto filter_pat =
m::Reshape(
m::Transpose(
m::Reshape(m::Parameter(1)).WithShape(S8, {3, 5, 2, 2, 192, 16}))
.WithShape(S8, {3, 5, 2, 192, 2, 16}))
.WithShape(S8, {3, 5, 2, 192, 32});
auto conv_pat =
m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvForwardCallTarget},
m::Reshape(
m::Transpose(m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 20, 30, 2, 2, 16}))
.WithShape(S8, {10, 20, 30, 2, 2, 16}))
.WithShape(S8, {10, 20, 30, 2, 32}),
m::Reshape(
m::Transpose(m::Reshape(filter_pat)
.WithShape(S8, {3, 5, 2, 24, 4, 2, 8, 4}))
.WithShape(S8, {2, 3, 5, 24, 2, 8, 4, 4}))
.WithShape(S8, {192, 2, 3, 5, 32}))
.WithConvDnums("b01f_oi01->b01f"))
.WithShape(S8, {10, 20, 30, 6, 32});
ASSERT_THAT(root, GmockMatch(m::Tuple(
m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape(
S8, {10, 20, 30, 6, 2, 16}))
.WithShape(S8, {10, 20, 30, 6, 2, 16}))
.WithShape(S8, {10, 20, 30, 12, 16}),
m::Op())));
EXPECT_TRUE(conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.reordered_int8_nchw_vect());
}
TEST_F(CudnnVectorizeConvolutionsTest, VectorizeMixedTo32) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[10,20,30,8,8] parameter(0)
filter = s8[3,5,2,192,32] parameter(1)
ROOT result = (s8[10,20,30,96,2], u8[0]) custom-call(input, filter),
window={size=3x5}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get()));
EXPECT_TRUE(changed);
SCOPED_TRACE(module->ToString());
auto* root = module->entry_computation()->root_instruction();
const HloInstruction* conv = nullptr;
auto conv_pat =
m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvForwardCallTarget},
m::Reshape(m::Transpose(m::Reshape(m::Parameter(0))
.WithShape(S8, {10, 20, 30, 2, 4, 8}))
.WithShape(S8, {10, 20, 30, 2, 4, 8}))
.WithShape(S8, {10, 20, 30, 2, 32}),
m::Reshape(
m::Transpose(m::Reshape(m::Parameter(1))
.WithShape(S8, {3, 5, 2, 24, 4, 2, 8, 4}))
.WithShape(S8, {2, 3, 5, 24, 2, 8, 4, 4}))
.WithShape(S8, {192, 2, 3, 5, 32}))
.WithConvDnums("b01f_oi01->b01f"))
.WithShape(S8, {10, 20, 30, 6, 32});
ASSERT_THAT(root, GmockMatch(m::Tuple(
m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape(
S8, {10, 20, 30, 6, 16, 2}))
.WithShape(S8, {10, 20, 30, 6, 16, 2}))
.WithShape(S8, {10, 20, 30, 96, 2}),
m::Op())));
EXPECT_TRUE(conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.reordered_int8_nchw_vect());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cudnn_vectorize_convolutions.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cudnn_vectorize_convolutions_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
376cd3aa-a22b-41e5-b02a-66981383442d | cpp | tensorflow/tensorflow | quantize_nodes | tensorflow/tools/graph_transforms/quantize_nodes.cc | tensorflow/tools/graph_transforms/quantize_nodes_test.cc | #define EIGEN_USE_THREADS
#include "tensorflow/core/common_runtime/constant_folding.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/threadpool_device.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
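// Describes one float op with a quantized replacement: which attrs are copied
// verbatim, which dtype attrs are set on the quantized op, the bit depths it
// consumes and produces, which input indices stay float, and whether min/max
// inputs are interleaved per tensor or grouped (all mins, then all maxes).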
struct QuantizedOpInfo {
string float_name;
std::vector<string> attrs_to_copy;
std::vector<std::pair<string, DataType>> dtypes_to_set;
DataType input_bit_depth;
DataType output_bit_depth;
std::set<int32> unquantized_inputs;
enum { CONTIGUOUS_MIN_MAX, SEPARATE_MIN_MAX } min_max_order;
};
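// Returns the table of ops this transform knows how to replace with their
// quantized equivalents.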
const std::vector<QuantizedOpInfo>& GetQuantizedOpList() {
static const std::vector<QuantizedOpInfo> op_list = {
{"Add",
{},
{{"T1", DT_QUINT8}, {"T2", DT_QUINT8}, {"Toutput", DT_QINT32}},
DT_QUINT8,
DT_QINT32,
{},
QuantizedOpInfo::CONTIGUOUS_MIN_MAX},
{"AvgPool",
{"ksize", "strides", "padding"},
{{"T", DT_QUINT8}},
DT_QUINT8,
DT_QUINT8,
{},
QuantizedOpInfo::CONTIGUOUS_MIN_MAX},
{"BiasAdd",
{},
{{"T1", DT_QUINT8}, {"T2", DT_QUINT8}, {"out_type", DT_QINT32}},
DT_QUINT8,
DT_QINT32,
{},
QuantizedOpInfo::CONTIGUOUS_MIN_MAX},
{"Concat",
{"N"},
{{"T", DT_QUINT8}},
DT_QUINT8,
DT_QUINT8,
{0},
QuantizedOpInfo::SEPARATE_MIN_MAX},
{"Conv2D",
{"strides", "padding"},
{{"Tinput", DT_QUINT8}, {"Tfilter", DT_QUINT8}, {"out_type", DT_QINT32}},
DT_QUINT8,
DT_QINT32,
{},
QuantizedOpInfo::CONTIGUOUS_MIN_MAX},
{"MatMul",
{"transpose_a", "transpose_b"},
{{"T1", DT_QUINT8}, {"T2", DT_QUINT8}, {"Toutput", DT_QINT32}},
DT_QUINT8,
DT_QINT32,
{},
QuantizedOpInfo::CONTIGUOUS_MIN_MAX},
{"MaxPool",
{"ksize", "strides", "padding"},
{{"T", DT_QUINT8}},
DT_QUINT8,
DT_QUINT8,
{},
QuantizedOpInfo::CONTIGUOUS_MIN_MAX},
{"Mul",
{},
{{"T1", DT_QUINT8}, {"T2", DT_QUINT8}, {"Toutput", DT_QINT32}},
DT_QUINT8,
DT_QINT32,
{},
QuantizedOpInfo::CONTIGUOUS_MIN_MAX},
{"Relu",
{},
{{"Tinput", DT_QUINT8}},
DT_QUINT8,
DT_QUINT8,
{},
QuantizedOpInfo::CONTIGUOUS_MIN_MAX},
{"ResizeBilinear",
{"align_corners"},
{{"T", DT_QUINT8}},
DT_QUINT8,
DT_QUINT8,
{1},
QuantizedOpInfo::CONTIGUOUS_MIN_MAX},
{"Relu6",
{},
{{"Tinput", DT_QUINT8}},
DT_QUINT8,
DT_QUINT8,
{},
QuantizedOpInfo::CONTIGUOUS_MIN_MAX},
{"Reshape",
{},
{{"T", DT_QUINT8}},
DT_QUINT8,
DT_QUINT8,
{1},
QuantizedOpInfo::CONTIGUOUS_MIN_MAX},
};
return op_list;
}
namespace {
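// Flattens an input reference (optionally "^"-prefixed for control deps or
// ":N"-suffixed for output ports) into a string that is safe to embed in a
// new node name, e.g. "^foo:2" -> "__hat__foo__port__2".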
string UniqueNodeNameFromInput(const string& input_name) {
string prefix;
string node_name;
string suffix;
NodeNamePartsFromInput(input_name, &prefix, &node_name, &suffix);
string result;
if (prefix == "^") {
result += "__hat__";
}
result += node_name;
if (!suffix.empty()) {
result += "__port__" + suffix.substr(1, suffix.size() - 1);
}
return result;
}
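// Reads an optional (min, max) pair from the transform parameters, insisting
// that either both or neither are supplied.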
Status ExtractRangeFromParams(const TransformFuncContext& context,
const string& min_name, const string& max_name,
float* min_value, float* max_value,
bool* has_range) {
const bool has_min = (context.params.count(min_name) != 0);
const bool has_max = (context.params.count(max_name) != 0);
*has_range = (has_min || has_max);
if (!*has_range) {
return OkStatus();
}
if (!has_min || !has_max) {
return errors::InvalidArgument("You must pass both ", min_name, " and ",
max_name, " into quantize_nodes");
}
TF_RETURN_IF_ERROR(context.GetOneFloatParameter(min_name, 0.0f, min_value));
TF_RETURN_IF_ERROR(context.GetOneFloatParameter(max_name, 0.0f, max_value));
return OkStatus();
}
}
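// Canonicalizes nodes that are identical once their names are ignored
// (excluding graph inputs/outputs, whose names are significant, and stateful
// ops, which are never merged): all but the first instance are dropped and
// their consumers rerouted. Repeats until a pass finds no duplicates, since
// each merge can expose new ones.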
Status MergeDuplicateNodes(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
std::set<string> input_names(context.input_names.begin(),
context.input_names.end());
std::set<string> output_names(context.output_names.begin(),
context.output_names.end());
GraphDef current_graph_def = input_graph_def;
bool any_duplicates_found;
do {
any_duplicates_found = false;
std::map<uint64, std::vector<const NodeDef*>> hashed_nodes;
for (const NodeDef& node : current_graph_def.node()) {
NodeDef nameless_node = node;
if (!input_names.count(node.name()) && !output_names.count(node.name())) {
nameless_node.set_name("");
}
const uint64 hash = HashNodeDef(nameless_node);
hashed_nodes[hash].push_back(&node);
}
std::map<string, string> inputs_to_rename;
GraphDef merged_graph_def;
for (const std::pair<const uint64, std::vector<const NodeDef*>>&
hashed_node_info : hashed_nodes) {
const std::vector<const NodeDef*>& hash_node_list =
hashed_node_info.second;
for (int i = 0; i < hash_node_list.size(); ++i) {
const NodeDef* current_node = hash_node_list[i];
const OpDef* op_def = nullptr;
TF_RETURN_IF_ERROR(
OpRegistry::Global()->LookUpOpDef(current_node->op(), &op_def));
const bool is_duplicate = ((!op_def->is_stateful()) && (i > 0));
if (is_duplicate) {
const string original_name = hash_node_list[0]->name();
inputs_to_rename[current_node->name() + ":*"] = original_name;
any_duplicates_found = true;
} else {
NodeDef* new_node = merged_graph_def.mutable_node()->Add();
*new_node = *current_node;
}
}
}
TF_RETURN_IF_ERROR(RenameNodeInputs(merged_graph_def, inputs_to_rename,
std::unordered_set<string>(),
                                        &current_graph_def));
} while (any_duplicates_found);
*output_graph_def = current_graph_def;
return OkStatus();
}
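// Finds QuantizeV2 nodes fed directly by a Dequantize and bypasses the round
// trip, wiring consumers straight to the original quantized tensor and its
// min/max. The Dequantize itself survives only if a graph output or another
// consumer still needs the float value.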
Status RemoveRedundantQuantizations(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
std::set<string> graph_outputs;
for (const string& output_name : context.output_names) {
graph_outputs.insert(NodeNameFromInput(output_name));
}
std::map<string, string> inputs_to_rename;
GraphDef replaced_graph_def;
TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
input_graph_def,
{"QuantizeV2",
{
{"Dequantize"},
{"Min"},
{"Max"},
}
},
[&inputs_to_rename, &graph_outputs](const NodeMatch& match,
const std::set<string>& input_nodes,
const std::set<string>& output_nodes,
std::vector<NodeDef>* new_nodes) {
const NodeDef& quantize_node = match.node;
const NodeDef& dequantize_node = match.inputs[0].node;
inputs_to_rename[quantize_node.name() + ":0"] =
dequantize_node.input(0);
inputs_to_rename[quantize_node.name() + ":1"] =
dequantize_node.input(1);
inputs_to_rename[quantize_node.name() + ":2"] =
dequantize_node.input(2);
if (output_nodes.count(dequantize_node.name()) ||
graph_outputs.count(dequantize_node.name())) {
CopyOriginalMatch(match, new_nodes);
}
return OkStatus();
},
{true}, &replaced_graph_def));
return RenameNodeInputs(replaced_graph_def, inputs_to_rename,
std::unordered_set<string>(), output_graph_def);
}
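// If input_min/input_max were supplied, rewrites every Placeholder to produce
// quint8 directly and appends a Dequantize with that fixed range, so the rest
// of the float graph still type-checks while inputs arrive pre-quantized.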
Status QuantizePlaceholders(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
float input_min;
float input_max;
bool has_input_range;
TF_RETURN_IF_ERROR(ExtractRangeFromParams(context, "input_min", "input_max",
&input_min, &input_max,
&has_input_range));
if (!has_input_range) {
*output_graph_def = input_graph_def;
return OkStatus();
}
std::map<string, string> inputs_to_rename_first_pass;
std::map<string, string> inputs_to_rename_second_pass;
GraphDef placeholder_graph_def;
placeholder_graph_def.Clear();
for (const NodeDef& node : input_graph_def.node()) {
if (node.op() != "Placeholder") {
*(placeholder_graph_def.mutable_node()->Add()) = node;
} else {
string namespace_prefix = node.name() + "_eightbit";
NodeDef quantized_placeholder;
quantized_placeholder = node;
SetNodeAttr("dtype", DT_QUINT8, &quantized_placeholder);
*(placeholder_graph_def.mutable_node()->Add()) = quantized_placeholder;
NodeDef min_node;
min_node.set_op("Const");
min_node.set_name(namespace_prefix + "/min");
SetNodeAttr("dtype", DT_FLOAT, &min_node);
Tensor min_tensor(DT_FLOAT, {});
min_tensor.flat<float>()(0) = input_min;
SetNodeTensorAttr<float>("value", min_tensor, &min_node);
*(placeholder_graph_def.mutable_node()->Add()) = min_node;
NodeDef max_node;
max_node.set_op("Const");
max_node.set_name(namespace_prefix + "/max");
SetNodeAttr("dtype", DT_FLOAT, &max_node);
Tensor max_tensor(DT_FLOAT, {});
max_tensor.flat<float>()(0) = input_max;
SetNodeTensorAttr<float>("value", max_tensor, &max_node);
*(placeholder_graph_def.mutable_node()->Add()) = max_node;
const string rename_suffix = "__RENAMED_PLACEHOLDER__";
NodeDef dequantize_node;
dequantize_node.set_op("Dequantize");
dequantize_node.set_name(namespace_prefix + "/dequantize");
SetNodeAttr("T", DT_QUINT8, &dequantize_node);
SetNodeAttr("mode", "MIN_FIRST", &dequantize_node);
AddNodeInput(node.name() + rename_suffix, &dequantize_node);
AddNodeInput(min_node.name(), &dequantize_node);
AddNodeInput(max_node.name(), &dequantize_node);
*(placeholder_graph_def.mutable_node()->Add()) = dequantize_node;
inputs_to_rename_first_pass[node.name()] = dequantize_node.name();
inputs_to_rename_second_pass[node.name() + rename_suffix] = node.name();
}
}
GraphDef first_pass_graph_def;
TF_RETURN_IF_ERROR(
RenameNodeInputs(placeholder_graph_def, inputs_to_rename_first_pass,
std::unordered_set<string>(), &first_pass_graph_def));
TF_RETURN_IF_ERROR(
RenameNodeInputs(first_pass_graph_def, inputs_to_rename_second_pass,
std::unordered_set<string>(), output_graph_def));
return OkStatus();
}
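// Replaces FakeQuantWithMinMaxVars (with constant min/max) by an equivalent
// QuantizeV2 -> Requantize -> Dequantize chain, preserving the known output
// range in a form that later passes can merge into preceding quantized ops.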
Status ConvertFakeQuantsToRequantize(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
input_graph_def,
{"FakeQuantWithMinMaxVars",
{
{"*"},
{"Const"},
{"Const"},
}
},
[](const NodeMatch& match, const std::set<string>& input_nodes,
const std::set<string>& output_nodes,
std::vector<NodeDef>* new_nodes) {
const NodeDef& fake_quant_node = match.node;
const NodeDef& original_op_node = match.inputs[0].node;
const NodeDef& fake_quant_min_node = match.inputs[1].node;
const NodeDef& fake_quant_max_node = match.inputs[2].node;
string namespace_prefix = fake_quant_node.name() + "_eightbit";
new_nodes->push_back(original_op_node);
new_nodes->push_back(fake_quant_min_node);
new_nodes->push_back(fake_quant_max_node);
NodeDef quantize_node;
quantize_node.set_op("QuantizeV2");
quantize_node.set_name(namespace_prefix + "/quantize");
SetNodeAttr("T", DT_QINT32, &quantize_node);
SetNodeAttr("mode", "MIN_FIRST", &quantize_node);
AddNodeInput(fake_quant_node.input(0), &quantize_node);
AddNodeInput(fake_quant_min_node.name(), &quantize_node);
AddNodeInput(fake_quant_max_node.name(), &quantize_node);
new_nodes->push_back(quantize_node);
NodeDef requantize_node;
requantize_node.set_op("Requantize");
requantize_node.set_name(namespace_prefix + "/requantize");
SetNodeAttr("Tinput", DT_QINT32, &requantize_node);
SetNodeAttr("out_type", DT_QUINT8, &requantize_node);
AddNodeInput(quantize_node.name() + ":0", &requantize_node);
AddNodeInput(quantize_node.name() + ":1", &requantize_node);
AddNodeInput(quantize_node.name() + ":2", &requantize_node);
AddNodeInput(fake_quant_min_node.name(), &requantize_node);
AddNodeInput(fake_quant_max_node.name(), &requantize_node);
new_nodes->push_back(requantize_node);
NodeDef dequantize_node;
dequantize_node.set_op("Dequantize");
dequantize_node.set_name(fake_quant_node.name());
SetNodeAttr("T", DT_QUINT8, &dequantize_node);
SetNodeAttr("mode", "MIN_FIRST", &dequantize_node);
AddNodeInput(requantize_node.name() + ":0", &dequantize_node);
AddNodeInput(requantize_node.name() + ":1", &dequantize_node);
AddNodeInput(requantize_node.name() + ":2", &dequantize_node);
new_nodes->push_back(dequantize_node);
return OkStatus();
},
{}, output_graph_def));
return OkStatus();
}
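// After quantization, a converted FakeQuant can leave a
// Requantize -> Dequantize -> QuantizeV2 -> Requantize sandwich behind; this
// pattern match collapses it into a single Requantize that uses the
// FakeQuant's constant range.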
Status MergeAdjacentRequantizes(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
input_graph_def,
{"Requantize",
{
{"QuantizeV2",
{
{"Dequantize",
{
{"Requantize",
{
{"*"},
{"*"},
{"*"},
{"RequantizationRange"},
{"RequantizationRange"},
}
},
{"Requantize"},
{"Requantize"},
}
},
{"Const"},
{"Const"},
},
},
{"QuantizeV2"},
{"QuantizeV2"},
{"Const"},
{"Const"},
}
},
[](const NodeMatch& match, const std::set<string>& input_nodes,
const std::set<string>& output_nodes,
std::vector<NodeDef>* new_nodes) {
const NodeDef& fake_requantize_node = match.node;
const NodeDef& original_op_node =
match.inputs[0].inputs[0].inputs[0].inputs[0].node;
const NodeDef& fake_requantize_min_node = match.inputs[3].node;
const NodeDef& fake_requantize_max_node = match.inputs[4].node;
new_nodes->push_back(original_op_node);
new_nodes->push_back(fake_requantize_min_node);
new_nodes->push_back(fake_requantize_max_node);
NodeDef requantize_node;
requantize_node = fake_requantize_node;
requantize_node.mutable_input()->Clear();
AddNodeInput(original_op_node.name() + ":0", &requantize_node);
AddNodeInput(original_op_node.name() + ":1", &requantize_node);
AddNodeInput(original_op_node.name() + ":2", &requantize_node);
AddNodeInput(fake_requantize_min_node.name(), &requantize_node);
AddNodeInput(fake_requantize_max_node.name(), &requantize_node);
new_nodes->push_back(requantize_node);
return OkStatus();
},
{}, output_graph_def));
return OkStatus();
}
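// Moves FakeQuant nodes upward past chains of up to max_depth single-input
// ops, so their known ranges sit directly after the ops to be quantized.
// Deeper chains are matched first, presumably so a FakeQuant is not
// repeatedly re-hoisted through nested matches.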
Status HoistFakeQuants(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
GraphDef current_graph_def = input_graph_def;
const int max_depth = 3;
for (int depth = max_depth; depth > 0; --depth) {
OpTypePattern pattern = {"*"};
for (int i = 0; i < depth; ++i) {
pattern = {"*", {pattern}};
}
pattern = {"FakeQuantWithMinMaxVars", {pattern, {"Const"}, {"Const"}}};
GraphDef hoisted_graph_def;
TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
current_graph_def, pattern,
[depth](const NodeMatch& match, const std::set<string>& input_nodes,
const std::set<string>& output_nodes,
std::vector<NodeDef>* new_nodes) {
const NodeDef& fake_quant_node = match.node;
const NodeDef& fake_quant_min_node = match.inputs[1].node;
const NodeDef& fake_quant_max_node = match.inputs[2].node;
std::vector<NodeDef> linear_nodes;
NodeMatch current_match = match;
for (int i = 0; i <= depth; ++i) {
linear_nodes.push_back(current_match.inputs[0].node);
current_match = current_match.inputs[0];
}
NodeDef new_fake_quant_node;
new_fake_quant_node = fake_quant_node;
new_fake_quant_node.set_name(fake_quant_node.name() + "_hoisted");
new_fake_quant_node.set_input(
0, linear_nodes[linear_nodes.size() - 2].input(0));
new_nodes->push_back(new_fake_quant_node);
new_nodes->push_back(fake_quant_min_node);
new_nodes->push_back(fake_quant_max_node);
linear_nodes[linear_nodes.size() - 2].set_input(
0, new_fake_quant_node.name());
linear_nodes.front().set_name(fake_quant_node.name());
for (const NodeDef& linear_node : linear_nodes) {
new_nodes->push_back(linear_node);
}
return OkStatus();
},
{}, &hoisted_graph_def));
current_graph_def = hoisted_graph_def;
}
*output_graph_def = current_graph_def;
return OkStatus();
}
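// The main transform: quantizes placeholders (if a range was given), hoists
// and converts FakeQuants, swaps each supported float op for its Quantized*
// counterpart with dynamically computed input ranges, then cleans up by
// merging adjacent requantizes, deduplicating nodes, and removing redundant
// quantize/dequantize round trips.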
Status QuantizeNodes(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
std::set<string> ops_to_ignore;
if (context.params.count("ignore_op") > 0) {
for (const string& name : context.params.at("ignore_op")) {
ops_to_ignore.insert(name);
}
}
const std::vector<QuantizedOpInfo>& op_list = GetQuantizedOpList();
string op_pattern;
bool is_first = true;
std::map<string, QuantizedOpInfo> op_map;
for (const QuantizedOpInfo& op_info : op_list) {
if (ops_to_ignore.count(op_info.float_name) == 0) {
strings::StrAppend(&op_pattern, (is_first ? "" : "|"),
op_info.float_name);
op_map.insert({op_info.float_name, op_info});
is_first = false;
}
}
GraphDef placeholder_graph_def;
TF_RETURN_IF_ERROR(
QuantizePlaceholders(input_graph_def, context, &placeholder_graph_def));
TF_RETURN_IF_ERROR(IsGraphValid(placeholder_graph_def));
GraphDef hoisted_graph_def;
TF_RETURN_IF_ERROR(
HoistFakeQuants(placeholder_graph_def, context, &hoisted_graph_def));
TF_RETURN_IF_ERROR(IsGraphValid(hoisted_graph_def));
GraphDef converted_graph_def;
TF_RETURN_IF_ERROR(ConvertFakeQuantsToRequantize(hoisted_graph_def, context,
&converted_graph_def));
TF_RETURN_IF_ERROR(IsGraphValid(converted_graph_def));
float fallback_min;
float fallback_max;
bool has_fallback_range;
TF_RETURN_IF_ERROR(ExtractRangeFromParams(
context, "fallback_min", "fallback_max", &fallback_min, &fallback_max,
&has_fallback_range));
GraphDef quantized_graph_def;
TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
converted_graph_def, {op_pattern},
[&op_map, fallback_min, fallback_max, has_fallback_range](
const NodeMatch& match, const std::set<string>& input_nodes,
const std::set<string>& output_nodes,
std::vector<NodeDef>* new_nodes) {
const NodeDef& float_node = match.node;
const QuantizedOpInfo& op_info = op_map[float_node.op()];
DataTypeVector input_types;
DataTypeVector output_types;
TF_RETURN_IF_ERROR(
GetInOutTypes(float_node, &input_types, &output_types));
bool are_all_float = true;
for (int i = 0; i < float_node.input_size(); ++i) {
if (op_info.unquantized_inputs.count(i)) {
continue;
}
if (i >= input_types.size()) {
LOG(ERROR) << "input_types has incorrect size "
<< input_types.size() << " <= " << i
<< ". Assuming everything else is floats.";
}
if (i < input_types.size() && input_types[i] != DT_FLOAT) {
are_all_float = false;
}
}
for (const DataType& output_type : output_types) {
if (output_type != DT_FLOAT) {
are_all_float = false;
}
}
if (!are_all_float) {
CopyOriginalMatch(match, new_nodes);
return OkStatus();
}
string namespace_prefix = float_node.name() + "_eightbit";
std::vector<string> quantized_input_names;
for (int i = 0; i < float_node.input_size(); ++i) {
if (op_info.unquantized_inputs.count(i)) {
continue;
}
const string& input_name = float_node.input(i);
string unique_input_name =
namespace_prefix + "/" + UniqueNodeNameFromInput(input_name);
NodeDef reshape_dims;
reshape_dims.set_op("Const");
reshape_dims.set_name(unique_input_name + "/reshape_dims");
AddNodeInput("^" + NodeNameFromInput(input_name), &reshape_dims);
SetNodeAttr("dtype", DT_INT32, &reshape_dims);
Tensor reshape_dims_tensor(DT_INT32, {1});
reshape_dims_tensor.flat<int32>()(0) = -1;
SetNodeTensorAttr<int32>("value", reshape_dims_tensor, &reshape_dims);
new_nodes->push_back(reshape_dims);
NodeDef reduction_dims;
reduction_dims.set_op("Const");
reduction_dims.set_name(unique_input_name + "/reduction_dims");
AddNodeInput("^" + NodeNameFromInput(input_name), &reduction_dims);
SetNodeAttr("dtype", DT_INT32, &reduction_dims);
Tensor reduction_dims_tensor(DT_INT32, {1});
reduction_dims_tensor.flat<int32>()(0) = 0;
SetNodeTensorAttr<int32>("value", reduction_dims_tensor,
&reduction_dims);
new_nodes->push_back(reduction_dims);
NodeDef reshape_node;
reshape_node.set_op("Reshape");
reshape_node.set_name(unique_input_name + "/reshape");
SetNodeAttr("T", DT_FLOAT, &reshape_node);
AddNodeInput(input_name, &reshape_node);
AddNodeInput(reshape_dims.name(), &reshape_node);
new_nodes->push_back(reshape_node);
NodeDef min_node;
min_node.set_op("Min");
min_node.set_name(unique_input_name + "/min");
SetNodeAttr("T", DT_FLOAT, &min_node);
SetNodeAttr("keep_dims", false, &min_node);
AddNodeInput(reshape_node.name(), &min_node);
AddNodeInput(reduction_dims.name(), &min_node);
new_nodes->push_back(min_node);
NodeDef max_node;
max_node.set_op("Max");
max_node.set_name(unique_input_name + "/max");
SetNodeAttr("T", DT_FLOAT, &max_node);
SetNodeAttr("keep_dims", false, &max_node);
AddNodeInput(reshape_node.name(), &max_node);
AddNodeInput(reduction_dims.name(), &max_node);
new_nodes->push_back(max_node);
NodeDef quantize_node;
quantize_node.set_op("QuantizeV2");
quantize_node.set_name(unique_input_name + "/quantize");
SetNodeAttr("T", DT_QUINT8, &quantize_node);
SetNodeAttr("mode", "MIN_FIRST", &quantize_node);
AddNodeInput(input_name, &quantize_node);
AddNodeInput(min_node.name(), &quantize_node);
AddNodeInput(max_node.name(), &quantize_node);
new_nodes->push_back(quantize_node);
quantized_input_names.push_back(quantize_node.name());
}
NodeDef quantized_main_node;
quantized_main_node.set_op("Quantized" + float_node.op());
quantized_main_node.set_name(float_node.name() + "/eightbit");
for (const string& attr_to_copy : op_info.attrs_to_copy) {
CopyNodeAttr(float_node, attr_to_copy, attr_to_copy,
&quantized_main_node);
}
for (const std::pair<string, DataType>& dtype_to_set :
op_info.dtypes_to_set) {
SetNodeAttr(dtype_to_set.first, dtype_to_set.second,
&quantized_main_node);
}
int quantized_input_index = 0;
for (int i = 0; i < float_node.input_size(); ++i) {
if (op_info.unquantized_inputs.count(i)) {
AddNodeInput(float_node.input(i), &quantized_main_node);
} else {
const string& quantized_input_name =
quantized_input_names[quantized_input_index];
AddNodeInput(quantized_input_name + ":0", &quantized_main_node);
++quantized_input_index;
}
}
if (op_info.min_max_order == QuantizedOpInfo::CONTIGUOUS_MIN_MAX) {
for (const string& quantized_input_name : quantized_input_names) {
AddNodeInput(quantized_input_name + ":1", &quantized_main_node);
AddNodeInput(quantized_input_name + ":2", &quantized_main_node);
}
} else {
for (const string& quantized_input_name : quantized_input_names) {
AddNodeInput(quantized_input_name + ":1", &quantized_main_node);
}
for (const string& quantized_input_name : quantized_input_names) {
AddNodeInput(quantized_input_name + ":2", &quantized_main_node);
}
}
new_nodes->push_back(quantized_main_node);
string eight_bit_node_name;
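        // Ops that accumulate into 32 bits need their output squeezed back to
        // eight bits: either with a fixed fallback range (when supplied) or a
        // RequantizationRange op that measures the live range at runtime.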
if (op_info.output_bit_depth == DT_QINT32) {
string requantize_min_input;
string requantize_max_input;
if (has_fallback_range) {
NodeDef fallback_min_node;
fallback_min_node.set_op("Const");
fallback_min_node.set_name(quantized_main_node.name() +
"/fallback_min");
SetNodeAttr("dtype", DT_FLOAT, &fallback_min_node);
Tensor fallback_min_tensor(DT_FLOAT, {});
fallback_min_tensor.flat<float>()(0) = fallback_min;
SetNodeTensorAttr<float>("value", fallback_min_tensor,
&fallback_min_node);
new_nodes->push_back(fallback_min_node);
NodeDef fallback_max_node;
fallback_max_node.set_op("Const");
fallback_max_node.set_name(quantized_main_node.name() +
"/fallback_max");
SetNodeAttr("dtype", DT_FLOAT, &fallback_max_node);
Tensor fallback_max_tensor(DT_FLOAT, {});
fallback_max_tensor.flat<float>()(0) = fallback_max;
SetNodeTensorAttr<float>("value", fallback_max_tensor,
&fallback_max_node);
new_nodes->push_back(fallback_max_node);
requantize_min_input = fallback_min_node.name();
requantize_max_input = fallback_max_node.name();
} else {
NodeDef requant_range_node;
requant_range_node.set_op("RequantizationRange");
requant_range_node.set_name(quantized_main_node.name() +
"/requant_range");
SetNodeAttr("Tinput", DT_QINT32, &requant_range_node);
AddNodeInput(quantized_main_node.name() + ":0",
&requant_range_node);
AddNodeInput(quantized_main_node.name() + ":1",
&requant_range_node);
AddNodeInput(quantized_main_node.name() + ":2",
&requant_range_node);
new_nodes->push_back(requant_range_node);
requantize_min_input = requant_range_node.name() + ":0";
requantize_max_input = requant_range_node.name() + ":1";
}
NodeDef requantize_node;
requantize_node.set_op("Requantize");
requantize_node.set_name(quantized_main_node.name() + "/requantize");
SetNodeAttr("Tinput", DT_QINT32, &requantize_node);
SetNodeAttr("out_type", DT_QUINT8, &requantize_node);
AddNodeInput(quantized_main_node.name() + ":0", &requantize_node);
AddNodeInput(quantized_main_node.name() + ":1", &requantize_node);
AddNodeInput(quantized_main_node.name() + ":2", &requantize_node);
AddNodeInput(requantize_min_input, &requantize_node);
AddNodeInput(requantize_max_input, &requantize_node);
new_nodes->push_back(requantize_node);
eight_bit_node_name = requantize_node.name();
} else {
eight_bit_node_name = quantized_main_node.name();
}
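        // Finally convert back to float under the original node name, so
        // downstream consumers (and any later cleanup passes) are unaffected.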
NodeDef dequantize_node;
dequantize_node.set_op("Dequantize");
dequantize_node.set_name(float_node.name());
SetNodeAttr("T", DT_QUINT8, &dequantize_node);
SetNodeAttr("mode", "MIN_FIRST", &dequantize_node);
AddNodeInput(eight_bit_node_name + ":0", &dequantize_node);
AddNodeInput(eight_bit_node_name + ":1", &dequantize_node);
AddNodeInput(eight_bit_node_name + ":2", &dequantize_node);
new_nodes->push_back(dequantize_node);
return OkStatus();
},
{}, &quantized_graph_def));
TF_RETURN_IF_ERROR(IsGraphValid(quantized_graph_def));
GraphDef merged_graph_def;
TF_RETURN_IF_ERROR(MergeAdjacentRequantizes(quantized_graph_def, context,
&merged_graph_def));
TF_RETURN_IF_ERROR(IsGraphValid(merged_graph_def));
GraphDef deduped_graph_def;
TF_RETURN_IF_ERROR(
MergeDuplicateNodes(merged_graph_def, context, &deduped_graph_def));
TF_RETURN_IF_ERROR(IsGraphValid(deduped_graph_def));
TF_RETURN_IF_ERROR(RemoveRedundantQuantizations(deduped_graph_def, context,
output_graph_def));
TF_RETURN_IF_ERROR(IsGraphValid(*output_graph_def));
return OkStatus();
}
REGISTER_GRAPH_TRANSFORM("quantize_nodes", QuantizeNodes);
REGISTER_GRAPH_TRANSFORM("merge_duplicate_nodes", MergeDuplicateNodes);
}
} | #define EIGEN_USE_THREADS
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
Status QuantizeNodes(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
Status RemoveRedundantQuantizations(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
Status QuantizePlaceholders(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
Status ConvertFakeQuantsToRequantize(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
Status MergeAdjacentRequantizes(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
Status HoistFakeQuants(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
Status MergeDuplicateNodes(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
class QuantizeNodesTest : public ::testing::Test {
protected:
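  // Runs the original float graph on float_inputs and the transformed graph
  // on transformed_inputs, then checks that each requested output agrees
  // within `threshold`. The transformed graph is returned for inspection.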
void TestTransformedVersusFloatGraph(
const TransformFunc& transform_function, const GraphDef& float_graph_def,
const std::vector<std::pair<string, Tensor>>& float_inputs,
const std::vector<std::pair<string, Tensor>>& transformed_inputs,
const std::vector<string>& output_names,
const TransformFuncContext& in_context, double threshold,
GraphDef* transformed_graph_def) {
std::unique_ptr<Session> float_session(NewSession(SessionOptions()));
TF_ASSERT_OK(float_session->Create(float_graph_def));
std::vector<Tensor> float_outputs;
TF_ASSERT_OK(
float_session->Run(float_inputs, output_names, {}, &float_outputs));
TransformFuncContext context(in_context);
std::vector<string> input_names;
for (const std::pair<const string&, const Tensor&> float_input :
float_inputs) {
context.input_names.push_back(float_input.first);
}
context.output_names = output_names;
TF_ASSERT_OK(
transform_function(float_graph_def, context, transformed_graph_def));
std::unique_ptr<Session> transformed_session(NewSession(SessionOptions()));
TF_ASSERT_OK(transformed_session->Create(*transformed_graph_def));
std::vector<Tensor> transformed_outputs;
TF_ASSERT_OK(transformed_session->Run(transformed_inputs, output_names, {},
&transformed_outputs));
const int output_count = output_names.size();
EXPECT_EQ(output_count, float_outputs.size());
EXPECT_EQ(output_count, transformed_outputs.size());
for (int i = 0; i < output_count; ++i) {
test::ExpectTensorNear<float>(float_outputs[i], transformed_outputs[i],
threshold);
}
}
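  // Quantizes the graph, checks numerical agreement with the float original,
  // and verifies that none of the quantizable float ops survived.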
void TestQuantizedVersusFloatGraph(
const GraphDef& float_graph_def,
const std::vector<std::pair<string, Tensor>>& inputs,
const std::vector<string>& output_names) {
GraphDef quantized_graph_def;
TestTransformedVersusFloatGraph(QuantizeNodes, float_graph_def, inputs,
inputs, output_names, {}, 1.0,
&quantized_graph_def);
const std::set<string> quantizable_ops = {
"Add", "BiasAdd", "Concat", "Conv2D", "MatMul", "Relu",
"Relu6", "ResizeBilinear", "AvgPool", "MaxPool", "Mul"};
for (const NodeDef& node : quantized_graph_def.node()) {
EXPECT_EQ(0, quantizable_ops.count(node.op()))
<< "Found quantizable node " << node.op() << " for node named "
<< node.name();
}
}
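  // Exercises the input_min/input_max path: inputs are pre-quantized to
  // quint8 with the given range before being fed to the transformed graph.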
void TestGraphWithInputRange(
const GraphDef& float_graph_def,
const std::vector<std::pair<string, Tensor>>& float_inputs,
const std::vector<string>& output_names, float range_min,
float range_max) {
TransformFuncContext context;
context.params["input_min"] = {strings::StrCat(range_min)};
context.params["input_max"] = {strings::StrCat(range_max)};
std::vector<std::pair<string, Tensor>> quantized_inputs;
for (const std::pair<string, Tensor>& float_input : float_inputs) {
const Tensor& float_tensor = float_input.second;
Tensor quantized_tensor(DT_QUINT8, float_tensor.shape());
FloatTensorToQuantizedInPlace<quint8>(float_tensor, range_min, range_max,
&quantized_tensor);
quantized_inputs.push_back({float_input.first, quantized_tensor});
}
GraphDef quantized_graph_def;
TestTransformedVersusFloatGraph(
QuantizeNodes, float_graph_def, float_inputs, quantized_inputs,
output_names, context, 1.0, &quantized_graph_def);
}
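  // Exercises the fallback_min/fallback_max path; the tolerance is looser
  // (2.0) because a fixed requantization range costs some precision.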
void TestGraphWithFallbackRange(
const GraphDef& float_graph_def,
const std::vector<std::pair<string, Tensor>>& float_inputs,
const std::vector<string>& output_names, float range_min, float range_max,
GraphDef* quantized_graph_def) {
TransformFuncContext context;
context.params["fallback_min"] = {strings::StrCat(range_min)};
context.params["fallback_max"] = {strings::StrCat(range_max)};
TestTransformedVersusFloatGraph(QuantizeNodes, float_graph_def,
float_inputs, float_inputs, output_names,
context, 2.0, quantized_graph_def);
}
void TestIgnoreOps(std::initializer_list<string> ops_to_ignore) {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
auto const_op = [&](const string& name, const TensorShape& shape,
std::initializer_list<float> values) {
Tensor tensor(DT_FLOAT, shape);
test::FillValues<float>(&tensor, values);
return Const(root.WithOpName(name), Input::Initializer(tensor));
};
int m = 1;
int n = 1;
int k = 1;
Output a_op = const_op("a_op", {m, k}, {2});
Output b_op = const_op("b_op", {k, n}, {3});
Output c_op = const_op("c_op", {m, k}, {1});
Output d_op = const_op("d_op", {k, n}, {4});
Output mat_mul_op = MatMul(root.WithOpName("mat_mul_op"), a_op, b_op);
Output mul_op = Mul(root.WithOpName("mul"), c_op, d_op);
GraphDef float_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
TransformFuncContext context;
if (ops_to_ignore.size() > 0) {
context.params["ignore_op"] = ops_to_ignore;
}
GraphDef quantized_graph_def;
TestTransformedVersusFloatGraph(QuantizeNodes, float_graph_def, {}, {},
{"mat_mul_op", "mul"}, context, 1.0,
&quantized_graph_def);
for (const string& op_name : ops_to_ignore) {
bool exists_in_quantized_graph = false;
for (const NodeDef& node : quantized_graph_def.node()) {
if (node.op() == op_name) {
exists_in_quantized_graph = true;
break;
}
}
EXPECT_TRUE(exists_in_quantized_graph)
<< "Op " << op_name
<< " should not have been replace by a quantized version";
}
}
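// Quantizes a single {m, k} x {k, n} MatMul built from constant values and
// compares the result against the float version of the graph.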
void TestQuantizeMatMul(int m, int n, int k,
const std::vector<float>& a_values,
const std::vector<float>& b_values) {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor a_tensor(DT_FLOAT, TensorShape({m, k}));
test::FillValues<float>(&a_tensor, a_values);
Output a_op = Const(root.WithOpName("a_op"), Input::Initializer(a_tensor));
Tensor b_tensor(DT_FLOAT, TensorShape({k, n}));
test::FillValues<float>(&b_tensor, b_values);
Output b_op = Const(root.WithOpName("b_op"), Input::Initializer(b_tensor));
Output mat_mul_op = MatMul(root.WithOpName("mat_mul_op"), a_op, b_op);
GraphDef float_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
TestQuantizedVersusFloatGraph(float_graph_def, {}, {"mat_mul_op"});
}
void TestQuantizeMatMulTiny() {
TestQuantizeMatMul(1, 1, 1, {2}, {3});
TestQuantizeMatMul(1, 2, 1, {1}, {2, 3});
TestQuantizeMatMul(1, 1, 2, {1, 1}, {1, 1});
TestQuantizeMatMul(1, 1, 2, {0, 0}, {1, 1});
TestQuantizeMatMul(1, 1, 2, {1, 2}, {1, 2});
}
void TestQuantizeMatMulSmall() {
TestQuantizeMatMul(2, 4, 3, {1, 2, 3, 4, 5, 6},
{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18});
}
void TestQuantizeMul() {
using namespace ::tensorflow::ops;
std::vector<int64_t> x_shape({10, 100});
const size_t x_num_elements = TensorShape(x_shape).num_elements();
std::vector<float> x_values(x_num_elements);
for (int i = 0; i < x_num_elements; ++i) {
x_values[i] = (i % 256) / 256.0f;
}
std::vector<int64_t> y_shape({100});
const size_t y_num_elements = TensorShape(y_shape).num_elements();
std::vector<float> y_values(y_num_elements);
for (int i = 0; i < y_num_elements; ++i) {
y_values[i] = ((i + 23) % 123) - 50;
}
Scope root = Scope::NewRootScope();
Tensor x_float_tensor(DT_FLOAT, TensorShape(x_shape));
test::FillValues<float>(&x_float_tensor, x_values);
Output x = Const(root.WithOpName("x"), Input::Initializer(x_float_tensor));
Tensor y_float_tensor(DT_FLOAT, TensorShape(y_shape));
test::FillValues<float>(&y_float_tensor, y_values);
Output y = Const(root.WithOpName("y"), Input::Initializer(y_float_tensor));
Mul mul = Mul(root.WithOpName("mul"), x, y);
GraphDef float_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
TestQuantizedVersusFloatGraph(float_graph_def, {}, {"mul"});
}
void TestQuantizeAdd() {
using namespace ::tensorflow::ops;
std::vector<int64_t> x_shape({10, 100});
const size_t x_num_elements = TensorShape(x_shape).num_elements();
std::vector<float> x_values(x_num_elements);
for (int i = 0; i < x_num_elements; ++i) {
x_values[i] = (i % 256) / 256.0f;
}
std::vector<int64_t> y_shape({100});
const size_t y_num_elements = TensorShape(y_shape).num_elements();
std::vector<float> y_values(y_num_elements);
for (int i = 0; i < y_num_elements; ++i) {
y_values[i] = ((i + 23) % 123) - 50;
}
Scope root = Scope::NewRootScope();
Tensor x_float_tensor(DT_FLOAT, TensorShape(x_shape));
test::FillValues<float>(&x_float_tensor, x_values);
Output x = Const(root.WithOpName("x"), Input::Initializer(x_float_tensor));
Tensor y_float_tensor(DT_FLOAT, TensorShape(y_shape));
test::FillValues<float>(&y_float_tensor, y_values);
Output y = Const(root.WithOpName("y"), Input::Initializer(y_float_tensor));
Add add = Add(root.WithOpName("add"), x, y);
GraphDef float_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
TestQuantizedVersusFloatGraph(float_graph_def, {}, {"add"});
}
void TestQuantizeConv2D(int depth, int input_width, int input_height,
int input_batch_count, int filter_size,
int filter_count, int stride, const string& padding,
const std::vector<float>& input_values,
const std::vector<float>& filter_values) {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor input_tensor(DT_FLOAT, TensorShape({input_batch_count, input_height,
input_width, depth}));
test::FillValues<float>(&input_tensor, input_values);
Output input_op =
Const(root.WithOpName("input_op"), Input::Initializer(input_tensor));
Tensor filter_tensor(
DT_FLOAT, TensorShape({filter_size, filter_size, depth, filter_count}));
test::FillValues<float>(&filter_tensor, filter_values);
Output filter_op =
Const(root.WithOpName("filter_op"), Input::Initializer(filter_tensor));
Output conv_op = Conv2D(root.WithOpName("conv_op"), input_op, filter_op,
{1, stride, stride, 1}, padding);
GraphDef float_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
TestQuantizedVersusFloatGraph(float_graph_def, {}, {"conv_op"});
}
void TestQuantizeBiasAdd() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor input_tensor(DT_FLOAT, TensorShape({1, 1, 2, 6}));
test::FillIota<float>(&input_tensor, 1);
Output input_op =
Const(root.WithOpName("input_op"), Input::Initializer(input_tensor));
Tensor offset_tensor(DT_FLOAT, TensorShape({6}));
test::FillIota<float>(&offset_tensor, 1);
Output offset_op =
Const(root.WithOpName("offset_op"), Input::Initializer(offset_tensor));
Output bias_add_op =
BiasAdd(root.WithOpName("bias_add_op"), input_op, offset_op);
GraphDef float_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
TestQuantizedVersusFloatGraph(float_graph_def, {}, {"bias_add_op"});
}
void TestQuantizeConcat() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor shape_tensor(DT_INT32, TensorShape({}));
test::FillValues<int32>(&shape_tensor, {0});
Output shape_op =
Const(root.WithOpName("shape_op"), Input::Initializer(shape_tensor));
Tensor a_tensor(DT_FLOAT, TensorShape({2, 2, 3}));
test::FillValues<float>(&a_tensor, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
Output a_op = Const(root.WithOpName("a_op"), Input::Initializer(a_tensor));
Tensor b_tensor(DT_FLOAT, TensorShape({2, 2, 3}));
test::FillValues<float>(&b_tensor,
{13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
Output b_op = Const(root.WithOpName("b_op"), Input::Initializer(b_tensor));
Output concat_op =
Concat(root.WithOpName("concat_op"), {a_op, b_op}, shape_op);
GraphDef float_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
TestQuantizedVersusFloatGraph(float_graph_def, {}, {"concat_op"});
}
void TestQuantizeRelu() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor constant_tensor(DT_FLOAT, TensorShape({1, 2, 6, 1}));
test::FillValues<float>(&constant_tensor,
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
Output constant_op = Const(root.WithOpName("constant_op"),
Input::Initializer(constant_tensor));
Output relu_op = Relu(root.WithOpName("relu_op"), constant_op);
GraphDef float_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
TestQuantizedVersusFloatGraph(float_graph_def, {}, {"relu_op"});
}
void TestQuantizeRelu6() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor constant_tensor(DT_FLOAT, TensorShape({1, 2, 6, 1}));
test::FillValues<float>(&constant_tensor,
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
Output constant_op = Const(root.WithOpName("constant_op"),
Input::Initializer(constant_tensor));
Output relu6_op = Relu6(root.WithOpName("relu6_op"), constant_op);
GraphDef float_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
TestQuantizedVersusFloatGraph(float_graph_def, {}, {"relu6_op"});
}
void TestQuantizeMaxPool() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor constant_tensor(DT_FLOAT, TensorShape({1, 2, 6, 1}));
test::FillValues<float>(&constant_tensor,
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
Output constant_op = Const(root.WithOpName("constant_op"),
Input::Initializer(constant_tensor));
Output max_pool_op = MaxPool(root.WithOpName("max_pool_op"), constant_op,
{1, 2, 2, 1}, {1, 1, 1, 1}, "SAME");
GraphDef float_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
TestQuantizedVersusFloatGraph(float_graph_def, {}, {"max_pool_op"});
}
void TestQuantizeAvgPool() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor constant_tensor(DT_FLOAT, TensorShape({1, 2, 6, 1}));
test::FillValues<float>(&constant_tensor,
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
Output constant_op = Const(root.WithOpName("constant_op"),
Input::Initializer(constant_tensor));
Output avg_pool_op = AvgPool(root.WithOpName("avg_pool_op"), constant_op,
{1, 2, 2, 1}, {1, 1, 1, 1}, "SAME");
GraphDef float_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
TestQuantizedVersusFloatGraph(float_graph_def, {}, {"avg_pool_op"});
}
void TestQuantizeReshape() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor constant_tensor(DT_FLOAT, TensorShape({4, 5}));
test::FillValues<float>(&constant_tensor,
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
Output constant_op = Const(root.WithOpName("constant_op"),
Input::Initializer(constant_tensor));
Output reshape_op =
Reshape(root.WithOpName("reshape_op"), constant_op, {10, 2});
GraphDef float_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
TestQuantizedVersusFloatGraph(float_graph_def, {}, {"reshape_op"});
}
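// Constructs the redundant pattern Dequantize -> (Reshape/Min/Max) ->
// QuantizeV2 -> Dequantize and expects RemoveRedundantQuantizations to wire
// the final Dequantize directly to the original quantized constant.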
void TestRemoveRedundantQuantization() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor quantized_tensor(DT_QUINT8, TensorShape({}));
test::FillValues<quint8>(&quantized_tensor, {0});
Output quantized_op = Const(root.WithOpName("quantized_op"),
Input::Initializer(quantized_tensor));
Tensor quantized_min_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&quantized_min_tensor, {2.0f});
Output quantized_min_op = Const(root.WithOpName("quantized_min_op"),
Input::Initializer(quantized_min_tensor));
Tensor quantized_max_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&quantized_max_tensor, {2.0f});
Output quantized_max_op = Const(root.WithOpName("quantized_max_op"),
                                  Input::Initializer(quantized_max_tensor));
Output dequantize_op =
Dequantize(root.WithOpName("dequantize_op"), quantized_op,
quantized_min_op, quantized_max_op);
Tensor dequantize_reshape_dims_tensor(DT_INT32, TensorShape({1}));
test::FillValues<int32>(&dequantize_reshape_dims_tensor, {-1});
Output dequantize_reshape_dims =
Const(root.WithOpName("dequantize_reshape_dims"),
Input::Initializer(dequantize_reshape_dims_tensor));
Tensor dequantize_reduction_dims_tensor(DT_INT32, TensorShape({}));
test::FillValues<int32>(&dequantize_reduction_dims_tensor, {0});
Output dequantize_reduction_dims =
Const(root.WithOpName("dequantize_reduction_dims"),
Input::Initializer(dequantize_reduction_dims_tensor));
Output dequantize_reshape = Reshape(root.WithOpName("dequantize_reshape"),
dequantize_op, dequantize_reshape_dims);
Output dequantize_min =
Min(root.WithOpName("dequantize_min"), dequantize_reshape,
dequantize_reduction_dims, Min::Attrs().KeepDims(false));
Output dequantize_max =
Max(root.WithOpName("dequantize_max"), dequantize_reshape,
dequantize_reduction_dims, Max::Attrs().KeepDims(false));
QuantizeV2 quantize_op(root.WithOpName("quantize_op"), dequantize_op,
dequantize_min, dequantize_max, DT_QUINT8,
QuantizeV2::Attrs().Mode("MIN_FIRST"));
Output final_dequantize =
Dequantize(root.WithOpName("final_dequantize"), quantize_op.output,
quantize_op.output_min, quantize_op.output_max);
GraphDef float_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
GraphDef removed_graph_def;
TestTransformedVersusFloatGraph(
RemoveRedundantQuantizations, float_graph_def, {}, {},
{"final_dequantize"}, {}, 1.0, &removed_graph_def);
std::map<string, const NodeDef*> node_map;
MapNamesToNodes(removed_graph_def, &node_map);
EXPECT_EQ(1, node_map.count("final_dequantize"));
EXPECT_EQ("quantized_op", node_map.at("final_dequantize")->input(0));
}
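// Same redundant pattern, but fed by a QuantizedBiasAdd ->
// RequantizationRange -> Requantize chain; after the rewrite the final
// Dequantize should read straight from the Requantize output.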
void TestRemoveRedundantQuantizationWithBiasAdd() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor quantized_tensor(DT_QUINT8, TensorShape({1, 6}));
test::FillValues<quint8>(&quantized_tensor, {0, 0, 0, 0, 0, 0});
Output quantized_op = Const(root.WithOpName("quantized_op"),
Input::Initializer(quantized_tensor));
Tensor quantized_min_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&quantized_min_tensor, {2.0f});
Output quantized_min_op = Const(root.WithOpName("quantized_min_op"),
Input::Initializer(quantized_min_tensor));
Tensor quantized_max_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&quantized_max_tensor, {2.0f});
Output quantized_max_op = Const(root.WithOpName("quantized_max_op"),
                                  Input::Initializer(quantized_max_tensor));
Tensor offset_tensor(DT_QUINT8, TensorShape({6}));
test::FillValues<quint8>(&offset_tensor, {1, 2, 3, 4, 5, 6});
Output offset_op =
Const(root.WithOpName("offset_op"), Input::Initializer(offset_tensor));
Tensor offset_min_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&offset_min_tensor, {0.0f});
Output offset_min_op = Const(root.WithOpName("offset_min_op"),
Input::Initializer(offset_min_tensor));
Tensor offset_max_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&offset_max_tensor, {255.0f});
Output offset_max_op = Const(root.WithOpName("offset_max_op"),
Input::Initializer(offset_max_tensor));
QuantizedBiasAdd quantized_bias_add_op(
root.WithOpName("bias_add_op"), quantized_op, offset_op,
quantized_min_op, quantized_max_op, offset_min_op, offset_max_op,
DT_QINT32);
RequantizationRange requantization_range_op(
root.WithOpName("requantization_range_op"),
quantized_bias_add_op.output, quantized_bias_add_op.min_out,
quantized_bias_add_op.max_out);
Requantize requantize_op(
root.WithOpName("requantize_op"), quantized_bias_add_op.output,
quantized_bias_add_op.min_out, quantized_bias_add_op.max_out,
requantization_range_op.output_min, requantization_range_op.output_max,
DT_QUINT8);
Output dequantize_op =
Dequantize(root.WithOpName("dequantize_op"), requantize_op.output,
requantize_op.output_min, requantize_op.output_max);
Tensor dequantize_reshape_dims_tensor(DT_INT32, TensorShape({1}));
test::FillValues<int32>(&dequantize_reshape_dims_tensor, {-1});
Output dequantize_reshape_dims =
Const(root.WithOpName("dequantize_reshape_dims"),
Input::Initializer(dequantize_reshape_dims_tensor));
Tensor dequantize_reduction_dims_tensor(DT_INT32, TensorShape({}));
test::FillValues<int32>(&dequantize_reduction_dims_tensor, {0});
Output dequantize_reduction_dims =
Const(root.WithOpName("dequantize_reduction_dims"),
Input::Initializer(dequantize_reduction_dims_tensor));
Output dequantize_reshape = Reshape(root.WithOpName("dequantize_reshape"),
dequantize_op, dequantize_reshape_dims);
Output dequantize_min =
Min(root.WithOpName("dequantize_min"), dequantize_reshape,
dequantize_reduction_dims, Min::Attrs().KeepDims(false));
Output dequantize_max =
Max(root.WithOpName("dequantize_max"), dequantize_reshape,
dequantize_reduction_dims, Max::Attrs().KeepDims(false));
QuantizeV2 quantize_op(root.WithOpName("quantize_op"), dequantize_op,
dequantize_min, dequantize_max, DT_QUINT8,
QuantizeV2::Attrs().Mode("MIN_FIRST"));
Output final_dequantize =
Dequantize(root.WithOpName("final_dequantize"), quantize_op.output,
quantize_op.output_min, quantize_op.output_max);
GraphDef float_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
GraphDef removed_graph_def;
TestTransformedVersusFloatGraph(
RemoveRedundantQuantizations, float_graph_def, {}, {},
{"final_dequantize"}, {}, 1.0, &removed_graph_def);
std::map<string, const NodeDef*> node_map;
MapNamesToNodes(removed_graph_def, &node_map);
EXPECT_EQ(1, node_map.count("final_dequantize"));
EXPECT_EQ("requantize_op", node_map.at("final_dequantize")->input(0));
}
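// Quantizes a ResizeBilinear whose input arrives through a float
// placeholder, feeding a constant-valued 1x128x128x3 tensor at run time.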
void TestQuantizeResizeBilinear() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor size_tensor(DT_INT32, TensorShape({2}));
test::FillValues<int32>(&size_tensor, {256, 256});
Output constant_op = Const(root.WithOpName("size_tensor_op"),
Input::Initializer(size_tensor));
Output placeholder_op =
Placeholder(root.WithOpName("placeholder_op"), DT_FLOAT);
Output resize_bilinear_op = ResizeBilinear(
root.WithOpName("resize_bilinear_op"), placeholder_op, constant_op);
GraphDef float_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
Tensor input_tensor(DT_FLOAT, {1, 128, 128, 3});
test::FillFn<float>(&input_tensor, [](int) { return 100.0f; });
TestQuantizedVersusFloatGraph(float_graph_def,
{{"placeholder_op", input_tensor}},
{"resize_bilinear_op"});
}
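// Variant where the intermediate Dequantize also feeds a Relu; because of
// that second consumer the rewrite must leave two Dequantize nodes behind.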
void TestRemoveRedundantQuantizationWithMultipleOutputs() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor quantized_tensor(DT_QUINT8, TensorShape({1, 6}));
test::FillValues<quint8>(&quantized_tensor, {0, 0, 0, 0, 0, 0});
Output quantized_op = Const(root.WithOpName("quantized_op"),
Input::Initializer(quantized_tensor));
Tensor quantized_min_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&quantized_min_tensor, {2.0f});
Output quantized_min_op = Const(root.WithOpName("quantized_min_op"),
Input::Initializer(quantized_min_tensor));
Tensor quantized_max_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&quantized_max_tensor, {2.0f});
Output quantized_max_op = Const(root.WithOpName("quantized_max_op"),
                                  Input::Initializer(quantized_max_tensor));
Tensor offset_tensor(DT_QUINT8, TensorShape({6}));
test::FillValues<quint8>(&offset_tensor, {1, 2, 3, 4, 5, 6});
Output offset_op =
Const(root.WithOpName("offset_op"), Input::Initializer(offset_tensor));
Tensor offset_min_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&offset_min_tensor, {0.0f});
Output offset_min_op = Const(root.WithOpName("offset_min_op"),
Input::Initializer(offset_min_tensor));
Tensor offset_max_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&offset_max_tensor, {255.0f});
Output offset_max_op = Const(root.WithOpName("offset_max_op"),
Input::Initializer(offset_max_tensor));
QuantizedBiasAdd quantized_bias_add_op(
root.WithOpName("bias_add_op"), quantized_op, offset_op,
quantized_min_op, quantized_max_op, offset_min_op, offset_max_op,
DT_QINT32);
RequantizationRange requantization_range_op(
root.WithOpName("requantization_range_op"),
quantized_bias_add_op.output, quantized_bias_add_op.min_out,
quantized_bias_add_op.max_out);
Requantize requantize_op(
root.WithOpName("requantize_op"), quantized_bias_add_op.output,
quantized_bias_add_op.min_out, quantized_bias_add_op.max_out,
requantization_range_op.output_min, requantization_range_op.output_max,
DT_QUINT8);
Output dequantize_op =
Dequantize(root.WithOpName("dequantize_op"), requantize_op.output,
requantize_op.output_min, requantize_op.output_max);
Tensor dequantize_reshape_dims_tensor(DT_INT32, TensorShape({1}));
test::FillValues<int32>(&dequantize_reshape_dims_tensor, {-1});
Output dequantize_reshape_dims =
Const(root.WithOpName("dequantize_reshape_dims"),
Input::Initializer(dequantize_reshape_dims_tensor));
Tensor dequantize_reduction_dims_tensor(DT_INT32, TensorShape({}));
test::FillValues<int32>(&dequantize_reduction_dims_tensor, {0});
Output dequantize_reduction_dims =
Const(root.WithOpName("dequantize_reduction_dims"),
Input::Initializer(dequantize_reduction_dims_tensor));
Output dequantize_reshape = Reshape(root.WithOpName("dequantize_reshape"),
dequantize_op, dequantize_reshape_dims);
Output dequantize_min =
Min(root.WithOpName("dequantize_min"), dequantize_reshape,
dequantize_reduction_dims, Min::Attrs().KeepDims(false));
Output dequantize_max =
Max(root.WithOpName("dequantize_max"), dequantize_reshape,
dequantize_reduction_dims, Max::Attrs().KeepDims(false));
QuantizeV2 quantize_op(root.WithOpName("quantize_op"), dequantize_op,
dequantize_min, dequantize_max, DT_QUINT8,
QuantizeV2::Attrs().Mode("MIN_FIRST"));
Output final_dequantize =
Dequantize(root.WithOpName("final_dequantize"), quantize_op.output,
quantize_op.output_min, quantize_op.output_max);
Output relu_op = Relu(root.WithOpName("relu_op"), dequantize_op);
GraphDef float_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
GraphDef removed_graph_def;
TestTransformedVersusFloatGraph(
RemoveRedundantQuantizations, float_graph_def, {}, {},
{"final_dequantize", "relu_op"}, {}, 1.0, &removed_graph_def);
std::map<string, int> op_type_count;
for (const NodeDef& node : removed_graph_def.node()) {
++op_type_count[node.op()];
}
EXPECT_EQ(2, op_type_count["Dequantize"]);
}
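// Checks that QuantizePlaceholders converts a float placeholder to quint8
// (using the supplied input_min/input_max) and reroutes its consumers so
// they no longer read the placeholder directly.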
void TestQuantizePlaceholders() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Output placeholder_op =
Placeholder(root.WithOpName("placeholder_op"), DT_FLOAT);
Output relu_op = Relu(root.WithOpName("relu_op"), placeholder_op);
GraphDef float_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
TransformFuncContext context;
context.input_names = {"placeholder_op"};
context.output_names = {"relu_op"};
context.params = {{"input_min", {"-10.0"}}, {"input_max", {"10.0"}}};
GraphDef quantized_graph_def;
TF_ASSERT_OK(
QuantizePlaceholders(float_graph_def, context, &quantized_graph_def));
Tensor input_tensor(DT_FLOAT, {});
input_tensor.flat<float>()(0) = 5.0f;
TestQuantizedVersusFloatGraph(
float_graph_def, {{"placeholder_op", input_tensor}}, {"relu_op"});
std::map<string, const NodeDef*> node_map;
MapNamesToNodes(quantized_graph_def, &node_map);
EXPECT_NE("placeholder_op", node_map.at("relu_op")->input(0));
EXPECT_EQ("Placeholder", node_map.at("placeholder_op")->op());
EXPECT_EQ(DT_QUINT8,
node_map.at("placeholder_op")->attr().at("dtype").type());
}
void TestInputRange() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
const int width = 100;
Tensor a_data(DT_FLOAT, TensorShape({1, width}));
test::FillIota<float>(&a_data, 1.0f);
Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));
Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
Output bias_add =
BiasAdd(root.WithOpName("bias_add"), a_const, placeholder);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
Tensor placeholder_tensor(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&placeholder_tensor, 1.0f);
TestGraphWithInputRange(graph_def, {{"placeholder", placeholder_tensor}},
{"bias_add"}, 0.0f, 100.0f);
}
void TestFallbackRange() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
const int width = 100;
Tensor a_data(DT_FLOAT, TensorShape({1, width}));
test::FillIota<float>(&a_data, 1.0f);
Output a_const = Const(root.WithOpName("a"), Input::Initializer(a_data));
Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
Output bias_add =
BiasAdd(root.WithOpName("bias_add"), a_const, placeholder);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
Tensor placeholder_tensor(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&placeholder_tensor, 1.0f);
GraphDef quantized_graph_def;
TestGraphWithFallbackRange(graph_def, {{"placeholder", placeholder_tensor}},
{"bias_add"}, 0.0f, 200.0f,
&quantized_graph_def);
for (const NodeDef& node : quantized_graph_def.node()) {
EXPECT_NE("RequantizationRange", node.op());
}
}
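// Verifies that ConvertFakeQuantsToRequantize eliminates every
// FakeQuantWithMinMaxVars node without changing the graph's output.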
void TestConvertFakeQuantsToRequantize() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor input_tensor(DT_FLOAT, TensorShape({1, 1, 2, 6}));
test::FillIota<float>(&input_tensor, 1);
Output input_op =
Const(root.WithOpName("input_op"), Input::Initializer(input_tensor));
Tensor offset_tensor(DT_FLOAT, TensorShape({6}));
test::FillIota<float>(&offset_tensor, 1);
Output offset_op =
Const(root.WithOpName("offset_op"), Input::Initializer(offset_tensor));
Output bias_add_op =
BiasAdd(root.WithOpName("bias_add_op"), input_op, offset_op);
Tensor fake_quant_min_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&fake_quant_min_tensor, {0.0f});
Output fake_quant_min_op = Const(root.WithOpName("fake_quant_min_op"),
Input::Initializer(fake_quant_min_tensor));
Tensor fake_quant_max_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&fake_quant_max_tensor, {18.0f});
Output fake_quant_max_op = Const(root.WithOpName("fake_quant_max_op"),
Input::Initializer(fake_quant_max_tensor));
Output fake_quant_op =
FakeQuantWithMinMaxVars(root.WithOpName("fake_quant_op"), bias_add_op,
fake_quant_min_op, fake_quant_max_op);
GraphDef float_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
GraphDef converted_graph_def;
TestTransformedVersusFloatGraph(ConvertFakeQuantsToRequantize,
float_graph_def, {}, {}, {"fake_quant_op"},
{}, 1.0, &converted_graph_def);
for (const NodeDef& node : converted_graph_def.node()) {
EXPECT_NE("FakeQuantWithMinMaxVars", node.op());
}
}
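// Builds two back-to-back requantization stages (Requantize -> Dequantize ->
// QuantizeV2 -> Requantize) and expects MergeAdjacentRequantizes to collapse
// them into a single Requantize.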
void TestMergeAdjacentRequantizes() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor input_tensor(DT_QUINT8, TensorShape({1, 1, 2, 6}));
test::FillValues<quint8>(&input_tensor,
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
Output input_op =
Const(root.WithOpName("input_op"), Input::Initializer(input_tensor));
Tensor input_min_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&input_min_tensor, {0.0f});
Output input_min_op = Const(root.WithOpName("input_min_op"),
Input::Initializer(input_min_tensor));
Tensor input_max_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&input_max_tensor, {255.0f});
Output input_max_op = Const(root.WithOpName("input_max_op"),
Input::Initializer(input_max_tensor));
Tensor offset_tensor(DT_QUINT8, TensorShape({6}));
test::FillValues<quint8>(&offset_tensor, {1, 2, 3, 4, 5, 6});
Output offset_op =
Const(root.WithOpName("offset_op"), Input::Initializer(offset_tensor));
Tensor offset_min_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&offset_min_tensor, {0.0f});
Output offset_min_op = Const(root.WithOpName("offset_min_op"),
Input::Initializer(offset_min_tensor));
Tensor offset_max_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&offset_max_tensor, {255.0f});
Output offset_max_op = Const(root.WithOpName("offset_max_op"),
Input::Initializer(offset_max_tensor));
QuantizedBiasAdd quantized_bias_add_op(
root.WithOpName("quantized_bias_add_op"), input_op, offset_op,
input_min_op, input_max_op, offset_min_op, offset_max_op, DT_QINT32);
RequantizationRange requantization_range_op(
root.WithOpName("requantization_range_op"),
quantized_bias_add_op.output, quantized_bias_add_op.min_out,
quantized_bias_add_op.max_out);
Requantize requantize_op(
root.WithOpName("requantize_op"), quantized_bias_add_op.output,
quantized_bias_add_op.min_out, quantized_bias_add_op.max_out,
requantization_range_op.output_min, requantization_range_op.output_max,
DT_QUINT8);
Output dequantize_op =
Dequantize(root.WithOpName("dequantize_op"), requantize_op.output,
requantize_op.output_min, requantize_op.output_max,
Dequantize::Attrs().Mode("MIN_FIRST"));
Tensor quantize_min_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&quantize_min_tensor, {0.0f});
Output quantize_min_op = Const(root.WithOpName("quantize_min_op"),
Input::Initializer(quantize_min_tensor));
Tensor quantize_max_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&quantize_max_tensor, {255.0f});
Output quantize_max_op = Const(root.WithOpName("quantize_max_op"),
Input::Initializer(quantize_max_tensor));
QuantizeV2 quantize_op(root.WithOpName("quantize_op"), dequantize_op,
quantize_min_op, quantize_max_op, DT_QINT32,
QuantizeV2::Attrs().Mode("MIN_FIRST"));
Tensor fake_requantize_min_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&fake_requantize_min_tensor, {0.0f});
Output fake_requantize_min_op =
Const(root.WithOpName("fake_requantize_min_op"),
Input::Initializer(fake_requantize_min_tensor));
Tensor fake_requantize_max_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&fake_requantize_max_tensor, {255.0f});
Output fake_requantize_max_op =
Const(root.WithOpName("fake_requantize_max_op"),
Input::Initializer(fake_requantize_max_tensor));
Requantize fake_requantize_op(
root.WithOpName("fake_requantize_op"), quantize_op.output,
quantize_op.output_min, quantize_op.output_max, fake_requantize_min_op,
fake_requantize_max_op, DT_QUINT8);
Output fake_dequantize_op = Dequantize(
root.WithOpName("fake_dequantize_op"), fake_requantize_op.output,
fake_requantize_op.output_min, fake_requantize_op.output_max,
Dequantize::Attrs().Mode("MIN_FIRST"));
GraphDef float_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
GraphDef converted_graph_def;
TestTransformedVersusFloatGraph(MergeAdjacentRequantizes, float_graph_def,
{}, {}, {"fake_dequantize_op"}, {}, 1.0,
&converted_graph_def);
int requantize_count = 0;
for (const NodeDef& node : converted_graph_def.node()) {
if (node.op() == "Requantize") {
++requantize_count;
}
}
EXPECT_EQ(1, requantize_count);
}
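// End-to-end variant: runs the full QuantizeNodes pipeline over a graph
// containing a FakeQuantWithMinMaxVars and expects the fake quant to vanish
// with exactly one Requantize left behind.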
void TestConvertFakeQuantsEndToEnd() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor input_tensor(DT_FLOAT, TensorShape({1, 1, 2, 6}));
test::FillIota<float>(&input_tensor, 1);
Output input_op =
Const(root.WithOpName("input_op"), Input::Initializer(input_tensor));
Tensor offset_tensor(DT_FLOAT, TensorShape({6}));
test::FillIota<float>(&offset_tensor, 1);
Output offset_op =
Const(root.WithOpName("offset_op"), Input::Initializer(offset_tensor));
Output bias_add_op =
BiasAdd(root.WithOpName("bias_add_op"), input_op, offset_op);
Tensor fake_quant_min_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&fake_quant_min_tensor, {0.0f});
Output fake_quant_min_op = Const(root.WithOpName("fake_quant_min_op"),
Input::Initializer(fake_quant_min_tensor));
Tensor fake_quant_max_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&fake_quant_max_tensor, {18.0f});
Output fake_quant_max_op = Const(root.WithOpName("fake_quant_max_op"),
Input::Initializer(fake_quant_max_tensor));
Output fake_quant_op =
FakeQuantWithMinMaxVars(root.WithOpName("fake_quant_op"), bias_add_op,
fake_quant_min_op, fake_quant_max_op);
GraphDef float_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
GraphDef converted_graph_def;
TestTransformedVersusFloatGraph(QuantizeNodes, float_graph_def, {}, {},
{"fake_quant_op"}, {}, 1.0,
&converted_graph_def);
int requantize_count = 0;
for (const NodeDef& node : converted_graph_def.node()) {
EXPECT_NE("FakeQuantWithMinMaxVars", node.op());
if (node.op() == "Requantize") {
++requantize_count;
}
}
EXPECT_EQ(1, requantize_count);
}
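// Expects HoistFakeQuants to move the FakeQuantWithMinMaxVars from after the
// MaxPool to just before the Relu; the output node keeps the name
// fake_quant_op but becomes the MaxPool itself.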
void TestHoistFakeQuants() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor input_tensor(DT_FLOAT, TensorShape({1, 1, 2, 6}));
test::FillIota<float>(&input_tensor, 1);
Output input_op =
Const(root.WithOpName("input_op"), Input::Initializer(input_tensor));
Tensor offset_tensor(DT_FLOAT, TensorShape({6}));
test::FillIota<float>(&offset_tensor, 1);
Output offset_op =
Const(root.WithOpName("offset_op"), Input::Initializer(offset_tensor));
Output bias_add_op =
BiasAdd(root.WithOpName("bias_add_op"), input_op, offset_op);
Output relu_op = Relu(root.WithOpName("relu_op"), bias_add_op);
Output max_pool_op = MaxPool(root.WithOpName("max_pool_op"), relu_op,
{1, 2, 2, 1}, {1, 1, 1, 1}, "SAME");
Tensor fake_quant_min_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&fake_quant_min_tensor, {0.0f});
Output fake_quant_min_op = Const(root.WithOpName("fake_quant_min_op"),
Input::Initializer(fake_quant_min_tensor));
Tensor fake_quant_max_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&fake_quant_max_tensor, {18.0f});
Output fake_quant_max_op = Const(root.WithOpName("fake_quant_max_op"),
Input::Initializer(fake_quant_max_tensor));
Output fake_quant_op =
FakeQuantWithMinMaxVars(root.WithOpName("fake_quant_op"), max_pool_op,
fake_quant_min_op, fake_quant_max_op);
GraphDef float_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
GraphDef converted_graph_def;
TestTransformedVersusFloatGraph(HoistFakeQuants, float_graph_def, {}, {},
{"fake_quant_op"}, {}, 1.0,
&converted_graph_def);
std::map<string, const NodeDef*> node_map;
MapNamesToNodes(converted_graph_def, &node_map);
EXPECT_EQ("MaxPool", node_map.at("fake_quant_op")->op());
EXPECT_EQ("FakeQuantWithMinMaxVars",
node_map.at(node_map.at("relu_op")->input(0))->op());
}
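// Creates two QuantizeV2 nodes with identical inputs (quantize_op2
// deliberately reuses quantize_min1/quantize_max1 so the nodes are true
// duplicates) and expects MergeDuplicateNodes to leave exactly one.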
void TestMergeDuplicateQuantizes() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor quantized_tensor(DT_QUINT8, TensorShape({}));
test::FillValues<quint8>(&quantized_tensor, {0});
Output quantized_op = Const(root.WithOpName("quantized_op"),
Input::Initializer(quantized_tensor));
Tensor quantized_min_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&quantized_min_tensor, {2.0f});
Output quantized_min_op = Const(root.WithOpName("quantized_min_op"),
Input::Initializer(quantized_min_tensor));
Tensor quantized_max_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&quantized_max_tensor, {2.0f});
Output quantized_max_op = Const(root.WithOpName("quantized_max_op"),
                                  Input::Initializer(quantized_max_tensor));
Output dequantize_op =
Dequantize(root.WithOpName("dequantize_op"), quantized_op,
quantized_min_op, quantized_max_op);
Tensor quantize_reshape_dims1_tensor(DT_INT32, TensorShape({1}));
test::FillValues<int32>(&quantize_reshape_dims1_tensor, {-1});
Output quantize_reshape_dims1 =
Const(root.WithOpName("dequantize_reshape_dims1"),
Input::Initializer(quantize_reshape_dims1_tensor));
Tensor quantize_reduction_dims1_tensor(DT_INT32, TensorShape({}));
test::FillValues<int32>(&quantize_reduction_dims1_tensor, {0});
Output quantize_reduction_dims1 =
Const(root.WithOpName("quantize_reduction_dims1"),
Input::Initializer(quantize_reduction_dims1_tensor));
Output quantize_reshape1 = Reshape(root.WithOpName("quantize_reshape1"),
dequantize_op, quantize_reshape_dims1);
Output quantize_min1 =
Min(root.WithOpName("quantize_min1"), quantize_reshape1,
quantize_reduction_dims1, Min::Attrs().KeepDims(false));
Output quantize_max1 =
Max(root.WithOpName("quantize_max1"), quantize_reshape1,
quantize_reduction_dims1, Max::Attrs().KeepDims(false));
QuantizeV2 quantize_op1(root.WithOpName("quantize_op1"), dequantize_op,
quantize_min1, quantize_max1, DT_QUINT8,
QuantizeV2::Attrs().Mode("MIN_FIRST"));
Tensor quantize_reshape_dims2_tensor(DT_INT32, TensorShape({1}));
test::FillValues<int32>(&quantize_reshape_dims2_tensor, {-1});
Output quantize_reshape_dims2 =
Const(root.WithOpName("dequantize_reshape_dims2"),
Input::Initializer(quantize_reshape_dims2_tensor));
Tensor quantize_reduction_dims2_tensor(DT_INT32, TensorShape({}));
test::FillValues<int32>(&quantize_reduction_dims2_tensor, {0});
Output quantize_reduction_dims2 =
Const(root.WithOpName("quantize_reduction_dims2"),
Input::Initializer(quantize_reduction_dims2_tensor));
Output quantize_reshape2 = Reshape(root.WithOpName("quantize_reshape2"),
dequantize_op, quantize_reshape_dims2);
Output quantize_min2 =
Min(root.WithOpName("quantize_min2"), quantize_reshape2,
quantize_reduction_dims2, Min::Attrs().KeepDims(false));
Output quantize_max2 =
Max(root.WithOpName("quantize_max2"), quantize_reshape2,
quantize_reduction_dims2, Max::Attrs().KeepDims(false));
QuantizeV2 quantize_op2(root.WithOpName("quantize_op2"), dequantize_op,
quantize_min1, quantize_max1, DT_QUINT8,
QuantizeV2::Attrs().Mode("MIN_FIRST"));
Output final_dequantize1 =
Dequantize(root.WithOpName("final_dequantize1"), quantize_op1.output,
quantize_op1.output_min, quantize_op1.output_max);
Output final_dequantize2 =
Dequantize(root.WithOpName("final_dequantize2"), quantize_op2.output,
quantize_op2.output_min, quantize_op2.output_max);
Output add_op =
Add(root.WithOpName("add_op"), final_dequantize1, final_dequantize2);
GraphDef float_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
GraphDef merged_graph_def;
TestTransformedVersusFloatGraph(MergeDuplicateNodes, float_graph_def, {},
{}, {"add_op"}, {}, 1.0, &merged_graph_def);
std::map<string, int> op_map;
for (const NodeDef& node : merged_graph_def.node()) {
++op_map[node.op()];
}
EXPECT_EQ(1, op_map["QuantizeV2"]);
}
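// The identical constants a_op and b_op should be merged into a single
// surviving node that feeds both inputs of the Add, while the distinct c_op
// is left alone.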
void TestMergeDuplicateConsts() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
const int width = 10;
Tensor a_tensor(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&a_tensor, 1.0f);
Output a_op = Const(root.WithOpName("a_op"), Input::Initializer(a_tensor));
Tensor b_tensor(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&b_tensor, 1.0f);
Output b_op = Const(root.WithOpName("b_op"), Input::Initializer(b_tensor));
Output add_op = Add(root.WithOpName("add_op"), a_op, b_op);
Tensor c_tensor(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&c_tensor, 2.0f);
Output c_op = Const(root.WithOpName("c_op"), Input::Initializer(c_tensor));
Output mul_op = Mul(root.WithOpName("mul_op"), add_op, c_op);
GraphDef float_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
GraphDef merged_graph_def;
TestTransformedVersusFloatGraph(MergeDuplicateNodes, float_graph_def, {},
{}, {"mul_op"}, {}, 1.0, &merged_graph_def);
std::map<string, const NodeDef*> node_map;
MapNamesToNodes(merged_graph_def, &node_map);
EXPECT_EQ(1, (node_map.count("a_op") + node_map.count("b_op")));
string remaining_const;
if (node_map.count("a_op")) {
remaining_const = "a_op";
} else {
remaining_const = "b_op";
}
EXPECT_EQ(remaining_const, node_map["add_op"]->input(0));
EXPECT_EQ(remaining_const, node_map["add_op"]->input(1));
EXPECT_EQ(1, node_map.count("c_op"));
EXPECT_EQ("add_op", node_map["mul_op"]->input(0));
EXPECT_EQ("c_op", node_map["mul_op"]->input(1));
}
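// Like TestMergeDuplicateConsts, but the duplicate constants are followed by
// duplicate Relus; both levels should collapse to single nodes.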
void TestMergeDuplicatesNested() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
const int width = 10;
Tensor a_tensor(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&a_tensor, 1.0f);
Output a_op = Const(root.WithOpName("a_op"), Input::Initializer(a_tensor));
Output a_relu_op = Relu(root.WithOpName("a_relu_op"), a_op);
Tensor b_tensor(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&b_tensor, 1.0f);
Output b_op = Const(root.WithOpName("b_op"), Input::Initializer(b_tensor));
Output b_relu_op = Relu(root.WithOpName("b_relu_op"), b_op);
Output add_op = Add(root.WithOpName("add_op"), a_relu_op, b_relu_op);
Tensor c_tensor(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&c_tensor, 2.0f);
Output c_op = Const(root.WithOpName("c_op"), Input::Initializer(c_tensor));
Output mul_op = Mul(root.WithOpName("mul_op"), add_op, c_op);
GraphDef float_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
GraphDef merged_graph_def;
TestTransformedVersusFloatGraph(MergeDuplicateNodes, float_graph_def, {},
{}, {"mul_op"}, {}, 1.0, &merged_graph_def);
std::map<string, const NodeDef*> node_map;
MapNamesToNodes(merged_graph_def, &node_map);
EXPECT_EQ(1, (node_map.count("a_op") + node_map.count("b_op")));
EXPECT_EQ(1, (node_map.count("a_relu_op") + node_map.count("b_relu_op")));
string remaining_relu;
if (node_map.count("a_relu_op")) {
remaining_relu = "a_relu_op";
} else {
remaining_relu = "b_relu_op";
}
EXPECT_EQ(remaining_relu, node_map["add_op"]->input(0));
EXPECT_EQ(remaining_relu, node_map["add_op"]->input(1));
EXPECT_EQ(1, node_map.count("c_op"));
EXPECT_EQ("add_op", node_map["mul_op"]->input(0));
EXPECT_EQ("c_op", node_map["mul_op"]->input(1));
}
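// Duplicates that are graph outputs (mul_op1) or test inputs (a_op) must be
// preserved; only the internal duplicates mul_op2/mul_op3 may be merged.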
void TestMergeDuplicatesInOut() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
const int width = 10;
Tensor a_tensor(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&a_tensor, 1.0f);
Output a_op = Const(root.WithOpName("a_op"), Input::Initializer(a_tensor));
Output a_relu_op = Relu(root.WithOpName("a_relu_op"), a_op);
Tensor b_tensor(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&b_tensor, 1.0f);
Output b_op = Const(root.WithOpName("b_op"), Input::Initializer(b_tensor));
Output b_relu_op = Relu(root.WithOpName("b_relu_op"), b_op);
Output add_op = Add(root.WithOpName("add_op"), a_relu_op, b_relu_op);
Tensor c_tensor(DT_FLOAT, TensorShape({width}));
test::FillIota<float>(&c_tensor, 2.0f);
Output c_op = Const(root.WithOpName("c_op"), Input::Initializer(c_tensor));
Output mul_op1 = Mul(root.WithOpName("mul_op1"), add_op, c_op);
Output mul_op2 = Mul(root.WithOpName("mul_op2"), add_op, c_op);
Output mul_op3 = Mul(root.WithOpName("mul_op3"), add_op, c_op);
Output final_mul_op =
Mul(root.WithOpName("final_mul_op"), mul_op2, mul_op3);
GraphDef float_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
GraphDef merged_graph_def;
TestTransformedVersusFloatGraph(MergeDuplicateNodes, float_graph_def,
{{"a_op", a_tensor}}, {{"a_op", a_tensor}},
{"mul_op1", "final_mul_op"}, {}, 1.0,
&merged_graph_def);
std::map<string, const NodeDef*> node_map;
MapNamesToNodes(merged_graph_def, &node_map);
EXPECT_EQ(1, node_map.count("a_op"));
EXPECT_EQ(1, node_map.count("b_op"));
EXPECT_EQ(1, node_map.count("a_relu_op"));
EXPECT_EQ(1, node_map.count("b_relu_op"));
EXPECT_EQ(1, node_map.count("mul_op1"));
EXPECT_EQ(1, node_map.count("final_mul_op"));
EXPECT_EQ(1, (node_map.count("mul_op2") + node_map.count("mul_op3")));
string remaining_mul;
if (node_map.count("mul_op2")) {
remaining_mul = "mul_op2";
} else {
remaining_mul = "mul_op3";
}
EXPECT_EQ(remaining_mul, node_map["final_mul_op"]->input(0));
EXPECT_EQ(remaining_mul, node_map["final_mul_op"]->input(1));
EXPECT_EQ(1, node_map.count("c_op"));
EXPECT_EQ("add_op", node_map["mul_op1"]->input(0));
EXPECT_EQ("c_op", node_map["mul_op1"]->input(1));
}
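// Non-float tensors must be excluded from quantization: the int32 Reshape
// stays a plain Reshape, while the float Reshape is converted and its result
// ends up read through a Dequantize.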
void TestExcludeNonFloat() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor int_constant_tensor(DT_INT32, TensorShape({4, 5}));
test::FillIota<int32>(&int_constant_tensor, 1);
Output int_constant = Const(root.WithOpName("int_constant"),
Input::Initializer(int_constant_tensor));
Tensor float_constant_tensor(DT_FLOAT, TensorShape({4, 5}));
test::FillIota<float>(&float_constant_tensor, 2.0f);
Output float_constant = Const(root.WithOpName("float_constant"),
Input::Initializer(float_constant_tensor));
Output excluded_reshape_op =
Reshape(root.WithOpName("excluded_reshape_op"), int_constant, {10, 2});
Output included_reshape_op = Reshape(root.WithOpName("included_reshape_op"),
float_constant, {10, 2});
Output excluded_relu_op =
Relu(root.WithOpName("excluded_relu_op"), excluded_reshape_op);
Output excluded_float_caster = Cast(
root.WithOpName("excluded_float_caster"), excluded_relu_op, DT_FLOAT);
Output included_relu_op =
Relu(root.WithOpName("included_relu_op"), included_reshape_op);
GraphDef float_graph_def;
TF_ASSERT_OK(root.ToGraphDef(&float_graph_def));
GraphDef quantized_graph_def;
TestTransformedVersusFloatGraph(
QuantizeNodes, float_graph_def, {}, {},
{"excluded_float_caster", "included_relu_op"}, {}, 1.0,
&quantized_graph_def);
std::map<string, const NodeDef*> node_map;
MapNamesToNodes(quantized_graph_def, &node_map);
ASSERT_EQ(1, node_map.count("excluded_reshape_op"));
EXPECT_EQ("Reshape", node_map.at("excluded_reshape_op")->op());
ASSERT_EQ(1, node_map.count("included_reshape_op"));
EXPECT_EQ("Dequantize", node_map.at("included_reshape_op")->op());
}
};
TEST_F(QuantizeNodesTest, TestIgnoreOps) {
TestIgnoreOps({});
TestIgnoreOps({"MatMul"});
TestIgnoreOps({"MatMul", "Mul"});
}
TEST_F(QuantizeNodesTest, TestQuantizeMatMulTiny) { TestQuantizeMatMulTiny(); }
TEST_F(QuantizeNodesTest, TestQuantizeMatMulSmall) {
TestQuantizeMatMulSmall();
}
TEST_F(QuantizeNodesTest, TestQuantizeMul) { TestQuantizeMul(); }
TEST_F(QuantizeNodesTest, TestQuantizeAdd) { TestQuantizeAdd(); }
TEST_F(QuantizeNodesTest, TestOddPaddingProblem) {
TestQuantizeConv2D(1, 4, 4, 1, 3, 1, 2, "SAME",
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
{1, 2, 3, 4, 5, 6, 7, 8, 9});
}
TEST_F(QuantizeNodesTest, TestQuantizeConv2D) {
TestQuantizeConv2D(1, 4, 3, 1, 3, 1, 1, "SAME",
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
{1, 4, 7, 2, 5, 8, 3, 6, 9});
}
TEST_F(QuantizeNodesTest, TestQuantizeBiasAdd) { TestQuantizeBiasAdd(); }
TEST_F(QuantizeNodesTest, TestQuantizeConcat) { TestQuantizeConcat(); }
TEST_F(QuantizeNodesTest, TestQuantizeRelu) { TestQuantizeRelu(); }
TEST_F(QuantizeNodesTest, TestQuantizeRelu6) { TestQuantizeRelu6(); }
TEST_F(QuantizeNodesTest, TestQuantizeMaxPool) { TestQuantizeMaxPool(); }
TEST_F(QuantizeNodesTest, TestQuantizeAvgPool) { TestQuantizeAvgPool(); }
TEST_F(QuantizeNodesTest, TestQuantizeReshape) { TestQuantizeReshape(); }
TEST_F(QuantizeNodesTest, TestQuantizeResizeBilinear) {
TestQuantizeResizeBilinear();
}
TEST_F(QuantizeNodesTest, TestRemoveRedundantQuantization) {
TestRemoveRedundantQuantization();
}
TEST_F(QuantizeNodesTest, TestRemoveRedundantQuantizationWithBiasAdd) {
TestRemoveRedundantQuantizationWithBiasAdd();
}
TEST_F(QuantizeNodesTest, TestRemoveRedundantQuantizationWithMultipleOutputs) {
TestRemoveRedundantQuantizationWithMultipleOutputs();
}
TEST_F(QuantizeNodesTest, TestQuantizePlaceholders) {
TestQuantizePlaceholders();
}
TEST_F(QuantizeNodesTest, TestInputRange) { TestInputRange(); }
TEST_F(QuantizeNodesTest, TestFallbackRange) { TestFallbackRange(); }
TEST_F(QuantizeNodesTest, TestConvertFakeQuantsToRequantize) {
TestConvertFakeQuantsToRequantize();
}
TEST_F(QuantizeNodesTest, TestMergeAdjacentRequantizes) {
TestMergeAdjacentRequantizes();
}
TEST_F(QuantizeNodesTest, TestConvertFakeQuantsEndToEnd) {
TestConvertFakeQuantsEndToEnd();
}
TEST_F(QuantizeNodesTest, TestHoistFakeQuants) { TestHoistFakeQuants(); }
TEST_F(QuantizeNodesTest, TestMergeDuplicateQuantizes) {
TestMergeDuplicateQuantizes();
}
TEST_F(QuantizeNodesTest, TestMergeDuplicateConsts) {
TestMergeDuplicateConsts();
}
TEST_F(QuantizeNodesTest, TestMergeDuplicatesNested) {
TestMergeDuplicatesNested();
}
TEST_F(QuantizeNodesTest, TestMergeDuplicatesInOut) {
TestMergeDuplicatesInOut();
}
TEST_F(QuantizeNodesTest, TestExcludeNonFloat) { TestExcludeNonFloat(); }
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/quantize_nodes.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/quantize_nodes_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2d10528f-a1a6-4888-a5d3-6ca40943cf7b | cpp | google/quiche | quiche_lower_case_string | quiche/common/platform/api/quiche_lower_case_string.h | quiche/common/platform/api/quiche_lower_case_string_test.cc | #ifndef QUICHE_COMMON_PLATFORM_API_QUICHE_LOWER_CASE_STRING_H_
#define QUICHE_COMMON_PLATFORM_API_QUICHE_LOWER_CASE_STRING_H_
#include "quiche_platform_impl/quiche_lower_case_string_impl.h"
namespace quiche {
using QuicheLowerCaseString = QuicheLowerCaseStringImpl;
}
#endif | #include "quiche/common/platform/api/quiche_lower_case_string.h"
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace quiche::test {
namespace {
TEST(QuicheLowerCaseString, Basic) {
QuicheLowerCaseString empty("");
EXPECT_EQ("", empty.get());
QuicheLowerCaseString from_lower_case("foo");
EXPECT_EQ("foo", from_lower_case.get());
QuicheLowerCaseString from_mixed_case("BaR");
EXPECT_EQ("bar", from_mixed_case.get());
const absl::string_view kData = "FooBar";
QuicheLowerCaseString from_string_view(kData);
EXPECT_EQ("foobar", from_string_view.get());
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/platform/api/quiche_lower_case_string.h | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/platform/api/quiche_lower_case_string_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
476923f3-b0e5-4116-ab25-59dc056d648d | cpp | tensorflow/tensorflow | grpc_dispatcher_impl | tensorflow/core/data/service/grpc_dispatcher_impl.cc | tensorflow/core/data/service/grpc_dispatcher_impl_test.cc | #include "tensorflow/core/data/service/grpc_dispatcher_impl.h"
#include "grpcpp/server_context.h"
#include "tensorflow/core/data/service/export.pb.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_util.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
namespace tensorflow {
namespace data {
using ::grpc::ServerBuilder;
using ::grpc::ServerContext;
GrpcDispatcherImpl::GrpcDispatcherImpl(
const experimental::DispatcherConfig& config, ServerBuilder& server_builder)
: impl_(config) {
server_builder.RegisterService(this);
VLOG(1) << "Registered data service dispatcher";
}
Status GrpcDispatcherImpl::Start() { return impl_.Start(); }
void GrpcDispatcherImpl::Stop() { impl_.Stop(); }
size_t GrpcDispatcherImpl::NumActiveIterations() {
return impl_.NumActiveIterations();
}
DispatcherStateExport GrpcDispatcherImpl::ExportState() const {
return impl_.ExportState();
}
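// Each RPC handler simply forwards the request to the wrapped dispatcher
// implementation's method of the same name and converts the resulting
// Status into a grpc::Status.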
#define HANDLER(method) \
grpc::Status GrpcDispatcherImpl::method(ServerContext* context, \
const method##Request* request, \
method##Response* response) { \
return ToGrpcStatus(impl_.method(request, response)); \
}
HANDLER(WorkerHeartbeat);
HANDLER(WorkerUpdate);
HANDLER(GetDatasetDef);
HANDLER(GetSplit);
HANDLER(GetVersion);
HANDLER(GetOrRegisterDataset);
HANDLER(ReleaseIterationClient);
HANDLER(MaybeRemoveTask);
HANDLER(GetOrCreateJob);
HANDLER(GetOrCreateIteration);
HANDLER(ClientHeartbeat);
HANDLER(GetWorkers);
HANDLER(GetDataServiceMetadata);
HANDLER(GetDataServiceConfig);
HANDLER(Snapshot);
HANDLER(GetSnapshotSplit);
HANDLER(GetSnapshotStreams);
HANDLER(DisableCompressionAtRuntime);
#undef HANDLER
}
} | #include "tensorflow/core/data/service/grpc_dispatcher_impl.h"
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include "grpcpp/channel.h"
#include "grpcpp/client_context.h"
#include "grpcpp/create_channel.h"
#include "grpcpp/security/credentials.h"
#include "grpcpp/support/channel_arguments.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/credentials_factory.h"
#include "tensorflow/core/data/service/dispatcher.pb.h"
#include "tensorflow/core/data/service/server_lib.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::grpc::Channel;
using ::grpc::ChannelArguments;
using ::grpc::ChannelCredentials;
using ::grpc::ClientContext;
constexpr const char kHostAddress[] = "localhost";
constexpr const char kProtocol[] = "grpc";
class GrpcDispatcherImplTest : public ::testing::Test {
protected:
void SetUp() override {
TF_ASSERT_OK(SetUpDispatcherServer());
TF_ASSERT_OK(SetUpDispatcherClientStub());
}
Status SetUpDispatcherServer() {
experimental::DispatcherConfig config;
config.set_protocol(kProtocol);
TF_RETURN_IF_ERROR(NewDispatchServer(config, dispatcher_server_));
return dispatcher_server_->Start();
}
Status SetUpDispatcherClientStub() {
std::shared_ptr<ChannelCredentials> credentials;
TF_RETURN_IF_ERROR(
CredentialsFactory::CreateClientCredentials(kProtocol, &credentials));
ChannelArguments args;
args.SetMaxReceiveMessageSize(std::numeric_limits<int32>::max());
args.SetInt(GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL, true);
std::shared_ptr<Channel> channel =
::grpc::CreateCustomChannel(GetDispatcherAddress(), credentials, args);
dispatcher_client_stub_ = DispatcherService::NewStub(channel);
return absl::OkStatus();
}
std::string GetDispatcherAddress() const {
return absl::StrCat(kHostAddress, ":", dispatcher_server_->BoundPort());
}
std::unique_ptr<DispatchGrpcDataServer> dispatcher_server_;
std::unique_ptr<DispatcherService::Stub> dispatcher_client_stub_;
};
TEST_F(GrpcDispatcherImplTest, GrpcTest) {
ClientContext ctx;
GetVersionRequest req;
GetVersionResponse resp;
TF_ASSERT_OK(
FromGrpcStatus(dispatcher_client_stub_->GetVersion(&ctx, req, &resp)));
EXPECT_EQ(resp.version(), kDataServiceVersion);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/grpc_dispatcher_impl.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/grpc_dispatcher_impl_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
145e02f9-e296-4fae-91d2-7cfe3e2ddd1e | cpp | tensorflow/tensorflow | mkl_conv_ops | tensorflow/core/kernels/mkl/mkl_conv_ops.cc | tensorflow/core/kernels/mkl/mkl_conv_ops_test.cc | #ifdef INTEL_MKL
#include "tensorflow/core/kernels/mkl/mkl_conv_ops.h"
#include <algorithm>
#include <map>
#include <string>
#include <unordered_map>
#include "absl/strings/str_join.h"
#include "tensorflow/core/kernels/mkl/mkl_kernel_util.h"
#include "tensorflow/core/kernels/mkl/mkl_quantized_conv_ops.h"
#include "tensorflow/core/kernels/no_op.h"
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
#include "tensorflow/core/platform/mutex.h"
#endif
using dnnl::convolution_forward;
using dnnl::prop_kind;
using dnnl::stream;
using ConvFwdPd = dnnl::convolution_forward::primitive_desc;
using ReorderPd = dnnl::reorder::primitive_desc;
namespace tensorflow {
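// The macro shims below bridge API differences between oneDNN v2.x and v3.x
// builds (scale handling, eltwise post-op signatures, layout setters and
// data-type accessors), letting the rest of the file be written once.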
#ifndef ENABLE_ONEDNN_V3
#define APPEND_DEPTHWISE(wei_dt, bias_dt, dst_dt, kernel, stride, padding, \
scales_mask, scales) \
append_dw(wei_dt, bias_dt, dst_dt, kernel, stride, padding, scales_mask, \
scales)
#define APPEND_ELTWISE(scale, alg, alpha, beta) \
append_eltwise(scale, alg, alpha, beta)
#define GET_DATA_TYPE data_type()
#define SET_FUSE_ACTIVATION_FOR_RELU6 \
set_fuse_activation(true, dnnl::algorithm::eltwise_bounded_relu, 6.0)
#define SET_MKL_LAYOUT(md) SetMklLayout(&md)
#define OUTPUT_SCALE_DCHECK (post_op_param.name == "output_scale")
#define TSCALED_BIAS Tbias
#define SCALE scales
#define SUMMAND_SCALE_U8(summand_range, output_range) \
summand_range / output_range
#define SUMMAND_SCALE_S8(summand_range, output_range) \
255.0f * summand_range / (output_range * 127.0f)
#else
#define APPEND_DEPTHWISE(wei_dt, bias_dt, dst_dt, kernel, stride, padding, \
scales_mask, scales) \
append_dw(wei_dt, bias_dt, dst_dt, kernel, stride, padding)
#define APPEND_ELTWISE(scale, alg, alpha, beta) \
append_eltwise(alg, alpha, beta); \
(void)scale
#define GET_DATA_TYPE get_data_type()
#define SET_FUSE_ACTIVATION_FOR_RELU6 \
set_fuse_activation(true, dnnl::algorithm::eltwise_clip, 0.0, 6.0)
#define SET_MKL_LAYOUT(md) SetMklLayout(md)
#define OUTPUT_SCALE_DCHECK \
(post_op_param.name == "src_scale") || \
(post_op_param.name == "wei_scale") || \
(post_op_param.name == "dst_scale")
#define TSCALED_BIAS float
#define SCALE wei_scale
#define SUMMAND_SCALE_U8(summand_range, output_range) summand_range / 255.0f
#define SUMMAND_SCALE_S8(summand_range, output_range) summand_range / 127.0f
#endif
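// Without the OpenMP build (and prior to oneDNN v3), set_data_handle()
// takes an explicit stream argument; FWD_STREAM expands to that extra
// argument only in that configuration and to nothing otherwise.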
#if !defined(ENABLE_ONEDNN_OPENMP) && !defined(ENABLE_ONEDNN_V3)
#define FWD_STREAM , *fwd_stream
#else
#define FWD_STREAM
#endif
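// Supported fused-op combinations for the quantized convolution kernels.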
namespace quantized_fusions {
string none[] = {""};
string bias[] = {"BiasAdd"};
string relu[] = {"Relu"};
string requantize[] = {"Requantize"};
string bias_relu[] = {"BiasAdd", "Relu"};
string bias_requantize[] = {"BiasAdd", "Requantize"};
string relu_requantize[] = {"Relu", "Requantize"};
string bias_relu_requantize[] = {"BiasAdd", "Relu", "Requantize"};
string bias_sum_relu[] = {"BiasAdd", "Sum", "Relu"};
string bias_sum_relu_requantize[] = {"BiasAdd", "Sum", "Relu", "Requantize"};
}
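// Bundles every structural property of a forward convolution (shapes,
// strides, dilations, padding, fused post-ops, data types) needed to build
// and later re-identify a cached primitive.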
struct MklConvFwdParams {
memory::dims src_dims;
memory::dims filter_dims;
memory::dims bias_dims;
memory::dims dst_dims;
memory::dims strides;
memory::dims dilations;
memory::dims padding_left;
memory::dims padding_right;
memory::dims fuse_bn_dims;
MklTensorFormat tf_fmt;
bool native_format;
bool is_depthwise;
bool is_filter_const = false;
string dtypes = string("");
struct PostOpParam {
string name;
dnnl::algorithm alg;
std::vector<float> param;
std::string partial_key;
DataType dtype = DT_INVALID;
};
std::vector<PostOpParam> post_op_params;
MklConvFwdParams(memory::dims src_dims, memory::dims filter_dims,
memory::dims bias_dims, memory::dims dst_dims,
memory::dims strides, memory::dims dilations,
memory::dims padding_left, memory::dims padding_right,
memory::dims fuse_bn_dims, MklTensorFormat tf_fmt,
bool native_format, bool is_depthwise, bool is_filter_const)
: src_dims(src_dims),
filter_dims(filter_dims),
bias_dims(bias_dims),
dst_dims(dst_dims),
strides(strides),
dilations(dilations),
padding_left(padding_left),
padding_right(padding_right),
fuse_bn_dims(fuse_bn_dims),
tf_fmt(tf_fmt),
native_format(native_format),
is_depthwise(is_depthwise),
is_filter_const(is_filter_const) {}
};
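// Owns one cached oneDNN convolution-forward primitive plus the memory
// objects it executes against. User buffers are bound per call, so a single
// instance is reusable across tensors with identical shapes and attributes.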
template <typename Tinput, typename Tfilter, typename Tbias, typename Toutput>
class MklConvFwdPrimitive : public MklPrimitive {
public:
explicit MklConvFwdPrimitive(const MklConvFwdParams& convFwdDims)
: MklPrimitive(engine(engine::kind::cpu, 0)) {
if (context_.conv_fwd == nullptr) {
Setup(convFwdDims);
}
}
~MklConvFwdPrimitive() {}
dnnl::memory::desc GetScratchPadDesc() {
return context_.fwd_pd->scratchpad_desc();
}
void Execute(const Tinput* src_data, const Tfilter* filter_data,
const void* bias_data, const Toutput* dst_data,
const MklConvFwdParams& convFwdDims,
std::shared_ptr<stream> fwd_stream, void* sp_data = nullptr) {
Execute(src_data, filter_data, bias_data, dst_data, nullptr, nullptr,
nullptr, nullptr, convFwdDims, fwd_stream, sp_data);
}
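  // Full execution path: binds the user buffers (plus optional bias, scale,
  // and fused-batch-norm buffers) to the primitive's memory objects, runs
  // all queued primitives on `fwd_stream`, then resets every handle to
  // DummyData so no stale pointers outlive the call.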
void Execute(const Tinput* src_data, const Tfilter* filter_data,
const void* bias_data, const Toutput* dst_data,
const Tinput* bn_scale_data, const Tinput* bn_mean_data,
const Tinput* bn_offset_data, const Tinput* bn_rsqrt_data,
const MklConvFwdParams& convFwdDims,
std::shared_ptr<stream> fwd_stream, void* sp_data) {
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex_lock lock(primitive_execution_mu_);
#endif
context_.src_mem->set_data_handle(
static_cast<void*>(const_cast<Tinput*>(src_data)) FWD_STREAM);
context_.filter_mem->set_data_handle(
static_cast<void*>(const_cast<Tfilter*>(filter_data)) FWD_STREAM);
if (bias_data != nullptr) {
context_.bias_mem->set_data_handle(const_cast<void*>(bias_data)
FWD_STREAM);
}
auto const& post_op_params = convFwdDims.post_op_params;
if (!post_op_params.empty()) {
for (auto const& post_op_param : post_op_params) {
if (post_op_param.name == "src_scale") {
context_.src_scale_mem->set_data_handle(static_cast<void*>(
const_cast<float*>(post_op_param.param.data())) FWD_STREAM);
} else if (post_op_param.name == "wei_scale") {
context_.wei_scale_mem->set_data_handle(static_cast<void*>(
const_cast<float*>(post_op_param.param.data())) FWD_STREAM);
} else if (post_op_param.name == "dst_scale") {
context_.dst_scale_mem->set_data_handle(static_cast<void*>(
const_cast<float*>(post_op_param.param.data())) FWD_STREAM);
}
}
}
if (bn_scale_data != nullptr) {
context_.bn_scale_mem->set_data_handle(
static_cast<void*>(const_cast<Tinput*>(bn_scale_data)) FWD_STREAM);
context_.bn_mean_mem->set_data_handle(
static_cast<void*>(const_cast<Tinput*>(bn_mean_data)) FWD_STREAM);
context_.bn_rsqrt_mem->set_data_handle(
static_cast<void*>(const_cast<Tinput*>(bn_rsqrt_data)) FWD_STREAM);
context_.bn_offset_mem->set_data_handle(
static_cast<void*>(const_cast<Tinput*>(bn_offset_data)) FWD_STREAM);
}
context_.dst_mem->set_data_handle(
static_cast<void*>(const_cast<Toutput*>(dst_data)) FWD_STREAM);
if (sp_data) {
context_.sp_mem->set_data_handle(static_cast<void*>(sp_data) FWD_STREAM);
}
DCHECK_EQ(context_.fwd_primitives.size(),
context_.fwd_primitives_args.size());
for (size_t i = 0; i < context_.fwd_primitives.size(); ++i) {
context_.fwd_primitives.at(i).execute(*fwd_stream,
context_.fwd_primitives_args.at(i));
}
context_.src_mem->set_data_handle(DummyData);
context_.filter_mem->set_data_handle(DummyData);
if (bias_data != nullptr) {
context_.bias_mem->set_data_handle(DummyData);
}
if (bn_scale_data != nullptr) {
context_.bn_scale_mem->set_data_handle(DummyData);
context_.bn_mean_mem->set_data_handle(DummyData);
context_.bn_rsqrt_mem->set_data_handle(DummyData);
context_.bn_offset_mem->set_data_handle(DummyData);
}
context_.dst_mem->set_data_handle(DummyData);
if (sp_data) {
context_.sp_mem->set_data_handle(DummyData);
}
}
void Execute(const Tinput* src_data, const Tfilter* filter_data,
const Toutput* dst_data, const MklConvFwdParams& convFwdDims,
std::shared_ptr<stream> fwd_stream, void* sp_data) {
Execute(src_data, filter_data, nullptr, dst_data, nullptr, nullptr, nullptr,
nullptr, convFwdDims, fwd_stream, sp_data);
}
std::shared_ptr<ConvFwdPd> GetPrimitiveDesc() const {
return context_.fwd_pd;
}
private:
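  // All oneDNN state for one cached primitive: memory objects and
  // descriptors, the primitive descriptor, and the per-primitive argument
  // maps used at execution time.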
struct ConvFwdContext {
std::shared_ptr<dnnl::memory> src_mem;
std::shared_ptr<dnnl::memory> filter_mem;
std::shared_ptr<dnnl::memory> bias_mem;
std::shared_ptr<dnnl::memory> dst_mem;
std::shared_ptr<dnnl::memory> sp_mem;
std::shared_ptr<dnnl::memory> bn_scale_mem;
std::shared_ptr<dnnl::memory> bn_mean_mem;
std::shared_ptr<dnnl::memory> bn_rsqrt_mem;
std::shared_ptr<dnnl::memory> bn_offset_mem;
std::shared_ptr<dnnl::memory> src_scale_mem;
std::shared_ptr<dnnl::memory> wei_scale_mem;
std::shared_ptr<dnnl::memory> dst_scale_mem;
#ifndef ENABLE_ONEDNN_V3
std::shared_ptr<dnnl::convolution_forward::desc> fwd_desc;
#endif
std::shared_ptr<ConvFwdPd> fwd_pd;
std::shared_ptr<dnnl::memory::desc> src_md;
std::shared_ptr<dnnl::memory::desc> filter_md;
std::shared_ptr<dnnl::memory::desc> bias_md;
std::shared_ptr<dnnl::memory::desc> dst_md;
std::shared_ptr<dnnl::memory::desc> bn_scale_md;
std::shared_ptr<dnnl::memory::desc> bn_mean_md;
std::shared_ptr<dnnl::memory::desc> bn_rsqrt_md;
std::shared_ptr<dnnl::memory::desc> bn_offset_md;
std::shared_ptr<dnnl::memory::desc> src_scale_md;
std::shared_ptr<dnnl::memory::desc> wei_scale_md;
std::shared_ptr<dnnl::memory::desc> dst_scale_md;
std::shared_ptr<dnnl::primitive> conv_fwd;
std::vector<dnnl::primitive> fwd_primitives;
std::vector<std::unordered_map<int, memory>> fwd_primitives_args;
ConvFwdContext()
: src_mem(nullptr),
filter_mem(nullptr),
bias_mem(nullptr),
dst_mem(nullptr),
sp_mem(nullptr),
bn_scale_mem(nullptr),
bn_mean_mem(nullptr),
bn_rsqrt_mem(nullptr),
bn_offset_mem(nullptr),
src_scale_mem(nullptr),
wei_scale_mem(nullptr),
dst_scale_mem(nullptr),
#ifndef ENABLE_ONEDNN_V3
fwd_desc(nullptr),
#endif
fwd_pd(nullptr),
src_md(nullptr),
filter_md(nullptr),
bias_md(nullptr),
dst_md(nullptr),
bn_scale_md(nullptr),
bn_mean_md(nullptr),
bn_rsqrt_md(nullptr),
bn_offset_md(nullptr),
src_scale_md(nullptr),
wei_scale_md(nullptr),
dst_scale_md(nullptr),
conv_fwd(nullptr) {
}
};
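  // Builds the primitive: creates src/filter/dst (and optional bias or
  // batch-norm) memory descriptors, translates the requested fusions into
  // oneDNN post-ops and attributes, constructs the primitive descriptor, and
  // records the execution arguments.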
void Setup(const MklConvFwdParams& convFwdDims) {
memory::format_tag user_data_fmt;
if (convFwdDims.native_format) {
user_data_fmt = MklTensorFormatToMklDnnDataFormat(convFwdDims.tf_fmt);
} else {
user_data_fmt = memory::format_tag::any;
}
context_.src_md.reset(new memory::desc(
{convFwdDims.src_dims}, MklDnnType<Tinput>(), user_data_fmt));
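    // Special case: for a non-constant float 4-D filter with batch size 1,
    // pin the filter layout to TF's HWIO instead of letting oneDNN choose,
    // presumably to avoid re-reordering a filter that changes on every step.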
if (convFwdDims.filter_dims.size() == 4 && !convFwdDims.is_filter_const &&
std::is_same<Tfilter, float>::value &&
convFwdDims.src_dims[MklDnnDims::Dim_N] == 1) {
context_.filter_md.reset(new memory::desc({convFwdDims.filter_dims},
MklDnnType<Tfilter>(),
memory::format_tag::hwio));
} else {
context_.filter_md.reset(new memory::desc({convFwdDims.filter_dims},
MklDnnType<Tfilter>(),
memory::format_tag::any));
}
context_.dst_md.reset(new memory::desc(
{convFwdDims.dst_dims}, MklDnnType<Toutput>(), user_data_fmt));
if (!convFwdDims.bias_dims.empty()) {
if (std::is_same<Tbias, qint32>::value) {
context_.bias_md.reset(new memory::desc({convFwdDims.bias_dims},
MklDnnType<TSCALED_BIAS>(),
memory::format_tag::any));
} else {
context_.bias_md.reset(new memory::desc({convFwdDims.bias_dims},
MklDnnType<Tbias>(),
memory::format_tag::any));
}
#ifndef ENABLE_ONEDNN_V3
context_.fwd_desc.reset(new convolution_forward::desc(
prop_kind::forward, dnnl::algorithm::convolution_direct,
*context_.src_md, *context_.filter_md, *context_.bias_md,
*context_.dst_md, convFwdDims.strides, convFwdDims.dilations,
convFwdDims.padding_left, convFwdDims.padding_right));
} else {
context_.fwd_desc.reset(new convolution_forward::desc(
prop_kind::forward, dnnl::algorithm::convolution_direct,
*context_.src_md, *context_.filter_md, *context_.dst_md,
convFwdDims.strides, convFwdDims.dilations, convFwdDims.padding_left,
convFwdDims.padding_right));
#endif
}
if (!convFwdDims.fuse_bn_dims.empty()) {
const memory::format_tag fused_bn_arg_fmt =
convFwdDims.native_format
? user_data_fmt
: MklTensorFormatToMklDnnDataFormat(convFwdDims.tf_fmt);
context_.bn_scale_md.reset(new memory::desc(
{convFwdDims.fuse_bn_dims}, MklDnnType<Tinput>(), fused_bn_arg_fmt));
context_.bn_mean_md.reset(new memory::desc(
{convFwdDims.fuse_bn_dims}, MklDnnType<Tinput>(), fused_bn_arg_fmt));
context_.bn_rsqrt_md.reset(new memory::desc(
{convFwdDims.fuse_bn_dims}, MklDnnType<Tinput>(), fused_bn_arg_fmt));
context_.bn_offset_md.reset(new memory::desc(
{convFwdDims.fuse_bn_dims}, MklDnnType<Tinput>(), fused_bn_arg_fmt));
}
auto const& post_op_params = convFwdDims.post_op_params;
dnnl::primitive_attr post_ops_attr;
dnnl::post_ops post_ops;
post_ops_attr.set_scratchpad_mode(dnnl::scratchpad_mode::user);
std::unordered_map<string, bool> is_scale_set;
if (!post_op_params.empty()) {
for (auto const& post_op_param : post_op_params) {
if (post_op_param.name == "activation") {
DCHECK_EQ(post_op_param.param.size(), 3);
float op_scale = post_op_param.param[0];
float op_alpha = post_op_param.param[1];
float op_beta = post_op_param.param[2];
post_ops.APPEND_ELTWISE(op_scale, post_op_param.alg, op_alpha,
op_beta);
} else if (post_op_param.name == "sum") {
DCHECK_EQ(post_op_param.param.size(), 1);
float op_scale = post_op_param.param[0];
#ifndef ENABLE_ONEDNN_V3
post_ops.append_sum(op_scale);
#else
if (post_op_param.dtype != DT_INVALID) {
if (post_op_param.dtype == DT_FLOAT) {
              post_ops.append_sum(op_scale, /*zero_point=*/0,
                                  MklDnnType<float>());
} else {
TF_CHECK_OK(absl::FailedPreconditionError(
"Summand data type is expected to be float"));
}
} else {
post_ops.append_sum(op_scale);
}
#endif
#ifndef ENABLE_ONEDNN_V3
} else if (post_op_param.name == "output_scale") {
if (post_op_param.param.size() == 1) {
post_ops_attr.set_output_scales(0, post_op_param.param);
} else {
post_ops_attr.set_output_scales(2, post_op_param.param);
}
#else
} else if (post_op_param.name == "src_scale") {
is_scale_set.insert({"src", true});
post_ops_attr.set_scales_mask(DNNL_ARG_SRC, 0);
context_.src_scale_md.reset(new memory::desc({1}, MklDnnType<float>(),
memory::format_tag::x));
context_.src_scale_mem.reset(
new memory(*context_.src_scale_md, cpu_engine_, DummyData));
} else if (post_op_param.name == "wei_scale") {
is_scale_set.insert({"wei", true});
const int scale_size = post_op_param.param.size();
const int mask = scale_size == 1 ? 0
: convFwdDims.is_depthwise ? 3
: 1;
post_ops_attr.set_scales_mask(DNNL_ARG_WEIGHTS, mask);
context_.wei_scale_md.reset(new memory::desc(
{scale_size}, MklDnnType<float>(), memory::format_tag::x));
context_.wei_scale_mem.reset(
new memory(*context_.wei_scale_md, cpu_engine_, DummyData));
} else if (post_op_param.name == "dst_scale") {
is_scale_set.insert({"dst", true});
post_ops_attr.set_scales_mask(DNNL_ARG_DST, 0);
context_.dst_scale_md.reset(new memory::desc({1}, MklDnnType<float>(),
memory::format_tag::x));
context_.dst_scale_mem.reset(
new memory(*context_.dst_scale_md, cpu_engine_, DummyData));
#endif
} else if (post_op_param.name == "fuse_bn") {
post_ops.append_binary(dnnl::algorithm::binary_sub,
*context_.bn_mean_md);
post_ops.append_binary(dnnl::algorithm::binary_mul,
*context_.bn_rsqrt_md);
post_ops.append_binary(dnnl::algorithm::binary_mul,
*context_.bn_scale_md);
post_ops.append_binary(dnnl::algorithm::binary_add,
*context_.bn_offset_md);
} else {
DCHECK((post_op_param.name == "activation") ||
(post_op_param.name == "sum") || OUTPUT_SCALE_DCHECK ||
(post_op_param.name == "fuse_bn"));
}
}
post_ops_attr.set_post_ops(post_ops);
}
#ifndef ENABLE_ONEDNN_V3
context_.fwd_pd.reset(
new ConvFwdPd(*context_.fwd_desc, post_ops_attr, cpu_engine_));
#else
if (!convFwdDims.bias_dims.empty()) {
context_.fwd_pd.reset(new ConvFwdPd(
cpu_engine_, prop_kind::forward, dnnl::algorithm::convolution_direct,
*context_.src_md, *context_.filter_md, *context_.bias_md,
*context_.dst_md, convFwdDims.strides, convFwdDims.dilations,
convFwdDims.padding_left, convFwdDims.padding_right, post_ops_attr));
} else {
context_.fwd_pd.reset(new ConvFwdPd(
cpu_engine_, prop_kind::forward, dnnl::algorithm::convolution_direct,
*context_.src_md, *context_.filter_md, *context_.dst_md,
convFwdDims.strides, convFwdDims.dilations, convFwdDims.padding_left,
convFwdDims.padding_right, post_ops_attr));
}
#endif
context_.src_mem.reset(
new memory(context_.fwd_pd.get()->src_desc(), cpu_engine_, DummyData));
context_.filter_mem.reset(new memory(context_.fwd_pd.get()->weights_desc(),
cpu_engine_, DummyData));
context_.dst_mem.reset(
new memory(context_.fwd_pd.get()->dst_desc(), cpu_engine_, DummyData));
context_.conv_fwd.reset(new convolution_forward(*context_.fwd_pd));
auto scratchpad_md = context_.fwd_pd->scratchpad_desc();
context_.sp_mem.reset(
new dnnl::memory(scratchpad_md, cpu_engine_, DummyData));
std::unordered_map<int, memory> net_args;
if (!convFwdDims.bias_dims.empty()) {
context_.bias_mem.reset(new memory(context_.fwd_pd.get()->bias_desc(),
cpu_engine_, DummyData));
net_args = {{DNNL_ARG_SRC, *context_.src_mem},
{DNNL_ARG_WEIGHTS, *context_.filter_mem},
{DNNL_ARG_BIAS, *context_.bias_mem},
{DNNL_ARG_SCRATCHPAD, *context_.sp_mem},
{DNNL_ARG_DST, *context_.dst_mem}};
#ifdef ENABLE_ONEDNN_V3
if (is_scale_set["src"] && is_scale_set["wei"] && is_scale_set["dst"]) {
net_args.insert(
{{DNNL_ARG_ATTR_SCALES | DNNL_ARG_SRC, *context_.src_scale_mem},
{DNNL_ARG_ATTR_SCALES | DNNL_ARG_WEIGHTS, *context_.wei_scale_mem},
{ DNNL_ARG_ATTR_SCALES | DNNL_ARG_DST,
*context_.dst_scale_mem }});
}
#endif
} else if (!convFwdDims.fuse_bn_dims.empty()) {
context_.bn_scale_mem.reset(
new memory(*context_.bn_scale_md, cpu_engine_, DummyData));
context_.bn_mean_mem.reset(
new memory(*context_.bn_mean_md, cpu_engine_, DummyData));
context_.bn_offset_mem.reset(
new memory(*context_.bn_offset_md, cpu_engine_, DummyData));
context_.bn_rsqrt_mem.reset(
new memory(*context_.bn_rsqrt_md, cpu_engine_, DummyData));
net_args = {{DNNL_ARG_SRC, *context_.src_mem},
{DNNL_ARG_WEIGHTS, *context_.filter_mem},
{DNNL_ARG_DST, *context_.dst_mem},
{DNNL_ARG_SCRATCHPAD, *context_.sp_mem},
{DNNL_ARG_ATTR_MULTIPLE_POST_OP(0) | DNNL_ARG_SRC_1,
*context_.bn_mean_mem},
{DNNL_ARG_ATTR_MULTIPLE_POST_OP(1) | DNNL_ARG_SRC_1,
*context_.bn_rsqrt_mem},
{DNNL_ARG_ATTR_MULTIPLE_POST_OP(2) | DNNL_ARG_SRC_1,
*context_.bn_scale_mem},
{DNNL_ARG_ATTR_MULTIPLE_POST_OP(3) | DNNL_ARG_SRC_1,
*context_.bn_offset_mem}};
} else {
net_args = {{DNNL_ARG_SRC, *context_.src_mem},
{DNNL_ARG_WEIGHTS, *context_.filter_mem},
{DNNL_ARG_SCRATCHPAD, *context_.sp_mem},
{DNNL_ARG_DST, *context_.dst_mem}};
#ifdef ENABLE_ONEDNN_V3
if (is_scale_set["src"] && is_scale_set["wei"] && is_scale_set["dst"]) {
net_args.insert(
{{DNNL_ARG_ATTR_SCALES | DNNL_ARG_SRC, *context_.src_scale_mem},
{DNNL_ARG_ATTR_SCALES | DNNL_ARG_WEIGHTS, *context_.wei_scale_mem},
{ DNNL_ARG_ATTR_SCALES | DNNL_ARG_DST,
*context_.dst_scale_mem }});
}
#endif
}
context_.fwd_primitives_args.push_back(net_args);
context_.fwd_primitives.push_back(*context_.conv_fwd);
}
struct ConvFwdContext context_;
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex primitive_execution_mu_;
#endif
};
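// Process-wide cache of MklConvFwdPrimitive instances, keyed by a string
// serialized from MklConvFwdParams. Get() returns the cached primitive when
// one exists; otherwise it creates one (and caches it unless do_not_cache).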
template <typename Tinput, typename Tfilter, typename Tbias, typename Toutput>
class MklConvFwdPrimitiveFactory : public MklPrimitiveFactory<float> {
public:
static MklConvFwdPrimitive<Tinput, Tfilter, Tbias, Toutput>* Get(
const MklConvFwdParams& convFwdDims, bool do_not_cache) {
MklConvFwdPrimitive<Tinput, Tfilter, Tbias, Toutput>* conv_fwd = nullptr;
if (do_not_cache) {
conv_fwd =
new MklConvFwdPrimitive<Tinput, Tfilter, Tbias, Toutput>(convFwdDims);
} else {
conv_fwd =
dynamic_cast<MklConvFwdPrimitive<Tinput, Tfilter, Tbias, Toutput>*>(
MklConvFwdPrimitiveFactory<Tinput, Tfilter, Tbias,
Toutput>::GetInstance()
.GetConvFwd(convFwdDims));
if (conv_fwd == nullptr) {
conv_fwd = new MklConvFwdPrimitive<Tinput, Tfilter, Tbias, Toutput>(
convFwdDims);
MklConvFwdPrimitiveFactory<Tinput, Tfilter, Tbias,
Toutput>::GetInstance()
.SetConvFwd(convFwdDims, conv_fwd);
}
}
return conv_fwd;
}
private:
MklConvFwdPrimitiveFactory() {}
~MklConvFwdPrimitiveFactory() {}
static const int kDilationH = 0, kDilationW = 1;
static MklConvFwdPrimitiveFactory& GetInstance() {
static MklConvFwdPrimitiveFactory instance_;
return instance_;
}
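  // Serializes the convolution parameters, including post-op parameters,
  // into the cache key. "not_a_key" is a sentinel for post-ops the key does
  // not understand; such configurations are not expected to reach this point.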
static string CreateKey(const MklConvFwdParams& convFwdDims) {
string prefix = "conv_fwd_";
FactoryKeyCreator key_creator;
key_creator.AddAsKey(prefix);
key_creator.AddAsKey(convFwdDims.src_dims);
key_creator.AddAsKey(convFwdDims.filter_dims);
key_creator.AddAsKey(convFwdDims.bias_dims);
key_creator.AddAsKey(convFwdDims.dst_dims);
key_creator.AddAsKey(convFwdDims.strides);
key_creator.AddAsKey(convFwdDims.dilations);
key_creator.AddAsKey(convFwdDims.padding_left);
key_creator.AddAsKey(convFwdDims.padding_right);
key_creator.AddAsKey(convFwdDims.dtypes);
if (convFwdDims.native_format) {
key_creator.AddAsKey(convFwdDims.tf_fmt);
}
for (auto const& post_op_param : convFwdDims.post_op_params) {
key_creator.AddAsKey(post_op_param.name);
if (post_op_param.name == "activation") {
key_creator.AddAsKey(post_op_param.alg);
DCHECK_EQ(post_op_param.param.size(), 3);
for (auto& param : post_op_param.param) {
key_creator.AddAsKey(param);
}
} else if (post_op_param.name == "sum") {
DCHECK_EQ(post_op_param.param.size(), 1);
for (auto& param : post_op_param.param) {
key_creator.AddAsKey(param);
}
#ifndef ENABLE_ONEDNN_V3
} else if (post_op_param.name == "output_scale") {
#else
} else if (post_op_param.name == "src_scale" ||
post_op_param.name == "wei_scale" ||
post_op_param.name == "dst_scale") {
#endif
key_creator.AddAsKey(post_op_param.partial_key);
} else if (post_op_param.name == "fuse_bn") {
key_creator.AddAsKey(post_op_param.name);
key_creator.AddAsKey(convFwdDims.fuse_bn_dims);
} else {
return string("not_a_key");
}
}
return key_creator.GetKey();
}
MklPrimitive* GetConvFwd(const MklConvFwdParams& convFwdDims) {
string key = CreateKey(convFwdDims);
return this->GetOp(key);
}
void SetConvFwd(const MklConvFwdParams& convFwdDims, MklPrimitive* op) {
string key = CreateKey(convFwdDims);
this->SetOp(key, op);
}
};
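// Base oneDNN convolution kernel. The boolean template flags select bias
// fusion, fused padding, depthwise convolution, and native (non-MKL-layout)
// tensor format; the subclasses below layer fused and quantized variants on
// top of it.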
template <typename Device, typename Tinput, typename Tfilter, typename Tbias,
typename Toutput, typename Ttemp_output, typename Tpadding,
bool bias_enabled, bool pad_enabled, bool is_depthwise,
bool native_format>
class MklConvOp : public OpKernel {
public:
~MklConvOp() {}
explicit MklConvOp(OpKernelConstruction* context) : OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("dilations", &dilations_));
OP_REQUIRES(
context,
!(context->HasAttr("padding_list") &&
context->HasAttr("explicit_paddings")),
absl::InvalidArgumentError("Can only have 1 `padding` list at most"));
if (context->HasAttr("padding_list")) {
OP_REQUIRES_OK(context, context->GetAttr("padding_list", &padding_list_));
}
if (context->HasAttr("explicit_paddings")) {
OP_REQUIRES_OK(context,
context->GetAttr("explicit_paddings", &padding_list_));
}
OP_REQUIRES_OK(context, context->GetAttr("strides", &strides_));
OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format_str_));
OP_REQUIRES(context, FormatFromString(data_format_str_, &data_format_),
absl::InvalidArgumentError("Invalid data format"));
OP_REQUIRES(context, (strides_.size() == 4 || strides_.size() == 5),
absl::InvalidArgumentError("Sliding window strides field must "
"specify 4 or 5 dimensions"));
const int64 stride_n = GetTensorDim(strides_, data_format_, 'N');
const int64 stride_c = GetTensorDim(strides_, data_format_, 'C');
OP_REQUIRES(
context, stride_n == 1 && stride_c == 1,
absl::UnimplementedError("Current implementation does not yet support "
"strides in the batch and depth dimensions."));
OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_));
is_filter_const_ = false;
if (AreWeightsFrozen()) {
is_filter_const_ = true;
} else if (context->HasAttr("is_filter_const")) {
OP_REQUIRES_OK(context,
context->GetAttr("is_filter_const", &is_filter_const_));
}
if (strides_.size() == 4) {
OP_REQUIRES(
context, dilations_.size() == 4,
absl::InvalidArgumentError("Sliding window dilations field must "
"specify 4 dimensions"));
const int64 dilation_n = GetTensorDim(dilations_, data_format_, 'N');
const int64 dilation_c = GetTensorDim(dilations_, data_format_, 'C');
const int64 dilation_h = GetTensorDim(dilations_, data_format_, 'H');
const int64 dilation_w = GetTensorDim(dilations_, data_format_, 'W');
OP_REQUIRES(context, dilation_n == 1 && dilation_c == 1,
absl::InvalidArgumentError(
"Current implementation does not yet support "
"dilations in the batch and depth dimensions."));
OP_REQUIRES(
context, dilation_h > 0 && dilation_w > 0,
absl::InvalidArgumentError("Dilated rates should be larger than 0."));
} else if (strides_.size() == 5) {
OP_REQUIRES(context, dilations_.size() == 5,
absl::InvalidArgumentError("Dilation rates field must "
"specify 5 dimensions"));
OP_REQUIRES(context,
(GetTensorDim(dilations_, data_format_, 'N') == 1 &&
GetTensorDim(dilations_, data_format_, 'C') == 1),
absl::InvalidArgumentError(
"Current implementation does not yet support "
"dilations rates in the batch and depth dimensions."));
OP_REQUIRES(
context,
(GetTensorDim(dilations_, data_format_, '0') > 0 &&
GetTensorDim(dilations_, data_format_, '1') > 0 &&
GetTensorDim(dilations_, data_format_, '2') > 0),
absl::InvalidArgumentError("Dilated rates should be larger than 0."));
}
}
void Compute(OpKernelContext* context) override {
try {
const Tensor& src_tensor = MklGetInput(context, kInputIndex_Src);
const Tensor& filter_tensor = MklGetInput(context, kInputIndex_Filter);
OP_REQUIRES(
context, filter_tensor.NumElements() > 0,
absl::InvalidArgumentError("filter must not have zero elements "
"(i.e. all dimensions must be non-zero)"));
if (std::is_same<Tinput, float>::value) {
(void)SetFPMathMode();
}
MklDnnShape src_mkl_shape, filter_mkl_shape;
GetMklShape(context, kInputIndex_Src, &src_mkl_shape, native_format);
GetMklShape(context, kInputIndex_Filter, &filter_mkl_shape,
native_format);
OP_REQUIRES(context, !filter_mkl_shape.IsMklTensor(),
absl::InvalidArgumentError("Filter should not be in "
"Mkl Layout"));
MklDnnData<Tinput> src(&cpu_engine_);
MklDnnData<Tfilter> filter(&cpu_engine_);
memory::dims src_dims, filter_dims, padding_left, padding_right,
dilations, strides;
memory::dims dst_dims_tf_order, dst_dims_mkl_order;
bool pad_attr_enabled = false;
for (auto const& padding_val : padding_list_) {
if (padding_val) {
pad_attr_enabled = true;
break;
}
}
if (fuse_pad_ || pad_attr_enabled) {
PadWithConvFusion(context, padding_left, padding_right,
pad_attr_enabled, data_format_str_);
}
MklDnnConvUtil conv_utl(context, strides_, padding_, data_format_,
dilations_);
auto src_tf_shape = GetTfShape(context, kInputIndex_Src, native_format);
auto filter_tf_shape =
GetTfShape(context, kInputIndex_Filter, native_format);
bool is_grouped_convolution = false;
conv_utl.GetConvFwdSizesInMklOrder(
src_tf_shape, filter_tf_shape, &src_dims, &filter_dims, &strides,
&dilations, &dst_dims_tf_order, &dst_dims_mkl_order, &padding_left,
&padding_right, &is_grouped_convolution,
(fuse_pad_ || pad_attr_enabled), is_depthwise);
if (!context->status().ok()) return;
TensorShape dst_tf_shape = MklDnnDimsToTFShape(dst_dims_tf_order);
Tensor* dst_tensor = nullptr;
bool emit_filter_output = (typeid(Tinput) == typeid(Tfilter) &&
typeid(Tinput) == typeid(Toutput) &&
(typeid(Tinput) == typeid(float) ||
typeid(Tinput) == typeid(bfloat16))) &&
!native_format;
if (dst_tf_shape.num_elements() == 0 || dst_dims_tf_order[0] == 0) {
MklDnnShape dst_mkl_shape;
dst_mkl_shape.SetMklTensor(false);
AllocateOutputSetMklShape(context, kOutputIndex_Dst, &dst_tensor,
src_tf_shape, dst_mkl_shape, native_format);
filter_mkl_shape.SetMklTensor(false);
Tensor* output_filter_tensor = nullptr;
if (emit_filter_output) {
filter_mkl_shape.SetMklTensor(false);
AllocateOutputSetMklShape(context, kOutputIndex_Filter,
&output_filter_tensor, filter_tf_shape,
filter_mkl_shape);
}
return;
}
bool is_conv2d = (strides_.size() == 4);
bool is_conv3d = (strides_.size() == 5);
if (!is_conv2d && !is_conv3d) {
OP_REQUIRES(context, !pad_enabled,
absl::InvalidArgumentError(
"Pad + Conv fusion only works for 2D/3D"));
        OP_REQUIRES(context, !fuse_pad_,
                    absl::InvalidArgumentError(
                        "Pad + Conv fusion only works for 2D/3D"));
}
if (is_depthwise) {
OP_REQUIRES(context, is_conv2d,
absl::InvalidArgumentError(
"Only 2D convolution is supported for depthwise."));
}
auto tf_fmt = is_conv2d ? TFDataFormatToMklDnnDataFormat(data_format_)
: TFDataFormatToMklDnn3DDataFormat(data_format_);
auto mkl_fmt_tag = MklTensorFormatToMklDnnDataFormat(tf_fmt);
OP_REQUIRES(context, mkl_fmt_tag != memory::format_tag::undef,
absl::InvalidArgumentError("Invalid data format"));
auto src_md =
src_mkl_shape.IsMklTensor()
? src_mkl_shape.GetMklLayout()
: memory::desc(src_dims, MklDnnType<Tinput>(), mkl_fmt_tag);
src.SetUsrMem(src_md, &src_tensor);
auto filter_format = is_conv2d ? ((is_depthwise || is_grouped_convolution)
? memory::format_tag::hwigo
: memory::format_tag::hwio)
: memory::format_tag::dhwio;
DCHECK(!filter_mkl_shape.IsMklTensor());
auto filter_md =
filter_mkl_shape.IsMklTensor()
? filter_mkl_shape.GetMklLayout()
: memory::desc(filter_dims, MklDnnType<Tfilter>(), filter_format);
filter.SetUsrMem(filter_md, &filter_tensor);
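      // oneDNN encodes dilations as (TF dilation - 1): 0 means undilated,
      // whereas TensorFlow uses 1 for the undilated case.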
for (int i = 0; i < dilations.size(); ++i) --dilations[i];
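      // Heuristic: skip the primitive cache for large batches on platforms
      // and shapes where caching primitives is known to cost more memory
      // than it saves.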
bool do_not_cache =
MklPrimitiveFactory<Tinput>::IsPrimitiveMemOptEnabled() &&
(src_dims[MklDnnDims::Dim_N] > kSmallBatchSize) &&
(MklPrimitiveFactory<Tinput>::IsLegacyPlatform() ||
IsConv1x1StrideNot1(filter_dims, strides));
MklConvFwdPrimitive<Tinput, Tfilter, Tbias, Ttemp_output>* conv_fwd =
nullptr;
memory::dims bias_dims = {};
if (fuse_biasadd_) {
conv_utl.GetBiasSizeInMklOrder(kInputIndex_Bias, &bias_dims);
}
memory::dims fuse_bn_dims = {};
TensorShape fuse_bn_shape;
if (fuse_bn_) {
fuse_bn_shape = MklGetInput(context, kInputIndex_BN_Mean).shape();
OP_REQUIRES(context, fuse_bn_shape.dims() == 1,
absl::InvalidArgumentError(
absl::StrCat("FusedBatchNorm must be 1D, not: ",
fuse_bn_shape.DebugString())));
fuse_bn_dims = {1, fuse_bn_shape.dim_size(0), 1, 1};
}
MklConvFwdParams convFwdDims(
src_dims, filter_dims, fuse_biasadd_ ? bias_dims : NONE_DIMS,
dst_dims_mkl_order, strides, dilations, padding_left, padding_right,
fuse_bn_dims, tf_fmt, native_format, is_depthwise, is_filter_const_);
this->ExtendConvFwdParams(context, convFwdDims);
Eigen::ThreadPoolInterface* eigen_interface =
EigenThreadPoolFromTfContext(context);
tsl::OneDnnThreadPool eigen_tp(eigen_interface,
ThreadPoolUseCallerThread());
conv_fwd =
MklConvFwdPrimitiveFactory<Tinput, Tfilter, Tbias, Ttemp_output>::Get(
convFwdDims, do_not_cache);
MklDnnShape output_mkl_shape;
std::shared_ptr<ConvFwdPd> conv_fwd_pd = conv_fwd->GetPrimitiveDesc();
AllocateOutputTensor(context, *conv_fwd_pd, dst_dims_mkl_order, tf_fmt,
&output_mkl_shape, &dst_tensor);
Tensor* filter_out_tensor = nullptr;
if (emit_filter_output) {
AllocateFilterOutputTensor(context, *conv_fwd_pd,
TFShapeToMklDnnDims(filter_tf_shape),
&filter_out_tensor);
}
Ttemp_output* dst_data =
reinterpret_cast<Ttemp_output*>(dst_tensor->flat<Toutput>().data());
Tinput* src_data = nullptr;
if (src_md != conv_fwd_pd->src_desc()) {
src.SetUsrMem(src_md, &src_tensor);
src.CheckReorderToOpMem(conv_fwd_pd->src_desc(), cpu_engine_, context);
src_data = static_cast<Tinput*>(src.GetOpMem().get_data_handle());
} else {
src_data = static_cast<Tinput*>(
const_cast<Tinput*>(src_tensor.flat<Tinput>().data()));
}
Tfilter* filter_data = nullptr;
if (filter_md != conv_fwd_pd->weights_desc()) {
bool is_filter_cached = false;
if (is_filter_const_) {
if (IsFilterCacheEmpty(context)) {
CacheFilter(context, conv_fwd_pd, filter_data, filter_tensor,
filter, filter_md, filter_mkl_shape);
}
filter_data = GetCachedFilter(context, conv_fwd_pd->weights_desc());
is_filter_cached = (filter_data != nullptr);
}
if (!is_filter_cached) {
filter.SetUsrMem(filter_md, &filter_tensor);
if (filter_out_tensor == nullptr) {
filter.CheckReorderToOpMem(conv_fwd_pd->weights_desc(), cpu_engine_,
context);
} else {
filter.CheckReorderToOpMem(
conv_fwd_pd->weights_desc(),
filter.GetTensorBuffer(filter_out_tensor), cpu_engine_,
context);
}
filter_data =
static_cast<Tfilter*>(filter.GetOpMem().get_data_handle());
}
} else {
filter_data = static_cast<Tfilter*>(
const_cast<Tfilter*>(filter_tensor.flat<Tfilter>().data()));
}
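      // The primitive descriptor was created with scratchpad_mode::user, so
      // the scratchpad is allocated here as a TF temp tensor instead of
      // inside oneDNN.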
UserScratchPad<unsigned char> scratch_pad;
scratch_pad.AllocateSPTensor(conv_fwd, context);
std::shared_ptr<stream> fwd_cpu_stream;
fwd_cpu_stream.reset(CreateStream(&eigen_tp, conv_fwd->GetEngine()));
if (fuse_biasadd_) {
const Tensor& bias_tensor = MklGetInput(context, kInputIndex_Bias);
void* bias_data =
this->GetBiasHandle(context, conv_fwd_pd, bias_tensor);
conv_fwd->Execute(src_data, filter_data, bias_data, dst_data,
convFwdDims, fwd_cpu_stream, scratch_pad.Get());
} else if (fuse_bn_) {
const Tensor& bn_scale_tensor =
MklGetInput(context, kInputIndex_BN_Scale);
Tinput* bn_scale_data = static_cast<Tinput*>(
const_cast<Tinput*>(bn_scale_tensor.flat<Tinput>().data()));
const Tensor& bn_mean_tensor =
MklGetInput(context, kInputIndex_BN_Mean);
Tinput* bn_mean_data = static_cast<Tinput*>(
const_cast<Tinput*>(bn_mean_tensor.flat<Tinput>().data()));
const Tensor& bn_offset_tensor =
MklGetInput(context, kInputIndex_BN_Offset);
Tinput* bn_offset_data = static_cast<Tinput*>(
const_cast<Tinput*>(bn_offset_tensor.flat<Tinput>().data()));
Tensor bn_rsqrt_tensor;
OP_REQUIRES_OK(context,
context->allocate_temp(DataTypeToEnum<Tinput>::v(),
fuse_bn_shape, &bn_rsqrt_tensor));
Tinput* bn_rsqrt_data = static_cast<Tinput*>(
const_cast<Tinput*>(bn_rsqrt_tensor.flat<Tinput>().data()));
this->ComputeBNScale(context, epsilon_, kInputIndex_BN_Variance,
bn_rsqrt_data);
conv_fwd->Execute(src_data, filter_data, nullptr, dst_data,
bn_scale_data, bn_mean_data, bn_offset_data,
bn_rsqrt_data, convFwdDims, fwd_cpu_stream,
scratch_pad.Get());
} else {
conv_fwd->Execute(src_data, filter_data, dst_data, convFwdDims,
fwd_cpu_stream, scratch_pad.Get());
}
if (do_not_cache) delete conv_fwd;
} catch (dnnl::error& e) {
string error_msg = tensorflow::strings::StrCat(
"Status: ", e.status, ", message: ", string(e.message), ", in file ",
__FILE__, ":", __LINE__);
OP_REQUIRES_OK(context,
absl::AbortedError(absl::StrCat(
"Operation received an exception:", error_msg)));
}
}
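  // Reads the fused-Pad amounts either from the padding_list /
  // explicit_paddings attribute or from the runtime paddings input tensor,
  // and converts them into oneDNN left/right padding vectors for the given
  // data format.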
void PadWithConvFusion(OpKernelContext* context, memory::dims& padding_left,
memory::dims& padding_right, bool pad_attr_enabled,
string data_format_str_) {
Tpadding* paddings = nullptr;
if (pad_attr_enabled) {
paddings = padding_list_.data();
} else {
const Tensor& paddings_tf = MklGetInput(context, input_index_pad_);
OP_REQUIRES(context, paddings_tf.dims() == 2,
absl::InvalidArgumentError(
absl::StrCat("paddings must be 2-dimensional: ",
paddings_tf.shape().DebugString())));
paddings = static_cast<Tpadding*>(
const_cast<Tpadding*>(paddings_tf.flat<Tpadding>().data()));
}
int64 pad_top = 0, pad_left = 0, pad_front = 0;
int64 pad_bottom = 0, pad_right = 0, pad_back = 0;
if (data_format_str_ == "NHWC") {
pad_top = paddings[2];
pad_bottom = paddings[3];
pad_left = paddings[4];
pad_right = paddings[5];
} else if (data_format_str_ == "NCHW") {
pad_top = paddings[4];
pad_bottom = paddings[5];
pad_left = paddings[6];
pad_right = paddings[7];
} else if (data_format_str_ == "NDHWC") {
pad_front = paddings[2];
pad_back = paddings[3];
pad_top = paddings[4];
pad_bottom = paddings[5];
pad_left = paddings[6];
pad_right = paddings[7];
} else if (data_format_str_ == "NCDHW") {
pad_front = paddings[4];
pad_back = paddings[5];
pad_top = paddings[6];
pad_bottom = paddings[7];
pad_left = paddings[8];
pad_right = paddings[9];
}
if (data_format_str_ == "NHWC" || data_format_str_ == "NCHW") {
padding_left = {static_cast<int>(pad_top), static_cast<int>(pad_left)};
padding_right = {static_cast<int>(pad_bottom),
static_cast<int>(pad_right)};
} else if (data_format_str_ == "NDHWC" || data_format_str_ == "NCDHW") {
padding_left = {static_cast<int>(pad_front), static_cast<int>(pad_top),
static_cast<int>(pad_left)};
padding_right = {static_cast<int>(pad_back), static_cast<int>(pad_bottom),
static_cast<int>(pad_right)};
}
}
protected:
void set_input_add_idx(int input_add_idx) {
input_index_add_ = input_add_idx;
}
int get_input_add_idx() { return input_index_add_; }
void set_fuse_biasadd(bool fuse_biasadd) { fuse_biasadd_ = fuse_biasadd; }
bool get_fuse_biasadd() { return fuse_biasadd_; }
void set_fuse_activation(bool fuse_activation, dnnl::algorithm activation_alg,
float alpha_or_upbound = 0.0, float beta = 0.0) {
fuse_activation_ = fuse_activation;
activation_alg_ = activation_alg;
alpha_or_upbound_ = alpha_or_upbound;
beta_ = beta;
}
void set_fuse_pad(bool fuse_pad) {
fuse_pad_ = fuse_pad;
if (fuse_bn_) {
input_index_pad_ = 6;
} else if (fuse_add_ && fuse_biasadd_) {
input_index_pad_ = 4;
} else {
input_index_pad_ = 3;
}
}
void set_fuse_add(bool fuse_add) { fuse_add_ = fuse_add; }
  bool get_fuse_add() { return fuse_add_; }
void set_fuse_bn(bool fuse_bn, float epsilon) {
fuse_bn_ = fuse_bn;
epsilon_ = epsilon;
}
virtual void ComputeBNScale(OpKernelContext* context, float epsilon,
int bn_variance_index, Tinput* scale_buf_ptr) {
OP_REQUIRES(context, false,
absl::UnimplementedError(
"Compute BN scale not expected in base class"));
return;
}
virtual void ExtendConvFwdParams(OpKernelContext* context,
MklConvFwdParams& params) {
params.dtypes.append(typeid(Tinput).name());
params.dtypes.append(typeid(Tfilter).name());
params.dtypes.append(typeid(Tbias).name());
params.dtypes.append(typeid(Toutput).name());
bool is_quantized_input = std::is_same<Tinput, quint8>::value ||
std::is_same<Tinput, qint8>::value;
if (!is_quantized_input) {
if (fuse_add_) {
params.post_op_params.push_back(
{"sum", dnnl::algorithm::undef, {1.0}, ""});
}
if (fuse_bn_) {
params.post_op_params.push_back(
{"fuse_bn", dnnl::algorithm::undef, {1.0}, ""});
}
if (fuse_activation_) {
params.post_op_params.push_back({"activation",
activation_alg_,
{1.0, alpha_or_upbound_, beta_},
""});
}
}
}
virtual void* GetBiasHandle(OpKernelContext* context,
std::shared_ptr<ConvFwdPd>& conv2d_fwd_pd,
const Tensor& bias_tensor) {
if (fuse_biasadd_) {
return static_cast<Tbias*>(
const_cast<Tbias*>(bias_tensor.flat<Tbias>().data()));
}
return nullptr;
}
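  // Allocates the convolution output. With an active Add/Sum fusion it first
  // tries to forward the summand tensor into the output in place; otherwise
  // (or if forwarding is not possible) it allocates a fresh tensor and
  // reorders the summand into it.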
virtual void AllocateOutputTensor(OpKernelContext* context,
const ConvFwdPd& conv_prim_desc,
const memory::dims& output_dims_mkl_order,
MklTensorFormat output_tf_format,
MklDnnShape* output_mkl_shape,
Tensor** output_tensor) {
DCHECK(output_tensor);
#ifndef ENABLE_ONEDNN_V3
    // oneDNN v2: mutate the data type of the primitive's dst descriptor in
    // place when the temporary and final output types differ.
    auto dst_md = conv_prim_desc.dst_desc();
    if (!std::is_same<Ttemp_output, Toutput>::value) {
      dst_md.data.data_type =
          static_cast<dnnl_data_type_t>(MklDnnType<Toutput>());
    }
#else
    // oneDNN v3: descriptors are opaque, so rebuild one with the final
    // output type instead.
    auto dst_md =
        std::is_same<Ttemp_output, Toutput>::value
            ? conv_prim_desc.dst_desc()
            : memory::desc(conv_prim_desc.dst_desc().get_dims(),
                           MklDnnType<Toutput>(),
                           MklTensorFormatToMklDnnDataFormat(output_tf_format));
#endif  // !ENABLE_ONEDNN_V3
output_mkl_shape->SetMklTensor(true);
output_mkl_shape->SET_MKL_LAYOUT(dst_md);
output_mkl_shape->SetElemType(MklDnnType<Toutput>());
output_mkl_shape->SetTfLayout(output_dims_mkl_order.size(),
output_dims_mkl_order, output_tf_format);
TensorShape output_tf_shape;
output_tf_shape.AddDim((dst_md.get_size() / sizeof(Toutput)));
if (native_format) {
output_tf_shape = output_mkl_shape->GetTfShape();
}
bool is_quantized_input = std::is_same<Tinput, quint8>::value ||
std::is_same<Tinput, qint8>::value;
if (fuse_add_ && !is_quantized_input) {
const Tensor& add_tensor = MklGetInput(context, input_index_add_);
MklDnnShape add_mkl_shape;
GetMklShape(context, input_index_add_, &add_mkl_shape, native_format);
if (native_format && context->forward_input_to_output_with_shape(
input_index_add_, kOutputIndex_Dst,
output_tf_shape, output_tensor)) {
return;
}
if (!native_format && add_mkl_shape == *output_mkl_shape &&
ForwardMklTensorInToOutWithMklShape(context, input_index_add_,
kOutputIndex_Dst, output_tensor,
add_mkl_shape, false)) {
return;
} else {
AllocateOutputSetMklShape(context, kOutputIndex_Dst, output_tensor,
output_tf_shape, *output_mkl_shape,
native_format);
auto output_format_tag = MklTensorFormatToMklDnnDataFormat(
output_mkl_shape->GetTfDataFormat());
OP_REQUIRES(context, output_format_tag != memory::format_tag::undef,
absl::InvalidArgumentError(
"MklConvOp: AddN fusion: Invalid data format"));
auto add_md =
add_mkl_shape.IsMklTensor()
? add_mkl_shape.GetMklLayout()
: memory::desc(output_dims_mkl_order, MklDnnType<Toutput>(),
output_format_tag);
void* add_buf = static_cast<void*>(
const_cast<Toutput*>(add_tensor.flat<Toutput>().data()));
void* dst_buf =
static_cast<void*>((*output_tensor)->flat<Ttemp_output>().data());
if (native_format) {
add_md = dst_md =
memory::desc({add_tensor.NumElements()}, MklDnnType<Toutput>(),
dnnl::memory::format_tag::x);
}
fuse_add_src_.reset(new memory(add_md, this->cpu_engine_, add_buf));
fuse_add_dst_.reset(new memory(dst_md, this->cpu_engine_, dst_buf));
auto reorder_desc =
ReorderPd(this->cpu_engine_, add_md, this->cpu_engine_, dst_md);
CreateAndExecuteReorder(reorder_desc, *fuse_add_src_, *fuse_add_dst_,
this->cpu_engine_, context);
}
} else {
AllocateOutputSetMklShape(context, kOutputIndex_Dst, output_tensor,
output_tf_shape, *output_mkl_shape,
native_format);
}
}
engine cpu_engine_ = engine(engine::kind::cpu, 0);
private:
std::shared_ptr<dnnl::memory> fuse_add_src_;
std::shared_ptr<dnnl::memory> fuse_add_dst_;
std::vector<int32> strides_;
std::vector<int32> dilations_;
std::vector<Tpadding> padding_list_;
bool is_filter_const_;
mutex mu_;
Padding padding_;
string data_format_str_;
TensorFormat data_format_;
Tensor cached_filter_data_ TF_GUARDED_BY(mu_);
#ifndef ENABLE_ONEDNN_V3
Tensor cached_filter_md_ TF_GUARDED_BY(mu_);
#else
FilterMemoryDesc cached_filter_md_ TF_GUARDED_BY(mu_);
#endif
bool fuse_biasadd_ = bias_enabled;
bool fuse_activation_ = false;
bool fuse_pad_ = pad_enabled;
bool fuse_add_ = false;
bool fuse_bn_ = false;
float epsilon_ = 0.0001;
float alpha_or_upbound_ = 0.0;
float beta_ = 0.0;
dnnl::algorithm activation_alg_ = dnnl::algorithm::undef;
int input_index_pad_ = 2;
int input_index_add_ = 3;
const int kInputIndex_Src = 0, kInputIndex_Filter = 1, kInputIndex_Bias = 2;
const int kOutputIndex_Dst = 0, kOutputIndex_Filter = 1;
const int kDilationH = 0, kDilationW = 1;
const int kInputIndex_BN_Scale = 2, kInputIndex_BN_Offset = 3;
const int kInputIndex_BN_Mean = 4, kInputIndex_BN_Variance = 5;
MklTensorFormat GetFilterTfDataFormat(const MklDnnShape* filter_mkl_shape,
const ConvFwdPd& conv_prim_desc) const {
DCHECK(filter_mkl_shape);
return filter_mkl_shape->GetTfDataFormat();
}
void AllocateTensor(OpKernelContext* context, const ConvFwdPd& conv_prim_desc,
Tensor** filter_tensor,
const MklDnnShape* filter_mkl_shape)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
DCHECK(filter_tensor);
TensorShape filter_tf_shape;
filter_tf_shape.AddDim(
(conv_prim_desc.weights_desc().get_size() / sizeof(Tfilter)));
OP_REQUIRES_OK(
context, context->allocate_temp(DataTypeToEnum<Tfilter>::value,
filter_tf_shape, &cached_filter_data_));
*filter_tensor = &cached_filter_data_;
memory::desc weights_desc = conv_prim_desc.weights_desc();
#ifndef ENABLE_ONEDNN_V3
TensorShape cached_filter_md_shape;
cached_filter_md_shape.AddDim(sizeof(weights_desc) / sizeof(uint8));
OP_REQUIRES_OK(context,
context->allocate_temp(DT_UINT8, cached_filter_md_shape,
&cached_filter_md_));
*reinterpret_cast<memory::desc*>(cached_filter_md_.flat<uint8>().data()) =
weights_desc;
#else
cached_filter_md_ = FilterMemoryDesc(
weights_desc.get_ndims(), weights_desc.get_inner_nblks(),
weights_desc.get_data_type(), weights_desc.get_dims(),
weights_desc.get_inner_blks(), weights_desc.get_inner_idxs(),
weights_desc.get_strides());
#endif
}
void AllocateTensor(OpKernelContext* context, const ConvFwdPd& conv_prim_desc,
Tensor** filter_tensor) {
AllocateTensor(context, conv_prim_desc, filter_tensor, nullptr);
}
void AllocateFilterOutputTensor(OpKernelContext* context,
const ConvFwdPd& conv_prim_desc,
const memory::dims& filter_dims_tf_order,
Tensor** filter_tensor) {
DCHECK(filter_tensor);
auto filter_md = conv_prim_desc.weights_desc();
MklDnnShape filter_mkl_shape;
filter_mkl_shape.SetMklTensor(true);
filter_mkl_shape.SET_MKL_LAYOUT(filter_md);
filter_mkl_shape.SetElemType(MklDnnType<Tfilter>());
filter_mkl_shape.SetTfLayout(filter_dims_tf_order.size(),
filter_dims_tf_order,
MklTensorFormat::FORMAT_BLOCKED);
TensorShape filter_tf_shape;
filter_tf_shape.AddDim((filter_md.get_size() / sizeof(Tfilter)));
AllocateOutputSetMklShape(context, kOutputIndex_Filter, filter_tensor,
filter_tf_shape, filter_mkl_shape);
}
inline bool IsFilterCacheEmpty(OpKernelContext* context)
TF_LOCKS_EXCLUDED(mu_) {
tf_shared_lock lock(mu_);
const Tensor& cached_filter_data_tensor = cached_filter_data_;
return (cached_filter_data_tensor.NumElements() == 0);
}
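  // Reorders the constant filter into the primitive's preferred layout once
  // and stores the result together with its memory descriptor, so later
  // steps can skip the reorder. Guarded by mu_; the first writer wins.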
void CacheFilter(OpKernelContext* context,
const std::shared_ptr<ConvFwdPd>& conv_fwd_pd,
Tfilter* filter_data, const Tensor& filter_tensor,
MklDnnData<Tfilter>& filter, const memory::desc& filter_md,
const MklDnnShape& filter_mkl_shape) TF_LOCKS_EXCLUDED(mu_) {
mutex_lock lock(mu_);
const Tensor& cached_filter_data_tensor = cached_filter_data_;
if (cached_filter_data_tensor.NumElements() > 0) {
return;
}
#ifdef ENABLE_ONEDNN_V3
if (filter_md.get_format_kind() != memory::format_kind::blocked) {
return;
}
#endif
filter.SetUsrMem(filter_md, &filter_tensor);
filter.CheckReorderToOpMem(conv_fwd_pd.get()->weights_desc(),
this->cpu_engine_, context);
filter_data = static_cast<Tfilter*>(filter.GetOpMem().get_data_handle());
Tensor* filter_tensor_ptr = nullptr;
AllocateTensor(context, *conv_fwd_pd, &filter_tensor_ptr,
&filter_mkl_shape);
void* cached_filter_data = filter.GetTensorBuffer(filter_tensor_ptr);
size_t cached_filter_data_size = filter.GetOpMem().get_desc().get_size();
memcpy(cached_filter_data, filter_data, cached_filter_data_size);
}
#ifndef ENABLE_ONEDNN_V3
bool AreMemoryDescriptorsEqual(const memory::desc& filter_md,
const Tensor& cached_filter_md) {
auto filter_md_data = filter_md.data;
const char* filter_data = reinterpret_cast<const char*>(&filter_md_data);
auto cached_filter_md_data = cached_filter_md.scalar<int64_t>()();
const char* cached_filter_data =
reinterpret_cast<const char*>(&cached_filter_md_data);
for (size_t i = 0; i < sizeof(filter_md_data); ++i) {
if (*filter_data++ != *cached_filter_data++) {
return false;
}
}
return true;
}
#endif
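  // Returns the cached, pre-reordered filter if its stored memory descriptor
  // matches what the current primitive expects; nullptr forces a fresh
  // reorder.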
Tfilter* GetCachedFilter(OpKernelContext* context,
const memory::desc& filter_md)
TF_LOCKS_EXCLUDED(mu_) {
tf_shared_lock lock(mu_);
const Tensor& cached_filter_data = cached_filter_data_;
#ifndef ENABLE_ONEDNN_V3
const Tensor& cached_filter_md = cached_filter_md_;
if (filter_md == *static_cast<memory::desc*>(cached_filter_md.data())) {
return static_cast<Tfilter*>(
const_cast<Tfilter*>(cached_filter_data.flat<Tfilter>().data()));
}
return nullptr;
#else
if (cached_filter_md_ ==
FilterMemoryDesc(filter_md.get_ndims(), filter_md.get_inner_nblks(),
filter_md.get_data_type(), filter_md.get_dims(),
filter_md.get_inner_blks(), filter_md.get_inner_idxs(),
filter_md.get_strides())) {
return static_cast<Tfilter*>(
const_cast<Tfilter*>(cached_filter_data.flat<Tfilter>().data()));
}
return nullptr;
#endif
}
};
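// _FusedConv2D kernel: decodes the `fused_ops` attribute into the matching
// bias/activation/add/batch-norm fusion flags on the base class. Any
// combination not listed below fails with Unimplemented.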
template <typename Device, typename Tinput, typename Tfilter, typename Tbias,
typename Toutput, typename Ttemp_output, typename Tpadding,
bool pad_enabled, bool native_format>
class MklFusedConvOp
: public MklConvOp<Device, Tinput, Tfilter, Tbias, Toutput, Ttemp_output,
Tpadding, false, false, false, native_format> {
public:
explicit MklFusedConvOp(OpKernelConstruction* context)
: MklConvOp<Device, Tinput, Tfilter, Tbias, Toutput, Ttemp_output,
Tpadding, false, false, false, native_format>(context) {
std::vector<string> fused_ops;
OP_REQUIRES_OK(context, context->GetAttr("fused_ops", &fused_ops));
int num_args;
OP_REQUIRES_OK(context, context->GetAttr("num_args", &num_args));
OP_REQUIRES(context, !fused_ops.empty(),
absl::InvalidArgumentError(
"Fused Conv2D must have at least one fused op."));
if (fused_ops == std::vector<string>{"BiasAdd"}) {
this->set_fuse_biasadd(true);
OP_REQUIRES(context, num_args == 1,
absl::InvalidArgumentError(
"Fused Conv2D must have one extra argument: bias."));
} else if (fused_ops == std::vector<string>{"Relu"}) {
this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu);
} else if (fused_ops == std::vector<string>{"Relu6"}) {
this->SET_FUSE_ACTIVATION_FOR_RELU6;
} else if (fused_ops == std::vector<string>{"Elu"}) {
this->set_fuse_activation(true, dnnl::algorithm::eltwise_elu, 1.0);
} else if (fused_ops == std::vector<string>{"LeakyRelu"}) {
float leakyrelu_alpha;
OP_REQUIRES_OK(context,
context->GetAttr("leakyrelu_alpha", &leakyrelu_alpha));
this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu,
leakyrelu_alpha);
} else if (fused_ops == std::vector<string>{"FusedBatchNorm"}) {
float epsilon;
OP_REQUIRES_OK(context, context->GetAttr("epsilon", &epsilon));
OP_REQUIRES(
context, num_args == 4,
absl::InvalidArgumentError(
"Fused Conv2D with batchnorm must have 4 extra argument"));
this->set_fuse_bn(true, epsilon);
} else if (fused_ops == std::vector<string>{"BiasAdd", "Relu"}) {
this->set_fuse_biasadd(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu);
OP_REQUIRES(context, num_args == 1,
absl::InvalidArgumentError(
"Fused Conv2D must have one extra argument: bias."));
} else if (fused_ops == std::vector<string>{"BiasAdd", "Relu6"}) {
this->set_fuse_biasadd(true);
this->SET_FUSE_ACTIVATION_FOR_RELU6;
OP_REQUIRES(context, num_args == 1,
absl::InvalidArgumentError(
"Fused Conv2D must have one extra argument: bias."));
} else if (fused_ops == std::vector<string>{"BiasAdd", "Elu"}) {
this->set_fuse_biasadd(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_elu, 1.0);
OP_REQUIRES(context, num_args == 1,
absl::InvalidArgumentError(
"Fused Conv2D must have one extra argument: bias."));
} else if (fused_ops == std::vector<string>{"BiasAdd", "LeakyRelu"}) {
this->set_fuse_biasadd(true);
float leakyrelu_alpha;
OP_REQUIRES_OK(context,
context->GetAttr("leakyrelu_alpha", &leakyrelu_alpha));
this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu,
leakyrelu_alpha);
OP_REQUIRES(context, num_args == 1,
absl::InvalidArgumentError(
"Fused Conv2D must have one extra argument: bias."));
} else if (fused_ops == std::vector<string>{"BiasAdd", "_FusedHardSwish"}) {
this->set_fuse_biasadd(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_hardswish,
1.0 / 6.0, 0.5);
} else if (fused_ops == std::vector<string>{"BiasAdd", "Add"}) {
this->set_fuse_biasadd(true);
this->set_fuse_add(true);
OP_REQUIRES(
context, num_args == 2,
absl::InvalidArgumentError(
"Fused Conv2D must have two extra arguments: bias and add."));
} else if (fused_ops == std::vector<string>{"FusedBatchNorm", "Relu"}) {
float epsilon;
OP_REQUIRES_OK(context, context->GetAttr("epsilon", &epsilon));
OP_REQUIRES(
context, num_args == 4,
absl::InvalidArgumentError(
"Fused Conv2D with batchnorm must have 4 extra argument"));
this->set_fuse_bn(true, epsilon);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu);
} else if (fused_ops == std::vector<string>{"FusedBatchNorm", "Relu6"}) {
float epsilon;
OP_REQUIRES_OK(context, context->GetAttr("epsilon", &epsilon));
OP_REQUIRES(
context, num_args == 4,
absl::InvalidArgumentError(
"Fused Conv2D with batchnorm must have 4 extra argument"));
this->set_fuse_bn(true, epsilon);
this->SET_FUSE_ACTIVATION_FOR_RELU6;
} else if (fused_ops == std::vector<string>{"FusedBatchNorm", "Elu"}) {
float epsilon;
OP_REQUIRES_OK(context, context->GetAttr("epsilon", &epsilon));
OP_REQUIRES(
context, num_args == 4,
absl::InvalidArgumentError(
"Fused Conv2D with batchnorm must have 4 extra argument"));
this->set_fuse_bn(true, epsilon);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_elu, 1.0);
} else if (fused_ops ==
std::vector<string>{"FusedBatchNorm", "LeakyRelu"}) {
float epsilon, leakyrelu_alpha;
OP_REQUIRES_OK(context, context->GetAttr("epsilon", &epsilon));
OP_REQUIRES_OK(context,
context->GetAttr("leakyrelu_alpha", &leakyrelu_alpha));
OP_REQUIRES(
context, num_args == 4,
absl::InvalidArgumentError(
"Fused Conv2D with batchnorm must have 4 extra argument"));
this->set_fuse_bn(true, epsilon);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu,
leakyrelu_alpha);
} else if (fused_ops ==
std::vector<string>{"FusedBatchNorm", "_MklSwish"}) {
float epsilon;
OP_REQUIRES_OK(context, context->GetAttr("epsilon", &epsilon));
OP_REQUIRES(
context, num_args == 4,
absl::InvalidArgumentError(
"Fused Conv2D with batchnorm must have 4 extra argument"));
this->set_fuse_bn(true, epsilon);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_swish, 1.0);
} else if (fused_ops == std::vector<string>{"BiasAdd", "Add", "Relu"}) {
this->set_fuse_biasadd(true);
this->set_fuse_add(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu);
OP_REQUIRES(
context, num_args == 2,
absl::InvalidArgumentError(
"Fused Conv2D must have two extra arguments: bias and add."));
} else if (fused_ops == std::vector<string>{"BiasAdd", "Add", "Relu6"}) {
this->set_fuse_biasadd(true);
this->set_fuse_add(true);
this->SET_FUSE_ACTIVATION_FOR_RELU6;
OP_REQUIRES(
context, num_args == 2,
absl::InvalidArgumentError(
"Fused Conv2D must have two extra arguments: bias and add."));
} else if (fused_ops == std::vector<string>{"BiasAdd", "Add", "Elu"}) {
this->set_fuse_biasadd(true);
this->set_fuse_add(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_elu, 1.0);
OP_REQUIRES(
context, num_args == 2,
absl::InvalidArgumentError(
"Fused Conv2D must have two extra arguments: bias and add."));
} else if (fused_ops ==
std::vector<string>{"BiasAdd", "Add", "LeakyRelu"}) {
this->set_fuse_biasadd(true);
this->set_fuse_add(true);
float leakyrelu_alpha;
OP_REQUIRES_OK(context,
context->GetAttr("leakyrelu_alpha", &leakyrelu_alpha));
this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu,
leakyrelu_alpha);
OP_REQUIRES(
context, num_args == 2,
absl::InvalidArgumentError(
"Fused Conv2D must have two extra arguments: bias and add."));
} else if (fused_ops == std::vector<string>{"BiasAdd", "Mish"}) {
this->set_fuse_biasadd(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_mish, 1.0);
OP_REQUIRES(context, num_args == 1,
absl::InvalidArgumentError(
"_FusedConv2D must have one extra argument: bias."));
} else if (fused_ops == std::vector<string>{"BiasAdd", "_MklSwish"}) {
this->set_fuse_biasadd(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_swish, 1.0);
OP_REQUIRES(context, num_args == 1,
absl::InvalidArgumentError(
"Fused Conv2D must have one extra argument: bias."));
} else {
OP_REQUIRES(context, false,
absl::UnimplementedError(
absl::StrCat("Fusion is not implemented: [",
absl::StrJoin(fused_ops, ","), "]")));
}
if (pad_enabled) {
this->set_fuse_pad(true);
}
}
void ComputeBNScale(OpKernelContext* context, float epsilon,
int bn_variance_index, Tinput* scale_buf_ptr) override {
const Tensor& bn_var_tensor = MklGetInput(context, bn_variance_index);
Eigen::Tensor<Tinput, 1, Eigen::RowMajor> bn_rsqrt =
(bn_var_tensor.flat<Tinput>() + static_cast<Tinput>(epsilon)).rsqrt();
Tinput* bn_rsqrt_data = bn_rsqrt.data();
int64_t num_elem = bn_var_tensor.shape().dim_size(0);
for (int64_t i = 0; i < num_elem; i++) {
scale_buf_ptr[i] = bn_rsqrt_data[i];
}
return;
}
virtual ~MklFusedConvOp() {}
};
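// Fused depthwise convolution: only the BiasAdd(+activation) combinations
// below are supported, and exactly one extra argument (the bias) is
// required.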
template <typename Device, typename Tinput, typename Tfilter, typename Tbias,
typename Toutput, typename Ttemp_output, typename Tpadding,
bool pad_enabled, bool bias_enabled, bool is_depthwise,
bool native_format>
class MklFusedDepthwiseConvOp
: public MklConvOp<Device, Tinput, Tfilter, Tbias, Toutput, Ttemp_output,
Tpadding, bias_enabled, false, is_depthwise,
native_format> {
public:
explicit MklFusedDepthwiseConvOp(OpKernelConstruction* context)
: MklConvOp<Device, Tinput, Tfilter, Tbias, Toutput, Ttemp_output,
Tpadding, bias_enabled, false, is_depthwise, native_format>(
context) {
std::vector<string> fused_ops;
OP_REQUIRES_OK(context, context->GetAttr("fused_ops", &fused_ops));
int num_args;
OP_REQUIRES_OK(context, context->GetAttr("num_args", &num_args));
OP_REQUIRES(context, !fused_ops.empty(),
absl::InvalidArgumentError(
"Fused DepthwiseConv2D must have at least one fused op."));
if (fused_ops == std::vector<string>{"BiasAdd"}) {
this->set_fuse_biasadd(true);
} else if (fused_ops == std::vector<string>{"BiasAdd", "Relu"}) {
this->set_fuse_biasadd(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu);
} else if (fused_ops == std::vector<string>{"BiasAdd", "Relu6"}) {
this->set_fuse_biasadd(true);
this->SET_FUSE_ACTIVATION_FOR_RELU6;
} else if (fused_ops == std::vector<string>{"BiasAdd", "Elu"}) {
this->set_fuse_biasadd(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_elu, 1.0);
} else if (fused_ops == std::vector<string>{"BiasAdd", "_FusedHardSwish"}) {
this->set_fuse_biasadd(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_hardswish,
1.0 / 6.0, 0.5);
} else {
OP_REQUIRES(context, false,
absl::InvalidArgumentError(
absl::StrCat("Fusion is not implemented: [",
absl::StrJoin(fused_ops, ","), "]")));
}
OP_REQUIRES(
context, num_args == 1,
absl::InvalidArgumentError(
"Fused DepthwiseConv2D must have one extra argument: bias."));
if (pad_enabled) {
this->set_fuse_pad(true);
}
}
virtual ~MklFusedDepthwiseConvOp() {}
};
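// Bit flags for the quantized fusions; XOR-ed into fused_op_flags_ (each op
// appears at most once) so IsFused() can test membership cheaply.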
enum class oneDNNFusedOps { kBias = 1, kSum = 2, kRelu = 4, kRequantize = 8 };
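// Quantized convolution. Fusions come either from the legacy_fused_ops
// template argument (old API) or from the `fused_ops` attribute (new API,
// signalled by num_fused_ops == -1); the constructor derives the indices of
// the min/max range inputs for whichever API is in use.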
template <typename Device, typename Tinput, typename Tbias, typename Toutput,
typename Ttemp_output, bool is_depthwise, string legacy_fused_ops[],
int num_fused_ops>
class MklQuantizedConvOp
    : public MklConvOp<Device, Tinput, /*Tfilter=*/qint8, Tbias, Toutput,
                       Ttemp_output, /*Tpadding=*/int32,
                       /*bias_enabled=*/false, /*pad_enabled=*/false,
                       is_depthwise, /*native_format=*/true> {
public:
virtual ~MklQuantizedConvOp() {
if (this->input_bias_ != nullptr) {
delete this->input_bias_;
input_bias_ = nullptr;
}
if (this->scaled_bias_ != nullptr) {
delete this->scaled_bias_;
scaled_bias_ = nullptr;
}
}
explicit MklQuantizedConvOp(OpKernelConstruction* context)
      : MklConvOp<Device, Tinput, /*Tfilter=*/qint8, Tbias, Toutput,
                  Ttemp_output, /*Tpadding=*/int32, /*bias_enabled=*/false,
                  /*pad_enabled=*/false, is_depthwise,
                  /*native_format=*/true>(context) {
std::vector<std::vector<string>> supported_fusions = {
{"BiasAdd"},
{"Relu"},
{"Requantize"},
{"BiasAdd", "Relu"},
{"BiasAdd", "Requantize"},
{"Relu", "Requantize"},
{"BiasAdd", "Relu", "Requantize"},
{"BiasAdd", "Sum", "Relu"},
{"BiasAdd", "Sum", "Relu", "Requantize"}};
std::vector<string> fused_ops_attr;
if (context->HasAttr("fused_ops")) {
OP_REQUIRES_OK(context, context->GetAttr("fused_ops", &fused_ops_attr));
}
    OP_REQUIRES(context, !(fused_ops_attr.size() > 0 && num_fused_ops > 0),
                absl::InvalidArgumentError(
                    "QuantizedConv fused ops must be specified through either "
                    "the new API or the old API, not both."));
if (fused_ops_attr.size() > 0) {
fused_ops_ = fused_ops_attr;
} else if (num_fused_ops > 0) {
for (int i = 0; i < num_fused_ops; ++i) {
fused_ops_.push_back(legacy_fused_ops[i]);
}
}
if (fused_ops_.size() > 0) {
bool is_fusion_supported =
std::find(supported_fusions.begin(), supported_fusions.end(),
fused_ops_) != supported_fusions.end();
OP_REQUIRES(context, is_fusion_supported,
absl::InvalidArgumentError(
absl::StrCat("Unsupported QuantizedConv fusion: [",
absl::StrJoin(fused_ops_, ","), "]")));
}
for (const auto& op : fused_ops_) {
fused_op_flags_ ^= static_cast<int64_t>(StrToEnum(op));
}
DataType bias_dt, summand_dt, out_dt;
if (IsFused(oneDNNFusedOps::kBias)) {
this->set_fuse_biasadd(true);
OP_REQUIRES_OK(context,
context->GetAttr("is_bias_const", &is_bias_const_));
if (context->HasAttr("Tbias")) {
OP_REQUIRES_OK(context, context->GetAttr("Tbias", &bias_dt));
}
}
if (IsFused(oneDNNFusedOps::kSum)) {
this->set_fuse_add(true);
}
const bool fuse_requantize = IsFused(oneDNNFusedOps::kRequantize);
OP_REQUIRES_OK(context, context->GetAttr("out_type", &out_dt));
if (fuse_requantize) {
OP_REQUIRES(
context, out_dt == DT_QINT8 || out_dt == DT_QUINT8,
absl::InvalidArgumentError("QuantizedConv: unsupported output "
"type when Requantize is fused."));
}
if (context->HasAttr("Tsummand")) {
OP_REQUIRES_OK(context, context->GetAttr("Tsummand", &summand_dt));
if (!this->get_fuse_add()) {
OP_REQUIRES(
context, summand_dt == out_dt,
absl::InvalidArgumentError(
"QuantizedConv: incorrect summand data type. When Sum is not "
"fused, Tsummand attribute must have same value as out_type."));
}
}
#ifndef ENABLE_ONEDNN_V3
int idx = fuse_requantize ? 1 : 0;
#else
post_op_to_idx_["src_scale"] = 0;
post_op_to_idx_["wei_scale"] = 1;
post_op_to_idx_["dst_scale"] = 2;
int idx = 3;
#endif
for (int i = 0; i < fused_ops_.size(); ++i) {
if (fused_ops_[i] == "Requantize") {
#ifndef ENABLE_ONEDNN_V3
post_op_to_idx_["output_scale"] = 0;
#endif
} else if (fused_ops_[i] == "Sum") {
post_op_to_idx_["sum"] = idx++;
} else if (fused_ops_[i] == "Relu") {
post_op_to_idx_["activation"] = idx++;
}
}
bool is_filter_const;
OP_REQUIRES_OK(context,
context->GetAttr("is_filter_const", &is_filter_const));
OP_REQUIRES(
context, is_filter_const,
absl::InvalidArgumentError("QuantizedConv: filter must be a constant"));
if (num_fused_ops == -1) {
int non_minmax_arg_idx_base = 2;
int minmax_arg_idx_base = 6;
int bias_idx_offset = this->get_fuse_biasadd() ? 1 : 0;
int summand_idx_offset = this->get_fuse_add() ? 1 : 0;
int bias_min_max_idx_offset =
this->get_fuse_biasadd() &&
!(bias_dt == DT_FLOAT || bias_dt == DT_QINT32)
? 2
: 0;
min_input_idx_ =
non_minmax_arg_idx_base + bias_idx_offset + summand_idx_offset;
max_input_idx_ = min_input_idx_ + 1;
min_filter_idx_ = min_input_idx_ + 2;
max_filter_idx_ = min_input_idx_ + 3;
if (this->get_fuse_biasadd()) {
min_bias_idx_ =
minmax_arg_idx_base + bias_idx_offset + summand_idx_offset;
max_bias_idx_ = min_bias_idx_ + 1;
}
if (this->get_fuse_add()) {
this->set_input_add_idx(non_minmax_arg_idx_base + bias_idx_offset);
if (summand_dt == DT_QINT8 || summand_dt == DT_QUINT8) {
min_summand_idx_ = minmax_arg_idx_base + bias_idx_offset +
summand_idx_offset + bias_min_max_idx_offset;
max_summand_idx_ = min_summand_idx_ + 1;
}
}
if (fuse_requantize) {
min_freezed_output_idx_ = context->num_inputs() - 2;
max_freezed_output_idx_ = min_freezed_output_idx_ + 1;
}
} else {
int bias_idx_offset = this->get_fuse_biasadd() ? 1 : 0;
min_input_idx_ = 2 + bias_idx_offset;
max_input_idx_ = 3 + bias_idx_offset;
min_filter_idx_ = 4 + bias_idx_offset;
max_filter_idx_ = 5 + bias_idx_offset;
if (fuse_requantize) {
min_freezed_output_idx_ = 6 + bias_idx_offset;
max_freezed_output_idx_ = 7 + bias_idx_offset;
}
if (this->get_fuse_add()) {
int input_add_idx = std::is_same<Toutput, quint8>::value
? context->num_inputs() - 1 - 2
: context->num_inputs() - 1;
this->set_input_add_idx(input_add_idx);
if (summand_dt == DT_QINT8 || summand_dt == DT_QUINT8) {
min_summand_idx_ = 9 + bias_idx_offset;
max_summand_idx_ = 10 + bias_idx_offset;
}
}
}
}
void Compute(OpKernelContext* context) override {
MklConvOp<Device, Tinput, qint8, Tbias, Toutput, Ttemp_output,
int32, false,
false, is_depthwise,
true>::Compute(context);
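    // The base class has executed the convolution; all that remains is to
    // publish the output min/max range tensors.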
const float min_input =
context->input(min_input_idx_).template scalar<float>()();
const float max_input =
context->input(max_input_idx_).template scalar<float>()();
Tensor* output_min = nullptr;
Tensor* output_max = nullptr;
if (std::is_same<Toutput, quint8>::value ||
std::is_same<Toutput, qint8>::value) {
OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min));
OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max));
output_min->flat<float>()(0) =
context->input(min_freezed_output_idx_).template scalar<float>()();
output_max->flat<float>()(0) =
context->input(max_freezed_output_idx_).template scalar<float>()();
} else {
const Tensor& min_filter = context->input(min_filter_idx_);
const Tensor& max_filter = context->input(max_filter_idx_);
if (min_filter.dims() == 0) {
float min_output_value;
float max_output_value;
MklQuantizationRangeForMultiplication<Tinput, qint8, qint32>(
min_input, max_input, min_filter.scalar<float>()(),
max_filter.scalar<float>()(), &min_output_value, &max_output_value);
OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min));
OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max));
output_min->flat<float>()(0) = min_output_value;
output_max->flat<float>()(0) = max_output_value;
} else {
size_t depth = min_filter.NumElements();
OP_REQUIRES_OK(context,
context->allocate_output(
1, {static_cast<ptrdiff_t>(depth)}, &output_min));
OP_REQUIRES_OK(context,
context->allocate_output(
2, {static_cast<ptrdiff_t>(depth)}, &output_max));
MklQuantizationRangeForMultiplication<Tinput, qint8, qint32>(
min_input, max_input, min_filter, max_filter, &output_min,
&output_max);
}
}
}
protected:
void ExtendConvFwdParams(OpKernelContext* context,
MklConvFwdParams& params) override {
MklConvOp<Device, Tinput, qint8, Tbias, Toutput, Ttemp_output,
int32, false,
false, is_depthwise,
true>::ExtendConvFwdParams(context, params);
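    // Append quantization scales and the sum/activation post ops on top of
    // the parameters the base class configured.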
params.post_op_params.resize(post_op_to_idx_.size());
const float min_input =
context->input(min_input_idx_).template scalar<float>()();
const float max_input =
context->input(max_input_idx_).template scalar<float>()();
const Tensor& min_filter_vector = context->input(min_filter_idx_);
const Tensor& max_filter_vector = context->input(max_filter_idx_);
OP_REQUIRES(
context,
((min_filter_vector.NumElements() > 0) &&
(max_filter_vector.NumElements() > 0) &&
(min_filter_vector.shape() == max_filter_vector.shape())),
absl::InvalidArgumentError("`min_ and max_filter` must have same"
"shape and contain at least one element."));
size_t depth = min_filter_vector.NumElements();
const float* min_filter = min_filter_vector.flat<float>().data();
const float* max_filter = max_filter_vector.flat<float>().data();
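    // SCALE is a macro defined earlier in this file (outside this excerpt);
    // it appears to name this vector `scales` for oneDNN v2 and `wei_scale`
    // for oneDNN v3, matching the uses below.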
std::vector<float> SCALE(depth);
float float_input_range =
std::max(std::abs(min_input), std::abs(max_input));
#ifdef ENABLE_ONEDNN_V3
float int_input_limit =
std::is_same<Tinput, quint8>::value ? 255.0f : 127.0f;
const float src_scale = float_input_range / int_input_limit;
#endif
if (std::is_same<Toutput, quint8>::value ||
std::is_same<Toutput, qint8>::value) {
const float min_freezed_output =
context->input(min_freezed_output_idx_).template scalar<float>()();
const float max_freezed_output =
context->input(max_freezed_output_idx_).template scalar<float>()();
float int_output_limit =
std::is_same<Toutput, quint8>::value ? 255.0f : 127.0f;
float float_output_range =
std::max(std::abs(min_freezed_output), std::abs(max_freezed_output));
#ifndef ENABLE_ONEDNN_V3
const float int_const_scale_limit =
(std::is_same<Tinput, quint8>::value) ? 255.0 * 127.0 : 127.0 * 127.0;
#endif
for (size_t i = 0; i < depth; ++i) {
float float_filter_range =
std::max(std::abs(min_filter[i]), std::abs(max_filter[i]));
#ifndef ENABLE_ONEDNN_V3
scales[i] = int_output_limit * float_input_range * float_filter_range /
(int_const_scale_limit * float_output_range);
#else
wei_scale[i] = float_filter_range / 127.0;
#endif
}
#ifndef ENABLE_ONEDNN_V3
FactoryKeyCreator param_key;
param_key.AddAsKey<float>(min_input);
param_key.AddAsKey<float>(max_input);
param_key.AddAsKey<float>(min_freezed_output);
param_key.AddAsKey<float>(max_freezed_output);
param_key.AddAsKey<const float*>(min_filter);
param_key.AddAsKey<const float*>(max_filter);
params.post_op_params[post_op_to_idx_["output_scale"]] = {
"output_scale", dnnl::algorithm::undef, scales, param_key.GetKey()};
#else
const float dst_scale = float_output_range / int_output_limit;
FactoryKeyCreator dst_param_key;
dst_param_key.AddAsKey<float>(min_freezed_output);
dst_param_key.AddAsKey<float>(max_freezed_output);
params.post_op_params[post_op_to_idx_["dst_scale"]] = {
"dst_scale",
dnnl::algorithm::undef,
{dst_scale},
dst_param_key.GetKey()};
#endif
} else {
#ifdef ENABLE_ONEDNN_V3
if (!std::is_same<Toutput, qint32>::value)
TF_CHECK_OK(absl::FailedPreconditionError(
"Output datatype is expected to be qint32."));
float min_min_filter = min_filter[0];
float max_max_filter = max_filter[0];
for (size_t i = 0; i < depth; ++i) {
float float_filter_range =
std::max(std::abs(min_filter[i]), std::abs(max_filter[i]));
wei_scale[i] = float_filter_range / 127.0;
if (min_filter[i] < min_min_filter) min_min_filter = min_filter[i];
if (max_filter[i] > max_max_filter) max_max_filter = max_filter[i];
}
const float single_wei_scale =
std::max(std::abs(min_min_filter), std::abs(max_max_filter)) / 127.0;
const float dst_scale = single_wei_scale * src_scale;
FactoryKeyCreator dst_param_key;
dst_param_key.AddAsKey<float>(dst_scale);
params.post_op_params[post_op_to_idx_["dst_scale"]] = {
"dst_scale",
dnnl::algorithm::undef,
{dst_scale},
dst_param_key.GetKey()};
#endif
}
#ifdef ENABLE_ONEDNN_V3
FactoryKeyCreator src_param_key;
src_param_key.AddAsKey<float>(min_input);
src_param_key.AddAsKey<float>(max_input);
FactoryKeyCreator wei_param_key;
wei_param_key.AddAsKey<const float*>(min_filter);
wei_param_key.AddAsKey<const float*>(max_filter);
params.post_op_params[post_op_to_idx_["src_scale"]] = {
"src_scale",
dnnl::algorithm::undef,
{src_scale},
src_param_key.GetKey()};
params.post_op_params[post_op_to_idx_["wei_scale"]] = {
"wei_scale", dnnl::algorithm::undef, wei_scale, wei_param_key.GetKey()};
#endif
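    // When Sum is fused, the summand is accumulated into the output; its
    // scale depends on whether the summand is already quantized.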
if (this->get_fuse_add()) {
DataType summand_dt = this->input_type(this->get_input_add_idx());
if (std::is_same<Toutput, quint8>::value) {
bool summand_condition =
(summand_dt == DT_QINT8) || (summand_dt == DT_QUINT8);
        DCHECK(summand_condition);
const Tensor& min_freezed_output_tensor =
context->input(min_freezed_output_idx_);
const Tensor& max_freezed_output_tensor =
context->input(max_freezed_output_idx_);
OP_REQUIRES(
context,
TensorShapeUtils::IsScalar(min_freezed_output_tensor.shape()),
absl::InvalidArgumentError(
absl::StrCat("`min_freezed_output` must be rank 0 but is rank ",
min_freezed_output_tensor.dims())));
OP_REQUIRES(
context,
TensorShapeUtils::IsScalar(max_freezed_output_tensor.shape()),
absl::InvalidArgumentError(
absl::StrCat("`max_freezed_output` must be rank 0 but is rank ",
max_freezed_output_tensor.dims())));
const Tensor& min_freezed_summand_tensor =
context->input(min_summand_idx_);
const Tensor& max_freezed_summand_tensor =
context->input(max_summand_idx_);
OP_REQUIRES(
context,
TensorShapeUtils::IsScalar(min_freezed_summand_tensor.shape()),
absl::InvalidArgumentError(absl::StrCat(
"`min_freezed_summand` must be rank 0 but is rank ",
min_freezed_summand_tensor.dims())));
OP_REQUIRES(
context,
TensorShapeUtils::IsScalar(max_freezed_summand_tensor.shape()),
absl::InvalidArgumentError(absl::StrCat(
"`max_freezed_summand` must be rank 0 but is rank ",
max_freezed_summand_tensor.dims())));
#ifndef ENABLE_ONEDNN_V3
const float min_freezed_output =
min_freezed_output_tensor.template scalar<float>()();
const float max_freezed_output =
max_freezed_output_tensor.template scalar<float>()();
float output_range = std::max(std::abs(min_freezed_output),
std::abs(max_freezed_output));
#endif
const float min_freezed_summand =
min_freezed_summand_tensor.template scalar<float>()();
const float max_freezed_summand =
max_freezed_summand_tensor.template scalar<float>()();
float summand_range = std::max(std::abs(min_freezed_summand),
std::abs(max_freezed_summand));
if (summand_dt == DT_QUINT8) {
params.post_op_params[post_op_to_idx_["sum"]] = {
"sum",
dnnl::algorithm::undef,
{SUMMAND_SCALE_U8(summand_range, output_range)},
""};
} else {
params.post_op_params[post_op_to_idx_["sum"]] = {
"sum",
dnnl::algorithm::undef,
{SUMMAND_SCALE_S8(summand_range, output_range)},
""};
}
} else {
params.post_op_params[post_op_to_idx_["sum"]] = {"sum",
dnnl::algorithm::undef,
{1.0},
"",
#ifdef ENABLE_ONEDNN_V3
summand_dt
#endif
};
}
}
if (IsFused(oneDNNFusedOps::kRelu)) {
params.post_op_params[post_op_to_idx_["activation"]] = {
"activation", dnnl::algorithm::eltwise_relu, {1.0, 0.0, 0.0}, ""};
}
}
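  // For fused Sum the summand buffer is forwarded or reordered into the
  // output tensor; without Sum, allocation is delegated to the base class.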
void AllocateOutputTensor(OpKernelContext* context,
const ConvFwdPd& conv_prim_desc,
const memory::dims& output_dims_mkl_order,
MklTensorFormat output_tf_format,
MklDnnShape* output_mkl_shape,
Tensor** output_tensor) override {
if (!this->get_fuse_add()) {
MklConvOp<
Device, Tinput, qint8, Tbias, Toutput, Ttemp_output,
int32,
false, false, is_depthwise,
true>::AllocateOutputTensor(context, conv_prim_desc,
output_dims_mkl_order,
output_tf_format,
output_mkl_shape,
output_tensor);
} else {
if (std::is_same<Toutput, quint8>::value) {
int summand_idx = this->get_input_add_idx();
DataType summand_dt = this->input_type(summand_idx);
bool summand_condition =
(summand_dt == DT_QINT8) || (summand_dt == DT_QUINT8);
        DCHECK(summand_condition);
Tensor& summand = const_cast<Tensor&>(context->input(summand_idx));
if (summand_dt == DT_QINT8) {
OP_REQUIRES_OK(context, summand.BitcastFrom(summand, DT_QUINT8,
summand.shape()));
}
OP_REQUIRES(context,
context->forward_input_to_output_with_shape(
summand_idx, 0, summand.shape(), output_tensor),
absl::InvalidArgumentError(
"Summand cannot be forwarded in the current fusion."));
return;
}
#ifndef ENABLE_ONEDNN_V3
MklConvOp<
Device, Tinput, qint8, Tbias, Toutput, Ttemp_output,
int32,
false, false, is_depthwise,
true>::AllocateOutputTensor(context, conv_prim_desc,
output_dims_mkl_order,
output_tf_format,
output_mkl_shape,
output_tensor);
const Tensor& summand = context->input(this->get_input_add_idx());
if (summand.dtype() != DT_FLOAT)
TF_CHECK_OK(absl::FailedPreconditionError(
"Current fusion requires summand to be float"));
const float min_input =
context->input(min_input_idx_).template scalar<float>()();
const float max_input =
context->input(max_input_idx_).template scalar<float>()();
const Tensor& min_filter_vector = context->input(min_filter_idx_);
const Tensor& max_filter_vector = context->input(max_filter_idx_);
const float* min_filter = min_filter_vector.flat<float>().data();
const float* max_filter = max_filter_vector.flat<float>().data();
const float int_const_scale_limit =
(std::is_same<Tinput, quint8>::value) ? 255.0 * 127.0 : 127.0 * 127.0;
size_t depth = min_filter_vector.NumElements();
std::vector<float> scales(depth);
for (size_t i = 0; i < depth; ++i) {
scales[i] =
int_const_scale_limit /
(std::max(std::abs(max_input), std::abs(min_input)) *
std::max(std::abs(max_filter[i]), std::abs(min_filter[i])));
}
dnnl::primitive_attr reorder_attr;
#ifndef ENABLE_ONEDNN_V3
if (depth == 1) {
reorder_attr.set_output_scales(0, scales);
} else {
reorder_attr.set_output_scales(2, scales);
}
#else
DCHECK_EQ(depth, 1);
reorder_attr.set_scales_mask(DNNL_ARG_SRC, 0);
reorder_attr.set_scales_mask(DNNL_ARG_WEIGHTS, 0);
reorder_attr.set_scales_mask(DNNL_ARG_DST, 0);
#endif
auto summand_md = memory::desc(output_dims_mkl_order, MklDnnType<Tbias>(),
memory::format_tag::nhwc);
void* summand_buf =
static_cast<void*>(const_cast<Tbias*>(summand.flat<Tbias>().data()));
void* dst_buf =
static_cast<void*>((*output_tensor)->flat<Ttemp_output>().data());
summand_.reset(new memory(summand_md, this->cpu_engine_, summand_buf));
dst_.reset(
new memory(conv_prim_desc.dst_desc(), this->cpu_engine_, dst_buf));
auto reorder_desc =
ReorderPd(this->cpu_engine_, summand_md, this->cpu_engine_,
conv_prim_desc.dst_desc(), reorder_attr);
CreateAndExecuteReorder(reorder_desc, *summand_, *dst_, this->cpu_engine_,
context);
#else
int summand_idx = this->get_input_add_idx();
DataType summand_dt = this->input_type(summand_idx);
if (summand_dt != DT_FLOAT)
TF_CHECK_OK(absl::FailedPreconditionError(
"Summand datatype is expected to be float."));
Tensor& summand_float = const_cast<Tensor&>(context->input(summand_idx));
OP_REQUIRES_OK(context,
summand_float.BitcastFrom(summand_float, DT_QINT32,
summand_float.shape()));
OP_REQUIRES(context,
context->forward_input_to_output_with_shape(
summand_idx, 0, summand_float.shape(), output_tensor),
absl::InvalidArgumentError(
"Summand cannot be forwarded in the current fusion."));
#endif
}
}
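  // Returns the bias scaled into the convolution's accumulation domain,
  // reordering (and, for constant biases, caching) the user-supplied tensor
  // when necessary.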
void* GetBiasHandle(OpKernelContext* context,
std::shared_ptr<ConvFwdPd>& conv_fwd_pd,
const Tensor& bias_tensor) override {
if (!this->get_fuse_biasadd()) {
return nullptr;
}
#ifndef ENABLE_ONEDNN_V3
if (std::is_same<Tbias, qint32>::value) {
return static_cast<Tbias*>(
const_cast<Tbias*>(bias_tensor.flat<Tbias>().data()));
}
const float min_input =
context->input(min_input_idx_).template scalar<float>()();
const float max_input =
context->input(max_input_idx_).template scalar<float>()();
const Tensor& min_filter_vector = context->input(min_filter_idx_);
const Tensor& max_filter_vector = context->input(max_filter_idx_);
const float* min_filter = min_filter_vector.flat<float>().data();
const float* max_filter = max_filter_vector.flat<float>().data();
const float int_const_scale_limit =
(std::is_same<Tinput, quint8>::value) ? 255.0 * 127.0 : 127.0 * 127.0;
size_t depth = min_filter_vector.NumElements();
bool scales_are_valid = (depth == scales_.size());
scales_.resize(depth);
for (size_t i = 0; i < depth; ++i) {
float tmp_scale =
int_const_scale_limit /
(std::max(std::abs(max_input), std::abs(min_input)) *
std::max(std::abs(max_filter[i]), std::abs(min_filter[i])));
if (scales_are_valid && std::abs(tmp_scale - scales_[i]) > 1e-6) {
scales_are_valid = false;
}
scales_[i] = tmp_scale;
}
if (!is_bias_const_ || IsBiasCacheEmpty(context) || !scales_are_valid) {
dnnl::primitive_attr bias_attr;
#ifndef ENABLE_ONEDNN_V3
if (depth == 1) {
bias_attr.set_output_scales(0, scales_);
} else {
bias_attr.set_output_scales(1, scales_);
}
#else
DCHECK_EQ(depth, 1);
bias_attr.set_scales_mask(DNNL_ARG_SRC, 0);
bias_attr.set_scales_mask(DNNL_ARG_WEIGHTS, 0);
bias_attr.set_scales_mask(DNNL_ARG_DST, 0);
#endif
auto bias_md = memory::desc({static_cast<int>(bias_tensor.NumElements())},
MklDnnType<Tbias>(), memory::format_tag::x);
void* bias_buf = static_cast<void*>(
const_cast<Tbias*>(bias_tensor.flat<Tbias>().data()));
if (!input_bias_) {
input_bias_ = new memory(bias_md, this->cpu_engine_, bias_buf);
} else {
input_bias_->set_data_handle(bias_buf);
}
if (!scaled_bias_buf_)
AllocTmpBuffer<Tbias>(context, &scaled_bias_tensor_,
conv_fwd_pd->bias_desc(), &scaled_bias_buf_);
if (!scaled_bias_) {
scaled_bias_ = new memory(bias_md, this->cpu_engine_, scaled_bias_buf_);
} else {
scaled_bias_->set_data_handle(scaled_bias_buf_);
}
auto reorder_desc =
ReorderPd(this->cpu_engine_, input_bias_->get_desc(),
this->cpu_engine_, scaled_bias_->get_desc(), bias_attr);
CreateAndExecuteReorder(reorder_desc, *input_bias_, *scaled_bias_,
this->cpu_engine_, context);
Tbias* bias_data =
reinterpret_cast<Tbias*>(scaled_bias_->get_data_handle());
if (is_bias_const_)
CacheBias(context, conv_fwd_pd, bias_data, scaled_bias_);
return bias_data;
}
return GetCachedBias(context);
#else
if (std::is_same<Tbias, float>::value) {
return static_cast<Tbias*>(
const_cast<Tbias*>(bias_tensor.flat<Tbias>().data()));
}
const float min_input =
context->input(min_input_idx_).template scalar<float>()();
const float max_input =
context->input(max_input_idx_).template scalar<float>()();
const Tensor& min_filter_vector = context->input(min_filter_idx_);
const Tensor& max_filter_vector = context->input(max_filter_idx_);
if ((min_filter_vector.NumElements() == 0) ||
(max_filter_vector.NumElements() == 0) ||
(min_filter_vector.shape() != max_filter_vector.shape())) {
TF_CHECK_OK(absl::FailedPreconditionError(
"`min_filter and max_filter` must have same"
"shape and contain at least one element."));
}
const float* min_filter = min_filter_vector.flat<float>().data();
const float* max_filter = max_filter_vector.flat<float>().data();
const float int_const_scale_limit =
(std::is_same<Tinput, quint8>::value) ? 255.0 * 127.0 : 127.0 * 127.0;
size_t depth = min_filter_vector.NumElements();
bool scales_are_valid = (depth == scales_.size());
scales_.resize(depth);
for (size_t i = 0; i < depth; ++i) {
float tmp_scale =
int_const_scale_limit /
(std::max(std::abs(max_input), std::abs(min_input)) *
std::max(std::abs(max_filter[i]), std::abs(min_filter[i])));
if (scales_are_valid && std::abs(tmp_scale - scales_[i]) > 1e-6) {
scales_are_valid = false;
}
scales_[i] = tmp_scale;
}
if (!is_bias_const_ || IsBiasCacheEmpty(context) || !scales_are_valid) {
dnnl::primitive_attr reorder_attr;
if (depth == 1) {
reorder_attr.set_scales_mask(DNNL_ARG_DST, 0);
} else {
reorder_attr.set_scales_mask(DNNL_ARG_DST, 1);
}
auto bias_md = memory::desc({static_cast<int>(bias_tensor.NumElements())},
MklDnnType<Tbias>(), memory::format_tag::x);
void* bias_buf = static_cast<void*>(
const_cast<Tbias*>(bias_tensor.flat<Tbias>().data()));
if (!input_bias_) {
input_bias_ = new memory(bias_md, this->cpu_engine_, bias_buf);
} else {
input_bias_->set_data_handle(bias_buf);
}
if (!scaled_bias_buf_) {
AllocTmpBuffer<float>(context, &scaled_bias_tensor_,
conv_fwd_pd->bias_desc(), &scaled_bias_buf_);
}
if (!scaled_bias_) {
scaled_bias_ = new memory(conv_fwd_pd->bias_desc(), this->cpu_engine_,
scaled_bias_buf_);
} else {
scaled_bias_->set_data_handle(scaled_bias_buf_);
}
std::unique_ptr<memory> scale_mem(
new memory({{static_cast<int64_t>(depth)},
MklDnnType<float>(),
memory::format_tag::x},
this->cpu_engine_, scales_.data()));
auto reorder_desc =
ReorderPd(this->cpu_engine_, input_bias_->get_desc(),
this->cpu_engine_, scaled_bias_->get_desc(), reorder_attr);
CreateAndExecuteReorder(reorder_desc, *input_bias_, *scaled_bias_,
this->cpu_engine_, context, scale_mem.get());
float* bias_data =
reinterpret_cast<float*>(scaled_bias_->get_data_handle());
if (is_bias_const_)
CacheBias(context, conv_fwd_pd, bias_data, scaled_bias_);
return bias_data;
}
return GetCachedBias(context);
#endif
}
bool is_bias_const_;
Tensor cached_bias_data_ TF_GUARDED_BY(bias_cache_mu_);
memory* input_bias_ = nullptr;
memory* scaled_bias_ = nullptr;
Tensor scaled_bias_tensor_;
void* scaled_bias_buf_ = nullptr;
private:
std::vector<float> scales_;
mutex bias_cache_mu_;
std::vector<string> fused_ops_;
std::map<string, int> post_op_to_idx_;
int64_t fused_op_flags_ = 0;
std::unordered_map<string, oneDNNFusedOps> str_to_enum_{
{"BiasAdd", oneDNNFusedOps::kBias},
{"Sum", oneDNNFusedOps::kSum},
{"Relu", oneDNNFusedOps::kRelu},
{"Requantize", oneDNNFusedOps::kRequantize}};
std::shared_ptr<dnnl::memory> summand_;
std::shared_ptr<dnnl::memory> dst_;
int min_input_idx_ = -1;
int max_input_idx_ = -1;
int min_filter_idx_ = -1;
int max_filter_idx_ = -1;
int min_bias_idx_ = -1;
int max_bias_idx_ = -1;
int min_summand_idx_ = -1;
int max_summand_idx_ = -1;
int min_freezed_output_idx_ = -1;
int max_freezed_output_idx_ = -1;
inline bool IsFused(oneDNNFusedOps op) {
return fused_op_flags_ & (static_cast<int64_t>(op));
}
  inline oneDNNFusedOps StrToEnum(const string& op) {
    CHECK(str_to_enum_.find(op) != str_to_enum_.end())
<< "Error: Unknown post op: " << op;
return str_to_enum_[op];
}
void AllocateTensor(OpKernelContext* context, const ConvFwdPd& conv_prim_desc,
Tensor** bias_tensor) {
DCHECK(bias_tensor);
TensorShape bias_tf_shape;
bias_tf_shape.AddDim(
(conv_prim_desc.bias_desc().get_size() / sizeof(TSCALED_BIAS)));
OP_REQUIRES_OK(context,
context->allocate_temp(DataTypeToEnum<TSCALED_BIAS>::value,
bias_tf_shape, &cached_bias_data_));
*bias_tensor = &cached_bias_data_;
}
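  // The scaled bias is cached under bias_cache_mu_ so that a constant bias
  // is reordered only once across invocations.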
inline bool IsBiasCacheEmpty(OpKernelContext* context)
TF_LOCKS_EXCLUDED(bias_cache_mu_) {
tf_shared_lock lock(bias_cache_mu_);
return (cached_bias_data_.NumElements() == 0);
}
void CacheBias(OpKernelContext* context,
const std::shared_ptr<ConvFwdPd>& conv_fwd_pd,
TSCALED_BIAS* bias_data, const memory* scaled_bias)
TF_LOCKS_EXCLUDED(bias_cache_mu_) {
mutex_lock lock(bias_cache_mu_);
if (cached_bias_data_.NumElements() > 0) {
return;
}
Tensor* bias_tensor_ptr = nullptr;
AllocateTensor(context, *conv_fwd_pd, &bias_tensor_ptr);
void* cached_bias_data = const_cast<void*>(
static_cast<const void*>(bias_tensor_ptr->flat<TSCALED_BIAS>().data()));
size_t cached_bias_data_size = scaled_bias->get_desc().get_size();
memcpy(cached_bias_data, bias_data, cached_bias_data_size);
}
TSCALED_BIAS* GetCachedBias(OpKernelContext* context)
TF_LOCKS_EXCLUDED(bias_cache_mu_) {
tf_shared_lock lock(bias_cache_mu_);
const Tensor& cached_bias_data = cached_bias_data_;
return static_cast<TSCALED_BIAS*>(const_cast<TSCALED_BIAS*>(
cached_bias_data.flat<TSCALED_BIAS>().data()));
}
};
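// Fused 3D convolution: parses the `fused_ops` attribute and enables the
// matching bias/add/activation post ops on the base MklConvOp.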
template <typename Device, typename Tinput, typename Tfilter, typename Tbias,
typename Toutput, typename Ttemp_output, typename Tpadding,
bool pad_enabled, bool native_format>
class MklFusedConv3DOp
: public MklConvOp<Device, Tinput, Tfilter, Tbias, Toutput, Ttemp_output,
Tpadding, false, false, false, native_format> {
public:
explicit MklFusedConv3DOp(OpKernelConstruction* context)
: MklConvOp<Device, Tinput, Tfilter, Tbias, Toutput, Ttemp_output,
Tpadding, false, false, false, native_format>(context) {
std::vector<string> fused_ops;
OP_REQUIRES_OK(context, context->GetAttr("fused_ops", &fused_ops));
int num_args;
OP_REQUIRES_OK(context, context->GetAttr("num_args", &num_args));
std::vector<int> padding_list;
OP_REQUIRES_OK(context, context->GetAttr("padding_list", &padding_list));
if (padding_list.empty()) {
OP_REQUIRES(
context, !fused_ops.empty(),
absl::InvalidArgumentError("Fused Conv3D must have at least one "
"fused op when Pad is not fused."));
      const bool has_bias_add = std::find(fused_ops.begin(), fused_ops.end(),
                                          "BiasAdd") != fused_ops.end();
      const bool has_add = std::find(fused_ops.begin(), fused_ops.end(),
                                     "Add") != fused_ops.end();
      if (has_bias_add && !has_add) {
        OP_REQUIRES(context, num_args == 1,
                    absl::InvalidArgumentError(
                        "Fused Conv3D must have one extra argument: bias."));
      } else if (has_bias_add && has_add) {
        OP_REQUIRES(
            context, num_args == 2,
            absl::InvalidArgumentError(
                "Fused Conv3D must have two extra arguments: bias and add."));
      }
}
if (fused_ops == std::vector<string>{"BiasAdd"}) {
this->set_fuse_biasadd(true);
} else if (fused_ops == std::vector<string>{"BiasAdd", "LeakyRelu"}) {
this->set_fuse_biasadd(true);
float leakyrelu_alpha;
OP_REQUIRES_OK(context,
context->GetAttr("leakyrelu_alpha", &leakyrelu_alpha));
this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu,
leakyrelu_alpha);
} else if (fused_ops == std::vector<string>{"BiasAdd", "Mish"}) {
this->set_fuse_biasadd(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_mish);
} else if (fused_ops == std::vector<string>{"BiasAdd", "Relu"}) {
this->set_fuse_biasadd(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu);
} else if (fused_ops == std::vector<string>{"BiasAdd", "Relu6"}) {
this->set_fuse_biasadd(true);
this->SET_FUSE_ACTIVATION_FOR_RELU6;
} else if (fused_ops == std::vector<string>{"BiasAdd", "Elu"}) {
this->set_fuse_biasadd(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_elu, 1.0);
} else if (fused_ops == std::vector<string>{"BiasAdd", "Add"}) {
this->set_fuse_biasadd(true);
this->set_fuse_add(true);
} else if (fused_ops == std::vector<string>{"BiasAdd", "Add", "Relu"}) {
this->set_fuse_biasadd(true);
this->set_fuse_add(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu);
} else if (fused_ops == std::vector<string>{"BiasAdd", "Add", "Relu6"}) {
this->set_fuse_biasadd(true);
this->set_fuse_add(true);
this->SET_FUSE_ACTIVATION_FOR_RELU6;
} else if (fused_ops == std::vector<string>{"BiasAdd", "Add", "Elu"}) {
this->set_fuse_biasadd(true);
this->set_fuse_add(true);
this->set_fuse_activation(true, dnnl::algorithm::eltwise_elu, 1.0);
} else if (fused_ops ==
std::vector<string>{"BiasAdd", "Add", "LeakyRelu"}) {
this->set_fuse_biasadd(true);
this->set_fuse_add(true);
float leakyrelu_alpha;
OP_REQUIRES_OK(context,
context->GetAttr("leakyrelu_alpha", &leakyrelu_alpha));
this->set_fuse_activation(true, dnnl::algorithm::eltwise_relu,
leakyrelu_alpha);
} else {
if (padding_list.empty()) {
OP_REQUIRES(context, false,
absl::UnimplementedError(
absl::StrCat("Fusion is not implemented: [",
absl::StrJoin(fused_ops, ","), "]")));
}
}
}
virtual ~MklFusedConv3DOp() {}
};
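// Kernel registration. The macros below stamp out CPU registrations for all
// supported (input, bias, output, summand) type combinations; LABEL,
// TEMPLATE_ARGS and the type-constraint macros are redefined between groups
// to switch between placeholder NoOp registrations and the real MKL kernels.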
#define REGISTER_MKL_KERNEL(op, kernel, input_type, bias_type, output_type, \
summand_type, is_depthwise, legacy_fused_ops, \
num_fused_ops) \
REGISTER_KERNEL_BUILDER( \
Name(op) \
.Device(DEVICE_CPU) \
.TypeConstraint<input_type>("Tinput") \
.TypeConstraint<qint8>("Tfilter") BIAS_TYPE_CONSTRAINT(bias_type) \
SUMMAND_TYPE_CONSTRAINT(summand_type) \
.TypeConstraint<output_type>("out_type") LABEL, \
kernel TEMPLATE_ARGS(CPUDevice, input_type, bias_type, output_type, \
summand_type, is_depthwise, legacy_fused_ops, \
num_fused_ops));
#define REGISTER_MKL_KERNEL_ALL_INPUT_TYPES( \
op, kernel, bias_type, output_type, summand_type, is_depthwise, \
legacy_fused_ops, num_fused_ops) \
REGISTER_MKL_KERNEL(op, kernel, qint8, bias_type, output_type, summand_type, \
is_depthwise, legacy_fused_ops, num_fused_ops); \
REGISTER_MKL_KERNEL(op, kernel, quint8, bias_type, output_type, \
summand_type, is_depthwise, legacy_fused_ops, \
num_fused_ops);
#define REGISTER_MKL_KERNEL_ALL_BIAS_TYPES( \
op, kernel, input_type, output_type, summand_type, is_depthwise, \
legacy_fused_ops, num_fused_ops) \
REGISTER_MKL_KERNEL(op, kernel, input_type, qint32, output_type, \
summand_type, is_depthwise, legacy_fused_ops, \
num_fused_ops); \
REGISTER_MKL_KERNEL(op, kernel, input_type, float, output_type, \
summand_type, is_depthwise, legacy_fused_ops, \
num_fused_ops);
#define REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES( \
op, kernel, output_type, summand_type, is_depthwise, legacy_fused_ops, \
num_fused_ops) \
REGISTER_MKL_KERNEL_ALL_INPUT_TYPES(op, kernel, qint32, output_type, \
summand_type, is_depthwise, \
legacy_fused_ops, num_fused_ops); \
REGISTER_MKL_KERNEL_ALL_INPUT_TYPES(op, kernel, float, output_type, \
summand_type, is_depthwise, \
legacy_fused_ops, num_fused_ops);
#define LABEL
#define TEMPLATE_ARGS(CPUDevice, input_type, bias_type, output_type, \
                      summand_type, is_depthwise, legacy_fused_ops,   \
                      num_fused_ops)
#define BIAS_TYPE_CONSTRAINT(bias_type)
#define SUMMAND_TYPE_CONSTRAINT(summand_type)
REGISTER_MKL_KERNEL("QuantizedConv2D", NoOp, quint8, float, qint32, qint32,
false, false, false);
REGISTER_MKL_KERNEL_ALL_INPUT_TYPES("QuantizedConv2DWithBias", NoOp, float,
qint32, qint32, false, false, false);
REGISTER_MKL_KERNEL_ALL_INPUT_TYPES("QuantizedConv2DWithBiasAndRelu", NoOp,
float, qint32, qint32, false, false, false);
REGISTER_MKL_KERNEL("QuantizedConv2DWithBiasSumAndRelu", NoOp, quint8, float,
qint32, qint32, false, false, false);
REGISTER_MKL_KERNEL("QuantizedConv2DAndRequantize", NoOp, quint8, float, qint8,
qint8, false, false, false);
REGISTER_MKL_KERNEL("QuantizedConv2DPerChannel", NoOp, quint8, float, qint32,
qint32, false, false, false);
REGISTER_MKL_KERNEL("QuantizedConv2DAndRelu", NoOp, quint8, float, qint32,
qint32, false, false, false);
REGISTER_MKL_KERNEL("QuantizedConv2DAndReluAndRequantize", NoOp, quint8, float,
quint8, quint8, false, false, false);
REGISTER_MKL_KERNEL("QuantizedDepthwiseConv2D", NoOp, quint8, float, qint32,
qint32, false, false, false);
REGISTER_MKL_KERNEL("QuantizedDepthwiseConv2DWithBias", NoOp, quint8, float,
qint32, qint32, false, false, false);
REGISTER_MKL_KERNEL("QuantizedDepthwiseConv2DWithBiasAndRelu", NoOp, quint8,
float, qint32, qint32, false, false, false);
#undef SUMMAND_TYPE_CONSTRAINT
#undef BIAS_TYPE_CONSTRAINT
#define BIAS_TYPE_CONSTRAINT(bias_type) .TypeConstraint<bias_type>("Tbias")
#define SUMMAND_TYPE_CONSTRAINT(summand_type)
REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES(
"QuantizedConv2DWithBiasAndRequantize", NoOp, qint8, qint8, false, false,
false);
REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES(
"QuantizedConv2DWithBiasAndReluAndRequantize", NoOp, quint8, quint8, false,
false, false);
REGISTER_MKL_KERNEL_ALL_BIAS_TYPES(
"QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize", NoOp, quint8,
quint8, quint8, false, false, false);
#undef SUMMAND_TYPE_CONSTRAINT
#define SUMMAND_TYPE_CONSTRAINT(summand_type) \
.TypeConstraint<summand_type>("Tsummand")
REGISTER_MKL_KERNEL_ALL_BIAS_TYPES(
"QuantizedConv2DWithBiasSumAndReluAndRequantize", NoOp, quint8, quint8,
quint8, false, false, false);
REGISTER_MKL_KERNEL_ALL_BIAS_TYPES(
"QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", NoOp, quint8,
quint8, qint8, false, false, false);
#undef SUMMAND_TYPE_CONSTRAINT
#undef BIAS_TYPE_CONSTRAINT
#undef TEMPLATE_ARGS
#undef LABEL
#define TEMPLATE_ARGS(CPUDevice, input_type, bias_type, output_type, \
summand_type, is_depthwise, legacy_fused_ops, \
num_fused_ops) \
<CPUDevice, input_type, bias_type, output_type, summand_type, is_depthwise, \
legacy_fused_ops, num_fused_ops>
#define BIAS_TYPE_CONSTRAINT(bias_type)
#define SUMMAND_TYPE_CONSTRAINT(summand_type)
#define LABEL .Label(mkl_op_registry::kMklQuantizedOpLabel)
REGISTER_MKL_KERNEL_ALL_INPUT_TYPES("_MklQuantizedConv2D", MklQuantizedConvOp,
float, qint32, qint32, false,
quantized_fusions::none, 0);
REGISTER_MKL_KERNEL_ALL_INPUT_TYPES("_MklQuantizedConv2DPerChannel",
MklQuantizedConvOp, float, qint32, qint32,
false, quantized_fusions::none, 0);
REGISTER_MKL_KERNEL_ALL_INPUT_TYPES("_MklQuantizedConv2DWithBias",
MklQuantizedConvOp, float, qint32, qint32,
false, quantized_fusions::bias, 1);
REGISTER_MKL_KERNEL_ALL_INPUT_TYPES("_MklQuantizedConv2DWithBiasAndRelu",
MklQuantizedConvOp, float, qint32, qint32,
false, quantized_fusions::bias_relu, 2);
REGISTER_MKL_KERNEL("_MklQuantizedConv2DWithBiasSumAndRelu", MklQuantizedConvOp,
quint8, float, qint32, qint32, false,
quantized_fusions::bias_sum_relu, 3);
REGISTER_MKL_KERNEL("_MklQuantizedConv2DAndRequantize", MklQuantizedConvOp,
quint8, float, qint8, qint8, false,
quantized_fusions::requantize, 1);
REGISTER_MKL_KERNEL("_MklQuantizedConv2DAndRelu", MklQuantizedConvOp, quint8,
float, qint32, qint32, false, quantized_fusions::relu, 1);
REGISTER_MKL_KERNEL("_MklQuantizedConv2DAndReluAndRequantize",
MklQuantizedConvOp, quint8, float, quint8, quint8, false,
quantized_fusions::relu_requantize, 2);
REGISTER_MKL_KERNEL("_MklQuantizedDepthwiseConv2D", MklQuantizedConvOp, quint8,
float, qint32, qint32, true, quantized_fusions::none, 0);
REGISTER_MKL_KERNEL("_MklQuantizedDepthwiseConv2DWithBias", MklQuantizedConvOp,
quint8, float, qint32, qint32, true,
quantized_fusions::bias, 1);
REGISTER_MKL_KERNEL("_MklQuantizedDepthwiseConv2DWithBiasAndRelu",
MklQuantizedConvOp, quint8, float, qint32, qint32, true,
quantized_fusions::bias_relu, 2);
#undef SUMMAND_TYPE_CONSTRAINT
#undef BIAS_TYPE_CONSTRAINT
#define BIAS_TYPE_CONSTRAINT(bias_type) .TypeConstraint<bias_type>("Tbias")
#define SUMMAND_TYPE_CONSTRAINT(summand_type)
REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES(
"_MklQuantizedConv2DWithBiasAndRequantize", MklQuantizedConvOp, qint8,
qint8, false, quantized_fusions::bias_requantize, 2);
REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES(
"_MklQuantizedConv2DWithBiasAndReluAndRequantize", MklQuantizedConvOp,
quint8, quint8, false, quantized_fusions::bias_relu_requantize, 3);
REGISTER_MKL_KERNEL_ALL_BIAS_TYPES(
"_MklQuantizedDepthwiseConv2DWithBiasAndReluAndRequantize",
MklQuantizedConvOp, quint8, quint8, quint8, true,
quantized_fusions::bias_relu_requantize, 3);
#undef LABEL
#define LABEL
REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedConv2D",
MklQuantizedConvOp, qint32, qint32,
false, quantized_fusions::none, -1)
REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedDepthwiseConv2D",
MklQuantizedConvOp, qint32, qint32,
true, quantized_fusions::none, -1)
#undef LABEL
#define LABEL .Label(mkl_op_registry::kMklQuantizedOpLabel)
#undef SUMMAND_TYPE_CONSTRAINT
#define SUMMAND_TYPE_CONSTRAINT(summand_type) \
.TypeConstraint<summand_type>("Tsummand")
REGISTER_MKL_KERNEL_ALL_BIAS_TYPES(
"_MklQuantizedConv2DWithBiasSumAndReluAndRequantize", MklQuantizedConvOp,
quint8, quint8, quint8, false, quantized_fusions::bias_sum_relu_requantize,
4);
REGISTER_MKL_KERNEL_ALL_BIAS_TYPES(
"_MklQuantizedConv2DWithBiasSignedSumAndReluAndRequantize",
MklQuantizedConvOp, quint8, quint8, qint8, false,
quantized_fusions::bias_sum_relu_requantize, 4);
#undef LABEL
#define LABEL
REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedConv2D",
MklQuantizedConvOp, qint8, qint8,
false, quantized_fusions::none,
-1);
REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedConv2D",
MklQuantizedConvOp, quint8, qint8,
false, quantized_fusions::none,
-1);
REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedConv2D",
MklQuantizedConvOp, quint8, quint8,
false, quantized_fusions::none,
-1);
REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedConv2D",
MklQuantizedConvOp, qint8, quint8,
false, quantized_fusions::none,
-1);
REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedDepthwiseConv2D",
MklQuantizedConvOp, qint8, qint8,
true, quantized_fusions::none, -1);
REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedDepthwiseConv2D",
MklQuantizedConvOp, quint8, qint8,
true, quantized_fusions::none, -1);
REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedDepthwiseConv2D",
MklQuantizedConvOp, quint8, quint8,
true, quantized_fusions::none, -1);
REGISTER_MKL_KERNEL_ALL_INPUT_AND_BIAS_TYPES("_FusedQuantizedDepthwiseConv2D",
MklQuantizedConvOp, qint8, quint8,
true, quantized_fusions::none, -1);
#undef LABEL
#undef SUMMAND_TYPE_CONSTRAINT
#undef BIAS_TYPE_CONSTRAINT
#undef TEMPLATE_ARGS
#define REGISTER_NO_OP_CPU_2D_DEPTHWISE(T) \
REGISTER_KERNEL_BUILDER(Name("_FusedDepthwiseConv2dNative") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T"), \
NoOp);
TF_CALL_float(REGISTER_NO_OP_CPU_2D_DEPTHWISE);
TF_CALL_bfloat16(REGISTER_NO_OP_CPU_2D_DEPTHWISE);
TF_CALL_half(REGISTER_NO_OP_CPU_2D_DEPTHWISE);
#define REGISTER_MKL_CPU_2D(T) \
REGISTER_KERNEL_BUILDER( \
Name("_MklConv2D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklConvOp<CPUDevice, T, T, T, T, T, int32, false, false, false, false>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklConv2DWithBias") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklConvOp<CPUDevice, T, T, T, T, T, int32, true, false, false, false>); \
REGISTER_KERNEL_BUILDER( \
Name("__MklDummyConv2DWithBias") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklDummyOp<CPUDevice, T>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklPadWithConv2D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<int32>("Tpaddings") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklConvOp<CPUDevice, T, T, T, T, T, int32, false, true, false, false>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklPadWithConv2D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<int64_t>("Tpaddings") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklConvOp<CPUDevice, T, T, T, T, T, int64, false, true, false, false>); \
REGISTER_KERNEL_BUILDER( \
Name("__MklDummyPadWithConv2D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<int32>("Tpaddings") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklDummyOp<CPUDevice, T>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativeConv2D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklConvOp<CPUDevice, T, T, T, T, T, int32, false, false, false, true>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativeConv2DWithBias") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklConvOp<CPUDevice, T, T, T, T, T, int32, true, false, false, true>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativePadWithConv2D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<int32>("Tpaddings") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklConvOp<CPUDevice, T, T, T, T, T, int32, false, true, false, true>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativePadWithConv2D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<int64_t>("Tpaddings") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklConvOp<CPUDevice, T, T, T, T, T, int64, false, true, false, true>);
TF_CALL_float(REGISTER_MKL_CPU_2D);
TF_CALL_bfloat16(REGISTER_MKL_CPU_2D);
TF_CALL_half(REGISTER_MKL_CPU_2D);
#define REGISTER_MKL_CPU_2D_DEPTHWISE(T) \
REGISTER_KERNEL_BUILDER( \
Name("_MklDepthwiseConv2dNative") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklConvOp<CPUDevice, T, T, T, T, T, int32, false, false, true, false>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklFusedDepthwiseConv2dNative") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklFusedDepthwiseConvOp<CPUDevice, T, T, T, T, T, int32, false, true, \
true, false>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativeFusedDepthwiseConv2dNative") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklFusedDepthwiseConvOp<CPUDevice, T, T, T, T, T, int32, false, true, \
true, true>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativeDepthwiseConv2dNative") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklConvOp<CPUDevice, T, T, T, T, T, int32, false, false, true, true>);
TF_CALL_float(REGISTER_MKL_CPU_2D_DEPTHWISE);
TF_CALL_bfloat16(REGISTER_MKL_CPU_2D_DEPTHWISE);
TF_CALL_half(REGISTER_MKL_CPU_2D_DEPTHWISE);
#define REGISTER_MKL_CPU_2D_FUSED(T) \
REGISTER_KERNEL_BUILDER( \
Name("_MklFusedConv2D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklFusedConvOp<CPUDevice, T, T, T, T, T, int32, false, false>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklPadWithFusedConv2D") \
.Device(DEVICE_CPU) \
.TypeConstraint<int32>("Tpaddings") \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklFusedConvOp<CPUDevice, T, T, T, T, T, int32, true, false>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklPadWithFusedConv2D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<int64_t>("Tpaddings") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklFusedConvOp<CPUDevice, T, T, T, T, T, int64, true, false>); \
REGISTER_KERNEL_BUILDER( \
Name("__MklDummyPadWithFusedConv2D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<int32>("Tpaddings") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklDummyOp<CPUDevice, T>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativeFusedConv2D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklFusedConvOp<CPUDevice, T, T, T, T, T, int32, false, true>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativePadWithFusedConv2D") \
.Device(DEVICE_CPU) \
.TypeConstraint<int32>("Tpaddings") \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklFusedConvOp<CPUDevice, T, T, T, T, T, int32, true, true>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativePadWithFusedConv2D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<int64_t>("Tpaddings") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklFusedConvOp<CPUDevice, T, T, T, T, T, int64, true, true>);
TF_CALL_float(REGISTER_MKL_CPU_2D_FUSED);
TF_CALL_bfloat16(REGISTER_MKL_CPU_2D_FUSED);
TF_CALL_half(REGISTER_MKL_CPU_2D_FUSED);
#define REGISTER_MKL_CPU_3D(T) \
REGISTER_KERNEL_BUILDER( \
Name("_MklConv3D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklConvOp<CPUDevice, T, T, T, T, T, int32, false, false, false, false>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativeConv3D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklConvOp<CPUDevice, T, T, T, T, T, int32, false, false, false, true>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativeFusedConv3D") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklFusedConv3DOp<CPUDevice, T, T, T, T, T, int32, false, true>);
TF_CALL_float(REGISTER_MKL_CPU_3D);
TF_CALL_bfloat16(REGISTER_MKL_CPU_3D);
TF_CALL_half(REGISTER_MKL_CPU_3D);
#undef APPEND_DEPTHWISE
#undef APPEND_ELTWISE
#undef GET_DATA_TYPE
#undef SET_FUSE_ACTIVATION_FOR_RELU6
#undef SET_MKL_LAYOUT
#undef OUTPUT_SCALE_DCHECK
#undef TSCALED_BIAS
#undef SCALE
#undef SUMMAND_SCALE_U8
#undef SUMMAND_SCALE_S8
}
#endif | #include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/mkl_util.h"
namespace tensorflow {
struct Conv2DDimensions {
Conv2DDimensions(int n, int h, int w, int c, int fc, int fh, int fw)
: input_batches(n),
input_height(h),
input_width(w),
input_depth(c),
filter_count(fc),
filter_height(fh),
filter_width(fw) {}
int input_batches;
int input_height;
int input_width;
int input_depth;
int filter_count;
int filter_height;
int filter_width;
};
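// Helpers that build random input/filter/output tensors (and the size
// tensors the backprop ops consume) for a given convolution shape.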
static Tensor GetRandomTensor(const TensorShape& shape) {
Tensor tensor(DT_FLOAT, TensorShape(shape));
tensor.flat<float>() = tensor.flat<float>().setRandom();
return tensor;
}
static Tensor GetRandomInputTensor(const Conv2DDimensions& dims) {
return GetRandomTensor({dims.input_batches, dims.input_height,
dims.input_width, dims.input_depth});
}
static Tensor GetRandomFilterTensor(const Conv2DDimensions& dims) {
return GetRandomTensor({dims.filter_height, dims.filter_width,
dims.input_depth, dims.filter_count});
}
static Tensor GetRandomOutputTensor(const Conv2DDimensions& dims) {
return GetRandomTensor({dims.input_batches, dims.input_height,
dims.input_width, dims.filter_count});
}
static Tensor GetInputSizesTensor(const Conv2DDimensions& dims) {
return test::AsTensor<int32>({dims.input_batches, dims.input_height,
dims.input_width, dims.input_depth});
}
static Tensor GetFilterSizesTensor(const Conv2DDimensions& dims) {
return test::AsTensor<int32>({dims.filter_height, dims.filter_width,
dims.input_depth, dims.filter_count});
}
static Graph* DefaultConv2D(const Conv2DDimensions& dims) {
auto* graph = new Graph(OpRegistry::Global());
Tensor input_t = GetRandomInputTensor(dims);
Tensor filter_t = GetRandomFilterTensor(dims);
Node* input = test::graph::Constant(graph, input_t, "input");
Node* filter = test::graph::Constant(graph, filter_t, "filter");
Node* conv2d;
TF_CHECK_OK(NodeBuilder(graph->NewName("conv_2d"), "Conv2D")
.Input(input)
.Input(filter)
.Attr("T", DT_FLOAT)
.Attr("strides", {1, 1, 1, 1})
.Attr("padding", "SAME")
.Finalize(graph, &conv2d));
return graph;
}
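// Same graph built with the _MklConv2D kernel; layout-dependent MKL ops take
// an extra dummy Mkl-metadata input per regular input.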
static Graph* MklConv2D(const Conv2DDimensions& dims) {
auto* graph = new Graph(OpRegistry::Global());
Tensor input_t = GetRandomInputTensor(dims);
Tensor filter_t = GetRandomFilterTensor(dims);
Node* input = test::graph::Constant(graph, input_t, "input");
Node* filter = test::graph::Constant(graph, filter_t, "filter");
Node* not_mkl_shape =
test::graph::Constant(graph, GetMklMetaTensor(), "not_mkl");
Node* conv2d;
TF_CHECK_OK(NodeBuilder(graph->NewName("mkl_conv_2d"), "_MklConv2D")
.Input(input)
.Input(filter)
.Input(not_mkl_shape)
.Input(not_mkl_shape)
.Attr("T", DT_FLOAT)
.Attr("strides", {1, 1, 1, 1})
.Attr("padding", "SAME")
.Attr("_kernel", "MklOp")
.Finalize(graph, &conv2d));
return graph;
}
static Graph* DefaultConv2DBwdInput(const Conv2DDimensions& dims) {
auto* graph = new Graph(OpRegistry::Global());
Tensor input_sizes_t = GetInputSizesTensor(dims);
Tensor filter_t = GetRandomFilterTensor(dims);
Tensor out_backprop_t = GetRandomOutputTensor(dims);
Node* input_sizes =
test::graph::Constant(graph, input_sizes_t, "input_sizes");
Node* filter = test::graph::Constant(graph, filter_t, "filter");
Node* out_backprop =
test::graph::Constant(graph, out_backprop_t, "out_backprop");
Node* conv2d_bwd_input;
TF_CHECK_OK(
NodeBuilder(graph->NewName("conv_2d_bwd_input"), "Conv2DBackpropInput")
.Input(input_sizes)
.Input(filter)
.Input(out_backprop)
.Attr("T", DT_FLOAT)
.Attr("strides", {1, 1, 1, 1})
.Attr("padding", "SAME")
.Finalize(graph, &conv2d_bwd_input));
return graph;
}
static Graph* MklConv2DBwdInput(const Conv2DDimensions& dims) {
auto* graph = new Graph(OpRegistry::Global());
Tensor input_sizes_t = GetInputSizesTensor(dims);
Tensor filter_t = GetRandomFilterTensor(dims);
Tensor out_backprop_t = GetRandomOutputTensor(dims);
Node* input_sizes =
test::graph::Constant(graph, input_sizes_t, "input_sizes");
Node* filter = test::graph::Constant(graph, filter_t, "filter");
Node* out_backprop =
test::graph::Constant(graph, out_backprop_t, "out_backprop");
Node* not_mkl_shape =
test::graph::Constant(graph, GetMklMetaTensor(), "not_mkl");
Node* conv2d_bwd_input;
TF_CHECK_OK(NodeBuilder(graph->NewName("conv_2d_bwd_input"),
"_MklConv2DBackpropInput")
.Input(input_sizes)
.Input(filter)
.Input(out_backprop)
.Input(not_mkl_shape)
.Input(not_mkl_shape)
.Input(not_mkl_shape)
.Attr("T", DT_FLOAT)
.Attr("strides", {1, 1, 1, 1})
.Attr("padding", "SAME")
.Attr("_kernel", "MklOp")
.Finalize(graph, &conv2d_bwd_input));
return graph;
}
static Graph* DefaultConv2DBwdFilter(const Conv2DDimensions& dims) {
auto* graph = new Graph(OpRegistry::Global());
Tensor input_t = GetRandomInputTensor(dims);
Tensor filter_sizes_t = GetFilterSizesTensor(dims);
Tensor filter_t = GetRandomFilterTensor(dims);
Tensor out_backprop_t = GetRandomOutputTensor(dims);
Node* input = test::graph::Constant(graph, input_t, "input");
Node* filter_sizes =
test::graph::Constant(graph, filter_sizes_t, "filter_sizes");
Node* out_backprop =
test::graph::Constant(graph, out_backprop_t, "out_backprop");
Node* conv2d_bwd_filter;
TF_CHECK_OK(
NodeBuilder(graph->NewName("conv_2d_bwd_filter"), "Conv2DBackpropFilter")
.Input(input)
.Input(filter_sizes)
.Input(out_backprop)
.Attr("T", DT_FLOAT)
.Attr("strides", {1, 1, 1, 1})
.Attr("padding", "SAME")
.Finalize(graph, &conv2d_bwd_filter));
return graph;
}
static Graph* MklConv2DBwdFilter(const Conv2DDimensions& dims) {
Graph* graph = new Graph(OpRegistry::Global());
Tensor input_t = GetRandomInputTensor(dims);
Tensor filter_sizes_t = GetFilterSizesTensor(dims);
Tensor filter_t = GetRandomFilterTensor(dims);
Tensor out_backprop_t = GetRandomOutputTensor(dims);
Node* input = test::graph::Constant(graph, input_t, "input");
Node* filter_sizes =
test::graph::Constant(graph, filter_sizes_t, "filter_sizes");
Node* out_backprop =
test::graph::Constant(graph, out_backprop_t, "out_backprop");
Node* not_mkl_shape =
test::graph::Constant(graph, GetMklMetaTensor(), "not_mkl");
Node* conv2d_bwd_filter;
TF_CHECK_OK(NodeBuilder(graph->NewName("conv_2d_bwd_filter"),
"_MklConv2DBackpropFilter")
.Input(input)
.Input(filter_sizes)
.Input(out_backprop)
.Input(not_mkl_shape)
.Input(not_mkl_shape)
.Input(not_mkl_shape)
.Attr("T", DT_FLOAT)
.Attr("strides", {1, 1, 1, 1})
.Attr("padding", "SAME")
.Attr("_kernel", "MklOp")
.Finalize(graph, &conv2d_bwd_filter));
return graph;
}
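// Benchmark plumbing: each BM_* entry instantiates both the default Eigen
// kernel and the corresponding _Mkl* kernel for one convolution shape, and
// reports FLOPs via items processed.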
#define BM_CONCAT(a, b) a##b
#define BM_NAME(p, type, N, H, W, C, FC, FH, FW) \
BM_CONCAT(BM_##p##_##type##_in_##N##_##H##_##W##_##C, _f_##FC##_##FH##_##FW)
#define BM_Conv2DT(kind, N, H, W, C, FC, FH, FW, type, LABEL) \
static void BM_NAME(Conv2D_##kind, type, N, H, W, C, FC, FH, \
FW)(::testing::benchmark::State & state) { \
state.SetLabel(LABEL); \
\
int64 num_computed_elements = (N) * (H) * (W) * (FC); \
int64 flops_per_iter = num_computed_elements * ((C) * (FH) * (FW)); \
\
    Conv2DDimensions dims(N, H, W, C, FC, FH, FW);                          \
test::Benchmark(#type, BM_CONCAT(kind, Conv2D)(dims), \
false) \
.Run(state); \
state.SetItemsProcessed(state.iterations() * flops_per_iter); \
} \
BENCHMARK(BM_NAME(Conv2D_##kind, type, N, H, W, C, FC, FH, FW))
#define BM_Conv2D(N, H, W, C, FC, FH, FW, type, LABEL) \
BM_Conv2DT(Default, N, H, W, C, FC, FH, FW, type, LABEL); \
BM_Conv2DT(Mkl, N, H, W, C, FC, FH, FW, type, LABEL);
#define BM_Conv2DBwdInputT(kind, N, H, W, C, FC, FH, FW, type, LABEL) \
static void BM_NAME(Conv2DBwdInput_##kind, type, N, H, W, C, FC, FH, \
FW)(::testing::benchmark::State & state) { \
state.SetLabel(LABEL); \
\
int64 num_computed_elements = (N) * (H) * (W) * (C); \
int64 flops_per_iter = num_computed_elements * ((C) * (FH) * (FW)); \
\
    Conv2DDimensions dims(N, H, W, C, FC, FH, FW);                          \
test::Benchmark(#type, BM_CONCAT(kind, Conv2DBwdInput)(dims), \
false) \
.Run(state); \
state.SetItemsProcessed(state.iterations() * flops_per_iter); \
} \
BENCHMARK(BM_NAME(Conv2DBwdInput_##kind, type, N, H, W, C, FC, FH, FW))
#define BM_Conv2DBwdInput(N, H, W, C, FC, FH, FW, type, LABEL) \
BM_Conv2DBwdInputT(Default, N, H, W, C, FC, FH, FW, type, LABEL); \
BM_Conv2DBwdInputT(Mkl, N, H, W, C, FC, FH, FW, type, LABEL);
#define BM_Conv2DBwdFilterT(kind, N, H, W, C, FC, FH, FW, type, LABEL) \
static void BM_NAME(Conv2DBwdFilter_##kind, type, N, H, W, C, FC, FH, \
FW)(::testing::benchmark::State & state) { \
state.SetLabel(LABEL); \
\
int64 num_computed_elements = (FH) * (FW) * (C) * (FC); \
int64 flops_per_iter = num_computed_elements * ((N) * (H) * (W)); \
\
    Conv2DDimensions dims(N, H, W, C, FC, FH, FW);                          \
test::Benchmark(#type, BM_CONCAT(kind, Conv2DBwdFilter)(dims), \
false) \
.Run(state); \
state.SetItemsProcessed(state.iterations() * flops_per_iter); \
} \
BENCHMARK(BM_NAME(Conv2DBwdFilter_##kind, type, N, H, W, C, FC, FH, FW))
#define BM_Conv2DBwdFilter(N, H, W, C, FC, FH, FW, type, LABEL) \
BM_Conv2DBwdFilterT(Default, N, H, W, C, FC, FH, FW, type, LABEL); \
BM_Conv2DBwdFilterT(Mkl, N, H, W, C, FC, FH, FW, type, LABEL);
BM_Conv2D(32, 28, 28, 96, 128, 3, 3, cpu, "conv3a_00_3x3");
BM_Conv2D(32, 28, 28, 16, 32, 5, 5, cpu, "conv3a_00_5x5");
BM_Conv2D(32, 28, 28, 128, 192, 3, 3, cpu, "conv3_00_3x3");
BM_Conv2D(32, 28, 28, 32, 96, 5, 5, cpu, "conv3_00_5x5");
BM_Conv2D(32, 14, 14, 96, 204, 3, 3, cpu, "conv4a_00_3x3");
BM_Conv2D(32, 14, 14, 16, 48, 5, 5, cpu, "conv4a_00_5x5");
BM_Conv2D(32, 14, 14, 112, 224, 3, 3, cpu, "conv4b_00_3x3");
BM_Conv2DBwdInput(32, 28, 28, 96, 128, 3, 3, cpu, "conv3a_00_3x3");
BM_Conv2DBwdInput(32, 28, 28, 16, 32, 5, 5, cpu, "conv3a_00_5x5");
BM_Conv2DBwdInput(32, 28, 28, 128, 192, 3, 3, cpu, "conv3_00_3x3");
BM_Conv2DBwdInput(32, 28, 28, 32, 96, 5, 5, cpu, "conv3_00_5x5");
BM_Conv2DBwdInput(32, 14, 14, 96, 204, 3, 3, cpu, "conv4a_00_3x3");
BM_Conv2DBwdInput(32, 14, 14, 16, 48, 5, 5, cpu, "conv4a_00_5x5");
BM_Conv2DBwdInput(32, 14, 14, 112, 224, 3, 3, cpu, "conv4b_00_3x3");
BM_Conv2DBwdFilter(32, 28, 28, 96, 128, 3, 3, cpu, "conv3a_00_3x3");
BM_Conv2DBwdFilter(32, 28, 28, 16, 32, 5, 5, cpu, "conv3a_00_5x5");
BM_Conv2DBwdFilter(32, 28, 28, 128, 192, 3, 3, cpu, "conv3_00_3x3");
BM_Conv2DBwdFilter(32, 28, 28, 32, 96, 5, 5, cpu, "conv3_00_5x5");
BM_Conv2DBwdFilter(32, 14, 14, 96, 204, 3, 3, cpu, "conv4a_00_3x3");
BM_Conv2DBwdFilter(32, 14, 14, 16, 48, 5, 5, cpu, "conv4a_00_5x5");
BM_Conv2DBwdFilter(32, 14, 14, 112, 224, 3, 3, cpu, "conv4b_00_3x3");
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_conv_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_conv_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
beb94efe-689c-4864-a58d-c462f72b8745 | cpp | google/leveldb | filter_block | table/filter_block.cc | table/filter_block_test.cc | #include "table/filter_block.h"
#include "leveldb/filter_policy.h"
#include "util/coding.h"
namespace leveldb {
static const size_t kFilterBaseLg = 11;
static const size_t kFilterBase = 1 << kFilterBaseLg;
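// A new filter is generated for every kFilterBase (2KB) of data-block offset
// space.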
FilterBlockBuilder::FilterBlockBuilder(const FilterPolicy* policy)
: policy_(policy) {}
void FilterBlockBuilder::StartBlock(uint64_t block_offset) {
uint64_t filter_index = (block_offset / kFilterBase);
assert(filter_index >= filter_offsets_.size());
while (filter_index > filter_offsets_.size()) {
GenerateFilter();
}
}
void FilterBlockBuilder::AddKey(const Slice& key) {
Slice k = key;
start_.push_back(keys_.size());
keys_.append(k.data(), k.size());
}
Slice FilterBlockBuilder::Finish() {
if (!start_.empty()) {
GenerateFilter();
}
const uint32_t array_offset = result_.size();
for (size_t i = 0; i < filter_offsets_.size(); i++) {
PutFixed32(&result_, filter_offsets_[i]);
}
PutFixed32(&result_, array_offset);
result_.push_back(kFilterBaseLg);
return Slice(result_);
}
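// Builds a filter from the keys accumulated since the last flush and records
// its byte offset; an empty key set still appends an offset entry so block
// offsets keep mapping onto filter indices.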
void FilterBlockBuilder::GenerateFilter() {
const size_t num_keys = start_.size();
if (num_keys == 0) {
filter_offsets_.push_back(result_.size());
return;
}
start_.push_back(keys_.size());
tmp_keys_.resize(num_keys);
for (size_t i = 0; i < num_keys; i++) {
const char* base = keys_.data() + start_[i];
size_t length = start_[i + 1] - start_[i];
tmp_keys_[i] = Slice(base, length);
}
filter_offsets_.push_back(result_.size());
policy_->CreateFilter(&tmp_keys_[0], static_cast<int>(num_keys), &result_);
tmp_keys_.clear();
keys_.clear();
start_.clear();
}
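// The filter block layout is: [filter 0] .. [filter N-1], the offset array
// (one fixed32 per filter), the fixed32 offset of that array, and a final
// base-lg byte.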
FilterBlockReader::FilterBlockReader(const FilterPolicy* policy,
const Slice& contents)
: policy_(policy), data_(nullptr), offset_(nullptr), num_(0), base_lg_(0) {
size_t n = contents.size();
if (n < 5) return;
base_lg_ = contents[n - 1];
uint32_t last_word = DecodeFixed32(contents.data() + n - 5);
if (last_word > n - 5) return;
data_ = contents.data();
offset_ = data_ + last_word;
num_ = (n - 5 - last_word) / 4;
}
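// Malformed filter data is treated conservatively: on any inconsistency the
// reader reports a potential match rather than a definite miss.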
bool FilterBlockReader::KeyMayMatch(uint64_t block_offset, const Slice& key) {
uint64_t index = block_offset >> base_lg_;
if (index < num_) {
uint32_t start = DecodeFixed32(offset_ + index * 4);
uint32_t limit = DecodeFixed32(offset_ + index * 4 + 4);
if (start <= limit && limit <= static_cast<size_t>(offset_ - data_)) {
Slice filter = Slice(data_ + start, limit - start);
return policy_->KeyMayMatch(key, filter);
    } else if (start == limit) {
      // Empty filters do not match any keys.
      return false;
}
}
return true;
}
} | #include "table/filter_block.h"
#include "gtest/gtest.h"
#include "leveldb/filter_policy.h"
#include "util/coding.h"
#include "util/hash.h"
#include "util/logging.h"
#include "util/testutil.h"
namespace leveldb {
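// A minimal FilterPolicy for testing: the "filter" is just the concatenated
// 32-bit hashes of the keys it was built from.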
class TestHashFilter : public FilterPolicy {
public:
const char* Name() const override { return "TestHashFilter"; }
void CreateFilter(const Slice* keys, int n, std::string* dst) const override {
for (int i = 0; i < n; i++) {
uint32_t h = Hash(keys[i].data(), keys[i].size(), 1);
PutFixed32(dst, h);
}
}
bool KeyMayMatch(const Slice& key, const Slice& filter) const override {
uint32_t h = Hash(key.data(), key.size(), 1);
for (size_t i = 0; i + 4 <= filter.size(); i += 4) {
if (h == DecodeFixed32(filter.data() + i)) {
return true;
}
}
return false;
}
};
class FilterBlockTest : public testing::Test {
public:
TestHashFilter policy_;
};
TEST_F(FilterBlockTest, EmptyBuilder) {
FilterBlockBuilder builder(&policy_);
Slice block = builder.Finish();
ASSERT_EQ("\\x00\\x00\\x00\\x00\\x0b", EscapeString(block));
FilterBlockReader reader(&policy_, block);
ASSERT_TRUE(reader.KeyMayMatch(0, "foo"));
ASSERT_TRUE(reader.KeyMayMatch(100000, "foo"));
}
TEST_F(FilterBlockTest, SingleChunk) {
FilterBlockBuilder builder(&policy_);
builder.StartBlock(100);
builder.AddKey("foo");
builder.AddKey("bar");
builder.AddKey("box");
builder.StartBlock(200);
builder.AddKey("box");
builder.StartBlock(300);
builder.AddKey("hello");
Slice block = builder.Finish();
FilterBlockReader reader(&policy_, block);
ASSERT_TRUE(reader.KeyMayMatch(100, "foo"));
ASSERT_TRUE(reader.KeyMayMatch(100, "bar"));
ASSERT_TRUE(reader.KeyMayMatch(100, "box"));
ASSERT_TRUE(reader.KeyMayMatch(100, "hello"));
ASSERT_TRUE(reader.KeyMayMatch(100, "foo"));
ASSERT_TRUE(!reader.KeyMayMatch(100, "missing"));
ASSERT_TRUE(!reader.KeyMayMatch(100, "other"));
}
TEST_F(FilterBlockTest, MultiChunk) {
FilterBlockBuilder builder(&policy_);
builder.StartBlock(0);
builder.AddKey("foo");
builder.StartBlock(2000);
builder.AddKey("bar");
builder.StartBlock(3100);
builder.AddKey("box");
builder.StartBlock(9000);
builder.AddKey("box");
builder.AddKey("hello");
Slice block = builder.Finish();
FilterBlockReader reader(&policy_, block);
ASSERT_TRUE(reader.KeyMayMatch(0, "foo"));
ASSERT_TRUE(reader.KeyMayMatch(2000, "bar"));
ASSERT_TRUE(!reader.KeyMayMatch(0, "box"));
ASSERT_TRUE(!reader.KeyMayMatch(0, "hello"));
ASSERT_TRUE(reader.KeyMayMatch(3100, "box"));
ASSERT_TRUE(!reader.KeyMayMatch(3100, "foo"));
ASSERT_TRUE(!reader.KeyMayMatch(3100, "bar"));
ASSERT_TRUE(!reader.KeyMayMatch(3100, "hello"));
ASSERT_TRUE(!reader.KeyMayMatch(4100, "foo"));
ASSERT_TRUE(!reader.KeyMayMatch(4100, "bar"));
ASSERT_TRUE(!reader.KeyMayMatch(4100, "box"));
ASSERT_TRUE(!reader.KeyMayMatch(4100, "hello"));
ASSERT_TRUE(reader.KeyMayMatch(9000, "box"));
ASSERT_TRUE(reader.KeyMayMatch(9000, "hello"));
ASSERT_TRUE(!reader.KeyMayMatch(9000, "foo"));
ASSERT_TRUE(!reader.KeyMayMatch(9000, "bar"));
}
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/table/filter_block.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/table/filter_block_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
464b7a60-847b-4ac6-bf92-ce88e1c0c36b | cpp | tensorflow/tensorflow | conditional_canonicalizer | third_party/xla/xla/service/conditional_canonicalizer.cc | third_party/xla/xla/service/conditional_canonicalizer_test.cc | #include "xla/service/conditional_canonicalizer.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/status_macros.h"
namespace xla {
namespace {
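// Canonicalizes a conditional with a non-tuple result: each branch root is
// wrapped in a one-element tuple, and the conditional itself is replaced by
// a get-tuple-element that extracts the original value.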
absl::Status CanonicalizeNonTupleConditional(HloInstruction* conditional) {
TF_RET_CHECK(conditional->opcode() == HloOpcode::kConditional);
for (auto* branch : conditional->called_computations()) {
HloInstruction* root = branch->root_instruction();
TF_RET_CHECK(!root->shape().IsTuple());
HloInstruction* tuple =
branch->AddInstruction(HloInstruction::CreateTuple({root}));
    branch->set_root_instruction(tuple, /*accept_different_shape=*/true);
}
auto parent = conditional->parent();
const Shape& root_shape = conditional->shape();
auto new_shape = ShapeUtil::MakeTupleShape(absl::MakeSpan(&root_shape, 1));
auto new_conditional =
parent->AddInstruction(conditional->CloneWithNewShape(new_shape));
auto gte = parent->AddInstruction(
HloInstruction::CreateGetTupleElement(root_shape, new_conditional, 0));
TF_RETURN_IF_ERROR(parent->ReplaceInstruction(conditional, gte));
return absl::OkStatus();
}
}
absl::StatusOr<bool> ConditionalCanonicalizer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(
2, "ConditionalCanonicalizer::Run(), before:\n" + module->ToString());
bool changed = false;
for (auto* comp : module->MakeNonfusionComputations(execution_threads)) {
for (auto* inst : comp->MakeInstructionPostOrder()) {
if (inst->opcode() == HloOpcode::kConditional &&
!inst->shape().IsTuple()) {
TF_RETURN_IF_ERROR(CanonicalizeNonTupleConditional(inst));
changed = true;
}
}
}
XLA_VLOG_LINES(
2, "ConditionalCanonicalizer::Run(), after:\n" + module->ToString());
return changed;
}
} | #include "xla/service/conditional_canonicalizer.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_utils.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
#include "xla/util.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
class ConditionalCanonicalizerTest : public HloTestBase {
protected:
ConditionalCanonicalizerTest() {}
};
TEST_F(ConditionalCanonicalizerTest, DenseArrayConditionalRewrite) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule _
true_branch {
true_param = (s32[3,2]) parameter(0)
ROOT root = s32[] constant(0)
}
false_branch {
false_param = (s32[3,2]) parameter(0)
ROOT root = s32[] constant(1)
}
ENTRY entry {
param0 = s32[3,2] parameter(0)
branch = pred[] constant(false)
param_tuple = (s32[3 ,2]) tuple(param0)
ROOT conditional = s32[] conditional(branch, param_tuple, param_tuple),
true_computation=true_branch, false_computation=false_branch
}
)")
.value();
ConditionalCanonicalizer pass;
EXPECT_TRUE(pass.Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::GetTupleElement(op::Conditional()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/conditional_canonicalizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/conditional_canonicalizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cf805915-470c-4ec5-9114-22bba8f23da3 | cpp | tensorflow/tensorflow | io | tensorflow/compiler/mlir/quantization/stablehlo/cc/io.cc | tensorflow/compiler/mlir/quantization/stablehlo/cc/io_test.cc | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h"
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace stablehlo::quantization::io {
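// Returns a unique local temporary file name, or an internal error if the
// environment cannot produce one.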
absl::StatusOr<std::string> GetLocalTmpFileName(tsl::Env* const env) {
std::string tmp_fname{};
if (!env->LocalTempFilename(&tmp_fname)) {
return absl::InternalError("Failed to create tmp file name.");
}
return tmp_fname;
}
absl::StatusOr<std::string> GetLocalTmpFileName() {
return GetLocalTmpFileName(tsl::Env::Default());
}
absl::StatusOr<std::string> CreateTmpDir(tsl::Env* const env) {
TF_ASSIGN_OR_RETURN(std::string tmp_dir, GetLocalTmpFileName(env));
if (!env->RecursivelyCreateDir(tmp_dir).ok()) {
return absl::InternalError(
absl::StrFormat("Failed to create tmp dir: '%s'", tmp_dir));
}
return tmp_dir;
}
absl::StatusOr<std::string> CreateTmpDir() {
return CreateTmpDir(tsl::Env::Default());
}
absl::Status WriteStringToFile(const absl::string_view file_path,
const absl::string_view data) {
auto* env = tsl::Env::Default();
return WriteStringToFile(env, std::string(file_path), data);
}
absl::StatusOr<std::string> ReadFileToString(
const absl::string_view file_path) {
auto* env = tsl::Env::Default();
std::string data{};
absl::Status read_status =
ReadFileToString(env, std::string(file_path), &data);
if (read_status.ok()) {
return data;
} else {
return read_status;
}
}
absl::StatusOr<std::vector<std::string>> ListDirectory(
absl::string_view directory) {
std::vector<std::string> children;
TF_RETURN_IF_ERROR(
tsl::Env::Default()->GetChildren(std::string(directory), &children));
return children;
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h"
#include <cstdint>
#include <fstream>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tsl/platform/env.h"
#include "tsl/platform/file_system.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/types.h"
namespace stablehlo::quantization::io {
namespace {
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::Not;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAre;
using ::tsl::testing::IsOk;
using ::tsl::testing::StatusIs;
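// A tsl::Env stub whose GetFileSystemForFile() always fails, used to
// exercise the error paths of the functions under test.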
class TestEnvBrokenFileSystem : public tsl::Env {
public:
TestEnvBrokenFileSystem() = default;
bool MatchPath(const tsl::string& path, const tsl::string& pattern) override {
return false;
}
void SleepForMicroseconds(int64_t micros) override {}
tsl::string GetRunfilesDir() override { return tsl::string("dummy_path"); }
int32_t GetCurrentThreadId() override { return 0; }
tsl::Thread* StartThread(const tsl::ThreadOptions& thread_options,
const tsl::string& name,
absl::AnyInvocable<void()> fn) override {
return nullptr;
}
bool GetCurrentThreadName(tsl::string* name) override { return false; }
void SchedClosure(absl::AnyInvocable<void()> closure) override {}
void SchedClosureAfter(int64_t micros,
absl::AnyInvocable<void()> closure) override {}
absl::Status LoadDynamicLibrary(const char* library_filename,
void** handle) override {
return absl::OkStatus();
}
absl::Status GetSymbolFromLibrary(void* handle, const char* symbol_name,
void** symbol) override {
return absl::OkStatus();
}
tsl::string FormatLibraryFileName(const tsl::string& name,
const tsl::string& version) override {
return tsl::string("dummy_path");
}
absl::Status GetFileSystemForFile(const std::string& fname,
tsl::FileSystem** result) override {
return absl::InternalError("Broken file system");
}
private:
void GetLocalTempDirectories(std::vector<tsl::string>* list) override {
list->push_back("/tmp");
}
};
class TestEnvBrokenFileSystemAndNoLocalTempDirs
: public TestEnvBrokenFileSystem {
private:
void GetLocalTempDirectories(std::vector<tsl::string>* list) override {}
};
TEST(IoTest, GetLocalTmpFileNameGivesValidFileName) {
absl::StatusOr<std::string> tmp_file_name = GetLocalTmpFileName();
ASSERT_THAT(tmp_file_name, IsOk());
EXPECT_THAT(*tmp_file_name, Not(IsEmpty()));
}
TEST(IoTest, GetLocalTmpFileNameWhenNoTempDirsReturnsInternalError) {
TestEnvBrokenFileSystemAndNoLocalTempDirs broken_env;
absl::StatusOr<std::string> tmp_file_name = GetLocalTmpFileName(&broken_env);
EXPECT_THAT(tmp_file_name,
StatusIs(absl::StatusCode::kInternal,
HasSubstr("Failed to create tmp file name")));
}
TEST(IoTest, CreateTmpDirReturnsValidTmpPath) {
absl::StatusOr<std::string> tmp_dir = CreateTmpDir();
ASSERT_THAT(tmp_dir, IsOk());
auto* const env = tsl::Env::Default();
EXPECT_THAT(env->FileExists(*tmp_dir), IsOk());
}
TEST(IoTest, CreateTmpDirWhenInvalidPathReturnsInternalError) {
TestEnvBrokenFileSystem test_env{};
absl::StatusOr<std::string> tmp_dir = CreateTmpDir(&test_env);
EXPECT_THAT(tmp_dir, StatusIs(absl::StatusCode::kInternal,
HasSubstr("Failed to create tmp dir")));
}
TEST(IoTest, WriteStringToFile) {
const std::string dst_file_path =
absl::StrCat(testing::TempDir(), "/tmp_file");
const absl::Status write_status =
WriteStringToFile(dst_file_path, "test_string");
ASSERT_THAT(write_status, IsOk());
auto* const env = tsl::Env::Default();
ASSERT_THAT(env->FileExists(dst_file_path), IsOk());
std::string data{};
ASSERT_THAT(tsl::ReadFileToString(env, dst_file_path, &data), IsOk());
EXPECT_THAT(data, Eq("test_string"));
}
TEST(IoTest, ReadFileToString) {
const std::string src_file_path =
absl::StrCat(testing::TempDir(), "/tmp_file");
{
std::ofstream ofs(src_file_path);
ofs << "test_string";
}
const absl::StatusOr<std::string> read_status =
ReadFileToString(src_file_path);
ASSERT_THAT(read_status, IsOk());
EXPECT_THAT(*read_status, Eq("test_string"));
}
TEST(IoTest, ListChildrenInDirectory) {
absl::StatusOr<std::string> tmp_dir = CreateTmpDir();
ASSERT_THAT(tmp_dir, IsOk());
auto* const env = tsl::Env::Default();
EXPECT_THAT(env->FileExists(*tmp_dir), IsOk());
ASSERT_THAT(
WriteStringToFile(absl::StrCat(*tmp_dir, "/tmp_file1"), "test_string"),
IsOk());
ASSERT_THAT(
WriteStringToFile(absl::StrCat(*tmp_dir, "/tmp_file2"), "test_string"),
IsOk());
ASSERT_THAT(env->RecursivelyCreateDir(absl::StrCat(*tmp_dir, "/subdir")),
IsOk());
absl::StatusOr<std::vector<std::string>> children = ListDirectory(*tmp_dir);
EXPECT_THAT(children, IsOk());
EXPECT_THAT(children.value(), SizeIs(3));
EXPECT_THAT(children.value(),
UnorderedElementsAre("subdir", "tmp_file1", "tmp_file2"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/io.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/io_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7aae7bdd-3782-4a7a-a22a-065ac38df147 | cpp | google/quiche | quic_unacked_packet_map | quiche/quic/core/quic_unacked_packet_map.cc | quiche/quic/core/quic_unacked_packet_map_test.cc | #include "quiche/quic/core/quic_unacked_packet_map.h"
#include <cstddef>
#include <limits>
#include <type_traits>
#include <utility>
#include "absl/container/inlined_vector.h"
#include "quiche/quic/core/quic_connection_stats.h"
#include "quiche/quic/core/quic_packet_number.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
namespace quic {
namespace {
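// Returns true if summing the two stream frame lengths would overflow
// QuicPacketLength.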
bool WillStreamFrameLengthSumWrapAround(QuicPacketLength lhs,
QuicPacketLength rhs) {
static_assert(
std::is_unsigned<QuicPacketLength>::value,
"This function assumes QuicPacketLength is an unsigned integer type.");
return std::numeric_limits<QuicPacketLength>::max() - lhs < rhs;
}
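// One bit per frame type; GetLastPacketContent() ORs these together to
// describe the frames carried by the last sent packet.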
enum QuicFrameTypeBitfield : uint32_t {
kInvalidFrameBitfield = 0,
kPaddingFrameBitfield = 1,
kRstStreamFrameBitfield = 1 << 1,
kConnectionCloseFrameBitfield = 1 << 2,
kGoawayFrameBitfield = 1 << 3,
kWindowUpdateFrameBitfield = 1 << 4,
kBlockedFrameBitfield = 1 << 5,
kStopWaitingFrameBitfield = 1 << 6,
kPingFrameBitfield = 1 << 7,
kCryptoFrameBitfield = 1 << 8,
kHandshakeDoneFrameBitfield = 1 << 9,
kStreamFrameBitfield = 1 << 10,
kAckFrameBitfield = 1 << 11,
kMtuDiscoveryFrameBitfield = 1 << 12,
kNewConnectionIdFrameBitfield = 1 << 13,
kMaxStreamsFrameBitfield = 1 << 14,
kStreamsBlockedFrameBitfield = 1 << 15,
kPathResponseFrameBitfield = 1 << 16,
kPathChallengeFrameBitfield = 1 << 17,
kStopSendingFrameBitfield = 1 << 18,
kMessageFrameBitfield = 1 << 19,
kNewTokenFrameBitfield = 1 << 20,
kRetireConnectionIdFrameBitfield = 1 << 21,
kAckFrequencyFrameBitfield = 1 << 22,
kResetStreamAtFrameBitfield = 1 << 23,
};
QuicFrameTypeBitfield GetFrameTypeBitfield(QuicFrameType type) {
switch (type) {
case PADDING_FRAME:
return kPaddingFrameBitfield;
case RST_STREAM_FRAME:
return kRstStreamFrameBitfield;
case CONNECTION_CLOSE_FRAME:
return kConnectionCloseFrameBitfield;
case GOAWAY_FRAME:
return kGoawayFrameBitfield;
case WINDOW_UPDATE_FRAME:
return kWindowUpdateFrameBitfield;
case BLOCKED_FRAME:
return kBlockedFrameBitfield;
case STOP_WAITING_FRAME:
return kStopWaitingFrameBitfield;
case PING_FRAME:
return kPingFrameBitfield;
case CRYPTO_FRAME:
return kCryptoFrameBitfield;
case HANDSHAKE_DONE_FRAME:
return kHandshakeDoneFrameBitfield;
case STREAM_FRAME:
return kStreamFrameBitfield;
case ACK_FRAME:
return kAckFrameBitfield;
case MTU_DISCOVERY_FRAME:
return kMtuDiscoveryFrameBitfield;
case NEW_CONNECTION_ID_FRAME:
return kNewConnectionIdFrameBitfield;
case MAX_STREAMS_FRAME:
return kMaxStreamsFrameBitfield;
case STREAMS_BLOCKED_FRAME:
return kStreamsBlockedFrameBitfield;
case PATH_RESPONSE_FRAME:
return kPathResponseFrameBitfield;
case PATH_CHALLENGE_FRAME:
return kPathChallengeFrameBitfield;
case STOP_SENDING_FRAME:
return kStopSendingFrameBitfield;
case MESSAGE_FRAME:
return kMessageFrameBitfield;
case NEW_TOKEN_FRAME:
return kNewTokenFrameBitfield;
case RETIRE_CONNECTION_ID_FRAME:
return kRetireConnectionIdFrameBitfield;
case ACK_FREQUENCY_FRAME:
return kAckFrequencyFrameBitfield;
case RESET_STREAM_AT_FRAME:
return kResetStreamAtFrameBitfield;
case NUM_FRAME_TYPES:
QUIC_BUG(quic_bug_10518_1) << "Unexpected frame type";
return kInvalidFrameBitfield;
}
QUIC_BUG(quic_bug_10518_2) << "Unexpected frame type";
return kInvalidFrameBitfield;
}
}
QuicUnackedPacketMap::QuicUnackedPacketMap(Perspective perspective)
: perspective_(perspective),
least_unacked_(FirstSendingPacketNumber()),
bytes_in_flight_(0),
bytes_in_flight_per_packet_number_space_{0, 0, 0},
packets_in_flight_(0),
last_inflight_packet_sent_time_(QuicTime::Zero()),
last_inflight_packets_sent_time_{
{QuicTime::Zero()}, {QuicTime::Zero()}, {QuicTime::Zero()}},
last_crypto_packet_sent_time_(QuicTime::Zero()),
session_notifier_(nullptr),
supports_multiple_packet_number_spaces_(false) {}
QuicUnackedPacketMap::~QuicUnackedPacketMap() {
for (QuicTransmissionInfo& transmission_info : unacked_packets_) {
DeleteFrames(&(transmission_info.retransmittable_frames));
}
}
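// Records a packet that was sent by the dispatcher before the connection
// was created. Such packets are never considered in flight and never
// contribute to RTT measurement.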
const QuicTransmissionInfo& QuicUnackedPacketMap::AddDispatcherSentPacket(
const DispatcherSentPacket& packet) {
QuicPacketNumber packet_number = packet.packet_number;
QUICHE_DCHECK_EQ(least_unacked_, FirstSendingPacketNumber());
QUIC_BUG_IF(quic_unacked_map_dispatcher_packet_num_too_small,
largest_sent_packet_.IsInitialized() &&
largest_sent_packet_ >= packet_number)
<< "largest_sent_packet_: " << largest_sent_packet_
<< ", packet_number: " << packet_number;
QUICHE_DCHECK_GE(packet_number, least_unacked_ + unacked_packets_.size());
while (least_unacked_ + unacked_packets_.size() < packet_number) {
unacked_packets_.push_back(QuicTransmissionInfo());
unacked_packets_.back().state = NEVER_SENT;
}
  QuicTransmissionInfo& info = unacked_packets_.emplace_back(
      ENCRYPTION_INITIAL, NOT_RETRANSMISSION, packet.sent_time,
      packet.bytes_sent, /*has_crypto_handshake=*/false,
      /*has_ack_frequency=*/false, ECN_NOT_ECT);
QUICHE_DCHECK(!info.in_flight);
info.state = NOT_CONTRIBUTING_RTT;
info.largest_acked = packet.largest_acked;
largest_sent_largest_acked_.UpdateMax(packet.largest_acked);
largest_sent_packet_ = packet_number;
return info;
}
void QuicUnackedPacketMap::AddSentPacket(SerializedPacket* mutable_packet,
TransmissionType transmission_type,
QuicTime sent_time, bool set_in_flight,
bool measure_rtt,
QuicEcnCodepoint ecn_codepoint) {
const SerializedPacket& packet = *mutable_packet;
QuicPacketNumber packet_number = packet.packet_number;
QuicPacketLength bytes_sent = packet.encrypted_length;
QUIC_BUG_IF(quic_bug_12645_1, largest_sent_packet_.IsInitialized() &&
largest_sent_packet_ >= packet_number)
<< "largest_sent_packet_: " << largest_sent_packet_
<< ", packet_number: " << packet_number;
QUICHE_DCHECK_GE(packet_number, least_unacked_ + unacked_packets_.size());
while (least_unacked_ + unacked_packets_.size() < packet_number) {
unacked_packets_.push_back(QuicTransmissionInfo());
unacked_packets_.back().state = NEVER_SENT;
}
const bool has_crypto_handshake = packet.has_crypto_handshake == IS_HANDSHAKE;
QuicTransmissionInfo info(packet.encryption_level, transmission_type,
sent_time, bytes_sent, has_crypto_handshake,
packet.has_ack_frequency, ecn_codepoint);
info.largest_acked = packet.largest_acked;
largest_sent_largest_acked_.UpdateMax(packet.largest_acked);
if (!measure_rtt) {
QUIC_BUG_IF(quic_bug_12645_2, set_in_flight)
<< "Packet " << mutable_packet->packet_number << ", transmission type "
<< TransmissionTypeToString(mutable_packet->transmission_type)
<< ", retransmittable frames: "
<< QuicFramesToString(mutable_packet->retransmittable_frames)
<< ", nonretransmittable_frames: "
<< QuicFramesToString(mutable_packet->nonretransmittable_frames);
info.state = NOT_CONTRIBUTING_RTT;
}
largest_sent_packet_ = packet_number;
if (set_in_flight) {
const PacketNumberSpace packet_number_space =
GetPacketNumberSpace(info.encryption_level);
bytes_in_flight_ += bytes_sent;
bytes_in_flight_per_packet_number_space_[packet_number_space] += bytes_sent;
++packets_in_flight_;
info.in_flight = true;
largest_sent_retransmittable_packets_[packet_number_space] = packet_number;
last_inflight_packet_sent_time_ = sent_time;
last_inflight_packets_sent_time_[packet_number_space] = sent_time;
}
unacked_packets_.push_back(std::move(info));
if (has_crypto_handshake) {
last_crypto_packet_sent_time_ = sent_time;
}
mutable_packet->retransmittable_frames.swap(
unacked_packets_.back().retransmittable_frames);
}
void QuicUnackedPacketMap::RemoveObsoletePackets() {
while (!unacked_packets_.empty()) {
if (!IsPacketUseless(least_unacked_, unacked_packets_.front())) {
break;
}
DeleteFrames(&unacked_packets_.front().retransmittable_frames);
unacked_packets_.pop_front();
++least_unacked_;
}
}
bool QuicUnackedPacketMap::HasRetransmittableFrames(
QuicPacketNumber packet_number) const {
QUICHE_DCHECK_GE(packet_number, least_unacked_);
QUICHE_DCHECK_LT(packet_number, least_unacked_ + unacked_packets_.size());
return HasRetransmittableFrames(
unacked_packets_[packet_number - least_unacked_]);
}
bool QuicUnackedPacketMap::HasRetransmittableFrames(
const QuicTransmissionInfo& info) const {
if (!QuicUtils::IsAckable(info.state)) {
return false;
}
for (const auto& frame : info.retransmittable_frames) {
if (session_notifier_->IsFrameOutstanding(frame)) {
return true;
}
}
return false;
}
void QuicUnackedPacketMap::RemoveRetransmittability(
QuicTransmissionInfo* info) {
DeleteFrames(&info->retransmittable_frames);
info->first_sent_after_loss.Clear();
}
void QuicUnackedPacketMap::RemoveRetransmittability(
QuicPacketNumber packet_number) {
QUICHE_DCHECK_GE(packet_number, least_unacked_);
QUICHE_DCHECK_LT(packet_number, least_unacked_ + unacked_packets_.size());
QuicTransmissionInfo* info =
&unacked_packets_[packet_number - least_unacked_];
RemoveRetransmittability(info);
}
void QuicUnackedPacketMap::IncreaseLargestAcked(
QuicPacketNumber largest_acked) {
QUICHE_DCHECK(!largest_acked_.IsInitialized() ||
largest_acked_ <= largest_acked);
largest_acked_ = largest_acked;
}
void QuicUnackedPacketMap::MaybeUpdateLargestAckedOfPacketNumberSpace(
PacketNumberSpace packet_number_space, QuicPacketNumber packet_number) {
largest_acked_packets_[packet_number_space].UpdateMax(packet_number);
}
bool QuicUnackedPacketMap::IsPacketUsefulForMeasuringRtt(
QuicPacketNumber packet_number, const QuicTransmissionInfo& info) const {
return QuicUtils::IsAckable(info.state) &&
(!largest_acked_.IsInitialized() || packet_number > largest_acked_) &&
info.state != NOT_CONTRIBUTING_RTT;
}
bool QuicUnackedPacketMap::IsPacketUsefulForCongestionControl(
const QuicTransmissionInfo& info) const {
return info.in_flight;
}
bool QuicUnackedPacketMap::IsPacketUsefulForRetransmittableData(
const QuicTransmissionInfo& info) const {
return info.first_sent_after_loss.IsInitialized() &&
(!largest_acked_.IsInitialized() ||
info.first_sent_after_loss > largest_acked_);
}
bool QuicUnackedPacketMap::IsPacketUseless(
QuicPacketNumber packet_number, const QuicTransmissionInfo& info) const {
return !IsPacketUsefulForMeasuringRtt(packet_number, info) &&
!IsPacketUsefulForCongestionControl(info) &&
!IsPacketUsefulForRetransmittableData(info);
}
bool QuicUnackedPacketMap::IsUnacked(QuicPacketNumber packet_number) const {
if (packet_number < least_unacked_ ||
packet_number >= least_unacked_ + unacked_packets_.size()) {
return false;
}
return !IsPacketUseless(packet_number,
unacked_packets_[packet_number - least_unacked_]);
}
void QuicUnackedPacketMap::RemoveFromInFlight(QuicTransmissionInfo* info) {
if (info->in_flight) {
QUIC_BUG_IF(quic_bug_12645_3, bytes_in_flight_ < info->bytes_sent);
QUIC_BUG_IF(quic_bug_12645_4, packets_in_flight_ == 0);
bytes_in_flight_ -= info->bytes_sent;
--packets_in_flight_;
const PacketNumberSpace packet_number_space =
GetPacketNumberSpace(info->encryption_level);
if (bytes_in_flight_per_packet_number_space_[packet_number_space] <
info->bytes_sent) {
QUIC_BUG(quic_bug_10518_3)
<< "bytes_in_flight: "
<< bytes_in_flight_per_packet_number_space_[packet_number_space]
<< " is smaller than bytes_sent: " << info->bytes_sent
<< " for packet number space: "
<< PacketNumberSpaceToString(packet_number_space);
bytes_in_flight_per_packet_number_space_[packet_number_space] = 0;
} else {
bytes_in_flight_per_packet_number_space_[packet_number_space] -=
info->bytes_sent;
}
if (bytes_in_flight_per_packet_number_space_[packet_number_space] == 0) {
last_inflight_packets_sent_time_[packet_number_space] = QuicTime::Zero();
}
info->in_flight = false;
}
}
void QuicUnackedPacketMap::RemoveFromInFlight(QuicPacketNumber packet_number) {
QUICHE_DCHECK_GE(packet_number, least_unacked_);
QUICHE_DCHECK_LT(packet_number, least_unacked_ + unacked_packets_.size());
QuicTransmissionInfo* info =
&unacked_packets_[packet_number - least_unacked_];
RemoveFromInFlight(info);
}
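// Neuters every ENCRYPTION_INITIAL packet that still carries retransmittable
// frames, removes it from flight, and returns the affected packet numbers.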
absl::InlinedVector<QuicPacketNumber, 2>
QuicUnackedPacketMap::NeuterUnencryptedPackets() {
absl::InlinedVector<QuicPacketNumber, 2> neutered_packets;
QuicPacketNumber packet_number = GetLeastUnacked();
for (QuicUnackedPacketMap::iterator it = begin(); it != end();
++it, ++packet_number) {
if (!it->retransmittable_frames.empty() &&
it->encryption_level == ENCRYPTION_INITIAL) {
QUIC_DVLOG(2) << "Neutering unencrypted packet " << packet_number;
RemoveFromInFlight(packet_number);
it->state = NEUTERED;
neutered_packets.push_back(packet_number);
NotifyFramesAcked(*it, QuicTime::Delta::Zero(), QuicTime::Zero());
QUICHE_DCHECK(!HasRetransmittableFrames(*it));
}
}
QUICHE_DCHECK(!supports_multiple_packet_number_spaces_ ||
last_inflight_packets_sent_time_[INITIAL_DATA] ==
QuicTime::Zero());
return neutered_packets;
}
absl::InlinedVector<QuicPacketNumber, 2>
QuicUnackedPacketMap::NeuterHandshakePackets() {
absl::InlinedVector<QuicPacketNumber, 2> neutered_packets;
QuicPacketNumber packet_number = GetLeastUnacked();
for (QuicUnackedPacketMap::iterator it = begin(); it != end();
++it, ++packet_number) {
if (!it->retransmittable_frames.empty() &&
GetPacketNumberSpace(it->encryption_level) == HANDSHAKE_DATA) {
QUIC_DVLOG(2) << "Neutering handshake packet " << packet_number;
RemoveFromInFlight(packet_number);
it->state = NEUTERED;
neutered_packets.push_back(packet_number);
NotifyFramesAcked(*it, QuicTime::Delta::Zero(), QuicTime::Zero());
}
}
QUICHE_DCHECK(!supports_multiple_packet_number_spaces() ||
last_inflight_packets_sent_time_[HANDSHAKE_DATA] ==
QuicTime::Zero());
return neutered_packets;
}
bool QuicUnackedPacketMap::HasInFlightPackets() const {
return bytes_in_flight_ > 0;
}
const QuicTransmissionInfo& QuicUnackedPacketMap::GetTransmissionInfo(
QuicPacketNumber packet_number) const {
return unacked_packets_[packet_number - least_unacked_];
}
QuicTransmissionInfo* QuicUnackedPacketMap::GetMutableTransmissionInfo(
QuicPacketNumber packet_number) {
return &unacked_packets_[packet_number - least_unacked_];
}
QuicTime QuicUnackedPacketMap::GetLastInFlightPacketSentTime() const {
return last_inflight_packet_sent_time_;
}
QuicTime QuicUnackedPacketMap::GetLastCryptoPacketSentTime() const {
return last_crypto_packet_sent_time_;
}
size_t QuicUnackedPacketMap::GetNumUnackedPacketsDebugOnly() const {
size_t unacked_packet_count = 0;
QuicPacketNumber packet_number = least_unacked_;
for (auto it = begin(); it != end(); ++it, ++packet_number) {
if (!IsPacketUseless(packet_number, *it)) {
++unacked_packet_count;
}
}
return unacked_packet_count;
}
bool QuicUnackedPacketMap::HasMultipleInFlightPackets() const {
if (bytes_in_flight_ > kDefaultTCPMSS) {
return true;
}
size_t num_in_flight = 0;
for (auto it = rbegin(); it != rend(); ++it) {
if (it->in_flight) {
++num_in_flight;
}
if (num_in_flight > 1) {
return true;
}
}
return false;
}
bool QuicUnackedPacketMap::HasPendingCryptoPackets() const {
return session_notifier_->HasUnackedCryptoData();
}
bool QuicUnackedPacketMap::HasUnackedRetransmittableFrames() const {
for (auto it = rbegin(); it != rend(); ++it) {
if (it->in_flight && HasRetransmittableFrames(*it)) {
return true;
}
}
return false;
}
QuicPacketNumber QuicUnackedPacketMap::GetLeastUnacked() const {
return least_unacked_;
}
void QuicUnackedPacketMap::SetSessionNotifier(
SessionNotifierInterface* session_notifier) {
session_notifier_ = session_notifier;
}
bool QuicUnackedPacketMap::NotifyFramesAcked(const QuicTransmissionInfo& info,
QuicTime::Delta ack_delay,
QuicTime receive_timestamp) {
if (session_notifier_ == nullptr) {
return false;
}
bool new_data_acked = false;
for (const QuicFrame& frame : info.retransmittable_frames) {
if (session_notifier_->OnFrameAcked(frame, ack_delay, receive_timestamp)) {
new_data_acked = true;
}
}
return new_data_acked;
}
void QuicUnackedPacketMap::NotifyFramesLost(const QuicTransmissionInfo& info,
                                            TransmissionType /*type*/) {
for (const QuicFrame& frame : info.retransmittable_frames) {
session_notifier_->OnFrameLost(frame);
}
}
bool QuicUnackedPacketMap::RetransmitFrames(const QuicFrames& frames,
TransmissionType type) {
return session_notifier_->RetransmitFrames(frames, type);
}
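// Coalesces contiguous acked stream frames of the same stream into
// aggregated_stream_frame_ so the session notifier sees one ack per
// contiguous byte range rather than one per packet.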
void QuicUnackedPacketMap::MaybeAggregateAckedStreamFrame(
const QuicTransmissionInfo& info, QuicTime::Delta ack_delay,
QuicTime receive_timestamp) {
if (session_notifier_ == nullptr) {
return;
}
for (const auto& frame : info.retransmittable_frames) {
const bool can_aggregate =
frame.type == STREAM_FRAME &&
frame.stream_frame.stream_id == aggregated_stream_frame_.stream_id &&
frame.stream_frame.offset == aggregated_stream_frame_.offset +
aggregated_stream_frame_.data_length &&
!WillStreamFrameLengthSumWrapAround(
aggregated_stream_frame_.data_length,
frame.stream_frame.data_length);
if (can_aggregate) {
aggregated_stream_frame_.data_length += frame.stream_frame.data_length;
aggregated_stream_frame_.fin = frame.stream_frame.fin;
if (aggregated_stream_frame_.fin) {
NotifyAggregatedStreamFrameAcked(ack_delay);
}
continue;
}
NotifyAggregatedStreamFrameAcked(ack_delay);
if (frame.type != STREAM_FRAME || frame.stream_frame.fin) {
session_notifier_->OnFrameAcked(frame, ack_delay, receive_timestamp);
continue;
}
aggregated_stream_frame_.stream_id = frame.stream_frame.stream_id;
aggregated_stream_frame_.offset = frame.stream_frame.offset;
aggregated_stream_frame_.data_length = frame.stream_frame.data_length;
aggregated_stream_frame_.fin = frame.stream_frame.fin;
}
}
void QuicUnackedPacketMap::NotifyAggregatedStreamFrameAcked(
QuicTime::Delta ack_delay) {
if (aggregated_stream_frame_.stream_id == static_cast<QuicStreamId>(-1) ||
session_notifier_ == nullptr) {
return;
}
session_notifier_->OnFrameAcked(QuicFrame(aggregated_stream_frame_),
                                  ack_delay,
                                  /*receive_timestamp=*/QuicTime::Zero());
aggregated_stream_frame_.stream_id = -1;
}
PacketNumberSpace QuicUnackedPacketMap::GetPacketNumberSpace(
QuicPacketNumber packet_number) const {
return GetPacketNumberSpace(
GetTransmissionInfo(packet_number).encryption_level);
}
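// Without multiple packet number spaces, encryption levels collapse into two
// buckets: a client treats only ENCRYPTION_INITIAL as handshake data, while
// a server treats everything below ENCRYPTION_FORWARD_SECURE as handshake
// data.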
PacketNumberSpace QuicUnackedPacketMap::GetPacketNumberSpace(
EncryptionLevel encryption_level) const {
if (supports_multiple_packet_number_spaces_) {
return QuicUtils::GetPacketNumberSpace(encryption_level);
}
if (perspective_ == Perspective::IS_CLIENT) {
return encryption_level == ENCRYPTION_INITIAL ? HANDSHAKE_DATA
: APPLICATION_DATA;
}
return encryption_level == ENCRYPTION_FORWARD_SECURE ? APPLICATION_DATA
: HANDSHAKE_DATA;
}
QuicPacketNumber QuicUnackedPacketMap::GetLargestAckedOfPacketNumberSpace(
PacketNumberSpace packet_number_space) const {
if (packet_number_space >= NUM_PACKET_NUMBER_SPACES) {
QUIC_BUG(quic_bug_10518_4)
<< "Invalid packet number space: " << packet_number_space;
return QuicPacketNumber();
}
return largest_acked_packets_[packet_number_space];
}
QuicTime QuicUnackedPacketMap::GetLastInFlightPacketSentTime(
PacketNumberSpace packet_number_space) const {
if (packet_number_space >= NUM_PACKET_NUMBER_SPACES) {
QUIC_BUG(quic_bug_10518_5)
<< "Invalid packet number space: " << packet_number_space;
return QuicTime::Zero();
}
return last_inflight_packets_sent_time_[packet_number_space];
}
QuicPacketNumber
QuicUnackedPacketMap::GetLargestSentRetransmittableOfPacketNumberSpace(
PacketNumberSpace packet_number_space) const {
if (packet_number_space >= NUM_PACKET_NUMBER_SPACES) {
QUIC_BUG(quic_bug_10518_6)
<< "Invalid packet number space: " << packet_number_space;
return QuicPacketNumber();
}
return largest_sent_retransmittable_packets_[packet_number_space];
}
const QuicTransmissionInfo*
QuicUnackedPacketMap::GetFirstInFlightTransmissionInfo() const {
QUICHE_DCHECK(HasInFlightPackets());
for (auto it = begin(); it != end(); ++it) {
if (it->in_flight) {
return &(*it);
}
}
QUICHE_DCHECK(false);
return nullptr;
}
const QuicTransmissionInfo*
QuicUnackedPacketMap::GetFirstInFlightTransmissionInfoOfSpace(
PacketNumberSpace packet_number_space) const {
for (auto it = begin(); it != end(); ++it) {
if (it->in_flight &&
GetPacketNumberSpace(it->encryption_level) == packet_number_space) {
return &(*it);
}
}
return nullptr;
}
void QuicUnackedPacketMap::EnableMultiplePacketNumberSpacesSupport() {
if (supports_multiple_packet_number_spaces_) {
QUIC_BUG(quic_bug_10518_7)
<< "Multiple packet number spaces has already been enabled";
return;
}
if (largest_sent_packet_.IsInitialized()) {
QUIC_BUG(quic_bug_10518_8)
<< "Try to enable multiple packet number spaces support after any "
"packet has been sent.";
return;
}
supports_multiple_packet_number_spaces_ = true;
}
int32_t QuicUnackedPacketMap::GetLastPacketContent() const {
if (empty()) {
return -1;
}
int32_t content = 0;
const QuicTransmissionInfo& last_packet = unacked_packets_.back();
for (const auto& frame : last_packet.retransmittable_frames) {
content |= GetFrameTypeBitfield(frame.type);
}
if (last_packet.largest_acked.IsInitialized()) {
content |= GetFrameTypeBitfield(ACK_FRAME);
}
return content;
}
} | #include "quiche/quic/core/quic_unacked_packet_map.h"
#include <cstddef>
#include <limits>
#include <vector>
#include "absl/base/macros.h"
#include "quiche/quic/core/frames/quic_stream_frame.h"
#include "quiche/quic/core/quic_packet_number.h"
#include "quiche/quic/core/quic_transmission_info.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/quic/test_tools/quic_unacked_packet_map_peer.h"
using testing::_;
using testing::Return;
using testing::StrictMock;
namespace quic {
namespace test {
namespace {
const uint32_t kDefaultLength = 1000;
class QuicUnackedPacketMapTest : public QuicTestWithParam<Perspective> {
protected:
QuicUnackedPacketMapTest()
: unacked_packets_(GetParam()),
now_(QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(1000)) {
    unacked_packets_.SetSessionNotifier(&notifier_);
EXPECT_CALL(notifier_, IsFrameOutstanding(_)).WillRepeatedly(Return(true));
EXPECT_CALL(notifier_, OnStreamFrameRetransmitted(_))
.Times(testing::AnyNumber());
}
~QuicUnackedPacketMapTest() override {}
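  // Builds a packet carrying a single stream frame so the map treats it as
  // having retransmittable data.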
SerializedPacket CreateRetransmittablePacket(uint64_t packet_number) {
return CreateRetransmittablePacketForStream(
packet_number, QuicUtils::GetFirstBidirectionalStreamId(
CurrentSupportedVersions()[0].transport_version,
Perspective::IS_CLIENT));
}
SerializedPacket CreateRetransmittablePacketForStream(
uint64_t packet_number, QuicStreamId stream_id) {
SerializedPacket packet(QuicPacketNumber(packet_number),
PACKET_1BYTE_PACKET_NUMBER, nullptr, kDefaultLength,
                            /*has_ack=*/false, /*has_stop_waiting=*/false);
QuicStreamFrame frame;
frame.stream_id = stream_id;
packet.retransmittable_frames.push_back(QuicFrame(frame));
return packet;
}
SerializedPacket CreateNonRetransmittablePacket(uint64_t packet_number) {
return SerializedPacket(QuicPacketNumber(packet_number),
PACKET_1BYTE_PACKET_NUMBER, nullptr, kDefaultLength,
                            /*has_ack=*/false, /*has_stop_waiting=*/false);
}
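  // Asserts that exactly the packets listed in |packets| are tracked as in
  // flight.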
void VerifyInFlightPackets(uint64_t* packets, size_t num_packets) {
unacked_packets_.RemoveObsoletePackets();
if (num_packets == 0) {
EXPECT_FALSE(unacked_packets_.HasInFlightPackets());
EXPECT_FALSE(unacked_packets_.HasMultipleInFlightPackets());
return;
}
if (num_packets == 1) {
EXPECT_TRUE(unacked_packets_.HasInFlightPackets());
EXPECT_FALSE(unacked_packets_.HasMultipleInFlightPackets());
ASSERT_TRUE(unacked_packets_.IsUnacked(QuicPacketNumber(packets[0])));
EXPECT_TRUE(
unacked_packets_.GetTransmissionInfo(QuicPacketNumber(packets[0]))
.in_flight);
}
for (size_t i = 0; i < num_packets; ++i) {
ASSERT_TRUE(unacked_packets_.IsUnacked(QuicPacketNumber(packets[i])));
EXPECT_TRUE(
unacked_packets_.GetTransmissionInfo(QuicPacketNumber(packets[i]))
.in_flight);
}
size_t in_flight_count = 0;
for (auto it = unacked_packets_.begin(); it != unacked_packets_.end();
++it) {
if (it->in_flight) {
++in_flight_count;
}
}
EXPECT_EQ(num_packets, in_flight_count);
}
void VerifyUnackedPackets(uint64_t* packets, size_t num_packets) {
unacked_packets_.RemoveObsoletePackets();
if (num_packets == 0) {
EXPECT_TRUE(unacked_packets_.empty());
EXPECT_FALSE(unacked_packets_.HasUnackedRetransmittableFrames());
return;
}
EXPECT_FALSE(unacked_packets_.empty());
for (size_t i = 0; i < num_packets; ++i) {
EXPECT_TRUE(unacked_packets_.IsUnacked(QuicPacketNumber(packets[i])))
<< packets[i];
}
EXPECT_EQ(num_packets, unacked_packets_.GetNumUnackedPacketsDebugOnly());
}
void VerifyRetransmittablePackets(uint64_t* packets, size_t num_packets) {
unacked_packets_.RemoveObsoletePackets();
size_t num_retransmittable_packets = 0;
for (auto it = unacked_packets_.begin(); it != unacked_packets_.end();
++it) {
if (unacked_packets_.HasRetransmittableFrames(*it)) {
++num_retransmittable_packets;
}
}
EXPECT_EQ(num_packets, num_retransmittable_packets);
for (size_t i = 0; i < num_packets; ++i) {
EXPECT_TRUE(unacked_packets_.HasRetransmittableFrames(
QuicPacketNumber(packets[i])))
<< " packets[" << i << "]:" << packets[i];
}
}
void UpdatePacketState(uint64_t packet_number, SentPacketState state) {
unacked_packets_
.GetMutableTransmissionInfo(QuicPacketNumber(packet_number))
->state = state;
}
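  // Moves |old_packet_number| into the sent-packet state implied by
  // |transmission_type| and re-sends its stream data as |new_packet_number|.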
void RetransmitAndSendPacket(uint64_t old_packet_number,
uint64_t new_packet_number,
TransmissionType transmission_type) {
QUICHE_DCHECK(unacked_packets_.HasRetransmittableFrames(
QuicPacketNumber(old_packet_number)));
QuicTransmissionInfo* info = unacked_packets_.GetMutableTransmissionInfo(
QuicPacketNumber(old_packet_number));
QuicStreamId stream_id = QuicUtils::GetFirstBidirectionalStreamId(
CurrentSupportedVersions()[0].transport_version,
Perspective::IS_CLIENT);
for (const auto& frame : info->retransmittable_frames) {
if (frame.type == STREAM_FRAME) {
stream_id = frame.stream_frame.stream_id;
break;
}
}
UpdatePacketState(
old_packet_number,
QuicUtils::RetransmissionTypeToPacketState(transmission_type));
info->first_sent_after_loss = QuicPacketNumber(new_packet_number);
SerializedPacket packet(
CreateRetransmittablePacketForStream(new_packet_number, stream_id));
    unacked_packets_.AddSentPacket(&packet, transmission_type, now_,
                                   /*set_in_flight=*/true,
                                   /*measure_rtt=*/true, ECN_NOT_ECT);
}
QuicUnackedPacketMap unacked_packets_;
QuicTime now_;
StrictMock<MockSessionNotifier> notifier_;
};
INSTANTIATE_TEST_SUITE_P(Tests, QuicUnackedPacketMapTest,
::testing::ValuesIn({Perspective::IS_CLIENT,
Perspective::IS_SERVER}),
::testing::PrintToStringParamName());
TEST_P(QuicUnackedPacketMapTest, RttOnly) {
SerializedPacket packet(CreateNonRetransmittablePacket(1));
  unacked_packets_.AddSentPacket(&packet, NOT_RETRANSMISSION, now_,
                                 /*set_in_flight=*/false, /*measure_rtt=*/true,
                                 ECN_NOT_ECT);
uint64_t unacked[] = {1};
VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));
VerifyInFlightPackets(nullptr, 0);
VerifyRetransmittablePackets(nullptr, 0);
unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(1));
VerifyUnackedPackets(nullptr, 0);
VerifyInFlightPackets(nullptr, 0);
VerifyRetransmittablePackets(nullptr, 0);
}
TEST_P(QuicUnackedPacketMapTest, RetransmittableInflightAndRtt) {
SerializedPacket packet(CreateRetransmittablePacket(1));
unacked_packets_.AddSentPacket(&packet, NOT_RETRANSMISSION, now_, true, true,
ECN_NOT_ECT);
uint64_t unacked[] = {1};
VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));
VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked));
VerifyRetransmittablePackets(unacked, ABSL_ARRAYSIZE(unacked));
unacked_packets_.RemoveRetransmittability(QuicPacketNumber(1));
VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));
VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked));
VerifyRetransmittablePackets(nullptr, 0);
unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(1));
VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));
VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked));
VerifyRetransmittablePackets(nullptr, 0);
unacked_packets_.RemoveFromInFlight(QuicPacketNumber(1));
VerifyUnackedPackets(nullptr, 0);
VerifyInFlightPackets(nullptr, 0);
VerifyRetransmittablePackets(nullptr, 0);
}
TEST_P(QuicUnackedPacketMapTest, StopRetransmission) {
const QuicStreamId stream_id = 2;
SerializedPacket packet(CreateRetransmittablePacketForStream(1, stream_id));
unacked_packets_.AddSentPacket(&packet, NOT_RETRANSMISSION, now_, true, true,
ECN_NOT_ECT);
uint64_t unacked[] = {1};
VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));
VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked));
uint64_t retransmittable[] = {1};
VerifyRetransmittablePackets(retransmittable,
ABSL_ARRAYSIZE(retransmittable));
EXPECT_CALL(notifier_, IsFrameOutstanding(_)).WillRepeatedly(Return(false));
VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));
VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked));
VerifyRetransmittablePackets(nullptr, 0);
}
TEST_P(QuicUnackedPacketMapTest, StopRetransmissionOnOtherStream) {
const QuicStreamId stream_id = 2;
SerializedPacket packet(CreateRetransmittablePacketForStream(1, stream_id));
unacked_packets_.AddSentPacket(&packet, NOT_RETRANSMISSION, now_, true, true,
ECN_NOT_ECT);
uint64_t unacked[] = {1};
VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));
VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked));
uint64_t retransmittable[] = {1};
VerifyRetransmittablePackets(retransmittable,
ABSL_ARRAYSIZE(retransmittable));
VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));
VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked));
VerifyRetransmittablePackets(retransmittable,
ABSL_ARRAYSIZE(retransmittable));
}
TEST_P(QuicUnackedPacketMapTest, StopRetransmissionAfterRetransmission) {
const QuicStreamId stream_id = 2;
SerializedPacket packet1(CreateRetransmittablePacketForStream(1, stream_id));
unacked_packets_.AddSentPacket(&packet1, NOT_RETRANSMISSION, now_, true, true,
ECN_NOT_ECT);
RetransmitAndSendPacket(1, 2, LOSS_RETRANSMISSION);
uint64_t unacked[] = {1, 2};
VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));
VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked));
std::vector<uint64_t> retransmittable = {1, 2};
VerifyRetransmittablePackets(&retransmittable[0], retransmittable.size());
EXPECT_CALL(notifier_, IsFrameOutstanding(_)).WillRepeatedly(Return(false));
VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));
VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked));
VerifyRetransmittablePackets(nullptr, 0);
}
TEST_P(QuicUnackedPacketMapTest, RetransmittedPacket) {
SerializedPacket packet1(CreateRetransmittablePacket(1));
unacked_packets_.AddSentPacket(&packet1, NOT_RETRANSMISSION, now_, true, true,
ECN_NOT_ECT);
RetransmitAndSendPacket(1, 2, LOSS_RETRANSMISSION);
uint64_t unacked[] = {1, 2};
VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));
VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked));
std::vector<uint64_t> retransmittable = {1, 2};
VerifyRetransmittablePackets(&retransmittable[0], retransmittable.size());
EXPECT_CALL(notifier_, IsFrameOutstanding(_)).WillRepeatedly(Return(false));
unacked_packets_.RemoveRetransmittability(QuicPacketNumber(1));
VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));
VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked));
VerifyRetransmittablePackets(nullptr, 0);
unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(2));
VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));
VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked));
VerifyRetransmittablePackets(nullptr, 0);
unacked_packets_.RemoveFromInFlight(QuicPacketNumber(2));
uint64_t unacked2[] = {1};
VerifyUnackedPackets(unacked2, ABSL_ARRAYSIZE(unacked2));
VerifyInFlightPackets(unacked2, ABSL_ARRAYSIZE(unacked2));
VerifyRetransmittablePackets(nullptr, 0);
unacked_packets_.RemoveFromInFlight(QuicPacketNumber(1));
VerifyUnackedPackets(nullptr, 0);
VerifyInFlightPackets(nullptr, 0);
VerifyRetransmittablePackets(nullptr, 0);
}
TEST_P(QuicUnackedPacketMapTest, RetransmitThreeTimes) {
SerializedPacket packet1(CreateRetransmittablePacket(1));
unacked_packets_.AddSentPacket(&packet1, NOT_RETRANSMISSION, now_, true, true,
ECN_NOT_ECT);
SerializedPacket packet2(CreateRetransmittablePacket(2));
unacked_packets_.AddSentPacket(&packet2, NOT_RETRANSMISSION, now_, true, true,
ECN_NOT_ECT);
uint64_t unacked[] = {1, 2};
VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));
VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked));
uint64_t retransmittable[] = {1, 2};
VerifyRetransmittablePackets(retransmittable,
ABSL_ARRAYSIZE(retransmittable));
unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(2));
unacked_packets_.RemoveFromInFlight(QuicPacketNumber(2));
unacked_packets_.RemoveRetransmittability(QuicPacketNumber(2));
unacked_packets_.RemoveFromInFlight(QuicPacketNumber(1));
RetransmitAndSendPacket(1, 3, LOSS_RETRANSMISSION);
SerializedPacket packet4(CreateRetransmittablePacket(4));
unacked_packets_.AddSentPacket(&packet4, NOT_RETRANSMISSION, now_, true, true,
ECN_NOT_ECT);
uint64_t unacked2[] = {1, 3, 4};
VerifyUnackedPackets(unacked2, ABSL_ARRAYSIZE(unacked2));
uint64_t pending2[] = {3, 4};
VerifyInFlightPackets(pending2, ABSL_ARRAYSIZE(pending2));
std::vector<uint64_t> retransmittable2 = {1, 3, 4};
VerifyRetransmittablePackets(&retransmittable2[0], retransmittable2.size());
unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(4));
unacked_packets_.RemoveFromInFlight(QuicPacketNumber(4));
unacked_packets_.RemoveRetransmittability(QuicPacketNumber(4));
RetransmitAndSendPacket(3, 5, LOSS_RETRANSMISSION);
SerializedPacket packet6(CreateRetransmittablePacket(6));
unacked_packets_.AddSentPacket(&packet6, NOT_RETRANSMISSION, now_, true, true,
ECN_NOT_ECT);
std::vector<uint64_t> unacked3 = {3, 5, 6};
std::vector<uint64_t> retransmittable3 = {3, 5, 6};
VerifyUnackedPackets(&unacked3[0], unacked3.size());
VerifyRetransmittablePackets(&retransmittable3[0], retransmittable3.size());
uint64_t pending3[] = {3, 5, 6};
VerifyInFlightPackets(pending3, ABSL_ARRAYSIZE(pending3));
unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(6));
unacked_packets_.RemoveFromInFlight(QuicPacketNumber(6));
unacked_packets_.RemoveRetransmittability(QuicPacketNumber(6));
RetransmitAndSendPacket(5, 7, LOSS_RETRANSMISSION);
std::vector<uint64_t> unacked4 = {3, 5, 7};
std::vector<uint64_t> retransmittable4 = {3, 5, 7};
VerifyUnackedPackets(&unacked4[0], unacked4.size());
VerifyRetransmittablePackets(&retransmittable4[0], retransmittable4.size());
uint64_t pending4[] = {3, 5, 7};
VerifyInFlightPackets(pending4, ABSL_ARRAYSIZE(pending4));
unacked_packets_.RemoveFromInFlight(QuicPacketNumber(3));
unacked_packets_.RemoveFromInFlight(QuicPacketNumber(5));
uint64_t pending5[] = {7};
VerifyInFlightPackets(pending5, ABSL_ARRAYSIZE(pending5));
}
TEST_P(QuicUnackedPacketMapTest, RetransmitFourTimes) {
SerializedPacket packet1(CreateRetransmittablePacket(1));
unacked_packets_.AddSentPacket(&packet1, NOT_RETRANSMISSION, now_, true, true,
ECN_NOT_ECT);
SerializedPacket packet2(CreateRetransmittablePacket(2));
unacked_packets_.AddSentPacket(&packet2, NOT_RETRANSMISSION, now_, true, true,
ECN_NOT_ECT);
uint64_t unacked[] = {1, 2};
VerifyUnackedPackets(unacked, ABSL_ARRAYSIZE(unacked));
VerifyInFlightPackets(unacked, ABSL_ARRAYSIZE(unacked));
uint64_t retransmittable[] = {1, 2};
VerifyRetransmittablePackets(retransmittable,
ABSL_ARRAYSIZE(retransmittable));
unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(2));
unacked_packets_.RemoveFromInFlight(QuicPacketNumber(2));
unacked_packets_.RemoveRetransmittability(QuicPacketNumber(2));
unacked_packets_.RemoveFromInFlight(QuicPacketNumber(1));
RetransmitAndSendPacket(1, 3, LOSS_RETRANSMISSION);
uint64_t unacked2[] = {1, 3};
VerifyUnackedPackets(unacked2, ABSL_ARRAYSIZE(unacked2));
uint64_t pending2[] = {3};
VerifyInFlightPackets(pending2, ABSL_ARRAYSIZE(pending2));
std::vector<uint64_t> retransmittable2 = {1, 3};
VerifyRetransmittablePackets(&retransmittable2[0], retransmittable2.size());
RetransmitAndSendPacket(3, 4, PTO_RETRANSMISSION);
SerializedPacket packet5(CreateRetransmittablePacket(5));
unacked_packets_.AddSentPacket(&packet5, NOT_RETRANSMISSION, now_, true, true,
ECN_NOT_ECT);
uint64_t unacked3[] = {1, 3, 4, 5};
VerifyUnackedPackets(unacked3, ABSL_ARRAYSIZE(unacked3));
uint64_t pending3[] = {3, 4, 5};
VerifyInFlightPackets(pending3, ABSL_ARRAYSIZE(pending3));
std::vector<uint64_t> retransmittable3 = {1, 3, 4, 5};
VerifyRetransmittablePackets(&retransmittable3[0], retransmittable3.size());
unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(5));
unacked_packets_.RemoveFromInFlight(QuicPacketNumber(5));
unacked_packets_.RemoveRetransmittability(QuicPacketNumber(5));
unacked_packets_.RemoveFromInFlight(QuicPacketNumber(3));
unacked_packets_.RemoveFromInFlight(QuicPacketNumber(4));
RetransmitAndSendPacket(4, 6, LOSS_RETRANSMISSION);
std::vector<uint64_t> unacked4 = {4, 6};
VerifyUnackedPackets(&unacked4[0], unacked4.size());
uint64_t pending4[] = {6};
VerifyInFlightPackets(pending4, ABSL_ARRAYSIZE(pending4));
std::vector<uint64_t> retransmittable4 = {4, 6};
VerifyRetransmittablePackets(&retransmittable4[0], retransmittable4.size());
}
TEST_P(QuicUnackedPacketMapTest, SendWithGap) {
SerializedPacket packet1(CreateRetransmittablePacket(1));
unacked_packets_.AddSentPacket(&packet1, NOT_RETRANSMISSION, now_, true, true,
ECN_NOT_ECT);
SerializedPacket packet3(CreateRetransmittablePacket(3));
unacked_packets_.AddSentPacket(&packet3, NOT_RETRANSMISSION, now_, true, true,
ECN_NOT_ECT);
RetransmitAndSendPacket(3, 5, LOSS_RETRANSMISSION);
EXPECT_EQ(QuicPacketNumber(1u), unacked_packets_.GetLeastUnacked());
EXPECT_TRUE(unacked_packets_.IsUnacked(QuicPacketNumber(1)));
EXPECT_FALSE(unacked_packets_.IsUnacked(QuicPacketNumber(2)));
EXPECT_TRUE(unacked_packets_.IsUnacked(QuicPacketNumber(3)));
EXPECT_FALSE(unacked_packets_.IsUnacked(QuicPacketNumber(4)));
EXPECT_TRUE(unacked_packets_.IsUnacked(QuicPacketNumber(5)));
EXPECT_EQ(QuicPacketNumber(5u), unacked_packets_.largest_sent_packet());
}
TEST_P(QuicUnackedPacketMapTest, AggregateContiguousAckedStreamFrames) {
testing::InSequence s;
EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(0);
unacked_packets_.NotifyAggregatedStreamFrameAcked(QuicTime::Delta::Zero());
QuicTransmissionInfo info1;
QuicStreamFrame stream_frame1(3, false, 0, 100);
info1.retransmittable_frames.push_back(QuicFrame(stream_frame1));
QuicTransmissionInfo info2;
QuicStreamFrame stream_frame2(3, false, 100, 100);
info2.retransmittable_frames.push_back(QuicFrame(stream_frame2));
QuicTransmissionInfo info3;
QuicStreamFrame stream_frame3(3, false, 200, 100);
info3.retransmittable_frames.push_back(QuicFrame(stream_frame3));
QuicTransmissionInfo info4;
QuicStreamFrame stream_frame4(3, true, 300, 0);
info4.retransmittable_frames.push_back(QuicFrame(stream_frame4));
EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(0);
unacked_packets_.MaybeAggregateAckedStreamFrame(
info1, QuicTime::Delta::Zero(), QuicTime::Zero());
EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(0);
unacked_packets_.MaybeAggregateAckedStreamFrame(
info2, QuicTime::Delta::Zero(), QuicTime::Zero());
EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(0);
unacked_packets_.MaybeAggregateAckedStreamFrame(
info3, QuicTime::Delta::Zero(), QuicTime::Zero());
EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(1);
unacked_packets_.MaybeAggregateAckedStreamFrame(
info4, QuicTime::Delta::Zero(), QuicTime::Zero());
}
TEST_P(QuicUnackedPacketMapTest, CannotAggregateIfDataLengthOverflow) {
QuicByteCount kMaxAggregatedDataLength =
std::numeric_limits<decltype(QuicStreamFrame().data_length)>::max();
QuicStreamId stream_id = 2;
for (const QuicPacketLength acked_stream_length : {512, 1300}) {
++stream_id;
QuicStreamOffset offset = 0;
QuicByteCount aggregated_data_length = 0;
while (offset < 1e6) {
QuicTransmissionInfo info;
QuicStreamFrame stream_frame(stream_id, false, offset,
acked_stream_length);
info.retransmittable_frames.push_back(QuicFrame(stream_frame));
const QuicStreamFrame& aggregated_stream_frame =
QuicUnackedPacketMapPeer::GetAggregatedStreamFrame(unacked_packets_);
if (aggregated_stream_frame.data_length + acked_stream_length <=
kMaxAggregatedDataLength) {
EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(0);
unacked_packets_.MaybeAggregateAckedStreamFrame(
info, QuicTime::Delta::Zero(), QuicTime::Zero());
aggregated_data_length += acked_stream_length;
        testing::Mock::VerifyAndClearExpectations(&notifier_);
} else {
EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(1);
unacked_packets_.MaybeAggregateAckedStreamFrame(
info, QuicTime::Delta::Zero(), QuicTime::Zero());
aggregated_data_length = acked_stream_length;
        testing::Mock::VerifyAndClearExpectations(&notifier_);
}
EXPECT_EQ(aggregated_data_length, aggregated_stream_frame.data_length);
offset += acked_stream_length;
}
QuicTransmissionInfo info;
QuicStreamFrame stream_frame(stream_id, true, offset, acked_stream_length);
info.retransmittable_frames.push_back(QuicFrame(stream_frame));
EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(1);
unacked_packets_.MaybeAggregateAckedStreamFrame(
info, QuicTime::Delta::Zero(), QuicTime::Zero());
    testing::Mock::VerifyAndClearExpectations(&notifier_);
}
}
TEST_P(QuicUnackedPacketMapTest, CannotAggregateAckedControlFrames) {
testing::InSequence s;
QuicWindowUpdateFrame window_update(1, 5, 100);
QuicStreamFrame stream_frame1(3, false, 0, 100);
QuicStreamFrame stream_frame2(3, false, 100, 100);
QuicBlockedFrame blocked(2, 5, 0);
QuicGoAwayFrame go_away(3, QUIC_PEER_GOING_AWAY, 5, "Going away.");
QuicTransmissionInfo info1;
info1.retransmittable_frames.push_back(QuicFrame(window_update));
info1.retransmittable_frames.push_back(QuicFrame(stream_frame1));
info1.retransmittable_frames.push_back(QuicFrame(stream_frame2));
QuicTransmissionInfo info2;
info2.retransmittable_frames.push_back(QuicFrame(blocked));
info2.retransmittable_frames.push_back(QuicFrame(&go_away));
EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(1);
unacked_packets_.MaybeAggregateAckedStreamFrame(
info1, QuicTime::Delta::Zero(), QuicTime::Zero());
EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(3);
unacked_packets_.MaybeAggregateAckedStreamFrame(
info2, QuicTime::Delta::Zero(), QuicTime::Zero());
EXPECT_CALL(notifier_, OnFrameAcked(_, _, _)).Times(0);
unacked_packets_.NotifyAggregatedStreamFrameAcked(QuicTime::Delta::Zero());
}
TEST_P(QuicUnackedPacketMapTest, LargestSentPacketMultiplePacketNumberSpaces) {
unacked_packets_.EnableMultiplePacketNumberSpacesSupport();
EXPECT_FALSE(
unacked_packets_
.GetLargestSentRetransmittableOfPacketNumberSpace(INITIAL_DATA)
.IsInitialized());
SerializedPacket packet1(CreateRetransmittablePacket(1));
packet1.encryption_level = ENCRYPTION_INITIAL;
unacked_packets_.AddSentPacket(&packet1, NOT_RETRANSMISSION, now_, true, true,
ECN_NOT_ECT);
EXPECT_EQ(QuicPacketNumber(1u), unacked_packets_.largest_sent_packet());
EXPECT_EQ(QuicPacketNumber(1),
unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace(
INITIAL_DATA));
EXPECT_FALSE(
unacked_packets_
.GetLargestSentRetransmittableOfPacketNumberSpace(HANDSHAKE_DATA)
.IsInitialized());
SerializedPacket packet2(CreateRetransmittablePacket(2));
packet2.encryption_level = ENCRYPTION_HANDSHAKE;
unacked_packets_.AddSentPacket(&packet2, NOT_RETRANSMISSION, now_, true, true,
ECN_NOT_ECT);
EXPECT_EQ(QuicPacketNumber(2u), unacked_packets_.largest_sent_packet());
EXPECT_EQ(QuicPacketNumber(1),
unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace(
INITIAL_DATA));
EXPECT_EQ(QuicPacketNumber(2),
unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace(
HANDSHAKE_DATA));
EXPECT_FALSE(
unacked_packets_
.GetLargestSentRetransmittableOfPacketNumberSpace(APPLICATION_DATA)
.IsInitialized());
SerializedPacket packet3(CreateRetransmittablePacket(3));
packet3.encryption_level = ENCRYPTION_ZERO_RTT;
unacked_packets_.AddSentPacket(&packet3, NOT_RETRANSMISSION, now_, true, true,
ECN_NOT_ECT);
EXPECT_EQ(QuicPacketNumber(3u), unacked_packets_.largest_sent_packet());
EXPECT_EQ(QuicPacketNumber(1),
unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace(
INITIAL_DATA));
EXPECT_EQ(QuicPacketNumber(2),
unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace(
HANDSHAKE_DATA));
EXPECT_EQ(QuicPacketNumber(3),
unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace(
APPLICATION_DATA));
SerializedPacket packet4(CreateRetransmittablePacket(4));
packet4.encryption_level = ENCRYPTION_FORWARD_SECURE;
unacked_packets_.AddSentPacket(&packet4, NOT_RETRANSMISSION, now_, true, true,
ECN_NOT_ECT);
EXPECT_EQ(QuicPacketNumber(4u), unacked_packets_.largest_sent_packet());
EXPECT_EQ(QuicPacketNumber(1),
unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace(
INITIAL_DATA));
EXPECT_EQ(QuicPacketNumber(2),
unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace(
HANDSHAKE_DATA));
EXPECT_EQ(QuicPacketNumber(4),
unacked_packets_.GetLargestSentRetransmittableOfPacketNumberSpace(
APPLICATION_DATA));
EXPECT_TRUE(unacked_packets_.GetLastPacketContent() & (1 << STREAM_FRAME));
EXPECT_FALSE(unacked_packets_.GetLastPacketContent() & (1 << ACK_FRAME));
}
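// Verifies that ReserveInitialCapacity() pre-allocates the requested number
// of slots for unacked packet storage.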
TEST_P(QuicUnackedPacketMapTest, ReserveInitialCapacityTest) {
QuicUnackedPacketMap unacked_packets(GetParam());
ASSERT_EQ(QuicUnackedPacketMapPeer::GetCapacity(unacked_packets), 0u);
unacked_packets.ReserveInitialCapacity(16);
QuicStreamId stream_id(1);
SerializedPacket packet(CreateRetransmittablePacketForStream(1, stream_id));
unacked_packets.AddSentPacket(&packet, TransmissionType::NOT_RETRANSMISSION,
now_, true, true, ECN_NOT_ECT);
ASSERT_EQ(QuicUnackedPacketMapPeer::GetCapacity(unacked_packets), 16u);
}
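// DebugString() should track size, least unacked, largest sent/acked
// packets, and in-flight byte/packet counts as packets are sent and acked.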
TEST_P(QuicUnackedPacketMapTest, DebugString) {
EXPECT_EQ(unacked_packets_.DebugString(),
"{size: 0, least_unacked: 1, largest_sent_packet: uninitialized, "
"largest_acked: uninitialized, bytes_in_flight: 0, "
"packets_in_flight: 0}");
SerializedPacket packet1(CreateRetransmittablePacket(1));
unacked_packets_.AddSentPacket(&packet1, NOT_RETRANSMISSION, now_, true, true,
ECN_NOT_ECT);
EXPECT_EQ(
unacked_packets_.DebugString(),
"{size: 1, least_unacked: 1, largest_sent_packet: 1, largest_acked: "
"uninitialized, bytes_in_flight: 1000, packets_in_flight: 1}");
SerializedPacket packet2(CreateRetransmittablePacket(2));
unacked_packets_.AddSentPacket(&packet2, NOT_RETRANSMISSION, now_, true, true,
ECN_NOT_ECT);
unacked_packets_.RemoveFromInFlight(QuicPacketNumber(1));
unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(1));
unacked_packets_.RemoveObsoletePackets();
EXPECT_EQ(
unacked_packets_.DebugString(),
"{size: 1, least_unacked: 2, largest_sent_packet: 2, largest_acked: 1, "
"bytes_in_flight: 1000, packets_in_flight: 1}");
}
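// The ECN codepoint passed to AddSentPacket() must be recorded in each
// packet's transmission info.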
TEST_P(QuicUnackedPacketMapTest, EcnInfoStored) {
SerializedPacket packet1(CreateRetransmittablePacket(1));
unacked_packets_.AddSentPacket(&packet1, NOT_RETRANSMISSION, now_, true, true,
ECN_NOT_ECT);
SerializedPacket packet2(CreateRetransmittablePacket(2));
unacked_packets_.AddSentPacket(&packet2, NOT_RETRANSMISSION, now_, true, true,
ECN_ECT0);
SerializedPacket packet3(CreateRetransmittablePacket(3));
unacked_packets_.AddSentPacket(&packet3, NOT_RETRANSMISSION, now_, true, true,
ECN_ECT1);
EXPECT_EQ(
unacked_packets_.GetTransmissionInfo(QuicPacketNumber(1)).ecn_codepoint,
ECN_NOT_ECT);
EXPECT_EQ(
unacked_packets_.GetTransmissionInfo(QuicPacketNumber(2)).ecn_codepoint,
ECN_ECT0);
EXPECT_EQ(
unacked_packets_.GetTransmissionInfo(QuicPacketNumber(3)).ecn_codepoint,
ECN_ECT1);
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_unacked_packet_map.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_unacked_packet_map_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
0a1cf107-125e-40dd-85f7-591c64563636 | cpp | tensorflow/tensorflow | python_op_gen_annotator | tensorflow/python/framework/python_op_gen_annotator.cc | tensorflow/python/framework/python_op_gen_annotator_test.cc | #include "tensorflow/python/framework/python_op_gen_annotator.h"
#include <cstdint>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/strings/escaping.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "tensorflow/python/framework/kythe_metadata.pb.h"
#include "tensorflow/python/framework/op_reg_offset.pb.h"
namespace tensorflow {
namespace python_op_gen_internal {
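// Records the byte range of `function_name` within the generated file
// (relative to base_pos_), keyed by op name.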
void GeneratedCodeAnnotator::AddAnnotation(const OpDef& op_def,
absl::string_view function_name,
uint32_t offset_start) {
const uint32_t start_byte = base_pos_ + offset_start;
const uint32_t end_byte = start_byte + function_name.size();
byte_offsets_map_[op_def.name()].generated_start = start_byte;
byte_offsets_map_[op_def.name()].generated_end = end_byte;
}
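// Copies the C++ op-registration source offsets into the entries collected
// by AddAnnotation(); offsets for ops that were never annotated are skipped.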
void GeneratedCodeAnnotator::FillSourceOffsets(
const OpRegOffsets& op_reg_offsets) {
for (const OpRegOffset& offset : op_reg_offsets.offsets()) {
if (byte_offsets_map_.find(offset.name()) != byte_offsets_map_.end()) {
byte_offsets_map_[offset.name()].file_path = offset.filepath();
byte_offsets_map_[offset.name()].source_start = offset.start();
byte_offsets_map_[offset.name()].source_end = offset.end();
}
}
}
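// Serializes one ANCHOR_ANCHOR mapping per annotated op (skipping ops with
// no known source file) into a base64-encoded GeneratedCodeInfo proto
// prefixed with a Python comment marker.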
string GeneratedCodeAnnotator::BuildKytheMetadata() {
GeneratedCodeInfo generated_code_info;
generated_code_info.set_type(GeneratedCodeInfo::KYTHE0);
for (const auto& [name, offsets] : byte_offsets_map_) {
if (offsets.file_path.empty()) {
continue;
}
MappingRule* meta = generated_code_info.add_meta();
meta->set_type(MappingRule::ANCHOR_ANCHOR);
meta->set_edge("/kythe/edge/imputes");
meta->set_source_begin(offsets.source_start);
meta->set_source_end(offsets.source_end);
meta->set_target_begin(offsets.generated_start);
meta->set_target_end(offsets.generated_end);
VName* vname = meta->mutable_source_vname();
vname->set_signature(absl::StrFormat(
"@%d:%d@tensorflow_op#%s#%s#%s", offsets.source_start,
offsets.source_end, name, kKytheCorpus, offsets.file_path));
vname->set_corpus(std::string(kKytheCorpus));
vname->set_path(offsets.file_path);
vname->set_language("c++");
}
return "# kythe.proto.metadata.GeneratedCodeInfo:" +
absl::Base64Escape(generated_code_info.SerializeAsString());
}
}
} | #include "tensorflow/python/framework/python_op_gen_annotator.h"
#include <utility>
#include "absl/strings/escaping.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_split.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/python/framework/kythe_metadata.pb.h"
namespace tensorflow {
namespace python_op_gen_internal {
namespace {
using ::testing::StartsWith;
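// Strips the "# kythe.proto.metadata.GeneratedCodeInfo:" prefix and decodes
// the base64 payload back into a GeneratedCodeInfo proto.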
GeneratedCodeInfo ParseMetadata(string metadata) {
GeneratedCodeInfo generated_code_info;
std::pair<string, string> p = absl::StrSplit(metadata, ':');
string serialized_generated_code_info;
absl::Base64Unescape(p.second, &serialized_generated_code_info);
generated_code_info.ParseFromString(serialized_generated_code_info);
return generated_code_info;
}
TEST(PythonOpGenAnnotatorTest, AddAnnotationWithoutSourceOffsets) {
GeneratedCodeAnnotator annotator;
OpDef fakeOpDef;
fakeOpDef.set_name("fake_op");
annotator.AddAnnotation(fakeOpDef, "fake_op", 0);
string meta = annotator.BuildKytheMetadata();
ASSERT_THAT(meta, StartsWith("# kythe.proto.metadata.GeneratedCodeInfo:"));
GeneratedCodeInfo actual = ParseMetadata(meta);
GeneratedCodeInfo expected;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString("type: KYTHE0", &expected));
EXPECT_EQ(actual.SerializeAsString(), expected.SerializeAsString());
}
TEST(PythonOpGenAnnotatorTest, AddAnnotationWithSourceOffsets) {
GeneratedCodeAnnotator annotator;
OpDef fakeOpDef;
fakeOpDef.set_name("fake_op");
OpRegOffsets fakeOffsets;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
R"pb(
offsets {
name: "fake_op",
filepath: "file/path/to/fake_op.cc",
start: 7,
end: 11,
}
)pb",
&fakeOffsets));
annotator.AddAnnotation(fakeOpDef, "fake_op", 100);
annotator.FillSourceOffsets(fakeOffsets);
string meta = annotator.BuildKytheMetadata();
ASSERT_THAT(meta, StartsWith("# kythe.proto.metadata.GeneratedCodeInfo:"));
GeneratedCodeInfo actual = ParseMetadata(meta);
EXPECT_EQ(actual.meta(0).type(), MappingRule::ANCHOR_ANCHOR);
EXPECT_EQ(actual.meta(0).edge(), "/kythe/edge/imputes");
EXPECT_EQ(
actual.meta(0).source_vname().signature(),
absl::StrFormat("@7:11@tensorflow_op#fake_op#%s#file/path/to/fake_op.cc",
kKytheCorpus));
EXPECT_EQ(actual.meta(0).source_vname().path(), "file/path/to/fake_op.cc");
EXPECT_EQ(actual.meta(0).source_begin(), 7);
EXPECT_EQ(actual.meta(0).source_end(), 11);
EXPECT_EQ(actual.meta(0).target_begin(), 100);
EXPECT_EQ(actual.meta(0).target_end(), 107);
}
TEST(PythonOpGenAnnotatorTest, AddAnnotationWithSourceOffsetsAndNonZeroBase) {
GeneratedCodeAnnotator annotator;
OpDef fakeOpDef;
fakeOpDef.set_name("fake_op");
OpRegOffsets fakeOffsets;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
R"pb(
offsets {
name: "fake_op",
filepath: "file/path/to/fake_op.cc",
start: 7,
end: 11,
}
)pb",
&fakeOffsets));
annotator.SetBase(10);
annotator.AddAnnotation(fakeOpDef, "fake_op", 100);
annotator.FillSourceOffsets(fakeOffsets);
string meta = annotator.BuildKytheMetadata();
ASSERT_THAT(meta, StartsWith("# kythe.proto.metadata.GeneratedCodeInfo:"));
GeneratedCodeInfo actual = ParseMetadata(meta);
EXPECT_EQ(actual.meta(0).type(), MappingRule::ANCHOR_ANCHOR);
EXPECT_EQ(actual.meta(0).edge(), "/kythe/edge/imputes");
EXPECT_EQ(
actual.meta(0).source_vname().signature(),
absl::StrFormat("@7:11@tensorflow_op#fake_op#%s#file/path/to/fake_op.cc",
kKytheCorpus));
EXPECT_EQ(actual.meta(0).source_vname().path(), "file/path/to/fake_op.cc");
EXPECT_EQ(actual.meta(0).source_begin(), 7);
EXPECT_EQ(actual.meta(0).source_end(), 11);
EXPECT_EQ(actual.meta(0).target_begin(), 110);
EXPECT_EQ(actual.meta(0).target_end(), 117);
}
TEST(PythonOpGenAnnotatorTest, AddMultipleAnnotation) {
GeneratedCodeAnnotator annotator;
OpDef fakeOpDef;
OpRegOffsets fakeOffsets;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
R"pb(
offsets {
name: "fake_op_1",
filepath: "file/path/to/fake_op.cc",
start: 7,
end: 11,
}
offsets {
name: "fake_op_2",
filepath: "file/path/to/fake_op.cc",
start: 101,
end: 103,
}
)pb",
&fakeOffsets));
fakeOpDef.set_name("fake_op_1");
annotator.AddAnnotation(fakeOpDef, "fake_op_1", 10);
fakeOpDef.set_name("fake_op_2");
annotator.AddAnnotation(fakeOpDef, "fake_op_2", 100);
annotator.FillSourceOffsets(fakeOffsets);
string meta = annotator.BuildKytheMetadata();
ASSERT_THAT(meta, StartsWith("# kythe.proto.metadata.GeneratedCodeInfo:"));
GeneratedCodeInfo actual = ParseMetadata(meta);
EXPECT_EQ(actual.meta_size(), 2);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/python/framework/python_op_gen_annotator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/python/framework/python_op_gen_annotator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
70f4d2fa-0102-416a-8f90-3068250b5d23 | cpp | google/tensorstore | nditerable_transformed_array | tensorstore/internal/nditerable_transformed_array.cc | tensorstore/internal/nditerable_transformed_array_test.cc | #include "tensorstore/internal/nditerable_transformed_array.h"
#include <cassert>
#include <cstddef>
#include <memory>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/iterate_impl.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_array.h"
#include "tensorstore/internal/nditerable_array_util.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/internal/unique_with_intrusive_allocator.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/byte_strided_pointer.h"
#include "tensorstore/util/element_pointer.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
namespace input_dim_iter_flags =
internal_index_space::input_dimension_iteration_flags;
namespace {
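// NDIterable over an array viewed through an index transform. Strided
// output dimensions are iterated directly; array-indexed dimensions go
// through the index arrays held in SingleArrayIterationState.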
class IterableImpl : public NDIterable::Base<IterableImpl> {
public:
IterableImpl(IndexTransform<> transform, allocator_type allocator)
: transform_(std::move(transform)),
input_dimension_flags_(transform_.input_rank(),
input_dim_iter_flags::can_skip, allocator) {}
allocator_type get_allocator() const override {
return input_dimension_flags_.get_allocator();
}
int GetDimensionOrder(DimensionIndex dim_i,
DimensionIndex dim_j) const override {
auto flags_i = input_dimension_flags_[dim_i];
if ((flags_i & input_dim_iter_flags::array_indexed) !=
(input_dimension_flags_[dim_j] & input_dim_iter_flags::array_indexed)) {
return (flags_i & input_dim_iter_flags::array_indexed) ? -2 : 2;
}
if (flags_i & input_dim_iter_flags::array_indexed) {
for (DimensionIndex i = 0; i < state_.num_array_indexed_output_dimensions;
++i) {
const int order = GetDimensionOrderFromByteStrides(
state_.index_array_byte_strides[i][dim_i],
state_.index_array_byte_strides[i][dim_j]);
if (order != 0) return order;
}
}
return GetDimensionOrderFromByteStrides(state_.input_byte_strides[dim_i],
state_.input_byte_strides[dim_j]);
}
void UpdateDirectionPrefs(NDIterable::DirectionPref* prefs) const override {
const DimensionIndex input_rank = transform_.input_rank();
for (DimensionIndex i = 0; i < state_.num_array_indexed_output_dimensions;
++i) {
UpdateDirectionPrefsFromByteStrides(
tensorstore::span(state_.index_array_byte_strides[i], input_rank),
prefs);
}
UpdateDirectionPrefsFromByteStrides(
tensorstore::span(&state_.input_byte_strides[0], input_rank), prefs);
}
bool CanCombineDimensions(DimensionIndex dim_i, int dir_i,
DimensionIndex dim_j, int dir_j,
Index size_j) const override {
auto flags_i = input_dimension_flags_[dim_i];
if ((flags_i & input_dim_iter_flags::array_indexed) !=
(input_dimension_flags_[dim_j] & input_dim_iter_flags::array_indexed)) {
return false;
}
if (flags_i & input_dim_iter_flags::array_indexed) {
for (DimensionIndex i = 0; i < state_.num_array_indexed_output_dimensions;
++i) {
if (!CanCombineStridedArrayDimensions(
state_.index_array_byte_strides[i][dim_i], dir_i,
state_.index_array_byte_strides[i][dim_j], dir_j, size_j)) {
return false;
}
}
}
return CanCombineStridedArrayDimensions(
state_.input_byte_strides[dim_i], dir_i,
state_.input_byte_strides[dim_j], dir_j, size_j);
}
DataType dtype() const override { return dtype_; }
IterationBufferConstraint GetIterationBufferConstraint(
IterationLayoutView layout) const override {
const DimensionIndex penultimate_dim =
layout.iteration_dimensions[layout.iteration_dimensions.size() - 2];
const DimensionIndex last_dim =
layout.iteration_dimensions[layout.iteration_dimensions.size() - 1];
if ((last_dim == -1 || (input_dimension_flags_[last_dim] &
input_dim_iter_flags::array_indexed) == 0) &&
(penultimate_dim == -1 || (input_dimension_flags_[penultimate_dim] &
input_dim_iter_flags::array_indexed) == 0)) {
return {(last_dim == -1 || state_.input_byte_strides[last_dim] *
layout.directions[last_dim] ==
this->dtype_->size)
? IterationBufferKind::kContiguous
: IterationBufferKind::kStrided,
false};
} else {
return {IterationBufferKind::kIndexed, false};
}
}
std::ptrdiff_t GetWorkingMemoryBytesPerElement(
IterationLayoutView layout,
IterationBufferKind buffer_kind) const override {
return buffer_kind == IterationBufferKind::kIndexed ? sizeof(Index) : 0;
}
NDIterator::Ptr GetIterator(
NDIterable::IterationBufferKindLayoutView layout) const override {
return MakeUniqueWithVirtualIntrusiveAllocator<IteratorImpl>(
get_allocator(), this, layout);
}
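  // Iterator state is packed into `buffer_`: the direction-adjusted index
  // array base pointers, then one byte-stride row for the array plus one
  // per index array, and finally (kIndexed only) the block's offsets array.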
class IteratorImpl : public NDIterator::Base<IteratorImpl> {
public:
IteratorImpl(const IterableImpl* iterable,
NDIterable::IterationBufferKindLayoutView layout,
allocator_type allocator)
: num_index_arrays_(
iterable->state_.num_array_indexed_output_dimensions),
num_index_array_iteration_dims_(0),
iterable_(iterable),
buffer_(
num_index_arrays_ +
layout.iteration_rank() * (num_index_arrays_ + 1) +
((layout.buffer_kind == IterationBufferKind::kIndexed)
? layout.block_shape[0] * layout.block_shape[1]
: 0),
allocator) {
static_assert(sizeof(Index) >= sizeof(void*));
for (DimensionIndex j = 0; j < num_index_arrays_; ++j) {
ByteStridedPointer<const Index> index_array_pointer =
iterable->state_.index_array_pointers[j].get();
for (DimensionIndex dim = 0; dim < layout.full_rank(); ++dim) {
if (layout.directions[dim] != -1) continue;
const Index size_minus_1 = layout.shape[dim] - 1;
const Index index_array_byte_stride =
iterable->state_.index_array_byte_strides[j][dim];
index_array_pointer +=
wrap_on_overflow::Multiply(index_array_byte_stride, size_minus_1);
}
buffer_[j] = reinterpret_cast<Index>(index_array_pointer.get());
}
Index base_offset = 0;
for (DimensionIndex dim = 0; dim < layout.full_rank(); ++dim) {
if (layout.directions[dim] != -1) continue;
const Index size_minus_1 = layout.shape[dim] - 1;
const Index input_byte_stride =
iterable->state_.input_byte_strides[dim];
base_offset = wrap_on_overflow::Add(
base_offset,
wrap_on_overflow::Multiply(input_byte_stride, size_minus_1));
}
for (DimensionIndex i = 0; i < layout.iteration_rank(); ++i) {
const DimensionIndex dim = layout.iteration_dimensions[i];
if (dim == -1) {
for (DimensionIndex j = 0; j < num_index_arrays_ + 1; ++j) {
buffer_[num_index_arrays_ + layout.iteration_rank() * j + i] = 0;
}
} else {
const Index dir = layout.directions[dim];
const Index input_byte_stride =
iterable->state_.input_byte_strides[dim];
buffer_[num_index_arrays_ + i] =
wrap_on_overflow::Multiply(input_byte_stride, dir);
if (iterable->input_dimension_flags_[dim] &
input_dim_iter_flags::array_indexed) {
num_index_array_iteration_dims_ = i + 1;
for (DimensionIndex j = 0; j < num_index_arrays_; ++j) {
const Index index_array_byte_stride =
iterable->state_.index_array_byte_strides[j][dim];
buffer_[num_index_arrays_ + layout.iteration_rank() * (j + 1) +
i] =
wrap_on_overflow::Multiply(index_array_byte_stride, dir);
}
}
}
}
if (layout.buffer_kind == IterationBufferKind::kIndexed) {
Index* offsets_array =
buffer_.data() + num_index_arrays_ +
layout.iteration_rank() * (num_index_arrays_ + 1);
pointer_ =
IterationBufferPointer{iterable->state_.base_pointer + base_offset,
layout.block_shape[1], offsets_array};
if (num_index_array_iteration_dims_ + 1 < layout.iteration_rank()) {
FillOffsetsArrayFromStride(
buffer_[num_index_arrays_ + layout.iteration_rank() - 2],
buffer_[num_index_arrays_ + layout.iteration_rank() - 1],
layout.block_shape[0], layout.block_shape[1], offsets_array);
}
} else {
assert(num_index_array_iteration_dims_ + 1 < layout.iteration_rank());
pointer_ = IterationBufferPointer{
iterable->state_.base_pointer + base_offset,
buffer_[num_index_arrays_ + layout.iteration_rank() - 2],
buffer_[num_index_arrays_ + layout.iteration_rank() - 1]};
}
}
allocator_type get_allocator() const override {
return buffer_.get_allocator();
}
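    // Resolves the block at `indices` to a base pointer. When an index
    // array varies within the last two iteration dimensions, per-element
    // byte offsets are recomputed into the offsets array instead.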
bool GetBlock(tensorstore::span<const Index> indices,
IterationBufferShape block_shape,
IterationBufferPointer* pointer,
absl::Status* status) override {
IterationBufferPointer block_pointer = pointer_;
block_pointer.pointer += IndexInnerProduct(
indices.size(), indices.data(), buffer_.data() + num_index_arrays_);
if (num_index_array_iteration_dims_ + 1 < indices.size()) {
for (DimensionIndex j = 0; j < num_index_arrays_; ++j) {
const Index index = ByteStridedPointer<const Index>(
reinterpret_cast<const Index*>(buffer_[j]))[IndexInnerProduct(
num_index_array_iteration_dims_, indices.data(),
buffer_.data() + num_index_arrays_ + indices.size() * (j + 1))];
block_pointer.pointer += wrap_on_overflow::Multiply(
iterable_->state_.index_array_output_byte_strides[j], index);
}
} else {
block_pointer.byte_offsets_outer_stride = block_shape[1];
Index* offsets_array = const_cast<Index*>(block_pointer.byte_offsets);
FillOffsetsArrayFromStride(
buffer_[num_index_arrays_ + indices.size() - 2],
buffer_[num_index_arrays_ + indices.size() - 1], block_shape[0],
block_shape[1], offsets_array);
for (DimensionIndex j = 0; j < num_index_arrays_; ++j) {
const Index* index_array_byte_strides =
buffer_.data() + num_index_arrays_ + indices.size() * (j + 1);
ByteStridedPointer<const Index> index_array_pointer =
ByteStridedPointer<const Index>(
reinterpret_cast<const Index*>(buffer_[j])) +
IndexInnerProduct(indices.size() - 2, indices.data(),
index_array_byte_strides);
const Index output_byte_stride =
iterable_->state_.index_array_output_byte_strides[j];
const Index penultimate_index_array_byte_stride =
index_array_byte_strides[indices.size() - 2];
const Index last_index_array_byte_stride =
index_array_byte_strides[indices.size() - 1];
if (last_index_array_byte_stride == 0 &&
penultimate_index_array_byte_stride == 0) {
block_pointer.pointer += wrap_on_overflow::Multiply(
output_byte_stride, *index_array_pointer);
} else {
Index block_start0 = indices[indices.size() - 2];
Index block_start1 = indices[indices.size() - 1];
for (Index outer = 0; outer < block_shape[0]; ++outer) {
for (Index inner = 0; inner < block_shape[1]; ++inner) {
Index cur_contribution = wrap_on_overflow::Multiply(
output_byte_stride,
index_array_pointer[wrap_on_overflow::Add(
wrap_on_overflow::Multiply(
outer + block_start0,
penultimate_index_array_byte_stride),
wrap_on_overflow::Multiply(
inner + block_start1,
last_index_array_byte_stride))]);
auto& offset = offsets_array[outer * block_shape[1] + inner];
offset = wrap_on_overflow::Add(offset, cur_contribution);
}
}
}
}
}
*pointer = block_pointer;
return true;
}
private:
DimensionIndex num_index_arrays_;
DimensionIndex num_index_array_iteration_dims_;
const IterableImpl* iterable_;
IterationBufferPointer pointer_;
std::vector<Index, ArenaAllocator<Index>> buffer_;
};
std::shared_ptr<const void> data_owner_;
IndexTransform<> transform_;
internal_index_space::SingleArrayIterationState state_;
DataType dtype_;
std::vector<input_dim_iter_flags::Bitmask,
ArenaAllocator<input_dim_iter_flags::Bitmask>>
input_dimension_flags_;
};
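// When no output dimension uses an index array, the transform reduces to a
// plain strided layout, so the cheaper array iterable can be used instead.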
Result<NDIterable::Ptr> MaybeConvertToArrayNDIterable(
std::unique_ptr<IterableImpl, VirtualDestroyDeleter> impl, Arena* arena) {
if (impl->state_.num_array_indexed_output_dimensions == 0) {
return GetArrayNDIterable(
SharedOffsetArrayView<const void>(
SharedElementPointer<const void>(
std::shared_ptr<const void>(std::move(impl->data_owner_),
impl->state_.base_pointer),
impl->dtype_),
StridedLayoutView<>(impl->transform_.input_rank(),
impl->transform_.input_shape().data(),
&impl->state_.input_byte_strides[0])),
arena);
}
return impl;
}
}
Result<NDIterable::Ptr> GetTransformedArrayNDIterable(
SharedOffsetArrayView<const void> array, IndexTransformView<> transform,
Arena* arena) {
if (!transform.valid()) {
return GetArrayNDIterable(array, arena);
}
auto impl = MakeUniqueWithVirtualIntrusiveAllocator<IterableImpl>(
ArenaAllocator<>(arena), transform);
TENSORSTORE_RETURN_IF_ERROR(InitializeSingleArrayIterationState(
array, internal_index_space::TransformAccess::rep(transform),
transform.input_origin().data(), transform.input_shape().data(),
&impl->state_, impl->input_dimension_flags_.data()));
impl->dtype_ = array.dtype();
impl->data_owner_ = std::move(array.element_pointer().pointer());
return MaybeConvertToArrayNDIterable(std::move(impl), arena);
}
Result<NDIterable::Ptr> GetTransformedArrayNDIterable(
TransformedArray<Shared<const void>> array, Arena* arena) {
auto impl = MakeUniqueWithVirtualIntrusiveAllocator<IterableImpl>(
ArenaAllocator<>(arena), std::move(array.transform()));
TENSORSTORE_RETURN_IF_ERROR(InitializeSingleArrayIterationState(
ElementPointer<const void>(array.element_pointer()),
internal_index_space::TransformAccess::rep(impl->transform_),
impl->transform_.input_origin().data(),
impl->transform_.input_shape().data(), &impl->state_,
impl->input_dimension_flags_.data()));
impl->dtype_ = array.dtype();
impl->data_owner_ = std::move(array.element_pointer().pointer());
return MaybeConvertToArrayNDIterable(std::move(impl), arena);
}
}
} | #include "tensorstore/internal/nditerable_transformed_array.h"
#include <stddef.h>
#include <array>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/array_testutil.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_buffer_management.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/element_pointer.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::AllocateArray;
using ::tensorstore::Index;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kImplicit;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Result;
using ::tensorstore::Shared;
using ::tensorstore::SharedArray;
using ::tensorstore::skip_repeated_elements;
using ::tensorstore::StridedLayout;
using ::tensorstore::TransformedArray;
using ::tensorstore::internal::Arena;
using ::tensorstore::internal::GetTransformedArrayNDIterable;
using ::tensorstore::internal::IterationBufferKind;
using ::tensorstore::internal::IterationBufferShape;
using ::tensorstore::internal::MultiNDIterator;
using ::tensorstore::internal::NDIterable;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::FieldsAre;
using ::testing::Pair;
using IterationTrace = std::vector<void*>;
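// Runs the multi-iterator to completion, recording every element pointer
// visited for each iterable.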
template <typename... Element>
std::pair<std::array<IterationTrace, sizeof...(Element)>, absl::Status>
GetIterationTrace(
MultiNDIterator<sizeof...(Element), true>* multi_iterator) {
std::pair<std::array<IterationTrace, sizeof...(Element)>, absl::Status>
result;
for (auto block_shape = multi_iterator->ResetAtBeginning();
block_shape[0] && block_shape[1];
block_shape = multi_iterator->StepForward(block_shape)) {
if (!multi_iterator->GetBlock(block_shape, &result.second)) {
break;
}
ptrdiff_t i = 0;
const auto unused = {(
[&] {
const auto get_trace_func = [](void* ptr, IterationTrace* trace) {
trace->push_back(ptr);
};
tensorstore::internal::ElementwiseFunction<1, IterationTrace*> func =
tensorstore::internal::SimpleElementwiseFunction<
decltype(get_trace_func)(Element), IterationTrace*>();
func[multi_iterator->buffer_kind](nullptr, block_shape,
multi_iterator->block_pointers()[i],
&result.first[i]);
++i;
}(),
0)...};
(void)unused;
}
return result;
}
template <size_t N>
using BlockTrace =
std::vector<std::tuple<std::vector<Index>, IterationBufferShape,
std::array<IterationTrace, N>>>;
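// Like GetIterationTrace, but also records the position and block shape of
// each block.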
template <typename... Element>
std::pair<BlockTrace<sizeof...(Element)>, absl::Status> GetBlockTrace(
MultiNDIterator<sizeof...(Element), true>* multi_iterator) {
std::pair<BlockTrace<sizeof...(Element)>, absl::Status> result;
for (auto block_shape = multi_iterator->ResetAtBeginning();
block_shape[0] && block_shape[1];
block_shape = multi_iterator->StepForward(block_shape)) {
if (!multi_iterator->GetBlock(block_shape, &result.second)) {
break;
}
auto& [position, shape, traces] = result.first.emplace_back();
position.assign(multi_iterator->position().begin(),
multi_iterator->position().end());
shape = block_shape;
ptrdiff_t i = 0;
const auto unused = {(
[&, traces_ptr = &traces[i]] {
const auto get_trace_func = [](void* ptr, IterationTrace* trace) {
trace->push_back(ptr);
};
tensorstore::internal::ElementwiseFunction<1, IterationTrace*> func =
tensorstore::internal::SimpleElementwiseFunction<
decltype(get_trace_func)(Element), IterationTrace*>();
func[multi_iterator->buffer_kind](nullptr, block_shape,
multi_iterator->block_pointers()[i],
traces_ptr);
++i;
}(),
0)...};
(void)unused;
}
return result;
}
class MaybeDirectTest : public ::testing::TestWithParam<bool> {
protected:
Arena arena;
Result<NDIterable::Ptr> GetMaybeDirectTransformedArrayNDIterable(
tensorstore::SharedOffsetArrayView<const void> array,
tensorstore::IndexTransformView<> transform) {
if (GetParam()) {
TENSORSTORE_ASSIGN_OR_RETURN(auto transformed_array,
MakeTransformedArray(array, transform));
return GetTransformedArrayNDIterable(std::move(transformed_array),
&arena);
} else {
return GetTransformedArrayNDIterable(std::move(array), transform, &arena);
}
}
};
INSTANTIATE_TEST_SUITE_P(Indirect, MaybeDirectTest, ::testing::Values(true));
INSTANTIATE_TEST_SUITE_P(Direct, MaybeDirectTest, ::testing::Values(false));
TEST(NDIterableTransformedArrayTest, Strided) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto ta = (a | tensorstore::Dims(1).SizedInterval(0, 2, 2)).value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_EQ(IterationBufferKind::kStrided, multi_iterator.buffer_kind);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(0, 1));
EXPECT_THAT(
GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAre(&a(0, 0), &a(0, 2), &a(1, 0), &a(1, 2))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, SingleIndexedDimension) {
Arena arena;
auto a = AllocateArray<int>({4});
auto ta = (a | tensorstore::Dims(0).OuterIndexArraySlice(
MakeArray<Index>({1, 2, 3, 0})))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
EXPECT_EQ(tensorstore::dtype_v<int>, iterable->dtype());
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_EQ(IterationBufferKind::kIndexed, multi_iterator.buffer_kind);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(-1, 0));
EXPECT_THAT(GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAre(&a(1), &a(2), &a(3), &a(0))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest,
OneStridedOneIndexedDimensionIndexedBuffer) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto ta = (a | tensorstore::Dims(1).OuterIndexArraySlice(
MakeArray<Index>({0, 2, 1, 1})))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0));
EXPECT_THAT(multi_iterator.iteration_shape, ElementsAre(4, 2));
EXPECT_EQ(IterationBufferKind::kIndexed, multi_iterator.buffer_kind);
EXPECT_THAT(
GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 2), &a(1, 2),
&a(0, 1), &a(1, 1), &a(0, 1), &a(1, 1))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest,
TwoStridedOneIndexedDimensionContiguousBuffer) {
Arena arena;
auto a = AllocateArray<int>({2, 3, 2});
auto ta = (a | tensorstore::Dims(1).OuterIndexArraySlice(
MakeArray<Index>({0, 2, 1, 1})))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0, 2));
EXPECT_THAT(multi_iterator.iteration_shape, ElementsAre(4, 2, 2));
EXPECT_EQ(IterationBufferKind::kContiguous, multi_iterator.buffer_kind);
EXPECT_THAT(
GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAreArray(
{
&a(0, 0, 0), &a(0, 0, 1), &a(1, 0, 0), &a(1, 0, 1),
&a(0, 2, 0), &a(0, 2, 1), &a(1, 2, 0), &a(1, 2, 1),
&a(0, 1, 0), &a(0, 1, 1), &a(1, 1, 0), &a(1, 1, 1),
&a(0, 1, 0), &a(0, 1, 1), &a(1, 1, 0), &a(1, 1, 1)
})),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest,
TwoStridedOneIndexedDimensionStridedBuffer) {
Arena arena;
auto a = AllocateArray<int>({2, 3, 4});
auto ta = (a | tensorstore::Dims(2).Stride(2) |
tensorstore::Dims(1).OuterIndexArraySlice(
MakeArray<Index>({0, 2, 1, 1})))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0, 2));
EXPECT_THAT(multi_iterator.iteration_shape, ElementsAre(4, 2, 2));
EXPECT_EQ(IterationBufferKind::kStrided, multi_iterator.buffer_kind);
EXPECT_THAT(
GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAreArray(
{
&a(0, 0, 0), &a(0, 0, 2), &a(1, 0, 0), &a(1, 0, 2),
&a(0, 2, 0), &a(0, 2, 2), &a(1, 2, 0), &a(1, 2, 2),
&a(0, 1, 0), &a(0, 1, 2), &a(1, 1, 0), &a(1, 1, 2),
&a(0, 1, 0), &a(0, 1, 2), &a(1, 1, 0), &a(1, 1, 2)
})),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest,
TwoStridedOneIndexedDimensionIndexedBuffer) {
Arena arena;
auto a = AllocateArray<int>({2, 3, 2});
auto ta = (a | tensorstore::Dims(1).OuterIndexArraySlice(
MakeArray<Index>({0, 2, 1, 1})))
.value();
auto tb =
(a | tensorstore::Dims(0).OuterIndexArraySlice(MakeArray<Index>({0, 1})) |
tensorstore::Dims(1).OuterIndexArraySlice(
MakeArray<Index>({0, 2, 1, 1})))
.value();
auto iterable1 = GetTransformedArrayNDIterable(ta, &arena).value();
auto iterable2 = GetTransformedArrayNDIterable(tb, &arena).value();
MultiNDIterator<2, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable1.get(), iterable2.get()}},
&arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0, 2));
EXPECT_THAT(multi_iterator.iteration_shape, ElementsAre(4, 2, 2));
EXPECT_EQ(IterationBufferKind::kIndexed, multi_iterator.buffer_kind);
auto element_matcher = ElementsAreArray(
{
&a(0, 0, 0), &a(0, 0, 1), &a(1, 0, 0), &a(1, 0, 1),
&a(0, 2, 0), &a(0, 2, 1), &a(1, 2, 0), &a(1, 2, 1),
&a(0, 1, 0), &a(0, 1, 1), &a(1, 1, 0), &a(1, 1, 1),
&a(0, 1, 0), &a(0, 1, 1), &a(1, 1, 0), &a(1, 1, 1)
});
EXPECT_THAT(
(GetIterationTrace<int, int>(&multi_iterator)),
Pair(ElementsAre(element_matcher, element_matcher), absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, IndexedAndReversedStrided) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto ta = (a |
tensorstore::Dims(1).OuterIndexArraySlice(
MakeArray<Index>({0, 2, 1, 1})) |
tensorstore::Dims(0).SizedInterval(kImplicit, kImplicit, -1))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0));
EXPECT_THAT(multi_iterator.directions, ElementsAre(-1, 1));
EXPECT_THAT(multi_iterator.iteration_shape, ElementsAre(4, 2));
EXPECT_THAT(
GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 2), &a(1, 2),
&a(0, 1), &a(1, 1), &a(0, 1), &a(1, 1))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, IndexedCombine) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto ta = (a | tensorstore::Dims(1).OuterIndexArraySlice(
MakeArray<Index>({{0, 2}, {2, 0}})))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(2, 0));
EXPECT_THAT(
GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 2), &a(1, 2),
&a(0, 2), &a(1, 2), &a(0, 0), &a(1, 0))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, IndexedCombinePartiallyReversed) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto ta = (a | tensorstore::Dims(1)
.OuterIndexArraySlice(MakeArray<Index>({{0, 2}, {2, 0}}))
.SizedInterval(kImplicit, kImplicit, {1, -1}))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(2, 0));
EXPECT_THAT(multi_iterator.directions, ElementsAre(1, 1, -1));
EXPECT_THAT(
GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 2), &a(1, 2),
&a(0, 2), &a(1, 2), &a(0, 0), &a(1, 0))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, IndexedCombineBothReversed) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto ta = (a | tensorstore::Dims(1)
.OuterIndexArraySlice(MakeArray<Index>({{0, 2}, {2, 0}}))
.SizedInterval(kImplicit, kImplicit, -1))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(2, 0));
EXPECT_THAT(multi_iterator.directions, ElementsAre(1, -1, -1));
EXPECT_THAT(
GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 2), &a(1, 2),
&a(0, 2), &a(1, 2), &a(0, 0), &a(1, 0))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, IndexedVsStrided) {
Arena arena;
auto a = AllocateArray<int>({2, 2});
auto b = AllocateArray<int>({2, 3});
auto tb =
(b | tensorstore::Dims(1).OuterIndexArraySlice(MakeArray<Index>({0, 2})))
.value();
auto iterable_a = GetTransformedArrayNDIterable(a, &arena).value();
auto iterable_b = GetTransformedArrayNDIterable(tb, &arena).value();
MultiNDIterator<2, true> multi_iterator(
tb.shape(), skip_repeated_elements,
{{iterable_a.get(), iterable_b.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 0));
EXPECT_THAT(
(GetIterationTrace<int, int>(&multi_iterator)),
Pair(ElementsAre(ElementsAre(&a(0, 0), &a(1, 0), &a(0, 1), &a(1, 1)),
ElementsAre(&b(0, 0), &b(1, 0), &b(0, 2), &b(1, 2))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, IndexedWith2StridedDims) {
Arena arena;
auto a = AllocateArray<int>({2, 2, 3});
auto ta =
(a | tensorstore::Dims(1).MoveToFront() |
tensorstore::Dims(2).OuterIndexArraySlice(MakeArray<Index>({0, 2, 1})))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(2, 0));
EXPECT_THAT(GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAre(
&a(0, 0, 0), &a(0, 1, 0), &a(1, 0, 0), &a(1, 1, 0),
&a(0, 0, 2), &a(0, 1, 2), &a(1, 0, 2), &a(1, 1, 2),
&a(0, 0, 1), &a(0, 1, 1), &a(1, 0, 1), &a(1, 1, 1))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, TwoIndexedDims) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto ta =
(a |
tensorstore::Dims(0).OuterIndexArraySlice(MakeArray<Index>({0, 1, 1})) |
tensorstore::Dims(1).OuterIndexArraySlice(MakeArray<Index>({0, 2})))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(0, 1));
EXPECT_THAT(GetIterationTrace<int>(&multi_iterator),
Pair(ElementsAre(ElementsAre(&a(0, 0), &a(0, 2), &a(1, 0),
&a(1, 2), &a(1, 0), &a(1, 2))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, FourIndexedDims) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto ta = (a |
tensorstore::Dims(0).OuterIndexArraySlice(
MakeArray<Index>({{0, 1}, {1, 1}})) |
tensorstore::Dims(-1).OuterIndexArraySlice(
MakeArray<Index>({{0, 2}, {1, 0}})))
.value();
auto b = AllocateArray<int>({2, 2, 2, 2});
auto iterable_a = GetTransformedArrayNDIterable(ta, &arena).value();
auto iterable_b = GetTransformedArrayNDIterable(b, &arena).value();
MultiNDIterator<2, true> multi_iterator(
ta.shape(), skip_repeated_elements,
{{iterable_a.get(), iterable_b.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 3));
EXPECT_THAT(
(GetIterationTrace<int, int>(&multi_iterator)),
Pair(
ElementsAre(
ElementsAre(&a(0, 0), &a(0, 2), &a(0, 1), &a(0, 0),
&a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0),
&a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0),
&a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0)),
ElementsAre(
b.data() + 0, b.data() + 1, b.data() + 2, b.data() + 3,
b.data() + 4, b.data() + 5, b.data() + 6, b.data() + 7,
b.data() + 8, b.data() + 9, b.data() + 10, b.data() + 11,
b.data() + 12, b.data() + 13, b.data() + 14, b.data() + 15)),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, LastTwoDimsStrided) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto ta = (a |
tensorstore::Dims(0).OuterIndexArraySlice(
MakeArray<Index>({{0, 1}, {1, 1}})) |
tensorstore::Dims(-1).OuterIndexArraySlice(
MakeArray<Index>({{0, 2}, {1, 0}})))
.value();
auto b = AllocateArray<int>({2, 2, 2, 2});
auto iterable_a = GetTransformedArrayNDIterable(ta, &arena).value();
auto iterable_b = GetTransformedArrayNDIterable(b, &arena).value();
MultiNDIterator<2, true> multi_iterator(
ta.shape(), skip_repeated_elements,
{{iterable_a.get(), iterable_b.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(1, 3));
EXPECT_THAT(
(GetIterationTrace<int, int>(&multi_iterator)),
Pair(
ElementsAre(
ElementsAre(&a(0, 0), &a(0, 2), &a(0, 1), &a(0, 0),
&a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0),
&a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0),
&a(1, 0), &a(1, 2), &a(1, 1), &a(1, 0)),
ElementsAre(
b.data() + 0, b.data() + 1, b.data() + 2, b.data() + 3,
b.data() + 4, b.data() + 5, b.data() + 6, b.data() + 7,
b.data() + 8, b.data() + 9, b.data() + 10, b.data() + 11,
b.data() + 12, b.data() + 13, b.data() + 14, b.data() + 15)),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, TwoTransformedArrays) {
Arena arena;
auto a = AllocateArray<int>({2, 3});
auto b = AllocateArray<int>({2, 3});
auto ta =
(a | tensorstore::Dims(0).OuterIndexArraySlice(MakeArray<Index>({0, 1})))
.value();
auto tb = (b | tensorstore::Dims(1).OuterIndexArraySlice(
MakeArray<Index>({0, 1, 2})))
.value();
auto iterable_a = GetTransformedArrayNDIterable(ta, &arena).value();
auto iterable_b = GetTransformedArrayNDIterable(tb, &arena).value();
MultiNDIterator<2, true> multi_iterator(
ta.shape(), skip_repeated_elements,
{{iterable_a.get(), iterable_b.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(0, 1));
EXPECT_THAT((GetIterationTrace<int, int>(&multi_iterator)),
Pair(ElementsAre(ElementsAre(&a(0, 0), &a(0, 1), &a(0, 2),
&a(1, 0), &a(1, 1), &a(1, 2)),
ElementsAre(&b(0, 0), &b(0, 1), &b(0, 2),
&b(1, 0), &b(1, 1), &b(1, 2))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, ZeroRankIndexArray) {
Arena arena;
SharedArray<const Index> index_array{std::make_shared<Index>(3),
StridedLayout<>({5}, {0})};
int data[100];
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
IndexTransformBuilder(1, 1)
.input_shape({5})
.output_index_array(0, sizeof(int) * 2, sizeof(int) * 4, index_array)
.Finalize());
auto iterable_a = GetTransformedArrayNDIterable(
{tensorstore::UnownedToShared(
tensorstore::ElementPointer<int>(&data[0])),
transform},
&arena)
.value();
MultiNDIterator<1, true> multi_iterator(
transform.input_shape(), skip_repeated_elements, {{iterable_a.get()}},
&arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(-1, -1));
EXPECT_THAT(
(GetIterationTrace<int>(&multi_iterator)),
Pair(ElementsAre(ElementsAre(&data[4 * 3 + 2])), absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, OutOfBoundsConstant) {
Arena arena;
auto a = AllocateArray<int>({5});
auto transform = IndexTransformBuilder<1, 1>()
.input_shape({5})
.output_constant(0, 8)
.Finalize()
.value();
EXPECT_THAT(
GetTransformedArrayNDIterable(a, transform, &arena),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Checking bounds of constant output index map for "
"dimension 0: Index 8 is outside valid range \\[0, 5\\)"));
}
TEST(NDIterableTransformedArrayTest, NullTransform) {
Arena arena;
auto a = AllocateArray<int>({5});
auto iterable_a = GetTransformedArrayNDIterable(a, {}, &arena).value();
EXPECT_EQ(tensorstore::dtype_v<int>, iterable_a->dtype());
MultiNDIterator<1, true> multi_iterator(
a.shape(), skip_repeated_elements, {{iterable_a.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(-1, 0));
EXPECT_THAT((GetIterationTrace<int>(&multi_iterator)),
Pair(ElementsAre(ElementsAre(&a(0), &a(1), &a(2), &a(3), &a(4))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, IdentityTransform) {
Arena arena;
auto a = AllocateArray<int>({5});
auto iterable_a =
GetTransformedArrayNDIterable(
a,
tensorstore::IdentityTransform(tensorstore::span<const Index>({5})),
&arena)
.value();
EXPECT_EQ(tensorstore::dtype_v<int>, iterable_a->dtype());
MultiNDIterator<1, true> multi_iterator(
a.shape(), skip_repeated_elements, {{iterable_a.get()}}, &arena);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(-1, 0));
EXPECT_THAT((GetIterationTrace<int>(&multi_iterator)),
Pair(ElementsAre(ElementsAre(&a(0), &a(1), &a(2), &a(3), &a(4))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest, OutOfBoundsSingleInputDimension) {
Arena arena;
auto a = AllocateArray<int>({5});
auto transform = IndexTransformBuilder<1, 1>()
.input_shape({5})
.output_single_input_dimension(0, 2, 1, 0)
.Finalize()
.value();
EXPECT_THAT(GetTransformedArrayNDIterable(a, transform, &arena),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Output dimension 0 range of \\[2, 7\\) is not "
"contained within array domain of \\[0, 5\\)"));
}
TEST_P(MaybeDirectTest, OutOfBoundsIndexArray) {
auto a = AllocateArray<int>({5});
auto transform =
IndexTransformBuilder<1, 1>()
.input_shape({5})
.output_index_array(0, 2, 1, MakeArray<Index>({0, 0, 0, 0, 42}))
.Finalize()
.value();
EXPECT_THAT(GetMaybeDirectTransformedArrayNDIterable(a, transform),
MatchesStatus(absl::StatusCode::kOutOfRange,
".*Index 42 is outside valid range \\[-2, 3\\)"));
}
TEST_P(MaybeDirectTest, OutOfBoundsSingletonIndexArray) {
SharedArray<const Index> index_array{std::make_shared<Index>(42),
StridedLayout<>({5}, {0})};
auto a = AllocateArray<int>({5});
auto transform = IndexTransformBuilder<1, 1>()
.input_shape({5})
.output_index_array(0, 2, 1, index_array)
.Finalize()
.value();
EXPECT_THAT(GetMaybeDirectTransformedArrayNDIterable(a, transform),
MatchesStatus(absl::StatusCode::kOutOfRange,
".*Index 42 is outside valid range \\[-2, 3\\)"));
}
TEST(NDIterableTransformedArrayTest, BlockTraceThreeStridedDimensions) {
Arena arena;
auto a = AllocateArray<int>({2, 5, 3});
auto ta = (a | tensorstore::Dims(1).SizedInterval(0, 2, 2)).value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
MultiNDIterator<1, true> multi_iterator(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena);
EXPECT_EQ(IterationBufferKind::kContiguous, multi_iterator.buffer_kind);
EXPECT_THAT(multi_iterator.iteration_dimensions, ElementsAre(0, 1, 2));
EXPECT_THAT(
GetBlockTrace<int>(&multi_iterator),
Pair(ElementsAre(FieldsAre(ElementsAre(0, 0, 0), ElementsAre(2, 3),
ElementsAre(ElementsAreArray({
&a(0, 0, 0),
&a(0, 0, 1),
&a(0, 0, 2),
&a(0, 2, 0),
&a(0, 2, 1),
&a(0, 2, 2),
}))),
FieldsAre(ElementsAre(1, 0, 0), ElementsAre(2, 3),
ElementsAre(ElementsAreArray({
&a(1, 0, 0),
&a(1, 0, 1),
&a(1, 0, 2),
&a(1, 2, 0),
&a(1, 2, 1),
&a(1, 2, 2),
})))),
absl::OkStatus()));
}
TEST(NDIterableTransformedArrayTest,
InnermostBlockSizeLessThanInnermostIterationSize) {
Arena arena;
auto a = AllocateArray<int>({2, 32768}, tensorstore::c_order,
tensorstore::value_init);
auto ta = (a | tensorstore::Dims(0).IndexArraySlice(MakeArray<Index>({0, 1})))
.value();
auto iterable = GetTransformedArrayNDIterable(ta, &arena).value();
struct IncrementValue {
void operator()(int* x) const { *x += 1; }
};
constexpr tensorstore::internal::ElementwiseFunction<1> increment_value_func =
tensorstore::internal::SimpleElementwiseFunction<IncrementValue(int)>();
TENSORSTORE_ASSERT_OK(
(tensorstore::internal::IterateOverNDIterables<1, true>(
ta.shape(), skip_repeated_elements, {{iterable.get()}}, &arena,
{&increment_value_func, nullptr})));
EXPECT_THAT(a, tensorstore::MatchesArray(
tensorstore::BroadcastArray(
tensorstore::MakeScalarArray<int>(1), a.shape())
.value()));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_transformed_array.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_transformed_array_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
03d1f556-a605-4615-a23e-2c5adb2cbce3 | cpp | google/quiche | quic_idle_network_detector | quiche/quic/core/quic_idle_network_detector.cc | quiche/quic/core/quic_idle_network_detector_test.cc | #include "quiche/quic/core/quic_idle_network_detector.h"
#include <algorithm>
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace quic {
QuicIdleNetworkDetector::QuicIdleNetworkDetector(Delegate* delegate,
QuicTime now, QuicAlarm* alarm)
: delegate_(delegate),
start_time_(now),
handshake_timeout_(QuicTime::Delta::Infinite()),
time_of_last_received_packet_(now),
time_of_first_packet_sent_after_receiving_(QuicTime::Zero()),
idle_network_timeout_(QuicTime::Delta::Infinite()),
alarm_(*alarm) {}
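// When both timeouts are armed, the alarm attributes the earlier deadline:
// the handshake timeout is measured from start_time_, the idle timeout
// from the last network activity.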
void QuicIdleNetworkDetector::OnAlarm() {
if (handshake_timeout_.IsInfinite()) {
delegate_->OnIdleNetworkDetected();
return;
}
if (idle_network_timeout_.IsInfinite()) {
delegate_->OnHandshakeTimeout();
return;
}
if (last_network_activity_time() + idle_network_timeout_ >
start_time_ + handshake_timeout_) {
delegate_->OnHandshakeTimeout();
return;
}
delegate_->OnIdleNetworkDetected();
}
void QuicIdleNetworkDetector::SetTimeouts(
QuicTime::Delta handshake_timeout, QuicTime::Delta idle_network_timeout) {
handshake_timeout_ = handshake_timeout;
idle_network_timeout_ = idle_network_timeout;
SetAlarm();
}
void QuicIdleNetworkDetector::StopDetection() {
alarm_.PermanentCancel();
handshake_timeout_ = QuicTime::Delta::Infinite();
idle_network_timeout_ = QuicTime::Delta::Infinite();
stopped_ = true;
}
void QuicIdleNetworkDetector::OnPacketSent(QuicTime now,
QuicTime::Delta pto_delay) {
if (time_of_first_packet_sent_after_receiving_ >
time_of_last_received_packet_) {
return;
}
time_of_first_packet_sent_after_receiving_ =
std::max(time_of_first_packet_sent_after_receiving_, now);
if (shorter_idle_timeout_on_sent_packet_) {
MaybeSetAlarmOnSentPacket(pto_delay);
return;
}
SetAlarm();
}
void QuicIdleNetworkDetector::OnPacketReceived(QuicTime now) {
time_of_last_received_packet_ = std::max(time_of_last_received_packet_, now);
SetAlarm();
}
void QuicIdleNetworkDetector::SetAlarm() {
if (stopped_) {
QUIC_BUG(quic_idle_detector_set_alarm_after_stopped)
<< "SetAlarm called after stopped";
return;
}
QuicTime new_deadline = QuicTime::Zero();
if (!handshake_timeout_.IsInfinite()) {
new_deadline = start_time_ + handshake_timeout_;
}
if (!idle_network_timeout_.IsInfinite()) {
const QuicTime idle_network_deadline = GetIdleNetworkDeadline();
if (new_deadline.IsInitialized()) {
new_deadline = std::min(new_deadline, idle_network_deadline);
} else {
new_deadline = idle_network_deadline;
}
}
alarm_.Update(new_deadline, kAlarmGranularity);
}
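// Rather than re-arming the full idle timeout on every sent packet, keep
// the current deadline unless it would fire within one PTO of the last
// network activity; in that case extend it to exactly that point.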
void QuicIdleNetworkDetector::MaybeSetAlarmOnSentPacket(
QuicTime::Delta pto_delay) {
QUICHE_DCHECK(shorter_idle_timeout_on_sent_packet_);
if (!handshake_timeout_.IsInfinite() || !alarm_.IsSet()) {
SetAlarm();
return;
}
const QuicTime deadline = alarm_.deadline();
const QuicTime min_deadline = last_network_activity_time() + pto_delay;
if (deadline > min_deadline) {
return;
}
alarm_.Update(min_deadline, kAlarmGranularity);
}
QuicTime QuicIdleNetworkDetector::GetIdleNetworkDeadline() const {
if (idle_network_timeout_.IsInfinite()) {
return QuicTime::Zero();
}
return last_network_activity_time() + idle_network_timeout_;
}
} | #include "quiche/quic/core/quic_idle_network_detector.h"
#include "quiche/quic/core/quic_connection_alarms.h"
#include "quiche/quic/core/quic_one_block_arena.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/mock_quic_connection_alarms.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
namespace quic {
namespace test {
class QuicIdleNetworkDetectorTestPeer {
public:
static QuicAlarm& GetAlarm(QuicIdleNetworkDetector* detector) {
return detector->alarm_;
}
};
namespace {
class MockDelegate : public QuicIdleNetworkDetector::Delegate {
public:
MOCK_METHOD(void, OnHandshakeTimeout, (), (override));
MOCK_METHOD(void, OnIdleNetworkDetected, (), (override));
};
class QuicIdleNetworkDetectorTest : public QuicTest {
public:
QuicIdleNetworkDetectorTest()
: alarms_(&connection_alarms_delegate_, alarm_factory_, arena_),
detector_(&delegate_, clock_.Now() + QuicTimeDelta::FromSeconds(1),
&alarms_.idle_network_detector_alarm()) {
clock_.AdvanceTime(QuicTime::Delta::FromSeconds(1));
alarm_ = static_cast<MockAlarmFactory::TestAlarm*>(
&alarms_.idle_network_detector_alarm());
ON_CALL(connection_alarms_delegate_, OnIdleDetectorAlarm())
.WillByDefault([&] { detector_.OnAlarm(); });
}
protected:
testing::StrictMock<MockDelegate> delegate_;
MockConnectionAlarmsDelegate connection_alarms_delegate_;
QuicConnectionArena arena_;
MockAlarmFactory alarm_factory_;
QuicConnectionAlarms alarms_;
MockClock clock_;
QuicIdleNetworkDetector detector_;
MockAlarmFactory::TestAlarm* alarm_;
};
TEST_F(QuicIdleNetworkDetectorTest,
IdleNetworkDetectedBeforeHandshakeCompletes) {
EXPECT_FALSE(alarm_->IsSet());
detector_.SetTimeouts(
QuicTime::Delta::FromSeconds(30),
QuicTime::Delta::FromSeconds(20));
EXPECT_TRUE(alarm_->IsSet());
EXPECT_EQ(clock_.Now() + QuicTime::Delta::FromSeconds(20),
alarm_->deadline());
clock_.AdvanceTime(QuicTime::Delta::FromSeconds(20));
EXPECT_CALL(delegate_, OnIdleNetworkDetected());
alarm_->Fire();
}
TEST_F(QuicIdleNetworkDetectorTest, HandshakeTimeout) {
EXPECT_FALSE(alarm_->IsSet());
detector_.SetTimeouts(
QuicTime::Delta::FromSeconds(30),
QuicTime::Delta::FromSeconds(20));
EXPECT_TRUE(alarm_->IsSet());
clock_.AdvanceTime(QuicTime::Delta::FromSeconds(15));
detector_.OnPacketReceived(clock_.Now());
EXPECT_EQ(clock_.Now() + QuicTime::Delta::FromSeconds(15),
alarm_->deadline());
clock_.AdvanceTime(QuicTime::Delta::FromSeconds(15));
EXPECT_CALL(delegate_, OnHandshakeTimeout());
alarm_->Fire();
}
TEST_F(QuicIdleNetworkDetectorTest,
IdleNetworkDetectedAfterHandshakeCompletes) {
EXPECT_FALSE(alarm_->IsSet());
detector_.SetTimeouts(
QuicTime::Delta::FromSeconds(30),
QuicTime::Delta::FromSeconds(20));
EXPECT_TRUE(alarm_->IsSet());
EXPECT_EQ(clock_.Now() + QuicTime::Delta::FromSeconds(20),
alarm_->deadline());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(200));
detector_.OnPacketReceived(clock_.Now());
detector_.SetTimeouts(
QuicTime::Delta::Infinite(),
QuicTime::Delta::FromSeconds(600));
EXPECT_EQ(clock_.Now() + QuicTime::Delta::FromSeconds(600),
alarm_->deadline());
clock_.AdvanceTime(QuicTime::Delta::FromSeconds(600));
EXPECT_CALL(delegate_, OnIdleNetworkDetected());
alarm_->Fire();
}
TEST_F(QuicIdleNetworkDetectorTest,
DoNotExtendIdleDeadlineOnConsecutiveSentPackets) {
EXPECT_FALSE(alarm_->IsSet());
detector_.SetTimeouts(
QuicTime::Delta::FromSeconds(30),
QuicTime::Delta::FromSeconds(20));
EXPECT_TRUE(alarm_->IsSet());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(200));
detector_.OnPacketReceived(clock_.Now());
detector_.SetTimeouts(
QuicTime::Delta::Infinite(),
QuicTime::Delta::FromSeconds(600));
EXPECT_EQ(clock_.Now() + QuicTime::Delta::FromSeconds(600),
alarm_->deadline());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(200));
detector_.OnPacketSent(clock_.Now(), QuicTime::Delta::Zero());
const QuicTime packet_sent_time = clock_.Now();
EXPECT_EQ(packet_sent_time + QuicTime::Delta::FromSeconds(600),
alarm_->deadline());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(200));
detector_.OnPacketSent(clock_.Now(), QuicTime::Delta::Zero());
EXPECT_EQ(packet_sent_time + QuicTime::Delta::FromSeconds(600),
alarm_->deadline());
clock_.AdvanceTime(QuicTime::Delta::FromSeconds(600) -
QuicTime::Delta::FromMilliseconds(200));
EXPECT_CALL(delegate_, OnIdleNetworkDetected());
alarm_->Fire();
}
TEST_F(QuicIdleNetworkDetectorTest, ShorterIdleTimeoutOnSentPacket) {
detector_.enable_shorter_idle_timeout_on_sent_packet();
QuicTime::Delta idle_network_timeout = QuicTime::Delta::Zero();
idle_network_timeout = QuicTime::Delta::FromSeconds(30);
detector_.SetTimeouts(
QuicTime::Delta::Infinite(), idle_network_timeout);
EXPECT_TRUE(alarm_->IsSet());
const QuicTime deadline = alarm_->deadline();
EXPECT_EQ(clock_.Now() + QuicTime::Delta::FromSeconds(30), deadline);
clock_.AdvanceTime(QuicTime::Delta::FromSeconds(15));
detector_.OnPacketSent(clock_.Now(), QuicTime::Delta::FromSeconds(2));
EXPECT_TRUE(alarm_->IsSet());
EXPECT_EQ(deadline, alarm_->deadline());
clock_.AdvanceTime(QuicTime::Delta::FromSeconds(14));
detector_.OnPacketSent(clock_.Now(), QuicTime::Delta::FromSeconds(2));
EXPECT_TRUE(alarm_->IsSet());
EXPECT_EQ(deadline, alarm_->deadline());
clock_.AdvanceTime(QuicTime::Delta::FromSeconds(1));
detector_.OnPacketReceived(clock_.Now());
EXPECT_TRUE(alarm_->IsSet());
EXPECT_EQ(clock_.Now() + QuicTime::Delta::FromSeconds(30),
alarm_->deadline());
clock_.AdvanceTime(QuicTime::Delta::FromSeconds(29));
detector_.OnPacketSent(clock_.Now(), QuicTime::Delta::FromSeconds(2));
EXPECT_TRUE(alarm_->IsSet());
EXPECT_EQ(clock_.Now() + QuicTime::Delta::FromSeconds(2), alarm_->deadline());
}
TEST_F(QuicIdleNetworkDetectorTest, NoAlarmAfterStopped) {
detector_.StopDetection();
EXPECT_QUIC_BUG(
detector_.SetTimeouts(
QuicTime::Delta::FromSeconds(30),
QuicTime::Delta::FromSeconds(20)),
"SetAlarm called after stopped");
EXPECT_FALSE(alarm_->IsSet());
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_idle_network_detector.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_idle_network_detector_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
32f38623-1d99-4128-9f44-0ec7c874d2fa | cpp | abseil/abseil-cpp | log_entry | absl/log/log_entry.cc | absl/log/log_entry_test.cc | #include "absl/log/log_entry.h"
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
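// Older (pre-C++17) compilation modes require redundant out-of-line
// definitions for constexpr static data members declared in the header.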
#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr int LogEntry::kNoVerbosityLevel;
constexpr int LogEntry::kNoVerboseLevel;
#endif
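// On Apple platforms, define one non-empty symbol so that, as the symbol's
// name suggests, the linker does not warn about an empty library.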
#ifdef __APPLE__
namespace log_internal {
extern const char kAvoidEmptyLogEntryLibraryWarning;
const char kAvoidEmptyLogEntryLibraryWarning = 0;
}
#endif
ABSL_NAMESPACE_END
} | #include "absl/log/log_entry.h"
#include <stddef.h>
#include <stdint.h>
#include <cstring>
#include <limits>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/log_severity.h"
#include "absl/log/internal/append_truncated.h"
#include "absl/log/internal/log_format.h"
#include "absl/log/internal/test_helpers.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/time/civil_time.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
namespace {
using ::absl::log_internal::LogEntryTestPeer;
using ::testing::Eq;
using ::testing::IsTrue;
using ::testing::StartsWith;
using ::testing::StrEq;
auto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment(
new absl::log_internal::LogTestEnvironment);
}
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace log_internal {
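// Test peer that assembles a LogEntry directly from its constituent parts and
// formats it with the internal prefix formatter, letting the tests exercise
// prefix layout and truncation.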
class LogEntryTestPeer {
public:
LogEntryTestPeer(absl::string_view base_filename, int line, bool prefix,
absl::LogSeverity severity, absl::string_view timestamp,
absl::LogEntry::tid_t tid, PrefixFormat format,
absl::string_view text_message)
: format_{format}, buf_(15000, '\0') {
entry_.base_filename_ = base_filename;
entry_.line_ = line;
entry_.prefix_ = prefix;
entry_.severity_ = severity;
std::string time_err;
EXPECT_THAT(
absl::ParseTime("%Y-%m-%d%ET%H:%M:%E*S", timestamp,
absl::LocalTimeZone(), &entry_.timestamp_, &time_err),
IsTrue())
<< "Failed to parse time " << timestamp << ": " << time_err;
entry_.tid_ = tid;
std::pair<absl::string_view, std::string> timestamp_bits =
absl::StrSplit(timestamp, absl::ByChar('.'));
EXPECT_THAT(absl::ParseCivilTime(timestamp_bits.first, &ci_.cs), IsTrue())
<< "Failed to parse time " << timestamp_bits.first;
timestamp_bits.second.resize(9, '0');
int64_t nanos = 0;
EXPECT_THAT(absl::SimpleAtoi(timestamp_bits.second, &nanos), IsTrue())
<< "Failed to parse time " << timestamp_bits.first;
ci_.subsecond = absl::Nanoseconds(nanos);
absl::Span<char> view = absl::MakeSpan(buf_);
view.remove_suffix(2);
entry_.prefix_len_ =
entry_.prefix_
? log_internal::FormatLogPrefix(
entry_.log_severity(), entry_.timestamp(), entry_.tid(),
entry_.source_basename(), entry_.source_line(), format_, view)
: 0;
EXPECT_THAT(entry_.prefix_len_,
Eq(static_cast<size_t>(view.data() - buf_.data())));
log_internal::AppendTruncated(text_message, view);
view = absl::Span<char>(view.data(), view.size() + 2);
view[0] = '\n';
view[1] = '\0';
view.remove_prefix(2);
buf_.resize(static_cast<size_t>(view.data() - buf_.data()));
entry_.text_message_with_prefix_and_newline_and_nul_ = absl::MakeSpan(buf_);
}
LogEntryTestPeer(const LogEntryTestPeer&) = delete;
LogEntryTestPeer& operator=(const LogEntryTestPeer&) = delete;
std::string FormatLogMessage() const {
return log_internal::FormatLogMessage(
entry_.log_severity(), ci_.cs, ci_.subsecond, entry_.tid(),
entry_.source_basename(), entry_.source_line(), format_,
entry_.text_message());
}
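  // Formats the prefix into a buffer of exactly `sz` bytes and returns what
  // was actually written, to exercise truncation at every length.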
std::string FormatPrefixIntoSizedBuffer(size_t sz) {
std::string str(sz, '\0');
absl::Span<char> buf(&str[0], str.size());
const size_t prefix_size = log_internal::FormatLogPrefix(
entry_.log_severity(), entry_.timestamp(), entry_.tid(),
entry_.source_basename(), entry_.source_line(), format_, buf);
EXPECT_THAT(prefix_size, Eq(static_cast<size_t>(buf.data() - str.data())));
str.resize(prefix_size);
return str;
}
const absl::LogEntry& entry() const { return entry_; }
private:
absl::LogEntry entry_;
PrefixFormat format_;
absl::TimeZone::CivilInfo ci_;
std::vector<char> buf_;
};
}
ABSL_NAMESPACE_END
}
namespace {
constexpr bool kUsePrefix = true, kNoPrefix = false;
TEST(LogEntryTest, Baseline) {
LogEntryTestPeer entry("foo.cc", 1234, kUsePrefix, absl::LogSeverity::kInfo,
"2020-01-02T03:04:05.6789", 451,
absl::log_internal::PrefixFormat::kNotRaw,
"hello world");
EXPECT_THAT(entry.FormatLogMessage(),
Eq("I0102 03:04:05.678900 451 foo.cc:1234] hello world"));
EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000),
Eq("I0102 03:04:05.678900 451 foo.cc:1234] "));
for (size_t sz = strlen("I0102 03:04:05.678900 451 foo.cc:1234] ") + 20;
sz != std::numeric_limits<size_t>::max(); sz--)
EXPECT_THAT("I0102 03:04:05.678900 451 foo.cc:1234] ",
StartsWith(entry.FormatPrefixIntoSizedBuffer(sz)));
EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline(),
Eq("I0102 03:04:05.678900 451 foo.cc:1234] hello world\n"));
EXPECT_THAT(
entry.entry().text_message_with_prefix_and_newline_c_str(),
StrEq("I0102 03:04:05.678900 451 foo.cc:1234] hello world\n"));
EXPECT_THAT(entry.entry().text_message_with_prefix(),
Eq("I0102 03:04:05.678900 451 foo.cc:1234] hello world"));
EXPECT_THAT(entry.entry().text_message(), Eq("hello world"));
}
TEST(LogEntryTest, NoPrefix) {
LogEntryTestPeer entry("foo.cc", 1234, kNoPrefix, absl::LogSeverity::kInfo,
"2020-01-02T03:04:05.6789", 451,
absl::log_internal::PrefixFormat::kNotRaw,
"hello world");
EXPECT_THAT(entry.FormatLogMessage(),
Eq("I0102 03:04:05.678900 451 foo.cc:1234] hello world"));
EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000),
Eq("I0102 03:04:05.678900 451 foo.cc:1234] "));
for (size_t sz = strlen("I0102 03:04:05.678900 451 foo.cc:1234] ") + 20;
sz != std::numeric_limits<size_t>::max(); sz--)
EXPECT_THAT("I0102 03:04:05.678900 451 foo.cc:1234] ",
StartsWith(entry.FormatPrefixIntoSizedBuffer(sz)));
EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline(),
Eq("hello world\n"));
EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline_c_str(),
StrEq("hello world\n"));
EXPECT_THAT(entry.entry().text_message_with_prefix(), Eq("hello world"));
EXPECT_THAT(entry.entry().text_message(), Eq("hello world"));
}
TEST(LogEntryTest, EmptyFields) {
LogEntryTestPeer entry("", 0, kUsePrefix, absl::LogSeverity::kInfo,
"2020-01-02T03:04:05", 0,
absl::log_internal::PrefixFormat::kNotRaw, "");
const std::string format_message = entry.FormatLogMessage();
EXPECT_THAT(format_message, Eq("I0102 03:04:05.000000 0 :0] "));
EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000), Eq(format_message));
for (size_t sz = format_message.size() + 20;
sz != std::numeric_limits<size_t>::max(); sz--)
EXPECT_THAT(format_message,
StartsWith(entry.FormatPrefixIntoSizedBuffer(sz)));
EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline(),
Eq("I0102 03:04:05.000000 0 :0] \n"));
EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline_c_str(),
StrEq("I0102 03:04:05.000000 0 :0] \n"));
EXPECT_THAT(entry.entry().text_message_with_prefix(),
Eq("I0102 03:04:05.000000 0 :0] "));
EXPECT_THAT(entry.entry().text_message(), Eq(""));
}
TEST(LogEntryTest, NegativeFields) {
if (std::is_signed<absl::LogEntry::tid_t>::value) {
LogEntryTestPeer entry(
"foo.cc", -1234, kUsePrefix, absl::LogSeverity::kInfo,
"2020-01-02T03:04:05.6789", static_cast<absl::LogEntry::tid_t>(-451),
absl::log_internal::PrefixFormat::kNotRaw, "hello world");
EXPECT_THAT(entry.FormatLogMessage(),
Eq("I0102 03:04:05.678900 -451 foo.cc:-1234] hello world"));
EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000),
Eq("I0102 03:04:05.678900 -451 foo.cc:-1234] "));
for (size_t sz =
strlen("I0102 03:04:05.678900 -451 foo.cc:-1234] ") + 20;
sz != std::numeric_limits<size_t>::max(); sz--)
EXPECT_THAT("I0102 03:04:05.678900 -451 foo.cc:-1234] ",
StartsWith(entry.FormatPrefixIntoSizedBuffer(sz)));
EXPECT_THAT(
entry.entry().text_message_with_prefix_and_newline(),
Eq("I0102 03:04:05.678900 -451 foo.cc:-1234] hello world\n"));
EXPECT_THAT(
entry.entry().text_message_with_prefix_and_newline_c_str(),
StrEq("I0102 03:04:05.678900 -451 foo.cc:-1234] hello world\n"));
EXPECT_THAT(entry.entry().text_message_with_prefix(),
Eq("I0102 03:04:05.678900 -451 foo.cc:-1234] hello world"));
EXPECT_THAT(entry.entry().text_message(), Eq("hello world"));
} else {
LogEntryTestPeer entry("foo.cc", -1234, kUsePrefix,
absl::LogSeverity::kInfo, "2020-01-02T03:04:05.6789",
451, absl::log_internal::PrefixFormat::kNotRaw,
"hello world");
EXPECT_THAT(entry.FormatLogMessage(),
Eq("I0102 03:04:05.678900 451 foo.cc:-1234] hello world"));
EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000),
Eq("I0102 03:04:05.678900 451 foo.cc:-1234] "));
for (size_t sz =
strlen("I0102 03:04:05.678900 451 foo.cc:-1234] ") + 20;
sz != std::numeric_limits<size_t>::max(); sz--)
EXPECT_THAT("I0102 03:04:05.678900 451 foo.cc:-1234] ",
StartsWith(entry.FormatPrefixIntoSizedBuffer(sz)));
EXPECT_THAT(
entry.entry().text_message_with_prefix_and_newline(),
Eq("I0102 03:04:05.678900 451 foo.cc:-1234] hello world\n"));
EXPECT_THAT(
entry.entry().text_message_with_prefix_and_newline_c_str(),
StrEq("I0102 03:04:05.678900 451 foo.cc:-1234] hello world\n"));
EXPECT_THAT(entry.entry().text_message_with_prefix(),
Eq("I0102 03:04:05.678900 451 foo.cc:-1234] hello world"));
EXPECT_THAT(entry.entry().text_message(), Eq("hello world"));
}
}
TEST(LogEntryTest, LongFields) {
LogEntryTestPeer entry(
"I am the very model of a modern Major-General / "
"I've information vegetable, animal, and mineral.",
2147483647, kUsePrefix, absl::LogSeverity::kInfo,
"2020-01-02T03:04:05.678967896789", 2147483647,
absl::log_internal::PrefixFormat::kNotRaw,
"I know the kings of England, and I quote the fights historical / "
"From Marathon to Waterloo, in order categorical.");
EXPECT_THAT(entry.FormatLogMessage(),
Eq("I0102 03:04:05.678967 2147483647 I am the very model of a "
"modern Major-General / I've information vegetable, animal, "
"and mineral.:2147483647] I know the kings of England, and I "
"quote the fights historical / From Marathon to Waterloo, in "
"order categorical."));
EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000),
Eq("I0102 03:04:05.678967 2147483647 I am the very model of a "
"modern Major-General / I've information vegetable, animal, "
"and mineral.:2147483647] "));
for (size_t sz =
strlen("I0102 03:04:05.678967 2147483647 I am the very model of a "
"modern Major-General / I've information vegetable, animal, "
"and mineral.:2147483647] ") +
20;
sz != std::numeric_limits<size_t>::max(); sz--)
EXPECT_THAT(
"I0102 03:04:05.678967 2147483647 I am the very model of a "
"modern Major-General / I've information vegetable, animal, "
"and mineral.:2147483647] ",
StartsWith(entry.FormatPrefixIntoSizedBuffer(sz)));
EXPECT_THAT(entry.entry().text_message_with_prefix_and_newline(),
Eq("I0102 03:04:05.678967 2147483647 I am the very model of a "
"modern Major-General / I've information vegetable, animal, "
"and mineral.:2147483647] I know the kings of England, and I "
"quote the fights historical / From Marathon to Waterloo, in "
"order categorical.\n"));
EXPECT_THAT(
entry.entry().text_message_with_prefix_and_newline_c_str(),
StrEq("I0102 03:04:05.678967 2147483647 I am the very model of a "
"modern Major-General / I've information vegetable, animal, "
"and mineral.:2147483647] I know the kings of England, and I "
"quote the fights historical / From Marathon to Waterloo, in "
"order categorical.\n"));
EXPECT_THAT(entry.entry().text_message_with_prefix(),
Eq("I0102 03:04:05.678967 2147483647 I am the very model of a "
"modern Major-General / I've information vegetable, animal, "
"and mineral.:2147483647] I know the kings of England, and I "
"quote the fights historical / From Marathon to Waterloo, in "
"order categorical."));
EXPECT_THAT(
entry.entry().text_message(),
Eq("I know the kings of England, and I quote the fights historical / "
"From Marathon to Waterloo, in order categorical."));
}
TEST(LogEntryTest, LongNegativeFields) {
if (std::is_signed<absl::LogEntry::tid_t>::value) {
LogEntryTestPeer entry(
"I am the very model of a modern Major-General / "
"I've information vegetable, animal, and mineral.",
-2147483647, kUsePrefix, absl::LogSeverity::kInfo,
"2020-01-02T03:04:05.678967896789",
static_cast<absl::LogEntry::tid_t>(-2147483647),
absl::log_internal::PrefixFormat::kNotRaw,
"I know the kings of England, and I quote the fights historical / "
"From Marathon to Waterloo, in order categorical.");
EXPECT_THAT(
entry.FormatLogMessage(),
Eq("I0102 03:04:05.678967 -2147483647 I am the very model of a "
"modern Major-General / I've information vegetable, animal, "
"and mineral.:-2147483647] I know the kings of England, and I "
"quote the fights historical / From Marathon to Waterloo, in "
"order categorical."));
EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000),
Eq("I0102 03:04:05.678967 -2147483647 I am the very model of a "
"modern Major-General / I've information vegetable, animal, "
"and mineral.:-2147483647] "));
for (size_t sz =
strlen(
"I0102 03:04:05.678967 -2147483647 I am the very model of a "
"modern Major-General / I've information vegetable, animal, "
"and mineral.:-2147483647] ") +
20;
sz != std::numeric_limits<size_t>::max(); sz--)
EXPECT_THAT(
"I0102 03:04:05.678967 -2147483647 I am the very model of a "
"modern Major-General / I've information vegetable, animal, "
"and mineral.:-2147483647] ",
StartsWith(entry.FormatPrefixIntoSizedBuffer(sz)));
EXPECT_THAT(
entry.entry().text_message_with_prefix_and_newline(),
Eq("I0102 03:04:05.678967 -2147483647 I am the very model of a "
"modern Major-General / I've information vegetable, animal, "
"and mineral.:-2147483647] I know the kings of England, and I "
"quote the fights historical / From Marathon to Waterloo, in "
"order categorical.\n"));
EXPECT_THAT(
entry.entry().text_message_with_prefix_and_newline_c_str(),
StrEq("I0102 03:04:05.678967 -2147483647 I am the very model of a "
"modern Major-General / I've information vegetable, animal, "
"and mineral.:-2147483647] I know the kings of England, and I "
"quote the fights historical / From Marathon to Waterloo, in "
"order categorical.\n"));
EXPECT_THAT(
entry.entry().text_message_with_prefix(),
Eq("I0102 03:04:05.678967 -2147483647 I am the very model of a "
"modern Major-General / I've information vegetable, animal, "
"and mineral.:-2147483647] I know the kings of England, and I "
"quote the fights historical / From Marathon to Waterloo, in "
"order categorical."));
EXPECT_THAT(
entry.entry().text_message(),
Eq("I know the kings of England, and I quote the fights historical / "
"From Marathon to Waterloo, in order categorical."));
} else {
LogEntryTestPeer entry(
"I am the very model of a modern Major-General / "
"I've information vegetable, animal, and mineral.",
-2147483647, kUsePrefix, absl::LogSeverity::kInfo,
"2020-01-02T03:04:05.678967896789", 2147483647,
absl::log_internal::PrefixFormat::kNotRaw,
"I know the kings of England, and I quote the fights historical / "
"From Marathon to Waterloo, in order categorical.");
EXPECT_THAT(
entry.FormatLogMessage(),
Eq("I0102 03:04:05.678967 2147483647 I am the very model of a "
"modern Major-General / I've information vegetable, animal, "
"and mineral.:-2147483647] I know the kings of England, and I "
"quote the fights historical / From Marathon to Waterloo, in "
"order categorical."));
EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000),
Eq("I0102 03:04:05.678967 2147483647 I am the very model of a "
"modern Major-General / I've information vegetable, animal, "
"and mineral.:-2147483647] "));
for (size_t sz =
strlen(
"I0102 03:04:05.678967 2147483647 I am the very model of a "
"modern Major-General / I've information vegetable, animal, "
"and mineral.:-2147483647] ") +
20;
sz != std::numeric_limits<size_t>::max(); sz--)
EXPECT_THAT(
"I0102 03:04:05.678967 2147483647 I am the very model of a "
"modern Major-General / I've information vegetable, animal, "
"and mineral.:-2147483647] ",
StartsWith(entry.FormatPrefixIntoSizedBuffer(sz)));
EXPECT_THAT(
entry.entry().text_message_with_prefix_and_newline(),
Eq("I0102 03:04:05.678967 2147483647 I am the very model of a "
"modern Major-General / I've information vegetable, animal, "
"and mineral.:-2147483647] I know the kings of England, and I "
"quote the fights historical / From Marathon to Waterloo, in "
"order categorical.\n"));
EXPECT_THAT(
entry.entry().text_message_with_prefix_and_newline_c_str(),
StrEq("I0102 03:04:05.678967 2147483647 I am the very model of a "
"modern Major-General / I've information vegetable, animal, "
"and mineral.:-2147483647] I know the kings of England, and I "
"quote the fights historical / From Marathon to Waterloo, in "
"order categorical.\n"));
EXPECT_THAT(
entry.entry().text_message_with_prefix(),
Eq("I0102 03:04:05.678967 2147483647 I am the very model of a "
"modern Major-General / I've information vegetable, animal, "
"and mineral.:-2147483647] I know the kings of England, and I "
"quote the fights historical / From Marathon to Waterloo, in "
"order categorical."));
EXPECT_THAT(
entry.entry().text_message(),
Eq("I know the kings of England, and I quote the fights historical / "
"From Marathon to Waterloo, in order categorical."));
}
}
TEST(LogEntryTest, Raw) {
LogEntryTestPeer entry("foo.cc", 1234, kUsePrefix, absl::LogSeverity::kInfo,
"2020-01-02T03:04:05.6789", 451,
absl::log_internal::PrefixFormat::kRaw, "hello world");
EXPECT_THAT(
entry.FormatLogMessage(),
Eq("I0102 03:04:05.678900 451 foo.cc:1234] RAW: hello world"));
EXPECT_THAT(entry.FormatPrefixIntoSizedBuffer(1000),
Eq("I0102 03:04:05.678900 451 foo.cc:1234] RAW: "));
for (size_t sz =
strlen("I0102 03:04:05.678900 451 foo.cc:1234] RAW: ") + 20;
sz != std::numeric_limits<size_t>::max(); sz--)
EXPECT_THAT("I0102 03:04:05.678900 451 foo.cc:1234] RAW: ",
StartsWith(entry.FormatPrefixIntoSizedBuffer(sz)));
EXPECT_THAT(
entry.entry().text_message_with_prefix_and_newline(),
Eq("I0102 03:04:05.678900 451 foo.cc:1234] RAW: hello world\n"));
EXPECT_THAT(
entry.entry().text_message_with_prefix_and_newline_c_str(),
StrEq("I0102 03:04:05.678900 451 foo.cc:1234] RAW: hello world\n"));
EXPECT_THAT(
entry.entry().text_message_with_prefix(),
Eq("I0102 03:04:05.678900 451 foo.cc:1234] RAW: hello world"));
EXPECT_THAT(entry.entry().text_message(), Eq("hello world"));
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/log/log_entry.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/log/log_entry_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
d37b53b4-31a7-4b63-b4fa-2549c355aa9f | cpp | google/cel-cpp | type_introspector | common/type_introspector.cc | extensions/protobuf/type_introspector_test.cc | #include "common/type_introspector.h"
#include <algorithm>
#include <cstdint>
#include <initializer_list>
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "common/memory.h"
#include "common/type.h"
#include "common/types/thread_compatible_type_introspector.h"
namespace cel {
namespace {
common_internal::BasicStructTypeField MakeBasicStructTypeField(
absl::string_view name, Type type, int32_t number) {
return common_internal::BasicStructTypeField(name, number, type);
}
struct FieldNameComparer {
using is_transparent = void;
bool operator()(const common_internal::BasicStructTypeField& lhs,
const common_internal::BasicStructTypeField& rhs) const {
return (*this)(lhs.name(), rhs.name());
}
bool operator()(const common_internal::BasicStructTypeField& lhs,
absl::string_view rhs) const {
return (*this)(lhs.name(), rhs);
}
bool operator()(absl::string_view lhs,
const common_internal::BasicStructTypeField& rhs) const {
return (*this)(lhs, rhs.name());
}
bool operator()(absl::string_view lhs, absl::string_view rhs) const {
return lhs < rhs;
}
};
struct FieldNumberComparer {
using is_transparent = void;
bool operator()(const common_internal::BasicStructTypeField& lhs,
const common_internal::BasicStructTypeField& rhs) const {
return (*this)(lhs.number(), rhs.number());
}
bool operator()(const common_internal::BasicStructTypeField& lhs,
int64_t rhs) const {
return (*this)(lhs.number(), rhs);
}
bool operator()(int64_t lhs,
const common_internal::BasicStructTypeField& rhs) const {
return (*this)(lhs, rhs.number());
}
bool operator()(int64_t lhs, int64_t rhs) const { return lhs < rhs; }
};
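// A well-known type's CEL representation plus its synthetic fields, kept
// sorted by name and by number so lookups can use binary search.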
struct WellKnownType {
WellKnownType(
const Type& type,
std::initializer_list<common_internal::BasicStructTypeField> fields)
: type(type), fields_by_name(fields), fields_by_number(fields) {
std::sort(fields_by_name.begin(), fields_by_name.end(),
FieldNameComparer{});
std::sort(fields_by_number.begin(), fields_by_number.end(),
FieldNumberComparer{});
}
explicit WellKnownType(const Type& type) : WellKnownType(type, {}) {}
Type type;
absl::InlinedVector<common_internal::BasicStructTypeField, 2> fields_by_name;
absl::InlinedVector<common_internal::BasicStructTypeField, 2>
fields_by_number;
absl::optional<StructTypeField> FieldByName(absl::string_view name) const {
auto it = std::lower_bound(fields_by_name.begin(), fields_by_name.end(),
name, FieldNameComparer{});
if (it == fields_by_name.end() || it->name() != name) {
return absl::nullopt;
}
return *it;
}
absl::optional<StructTypeField> FieldByNumber(int64_t number) const {
auto it = std::lower_bound(fields_by_number.begin(), fields_by_number.end(),
number, FieldNumberComparer{});
if (it == fields_by_number.end() || it->number() != number) {
return absl::nullopt;
}
return *it;
}
};
using WellKnownTypesMap = absl::flat_hash_map<absl::string_view, WellKnownType>;
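// Returns the lazily initialized, intentionally leaked map from well-known
// type names to their CEL representations.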
const WellKnownTypesMap& GetWellKnownTypesMap() {
static const WellKnownTypesMap* types = []() -> WellKnownTypesMap* {
WellKnownTypesMap* types = new WellKnownTypesMap();
types->insert_or_assign(
"google.protobuf.BoolValue",
WellKnownType{BoolWrapperType{},
{MakeBasicStructTypeField("value", BoolType{}, 1)}});
types->insert_or_assign(
"google.protobuf.Int32Value",
WellKnownType{IntWrapperType{},
{MakeBasicStructTypeField("value", IntType{}, 1)}});
types->insert_or_assign(
"google.protobuf.Int64Value",
WellKnownType{IntWrapperType{},
{MakeBasicStructTypeField("value", IntType{}, 1)}});
types->insert_or_assign(
"google.protobuf.UInt32Value",
WellKnownType{UintWrapperType{},
{MakeBasicStructTypeField("value", UintType{}, 1)}});
types->insert_or_assign(
"google.protobuf.UInt64Value",
WellKnownType{UintWrapperType{},
{MakeBasicStructTypeField("value", UintType{}, 1)}});
types->insert_or_assign(
"google.protobuf.FloatValue",
WellKnownType{DoubleWrapperType{},
{MakeBasicStructTypeField("value", DoubleType{}, 1)}});
types->insert_or_assign(
"google.protobuf.DoubleValue",
WellKnownType{DoubleWrapperType{},
{MakeBasicStructTypeField("value", DoubleType{}, 1)}});
types->insert_or_assign(
"google.protobuf.StringValue",
WellKnownType{StringWrapperType{},
{MakeBasicStructTypeField("value", StringType{}, 1)}});
types->insert_or_assign(
"google.protobuf.BytesValue",
WellKnownType{BytesWrapperType{},
{MakeBasicStructTypeField("value", BytesType{}, 1)}});
types->insert_or_assign(
"google.protobuf.Duration",
WellKnownType{DurationType{},
{MakeBasicStructTypeField("seconds", IntType{}, 1),
MakeBasicStructTypeField("nanos", IntType{}, 2)}});
types->insert_or_assign(
"google.protobuf.Timestamp",
WellKnownType{TimestampType{},
{MakeBasicStructTypeField("seconds", IntType{}, 1),
MakeBasicStructTypeField("nanos", IntType{}, 2)}});
types->insert_or_assign(
"google.protobuf.Value",
WellKnownType{
DynType{},
{MakeBasicStructTypeField("null_value", NullType{}, 1),
MakeBasicStructTypeField("number_value", DoubleType{}, 2),
MakeBasicStructTypeField("string_value", StringType{}, 3),
MakeBasicStructTypeField("bool_value", BoolType{}, 4),
MakeBasicStructTypeField("struct_value", JsonMapType(), 5),
MakeBasicStructTypeField("list_value", ListType{}, 6)}});
types->insert_or_assign(
"google.protobuf.ListValue",
WellKnownType{ListType{},
{MakeBasicStructTypeField("values", ListType{}, 1)}});
types->insert_or_assign(
"google.protobuf.Struct",
WellKnownType{JsonMapType(),
{MakeBasicStructTypeField("fields", JsonMapType(), 1)}});
types->insert_or_assign(
"google.protobuf.Any",
WellKnownType{AnyType{},
{MakeBasicStructTypeField("type_url", StringType{}, 1),
MakeBasicStructTypeField("value", BytesType{}, 2)}});
types->insert_or_assign("null_type", WellKnownType{NullType{}});
types->insert_or_assign("google.protobuf.NullValue",
WellKnownType{NullType{}});
types->insert_or_assign("bool", WellKnownType{BoolType{}});
types->insert_or_assign("int", WellKnownType{IntType{}});
types->insert_or_assign("uint", WellKnownType{UintType{}});
types->insert_or_assign("double", WellKnownType{DoubleType{}});
types->insert_or_assign("bytes", WellKnownType{BytesType{}});
types->insert_or_assign("string", WellKnownType{StringType{}});
types->insert_or_assign("list", WellKnownType{ListType{}});
types->insert_or_assign("map", WellKnownType{MapType{}});
types->insert_or_assign("type", WellKnownType{TypeType{}});
return types;
}();
return *types;
}
}
absl::StatusOr<absl::optional<Type>> TypeIntrospector::FindType(
TypeFactory& type_factory, absl::string_view name) const {
const auto& well_known_types = GetWellKnownTypesMap();
if (auto it = well_known_types.find(name); it != well_known_types.end()) {
return it->second.type;
}
return FindTypeImpl(type_factory, name);
}
absl::StatusOr<absl::optional<TypeIntrospector::EnumConstant>>
TypeIntrospector::FindEnumConstant(TypeFactory& type_factory,
absl::string_view type,
absl::string_view value) const {
if (type == "google.protobuf.NullValue" && value == "NULL_VALUE") {
return EnumConstant{NullType{}, "google.protobuf.NullValue", "NULL_VALUE",
0};
}
return FindEnumConstantImpl(type_factory, type, value);
}
absl::StatusOr<absl::optional<StructTypeField>>
TypeIntrospector::FindStructTypeFieldByName(TypeFactory& type_factory,
absl::string_view type,
absl::string_view name) const {
const auto& well_known_types = GetWellKnownTypesMap();
if (auto it = well_known_types.find(type); it != well_known_types.end()) {
return it->second.FieldByName(name);
}
return FindStructTypeFieldByNameImpl(type_factory, type, name);
}
absl::StatusOr<absl::optional<Type>> TypeIntrospector::FindTypeImpl(
TypeFactory&, absl::string_view) const {
return absl::nullopt;
}
absl::StatusOr<absl::optional<TypeIntrospector::EnumConstant>>
TypeIntrospector::FindEnumConstantImpl(TypeFactory&, absl::string_view,
absl::string_view) const {
return absl::nullopt;
}
absl::StatusOr<absl::optional<StructTypeField>>
TypeIntrospector::FindStructTypeFieldByNameImpl(TypeFactory&, absl::string_view,
absl::string_view) const {
return absl::nullopt;
}
Shared<TypeIntrospector> NewThreadCompatibleTypeIntrospector(
MemoryManagerRef memory_manager) {
return memory_manager
.MakeShared<common_internal::ThreadCompatibleTypeIntrospector>();
}
} | #include "extensions/protobuf/type_introspector.h"
#include "absl/types/optional.h"
#include "common/type.h"
#include "common/type_kind.h"
#include "common/type_testing.h"
#include "internal/testing.h"
#include "proto/test/v1/proto2/test_all_types.pb.h"
#include "google/protobuf/descriptor.h"
namespace cel::extensions {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::google::api::expr::test::v1::proto2::TestAllTypes;
using ::testing::Eq;
using ::testing::Optional;
class ProtoTypeIntrospectorTest
: public common_internal::ThreadCompatibleTypeTest<> {
private:
Shared<TypeIntrospector> NewTypeIntrospector(
MemoryManagerRef memory_manager) override {
return memory_manager.MakeShared<ProtoTypeIntrospector>();
}
};
TEST_P(ProtoTypeIntrospectorTest, FindType) {
EXPECT_THAT(
type_manager().FindType(TestAllTypes::descriptor()->full_name()),
IsOkAndHolds(Optional(Eq(MessageType(TestAllTypes::GetDescriptor())))));
EXPECT_THAT(type_manager().FindType("type.that.does.not.Exist"),
IsOkAndHolds(Eq(absl::nullopt)));
}
TEST_P(ProtoTypeIntrospectorTest, FindStructTypeFieldByName) {
ASSERT_OK_AND_ASSIGN(
auto field, type_manager().FindStructTypeFieldByName(
TestAllTypes::descriptor()->full_name(), "single_int32"));
ASSERT_TRUE(field.has_value());
EXPECT_THAT(field->name(), Eq("single_int32"));
EXPECT_THAT(field->number(), Eq(1));
EXPECT_THAT(
type_manager().FindStructTypeFieldByName(
TestAllTypes::descriptor()->full_name(), "field_that_does_not_exist"),
IsOkAndHolds(Eq(absl::nullopt)));
EXPECT_THAT(type_manager().FindStructTypeFieldByName(
"type.that.does.not.Exist", "does_not_matter"),
IsOkAndHolds(Eq(absl::nullopt)));
}
TEST_P(ProtoTypeIntrospectorTest, FindEnumConstant) {
ProtoTypeIntrospector introspector;
const auto* enum_desc = TestAllTypes::NestedEnum_descriptor();
ASSERT_OK_AND_ASSIGN(
auto enum_constant,
introspector.FindEnumConstant(
type_manager(),
"google.api.expr.test.v1.proto2.TestAllTypes.NestedEnum", "BAZ"));
ASSERT_TRUE(enum_constant.has_value());
EXPECT_EQ(enum_constant->type.kind(), TypeKind::kEnum);
EXPECT_EQ(enum_constant->type_full_name, enum_desc->full_name());
EXPECT_EQ(enum_constant->value_name, "BAZ");
EXPECT_EQ(enum_constant->number, 2);
}
TEST_P(ProtoTypeIntrospectorTest, FindEnumConstantNull) {
ProtoTypeIntrospector introspector;
ASSERT_OK_AND_ASSIGN(
auto enum_constant,
introspector.FindEnumConstant(type_manager(), "google.protobuf.NullValue",
"NULL_VALUE"));
ASSERT_TRUE(enum_constant.has_value());
EXPECT_EQ(enum_constant->type.kind(), TypeKind::kNull);
EXPECT_EQ(enum_constant->type_full_name, "google.protobuf.NullValue");
EXPECT_EQ(enum_constant->value_name, "NULL_VALUE");
EXPECT_EQ(enum_constant->number, 0);
}
TEST_P(ProtoTypeIntrospectorTest, FindEnumConstantUnknownEnum) {
ProtoTypeIntrospector introspector;
ASSERT_OK_AND_ASSIGN(
auto enum_constant,
introspector.FindEnumConstant(type_manager(), "NotARealEnum", "BAZ"));
EXPECT_FALSE(enum_constant.has_value());
}
TEST_P(ProtoTypeIntrospectorTest, FindEnumConstantUnknownValue) {
ProtoTypeIntrospector introspector;
ASSERT_OK_AND_ASSIGN(
auto enum_constant,
introspector.FindEnumConstant(
type_manager(),
"google.api.expr.test.v1.proto2.TestAllTypes.NestedEnum", "QUX"));
ASSERT_FALSE(enum_constant.has_value());
}
INSTANTIATE_TEST_SUITE_P(
ProtoTypeIntrospectorTest, ProtoTypeIntrospectorTest,
::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting),
ProtoTypeIntrospectorTest::ToString);
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/type_introspector.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/extensions/protobuf/type_introspector_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
7ab47ab0-9fed-413f-9b03-23b0997a2c1f | cpp | tensorflow/tensorflow | nest_gemm_fusion | third_party/xla/xla/service/gpu/transforms/nest_gemm_fusion.cc | third_party/xla/xla/service/gpu/transforms/nest_gemm_fusion_test.cc | #include "xla/service/gpu/transforms/nest_gemm_fusion.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/model/symbolic_tile_analysis.h"
#include "xla/service/gpu/model/symbolic_tiled_hlo_instruction.h"
#include "xla/service/gpu/model/tiled_hlo_computation.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/instruction_fusion.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
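// Extracts `instructions` into a new nested fusion computation and replaces
// every use of the old root within `consumer` by the new fusion.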
absl::Status FuseInstructionsForConsumer(
const std::vector<HloInstruction*>& instructions,
HloInstruction& consumer) {
HloComputation::Builder builder(instructions.back()->name());
absl::flat_hash_map<const HloInstruction*, HloInstruction*>
old_to_new_mapping;
std::vector<HloInstruction*> parameters;
auto add_parameter = [&](HloInstruction* instruction) -> void {
int param_index = parameters.size();
old_to_new_mapping[instruction] =
builder.AddInstruction(HloInstruction::CreateParameter(
param_index, instruction->shape(),
absl::StrCat("parameter_", param_index)));
parameters.push_back(instruction);
};
for (HloInstruction* instruction : instructions) {
if (old_to_new_mapping.contains(instruction)) {
continue;
}
if (instruction->opcode() == HloOpcode::kParameter) {
add_parameter(instruction);
continue;
}
std::vector<HloInstruction*> new_operands;
for (HloInstruction* operand : instruction->mutable_operands()) {
if (!old_to_new_mapping.contains(operand)) {
add_parameter(operand);
}
new_operands.push_back(old_to_new_mapping[operand]);
}
old_to_new_mapping[instruction] = builder.AddInstruction(
instruction->CloneWithNewOperands(instruction->shape(), new_operands));
}
HloInstruction* old_root = instructions.back();
old_to_new_mapping[old_root]->MarkAsRoot();
HloComputation* computation =
old_root->GetModule()->AddComputationAndUnifyNamesAndIds(
builder.Build(), false);
HloInstruction* fusion =
old_root->parent()->AddInstruction(HloInstruction::CreateFusion(
old_root->shape(), HloInstruction::FusionKind::kCustom, parameters,
computation));
fusion->GetModule()->SetAndUniquifyInstrName(fusion, "block_fusion");
TF_ASSIGN_OR_RETURN(auto gpu_config,
fusion->backend_config<GpuBackendConfig>());
FusionBackendConfig& backend_config =
*gpu_config.mutable_fusion_backend_config();
backend_config.set_kind(std::string(kTritonFusionKind));
TF_RETURN_IF_ERROR(fusion->set_backend_config(gpu_config));
for (int64_t operand_index : consumer.OperandIndices(old_root)) {
TF_RETURN_IF_ERROR(consumer.ReplaceOperandWith(operand_index, fusion));
}
return absl::OkStatus();
}
absl::Status AnnotateDotOperandNestedFusionImpl(
HloFusionInstruction& nested_fusion, const HloDotInstruction& dot,
const TritonGemmConfig& config,
absl::Span<const int64_t> contracting_dimensions,
absl::Span<const int64_t> batch_dimensions, int64_t contracting_dim_size,
int64_t non_contracting_dim_size) {
if (contracting_dimensions.size() != 1) {
return absl::InternalError(
absl::StrCat("Expected a single lhs contracting dimension but got ",
contracting_dimensions.size()));
}
TF_ASSIGN_OR_RETURN(
std::vector<int64_t> non_contracting_dimensions,
GetNonContractingDims(dot.operand(0)->shape(), batch_dimensions,
contracting_dimensions));
if (non_contracting_dimensions.size() != 1) {
return absl::InternalError(
absl::StrCat("Expected a single non-contracting dimension but got ",
non_contracting_dimensions.size()));
}
std::vector<int64_t> output_tile_sizes(dot.operand(0)->shape().rank(), 1);
output_tile_sizes[contracting_dimensions[0]] = contracting_dim_size;
output_tile_sizes[non_contracting_dimensions[0]] = non_contracting_dim_size;
BlockLevelParameters block_level_parameters;
block_level_parameters.output_tile_sizes = std::move(output_tile_sizes);
TF_ASSIGN_OR_RETURN(auto backend_config,
nested_fusion.backend_config<GpuBackendConfig>());
*backend_config.mutable_fusion_backend_config()
->mutable_block_level_fusion_config() =
block_level_parameters.ToBlockLevelFusionConfig();
TF_RETURN_IF_ERROR(nested_fusion.set_backend_config(backend_config));
return absl::OkStatus();
}
absl::Status AnnotateDotLhsNestedFusion(HloFusionInstruction& nested_fusion,
const HloDotInstruction& dot,
const TritonGemmConfig& config) {
const DotDimensionNumbers& dimension_numbers = dot.dot_dimension_numbers();
return AnnotateDotOperandNestedFusionImpl(
nested_fusion, dot, config,
dimension_numbers.lhs_contracting_dimensions(),
dimension_numbers.lhs_batch_dimensions(), config.block_k, config.block_m);
}
absl::Status AnnotateDotRhsNestedFusion(HloFusionInstruction& nested_fusion,
const HloDotInstruction& dot,
const TritonGemmConfig& config) {
const DotDimensionNumbers& dimension_numbers = dot.dot_dimension_numbers();
return AnnotateDotOperandNestedFusionImpl(
nested_fusion, dot, config,
dimension_numbers.rhs_contracting_dimensions(),
dimension_numbers.rhs_batch_dimensions(), config.block_k, config.block_n);
}
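// Searches permutations of the expected dot tile sizes {1, ..., block_m,
// block_n} for an output tiling that satisfies the analysis constraints and
// still maps onto the dot's expected tile sizes.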
absl::StatusOr<llvm::SmallVector<int64_t>> FindOutputTileSizesForEpilogue(
const SymbolicTiledHloInstruction& tiled_dot,
const SymbolicTileAnalysis& analysis, const TritonGemmConfig& config) {
int64_t dot_rank = tiled_dot.symbolic_tile().tile_map().GetDimensionCount();
llvm::SmallVector<int64_t> expected_dot_tile_sizes(dot_rank, 1);
expected_dot_tile_sizes[dot_rank - 2] = config.block_m;
expected_dot_tile_sizes[dot_rank - 1] = config.block_n;
llvm::SmallVector<int64_t> output_tile_sizes = expected_dot_tile_sizes;
std::sort(output_tile_sizes.begin(), output_tile_sizes.end());
do {
TF_ASSIGN_OR_RETURN(
bool parameters_satisfy_constraints,
analysis.ParametersSatisfyConstraints(output_tile_sizes));
if (!parameters_satisfy_constraints) {
continue;
}
auto mapped_dot_tile_sizes = tiled_dot.TileSizes(output_tile_sizes);
if (mapped_dot_tile_sizes == expected_dot_tile_sizes) {
return output_tile_sizes;
}
} while (std::next_permutation(output_tile_sizes.begin(),
output_tile_sizes.end()));
return absl::InternalError(absl::StrCat(
"Couldn't find output tile sizes that satisfy ", tiled_dot.ToString()));
}
absl::StatusOr<TritonGemmConfig> GetTritonGemmConfig(
const HloFusionInstruction& fusion) {
TF_ASSIGN_OR_RETURN(auto gpu_config,
fusion.backend_config<GpuBackendConfig>());
const FusionBackendConfig& backend_config =
gpu_config.fusion_backend_config();
if (!backend_config.has_triton_gemm_config()) {
return absl::InternalError(
"The fusion's backend config doesn't have a triton_gemm_config.");
}
return TritonGemmConfig::FromProto(backend_config.triton_gemm_config());
}
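// Rewrites a legacy Triton GEMM fusion in place: the dot's operand subgraphs
// become nested block-level fusions, and the outer fusion is annotated with
// block-level output tile sizes for the epilogue.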
absl::Status MakeNestedFusionFromGemmFusion(
HloFusionInstruction* fusion, const TritonGemmConfig& config,
const SymbolicTileAnalysis& analysis,
const SymbolicTiledHloInstruction& tiled_dot, HloDotInstruction* dot) {
DCHECK(GetTritonGemmConfig(*fusion).value() == config);
DCHECK_EQ(tiled_dot.hlo(), dot);
HloComputation* computation = fusion->called_computation();
TF_RETURN_IF_ERROR(FuseInstructionsForConsumer(
computation->MakeInstructionPostOrderFrom(*dot->mutable_operand(0)),
*dot));
TF_RETURN_IF_ERROR(AnnotateDotLhsNestedFusion(
*::xla::Cast<HloFusionInstruction>(dot->mutable_operand(0)), *dot,
config));
TF_RETURN_IF_ERROR(FuseInstructionsForConsumer(
computation->MakeInstructionPostOrderFrom(*dot->mutable_operand(1)),
*dot));
TF_RETURN_IF_ERROR(AnnotateDotRhsNestedFusion(
*::xla::Cast<HloFusionInstruction>(dot->mutable_operand(1)), *dot,
config));
TF_ASSIGN_OR_RETURN([[maybe_unused]] bool changed,
HloDCE::RunOnComputation(
computation,
false));
TF_ASSIGN_OR_RETURN(
llvm::SmallVector<int64_t> output_tile_sizes,
FindOutputTileSizesForEpilogue(tiled_dot, analysis, config));
TF_ASSIGN_OR_RETURN(auto gpu_config,
fusion->backend_config<GpuBackendConfig>());
FusionBackendConfig& backend_config =
*gpu_config.mutable_fusion_backend_config();
backend_config.set_kind(std::string(kTritonFusionKind));
BlockLevelParameters block_level_parameters;
block_level_parameters.output_tile_sizes.assign(output_tile_sizes.begin(),
output_tile_sizes.end());
*backend_config.mutable_block_level_fusion_config() =
block_level_parameters.ToBlockLevelFusionConfig();
TF_RETURN_IF_ERROR(fusion->set_backend_config(gpu_config));
return absl::OkStatus();
}
size_t GetDotCount(HloComputation* computation) {
return absl::c_count_if(computation->instructions(), [](HloInstruction* hlo) {
return hlo->opcode() == HloOpcode::kDot;
});
}
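// Visitor that rewrites every Triton GEMM fusion it visits into the nested
// block-level form.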
class NestGemmFusionVisitor : public DfsHloRewriteVisitor {
public:
explicit NestGemmFusionVisitor(mlir::MLIRContext* ctx) : ctx_(ctx) {}
absl::Status HandleFusion(HloInstruction* instruction) override {
HloFusionInstruction* fusion = Cast<HloFusionInstruction>(instruction);
absl::StatusOr<TritonGemmConfig> config = GetTritonGemmConfig(*fusion);
if (!config.ok()) {
return absl::OkStatus();
}
HloComputation* computation = fusion->called_computation();
HloInstruction* dot =
hlo_query::GetFirstInstructionWithOpcode(*computation, HloOpcode::kDot);
if (dot == nullptr) {
return absl::OkStatus();
}
DCHECK_EQ(GetDotCount(computation), 1) << "Fusion has more than one dot.";
SymbolicTileAnalysisOrError analysis_or =
SymbolicTileAnalysis::AnalyzeComputation(
*fusion->called_computations()[0], ctx_);
if (std::holds_alternative<FusionDecision>(analysis_or)) {
return absl::InternalError(
absl::StrCat("Failed to analyze the computation (",
std::get<FusionDecision>(analysis_or).Explain(),
"): ", fusion->called_computation()->ToString()));
}
auto& analysis = std::get<SymbolicTileAnalysis>(analysis_or);
auto tiled_dot_it = absl::c_find_if(
analysis.GetSymbolicTiledHloComputation(),
[&](const auto& tiled_hlo) { return tiled_hlo->hlo() == dot; });
if (tiled_dot_it == analysis.GetSymbolicTiledHloComputation().end()) {
return absl::InternalError(absl::StrCat(
"Couldn't find a symbolic tiled instruction for ", dot->ToString()));
}
TF_RETURN_IF_ERROR(MakeNestedFusionFromGemmFusion(
fusion, config.value(), analysis, **tiled_dot_it,
Cast<HloDotInstruction>(dot)));
this->MarkAsChanged();
return absl::OkStatus();
}
private:
mlir::MLIRContext* ctx_;
};
}
absl::StatusOr<bool> NestGemmFusion::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
mlir::MLIRContext ctx;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
NestGemmFusionVisitor visitor(&ctx);
TF_RETURN_IF_ERROR(computation->Accept(&visitor));
changed |= visitor.changed();
}
return changed;
}
} | #include "xla/service/gpu/transforms/nest_gemm_fusion.h"
#include <ostream>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
using ::testing::ElementsAre;
namespace xla {
static void PrintTo(const HloInstruction& hlo, std::ostream* os) {
*os << hlo.ToString();
}
namespace gpu {
namespace {
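// Matches an HloInstruction whose block-level backend config has the given
// output tile sizes.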
MATCHER_P(OutputTileSizesIs, matcher, "") {
auto backend_config = arg.template backend_config<GpuBackendConfig>();
if (!backend_config.ok()) {
*result_listener << "failed to get backend config: "
<< backend_config.status();
return false;
}
FusionBackendConfig fusion_backend_config =
backend_config->fusion_backend_config();
if (!fusion_backend_config.has_block_level_fusion_config()) {
*result_listener << "has no block level fusion config";
return false;
}
auto output_tile_sizes =
fusion_backend_config.block_level_fusion_config().output_tile_sizes();
return ExplainMatchResult(matcher, output_tile_sizes, result_listener);
}
class NestGemmFusionTest : public HloTestBase {};
TEST_F(NestGemmFusionTest, BasicTest) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule module
dot {
lhs = bf16[8192,512] parameter(0)
rhs = bf16[512,512] parameter(1)
ROOT %dot = bf16[8192,512] dot(lhs, rhs),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY entry {
p0 = bf16[8192,512] parameter(0)
p1 = bf16[512,512] parameter(1)
ROOT fusion = bf16[8192,512] fusion(p0, p1),
kind=kCustom, calls=dot, backend_config={
"fusion_backend_config": {
"kind":"__triton_gemm", "triton_gemm_config": {
"block_m":"64", "block_n":"256", "block_k":"32",
"split_k":"1", "num_stages":"1", "num_warps":"1", "num_ctas":"1"
}
}
}
}
)"));
TF_ASSERT_OK_AND_ASSIGN(bool changed, NestGemmFusion().Run(module.get()));
EXPECT_TRUE(changed);
TF_ASSERT_OK(verifier().Run(module.get()).status());
const HloInstruction* fusion = nullptr;
ASSERT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(match::Fusion(&fusion)));
EXPECT_THAT(*fusion, OutputTileSizesIs(ElementsAre(64, 256)));
const HloInstruction* lhs = nullptr;
const HloInstruction* rhs = nullptr;
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(match::Dot(match::Fusion(&lhs), match::Fusion(&rhs))));
EXPECT_THAT(*lhs, OutputTileSizesIs(ElementsAre(64, 32)));
EXPECT_THAT(*rhs, OutputTileSizesIs(ElementsAre(32, 256)));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/nest_gemm_fusion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/nest_gemm_fusion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
79b73ddc-d78b-4fc8-891a-abd1b5e2ac20 | cpp | google/libaddressinput | region_data_builder | cpp/src/region_data_builder.cc | cpp/test/region_data_builder_test.cc | #include <libaddressinput/region_data_builder.h>
#include <libaddressinput/address_data.h>
#include <libaddressinput/preload_supplier.h>
#include <libaddressinput/region_data.h>
#include <cassert>
#include <cstddef>
#include <map>
#include <string>
#include <vector>
#include "language.h"
#include "lookup_key.h"
#include "region_data_constants.h"
#include "rule.h"
#include "util/size.h"
namespace i18n {
namespace addressinput {
namespace {
const size_t kLookupKeysMaxDepth = size(LookupKey::kHierarchy) - 1;
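// Walks the rule hierarchy depth-first, adding a sub-region for each key and
// recursing while rules provide sub-keys and the maximum depth allows.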
void BuildRegionTreeRecursively(
const std::map<std::string, const Rule*>& rules,
std::map<std::string, const Rule*>::const_iterator hint,
const LookupKey& parent_key,
RegionData* parent_region,
const std::vector<std::string>& keys,
bool prefer_latin_name,
size_t region_max_depth) {
assert(parent_region != nullptr);
LookupKey lookup_key;
for (const auto& key : keys) {
lookup_key.FromLookupKey(parent_key, key);
const std::string lookup_key_string =
lookup_key.ToKeyString(kLookupKeysMaxDepth);
++hint;
if (hint == rules.end() || hint->first != lookup_key_string) {
hint = rules.find(lookup_key_string);
if (hint == rules.end()) {
return;
}
}
const Rule* rule = hint->second;
assert(rule != nullptr);
const std::string& local_name = rule->GetName().empty()
? key : rule->GetName();
const std::string& name =
prefer_latin_name && !rule->GetLatinName().empty()
? rule->GetLatinName() : local_name;
RegionData* region = parent_region->AddSubRegion(key, name);
if (!rule->GetSubKeys().empty() &&
region_max_depth > parent_key.GetDepth()) {
BuildRegionTreeRecursively(rules,
hint,
lookup_key,
region,
rule->GetSubKeys(),
prefer_latin_name,
region_max_depth);
}
}
}
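// Builds the tree of sub-regions for |region_code| in the given language.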
RegionData* BuildRegion(const std::map<std::string, const Rule*>& rules,
const std::string& region_code,
const Language& language) {
AddressData address;
address.region_code = region_code;
LookupKey lookup_key;
lookup_key.FromAddress(address);
auto hint = rules.find(lookup_key.ToKeyString(kLookupKeysMaxDepth));
assert(hint != rules.end());
const Rule* rule = hint->second;
assert(rule != nullptr);
auto* region = new RegionData(region_code);
size_t region_max_depth =
RegionDataConstants::GetMaxLookupKeyDepth(region_code);
if (region_max_depth > 0) {
BuildRegionTreeRecursively(rules,
hint,
lookup_key,
region,
rule->GetSubKeys(),
language.has_latin_script,
region_max_depth);
}
return region;
}
}
RegionDataBuilder::RegionDataBuilder(PreloadSupplier* supplier)
: supplier_(supplier),
cache_() {
assert(supplier_ != nullptr);
}
RegionDataBuilder::~RegionDataBuilder() {
for (const auto& outer : cache_) {
assert(outer.second != nullptr);
for (const auto& inner : *outer.second) {
delete inner.second;
}
delete outer.second;
}
}
const RegionData& RegionDataBuilder::Build(
const std::string& region_code,
const std::string& ui_language_tag,
std::string* best_region_tree_language_tag) {
assert(supplier_->IsLoaded(region_code));
assert(best_region_tree_language_tag != nullptr);
auto region_it = cache_.find(region_code);
if (region_it == cache_.end()) {
region_it = cache_.emplace(region_code, new LanguageRegionMap).first;
}
Rule rule;
rule.ParseSerializedRule(RegionDataConstants::GetRegionData(region_code));
static const Language kUndefinedLanguage("und");
const Language best_language =
rule.GetLanguages().empty()
? kUndefinedLanguage
: ChooseBestAddressLanguage(rule, Language(ui_language_tag));
*best_region_tree_language_tag = best_language.tag;
auto language_it = region_it->second->find(best_language.tag);
if (language_it == region_it->second->end()) {
const auto& rules = supplier_->GetRulesForRegion(region_code);
language_it = region_it->second
->emplace(best_language.tag,
BuildRegion(rules, region_code, best_language))
.first;
}
return *language_it->second;
}
}
} | #include <libaddressinput/region_data_builder.h>
#include <libaddressinput/callback.h>
#include <libaddressinput/null_storage.h>
#include <libaddressinput/preload_supplier.h>
#include <libaddressinput/region_data.h>
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "testdata_source.h"
namespace {
using i18n::addressinput::BuildCallback;
using i18n::addressinput::NullStorage;
using i18n::addressinput::PreloadSupplier;
using i18n::addressinput::RegionData;
using i18n::addressinput::RegionDataBuilder;
using i18n::addressinput::TestdataSource;
class RegionDataBuilderTest : public testing::Test {
public:
RegionDataBuilderTest(const RegionDataBuilderTest&) = delete;
RegionDataBuilderTest& operator=(const RegionDataBuilderTest&) = delete;
protected:
RegionDataBuilderTest()
: supplier_(new TestdataSource(true),
new NullStorage),
builder_(&supplier_),
loaded_callback_(BuildCallback(this, &RegionDataBuilderTest::OnLoaded)),
best_language_() {}
PreloadSupplier supplier_;
RegionDataBuilder builder_;
const std::unique_ptr<const PreloadSupplier::Callback> loaded_callback_;
std::string best_language_;
private:
void OnLoaded(bool success, const std::string& region_code, int num_rules) {
ASSERT_TRUE(success);
ASSERT_FALSE(region_code.empty());
ASSERT_LT(0, num_rules);
ASSERT_TRUE(supplier_.IsLoaded(region_code));
}
};
TEST_F(RegionDataBuilderTest, BuildUsRegionTree) {
supplier_.LoadRules("US", *loaded_callback_);
const RegionData& tree = builder_.Build("US", "en-US", &best_language_);
EXPECT_FALSE(tree.sub_regions().empty());
}
TEST_F(RegionDataBuilderTest, BuildCnRegionTree) {
supplier_.LoadRules("CN", *loaded_callback_);
const RegionData& tree = builder_.Build("CN", "zh-Hans", &best_language_);
ASSERT_FALSE(tree.sub_regions().empty());
EXPECT_FALSE(tree.sub_regions().front()->sub_regions().empty());
}
TEST_F(RegionDataBuilderTest, BuildChRegionTree) {
supplier_.LoadRules("CH", *loaded_callback_);
const RegionData& tree = builder_.Build("CH", "de-CH", &best_language_);
EXPECT_TRUE(tree.sub_regions().empty());
}
TEST_F(RegionDataBuilderTest, BuildZwRegionTree) {
supplier_.LoadRules("ZW", *loaded_callback_);
const RegionData& tree = builder_.Build("ZW", "en-ZW", &best_language_);
EXPECT_TRUE(tree.sub_regions().empty());
}
TEST_F(RegionDataBuilderTest, UsTreeHasStateAbbreviationsAndNames) {
supplier_.LoadRules("US", *loaded_callback_);
const RegionData& tree = builder_.Build("US", "en-US", &best_language_);
EXPECT_EQ("en", best_language_);
ASSERT_FALSE(tree.sub_regions().empty());
EXPECT_EQ("AL", tree.sub_regions().front()->key());
EXPECT_EQ("Alabama", tree.sub_regions().front()->name());
}
TEST_F(RegionDataBuilderTest,
KrWithKoLatnLanguageHasKoreanKeysAndLatinScriptNames) {
supplier_.LoadRules("KR", *loaded_callback_);
const RegionData& tree = builder_.Build("KR", "ko-Latn", &best_language_);
EXPECT_EQ("ko-Latn", best_language_);
ASSERT_FALSE(tree.sub_regions().empty());
EXPECT_EQ("강원도", tree.sub_regions().front()->key());
EXPECT_EQ("Gangwon", tree.sub_regions().front()->name());
}
TEST_F(RegionDataBuilderTest, KrWithKoKrLanguageHasKoreanKeysAndNames) {
supplier_.LoadRules("KR", *loaded_callback_);
const RegionData& tree = builder_.Build("KR", "ko-KR", &best_language_);
EXPECT_EQ("ko", best_language_);
ASSERT_FALSE(tree.sub_regions().empty());
EXPECT_EQ("강원도", tree.sub_regions().front()->key());
EXPECT_EQ("강원", tree.sub_regions().front()->name());
}
} | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/region_data_builder.cc | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/region_data_builder_test.cc | 2610f7b1043d6784ada41392fc9392d1ea09ea07 |
31c1fe9d-b92e-40d4-9b3a-0cd0e17a79e6 | cpp | tensorflow/tensorflow | dataset | tensorflow/core/framework/dataset.cc | tensorflow/core/framework/dataset_test.cc | #include "tensorflow/core/framework/dataset.h"
#include <atomic>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "tensorflow/core/activity_watcher/activity.h"
#include "tensorflow/core/framework/dataset.pb.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/variant_encode_decode.h"
#include "tensorflow/core/framework/variant_op_registry.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/resource.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/public/version.h"
#if defined(PLATFORM_WINDOWS)
#undef GetMessage
#endif
namespace tensorflow {
namespace data {
namespace {
static mutex* get_dataset_op_registry_lock() {
static mutex dataset_op_registry_lock(LINKER_INITIALIZED);
return &dataset_op_registry_lock;
}
static std::unordered_set<string>* get_dataset_op_registry() {
static std::unordered_set<string>* names = new std::unordered_set<string>;
return names;
}
std::string UniqueNodeName(const std::string& base) {
static std::atomic<int64_t> counter(0);
return strings::StrCat(base, "/", counter.fetch_add(1));
}
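// A Variant payload holding a reference-counted DatasetBase*. Copies share
// the underlying dataset; Encode()/Decode() intentionally fail, since a raw
// dataset pointer is only meaningful within a single process.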
class DatasetVariantWrapper {
public:
DatasetVariantWrapper() : dataset_(nullptr) {}
explicit DatasetVariantWrapper(DatasetBase* dataset) : dataset_(dataset) {}
DatasetVariantWrapper(const DatasetVariantWrapper& other)
: dataset_(other.dataset_) {
if (dataset_) dataset_->Ref();
}
DatasetVariantWrapper& operator=(DatasetVariantWrapper&& other) {
if (&other == this) return *this;
std::swap(dataset_, other.dataset_);
return *this;
}
DatasetVariantWrapper& operator=(const DatasetVariantWrapper& other) = delete;
~DatasetVariantWrapper() {
if (dataset_) dataset_->Unref();
}
DatasetBase* get() const { return dataset_; }
string TypeName() const { return "tensorflow::DatasetVariantWrapper"; }
string DebugString() const {
if (dataset_) {
return dataset_->DebugString();
} else {
return "<Uninitialized DatasetVariantWrapper>";
}
}
void Encode(VariantTensorData* data) const {
LOG(ERROR) << "The Encode() method is not implemented for "
"DatasetVariantWrapper objects.";
}
bool Decode(const VariantTensorData& data) {
LOG(ERROR) << "The Decode() method is not implemented for "
"DatasetVariantWrapper objects.";
return false;
}
private:
DatasetBase* dataset_;
};
const char kWrappedDatasetVariantTypeName[] =
"tensorflow::data::WrappedDatasetVariant";
class WrappedDatasetVariantWrapper {
public:
WrappedDatasetVariantWrapper() {}
explicit WrappedDatasetVariantWrapper(const Tensor& ds_tensor)
: ds_tensor_(ds_tensor) {}
Tensor get() const { return ds_tensor_; }
string TypeName() const { return "tensorflow::WrappedDatasetVariantWrapper"; }
string DebugString() const {
return "tensorflow::WrappedDatasetVariantWrapper::DebugString";
}
void Encode(VariantTensorData* data) const {
*(data->add_tensors()) = ds_tensor_;
}
bool Decode(const VariantTensorData& data) {
ds_tensor_ = data.tensors(0);
return true;
}
private:
Tensor ds_tensor_;
};
class WrapDatasetVariantOp : public OpKernel {
public:
explicit WrapDatasetVariantOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
const Tensor& tensor = ctx->input(0);
OP_REQUIRES(ctx,
tensor.dtype() == DT_VARIANT &&
TensorShapeUtils::IsScalar(tensor.shape()),
errors::InvalidArgument(
"Dataset tensor must be a scalar of dtype DT_VARIANT."));
DatasetBase* unused;
OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(tensor, &unused));
Tensor* output = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &output));
output->scalar<Variant>()() = WrappedDatasetVariantWrapper(tensor);
}
};
REGISTER_KERNEL_BUILDER(Name("WrapDatasetVariant").Device(DEVICE_CPU),
WrapDatasetVariantOp);
REGISTER_KERNEL_BUILDER(Name("WrapDatasetVariant")
.HostMemory("input_handle")
.HostMemory("output_handle")
.Device(DEVICE_GPU),
WrapDatasetVariantOp);
class UnwrapDatasetVariantOp : public OpKernel {
public:
explicit UnwrapDatasetVariantOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
const Tensor& tensor = ctx->input(0);
OP_REQUIRES(ctx,
tensor.dtype() == DT_VARIANT &&
TensorShapeUtils::IsScalar(tensor.shape()),
errors::InvalidArgument(
"Dataset tensor must be a scalar of dtype DT_VARIANT."));
Variant variant = tensor.scalar<Variant>()();
const WrappedDatasetVariantWrapper* wrapper =
variant.get<WrappedDatasetVariantWrapper>();
OP_REQUIRES(ctx, wrapper != nullptr,
errors::InvalidArgument(
"Tensor must be a WrappedDataset variant object."));
Tensor ds_tensor = wrapper->get();
OP_REQUIRES_OK(ctx, ctx->set_output("output_handle", ds_tensor));
}
};
REGISTER_KERNEL_BUILDER(Name("UnwrapDatasetVariant").Device(DEVICE_CPU),
UnwrapDatasetVariantOp);
REGISTER_KERNEL_BUILDER(Name("UnwrapDatasetVariant")
.HostMemory("input_handle")
.HostMemory("output_handle")
.Device(DEVICE_GPU),
UnwrapDatasetVariantOp);
static Status WrappedDatasetVariantDeviceCopy(
const WrappedDatasetVariantWrapper& from, WrappedDatasetVariantWrapper* to,
const UnaryVariantOpRegistry::AsyncTensorDeviceCopyFn& copy) {
*to = WrappedDatasetVariantWrapper(from);
return absl::OkStatus();
}
#define REGISTER_OPTIONAL_COPY(DIRECTION) \
INTERNAL_REGISTER_UNARY_VARIANT_DEVICE_COPY_FUNCTION( \
WrappedDatasetVariantWrapper, DIRECTION, \
WrappedDatasetVariantDeviceCopy)
REGISTER_OPTIONAL_COPY(VariantDeviceCopyDirection::HOST_TO_DEVICE);
REGISTER_OPTIONAL_COPY(VariantDeviceCopyDirection::DEVICE_TO_HOST);
REGISTER_OPTIONAL_COPY(VariantDeviceCopyDirection::DEVICE_TO_DEVICE);
REGISTER_UNARY_VARIANT_DECODE_FUNCTION(WrappedDatasetVariantWrapper,
kWrappedDatasetVariantTypeName);
}
Status GraphDefBuilderWrapper::AddDataset(const DatasetBase* dataset,
const std::vector<Node*>& inputs,
Node** output) {
return AddDataset(dataset, inputs, {}, output);
}
Status GraphDefBuilderWrapper::AddDataset(
const DatasetBase* dataset, const std::vector<Node*>& inputs,
const std::vector<std::pair<StringPiece, AttrValue>>& attrs,
Node** output) {
std::vector<std::pair<size_t, Node*>> enumerated_inputs(inputs.size());
for (size_t i = 0; i < inputs.size(); i++) {
enumerated_inputs[i] = std::make_pair(i, inputs[i]);
}
return AddDataset(dataset, enumerated_inputs, {}, attrs, output);
}
Status GraphDefBuilderWrapper::AddDataset(
const DatasetBase* dataset,
const std::vector<std::pair<size_t, Node*>>& inputs,
const std::vector<std::pair<size_t, absl::Span<Node* const>>>& list_inputs,
const std::vector<std::pair<StringPiece, AttrValue>>& attrs,
Node** output) {
return AddDataset(dataset, inputs, list_inputs, attrs,
false, output);
}
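// Builds a graph node for `dataset`, attaching output_shapes, output_types,
// and metadata attrs when the op defines them, and splicing scalar and list
// inputs together by their declared argument indices.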
Status GraphDefBuilderWrapper::AddDataset(
const DatasetBase* dataset,
const std::vector<std::pair<size_t, Node*>>& inputs,
const std::vector<std::pair<size_t, absl::Span<Node* const>>>& list_inputs,
const std::vector<std::pair<StringPiece, AttrValue>>& attrs,
bool use_dataset_name, Node** output) {
auto& type_string = dataset->type_string();
auto opts = absl::make_unique<GraphDefBuilder::Options>(b_->opts());
bool has_output_types_attr = HasAttr(type_string, "output_types");
bool has_output_shapes_attr = HasAttr(type_string, "output_shapes");
if (has_output_shapes_attr) {
opts = absl::make_unique<GraphDefBuilder::Options>(
opts->WithAttr("output_shapes", dataset->output_shapes()));
}
if (has_output_types_attr) {
opts = absl::make_unique<GraphDefBuilder::Options>(
opts->WithAttr("output_types", dataset->output_dtypes()));
}
bool has_metadata_attr = HasAttr(type_string, "metadata");
if (has_metadata_attr) {
std::string serialized_metadata;
dataset->metadata().SerializeToString(&serialized_metadata);
opts = absl::make_unique<GraphDefBuilder::Options>(
opts->WithAttr("metadata", serialized_metadata));
}
for (const auto& attr : attrs) {
opts = absl::make_unique<GraphDefBuilder::Options>(
opts->WithAttr(attr.first, attr.second));
}
if (opts->HaveError()) {
return errors::Internal("AddDataset: Failed to build Options with error ",
opts->StatusToString());
}
NodeBuilder node_builder(
use_dataset_name ? dataset->node_name() : opts->GetNameForOp(type_string),
type_string, opts->op_registry());
{
size_t total_size = inputs.size() + list_inputs.size();
auto inputs_iter = inputs.begin();
auto list_inputs_iter = list_inputs.begin();
    for (size_t i = 0; i < total_size; i++) {
if (inputs_iter != inputs.end() && inputs_iter->first == i) {
node_builder.Input(NodeBuilder::NodeOut(inputs_iter->second));
inputs_iter++;
} else if (list_inputs_iter != list_inputs.end() &&
list_inputs_iter->first == i) {
std::vector<NodeBuilder::NodeOut> nodeout_inputs;
nodeout_inputs.reserve(list_inputs_iter->second.size());
for (Node* n : list_inputs_iter->second) {
nodeout_inputs.emplace_back(n);
}
node_builder.Input(nodeout_inputs);
list_inputs_iter++;
} else {
return errors::InvalidArgument("No input found for index ", i);
}
}
}
*output = opts->FinalizeBuilder(&node_builder);
if (*output == nullptr) {
return errors::Internal("AddDataset: Failed to build ", type_string,
" op with error ", opts->StatusToString());
}
return absl::OkStatus();
}
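// Adds the named function, its gradient (if any), and any functions it
// references, recursively, to the graph being built; names already present
// in the graph are skipped.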
Status GraphDefBuilderWrapper::AddFunction(
SerializationContext* ctx, const string& function_name,
const FunctionLibraryDefinition& lib_def) {
if (b_->HasFunction(function_name)) {
VLOG(1) << "Function with name " << function_name << "already exists in"
<< " the graph. It will not be added again.";
return absl::OkStatus();
}
const FunctionDef* f_def = lib_def.Find(function_name);
if (f_def == nullptr) {
return errors::InvalidArgument("Unable to find FunctionDef for ",
function_name, " in the registry.");
}
FunctionDefLibrary def;
*def.add_function() = *f_def;
const string gradient_func = lib_def.FindGradient(function_name);
if (!gradient_func.empty()) {
GradientDef* g_def = def.add_gradient();
g_def->set_function_name(function_name);
g_def->set_gradient_func(gradient_func);
}
TF_RETURN_IF_ERROR(b_->AddFunctionLibrary(def));
for (const NodeDef& node_def : f_def->node_def()) {
const OpRegistrationData* op_reg_data = nullptr;
TF_RETURN_IF_ERROR(lib_def.LookUp(node_def.op(), &op_reg_data));
if (op_reg_data->is_function_op) {
TF_RETURN_IF_ERROR(AddFunction(ctx, op_reg_data->op_def.name(), lib_def));
}
for (const auto& pair : node_def.attr()) {
TF_RETURN_IF_ERROR(AddAttrFunctions(ctx, pair.second, lib_def));
}
}
for (auto iter = f_def->attr().begin(); iter != f_def->attr().end(); iter++) {
TF_RETURN_IF_ERROR(AddAttrFunctions(ctx, iter->second, lib_def));
}
return absl::OkStatus();
}
void GraphDefBuilderWrapper::AddPlaceholderInternal(const Tensor& val,
Node** output) {
*output = ops::SourceOp(
"Placeholder",
b_->opts().WithAttr("dtype", val.dtype()).WithAttr("shape", val.shape()));
}
void GraphDefBuilderWrapper::AddTensorInternal(const Tensor& val,
Node** output) {
*output = ops::SourceOp(
"Const",
b_->opts().WithAttr("dtype", val.dtype()).WithAttr("value", val));
}
bool GraphDefBuilderWrapper::HasAttr(const string& name,
const string& attr_name) const {
const OpDef* op_def = nullptr;
Status s = b_->opts().op_registry()->LookUpOpDef(name, &op_def);
if (!s.ok() || op_def == nullptr) {
return false;
}
return HasAttr(op_def, attr_name);
}
int32_t GetRunnerThreadpoolSizeFromOpKernelContext(OpKernelContext* ctx) {
thread::ThreadPool* thread_pool =
ctx->device()->tensorflow_device_thread_pool();
if (thread_pool) {
return thread_pool->NumThreads();
} else {
static const int32_t kDefaultRunnerThreadpoolSize = port::MaxParallelism();
return kDefaultRunnerThreadpoolSize;
}
}
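// Returns a stable integer id for the (prefix, key) pair, allocating a fresh
// id on first use.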
int64_t MemoryCheckpoint::IdRegistry::Add(const std::string& prefix,
const std::string& key) {
mutex_lock l(mu_);
auto pair = std::make_pair(prefix, key);
  auto it = string_to_int_.find(pair);
  if (it != string_to_int_.end()) {
    return it->second;
  }
int64_t id = next_id_++;
int_to_string_[id] = pair;
string_to_int_[pair] = id;
return id;
}
std::vector<int64_t> MemoryCheckpoint::IdRegistry::GetMatchingIds(
const std::string& prefix_to_match) {
mutex_lock l(mu_);
std::vector<int64_t> ids;
for (const auto& [pair, id] : string_to_int_) {
auto [prefix, key] = pair;
if (prefix.compare(0, prefix_to_match.length(), prefix_to_match) == 0) {
ids.push_back(id);
}
}
return ids;
}
std::pair<std::string, std::string> MemoryCheckpoint::IdRegistry::Get(
int64_t id) {
mutex_lock l(mu_);
auto result = int_to_string_.find(id);
DCHECK(result != int_to_string_.end())
<< "Failed find id " << id << " in IdRegistry. "
<< "Max id is: " << next_id_ - 1;
return result->second;
}
void MemoryCheckpoint::IdRegistry::RemoveIds(const std::vector<int64_t>& ids) {
mutex_lock l(mu_);
for (const auto& id : ids) {
string_to_int_.erase(int_to_string_[id]);
int_to_string_.erase(id);
}
}
std::string MemoryCheckpoint::DebugString() const {
std::string result = absl::StrCat("status=", status_.ToString(),
", "
"root=",
(is_root_ ? "true" : "false"), "\n");
absl::StrAppend(&result, "number of integers: ", int_values_.size(), "\n");
for (const auto& [k, v] : int_values_) {
absl::StrAppend(&result, " ", id_registry_->Get(k).first, ":",
id_registry_->Get(k).second, ": ", v, "\n");
}
absl::StrAppend(&result, "number of strings: ", str_values_.size(), "\n");
for (const auto& [k, v] : str_values_) {
absl::StrAppend(&result, " ", id_registry_->Get(k).first, ":",
id_registry_->Get(k).second, ": ", v, "\n");
}
absl::StrAppend(&result, "number of tensors: ", tensor_values_.size(), "\n");
absl::StrAppend(
&result, "number of expired prefixes: ", expired_prefixes_.size(), "\n");
return result;
}
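// Folds `other` into this checkpoint: an error status in `other` wins and
// clears local state, per-key values from `other` override local ones, and
// prefixes expired in `other` are purged here as well.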
void MemoryCheckpoint::Merge(MemoryCheckpoint* other) {
if (!status_.ok()) {
return;
}
if (!other->status_.ok()) {
status_ = other->status_;
int_values_.clear();
str_values_.clear();
tensor_values_.clear();
}
for (const auto& [k, v] : other->int_values_) {
int_values_[k] = v;
}
for (const auto& [k, v] : other->str_values_) {
str_values_[k] = v;
}
for (const auto& [k, v] : other->tensor_values_) {
tensor_values_[k] = v;
}
for (const auto& prefix : other->expired_prefixes_) {
Purge(prefix);
}
other->expired_prefixes_.clear();
VLOG(5) << "MemoryCheckpoint::Merge " << DebugString();
}
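// Drops all state stored under `prefix`. Non-root checkpoints also record the
// prefix so the expiration propagates when merged into an ancestor; the root
// releases the ids themselves.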
void MemoryCheckpoint::Purge(const std::string& prefix) {
std::vector<int64_t> ids = id_registry_->GetMatchingIds(prefix);
for (const auto& id : ids) {
int_values_.erase(id);
str_values_.erase(id);
tensor_values_.erase(id);
}
if (!is_root_) {
expired_prefixes_.insert(prefix);
} else {
id_registry_->RemoveIds(ids);
}
}
Status MemoryCheckpoint::Save(IteratorStateWriter* writer) const {
for (const auto& [id, value] : int_values_) {
auto [prefix, key] = id_registry_->Get(id);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix, key, value));
}
for (const auto& [id, value] : str_values_) {
auto [prefix, key] = id_registry_->Get(id);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix, key, value));
}
for (const auto& [id, value] : tensor_values_) {
auto [prefix, key] = id_registry_->Get(id);
TF_RETURN_IF_ERROR(writer->WriteTensor(prefix, key, value));
}
return absl::OkStatus();
}
Status IteratorBase::InitializeBase(IteratorContext* ctx,
const IteratorBase* parent) {
parent_ = parent;
id_ =
Hash64CombineUnordered(Hash64(prefix()), reinterpret_cast<uint64>(this));
if (parent_) {
parent_id_ = Hash64CombineUnordered(Hash64(parent_->prefix()),
reinterpret_cast<uint64>(parent_));
if (const auto& model = ctx->model()) {
auto factory = [ctx, this](model::Node::Args args) {
return CreateNode(ctx, std::move(args));
};
model->AddNode(std::move(factory), prefix(), parent->model_node(),
&node_);
cleanup_fns_.push_back([this, model]() { model->RemoveNode(node_); });
}
}
return absl::OkStatus();
}
Status GetCompressedElementFromVariantTensor(
const Tensor& tensor, const CompressedElement** out_compressed_element) {
if (!(tensor.dtype() == DT_VARIANT &&
TensorShapeUtils::IsScalar(tensor.shape()))) {
return errors::InvalidArgument(
"`CompressedElement` tensor must be a scalar of dtype `DT_VARIANT`.");
}
const Variant& variant = tensor.scalar<Variant>()();
const CompressedElement* compressed_element =
variant.get<CompressedElement>();
if (compressed_element == nullptr) {
return errors::InvalidArgument(
"Tensor must be a `CompressedElement` object.");
}
*out_compressed_element = compressed_element;
return absl::OkStatus();
}
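// Sums the allocated bytes of `element`, unwrapping variant tensors that hold
// datasets or compressed elements so the payload they reference is counted.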
int64_t GetAllocatedBytes(const std::vector<Tensor>& element) {
int64_t allocated_bytes = 0;
for (auto& tensor : element) {
if (tensor.dtype() == DT_VARIANT) {
DatasetBase* dataset;
if (GetDatasetFromVariantTensor(tensor, &dataset).ok()) {
allocated_bytes += dataset->AllocatedBytes();
continue;
}
const CompressedElement* compressed_element;
if (GetCompressedElementFromVariantTensor(tensor, &compressed_element)
.ok()) {
allocated_bytes += compressed_element->ByteSizeLong();
continue;
}
}
allocated_bytes += tensor.AllocatedBytes();
}
return allocated_bytes;
}
int64_t GetTotalBytes(const std::vector<Tensor>& element) {
int64_t total_bytes = 0;
for (auto& tensor : element) {
if (tensor.dtype() == DT_VARIANT) {
DatasetBase* dataset;
if (GetDatasetFromVariantTensor(tensor, &dataset).ok()) {
total_bytes += dataset->TotalBytes();
continue;
}
const CompressedElement* compressed_element;
if (GetCompressedElementFromVariantTensor(tensor, &compressed_element)
.ok()) {
total_bytes += compressed_element->ByteSizeLong();
continue;
}
}
total_bytes += tensor.TotalBytes();
}
return total_bytes;
}
std::string FullName(const std::string& prefix, const std::string& name) {
if (absl::StrContains(name, kColon)) {
LOG(ERROR) << name << " should not contain " << kColon;
}
return strings::StrCat(kFullNameRandomHex, kPipe, prefix, kColon, name);
}
Status ExtractIteratorPrefix(StringPiece key, string* prefix) {
if (!absl::StartsWith(key, data::kFullNameRandomHex)) {
return errors::InvalidArgument("Key: ", key,
" was not generated using full_name.");
}
std::vector<string> split_keys = str_util::Split(key, data::kPipe);
if (split_keys.size() != 2) {
return errors::InvalidArgument("Key: ", key,
" was not generated using full_name.");
}
string real_key = split_keys[1];
const int pos = real_key.rfind(kColon);
*prefix = real_key.substr(0, pos);
return absl::OkStatus();
}
Status GetDatasetFromVariantTensor(const Tensor& tensor,
DatasetBase** out_dataset) {
if (!(tensor.dtype() == DT_VARIANT &&
TensorShapeUtils::IsScalar(tensor.shape()))) {
return errors::InvalidArgument(
"Dataset tensor must be a scalar of dtype DT_VARIANT.");
}
const Variant& variant = tensor.scalar<Variant>()();
const DatasetVariantWrapper* wrapper = variant.get<DatasetVariantWrapper>();
if (wrapper == nullptr) {
return errors::InvalidArgument("Tensor must be a Dataset object.");
}
*out_dataset = wrapper->get();
if (*out_dataset == nullptr) {
return errors::Internal("Read uninitialized Dataset variant.");
}
return absl::OkStatus();
}
Status StoreDatasetInVariantTensor(DatasetBase* dataset, Tensor* tensor) {
if (!(tensor->dtype() == DT_VARIANT &&
TensorShapeUtils::IsScalar(tensor->shape()))) {
return errors::InvalidArgument(
"Dataset tensor must be a scalar of dtype DT_VARIANT.");
}
tensor->scalar<Variant>()() = DatasetVariantWrapper(dataset);
return absl::OkStatus();
}
namespace internal {
#define WARN_PROTO_FIELD_CONFLICT(reflection, field, field_type, src, dst) \
{ \
auto source_value = reflection->Get##field_type(src, field); \
auto destination_value = reflection->Get##field_type(*dst, field); \
if (source_value != destination_value) { \
LOG(WARNING) << "Changing the value of option field " << field->name() \
<< " from " << destination_value << " to " << source_value; \
} \
}
#define WARN_PROTO_ENUM_FIELD_CONFLICT(reflection, field, src, dst) \
{ \
auto source_value = reflection->GetEnum(src, field); \
auto destination_value = reflection->GetEnum(*dst, field); \
if (source_value != destination_value) { \
LOG(WARNING) << "Changing the value of option enum field " \
<< field->name() << " from " \
<< destination_value->full_name() << " to " \
<< source_value->full_name(); \
} \
}
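// Recurses through fields set in both messages and warns about every scalar
// or enum field whose destination value is about to be overwritten.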
void WarnProtoConflicts(const protobuf::Message& src, protobuf::Message* dst) {
std::vector<const protobuf::FieldDescriptor*> set_src;
std::vector<const protobuf::FieldDescriptor*> set_dst;
const protobuf::Reflection* reflection = src.GetReflection();
reflection->ListFields(src, &set_src);
reflection->ListFields(*dst, &set_dst);
std::sort(set_src.begin(), set_src.end());
std::sort(set_dst.begin(), set_dst.end());
std::vector<const protobuf::FieldDescriptor*> in_both;
std::set_intersection(set_src.begin(), set_src.end(), set_dst.begin(),
set_dst.end(), std::back_inserter(in_both));
for (auto field : in_both) {
if (field->name() == "framework_type") {
continue;
}
if (field->type() == protobuf::FieldDescriptor::TYPE_MESSAGE) {
WarnProtoConflicts(reflection->GetMessage(src, field),
reflection->MutableMessage(dst, field));
} else {
switch (field->cpp_type()) {
case protobuf::FieldDescriptor::CPPTYPE_INT32:
WARN_PROTO_FIELD_CONFLICT(reflection, field, Int32, src, dst);
break;
case protobuf::FieldDescriptor::CPPTYPE_INT64:
WARN_PROTO_FIELD_CONFLICT(reflection, field, Int64, src, dst);
break;
case protobuf::FieldDescriptor::CPPTYPE_UINT32:
WARN_PROTO_FIELD_CONFLICT(reflection, field, UInt32, src, dst);
break;
case protobuf::FieldDescriptor::CPPTYPE_UINT64:
WARN_PROTO_FIELD_CONFLICT(reflection, field, UInt64, src, dst);
break;
case protobuf::FieldDescriptor::CPPTYPE_DOUBLE:
WARN_PROTO_FIELD_CONFLICT(reflection, field, Double, src, dst);
break;
case protobuf::FieldDescriptor::CPPTYPE_FLOAT:
WARN_PROTO_FIELD_CONFLICT(reflection, field, Float, src, dst);
break;
case protobuf::FieldDescriptor::CPPTYPE_BOOL:
WARN_PROTO_FIELD_CONFLICT(reflection, field, Bool, src, dst);
break;
case protobuf::FieldDescriptor::CPPTYPE_ENUM:
WARN_PROTO_ENUM_FIELD_CONFLICT(reflection, field, src, dst);
break;
default: {
LOG(ERROR) << "Unrecognized proto type for field "
<< field->full_name();
}
}
}
}
}
#undef WARN_PROTO_ENUM_FIELD_CONFLICT
#undef WARN_PROTO_FIELD_CONFLICT
void MergeOptions(const protobuf::Message& source,
protobuf::Message* destination) {
WarnProtoConflicts(source, destination);
destination->MergeFrom(source);
}
void MergeOptions(const protobuf::MessageLite& source,
protobuf::MessageLite* destination) {
destination->CheckTypeAndMergeFrom(source);
}
}
void DatasetBase::Initialize(const Metadata& metadata) {
Status s = ComputeNumSources();
if (!s.ok()) {
LOG_EVERY_N_SEC(ERROR, 10) << s;
}
s = MergeOptionsFromInputs();
if (!s.ok()) {
LOG_EVERY_N_SEC(ERROR, 10) << s;
}
metadata_ = metadata;
if (metadata_.name() == "") {
static std::atomic<int64_t> id_counter(0);
*metadata_.mutable_name() =
strings::StrCat(type_string(), ":", id_counter.fetch_add(1));
}
}
Status DatasetBase::ComputeNumSources() {
std::vector<const DatasetBase*> inputs;
Status s = InputDatasets(&inputs);
if (errors::IsUnimplemented(s)) {
return s;
}
if (num_sources_ >= 0) {
return absl::OkStatus();
}
num_sources_ = 0;
if (inputs.empty()) {
num_sources_ = 1;
return absl::OkStatus();
}
for (const auto& input : inputs) {
if (input->num_sources() < 0) {
return errors::FailedPrecondition(
"Cannot compute input sources for dataset of type ", type_string(),
", because sources could not be computed for input dataset of type ",
input->type_string());
}
num_sources_ += input->num_sources();
}
return absl::OkStatus();
}
Status DatasetBase::CheckRandomAccessCompatible(const int64 index) const {
CardinalityOptions options;
options.set_compute_level(CardinalityOptions::CARDINALITY_COMPUTE_MODERATE);
int64 cardinality = Cardinality(options);
if (cardinality == kInfiniteCardinality ||
cardinality == kUnknownCardinality) {
return tensorflow::errors::FailedPrecondition(
"Dataset of type ", this->DebugString(), " has ",
cardinality == kInfiniteCardinality ? "infinite" : "unknown",
" cardinality, which does not support random access.");
}
if (index < 0 || index >= cardinality) {
return errors::OutOfRange("Index out of range [0, ", cardinality,
"):", index);
}
return absl::OkStatus();
}
Status DatasetBase::Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const {
return errors::Unimplemented("Random access is not implemented for dataset ",
DebugString());
}
Status DatasetBase::Get(AnyContext ctx, int64 index,
std::vector<Tensor>* out_tensors) const {
return errors::Unimplemented("Random access is not implemented for dataset ",
DebugString());
}
absl::StatusOr<DatasetBase*> DatasetBase::Finalize(
OpKernelContext* ctx,
std::function<absl::StatusOr<core::RefCountPtr<DatasetBase>>()>
make_finalized_dataset) const {
mutex_lock l(mu_);
if (!finalized_dataset_) {
TF_ASSIGN_OR_RETURN(finalized_dataset_, make_finalized_dataset());
}
return finalized_dataset_.get();
}
Status DatasetBase::MergeOptionsFromInputs() {
std::vector<const DatasetBase*> inputs;
Status s = InputDatasets(&inputs);
if (errors::IsUnimplemented(s)) {
return s;
}
if (inputs.empty()) {
return absl::OkStatus();
}
Options merged_options = inputs[0]->options_;
for (int i = 1; i < inputs.size(); ++i) {
internal::MergeOptions(inputs[i]->options_, &merged_options);
}
internal::MergeOptions(options_, &merged_options);
options_ = merged_options;
return absl::OkStatus();
}
Status DatasetBase::MakeIterator(
IteratorContext* ctx, const IteratorBase* parent,
const string& output_prefix,
std::unique_ptr<IteratorBase>* iterator) const {
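  // OptionsDataset and FinalizeDataset are pass-through at iteration time, so
  // create the iterator directly from their single input dataset.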
if (type_string() == "OptionsDataset" || type_string() == "FinalizeDataset") {
std::vector<const DatasetBase*> inputs;
    TF_RETURN_IF_ERROR(InputDatasets(&inputs));
return inputs[0]->MakeIterator(ctx, parent, output_prefix, iterator);
}
tsl::profiler::TraceMe traceme(
[&] {
return tsl::profiler::TraceMeEncode(
strings::StrCat("MakeIterator::", type_string()), {});
},
tsl::profiler::TraceMeLevel::kInfo);
*iterator = MakeIteratorInternal(output_prefix);
Status s = (*iterator)->InitializeBase(ctx, parent);
if (s.ok()) {
s.Update((*iterator)->Initialize(ctx));
ctx->SaveCheckpoint(iterator->get());
}
if (!s.ok()) {
iterator->reset();
}
return s;
}
Status DatasetBase::MakeSplitProviders(
std::vector<std::unique_ptr<SplitProvider>>* split_providers) const {
std::vector<const DatasetBase*> inputs;
Status s = InputDatasets(&inputs);
if (errors::IsUnimplemented(s)) {
return errors::Unimplemented(
"Cannot create split providers for dataset of type ", type_string(),
", because the dataset implements neither `InputDatasets` nor "
"`MakeSplitProvider`.");
}
if (inputs.size() != 1) {
return errors::Unimplemented(
"Cannot create split providers for dataset of type ", type_string(),
", because the dataset is not unary (instead having arity ",
inputs.size(),
"), and no custom implementation of `MakeSplitProvider` is defined.");
}
return inputs[0]->MakeSplitProviders(split_providers);
}
std::optional<int64_t> DatasetBase::GetEstimatedElementSize() const {
const auto& shapes = output_shapes();
const auto& dtypes = output_dtypes();
if (shapes.size() != dtypes.size()) {
LOG(ERROR) << "This should not happen because the sizes of output_shapes() "
"and output_dtypes() should always be "
"the same.";
return std::nullopt;
}
size_t num_outputs = shapes.size();
int64_t element_size = 0;
for (int i = 0; i < num_outputs; ++i) {
const auto& partial_shape = shapes[i];
const auto& dtype = dtypes[i];
auto num_elements = partial_shape.num_elements();
if (num_elements == -1) {
return std::nullopt;
}
element_size += num_elements * DataTypeSize(dtype);
}
return element_size;
}
int64_t DatasetBase::Cardinality() const {
mutex_lock l(cardinality_mu_);
if (cardinality_ == kUnknownCardinality) {
CardinalityOptions options;
cardinality_ = CardinalityInternal(options);
}
return cardinality_;
}
int64_t DatasetBase::Cardinality(CardinalityOptions options) const {
mutex_lock l(cardinality_mu_);
if (cardinality_ == kUnknownCardinality) {
cardinality_ = CardinalityInternal(options);
}
return cardinality_;
}
Status DatasetBase::InputDatasets(
std::vector<const DatasetBase*>* inputs) const {
return errors::Unimplemented(
"Cannot compute input sources for dataset of type ", type_string(),
", because the dataset does not implement `InputDatasets`. To fix this, "
"your dataset should override the `InputDatasets` method. If it is a "
"source dataset, it should return empty inputs.");
}
Status DatasetBase::DatasetGraphDefBuilder::AddInputDataset(
SerializationContext* ctx, const DatasetBase* dataset, Node** output) {
Status status = dataset->AsGraphDefInternal(ctx, this, output);
if (ctx->is_graph_rewrite()) {
if (status.ok()) {
(*output)->AddAttr(kCardinalityAttrForRewrite, dataset->Cardinality());
} else if (errors::IsUnimplemented(status)) {
Tensor t(DT_VARIANT, TensorShape({}));
dataset->Ref();
TF_RETURN_IF_ERROR(
StoreDatasetInVariantTensor(const_cast<DatasetBase*>(dataset), &t));
TF_RETURN_IF_ERROR(AddPlaceholder(t, output));
DCHECK_NE(ctx->input_list(), nullptr);
ctx->input_list()->emplace_back((*output)->name(), std::move(t));
LOG_EVERY_N_SEC(WARNING, 30)
<< "Input of " << dataset->DebugString()
<< " will not be optimized because the dataset does not implement "
"the "
"AsGraphDefInternal() method needed to apply optimizations.";
return absl::OkStatus();
}
}
return status;
}
Status DatasetBase::DatasetGraphDefBuilder::AddDatasetOrTensor(
SerializationContext* ctx, const Tensor& t, Node** output) {
if (t.dtype() == DT_VARIANT) {
Status s = AddDatasetOrTensorHelper(ctx, t, output);
if (s.ok()) {
return s;
}
}
if (t.dtype() == DT_RESOURCE && !ctx->is_graph_rewrite()) {
Status s = AddResourceHelper(ctx, t, output);
if (!errors::IsUnimplemented(s)) {
return s;
}
}
return AddTensor(t, output);
}
Status DatasetBase::DatasetGraphDefBuilder::AddIdentity(
SerializationContext* ctx, const std::string& name_prefix, Node** input,
Node** output) {
*output =
ops::UnaryOp("Identity", *input,
builder()->opts().WithName(UniqueNodeName(name_prefix)));
return absl::OkStatus();
}
Status DatasetBase::DatasetGraphDefBuilder::AddDatasetOrTensorHelper(
SerializationContext* ctx, const Tensor& t, Node** output) {
if (t.dims() == 0) {
DatasetBase* dataset;
TF_RETURN_IF_ERROR(GetDatasetFromVariantTensor(t, &dataset));
return AddInputDataset(ctx, dataset, output);
}
std::vector<NodeBuilder::NodeOut> nodes;
for (int i = 0; i < t.dim_size(0); ++i) {
Node* node;
TF_RETURN_IF_ERROR(AddDatasetOrTensorHelper(ctx, t.SubSlice(i), &node));
nodes.emplace_back(node);
}
auto op_name = "Pack";
auto opts = builder()->opts();
NodeBuilder node_builder(opts.GetNameForOp(op_name), op_name,
opts.op_registry());
node_builder.Input(std::move(nodes));
*output = opts.FinalizeBuilder(&node_builder);
return absl::OkStatus();
}
Status DatasetBase::DatasetGraphDefBuilder::AddResourceHelper(
SerializationContext* ctx, const Tensor& t, Node** output) {
if (t.NumElements() == 0) {
return errors::InvalidArgument("Empty resouce handle");
}
const ResourceHandle& handle = t.flat<ResourceHandle>()(0);
if (ctx->device_name() != handle.device()) {
return errors::InvalidArgument("Trying to access resource ", handle.name(),
" located in device ", handle.device(),
" from device ", ctx->device_name());
}
ResourceBase* resource;
TF_RETURN_IF_ERROR(ctx->resource_mgr()->Lookup(handle, &resource));
core::ScopedUnref unref(resource);
return resource->AsGraphDef(builder(), output);
}
DatasetBaseIterator::DatasetBaseIterator(const BaseParams& params)
: params_(params) {
params_.dataset->Ref();
VLOG(2) << prefix() << " constructor";
strings::StrAppend(&traceme_metadata_, "name=", dataset()->metadata().name());
strings::StrAppend(&traceme_metadata_, ",shapes=");
auto& shapes = output_shapes();
for (int i = 0; i < shapes.size(); ++i) {
if (i > 0) {
strings::StrAppend(&traceme_metadata_, " ");
}
strings::StrAppend(&traceme_metadata_, shapes.at(i).DebugString());
}
strings::StrAppend(&traceme_metadata_, ",types=");
auto& types = output_dtypes();
for (int i = 0; i < types.size(); ++i) {
if (i > 0) {
strings::StrAppend(&traceme_metadata_, " ");
}
strings::StrAppend(&traceme_metadata_, DataTypeString(types.at(i)));
}
}
DatasetBaseIterator::~DatasetBaseIterator() {
VLOG(2) << prefix() << " destructor";
params_.dataset->Unref();
}
string DatasetBaseIterator::BuildTraceMeName() {
string result =
strings::StrCat(params_.prefix, "#", traceme_metadata_, ",id=", id_);
if (parent_) {
strings::StrAppend(&result, ",parent_id=", parent_id_);
}
TraceMeMetadata metadata = GetTraceMeMetadata();
for (const auto& pair : metadata) {
strings::StrAppend(&result, ",", pair.first, "=", pair.second);
}
if (model_node() != nullptr) {
if (model_node()->buffered_elements() > 0) {
strings::StrAppend(
&result, ",buffered_elements=",
static_cast<long long>(model_node()->buffered_elements()));
strings::StrAppend(
&result, ",buffered_bytes_MB=",
static_cast<long long>(
static_cast<double>(model_node()->buffered_bytes()) * 1e-6));
}
}
strings::StrAppend(&result, "#");
return result;
}
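// Fetches the next element, accounting processing time to the model's node,
// saving the iterator state into the context checkpoint, and converting an
// unexpected OutOfRange status into an Internal error.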
Status DatasetBaseIterator::GetNext(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) {
activity_watcher::ActivityScope activity_scope([&]() {
activity_watcher::Activity::Attributes attributes;
attributes["iterator_prefix"] = prefix();
return std::make_unique<activity_watcher::Activity>(
"Iterator::GetNext", activity_watcher::ActivityCategory::kDatasetOp,
std::move(attributes));
});
tsl::profiler::TraceMe activity([&] { return BuildTraceMeName(); },
tsl::profiler::TraceMeLevel::kInfo);
DVLOG(3) << prefix() << " GetNext enter";
auto model = ctx->model();
bool output_was_recording =
node_ && node_->output() && node_->output()->is_recording();
if (collect_resource_usage(ctx)) {
int64_t now_nanos = EnvTime::NowNanos();
if (output_was_recording) {
node_->output()->record_stop(now_nanos);
}
node_->record_start(now_nanos);
}
out_tensors->clear();
Status s = GetNextInternal(ctx, out_tensors, end_of_sequence);
ctx->SaveCheckpoint(this);
if (!SymbolicCheckpointCompatible()) {
ctx->UpdateCheckpointStatus([this]() {
return errors::Unimplemented(dataset()->type_string(),
" does not support symbolic checkpointing.");
});
}
if (TF_PREDICT_TRUE(s.ok())) {
if (TF_PREDICT_TRUE(!*end_of_sequence)) {
if (TF_PREDICT_FALSE(out_tensors->size() !=
dataset()->output_dtypes().size())) {
return errors::Internal("Expected ", dataset()->output_dtypes().size(),
" components but got ", out_tensors->size(),
".");
}
RecordElement(ctx, out_tensors);
} else {
out_tensors->clear();
}
}
if (collect_resource_usage(ctx)) {
int64_t now_nanos = EnvTime::NowNanos();
node_->record_stop(now_nanos);
if (output_was_recording) {
node_->output()->record_start(now_nanos);
}
}
if (TF_PREDICT_FALSE(errors::IsOutOfRange(s))) {
s = errors::Internal("Iterator \"", params_.prefix,
"\" returned `OutOfRange`. This indicates an "
"implementation error as `OutOfRange` errors are not "
"expected to be returned here. Original message: ",
s.message());
LOG(ERROR) << s;
}
DVLOG(3) << prefix() << " GetNext exit";
return s;
}
Status DatasetBaseIterator::Skip(IteratorContext* ctx, int num_to_skip,
bool* end_of_sequence, int* num_skipped) {
tsl::profiler::TraceMe activity([&] { return BuildTraceMeName(); },
tsl::profiler::TraceMeLevel::kInfo);
DVLOG(3) << prefix() << " Skip enter";
auto model = ctx->model();
bool output_was_recording =
node_ && node_->output() && node_->output()->is_recording();
if (collect_resource_usage(ctx)) {
int64_t now_nanos = EnvTime::NowNanos();
auto output = node_->output();
if (output_was_recording) {
output->record_stop(now_nanos);
}
node_->record_start(now_nanos);
}
Status s = SkipInternal(ctx, num_to_skip, end_of_sequence, num_skipped);
if (collect_resource_usage(ctx)) {
int64_t now_nanos = EnvTime::NowNanos();
node_->record_stop(now_nanos);
auto output = node_->output();
if (output_was_recording) {
output->record_start(now_nanos);
}
}
if (TF_PREDICT_FALSE(errors::IsOutOfRange(s))) {
s = errors::Internal("Iterator \"", params_.prefix,
"\" returned `OutOfRange`. This indicates an "
"implementation error as `OutOfRange` errors are not "
"expected to be returned here. Original message: ",
s.message());
LOG(ERROR) << s;
}
DVLOG(3) << prefix() << " Skip exit";
return s;
}
Status DatasetBaseIterator::SkipInternal(IteratorContext* ctx, int num_to_skip,
bool* end_of_sequence,
int* num_skipped) {
*num_skipped = 0;
for (int i = 0; i < num_to_skip; ++i) {
std::vector<Tensor> out_tensors;
TF_RETURN_IF_ERROR(GetNextInternal(ctx, &out_tensors, end_of_sequence));
if (*end_of_sequence) {
return absl::OkStatus();
}
RecordElement(ctx, &out_tensors);
(*num_skipped)++;
}
return absl::OkStatus();
}
void DatasetOpKernel::Compute(OpKernelContext* ctx) {
DatasetBase* dataset = nullptr;
MakeDataset(ctx, &dataset);
if (ctx->status().ok()) {
Tensor* output = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &output));
OP_REQUIRES_OK(ctx, StoreDatasetInVariantTensor(dataset, output));
if (ctx->stack_trace().has_value() && VLOG_IS_ON(4)) {
VLOG(4) << "Dataset " << dataset->type_string()
<< " created using the following stack trace:";
for (const auto& stack_frame : ctx->stack_trace()->ToStackFrames(
{}, {}, false, -1)) {
VLOG(4) << stack_frame.file_name << ":" << stack_frame.line_number
<< " in " << stack_frame.function_name << "()";
}
}
dataset->Initialize(metadata_);
}
}
string DatasetOpKernel::TraceString(const OpKernelContext& ctx,
bool verbose) const {
return tsl::profiler::TraceMeOp(name_view(), type_string_view());
}
bool DatasetOpKernel::IsDatasetOp(const OpDef& op_def) {
if (op_def.output_arg_size() != 1) return false;
if (op_def.output_arg(0).type() != DT_VARIANT) return false;
absl::string_view op_name = op_def.name();
std::vector<std::string> v1, v2;
if (absl::StartsWith(op_name, "__wrapped__")) {
v1 = absl::StrSplit(op_name, "__wrapped__", absl::SkipEmpty());
if (v1.empty()) return false;
v2 = absl::StrSplit(v1[0], "_", absl::SkipEmpty());
op_name = v2.empty() ? v1[0] : v2[0];
}
if (op_name == "DatasetFromGraph") return true;
if (absl::EndsWith(op_name, "Dataset")) return true;
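  // Otherwise, accept versioned op names of the form "<Name>DatasetV<digits>".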
size_t index = op_name.length() - 1;
  while (index > 0 && isdigit(op_name[index])) {
index--;
}
constexpr absl::string_view kDatasetPrefix = "DatasetV";
constexpr absl::string_view::size_type kPrefixLength = kDatasetPrefix.size();
if (index < kPrefixLength - 1 || index == op_name.length() - 1) return false;
return op_name.substr(index - kPrefixLength + 1, kPrefixLength) ==
kDatasetPrefix;
}
void UnaryDatasetOpKernel::MakeDataset(OpKernelContext* ctx,
DatasetBase** output) {
DatasetBase* input;
OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(0), &input));
MakeDataset(ctx, input, output);
}
void BinaryDatasetOpKernel::MakeDataset(OpKernelContext* ctx,
DatasetBase** output) {
DatasetBase* input;
OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(0), &input));
DatasetBase* another_input;
OP_REQUIRES_OK(ctx,
GetDatasetFromVariantTensor(ctx->input(1), &another_input));
MakeDataset(ctx, input, another_input, output);
}
const char DatasetBase::kDatasetGraphKey[] = "_DATASET_GRAPH";
const char DatasetBase::kDatasetGraphOutputNodeKey[] =
"_DATASET_GRAPH_OUTPUT_NODE";
BackgroundWorker::BackgroundWorker(Env* env, const char* name)
: env_(env), name_(name) {}
BackgroundWorker::~BackgroundWorker() {
{
mutex_lock l(mu_);
cancelled_ = true;
}
cond_var_.notify_one();
thread_.reset();
}
void BackgroundWorker::Schedule(std::function<void()> work_item) {
{
mutex_lock l(mu_);
if (!thread_) {
thread_ = absl::WrapUnique(env_->StartThread(
{} , name_, [this]() { WorkerLoop(); }));
}
work_queue_.push_back(std::move(work_item));
}
cond_var_.notify_one();
}
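// Runs on the background thread: waits for work, executes items in FIFO
// order, and exits once the worker is cancelled.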
void BackgroundWorker::WorkerLoop() {
tensorflow::ResourceTagger tag(kTFDataResourceTag, "Background");
while (true) {
std::function<void()> work_item = nullptr;
{
mutex_lock l(mu_);
while (!cancelled_ && work_queue_.empty()) {
cond_var_.wait(l);
}
if (cancelled_) {
return;
}
DCHECK(!work_queue_.empty());
work_item = std::move(work_queue_.front());
work_queue_.pop_front();
}
DCHECK(work_item != nullptr);
work_item();
}
}
namespace {
class RunnerImpl : public Runner {
public:
void Run(const std::function<void()>& f) override {
tensorflow::ResourceTagger tag(kTFDataResourceTag, "Runner");
f();
PreventTailCall();
}
private:
virtual void PreventTailCall() {}
};
}
Runner* Runner::get() {
static Runner* singleton = new RunnerImpl;
return singleton;
}
}
} | #include "tensorflow/core/framework/dataset.h"
#include <memory>
#include <tuple>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
TEST(DatasetTest, FullName) {
EXPECT_EQ(FullName("prefix", "name"),
"60d899aa0d8ce4351e7c3b419e92d25b|prefix:name");
}
enum DataTypeTest {
_tf_int_32,
_tf_int_64,
_tf_float_,
_tf_double_,
_tf_string_
};
struct DatasetTestParam {
const DataTypeTest type;
std::function<std::vector<Tensor>()> tensor_factory;
const int64_t expected_bytes;
};
class DatasetTestTotalBytes
: public ::testing::TestWithParam<DatasetTestParam> {};
TEST_P(DatasetTestTotalBytes, TestTotalBytes) {
const DatasetTestParam& test_case = GetParam();
if (test_case.type == _tf_string_) {
EXPECT_LE(GetTotalBytes(test_case.tensor_factory()),
test_case.expected_bytes);
} else {
EXPECT_EQ(GetTotalBytes(test_case.tensor_factory()),
test_case.expected_bytes);
}
}
std::vector<Tensor> tensor_tf_int_32s() {
return {test::AsTensor<int32>({1, 2, 3, 4, 5}),
test::AsTensor<int32>({1, 2, 3, 4})};
}
std::vector<Tensor> tensor_tf_int_64s() {
return {test::AsTensor<int64_t>({1, 2, 3, 4, 5}),
test::AsTensor<int64_t>({10, 12})};
}
std::vector<Tensor> tensor_tf_float_s() {
return {test::AsTensor<float>({1.0, 2.0, 3.0, 4.0})};
}
std::vector<Tensor> tensor_tf_double_s() {
return {test::AsTensor<double>({100.0}), test::AsTensor<double>({200.0}),
test::AsTensor<double>({400.0}), test::AsTensor<double>({800.0})};
}
const tstring str = "test string";
std::vector<Tensor> tensor_strs() { return {test::AsTensor<tstring>({str})}; }
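// expected_bytes is element_count * sizeof(element_type); for strings it is
// only an upper bound, so the test above uses EXPECT_LE instead of EXPECT_EQ.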
INSTANTIATE_TEST_SUITE_P(
DatasetTestTotalBytes, DatasetTestTotalBytes,
::testing::ValuesIn(std::vector<DatasetTestParam>{
{_tf_int_32, tensor_tf_int_32s, 4 * 9 },
{_tf_int_64, tensor_tf_int_64s, 8 * 7 },
{_tf_float_, tensor_tf_float_s, 4 * 4 },
{_tf_double_, tensor_tf_double_s, 8 * 4 },
{_tf_string_, tensor_strs,
static_cast<int64_t>(sizeof(str) + str.size()) }}));
struct MergeOptionsTestParam {
const std::string source;
const std::string destination;
const std::string expected;
};
class MergeOptionsTest
: public ::testing::TestWithParam<MergeOptionsTestParam> {};
TEST_P(MergeOptionsTest, MergeOptions) {
const MergeOptionsTestParam& test_case = GetParam();
Options source;
CHECK(tensorflow::protobuf::TextFormat::ParseFromString(test_case.source,
&source));
Options destination;
CHECK(tensorflow::protobuf::TextFormat::ParseFromString(test_case.destination,
&destination));
Options expected;
CHECK(tensorflow::protobuf::TextFormat::ParseFromString(test_case.expected,
&expected));
internal::MergeOptions(source, &destination);
EXPECT_EQ(expected.SerializeAsString(), destination.SerializeAsString());
}
INSTANTIATE_TEST_SUITE_P(
MergeOptionsTest, MergeOptionsTest,
::testing::ValuesIn(std::vector<MergeOptionsTestParam>{
{"deterministic: false", "",
"deterministic: false"},
{"deterministic: false",
"deterministic: false",
"deterministic: false"},
{"deterministic: false",
"deterministic: true",
"deterministic: false"},
{"external_state_policy: POLICY_IGNORE",
"external_state_policy: POLICY_FAIL",
"external_state_policy: POLICY_IGNORE"}}));
TEST(DatasetTest, IsDatasetOp) {
OpDef op_def;
EXPECT_FALSE(DatasetOpKernel::IsDatasetOp(op_def));
op_def.add_output_arg()->set_type(DT_STRING);
EXPECT_FALSE(DatasetOpKernel::IsDatasetOp(op_def));
op_def.mutable_output_arg(0)->set_type(DT_VARIANT);
op_def.set_name("Identity");
EXPECT_FALSE(DatasetOpKernel::IsDatasetOp(op_def));
for (const auto& name : {"Dataset", "RangeDataset", "MapDatasetV1",
"ParallelInterleaveDatasetV42",
"DataServiceDatasetV1000", "DatasetFromGraph"}) {
op_def.set_name(name);
EXPECT_TRUE(DatasetOpKernel::IsDatasetOp(op_def));
}
}
TEST(DatasetTest, IdRegistry) {
MemoryCheckpoint::IdRegistry id_registry;
auto id_1 = id_registry.Add("foo", "key_1");
auto id_2 = id_registry.Add("foo:bar", "key_2");
auto id_3 = id_registry.Add("foo:bar:baz", "key_3");
auto [prefix_1, key_1] = id_registry.Get(id_1);
EXPECT_EQ(prefix_1, "foo");
EXPECT_EQ(key_1, "key_1");
auto [prefix_2, key_2] = id_registry.Get(id_2);
EXPECT_EQ(prefix_2, "foo:bar");
EXPECT_EQ(key_2, "key_2");
auto [prefix_3, key_3] = id_registry.Get(id_3);
EXPECT_EQ(prefix_3, "foo:bar:baz");
EXPECT_EQ(key_3, "key_3");
auto matching_ids = id_registry.GetMatchingIds("hello");
EXPECT_EQ(matching_ids.size(), 0);
matching_ids = id_registry.GetMatchingIds("foo:bar:baz");
EXPECT_EQ(matching_ids.size(), 1);
matching_ids = id_registry.GetMatchingIds("foo:bar");
EXPECT_EQ(matching_ids.size(), 2);
matching_ids = id_registry.GetMatchingIds("foo");
EXPECT_EQ(matching_ids.size(), 3);
matching_ids = id_registry.GetMatchingIds("f");
EXPECT_EQ(matching_ids.size(), 3);
absl::flat_hash_set<int64_t> matching_ids_set(matching_ids.begin(),
matching_ids.end());
EXPECT_TRUE(matching_ids_set.contains(id_1));
EXPECT_TRUE(matching_ids_set.contains(id_2));
EXPECT_TRUE(matching_ids_set.contains(id_3));
id_registry.RemoveIds(matching_ids);
matching_ids = id_registry.GetMatchingIds("foo");
EXPECT_EQ(matching_ids.size(), 0);
}
TEST(DatasetTest, MemoryCheckpointWrites) {
std::shared_ptr<MemoryCheckpoint::IdRegistry> id_registry =
std::make_shared<MemoryCheckpoint::IdRegistry>();
MemoryCheckpoint memory_checkpoint(id_registry);
Tensor input_tensor(DT_FLOAT, {1});
input_tensor.flat<float>()(0) = 2.0f;
TF_EXPECT_OK(memory_checkpoint.WriteScalar("name_foo", "key_bar", 5));
TF_EXPECT_OK(
memory_checkpoint.WriteTensor("name_corgi", "key_baz", input_tensor));
auto matching_ids = id_registry->GetMatchingIds("name_foo");
EXPECT_EQ(matching_ids.size(), 1);
auto id = matching_ids.at(0);
auto [_, key] = id_registry->Get(id);
EXPECT_EQ(key, "key_bar");
matching_ids = id_registry->GetMatchingIds("name_corgi");
EXPECT_EQ(matching_ids.size(), 1);
id = matching_ids.at(0);
std::tie(_, key) = id_registry->Get(id);
EXPECT_EQ(key, "key_baz");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/dataset.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/dataset_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9f7a8b01-7a36-410b-8f64-3545915fd10c | cpp | google/tensorstore | downsample_array | tensorstore/driver/downsample/downsample_array.cc | tensorstore/driver/downsample/downsample_array_test.cc | #include "tensorstore/driver/downsample/downsample_array.h"
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/downsample_method.h"
#include "tensorstore/driver/downsample/downsample_nditerable.h"
#include "tensorstore/driver/downsample/downsample_util.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_array.h"
#include "tensorstore/internal/nditerable_copy.h"
#include "tensorstore/internal/nditerable_transformed_array.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal_downsample {
namespace {
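// Checks that `downsampled_domain` is exactly what DownsampleInterval produces
// from `base_domain` and `downsample_factors` for the given method.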
absl::Status ValidateDownsampleDomain(BoxView<> base_domain,
BoxView<> downsampled_domain,
span<const Index> downsample_factors,
DownsampleMethod method) {
const DimensionIndex rank = base_domain.rank();
if (rank != downsampled_domain.rank()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Cannot downsample domain ", base_domain, " to domain ",
downsampled_domain, " with different rank"));
}
if (rank != downsample_factors.size()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Cannot downsample domain ", base_domain, " with downsample factors ",
downsample_factors, " of different rank"));
}
for (DimensionIndex i = 0; i < rank; ++i) {
const auto expected_interval =
DownsampleInterval(base_domain[i], downsample_factors[i], method);
if (expected_interval != downsampled_domain[i]) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Cannot downsample array with domain ", base_domain, " by factors ",
downsample_factors, " with method ", method, " to array with domain ",
downsampled_domain, ": expected target dimension ", i,
" to have domain ", expected_interval));
}
}
return absl::OkStatus();
}
}
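// Downsamples `source` into the preallocated `target`. kStride reduces to a
// strided copy; all other methods go through the NDIterable downsampling
// machinery.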
absl::Status DownsampleArray(OffsetArrayView<const void> source,
OffsetArrayView<void> target,
span<const Index> downsample_factors,
DownsampleMethod method) {
if (source.dtype() != target.dtype()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Source data type (", source.dtype(),
") does not match target data type (", target.dtype(), ")"));
}
TENSORSTORE_RETURN_IF_ERROR(ValidateDownsampleMethod(source.dtype(), method));
TENSORSTORE_RETURN_IF_ERROR(ValidateDownsampleDomain(
source.domain(), target.domain(), downsample_factors, method));
if (method == DownsampleMethod::kStride) {
return CopyTransformedArray(
source | tensorstore::AllDims().Stride(downsample_factors), target);
}
internal::DefaultNDIterableArena arena;
auto base_iterable = GetArrayNDIterable(UnownedToShared(source), arena);
auto target_iterable = GetArrayNDIterable(UnownedToShared(target), arena);
auto downsampled_iterable = DownsampleNDIterable(
std::move(base_iterable), source.domain(), downsample_factors, method,
downsample_factors.size(), arena);
internal::NDIterableCopier copier(*downsampled_iterable, *target_iterable,
target.shape(), skip_repeated_elements,
arena);
return copier.Copy();
}
Result<SharedOffsetArray<void>> DownsampleArray(
OffsetArrayView<const void> source, span<const Index> downsample_factors,
DownsampleMethod method) {
SharedOffsetArray<void> target;
target.layout().set_rank(source.rank());
DownsampleBounds(source.domain(),
MutableBoxView<>(target.origin(), target.shape()),
downsample_factors, method);
target.element_pointer() = AllocateArrayElementsLike<void>(
StridedLayoutView<dynamic_rank, offset_origin>(
target.rank(), target.origin().data(), target.shape().data(),
source.byte_strides().data()),
target.byte_strides().data(), skip_repeated_elements, default_init,
source.dtype());
TENSORSTORE_RETURN_IF_ERROR(
DownsampleArray(source, target, downsample_factors, method));
return target;
}
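// Same as DownsampleArray above, but accepts index-transformed views of the
// source and target.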
absl::Status DownsampleTransformedArray(TransformedArrayView<const void> source,
TransformedArrayView<void> target,
span<const Index> downsample_factors,
DownsampleMethod method) {
if (source.dtype() != target.dtype()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Source data type (", source.dtype(),
") does not match target data type (", target.dtype(), ")"));
}
TENSORSTORE_RETURN_IF_ERROR(ValidateDownsampleMethod(source.dtype(), method));
TENSORSTORE_RETURN_IF_ERROR(
ValidateDownsampleDomain(source.domain().box(), target.domain().box(),
downsample_factors, method));
if (method == DownsampleMethod::kStride) {
return CopyTransformedArray(
std::move(source) | tensorstore::AllDims().Stride(downsample_factors),
target);
}
internal::DefaultNDIterableArena arena;
TENSORSTORE_ASSIGN_OR_RETURN(
auto base_iterable,
GetTransformedArrayNDIterable(UnownedToShared(source), arena));
TENSORSTORE_ASSIGN_OR_RETURN(
auto target_iterable,
GetTransformedArrayNDIterable(UnownedToShared(target), arena));
auto downsampled_iterable = DownsampleNDIterable(
std::move(base_iterable), source.domain().box(), downsample_factors,
method, downsample_factors.size(), arena);
internal::NDIterableCopier copier(*downsampled_iterable, *target_iterable,
target.shape(), skip_repeated_elements,
arena);
return copier.Copy();
}
Result<SharedOffsetArray<void>> DownsampleTransformedArray(
TransformedArrayView<const void> source,
span<const Index> downsample_factors, DownsampleMethod method) {
SharedOffsetArray<void> target;
target.layout().set_rank(source.rank());
DownsampleBounds(source.domain().box(),
MutableBoxView<>(target.origin(), target.shape()),
downsample_factors, method);
target =
AllocateArray(target.domain(), c_order, default_init, source.dtype());
TENSORSTORE_RETURN_IF_ERROR(DownsampleTransformedArray(
source, TransformedArray(target), downsample_factors, method));
return target;
}
}
} | #include "tensorstore/driver/downsample/downsample_array.h"
#include <stdint.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
#include "tensorstore/array.h"
#include "tensorstore/array_testutil.h"
#include "tensorstore/data_type.h"
#include "tensorstore/downsample_method.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::Dims;
using ::tensorstore::DownsampleMethod;
using ::tensorstore::Index;
using ::tensorstore::kImplicit;
using ::tensorstore::MakeArray;
using ::tensorstore::MakeOffsetArray;
using ::tensorstore::span;
using ::tensorstore::internal_downsample::DownsampleArray;
using ::tensorstore::internal_downsample::DownsampleTransformedArray;
using ::testing::Optional;
TEST(DownsampleArrayTest, MeanRank0) {
EXPECT_THAT(DownsampleArray(tensorstore::MakeScalarArray<float>(42.0),
span<const Index>(), DownsampleMethod::kMean),
Optional(tensorstore::MakeScalarArray<float>(42.0)));
}
TEST(DownsampleArrayTest, MeanRank1ExactMultiple) {
EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2, 5, 7}),
span<const Index>({2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({1.5, 6})));
EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2, 3, 5, 7, 12}),
span<const Index>({3}), DownsampleMethod::kMean),
Optional(MakeArray<float>({2, 8})));
}
TEST(DownsampleArrayTest, MeanRoundingUint8) {
EXPECT_THAT(DownsampleArray(MakeArray<uint8_t>({253, 254, 254}),
span<const Index>({3}), DownsampleMethod::kMean),
Optional(MakeArray<uint8_t>({254})));
}
TEST(DownsampleArrayTest, MeanRoundingInt16) {
EXPECT_THAT(DownsampleArray(MakeArray<int16_t>({-253, -254, -254}),
span<const Index>({3}), DownsampleMethod::kMean),
Optional(MakeArray<int16_t>({-254})));
}
TEST(DownsampleArrayTest, MeanRoundingToEvenInt16) {
EXPECT_THAT(DownsampleArray(MakeArray<int16_t>({3, 3, 2, 2}),
span<const Index>({4}), DownsampleMethod::kMean),
Optional(MakeArray<int16_t>({2})));
EXPECT_THAT(DownsampleArray(MakeArray<int16_t>({3, 3, 4, 4}),
span<const Index>({4}), DownsampleMethod::kMean),
Optional(MakeArray<int16_t>({4})));
EXPECT_THAT(DownsampleArray(MakeArray<int16_t>({-3, -3, -2, -2}),
span<const Index>({4}), DownsampleMethod::kMean),
Optional(MakeArray<int16_t>({-2})));
EXPECT_THAT(DownsampleArray(MakeArray<int16_t>({-3, -3, -4, -4}),
span<const Index>({4}), DownsampleMethod::kMean),
Optional(MakeArray<int16_t>({-4})));
}
TEST(DownsampleArrayTest, MeanRoundingUint64) {
EXPECT_THAT(DownsampleArray(MakeArray<uint64_t>({253, 254, 254}),
span<const Index>({3}), DownsampleMethod::kMean),
Optional(MakeArray<uint64_t>({254})));
}
TEST(DownsampleArrayTest, MeanRoundingBool) {
EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 0, 1}),
span<const Index>({3}), DownsampleMethod::kMean),
Optional(MakeArray<bool>({0})));
EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 1, 1}),
span<const Index>({3}), DownsampleMethod::kMean),
Optional(MakeArray<bool>({1})));
EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 1, 1, 0}),
span<const Index>({4}), DownsampleMethod::kMean),
Optional(MakeArray<bool>({0})));
}
TEST(DownsampleArrayTest, MeanRank1Offset) {
EXPECT_THAT(DownsampleArray(MakeOffsetArray<float>({1}, {1, 2, 5, 9}),
span<const Index>({2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({1, 3.5, 9})));
}
TEST(DownsampleArrayTest, MeanRank1SingleDownsampledElement) {
EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2}), span<const Index>({2}),
DownsampleMethod::kMean),
Optional(MakeArray<float>({1.5})));
}
TEST(DownsampleArrayTest, MeanRank1NotExactMultiple) {
EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2, 5, 7, 9}),
span<const Index>({2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({1.5, 6, 9})));
EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2, 6, 7, 9}),
span<const Index>({3}), DownsampleMethod::kMean),
Optional(MakeArray<float>({3, 8})));
}
TEST(DownsampleArrayTest, MeanRank1NoDownsampling) {
EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2, 5, 7}),
span<const Index>({1}), DownsampleMethod::kMean),
Optional(MakeArray<float>({1, 2, 5, 7})));
}
TEST(DownsampleArrayTest, MeanRank2SingleDownsampleDim1) {
EXPECT_THAT(
DownsampleArray(MakeArray<float>({
{1, 2, 5, 7},
{5, 6, 15, 25},
}),
span<const Index>({1, 2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({{1.5, 6}, {5.5, 20}})));
}
TEST(DownsampleArrayTest, MeanRank2SingleDownsampleDim0) {
EXPECT_THAT(
DownsampleArray(MakeArray<float>({
{1, 2, 5, 7},
{5, 6, 15, 25},
}),
span<const Index>({2, 1}), DownsampleMethod::kMean),
Optional(MakeArray<float>({{3, 4, 10, 16}})));
}
TEST(DownsampleArrayTest, MeanRank2TwoDownsampleDims) {
EXPECT_THAT(
DownsampleArray(MakeArray<float>({
{1, 2, 5, 7},
{5, 6, 15, 25},
}),
span<const Index>({2, 2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({{3.5, 13.0}})));
}
TEST(DownsampleArrayTest, MeanRank2NotExactMultiple) {
EXPECT_THAT(
DownsampleArray(MakeArray<float>({
{1, 2, 3, 4, 5},
{6, 7, 8, 9, 10},
{11, 12, 13, 14, 15},
}),
span<const Index>({2, 2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({
{4, 6, 7.5},
{11.5, 13.5, 15},
})));
}
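// With origin {3, 8} and factors {2, 3}, the first row block ({3}) and the
// first column block ({8}) are partial; the output origin is
// {floor(3 / 2), floor(8 / 3)} = {1, 2}.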
TEST(DownsampleArrayTest, MeanRank2PartialStartBlock) {
EXPECT_THAT(
DownsampleArray(MakeOffsetArray<float>({3, 8}, {{1, 2, 3, 4, 5},
{6, 7, 8, 9, 10},
{11, 12, 13, 14, 15}}),
span<const Index>({2, 3}), DownsampleMethod::kMean),
Optional(MakeOffsetArray<float>({1, 2}, {{1, 3, 5}, {8.5, 10.5, 12.5}})));
}
TEST(DownsampleArrayTest, MedianRank2PartialStartBlock) {
EXPECT_THAT(
DownsampleArray(MakeOffsetArray<float>({3, 8}, {{1, 2, 3, 4, 5},
{6, 7, 8, 9, 10},
{11, 12, 13, 14, 15}}),
span<const Index>({2, 3}), DownsampleMethod::kMedian),
Optional(MakeOffsetArray<float>({1, 2}, {{1, 3, 5}, {6, 9, 10}})));
}
TEST(DownsampleArrayTest, ModeRank2PartialStartBlock) {
EXPECT_THAT(
DownsampleArray(MakeOffsetArray<float>({3, 8},
{
{1, 2, 3, 3, 5},
{6, 4, 5, 5, 10},
{11, 6, 6, 6, 15},
}),
span<const Index>({2, 3}), DownsampleMethod::kMode),
Optional(MakeOffsetArray<float>({1, 2}, {{1, 3, 5}, {6, 6, 10}})));
}
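// kStride keeps only the elements whose base-domain indices are multiples of
// the downsample factors; partial blocks containing no such element
// contribute no output element.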
TEST(DownsampleArrayTest, StrideRank2PartialEndBlock) {
EXPECT_THAT(
DownsampleArray(MakeOffsetArray<float>({2, 6},
{
{1, 2, 3, 4, 5},
{6, 7, 8, 9, 10},
{11, 12, 13, 14, 15},
}),
span<const Index>({2, 3}), DownsampleMethod::kStride),
Optional(MakeOffsetArray<float>({1, 2}, {
{1, 4},
{11, 14},
})));
}
TEST(DownsampleArrayTest, StrideRank2PartialStartBlock) {
EXPECT_THAT(
DownsampleArray(MakeOffsetArray<float>({3, 8},
{
{1, 2, 3, 4, 5},
{6, 7, 8, 9, 10},
{11, 12, 13, 14, 15},
}),
span<const Index>({2, 3}), DownsampleMethod::kStride),
Optional(MakeOffsetArray<float>({2, 3}, {
{7, 10},
})));
}
TEST(DownsampleArrayTest, MeanRank3ThreeDownsampleDims) {
EXPECT_THAT(
DownsampleArray(MakeArray<float>({{
{1, 2, 3, 4},
{5, 6, 7, 8},
{9, 10, 11, 12},
},
{
{13, 14, 15, 16},
{17, 18, 19, 20},
{21, 22, 23, 24},
},
{
{25, 26, 27, 28},
{29, 30, 31, 32},
{33, 34, 35, 36},
}}),
span<const Index>({2, 2, 2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({{
{9.5, 11.5},
{15.5, 17.5},
},
{
{27.5, 29.5},
{33.5, 35.5},
}})));
}
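// Downsampling a TransformedArray applies to the transformed domain: the
// index transform (here a reversal) takes effect before block averaging.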
TEST(DownsampleArrayTest, MeanRank1ReversedExactMultiple) {
EXPECT_THAT(DownsampleTransformedArray(
(MakeArray<float>({1, 2, 3, 4}) |
Dims(0).TranslateSizedInterval(kImplicit, kImplicit, -1))
.value(),
span<const Index>({2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({3.5, 1.5})));
}
TEST(DownsampleArrayTest, MeanRank1ReversedNotExactMultiple) {
EXPECT_THAT(DownsampleTransformedArray(
(MakeArray<float>({1, 2, 3, 4, 5}) |
Dims(0).TranslateSizedInterval(kImplicit, kImplicit, -1))
.value(),
span<const Index>({2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({4.5, 2.5, 1})));
}
TEST(DownsampleArrayTest, MeanRank2ReversedNotExactMultiple) {
EXPECT_THAT(DownsampleTransformedArray(
(MakeArray<float>({
{1, 2, 3, 4, 5},
{6, 7, 8, 9, 10},
{11, 12, 13, 14, 15},
}) |
Dims(0, 1).TranslateSizedInterval(kImplicit, kImplicit, -1))
.value(),
span<const Index>({2, 2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({
{12, 10, 8.5},
{4.5, 2.5, 1},
})));
}
TEST(DownsampleArrayTest, MinRank1ExactMultiple) {
EXPECT_THAT(DownsampleArray(MakeArray<float>({2, 3, 5, 1}),
span<const Index>({2}), DownsampleMethod::kMin),
Optional(MakeArray<float>({2, 1})));
EXPECT_THAT(DownsampleArray(MakeArray<int>({2, 3, 8, 7, 1, 5}),
span<const Index>({3}), DownsampleMethod::kMin),
Optional(MakeArray<int>({2, 1})));
}
TEST(DownsampleArrayTest, MaxRank1ExactMultiple) {
EXPECT_THAT(DownsampleArray(MakeArray<float>({2, 3, 5, 1}),
span<const Index>({2}), DownsampleMethod::kMax),
Optional(MakeArray<float>({3, 5})));
EXPECT_THAT(DownsampleArray(MakeArray<int>({2, 3, 8, 7, 1, 5}),
span<const Index>({3}), DownsampleMethod::kMax),
Optional(MakeArray<int>({8, 7})));
}
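// For an even-sized block, the lower of the two middle elements is used:
// sorted {1, 2, 3, 100} -> 2. A partial trailing block of odd size uses its
// true middle element.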
TEST(DownsampleArrayTest, MedianRank1ExactMultiple) {
EXPECT_THAT(
DownsampleArray(MakeArray<float>({100, 3, 1, 2, 99, 98, 97, 5}),
span<const Index>({4}), DownsampleMethod::kMedian),
Optional(MakeArray<float>({2, 97})));
}
TEST(DownsampleArrayTest, MedianRank1Partial) {
EXPECT_THAT(
DownsampleArray(MakeArray<float>({100, 3, 1, 2, 99, 97, 98}),
span<const Index>({4}), DownsampleMethod::kMedian),
Optional(MakeArray<float>({2, 98})));
}
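// kMode selects the most frequent value in each block. The expectations here
// and below are consistent with ties breaking toward the smaller value:
// {3, 3, 2, 2} -> 2.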
TEST(DownsampleArrayTest, ModeRank1ExactMultiple) {
EXPECT_THAT(DownsampleArray(MakeArray<float>({100, 99, 99, 99, 3, 3, 2, 2}),
span<const Index>({4}), DownsampleMethod::kMode),
Optional(MakeArray<float>({99, 2})));
}
TEST(DownsampleArrayTest, ModeRank1Partial) {
EXPECT_THAT(DownsampleArray(MakeArray<float>({100, 99, 99, 99, 3, 3, 2}),
span<const Index>({4}), DownsampleMethod::kMode),
Optional(MakeArray<float>({99, 3})));
}
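// For bool, a clear majority wins; the 2-2 tie resolves to false, matching
// the smaller-value tie-breaking seen above.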
TEST(DownsampleArrayTest, ModeBool) {
EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 0, 1, 1}),
span<const Index>({4}), DownsampleMethod::kMode),
Optional(MakeArray<bool>({0})));
EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 1, 1, 1}),
span<const Index>({4}), DownsampleMethod::kMode),
Optional(MakeArray<bool>({1})));
EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 0, 1, 1, 1}),
span<const Index>({5}), DownsampleMethod::kMode),
Optional(MakeArray<bool>({1})));
}
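// Boolean mean behaves like a majority vote; the exact 0.5 case
// ({0, 0, 1, 1}) rounds to even, i.e. false.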
TEST(DownsampleArrayTest, MeanBool) {
EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 0, 1, 1}),
span<const Index>({4}), DownsampleMethod::kMean),
Optional(MakeArray<bool>({0})));
EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 1, 1, 1}),
span<const Index>({4}), DownsampleMethod::kMean),
Optional(MakeArray<bool>({1})));
EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 0, 1, 1, 1}),
span<const Index>({5}), DownsampleMethod::kMean),
Optional(MakeArray<bool>({1})));
}
TEST(DownsampleArrayTest, MedianBool) {
EXPECT_THAT(
DownsampleArray(MakeArray<bool>({0, 0, 1, 1}), span<const Index>({4}),
DownsampleMethod::kMedian),
Optional(MakeArray<bool>({0})));
EXPECT_THAT(
DownsampleArray(MakeArray<bool>({0, 1, 1, 1}), span<const Index>({4}),
DownsampleMethod::kMedian),
Optional(MakeArray<bool>({1})));
EXPECT_THAT(
DownsampleArray(MakeArray<bool>({0, 0, 1, 1, 1}), span<const Index>({5}),
DownsampleMethod::kMedian),
Optional(MakeArray<bool>({1})));
}
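// JSON values compare by value regardless of representation, so 3.0, 3, and
// 3u count as the same element and together form the mode.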
TEST(DownsampleArrayTest, ModeJson) {
using ::tensorstore::dtypes::json_t;
EXPECT_THAT(DownsampleArray(MakeArray<json_t>({"a", "a", 3.0, 3, 3u}),
span<const Index>({5}), DownsampleMethod::kMode),
              Optional(MakeArray<json_t>({json_t(3)})));
}
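// Exercises the multi-block code path with an array larger than a single
// processing block. Each 2x2 block averages rows 2i and 2i+1, giving
// 2i + 0.5, which rounds to the even value 2i.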
TEST(DownsampleArrayTest, MultipleBlocks) {
auto source_array = tensorstore::AllocateArray<uint8_t>({128, 128});
auto expected_downsampled = tensorstore::AllocateArray<uint8_t>({64, 64});
for (int i = 0; i < 128; ++i) {
for (int j = 0; j < 128; ++j) {
source_array(i, j) = static_cast<uint8_t>(i);
}
}
for (int i = 0; i < 64; ++i) {
for (int j = 0; j < 64; ++j) {
expected_downsampled(i, j) = static_cast<uint8_t>(i * 2);
}
}
EXPECT_THAT(DownsampleArray(source_array, {{2, 2}}, DownsampleMethod::kMean),
Optional(tensorstore::MatchesArray(expected_downsampled)));
}
}  // namespace
Code Url: https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/downsample/downsample_array.cc
Test Code Url: https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/downsample/downsample_array_test.cc
Commit Hash: 4f887a6430414cd6088e1743555015b10f116d50