ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth)
---|---|---|---|---|---|---|---
500 | cpp | google/libaddressinput | rule | cpp/src/rule.cc | cpp/test/rule_test.cc | #ifndef I18N_ADDRESSINPUT_RULE_H_
#define I18N_ADDRESSINPUT_RULE_H_
#include <libaddressinput/address_field.h>
#include <memory>
#include <string>
#include <vector>
namespace i18n {
namespace addressinput {
class FormatElement;
class Json;
struct RE2ptr;
class Rule {
public:
Rule(const Rule&) = delete;
Rule& operator=(const Rule&) = delete;
Rule();
~Rule();
static const Rule& GetDefault();
void CopyFrom(const Rule& rule);
bool ParseSerializedRule(const std::string& serialized_rule);
void ParseJsonRule(const Json& json);
const std::string& GetId() const { return id_; }
const std::vector<FormatElement>& GetFormat() const { return format_; }
const std::vector<FormatElement>& GetLatinFormat() const {
return latin_format_;
}
const std::vector<AddressField>& GetRequired() const { return required_; }
const std::vector<std::string>& GetSubKeys() const { return sub_keys_; }
const std::vector<std::string>& GetLanguages() const { return languages_; }
const RE2ptr* GetPostalCodeMatcher() const {
return postal_code_matcher_.get();
}
const std::string& GetSolePostalCode() const { return sole_postal_code_; }
int GetAdminAreaNameMessageId() const { return admin_area_name_message_id_; }
int GetPostalCodeNameMessageId() const {
return postal_code_name_message_id_;
}
int GetLocalityNameMessageId() const {
return locality_name_message_id_;
}
int GetSublocalityNameMessageId() const {
return sublocality_name_message_id_;
}
const std::string& GetName() const { return name_; }
const std::string& GetLatinName() const { return latin_name_; }
const std::string& GetPostalCodeExample() const {
return postal_code_example_;
}
const std::string& GetPostServiceUrl() const { return post_service_url_; }
private:
std::string id_;
std::vector<FormatElement> format_;
std::vector<FormatElement> latin_format_;
std::vector<AddressField> required_;
std::vector<std::string> sub_keys_;
std::vector<std::string> languages_;
std::unique_ptr<const RE2ptr> postal_code_matcher_;
std::string sole_postal_code_;
int admin_area_name_message_id_;
int postal_code_name_message_id_;
int locality_name_message_id_;
int sublocality_name_message_id_;
std::string name_;
std::string latin_name_;
std::string postal_code_example_;
std::string post_service_url_;
};
}
}
#endif
#include "rule.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstring>
#include <string>
#include <utility>
#include <re2/re2.h>
#include "address_field_util.h"
#include "format_element.h"
#include "grit.h"
#include "messages.h"
#include "region_data_constants.h"
#include "util/json.h"
#include "util/re2ptr.h"
#include "util/size.h"
#include "util/string_split.h"
namespace i18n {
namespace addressinput {
namespace {
const char kSeparator = '~';
struct NameIdInfo {
const char* name;
int id;
static bool less(const NameIdInfo& a, const NameIdInfo& b) {
return strcmp(a.name, b.name) < 0;
}
};
struct NameIdMap {
const NameIdInfo* infos;
size_t size;
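// Binary-searches |infos| for |name|; the array must be sorted by name.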
int GetIdFromName(const std::string& name) const {
NameIdInfo key{name.c_str()};
const NameIdInfo* begin = infos;
const NameIdInfo* end = begin + size;
const NameIdInfo* probe =
std::lower_bound(begin, end, key, NameIdInfo::less);
return probe != end && name == probe->name ? probe->id : INVALID_MESSAGE_ID;
}
bool IsSorted() const {
for (size_t n = 1; n < size; ++n) {
if (!NameIdInfo::less(infos[n - 1], infos[n])) {
return false;
}
}
return true;
}
};
const NameIdInfo kAdminAreaInfoArray[] = {
{"area", IDS_LIBADDRESSINPUT_AREA},
{"county", IDS_LIBADDRESSINPUT_COUNTY},
{"department", IDS_LIBADDRESSINPUT_DEPARTMENT},
{"district", IDS_LIBADDRESSINPUT_DISTRICT},
{"do_si", IDS_LIBADDRESSINPUT_DO_SI},
{"emirate", IDS_LIBADDRESSINPUT_EMIRATE},
{"island", IDS_LIBADDRESSINPUT_ISLAND},
{"oblast", IDS_LIBADDRESSINPUT_OBLAST},
{"parish", IDS_LIBADDRESSINPUT_PARISH},
{"prefecture", IDS_LIBADDRESSINPUT_PREFECTURE},
{"province", IDS_LIBADDRESSINPUT_PROVINCE},
{"state", IDS_LIBADDRESSINPUT_STATE},
};
const NameIdMap kAdminAreaMessageIds{
kAdminAreaInfoArray,
size(kAdminAreaInfoArray)
};
const NameIdInfo kPostalCodeInfoArray[] = {
{"eircode", IDS_LIBADDRESSINPUT_EIR_CODE_LABEL},
{"pin", IDS_LIBADDRESSINPUT_PIN_CODE_LABEL},
{"postal", IDS_LIBADDRESSINPUT_POSTAL_CODE_LABEL},
{"zip", IDS_LIBADDRESSINPUT_ZIP_CODE_LABEL},
};
const NameIdMap kPostalCodeMessageIds{
kPostalCodeInfoArray,
size(kPostalCodeInfoArray)
};
const NameIdInfo kLocalityInfoArray[] = {
{"city", IDS_LIBADDRESSINPUT_LOCALITY_LABEL},
{"district", IDS_LIBADDRESSINPUT_DISTRICT},
{"post_town", IDS_LIBADDRESSINPUT_POST_TOWN},
{"suburb", IDS_LIBADDRESSINPUT_SUBURB},
};
const NameIdMap kLocalityMessageIds{
kLocalityInfoArray,
size(kLocalityInfoArray)
};
const NameIdInfo kSublocalityInfoArray[] = {
{"district", IDS_LIBADDRESSINPUT_DISTRICT},
{"neighborhood", IDS_LIBADDRESSINPUT_NEIGHBORHOOD},
{"suburb", IDS_LIBADDRESSINPUT_SUBURB},
{"townland", IDS_LIBADDRESSINPUT_TOWNLAND},
{"village_township", IDS_LIBADDRESSINPUT_VILLAGE_TOWNSHIP},
};
const NameIdMap kSublocalityMessageIds{
kSublocalityInfoArray,
size(kSublocalityInfoArray)
};
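// Helper whose constructor asserts that the name/id arrays above are sorted,
// as required by the binary search in NameIdMap::GetIdFromName().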
#ifndef _NDEBUG
struct StaticMapChecker {
StaticMapChecker() {
assert(kAdminAreaMessageIds.IsSorted());
assert(kPostalCodeMessageIds.IsSorted());
assert(kLocalityMessageIds.IsSorted());
assert(kSublocalityMessageIds.IsSorted());
}
};
#endif
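// Returns true if |input| contains any of the regular expression
// metacharacters '(', '[', '\', '{' or '?'. A "zip" value without them is a
// plain string, i.e. a sole postal code.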
bool ContainsRegExSpecialCharacters(const std::string& input) {
return input.find_first_of(R"(([\{?)") != std::string::npos;
}
}
Rule::Rule()
: id_(),
format_(),
latin_format_(),
required_(),
sub_keys_(),
languages_(),
postal_code_matcher_(nullptr),
sole_postal_code_(),
admin_area_name_message_id_(INVALID_MESSAGE_ID),
postal_code_name_message_id_(INVALID_MESSAGE_ID),
locality_name_message_id_(INVALID_MESSAGE_ID),
sublocality_name_message_id_(INVALID_MESSAGE_ID),
name_(),
latin_name_(),
postal_code_example_(),
post_service_url_() {}
Rule::~Rule() = default;
const Rule& Rule::GetDefault() {
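// Lazily initialized singleton; never freed.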
static Rule* default_rule = nullptr;
if (default_rule == nullptr) {
default_rule = new Rule;
default_rule->ParseSerializedRule(
RegionDataConstants::GetDefaultRegionData());
}
return *default_rule;
}
void Rule::CopyFrom(const Rule& rule) {
assert(this != &rule);
id_ = rule.id_;
format_ = rule.format_;
latin_format_ = rule.latin_format_;
required_ = rule.required_;
sub_keys_ = rule.sub_keys_;
languages_ = rule.languages_;
postal_code_matcher_.reset(
rule.postal_code_matcher_ == nullptr
? nullptr
: new RE2ptr(new RE2(rule.postal_code_matcher_->ptr->pattern(),
rule.postal_code_matcher_->ptr->options())));
sole_postal_code_ = rule.sole_postal_code_;
admin_area_name_message_id_ = rule.admin_area_name_message_id_;
postal_code_name_message_id_ = rule.postal_code_name_message_id_;
locality_name_message_id_ = rule.locality_name_message_id_;
sublocality_name_message_id_ = rule.sublocality_name_message_id_;
name_ = rule.name_;
latin_name_ = rule.latin_name_;
postal_code_example_ = rule.postal_code_example_;
post_service_url_ = rule.post_service_url_;
}
bool Rule::ParseSerializedRule(const std::string& serialized_rule) {
Json json;
if (!json.ParseObject(serialized_rule)) {
return false;
}
ParseJsonRule(json);
return true;
}
void Rule::ParseJsonRule(const Json& json) {
#ifndef _NDEBUG
static StaticMapChecker map_checker;
#endif
std::string value;
if (json.GetStringValueForKey("id", &value)) {
id_.swap(value);
}
if (json.GetStringValueForKey("fmt", &value)) {
ParseFormatRule(value, &format_);
}
if (json.GetStringValueForKey("lfmt", &value)) {
ParseFormatRule(value, &latin_format_);
}
if (json.GetStringValueForKey("require", &value)) {
ParseAddressFieldsRequired(value, &required_);
}
if (json.GetStringValueForKey("sub_keys", &value)) {
SplitString(value, kSeparator, &sub_keys_);
}
if (json.GetStringValueForKey("languages", &value)) {
SplitString(value, kSeparator, &languages_);
}
sole_postal_code_.clear();
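// Parse the postal code pattern into an anchored, non-capturing RE2 matcher.
// If the pattern contains no regex metacharacters, it is also kept verbatim
// as the sole postal code for this rule.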
if (json.GetStringValueForKey("zip", &value)) {
RE2::Options options;
options.set_never_capture(true);
RE2* matcher = new RE2("^(" + value + ")", options);
if (matcher->ok()) {
postal_code_matcher_.reset(new RE2ptr(matcher));
} else {
postal_code_matcher_.reset(nullptr);
delete matcher;
}
if (!ContainsRegExSpecialCharacters(value)) {
sole_postal_code_.swap(value);
}
}
if (json.GetStringValueForKey("state_name_type", &value)) {
admin_area_name_message_id_ = kAdminAreaMessageIds.GetIdFromName(value);
}
if (json.GetStringValueForKey("zip_name_type", &value)) {
postal_code_name_message_id_ = kPostalCodeMessageIds.GetIdFromName(value);
}
if (json.GetStringValueForKey("locality_name_type", &value)) {
locality_name_message_id_ = kLocalityMessageIds.GetIdFromName(value);
}
if (json.GetStringValueForKey("sublocality_name_type", &value)) {
sublocality_name_message_id_ = kSublocalityMessageIds.GetIdFromName(value);
}
if (json.GetStringValueForKey("name", &value)) {
name_.swap(value);
}
if (json.GetStringValueForKey("lname", &value)) {
latin_name_.swap(value);
}
if (json.GetStringValueForKey("zipex", &value)) {
postal_code_example_.swap(value);
}
if (json.GetStringValueForKey("posturl", &value)) {
post_service_url_.swap(value);
}
}
}
} | #include "rule.h"
#include <libaddressinput/address_field.h>
#include <libaddressinput/localization.h>
#include <cstddef>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "format_element.h"
#include "grit.h"
#include "messages.h"
#include "region_data_constants.h"
#include "util/json.h"
namespace {
using i18n::addressinput::AddressField;
using i18n::addressinput::ADMIN_AREA;
using i18n::addressinput::FormatElement;
using i18n::addressinput::INVALID_MESSAGE_ID;
using i18n::addressinput::Json;
using i18n::addressinput::LOCALITY;
using i18n::addressinput::Localization;
using i18n::addressinput::RegionDataConstants;
using i18n::addressinput::Rule;
using i18n::addressinput::STREET_ADDRESS;
TEST(RuleTest, CopyOverwritesRule) {
Rule rule;
ASSERT_TRUE(rule.ParseSerializedRule(
R"({)"
R"("fmt":"%S%Z",)"
R"("lfmt":"%Z%S",)"
R"("id":"data/XA",)"
R"("name":"Le Test",)"
R"("lname":"Testistan",)"
R"("require":"AC",)"
R"("sub_keys":"aa~bb~cc",)"
R"("languages":"en~fr",)"
R"("zip":"\\d{3}",)"
R"("state_name_type":"area",)"
R"("locality_name_type":"post_town",)"
R"("sublocality_name_type":"neighborhood",)"
R"("zip_name_type":"postal",)"
R"("zipex":"1234",)"
R"("posturl":"http:
R"(})"));
Rule copy;
EXPECT_NE(rule.GetFormat(), copy.GetFormat());
EXPECT_NE(rule.GetLatinFormat(), copy.GetLatinFormat());
EXPECT_NE(rule.GetId(), copy.GetId());
EXPECT_NE(rule.GetRequired(), copy.GetRequired());
EXPECT_NE(rule.GetSubKeys(), copy.GetSubKeys());
EXPECT_NE(rule.GetLanguages(), copy.GetLanguages());
EXPECT_NE(rule.GetAdminAreaNameMessageId(),
copy.GetAdminAreaNameMessageId());
EXPECT_NE(rule.GetPostalCodeNameMessageId(),
copy.GetPostalCodeNameMessageId());
EXPECT_NE(rule.GetLocalityNameMessageId(),
copy.GetLocalityNameMessageId());
EXPECT_NE(rule.GetSublocalityNameMessageId(),
copy.GetSublocalityNameMessageId());
EXPECT_NE(rule.GetName(), copy.GetName());
EXPECT_NE(rule.GetLatinName(), copy.GetLatinName());
EXPECT_NE(rule.GetPostalCodeExample(), copy.GetPostalCodeExample());
EXPECT_NE(rule.GetPostServiceUrl(), copy.GetPostServiceUrl());
EXPECT_TRUE(rule.GetPostalCodeMatcher() != nullptr);
EXPECT_TRUE(copy.GetPostalCodeMatcher() == nullptr);
copy.CopyFrom(rule);
EXPECT_EQ(rule.GetFormat(), copy.GetFormat());
EXPECT_EQ(rule.GetLatinFormat(), copy.GetLatinFormat());
EXPECT_EQ(rule.GetId(), copy.GetId());
EXPECT_EQ(rule.GetRequired(), copy.GetRequired());
EXPECT_EQ(rule.GetSubKeys(), copy.GetSubKeys());
EXPECT_EQ(rule.GetLanguages(), copy.GetLanguages());
EXPECT_EQ(rule.GetAdminAreaNameMessageId(),
copy.GetAdminAreaNameMessageId());
EXPECT_EQ(rule.GetPostalCodeNameMessageId(),
copy.GetPostalCodeNameMessageId());
EXPECT_EQ(rule.GetSublocalityNameMessageId(),
copy.GetSublocalityNameMessageId());
EXPECT_EQ(rule.GetLocalityNameMessageId(),
copy.GetLocalityNameMessageId());
EXPECT_EQ(rule.GetName(), copy.GetName());
EXPECT_EQ(rule.GetLatinName(), copy.GetLatinName());
EXPECT_EQ(rule.GetPostalCodeExample(), copy.GetPostalCodeExample());
EXPECT_EQ(rule.GetPostServiceUrl(), copy.GetPostServiceUrl());
EXPECT_TRUE(copy.GetPostalCodeMatcher() != nullptr);
}
TEST(RuleTest, ParseOverwritesRule) {
Rule rule;
ASSERT_TRUE(rule.ParseSerializedRule("{"
"\"fmt\":\"%S%Z\","
"\"state_name_type\":\"area\","
"\"zip\":\"1234\","
"\"zip_name_type\":\"postal\","
"\"zipex\":\"1234\","
"\"posturl\":\"http:
"}"));
EXPECT_FALSE(rule.GetFormat().empty());
EXPECT_EQ(IDS_LIBADDRESSINPUT_AREA,
rule.GetAdminAreaNameMessageId());
EXPECT_EQ(IDS_LIBADDRESSINPUT_POSTAL_CODE_LABEL,
rule.GetPostalCodeNameMessageId());
EXPECT_EQ("1234", rule.GetSolePostalCode());
EXPECT_EQ("1234", rule.GetPostalCodeExample());
EXPECT_EQ("http:
ASSERT_TRUE(rule.ParseSerializedRule("{"
"\"fmt\":\"\","
"\"state_name_type\":\"do_si\","
"\"zip_name_type\":\"zip\","
"\"zipex\":\"5678\","
"\"posturl\":\"http:
"}"));
EXPECT_TRUE(rule.GetFormat().empty());
EXPECT_EQ(IDS_LIBADDRESSINPUT_DO_SI,
rule.GetAdminAreaNameMessageId());
EXPECT_EQ(IDS_LIBADDRESSINPUT_ZIP_CODE_LABEL,
rule.GetPostalCodeNameMessageId());
EXPECT_TRUE(rule.GetSolePostalCode().empty());
EXPECT_EQ("5678", rule.GetPostalCodeExample());
EXPECT_EQ("http:
}
TEST(RuleTest, ParsesFormatCorrectly) {
const std::vector<FormatElement> expected{
FormatElement{ADMIN_AREA},
FormatElement{LOCALITY},
};
Rule rule;
ASSERT_TRUE(rule.ParseSerializedRule("{\"fmt\":\"%S%C\"}"));
EXPECT_EQ(expected, rule.GetFormat());
}
TEST(RuleTest, ParsesNameCorrectly) {
Rule rule;
ASSERT_TRUE(rule.ParseSerializedRule("{\"name\":\"Le Test\"}"));
EXPECT_EQ("Le Test", rule.GetName());
}
TEST(RuleTest, ParsesLatinNameCorrectly) {
Rule rule;
ASSERT_TRUE(rule.ParseSerializedRule("{\"lname\":\"Testistan\"}"));
EXPECT_EQ("Testistan", rule.GetLatinName());
}
TEST(RuleTest, ParsesLatinFormatCorrectly) {
const std::vector<FormatElement> expected{
FormatElement{LOCALITY},
FormatElement{ADMIN_AREA},
};
Rule rule;
ASSERT_TRUE(rule.ParseSerializedRule("{\"lfmt\":\"%C%S\"}"));
EXPECT_EQ(expected, rule.GetLatinFormat());
}
TEST(RuleTest, ParsesRequiredCorrectly) {
const std::vector<AddressField> expected{
STREET_ADDRESS,
LOCALITY,
};
Rule rule;
ASSERT_TRUE(rule.ParseSerializedRule("{\"require\":\"AC\"}"));
EXPECT_EQ(expected, rule.GetRequired());
}
TEST(RuleTest, ParsesSubKeysCorrectly) {
const std::vector<std::string> expected{
"aa",
"bb",
"cc",
};
Rule rule;
ASSERT_TRUE(rule.ParseSerializedRule("{\"sub_keys\":\"aa~bb~cc\"}"));
EXPECT_EQ(expected, rule.GetSubKeys());
}
TEST(RuleTest, ParsesLanguagesCorrectly) {
const std::vector<std::string> expected{
"de",
"fr",
"it",
};
Rule rule;
ASSERT_TRUE(rule.ParseSerializedRule("{\"languages\":\"de~fr~it\"}"));
EXPECT_EQ(expected, rule.GetLanguages());
}
TEST(RuleTest, ParsesPostalCodeExampleCorrectly) {
Rule rule;
ASSERT_TRUE(rule.ParseSerializedRule("{\"zipex\":\"1234,12345-6789\"}"));
EXPECT_EQ("1234,12345-6789", rule.GetPostalCodeExample());
}
TEST(RuleTest, ParsesPostServiceUrlCorrectly) {
Rule rule;
ASSERT_TRUE(
rule.ParseSerializedRule("{\"posturl\":\"http://www.example.com\"}"));
EXPECT_EQ("http://www.example.com", rule.GetPostServiceUrl());
}
TEST(RuleTest, PostalCodeMatcher) {
Rule rule;
ASSERT_TRUE(rule.ParseSerializedRule(R"({"zip":"\\d{3}"})"));
EXPECT_TRUE(rule.GetPostalCodeMatcher() != nullptr);
}
TEST(RuleTest, PostalCodeMatcherInvalidRegExp) {
Rule rule;
ASSERT_TRUE(rule.ParseSerializedRule(R"({"zip":"("})"));
EXPECT_TRUE(rule.GetPostalCodeMatcher() == nullptr);
}
TEST(RuleTest, ParsesJsonRuleCorrectly) {
Json json;
ASSERT_TRUE(json.ParseObject(R"({"zip":"\\d{3}"})"));
Rule rule;
rule.ParseJsonRule(json);
EXPECT_TRUE(rule.GetPostalCodeMatcher() != nullptr);
}
TEST(RuleTest, EmptyStringIsNotValid) {
Rule rule;
EXPECT_FALSE(rule.ParseSerializedRule(std::string()));
}
TEST(RuleTest, EmptyDictionaryIsValid) {
Rule rule;
EXPECT_TRUE(rule.ParseSerializedRule("{}"));
}
class PostalCodeNameParseTest
: public testing::TestWithParam<std::pair<std::string, int> > {
public:
PostalCodeNameParseTest(const PostalCodeNameParseTest&) = delete;
PostalCodeNameParseTest& operator=(const PostalCodeNameParseTest&) = delete;
protected:
PostalCodeNameParseTest() = default;
Rule rule_;
};
TEST_P(PostalCodeNameParseTest, ParsedCorrectly) {
ASSERT_TRUE(rule_.ParseSerializedRule(GetParam().first));
EXPECT_EQ(GetParam().second, rule_.GetPostalCodeNameMessageId());
}
INSTANTIATE_TEST_SUITE_P(
AllPostalCodeNames, PostalCodeNameParseTest,
testing::Values(std::make_pair("{\"zip_name_type\":\"pin\"}",
IDS_LIBADDRESSINPUT_PIN_CODE_LABEL),
std::make_pair("{\"zip_name_type\":\"postal\"}",
IDS_LIBADDRESSINPUT_POSTAL_CODE_LABEL),
std::make_pair("{\"zip_name_type\":\"zip\"}",
IDS_LIBADDRESSINPUT_ZIP_CODE_LABEL)));
class LocalityNameParseTest
: public testing::TestWithParam<std::pair<std::string, int> > {
public:
LocalityNameParseTest(const LocalityNameParseTest&) = delete;
LocalityNameParseTest& operator=(const LocalityNameParseTest&) = delete;
protected:
LocalityNameParseTest() = default;
Rule rule_;
};
TEST_P(LocalityNameParseTest, ParsedCorrectly) {
ASSERT_TRUE(rule_.ParseSerializedRule(GetParam().first));
EXPECT_EQ(GetParam().second, rule_.GetLocalityNameMessageId());
}
INSTANTIATE_TEST_SUITE_P(
AllLocalityNames, LocalityNameParseTest,
testing::Values(std::make_pair("{\"locality_name_type\":\"post_town\"}",
IDS_LIBADDRESSINPUT_POST_TOWN),
std::make_pair("{\"locality_name_type\":\"city\"}",
IDS_LIBADDRESSINPUT_LOCALITY_LABEL),
std::make_pair("{\"locality_name_type\":\"district\"}",
IDS_LIBADDRESSINPUT_DISTRICT)));
class SublocalityNameParseTest
: public testing::TestWithParam<std::pair<std::string, int> > {
public:
SublocalityNameParseTest(const SublocalityNameParseTest&) = delete;
SublocalityNameParseTest& operator=(const SublocalityNameParseTest&) = delete;
protected:
SublocalityNameParseTest() = default;
Rule rule_;
};
TEST_P(SublocalityNameParseTest, ParsedCorrectly) {
ASSERT_TRUE(rule_.ParseSerializedRule(GetParam().first));
EXPECT_EQ(GetParam().second, rule_.GetSublocalityNameMessageId());
}
INSTANTIATE_TEST_SUITE_P(
AllSublocalityNames, SublocalityNameParseTest,
testing::Values(
std::make_pair("{\"sublocality_name_type\":\"village_township\"}",
IDS_LIBADDRESSINPUT_VILLAGE_TOWNSHIP),
std::make_pair("{\"sublocality_name_type\":\"neighborhood\"}",
IDS_LIBADDRESSINPUT_NEIGHBORHOOD),
std::make_pair("{\"sublocality_name_type\":\"suburb\"}",
IDS_LIBADDRESSINPUT_SUBURB),
std::make_pair("{\"sublocality_name_type\":\"district\"}",
IDS_LIBADDRESSINPUT_DISTRICT)));
class AdminAreaNameParseTest
: public testing::TestWithParam<std::pair<std::string, int> > {
public:
AdminAreaNameParseTest(const AdminAreaNameParseTest&) = delete;
AdminAreaNameParseTest& operator=(const AdminAreaNameParseTest&) = delete;
protected:
AdminAreaNameParseTest() = default;
Rule rule_;
};
TEST_P(AdminAreaNameParseTest, ParsedCorrectly) {
ASSERT_TRUE(rule_.ParseSerializedRule(GetParam().first));
EXPECT_EQ(GetParam().second, rule_.GetAdminAreaNameMessageId());
}
INSTANTIATE_TEST_SUITE_P(
AllAdminAreaNames, AdminAreaNameParseTest,
testing::Values(std::make_pair("{\"state_name_type\":\"area\"}",
IDS_LIBADDRESSINPUT_AREA),
std::make_pair("{\"state_name_type\":\"county\"}",
IDS_LIBADDRESSINPUT_COUNTY),
std::make_pair("{\"state_name_type\":\"department\"}",
IDS_LIBADDRESSINPUT_DEPARTMENT),
std::make_pair("{\"state_name_type\":\"district\"}",
IDS_LIBADDRESSINPUT_DISTRICT),
std::make_pair("{\"state_name_type\":\"do_si\"}",
IDS_LIBADDRESSINPUT_DO_SI),
std::make_pair("{\"state_name_type\":\"emirate\"}",
IDS_LIBADDRESSINPUT_EMIRATE),
std::make_pair("{\"state_name_type\":\"island\"}",
IDS_LIBADDRESSINPUT_ISLAND),
std::make_pair("{\"state_name_type\":\"parish\"}",
IDS_LIBADDRESSINPUT_PARISH),
std::make_pair("{\"state_name_type\":\"prefecture\"}",
IDS_LIBADDRESSINPUT_PREFECTURE),
std::make_pair("{\"state_name_type\":\"province\"}",
IDS_LIBADDRESSINPUT_PROVINCE),
std::make_pair("{\"state_name_type\":\"state\"}",
IDS_LIBADDRESSINPUT_STATE)));
class RuleParseTest : public testing::TestWithParam<std::string> {
public:
RuleParseTest(const RuleParseTest&) = delete;
RuleParseTest& operator=(const RuleParseTest&) = delete;
protected:
RuleParseTest() = default;
std::string GetRegionData() const {
std::string data = RegionDataConstants::GetRegionData(GetParam());
return !data.empty() ? data : GetParam();
}
Rule rule_;
Localization localization_;
};
TEST_P(RuleParseTest, RegionDataParsedSuccessfully) {
EXPECT_TRUE(rule_.ParseSerializedRule(GetRegionData()));
}
TEST_P(RuleParseTest, AdminAreaNameTypeHasUiString) {
const std::string& region_data = GetRegionData();
rule_.ParseSerializedRule(region_data);
if (region_data.find("state_name_type") != std::string::npos) {
EXPECT_NE(INVALID_MESSAGE_ID, rule_.GetAdminAreaNameMessageId());
EXPECT_FALSE(
localization_.GetString(rule_.GetAdminAreaNameMessageId()).empty());
}
}
TEST_P(RuleParseTest, PostalCodeNameTypeHasUiString) {
const std::string& region_data = GetRegionData();
rule_.ParseSerializedRule(region_data);
if (region_data.find("zip_name_type") != std::string::npos) {
EXPECT_NE(INVALID_MESSAGE_ID, rule_.GetPostalCodeNameMessageId());
EXPECT_FALSE(
localization_.GetString(rule_.GetPostalCodeNameMessageId()).empty());
}
}
TEST_P(RuleParseTest, LocalityNameTypeHasUiString) {
const std::string& region_data = GetRegionData();
rule_.ParseSerializedRule(region_data);
if (region_data.find("\"locality_name_type") != std::string::npos) {
EXPECT_NE(INVALID_MESSAGE_ID, rule_.GetLocalityNameMessageId());
EXPECT_FALSE(
localization_.GetString(rule_.GetLocalityNameMessageId()).empty());
}
}
TEST_P(RuleParseTest, SublocalityNameTypeHasUiString) {
const std::string& region_data = GetRegionData();
rule_.ParseSerializedRule(region_data);
if (region_data.find("sublocality_name_type") != std::string::npos) {
EXPECT_NE(INVALID_MESSAGE_ID, rule_.GetSublocalityNameMessageId());
EXPECT_FALSE(
localization_.GetString(rule_.GetSublocalityNameMessageId()).empty());
}
}
TEST_P(RuleParseTest, SolePostalCode) {
Rule rule;
ASSERT_TRUE(rule.ParseSerializedRule("{\"zip\":\"1234\"}"));
EXPECT_TRUE(rule.GetPostalCodeMatcher() != nullptr);
EXPECT_EQ(rule.GetSolePostalCode(), "1234");
Rule copy;
EXPECT_TRUE(copy.GetPostalCodeMatcher() == nullptr);
EXPECT_TRUE(copy.GetSolePostalCode().empty());
copy.CopyFrom(rule);
EXPECT_TRUE(copy.GetPostalCodeMatcher() != nullptr);
EXPECT_EQ(rule.GetSolePostalCode(), copy.GetSolePostalCode());
}
INSTANTIATE_TEST_SUITE_P(
AllRulesTest, RuleParseTest,
testing::ValuesIn(RegionDataConstants::GetRegionCodes()));
INSTANTIATE_TEST_SUITE_P(
DefaultRuleTest, RuleParseTest,
testing::Values(RegionDataConstants::GetDefaultRegionData()));
} |
501 | cpp | google/libaddressinput | language | cpp/src/language.cc | cpp/test/language_test.cc | #ifndef I18N_ADDRESSINPUT_LANGUAGE_H_
#define I18N_ADDRESSINPUT_LANGUAGE_H_
#include <string>
namespace i18n {
namespace addressinput {
class Rule;
struct Language {
explicit Language(const std::string& language_tag);
~Language();
std::string tag;
std::string base;
bool has_latin_script;
};
Language ChooseBestAddressLanguage(const Rule& address_region_rule,
const Language& ui_language);
}
}
#endif
#include "language.h"
#include <algorithm>
#include <cctype>
#include <string>
#include <vector>
#include "rule.h"
#include "util/string_split.h"
namespace i18n {
namespace addressinput {
Language::Language(const std::string& language_tag) : tag(language_tag),
base(),
has_latin_script(false) {
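// Normalize the tag to BCP 47 form ('-' subtag separators), derive the
// lowercase base language, and check whether the second or third subtag is
// the "Latn" script code.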
static const char kSubtagsSeparator = '-';
static const char kAlternativeSubtagsSeparator = '_';
std::replace(
tag.begin(), tag.end(), kAlternativeSubtagsSeparator, kSubtagsSeparator);
std::string lowercase = tag;
std::transform(
lowercase.begin(), lowercase.end(), lowercase.begin(), tolower);
base = lowercase.substr(0, lowercase.find(kSubtagsSeparator));
static const char kLowercaseLatinScript[] = "latn";
std::vector<std::string> subtags;
SplitString(lowercase, kSubtagsSeparator, &subtags);
has_latin_script =
(subtags.size() > 1 && subtags[1] == kLowercaseLatinScript) ||
(subtags.size() > 2 && subtags[2] == kLowercaseLatinScript);
}
Language::~Language() = default;
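// Picks the best language for formatting an address. If the UI language is
// written in Latin script and the rule has a Latin format, a synthetic
// "<base>-Latn" tag is returned; otherwise the rule's language whose base
// matches the UI language wins, falling back to the rule's first language.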
Language ChooseBestAddressLanguage(const Rule& address_region_rule,
const Language& ui_language) {
if (address_region_rule.GetLanguages().empty()) {
return ui_language;
}
std::vector<Language> available_languages;
for (const auto& language_tag : address_region_rule.GetLanguages()) {
available_languages.emplace_back(language_tag);
}
if (ui_language.tag.empty()) {
return available_languages.front();
}
bool has_latin_format = !address_region_rule.GetLatinFormat().empty();
static const char kLatinScriptSuffix[] = "-Latn";
Language latin_script_language(
available_languages.front().base + kLatinScriptSuffix);
if (has_latin_format && ui_language.has_latin_script) {
return latin_script_language;
}
for (const auto& language : available_languages) {
if (ui_language.base == language.base) {
return language;
}
}
return has_latin_format ? latin_script_language : available_languages.front();
}
}
} | #include "language.h"
#include <string>
#include <gtest/gtest.h>
namespace {
using i18n::addressinput::Language;
struct LanguageTestCase {
LanguageTestCase(const std::string& input_language_tag,
const std::string& expected_language_tag,
const std::string& expected_base_language,
bool expected_has_latin_script)
: input_language_tag(input_language_tag),
expected_language_tag(expected_language_tag),
expected_base_language(expected_base_language),
expected_has_latin_script(expected_has_latin_script) {}
~LanguageTestCase() = default;
const std::string input_language_tag;
const std::string expected_language_tag;
const std::string expected_base_language;
const bool expected_has_latin_script;
};
class LanguageTest : public testing::TestWithParam<LanguageTestCase> {
public:
LanguageTest(const LanguageTest&) = delete;
LanguageTest& operator=(const LanguageTest&) = delete;
protected:
LanguageTest() = default;
};
TEST_P(LanguageTest, ExtractedDataIsCorrect) {
Language language(GetParam().input_language_tag);
EXPECT_EQ(GetParam().expected_language_tag, language.tag);
EXPECT_EQ(GetParam().expected_base_language, language.base);
EXPECT_EQ(GetParam().expected_has_latin_script, language.has_latin_script);
}
INSTANTIATE_TEST_SUITE_P(
LanguageTestCases, LanguageTest,
testing::Values(LanguageTestCase("", "", "", false),
LanguageTestCase("en", "en", "en", false),
LanguageTestCase("zh-Latn-CN", "zh-Latn-CN", "zh", true),
LanguageTestCase("zh-cmn-Latn-CN", "zh-cmn-Latn-CN", "zh",
true),
LanguageTestCase("zh-Hans", "zh-Hans", "zh", false),
LanguageTestCase("en_GB", "en-GB", "en", false)));
} |
502 | cpp | google/libaddressinput | retriever | cpp/src/retriever.cc | cpp/test/retriever_test.cc | #ifndef I18N_ADDRESSINPUT_RETRIEVER_H_
#define I18N_ADDRESSINPUT_RETRIEVER_H_
#include <libaddressinput/callback.h>
#include <memory>
#include <string>
namespace i18n {
namespace addressinput {
class Source;
class Storage;
class ValidatingStorage;
class Retriever {
public:
using Callback =
i18n::addressinput::Callback<const std::string&, const std::string&>;
Retriever(const Retriever&) = delete;
Retriever& operator=(const Retriever&) = delete;
Retriever(const Source* source, Storage* storage);
~Retriever();
void Retrieve(const std::string& key, const Callback& retrieved) const;
private:
std::unique_ptr<const Source> source_;
std::unique_ptr<ValidatingStorage> storage_;
};
}
}
#endif
#include "retriever.h"
#include <libaddressinput/callback.h>
#include <libaddressinput/source.h>
#include <libaddressinput/storage.h>
#include <cassert>
#include <cstddef>
#include <memory>
#include <string>
#include "validating_storage.h"
namespace i18n {
namespace addressinput {
namespace {
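// Self-deleting helper that first asks ValidatingStorage for the key. On a
// cache miss it queries the source and writes fresh data back to storage; if
// the source also fails, stale (expired but checksum-valid) data is returned
// as a last resort.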
class Helper {
public:
Helper(const Helper&) = delete;
Helper& operator=(const Helper&) = delete;
Helper(const std::string& key,
const Retriever::Callback& retrieved,
const Source& source,
ValidatingStorage* storage)
: retrieved_(retrieved),
source_(source),
storage_(storage),
fresh_data_ready_(BuildCallback(this, &Helper::OnFreshDataReady)),
validated_data_ready_(
BuildCallback(this, &Helper::OnValidatedDataReady)),
stale_data_() {
assert(storage_ != nullptr);
storage_->Get(key, *validated_data_ready_);
}
private:
~Helper() = default;
void OnValidatedDataReady(bool success,
const std::string& key,
std::string* data) {
if (success) {
assert(data != nullptr);
retrieved_(success, key, *data);
delete this;
} else {
if (data != nullptr && !data->empty()) {
stale_data_ = *data;
}
source_.Get(key, *fresh_data_ready_);
}
delete data;
}
void OnFreshDataReady(bool success,
const std::string& key,
std::string* data) {
if (success) {
assert(data != nullptr);
retrieved_(true, key, *data);
storage_->Put(key, data);
data = nullptr;  // Put() took ownership of the data.
} else if (!stale_data_.empty()) {
retrieved_(true, key, stale_data_);
} else {
retrieved_(false, key, std::string());
}
delete data;
delete this;
}
const Retriever::Callback& retrieved_;
const Source& source_;
ValidatingStorage* storage_;
const std::unique_ptr<const Source::Callback> fresh_data_ready_;
const std::unique_ptr<const Storage::Callback> validated_data_ready_;
std::string stale_data_;
};
}
Retriever::Retriever(const Source* source, Storage* storage)
: source_(source), storage_(new ValidatingStorage(storage)) {
assert(source_ != nullptr);
assert(storage_ != nullptr);
}
Retriever::~Retriever() = default;
void Retriever::Retrieve(const std::string& key,
const Callback& retrieved) const {
new Helper(key, retrieved, *source_, storage_.get());
}
}
} | #include "retriever.h"
#include <libaddressinput/callback.h>
#include <libaddressinput/null_storage.h>
#include <libaddressinput/storage.h>
#include <cstddef>
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "mock_source.h"
#include "testdata_source.h"
#define CHECKSUM "dd63dafcbd4d5b28badfcaf86fb6fcdb"
#define DATA "{'foo': 'bar'}"
#define OLD_TIMESTAMP "0"
namespace {
using i18n::addressinput::BuildCallback;
using i18n::addressinput::MockSource;
using i18n::addressinput::NullStorage;
using i18n::addressinput::Retriever;
using i18n::addressinput::Storage;
using i18n::addressinput::TestdataSource;
const char kKey[] = "data/CA/AB--fr";
const char kEmptyData[] = "{}";
const char kStaleData[] = DATA;
const char kStaleWrappedData[] = "timestamp=" OLD_TIMESTAMP "\n"
"checksum=" CHECKSUM "\n"
DATA;
class RetrieverTest : public testing::Test {
public:
RetrieverTest(const RetrieverTest&) = delete;
RetrieverTest& operator=(const RetrieverTest&) = delete;
protected:
RetrieverTest()
: retriever_(new TestdataSource(false), new NullStorage),
success_(false),
key_(),
data_(),
data_ready_(BuildCallback(this, &RetrieverTest::OnDataReady)) {}
Retriever retriever_;
bool success_;
std::string key_;
std::string data_;
const std::unique_ptr<const Retriever::Callback> data_ready_;
private:
void OnDataReady(bool success,
const std::string& key,
const std::string& data) {
success_ = success;
key_ = key;
data_ = data;
}
};
TEST_F(RetrieverTest, RetrieveData) {
retriever_.Retrieve(kKey, *data_ready_);
EXPECT_TRUE(success_);
EXPECT_EQ(kKey, key_);
EXPECT_FALSE(data_.empty());
EXPECT_NE(kEmptyData, data_);
}
TEST_F(RetrieverTest, ReadDataFromStorage) {
retriever_.Retrieve(kKey, *data_ready_);
retriever_.Retrieve(kKey, *data_ready_);
EXPECT_TRUE(success_);
EXPECT_EQ(kKey, key_);
EXPECT_FALSE(data_.empty());
EXPECT_NE(kEmptyData, data_);
}
TEST_F(RetrieverTest, MissingKeyReturnsEmptyData) {
static const char kMissingKey[] = "junk";
retriever_.Retrieve(kMissingKey, *data_ready_);
EXPECT_TRUE(success_);
EXPECT_EQ(kMissingKey, key_);
EXPECT_EQ(kEmptyData, data_);
}
TEST_F(RetrieverTest, FaultySource) {
Retriever bad_retriever(new MockSource, new NullStorage);
bad_retriever.Retrieve(kKey, *data_ready_);
EXPECT_FALSE(success_);
EXPECT_EQ(kKey, key_);
EXPECT_TRUE(data_.empty());
}
class StaleStorage : public Storage {
public:
StaleStorage(const StaleStorage&) = delete;
StaleStorage& operator=(const StaleStorage&) = delete;
StaleStorage() : data_updated_(false) {}
~StaleStorage() override = default;
void Get(const std::string& key, const Callback& data_ready) const override {
data_ready(true, key, new std::string(kStaleWrappedData));
}
void Put(const std::string& key, std::string* value) override {
ASSERT_TRUE(value != nullptr);
data_updated_ = true;
delete value;
}
bool data_updated_;
};
TEST_F(RetrieverTest, UseStaleDataWhenSourceFails) {
auto* stale_storage = new StaleStorage;
Retriever resilient_retriever(new MockSource, stale_storage);
resilient_retriever.Retrieve(kKey, *data_ready_);
EXPECT_TRUE(success_);
EXPECT_EQ(kKey, key_);
EXPECT_EQ(kStaleData, data_);
EXPECT_FALSE(stale_storage->data_updated_);
}
TEST_F(RetrieverTest, DoNotUseStaleDataWhenSourceSucceeds) {
auto* stale_storage = new StaleStorage;
Retriever resilient_retriever(new TestdataSource(false), stale_storage);
resilient_retriever.Retrieve(kKey, *data_ready_);
EXPECT_TRUE(success_);
EXPECT_EQ(kKey, key_);
EXPECT_FALSE(data_.empty());
EXPECT_NE(kEmptyData, data_);
EXPECT_NE(kStaleData, data_);
EXPECT_TRUE(stale_storage->data_updated_);
}
} |
503 | cpp | google/libaddressinput | preload_supplier | cpp/src/preload_supplier.cc | cpp/test/preload_supplier_test.cc | #ifndef I18N_ADDRESSINPUT_PRELOAD_SUPPLIER_H_
#define I18N_ADDRESSINPUT_PRELOAD_SUPPLIER_H_
#include <libaddressinput/callback.h>
#include <libaddressinput/supplier.h>
#include <cstddef>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <vector>
namespace i18n {
namespace addressinput {
class IndexMap;
class LookupKey;
class Retriever;
class Rule;
class Source;
class Storage;
class PreloadSupplier : public Supplier {
public:
using Callback = i18n::addressinput::Callback<const std::string&, int>;
PreloadSupplier(const PreloadSupplier&) = delete;
PreloadSupplier& operator=(const PreloadSupplier&) = delete;
PreloadSupplier(const Source* source, Storage* storage);
~PreloadSupplier() override;
void Supply(const LookupKey& lookup_key,
const Supplier::Callback& supplied) override;
void SupplyGlobally(const LookupKey& lookup_key,
const Supplier::Callback& supplied) override;
const Rule* GetRule(const LookupKey& lookup_key) const;
void LoadRules(const std::string& region_code, const Callback& loaded);
const std::map<std::string, const Rule*>& GetRulesForRegion(
const std::string& region_code) const;
bool IsLoaded(const std::string& region_code) const;
bool IsPending(const std::string& region_code) const;
size_t GetLoadedRuleDepth(const std::string& region_code) const override;
private:
bool GetRuleHierarchy(const LookupKey& lookup_key, RuleHierarchy* hierarchy,
const bool search_globally) const;
bool IsLoadedKey(const std::string& key) const;
bool IsPendingKey(const std::string& key) const;
const std::unique_ptr<const Retriever> retriever_;
std::set<std::string> pending_;
const std::unique_ptr<IndexMap> rule_index_;
const std::unique_ptr<IndexMap> language_rule_index_;
std::vector<const Rule*> rule_storage_;
std::map<std::string, std::map<std::string, const Rule*> > region_rules_;
};
}
}
#endif
#include <libaddressinput/preload_supplier.h>
#include <libaddressinput/address_data.h>
#include <libaddressinput/address_field.h>
#include <libaddressinput/callback.h>
#include <libaddressinput/supplier.h>
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <map>
#include <memory>
#include <set>
#include <stack>
#include <string>
#include <vector>
#include "lookup_key.h"
#include "region_data_constants.h"
#include "retriever.h"
#include "rule.h"
#include "util/json.h"
#include "util/size.h"
#include "util/string_compare.h"
namespace i18n {
namespace addressinput {
namespace {
class IndexLess {
public:
bool operator()(const std::string& a, const std::string& b) const {
static const StringCompare kStringCompare;
return kStringCompare.NaturalLess(a, b);
}
};
}
class IndexMap : public std::map<std::string, const Rule*, IndexLess> {};
namespace {
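// Self-deleting helper that retrieves a whole region's rule dictionary,
// parses every sub-dictionary into a Rule, and indexes each rule both by its
// canonical id (e.g. "data/CA/NB") and by human-readable ids built from the
// rules' names (e.g. "data/CA/New Brunswick").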
class Helper {
public:
Helper(const Helper&) = delete;
Helper& operator=(const Helper&) = delete;
Helper(const std::string& region_code, const std::string& key,
const PreloadSupplier::Callback& loaded, const Retriever& retriever,
std::set<std::string>* pending, IndexMap* rule_index,
IndexMap* language_rule_index, std::vector<const Rule*>* rule_storage,
std::map<std::string, const Rule*>* region_rules)
: region_code_(region_code),
loaded_(loaded),
pending_(pending),
rule_index_(rule_index),
language_rule_index_(language_rule_index),
rule_storage_(rule_storage),
region_rules_(region_rules),
retrieved_(BuildCallback(this, &Helper::OnRetrieved)) {
assert(pending_ != nullptr);
assert(rule_index_ != nullptr);
assert(rule_storage_ != nullptr);
assert(region_rules_ != nullptr);
assert(retrieved_ != nullptr);
pending_->insert(key);
retriever.Retrieve(key, *retrieved_);
}
private:
~Helper() = default;
void OnRetrieved(bool success, const std::string& key,
const std::string& data) {
int rule_count = 0;
size_t status = pending_->erase(key);
assert(status == 1);
(void)status;
Json json;
std::string id;
std::vector<const Rule*> sub_rules;
auto last_index_it = rule_index_->end();
auto last_latin_it = rule_index_->end();
auto language_index_it = language_rule_index_->end();
auto last_region_it = region_rules_->end();
IndexMap::const_iterator hints[size(LookupKey::kHierarchy) - 1];
std::fill(hints, hints + size(hints), rule_index_->end());
if (!success) {
goto callback;
}
if (!json.ParseObject(data)) {
success = false;
goto callback;
}
for (auto ptr : json.GetSubDictionaries()) {
assert(ptr != nullptr);
if (!ptr->GetStringValueForKey("id", &id)) {
success = false;
goto callback;
}
assert(!id.empty());
size_t depth = std::count(id.begin(), id.end(), '/') - 1;
assert(depth < size(LookupKey::kHierarchy));
AddressField field = LookupKey::kHierarchy[depth];
auto* rule = new Rule;
if (field == COUNTRY) {
rule->CopyFrom(Rule::GetDefault());
}
rule->ParseJsonRule(*ptr);
assert(id == rule->GetId());
rule_storage_->push_back(rule);
if (depth > 0) {
sub_rules.push_back(rule);
}
last_index_it = rule_index_->emplace_hint(last_index_it, id, rule);
last_region_it = region_rules_->emplace_hint(last_region_it, id, rule);
++rule_count;
}
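// For every non-country rule, walk up its id to collect the full parent
// chain, then synthesize human-readable and Latin-name index keys for it.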
for (auto ptr : sub_rules) {
assert(ptr != nullptr);
std::stack<const Rule*> hierarchy;
hierarchy.push(ptr);
for (std::string parent_id(ptr->GetId());;) {
std::string::size_type pos = parent_id.rfind('/');
if (pos == sizeof "data/ZZ" - 1) {
break;
}
parent_id.resize(pos);
IndexMap::const_iterator* const hint = &hints[hierarchy.size() - 1];
if (*hint == rule_index_->end() || (*hint)->first != parent_id) {
*hint = rule_index_->find(parent_id);
}
assert(*hint != rule_index_->end());
hierarchy.push((*hint)->second);
}
std::string human_id(ptr->GetId().substr(0, sizeof "data/ZZ" - 1));
std::string latin_id(human_id);
for (; !hierarchy.empty(); hierarchy.pop()) {
const Rule* rule = hierarchy.top();
human_id.push_back('/');
if (!rule->GetName().empty()) {
human_id.append(rule->GetName());
} else {
const std::string& id = rule->GetId();
std::string::size_type pos = id.rfind('/');
assert(pos != std::string::npos);
human_id.append(id.substr(pos + 1));
}
if (!rule->GetLatinName().empty()) {
latin_id.push_back('/');
latin_id.append(rule->GetLatinName());
}
}
{
const std::string& id = ptr->GetId();
std::string::size_type pos = id.rfind("--");
if (pos != std::string::npos) {
language_index_it = language_rule_index_->emplace_hint(
language_index_it, human_id, ptr);
human_id.append(id, pos, id.size() - pos);
}
}
last_index_it = rule_index_->emplace_hint(last_index_it, human_id, ptr);
if (std::count(human_id.begin(), human_id.end(), '/') ==
std::count(latin_id.begin(), latin_id.end(), '/')) {
last_latin_it = rule_index_->emplace_hint(last_latin_it, latin_id, ptr);
}
}
callback:
loaded_(success, region_code_, rule_count);
delete this;
}
const std::string region_code_;
const PreloadSupplier::Callback& loaded_;
std::set<std::string>* const pending_;
IndexMap* const rule_index_;
IndexMap* const language_rule_index_;
std::vector<const Rule*>* const rule_storage_;
std::map<std::string, const Rule*>* const region_rules_;
const std::unique_ptr<const Retriever::Callback> retrieved_;
};
std::string KeyFromRegionCode(const std::string& region_code) {
AddressData address;
address.region_code = region_code;
LookupKey lookup_key;
lookup_key.FromAddress(address);
return lookup_key.ToKeyString(0);
}
}
PreloadSupplier::PreloadSupplier(const Source* source, Storage* storage)
: retriever_(new Retriever(source, storage)),
pending_(),
rule_index_(new IndexMap),
language_rule_index_(new IndexMap),
rule_storage_(),
region_rules_() {}
PreloadSupplier::~PreloadSupplier() {
for (auto ptr : rule_storage_) {
delete ptr;
}
}
void PreloadSupplier::Supply(const LookupKey& lookup_key,
const Supplier::Callback& supplied) {
Supplier::RuleHierarchy hierarchy;
bool success = GetRuleHierarchy(lookup_key, &hierarchy, false);
supplied(success, lookup_key, hierarchy);
}
void PreloadSupplier::SupplyGlobally(const LookupKey& lookup_key,
const Supplier::Callback& supplied) {
Supplier::RuleHierarchy hierarchy;
bool success = GetRuleHierarchy(lookup_key, &hierarchy, true);
supplied(success, lookup_key, hierarchy);
}
const Rule* PreloadSupplier::GetRule(const LookupKey& lookup_key) const {
assert(IsLoaded(lookup_key.GetRegionCode()));
Supplier::RuleHierarchy hierarchy;
if (!GetRuleHierarchy(lookup_key, &hierarchy, false)) {
return nullptr;
}
return hierarchy.rule[lookup_key.GetDepth()];
}
void PreloadSupplier::LoadRules(const std::string& region_code,
const Callback& loaded) {
const std::string key = KeyFromRegionCode(region_code);
if (IsLoadedKey(key)) {
loaded(true, region_code, 0);
return;
}
if (IsPendingKey(key)) {
return;
}
new Helper(region_code, key, loaded, *retriever_, &pending_,
rule_index_.get(), language_rule_index_.get(), &rule_storage_,
&region_rules_[region_code]);
}
const std::map<std::string, const Rule*>& PreloadSupplier::GetRulesForRegion(
const std::string& region_code) const {
assert(IsLoaded(region_code));
return region_rules_.find(region_code)->second;
}
bool PreloadSupplier::IsLoaded(const std::string& region_code) const {
return IsLoadedKey(KeyFromRegionCode(region_code));
}
bool PreloadSupplier::IsPending(const std::string& region_code) const {
return IsPendingKey(KeyFromRegionCode(region_code));
}
bool PreloadSupplier::GetRuleHierarchy(const LookupKey& lookup_key,
RuleHierarchy* hierarchy,
const bool search_globally) const {
assert(hierarchy != nullptr);
if (RegionDataConstants::IsSupported(lookup_key.GetRegionCode())) {
size_t max_depth = std::min(
lookup_key.GetDepth(),
RegionDataConstants::GetMaxLookupKeyDepth(lookup_key.GetRegionCode()));
for (size_t depth = 0; depth <= max_depth; ++depth) {
const std::string key = lookup_key.ToKeyString(depth);
const Rule* rule = nullptr;
auto it = rule_index_->find(key);
if (it != rule_index_->end()) {
rule = it->second;
} else if (search_globally && depth > 0 &&
!hierarchy->rule[0]->GetLanguages().empty()) {
it = language_rule_index_->find(key);
if (it != language_rule_index_->end()) {
rule = it->second;
}
}
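// Missing a rule at the country level (depth 0) is a failure; deeper levels
// may legitimately be absent, so a partial hierarchy still counts as success.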
if (rule == nullptr) {
return depth > 0;
}
hierarchy->rule[depth] = rule;
}
}
return true;
}
size_t PreloadSupplier::GetLoadedRuleDepth(
const std::string& region_code) const {
const size_t code_size = 7;  // length of "data/XX"
std::string full_code = region_code.substr(0, code_size);
size_t depth = 0;
auto it = rule_index_->find(full_code);
while (it != rule_index_->end()) {
const Rule* rule = it->second;
depth++;
if (rule->GetSubKeys().empty()) return depth;
full_code += "/" + rule->GetSubKeys()[0];
it = rule_index_->find(full_code);
}
return depth;
}
bool PreloadSupplier::IsLoadedKey(const std::string& key) const {
return rule_index_->find(key) != rule_index_->end();
}
bool PreloadSupplier::IsPendingKey(const std::string& key) const {
return pending_.find(key) != pending_.end();
}
}
} | #include <libaddressinput/preload_supplier.h>
#include <libaddressinput/address_data.h>
#include <libaddressinput/callback.h>
#include <libaddressinput/null_storage.h>
#include <libaddressinput/supplier.h>
#include <cstddef>
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "lookup_key.h"
#include "rule.h"
#include "testdata_source.h"
namespace {
using i18n::addressinput::AddressData;
using i18n::addressinput::BuildCallback;
using i18n::addressinput::LookupKey;
using i18n::addressinput::NullStorage;
using i18n::addressinput::PreloadSupplier;
using i18n::addressinput::Rule;
using i18n::addressinput::Supplier;
using i18n::addressinput::TestdataSource;
class PreloadSupplierTest : public testing::Test {
public:
PreloadSupplierTest(const PreloadSupplierTest&) = delete;
PreloadSupplierTest& operator=(const PreloadSupplierTest&) = delete;
protected:
PreloadSupplierTest()
: supplier_(new TestdataSource(true), new NullStorage),
loaded_callback_(BuildCallback(this, &PreloadSupplierTest::OnLoaded)),
supplied_callback_(
BuildCallback(this, &PreloadSupplierTest::OnSupplied)) {}
PreloadSupplier supplier_;
const std::unique_ptr<const PreloadSupplier::Callback> loaded_callback_;
const std::unique_ptr<const Supplier::Callback> supplied_callback_;
Supplier::RuleHierarchy hierarchy_;
private:
void OnLoaded(bool success, const std::string& region_code, int num_rules) {
ASSERT_TRUE(success);
ASSERT_FALSE(region_code.empty());
ASSERT_LT(0, num_rules);
ASSERT_TRUE(supplier_.IsLoaded(region_code));
}
void OnSupplied(bool success, const LookupKey& lookup_key,
const Supplier::RuleHierarchy& hierarchy) {
ASSERT_TRUE(success);
hierarchy_ = hierarchy;
}
};
TEST_F(PreloadSupplierTest, GetUsRule) {
supplier_.LoadRules("US", *loaded_callback_);
LookupKey us_key;
const AddressData us_address{.region_code = "US"};
us_key.FromAddress(us_address);
const Rule* rule = supplier_.GetRule(us_key);
ASSERT_TRUE(rule != nullptr);
EXPECT_EQ("data/US", rule->GetId());
}
TEST_F(PreloadSupplierTest, GetUsCaRule) {
supplier_.LoadRules("US", *loaded_callback_);
LookupKey ca_key;
const AddressData ca_address{
.region_code = "US",
.administrative_area = "CA",
};
ca_key.FromAddress(ca_address);
const Rule* rule = supplier_.GetRule(ca_key);
ASSERT_TRUE(rule != nullptr);
EXPECT_EQ("data/US/CA", rule->GetId());
}
TEST_F(PreloadSupplierTest, GetUsCaliforniaRule) {
supplier_.LoadRules("US", *loaded_callback_);
LookupKey ca_key;
const AddressData ca_address{
.region_code = "US",
.administrative_area = "California",
};
ca_key.FromAddress(ca_address);
const Rule* rule = supplier_.GetRule(ca_key);
ASSERT_TRUE(rule != nullptr);
EXPECT_EQ("data/US/CA", rule->GetId());
}
TEST_F(PreloadSupplierTest, GetZwRule) {
supplier_.LoadRules("ZW", *loaded_callback_);
LookupKey zw_key;
const AddressData zw_address{.region_code = "ZW"};
zw_key.FromAddress(zw_address);
const Rule* rule = supplier_.GetRule(zw_key);
ASSERT_TRUE(rule != nullptr);
EXPECT_EQ("data/ZW", rule->GetId());
}
TEST_F(PreloadSupplierTest, GetUnknownRule) {
supplier_.LoadRules("US", *loaded_callback_);
LookupKey unknown_key;
const AddressData unknown_address{
.region_code = "US",
.administrative_area = "ZZ",
};
unknown_key.FromAddress(unknown_address);
const Rule* rule = supplier_.GetRule(unknown_key);
EXPECT_TRUE(rule == nullptr);
}
TEST_F(PreloadSupplierTest, GetTooPreciseRule) {
supplier_.LoadRules("US", *loaded_callback_);
LookupKey precise_key;
const AddressData precise_address{
.region_code = "US",
.administrative_area = "CA",
.locality = "Mountain View",
};
precise_key.FromAddress(precise_address);
const Rule* rule = supplier_.GetRule(precise_key);
EXPECT_TRUE(rule == nullptr);
}
TEST_F(PreloadSupplierTest, GetRulesForRegion) {
supplier_.LoadRules("CN", *loaded_callback_);
const auto& rules = supplier_.GetRulesForRegion("CN");
EXPECT_TRUE(rules.find("data/CN") != rules.end());
EXPECT_LT(1U, rules.size());
}
TEST_F(PreloadSupplierTest, SupplyRegionCode) {
supplier_.LoadRules("CA", *loaded_callback_);
LookupKey key;
const AddressData address{
.region_code = "CA",
.administrative_area = "NB",
};
key.FromAddress(address);
supplier_.Supply(key, *supplied_callback_);
ASSERT_TRUE(hierarchy_.rule[0] != nullptr);
EXPECT_EQ(key.ToKeyString(0), hierarchy_.rule[0]->GetId());
ASSERT_TRUE(hierarchy_.rule[1] != nullptr);
EXPECT_EQ("data/CA/NB", hierarchy_.rule[1]->GetId());
EXPECT_TRUE(hierarchy_.rule[2] == nullptr);
EXPECT_TRUE(hierarchy_.rule[3] == nullptr);
}
TEST_F(PreloadSupplierTest, SupplyGloballyRegionCode) {
supplier_.LoadRules("CA", *loaded_callback_);
LookupKey key;
const AddressData address{
.region_code = "CA",
.administrative_area = "NB",
};
key.FromAddress(address);
supplier_.SupplyGlobally(key, *supplied_callback_);
ASSERT_TRUE(hierarchy_.rule[0] != nullptr);
EXPECT_EQ(key.ToKeyString(0), hierarchy_.rule[0]->GetId());
ASSERT_TRUE(hierarchy_.rule[1] != nullptr);
EXPECT_EQ("data/CA/NB", hierarchy_.rule[1]->GetId());
EXPECT_TRUE(hierarchy_.rule[2] == nullptr);
EXPECT_TRUE(hierarchy_.rule[3] == nullptr);
}
TEST_F(PreloadSupplierTest, SupplyRegionName) {
supplier_.LoadRules("CA", *loaded_callback_);
LookupKey key;
const AddressData address{
.region_code = "CA",
.administrative_area = "New Brunswick",
};
key.FromAddress(address);
supplier_.Supply(key, *supplied_callback_);
ASSERT_TRUE(hierarchy_.rule[0] != nullptr);
EXPECT_EQ(key.ToKeyString(0), hierarchy_.rule[0]->GetId());
ASSERT_TRUE(hierarchy_.rule[1] != nullptr);
EXPECT_EQ("data/CA/NB", hierarchy_.rule[1]->GetId());
EXPECT_TRUE(hierarchy_.rule[2] == nullptr);
EXPECT_TRUE(hierarchy_.rule[3] == nullptr);
}
TEST_F(PreloadSupplierTest, SupplyGloballyRegionName) {
supplier_.LoadRules("CA", *loaded_callback_);
LookupKey key;
const AddressData address{
.region_code = "CA",
.administrative_area = "New Brunswick",
};
key.FromAddress(address);
supplier_.SupplyGlobally(key, *supplied_callback_);
ASSERT_TRUE(hierarchy_.rule[0] != nullptr);
EXPECT_EQ(key.ToKeyString(0), hierarchy_.rule[0]->GetId());
ASSERT_TRUE(hierarchy_.rule[1] != nullptr);
EXPECT_EQ("data/CA/NB", hierarchy_.rule[1]->GetId());
EXPECT_TRUE(hierarchy_.rule[2] == nullptr);
EXPECT_TRUE(hierarchy_.rule[3] == nullptr);
}
TEST_F(PreloadSupplierTest, SupplyRegionNameLanguage) {
supplier_.LoadRules("CA", *loaded_callback_);
LookupKey key;
const AddressData address{
.region_code = "CA",
.administrative_area = "Nouveau-Brunswick",
};
key.FromAddress(address);
supplier_.Supply(key, *supplied_callback_);
ASSERT_TRUE(hierarchy_.rule[0] != nullptr);
EXPECT_EQ(key.ToKeyString(0), hierarchy_.rule[0]->GetId());
EXPECT_TRUE(hierarchy_.rule[1] == nullptr);
EXPECT_TRUE(hierarchy_.rule[2] == nullptr);
EXPECT_TRUE(hierarchy_.rule[3] == nullptr);
}
TEST_F(PreloadSupplierTest, SupplyRegionNameLanguageSet) {
supplier_.LoadRules("CA", *loaded_callback_);
LookupKey key;
const AddressData address{
.region_code = "CA",
.administrative_area = "Nouveau-Brunswick",
.language_code = "fr",
};
key.FromAddress(address);
supplier_.Supply(key, *supplied_callback_);
ASSERT_TRUE(hierarchy_.rule[0] != nullptr);
EXPECT_EQ(key.ToKeyString(0), hierarchy_.rule[0]->GetId());
ASSERT_TRUE(hierarchy_.rule[1] != nullptr);
EXPECT_EQ("data/CA/NB--fr", hierarchy_.rule[1]->GetId());
EXPECT_TRUE(hierarchy_.rule[2] == nullptr);
EXPECT_TRUE(hierarchy_.rule[3] == nullptr);
}
TEST_F(PreloadSupplierTest, SupplyGloballyRegionNameLanguage) {
supplier_.LoadRules("CA", *loaded_callback_);
LookupKey key;
const AddressData address{
.region_code = "CA",
.administrative_area = "Nouveau-Brunswick",
};
key.FromAddress(address);
supplier_.SupplyGlobally(key, *supplied_callback_);
ASSERT_TRUE(hierarchy_.rule[0] != nullptr);
EXPECT_EQ(key.ToKeyString(0), hierarchy_.rule[0]->GetId());
ASSERT_TRUE(hierarchy_.rule[1] != nullptr);
EXPECT_EQ("data/CA/NB--fr", hierarchy_.rule[1]->GetId());
EXPECT_TRUE(hierarchy_.rule[2] == nullptr);
EXPECT_TRUE(hierarchy_.rule[3] == nullptr);
}
TEST_F(PreloadSupplierTest, SupplyRegionNameHK) {
supplier_.LoadRules("HK", *loaded_callback_);
LookupKey key;
const AddressData address{
.region_code = "HK",
.administrative_area = "新界",
.locality = "大嶼山石壁",
};
key.FromAddress(address);
supplier_.Supply(key, *supplied_callback_);
ASSERT_TRUE(hierarchy_.rule[0] != nullptr);
EXPECT_EQ(key.ToKeyString(0), hierarchy_.rule[0]->GetId());
ASSERT_TRUE(hierarchy_.rule[1] != nullptr);
EXPECT_EQ("data/HK/新界", hierarchy_.rule[1]->GetId());
ASSERT_TRUE(hierarchy_.rule[2] != nullptr);
EXPECT_EQ("data/HK/新界/大嶼山石壁", hierarchy_.rule[2]->GetId());
EXPECT_TRUE(hierarchy_.rule[3] == nullptr);
}
TEST_F(PreloadSupplierTest, SupplyGloballyRegionNameHKEnglish) {
supplier_.LoadRules("HK", *loaded_callback_);
LookupKey key;
const AddressData address{
.region_code = "HK",
.administrative_area = "New Territories",
.locality = "Tsing Yi",
};
key.FromAddress(address);
supplier_.SupplyGlobally(key, *supplied_callback_);
ASSERT_TRUE(hierarchy_.rule[0] != nullptr);
EXPECT_EQ(key.ToKeyString(0), hierarchy_.rule[0]->GetId());
ASSERT_TRUE(hierarchy_.rule[1] != nullptr);
EXPECT_EQ("data/HK/New Territories--en", hierarchy_.rule[1]->GetId());
ASSERT_TRUE(hierarchy_.rule[2] != nullptr);
EXPECT_EQ("data/HK/New Territories/Tsing Yi--en",
hierarchy_.rule[2]->GetId());
EXPECT_TRUE(hierarchy_.rule[3] == nullptr);
}
TEST_F(PreloadSupplierTest, SupplyRegionNameAllLevels) {
supplier_.LoadRules("CN", *loaded_callback_);
LookupKey key;
const AddressData address{
.region_code = "CN",
.administrative_area = "云南省",
.locality = "临沧市",
.dependent_locality = "临翔区",
};
key.FromAddress(address);
supplier_.Supply(key, *supplied_callback_);
ASSERT_TRUE(hierarchy_.rule[0] != nullptr);
EXPECT_EQ(key.ToKeyString(0), hierarchy_.rule[0]->GetId());
ASSERT_TRUE(hierarchy_.rule[1] != nullptr);
EXPECT_EQ("data/CN/云南省", hierarchy_.rule[1]->GetId());
ASSERT_TRUE(hierarchy_.rule[2] != nullptr);
EXPECT_EQ("data/CN/云南省/临沧市", hierarchy_.rule[2]->GetId());
ASSERT_TRUE(hierarchy_.rule[3] != nullptr);
EXPECT_EQ("data/CN/云南省/临沧市/临翔区", hierarchy_.rule[3]->GetId());
}
TEST_F(PreloadSupplierTest, GetLoadedRuleDepth) {
supplier_.LoadRules("CA", *loaded_callback_);
EXPECT_EQ(2, supplier_.GetLoadedRuleDepth("data/CA"));  // Loaded: 2 levels.
EXPECT_EQ(0, supplier_.GetLoadedRuleDepth("data/CN"));  // Not loaded yet.
supplier_.LoadRules("CN", *loaded_callback_);
EXPECT_EQ(4, supplier_.GetLoadedRuleDepth("data/CN"));  // Loaded: 4 levels.
EXPECT_EQ(0, supplier_.GetLoadedRuleDepth("data/PP"));  // Invalid region code.
}
} |
504 | cpp | google/libaddressinput | address_problem | cpp/src/address_problem.cc | cpp/test/address_problem_test.cc | #ifndef I18N_ADDRESSINPUT_ADDRESS_PROBLEM_H_
#define I18N_ADDRESSINPUT_ADDRESS_PROBLEM_H_
#include <iosfwd>
namespace i18n {
namespace addressinput {
enum AddressProblem {
UNEXPECTED_FIELD,
MISSING_REQUIRED_FIELD,
UNKNOWN_VALUE,
INVALID_FORMAT,
MISMATCHING_VALUE,
USES_P_O_BOX,
UNSUPPORTED_FIELD
};
}
}
std::ostream& operator<<(std::ostream& o,
i18n::addressinput::AddressProblem problem);
#endif
#include <libaddressinput/address_problem.h>
#include <cstddef>
#include <ostream>
#include "util/size.h"
using i18n::addressinput::AddressProblem;
using i18n::addressinput::size;
using i18n::addressinput::UNEXPECTED_FIELD;
using i18n::addressinput::UNSUPPORTED_FIELD;
std::ostream& operator<<(std::ostream& o, AddressProblem problem) {
static const char* const kProblemNames[] = {
"UNEXPECTED_FIELD", "MISSING_REQUIRED_FIELD", "UNKNOWN_VALUE",
"INVALID_FORMAT", "MISMATCHING_VALUE", "USES_P_O_BOX",
"UNSUPPORTED_FIELD",
};
static_assert(UNEXPECTED_FIELD == 0, "bad_base");
static_assert(UNSUPPORTED_FIELD == size(kProblemNames) - 1, "bad_length");
if (problem < 0 || static_cast<size_t>(problem) >= size(kProblemNames)) {
o << "[INVALID ENUM VALUE " << static_cast<int>(problem) << "]";
} else {
o << kProblemNames[problem];
}
return o;
} | #include <libaddressinput/address_problem.h>
#include <sstream>
#include <gtest/gtest.h>
namespace {
using i18n::addressinput::UNKNOWN_VALUE;
TEST(AddressProblemTest, ValidEnumValue) {
std::ostringstream oss;
oss << UNKNOWN_VALUE;
EXPECT_EQ("UNKNOWN_VALUE", oss.str());
}
} |
505 | cpp | google/libaddressinput | validating_storage | cpp/src/validating_storage.cc | cpp/test/validating_storage_test.cc | #ifndef I18N_ADDRESSINPUT_VALIDATING_STORAGE_H_
#define I18N_ADDRESSINPUT_VALIDATING_STORAGE_H_
#include <libaddressinput/storage.h>
#include <memory>
#include <string>
namespace i18n {
namespace addressinput {
class ValidatingStorage : public Storage {
public:
ValidatingStorage(const ValidatingStorage&) = delete;
ValidatingStorage& operator=(const ValidatingStorage&) = delete;
explicit ValidatingStorage(Storage* storage);
~ValidatingStorage() override;
void Put(const std::string& key, std::string* data) override;
void Get(const std::string& key, const Callback& data_ready) const override;
private:
std::unique_ptr<Storage> wrapped_storage_;
};
}
}
#endif
#include "validating_storage.h"
#include <libaddressinput/callback.h>
#include <libaddressinput/storage.h>
#include <cassert>
#include <cstddef>
#include <ctime>
#include <memory>
#include <string>
#include "validating_util.h"
namespace i18n {
namespace addressinput {
namespace {
class Helper {
public:
Helper(const Helper&) = delete;
Helper& operator=(const Helper&) = delete;
Helper(const std::string& key,
const ValidatingStorage::Callback& data_ready,
const Storage& wrapped_storage)
: data_ready_(data_ready),
wrapped_data_ready_(BuildCallback(this, &Helper::OnWrappedDataReady)) {
wrapped_storage.Get(key, *wrapped_data_ready_);
}
private:
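  // Intentionally private: a Helper owns itself and calls "delete this" from
  // OnWrappedDataReady() once the wrapped lookup completes.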
~Helper() = default;
void OnWrappedDataReady(bool success,
const std::string& key,
std::string* data) {
if (success) {
assert(data != nullptr);
bool is_stale =
!ValidatingUtil::UnwrapTimestamp(data, std::time(nullptr));
bool is_corrupted = !ValidatingUtil::UnwrapChecksum(data);
success = !is_corrupted && !is_stale;
if (is_corrupted) {
delete data;
data = nullptr;
}
} else {
delete data;
data = nullptr;
}
data_ready_(success, key, data);
delete this;
}
const Storage::Callback& data_ready_;
const std::unique_ptr<const Storage::Callback> wrapped_data_ready_;
};
}
ValidatingStorage::ValidatingStorage(Storage* storage)
: wrapped_storage_(storage) {
assert(wrapped_storage_ != nullptr);
}
ValidatingStorage::~ValidatingStorage() = default;
void ValidatingStorage::Put(const std::string& key, std::string* data) {
assert(data != nullptr);
ValidatingUtil::Wrap(std::time(nullptr), data);
wrapped_storage_->Put(key, data);
}
void ValidatingStorage::Get(const std::string& key,
const Callback& data_ready) const {
new Helper(key, data_ready, *wrapped_storage_);
}
}
} | #include "validating_storage.h"
#include <libaddressinput/callback.h>
#include <libaddressinput/storage.h>
#include <cstddef>
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "fake_storage.h"
#define CHECKSUM "dd63dafcbd4d5b28badfcaf86fb6fcdb"
#define DATA "{'foo': 'bar'}"
#define OLD_TIMESTAMP "0"
namespace {
using i18n::addressinput::BuildCallback;
using i18n::addressinput::FakeStorage;
using i18n::addressinput::Storage;
using i18n::addressinput::ValidatingStorage;
const char kKey[] = "key";
const char kValidatedData[] = DATA;
const char kStaleWrappedData[] = "timestamp=" OLD_TIMESTAMP "\n"
"checksum=" CHECKSUM "\n"
DATA;
const char kEmptyData[] = "";
class ValidatingStorageTest : public testing::Test {
public:
ValidatingStorageTest(const ValidatingStorageTest&) = delete;
ValidatingStorageTest& operator=(const ValidatingStorageTest&) = delete;
protected:
ValidatingStorageTest()
: wrapped_storage_(new FakeStorage),
storage_(wrapped_storage_),
success_(false),
key_(),
data_(),
data_ready_(BuildCallback(this, &ValidatingStorageTest::OnDataReady)) {}
Storage* const wrapped_storage_;
ValidatingStorage storage_;
bool success_;
std::string key_;
std::string data_;
const std::unique_ptr<const ValidatingStorage::Callback> data_ready_;
private:
void OnDataReady(bool success, const std::string& key, std::string* data) {
ASSERT_FALSE(success && data == nullptr);
success_ = success;
key_ = key;
if (data != nullptr) {
data_ = *data;
delete data;
}
}
};
TEST_F(ValidatingStorageTest, GoodData) {
storage_.Put(kKey, new std::string(kValidatedData));
storage_.Get(kKey, *data_ready_);
EXPECT_TRUE(success_);
EXPECT_EQ(kKey, key_);
EXPECT_EQ(kValidatedData, data_);
}
TEST_F(ValidatingStorageTest, EmptyData) {
storage_.Put(kKey, new std::string(kEmptyData));
storage_.Get(kKey, *data_ready_);
EXPECT_TRUE(success_);
EXPECT_EQ(kKey, key_);
EXPECT_EQ(kEmptyData, data_);
}
TEST_F(ValidatingStorageTest, MissingKey) {
storage_.Get(kKey, *data_ready_);
EXPECT_FALSE(success_);
EXPECT_EQ(kKey, key_);
EXPECT_TRUE(data_.empty());
}
TEST_F(ValidatingStorageTest, GarbageData) {
storage_.Put(kKey, new std::string(kValidatedData));
wrapped_storage_->Put(kKey, new std::string("garbage"));
storage_.Get(kKey, *data_ready_);
EXPECT_FALSE(success_);
EXPECT_EQ(kKey, key_);
EXPECT_TRUE(data_.empty());
}
TEST_F(ValidatingStorageTest, StaleData) {
storage_.Put(kKey, new std::string(kValidatedData));
wrapped_storage_->Put(kKey, new std::string(kStaleWrappedData));
storage_.Get(kKey, *data_ready_);
EXPECT_FALSE(success_);
EXPECT_EQ(kKey, key_);
EXPECT_EQ(kValidatedData, data_);
}
} |
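A hedged usage sketch of ValidatingStorage outside the test fixture; Client and main are hypothetical names, and FakeStorage (the test-only backend above) is used as the wrapped store, which the constructor takes ownership of.

#include <libaddressinput/callback.h>
#include <iostream>
#include <memory>
#include <string>
#include "fake_storage.h"
#include "validating_storage.h"

using i18n::addressinput::BuildCallback;
using i18n::addressinput::FakeStorage;
using i18n::addressinput::ValidatingStorage;

class Client {
 public:
  Client() : data_ready_(BuildCallback(this, &Client::OnDataReady)) {}

  void Run(ValidatingStorage* storage) {
    // Put() prepends a timestamp and checksum before delegating.
    storage->Put("key", new std::string("{'foo': 'bar'}"));
    // Get() verifies and strips the wrapping; stale or corrupted entries
    // come back as failures.
    storage->Get("key", *data_ready_);
  }

 private:
  void OnDataReady(bool success, const std::string& key, std::string* data) {
    if (success) std::cout << key << ": " << *data << std::endl;
    delete data;
  }

  const std::unique_ptr<const ValidatingStorage::Callback> data_ready_;
};

int main() {
  ValidatingStorage storage(new FakeStorage);  // Takes ownership.
  Client client;
  client.Run(&storage);
  return 0;
}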
506 | cpp | google/libaddressinput | ondemand_supply_task | cpp/src/ondemand_supply_task.cc | cpp/test/ondemand_supply_task_test.cc | #ifndef I18N_ADDRESSINPUT_ONDEMAND_SUPPLY_TASK_H_
#define I18N_ADDRESSINPUT_ONDEMAND_SUPPLY_TASK_H_
#include <libaddressinput/supplier.h>
#include <map>
#include <memory>
#include <set>
#include <string>
#include "retriever.h"
namespace i18n {
namespace addressinput {
class LookupKey;
class Rule;
class OndemandSupplyTask {
public:
OndemandSupplyTask(const OndemandSupplyTask&) = delete;
OndemandSupplyTask& operator=(const OndemandSupplyTask&) = delete;
OndemandSupplyTask(const LookupKey& lookup_key,
std::map<std::string, const Rule*>* rules,
const Supplier::Callback& supplied);
~OndemandSupplyTask();
void Queue(const std::string& key);
void Retrieve(const Retriever& retriever);
Supplier::RuleHierarchy hierarchy_;
private:
void Load(bool success, const std::string& key, const std::string& data);
void Loaded();
std::set<std::string> pending_;
const LookupKey& lookup_key_;
std::map<std::string, const Rule*>* const rule_cache_;
const Supplier::Callback& supplied_;
const std::unique_ptr<const Retriever::Callback> retrieved_;
bool success_;
};
}
}
#endif
#include "ondemand_supply_task.h"
#include <libaddressinput/address_field.h>
#include <libaddressinput/callback.h>
#include <libaddressinput/supplier.h>
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <map>
#include <string>
#include "lookup_key.h"
#include "retriever.h"
#include "rule.h"
#include "util/size.h"
namespace i18n {
namespace addressinput {
OndemandSupplyTask::OndemandSupplyTask(
const LookupKey& lookup_key,
std::map<std::string, const Rule*>* rules,
const Supplier::Callback& supplied)
: hierarchy_(),
pending_(),
lookup_key_(lookup_key),
rule_cache_(rules),
supplied_(supplied),
retrieved_(BuildCallback(this, &OndemandSupplyTask::Load)),
success_(true) {
assert(rule_cache_ != nullptr);
assert(retrieved_ != nullptr);
}
OndemandSupplyTask::~OndemandSupplyTask() = default;
void OndemandSupplyTask::Queue(const std::string& key) {
assert(pending_.find(key) == pending_.end());
pending_.insert(key);
}
void OndemandSupplyTask::Retrieve(const Retriever& retriever) {
if (pending_.empty()) {
Loaded();
} else {
bool done = false;
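    // Note: Retrieve() may invoke Load() synchronously, which erases the
    // current key from pending_ and can delete this task, so the iterator is
    // advanced and |done| computed before each request is issued.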
for (auto it = pending_.begin(); !done;) {
const std::string& key = *it++;
done = it == pending_.end();
retriever.Retrieve(key, *retrieved_);
}
}
}
void OndemandSupplyTask::Load(bool success,
const std::string& key,
const std::string& data) {
size_t depth = std::count(key.begin(), key.end(), '/') - 1;
assert(depth < size(LookupKey::kHierarchy));
size_t status = pending_.erase(key);
assert(status == 1);
(void)status;
if (success) {
if (data != "{}") {
auto* rule = new Rule;
if (LookupKey::kHierarchy[depth] == COUNTRY) {
rule->CopyFrom(Rule::GetDefault());
}
if (rule->ParseSerializedRule(data)) {
auto result = rule_cache_->emplace(rule->GetId(), rule);
if (!result.second) {
delete rule;
}
hierarchy_.rule[depth] = result.first->second;
} else {
delete rule;
success_ = false;
}
}
} else {
success_ = false;
}
if (pending_.empty()) {
Loaded();
}
}
void OndemandSupplyTask::Loaded() {
supplied_(success_, lookup_key_, hierarchy_);
delete this;
}
}
} | #include "ondemand_supply_task.h"
#include <libaddressinput/callback.h>
#include <libaddressinput/null_storage.h>
#include <libaddressinput/supplier.h>
#include <cstddef>
#include <cstring>
#include <map>
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "lookup_key.h"
#include "mock_source.h"
#include "retriever.h"
#include "rule.h"
#include "util/size.h"
namespace {
using i18n::addressinput::BuildCallback;
using i18n::addressinput::LookupKey;
using i18n::addressinput::MockSource;
using i18n::addressinput::NullStorage;
using i18n::addressinput::OndemandSupplyTask;
using i18n::addressinput::Retriever;
using i18n::addressinput::Rule;
using i18n::addressinput::Supplier;
class OndemandSupplyTaskTest : public testing::Test {
public:
OndemandSupplyTaskTest(const OndemandSupplyTaskTest&) = delete;
OndemandSupplyTaskTest& operator=(const OndemandSupplyTaskTest&) = delete;
protected:
OndemandSupplyTaskTest()
: success_(true),
lookup_key_(),
rule_(),
called_(false),
source_(new MockSource),
rule_cache_(),
retriever_(new Retriever(source_, new NullStorage)),
supplied_(BuildCallback(this, &OndemandSupplyTaskTest::Supplied)),
task_(new OndemandSupplyTask(lookup_key_, &rule_cache_, *supplied_)) {}
~OndemandSupplyTaskTest() override {
for (const auto& pair : rule_cache_) {
delete pair.second;
}
}
void Queue(const std::string& key) { task_->Queue(key); }
void Retrieve() { task_->Retrieve(*retriever_); }
bool success_;
LookupKey lookup_key_;
const Rule* rule_[size(LookupKey::kHierarchy)];
bool called_;
MockSource* const source_;
private:
void Supplied(bool success,
const LookupKey& lookup_key,
const Supplier::RuleHierarchy& hierarchy) {
ASSERT_EQ(success_, success);
ASSERT_EQ(&lookup_key_, &lookup_key);
ASSERT_EQ(&task_->hierarchy_, &hierarchy);
std::memcpy(rule_, hierarchy.rule, sizeof rule_);
called_ = true;
}
std::map<std::string, const Rule*> rule_cache_;
const std::unique_ptr<Retriever> retriever_;
const std::unique_ptr<const Supplier::Callback> supplied_;
OndemandSupplyTask* const task_;
};
TEST_F(OndemandSupplyTaskTest, Empty) {
ASSERT_NO_FATAL_FAILURE(Retrieve());
ASSERT_TRUE(called_);
EXPECT_TRUE(rule_[0] == nullptr);
EXPECT_TRUE(rule_[1] == nullptr);
EXPECT_TRUE(rule_[2] == nullptr);
EXPECT_TRUE(rule_[3] == nullptr);
}
TEST_F(OndemandSupplyTaskTest, Invalid) {
Queue("data/XA");
success_ = false;
ASSERT_NO_FATAL_FAILURE(Retrieve());
ASSERT_TRUE(called_);
}
TEST_F(OndemandSupplyTaskTest, Valid) {
source_->data_ = {{"data/XA", R"({"id":"data/XA"})"}};
Queue("data/XA");
ASSERT_NO_FATAL_FAILURE(Retrieve());
ASSERT_TRUE(called_);
EXPECT_TRUE(rule_[0] != nullptr);
EXPECT_TRUE(rule_[1] == nullptr);
EXPECT_TRUE(rule_[2] == nullptr);
EXPECT_TRUE(rule_[3] == nullptr);
EXPECT_EQ("data/XA", rule_[0]->GetId());
EXPECT_FALSE(rule_[0]->GetFormat().empty());
EXPECT_FALSE(rule_[0]->GetRequired().empty());
EXPECT_TRUE(rule_[0]->GetPostalCodeMatcher() == nullptr);
}
TEST_F(OndemandSupplyTaskTest, ValidHierarchy) {
source_->data_ = {
{"data/XA", R"({"id":"data/XA"})"},
{"data/XA/aa", R"({"id":"data/XA/aa"})"},
{"data/XA/aa/bb", R"({"id":"data/XA/aa/bb"})"},
{"data/XA/aa/bb/cc", R"({"id":"data/XA/aa/bb/cc"})"},
};
Queue("data/XA");
Queue("data/XA/aa");
Queue("data/XA/aa/bb");
Queue("data/XA/aa/bb/cc");
ASSERT_NO_FATAL_FAILURE(Retrieve());
ASSERT_TRUE(called_);
EXPECT_TRUE(rule_[0] != nullptr);
EXPECT_TRUE(rule_[1] != nullptr);
EXPECT_TRUE(rule_[2] != nullptr);
EXPECT_TRUE(rule_[3] != nullptr);
EXPECT_EQ("data/XA", rule_[0]->GetId());
EXPECT_EQ("data/XA/aa", rule_[1]->GetId());
EXPECT_EQ("data/XA/aa/bb", rule_[2]->GetId());
EXPECT_EQ("data/XA/aa/bb/cc", rule_[3]->GetId());
EXPECT_FALSE(rule_[0]->GetFormat().empty());
EXPECT_FALSE(rule_[0]->GetRequired().empty());
EXPECT_TRUE(rule_[1]->GetFormat().empty());
EXPECT_TRUE(rule_[1]->GetRequired().empty());
EXPECT_TRUE(rule_[2]->GetFormat().empty());
EXPECT_TRUE(rule_[2]->GetRequired().empty());
EXPECT_TRUE(rule_[3]->GetFormat().empty());
EXPECT_TRUE(rule_[3]->GetRequired().empty());
}
TEST_F(OndemandSupplyTaskTest, InvalidJson1) {
source_->data_ = {{"data/XA", ":"}};
success_ = false;
Queue("data/XA");
ASSERT_NO_FATAL_FAILURE(Retrieve());
ASSERT_TRUE(called_);
}
TEST_F(OndemandSupplyTaskTest, InvalidJson2) {
source_->data_ = {
{"data/XA", R"({"id":"data/XA"})"},
{"data/XA/aa", ":"},
};
success_ = false;
Queue("data/XA");
Queue("data/XA/aa");
ASSERT_NO_FATAL_FAILURE(Retrieve());
ASSERT_TRUE(called_);
}
TEST_F(OndemandSupplyTaskTest, EmptyJsonJustMeansServerKnowsNothingAboutKey) {
source_->data_ = {
{"data/XA", R"({"id":"data/XA"})"},
{"data/XA/aa", "{}"},
};
Queue("data/XA");
Queue("data/XA/aa");
ASSERT_NO_FATAL_FAILURE(Retrieve());
ASSERT_TRUE(called_);
EXPECT_TRUE(rule_[0] != nullptr);
EXPECT_TRUE(rule_[1] == nullptr);
EXPECT_TRUE(rule_[2] == nullptr);
EXPECT_TRUE(rule_[3] == nullptr);
EXPECT_EQ("data/XA", rule_[0]->GetId());
}
TEST_F(OndemandSupplyTaskTest, IfCountryFailsAllFails) {
source_->data_ = {{"data/XA/aa", R"({"id":"data/XA/aa"})"}};
success_ = false;
Queue("data/XA");
Queue("data/XA/aa");
ASSERT_NO_FATAL_FAILURE(Retrieve());
ASSERT_TRUE(called_);
}
} |
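A hedged sketch of driving OndemandSupplyTask directly, mirroring the fixture above; Driver and main are hypothetical names, MockSource answers synchronously, and the task deletes itself after invoking the callback.

#include <libaddressinput/callback.h>
#include <libaddressinput/null_storage.h>
#include <libaddressinput/supplier.h>
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include "lookup_key.h"
#include "mock_source.h"
#include "ondemand_supply_task.h"
#include "retriever.h"
#include "rule.h"

using i18n::addressinput::BuildCallback;
using i18n::addressinput::LookupKey;
using i18n::addressinput::MockSource;
using i18n::addressinput::NullStorage;
using i18n::addressinput::OndemandSupplyTask;
using i18n::addressinput::Retriever;
using i18n::addressinput::Rule;
using i18n::addressinput::Supplier;

class Driver {
 public:
  Driver() : supplied_(BuildCallback(this, &Driver::Supplied)) {}

  ~Driver() {
    // The cache owns the parsed rules, as in the fixture's destructor.
    for (const auto& pair : rules_) delete pair.second;
  }

  void Run() {
    auto* source = new MockSource;
    source->data_ = {{"data/XA", R"({"id":"data/XA"})"}};
    // The retriever is assumed to take ownership, mirroring the fixture.
    Retriever retriever(source, new NullStorage);
    // The task owns itself: it deletes itself after calling Supplied().
    auto* task = new OndemandSupplyTask(lookup_key_, &rules_, *supplied_);
    task->Queue("data/XA");
    task->Retrieve(retriever);
  }

 private:
  void Supplied(bool success, const LookupKey& lookup_key,
                const Supplier::RuleHierarchy& hierarchy) {
    if (success && hierarchy.rule[0] != nullptr) {
      std::cout << hierarchy.rule[0]->GetId() << std::endl;  // data/XA
    }
  }

  LookupKey lookup_key_;
  std::map<std::string, const Rule*> rules_;
  const std::unique_ptr<const Supplier::Callback> supplied_;
};

int main() {
  Driver driver;
  driver.Run();
  return 0;
}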
507 | cpp | google/libaddressinput | string_compare | cpp/src/util/string_compare.cc | cpp/test/util/string_compare_test.cc | #ifndef I18N_ADDRESSINPUT_UTIL_STRING_COMPARE_H_
#define I18N_ADDRESSINPUT_UTIL_STRING_COMPARE_H_
#include <memory>
#include <string>
namespace i18n {
namespace addressinput {
class StringCompare {
public:
StringCompare(const StringCompare&) = delete;
StringCompare& operator=(const StringCompare&) = delete;
StringCompare();
~StringCompare();
bool NaturalEquals(const std::string& a, const std::string& b) const;
bool NaturalLess(const std::string& a, const std::string& b) const;
private:
class Impl;
std::unique_ptr<Impl> impl_;
};
}
}
#endif
#include "string_compare.h"
#include <cassert>
#include <string>
#include <re2/re2.h>
#include "lru_cache_using_std.h"
namespace {
std::string ComputeMinPossibleMatch(const std::string& str) {
std::string min, max;
RE2::Options options;
options.set_literal(true);
options.set_case_sensitive(false);
RE2 matcher(str, options);
bool success = matcher.PossibleMatchRange(&min, &max, str.size());
assert(success);
(void)success;
return min;
}
}
namespace i18n {
namespace addressinput {
class StringCompare::Impl {
enum { MAX_CACHE_SIZE = 1 << 15 };
public:
Impl(const Impl&) = delete;
Impl& operator=(const Impl&) = delete;
Impl() : min_possible_match_(&ComputeMinPossibleMatch, MAX_CACHE_SIZE) {
options_.set_literal(true);
options_.set_case_sensitive(false);
}
~Impl() = default;
bool NaturalEquals(const std::string& a, const std::string& b) const {
RE2 matcher(b, options_);
return RE2::FullMatch(a, matcher);
}
bool NaturalLess(const std::string& a, const std::string& b) const {
const std::string& min_a(min_possible_match_(a));
const std::string& min_b(min_possible_match_(b));
return min_a < min_b;
}
private:
RE2::Options options_;
mutable lru_cache_using_std<std::string, std::string> min_possible_match_;
};
StringCompare::StringCompare() : impl_(new Impl) {}
StringCompare::~StringCompare() = default;
bool StringCompare::NaturalEquals(const std::string& a,
const std::string& b) const {
return impl_->NaturalEquals(a, b);
}
bool StringCompare::NaturalLess(const std::string& a,
const std::string& b) const {
return impl_->NaturalLess(a, b);
}
}
} | #include "util/string_compare.h"
#include <string>
#include <gtest/gtest.h>
namespace {
using i18n::addressinput::StringCompare;
struct TestCase {
TestCase(const std::string& left,
const std::string& right,
bool should_be_equal,
bool should_be_less)
: left(left),
right(right),
should_be_equal(should_be_equal),
should_be_less(should_be_less) {}
~TestCase() = default;
std::string left;
std::string right;
bool should_be_equal;
bool should_be_less;
};
class StringCompareTest : public testing::TestWithParam<TestCase> {
public:
StringCompareTest(const StringCompareTest&) = delete;
StringCompareTest& operator=(const StringCompareTest&) = delete;
protected:
StringCompareTest() = default;
StringCompare compare_;
};
TEST_P(StringCompareTest, CorrectComparison) {
if (GetParam().should_be_equal) {
EXPECT_TRUE(compare_.NaturalEquals(GetParam().left, GetParam().right));
} else {
EXPECT_FALSE(compare_.NaturalEquals(GetParam().left, GetParam().right));
}
}
TEST_P(StringCompareTest, CorrectLess) {
if (GetParam().should_be_less) {
EXPECT_TRUE(compare_.NaturalLess(GetParam().left, GetParam().right));
} else {
EXPECT_FALSE(compare_.NaturalLess(GetParam().left, GetParam().right));
}
}
INSTANTIATE_TEST_SUITE_P(
Comparisons, StringCompareTest,
testing::Values(TestCase("foo", "foo", true, false),
TestCase("foo", "FOO", true, false),
TestCase("bar", "foo", false, true),
TestCase("강원도", "강원도", true, false),
TestCase("강원도", "대구광역시", false, true),
TestCase("ZÜRICH", "zürich", true, false),
TestCase("абв", "где", false, true),
TestCase("абв", "ГДЕ", false, true),
TestCase("где", "абв", false, false),
TestCase("где", "АБВ", false, false)));
} |
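A minimal sketch of StringCompare in isolation; main is hypothetical, and the expected results mirror the parameterized cases above.

#include <cassert>
#include "util/string_compare.h"

int main() {
  i18n::addressinput::StringCompare compare;
  // Matching is literal but case-insensitive, including non-ASCII letters.
  assert(compare.NaturalEquals("ZÜRICH", "zürich"));
  assert(!compare.NaturalEquals("bar", "foo"));
  // NaturalLess gives a (roughly case-insensitive) ordering.
  assert(compare.NaturalLess("bar", "foo"));
  return 0;
}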
508 | cpp | google/libaddressinput | json | cpp/src/util/json.cc | cpp/test/util/json_test.cc | #ifndef I18N_ADDRESSINPUT_UTIL_JSON_H_
#define I18N_ADDRESSINPUT_UTIL_JSON_H_
#include <memory>
#include <string>
#include <vector>
namespace i18n {
namespace addressinput {
class Json {
public:
Json(const Json&) = delete;
Json& operator=(const Json&) = delete;
Json();
~Json();
bool ParseObject(const std::string& json);
const std::vector<const Json*>& GetSubDictionaries() const;
bool GetStringValueForKey(const std::string& key, std::string* value) const;
private:
class JsonImpl;
friend class JsonImpl;
explicit Json(JsonImpl* impl);
std::unique_ptr<JsonImpl> impl_;
};
}
}
#endif
#include "json.h"
#include <cassert>
#include <cstddef>
#include <memory>
#include <string>
#include <vector>
#include <rapidjson/document.h>
#include <rapidjson/reader.h>
namespace i18n {
namespace addressinput {
using rapidjson::Document;
using rapidjson::kParseValidateEncodingFlag;
using rapidjson::Value;
class Json::JsonImpl {
public:
JsonImpl(const JsonImpl&) = delete;
JsonImpl& operator=(const JsonImpl&) = delete;
explicit JsonImpl(const std::string& json)
: document_(new Document),
value_(document_.get()),
dictionaries_(),
valid_(false) {
document_->Parse<kParseValidateEncodingFlag>(json.c_str());
valid_ = !document_->HasParseError() && document_->IsObject();
}
~JsonImpl() {
for (auto ptr : dictionaries_) {
delete ptr;
}
}
bool valid() const { return valid_; }
const std::vector<const Json*>& GetSubDictionaries() {
if (dictionaries_.empty()) {
for (Value::ConstMemberIterator member = value_->MemberBegin();
member != value_->MemberEnd(); ++member) {
if (member->value.IsObject()) {
dictionaries_.push_back(new Json(new JsonImpl(&member->value)));
}
}
}
return dictionaries_;
}
bool GetStringValueForKey(const std::string& key, std::string* value) const {
assert(value != nullptr);
Value::ConstMemberIterator member = value_->FindMember(key.c_str());
if (member == value_->MemberEnd() || !member->value.IsString()) {
return false;
}
value->assign(member->value.GetString(), member->value.GetStringLength());
return true;
}
private:
explicit JsonImpl(const Value* value)
: document_(),
value_(value),
dictionaries_(),
valid_(true) {
assert(value_ != nullptr);
assert(value_->IsObject());
}
const std::unique_ptr<Document> document_;
const Value* const value_;
std::vector<const Json*> dictionaries_;
bool valid_;
};
Json::Json() : impl_() {}
Json::~Json() = default;
bool Json::ParseObject(const std::string& json) {
assert(impl_ == nullptr);
impl_.reset(new JsonImpl(json));
if (!impl_->valid()) {
impl_.reset();
}
return impl_ != nullptr;
}
const std::vector<const Json*>& Json::GetSubDictionaries() const {
assert(impl_ != nullptr);
return impl_->GetSubDictionaries();
}
bool Json::GetStringValueForKey(const std::string& key,
std::string* value) const {
assert(impl_ != nullptr);
return impl_->GetStringValueForKey(key, value);
}
Json::Json(JsonImpl* impl) : impl_(impl) {}
}
} | #include "util/json.h"
#include <string>
#include <gtest/gtest.h>
namespace {
using i18n::addressinput::Json;
TEST(JsonTest, EmptyStringIsNotValid) {
Json json;
EXPECT_FALSE(json.ParseObject(std::string()));
}
TEST(JsonTest, EmptyDictionaryContainsNoKeys) {
Json json;
ASSERT_TRUE(json.ParseObject("{}"));
std::string not_checked;
  EXPECT_FALSE(json.GetStringValueForKey("key", &not_checked));
  EXPECT_FALSE(json.GetStringValueForKey(std::string(), &not_checked));
}
TEST(JsonTest, InvalidJsonIsNotValid) {
Json json;
EXPECT_FALSE(json.ParseObject("{"));
}
TEST(JsonTest, OneKeyIsValid) {
Json json;
ASSERT_TRUE(json.ParseObject(R"({"key": "value"})"));
std::string value;
EXPECT_TRUE(json.GetStringValueForKey("key", &value));
EXPECT_EQ("value", value);
}
TEST(JsonTest, EmptyStringKeyIsNotInObject) {
Json json;
ASSERT_TRUE(json.ParseObject(R"({"key": "value"})"));
std::string not_checked;
  EXPECT_FALSE(json.GetStringValueForKey(std::string(), &not_checked));
}
TEST(JsonTest, EmptyKeyIsValid) {
Json json;
ASSERT_TRUE(json.ParseObject(R"({"": "value"})"));
std::string value;
EXPECT_TRUE(json.GetStringValueForKey(std::string(), &value));
EXPECT_EQ("value", value);
}
TEST(JsonTest, EmptyValueIsValid) {
Json json;
ASSERT_TRUE(json.ParseObject(R"({"key": ""})"));
std::string value;
EXPECT_TRUE(json.GetStringValueForKey("key", &value));
EXPECT_TRUE(value.empty());
}
TEST(JsonTest, Utf8EncodingIsValid) {
Json json;
ASSERT_TRUE(json.ParseObject(R"({"key": "Ü"})"));
std::string value;
EXPECT_TRUE(json.GetStringValueForKey("key", &value));
EXPECT_EQ("Ü", value);
}
TEST(JsonTest, InvalidUtf8IsNotValid) {
Json json;
EXPECT_FALSE(json.ParseObject("{\"key\": \"\xC3\x28\"}"));
}
TEST(JsonTest, NullInMiddleIsNotValid) {
Json json;
static const char kJson[] = "{\"key\": \"val\0ue\"}";
EXPECT_FALSE(json.ParseObject(std::string(kJson, sizeof kJson - 1)));
}
TEST(JsonTest, TwoKeysAreValid) {
Json json;
ASSERT_TRUE(json.ParseObject(R"({"key1": "value1", "key2": "value2"})"));
std::string value;
EXPECT_TRUE(json.GetStringValueForKey("key1", &value));
EXPECT_EQ("value1", value);
EXPECT_TRUE(json.GetStringValueForKey("key2", &value));
EXPECT_EQ("value2", value);
}
TEST(JsonTest, ListIsNotValid) {
Json json;
EXPECT_FALSE(json.ParseObject("[]"));
}
TEST(JsonTest, StringIsNotValid) {
Json json;
EXPECT_FALSE(json.ParseObject(R"("value")"));
}
TEST(JsonTest, NumberIsNotValid) {
Json json;
EXPECT_FALSE(json.ParseObject("3"));
}
TEST(JsonTest, NoDictionaryFound) {
Json json;
ASSERT_TRUE(json.ParseObject(R"({"key":"value"})"));
EXPECT_TRUE(json.GetSubDictionaries().empty());
}
TEST(JsonTest, DictionaryFound) {
Json json;
ASSERT_TRUE(json.ParseObject(R"({"key":{"inner_key":"value"}})"));
const auto& sub_dicts = json.GetSubDictionaries();
ASSERT_EQ(1U, sub_dicts.size());
std::string value;
EXPECT_TRUE(sub_dicts.front()->GetStringValueForKey("inner_key", &value));
EXPECT_EQ("value", value);
}
TEST(JsonTest, DictionariesHaveSubDictionaries) {
Json json;
ASSERT_TRUE(json.ParseObject(
R"({"key":{"inner_key":{"inner_inner_key":"value"}}})"));
const auto& sub_dicts = json.GetSubDictionaries();
ASSERT_EQ(1U, sub_dicts.size());
const auto& sub_sub_dicts = sub_dicts.front()->GetSubDictionaries();
ASSERT_EQ(1U, sub_sub_dicts.size());
std::string value;
EXPECT_TRUE(
sub_sub_dicts.front()->GetStringValueForKey("inner_inner_key", &value));
EXPECT_EQ("value", value);
}
} |
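A minimal sketch of the Json wrapper on a nested object; main is hypothetical. GetStringValueForKey() rejects members that are not strings, and sub-objects are reached through GetSubDictionaries().

#include <cassert>
#include <string>
#include "util/json.h"

int main() {
  i18n::addressinput::Json json;
  assert(json.ParseObject(R"({"key": {"inner_key": "value"}})"));
  std::string value;
  // String lookup fails for members that are not strings.
  assert(!json.GetStringValueForKey("key", &value));
  // Sub-objects are exposed (lazily) through GetSubDictionaries().
  const auto& dicts = json.GetSubDictionaries();
  assert(dicts.size() == 1);
  assert(dicts.front()->GetStringValueForKey("inner_key", &value));
  assert(value == "value");
  return 0;
}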
509 | cpp | google/libaddressinput | md5 | cpp/src/util/md5.cc | cpp/test/util/md5_unittest.cc | #ifndef I18N_ADDRESSINPUT_UTIL_MD5_H_
#define I18N_ADDRESSINPUT_UTIL_MD5_H_
#include <cstddef>
#include <cstdint>
#include <string>
namespace i18n {
namespace addressinput {
struct MD5Digest {
uint8_t a[16];
};
typedef char MD5Context[88];
void MD5Init(MD5Context* context);
void MD5Update(MD5Context* context, const std::string& data);
void MD5Final(MD5Digest* digest, MD5Context* context);
void MD5IntermediateFinal(MD5Digest* digest, const MD5Context* context);
std::string MD5DigestToBase16(const MD5Digest& digest);
void MD5Sum(const void* data, size_t length, MD5Digest* digest);
std::string MD5String(const std::string& str);
}
}
#endif
#include "md5.h"
#include <cstddef>
#include <string>
#include <string.h>
namespace {
struct Context {
uint32_t buf[4];
uint32_t bits[2];
uint8_t in[64];
};
void byteReverse(uint8_t* buf, unsigned longs) {
do {
uint32_t temp = static_cast<uint32_t>(
static_cast<unsigned>(buf[3]) << 8 |
buf[2]) << 16 |
(static_cast<unsigned>(buf[1]) << 8 | buf[0]);
*reinterpret_cast<uint32_t*>(buf) = temp;
buf += 4;
} while (--longs);
}
#define F1(x, y, z) (z ^ (x & (y ^ z)))
#define F2(x, y, z) F1(z, x, y)
#define F3(x, y, z) (x ^ y ^ z)
#define F4(x, y, z) (y ^ (x | ~z))
#define MD5STEP(f, w, x, y, z, data, s) \
(w += f(x, y, z) + data, w = w << s | w >> (32 - s), w += x)
void MD5Transform(uint32_t buf[4], const uint32_t in[16]) {
uint32_t a, b, c, d;
a = buf[0];
b = buf[1];
c = buf[2];
d = buf[3];
MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
buf[0] += a;
buf[1] += b;
buf[2] += c;
buf[3] += d;
}
}
namespace i18n {
namespace addressinput {
void MD5Init(MD5Context* context) {
struct Context* ctx = reinterpret_cast<struct Context*>(context);
ctx->buf[0] = 0x67452301;
ctx->buf[1] = 0xefcdab89;
ctx->buf[2] = 0x98badcfe;
ctx->buf[3] = 0x10325476;
ctx->bits[0] = 0;
ctx->bits[1] = 0;
}
void MD5Update(MD5Context* context, const std::string& data) {
struct Context* ctx = reinterpret_cast<struct Context*>(context);
const uint8_t* buf = reinterpret_cast<const uint8_t*>(data.data());
size_t len = data.size();
uint32_t t = ctx->bits[0];
if ((ctx->bits[0] = t + (static_cast<uint32_t>(len) << 3)) < t)
ctx->bits[1]++;
ctx->bits[1] += static_cast<uint32_t>(len >> 29);
t = (t >> 3) & 0x3f;
if (t) {
uint8_t* p = static_cast<uint8_t*>(ctx->in + t);
t = 64 - t;
if (len < t) {
memcpy(p, buf, len);
return;
}
memcpy(p, buf, t);
byteReverse(ctx->in, 16);
MD5Transform(ctx->buf, reinterpret_cast<uint32_t*>(ctx->in));
buf += t;
len -= t;
}
while (len >= 64) {
memcpy(ctx->in, buf, 64);
byteReverse(ctx->in, 16);
MD5Transform(ctx->buf, reinterpret_cast<uint32_t*>(ctx->in));
buf += 64;
len -= 64;
}
memcpy(ctx->in, buf, len);
}
void MD5Final(MD5Digest* digest, MD5Context* context) {
struct Context* ctx = reinterpret_cast<struct Context*>(context);
unsigned count;
uint8_t* p;
count = (ctx->bits[0] >> 3) & 0x3F;
p = ctx->in + count;
*p++ = 0x80;
count = 64 - 1 - count;
if (count < 8) {
memset(p, 0, count);
byteReverse(ctx->in, 16);
MD5Transform(ctx->buf, reinterpret_cast<uint32_t*>(ctx->in));
memset(ctx->in, 0, 56);
} else {
memset(p, 0, count - 8);
}
byteReverse(ctx->in, 14);
memcpy(&ctx->in[14 * sizeof(ctx->bits[0])], &ctx->bits[0],
sizeof(ctx->bits[0]));
memcpy(&ctx->in[15 * sizeof(ctx->bits[1])], &ctx->bits[1],
sizeof(ctx->bits[1]));
MD5Transform(ctx->buf, reinterpret_cast<uint32_t*>(ctx->in));
byteReverse(reinterpret_cast<uint8_t*>(ctx->buf), 4);
memcpy(digest->a, ctx->buf, 16);
memset(ctx, 0, sizeof(*ctx));
}
void MD5IntermediateFinal(MD5Digest* digest, const MD5Context* context) {
MD5Context context_copy;
memcpy(&context_copy, context, sizeof(context_copy));
MD5Final(digest, &context_copy);
}
std::string MD5DigestToBase16(const MD5Digest& digest) {
static char const zEncode[] = "0123456789abcdef";
std::string ret;
ret.resize(32);
for (int i = 0, j = 0; i < 16; i++, j += 2) {
uint8_t a = digest.a[i];
ret[j] = zEncode[(a >> 4) & 0xf];
ret[j + 1] = zEncode[a & 0xf];
}
return ret;
}
void MD5Sum(const void* data, size_t length, MD5Digest* digest) {
MD5Context ctx;
MD5Init(&ctx);
MD5Update(&ctx, std::string(reinterpret_cast<const char*>(data), length));
MD5Final(digest, &ctx);
}
std::string MD5String(const std::string& str) {
MD5Digest digest;
MD5Sum(str.data(), str.length(), &digest);
return MD5DigestToBase16(digest);
}
}
} | #include "util/md5.h"
#include <cstring>
#include <memory>
#include <string>
#include <gtest/gtest.h>
namespace {
using i18n::addressinput::MD5Context;
using i18n::addressinput::MD5Digest;
using i18n::addressinput::MD5DigestToBase16;
using i18n::addressinput::MD5Final;
using i18n::addressinput::MD5Init;
using i18n::addressinput::MD5String;
using i18n::addressinput::MD5Sum;
using i18n::addressinput::MD5Update;
TEST(MD5, DigestToBase16) {
MD5Digest digest;
int data[] = {
0xd4, 0x1d, 0x8c, 0xd9,
0x8f, 0x00, 0xb2, 0x04,
0xe9, 0x80, 0x09, 0x98,
0xec, 0xf8, 0x42, 0x7e
};
for (int i = 0; i < 16; ++i)
digest.a[i] = data[i] & 0xff;
std::string actual = MD5DigestToBase16(digest);
std::string expected = "d41d8cd98f00b204e9800998ecf8427e";
EXPECT_EQ(expected, actual);
}
TEST(MD5, MD5SumEmptyData) {
MD5Digest digest;
const char data[] = "";
MD5Sum(data, strlen(data), &digest);
int expected[] = {
0xd4, 0x1d, 0x8c, 0xd9,
0x8f, 0x00, 0xb2, 0x04,
0xe9, 0x80, 0x09, 0x98,
0xec, 0xf8, 0x42, 0x7e
};
for (int i = 0; i < 16; ++i)
EXPECT_EQ(expected[i], digest.a[i] & 0xFF);
}
TEST(MD5, MD5SumOneByteData) {
MD5Digest digest;
const char data[] = "a";
MD5Sum(data, strlen(data), &digest);
int expected[] = {
0x0c, 0xc1, 0x75, 0xb9,
0xc0, 0xf1, 0xb6, 0xa8,
0x31, 0xc3, 0x99, 0xe2,
0x69, 0x77, 0x26, 0x61
};
for (int i = 0; i < 16; ++i)
EXPECT_EQ(expected[i], digest.a[i] & 0xFF);
}
TEST(MD5, MD5SumLongData) {
const int length = 10 * 1024 * 1024 + 1;
std::unique_ptr<char[]> data(new char[length]);
for (int i = 0; i < length; ++i)
data[i] = i & 0xFF;
MD5Digest digest;
MD5Sum(data.get(), length, &digest);
int expected[] = {
0x90, 0xbd, 0x6a, 0xd9,
0x0a, 0xce, 0xf5, 0xad,
0xaa, 0x92, 0x20, 0x3e,
0x21, 0xc7, 0xa1, 0x3e
};
for (int i = 0; i < 16; ++i)
EXPECT_EQ(expected[i], digest.a[i] & 0xFF);
}
TEST(MD5, ContextWithEmptyData) {
MD5Context ctx;
MD5Init(&ctx);
MD5Digest digest;
MD5Final(&digest, &ctx);
int expected[] = {
0xd4, 0x1d, 0x8c, 0xd9,
0x8f, 0x00, 0xb2, 0x04,
0xe9, 0x80, 0x09, 0x98,
0xec, 0xf8, 0x42, 0x7e
};
for (int i = 0; i < 16; ++i)
EXPECT_EQ(expected[i], digest.a[i] & 0xFF);
}
TEST(MD5, ContextWithLongData) {
MD5Context ctx;
MD5Init(&ctx);
const int length = 10 * 1024 * 1024 + 1;
std::unique_ptr<char[]> data(new char[length]);
for (int i = 0; i < length; ++i)
data[i] = i & 0xFF;
int total = 0;
while (total < length) {
int len = 4097;
if (len > length - total)
len = length - total;
MD5Update(&ctx,
std::string(reinterpret_cast<char*>(data.get() + total), len));
total += len;
}
EXPECT_EQ(length, total);
MD5Digest digest;
MD5Final(&digest, &ctx);
int expected[] = {
0x90, 0xbd, 0x6a, 0xd9,
0x0a, 0xce, 0xf5, 0xad,
0xaa, 0x92, 0x20, 0x3e,
0x21, 0xc7, 0xa1, 0x3e
};
for (int i = 0; i < 16; ++i)
EXPECT_EQ(expected[i], digest.a[i] & 0xFF);
}
TEST(MD5, MD5StringTestSuite1) {
std::string actual = MD5String("");
std::string expected = "d41d8cd98f00b204e9800998ecf8427e";
EXPECT_EQ(expected, actual);
}
TEST(MD5, MD5StringTestSuite2) {
std::string actual = MD5String("a");
std::string expected = "0cc175b9c0f1b6a831c399e269772661";
EXPECT_EQ(expected, actual);
}
TEST(MD5, MD5StringTestSuite3) {
std::string actual = MD5String("abc");
std::string expected = "900150983cd24fb0d6963f7d28e17f72";
EXPECT_EQ(expected, actual);
}
TEST(MD5, MD5StringTestSuite4) {
std::string actual = MD5String("message digest");
std::string expected = "f96b697d7cb7938d525a2f31aaf161d0";
EXPECT_EQ(expected, actual);
}
TEST(MD5, MD5StringTestSuite5) {
std::string actual = MD5String("abcdefghijklmnopqrstuvwxyz");
std::string expected = "c3fcd3d76192e4007dfb496cca67e13b";
EXPECT_EQ(expected, actual);
}
TEST(MD5, MD5StringTestSuite6) {
std::string actual = MD5String("ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz"
"0123456789");
std::string expected = "d174ab98d277d9f5a5611c2c9f419d9f";
EXPECT_EQ(expected, actual);
}
TEST(MD5, MD5StringTestSuite7) {
std::string actual = MD5String("12345678901234567890"
"12345678901234567890"
"12345678901234567890"
"12345678901234567890");
std::string expected = "57edf4a22be3c955ac49da2e2107b67a";
EXPECT_EQ(expected, actual);
}
TEST(MD5, ContextWithStringData) {
MD5Context ctx;
MD5Init(&ctx);
MD5Update(&ctx, "abc");
MD5Digest digest;
MD5Final(&digest, &ctx);
std::string actual = MD5DigestToBase16(digest);
std::string expected = "900150983cd24fb0d6963f7d28e17f72";
EXPECT_EQ(expected, actual);
}
TEST(MD5, IntermediateFinal) {
MD5Context check_header_context;
MD5Init(&check_header_context);
MD5Context check_full_context;
MD5Init(&check_full_context);
MD5Context context;
MD5Init(&context);
static const char kHeader[] = "header data";
static const char kBody[] = "payload data";
MD5Update(&context, kHeader);
MD5Update(&check_header_context, kHeader);
MD5Update(&check_full_context, kHeader);
MD5Digest check_header_digest;
MD5Final(&check_header_digest, &check_header_context);
MD5Digest header_digest;
MD5IntermediateFinal(&header_digest, &context);
MD5Update(&context, kBody);
MD5Update(&check_full_context, kBody);
MD5Digest check_full_digest;
MD5Final(&check_full_digest, &check_full_context);
MD5Digest digest;
MD5Final(&digest, &context);
EXPECT_TRUE(!memcmp(&header_digest, &check_header_digest,
sizeof(header_digest)));
EXPECT_TRUE(!memcmp(&digest, &check_full_digest, sizeof(digest)));
EXPECT_TRUE(memcmp(&digest, &header_digest, sizeof(digest)));
}
} |
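A minimal sketch showing that one-shot and incremental hashing agree; main is hypothetical, and the digest is the RFC 1321 test vector for "abc" already used above.

#include <cassert>
#include <string>
#include "util/md5.h"

using i18n::addressinput::MD5Context;
using i18n::addressinput::MD5Digest;
using i18n::addressinput::MD5DigestToBase16;
using i18n::addressinput::MD5Final;
using i18n::addressinput::MD5Init;
using i18n::addressinput::MD5String;
using i18n::addressinput::MD5Update;

int main() {
  // One-shot hashing.
  assert(MD5String("abc") == "900150983cd24fb0d6963f7d28e17f72");
  // Incremental hashing over the same bytes yields the same digest.
  MD5Context ctx;
  MD5Init(&ctx);
  MD5Update(&ctx, "a");
  MD5Update(&ctx, "bc");
  MD5Digest digest;
  MD5Final(&digest, &ctx);
  assert(MD5DigestToBase16(digest) == "900150983cd24fb0d6963f7d28e17f72");
  return 0;
}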
510 | cpp | google/libaddressinput | string_split | cpp/src/util/string_split.cc | cpp/test/util/string_split_unittest.cc | #ifndef I18N_ADDRESSINPUT_UTIL_STRING_SPLIT_H_
#define I18N_ADDRESSINPUT_UTIL_STRING_SPLIT_H_
#include <string>
#include <vector>
namespace i18n {
namespace addressinput {
void SplitString(const std::string& str, char s, std::vector<std::string>* r);
}
}
#endif
#include "string_split.h"
#include <cassert>
#include <cstddef>
#include <string>
#include <vector>
namespace i18n {
namespace addressinput {
void SplitString(const std::string& str, char s, std::vector<std::string>* r) {
assert(r != nullptr);
r->clear();
size_t last = 0;
size_t c = str.size();
for (size_t i = 0; i <= c; ++i) {
if (i == c || str[i] == s) {
std::string tmp(str, last, i - last);
if (i != c || !r->empty() || !tmp.empty()) {
r->push_back(tmp);
}
last = i + 1;
}
}
}
}
} | #include "util/string_split.h"
#include <string>
#include <vector>
#include <gtest/gtest.h>
namespace {
using i18n::addressinput::SplitString;
TEST(StringSplitTest, SplitString) {
std::vector<std::string> r;
SplitString(std::string(), ',', &r);
EXPECT_EQ(0U, r.size());
SplitString("a,b,c", ',', &r);
ASSERT_EQ(3U, r.size());
EXPECT_EQ(r[0], "a");
EXPECT_EQ(r[1], "b");
EXPECT_EQ(r[2], "c");
SplitString("a, b, c", ',', &r);
ASSERT_EQ(3U, r.size());
EXPECT_EQ(r[0], "a");
EXPECT_EQ(r[1], " b");
EXPECT_EQ(r[2], " c");
SplitString("a,,c", ',', &r);
ASSERT_EQ(3U, r.size());
EXPECT_EQ(r[0], "a");
EXPECT_EQ(r[1], "");
EXPECT_EQ(r[2], "c");
SplitString(" ", '*', &r);
EXPECT_EQ(1U, r.size());
SplitString("foo", '*', &r);
ASSERT_EQ(1U, r.size());
EXPECT_EQ(r[0], "foo");
SplitString("foo ,", ',', &r);
ASSERT_EQ(2U, r.size());
EXPECT_EQ(r[0], "foo ");
EXPECT_EQ(r[1], "");
SplitString(",", ',', &r);
ASSERT_EQ(2U, r.size());
EXPECT_EQ(r[0], "");
EXPECT_EQ(r[1], "");
SplitString("\t\ta\t", '\t', &r);
ASSERT_EQ(4U, r.size());
EXPECT_EQ(r[0], "");
EXPECT_EQ(r[1], "");
EXPECT_EQ(r[2], "a");
EXPECT_EQ(r[3], "");
SplitString("\ta\t\nb\tcc", '\n', &r);
ASSERT_EQ(2U, r.size());
EXPECT_EQ(r[0], "\ta\t");
EXPECT_EQ(r[1], "b\tcc");
SplitString(" ", '*', &r);
ASSERT_EQ(1U, r.size());
EXPECT_EQ(r[0], " ");
SplitString("\t \ta\t ", '\t', &r);
ASSERT_EQ(4U, r.size());
EXPECT_EQ(r[0], "");
EXPECT_EQ(r[1], " ");
EXPECT_EQ(r[2], "a");
EXPECT_EQ(r[3], " ");
SplitString("\ta\t\nb\tcc", '\n', &r);
ASSERT_EQ(2U, r.size());
EXPECT_EQ(r[0], "\ta\t");
EXPECT_EQ(r[1], "b\tcc");
}
} |
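A minimal sketch of SplitString; main is hypothetical. Consecutive separators produce empty tokens, as the tests above verify.

#include <cassert>
#include <string>
#include <vector>
#include "util/string_split.h"

int main() {
  std::vector<std::string> parts;
  // Consecutive separators produce empty tokens; a trailing separator
  // produces a trailing empty token.
  i18n::addressinput::SplitString("a,,c,", ',', &parts);
  assert(parts.size() == 4);
  assert(parts[0] == "a");
  assert(parts[1].empty());
  assert(parts[3].empty());
  return 0;
}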
511 | cpp | google/libaddressinput | string_util | cpp/src/util/string_util.cc | cpp/test/util/string_util_test.cc | #ifndef I18N_ADDRESSINPUT_UTIL_STRING_UTIL_H_
#define I18N_ADDRESSINPUT_UTIL_STRING_UTIL_H_
#include <string>
#include <vector>
namespace i18n {
namespace addressinput {
std::string DoReplaceStringPlaceholders(const std::string& format_string,
const std::vector<std::string>& subst);
}
}
#endif
#include "string_util.h"
#include <cassert>
#include <cstddef>
#include <stdint.h>
#include <string>
#include <vector>
namespace i18n {
namespace addressinput {
std::string DoReplaceStringPlaceholders(const std::string& format_string,
const std::vector<std::string>& subst) {
size_t substitutions = subst.size();
size_t sub_length = 0;
for (std::vector<std::string>::const_iterator iter = subst.begin();
iter != subst.end(); ++iter) {
sub_length += iter->length();
}
std::string formatted;
formatted.reserve(format_string.length() + sub_length);
for (std::string::const_iterator i = format_string.begin();
i != format_string.end(); ++i) {
if ('$' == *i) {
if (i + 1 != format_string.end()) {
++i;
assert('$' == *i || '1' <= *i);
if ('$' == *i) {
while (i != format_string.end() && '$' == *i) {
formatted.push_back('$');
++i;
}
--i;
} else {
uintptr_t index = 0;
while (i != format_string.end() && '0' <= *i && *i <= '9') {
index *= 10;
index += *i - '0';
++i;
}
--i;
index -= 1;
if (index < substitutions)
formatted.append(subst.at(index));
}
}
} else {
formatted.push_back(*i);
}
}
return formatted;
}
}
} | #include "util/string_util.h"
#include <string>
#include <vector>
#include <gtest/gtest.h>
namespace {
using i18n::addressinput::DoReplaceStringPlaceholders;
TEST(StringUtilTest, Ok) {
const std::vector<std::string> subst{
"A",
"B",
"C",
};
EXPECT_EQ("aA,bB,cC", DoReplaceStringPlaceholders("a$1,b$2,c$3", subst));
}
TEST(StringUtilTest, FewParameters) {
const std::vector<std::string> subst{
"A",
"B",
"C",
};
EXPECT_EQ("aA,bB,cC,d,aA",
DoReplaceStringPlaceholders("a$1,b$2,c$3,d$4,a$1", subst));
}
TEST(StringUtilTest, MoreThan9Parameters) {
const std::vector<std::string> subst{
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
};
EXPECT_EQ("aA,bB,cC,dD,eE,fF,gG,hH,iI,jJ,kK,aA",
DoReplaceStringPlaceholders("a$1,b$2,c$3,d$4,e$5,f$6,g$7,h$8,i$9,"
"j$10,k$11,a$1",
subst));
}
TEST(StringUtilTest, ConsecutiveDollarSigns) {
const std::vector<std::string> subst{
"A",
"B",
"C",
};
EXPECT_EQ("$1 $$2 $$$3",
DoReplaceStringPlaceholders("$$1 $$$2 $$$$3", subst));
}
} |
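A minimal sketch of DoReplaceStringPlaceholders; main is hypothetical. Out-of-range placeholders expand to nothing, and "$$" escapes a literal dollar sign, matching the tests above.

#include <cassert>
#include <string>
#include <vector>
#include "util/string_util.h"

using i18n::addressinput::DoReplaceStringPlaceholders;

int main() {
  const std::vector<std::string> subst{"A", "B"};
  // $N is a 1-based index into |subst|.
  assert(DoReplaceStringPlaceholders("a$1,b$2", subst) == "aA,bB");
  // Out-of-range placeholders expand to nothing; "$$" emits a literal "$".
  assert(DoReplaceStringPlaceholders("$$1 c$3", subst) == "$1 c");
  return 0;
}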
512 | cpp | google/tensorstore | unit | tensorstore/util/unit.cc | tensorstore/util/unit_test.cc | #ifndef TENSORSTORE_UTIL_UNIT_H_
#define TENSORSTORE_UTIL_UNIT_H_
#include <iosfwd>
#include <string>
#include <string_view>
#include <utility>
namespace tensorstore {
struct Unit {
Unit() = default;
Unit(std::string_view unit);
Unit(const char* unit) : Unit(std::string_view(unit)) {}
Unit(const std::string& unit) : Unit(std::string_view(unit)) {}
Unit(double multiplier, std::string base_unit)
: multiplier(multiplier), base_unit(std::move(base_unit)) {}
double multiplier = 1;
std::string base_unit;
friend std::ostream& operator<<(std::ostream& os, const Unit& unit);
std::string to_string() const;
template <typename Sink>
friend void AbslStringify(Sink& sink, const Unit& self) {
sink.Append(self.to_string());
}
friend bool operator==(const Unit& a, const Unit& b);
friend bool operator!=(const Unit& a, const Unit& b) { return !(a == b); }
friend Unit operator*(Unit u, double x) {
u.multiplier *= x;
return u;
}
friend Unit operator*(double x, Unit u) {
u.multiplier *= x;
return u;
}
friend Unit& operator*=(Unit& u, double x) {
u.multiplier *= x;
return u;
}
friend Unit operator/(Unit u, double x) {
u.multiplier /= x;
return u;
}
friend Unit& operator/=(Unit& u, double x) {
u.multiplier /= x;
return u;
}
static constexpr auto ApplyMembers = [](auto&& x, auto f) {
return f(x.multiplier, x.base_unit);
};
};
}
#endif
#include "tensorstore/util/unit.h"
#include <ostream>
#include <string>
#include "absl/strings/ascii.h"
#include "absl/strings/str_cat.h"
#include "re2/re2.h"
namespace tensorstore {
std::ostream& operator<<(std::ostream& os, const Unit& unit) {
if (unit.base_unit.empty()) {
return os << unit.multiplier;
} else {
if (unit.multiplier != 1) {
os << unit.multiplier << ' ';
}
return os << unit.base_unit;
}
}
bool operator==(const Unit& a, const Unit& b) {
return a.multiplier == b.multiplier && a.base_unit == b.base_unit;
}
Unit::Unit(std::string_view unit) {
static LazyRE2 kNumberPattern = {
"([-+]?(?:\\.[0-9]+|[0-9]+(?:\\.[0-9]*)?)(?:[eE][-+]?\\d+)?)\\s*"};
while (!unit.empty() && absl::ascii_isspace(unit.front())) {
unit.remove_prefix(1);
}
while (!unit.empty() && absl::ascii_isspace(unit.back())) {
unit.remove_suffix(1);
}
RE2::Consume(&unit, *kNumberPattern, &multiplier);
base_unit = unit;
}
std::string Unit::to_string() const {
if (base_unit.empty()) {
return absl::StrCat(multiplier);
}
if (multiplier != 1) {
return absl::StrCat(multiplier, " ", base_unit);
}
return base_unit;
}
} | #include "tensorstore/util/unit.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_binding/unit.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::TestJsonBinderRoundTrip;
using ::tensorstore::TestJsonBinderRoundTripJsonOnlyInexact;
using ::tensorstore::Unit;
using ::tensorstore::serialization::TestSerializationRoundTrip;
TEST(UnitTest, DefaultConstruct) {
Unit u;
EXPECT_EQ(1, u.multiplier);
EXPECT_EQ("", u.base_unit);
}
TEST(UnitTest, Compare) {
Unit a(5, "nm");
Unit b(5.5, "nm");
Unit c(5, "um");
Unit d;
EXPECT_EQ(a, a);
EXPECT_EQ(b, b);
EXPECT_EQ(c, c);
EXPECT_EQ(d, d);
EXPECT_NE(a, b);
EXPECT_NE(a, c);
EXPECT_NE(a, d);
EXPECT_NE(b, c);
EXPECT_NE(b, d);
EXPECT_NE(c, d);
}
TEST(UnitTest, Ostream) {
EXPECT_EQ("5.5 nm", tensorstore::StrCat(Unit(5.5, "nm")));
EXPECT_EQ("nm", tensorstore::StrCat(Unit(1, "nm")));
EXPECT_EQ("5", tensorstore::StrCat(Unit(5, "")));
EXPECT_EQ("1", tensorstore::StrCat(Unit(1, "")));
}
TEST(UnitTest, ConvertToString) {
EXPECT_EQ("5.5 nm", Unit(5.5, "nm").to_string());
EXPECT_EQ("nm", Unit(1, "nm").to_string());
EXPECT_EQ("5", Unit(5, "").to_string());
EXPECT_EQ("1", Unit(1, "").to_string());
EXPECT_EQ("1", absl::StrCat(Unit(1, "")));
}
TEST(UnitTest, MultiplierBaseUnit) {
Unit u = {5, "nm"};
EXPECT_EQ(5, u.multiplier);
EXPECT_EQ("nm", u.base_unit);
}
TEST(UnitTest, Unit) {
EXPECT_EQ(Unit(4, "nm"), Unit("4nm"));
EXPECT_EQ(Unit(4, "nm"), Unit("4.nm"));
EXPECT_EQ(Unit(4e-3, "nm"), Unit("4e-3nm"));
EXPECT_EQ(Unit(.4, "nm"), Unit(".4nm"));
EXPECT_EQ(Unit(.4, "nm"), Unit(".4 nm"));
EXPECT_EQ(Unit(.4, "nm"), Unit(" .4 nm"));
EXPECT_EQ(Unit(.4, "nm"), Unit(" .4 nm "));
EXPECT_EQ(Unit(4e-3, "nm"), Unit("+4e-3nm"));
EXPECT_EQ(Unit(-4e-3, "nm"), Unit("-4e-3nm"));
EXPECT_EQ(Unit(4.5, "nm"), Unit("4.5nm"));
EXPECT_EQ(Unit(1, "nm"), Unit("nm"));
EXPECT_EQ(Unit(4, ""), Unit("4"));
EXPECT_EQ(Unit(1, ""), Unit(""));
EXPECT_EQ(Unit(3, "nm @ 50"), Unit("3 nm @ 50"));
}
TEST(UnitTest, JsonRoundTrip) {
TestJsonBinderRoundTrip<Unit>({
{Unit(4, "nm"), {4, "nm"}},
{Unit(4.5, "nm"), {4.5, "nm"}},
{Unit(4.5, ""), {4.5, ""}},
});
}
TEST(UnitTest, JsonRoundTripInexact) {
TestJsonBinderRoundTripJsonOnlyInexact<Unit>({
{"4nm", {4, "nm"}},
{4, {4, ""}},
{"nm", {1, "nm"}},
});
}
TEST(SerializationTest, Basic) {
TestSerializationRoundTrip(Unit("4nm"));
TestSerializationRoundTrip(Unit("4"));
TestSerializationRoundTrip(Unit("nm"));
TestSerializationRoundTrip(Unit(""));
}
} |
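A minimal sketch of tensorstore::Unit parsing and scaling; main is hypothetical, and the printed form follows the operator<< defined above.

#include <cassert>
#include <iostream>
#include "tensorstore/util/unit.h"

int main() {
  tensorstore::Unit u("4.5nm");  // The leading multiplier is parsed off.
  assert(u.multiplier == 4.5);
  assert(u.base_unit == "nm");
  u *= 2;                        // Scaling affects only the multiplier.
  std::cout << u << std::endl;   // Prints "9 nm".
  return 0;
}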
513 | cpp | google/tensorstore | data_type | tensorstore/internal/json_binding/data_type.cc | tensorstore/internal/json_binding/data_type_test.cc | #ifndef TENSORSTORE_INTERNAL_JSON_BINDING_DATA_TYPE_H_
#define TENSORSTORE_INTERNAL_JSON_BINDING_DATA_TYPE_H_
#include "absl/status/status.h"
#include "tensorstore/data_type.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/json_serialization_options.h"
namespace tensorstore {
namespace internal_json_binding {
TENSORSTORE_DECLARE_JSON_BINDER(DataTypeJsonBinder, DataType)
TENSORSTORE_DECLARE_JSON_BINDER(OptionalDataTypeJsonBinder, DataType)
TENSORSTORE_DECLARE_JSON_BINDER(ConstrainedDataTypeJsonBinder, DataType,
JsonSerializationOptions,
JsonSerializationOptions)
template <>
inline constexpr auto DefaultBinder<DataType> = OptionalDataTypeJsonBinder;
}
}
#endif
#include "tensorstore/internal/json_binding/data_type.h"
#include <string>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/data_type.h"
#include "tensorstore/internal/json/json.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_json_binding {
TENSORSTORE_DEFINE_JSON_BINDER(DataTypeJsonBinder, [](auto is_loading,
const auto& options,
auto* obj,
::nlohmann::json* j) {
if constexpr (is_loading) {
return internal_json_binding::Compose<std::string>(
[](auto is_loading, const auto& options, DataType* obj, auto* id) {
*obj = tensorstore::GetDataType(*id);
if (!obj->valid()) {
return absl::Status(
absl::StatusCode::kInvalidArgument,
tensorstore::StrCat("Unsupported data type: ",
tensorstore::QuoteString(*id)));
}
return absl::OkStatus();
})(is_loading, options, obj, j);
} else {
if (!obj->valid()) {
*j = ::nlohmann::json(::nlohmann::json::value_t::discarded);
} else if (obj->id() == DataTypeId::custom) {
return absl::Status(absl::StatusCode::kInvalidArgument,
"Data type has no canonical identifier");
} else {
*j = obj->name();
}
return absl::OkStatus();
}
})
TENSORSTORE_DEFINE_JSON_BINDER(OptionalDataTypeJsonBinder,
[](auto is_loading, const auto& options,
auto* obj, ::nlohmann::json* j) {
if constexpr (is_loading) {
if (j->is_discarded()) {
*obj = DataType{};
return absl::OkStatus();
}
}
return DataTypeJsonBinder(is_loading, options,
obj, j);
})
TENSORSTORE_DEFINE_JSON_BINDER(
ConstrainedDataTypeJsonBinder,
[](auto is_loading, const auto& options, auto* obj, ::nlohmann::json* j) {
return Validate(
[](const auto& options, DataType* d) {
if (options.dtype().valid() && d->valid() &&
options.dtype() != *d) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Expected data type of ", options.dtype(),
" but received: ", *d));
}
return absl::OkStatus();
},
DefaultValue([dtype = options.dtype()](DataType* d) { *d = dtype; }))(
is_loading, options, obj, j);
})
}
} | #include "tensorstore/internal/json_binding/data_type.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/data_type.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
using ::tensorstore::DataType;
using ::tensorstore::dtype_v;
using ::tensorstore::MatchesStatus;
namespace jb = tensorstore::internal_json_binding;
namespace {
struct X {};
TEST(DataTypeJsonBinderTest, ToJson) {
EXPECT_THAT(jb::ToJson(DataType(dtype_v<std::int32_t>)),
::testing::Optional(::nlohmann::json("int32")));
EXPECT_THAT(jb::ToJson(DataType(dtype_v<bool>)),
::testing::Optional(::nlohmann::json("bool")));
EXPECT_THAT(jb::ToJson(DataType(dtype_v<X>)),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Data type has no canonical identifier"));
EXPECT_THAT(jb::ToJson(DataType{}),
::testing::Optional(tensorstore::MatchesJson(
::nlohmann::json(::nlohmann::json::value_t::discarded))));
}
TEST(DataTypeJsonBinderTest, FromJson) {
EXPECT_THAT(jb::FromJson<DataType>(::nlohmann::json("int32")),
::testing::Optional(dtype_v<std::int32_t>));
EXPECT_THAT(jb::FromJson<DataType>(::nlohmann::json("bool")),
::testing::Optional(dtype_v<bool>));
EXPECT_THAT(jb::FromJson<DataType>(::nlohmann::json("invalid")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Unsupported data type: \"invalid\""));
EXPECT_THAT(jb::FromJson<DataType>(
::nlohmann::json(::nlohmann::json::value_t::discarded)),
::testing::Optional(DataType{}));
EXPECT_THAT(jb::FromJson<DataType>(
::nlohmann::json(::nlohmann::json::value_t::discarded),
tensorstore::internal_json_binding::DataTypeJsonBinder),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
} |
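A hedged sketch of round-tripping a DataType through the binder above; main is hypothetical, and the Result-returning ToJson/FromJson helpers from bindable.h are assumed to be available as in the test.

#include <iostream>
#include <nlohmann/json.hpp>
#include "tensorstore/data_type.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/data_type.h"

namespace jb = tensorstore::internal_json_binding;

int main() {
  // To JSON: canonical data types serialize to their name.
  auto j = jb::ToJson(tensorstore::DataType(tensorstore::dtype_v<bool>));
  if (j.ok()) std::cout << j->dump() << std::endl;  // "bool"
  // From JSON: names parse back to the corresponding DataType.
  auto dt = jb::FromJson<tensorstore::DataType>(::nlohmann::json("int32"));
  if (dt.ok()) std::cout << dt->name() << std::endl;  // int32
  return 0;
}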
514 | cpp | google/tensorstore | chunk_layout | tensorstore/chunk_layout.cc | tensorstore/chunk_layout_test.cc | #ifndef TENSORSTORE_CHUNK_LAYOUT_H_
#define TENSORSTORE_CHUNK_LAYOUT_H_
#include <stddef.h>
#include <stdint.h>
#include <iosfwd>
#include <memory>
#include <string_view>
#include <type_traits>
#include <utility>
#include "absl/status/status.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/internal/integer_range.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/json_serialization_options.h"
#include "tensorstore/rank.h"
#include "tensorstore/serialization/fwd.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/maybe_hard_constraint.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
class ChunkLayout {
public:
enum Usage : unsigned char {
kWrite = 0,
kRead = 1,
kCodec = 2,
};
constexpr static Usage kUnspecifiedUsage = static_cast<Usage>(3);
constexpr static internal::IntegerRange<Usage> kUsages =
internal::IntegerRange<Usage>::Inclusive(kWrite, kCodec);
constexpr static size_t kNumUsages = 3;
constexpr static double kDefaultAspectRatioValue = 0;
constexpr static Index kDefaultShapeValue = 0;
friend std::ostream& operator<<(std::ostream& os, Usage usage);
static Result<Usage> ParseUsage(std::string_view s);
using ToJsonOptions = JsonSerializationOptions;
using FromJsonOptions = JsonSerializationOptions;
struct ChunkElementsBase : public MaybeHardConstraintIndex {
using MaybeHardConstraintIndex::MaybeHardConstraintIndex;
explicit ChunkElementsBase(MaybeHardConstraintIndex base)
: MaybeHardConstraintIndex(base) {}
};
template <Usage U>
struct ChunkElementsFor : public ChunkElementsBase {
using ChunkElementsBase::ChunkElementsBase;
using ChunkElementsBase::value;
using ChunkElementsBase::hard_constraint;
};
using ChunkElements = ChunkElementsFor<kUnspecifiedUsage>;
using WriteChunkElements = ChunkElementsFor<Usage::kWrite>;
using ReadChunkElements = ChunkElementsFor<Usage::kRead>;
using CodecChunkElements = ChunkElementsFor<Usage::kCodec>;
struct ChunkShapeBase : public MaybeHardConstraintSpan<Index> {
using MaybeHardConstraintSpan<Index>::MaybeHardConstraintSpan;
explicit ChunkShapeBase(MaybeHardConstraintSpan<Index> base)
: MaybeHardConstraintSpan<Index>(base) {}
};
template <Usage U>
struct ChunkShapeFor : public ChunkShapeBase {
using ChunkShapeBase::ChunkShapeBase;
using ChunkShapeBase::hard_constraint;
};
using ChunkShape = ChunkShapeFor<kUnspecifiedUsage>;
using WriteChunkShape = ChunkShapeFor<Usage::kWrite>;
using ReadChunkShape = ChunkShapeFor<Usage::kRead>;
using CodecChunkShape = ChunkShapeFor<Usage::kCodec>;
struct ChunkAspectRatioBase : public MaybeHardConstraintSpan<double> {
using MaybeHardConstraintSpan<double>::MaybeHardConstraintSpan;
explicit ChunkAspectRatioBase(MaybeHardConstraintSpan<double> base)
: MaybeHardConstraintSpan<double>(base) {}
};
template <Usage U>
struct ChunkAspectRatioFor : public ChunkAspectRatioBase {
using ChunkAspectRatioBase::ChunkAspectRatioBase;
};
class GridView;
class Grid {
public:
Grid() = default;
Grid(const Grid&);
Grid(Grid&&) = default;
~Grid();
Grid& operator=(const Grid& other);
Grid& operator=(Grid&& other) = default;
using Shape = ChunkShapeBase;
using AspectRatio = ChunkAspectRatioBase;
using Elements = ChunkElementsBase;
DimensionIndex rank() const { return rank_; }
absl::Status Set(RankConstraint value);
Shape shape() const {
return shape_ ? Shape(span<const Index>(shape_.get(), rank_),
shape_hard_constraint_)
: Shape();
}
explicit operator Shape() const { return shape(); }
absl::Status Set(Shape value);
AspectRatio aspect_ratio() const {
return aspect_ratio_
? AspectRatio(span<const double>(aspect_ratio_.get(), rank_),
aspect_ratio_hard_constraint_)
: AspectRatio();
}
explicit operator AspectRatio() const { return aspect_ratio(); }
absl::Status Set(AspectRatio value);
Elements elements() const {
return Elements(elements_, elements_hard_constraint_);
}
explicit operator Elements() const { return elements(); }
absl::Status Set(Elements value);
absl::Status Set(const GridView& value);
friend bool operator==(const Grid& a, const Grid& b);
friend bool operator!=(const Grid& a, const Grid& b) { return !(a == b); }
TENSORSTORE_DECLARE_JSON_DEFAULT_BINDER(Grid, FromJsonOptions,
ToJsonOptions)
private:
friend class ChunkLayout;
int8_t rank_ = dynamic_rank;
bool elements_hard_constraint_ = false;
std::unique_ptr<Index[]> shape_;
std::unique_ptr<double[]> aspect_ratio_;
DimensionSet shape_hard_constraint_;
DimensionSet aspect_ratio_hard_constraint_;
Index elements_ = kImplicit;
};
class GridView {
public:
explicit GridView() = default;
explicit GridView(const GridView& other, bool hard_constraint)
: GridView(other) {
if (!hard_constraint) {
elements_hard_constraint_ = false;
shape_hard_constraint_ = false;
aspect_ratio_hard_constraint_ = false;
}
}
explicit GridView(const Grid& grid, bool hard_constraint = true)
: GridView(GridView(grid.shape(), grid.aspect_ratio(), grid.elements()),
hard_constraint) {}
explicit GridView(ChunkShapeBase shape, ChunkAspectRatioBase aspect_ratio,
ChunkElementsBase elements)
: shape_rank_(shape.size()),
aspect_ratio_rank_(aspect_ratio.size()),
elements_hard_constraint_(elements.hard_constraint),
shape_hard_constraint_(shape.hard_constraint),
aspect_ratio_hard_constraint_(aspect_ratio.hard_constraint),
elements_(elements),
shape_(shape.data()),
aspect_ratio_(aspect_ratio.data()) {}
explicit GridView(ChunkShapeBase shape)
: GridView(shape, ChunkAspectRatioBase(), ChunkElementsBase()) {}
explicit GridView(ChunkAspectRatioBase aspect_ratio)
: GridView(ChunkShapeBase(), aspect_ratio, ChunkElementsBase()) {}
explicit GridView(ChunkElementsBase elements)
: GridView(ChunkShapeBase(), ChunkAspectRatioBase(), elements) {}
ChunkShapeBase shape() const {
return ChunkShapeBase(span<const Index>(shape_, shape_rank_),
shape_hard_constraint_);
}
ChunkAspectRatioBase aspect_ratio() const {
return ChunkAspectRatioBase(
span<const double>(aspect_ratio_, aspect_ratio_rank_),
aspect_ratio_hard_constraint_);
}
ChunkElementsBase elements() const {
return ChunkElementsBase(elements_, elements_hard_constraint_);
}
private:
friend class ChunkLayout;
int8_t shape_rank_ = 0;
int8_t aspect_ratio_rank_ = 0;
bool elements_hard_constraint_ = false;
DimensionSet shape_hard_constraint_;
DimensionSet aspect_ratio_hard_constraint_;
Index elements_ = kImplicit;
const Index* shape_ = nullptr;
const double* aspect_ratio_ = nullptr;
};
template <Usage U>
class GridViewFor : public GridView {
public:
using Shape = ChunkShapeFor<U>;
using AspectRatio = ChunkAspectRatioFor<U>;
using Elements = ChunkElementsFor<U>;
using GridView::GridView;
explicit GridViewFor(GridView other) : GridView(other) {}
Shape shape() const { return Shape(GridView::shape()); }
AspectRatio aspect_ratio() const {
return AspectRatio(GridView::aspect_ratio());
}
Elements elements() const { return Elements(GridView::elements()); }
};
using Chunk = GridViewFor<kUnspecifiedUsage>;
using WriteChunk = GridViewFor<Usage::kWrite>;
using ReadChunk = GridViewFor<Usage::kRead>;
using CodecChunk = GridViewFor<Usage::kCodec>;
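// Added commentary (not part of the original source): the aliases above give
// usage-specific spellings of the same GridView machinery. A minimal sketch
// of how they combine with Set(), based on the tests later in this document:
//
//   ChunkLayout layout;
//   TENSORSTORE_RETURN_IF_ERROR(layout.Set(ChunkLayout::GridOrigin({0, 0})));
//   // Hard constraint on the read chunk shape.
//   TENSORSTORE_RETURN_IF_ERROR(
//       layout.Set(ChunkLayout::ReadChunkShape({64, 64})));
//   // Constraint applied to all usages at once.
//   TENSORSTORE_RETURN_IF_ERROR(layout.Set(ChunkLayout::ChunkElements(1000)));
//   TENSORSTORE_RETURN_IF_ERROR(layout.Finalize());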
struct InnerOrder : public span<const DimensionIndex> {
explicit InnerOrder() = default;
explicit InnerOrder(span<const DimensionIndex> s,
bool hard_constraint = true)
: span<const DimensionIndex>(s), hard_constraint(hard_constraint) {}
template <size_t N>
explicit InnerOrder(const DimensionIndex (&s)[N],
bool hard_constraint = true)
: span<const DimensionIndex>(s), hard_constraint(hard_constraint) {}
bool valid() const { return !this->empty(); }
friend bool operator==(const InnerOrder& a, const InnerOrder& b) {
return internal::RangesEqual(a, b) &&
a.hard_constraint == b.hard_constraint;
}
friend bool operator!=(const InnerOrder& a, const InnerOrder& b) {
return !(a == b);
}
bool hard_constraint{false};
};
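// Added commentary (not part of the original source): inner_order is a
// permutation of [0, rank) listing dimensions from outermost to innermost
// storage order. For example, order {2, 0, 1} with a read-chunk shape of
// {2, 3, 4} yields per-dimension chunk strides {3, 1, 6}: dimension 1 is
// innermost (stride 1), then dimension 0 (stride 3), then dimension 2
// (stride 3*2 = 6).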
struct GridOrigin : public MaybeHardConstraintSpan<Index> {
using MaybeHardConstraintSpan<Index>::MaybeHardConstraintSpan;
using MaybeHardConstraintSpan<Index>::hard_constraint;
};
using ChunkAspectRatio = ChunkAspectRatioFor<kUnspecifiedUsage>;
using WriteChunkAspectRatio = ChunkAspectRatioFor<Usage::kWrite>;
using ReadChunkAspectRatio = ChunkAspectRatioFor<Usage::kRead>;
using CodecChunkAspectRatio = ChunkAspectRatioFor<Usage::kCodec>;
ChunkLayout() = default;
explicit ChunkLayout(ChunkLayout layout, bool hard_constraint);
DimensionIndex rank() const;
bool HasHardConstraints() const;
absl::Status GetChunkTemplate(Usage usage, MutableBoxView<> box) const;
absl::Status GetWriteChunkTemplate(MutableBoxView<> box) const {
return GetChunkTemplate(kWrite, box);
}
absl::Status GetReadChunkTemplate(MutableBoxView<> box) const {
return GetChunkTemplate(kRead, box);
}
InnerOrder inner_order() const;
explicit operator InnerOrder() const { return inner_order(); }
absl::Status Set(InnerOrder value);
GridOrigin grid_origin() const;
explicit operator GridOrigin() const { return grid_origin(); }
absl::Status Set(GridOrigin value);
WriteChunk write_chunk() const;
explicit operator WriteChunk() const { return write_chunk(); }
ReadChunk read_chunk() const;
explicit operator ReadChunk() const { return read_chunk(); }
CodecChunk codec_chunk() const;
explicit operator CodecChunk() const { return codec_chunk(); }
GridView operator[](Usage usage) const;
template <Usage U>
absl::Status Set(const GridViewFor<U>& value);
WriteChunkShape write_chunk_shape() const;
explicit operator WriteChunkShape() const { return write_chunk_shape(); } | #include "tensorstore/chunk_layout.h"
#include <stddef.h>
#include <algorithm>
#include <array>
#include <cstdlib>
#include <random>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/random/bit_gen_ref.h"
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/index_transform_testutil.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/internal/testing/random_seed.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/rank.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/division.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::ChunkLayout;
using ::tensorstore::DimensionIndex;
using ::tensorstore::DimensionSet;
using ::tensorstore::Dims;
using ::tensorstore::dynamic_rank;
using ::tensorstore::Index;
using ::tensorstore::IndexDomainBuilder;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::IndexTransformView;
using ::tensorstore::kImplicit;
using ::tensorstore::kInfIndex;
using ::tensorstore::kMaxRank;
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::span;
using ::tensorstore::internal::ChooseChunkGrid;
using ::tensorstore::internal::ChooseChunkShape;
using ::tensorstore::internal::ChooseReadWriteChunkShapes;
using ::tensorstore::internal::MakeRandomDimensionOrder;
using ::testing::Optional;
using Usage = ChunkLayout::Usage;
TEST(ChunkLayoutTest, SingleLevelRank0) {
ChunkLayout layout;
TENSORSTORE_ASSERT_OK(layout.Set(tensorstore::RankConstraint(0)));
TENSORSTORE_ASSERT_OK(layout.Finalize());
ASSERT_EQ(0, layout.rank());
EXPECT_THAT(layout.inner_order(), ::testing::ElementsAre());
EXPECT_THAT(layout | tensorstore::IdentityTransform(0), Optional(layout));
EXPECT_THAT(layout.read_chunk().shape(), ::testing::ElementsAre());
}
TEST(ChunkLayoutTest, SingleLevelRank1) {
ChunkLayout layout;
TENSORSTORE_ASSERT_OK(layout.Set(ChunkLayout::GridOrigin({0})));
TENSORSTORE_ASSERT_OK(layout.Set(ChunkLayout::WriteChunkShape({5})));
TENSORSTORE_ASSERT_OK(layout.Finalize());
ASSERT_EQ(1, layout.rank());
EXPECT_THAT(layout.inner_order(), ::testing::ElementsAre());
EXPECT_THAT(layout.grid_origin(), ::testing::ElementsAre(0));
EXPECT_THAT(layout.read_chunk_shape(), ::testing::ElementsAre(5));
EXPECT_THAT(layout.write_chunk_shape(), ::testing::ElementsAre(5));
EXPECT_THAT(layout | tensorstore::IdentityTransform(1), Optional(layout));
}
using HierarchicalGridCell = std::array<std::vector<Index>, 3>;
HierarchicalGridCell GetHierarchicalGridCell(const ChunkLayout& layout,
span<const Index> position) {
const DimensionIndex rank = layout.rank();
auto origin = layout.grid_origin();
HierarchicalGridCell hier_grid_cell;
for (Usage usage : ChunkLayout::kUsages) {
auto& grid_cell = hier_grid_cell[static_cast<int>(usage)];
grid_cell.resize(rank);
auto grid = layout[usage];
for (DimensionIndex i = 0; i < rank; ++i) {
const Index size = grid.shape()[i];
if (size == 0) {
grid_cell[i] = 0;
continue;
}
const Index x = position[i] - origin[i];
grid_cell[i] = tensorstore::FloorOfRatio(x, size);
}
}
return hier_grid_cell;
}
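// Added commentary (not part of the original source): for each usage level,
// the cell index along dimension i is FloorOfRatio(position[i] - origin[i],
// shape[i]); a chunk size of 0 means "unconstrained", which pins the cell
// index to 0. E.g. with origin 0 and read-chunk size 5, position 12 maps to
// cell floor(12/5) = 2, and position -1 maps to cell floor(-1/5) = -1, since
// FloorOfRatio rounds toward negative infinity.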
void TestGridCorrespondence(absl::BitGenRef gen,
const ChunkLayout& output_layout,
const ChunkLayout& input_layout,
IndexTransformView<> transform) {
const DimensionIndex output_rank = transform.output_rank();
const DimensionIndex input_rank = transform.input_rank();
ASSERT_EQ(output_layout.rank(), output_rank);
ASSERT_EQ(input_layout.rank(), input_rank);
HierarchicalGridCell output_chunk_divisors;
for (Usage usage : ChunkLayout::kUsages) {
auto& divisors = output_chunk_divisors[static_cast<size_t>(usage)];
divisors.resize(output_rank, 1);
for (DimensionIndex output_dim = 0; output_dim < output_rank;
++output_dim) {
const auto map = transform.output_index_maps()[output_dim];
if (map.method() !=
tensorstore::OutputIndexMethod::single_input_dimension) {
continue;
}
auto size = output_layout[usage].shape()[output_dim];
if (size == 0) continue;
divisors[output_dim] =
std::abs(map.stride()) /
tensorstore::GreatestCommonDivisor(map.stride(), size);
}
}
SCOPED_TRACE(tensorstore::StrCat("output_layout=", output_layout));
SCOPED_TRACE(tensorstore::StrCat("input_layout=", input_layout));
SCOPED_TRACE(
tensorstore::StrCat("output_chunk_divisors=",
::testing::PrintToString(output_chunk_divisors)));
absl::flat_hash_map<HierarchicalGridCell, HierarchicalGridCell>
output_to_input_cell_map;
absl::flat_hash_map<HierarchicalGridCell, HierarchicalGridCell>
input_to_output_cell_map;
std::vector<Index> input_pos(input_rank);
std::vector<Index> output_pos(output_rank);
const auto test_point = [&] {
TENSORSTORE_ASSERT_OK(transform.TransformIndices(input_pos, output_pos));
auto input_cell = GetHierarchicalGridCell(input_layout, input_pos);
auto output_cell = GetHierarchicalGridCell(output_layout, output_pos);
SCOPED_TRACE(tensorstore::StrCat("orig_output_cell=",
::testing::PrintToString(output_cell)));
for (Usage usage : ChunkLayout::kUsages) {
const size_t usage_index = static_cast<size_t>(usage);
for (DimensionIndex output_dim = 0; output_dim < output_rank;
++output_dim) {
auto& out_cell = output_cell[usage_index][output_dim];
out_cell = tensorstore::FloorOfRatio(
out_cell, output_chunk_divisors[usage_index][output_dim]);
}
}
SCOPED_TRACE(tensorstore::StrCat("input_pos=", span(input_pos)));
SCOPED_TRACE(tensorstore::StrCat("output_pos=", span(output_pos)));
SCOPED_TRACE(tensorstore::StrCat("input_cell=",
::testing::PrintToString(input_cell)));
SCOPED_TRACE(tensorstore::StrCat("output_cell=",
::testing::PrintToString(output_cell)));
auto input_it =
output_to_input_cell_map.emplace(output_cell, input_cell).first;
auto output_it =
input_to_output_cell_map.emplace(input_cell, output_cell).first;
EXPECT_EQ(input_it->second, input_cell);
EXPECT_EQ(output_it->second, output_cell);
};
constexpr size_t kNumSamplePoints = 10;
for (size_t sample_i = 0; sample_i < kNumSamplePoints; ++sample_i) {
for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
input_pos[input_dim] =
absl::Uniform<Index>(absl::IntervalClosedClosed, gen, -40, 40);
}
for (DimensionIndex dir_input_dim = 0; dir_input_dim < input_rank;
++dir_input_dim) {
const Index initial_pos = input_pos[dir_input_dim];
for (Index i = -20; i <= 20; ++i) {
input_pos[dir_input_dim] = initial_pos + i;
test_point();
}
input_pos[dir_input_dim] = initial_pos;
}
}
}
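// Added commentary (not part of the original source): the check above
// samples points along axis-aligned lines and asserts that the mapping
// between input grid cells and (divisor-adjusted) output grid cells is
// one-to-one in both directions, i.e. the transformed layout induces the
// same partition of the data as the original layout. The per-dimension
// divisors absorb output strides that are not commensurate with the chunk
// size.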
template <typename Expr>
void TestApplyIndexTransform(::nlohmann::json a, const Expr& expr,
::nlohmann::json b) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto a_layout, ChunkLayout::FromJson(a));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto b_layout, ChunkLayout::FromJson(b));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform, tensorstore::IdentityTransform(a_layout.rank()) | expr);
EXPECT_THAT(a_layout | transform, ::testing::Optional(b_layout));
}
struct MakeRandomChunkLayoutParameters {
DimensionIndex min_rank = 1;
DimensionIndex max_rank = 3;
};
ChunkLayout MakeRandomChunkLayout(
absl::BitGenRef gen, const MakeRandomChunkLayoutParameters& p = {}) {
const DimensionIndex rank = absl::Uniform<DimensionIndex>(
absl::IntervalClosedClosed, gen, p.min_rank, p.max_rank);
ChunkLayout layout;
TENSORSTORE_CHECK_OK(layout.Set(tensorstore::RankConstraint(rank)));
if (absl::Bernoulli(gen, 0.5)) {
DimensionIndex inner_order[kMaxRank];
MakeRandomDimensionOrder(gen, span(inner_order, rank));
TENSORSTORE_CHECK_OK(
layout.Set(ChunkLayout::InnerOrder(span(inner_order, rank))));
  }
Index grid_origin[kMaxRank];
for (DimensionIndex dim = 0; dim < rank; ++dim) {
grid_origin[dim] =
absl::Uniform<Index>(absl::IntervalClosedClosed, gen, -5, 5);
}
TENSORSTORE_CHECK_OK(
layout.Set(ChunkLayout::GridOrigin(span(grid_origin, rank))));
const auto set_grid = [&](Usage usage) {
if (absl::Bernoulli(gen, 0.3)) {
return;
}
Index shape[kMaxRank];
std::fill_n(shape, rank, 0);
for (DimensionIndex dim = 0; dim < rank; ++dim) {
if (absl::Bernoulli(gen, 0.3)) {
continue;
}
Index size;
if (usage == Usage::kWrite && layout.read_chunk_shape()[dim] != 0) {
const Index read_size = layout.read_chunk_shape()[dim];
size = absl::Uniform<Index>(absl::IntervalClosedClosed, gen, 1, 5) *
read_size;
} else {
size = absl::Uniform<Index>(absl::IntervalClosedClosed, gen, 1,
usage == Usage::kCodec ? 5 : 10);
}
shape[dim] = size;
}
TENSORSTORE_CHECK_OK(layout.Set(ChunkLayout::Chunk(
ChunkLayout::ChunkShapeBase(span<const Index>(shape, rank)), usage)));
};
set_grid(Usage::kCodec);
set_grid(Usage::kRead);
set_grid(Usage::kWrite);
TENSORSTORE_CHECK_OK(layout.Finalize());
return layout;
}
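// Added commentary (not part of the original source): note that when a read
// chunk size is already set for a dimension, the write chunk size above is
// drawn as an integer multiple of it (1x-5x), preserving the invariant that
// write chunks are aligned aggregates of whole read chunks.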
TEST(ChunkLayoutTest, Json) {
tensorstore::TestJsonBinderRoundTripJsonOnly<ChunkLayout>(
{
{
{"rank", 0},
},
{
{"rank", 2},
},
{
{"grid_origin", {1, 2}},
{"write_chunk",
{
{"shape", {10, 11}},
}},
{"inner_order", {1, 0}},
},
},
tensorstore::internal_json_binding::DefaultBinder<>,
tensorstore::IncludeDefaults{false});
}
TEST(ChunkLayoutTest, JsonExcludeDefaults) {
tensorstore::TestJsonBinderRoundTripJsonOnly<ChunkLayout>(
{{
{"grid_origin", {1, 2}},
{"write_chunk",
{
{"shape", {10, 11}},
}},
{"inner_order", {1, 0}},
}},
tensorstore::internal_json_binding::DefaultBinder<>,
tensorstore::IncludeDefaults{false});
}
TEST(ChunkLayoutTest, Rank2Translate) {
TestApplyIndexTransform(
{
{"grid_origin", {0, 1}},
{"write_chunk",
{
{"shape", {10, 20}},
}},
{"inner_order", {1, 0}},
},
Dims(0, 1).TranslateBy(5),
{
{"grid_origin", {5, 6}},
{"write_chunk",
{
{"shape", {10, 20}},
}},
{"inner_order", {1, 0}},
});
}
TEST(ChunkLayoutTest, Rank2Transpose) {
TestApplyIndexTransform(
{
{"grid_origin", {0, 1}},
{"write_chunk",
{
{"shape", {10, 20}},
}},
{"inner_order", {1, 0}},
},
Dims(1, 0).Transpose(),
{
{"grid_origin", {1, 0}},
{"write_chunk",
{
{"shape", {20, 10}},
}},
{"inner_order", {0, 1}},
});
}
TEST(ChunkLayoutTest, Rank2TransposeWithGridOrder) {
TestApplyIndexTransform(
{
{"grid_origin", {0, 1}},
{"write_chunk",
{
{"shape", {10, 20}},
}},
{"inner_order", {1, 0}},
},
Dims(1, 0).Transpose(),
{
{"grid_origin", {1, 0}},
{"write_chunk",
{
{"shape", {20, 10}},
}},
{"inner_order", {0, 1}},
});
}
TEST(ChunkLayoutTest, Rank2Stride) {
TestApplyIndexTransform(
{
{"grid_origin", {0, 1}},
{"write_chunk",
{
{"shape", {10, 20}},
}},
{"inner_order", {0, 1}},
},
Dims(0, 1).Stride(2),
{
{"grid_origin", {0, 1}},
{"write_chunk",
{
{"shape", {5, 10}},
}},
{"inner_order", {0, 1}},
});
}
TEST(ChunkLayoutTest, Rank2StrideNotEvenlyDivisible) {
TestApplyIndexTransform(
{
{"grid_origin", {0, 1}},
{"write_chunk",
{
{"shape", {10, 20}},
}},
{"inner_order", {0, 1}},
},
Dims(0, 1).Stride(6),
{
{"grid_origin", {0, 1}},
{"write_chunk",
{
{"shape", {5, 10}},
}},
{"inner_order", {0, 1}},
});
}
TEST(ChunkLayoutTest, Rank2StrideNegative) {
TestApplyIndexTransform(
{
{"grid_origin", {0, 1}},
{"write_chunk",
{
{"shape", {10, 20}},
}},
{"inner_order", {0, 1}},
},
Dims(0, 1).Stride(-2),
{
{"grid_origin", {1, 0}},
{"write_chunk",
{
{"shape", {5, 10}},
}},
{"inner_order", {0, 1}},
});
}
TEST(ChunkLayoutTest, Rank2TwoLevelStrideNegative) {
TestApplyIndexTransform(
{
{"grid_origin", {0, 1}},
{"write_chunk",
{
{"shape", {10, 20}},
}},
{"read_chunk",
{
{"shape", {5, 5}},
}},
{"inner_order", {0, 1}},
},
Dims(0, 1).TranslateBy({2, 3}).Stride(-2),
{
{"grid_origin", {0, -1}},
{"write_chunk",
{
{"shape", {5, 10}},
}},
{"read_chunk",
{
{"shape", {5, 5}},
}},
{"inner_order", {0, 1}},
});
}
TEST(ApplyIndexTransformTest, RandomInvertible) {
constexpr size_t kNumIterations = 10;
for (size_t iteration = 0; iteration < kNumIterations; ++iteration) {
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_INTERNAL_LAYOUT_TEST_SEED")};
MakeRandomChunkLayoutParameters layout_p;
auto output_layout = MakeRandomChunkLayout(gen, layout_p);
tensorstore::internal::MakeStridedIndexTransformForOutputSpaceParameters
transform_p;
transform_p.new_dims_are_singleton = false;
auto transform =
tensorstore::internal::MakeRandomStridedIndexTransformForOutputSpace(
gen, tensorstore::IdentityTransform(output_layout.rank()).domain(),
transform_p);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto input_layout,
output_layout | transform);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto new_output_layout,
ApplyInverseIndexTransform(transform, input_layout));
SCOPED_TRACE(tensorstore::StrCat("transform=", transform));
EXPECT_EQ(output_layout, new_output_layout)
<< "input_layout=" << input_layout;
TestGridCorrespondence(gen, output_layout, input_layout, transform);
}
}
TEST(ApplyIndexTransformTest, RandomNonInvertibleUnaligned) {
constexpr size_t kNumIterations = 10;
for (size_t iteration = 0; iteration < kNumIterations; ++iteration) {
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_INTERNAL_LAYOUT_TEST_SEED")};
MakeRandomChunkLayoutParameters layout_p;
auto output_layout = MakeRandomChunkLayout(gen, layout_p);
tensorstore::internal::MakeStridedIndexTransformForOutputSpaceParameters
transform_p;
transform_p.new_dims_are_singleton = false;
transform_p.max_stride = 3;
auto transform =
tensorstore::internal::MakeRandomStridedIndexTransformForOutputSpace(
gen, tensorstore::IdentityTransform(output_layout.rank()).domain(),
transform_p);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto input_layout,
output_layout | transform);
SCOPED_TRACE(tensorstore::StrCat("transform=", transform));
TestGridCorrespondence(gen, output_layout, input_layout, transform);
}
}
TEST(ApplyIndexTransformTest, RandomNonInvertibleAligned) {
constexpr size_t kNumIterations = 10;
for (size_t iteration = 0; iteration < kNumIterations; ++iteration) {
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_INTERNAL_LAYOUT_TEST_SEED")};
MakeRandomChunkLayoutParameters layout_p;
auto input_layout = MakeRandomChunkLayout(gen, layout_p);
tensorstore::internal::MakeStridedIndexTransformForInputSpaceParameters
transform_p;
transform_p.max_stride = 3;
auto transform =
tensorstore::internal::MakeRandomStridedIndexTransformForInputSpace(
gen, tensorstore::IdentityTransform(input_layout.rank()).domain(),
transform_p);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto output_layout,
ApplyInverseIndexTransform(transform, input_layout));
SCOPED_TRACE(tensorstore::StrCat("transform=", transform));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto new_input_layout, ApplyIndexTransform(transform, output_layout));
EXPECT_EQ(input_layout, new_input_layout)
<< "output_layout=" << output_layout;
TestGridCorrespondence(gen, output_layout, input_layout, transform);
}
}
TEST(ChunkLayoutTest, DefaultConstruct) {
ChunkLayout x;
EXPECT_EQ(dynamic_rank, x.rank());
EXPECT_FALSE(x.inner_order().valid());
EXPECT_FALSE(x.grid_origin().valid());
EXPECT_FALSE(x.read_chunk().aspect_ratio().valid());
}
TEST(ChunkLayoutTest, ConstraintsJson) {
tensorstore::TestJsonBinderRoundTripJsonOnly<ChunkLayout>({
{
{"write_chunk",
{
{"elements_soft_constraint", 5},
}},
},
{
{"grid_origin", {1, 2}},
{"write_chunk",
{
{"shape", {10, 11}},
}},
{"inner_order", {1, 0}},
},
{
{"grid_origin", {1, 2}},
{"write_chunk",
{
{"shape", {10, 11}},
}},
{"inner_order_soft_constraint", {1, 0}},
},
{
{"grid_origin", {nullptr, nullptr, 3}},
{"grid_origin_soft_constraint", {4, nullptr, nullptr}},
{"write_chunk",
{{"elements_soft_constraint", 1000}, {"shape", {5, nullptr, 6}}}},
{"read_chunk",
{{"elements", 100},
{"shape_soft_constraint", {nullptr, 10, nullptr}},
{"aspect_ratio", {nullptr, 1, 2}}}},
{"codec_chunk", {{"aspect_ratio_soft_constraint", {nullptr, 2, 1}}}},
{"inner_order", {2, 1, 0}},
},
});
}
TEST(ChunkLayoutTest, JsonRoundTripInexact) {
tensorstore::TestJsonBinderRoundTripJsonOnlyInexact<ChunkLayout>({
{{
{"chunk", {{"elements", 50}}},
},
{
{"read_chunk", {{"elements", 50}}},
{"write_chunk", {{"elements", 50}}},
}},
{{
{"chunk", {{"elements_soft_constraint", 50}}},
},
{
{"read_chunk", {{"elements_soft_constraint", 50}}},
{"write_chunk", {{"elements_soft_constraint", 50}}},
}},
{{
{"read_chunk", {{"shape", {-1, 2, 3}}}},
},
{
{"read_chunk",
{{"shape", {nullptr, 2, 3}},
{"shape_soft_constraint", {-1, nullptr, nullptr}}}},
}},
{{
{"chunk", {{"elements_soft_constraint", 50}}},
{"read_chunk", {{"elements_soft_constraint", 60}}},
},
{
{"read_chunk", {{"elements_soft_constraint", 50}}},
{"write_chunk", {{"elements_soft_constraint", 50}}},
}},
{{
{"chunk", {{"elements_soft_constraint", 50}}},
{"read_chunk", {{"elements", 60}}},
},
{
{"read_chunk", {{"elements", 60}}},
{"write_chunk", {{"elements_soft_constraint", 50}}},
}},
{{
{"chunk", {{"aspect_ratio", {2, 3}}}},
},
{
{"codec_chunk", {{"aspect_ratio", {2, 3}}}},
{"read_chunk", {{"aspect_ratio", {2, 3}}}},
{"write_chunk", {{"aspect_ratio", {2, 3}}}},
}},
{{
{"chunk", {{"aspect_ratio_soft_constraint", {2, 3}}}},
},
{
{"codec_chunk", {{"aspect_ratio_soft_constraint", {2, 3}}}},
{"read_chunk", {{"aspect_ratio_soft_constraint", {2, 3}}}},
{"write_chunk", {{"aspect_ratio_soft_constraint", {2, 3}}}},
}},
{{
{"chunk", {{"shape", {2, 3}}}},
},
{
{"read_chunk", {{"shape", {2, 3}}}},
{"write_chunk", {{"shape", {2, 3}}}},
}},
{{
{"chunk", {{"shape_soft_constraint", {2, 3}}}},
},
{
{"read_chunk", {{"shape_soft_constraint", {2, 3}}}},
{"write_chunk", {{"shape_soft_constraint", {2, 3}}}},
}},
{{
{"chunk", {{"shape_soft_constraint", {2, 3}}}},
{"read_chunk", {{"shape", {4, nullptr}}}},
},
{
{"read_chunk",
{
{"shape_soft_constraint", {nullptr, 3}},
{"shape", {4, nullptr}},
}},
{"write_chunk", {{"shape_soft_constraint", {2, 3}}}},
}},
});
}
TEST(ChunkLayoutTest, CompareAllUnset) {
ChunkLayout a;
ChunkLayout b;
EXPECT_FALSE(b.Set(ChunkLayout::InnerOrder({2, 3, 4})).ok());
EXPECT_EQ(a, b);
EXPECT_EQ(b, a);
}
TEST(ChunkLayoutTest, CompareInnerOrder) {
tensorstore::TestCompareDistinctFromJson<ChunkLayout>({
::nlohmann::json::object_t(),
{{"inner_order", {0, 1}}},
{{"inner_order", {0, 1, 2}}},
{{"inner_order", {0, 2, 1}}},
{{"inner_order_soft_constraint", {0, 2, 1}}},
});
}
TEST(ChunkLayoutTest, CompareChunkElements) {
for (std::string prefix : {"codec", "read", "write"}) {
tensorstore::TestCompareDistinctFromJson<ChunkLayout>({
::nlohmann::json::object_t(),
{{prefix + "_chunk", {{"elements", 42}}}},
{{prefix + "_chunk", {{"elements", 43}}}},
{{prefix + "_chunk", {{"elements_soft_constraint", 42}}}},
});
}
}
TEST(ChunkLayoutTest, CompareChunkAspectRatio) {
for (std::string prefix : {"codec", "read", "write"}) {
tensorstore::TestCompareDistinctFromJson<ChunkLayout>({
::nlohmann::json::object_t(),
{{prefix + "_chunk", {{"aspect_ratio", {1, 2, nullptr}}}}},
{{prefix + "_chunk", {{"aspect_ratio", {1, 1, nullptr}}}}},
{{prefix + "_chunk",
{
{"aspect_ratio", {1, 1, nullptr}},
{"aspect_ratio_soft_constraint", {nullptr, nullptr, 4}},
}}},
{{prefix + "_chunk",
{{"aspect_ratio_soft_constraint", {1, 2, nullptr}}}}},
});
}
}
TEST(ChunkLayoutTest, CompareGridOrigin) {
tensorstore::TestCompareDistinctFromJson<ChunkLayout>({
::nlohmann::json::object_t(),
{{"grid_origin", {1, 2, nullptr}}},
{{"grid_origin", {1, 1, nullptr}}},
{
{"grid_origin", {1, 1, nullptr}},
{"grid_origin_soft_constraint", {nullptr, nullptr, 4}},
},
{{"grid_origin_soft_constraint", {1, 2, nullptr}}},
});
}
TEST(ChunkLayoutTest, CompareChunkShape) {
for (std::string prefix : {"codec", "read", "write"}) {
tensorstore::TestCompareDistinctFromJson<ChunkLayout>({
::nlohmann::json::object_t(),
{{prefix + "_chunk", {{"shape", {1, 2, nullptr}}}}},
{{prefix + "_chunk", {{"shape", {1, 1, nullptr}}}}},
{{prefix + "_chunk",
{
{"shape", {1, 1, nullptr}},
{"shape_soft_constraint", {nullptr, nullptr, 4}},
}}},
{{prefix + "_chunk", {{"shape_soft_constraint", {1, 2, nullptr}}}}},
});
}
}
TEST(ChunkLayoutTest, SetUnspecifiedUsage) {
ChunkLayout constraints;
TENSORSTORE_ASSERT_OK(constraints.Set(
ChunkLayout::Chunk(ChunkLayout::ChunkShape({5, 6, 0}),
ChunkLayout::ChunkAspectRatio({2, 1, 0}),
ChunkLayout::ChunkElements(42))));
EXPECT_THAT(constraints.ToJson(),
::testing::Optional(MatchesJson({
{"write_chunk",
{{"shape", {5, 6, nullptr}},
{"aspect_ratio", {2, 1, nullptr}},
{"elements", 42}}},
{"read_chunk",
{{"shape", {5, 6, nullptr}},
{"aspect_ratio", {2, 1, nullptr}},
{"elements", 42}}},
{"codec_chunk", {{"aspect_ratio", {2, 1, nullptr}}}},
})));
}
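// Added commentary (not part of the original source): as the expected JSON
// above shows, constraints set with unspecified usage fan out to both the
// write and read chunks, while the aspect ratio additionally applies to the
// codec chunk; shape and elements are not propagated to the codec chunk.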
TEST(ChunkLayoutConstraintsTest, ApplyIndexTransformRandomInvertible) {
constexpr size_t kNumIterations = 10;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto output_constraints,
ChunkLayout::FromJson({
{"codec_chunk",
{{"elements_soft_constraint", 20},
{"aspect_ratio", {1, 2, 3}},
{"shape", {nullptr, 4, 5}}}},
{"read_chunk",
{{"elements", 30},
{"aspect_ratio", {4, 5, 6}},
{"shape_soft_constraint", {6, nullptr, 7}}}},
{"write_chunk",
{{"elements", 40},
{"aspect_ratio_soft_constraint", {7, 8, 9}},
{"shape", {8, 9, nullptr}}}},
{"grid_origin", {nullptr, nullptr, 11}},
{"inner_order_soft_constraint", {2, 0, 1}},
}));
for (size_t iteration = 0; iteration < kNumIterations; ++iteration) {
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_INTERNAL_LAYOUT_CONSTRAINTS_TEST_SEED")};
tensorstore::internal::MakeStridedIndexTransformForOutputSpaceParameters
transform_p;
transform_p.new_dims_are_singleton = true;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain, IndexDomainBuilder(output_constraints.rank()).Finalize());
auto transform =
tensorstore::internal::MakeRandomStridedIndexTransformForOutputSpace(
gen, domain, transform_p);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto inverse_transform,
InverseTransform(transform));
SCOPED_TRACE(tensorstore::StrCat("transform=", transform));
SCOPED_TRACE(tensorstore::StrCat("inverse_transform=", inverse_transform));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto input_constraints,
output_constraints | transform);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto input_constraints2,
ApplyInverseIndexTransform(inverse_transform, output_constraints));
EXPECT_EQ(input_constraints, input_constraints2)
<< "output_constraints=" << output_constraints;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto output_constraints2,
ApplyInverseIndexTransform(transform, input_constraints));
EXPECT_EQ(output_constraints, output_constraints2)
<< "input_constraints=" << input_constraints;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto new_output_constraints,
input_constraints | inverse_transform);
EXPECT_EQ(output_constraints, new_output_constraints)
<< "input_constraints=" << input_constraints;
}
}
TEST(ChunkLayoutTest, ApplyIndexTransformNoRank) {
ChunkLayout constraints;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto new_constraints,
constraints | tensorstore::Dims(0, 1).TranslateBy(5));
EXPECT_EQ(constraints, new_constraints);
}
TEST(ChunkLayoutTest, ApplyIndexTransform) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto constraints,
ChunkLayout::FromJson({
{"inner_order", {0, 1, 2}},
{"grid_origin", {1, 2, 3}},
{"read_chunk", {{"shape", {4, 5, 6}}}},
}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected_new_constraints,
ChunkLayout::FromJson({
{"inner_order", {2, 1, 0}},
{"grid_origin", {8, 7, 6}},
{"read_chunk", {{"shape", {6, 5, 4}}}},
}));
EXPECT_THAT(
constraints | tensorstore::Dims(2, 1, 0).TranslateBy(5).Transpose(),
::testing::Optional(expected_new_constraints));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected_new_inverse_constraints,
ChunkLayout::FromJson({
{"inner_order", {2, 1, 0}},
{"grid_origin", {-2, -3, -4}},
{"read_chunk", {{"shape", {6, 5, 4}}}},
}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
tensorstore::IdentityTransform(3) |
tensorstore::Dims(2, 1, 0).TranslateBy(5).Transpose());
EXPECT_THAT(ApplyInverseIndexTransform(transform, constraints),
::testing::Optional(expected_new_inverse_constraints));
}
TEST(ChunkLayoutTest, ApplyIndexTransformOverflow) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto constraints,
ChunkLayout::FromJson({
{"grid_origin", {0, 0, 0}},
}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform, tensorstore::IdentityTransform(3) |
tensorstore::Dims(0).TranslateBy(kInfIndex));
EXPECT_THAT(constraints | transform,
MatchesStatus(
absl::StatusCode::kOutOfRange,
"Error transforming grid_origin: "
"Error transforming output dimension 0 -> input dimension 0: "
"Integer overflow transforming output origin 0 by offset .* "
"and stride 1"));
EXPECT_THAT(ApplyInverseIndexTransform(transform, constraints), |
515 | cpp | google/tensorstore | virtual_chunked | tensorstore/driver/virtual_chunked/virtual_chunked.cc | tensorstore/driver/virtual_chunked/virtual_chunked_test.cc | #ifndef TENSORSTORE_VIRTUAL_CHUNKED_H_
#define TENSORSTORE_VIRTUAL_CHUNKED_H_
#include <functional>
#include <type_traits>
#include "absl/base/attributes.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/context.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/serialization/function.h"
#include "tensorstore/staleness_bound.h"
#include "tensorstore/tensorstore.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/option.h"
namespace tensorstore {
namespace virtual_chunked {
class ReadParameters {
public:
ReadParameters() = default;
const Executor& executor() const { return executor_; }
const StorageGeneration& if_not_equal() const { return if_not_equal_; }
absl::Time staleness_bound() const { return staleness_bound_; }
Executor executor_;
StorageGeneration if_not_equal_;
absl::Time staleness_bound_;
};
using ReadFunction =
serialization::SerializableFunction<Future<TimestampedStorageGeneration>(
Array<void, dynamic_rank, offset_origin> output,
ReadParameters read_params)>;
template <typename Func, typename Element, DimensionIndex Rank>
constexpr inline bool IsReadFunction =
serialization::IsSerializableFunctionLike<
Future<TimestampedStorageGeneration>, Func,
Array<Element, Rank, offset_origin>, ReadParameters>;
class WriteParameters {
public:
WriteParameters() = default;
const Executor& executor() const { return executor_; }
const StorageGeneration& if_equal() const { return if_equal_; }
Executor executor_;
StorageGeneration if_equal_;
};
using WriteFunction =
serialization::SerializableFunction<Future<TimestampedStorageGeneration>(
Array<const void, dynamic_rank, offset_origin> input,
WriteParameters write_params)>;
template <typename Func, typename Element, DimensionIndex Rank>
constexpr inline bool IsWriteFunction =
serialization::IsSerializableFunctionLike<
Future<TimestampedStorageGeneration>, Func,
Array<const Element, Rank, offset_origin>, WriteParameters>;
struct OpenOptions : public Schema {
Context context;
Transaction transaction{no_transaction};
RecheckCachedData recheck_cached_data;
template <typename T>
static inline constexpr bool IsOption = Schema::IsOption<T>;
using Schema::Set;
absl::Status Set(Context value) {
context = std::move(value);
return absl::OkStatus();
}
absl::Status Set(Transaction value) {
transaction = std::move(value);
return absl::OkStatus();
}
absl::Status Set(RecheckCachedData value) {
if (value.specified()) {
recheck_cached_data = value;
}
return absl::OkStatus();
}
};
template <>
constexpr inline bool OpenOptions::IsOption<Context> = true;
template <>
constexpr inline bool OpenOptions::IsOption<Transaction> = true;
template <>
constexpr inline bool OpenOptions::IsOption<RecheckCachedData> = true;
namespace internal_virtual_chunked {
Result<internal::Driver::Handle> MakeDriver(
virtual_chunked::ReadFunction read_function,
virtual_chunked::WriteFunction write_function, OpenOptions&& options);
template <typename ErasedElement, typename Element, DimensionIndex Rank,
typename Parameters, typename Func>
struct FunctionAdapter {
Future<TimestampedStorageGeneration> operator()(
Array<ErasedElement, dynamic_rank, offset_origin> array,
Parameters params) const {
return func_(StaticCast<Array<Element, Rank, offset_origin>, unchecked>(
std::move(array)),
std::move(params));
}
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS Func func_;
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.func_);
};
};
}
template <typename Element = void, DimensionIndex Rank = dynamic_rank,
typename ReadFunc>
std::enable_if_t<IsReadFunction<ReadFunc, Element, Rank>,
Result<TensorStore<Element, Rank, ReadWriteMode::read>>>
VirtualChunked(ReadFunc read_function, OpenOptions&& options) {
static_assert(std::is_same_v<Element, internal::remove_cvref_t<Element>>,
"Element type must be unqualified");
static_assert(Rank >= dynamic_rank,
"Rank must equal dynamic_rank (-1) or be non-negative.");
if constexpr (Rank != dynamic_rank) {
TENSORSTORE_RETURN_IF_ERROR(options.Set(RankConstraint{Rank}));
}
if constexpr (!std::is_void_v<Element>) {
TENSORSTORE_RETURN_IF_ERROR(options.Set(dtype_v<Element>));
}
ReadFunction serializable_read_function;
if constexpr (std::is_void_v<Element> && Rank == dynamic_rank) {
serializable_read_function = std::move(read_function);
} else {
serializable_read_function =
internal_virtual_chunked::FunctionAdapter<void, Element, Rank,
ReadParameters, ReadFunc>{
std::move(read_function)};
if (!serializable_read_function) {
return absl::InvalidArgumentError("Invalid read_function specified");
}
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto handle,
internal_virtual_chunked::MakeDriver(
std::move(serializable_read_function), {}, std::move(options)));
return internal::TensorStoreAccess::Construct<
TensorStore<Element, Rank, ReadWriteMode::read>>(std::move(handle));
}
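// Added commentary (not part of the original source): a minimal read-only
// usage sketch of the overload above, mirroring the helpers in the
// accompanying test file:
//
//   auto store = tensorstore::VirtualChunked<tensorstore::Index, 2>(
//       tensorstore::NonSerializable{
//           [](auto output, auto params)
//               -> Future<TimestampedStorageGeneration> {
//             tensorstore::IterateOverIndexRange(
//                 output.domain(),
//                 [&](tensorstore::span<const Index> idx) {
//                   output(idx) = idx[0];
//                 });
//             return TimestampedStorageGeneration{
//                 StorageGeneration::FromString(""), absl::Now()};
//           }},
//       tensorstore::Schema::Shape({100, 200}));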
template <typename Element = void, DimensionIndex Rank = dynamic_rank,
typename ReadFunc, typename WriteFunc>
std::enable_if_t<(IsReadFunction<ReadFunc, Element, Rank> &&
IsWriteFunction<WriteFunc, Element, Rank>),
Result<TensorStore<Element, Rank, ReadWriteMode::read_write>>>
VirtualChunked(ReadFunc read_function, WriteFunc write_function,
OpenOptions&& options) {
static_assert(std::is_same_v<Element, internal::remove_cvref_t<Element>>,
"Element type must be unqualified");
static_assert(Rank >= dynamic_rank,
"Rank must equal dynamic_rank (-1) or be non-negative.");
if constexpr (Rank != dynamic_rank) {
TENSORSTORE_RETURN_IF_ERROR(options.Set(RankConstraint{Rank}));
}
if constexpr (!std::is_void_v<Element>) {
TENSORSTORE_RETURN_IF_ERROR(options.Set(dtype_v<Element>));
}
ReadFunction serializable_read_function;
WriteFunction serializable_write_function;
if constexpr (std::is_void_v<Element> && Rank == dynamic_rank) {
serializable_read_function = std::move(read_function);
serializable_write_function = std::move(write_function);
} else {
serializable_read_function =
internal_virtual_chunked::FunctionAdapter<void, Element, Rank,
ReadParameters, ReadFunc>{
std::move(read_function)};
if (!serializable_read_function) {
return absl::InvalidArgumentError("Invalid read_function specified");
}
serializable_write_function =
internal_virtual_chunked::FunctionAdapter<const void, Element, Rank,
WriteParameters, WriteFunc>{
std::move(write_function)};
if (!serializable_write_function) {
return absl::InvalidArgumentError("Invalid write_function specified");
}
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto handle,
internal_virtual_chunked::MakeDriver(
std::move(serializable_read_function),
std::move(serializable_write_function), std::move(options)));
return internal::TensorStoreAccess::Construct<
TensorStore<Element, Rank, ReadWriteMode::read_write>>(std::move(handle));
}
template <typename Element = void, DimensionIndex Rank = dynamic_rank,
typename WriteFunc>
Result<TensorStore<Element, Rank, ReadWriteMode::write>>
VirtualChunkedWriteOnly(WriteFunc write_function, OpenOptions&& options) {
static_assert(std::is_same_v<Element, internal::remove_cvref_t<Element>>,
"Element type must be unqualified");
static_assert(Rank >= dynamic_rank,
"Rank must equal dynamic_rank (-1) or be non-negative.");
static_assert(IsWriteFunction<WriteFunc, Element, Rank>);
if constexpr (Rank != dynamic_rank) {
TENSORSTORE_RETURN_IF_ERROR(options.Set(RankConstraint{Rank}));
}
if constexpr (!std::is_void_v<Element>) {
TENSORSTORE_RETURN_IF_ERROR(options.Set(dtype_v<Element>));
}
WriteFunction serializable_write_function;
if constexpr (std::is_void_v<Element> && Rank == dynamic_rank) {
serializable_write_function = std::move(write_function);
if (!serializable_write_function) {
return absl::InvalidArgumentError("Invalid write_function specified");
}
} else {
serializable_write_function =
internal_virtual_chunked::FunctionAdapter<const void, Element, Rank,
WriteParameters, WriteFunc>{
std::move(write_function)};
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto handle,
internal_virtual_chunked::MakeDriver(
{}, std::move(serializable_write_function), std::move(options)));
return internal::TensorStoreAccess::Construct<
TensorStore<Element, Rank, ReadWriteMode::write>>(std::move(handle));
}
template <typename Element = void, DimensionIndex Rank = dynamic_rank,
typename ReadFunc, typename... Option>
std::enable_if_t<(IsReadFunction<ReadFunc, Element, Rank> &&
IsCompatibleOptionSequence<OpenOptions, Option...>),
Result<TensorStore<Element, Rank, ReadWriteMode::read>>>
VirtualChunked(ReadFunc read_function, Option&&... option) {
TENSORSTORE_INTERNAL_ASSIGN_OPTIONS_OR_RETURN(OpenOptions, options, option);
return VirtualChunked<Element, Rank>(std::move(read_function),
std::move(options));
}
template <typename Element = void, DimensionIndex Rank = dynamic_rank,
typename ReadFunc, typename WriteFunc, typename... Option>
std::enable_if_t<(IsReadFunction<ReadFunc, Element, Rank> &&
IsWriteFunction<WriteFunc, Element, Rank> &&
IsCompatibleOptionSequence<OpenOptions, Option...>),
Result<TensorStore<Element, Rank, ReadWriteMode::read_write>>>
VirtualChunked(ReadFunc read_function, WriteFunc write_function,
Option&&... option) {
TENSORSTORE_INTERNAL_ASSIGN_OPTIONS_OR_RETURN(OpenOptions, options, option);
return VirtualChunked<Element, Rank>(
std::move(read_function), std::move(write_function), std::move(options));
}
template <typename Element = void, DimensionIndex Rank = dynamic_rank,
typename WriteFunc, typename... Option>
std::enable_if_t<IsCompatibleOptionSequence<OpenOptions, Option...>,
Result<TensorStore<Element, Rank, ReadWriteMode::write>>>
VirtualChunkedWriteOnly(WriteFunc write_function, Option&&... option) {
static_assert(IsWriteFunction<WriteFunc, Element, Rank>);
TENSORSTORE_INTERNAL_ASSIGN_OPTIONS_OR_RETURN(OpenOptions, options, option);
return VirtualChunkedWriteOnly<Element, Rank>(std::move(write_function),
std::move(options));
}
}
using virtual_chunked::VirtualChunked;
using virtual_chunked::VirtualChunkedWriteOnly;
}
#endif
#include "tensorstore/virtual_chunked.h"
#include <stddef.h>
#include <algorithm>
#include <atomic>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/chunk_layout.h"
#include "tensorstore/codec_spec.h"
#include "tensorstore/context.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/driver/chunk_cache_driver.h"
#include "tensorstore/driver/driver.h"
#include "tensorstore/driver/driver_handle.h"
#include "tensorstore/driver/driver_spec.h"
#include "tensorstore/driver/registry.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/dimension_units.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/internal/async_write_array.h"
#include "tensorstore/internal/cache/async_cache.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/cache/cache_pool_resource.h"
#include "tensorstore/internal/cache/chunk_cache.h"
#include "tensorstore/int | #include "tensorstore/virtual_chunked.h"
#include <memory>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/array.h"
#include "tensorstore/chunk_layout.h"
#include "tensorstore/context.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/internal/queue_testutil.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/open_mode.h"
#include "tensorstore/rank.h"
#include "tensorstore/schema.h"
#include "tensorstore/serialization/function.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/staleness_bound.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/tensorstore.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/iterate_over_index_range.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::dynamic_rank;
using ::tensorstore::Future;
using ::tensorstore::Index;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Promise;
using ::tensorstore::Result;
using ::tensorstore::span;
using ::tensorstore::StorageGeneration;
using ::tensorstore::TimestampedStorageGeneration;
using ::tensorstore::internal::ConcurrentQueue;
using ::tensorstore::internal::UniqueNow;
using ::tensorstore::serialization::SerializationRoundTrip;
template <typename... Option>
Result<tensorstore::TensorStore<Index, dynamic_rank,
tensorstore::ReadWriteMode::read>>
CoordinatesView(DimensionIndex dim, Option&&... option) {
return tensorstore::VirtualChunked<Index>(
tensorstore::NonSerializable{[dim](auto output, auto read_params)
-> Future<TimestampedStorageGeneration> {
tensorstore::IterateOverIndexRange(
output.domain(),
[&](span<const Index> indices) { output(indices) = indices[dim]; });
return TimestampedStorageGeneration{StorageGeneration::FromString(""),
absl::Now()};
}},
std::forward<Option>(option)...);
}
template <typename... Option>
Result<tensorstore::TensorStore<Index, dynamic_rank,
tensorstore::ReadWriteMode::read>>
SerializableCoordinatesView(DimensionIndex dim, Option&&... option) {
return tensorstore::VirtualChunked<Index>(
tensorstore::serialization::BindFront(
[](DimensionIndex dim, auto output,
auto read_params) -> Future<TimestampedStorageGeneration> {
tensorstore::IterateOverIndexRange(output.domain(),
[&](span<const Index> indices) {
output(indices) = indices[dim];
});
return TimestampedStorageGeneration{
StorageGeneration::FromString(""), absl::Now()};
},
dim),
std::forward<Option>(option)...);
}
using RequestLayout =
::tensorstore::StridedLayout<dynamic_rank, ::tensorstore::offset_origin>;
template <typename... Option>
Result<tensorstore::TensorStore<void, dynamic_rank,
tensorstore::ReadWriteMode::read>>
LoggingView(std::vector<RequestLayout>& requests, Option&&... option) {
auto mutex = std::make_shared<absl::Mutex>();
return tensorstore::VirtualChunked(
tensorstore::NonSerializable{
[mutex, &requests](auto output, auto read_params)
-> Future<TimestampedStorageGeneration> {
tensorstore::InitializeArray(output);
absl::MutexLock lock(mutex.get());
requests.emplace_back(output.layout());
return TimestampedStorageGeneration{
StorageGeneration::FromString(""), absl::Now()};
}},
std::forward<Option>(option)...);
}
template <typename Element, DimensionIndex Rank, typename Parameters>
struct Request {
tensorstore::Array<Element, Rank, tensorstore::offset_origin> array;
Parameters params;
Promise<TimestampedStorageGeneration> promise;
};
template <typename Element, DimensionIndex Rank, typename Parameters>
auto EnqueueRequestHandler(
ConcurrentQueue<Request<Element, Rank, Parameters>>& queue) {
return tensorstore::NonSerializable{
[&queue](
tensorstore::Array<Element, Rank, tensorstore::offset_origin> array,
Parameters params) -> Future<TimestampedStorageGeneration> {
auto [promise, future] = tensorstore::PromiseFuturePair<
TimestampedStorageGeneration>::Make();
queue.push({std::move(array), std::move(params), std::move(promise)});
return future;
}};
}
template <typename Element, DimensionIndex Rank>
using ReadRequest =
Request<Element, Rank, tensorstore::virtual_chunked::ReadParameters>;
template <typename Element, DimensionIndex Rank>
using WriteRequest =
Request<const Element, Rank, tensorstore::virtual_chunked::WriteParameters>;
template <typename Element, DimensionIndex Rank, typename... Option>
Result<
tensorstore::TensorStore<Element, Rank, tensorstore::ReadWriteMode::read>>
MockView(ConcurrentQueue<ReadRequest<Element, Rank>>& queue,
Option&&... option) {
return tensorstore::VirtualChunked<Element, Rank>(
EnqueueRequestHandler(queue), std::forward<Option>(option)...);
}
template <typename Element, DimensionIndex Rank, typename... Option>
Result<tensorstore::TensorStore<Element, Rank,
tensorstore::ReadWriteMode::read_write>>
MockView(ConcurrentQueue<ReadRequest<Element, Rank>>& read_queue,
ConcurrentQueue<WriteRequest<Element, Rank>>& write_queue,
Option&&... option) {
return tensorstore::VirtualChunked<Element, Rank>(
EnqueueRequestHandler(read_queue), EnqueueRequestHandler(write_queue),
std::forward<Option>(option)...);
}
template <typename Element, DimensionIndex Rank, typename... Option>
Result<
tensorstore::TensorStore<Element, Rank, tensorstore::ReadWriteMode::write>>
MockView(ConcurrentQueue<WriteRequest<Element, Rank>>& write_queue,
Option&&... option) {
return tensorstore::VirtualChunkedWriteOnly<Element, Rank>(
EnqueueRequestHandler(write_queue), std::forward<Option>(option)...);
}
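// Added commentary (not part of the original source): the tests below drive
// these mock views with a common pattern: issue a Read/Write, pop the
// resulting request from the queue on the test thread, fill or inspect
// request.array, and resolve request.promise with a
// TimestampedStorageGeneration to complete the operation.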
TEST(VirtualChunkedTest, Coordinates) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto coords0, CoordinatesView(0, tensorstore::Schema::Shape({2, 3})));
EXPECT_THAT(tensorstore::Read(coords0).result(),
::testing::Optional(
tensorstore::MakeArray<Index>({{0, 0, 0}, {1, 1, 1}})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto coords1, CoordinatesView(1, tensorstore::Schema::Shape({2, 3})));
EXPECT_THAT(tensorstore::Read(coords1).result(),
::testing::Optional(
tensorstore::MakeArray<Index>({{0, 1, 2}, {0, 1, 2}})));
}
TEST(VirtualChunkedTest, CoordinatesUnbounded) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto coords0, CoordinatesView(0, tensorstore::RankConstraint{2}));
EXPECT_THAT(
tensorstore::Read<tensorstore::zero_origin>(
coords0 | tensorstore::Dims(0, 1).SizedInterval({1000, 2}, {2, 3}))
.result(),
::testing::Optional(tensorstore::MakeArray<Index>(
{{1000, 1000, 1000}, {1001, 1001, 1001}})));
}
TEST(VirtualChunkedTest, CoordinatesInnerOrder) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto coords0,
CoordinatesView(0, tensorstore::Schema::Shape({2, 3}),
tensorstore::ChunkLayout::InnerOrder({1, 0})));
EXPECT_THAT(tensorstore::Read(coords0).result(),
::testing::Optional(
tensorstore::MakeArray<Index>({{0, 0, 0}, {1, 1, 1}})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto coords1,
CoordinatesView(1, tensorstore::Schema::Shape({2, 3}),
tensorstore::ChunkLayout::InnerOrder({1, 0})));
EXPECT_THAT(tensorstore::Read(coords1).result(),
::testing::Optional(
tensorstore::MakeArray<Index>({{0, 1, 2}, {0, 1, 2}})));
}
TEST(VirtualChunkedTest, SerializableCoordinatesInnerOrder) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto coords0_orig, SerializableCoordinatesView(
0, tensorstore::Schema::Shape({2, 3}),
tensorstore::ChunkLayout::InnerOrder({1, 0})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto coords0,
SerializationRoundTrip(coords0_orig));
EXPECT_THAT(tensorstore::Read(coords0).result(),
::testing::Optional(
tensorstore::MakeArray<Index>({{0, 0, 0}, {1, 1, 1}})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto coords1_orig, SerializableCoordinatesView(
1, tensorstore::Schema::Shape({2, 3}),
tensorstore::ChunkLayout::InnerOrder({1, 0})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto coords1,
SerializationRoundTrip(coords1_orig));
EXPECT_THAT(tensorstore::Read(coords1).result(),
::testing::Optional(
tensorstore::MakeArray<Index>({{0, 1, 2}, {0, 1, 2}})));
}
TEST(VirtualChunkedTest, ReadChunkShape) {
std::vector<RequestLayout> requests;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto view, LoggingView(requests, tensorstore::dtype_v<bool>,
tensorstore::Schema::Shape({2, 3}),
tensorstore::ChunkLayout::ReadChunkShape({2, 1})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto chunk_layout, view.chunk_layout());
EXPECT_THAT(chunk_layout.read_chunk_shape(), ::testing::ElementsAre(2, 1));
TENSORSTORE_ASSERT_OK(tensorstore::Read(view));
EXPECT_THAT(requests, ::testing::UnorderedElementsAre(
RequestLayout({0, 0}, {2, 1}, {1, 1}),
RequestLayout({0, 1}, {2, 1}, {1, 1}),
RequestLayout({0, 2}, {2, 1}, {1, 1})));
}
TEST(VirtualChunkedTest, InnerOrder) {
std::vector<RequestLayout> requests;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto view,
LoggingView(requests, tensorstore::dtype_v<bool>,
tensorstore::Schema::Shape({3, 4, 5}),
tensorstore::ChunkLayout::InnerOrder({2, 0, 1}),
tensorstore::ChunkLayout::ReadChunkShape({2, 3, 4})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto chunk_layout, view.chunk_layout());
EXPECT_THAT(chunk_layout.read_chunk_shape(), ::testing::ElementsAre(2, 3, 4));
EXPECT_THAT(chunk_layout.inner_order(), ::testing::ElementsAre(2, 0, 1));
TENSORSTORE_ASSERT_OK(tensorstore::Read(view));
EXPECT_THAT(requests, ::testing::UnorderedElementsAreArray({
RequestLayout({0, 0, 0}, {2, 3, 4}, {3, 1, 6}),
RequestLayout({2, 0, 0}, {1, 3, 4}, {3, 1, 6}),
RequestLayout({0, 3, 0}, {2, 1, 4}, {3, 1, 6}),
RequestLayout({2, 3, 0}, {1, 1, 4}, {3, 1, 6}),
RequestLayout({0, 0, 4}, {2, 3, 1}, {3, 1, 6}),
RequestLayout({2, 0, 4}, {1, 3, 1}, {3, 1, 6}),
RequestLayout({0, 3, 4}, {2, 1, 1}, {3, 1, 6}),
RequestLayout({2, 3, 4}, {1, 1, 1}, {3, 1, 6}),
}));
}
TEST(VirtualChunkedTest, NoRecheckCache) {
ConcurrentQueue<ReadRequest<int, 0>> requests;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto context, tensorstore::Context::FromJson(
{{"cache_pool", {{"total_bytes_limit", 10000000}}}}));
auto mock_view = MockView<int, 0>(
requests, tensorstore::RecheckCachedData{false}, context);
auto read_future = tensorstore::Read(mock_view);
{
auto request = requests.pop();
EXPECT_EQ(StorageGeneration::Unknown(), request.params.if_not_equal());
request.array() = 42;
request.promise.SetResult(TimestampedStorageGeneration(
StorageGeneration::FromString("abc"), absl::Now()));
}
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeScalarArray<int>(42)));
read_future = tensorstore::Read(mock_view);
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeScalarArray<int>(42)));
}
TEST(VirtualChunkedTest, RecheckCache) {
ConcurrentQueue<ReadRequest<int, 0>> requests;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto context, tensorstore::Context::FromJson(
{{"cache_pool", {{"total_bytes_limit", 10000000}}}}));
auto mock_view = MockView<int, 0>(requests, context);
auto read_future = tensorstore::Read(mock_view);
{
auto request = requests.pop();
EXPECT_EQ(StorageGeneration::Unknown(), request.params.if_not_equal());
request.array() = 42;
request.promise.SetResult(TimestampedStorageGeneration(
StorageGeneration::FromString("abc"), absl::Now()));
}
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeScalarArray<int>(42)));
UniqueNow();
read_future = tensorstore::Read(mock_view);
{
auto request = requests.pop();
EXPECT_EQ(StorageGeneration::FromString("abc"),
request.params.if_not_equal());
request.array() = 43;
request.promise.SetResult(TimestampedStorageGeneration(
StorageGeneration::Unknown(), absl::Now()));
}
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeScalarArray<int>(42)));
}
TEST(VirtualChunkedTest, RecheckCacheImmutable) {
ConcurrentQueue<ReadRequest<int, 0>> requests;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto context, tensorstore::Context::FromJson(
{{"cache_pool", {{"total_bytes_limit", 10000000}}}}));
auto mock_view =
MockView<int, 0>(requests, tensorstore::RecheckCachedData{true}, context);
auto read_future = tensorstore::Read(mock_view);
{
auto request = requests.pop();
EXPECT_EQ(StorageGeneration::Unknown(), request.params.if_not_equal());
request.array() = 42;
request.promise.SetResult(TimestampedStorageGeneration(
StorageGeneration::FromString(""), absl::InfiniteFuture()));
}
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeScalarArray<int>(42)));
UniqueNow();
read_future = tensorstore::Read(mock_view);
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeScalarArray<int>(42)));
}
TEST(VirtualChunkedTest, ReadWrite) {
ConcurrentQueue<ReadRequest<int, 1>> read_requests;
ConcurrentQueue<WriteRequest<int, 1>> write_requests;
auto mock_view = MockView<int, 1>(read_requests, write_requests,
tensorstore::Schema::Shape({2}));
auto write_future =
tensorstore::Write(tensorstore::MakeScalarArray<int>(42),
mock_view | tensorstore::Dims(0).IndexSlice(0));
write_future.Force();
{
auto request = read_requests.pop();
EXPECT_EQ(StorageGeneration::Unknown(), request.params.if_not_equal());
request.array(0) = 1;
request.array(1) = 2;
request.promise.SetResult(TimestampedStorageGeneration(
StorageGeneration::FromString("gen1"), absl::Now()));
}
{
auto request = write_requests.pop();
EXPECT_EQ(StorageGeneration::FromString("gen1"), request.params.if_equal());
EXPECT_EQ(tensorstore::MakeArray<int>({42, 2}), request.array);
request.promise.SetResult(TimestampedStorageGeneration(
StorageGeneration::FromString("gen2"), absl::Now()));
}
TENSORSTORE_ASSERT_OK(write_future);
}
TEST(VirtualChunkedTest, ReadWriteWrite) {
ConcurrentQueue<ReadRequest<int, 1>> read_requests;
ConcurrentQueue<WriteRequest<int, 1>> write_requests;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto context, tensorstore::Context::FromJson(
{{"cache_pool", {{"total_bytes_limit", 1000000}}}}));
auto mock_view = MockView<int, 1>(read_requests, write_requests, context,
tensorstore::Schema::Shape({2}));
{
auto write_future =
tensorstore::Write(tensorstore::MakeScalarArray<int>(42),
mock_view | tensorstore::Dims(0).IndexSlice(0));
write_future.Force();
{
auto request = read_requests.pop();
EXPECT_EQ(StorageGeneration::Unknown(), request.params.if_not_equal());
request.array(0) = 1;
request.array(1) = 2;
request.promise.SetResult(TimestampedStorageGeneration(
StorageGeneration::FromString(""), absl::InfiniteFuture()));
}
{
auto request = write_requests.pop();
EXPECT_EQ(StorageGeneration::FromString(""), request.params.if_equal());
EXPECT_EQ(tensorstore::MakeArray<int>({42, 2}), request.array);
request.promise.SetResult(TimestampedStorageGeneration(
StorageGeneration::FromString(""), absl::InfiniteFuture()));
}
TENSORSTORE_ASSERT_OK(write_future);
}
{
auto write_future =
tensorstore::Write(tensorstore::MakeScalarArray<int>(50),
mock_view | tensorstore::Dims(0).IndexSlice(1));
write_future.Force();
{
auto request = write_requests.pop();
EXPECT_EQ(StorageGeneration::FromString(""), request.params.if_equal());
EXPECT_EQ(tensorstore::MakeArray<int>({42, 50}), request.array);
request.promise.SetResult(TimestampedStorageGeneration(
StorageGeneration::FromString(""), absl::InfiniteFuture()));
}
TENSORSTORE_ASSERT_OK(write_future);
}
}
TEST(VirtualChunkedTest, Write) {
ConcurrentQueue<WriteRequest<int, 1>> write_requests;
auto mock_view =
MockView<int, 1>(write_requests, tensorstore::Schema::Shape({6}),
tensorstore::ChunkLayout::ChunkShape({4}));
{
auto write_future = tensorstore::Write(
tensorstore::MakeScalarArray<int>(42),
mock_view | tensorstore::Dims(0).SizedInterval(0, 4));
write_future.Force();
{
auto request = write_requests.pop();
EXPECT_EQ(StorageGeneration::Unknown(), request.params.if_equal());
EXPECT_EQ(tensorstore::MakeArray<int>({42, 42, 42, 42}), request.array);
request.promise.SetResult(TimestampedStorageGeneration(
StorageGeneration::FromString(""), absl::Now()));
}
TENSORSTORE_ASSERT_OK(write_future);
}
{
auto write_future = tensorstore::Write(
tensorstore::MakeScalarArray<int>(42),
mock_view | tensorstore::Dims(0).SizedInterval(4, 2));
write_future.Force();
{
auto request = write_requests.pop();
EXPECT_EQ(StorageGeneration::Unknown(), request.params.if_equal());
EXPECT_EQ(tensorstore::MakeOffsetArray<int>({4}, {42, 42}),
request.array);
request.promise.SetResult(TimestampedStorageGeneration(
StorageGeneration::FromString(""), absl::Now()));
}
TENSORSTORE_ASSERT_OK(write_future);
}
}
TEST(VirtualChunkedTest, WriteFillValue) {
ConcurrentQueue<WriteRequest<int, 0>> write_requests;
auto mock_view = MockView<int, 0>(write_requests);
auto write_future =
tensorstore::Write(tensorstore::MakeScalarArray<int>(0), mock_view);
write_future.Force();
{
auto request = write_requests.pop();
EXPECT_EQ(StorageGeneration::Unknown(), request.params.if_equal());
EXPECT_EQ(tensorstore::MakeScalarArray<int>(0), request.array);
request.promise.SetResult(TimestampedStorageGeneration(
StorageGeneration::FromString(""), absl::Now()));
}
TENSORSTORE_ASSERT_OK(write_future);
}
TEST(VirtualChunkedTest, WriteOnlyError) {
ConcurrentQueue<WriteRequest<int, 1>> write_requests;
auto mock_view =
MockView<int, 1>(write_requests, tensorstore::Schema::Shape({2}));
EXPECT_THAT(
tensorstore::Write(tensorstore::MakeScalarArray<int>(42),
mock_view | tensorstore::Dims(0).IndexSlice(0))
.result(),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Write-only virtual chunked view requires chunk-aligned writes"));
}
TEST(VirtualChunkedTest, AtomicSingleChunk) {
tensorstore::Transaction transaction(tensorstore::atomic_isolated);
ConcurrentQueue<WriteRequest<int, 1>> write_requests;
auto mock_view =
MockView<int, 1>(write_requests, tensorstore::Schema::Shape({6}),
tensorstore::ChunkLayout::ChunkShape({4}), transaction);
TENSORSTORE_ASSERT_OK(tensorstore::Write(
tensorstore::MakeScalarArray<int>(42),
mock_view | tensorstore::Dims(0).HalfOpenInterval(0, 4)));
auto future = transaction.CommitAsync();
{
auto request = write_requests.pop();
EXPECT_EQ(StorageGeneration::Unknown(), request.params.if_equal());
EXPECT_EQ(tensorstore::MakeArray<int>({42, 42, 42, 42}), request.array);
request.promise.SetResult(TimestampedStorageGeneration(
StorageGeneration::FromString(""), absl::Now()));
}
TENSORSTORE_ASSERT_OK(future);
}
TEST(VirtualChunkedTest, AtomicMultipleChunks) {
tensorstore::Transaction transaction(tensorstore::atomic_isolated);
ConcurrentQueue<WriteRequest<int, 1>> write_requests;
auto mock_view =
MockView<int, 1>(write_requests, tensorstore::Schema::Shape({6}),
tensorstore::ChunkLayout::ChunkShape({4}), transaction);
EXPECT_THAT(
tensorstore::Write(tensorstore::MakeScalarArray<int>(42), mock_view)
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot write to virtual chunk .* and write to virtual "
"chunk .* as single atomic transaction"));
}
TEST(VirtualChunkedTest, NonAtomicSingleChunk) {
tensorstore::Transaction transaction(tensorstore::isolated);
ConcurrentQueue<WriteRequest<int, 1>> write_requests;
auto mock_view =
MockView<int, 1>(write_requests, tensorstore::Schema::Shape({6}),
tensorstore::ChunkLayout::ChunkShape({4}), transaction);
TENSORSTORE_ASSERT_OK(
tensorstore::Write(tensorstore::MakeScalarArray<int>(42), mock_view));
auto future = transaction.CommitAsync();
for (int i = 0; i < 2; ++i) {
auto request = write_requests.pop();
EXPECT_EQ(StorageGeneration::Unknown(), request.params.if_equal());
request.promise.SetResult(TimestampedStorageGeneration(
StorageGeneration::FromString(""), absl::Now()));
}
TENSORSTORE_ASSERT_OK(future);
}
} |
516 | cpp | google/tensorstore | serialization | tensorstore/serialization/serialization.cc | tensorstore/serialization/serialization_test.cc | #ifndef TENSORSTORE_SERIALIZATION_SERIALIZATION_H_
#define TENSORSTORE_SERIALIZATION_SERIALIZATION_H_
#include <stddef.h>
#include <stdint.h>
#include <cassert>
#include <memory>
#include <string>
#include <string_view>
#include <type_traits>
#include <typeinfo>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/memory.h"
#include "tensorstore/internal/poly/poly.h"
#include "tensorstore/internal/riegeli/delimited.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/serialization/fwd.h"
#include "tensorstore/util/apply_members/apply_members.h"
namespace tensorstore {
namespace serialization {
namespace internal_serialization {
void FailNonNull(DecodeSource& source);
void FailEof(DecodeSource& source);
}
class EncodeSink {
public:
riegeli::Writer& writer() { return writer_; }
void Fail(absl::Status status);
absl::Status status() const { return writer_.status(); }
virtual bool Close() { return writer_.Close(); }
template <typename T,
typename DirectSerializer = Serializer<std::shared_ptr<T>>>
[[nodiscard]] bool Indirect(std::shared_ptr<T> object,
DirectSerializer serializer = {}) {
return DoIndirect(
typeid(std::shared_ptr<T>),
[serializer = std::move(serializer)](
EncodeSink& sink, const std::shared_ptr<void>& value) {
return serializer.Encode(sink, std::static_pointer_cast<T>(value));
},
internal::StaticConstPointerCast<void>(std::move(object)));
}
template <
typename T, typename Traits,
typename DirectSerializer = Serializer<internal::IntrusivePtr<T, Traits>>>
[[nodiscard]] bool Indirect(internal::IntrusivePtr<T, Traits> object,
DirectSerializer serializer = {}) {
return DoIndirect(
typeid(internal::IntrusivePtr<T, Traits>),
[serializer = std::move(serializer)](
EncodeSink& sink, const std::shared_ptr<void>& value) {
return serializer.Encode(sink, internal::IntrusivePtr<T, Traits>(
static_cast<T*>(value.get())));
},
internal::StaticConstPointerCast<void>(
internal::IntrusiveToShared(std::move(object))));
}
using ErasedEncodeWrapperFunction =
poly::Poly<0, true,
bool(EncodeSink& sink,
const std::shared_ptr<void>& erased_value) const>;
[[nodiscard]] virtual bool DoIndirect(const std::type_info& type,
ErasedEncodeWrapperFunction encode,
std::shared_ptr<void> object) = 0;
protected:
explicit EncodeSink(riegeli::Writer& writer) : writer_(writer) {}
~EncodeSink() = default;
private:
riegeli::Writer& writer_;
};
absl::Status DecodeError();
absl::Status DecodeError(std::string_view message);
class DecodeSource {
public:
riegeli::Reader& reader() { return reader_; }
void Fail(absl::Status status);
absl::Status status() const { return reader_.status(); }
virtual absl::Status Done() {
if (reader_.VerifyEndAndClose()) return absl::OkStatus();
return status();
}
template <typename T,
typename DirectSerializer = Serializer<std::shared_ptr<T>>>
[[nodiscard]] bool Indirect(std::shared_ptr<T>& object,
DirectSerializer serializer = {}) {
std::shared_ptr<void> void_ptr;
if (!DoIndirect(
typeid(std::shared_ptr<T>),
[serializer = std::move(serializer)](DecodeSource& source,
std::shared_ptr<void>& value) {
std::shared_ptr<T> typed_value;
if (!serializer.Decode(source, typed_value)) return false;
value = std::move(typed_value);
return true;
},
void_ptr)) {
return false;
}
object = internal::static_pointer_cast<T>(std::move(void_ptr));
return true;
}
template <
typename T, typename Traits,
typename DirectSerializer = Serializer<internal::IntrusivePtr<T, Traits>>>
[[nodiscard]] bool Indirect(internal::IntrusivePtr<T, Traits>& object,
DirectSerializer serializer = {}) {
std::shared_ptr<void> void_ptr;
if (!DoIndirect(
typeid(internal::IntrusivePtr<T, Traits>),
[&serializer](DecodeSource& source, std::shared_ptr<void>& value) {
internal::IntrusivePtr<T, Traits> typed_value;
if (!serializer.Decode(source, typed_value)) return false;
value = internal::StaticConstPointerCast<void>(
internal::IntrusiveToShared(std::move(typed_value)));
return true;
},
void_ptr)) {
return false;
}
object.reset(static_cast<T*>(void_ptr.get()));
return true;
}
using ErasedDecodeWrapperFunction = absl::FunctionRef<bool(
DecodeSource& source, std::shared_ptr<void>& value)>;
[[nodiscard]] virtual bool DoIndirect(const std::type_info& type,
ErasedDecodeWrapperFunction decode,
std::shared_ptr<void>& value) = 0;
protected:
DecodeSource(riegeli::Reader& reader) : reader_(reader) {}
~DecodeSource() = default;
private:
riegeli::Reader& reader_;
};
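// Example (sketch, not part of this header): a serializer for a hypothetical
// type holding a shared_ptr member can preserve pointer identity across the
// encoded stream by routing through Indirect(), which deduplicates objects
// via DoIndirect():
//
//   struct Node {  // hypothetical
//     std::shared_ptr<int> value;
//   };
//   template <>
//   struct Serializer<Node> {
//     [[nodiscard]] static bool Encode(EncodeSink& sink, const Node& n) {
//       return sink.Indirect(n.value);
//     }
//     [[nodiscard]] static bool Decode(DecodeSource& source, Node& n) {
//       return source.Indirect(n.value);
//     }
//   };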
template <typename T>
struct NonSerializable : public T {
static constexpr auto ApplyMembers = [](auto&& x, auto f) {
return f(internal::BaseCast<T>(x));
};
};
template <typename T>
NonSerializable(const T& x) -> NonSerializable<T>;
template <typename T>
constexpr inline bool IsNonSerializable = false;
template <typename T>
constexpr inline bool IsNonSerializable<NonSerializable<T>> = true;
namespace internal_serialization {
absl::Status NonSerializableError();
}
template <typename T>
struct Serializer<NonSerializable<T>> {
[[nodiscard]] static bool Encode(EncodeSink& sink,
const NonSerializable<T>& value) {
sink.Fail(internal_serialization::NonSerializableError());
return false;
}
[[nodiscard]] static bool Decode(DecodeSource& source,
NonSerializable<T>& value) {
source.Fail(internal_serialization::NonSerializableError());
return false;
}
constexpr static bool non_serializable() { return true; }
};
template <typename Serializer, typename SFINAE = void>
constexpr inline bool IsNonSerializer = false;
template <typename Serializer>
constexpr inline bool IsNonSerializer<
Serializer, std::void_t<decltype(&Serializer::non_serializable)>> =
Serializer::non_serializable();
template <typename T>
constexpr inline bool IsNonSerializableLike = IsNonSerializer<Serializer<T>>;
template <typename T>
struct MemcpySerializer {
[[nodiscard]] static bool Encode(EncodeSink& sink, const T& value) {
return sink.writer().Write(
std::string_view(reinterpret_cast<const char*>(&value), sizeof(T)));
}
[[nodiscard]] static bool Decode(DecodeSource& source, T& value) {
return source.reader().Read(sizeof(T), reinterpret_cast<char*>(&value));
}
};
template <typename T>
struct Serializer<T, std::enable_if_t<SerializeUsingMemcpy<T>>>
: public MemcpySerializer<T> {};
template <>
struct Serializer<bool> {
[[nodiscard]] static bool Encode(EncodeSink& sink, bool value) {
return sink.writer().WriteByte(value);
}
[[nodiscard]] static bool Decode(DecodeSource& source, bool& value) {
uint8_t v;
if (!source.reader().ReadByte(v)) return false;
value = static_cast<bool>(v);
return true;
}
};
template <typename T, typename ElementSerializer = Serializer<T>>
[[nodiscard]] bool Encode(EncodeSink& sink, const T& value,
const ElementSerializer& serialize = {}) {
return serialize.Encode(sink, value);
}
template <typename T,
typename ElementSerializer = Serializer<internal::remove_cvref_t<T>>>
[[nodiscard]] bool Decode(DecodeSource& source, T&& value,
const ElementSerializer& serialize = {}) {
return serialize.Decode(source, value);
}
template <typename String>
struct StringSerializer {
[[nodiscard]] static bool Encode(EncodeSink& sink, const String& value) {
return serialization::WriteDelimited(sink.writer(), value);
}
[[nodiscard]] static bool Decode(DecodeSource& source, String& value) {
return serialization::ReadDelimited(source.reader(), value);
}
};
template <>
struct Serializer<std::string> : public StringSerializer<std::string> {};
template <>
struct Serializer<absl::Cord> : public StringSerializer<absl::Cord> {};
template <>
struct Serializer<std::string_view>
: public StringSerializer<std::string_view> {};
template <typename... T>
[[nodiscard]] ABSL_ATTRIBUTE_ALWAYS_INLINE inline bool EncodeTuple(
EncodeSink& sink, const T&... value) {
return (serialization::Encode(sink, value) && ...);
}
template <typename... T>
[[nodiscard]] ABSL_ATTRIBUTE_ALWAYS_INLINE inline bool DecodeTuple(
DecodeSource& source, T&&... value) {
return (serialization::Decode(source, value) && ...);
}
struct IsAnyNonSerializable {
template <typename... T>
constexpr auto operator()(const T&... arg) const {
return std::integral_constant<bool, (IsNonSerializableLike<T> || ...)>{};
}
};
template <typename T>
struct ApplyMembersSerializer {
[[nodiscard]] static bool Encode(EncodeSink& sink, const T& value) {
return ApplyMembers<T>::Apply(value, [&sink](const auto&... member) {
return (serialization::Encode(sink, member) && ...);
});
}
[[nodiscard]] static bool Decode(DecodeSource& source, T& value) {
return ApplyMembers<T>::Apply(value, [&source](auto&&... member) {
return (serialization::Decode(source, member) && ...);
});
}
constexpr static bool non_serializable() {
return decltype(ApplyMembers<T>::Apply(std::declval<const T&>(),
IsAnyNonSerializable{}))::value;
}
};
template <typename T>
struct Serializer<
T, std::enable_if_t<(SupportsApplyMembers<T> && !IsNonSerializable<T> &&
!SerializeUsingMemcpy<T>)>>
: public ApplyMembersSerializer<T> {};
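// Example (sketch): with the specialization above, a plain aggregate opts
// into serialization just by defining ApplyMembers; no explicit Serializer
// specialization is needed:
//
//   struct Point {  // hypothetical
//     int x, y;
//     constexpr static auto ApplyMembers = [](auto&& v, auto f) {
//       return f(v.x, v.y);
//     };
//   };
//   // Serializer<Point> now encodes x followed by y.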
template <typename T, typename ValueType = typename T::value_type,
typename ElementSerializer = Serializer<ValueType>>
struct ContainerSerializer {
[[nodiscard]] bool Encode(EncodeSink& sink, const T& value) const {
if (!serialization::WriteSize(sink.writer(), value.size())) return false;
for (const auto& element : value) {
if (!serialization::Encode(sink, element, element_serializer)) {
return false;
}
}
return true;
}
[[nodiscard]] bool Decode(DecodeSource& source, T& value) const {
value.clear();
size_t size;
if (!serialization::ReadSize(source.reader(), size)) return false;
for (size_t i = 0; i < size; ++i) {
ValueType element;
if (!serialization::Decode(source, element, element_serializer)) {
return false;
}
value.insert(value.end(), std::move(element));
}
return true;
}
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS ElementSerializer element_serializer = {};
constexpr static bool non_serializable() {
return IsNonSerializer<ElementSerializer>;
}
};
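// Example (sketch): ContainerSerializer works with any iterable container
// exposing value_type, size(), clear(), and insert(end, element):
//
//   ContainerSerializer<std::vector<int>> vec_serializer;
//   // vec_serializer.Encode(sink, v) writes v.size() then each element;
//   // vec_serializer.Decode(source, v) clears v and reads the elements back.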
template <typename T,
typename ElementSerializer = Serializer<typename T::value_type>>
struct OptionalSerializer {
[[nodiscard]] bool Encode(EncodeSink& sink, const T& value) const {
return serialization::Encode(sink, static_cast<bool>(value)) &&
(!value || element_serializer.Encode(sink, *value));
}
[[nodiscard]] bool Decode(DecodeSource& source, T& value) const {
bool has_value;
return serialization::Decode(source, has_value) &&
(!has_value || element_serializer.Decode(source, value.emplace()));
}
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS ElementSerializer element_serializer;
constexpr static bool non_serializable() {
return IsNonSerializer<ElementSerializer>;
}
};
template <typename T, typename SFINAE = void>
inline constexpr bool IsSerializable = false;
template <typename T>
inline constexpr bool IsSerializable<
T, std::void_t<decltype(Serializer<T>::Encode(std::declval<EncodeSink&>(),
std::declval<const T&>()))>> =
std::is_default_constructible_v<T>;
struct IsNonNull {
template <typename T>
constexpr bool operator()(const T& x) const {
return static_cast<bool>(x);
}
};
struct IsValid {
template <typename T>
constexpr bool operator()(const T& x) const {
return x.valid();
}
};
template <typename T, typename NonNullSerializer,
typename IsNullPredicate = IsNonNull>
struct MaybeNullSerializer {
[[nodiscard]] bool Encode(EncodeSink& sink, const T& value) const {
const bool valid = IsNullPredicate{}(value);
if (!serialization::Encode(sink, valid)) return false;
if (!valid) return true;
return non_null_serializer.Encode(sink, value);
}
[[nodiscard]] bool Decode(DecodeSource& source, T& value) const {
bool valid;
if (!serialization::Decode(source, valid)) return false;
if (!valid) return true;
if (!non_null_serializer.Decode(source, value)) return false;
assert(IsNullPredicate{}(value));
return true;
}
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS NonNullSerializer non_null_serializer = {};
constexpr static bool non_serializable() {
return IsNonSerializer<NonNullSerializer>;
}
};
template <typename T, typename BaseSerializer = Serializer<T>,
typename Predicate = IsNonNull>
struct NonNullSerializer {
[[nodiscard]] bool Encode(EncodeSink& sink, const T& value) const {
assert(Predicate{}(value));
return base_serializer.Encode(sink, value);
}
[[nodiscard]] bool Decode(DecodeSource& source, T& value) const {
if (!base_serializer.Decode(source, value)) return false;
if (!Predicate{}(value)) {
internal_serialization::FailNonNull(source);
return false;
}
return true;
}
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS BaseSerializer base_serializer = {};
constexpr static bool non_serializable() {
return IsNonSerializer<BaseSerializer>;
}
};
template <typename Pointer,
typename ElementSerializer =
Serializer<std::remove_cv_t<typename Pointer::element_type>>>
struct NonNullPointerSerializer {
using element_type = std::remove_cv_t<typename Pointer::element_type>;
[[nodiscard]] bool Encode(EncodeSink& sink, const Pointer& value) const {
assert(value);
return element_serializer.Encode(sink, *value);
}
[[nodiscard]] bool Decode(DecodeSource& source, Pointer& value) const {
value.reset(new element_type);
return element_serializer.Decode(source, *value);
}
ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS ElementSerializer element_serializer = {};
constexpr static bool non_serializable() {
return IsNonSerializer<ElementSerializer>;
}
};
template <typename Pointer,
typename NonNullSerializer = NonNullPointerSerializer<Pointer>>
using PointerSerializer = MaybeNullSerializer<Pointer, NonNullSerializer>;
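// Composition note: PointerSerializer first encodes a presence bool (via
// MaybeNullSerializer) and, only when the pointer is non-null, delegates to
// NonNullPointerSerializer, which serializes the pointee by value. The null
// and non-null smart-pointer round trips in serialization_test.cc rely on
// this layering.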
template <typename Pointer,
typename NonNullSerializer = NonNullPointerSerializer<Pointer>>
struct NonNullIndirectPointerSerializer {
[[nodiscard]] bool Encode(EncodeSink& sink, const Pointer& value) const {
assert(value);
return sink.Indirect(value, non_null_serializer);
}
[[nodiscard]] bool Decode(DecodeSource& source, Pointer& value) const {
return | #include "tensorstore/serialization/serialization.h"
#include <cstdint>
#include <map>
#include <set>
#include <string>
#include <tuple>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/serialization/std_map.h"
#include "tensorstore/serialization/std_optional.h"
#include "tensorstore/serialization/std_set.h"
#include "tensorstore/serialization/std_tuple.h"
#include "tensorstore/serialization/std_variant.h"
#include "tensorstore/serialization/std_vector.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::serialization::IsNonSerializableLike;
using ::tensorstore::serialization::NonSerializable;
using ::tensorstore::serialization::SerializationRoundTrip;
using ::tensorstore::serialization::TestSerializationRoundTrip;
TEST(SerializationTest, Bool) {
TestSerializationRoundTrip(true);
TestSerializationRoundTrip(false);
}
TEST(SerializationTest, Float) {
TestSerializationRoundTrip(3.14f);
TestSerializationRoundTrip(0.0f);
}
TEST(SerializationTest, String) {
TestSerializationRoundTrip(std::string("abcdefg"));
TestSerializationRoundTrip(std::string(""));
}
TEST(CordTest, SerializationRoundTrip) {
TestSerializationRoundTrip(absl::Cord(""));
TestSerializationRoundTrip(absl::Cord("abc"));
}
TEST(SerializationTest, Int32) {
TestSerializationRoundTrip(static_cast<int32_t>(0));
TestSerializationRoundTrip(static_cast<int32_t>(3));
TestSerializationRoundTrip(static_cast<int32_t>(2147483647));
TestSerializationRoundTrip(static_cast<int32_t>(-2147483648));
}
TEST(SerializationTest, VectorInt) {
TestSerializationRoundTrip(std::vector<int>{});
TestSerializationRoundTrip(std::vector<int>{1, 2, 3});
}
TEST(SerializationTest, VectorString) {
TestSerializationRoundTrip(std::vector<std::string>{});
TestSerializationRoundTrip(std::vector<std::string>{"a", "b", "def"});
}
TEST(SerializationTest, VectorVectorString) {
TestSerializationRoundTrip(
std::vector<std::vector<std::string>>{{"a", "b", "def"}, {"e", "f"}});
}
TEST(SerializationTest, Map) {
TestSerializationRoundTrip(std::map<int, std::string>{{1, "a"}, {2, "b"}});
}
TEST(SerializationTest, Set) {
TestSerializationRoundTrip(std::set<int>{1, 2, 3});
}
TEST(SerializationTest, Tuple) {
TestSerializationRoundTrip(
std::tuple(std::string("abc"), 3, std::string("def")));
}
TEST(SerializationTest, UniquePtrNull) {
std::unique_ptr<int> ptr;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto ptr2, SerializationRoundTrip(ptr));
EXPECT_FALSE(ptr2);
}
TEST(SerializationTest, UniquePtrNonNull) {
auto ptr = std::make_unique<int>(5);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto ptr2, SerializationRoundTrip(ptr));
EXPECT_THAT(ptr2, ::testing::Pointee(5));
}
TEST(SerializationTest, SharedPtrNull) {
std::shared_ptr<int> ptr;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto ptr2, SerializationRoundTrip(ptr));
EXPECT_FALSE(ptr2);
}
TEST(SerializationTest, SharedPtrNonNull) {
auto ptr = std::make_shared<int>(5);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto ptr2, SerializationRoundTrip(ptr));
EXPECT_THAT(ptr2, ::testing::Pointee(5));
}
TEST(SerializationTest, SharedPtrDuplicate) {
auto ptr = std::make_shared<int>(5);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto tuple2, SerializationRoundTrip(std::make_tuple(ptr, ptr)));
EXPECT_THAT(std::get<0>(tuple2), ::testing::Pointee(5));
EXPECT_EQ(std::get<0>(tuple2), std::get<1>(tuple2));
}
struct Foo {
std::string a;
std::string b;
constexpr static auto ApplyMembers = [](auto& x, auto f) {
return f(x.a, x.b);
};
bool operator==(const Foo& other) const {
return a == other.a && b == other.b;
}
};
TEST(SerializationTest, ApplyMembers) {
TestSerializationRoundTrip(Foo{"xyz", "abcd"});
TestSerializationRoundTrip(Foo{"", "abcd"});
}
TEST(SerializationTest, Optional) {
TestSerializationRoundTrip(std::optional<int>());
TestSerializationRoundTrip(std::optional<int>(42));
}
TEST(SerializationTest, Variant) {
TestSerializationRoundTrip(std::variant<int, std::string>(42));
TestSerializationRoundTrip(std::variant<int, std::string>("abc"));
TestSerializationRoundTrip(std::variant<int, int>(std::in_place_index<1>, 1));
TestSerializationRoundTrip(std::variant<int, int>(std::in_place_index<0>, 0));
}
static_assert(!IsNonSerializableLike<Foo>);
static_assert(!IsNonSerializableLike<std::pair<Foo, Foo>>);
static_assert(IsNonSerializableLike<NonSerializable<Foo>>);
static_assert(IsNonSerializableLike<std::pair<Foo, NonSerializable<Foo>>>);
} |
517 | cpp | google/tensorstore | dim_expression | python/tensorstore/dim_expression.cc | tensorstore/index_space/dim_expression_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_DIM_EXPRESSION_H_
#define TENSORSTORE_INDEX_SPACE_DIM_EXPRESSION_H_
#include <type_traits>
#include <utility>
#include "tensorstore/array.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/add_new_dims_op.h"
#include "tensorstore/index_space/internal/diagonal_op.h"
#include "tensorstore/index_space/internal/dim_expression_helper.h"
#include "tensorstore/index_space/internal/dimension_selection.h"
#include "tensorstore/index_space/internal/index_array_slice_op.h"
#include "tensorstore/index_space/internal/interval_slice_op.h"
#include "tensorstore/index_space/internal/label_op.h"
#include "tensorstore/index_space/internal/mark_explicit_op.h"
#include "tensorstore/index_space/internal/single_index_slice_op.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/internal/translate_op.h"
#include "tensorstore/index_space/internal/transpose_op.h"
namespace tensorstore {
template <typename T>
constexpr inline bool IsIndexArray =
IsArray<T> && std::is_convertible_v<T, SharedArrayView<const Index>>;
template <typename... Op>
class DimExpression {
static_assert(sizeof...(Op) > 0);
using DimExpressionHelper = internal_index_space::DimExpressionHelper;
using Access = internal_index_space::TransformAccess;
using Parent =
typename internal_index_space::DimExpressionTraits<Op...>::Parent;
using LastOp =
typename internal_index_space::DimExpressionTraits<Op...>::LastOp;
using static_selection_rank =
std::integral_constant<DimensionIndex,
DimExpressionHelper::GetStaticSelectionRank<Op...>(
dynamic_rank)>;
template <typename NextOp>
using NewExpr = DimExpression<NextOp, Op...>;
template <template <typename...> class OpTemplate, typename... IndexVector>
using IndexVectorOpExpr = NewExpr<DimExpressionHelper::IndexVectorOp<
OpTemplate, static_selection_rank::value, IndexVector...>>;
template <typename IndexVector>
using TranslateByOpExpr =
IndexVectorOpExpr<internal_index_space::TranslateByOp, IndexVector>;
template <typename IndexVector>
using TranslateBackwardByOpExpr =
IndexVectorOpExpr<internal_index_space::TranslateBackwardByOp,
IndexVector>;
template <typename IndexVector>
using TranslateToOpExpr =
IndexVectorOpExpr<internal_index_space::TranslateToOp, IndexVector>;
template <typename IndexVector>
using StrideOpExpr =
IndexVectorOpExpr<internal_index_space::StrideOp, IndexVector>;
template <typename IndexVector>
using SingleIndexSliceOpExpr =
IndexVectorOpExpr<internal_index_space::SingleIndexSliceOp, IndexVector>;
template <typename... IndexVector>
using IntervalSliceOpExpr =
IndexVectorOpExpr<internal_index_space::IntervalSliceOp, IndexVector...>;
template <typename BoxType>
using BoxSliceOpExpr = NewExpr<std::enable_if_t<
(IsBoxLike<BoxType> &&
RankConstraint::EqualOrUnspecified(static_selection_rank::value,
BoxType::static_rank)),
internal_index_space::BoxSliceOp<BoxType::static_rank>>>;
template <typename... IndexArray>
using IndexArraySliceOpExpr = std::enable_if_t<
(sizeof...(IndexArray) >= 1) &&
RankConstraint::EqualOrUnspecified(sizeof...(IndexArray),
static_selection_rank::value) &&
(IsIndexArray<IndexArray> && ...) &&
RankConstraint::EqualOrUnspecified({IndexArray::static_rank...}),
NewExpr<internal_index_space::IndexArraySliceOp<
false,
RankConstraint::And({IndexArray::static_rank...}),
std::array<SharedArrayView<const Index>, sizeof...(IndexArray)>>>>;
using DynamicIndexArraySliceOpExpr =
NewExpr<internal_index_space::IndexArraySliceOp<
false, dynamic_rank,
span<const SharedArrayView<const Index>>>>;
template <typename... IndexArray>
using IndexArrayOuterSliceOpExpr = std::enable_if_t<
RankConstraint::EqualOrUnspecified(sizeof...(IndexArray),
static_selection_rank::value) &&
(IsIndexArray<IndexArray> && ...),
NewExpr<internal_index_space::IndexArraySliceOp<
true,
RankConstraint::Add({IndexArray::static_rank...}),
std::array<SharedArrayView<const Index>, sizeof...(IndexArray)>>>>;
using DynamicIndexArrayOuterSliceOpExpr =
NewExpr<internal_index_space::IndexArraySliceOp<
true, dynamic_rank,
span<const SharedArrayView<const Index>>>>;
template <typename Labels, DimensionIndex Rank>
using LabelOpExpr =
std::enable_if_t<RankConstraint::EqualOrUnspecified(
Rank, static_selection_rank::value),
NewExpr<internal_index_space::LabelOp<Labels>>>;
template <typename Labels,
typename LabelsSpan = internal::ConstSpanType<Labels>>
using LabelSpanOpExpr =
std::enable_if_t<internal::IsStringLike<typename LabelsSpan::value_type>,
LabelOpExpr<LabelsSpan, LabelsSpan::extent>>;
template <typename... Label>
using LabelPackOpExpr = std::enable_if_t<
internal::IsPackConvertibleWithoutNarrowing<std::string_view, Label...>,
LabelOpExpr<std::array<std::string_view, sizeof...(Label)>,
sizeof...(Label)>>;
using MoveToOpExpr = NewExpr<internal_index_space::MoveToOp>;
using DiagonalOpExpr = NewExpr<internal_index_space::DiagonalOp>;
using AddNewOpExpr = NewExpr<internal_index_space::AddNewDimsOp>;
using TransposeOpExpr = NewExpr<internal_index_space::TransposeOp>;
template <typename TargetDims,
typename TargetDimsSpan = internal::ConstSpanType<TargetDims>>
using TransposeToOpExpr = std::enable_if_t<
(RankConstraint::EqualOrUnspecified(TargetDimsSpan::extent,
static_selection_rank::value) &&
std::is_same_v<typename TargetDimsSpan::value_type, DimensionIndex>),
NewExpr<internal_index_space::TransposeToOp<TargetDimsSpan>>>;
using ChangeImplicitStateOpExpr =
NewExpr<internal_index_space::ChangeImplicitStateOp>;
template <typename IndexVectorArray>
using IndexVectorArraySliceOpExpr =
std::enable_if_t<IsIndexArray<IndexVectorArray> &&
RankConstraint::GreaterOrUnspecified(
IndexVectorArray::static_rank, 0),
NewExpr<internal_index_space::IndexVectorArraySliceOp<
IndexVectorArray::static_rank>>>;
public:
template <typename Offsets>
TranslateByOpExpr<Offsets> TranslateBy(const Offsets& offsets) const {
return {{offsets}, *this};
}
template <DimensionIndex Rank>
TranslateByOpExpr<const Index (&)[Rank]> TranslateBy(
const Index (&offsets)[Rank]) const {
return {{span(offsets)}, *this};
}
template <typename Offsets>
TranslateBackwardByOpExpr<Offsets> TranslateBackwardBy(
const Offsets& offsets) const {
return {{offsets}, *this};
}
template <DimensionIndex Rank>
TranslateBackwardByOpExpr<const Index (&)[Rank]> TranslateBackwardBy(
const Index (&offsets)[Rank]) const {
return {{span(offsets)}, *this};
}
template <typename Origins>
TranslateToOpExpr<Origins> TranslateTo(const Origins& origins) const {
return {{origins}, *this};
}
template <DimensionIndex Rank>
TranslateToOpExpr<const Index (&)[Rank]> TranslateTo(
const Index (&origins)[Rank]) const {
return {{span(origins)}, *this};
}
template <typename Indices>
SingleIndexSliceOpExpr<Indices> IndexSlice(const Indices& indices) const {
return {{indices}, *this};
}
template <DimensionIndex Rank>
SingleIndexSliceOpExpr<const Index (&)[Rank]> IndexSlice(
const Index (&indices)[Rank]) const {
return {{span(indices)}, *this};
}
template <typename BoxType>
BoxSliceOpExpr<BoxType> BoxSlice(const BoxType& box) const {
return {{box, false}, *this};
}
template <typename BoxType>
BoxSliceOpExpr<BoxType> TranslateBoxSlice(const BoxType& box) const {
return {{box, true}, *this};
}
template <typename Start, typename Stop, typename Strides = Index>
IntervalSliceOpExpr<Start, Stop, Strides> ClosedInterval(
const Start& start, const Stop& stop, const Strides& strides = 1) const {
return {{IntervalForm::closed, false, start, stop, strides}, *this};
}
template <typename Stop, typename Strides = Index, DimensionIndex Rank>
IntervalSliceOpExpr<const Index (&)[Rank], Stop, Strides> ClosedInterval(
const Index (&start)[Rank], const Stop& stop,
const Strides& strides = 1) const {
return {{IntervalForm::closed, false, start, stop, strides}, *this};
}
template <typename Start, typename Strides = Index, DimensionIndex Rank>
IntervalSliceOpExpr<Start, const Index (&)[Rank], Strides> ClosedInterval(
const Start& start, const Index (&stop)[Rank],
const Strides& strides = 1) const {
return {{IntervalForm::closed, false, start, stop, strides}, *this};
}
template <typename Strides = Index, DimensionIndex Rank>
IntervalSliceOpExpr<const Index (&)[Rank], const Index (&)[Rank], Strides>
ClosedInterval(const Index (&st | #include "tensorstore/index_space/dim_expression.h"
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::AllDims;
using ::tensorstore::BoxView;
using ::tensorstore::DimRange;
using ::tensorstore::Dims;
using ::tensorstore::Index;
using ::tensorstore::MakeArray;
using ::tensorstore::MakeOffsetArrayView;
using ::tensorstore::Materialize;
static const Index default_origin[3] = {0, 0, 0};
auto TestArray(tensorstore::span<const Index, 3> origin = default_origin) {
static const int test_array[4][4][8] = {
{
{111, 112, 113, 114, 115, 116, 117, 118},
{121, 122, 123, 124, 125, 126, 127, 128},
{131, 132, 133, 134, 135, 136, 137, 138},
{141, 142, 143, 144, 145, 146, 147, 148},
},
{
{211, 212, 213, 214, 215, 216, 217, 218},
{221, 222, 223, 224, 225, 226, 227, 228},
{231, 232, 233, 234, 235, 236, 237, 238},
{241, 242, 243, 244, 245, 246, 247, 248},
},
{
{311, 312, 313, 314, 315, 316, 317, 318},
{321, 322, 323, 324, 325, 326, 327, 328},
{331, 332, 333, 334, 335, 336, 337, 338},
{341, 342, 343, 344, 345, 346, 347, 348},
},
{
{411, 412, 413, 414, 415, 416, 417, 418},
{421, 422, 423, 424, 425, 426, 427, 428},
{431, 432, 433, 434, 435, 436, 437, 438},
{441, 442, 443, 444, 445, 446, 447, 448},
}};
return MakeOffsetArrayView(origin, test_array);
}
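// Each element of the test array encodes its own position: the value
// 100*(i+1) + 10*(j+1) + (k+1) is stored at index {i, j, k} (plus the origin
// offset, when one is supplied). For example, EXPECT_EQ(344, ...) asserts
// that the transformed view maps its arguments to index {2, 3, 3} of the
// underlying array.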
TEST(DimExpressionTest, TranslateBy) {
auto view = TestArray() | Dims(0, 2).TranslateBy({10, 20}) | Materialize();
TENSORSTORE_EXPECT_OK(view);
EXPECT_EQ(344, ((*view)({12, 3, 23})));
}
TEST(DimExpressionTest, TranslateBySingle) {
auto view = TestArray() | Dims(0, 2).TranslateBy(10);
TENSORSTORE_EXPECT_OK(view);
}
TEST(DimExpressionTest, TranslateTo) {
const Index origin[3] = {1, 2, 3};
auto view =
TestArray(origin) | Dims(0, 2).TranslateTo({10, 20}) | Materialize();
TENSORSTORE_EXPECT_OK(view);
EXPECT_EQ(344 - 123, ((*view)({11, 3, 20})));
}
TEST(DimExpressionTest, TranslateToSingle) {
auto view = TestArray() | AllDims().TranslateTo(0);
TENSORSTORE_EXPECT_OK(view);
}
TEST(DimExpressionTest, IndexSlice) {
auto view = TestArray() | Dims(0, 2).IndexSlice({2, 4}) | Materialize();
TENSORSTORE_EXPECT_OK(view);
EXPECT_EQ(345, ((*view)({3})));
}
TEST(DimExpressionTest, IndexSliceSingle) {
auto view = TestArray() | Dims(0, 2).IndexSlice(1);
TENSORSTORE_EXPECT_OK(view);
}
TEST(DimExpressionTest, BoxSlice) {
auto view = TestArray() | Dims(0, 2).BoxSlice(BoxView({1, 4}, {3, 4})) |
Materialize();
TENSORSTORE_EXPECT_OK(view);
EXPECT_EQ(245, ((*view)({1, 3, 4})));
}
TEST(DimExpressionTest, TranslateBoxSlice) {
const Index origin[3] = {0, 2, 0};
auto view = TestArray(origin) |
Dims(0, 2).TranslateBoxSlice(BoxView({1, 4}, {3, 4})) |
Materialize();
TENSORSTORE_EXPECT_OK(view);
EXPECT_EQ(245 - 20, ((*view)({0, 3, 0})));
}
TEST(DimExpressionTest, ClosedInterval) {
auto view = TestArray() | Dims(0, 2).ClosedInterval({1, 6}, {3, 0}, {1, -2}) |
Materialize();
TENSORSTORE_EXPECT_OK(view);
EXPECT_EQ(347, ((*view)({2, 3, -3})));
}
TEST(DimExpressionTest, ClosedInterval1) {
auto view = TestArray() | Dims(0, 2).ClosedInterval(1, 1);
TENSORSTORE_EXPECT_OK(view);
}
TEST(DimExpressionTest, HalfOpenInterval) {
auto view = TestArray() |
Dims(0, 2).HalfOpenInterval({1, 6}, {3, 0}, {1, -2}) |
Materialize();
TENSORSTORE_EXPECT_OK(view);
EXPECT_EQ(347, ((*view)({2, 3, -3})));
}
TEST(DimExpressionTest, HalfOpenInterval1) {
auto view = TestArray() | Dims(0, 2).HalfOpenInterval(1, 2);
TENSORSTORE_EXPECT_OK(view);
}
TEST(DimExpressionTest, SizedInterval) {
auto view = TestArray() | Dims(0, 2).SizedInterval({1, 6}, {3, 2}, {1, -2}) |
Materialize();
TENSORSTORE_EXPECT_OK(view);
EXPECT_EQ(347, ((*view)({2, 3, -3})));
}
TEST(DimExpressionTest, SizedInterval1) {
auto view = TestArray() | Dims(0, 2).SizedInterval(1, 2);
TENSORSTORE_EXPECT_OK(view);
}
TEST(DimExpressionTest, TranslateClosedInterval) {
auto view = TestArray() | Dims(0, 2).TranslateClosedInterval({0, 1}, {1, 1});
TENSORSTORE_EXPECT_OK(view);
}
TEST(DimExpressionTest, TranslateClosedInterval1) {
auto view = TestArray() | Dims(0, 2).TranslateClosedInterval(1, 1);
TENSORSTORE_EXPECT_OK(view);
}
TEST(DimExpressionTest, TranslateHalfOpenInterval) {
auto view =
TestArray() | Dims(0, 2).TranslateHalfOpenInterval({0, 1}, {1, 1});
TENSORSTORE_EXPECT_OK(view);
}
TEST(DimExpressionTest, TranslateHalfOpenInterval1) {
auto view = TestArray() | Dims(0, 2).TranslateHalfOpenInterval(1, 2);
TENSORSTORE_EXPECT_OK(view);
}
TEST(DimExpressionTest, TranslateSizedInterval) {
auto view = TestArray() | Dims(0, 2).TranslateSizedInterval({0, 1}, {1, 1});
TENSORSTORE_EXPECT_OK(view);
}
TEST(DimExpressionTest, TranslateSizedInterval1) {
auto view = TestArray() | Dims(0, 2).TranslateSizedInterval(1, 2);
TENSORSTORE_EXPECT_OK(view);
}
TEST(DimExpressionTest, IndexArraySlice) {
auto view = TestArray() |
Dims(0, 2).IndexArraySlice(
MakeArray<Index>({{1, 2, 3}, {3, 2, 1}}),
MakeArray<Index>({{7, 6, 5}, {1, 2, 4}})) |
Materialize();
TENSORSTORE_EXPECT_OK(view);
EXPECT_EQ(248, ((*view)({0, 0, 3})));
}
TEST(DimExpressionTest, IndexVectorArraySlice) {
auto view = TestArray() |
Dims(0, 2).IndexVectorArraySlice(
MakeArray<Index>(
{{{1, 7}, {2, 6}, {3, 5}}, {{3, 1}, {2, 2}, {1, 4}}}),
-1) |
Materialize();
TENSORSTORE_EXPECT_OK(view);
EXPECT_EQ(248, ((*view)({0, 0, 3})));
}
TEST(DimExpressionTest, OuterIndexArraySlice) {
auto view = TestArray() |
Dims(2, 0).OuterIndexArraySlice(
MakeArray<Index>({{4, 5}, {6, 7}}),
MakeArray<Index>({3, 2})) |
Materialize();
TENSORSTORE_EXPECT_OK(view);
EXPECT_EQ(438, ((*view)({0, 2, 1, 1})));
}
TEST(DimExpressionTest, Label) {
auto view = TestArray() | Dims(0, 2).Label({"a", "b"});
TENSORSTORE_EXPECT_OK(view);
}
TEST(DimExpressionTest, LabelB) {
auto view = TestArray() | Dims(0, 2).Label("a", "b");
TENSORSTORE_EXPECT_OK(view);
}
TEST(DimExpressionTest, MoveTo) {
auto view = TestArray() | Dims(2, 0).MoveTo(1) | Materialize();
TENSORSTORE_EXPECT_OK(view);
EXPECT_EQ(345, ((*view)({3, 4, 2})));
}
TEST(DimExpressionTest, MoveToFront) {
auto view = TestArray() | Dims(0, 2).MoveToFront();
TENSORSTORE_EXPECT_OK(view);
}
TEST(DimExpressionTest, MoveToBack) {
  auto view = TestArray() | Dims(0, 2).MoveToBack();
TENSORSTORE_EXPECT_OK(view);
}
TEST(DimExpressionTest, Diagonal) {
auto view = TestArray() | Dims(0, 2).Diagonal() | Materialize();
TENSORSTORE_EXPECT_OK(view);
EXPECT_EQ(343, ((*view)({2, 3})));
}
TEST(DimExpressionTest, AddNew) {
auto view = TestArray() | Dims(0, -1).AddNew() | Materialize();
TENSORSTORE_EXPECT_OK(view);
EXPECT_EQ(333, ((*view)({0, 2, 2, 2, 0})));
}
TEST(DimExpressionTest, Transpose) {
auto view = TestArray() | Dims(2, 0, 1).Transpose() | Materialize();
TENSORSTORE_EXPECT_OK(view);
EXPECT_EQ(234, ((*view)({3, 1, 2})));
}
TEST(DimExpressionTest, TransposeB) {
auto view = TestArray() | Dims(2, 0).Transpose({1, 2}) | Materialize();
TENSORSTORE_EXPECT_OK(view);
EXPECT_EQ(345, ((*view)({3, 4, 2})));
}
TEST(DimExpressionTest, MarkBoundsExplicit) {
auto view = TestArray() | Dims(2, 0).MarkBoundsExplicit();
TENSORSTORE_EXPECT_OK(view);
}
TEST(DimExpressionTest, UnsafeMarkBoundsImplicit) {
auto view = TestArray() | Dims(2, 0).UnsafeMarkBoundsImplicit();
TENSORSTORE_EXPECT_OK(view);
}
TEST(DimExpressionTest, Stride) {
auto view = TestArray() | Dims(0, 2).Stride({-2, 3}) | Materialize();
TENSORSTORE_EXPECT_OK(view);
EXPECT_EQ(344, ((*view)({-1, 3, 1})));
}
TEST(DimExpressionTest, AllDims) {
auto view = TestArray() | AllDims().IndexSlice(1) | Materialize();
TENSORSTORE_EXPECT_OK(view);
EXPECT_EQ(222, ((*view)()));
}
TEST(DimExpressionTest, DimRange) {
auto view =
TestArray() | tensorstore::DimRange(1).IndexSlice(1) | Materialize();
TENSORSTORE_EXPECT_OK(view);
EXPECT_EQ(322, ((*view)(2)));
}
} |
518 | cpp | google/tensorstore | batch | tensorstore/serialization/batch.cc | tensorstore/batch_test.cc | #ifndef TENSORSTORE_SERIALIZATION_BATCH_H_
#define TENSORSTORE_SERIALIZATION_BATCH_H_
#include <cstddef>
#include <memory>
#include <string>
#include <string_view>
#include <typeinfo>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "riegeli/bytes/string_reader.h"
#include "riegeli/bytes/string_writer.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/serialization/fwd.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace serialization {
class BatchEncodeSink final : public EncodeSink {
public:
explicit BatchEncodeSink(riegeli::Writer& writer);
~BatchEncodeSink();
[[nodiscard]] bool DoIndirect(const std::type_info& type,
ErasedEncodeWrapperFunction encode,
std::shared_ptr<void> object) override;
private:
absl::flat_hash_map<std::shared_ptr<void>, size_t> indirect_map_;
};
class BatchDecodeSource final : public DecodeSource {
public:
BatchDecodeSource(riegeli::Reader& reader);
~BatchDecodeSource();
[[nodiscard]] bool DoIndirect(const std::type_info& type,
ErasedDecodeWrapperFunction decode,
std::shared_ptr<void>& value) override;
private:
struct IndirectEntry {
std::shared_ptr<void> value;
const std::type_info* type;
};
std::vector<IndirectEntry> indirect_objects_;
};
template <typename T, typename ElementSerializer = Serializer<T>>
Result<std::string> EncodeBatch(const T& value,
const ElementSerializer& serializer = {}) {
std::string buffer;
riegeli::StringWriter writer(&buffer);
BatchEncodeSink sink(writer);
if (!serializer.Encode(sink, value) || !sink.Close()) {
return sink.status();
}
return buffer;
}
template <typename T,
typename ElementSerializer = Serializer<internal::remove_cvref_t<T>>>
absl::Status DecodeBatch(std::string_view encoded, T& value,
const ElementSerializer& serializer = {}) {
riegeli::StringReader reader(encoded);
BatchDecodeSource source(reader);
if (!serializer.Decode(source, value)) {
internal_serialization::FailEof(source);
}
return source.Done();
}
template <typename T,
typename ElementSerializer = Serializer<internal::remove_cvref_t<T>>>
absl::Status DecodeBatch(std::string_view encoded, T&& value,
const ElementSerializer& serializer = {}) {
riegeli::StringReader reader(encoded);
BatchDecodeSource source(reader);
if (!serializer.Decode(source, value)) {
internal_serialization::FailEof(source);
}
return source.Done();
}
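// Example (sketch): round-tripping a value through the batch format:
//
//   TENSORSTORE_ASSIGN_OR_RETURN(std::string encoded, EncodeBatch(value));
//   T decoded;
//   TENSORSTORE_RETURN_IF_ERROR(DecodeBatch(encoded, decoded));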
template <typename T>
class MaybeDecode {
public:
absl::Status Decode(const std::string& arg) {
return serialization::DecodeBatch(arg, value_);
}
const T& value() const { return value_; }
T value_;
};
template <>
class MaybeDecode<std::string> {
public:
absl::Status Decode(const std::string& arg) {
value_ = &arg;
return absl::OkStatus();
}
const std::string& value() { return *value_; }
const std::string* value_ = nullptr;
};
}
}
#endif
#include "tensorstore/serialization/batch.h"
#include "absl/status/status.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace serialization {
BatchEncodeSink::BatchEncodeSink(riegeli::Writer& writer)
: EncodeSink(writer) {}
BatchEncodeSink::~BatchEncodeSink() = default;
bool BatchEncodeSink::DoIndirect(const std::type_info& type,
ErasedEncodeWrapperFunction encode,
std::shared_ptr<void> object) {
auto [it, inserted] = indirect_map_.emplace(object, indirect_map_.size());
return serialization::WriteSize(writer(), it->second) &&
(!inserted || encode(*this, object));
}
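// Wire-format note: every indirect reference is encoded as its index in
// insertion order, and only the first occurrence of a given pointer is
// followed by the object's actual encoding. BatchDecodeSource::DoIndirect
// below mirrors this: an index equal to indirect_objects_.size() announces a
// new object to decode, while a smaller index refers back to an
// already-decoded object (after a type check).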
BatchDecodeSource::BatchDecodeSource(riegeli::Reader& reader)
: DecodeSource(reader) {}
BatchDecodeSource::~BatchDecodeSource() = default;
bool BatchDecodeSource::DoIndirect(const std::type_info& type,
ErasedDecodeWrapperFunction decode,
std::shared_ptr<void>& value) {
size_t id;
if (!serialization::ReadSize(reader(), id)) return false;
if (id > indirect_objects_.size()) {
Fail(DecodeError(tensorstore::StrCat("Indirect object index ", id,
" out of range [0, ",
indirect_objects_.size(), ")")));
return false;
}
if (id < indirect_objects_.size()) {
auto& entry = indirect_objects_[id];
if (*entry.type != type) {
Fail(absl::InvalidArgumentError(tensorstore::StrCat(
"Type mismatch for indirect object, received ", entry.type->name(),
" but expected ", type.name())));
return false;
}
value = entry.value;
return true;
}
indirect_objects_.emplace_back();
if (!decode(*this, value)) return false;
auto& entry = indirect_objects_[id];
entry.type = &type;
entry.value = value;
return true;
}
}
} | #include "tensorstore/batch.h"
#include <stddef.h>
#include <functional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "tensorstore/batch_impl.h"
namespace {
using ::tensorstore::Batch;
using ::testing::ElementsAre;
using Log = std::vector<std::string>;
template <typename T>
struct Entry : public Batch::Impl::Entry {
using KeyParam = T;
Entry(Log& log, size_t nesting_depth, T key)
: Batch::Impl::Entry(nesting_depth), key_(key), log(log) {}
T key_;
T key() const { return key_; }
virtual void Submit(Batch::View batch) {
log.push_back(absl::StrCat("begin_submit ", key()));
for (auto& submit_func : submit_funcs) {
submit_func(batch);
}
log.push_back(absl::StrCat("end_submit ", key()));
delete this;
}
std::vector<std::function<void(Batch::View batch)>> submit_funcs;
Log& log;
};
template <typename T>
void AddFunc(Log& log, Batch::View batch, size_t nesting_depth, T key,
std::function<void(Batch::View)> func) {
auto& entry = Batch::Impl::From(batch)->GetEntry<Entry<T>>(
key, [&] { return std::make_unique<Entry<T>>(log, nesting_depth, key); });
entry.submit_funcs.emplace_back(std::move(func));
}
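// Entries are keyed by (entry type, key) within a batch; GetEntry() creates
// the entry on first use and later calls append additional submit callbacks
// to it. As MultipleNestingDepths below demonstrates, entries with a greater
// nesting_depth are submitted before shallower ones when the batch is
// released.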
TEST(BatchTest, SingleNestingDepth) {
Log log;
auto batch = Batch::New();
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 2; ++j) {
AddFunc<int>(log, batch, 0, i,
[&log, i, j](Batch::View batch) {
log.emplace_back(absl::StrFormat("i=%d, j=%d", i, j));
});
}
}
EXPECT_THAT(log, ElementsAre());
batch.Release();
EXPECT_THAT(log,
::testing::UnorderedElementsAre(
"begin_submit 0", "i=0, j=0", "i=0, j=1", "end_submit 0",
"begin_submit 1", "i=1, j=0", "i=1, j=1", "end_submit 1",
"begin_submit 2", "i=2, j=0", "i=2, j=1", "end_submit 2"));
}
TEST(BatchTest, MultipleNestingDepths) {
Log log;
auto batch = Batch::New();
for (int nesting_depth : {2, 3, 0}) {
AddFunc<int>(log, batch, nesting_depth, nesting_depth,
[](Batch::View batch) {});
}
EXPECT_THAT(log, ElementsAre());
batch.Release();
EXPECT_THAT(log, ::testing::ElementsAre("begin_submit 3", "end_submit 3",
"begin_submit 2", "end_submit 2",
"begin_submit 0", "end_submit 0"));
}
TEST(BatchTest, MultipleTypes) {
Log log;
auto batch = Batch::New();
AddFunc<int>(log, batch, 0, 42,
[](Batch::View batch) {});
AddFunc<float>(log, batch, 0, 1.5,
[](Batch::View batch) {});
EXPECT_THAT(log, ElementsAre());
batch.Release();
EXPECT_THAT(log,
::testing::ElementsAre("begin_submit 42", "end_submit 42",
"begin_submit 1.5", "end_submit 1.5"));
}
TEST(BatchTest, Async) {
Log log;
auto batch = Batch::New();
Batch saved_batch{Batch::no_batch};
AddFunc<int>(log, batch, 2, 2,
[&](Batch::View batch) { saved_batch = batch; });
AddFunc<int>(log, batch, 1, 3,
[](Batch::View batch) {});
batch.Release();
EXPECT_THAT(log, ElementsAre("begin_submit 2", "end_submit 2"));
log.clear();
AddFunc<int>(log, saved_batch, 1, 1,
[](Batch::View batch) {});
saved_batch.Release();
EXPECT_THAT(
log, ::testing::UnorderedElementsAre("begin_submit 1", "end_submit 1",
"begin_submit 3", "end_submit 3"));
}
} |
519 | cpp | google/tensorstore | transaction | tensorstore/kvstore/transaction.cc | tensorstore/kvstore/transaction_test.cc | #ifndef TENSORSTORE_KVSTORE_TRANSACTION_H_
#define TENSORSTORE_KVSTORE_TRANSACTION_H_
#include <stddef.h>
#include <stdint.h>
#include <atomic>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "tensorstore/internal/container/intrusive_red_black_tree.h"
#include "tensorstore/internal/source_location.h"
#include "tensorstore/internal/tagged_ptr.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_modify_write.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_kvstore {
using kvstore::Driver;
using kvstore::Key;
using kvstore::ReadModifyWriteSource;
using kvstore::ReadModifyWriteTarget;
using kvstore::ReadOptions;
using kvstore::ReadResult;
using kvstore::Value;
using kvstore::WriteOptions;
class ReadModifyWriteEntry;
class DeleteRangeEntry;
class MutationEntry;
class MultiPhaseMutation;
class SinglePhaseMutation;
enum MutationEntryType {
kReadModifyWrite = 0,
kDeleteRange = 1,
kDeleteRangePlaceholder = 2,
};
using MutationEntryTree =
internal::intrusive_red_black_tree::Tree<MutationEntry>;
using ReadModifyWriteEntryTree =
internal::intrusive_red_black_tree::Tree<ReadModifyWriteEntry>;
class MutationEntry : public MutationEntryTree::NodeBase {
public:
std::string key_;
internal::TaggedPtr<SinglePhaseMutation, 2> single_phase_mutation_;
SinglePhaseMutation& single_phase_mutation() const {
return *single_phase_mutation_;
}
MutationEntryType entry_type() const {
return static_cast<MutationEntryType>(single_phase_mutation_.tag());
}
MultiPhaseMutation& multi_phase() const;
absl::Mutex& mutex() const;
using DeleteRangeEntry = internal_kvstore::DeleteRangeEntry;
using ReadModifyWriteEntry = internal_kvstore::ReadModifyWriteEntry;
constexpr static MutationEntryType kReadModifyWrite =
MutationEntryType::kReadModifyWrite;
constexpr static MutationEntryType kDeleteRange =
MutationEntryType::kDeleteRange;
protected:
~MutationEntry() = default;
};
class EntryCounter {
public:
void SetError() { value_.fetch_or(1, std::memory_order_relaxed); }
bool HasError() const { return value_.load(std::memory_order_relaxed) & 1; }
void IncrementCount(size_t amount = 1) {
value_.fetch_add(2 * amount, std::memory_order_relaxed);
}
bool DecrementCount(size_t amount = 1) {
return value_.fetch_sub(2 * amount, std::memory_order_acq_rel) -
2 * amount <=
1;
}
bool IsDone() const { return value_ <= 1; }
private:
std::atomic<size_t> value_{0};
};
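// EntryCounter packs an error flag into the low bit of a single atomic: the
// count is stored shifted left by one, so IncrementCount/DecrementCount
// adjust by 2*amount, and a value <= 1 (count of zero, error bit possibly
// set) signals completion in IsDone()/DecrementCount().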
class DeleteRangeEntry final : public MutationEntry {
public:
std::string exclusive_max_;
ReadModifyWriteEntryTree superseded_;
EntryCounter remaining_entries_;
};
class ReadModifyWriteEntry : public MutationEntry,
public ReadModifyWriteTarget {
public:
ReadModifyWriteSource* source_;
ReadModifyWriteEntry* prev_ = nullptr;
MutationEntry* next_ = nullptr;
ReadModifyWriteEntry* next_read_modify_write() const {
if (!next_ || (flags_ & kDeleted)) return nullptr;
return static_cast<ReadModifyWriteEntry*>(next_);
}
using Flags = uint8_t;
Flags flags_ = 0;
constexpr static Flags kWritebackProvided = 1;
constexpr static Flags kTransitivelyUnconditional = 2;
constexpr static Flags kDirty = 4;
constexpr static Flags kPrevDeleted = 8;
constexpr static Flags kError = 16;
constexpr static Flags kDeleted = 32;
constexpr static Flags kTransitivelyDirty = 64;
void KvsRead(TransactionalReadOptions options,
ReadReceiver receiver) override;
bool KvsReadsCommitted() override;
virtual ~ReadModifyWriteEntry() = default;
};
class SinglePhaseMutation {
public:
SinglePhaseMutation() = default;
SinglePhaseMutation(const SinglePhaseMutation&) = delete;
MultiPhaseMutation* multi_phase_;
size_t phase_number_;
MutationEntryTree entries_;
SinglePhaseMutation* next_;
SinglePhaseMutation* prev_;
EntryCounter remaining_entries_;
};
void DestroyPhaseEntries(SinglePhaseMutation& single_phase_mutation);
void WritebackError(MutationEntry& entry);
void WritebackError(ReadModifyWriteEntry& entry);
void WritebackError(DeleteRangeEntry& entry);
void WritebackError(SinglePhaseMutation& single_phase_mutation);
void WritebackSuccess(DeleteRangeEntry& entry);
void WritebackSuccess(ReadModifyWriteEntry& entry,
TimestampedStorageGeneration new_stamp);
void InvalidateReadState(SinglePhaseMutation& single_phase_mutation);
class MultiPhaseMutation {
public:
MultiPhaseMutation();
SinglePhaseMutation phases_;
virtual internal::TransactionState::Node& GetTransactionNode() = 0;
virtual std::string DescribeKey(std::string_view key) = 0;
SinglePhaseMutation& GetCommittingPhase();
virtual ReadModifyWriteEntry* AllocateReadModifyWriteEntry();
virtual void FreeReadModifyWriteEntry(ReadModifyWriteEntry* entry);
virtual void Read(ReadModifyWriteEntry& entry,
ReadModifyWriteTarget::TransactionalReadOptions&& options,
ReadModifyWriteTarget::ReadReceiver&& receiver) = 0;
virtual void Writeback(ReadModifyWriteEntry& entry,
ReadModifyWriteEntry& source_entry,
ReadResult&& read_result) = 0;
virtual void WritebackDelete(DeleteRangeEntry& entry) = 0;
virtual bool MultiPhaseReadsCommitted() { return true; }
virtual void PhaseCommitDone(size_t next_phase) = 0;
virtual void AllEntriesDone(SinglePhaseMutation& single_phase_mutation);
virtual void RecordEntryWritebackError(ReadModifyWriteEntry& entry,
absl::Status error);
void AbortRemainingPhases();
void CommitNextPhase();
enum class ReadModifyWriteStatus {
kExisting,
kAddedFirst,
kAddedSubsequent,
};
ReadModifyWriteStatus ReadModifyWrite(size_t& phase, Key key,
ReadModifyWriteSource& source);
void DeleteRange(KeyRange range);
std::string DescribeFirstEntry();
virtual absl::Mutex& mutex() = 0;
protected:
~MultiPhaseMutation() = default;
};
inline MultiPhaseMutation& MutationEntry::multi_phase() const {
return *single_phase_mutation().multi_phase_;
}
inline absl::Mutex& MutationEntry::mutex() const {
return multi_phase().mutex();
}
class AtomicMultiPhaseMutationBase : public MultiPhaseMutation {
public:
static void AtomicWritebackReady(ReadModifyWriteEntry& entry);
struct ReadModifyWriteEntryWithStamp
: public internal_kvstore::ReadModifyWriteEntry {
bool IsOutOfDate(absl::Time staleness_bound) {
return stamp_.time == absl::InfinitePast() ||
stamp_.time < staleness_bound;
}
TimestampedStorageGeneration& stamp() { return stamp_; }
TimestampedStorageGeneration stamp_;
};
void RetryAtomicWriteback(absl::Time staleness_bound);
void WritebackDelete(DeleteRangeEntry& entry) override;
void AtomicCommitWritebackSuccess();
void RevokeAllEntries();
protected:
~AtomicMultiPhaseMutationBase() = default;
};
class AtomicMultiPhaseMutation : public AtomicMultiPhaseMutationBase {
public:
class BufferedReadModifyWriteEntry
: public AtomicMultiPhaseMutationBase::ReadModifyWriteEntryWithStamp {
public:
ReadResult::State value_state_;
absl::Cord value_;
};
ReadModifyWriteEntry* AllocateReadModifyWriteEntry() override;
void FreeReadModifyWriteEntry(ReadModifyWriteEntry* entry) override;
void Writeback(ReadModifyWriteEntry& entry,
ReadModifyWriteEntry& source_entry,
ReadResult&& read_result) override;
protected:
~AtomicMultiPhaseMutation() = default;
};
void ReadDirectly(Driver* driver, ReadModifyWriteEntry& entry,
ReadModifyWriteTarget::TransactionalReadOptions&& options,
ReadModifyWriteTarget::ReadReceiver&& receiver);
void WritebackDirectly(Driver* driver, ReadModifyWriteEntry& entry,
ReadResult&& read_result);
void WritebackDirectly(Driver* driver, DeleteRangeEntry& entry);
template <typename DerivedMultiPhaseMutation = MultiPhaseMutation>
class TransactionNodeBase : public internal::TransactionState::Node,
public DerivedMultiPhaseMutation {
public:
TransactionNodeBase(Driver* driver)
: internal::TransactionState::Node(driver) {
intrusive_ptr_increment(driver);
}
~TransactionNodeBase() { intrusive_ptr_decrement(this->driver()); }
Driver* driver() { return static_cast<Driver*>(this->associated_data()); }
internal::TransactionState::Node& GetTransactionNode() override {
return *this;
}
std::string DescribeKey(std::string_view key) override {
return this->driver()->DescribeKey(key);
}
void Read(ReadModifyWriteEntry& entry,
ReadModifyWriteTarget::TransactionalReadOptions&& options,
ReadModifyWriteTarget::ReadReceiver&& receiver) override {
internal_kvstore::ReadDirectly(driver(), entry, std::move(options),
std::move(receiver));
}
void PhaseCommitDone(size_t next_phase) override {
this->CommitDone(next_phase);
}
void PrepareForCommit() override {
this->PrepareDone();
this->ReadyForCommit();
}
void Commit() override { this->CommitNextPhase(); }
absl::Mutex& mutex() override { return mutex_; }
void Abort() override {
this->AbortRemainingPhases();
this->AbortDone();
}
std::string Describe() override {
absl::MutexLock lock(&mutex_);
return this->DescribeFirstEntry();
}
absl::Mutex mutex_;
};
class NonAtomicTransactionNode
: public TransactionNodeBase<MultiPhaseMutation> {
public:
using TransactionNodeBase<MultiPhaseMutation>::TransactionNodeBase;
void Writeback(ReadModifyWriteEntry& entry,
ReadModifyWriteEntry& source_entry,
ReadResult&& read_result) override {
internal_kvstore::WritebackDirectly(this->driver(), entry,
std::move(read_result));
}
void WritebackDelete(DeleteRangeEntry& entry) override {
internal_kvstore::WritebackDirectly(driver(), entry);
}
};
using AtomicTransactionNode = TransactionNodeBase<AtomicMultiPhaseMutation>;
template <typename TransactionNode, typename... Arg>
Result<internal::OpenTransactionNodePtr<TransactionNode>> GetTransactionNode(
Driver* driver, internal::OpenTransactionPtr& transaction, Arg&&... arg) {
TENSORSTORE_ASSIGN_OR_RETURN(auto node,
internal::GetOrCreateOpenTransaction(transaction)
.GetOrCreateMultiPhaseNode(driver, [&] {
return new TransactionNode(
driver, std::forward<Arg>(arg)...);
}));
return internal::static_pointer_cast<TransactionNode>(std::move(node));
}
template <typename TransactionNode, typename... Arg>
absl::Status AddReadModifyWrite(Driver* driver,
internal::OpenTransactionPtr& transaction,
size_t& phase, Key key,
ReadModifyWriteSource& source, Arg&&... arg) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto node, internal_kvstore::GetTransactionNode<TransactionNode>(
driver, transaction, std::forward<Arg>(arg)...));
absl::MutexLock lock(&node->mutex_);
node->ReadModifyWrite(phase, std::move(key), source);
return absl::OkStatus();
}
template <typename TransactionNode, typename... Arg>
absl::Status AddDeleteRange(Driver* driver,
const internal::OpenTransactionPtr& transaction,
KeyRange&& range, Arg&&... arg) {
auto transaction_copy = transaction;
TENSORSTORE_ASSIGN_OR_RETURN(
auto node, internal_kvstore::GetTransactionNode<TransactionNode>(
driver, transaction_copy, std::forward<Arg>(arg)...));
absl::MutexLock lock(&node->mutex_);
node->DeleteRange(std::move(range));
return absl::OkStatus();
}
Future<ReadResult> ReadViaExistingTransaction(
Driver* driver, internal::OpenTransactionPtr& transaction, size_t& phase,
Key key, kvstore::TransactionalReadOptions options);
Future<TimestampedStorageGeneration> WriteViaExistingTransaction(
Driver* driver, internal::OpenTransactionPtr& transaction, size_t& phase,
Key key, std::optional<Value> value, WriteOptions options);
Future<TimestampedStorageGeneration> WriteViaTransaction(
Driver* driver, Key key, std::optional<Value> value, WriteOptions options);
#ifdef TENSORSTORE_INTERNAL_KVSTORE_TRANSACTION_DEBUG
#define TENSORSTORE_KVSTORE_DEBUG_LOG(...) \
do { \
tensorstore::internal_kvstore::KvstoreDebugLog( \
tensorstore::SourceLocation::current(), __VA_ARGS__); \
} while (false)
template <typename... T>
void KvstoreDebugLog(tensorstore::SourceLocation loc, MutationEntry& entry,
const T&... arg) {
std::string message;
tensorstore::StrAppend(
&message, "[", typeid(entry.multi_phase()).name(),
": multi_phase=", &entry.multi_phase(), ", entry=", &entry,
", phase=", entry.single_phase_mutation().phase_number_,
", key=", tensorstore::QuoteString(entry.key_));
if (entry.entry_type() == kDeleteRange) {
tensorstore::StrAppend(
&message, ", exclusive_max=",
tensorstore::QuoteString(
static_cast<DeleteRangeEntry&>(entry).exclusive_max_));
} else {
size_t seq = 0;
for (auto* e = static_cast<ReadModifyWriteEntry*>(&entry)->prev_; e;
e = e->prev_) {
++seq;
}
tensorstore::StrAppend(&message, ", seq=", seq);
}
tensorstore::StrAppend(&message, "] ", arg...);
ABSL_LOG(INFO).AtLocation(loc.file_name(), loc.line()) << message;
}
#else
#define TENSORSTORE_KVSTORE_DEBUG_LOG(...) while (false)
#endif
}
}
#endif
#include "tensorstore/kvstore/transaction.h"
#include <stddef.h>
#include <stdint.h>
#include <cassert>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include "absl/base/optimization.h"
#include "absl/container/btree_map.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "absl/types/compare.h"
#include "tensorstore/internal/compare.h"
#include "tensorstore/internal/intrus | #include "tensorstore/transaction.h"
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/clock.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/mock_kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/test_matchers.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/util/status_testutil.h"
namespace {
namespace kvstore = tensorstore::kvstore;
using ::tensorstore::MatchesStatus;
using ::tensorstore::OptionalByteRangeRequest;
using ::tensorstore::StorageGeneration;
using ::tensorstore::TimestampedStorageGeneration;
using ::tensorstore::Transaction;
using ::tensorstore::internal::MatchesKvsReadResult;
using ::tensorstore::internal::MockKeyValueStore;
using ::tensorstore::kvstore::KvStore;
using ::tensorstore::kvstore::ReadResult;
TEST(KvStoreTest, WriteThenRead) {
auto mock_driver = MockKeyValueStore::Make();
Transaction txn(tensorstore::isolated);
KvStore store(mock_driver, "", txn);
TENSORSTORE_ASSERT_OK(kvstore::Write(store, "a", absl::Cord("value")));
EXPECT_THAT(kvstore::Read(store, "a").result(),
::testing::Optional(MatchesKvsReadResult(absl::Cord("value"))));
auto future = txn.CommitAsync();
{
auto req = mock_driver->write_requests.pop();
EXPECT_THAT(req.key, "a");
EXPECT_THAT(req.value, ::testing::Optional(absl::Cord("value")));
EXPECT_THAT(req.options.generation_conditions.if_equal,
StorageGeneration::Unknown());
req.promise.SetResult(TimestampedStorageGeneration(
StorageGeneration::FromString("abc"), absl::Now()));
}
TENSORSTORE_ASSERT_OK(future);
}
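// Without repeatable_read isolation, a value observed by an earlier read is
// not re-validated at commit time, so the commit completes without issuing
// further requests to the underlying driver.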
TEST(KvStoreTest, ReadWithoutRepeatableReadIsolation) {
auto mock_driver = MockKeyValueStore::Make();
Transaction txn(tensorstore::isolated);
KvStore store(mock_driver, "", txn);
{
auto read_future = kvstore::Read(store, "a");
{
auto req = mock_driver->read_requests.pop();
EXPECT_THAT(req.key, "a");
req.promise.SetResult(ReadResult::Value(
absl::Cord("value"),
TimestampedStorageGeneration(StorageGeneration::FromString("abc"),
absl::Now())));
}
EXPECT_THAT(read_future.result(),
::testing::Optional(MatchesKvsReadResult(absl::Cord("value"))));
}
TENSORSTORE_ASSERT_OK(txn.CommitAsync().result());
}
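// With repeatable_read isolation, commit issues a zero-length conditional
// re-read (byte_range [0, 0) with if_not_equal set to the generation
// observed earlier) to verify that the value is unchanged.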
TEST(KvStoreTest, ReadWithRepeatableReadIsolation) {
auto mock_driver = MockKeyValueStore::Make();
Transaction txn(tensorstore::isolated | tensorstore::repeatable_read);
KvStore store(mock_driver, "", txn);
{
auto read_future = kvstore::Read(store, "a");
{
auto req = mock_driver->read_requests.pop();
EXPECT_THAT(req.key, "a");
req.promise.SetResult(ReadResult::Value(
absl::Cord("value"),
TimestampedStorageGeneration(StorageGeneration::FromString("abc"),
absl::Now())));
}
EXPECT_THAT(read_future.result(),
::testing::Optional(MatchesKvsReadResult(absl::Cord("value"))));
}
auto future = txn.CommitAsync();
{
auto req = mock_driver->read_requests.pop();
EXPECT_THAT(req.key, "a");
EXPECT_THAT(req.options.byte_range, OptionalByteRangeRequest(0, 0));
EXPECT_THAT(req.options.generation_conditions.if_not_equal,
StorageGeneration::FromString("abc"));
req.promise.SetResult(ReadResult::Unspecified(TimestampedStorageGeneration(
StorageGeneration::FromString("abc"), absl::Now())));
}
TENSORSTORE_ASSERT_OK(future);
}
TEST(KvStoreTest, ReadInvalidOptionIfEqual) {
auto mock_driver = MockKeyValueStore::Make();
Transaction txn(tensorstore::isolated);
KvStore store(mock_driver, "", txn);
kvstore::ReadOptions options;
options.generation_conditions.if_equal = StorageGeneration::FromString("abc");
EXPECT_THAT(kvstore::Read(store, "a", std::move(options)).result(),
MatchesStatus(absl::StatusCode::kUnimplemented));
}
TEST(KvStoreTest, ReadInvalidOptionByteRange) {
auto mock_driver = MockKeyValueStore::Make();
Transaction txn(tensorstore::isolated);
KvStore store(mock_driver, "", txn);
kvstore::ReadOptions options;
options.byte_range = OptionalByteRangeRequest{5, 10};
EXPECT_THAT(kvstore::Read(store, "a", std::move(options)).result(),
MatchesStatus(absl::StatusCode::kUnimplemented));
}
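// If the repeatable_read validation performed at commit time observes a
// different generation, the commit fails with an aborted
// "Generation mismatch" error.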
TEST(KvStoreTest, ReadMismatch) {
auto mock_driver = MockKeyValueStore::Make();
Transaction txn(tensorstore::isolated | tensorstore::repeatable_read);
KvStore store(mock_driver, "", txn);
{
auto read_future = kvstore::Read(store, "a");
{
auto req = mock_driver->read_requests.pop();
EXPECT_THAT(req.key, "a");
req.promise.SetResult(ReadResult::Value(
absl::Cord("value"),
TimestampedStorageGeneration(StorageGeneration::FromString("abc"),
absl::Now())));
}
EXPECT_THAT(read_future.result(),
::testing::Optional(MatchesKvsReadResult(absl::Cord("value"))));
}
auto future = txn.CommitAsync();
{
auto req = mock_driver->read_requests.pop();
EXPECT_THAT(req.key, "a");
EXPECT_THAT(req.options.byte_range, OptionalByteRangeRequest(0, 0));
EXPECT_THAT(req.options.generation_conditions.if_not_equal,
StorageGeneration::FromString("abc"));
req.promise.SetResult(ReadResult::Missing(TimestampedStorageGeneration(
StorageGeneration::FromString("def"), absl::Now())));
}
{
auto req = mock_driver->read_requests.pop();
EXPECT_THAT(req.key, "a");
req.promise.SetResult(ReadResult::Missing(TimestampedStorageGeneration(
StorageGeneration::FromString("def"), absl::Now())));
}
EXPECT_THAT(future.result(),
MatchesStatus(absl::StatusCode::kAborted,
"Error writing \"a\": Generation mismatch"));
}
TEST(KvStoreTest, ListInvalid) {
auto mock_driver = MockKeyValueStore::Make();
Transaction txn(tensorstore::isolated);
KvStore store(mock_driver, "", txn);
EXPECT_THAT(kvstore::ListFuture(store).result(),
MatchesStatus(absl::StatusCode::kUnimplemented));
}
} |
520 | cpp | google/tensorstore | spec | tensorstore/kvstore/spec.cc | tensorstore/driver/zarr/spec_test.cc | #ifndef TENSORSTORE_KVSTORE_SPEC_H_
#define TENSORSTORE_KVSTORE_SPEC_H_
#include <string>
#include <string_view>
#include "absl/status/status.h"
#include "tensorstore/context.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/json_serialization_options.h"
#include "tensorstore/open_mode.h"
#include "tensorstore/serialization/fwd.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/option.h"
namespace tensorstore {
namespace kvstore {
struct DriverSpecOptions {
bool minimal_spec = false;
template <typename T>
constexpr static bool IsOption = false;
void Set(MinimalSpec value) { minimal_spec = value.minimal_spec(); }
};
template <>
constexpr inline bool DriverSpecOptions::IsOption<MinimalSpec> = true;
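// Extends DriverSpecOptions with context-binding options.  When multiple
// ContextBindingMode values are specified, the strictest one wins.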
struct SpecConvertOptions : public DriverSpecOptions {
ContextBindingMode context_binding_mode = ContextBindingMode::unspecified;
Context context;
template <typename T>
constexpr static bool IsOption = DriverSpecOptions::IsOption<T>;
using DriverSpecOptions::Set;
void Set(Context value) { context = std::move(value); }
void Set(ContextBindingMode value) {
if (value > context_binding_mode) context_binding_mode = value;
}
};
template <>
constexpr inline bool SpecConvertOptions::IsOption<Context> = true;
template <>
constexpr inline bool SpecConvertOptions::IsOption<ContextBindingMode> = true;
class DriverSpec;
void intrusive_ptr_increment(const DriverSpec* p);
void intrusive_ptr_decrement(const DriverSpec* p);
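// Reference-counted pointer to an immutable driver spec, with helpers for
// binding, unbinding, and stripping context resources and for applying spec
// options copy-on-write.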
class DriverSpecPtr : public internal::IntrusivePtr<const DriverSpec> {
using Base = internal::IntrusivePtr<const DriverSpec>;
public:
using Base::Base;
absl::Status BindContext(const Context& context);
  void UnbindContext() { UnbindContext({}); }
void UnbindContext(const internal::ContextSpecBuilder& context_builder);
void StripContext();
ContextBindingState context_binding_state() const;
template <typename... Option>
std::enable_if_t<IsCompatibleOptionSequence<SpecConvertOptions, Option...>,
absl::Status>
Set(Option&&... option) {
SpecConvertOptions options;
(options.Set(option), ...);
return Set(std::move(options));
}
absl::Status Set(DriverSpecOptions&& options);
absl::Status Set(SpecConvertOptions&& options);
friend void EncodeCacheKeyAdl(std::string* out, const DriverSpecPtr& ptr);
};
class Driver;
void intrusive_ptr_increment(Driver* p);
void intrusive_ptr_decrement(Driver* p);
using DriverPtr = internal::IntrusivePtr<Driver>;
void EncodeCacheKeyAdl(std::string* out, const DriverPtr& ptr);
}
namespace internal_kvstore {
template <typename Derived, typename DerivedSpec, typename Parent>
class RegisteredDriver;
template <typename Derived, typename SpecDataT, typename Parent>
class RegisteredDriverSpec;
}
namespace kvstore {
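// Combines a driver spec with a key prefix (`path`) identifying a location
// within the store.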
class Spec {
public:
Spec() = default;
Spec(DriverSpecPtr driver) : driver(std::move(driver)) {}
explicit Spec(DriverSpecPtr driver, std::string path)
: driver(std::move(driver)), path(std::move(path)) {}
void AppendSuffix(std::string_view suffix) { path += suffix; }
void AppendPathComponent(std::string_view component) {
internal::AppendPathComponent(path, component);
}
bool valid() const { return static_cast<bool>(driver); }
DriverSpecPtr driver;
std::string path;
static constexpr auto ApplyMembers = [](auto& x, auto f) {
return f(x.driver, x.path);
};
absl::Status BindContext(const Context& context);
void UnbindContext() { UnbindContext({}); }
void UnbindContext(const internal::ContextSpecBuilder& context_builder);
void StripContext();
ContextBindingState context_binding_state() const {
return driver.context_binding_state();
}
template <typename... Option>
std::enable_if_t<IsCompatibleOptionSequence<SpecConvertOptions, Option...>,
absl::Status>
Set(Option&&... option) {
SpecConvertOptions options;
(options.Set(option), ...);
return Set(std::move(options));
}
absl::Status Set(SpecConvertOptions&& options);
TENSORSTORE_DECLARE_JSON_DEFAULT_BINDER(Spec, JsonSerializationOptions,
JsonSerializationOptions)
static Result<Spec> FromUrl(std::string_view url);
Result<std::string> ToUrl() const;
Result<Spec> base() const;
friend bool operator==(const Spec& a, const Spec& b);
friend bool operator!=(const Spec& a, const Spec& b) { return !(a == b); }
};
}
namespace internal {
template <typename, typename>
struct ContextBindingTraits;
template <>
struct ContextBindingTraits<kvstore::DriverSpecPtr, void> {
using Spec = kvstore::DriverSpecPtr;
static absl::Status Bind(Spec& spec, const Context& context) {
if (!spec) return absl::OkStatus();
return spec.BindContext(context);
}
static void Unbind(Spec& spec, const ContextSpecBuilder& builder) {
spec.UnbindContext(builder);
}
static void Strip(Spec& spec) { spec.StripContext(); }
};
}
namespace internal_json_binding {
TENSORSTORE_DECLARE_JSON_BINDER(KvStoreSpecAndPathJsonBinder, kvstore::Spec,
JsonSerializationOptions,
JsonSerializationOptions,
::nlohmann::json::object_t)
}
}
TENSORSTORE_DECLARE_SERIALIZER_SPECIALIZATION(tensorstore::kvstore::Spec)
TENSORSTORE_DECLARE_GARBAGE_COLLECTION_SPECIALIZATION(
tensorstore::kvstore::Spec)
#endif
#include "tensorstore/kvstore/spec.h"
#include <string>
#include <string_view>
#include <utility>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/registry.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
using ::tensorstore::internal::IntrusivePtr;
namespace tensorstore {
namespace kvstore {
void intrusive_ptr_increment(const DriverSpec* p) {
intrusive_ptr_increment(
static_cast<const internal::AtomicReferenceCount<DriverSpec>*>(p));
}
void intrusive_ptr_decrement(const DriverSpec* p) {
intrusive_ptr_decrement(
static_cast<const internal::AtomicReferenceCount<DriverSpec>*>(p));
}
DriverSpec::~DriverSpec() = default;
absl::Status DriverSpec::NormalizeSpec(std::string& path) {
return absl::OkStatus();
}
Result<std::string> DriverSpec::ToUrl(std::string_view path) const {
return absl::UnimplementedError("URL representation not supported");
}
absl::Status DriverSpec::ApplyOptions(DriverSpecOptions&& options) {
return absl::OkStatus();
}
Result<Spec> DriverSpec::GetBase(std::string_view path) const {
return {std::in_place};
}
Result<Spec> Spec::base() const { return driver->GetBase(path); }
ContextBindingState DriverSpecPtr::context_binding_state() const {
return get()->context_binding_state_;
}
void EncodeCacheKeyAdl(std::string* out, const DriverSpecPtr& ptr) {
return ptr->EncodeCacheKey(out);
}
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(Spec, [](auto is_loading,
const auto& options, auto* obj,
auto* j) {
if constexpr (is_loading) {
if (auto* s = j->template get_ptr<const std::string*>()) {
TENSORSTORE_ASSIGN_OR_RETURN(*obj, Spec::FromUrl(*s));
return absl::OkStatus();
}
} else {
if (!obj->valid()) {
*j = ::nlohmann::json::value_t::discarded;
return absl::OkStatus();
}
}
namespace jb = tensorstore::internal_json_binding;
auto& registry = internal_kvstore::GetDriverRegistry();
return jb::NestedContextJsonBinder(jb::Object(
jb::Member("driver", jb::Projection<&Spec::driver>(registry.KeyBinder())),
jb::Initialize([](Spec* p) {
const_cast<DriverSpec&>(*p->driver).context_binding_state_ =
ContextBindingState::unbound;
}),
jb::Member("context", jb::Projection(
[](const Spec& p) -> Context::Spec& {
return const_cast<Context::Spec&>(
p.driver->context_spec_);
},
internal::ContextSpecDefaultableJsonBinder)),
jb::Member("path", jb::Projection(
[](auto& p) -> decltype(auto) { return (p.path); },
jb::DefaultInitializedValue())),
[&](auto is_loading, const auto& options, auto* obj, auto* j) {
if constexpr (is_loading) {
TENSORSTORE_RETURN_IF_ERROR(registry.RegisteredObjectBinder()(
is_loading, {options, obj->path}, &obj->driver, j));
return const_cast<DriverSpec&>(*obj->driver).NormalizeSpec(obj->path);
} else {
return registry.RegisteredObjectBinder()(is_loading, options,
&obj->driver, j);
}
}))(is_loading, options, obj, j);
})
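// Illustrative example (not part of the original source): the binder above
// accepts either a URL string or an object with "driver", optional
// "context", and optional "path" members, e.g.:
//   {"driver": "memory", "path": "prefix/"}
// where "memory" stands in for any registered kvstore driver.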
absl::Status DriverSpecPtr::Set(DriverSpecOptions&& options) {
if (options.minimal_spec) {
if ((*this)->use_count() != 1) *this = (*this)->Clone();
TENSORSTORE_RETURN_IF_ERROR(
const_cast<DriverSpec*>(get())->ApplyOptions(std::move(options)));
}
return absl::OkStatus();
}
absl::Status DriverSpecPtr::Set(SpecConvertOptions&& options) {
internal::ApplyContextBindingMode(
*this, options.context_binding_mode,
ContextBindingMode::retain);
if (options.context) {
TENSORSTORE_RETURN_IF_ERROR(BindContext(options.context));
}
return Set(static_cast<DriverSpecOptions&&>(options));
}
absl::Status DriverSpecPtr::BindContext(const Context& context) {
return internal::BindContextCopyOnWriteWithNestedContext(*this, context);
}
absl::Status Spec::Set(SpecConvertOptions&& options) {
return driver.Set(std::move(options));
}
void DriverSpecPtr::UnbindContext(
const internal::ContextSpecBuilder& context_builder) {
internal::UnbindContextCopyOnWriteWithNestedContext(*this, context_builder);
}
void DriverSpecPtr::StripContext() {
internal::StripContextCopyOnWriteWithNestedContext(*this);
}
absl::Status Spec::BindContext(const Context& context) {
return driver.BindContext(context);
}
void Spec::UnbindContext(const internal::ContextSpecBuilder& context_builder) {
driver.UnbindContext(context_builder);
}
void Spec::StripContext() { driver.StripContext(); }
Result<std::string> Spec::ToUrl() const {
if (!driver) {
return absl::InvalidArgumentError("Invalid kvstore spec");
}
return driver->ToUrl(path);
}
bool operator==(const Spec& a, const Spec& b) {
if (!a.valid() || !b.valid()) {
return a.valid() == b.valid();
}
return internal::ContextBindableSpecsSameViaJson(a, b);
}
}
namespace serialization {
namespace {
using DriverSpecPtrNonNullDirectSerializer =
RegistrySerializer<internal::IntrusivePtr<const kvstore::DriverSpec>>;
using DriverSpecPtrSerializer =
IndirectPointerSerializer<internal::IntrusivePtr<const kvstore::DriverSpec>,
DriverSpecPtrNonNullDirectSerializer>;
using DriverSpecPtrNonNullSerializer = NonNullIndirectPointerSerializer<
internal::IntrusivePtr<const kvstore::DriverSpec>,
DriverSpecPtrNonNullDirectSerializer>;
}
}
namespace internal_json_binding {
TENSORSTORE_DEFINE_JSON_BINDER(
KvStoreSpecAndPathJsonBinder,
Sequence(Member("kvstore", DefaultInitializedPredicate([](auto* obj) {
return !obj->valid();
})),
LoadSave(OptionalMember(
"path",
Compose<std::string>([](auto is_loading, const auto& options,
auto* obj, std::string* j) {
if (!obj->valid()) {
return absl::InvalidArgumentError(
"\"path\" must be specified in conjunction with "
"\"kvstore\"");
}
obj->AppendPathComponent(*j);
return absl::OkStatus();
})))))
}
}
TENSORSTORE_DEFINE_SERIALIZER_SPECIALIZATION(
tensorstore::kvstore::DriverSpecPtr,
tensorstore::serialization::DriverSpecPtrSerializer())
TENSORSTORE_DEFINE_SERIALIZER_SPECIALIZATION(
tensorstore::kvstore::Spec,
tensorstore::serialization::ApplyMembersSerializer<
tensorstore::kvstore::Spec>())
TENSORSTORE_DEFINE_GARBAGE_COLLECTION_SPECIALIZATION(
tensorstore::kvstore::DriverSpec,
tensorstore::garbage_collection::PolymorphicGarbageCollection<
tensorstore::kvstore::DriverSpec>)
TENSORSTORE_DEFINE_GARBAGE_COLLECTION_SPECIALIZATION(
tensorstore::kvstore::Spec,
tensorstore::garbage_collection::ApplyMembersGarbageCollection<
tensorstore::kvstore::Spec>)
TENSORSTORE_DEFINE_GARBAGE_COLLECTION_SPECIALIZATION(
tensorstore::kvstore::DriverSpecPtr,
tensorstore::garbage_collection::IndirectPointerGarbageCollection<
tensorstore::kvstore::DriverSpecPtr>) | #include "tensorstore/driver/zarr/spec.h"
#include <stdint.h>
#include <optional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
#include "tensorstore/codec_spec.h"
#include "tensorstore/driver/zarr/metadata.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::ChunkLayout;
using ::tensorstore::CodecSpec;
using ::tensorstore::dtype_v;
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Schema;
using ::tensorstore::internal_zarr::GetFieldIndex;
using ::tensorstore::internal_zarr::ParseDType;
using ::tensorstore::internal_zarr::ParseSelectedField;
using ::tensorstore::internal_zarr::SelectedField;
using ::tensorstore::internal_zarr::ZarrMetadata;
using ::tensorstore::internal_zarr::ZarrPartialMetadata;
TEST(ParsePartialMetadataTest, InvalidZarrFormat) {
tensorstore::TestJsonBinderFromJson<ZarrPartialMetadata>({
{{{"zarr_format", "2"}},
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"zarr_format\": .*")},
});
}
TEST(ParsePartialMetadataTest, InvalidChunks) {
tensorstore::TestJsonBinderFromJson<ZarrPartialMetadata>({
{{{"chunks", "2"}},
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"chunks\": .*")},
});
}
TEST(ParsePartialMetadataTest, InvalidShape) {
tensorstore::TestJsonBinderFromJson<ZarrPartialMetadata>({
{{{"shape", "2"}},
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"shape\": .*")},
});
}
TEST(ParsePartialMetadataTest, InvalidCompressor) {
tensorstore::TestJsonBinderFromJson<ZarrPartialMetadata>({
{{{"compressor", "2"}},
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"compressor\": .*")},
});
}
TEST(ParsePartialMetadataTest, InvalidOrder) {
tensorstore::TestJsonBinderFromJson<ZarrPartialMetadata>({
{{{"order", "2"}},
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"order\": .*")},
});
}
TEST(ParsePartialMetadataTest, InvalidDType) {
tensorstore::TestJsonBinderFromJson<ZarrPartialMetadata>({
{{{"dtype", "2"}},
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"dtype\": .*")},
});
}
TEST(ParsePartialMetadataTest, InvalidFilters) {
tensorstore::TestJsonBinderFromJson<ZarrPartialMetadata>({
{{{"filters", "x"}},
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"filters\": .*")},
});
}
TEST(ParsePartialMetadataTest, Empty) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto result, ZarrPartialMetadata::FromJson(::nlohmann::json::object_t{}));
EXPECT_EQ(std::nullopt, result.zarr_format);
EXPECT_EQ(std::nullopt, result.order);
EXPECT_EQ(std::nullopt, result.compressor);
EXPECT_EQ(std::nullopt, result.filters);
EXPECT_EQ(std::nullopt, result.dtype);
EXPECT_EQ(std::nullopt, result.fill_value);
EXPECT_EQ(std::nullopt, result.shape);
EXPECT_EQ(std::nullopt, result.chunks);
}
::nlohmann::json GetMetadataSpec() {
return {{"zarr_format", 2},
{"chunks", {3, 2}},
{"shape", {100, 100}},
{"order", "C"},
{"filters", nullptr},
{"fill_value", nullptr},
{"dtype", "<i2"},
{"compressor",
{{"id", "blosc"},
{"blocksize", 0},
{"clevel", 5},
{"cname", "lz4"},
{"shuffle", -1}}}};
}
TEST(ParsePartialMetadataTest, Complete) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto result, ZarrPartialMetadata::FromJson(GetMetadataSpec()));
EXPECT_EQ(2, result.zarr_format);
EXPECT_EQ(tensorstore::c_order, result.order);
ASSERT_TRUE(result.compressor);
EXPECT_EQ((::nlohmann::json{{"id", "blosc"},
{"blocksize", 0},
{"clevel", 5},
{"cname", "lz4"},
{"shuffle", -1}}),
::nlohmann::json(*result.compressor));
ASSERT_TRUE(result.dtype);
EXPECT_EQ("<i2", ::nlohmann::json(*result.dtype));
ASSERT_TRUE(result.fill_value);
ASSERT_EQ(1, result.fill_value->size());
EXPECT_FALSE((*result.fill_value)[0].valid());
ASSERT_TRUE(result.shape);
EXPECT_THAT(*result.shape, ::testing::ElementsAre(100, 100));
ASSERT_TRUE(result.chunks);
EXPECT_THAT(*result.chunks, ::testing::ElementsAre(3, 2));
}
TEST(ParseSelectedFieldTest, Null) {
EXPECT_EQ(SelectedField(), ParseSelectedField(nullptr));
}
TEST(ParseSelectedFieldTest, InvalidString) {
EXPECT_THAT(
ParseSelectedField(""),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected null or non-empty string, but received: \"\""));
}
TEST(ParseSelectedFieldTest, String) {
EXPECT_EQ(SelectedField("label"), ParseSelectedField("label"));
}
TEST(ParseSelectedFieldTest, InvalidType) {
EXPECT_THAT(
ParseSelectedField(true),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected null or non-empty string, but received: true"));
}
TEST(GetFieldIndexTest, Null) {
EXPECT_EQ(0u, GetFieldIndex(ParseDType("<i4").value(), SelectedField()));
EXPECT_THAT(
GetFieldIndex(
ParseDType(::nlohmann::json::array_t{{"x", "<i4"}, {"y", "<u2"}})
.value(),
SelectedField()),
MatchesStatus(
absl::StatusCode::kFailedPrecondition,
"Must specify a \"field\" that is one of: \\[\"x\",\"y\"\\]"));
}
TEST(GetFieldIndexTest, String) {
EXPECT_THAT(
GetFieldIndex(ParseDType("<i4").value(), "x"),
MatchesStatus(
absl::StatusCode::kFailedPrecondition,
"Requested field \"x\" but dtype does not have named fields"));
EXPECT_EQ(0u, GetFieldIndex(ParseDType(::nlohmann::json::array_t{
{"x", "<i4"}, {"y", "<u2"}})
.value(),
"x"));
EXPECT_EQ(1u, GetFieldIndex(ParseDType(::nlohmann::json::array_t{
{"x", "<i4"}, {"y", "<u2"}})
.value(),
"y"));
EXPECT_THAT(
GetFieldIndex(
ParseDType(::nlohmann::json::array_t{{"x", "<i4"}, {"y", "<u2"}})
.value(),
"z"),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Requested field \"z\" is not one of: \\[\"x\",\"y\"\\]"));
}
TEST(EncodeSelectedFieldTest, NonEmpty) {
auto dtype =
ParseDType(::nlohmann::json::array_t{{"x", "<i4"}, {"y", "<u2"}}).value();
EXPECT_EQ("x", EncodeSelectedField(0, dtype));
EXPECT_EQ("y", EncodeSelectedField(1, dtype));
}
TEST(EncodeSelectedFieldTest, Empty) {
auto dtype = ParseDType("<i4").value();
EXPECT_EQ("", EncodeSelectedField(0, dtype));
}
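// Test helper: applies the given schema options, parses
// `partial_metadata_json` as ZarrPartialMetadata, and returns the JSON of
// the metadata that GetNewMetadata would create for `selected_field`.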
template <typename... Option>
tensorstore::Result<::nlohmann::json> GetNewMetadataFromOptions(
::nlohmann::json partial_metadata_json, std::string selected_field,
Option&&... option) {
Schema schema;
if (absl::Status status;
!((status = schema.Set(std::forward<Option>(option))).ok() && ...)) {
return status;
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto partial_metadata,
ZarrPartialMetadata::FromJson(partial_metadata_json));
TENSORSTORE_ASSIGN_OR_RETURN(
auto new_metadata,
GetNewMetadata(partial_metadata, selected_field, schema));
return new_metadata->ToJson();
}
TEST(GetNewMetadataTest, FullMetadata) {
EXPECT_THAT(GetNewMetadataFromOptions({{"chunks", {8, 10}},
{"dtype", "<i4"},
{"compressor", nullptr},
{"shape", {5, 6}}},
{}),
::testing::Optional(MatchesJson({
{"chunks", {8, 10}},
{"compressor", nullptr},
{"dtype", "<i4"},
{"fill_value", nullptr},
{"filters", nullptr},
{"order", "C"},
{"shape", {5, 6}},
{"zarr_format", 2},
{"dimension_separator", "."},
})));
}
TEST(GetNewMetadataTest, NoShape) {
EXPECT_THAT(
GetNewMetadataFromOptions(
{{"chunks", {2, 3}}, {"dtype", "<i4"}, {"compressor", nullptr}},
{}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"domain must be specified"));
}
TEST(GetNewMetadataTest, AutomaticChunks) {
EXPECT_THAT(
GetNewMetadataFromOptions(
{{"shape", {2, 3}}, {"dtype", "<i4"}, {"compressor", nullptr}},
{}),
::testing::Optional(MatchesJson({
{"chunks", {2, 3}},
{"compressor", nullptr},
{"dtype", "<i4"},
{"fill_value", nullptr},
{"filters", nullptr},
{"order", "C"},
{"shape", {2, 3}},
{"zarr_format", 2},
{"dimension_separator", "."},
})));
}
TEST(GetNewMetadataTest, NoDtype) {
EXPECT_THAT(
GetNewMetadataFromOptions(
{{"shape", {2, 3}}, {"chunks", {2, 3}}, {"compressor", nullptr}},
{}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"\"dtype\" must be specified"));
}
TEST(GetNewMetadataTest, NoCompressor) {
EXPECT_THAT(GetNewMetadataFromOptions(
{{"shape", {2, 3}}, {"chunks", {2, 3}}, {"dtype", "<i4"}},
{}),
::testing::Optional(MatchesJson({
{"fill_value", nullptr},
{"filters", nullptr},
{"zarr_format", 2},
{"order", "C"},
{"shape", {2, 3}},
{"chunks", {2, 3}},
{"dtype", "<i4"},
{"compressor",
{
{"id", "blosc"},
{"cname", "lz4"},
{"clevel", 5},
{"blocksize", 0},
{"shuffle", -1},
}},
{"dimension_separator", "."},
})));
}
TEST(GetNewMetadataTest, IntegerOverflow) {
EXPECT_THAT(
GetNewMetadataFromOptions(
{{"shape", {4611686018427387903, 4611686018427387903}},
{"chunks", {4611686018427387903, 4611686018427387903}},
{"dtype", "<i4"},
{"compressor", nullptr}},
{}),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Product of chunk dimensions "
"\\{4611686018427387903, 4611686018427387903\\} is too large"));
}
TEST(GetNewMetadataTest, SchemaDomainDtype) {
EXPECT_THAT(GetNewMetadataFromOptions(::nlohmann::json::object_t(),
{},
tensorstore::IndexDomainBuilder(3)
.shape({1000, 2000, 3000})
.Finalize()
.value(),
dtype_v<int32_t>),
::testing::Optional(MatchesJson({
{"fill_value", nullptr},
{"filters", nullptr},
{"zarr_format", 2},
{"order", "C"},
{"shape", {1000, 2000, 3000}},
{"chunks", {101, 101, 101}},
{"dtype", "<i4"},
{"compressor",
{
{"id", "blosc"},
{"cname", "lz4"},
{"clevel", 5},
{"blocksize", 0},
{"shuffle", -1},
}},
{"dimension_separator", "."},
})));
}
TEST(GetNewMetadataTest, SchemaDomainDtypeFillValue) {
EXPECT_THAT(GetNewMetadataFromOptions(
::nlohmann::json::object_t(),
{},
tensorstore::IndexDomainBuilder(3)
.shape({1000, 2000, 3000})
.Finalize()
.value(),
dtype_v<int32_t>,
Schema::FillValue{tensorstore::MakeScalarArray<int32_t>(5)}),
::testing::Optional(MatchesJson({
{"fill_value", 5},
{"filters", nullptr},
{"zarr_format", 2},
{"order", "C"},
{"shape", {1000, 2000, 3000}},
{"chunks", {101, 101, 101}},
{"dtype", "<i4"},
{"compressor",
{
{"id", "blosc"},
{"cname", "lz4"},
{"clevel", 5},
{"blocksize", 0},
{"shuffle", -1},
}},
{"dimension_separator", "."},
})));
}
TEST(GetNewMetadataTest, SchemaObjectWithDomainDtypeFillValue) {
Schema schema;
TENSORSTORE_ASSERT_OK(schema.Set(tensorstore::IndexDomainBuilder(3)
.shape({1000, 2000, 3000})
.Finalize()
.value()));
TENSORSTORE_ASSERT_OK(schema.Set(dtype_v<int32_t>));
TENSORSTORE_ASSERT_OK(
schema.Set(Schema::FillValue{tensorstore::MakeScalarArray<int32_t>(5)}));
EXPECT_THAT(GetNewMetadataFromOptions(::nlohmann::json::object_t(),
{}, schema),
::testing::Optional(MatchesJson({
{"fill_value", 5},
{"filters", nullptr},
{"zarr_format", 2},
{"order", "C"},
{"shape", {1000, 2000, 3000}},
{"chunks", {101, 101, 101}},
{"dtype", "<i4"},
{"compressor",
{
{"id", "blosc"},
{"cname", "lz4"},
{"clevel", 5},
{"blocksize", 0},
{"shuffle", -1},
}},
{"dimension_separator", "."},
})));
}
TEST(GetNewMetadataTest, SchemaDtypeShapeCodec) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto codec,
CodecSpec::FromJson({{"driver", "zarr"}, {"compressor", nullptr}}));
EXPECT_THAT(GetNewMetadataFromOptions(::nlohmann::json::object_t(),
{},
Schema::Shape({100, 200}),
dtype_v<int32_t>, codec),
::testing::Optional(MatchesJson({
{"fill_value", nullptr},
{"filters", nullptr},
{"zarr_format", 2},
{"order", "C"},
{"shape", {100, 200}},
{"chunks", {100, 200}},
{"dtype", "<i4"},
{"compressor", nullptr},
{"dimension_separator", "."},
})));
}
TEST(GetNewMetadataTest, SchemaDtypeInnerOrderC) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto codec,
CodecSpec::FromJson({{"driver", "zarr"}, {"compressor", nullptr}}));
EXPECT_THAT(GetNewMetadataFromOptions(
::nlohmann::json::object_t(),
{}, Schema::Shape({100, 200}),
ChunkLayout::InnerOrder({0, 1}), dtype_v<int32_t>, codec),
::testing::Optional(MatchesJson({
{"fill_value", nullptr},
{"filters", nullptr},
{"zarr_format", 2},
{"order", "C"},
{"shape", {100, 200}},
{"chunks", {100, 200}},
{"dtype", "<i4"},
{"compressor", nullptr},
{"dimension_separator", "."},
})));
}
TEST(GetNewMetadataTest, SchemaDtypeInnerOrderFortran) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto codec,
CodecSpec::FromJson({{"driver", "zarr"}, {"compressor", nullptr}}));
EXPECT_THAT(GetNewMetadataFromOptions(
::nlohmann::json::object_t(),
{}, Schema::Shape({100, 200}),
ChunkLayout::InnerOrder({1, 0}), dtype_v<int32_t>, codec),
::testing::Optional(MatchesJson({
{"fill_value", nullptr},
{"filters", nullptr},
{"zarr_format", 2},
{"order", "F"},
{"shape", {100, 200}},
{"chunks", {100, 200}},
{"dtype", "<i4"},
{"compressor", nullptr},
{"dimension_separator", "."},
})));
}
TEST(GetNewMetadataTest, SchemaDtypeInnerOrderFortranFieldShape) {
EXPECT_THAT(GetNewMetadataFromOptions(
{
{"compressor", nullptr},
{"dtype", {{"x", "<u4", {2, 3}}}},
},
"x", Schema::Shape({100, 200, 2, 3}),
ChunkLayout::InnerOrder({1, 0, 2, 3})),
::testing::Optional(MatchesJson({
{"fill_value", nullptr},
{"filters", nullptr},
{"zarr_format", 2},
{"order", "F"},
{"shape", {100, 200}},
{"chunks", {100, 200}},
{"dtype", {{"x", "<u4", {2, 3}}}},
{"compressor", nullptr},
{"dimension_separator", "."},
})));
}
TEST(GetNewMetadataTest, SchemaDtypeInnerOrderInvalid) {
EXPECT_THAT(
GetNewMetadataFromOptions(
::nlohmann::json::object_t(),
{}, Schema::Shape({100, 200, 300}),
ChunkLayout::InnerOrder({2, 0, 1}), dtype_v<int32_t>),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Invalid \"inner_order\" constraint: \\{2, 0, 1\\}"));
}
TEST(GetNewMetadataTest, SchemaDtypeInnerOrderInvalidSoft) {
EXPECT_THAT(GetNewMetadataFromOptions(
{{"compressor", nullptr}},
{}, Schema::Shape({100, 200, 300}),
ChunkLayout::InnerOrder({2, 0, 1}, false),
dtype_v<int32_t>),
::testing::Optional(MatchesJson({
{"fill_value", nullptr},
{"filters", nullptr},
{"zarr_format", 2},
{"order", "C"},
{"shape", {100, 200, 300}},
{"chunks", {100, 102, 102}},
{"dtype", "<i4"},
{"compressor", nullptr},
{"dimension_separator", "."},
})));
}
TEST(GetNewMetadataTest, SchemaStructuredDtypeInvalidFillValue) {
EXPECT_THAT(
GetNewMetadataFromOptions(
{{"dtype", ::nlohmann::json::array_t{{"x", "<u4"}, {"y", "<i4"}}}},
"x", Schema::Shape({100, 200}),
Schema::FillValue(tensorstore::MakeScalarArray<uint32_t>(42))),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Invalid fill_value: Cannot specify fill_value through schema for "
"structured zarr data type \\[.*"));
}
TEST(GetNewMetadataTest, SchemaFillValueMismatch) {
EXPECT_THAT(
GetNewMetadataFromOptions(
{{"dtype", "<u4"}, {"fill_value", 42}},
{}, Schema::Shape({100, 200}),
Schema::FillValue(tensorstore::MakeScalarArray<uint32_t>(43))),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Invalid fill_value: .*"));
}
TEST(GetNewMetadataTest, SchemaFillValueMismatchNull) {
EXPECT_THAT(
GetNewMetadataFromOptions(
{{"dtype", "<u4"}, {"fill_value", nullptr}},
{}, Schema::Shape({100, 200}),
Schema::FillValue(tensorstore::MakeScalarArray<uint32_t>(42))),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Invalid fill_value: .*"));
}
TEST(GetNewMetadataTest, SchemaFillValueRedundant) {
EXPECT_THAT(
GetNewMetadataFromOptions(
{
{"dtype", "<u4"},
{"fill_value", 42},
{"compressor", nullptr},
},
{}, Schema::Shape({100, 200}),
Schema::FillValue(tensorstore::MakeScalarArray<uint32_t>(42))),
::testing::Optional(MatchesJson({
{"fill_value", 42},
{"filters", nullptr},
{"zarr_format", 2},
{"order", "C"},
{"shape", {100, 200}},
{"chunks", {100, 200}},
{"dtype", "<u4"},
{"compressor", nullptr},
{"dimension_separator", "."},
})));
}
TEST(GetNewMetadataTest, SchemaCodecChunkShape) {
EXPECT_THAT(GetNewMetadataFromOptions(
::nlohmann::json::object_t{},
{}, Schema::Shape({100, 200}),
dtype_v<uint32_t>, ChunkLayout::CodecChunkShape({5, 6})),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"codec_chunk_shape not supported"));
}
TEST(GetNewMetadataTest, CodecMismatch) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto codec,
CodecSpec::FromJson({{"driver", "zarr"}, {"compressor", nullptr}}));
EXPECT_THAT(
GetNewMetadataFromOptions({{"compressor", {{"id", "blosc"}}}},
{},
Schema::Shape({100, 200}), dtype_v<int32_t>,
codec),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Cannot merge codec spec .* with .*: \"compressor\" does not match"));
}
TEST(GetNewMetadataTest, SelectedFieldDtypeNotSpecified) {
EXPECT_THAT(
GetNewMetadataFromOptions(::nlohmann::json::object_t(),
"x",
Schema::Shape({100, 200}), dtype_v<int32_t>),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"\"dtype\" must be specified in \"metadata\" if "
"\"field\" is specified"));
}
TEST(GetNewMetadataTest, SelectedFieldInvalid) {
EXPECT_THAT(
GetNewMetadataFromOptions({{"dtype", {{"x", "<u4", {2}}, {"y", "<i4"}}}},
"z",
Schema::Shape({100, 200})),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Requested field \"z\" is not one of: \\[\"x\",\"y\"\\]"));
}
TEST(GetNewMetadataTest, InvalidDtype) {
EXPECT_THAT(GetNewMetadataFromOptions(::nlohmann::json::object_t(),
{},
dtype_v<tensorstore::dtypes::json_t>,
Schema::Shape({100, 200})),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Data type not supported: json"));
}
TEST(GetNewMetadataTest, InvalidDomain) {
EXPECT_THAT(
GetNewMetadataFromOptions(::nlohmann::json::object_t(),
{}, dtype_v<int32_t>,
tensorstore::IndexDomainBuilder(2)
.origin({1, 2})
.shape({100, 200})
.Finalize()
.value()),
MatchesStatus(absl::StatusCode::kInvalidArgument, "Invalid domain: .*"));
}
TEST(GetNewMetadataTest, DomainIncompatibleWithFieldShape) {
EXPECT_THAT(
GetNewMetadataFromOptions({{"dtype", {{"x", "<u4", {2, 3}}}}},
"x",
Schema::Shape({100, 200, 2, 4})),
MatchesStatus(absl::StatusCode::kInvalidArgument, "Invalid domain: .*"));
}
TEST(GetNewMetadataTest, DomainIncompatibleWithMetadataRank) {
EXPECT_THAT(
GetNewMetadataFromOptions({{"chunks", {100, 100}}},
{}, dtype_v<int32_t>,
Schema::Shape({100, 200, 300})),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Rank specified by schema \\(3\\) is not compatible with metadata"));
}
TEST(ValidateMetadataTest, Success) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto metadata,
ZarrMetadata::FromJson(GetMetadataSpec()));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto partial_metadata, ZarrPartialMetadata::FromJson(GetMetadataSpec()));
TENSORSTORE_EXPECT_OK(ValidateMetadata(metadata, partial_metadata));
}
TEST(ValidateMetadataTest, Unconstrained) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto metadata,
ZarrMetadata::FromJson(GetMetadataSpec()));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto partial_metadata,
ZarrPartialMetadata::FromJson(::nlohmann::json::object_t{}));
TENSORSTORE_EXPECT_OK(ValidateMetadata(metadata, partial_metadata));
}
TEST(ValidateMetadataTest, ShapeMismatch) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto metadata,
ZarrMetadata::FromJson(GetMetadataSpec()));
::nlohmann::json spec = GetMetadataSpec();
spec["shape"] = {7, 8};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto partial_metadata,
ZarrPartialMetadata::FromJson(spec));
EXPECT_THAT(
ValidateMetadata(metadata, partial_metadata),
MatchesStatus(
absl::StatusCode::kFailedPrecondition,
"Expected \"shape\" of \\[7,8\\] but received: \\[100,100\\]"));
}
TEST(ValidateMetadataTest, ChunksMismatch) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto metadata,
ZarrMetadata::FromJson(GetMetadataSpec()));
::nlohmann::json spec = GetMetadataSpec();
spec["chunks"] = {1, 1};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto partial_metadata,
ZarrPartialMetadata::FromJson(spec));
EXPECT_THAT(ValidateMetadata(metadata, partial_metadata),
MatchesStatus(
absl::StatusCode::kFailedPrecondition,
"Expected \"chunks\" of \\[1,1\\] but received: \\[3,2\\]"));
}
TEST(ValidateMetadataTest, OrderMismatch) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto metadata,
ZarrMetadata::FromJson(GetMetadataSpec()));
::nlohmann::json spec = GetMetadataSpec();
spec["order"] = "F";
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto partial_metadata,
ZarrPartialMetadata::FromJson(spec));
EXPECT_THAT(ValidateMetadata(metadata, partial_metadata),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Expected \"order\" of \"F\" but received: \"C\""));
}
TEST(ValidateMetadataTest, CompressorMismatch) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto metadata,
ZarrMetadata::FromJson(GetMetadataSpec()));
::nlohmann::json spec = GetMetadataSpec();
spec["compressor"] = nullptr;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto partial_metadata,
ZarrPartialMetadata::FromJson(spec));
EXPECT_THAT(ValidateMetadata(metadata, partial_metadata),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Expected \"compressor\" of null but received: "
"\\{\"blocksize\":0,\"clevel\":5,\"cname\":\"lz4\","
"\"id\":\"blosc\",\"shuffle\":-1\\}"));
}
TEST(ValidateMetadataTest, DTypeMismatch) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto metadata,
ZarrMetadata::FromJson(GetMetadataSpec()));
::nlohmann::json spec = GetMetadataSpec();
spec["dtype"] = ">i4";
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto partial_metadata,
ZarrPartialMetadata::FromJson(spec));
EXPECT_THAT(
ValidateMetadata(metadata, partial_metadata),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Expected \"dtype\" of \">i4\" but received: \"<i2\""));
}
TEST(ValidateMetadataTest, FillValueMismatch) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto metadata,
ZarrMetadata::FromJson(GetMetadataSpec()));
::nlohmann::json spec = GetMetadataSpec();
spec["fill_value"] = 1;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto partial_metadata,
ZarrPartialMetadata::FromJson(spec));
EXPECT_THAT(ValidateMetadata(metadata, partial_metadata),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Expected \"fill_value\" of 1 but received: null"));
}
TEST(ZarrCodecSpecTest, Merge) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto codec1,
CodecSpec::FromJson({{"driver", "zarr"}}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto codec2,
CodecSpec::FromJson({{"driver", "zarr"}, {"filters", nullptr}}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto codec3,
CodecSpec::FromJson({{"driver", "zarr"}, {"compressor", nullptr}}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto codec4, CodecSpec::FromJson({{"driver", "zarr"},
{"compressor", {{"id", "blosc"}}}}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto codec5,
CodecSpec::FromJson(
{{"driver", "zarr"}, {"compressor", nullptr}, {"filters", nullptr}}));
EXPECT_THAT(CodecSpec::Merge(codec1, codec1), ::testing::Optional(codec1));
EXPECT_THAT(CodecSpec::Merge(codec3, codec3), ::testing::Optional(codec3));
EXPECT_THAT(CodecSpec::Merge(codec1, CodecSpec()),
::testing::Optional(codec1));
EXPECT_THAT(CodecSpec::Merge(CodecSpec(), codec1),
::testing::Optional(codec1));
EXPECT_THAT(CodecSpec::Merge(CodecSpec(), CodecSpec()),
::testing::Optional(CodecSpec()));
EXPECT_THAT(CodecSpec::Merge(codec1, codec2), ::testing::Optional(codec2));
EXPECT_THAT(CodecSpec::Merge(codec1, codec3), ::testing::Optional(codec3));
EXPECT_THAT(CodecSpec::Merge(codec2, codec3), ::testing::Optional(codec5));
EXPECT_THAT(
CodecSpec::Merge(codec3, codec4),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Cannot merge codec spec .* with .*: \"compressor\" does not match"));
}
TEST(ZarrCodecSpecTest, RoundTrip) {
tensorstore::TestJsonBinderRoundTripJsonOnly<tensorstore::CodecSpec>({
::nlohmann::json::value_t::discarded,
{
{"driver", "zarr"},
{"compressor", nullptr},
{"filters", nullptr},
},
{
{"driver", "zarr"},
{"compressor",
{{"id", "blosc"},
{"cname", "lz4"},
{"clevel", 5},
{"blocksize", 0},
{"shuffle", -1}}},
{"filters", nullptr},
},
});
}
} |
521 | cpp | google/tensorstore | downsample | tensorstore/driver/downsample/downsample.cc | tensorstore/driver/downsample/downsample_test.cc | #ifndef TENSORSTORE_DRIVER_DOWNSAMPLE_DOWNSAMPLE_H_
#define TENSORSTORE_DRIVER_DOWNSAMPLE_DOWNSAMPLE_H_
#include "tensorstore/downsample_method.h"
#include "tensorstore/driver/driver.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal {
Result<Driver::Handle> MakeDownsampleDriver(
Driver::Handle base, span<const Index> downsample_factors,
DownsampleMethod downsample_method);
}
}
#endif
#include "tensorstore/driver/downsample/downsample.h"
#include <stddef.h>
#include <algorithm>
#include <cassert>
#include <mutex>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "tensorstore/array.h"
#include "tensorstore/array_storage_statistics.h"
#include "tensorstore/box.h"
#include "tensorstore/chunk_layout.h"
#include "tensorstore/codec_spec.h"
#include "tensorstore/context.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/downsample_method.h"
#include "tensorstore/driver/chunk.h"
#include "tensorstore/driver/downsample/downsample_array.h"
#include "tensorstore/driver/downsample/downsample_method_json_binder.h"
#include "tensorstore/driver/downsample/downsample_nditerable.h"
#include "tensorstore/driver/downsample/downsample_util.h"
#include "tensorstore/driver/downsample/grid_occupancy_map.h"
#include "tensorstore/driver/driver.h"
#include "tensorstore/driver/driver_handle.h"
#include "tensorstore/driver/driver_spec.h"
#include "tensorstore/driver/read.h"
#include "tensorstore/driver/registry.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/dimension_units.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/internal/lock_collection.h"
#include "tensorstore/internal/nditerable_transformed_array.h"
#include "tensorstore/json_serialization_options.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/open_mode.h"
#include "tensorstore/open_options.h"
#include "tensorstore/rank.h"
#include "tensorstore/resize_options.h"
#include "tensorstore/schema.h"
#include "tensorstore/serialization/std_vector.h"
#include "tensorstore/spec.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender_util.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/std_vector.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_downsample {
namespace {
using ::tensorstore::internal::DriverPtr;
using ::tensorstore::internal::IntrusivePtr;
using ::tensorstore::internal::LockCollection;
using ::tensorstore::internal::NDIterable;
using ::tensorstore::internal::OpenTransactionPtr;
using ::tensorstore::internal::ReadChunk;
using ::tensorstore::internal::TransformedDriverSpec;
namespace jb = tensorstore::internal_json_binding;
Result<IndexDomain<>> GetBaseDomainConstraintFromDownsampledDomain(
IndexDomain<> downsampled_domain, span<const Index> downsample_factors) {
assert(downsampled_domain.valid());
const DimensionIndex rank = downsampled_domain.rank();
assert(rank == downsample_factors.size());
IndexDomainBuilder builder(rank);
builder.labels(downsampled_domain.labels());
auto& implicit_lower_bounds = builder.implicit_lower_bounds();
auto& implicit_upper_bounds = builder.implicit_upper_bounds();
auto origin = builder.origin();
auto shape = builder.shape();
for (DimensionIndex i = 0; i < rank; ++i) {
if (downsample_factors[i] != 1) {
implicit_lower_bounds[i] = true;
implicit_upper_bounds[i] = true;
origin[i] = -kInfIndex;
shape[i] = kInfSize;
} else {
implicit_lower_bounds[i] = downsampled_domain.implicit_lower_bounds()[i];
implicit_upper_bounds[i] = downsampled_domain.implicit_upper_bounds()[i];
origin[i] = downsampled_domain.origin()[i];
shape[i] = downsampled_domain.shape()[i];
}
}
return builder.Finalize();
}
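// Maps a transform on the downsampled view to a transform on the base
// driver.  For stride-based downsampling this is plain composition with a
// striding transform; for other methods the transform must be propagated
// through the downsample factors, which may expand the base domain that is
// read.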
Result<IndexTransform<>> GetBaseTransformForDownsampledTransform(
IndexTransformView<> base_transform,
IndexTransformView<> downsampled_transform,
span<const Index> downsample_factors, DownsampleMethod downsample_method) {
if (downsample_method == DownsampleMethod::kStride) {
return base_transform | tensorstore::AllDims().Stride(downsample_factors) |
downsampled_transform;
}
PropagatedIndexTransformDownsampling propagated;
TENSORSTORE_RETURN_IF_ERROR(
internal_downsample::PropagateAndComposeIndexTransformDownsampling(
downsampled_transform, base_transform, downsample_factors,
propagated));
return std::move(propagated.transform);
}
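// Spec for the "downsample" adapter driver: wraps a base TensorStore spec
// together with per-dimension downsample factors and a downsample method.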
class DownsampleDriverSpec
: public internal::RegisteredDriverSpec<DownsampleDriverSpec,
internal::DriverSpec> {
public:
constexpr static char id[] = "downsample";
TransformedDriverSpec base;
std::vector<Index> downsample_factors;
DownsampleMethod downsample_method;
constexpr static auto ApplyMembers = [](auto& x, auto f) {
return f(internal::BaseCast<internal::DriverSpec>(x), x.base,
x.downsample_factors, x.downsample_method);
};
absl::Status InitializeFromBase() {
TENSORSTORE_RETURN_IF_ERROR(
this->schema.Set(RankConstraint{internal::GetRank(this->base)}));
TENSORSTORE_RETURN_IF_ERROR(
this->schema.Set(this->base.driver_spec->schema.dtype()));
return absl::OkStatus();
}
absl::Status ValidateDownsampleFactors() {
TENSORSTORE_RETURN_IF_ERROR(
this->schema.Set(RankConstraint(this->downsample_factors.size())));
return absl::OkStatus();
}
absl::Status ValidateDownsampleMethod() {
auto dtype = this->schema.dtype();
if (!dtype.valid()) return absl::OkStatus();
return internal_downsample::ValidateDownsampleMethod(
dtype, this->downsample_method);
}
OpenMode open_mode() const override { return base.driver_spec->open_mode(); }
absl::Status ApplyOptions(SpecOptions&& options) override {
TENSORSTORE_RETURN_IF_ERROR(schema.Set(options.dtype()));
TENSORSTORE_RETURN_IF_ERROR(schema.Set(options.rank()));
auto transform = base.transform;
if (!transform.valid()) {
transform = tensorstore::IdentityTransform(downsample_factors.size());
}
if (options.domain().valid()) {
TENSORSTORE_RETURN_IF_ERROR(schema.Set(options.domain()));
TENSORSTORE_ASSIGN_OR_RETURN(auto base_domain,
GetBaseDomainConstraintFromDownsampledDomain(
options.domain(), downsample_factors));
TENSORSTORE_RETURN_IF_ERROR(options.Override(std::move(base_domain)));
}
TENSORSTORE_ASSIGN_OR_RETURN(
transform, transform | AllDims().Stride(downsample_factors));
TENSORSTORE_RETURN_IF_ERROR(options.TransformInputSpaceSchema(transform));
return internal::TransformAndApplyOptions(base, std::move(options));
}
constexpr static auto default_json_binder = jb::Object(
jb::Member("base",
[](auto is_loading, const auto& options, auto* obj, auto* j) {
return jb::Projection<&DownsampleDriverSpec::base>()(
is_loading,
JsonSerializationOptions(options, obj->schema.dtype(),
obj->schema.rank()),
obj, j);
}),
jb::Initialize([](auto* obj) { return obj->InitializeFromBase(); }),
jb::Member("downsample_factors",
jb::Validate(
[](const auto& options, auto* obj) {
return obj->ValidateDownsampleFactors();
},
jb::Projection<&DownsampleDriverSpec::downsample_factors>(
jb::Array(jb::Integer<Index>(1))))),
jb::Member(
"downsample_method",
jb::Validate(
[](const auto& options, auto* obj) {
return obj->ValidateDownsampleMethod();
},
jb::Projection<&DownsampleDriverSpec::downsample_method>())),
jb::Initialize([](auto* obj) {
SpecOptions base_options;
static_cast<Schema&>(base_options) = std::exchange(obj->schema, {});
return obj->ApplyOptions(std::move(base_options));
}));
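  // Illustrative example (not part of the original source): a JSON spec for
  // this driver might look like
  //   {"driver": "downsample",
  //    "base": {...},
  //    "downsample_factors": [2, 2],
  //    "downsample_method": "mean"}
  // where "mean" is assumed to be one of the supported DownsampleMethod
  // values.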
Result<IndexDomain<>> GetDomain() const override {
TENSORSTORE_ASSIGN_OR_RETURN(auto domain,
internal::GetEffectiveDomain(base));
if (!domain.valid()) {
return schema.domain();
}
if (domain.rank() != downsample_factors.size()) {
return absl::InternalError(tensorstore::StrCat(
"Domain of base TensorStore has rank (", domain.rank(),
") but expected ", downsample_factors.size()));
}
auto downsampled_domain = internal_downsample::DownsampleDomain(
domain, downsample_factors, downsample_method);
return MergeIndexDomains(std::move(downsampled_domain), schema.domain());
}
Result<ChunkLayout> GetChunkLayout() const override {
return internal::GetEffectiveChunkLayout(base) |
AllDims().Stride(downsample_factors);
}
Result<CodecSpec> GetCodec() const override {
return internal::GetEffectiveCodec(base);
}
Result<SharedArray<const void>> GetFillValue(
IndexTransformView<> transform) const override {
return {std::in_place};
}
Result<DimensionUnitsVector> GetDimensionUnits() const override {
TENSORSTORE_ASSIGN_OR_RETURN(auto dimension_units,
internal::GetEffectiveDimensionUnits(base));
if (!dimension_units.empty()) {
span<const Index> downsample_factors = this->downsample_factors;
TENSORSTORE_ASSIGN_OR_RETURN(
auto transform,
tensorstore::IdentityTransform(downsample_factors.size()) |
tensorstore::AllDims().Stride(downsample_factors));
dimension_units =
TransformOutputDimensionUnits(transform, std::move(dimension_units));
}
return dimension_units;
}
kvstore::Spec GetKvstore() const override {
return base.driver_spec->GetKvstore();
}
Result<TransformedDriverSpec> GetBase(
IndexTransformView<> transform) const override {
TransformedDriverSpec new_base;
new_base.driver_spec = base.driver_spec;
if (transform.valid()) {
TENSORSTORE_ASSIGN_OR_RETURN(
new_base.transform,
GetBaseTransformForDownsampledTransform(
base.transform.valid()
? base.transform
: tensorstore::IdentityTransform(downsample_factors.size()),
transform, downsample_factors, downsample_method));
}
return new_base;
}
Future<internal::Driver::Handle> Open(
internal::DriverOpenRequest request) const override {
if (!!(request.read_write_mode & ReadWriteMode::write)) {
return absl::InvalidArgumentError("only reading is supported");
}
request.read_write_mode = ReadWriteMode::read;
return MapFutureValue(
InlineExecutor{},
[spec = internal::DriverSpec::PtrT<const DownsampleDriverSpec>(this)](
internal::Driver::Handle handle)
-> Result<internal::Driver::Handle> {
TENSORSTORE_ASSIGN_OR_RETURN(
auto downsampled_handle,
MakeDownsampleDriver(std::move(handle), spec->downsample_factors,
spec->downsample_method));
if (auto domain = spec->schema.domain(); domain.valid()) {
TENSORSTORE_RETURN_IF_ERROR(
MergeIndexDomains(domain,
downsampled_handle.transform.domain()),
tensorstore::MaybeAnnotateStatus(
_, "downsampled domain does not match domain in schema"));
}
return downsampled_handle;
},
internal::OpenDriver(base, std::move(request)));
}
};
class DownsampleDriver
: public internal::RegisteredDriver<DownsampleDriver,
internal::Driver> {
public:
Result<TransformedDriverSpec> GetBoundSpec(
internal::OpenTransactionPtr transaction,
IndexTransformView<> transform) override {
auto driver_spec = internal::DriverSpec::Make<DownsampleDriverSpec>();
driver_spec->context_binding_state_ = ContextBindingState::bound;
TENSORSTORE_ASSIGN_OR_RETURN(
driver_spec->base,
base_driver_->GetBoundSpec(std::move(transaction), base_transform_));
driver_spec->downsample_factors = downsample_factors_;
driver_spec->downsample_method = downsample_method_;
TENSORSTORE_RETURN_IF_ERROR(driver_spec->InitializeFromBase());
TransformedDriverSpec spec;
spec.transform = transform;
spec.driver_spec = std::move(driver_spec);
return spec;
}
Result<ChunkLayout> GetChunkLayout(IndexTransformView<> transform) override {
TENSORSTORE_ASSIGN_OR_RETURN(auto strided_base_transform,
GetStridedBaseTransform());
return base_driver_->GetChunkLayout(strided_base_transform) | transform;
}
Result<CodecSpec> GetCodec() override { return base_driver_->GetCodec(); }
Result<SharedArray<const void>> GetFillValue(
IndexTransformView<> transform) override {
if (downsample_method_ == DownsampleMethod::kStride) {
TENSORSTORE_ASSIGN_OR_RETURN(auto strided_transform,
GetStridedBaseTransform() | transform);
return base_driver_->GetFillValue(strided_transform);
}
PropagatedIndexTransformDownsampling propagated;
TENSORSTORE_RETURN_IF_ERROR(
internal_downsample::PropagateAndComposeIndexTransformDownsampling(
transform, base_transform_, downsample_factors_, propagated));
TENSORSTORE_ASSIGN_OR_RETURN(
auto fill_value, base_driver_->GetFillValue(propagated.transform));
if (!fill_value.valid()) return {std::in_place};
TENSORSTORE_ASSIGN_OR_RETURN(
auto broadcast_fill_value,
BroadcastArray(std::move(fill_value),
propagated.transform.domain().box()));
TENSORSTORE_ASSIGN_OR_RETURN(
auto downsampled_fill_value,
internal_downsample::DownsampleArray(
broadcast_fill_value, propagated.input_downsample_factors,
downsample_method_));
return UnbroadcastArray(downsampled_fill_value);
}
Result<DimensionUnitsVector> GetDimensionUnits() override {
TENSORSTORE_ASSIGN_OR_RETURN(auto dimension_units,
base_driver_->GetDimensionUnits());
TENSORSTORE_ASSIGN_OR_RETURN(auto strided_base_transform,
GetStridedBaseTransform());
return TransformOutputDimensionUnits(strided_base_transform,
std::move(dimension_units));
}
KvStore GetKvstore(const Transaction& transaction) override {
return base_driver_->GetKvstore(transaction);
}
Result<internal::DriverHandle> GetBase(
ReadWriteMode read_write_mode, IndexTransformView<> transform,
const Transaction& transaction) override {
internal::DriverHandle base_handle;
base_handle.driver = base_driver_;
base_handle.driver.set_read_write_mode(read_write_mode);
base_handle.transaction = transaction;
TENSORSTORE_ASSIGN_OR_RETURN(base_handle.transform,
GetBaseTransformForDownsampledTransform(
base_transform_, transform,
downsample_factors_, downsample_method_));
return base_handle;
}
Future<ArrayStorageStatistics> GetStorageStatistics(
GetStorageStatisticsRequest request) override;
explicit DownsampleDriver(DriverPtr base, IndexTransform<> base_transform,
span<const Index> downsample_factors,
DownsampleMethod downsample_method)
: base_driver_(std::move(base)),
base_transform_(std::move(base_transform)),
downsample_factors_(downsample_factors.begin(),
downsample_factors.end()),
downsample_method_(downsample_method) {}
DataType dtype() override { return base_driver_->dtype(); }
DimensionIndex rank() override { return base_transform_.input_rank(); }
Executor data_copy_executor() override {
return base_driver_->data_copy_executor();
}
void Read(ReadRequest request,
AnyFlowReceiver<absl::Status, ReadChunk, IndexTransform<>> receiver)
override;
Result<IndexTransform<>> GetStridedBaseTransform() {
return base_transform_ | tensorstore::AllDims().Stride(downsample_factors_);
}
Future<IndexTransform<>> ResolveBounds(ResolveBoundsRequest request) override;
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.base_driver_, x.base_transform_, x.downsample_factors_,
x.downsample_method_);
};
DriverPtr base_driver_;
IndexTransform<> base_transform_;
std::vector<Index> downsample_factors_;
DownsampleMethod downsample_method_;
};
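// Resolves bounds on the base driver, downsamples the resolved base bounds
// via DownsampleBounds, and propagates them to the request transform.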
Future<IndexTransform<>> DownsampleDriver::ResolveBounds(
ResolveBoundsRequest request) {
return MapFutureValue(
InlineExecutor{},
[self = IntrusivePtr<DownsampleDriver>(this),
transform = std::move(request.transform)](
IndexTransform<> base_transform) -> Result<IndexTransform<>> {
Box<dynamic_rank(internal::kNumInlinedDims)> downsampled_bounds(
base_transform.input_rank());
internal_downsample::DownsampleBounds(
base_transform.domain().box(), downsampled_bounds,
self->downsample_factors_, self->downsample_method_);
return tensorstore::PropagateBoundsToTransform(
downsampled_bounds, base_transform.implicit_lower_bounds(),
base_transform.implicit_upper_bounds(), std::move(transform));
},
base_driver_->ResolveBounds({std::move(request.transaction),
base_transform_,
std::move(request.options)}));
}
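// Shared state for one downsampled read. Chunks downsampled and emitted
// directly are recorded in `independently_emitted_chunks_`; remaining data is
// accumulated into `data_buffer_` and emitted as buffered chunks once the
// base read finishes. `lock()`/`unlock()` let `ReadState` itself serve as the
// lockable for `std::lock_guard`, with `unlock()` flushing any pending
// done/error/cancel signals after releasing the mutex.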
struct ReadState : public internal::AtomicReferenceCount<ReadState> {
IntrusivePtr<DownsampleDriver> self_;
AnyFlowReceiver<absl::Status, ReadChunk, IndexTransform<>> receiver_;
absl::Mutex mutex_;
SharedOffsetArray<void> data_buffer_;
Index remaining_elements_;
internal_downsample::GridOccupancyTracker independently_emitted_chunks_;
absl::InlinedVector<Index, internal::kNumInlinedDims> downsample_factors_;
DimensionIndex original_input_rank_;
IndexDomain<> base_transform_domain_;
AnyCancelReceiver on_cancel_;
absl::Status error_;
bool done_signal_received_ = false;
bool done_sent_ = false;
bool canceled_ = false;
size_t chunks_in_progress_ = 0;
void Cancel() {
std::lock_guard<ReadState> guard(*this);
canceled_ = true;
}
void lock() ABSL_NO_THREAD_SAFETY_ANALYSIS { mutex_.Lock(); }
void unlock() ABSL_NO_THREAD_SAFETY_ANALYSIS {
bool has_error = !error_.ok();
bool send_done = !done_sent_ && chunks_in_progress_ == 0 &&
(done_signal_received_ || has_error);
if (send_done) done_sent_ = true;
AnyCancelReceiver on_cancel;
if (canceled_ && on_cancel_) {
on_cancel = std::move(on_cancel_);
}
mutex_.Unlock();
if (on_cancel) on_cancel();
if (!send_done) return;
if (has_error) {
execution::set_error(receiver_, error_);
} else {
execution::set_done(receiver_);
}
execution::set_stopping(receiver_);
}
void SetError(absl::Status error, size_t decrement_chunks_in_progress = 0) {
std::lock_guard<ReadState> guard(*this);
chunks_in_progress_ -= decrement_chunks_in_progress;
if (!error_.ok()) return;
error_ = std::move(error);
canceled_ = true;
}
void EmitBufferedChunkForBox(BoxView<> base_domain);
void EmitBufferedChunks();
};
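// ReadChunk implementation backed by `ReadState::data_buffer_`: on BeginRead,
// the chunk transform is propagated through the downsampling, applied to the
// buffered array, and the resulting iterable is wrapped in a downsampling
// NDIterable.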
struct BufferedReadChunkImpl {
internal::IntrusivePtr<ReadState> state_;
absl::Status operator()(LockCollection& lock_collection) const {
return absl::OkStatus();
}
Result<NDIterable::Ptr> operator()(internal::ReadChunk::BeginRead,
IndexTransform<> chunk_transform,
internal::Arena* arena) const {
TENSORSTORE_ASSIGN_OR_RETURN(
auto propagated,
internal_downsample::PropagateIndexTransformDownsampling(
chunk_transform, state_->data_buffer_.domain(),
state_->downsample_factors_));
TENSORSTORE_ASSIGN_OR_RETURN(
auto transformed_array,
MakeTransformedArray(state_->data_buffer_,
std::move(propagated.transform)));
TENSORSTORE_ASSIGN_OR_RETURN(
auto base_nditerable,
GetTransformedArrayNDIterable(transformed_array, arena));
return internal_downsample::DownsampleNDIterable(
std::move(base_nditerable), transformed_array.domain().box(),
propagated.input_downsample_factors, state_->self_->downsample_method_,
chunk_transform.input_rank(), arena);
}
};
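// Builds an identity transform over the downsampled bounds of `base_domain`,
// restricted to the first `request_rank` output dimensions; this labels a
// buffered chunk with its position in the downsampled request domain.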
IndexTransform<> GetDownsampledRequestIdentityTransform(
BoxView<> base_domain, span<const Index> downsample_factors,
DownsampleMethod downsample_method, DimensionIndex request_rank) {
assert(base_domain.rank() == downsample_factors.size());
assert(request_rank <= base_domain.rank());
IndexTransformBuilder builder(base_domain.rank(), request_rank);
internal_downsample::DownsampleBounds(base_domain, builder.input_bounds(),
downsample_factors, downsample_method);
builder.output_identity_transform();
return builder.Finalize().value();
}
void ReadState::EmitBufferedChunkForBox(BoxView<> base_domain) {
auto request_transform = GetDownsampledRequestIdentityTransform(
base_domain, downsample_factors_, self_->downsample_method_,
original_input_rank_);
ReadChunk downsampled_chunk;
downsampled_chunk.transform =
IdentityTransform(request_transform.domain().box());
downsampled_chunk.impl = BufferedReadChunkImpl{IntrusivePtr<ReadState>(this)};
execution::set_value(receiver_, std::move(downsampled_chunk),
std::move(request_transform));
}
void ReadState::EmitBufferedChunks() {
if (independently_emitted_chunks_.occupied_chunks.empty()) {
EmitBufferedChunkForBox(base_transform_domain_.box());
} else { | #include "tensorstore/downsample.h"
#include <stdint.h>
#include <memory>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/array.h"
#include "tensorstore/chunk_layout.h"
#include "tensorstore/context.h"
#include "tensorstore/data_type.h"
#include "tensorstore/downsample_method.h"
#include "tensorstore/driver/array/array.h"
#include "tensorstore/driver/driver_testutil.h"
#include "tensorstore/driver/read.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/global_initializer.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/open.h"
#include "tensorstore/open_mode.h"
#include "tensorstore/schema.h"
#include "tensorstore/spec.h"
#include "tensorstore/static_cast.h"
#include "tensorstore/tensorstore.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender_util.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
#include "tensorstore/util/unit.h"
namespace {
using ::tensorstore::BoxView;
using ::tensorstore::ChunkLayout;
using ::tensorstore::Context;
using ::tensorstore::DimensionIndex;
using ::tensorstore::DownsampleMethod;
using ::tensorstore::IdentityTransform;
using ::tensorstore::Index;
using ::tensorstore::MakeArray;
using ::tensorstore::MakeOffsetArray;
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::ReadWriteMode;
using ::tensorstore::Spec;
using ::tensorstore::TensorStore;
using ::tensorstore::internal::CollectReadChunks;
using ::tensorstore::internal::MakeArrayBackedReadChunk;
using ::tensorstore::internal::MockDriver;
using ::tensorstore::internal::ReadAsIndividualChunks;
using ::tensorstore::internal::TestSpecSchema;
using ::tensorstore::internal::TestTensorStoreCreateCheckSchema;
using ::testing::Optional;
using ::testing::Pair;
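// Rank-1 mean with factor 2: blocks {1, 2} and {5, 7} average to 1.5 and 6.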
TEST(DownsampleTest, Rank1Mean) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::FromArray(MakeArray<float>({1, 2, 5, 7})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store,
tensorstore::Downsample(store, {2}, DownsampleMethod::kMean));
EXPECT_THAT(tensorstore::Read(downsampled_store).result(),
Optional(MakeArray<float>({1.5, 6})));
}
TEST(DownsampleTest, Rank1Min) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::FromArray(MakeArray<float>({1, 2, 5, 7})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store,
tensorstore::Downsample(store, {2}, DownsampleMethod::kMin));
EXPECT_THAT(tensorstore::Read(downsampled_store).result(),
Optional(MakeArray<float>({1, 5})));
}
TEST(DownsampleTest, Rank3Empty) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
tensorstore::FromArray(tensorstore::AllocateArray<float>({2, 0, 3})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store,
tensorstore::Downsample(store, {2, 3, 2}, DownsampleMethod::kMean));
EXPECT_THAT(tensorstore::Read(downsampled_store).result(),
Optional(tensorstore::AllocateArray<float>({1, 0, 2})));
}
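// With origin 1 and factor 2, cells stay aligned to the grid {[0,2), [2,4),
// [4,6)}: the partial edge cells average {1} -> 1 and {7} -> 7, while the
// interior cell averages {2, 5} -> 3.5.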
TEST(DownsampleTest, Rank1MeanTranslated) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
tensorstore::FromArray(MakeOffsetArray<float>({1}, {1, 2, 5, 7})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store,
tensorstore::Downsample(store, {2}, DownsampleMethod::kMean));
EXPECT_THAT(tensorstore::Read(downsampled_store).result(),
Optional(MakeArray<float>({1, 3.5, 7})));
}
TEST(DownsampleTest, Rank1Stride) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::FromArray(MakeArray<float>({1, 2, 5, 7})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store,
tensorstore::Downsample(store, {2}, DownsampleMethod::kStride));
EXPECT_THAT(tensorstore::Read(downsampled_store).result(),
Optional(MakeArray<float>({1, 5})));
}
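// Chunked reads through the n5 driver: with blockSize {3} and factor 2, some
// downsample cells straddle chunk boundaries and must be buffered before
// they can be emitted.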
TEST(DownsampleTest, Rank1MeanChunked) {
::nlohmann::json base_spec{{"driver", "n5"},
{"kvstore", {{"driver", "memory"}}},
{"metadata",
{{"dataType", "uint8"},
{"dimensions", {11}},
{"blockSize", {3}},
{"compression", {{"type", "raw"}}}}}};
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto base_store,
tensorstore::Open(base_spec, context, tensorstore::OpenMode::create)
.result());
TENSORSTORE_ASSERT_OK(tensorstore::Write(
MakeArray<uint8_t>({0, 2, 3, 9, 1, 5, 7, 3, 4, 0, 5}), base_store));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store, tensorstore::Open({{"driver", "downsample"},
{"base", base_spec},
{"downsample_factors", {2}},
{"downsample_method", "mean"}},
context)
.result());
EXPECT_THAT(tensorstore::Read(downsampled_store).result(),
Optional(MakeArray<uint8_t>({1, 6, 3, 5, 2, 5})));
}
TEST(DownsampleTest, Rank1MeanChunkedTranslated) {
::nlohmann::json base_spec{{"driver", "n5"},
{"kvstore", {{"driver", "memory"}}},
{"metadata",
{{"dataType", "uint8"},
{"dimensions", {11}},
{"blockSize", {3}},
{"compression", {{"type", "raw"}}}}},
{"transform",
{
{"input_inclusive_min", {1}},
{"input_exclusive_max", {12}},
{"output",
{
{{"input_dimension", 0}, {"offset", -1}},
}},
}}};
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto base_store,
tensorstore::Open(base_spec, context, tensorstore::OpenMode::create)
.result());
TENSORSTORE_ASSERT_OK(tensorstore::Write(
MakeArray<uint8_t>({0, 2, 3, 9, 1, 5, 7, 3, 4, 0, 5}), base_store));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store, tensorstore::Open({{"driver", "downsample"},
{"base", base_spec},
{"downsample_factors", {2}},
{"downsample_method", "mean"}},
context)
.result());
EXPECT_THAT(ReadAsIndividualChunks(downsampled_store).result(),
Optional(::testing::UnorderedElementsAre(
Pair(MakeOffsetArray<uint8_t>({0}, {0, 2}),
IdentityTransform(BoxView({0}, {2}))),
Pair(MakeOffsetArray<uint8_t>({5}, {2}),
IdentityTransform(BoxView({5}, {1}))),
Pair(MakeOffsetArray<uint8_t>({2}, {5, 6, 4}),
IdentityTransform(BoxView({2}, {3}))))));
EXPECT_THAT(tensorstore::Read(downsampled_store).result(),
Optional(MakeArray<uint8_t>({0, 2, 5, 6, 4, 2})));
}
TEST(DownsampleTest, Rank1MeanChunkedIndexArray) {
::nlohmann::json base_spec{{"driver", "n5"},
{"kvstore", {{"driver", "memory"}}},
{"metadata",
{{"dataType", "uint8"},
{"dimensions", {11}},
{"blockSize", {3}},
{"compression", {{"type", "raw"}}}}}};
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto base_store,
tensorstore::Open(base_spec, context, tensorstore::OpenMode::create)
.result());
TENSORSTORE_ASSERT_OK(tensorstore::Write(
MakeArray<uint8_t>({0, 2, 3, 9, 1, 5, 7, 3, 4, 0, 5}), base_store));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store, tensorstore::Open({{"driver", "downsample"},
{"base", base_spec},
{"downsample_factors", {2}},
{"downsample_method", "mean"}},
context)
.result());
EXPECT_THAT(tensorstore::Read(downsampled_store |
tensorstore::Dims(0).IndexArraySlice(
MakeArray<Index>({0, 3, 2})))
.result(),
Optional(MakeArray<uint8_t>({1, 5, 3})));
}
TEST(DownsampleTest, JsonSpecArray) {
::nlohmann::json base_spec{
{"driver", "array"},
{"dtype", "float32"},
{"array", {1, 2, 3, 4}},
};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::Open({{"driver", "downsample"},
{"base", base_spec},
{"downsample_factors", {2}},
{"downsample_method", "mean"}})
.result());
EXPECT_THAT(tensorstore::Read(store).result(),
Optional(MakeArray<float>({1.5, 3.5})));
}
TEST(DownsampleTest, JsonSpecArrayRank0) {
::nlohmann::json base_spec{
{"driver", "array"},
{"dtype", "float32"},
{"array", 42},
};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
tensorstore::Open({{"driver", "downsample"},
{"base", base_spec},
{"downsample_factors", ::nlohmann::json::array_t{}},
{"downsample_method", "mean"}})
.result());
EXPECT_THAT(tensorstore::Read(store).result(),
Optional(tensorstore::MakeScalarArray<float>(42)));
}
TEST(DownsampleTest, JsonSpecErrorMissingBase) {
EXPECT_THAT(
tensorstore::Open({
{"driver", "downsample"},
{"downsample_factors", {2}},
{"downsample_method", "mean"},
})
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument, ".*\"base\".*"));
}
TEST(DownsampleTest, JsonSpecErrorMissingDownsampleFactors) {
::nlohmann::json base_spec{
{"driver", "array"},
{"dtype", "float32"},
{"array", {1, 2, 3, 4}},
};
EXPECT_THAT(tensorstore::Open({
{"driver", "downsample"},
{"base", base_spec},
{"downsample_method", "mean"},
})
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*\"downsample_factors\".*"));
}
TEST(DownsampleTest, JsonSpecErrorDownsampleFactorsInvalidRank) {
::nlohmann::json base_spec{
{"driver", "array"},
{"dtype", "float32"},
{"array", {1, 2, 3, 4}},
};
EXPECT_THAT(tensorstore::Open({
{"driver", "downsample"},
{"base", base_spec},
{"downsample_method", "mean"},
{"downsample_factors", {2, 3}},
})
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*\"downsample_factors\": .*rank.*"));
}
TEST(DownsampleTest, JsonSpecErrorDownsampleFactorsZero) {
::nlohmann::json base_spec{
{"driver", "array"},
{"dtype", "float32"},
{"array", {1, 2, 3, 4}},
};
EXPECT_THAT(
tensorstore::Open({
{"driver", "downsample"},
{"base", base_spec},
{"downsample_method", "mean"},
{"downsample_factors", {0}},
})
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*\"downsample_factors\":.*Expected .*, but received: 0"));
}
TEST(DownsampleTest, JsonSpecErrorDownsampleFactorsNegative) {
::nlohmann::json base_spec{
{"driver", "array"},
{"dtype", "float32"},
{"array", {1, 2, 3, 4}},
};
EXPECT_THAT(tensorstore::Open({
{"driver", "downsample"},
{"base", base_spec},
{"downsample_method", "mean"},
{"downsample_factors", {-2}},
})
.result(),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
".*\"downsample_factors\":.*Expected .*, but received: -2"));
}
TEST(DownsampleTest, JsonSpecErrorMissingDownsampleMethod) {
::nlohmann::json base_spec{
{"driver", "array"},
{"dtype", "float32"},
{"array", {1, 2, 3, 4}},
};
EXPECT_THAT(tensorstore::Open({
{"driver", "downsample"},
{"base", base_spec},
{"downsample_factors", {2}},
})
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*\"downsample_method\".*"));
}
TEST(DownsampleTest, JsonSpecErrorInvalidDownsampleMethod) {
::nlohmann::json base_spec{
{"driver", "array"},
{"dtype", "float32"},
{"array", {1, 2, 3, 4}},
};
EXPECT_THAT(tensorstore::Open({
{"driver", "downsample"},
{"base", base_spec},
{"downsample_factors", {2}},
{"downsample_method", 42},
})
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*\"downsample_method\".*42.*"));
}
TEST(DownsampleTest, ErrorOpenWriteOnly) {
::nlohmann::json base_spec{
{"driver", "array"},
{"dtype", "float32"},
{"array", {1, 2, 3, 4}},
};
for (auto mode : {ReadWriteMode::write, ReadWriteMode::read_write}) {
SCOPED_TRACE(tensorstore::StrCat("mode=", mode));
EXPECT_THAT(tensorstore::Open(
{
{"driver", "downsample"},
{"base", base_spec},
{"downsample_factors", {2}},
{"downsample_method", "mean"},
},
mode)
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*: only reading is supported"));
}
}
TEST(DownsampleTest, AdapterErrorNegativeDownsampleFactor) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::FromArray(MakeArray<float>({1, 2, 5, 7})));
EXPECT_THAT(
tensorstore::Downsample(store, {-2}, DownsampleMethod::kMean),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Downsample factors \\{-2\\} are not all positive"));
}
TEST(DownsampleTest, AdapterErrorZeroDownsampleFactor) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::FromArray(MakeArray<float>({1, 2, 5, 7})));
EXPECT_THAT(tensorstore::Downsample(store, {0}, DownsampleMethod::kMean),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Downsample factors \\{0\\} are not all positive"));
}
TEST(DownsampleTest, AdapterErrorDownsampleFactorsRankMismatch) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
TensorStore<float> store,
tensorstore::FromArray(MakeArray<float>({1, 2, 5, 7})));
EXPECT_THAT(
tensorstore::Downsample(store, {2, 2}, DownsampleMethod::kMean),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Number of downsample factors \\(2\\) does not match "
"TensorStore rank \\(1\\)"));
}
TEST(DownsampleTest, AdapterErrorDataType) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
tensorstore::FromArray(MakeArray<std::string>({"a", "b", "c"})));
TENSORSTORE_EXPECT_OK(
tensorstore::Downsample(store, {2}, DownsampleMethod::kStride));
EXPECT_THAT(tensorstore::Downsample(store, {2}, DownsampleMethod::kMean),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Downsample method \"mean\" does not support "
"data type \"string\""));
}
TEST(DownsampleTest, AdapterErrorWriteOnly) {
tensorstore::TensorStore<float, 1> store;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
store, tensorstore::FromArray(MakeArray<float>({1, 2, 3})));
store = tensorstore::ModeCast<ReadWriteMode::write, tensorstore::unchecked>(
std::move(store));
EXPECT_THAT(tensorstore::Downsample(store, {2}, DownsampleMethod::kMean),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot downsample write-only TensorStore"));
}
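// The tests below drive the read path directly through a MockDriver,
// exercising error propagation, cancellation, and the interaction between
// independently emitted and buffered chunks.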
TEST(DownsampleTest, ReadError) {
auto mock_driver = MockDriver::Make(tensorstore::ReadWriteMode::dynamic,
tensorstore::dtype_v<float>, 1);
auto mock_store = mock_driver->Wrap(tensorstore::IdentityTransform<1>({10}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store,
tensorstore::Downsample(mock_store, {2}, DownsampleMethod::kMean));
auto read_future = tensorstore::Read(downsampled_store);
{
auto read_req = mock_driver->read_requests.pop();
EXPECT_EQ(tensorstore::IdentityTransform<1>({10}), read_req.transform);
tensorstore::execution::set_error(
tensorstore::FlowSingleReceiver{std::move(read_req.receiver)},
absl::UnknownError("read error"));
}
EXPECT_THAT(read_future.result(),
MatchesStatus(absl::StatusCode::kUnknown, "read error"));
}
TEST(DownsampleTest, CancelRead) {
auto mock_driver = MockDriver::Make(tensorstore::ReadWriteMode::dynamic,
tensorstore::dtype_v<float>, 1);
auto mock_store = mock_driver->Wrap(tensorstore::IdentityTransform<1>({10}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store,
tensorstore::Downsample(mock_store, {2}, DownsampleMethod::kMean));
auto read_future = tensorstore::Read(downsampled_store);
auto canceled = std::make_shared<bool>(false);
{
auto read_req = mock_driver->read_requests.pop();
tensorstore::execution::set_starting(read_req.receiver,
[canceled] { *canceled = true; });
read_future = {};
EXPECT_EQ(true, *canceled);
tensorstore::execution::set_done(read_req.receiver);
tensorstore::execution::set_stopping(read_req.receiver);
}
}
TEST(DownsampleTest, IndependentChunkCompletesBufferedChunk) {
auto mock_driver = MockDriver::Make(tensorstore::ReadWriteMode::dynamic,
tensorstore::dtype_v<float>, 1);
auto mock_store = mock_driver->Wrap(tensorstore::IdentityTransform<1>({4}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store,
tensorstore::Downsample(mock_store, {2}, DownsampleMethod::kMean));
auto read_future = tensorstore::Read(downsampled_store);
{
auto read_req = mock_driver->read_requests.pop();
tensorstore::execution::set_starting(read_req.receiver, [] {});
tensorstore::execution::set_value(
read_req.receiver, MakeArrayBackedReadChunk(MakeArray<float>({0, 1})),
(tensorstore::IdentityTransform(1) |
tensorstore::Dims(0).IndexArraySlice(MakeArray<Index>({0, 1})))
.value());
tensorstore::execution::set_value(
read_req.receiver,
MakeArrayBackedReadChunk(MakeOffsetArray<float>({2}, {2, 3})),
tensorstore::IdentityTransform(BoxView<1>({2}, {2})));
tensorstore::execution::set_done(read_req.receiver);
tensorstore::execution::set_stopping(read_req.receiver);
}
ASSERT_TRUE(read_future.ready());
EXPECT_THAT(read_future.result(), Optional(MakeArray<float>({0.5, 2.5})));
}
TEST(DownsampleTest, EmptyChunk) {
auto mock_driver = MockDriver::Make(tensorstore::ReadWriteMode::dynamic,
tensorstore::dtype_v<float>, 1);
auto mock_store = mock_driver->Wrap(tensorstore::IdentityTransform<1>({10}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store,
tensorstore::Downsample(mock_store, {2}, DownsampleMethod::kMean));
auto read_future = tensorstore::Read(downsampled_store);
{
auto read_req = mock_driver->read_requests.pop();
EXPECT_EQ(tensorstore::IdentityTransform<1>({10}), read_req.transform);
tensorstore::execution::set_error(
tensorstore::FlowSingleReceiver{std::move(read_req.receiver)},
absl::UnknownError("read error"));
}
EXPECT_THAT(read_future.result(),
MatchesStatus(absl::StatusCode::kUnknown, "read error"));
}
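// A 2x3 mean over the 3x5 array yields partial edge cells, e.g. the
// bottom-right cell {14, 15} -> 14.5; the test then applies index-array
// transforms to the resulting read chunk.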
TEST(DownsampleTest, ReadChunkWithIndexTransform) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store,
tensorstore::FromArray(MakeArray<float>({
{1, 2, 3, 4, 5},
{6, 7, 8, 9, 10},
{11, 12, 13, 14, 15},
})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store,
tensorstore::Downsample(store, {2, 3}, DownsampleMethod::kMean));
EXPECT_THAT(tensorstore::Read(downsampled_store).result(),
Optional(MakeArray<float>({
{4.5, 7},
{12, 14.5},
})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto chunks, CollectReadChunks(downsampled_store).result());
ASSERT_THAT(chunks,
::testing::ElementsAre(Pair(
::testing::_, tensorstore::IdentityTransform<2>({2, 2}))));
auto& entry = chunks[0];
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform, entry.second | tensorstore::Dims(0).IndexArraySlice(
MakeArray<Index>({0, 1, 1, 0})));
auto target_array = tensorstore::AllocateArray<float>({4, 2});
TENSORSTORE_ASSERT_OK(tensorstore::internal::CopyReadChunk(
entry.first.impl, transform,
tensorstore::TransformedArray(target_array)));
EXPECT_EQ(MakeArray<float>({
{4.5, 7},
{12, 14.5},
{12, 14.5},
{4.5, 7},
}),
target_array);
}
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform, entry.second | tensorstore::Dims(0, 1).IndexArraySlice(
MakeArray<Index>({0, 1, 1}),
MakeArray<Index>({0, 0, 1})));
auto target_array = tensorstore::AllocateArray<float>({3});
TENSORSTORE_ASSERT_OK(tensorstore::internal::CopyReadChunk(
entry.first.impl, transform,
tensorstore::TransformedArray(target_array)));
EXPECT_EQ(MakeArray<float>({4.5, 12, 14.666666666666666}), target_array);
}
}
TEST(DownsampleTest, ConvertError) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store,
tensorstore::Open({
{"driver", "downsample"},
{"base",
{
{"driver", "cast"},
{"base",
{
{"driver", "array"},
{"dtype", "json"},
{"array", {1, "abc", 2}},
}},
{"dtype", "uint8"},
}},
{"downsample_method", "mean"},
{"downsample_factors", {2}},
})
.result());
auto dest = tensorstore::MakeArray<uint8_t>({0, 0});
EXPECT_THAT(
tensorstore::Read(downsampled_store, dest).result(),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Expected integer in the range \\[0, 255\\], but received: \"abc\""));
EXPECT_EQ(dest, MakeArray<uint8_t>({0, 0}));
}
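// Registers the standard driver spec round-trip test: the minimal create
// spec must round-trip to `full_spec`, with the base's dtype and domain made
// explicit.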
TENSORSTORE_GLOBAL_INITIALIZER {
tensorstore::internal::TestTensorStoreDriverSpecRoundtripOptions options;
options.test_name = "downsample";
options.create_spec = {
{"driver", "downsample"},
{"base",
{
{"driver", "array"},
{"dtype", "float32"},
{"array", {{1, 2, 3}, {4, 5, 6}}},
}},
{"downsample_method", "mean"},
{"downsample_factors", {1, 2}},
};
options.full_spec = {
{"driver", "downsample"},
{"base",
{
{"driver", "array"},
{"array", {{1, 2, 3}, {4, 5, 6}}},
{"transform",
{{"input_inclusive_min", {0, 0}}, {"input_exclusive_max", {2, 3}}}},
}},
{"dtype", "float32"},
{"downsample_method", "mean"},
{"downsample_factors", {1, 2}},
{"transform",
{{"input_inclusive_min", {0, 0}}, {"input_exclusive_max", {2, 2}}}},
};
options.full_base_spec = {
{"driver", "array"},
{"array", {{1, 2, 3}, {4, 5, 6}}},
{"dtype", "float32"},
{"transform",
{{"input_inclusive_min", {0, 0}}, {"input_exclusive_max", {2, 3}}}},
};
options.minimal_spec = options.full_spec;
options.check_not_found_before_create = false;
options.check_not_found_before_commit = false;
options.supported_transaction_modes = {};
tensorstore::internal::RegisterTensorStoreDriverSpecRoundtripTest(
std::move(options));
}
TEST(DownsampleTest, Spec) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec, Spec::FromJson({
{"driver", "array"},
{"dtype", "float32"},
{"array", {1, 2, 3, 4}},
}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_spec,
tensorstore::Downsample(spec, {2}, DownsampleMethod::kMean));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_store, tensorstore::Open(downsampled_spec).result());
EXPECT_THAT(tensorstore::Read(downsampled_store).result(),
Optional(MakeArray<float>({1.5, 3.5})));
EXPECT_THAT(
downsampled_spec.ToJson(),
Optional(MatchesJson(::nlohmann::json({
{"driver", "downsample"},
{"dtype", "float32"},
{"base",
{
{"driver", "array"},
{"array", {1, 2, 3, 4}},
{"transform",
{{"input_inclusive_min", {0}}, {"input_exclusive_max", {4}}}},
}},
{"downsample_factors", {2}},
{"downsample_method", "mean"},
{"transform",
{{"input_inclusive_min", {0}}, {"input_exclusive_max", {2}}}},
}))));
}
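// The downsampled chunk layout divides the base block shape by the
// downsample factors: {10, 21} / {2, 3} -> {5, 7}.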
TEST(DownsampleTest, ChunkLayout) {
::nlohmann::json base_spec{
{"driver", "n5"},
{"kvstore", {{"driver", "memory"}}},
{"metadata",
{{"dataType", "uint8"},
{"dimensions", {100, 200}},
{"blockSize", {10, 21}},
{"compression", {{"type", "raw"}}}}},
};
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto base_store,
tensorstore::Open(base_spec, context, tensorstore::OpenMode::create)
.result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::Open({{"driver", "downsample"},
{"base", base_spec},
{"downsample_factors", {2, 3}},
{"downsample_method", "mean"}},
context)
.result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected_layout,
ChunkLayout::FromJson({
{"write_chunk", {{"shape", {5, 7}}}},
{"read_chunk", {{"shape", {5, 7}}}},
{"grid_origin", {0, 0}},
{"inner_order", {1, 0}},
}));
EXPECT_THAT(store.chunk_layout(), ::testing::Optional(expected_layout));
}
TEST(SpecSchemaTest, Basic) {
TestSpecSchema(
{
{"driver", "downsample"},
{"downsample_method", "mean"},
{"downsample_factors", {1, 2}},
{"base",
{
{"driver", "array"},
{"array", {{1, 2, 3, 4}, {5, 6, 7, 8}}},
{"dtype", "float32"},
}},
{"schema", {{"dimension_units", {"4nm", "5nm"}}}},
},
{
{"rank", 2},
{"dtype", "float32"},
{"domain", {{"shape", {2, 2}}}},
{"chunk_layout", {{"grid_origin", {0, 0}}, {"inner_order", {0, 1}}}},
{"dimension_units", {"4nm", "5nm"}},
});
}
TEST(TensorStoreCreateCheckSchemaTest, Basic) {
TestTensorStoreCreateCheckSchema(
{
{"driver", "downsample"},
{"downsample_method", "mean"},
{"downsample_factors", {1, 2}},
{"base",
{
{"driver", "array"},
{"array", {{1, 2, 3, 4}, {5, 6, 7, 8}}},
{"dtype", "float32"},
}},
{"schema", {{"dimension_units", {"4nm", "5nm"}}}},
},
{
{"rank", 2},
{"dtype", "float32"},
{"domain", {{"shape", {2, 2}}}},
{"chunk_layout", {{"grid_origin", {0, 0}}, {"inner_order", {0, 1}}}},
{"dimension_units", {"4nm", "5nm"}},
});
}
TEST(DownsampleTest, DomainSpecified) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto base_spec,
tensorstore::Spec::FromJson({
{"driver", "zarr"},
{"kvstore", {{"driver", "memory"}}},
}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_spec,
tensorstore::Downsample(base_spec, {2, 1}, DownsampleMethod::kMean));
TENSORSTORE_ASSERT_OK(
downsampled_spec.Set(tensorstore::Schema::Shape({10, 10})));
EXPECT_THAT(downsampled_spec.ToJson(),
::testing::Optional(MatchesJson({
{"driver", "downsample"},
{"base", |
522 | cpp | google/tensorstore | cast | tensorstore/driver/cast/cast.cc | tensorstore/driver/cast/cast_test.cc | #ifndef TENSORSTORE_DRIVER_CAST_CAST_H_
#define TENSORSTORE_DRIVER_CAST_CAST_H_
#include <cassert>
#include <type_traits>
#include "tensorstore/data_type.h"
#include "tensorstore/data_type_conversion.h"
#include "tensorstore/driver/driver.h"
#include "tensorstore/open_mode.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal {
Result<Driver::Handle> MakeCastDriver(
Driver::Handle base, DataType target_dtype,
ReadWriteMode read_write_mode = ReadWriteMode::dynamic);
Result<TransformedDriverSpec> MakeCastDriverSpec(TransformedDriverSpec base,
DataType target_dtype);
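// Computes the compile-time read-write mode for casting between element
// types: each direction is kept only if the corresponding conversion is
// supported, and `void` (unknown dtype) defers the decision to run time.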
template <typename SourceElement, typename TargetElement>
constexpr ReadWriteMode GetCastMode(ReadWriteMode existing_mode) {
if constexpr (std::is_void_v<SourceElement> ||
std::is_void_v<TargetElement>) {
return (existing_mode == ReadWriteMode::read_write) ? ReadWriteMode::dynamic
: existing_mode;
  } else if constexpr (std::is_same_v<SourceElement, TargetElement>) {
return existing_mode;
} else {
constexpr auto input_flags =
DataTypeConversionTraits<SourceElement, TargetElement>::flags;
constexpr auto output_flags =
DataTypeConversionTraits<TargetElement, SourceElement>::flags;
ReadWriteMode mode = ReadWriteMode{};
if ((input_flags & DataTypeConversionFlags::kSupported) ==
DataTypeConversionFlags::kSupported) {
mode = mode | ReadWriteMode::read;
}
if ((output_flags & DataTypeConversionFlags::kSupported) ==
DataTypeConversionFlags::kSupported) {
mode = mode | ReadWriteMode::write;
}
assert(mode != ReadWriteMode() && "Cannot convert data types");
assert((existing_mode == ReadWriteMode::dynamic) ||
(((existing_mode & mode) != ReadWriteMode()) &&
"Supported conversions incompatible with existing mode"));
return (mode == ReadWriteMode::read_write) ? existing_mode : mode;
}
}
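// Conversion functions selected for a cast: `input` converts base -> target
// for reads, `output` converts target -> base for writes, and `mode` records
// which directions remain supported.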
struct CastDataTypeConversions {
DataTypeConversionLookupResult input;
DataTypeConversionLookupResult output;
ReadWriteMode mode;
};
Result<CastDataTypeConversions> GetCastDataTypeConversions(
DataType source_dtype, DataType target_dtype, ReadWriteMode existing_mode,
ReadWriteMode required_mode);
}
}
#endif
#include "tensorstore/driver/cast/cast.h"
#include <cassert>
#include <utility>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/array_storage_statistics.h"
#include "tensorstore/chunk_layout.h"
#include "tensorstore/codec_spec.h"
#include "tensorstore/context.h"
#include "tensorstore/data_type.h"
#include "tensorstore/data_type_conversion.h"
#include "tensorstore/driver/chunk.h"
#include "tensorstore/driver/driver.h"
#include "tensorstore/driver/driver_handle.h"
#include "tensorstore/driver/driver_spec.h"
#include "tensorstore/driver/registry.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dimension_units.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/lock_collection.h"
#include "tensorstore/internal/nditerable_data_type_conversion.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/json_serialization_options.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/open_mode.h"
#include "tensorstore/open_options.h"
#include "tensorstore/rank.h"
#include "tensorstore/resize_options.h"
#include "tensorstore/schema.h"
#include "tensorstore/spec.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_cast_driver {
namespace {
using ::tensorstore::internal::Arena;
using ::tensorstore::internal::DataTypeConversionLookupResult;
using ::tensorstore::internal::IntrusivePtr;
using ::tensorstore::internal::NDIterable;
using ::tensorstore::internal::OpenTransactionPtr;
using ::tensorstore::internal::ReadChunk;
using ::tensorstore::internal::TransformedDriverSpec;
using ::tensorstore::internal::WriteChunk;
namespace jb = tensorstore::internal_json_binding;
class CastDriverSpec
: public internal::RegisteredDriverSpec<CastDriverSpec,
internal::DriverSpec> {
public:
constexpr static const char id[] = "cast";
TransformedDriverSpec base;
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(internal::BaseCast<internal::DriverSpec>(x), x.base);
};
OpenMode open_mode() const override { return base.driver_spec->open_mode(); }
absl::Status ApplyOptions(SpecOptions&& options) override {
TENSORSTORE_RETURN_IF_ERROR(schema.Set(options.dtype()));
options.Override(DataType()).IgnoreError();
return internal::TransformAndApplyOptions(base, std::move(options));
}
constexpr static auto default_json_binder = jb::Object(
jb::Member("base",
[](auto is_loading, const auto& options, auto* obj, auto* j) {
return jb::Projection<&CastDriverSpec::base>()(
is_loading,
JsonSerializationOptions(options, DataType(),
obj->schema.rank()),
obj, j);
}),
jb::Initialize([](auto* obj) -> absl::Status {
if (obj->base.transform.valid()) {
TENSORSTORE_RETURN_IF_ERROR(obj->schema.Set(
RankConstraint{obj->base.transform.input_rank()}));
}
DataType dtype = obj->schema.dtype();
DimensionIndex rank = obj->schema.rank();
SpecOptions base_options;
static_cast<Schema&>(base_options) = std::exchange(obj->schema, {});
obj->schema.Set(dtype).IgnoreError();
obj->schema.Set(RankConstraint{rank}).IgnoreError();
return obj->ApplyOptions(std::move(base_options));
}));
Result<IndexDomain<>> GetDomain() const override {
return internal::GetEffectiveDomain(base);
}
Result<ChunkLayout> GetChunkLayout() const override {
return internal::GetEffectiveChunkLayout(base);
}
Result<CodecSpec> GetCodec() const override {
return internal::GetEffectiveCodec(base);
}
Result<SharedArray<const void>> GetFillValue(
IndexTransformView<> transform) const override {
TENSORSTORE_ASSIGN_OR_RETURN(
auto adjusted_transform,
tensorstore::ComposeOptionalTransforms(base.transform, transform));
TENSORSTORE_ASSIGN_OR_RETURN(
auto fill_value, base.driver_spec->GetFillValue(adjusted_transform));
if (!fill_value.valid()) return {std::in_place};
auto dtype = schema.dtype();
if (dtype == fill_value.dtype()) return fill_value;
auto converter = internal::GetDataTypeConverter(fill_value.dtype(), dtype);
if (!(converter.flags & DataTypeConversionFlags::kSupported)) {
return {std::in_place};
}
return MakeCopy(fill_value, skip_repeated_elements, dtype);
}
Result<DimensionUnitsVector> GetDimensionUnits() const override {
return internal::GetEffectiveDimensionUnits(base);
}
kvstore::Spec GetKvstore() const override {
return base.driver_spec->GetKvstore();
}
Result<TransformedDriverSpec> GetBase(
IndexTransformView<> transform) const override {
TransformedDriverSpec new_base;
TENSORSTORE_ASSIGN_OR_RETURN(
new_base.transform,
ComposeOptionalTransforms(base.transform, transform));
new_base.driver_spec = base.driver_spec;
return new_base;
}
Future<internal::Driver::Handle> Open(
internal::DriverOpenRequest request) const override {
DataType target_dtype = schema.dtype();
if (!target_dtype.valid()) {
return absl::InvalidArgumentError("dtype must be specified");
}
auto read_write_mode = request.read_write_mode;
return MapFutureValue(
InlineExecutor{},
[target_dtype, read_write_mode](internal::Driver::Handle handle)
-> Result<internal::Driver::Handle> {
return MakeCastDriver(std::move(handle), target_dtype,
read_write_mode);
},
internal::OpenDriver(base, std::move(request)));
}
};
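// Driver adapter that converts elements between the base driver's dtype and
// `target_dtype_` on the fly, using precomputed conversion lookup results.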
class CastDriver
: public internal::RegisteredDriver<CastDriver,
internal::Driver> {
public:
Result<TransformedDriverSpec> GetBoundSpec(
internal::OpenTransactionPtr transaction,
IndexTransformView<> transform) override {
auto driver_spec = internal::DriverSpec::Make<CastDriverSpec>();
driver_spec->context_binding_state_ = ContextBindingState::bound;
TENSORSTORE_ASSIGN_OR_RETURN(
driver_spec->base,
base_driver_->GetBoundSpec(std::move(transaction), transform));
driver_spec->schema.Set(target_dtype_).IgnoreError();
const DimensionIndex base_rank = base_driver_->rank();
driver_spec->schema.Set(RankConstraint{base_rank}).IgnoreError();
TransformedDriverSpec spec;
spec.transform = std::exchange(driver_spec->base.transform, {});
spec.driver_spec = std::move(driver_spec);
return spec;
}
Result<ChunkLayout> GetChunkLayout(IndexTransformView<> transform) override {
return base_driver_->GetChunkLayout(transform);
}
Result<CodecSpec> GetCodec() override { return base_driver_->GetCodec(); }
Result<SharedArray<const void>> GetFillValue(
IndexTransformView<> transform) override {
if (!(input_conversion_.flags & DataTypeConversionFlags::kSupported)) {
return {std::in_place};
}
TENSORSTORE_ASSIGN_OR_RETURN(auto base_fill_value,
base_driver_->GetFillValue(transform));
if (!base_fill_value.valid()) return {std::in_place};
if (base_fill_value.dtype() == target_dtype_) {
return base_fill_value;
}
return tensorstore::MakeCopy(base_fill_value, skip_repeated_elements,
target_dtype_);
}
Result<DimensionUnitsVector> GetDimensionUnits() override {
return base_driver_->GetDimensionUnits();
}
KvStore GetKvstore(const Transaction& transaction) override {
return base_driver_->GetKvstore(transaction);
}
Result<internal::DriverHandle> GetBase(
ReadWriteMode read_write_mode, IndexTransformView<> transform,
const Transaction& transaction) override {
internal::DriverHandle base_handle;
base_handle.driver = base_driver_;
base_handle.driver.set_read_write_mode(read_write_mode);
base_handle.transform = transform;
base_handle.transaction = transaction;
return base_handle;
}
Future<ArrayStorageStatistics> GetStorageStatistics(
GetStorageStatisticsRequest request) override {
return base_driver_->GetStorageStatistics(std::move(request));
}
explicit CastDriver(internal::DriverPtr base, DataType target_dtype,
DataTypeConversionLookupResult input_conversion,
DataTypeConversionLookupResult output_conversion)
: base_driver_(std::move(base)),
target_dtype_(target_dtype),
input_conversion_(input_conversion),
output_conversion_(output_conversion) {}
DataType dtype() override { return target_dtype_; }
DimensionIndex rank() override { return base_driver_->rank(); }
Executor data_copy_executor() override {
return base_driver_->data_copy_executor();
}
void Read(ReadRequest request,
AnyFlowReceiver<absl::Status, ReadChunk, IndexTransform<>> receiver)
override;
void Write(WriteRequest request,
AnyFlowReceiver<absl::Status, WriteChunk, IndexTransform<>>
receiver) override;
Future<IndexTransform<>> ResolveBounds(
ResolveBoundsRequest request) override {
return base_driver_->ResolveBounds(std::move(request));
}
Future<IndexTransform<>> Resize(ResizeRequest request) override {
return base_driver_->Resize(std::move(request));
}
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.base_driver_, x.target_dtype_, x.input_conversion_,
x.output_conversion_);
};
internal::DriverPtr base_driver_;
DataType target_dtype_;
DataTypeConversionLookupResult input_conversion_;
DataTypeConversionLookupResult output_conversion_;
};
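// Chunk adapters: reads wrap the base iterable in an input conversion to
// `target_dtype_`; writes wrap it in an output conversion back to the base
// dtype. The WriteArray fast path is forwarded only when the cast is a
// reinterpret-cast, since otherwise elements require conversion.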
struct ReadChunkImpl {
IntrusivePtr<CastDriver> self;
ReadChunk::Impl base;
absl::Status operator()(internal::LockCollection& lock_collection) {
return base(lock_collection);
}
Result<NDIterable::Ptr> operator()(ReadChunk::BeginRead,
IndexTransform<> chunk_transform,
Arena* arena) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto iterable,
base(ReadChunk::BeginRead{}, std::move(chunk_transform), arena));
return GetConvertedInputNDIterable(std::move(iterable), self->target_dtype_,
self->input_conversion_);
}
};
struct WriteChunkImpl {
IntrusivePtr<CastDriver> self;
WriteChunk::Impl base;
absl::Status operator()(internal::LockCollection& lock_collection) {
return base(lock_collection);
}
Result<NDIterable::Ptr> operator()(WriteChunk::BeginWrite,
IndexTransform<> chunk_transform,
Arena* arena) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto iterable,
base(WriteChunk::BeginWrite{}, std::move(chunk_transform), arena));
return GetConvertedOutputNDIterable(
std::move(iterable), self->target_dtype_, self->output_conversion_);
}
WriteChunk::EndWriteResult operator()(WriteChunk::EndWrite,
IndexTransformView<> chunk_transform,
bool success, Arena* arena) {
return base(WriteChunk::EndWrite{}, chunk_transform, success, arena);
}
bool operator()(WriteChunk::WriteArray, IndexTransformView<> chunk_transform,
WriteChunk::GetWriteSourceArrayFunction get_source_array,
Arena* arena, WriteChunk::EndWriteResult& end_write_result) {
if (!(self->output_conversion_.flags &
DataTypeConversionFlags::kCanReinterpretCast)) {
return false;
}
return base(WriteChunk::WriteArray{}, chunk_transform, get_source_array,
arena, end_write_result);
}
};
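// Forwards flow-receiver signals to the wrapped receiver, rebinding each
// chunk's implementation through `ChunkImpl` so conversions apply lazily.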
template <typename Chunk, typename ChunkImpl>
struct ChunkReceiverAdapter {
IntrusivePtr<CastDriver> self;
AnyFlowReceiver<absl::Status, Chunk, IndexTransform<>> base;
template <typename CancelReceiver>
void set_starting(CancelReceiver receiver) {
tensorstore::execution::set_starting(base, std::move(receiver));
}
void set_value(Chunk chunk, IndexTransform<> transform) {
tensorstore::execution::set_value(
base,
Chunk{ChunkImpl{self, std::move(chunk.impl)},
std::move(chunk.transform)},
std::move(transform));
}
void set_done() { tensorstore::execution::set_done(base); }
void set_error(absl::Status status) {
tensorstore::execution::set_error(base, std::move(status));
}
void set_stopping() { tensorstore::execution::set_stopping(base); }
};
void CastDriver::Read(
ReadRequest request,
AnyFlowReceiver<absl::Status, ReadChunk, IndexTransform<>> receiver) {
base_driver_->Read(std::move(request),
ChunkReceiverAdapter<ReadChunk, ReadChunkImpl>{
IntrusivePtr<CastDriver>(this), std::move(receiver)});
}
void CastDriver::Write(
WriteRequest request,
AnyFlowReceiver<absl::Status, WriteChunk, IndexTransform<>> receiver) {
base_driver_->Write(std::move(request),
ChunkReceiverAdapter<WriteChunk, WriteChunkImpl>{
IntrusivePtr<CastDriver>(this), std::move(receiver)});
}
const internal::DriverRegistration<CastDriverSpec> driver_registration;
}
}
namespace internal {
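// Determines which conversion directions are available between the source and
// target dtypes: unsupported directions are dropped when `required_mode`
// permits, and an error is returned if no direction remains.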
Result<CastDataTypeConversions> GetCastDataTypeConversions(
DataType source_dtype, DataType target_dtype, ReadWriteMode existing_mode,
ReadWriteMode required_mode) {
assert((existing_mode & required_mode) == required_mode);
CastDataTypeConversions result = {};
if (required_mode == ReadWriteMode::dynamic &&
existing_mode != ReadWriteMode::read_write) {
required_mode = existing_mode;
}
const ReadWriteMode requested_mode =
required_mode == ReadWriteMode::dynamic ? existing_mode : required_mode;
result.mode = requested_mode;
if ((requested_mode & ReadWriteMode::read) == ReadWriteMode::read) {
result.input = GetDataTypeConverter(source_dtype, target_dtype);
if (!(result.input.flags & DataTypeConversionFlags::kSupported)) {
if ((required_mode & ReadWriteMode::read) == ReadWriteMode::read) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Read access requires unsupported ", source_dtype, " -> ",
target_dtype, " conversion"));
}
result.mode &= ~ReadWriteMode::read;
}
}
if ((requested_mode & ReadWriteMode::write) == ReadWriteMode::write) {
result.output = GetDataTypeConverter(target_dtype, source_dtype);
if (!(result.output.flags & DataTypeConversionFlags::kSupported)) {
if ((required_mode & ReadWriteMode::write) == ReadWriteMode::write) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Write access requires unsupported ", target_dtype, " -> ",
source_dtype, " conversion"));
}
result.mode &= ~ReadWriteMode::write;
}
}
if (result.mode == ReadWriteMode{}) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Cannot convert ", source_dtype, " <-> ", target_dtype));
}
return result;
}
Result<Driver::Handle> MakeCastDriver(Driver::Handle base,
DataType target_dtype,
ReadWriteMode read_write_mode) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto conversions, GetCastDataTypeConversions(
base.driver->dtype(), target_dtype,
base.driver.read_write_mode(), read_write_mode));
base.driver = internal::MakeReadWritePtr<internal_cast_driver::CastDriver>(
conversions.mode, std::move(base.driver), target_dtype, conversions.input,
conversions.output);
return base;
}
Result<TransformedDriverSpec> MakeCastDriverSpec(TransformedDriverSpec base,
DataType target_dtype) {
if (!base.driver_spec) return {std::in_place};
DataType source_dtype = base.driver_spec->schema.dtype();
if (source_dtype.valid()) {
TENSORSTORE_RETURN_IF_ERROR(GetCastDataTypeConversions(
source_dtype, target_dtype, ReadWriteMode::read_write,
ReadWriteMode::dynamic));
}
auto driver_spec =
internal::DriverSpec::Make<internal_cast_driver::CastDriverSpec>();
driver_spec->schema
.Set(base.transform.valid() ? RankConstraint{base.transform.output_rank()}
: base.driver_spec->schema.rank())
.IgnoreError();
driver_spec->schema.Set(target_dtype).IgnoreError();
driver_spec->context_binding_state_ = base.context_binding_state();
driver_spec->base.driver_spec = std::move(base.driver_spec);
base.driver_spec = std::move(driver_spec);
return base;
}
}
Result<Spec> Cast(const Spec& base_spec, DataType target_dtype) {
Spec spec;
auto& base_impl = internal_spec::SpecAccess::impl(base_spec);
auto& impl = internal_spec::SpecAccess::impl(spec);
TENSORSTORE_ASSIGN_OR_RETURN(
impl, internal::MakeCastDriverSpec(base_impl, target_dtype));
return spec;
}
} | #include "tensorstore/cast.h"
#include <cstddef>
#include <cstdint>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/chunk_layout.h"
#include "tensorstore/codec_spec.h"
#include "tensorstore/context.h"
#include "tensorstore/data_type.h"
#include "tensorstore/driver/cast/cast.h"
#include "tensorstore/driver/driver_testutil.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/json.h"
#include "tensorstore/internal/global_initializer.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/open.h"
#include "tensorstore/open_mode.h"
#include "tensorstore/schema.h"
#include "tensorstore/spec.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/tensorstore.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Cast;
using ::tensorstore::ChunkLayout;
using ::tensorstore::DataTypeConversionFlags;
using ::tensorstore::DimensionIndex;
using ::tensorstore::dtype_v;
using ::tensorstore::Index;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::ReadWriteMode;
using ::tensorstore::Result;
using ::tensorstore::zero_origin;
using ::tensorstore::dtypes::string_t;
using ::tensorstore::internal::CastDataTypeConversions;
using ::tensorstore::internal::GetCastDataTypeConversions;
using ::tensorstore::internal::GetCastMode;
using ::tensorstore::internal::TestSpecSchema;
using ::tensorstore::internal::TestTensorStoreCreateCheckSchema;
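// `noexcept(test_helper(expr))` is true only when the call is a constant
// expression, so TENSORSTORE_IS_CONSTEXPR detects whether `expr` is valid at
// compile time; the static_asserts below check that unsupported casts (e.g.
// byte <-> string) fail GetCastMode's internal assert and are therefore
// rejected as constant expressions.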
#ifndef _MSC_VER
template <class T>
constexpr void test_helper(T&& t) {}
#define TENSORSTORE_IS_CONSTEXPR(...) noexcept(test_helper(__VA_ARGS__))
static_assert(!TENSORSTORE_IS_CONSTEXPR(
GetCastMode<std::byte, std::string>(ReadWriteMode::dynamic)));
static_assert(!TENSORSTORE_IS_CONSTEXPR(
GetCastMode<std::byte, std::string>(ReadWriteMode::read)));
static_assert(!TENSORSTORE_IS_CONSTEXPR(
GetCastMode<std::byte, std::string>(ReadWriteMode::write)));
static_assert(!TENSORSTORE_IS_CONSTEXPR(
GetCastMode<std::byte, std::string>(ReadWriteMode::read_write)));
#endif
static_assert(GetCastMode<std::int32_t, float>(ReadWriteMode::dynamic) ==
ReadWriteMode::dynamic);
static_assert(GetCastMode<std::int32_t, float>(ReadWriteMode::read) ==
ReadWriteMode::read);
static_assert(GetCastMode<std::int32_t, float>(ReadWriteMode::write) ==
ReadWriteMode::write);
static_assert(GetCastMode<std::int32_t, float>(ReadWriteMode::read_write) ==
ReadWriteMode::read_write);
static_assert(GetCastMode<std::int32_t, std::string>(ReadWriteMode::dynamic) ==
ReadWriteMode::read);
static_assert(GetCastMode<std::int32_t, std::string>(ReadWriteMode::read) ==
ReadWriteMode::read);
#ifndef _MSC_VER
static_assert(!TENSORSTORE_IS_CONSTEXPR(
GetCastMode<std::int32_t, std::string>(ReadWriteMode::write)));
#endif
static_assert(GetCastMode<std::int32_t, std::string>(
ReadWriteMode::read_write) == ReadWriteMode::read);
static_assert(GetCastMode<std::string, std::int32_t>(ReadWriteMode::dynamic) ==
ReadWriteMode::write);
static_assert(GetCastMode<std::string, std::int32_t>(ReadWriteMode::write) ==
ReadWriteMode::write);
#ifndef _MSC_VER
static_assert(!TENSORSTORE_IS_CONSTEXPR(
GetCastMode<std::string, std::int32_t>(ReadWriteMode::read)));
#endif
static_assert(GetCastMode<std::string, std::int32_t>(
ReadWriteMode::read_write) == ReadWriteMode::write);
static_assert(GetCastMode<std::int32_t, std::int32_t>(
ReadWriteMode::read_write) == ReadWriteMode::read_write);
static_assert(GetCastMode<std::int32_t, std::int32_t>(ReadWriteMode::dynamic) ==
ReadWriteMode::dynamic);
static_assert(GetCastMode<std::int32_t, std::int32_t>(ReadWriteMode::read) ==
ReadWriteMode::read);
static_assert(GetCastMode<std::int32_t, std::int32_t>(ReadWriteMode::write) ==
ReadWriteMode::write);
static_assert(GetCastMode<std::int32_t, void>(ReadWriteMode::write) ==
ReadWriteMode::write);
static_assert(GetCastMode<std::int32_t, void>(ReadWriteMode::read) ==
ReadWriteMode::read);
static_assert(GetCastMode<std::int32_t, void>(ReadWriteMode::dynamic) ==
ReadWriteMode::dynamic);
static_assert(GetCastMode<std::int32_t, void>(ReadWriteMode::read_write) ==
ReadWriteMode::dynamic);
static_assert(GetCastMode<void, std::int32_t>(ReadWriteMode::write) ==
ReadWriteMode::write);
static_assert(GetCastMode<void, std::int32_t>(ReadWriteMode::read) ==
ReadWriteMode::read);
static_assert(GetCastMode<void, std::int32_t>(ReadWriteMode::dynamic) ==
ReadWriteMode::dynamic);
static_assert(GetCastMode<void, std::int32_t>(ReadWriteMode::read_write) ==
ReadWriteMode::dynamic);
static_assert(GetCastMode<void, void>(ReadWriteMode::write) ==
ReadWriteMode::write);
static_assert(GetCastMode<void, void>(ReadWriteMode::read) ==
ReadWriteMode::read);
static_assert(GetCastMode<void, void>(ReadWriteMode::dynamic) ==
ReadWriteMode::dynamic);
static_assert(GetCastMode<void, void>(ReadWriteMode::read_write) ==
ReadWriteMode::dynamic);
::testing::Matcher<Result<CastDataTypeConversions>>
MatchesCastDataTypeConversions(DataTypeConversionFlags input_flags,
DataTypeConversionFlags output_flags,
ReadWriteMode mode) {
return ::testing::Optional(::testing::AllOf(
::testing::ResultOf([](const auto& x) { return x.input.flags; },
input_flags),
::testing::ResultOf([](const auto& x) { return x.output.flags; },
output_flags),
::testing::Field(&CastDataTypeConversions::mode, mode)));
}
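// Exercises GetCastDataTypeConversions across existing/required mode
// combinations: identity casts keep all flags, int16 -> float reads are
// additionally safe-and-implicit, and one-directional casts (int32 <->
// string) either reduce the mode to the supported direction or fail.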
TEST(GetCastDataTypeConversions, Basic) {
constexpr static DataTypeConversionFlags kSupported =
DataTypeConversionFlags::kSupported;
constexpr static DataTypeConversionFlags kIdentity =
DataTypeConversionFlags::kIdentity;
constexpr static DataTypeConversionFlags kSafeAndImplicit =
DataTypeConversionFlags::kSafeAndImplicit;
constexpr static DataTypeConversionFlags kCanReinterpretCast =
DataTypeConversionFlags::kCanReinterpretCast;
constexpr static DataTypeConversionFlags kNone = {};
constexpr static DataTypeConversionFlags kAll =
kSupported | kIdentity | kCanReinterpretCast | kSafeAndImplicit;
constexpr static ReadWriteMode read = ReadWriteMode::read;
constexpr static ReadWriteMode write = ReadWriteMode::write;
constexpr static ReadWriteMode read_write = ReadWriteMode::read_write;
constexpr static ReadWriteMode dynamic = ReadWriteMode::dynamic;
constexpr auto IfMode = [](ReadWriteMode mode, ReadWriteMode condition,
DataTypeConversionFlags true_value) {
return ((mode & condition) == condition) ? true_value : kNone;
};
for (const auto existing_mode : {read, write, read_write}) {
for (const auto required_mode : {existing_mode, dynamic}) {
EXPECT_THAT(GetCastDataTypeConversions(dtype_v<std::int32_t>,
dtype_v<std::int32_t>,
existing_mode, required_mode),
MatchesCastDataTypeConversions(
IfMode(existing_mode, read, kAll),
IfMode(existing_mode, write, kAll),
existing_mode));
}
}
for (const auto existing_mode : {read, write, read_write}) {
for (const auto required_mode : {existing_mode, dynamic}) {
EXPECT_THAT(
GetCastDataTypeConversions(dtype_v<std::int32_t>, dtype_v<float>,
existing_mode, required_mode),
MatchesCastDataTypeConversions(
IfMode(existing_mode, read, kSupported),
IfMode(existing_mode, write, kSupported),
existing_mode));
}
}
for (const auto existing_mode : {read, write, read_write}) {
for (const auto required_mode : {existing_mode, dynamic}) {
EXPECT_THAT(
GetCastDataTypeConversions(dtype_v<std::int16_t>, dtype_v<float>,
existing_mode, required_mode),
MatchesCastDataTypeConversions(
IfMode(existing_mode, read,
kSupported | kSafeAndImplicit),
IfMode(existing_mode, write, kSupported),
existing_mode));
}
}
for (const auto existing_mode : {read, read_write}) {
for (const auto required_mode : {read, dynamic}) {
EXPECT_THAT(GetCastDataTypeConversions(dtype_v<std::int32_t>,
dtype_v<std::string>,
existing_mode, required_mode),
MatchesCastDataTypeConversions(
kSupported,
kNone,
read));
}
}
for (const auto required_mode : {write, read_write}) {
EXPECT_THAT(
GetCastDataTypeConversions(dtype_v<std::int32_t>, dtype_v<std::string>,
read_write, required_mode),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
for (const auto required_mode : {write, dynamic}) {
EXPECT_THAT(
GetCastDataTypeConversions(dtype_v<std::int32_t>, dtype_v<std::string>,
write, required_mode),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
for (const auto existing_mode : {write, read_write}) {
for (const auto required_mode : {write, dynamic}) {
EXPECT_THAT(GetCastDataTypeConversions(dtype_v<std::string>,
dtype_v<std::int32_t>,
existing_mode, required_mode),
MatchesCastDataTypeConversions(
kNone,
kSupported,
write));
}
}
for (const auto required_mode : {read, dynamic}) {
EXPECT_THAT(
GetCastDataTypeConversions(dtype_v<std::string>, dtype_v<std::int32_t>,
read, required_mode),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
for (const auto required_mode : {read, read_write}) {
EXPECT_THAT(
GetCastDataTypeConversions(dtype_v<std::string>, dtype_v<std::int32_t>,
read_write, required_mode),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
for (const auto existing_mode : {read, write, read_write}) {
for (const auto required_mode : {read, write, read_write, dynamic}) {
if ((existing_mode & required_mode) != required_mode) continue;
EXPECT_THAT(GetCastDataTypeConversions(
dtype_v<std::byte>, dtype_v<std::string>, existing_mode,
required_mode & existing_mode),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
}
}
TEST(CastTest, Int32ToStringDynamic) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
tensorstore::Open(
{{"driver", "array"}, {"array", {1, 2, 3}}, {"dtype", "int32"}})
.result());
EXPECT_EQ(store.read_write_mode(), ReadWriteMode::read_write);
ASSERT_EQ(tensorstore::Box<1>({3}), store.domain().box());
auto cast_store = Cast(store, dtype_v<std::string>).value();
EXPECT_EQ(cast_store.read_write_mode(), ReadWriteMode::read);
EXPECT_EQ(tensorstore::Read<zero_origin>(cast_store).result(),
MakeArray<std::string>({"1", "2", "3"}));
EXPECT_THAT(
cast_store.spec().value().ToJson({tensorstore::IncludeDefaults{false}}),
::testing::Optional(tensorstore::MatchesJson(
{{"driver", "cast"},
{"dtype", "string"},
{"transform",
::nlohmann::json(tensorstore::IdentityTransform<1>({3}))},
{"base",
{
{"driver", "array"},
{"array", {1, 2, 3}},
{"dtype", "int32"},
}}})));
}
TEST(CastTest, StringToInt32Dynamic) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::Open({{"driver", "array"},
{"array", {"a", "b", "c"}},
{"dtype", "string"}})
.result());
EXPECT_EQ(store.read_write_mode(), ReadWriteMode::read_write);
auto cast_store = Cast(store, dtype_v<std::int32_t>).value();
EXPECT_EQ(cast_store.read_write_mode(), ReadWriteMode::write);
TENSORSTORE_EXPECT_OK(
tensorstore::Write(MakeArray<std::int32_t>({1, 2, 3}), cast_store));
EXPECT_EQ(tensorstore::Read<zero_origin>(store).result(),
MakeArray<std::string>({"1", "2", "3"}));
}
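// int32 <-> int64 converts in both directions, so a cast driver opened from
// a JSON spec keeps full read_write access; the scalar write below
// broadcasts across the 1-d domain.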
TEST(CastTest, OpenInt32ToInt64) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
tensorstore::Open(
{{"driver", "cast"},
{"dtype", "int64"},
{"base",
{{"driver", "array"}, {"array", {1, 2, 3}}, {"dtype", "int32"}}}})
.result());
EXPECT_EQ(store.read_write_mode(), ReadWriteMode::read_write);
EXPECT_EQ(tensorstore::Read<zero_origin>(store).result(),
MakeArray<std::int64_t>({1, 2, 3}));
TENSORSTORE_EXPECT_OK(tensorstore::Write(
tensorstore::MakeScalarArray<std::int64_t>(10), store));
EXPECT_EQ(tensorstore::Read<zero_origin>(store).result(),
MakeArray<std::int64_t>({10, 10, 10}));
}
TEST(CastTest, OpenInputConversionError) {
EXPECT_THAT(
tensorstore::Open(
{{"driver", "cast"},
{"dtype", "byte"},
{"base",
{{"driver", "array"}, {"array", {1, 2, 3}}, {"dtype", "int32"}}}},
tensorstore::ReadWriteMode::read)
.result(),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Error opening \"cast\" driver: "
"Read access requires unsupported int32 -> byte conversion"));
}
TEST(CastTest, OpenOutputConversionError) {
EXPECT_THAT(
tensorstore::Open(
{{"driver", "cast"},
{"dtype", "byte"},
{"base",
{{"driver", "array"}, {"array", {1, 2, 3}}, {"dtype", "int32"}}}},
tensorstore::ReadWriteMode::write)
.result(),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Error opening \"cast\" driver: "
"Write access requires unsupported byte -> int32 conversion"));
}
TEST(CastTest, OpenAnyConversionError) {
EXPECT_THAT(
tensorstore::Open(
{{"driver", "cast"},
{"dtype", "byte"},
{"base",
{{"driver", "array"}, {"array", {1, 2, 3}}, {"dtype", "int32"}}}})
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error opening \"cast\" driver: "
"Cannot convert int32 <-> byte"));
}
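// The cast driver requires an explicit "dtype" member in its spec.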
TEST(CastTest, OpenMissingDataType) {
EXPECT_THAT(
tensorstore::Open(
{{"driver", "cast"},
{"base",
{{"driver", "array"}, {"array", {1, 2, 3}}, {"dtype", "int32"}}}})
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*: dtype must be specified"));
}
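// Index transforms specified on the cast layer and on the base driver are
// composed into a single transform in the resulting spec.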
TEST(CastTest, ComposeTransforms) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
tensorstore::Open(
{{"driver", "cast"},
{"transform",
{{"input_inclusive_min", {10}},
{"input_shape", {3}},
{"output", {{{"input_dimension", 0}, {"offset", -8}}}}}},
{"dtype", "int64"},
{"base",
{{"driver", "array"},
{"array", {1, 2, 3}},
{"transform",
{{"input_inclusive_min", {2}},
{"input_shape", {3}},
{"output", {{{"input_dimension", 0}, {"offset", -2}}}}}},
{"dtype", "int32"}}}})
.result());
EXPECT_THAT(
store.spec().value().ToJson({tensorstore::IncludeDefaults{false}}),
::testing::Optional(tensorstore::MatchesJson(
{{"driver", "cast"},
{"base",
{
{"driver", "array"},
{"array", {1, 2, 3}},
{"dtype", "int32"},
}},
{"dtype", "int64"},
{"transform",
::nlohmann::json(tensorstore::IndexTransformBuilder<>(1, 1)
.input_origin({10})
.input_shape({3})
.output_single_input_dimension(0, -10, 1, 0)
.Finalize()
.value())}})));
}
TEST(CastTest, ComposeTransformsError) {
EXPECT_THAT(tensorstore::Open({{"driver", "cast"},
{"rank", 2},
{"dtype", "int64"},
{"base",
{{"driver", "array"},
{"array", {1, 2, 3}},
{"rank", 1},
{"dtype", "int32"}}}})
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"base\": "
"Error parsing object member \"rank\": "
"Expected 2, but received: 1"));
}
TEST(CastTest, SpecRankPropagation) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec, tensorstore::Spec::FromJson({
{"driver", "cast"},
{"base",
{
{"driver", "array"},
{"array", {1, 2, 3}},
{"dtype", "int32"},
}},
{"dtype", "int64"},
}));
EXPECT_EQ(1, spec.rank());
}
TEST(CastTest, ChunkLayout) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::Open({
{"driver", "cast"},
{"dtype", "int32"},
{"base",
{{"driver", "array"},
{"dtype", "int64"},
{"array", {{1, 2, 3}, {4, 5, 6}}}}},
})
.result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected_layout,
ChunkLayout::FromJson({
{"grid_origin", {0, 0}},
{"inner_order", {0, 1}},
}));
EXPECT_THAT(store.chunk_layout(), ::testing::Optional(expected_layout));
}
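// Schema propagation: rank, domain, chunk layout, and dimension units come
// from the base store, while dtype is replaced by the cast driver's dtype.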
TEST(SpecSchemaTest, CastArray) {
TestSpecSchema(
{
{"driver", "cast"},
{"base",
{
{"driver", "array"},
{"array", {{1, 2, 3}, {4, 5, 6}}},
{"dtype", "float32"},
{"schema", {{"dimension_units", {"4nm", "5nm"}}}},
}},
{"dtype", "int32"},
},
{
{"rank", 2},
{"dtype", "int32"},
{"domain", {{"shape", {2, 3}}}},
{"chunk_layout", {{"grid_origin", {0, 0}}, {"inner_order", {0, 1}}}},
{"dimension_units", {"4nm", "5nm"}},
});
}
TEST(DriverCreateCheckSchemaTest, CastArray) {
TestTensorStoreCreateCheckSchema(
{
{"driver", "cast"},
{"base",
{
{"driver", "array"},
{"array", {{1, 2, 3}, {4, 5, 6}}},
{"dtype", "float32"},
{"schema", {{"dimension_units", {"4nm", "5nm"}}}},
}},
{"dtype", "int32"},
},
{
{"rank", 2},
{"dtype", "int32"},
{"domain", {{"shape", {2, 3}}}},
{"chunk_layout", {{"grid_origin", {0, 0}}, {"inner_order", {0, 1}}}},
{"dimension_units", {"4nm", "5nm"}},
});
}
TEST(CastTest, FillValueNotSpecified) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto base_store,
tensorstore::Open(
{
{"driver", "zarr"},
{"kvstore", {{"driver", "memory"}}},
},
tensorstore::OpenMode::create, tensorstore::dtype_v<uint16_t>,
tensorstore::Schema::Shape({100, 4, 3}))
.result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::Cast(base_store, tensorstore::dtype_v<int32_t>));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto fill_value, store.fill_value());
EXPECT_FALSE(fill_value.valid());
}
TEST(CastTest, FillValueSpecified) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto base_store,
tensorstore::Open(
{
{"driver", "zarr"},
{"kvstore", {{"driver", "memory"}}},
},
tensorstore::OpenMode::create, tensorstore::dtype_v<uint16_t>,
tensorstore::Schema::Shape({100, 4, 3}),
tensorstore::Schema::FillValue(
tensorstore::MakeScalarArray<uint16_t>(42)))
.result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::Cast(base_store, tensorstore::dtype_v<int32_t>));
EXPECT_THAT(store.fill_value(),
::testing::Optional(tensorstore::MakeScalarArray<int32_t>(42)));
}
TEST(CastTest, Codec) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto base_store,
tensorstore::Open(
{
{"driver", "zarr"},
{"kvstore", {{"driver", "memory"}}},
{"metadata", {{"compressor", nullptr}}},
},
tensorstore::OpenMode::create, tensorstore::dtype_v<uint16_t>,
tensorstore::Schema::Shape({100, 4, 3}))
.result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, tensorstore::Cast(base_store, tensorstore::dtype_v<int32_t>));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected_codec,
tensorstore::CodecSpec::FromJson({
{"driver", "zarr"},
{"compressor", nullptr},
{"filters", nullptr},
}));
EXPECT_THAT(store.codec(), ::testing::Optional(expected_codec));
}
TEST(SpecSchemaTest, ChunkLayout) {
TestSpecSchema(
{
{"driver", "cast"},
{"dtype", "uint32"},
{"base",
{
{"driver", "zarr"},
{"kvstore", {{"driver", "memory"}}},
{"metadata", {{"dtype", "<u2"}, {"chunks", {3, 4, 5}}}},
}},
},
{
{"dtype", "uint32"},
{"chunk_layout",
{
{"grid_origin", {0, 0, 0}},
{"chunk", {{"shape", {3, 4, 5}}}},
}},
{"codec", {{"driver", "zarr"}}},
});
}
TEST(SpecSchemaTest, Codec) {
TestSpecSchema(
{
{"driver", "cast"},
{"dtype", "uint32"},
{"base",
{
{"driver", "zarr"},
{"kvstore", {{"driver", "memory"}}},
{"metadata", {{"dtype", "<u2"}, {"compressor", nullptr}}},
}},
},
{
{"dtype", "uint32"},
{"codec", {{"driver", "zarr"}, {"compressor", nullptr}}},
});
}
TEST(SpecSchemaTest, FillValue) {
TestSpecSchema(
{
{"driver", "cast"},
{"dtype", "uint32"},
{"base",
{
{"driver", "zarr"},
{"kvstore", {{"driver", "memory"}}},
{"metadata", {{"dtype", "<f4"}, {"fill_value", 3.5}}},
}},
},
{
{"dtype", "uint32"},
{"fill_value", 3},
{"codec", {{"driver", "zarr"}}},
});
}
TEST(SpecSchemaTest, FillValueSameDtype) {
TestSpecSchema(
{
{"driver", "cast"},
{"dtype", "uint32"},
{"base",
{
{"driver", "zarr"},
{"kvstore", {{"driver", "memory"}}},
{"metadata", {{"dtype", "<u4"}, {"fill_value", 3}}},
}},
},
{
{"dtype", "uint32"},
{"fill_value", 3},
{"codec", {{"driver", "zarr"}}},
});
}
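// Registers the shared driver-spec round-trip test suite for "cast",
// covering the minimal/full spec forms and the underlying base spec.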
TENSORSTORE_GLOBAL_INITIALIZER {
tensorstore::internal::TestTensorStoreDriverSpecRoundtripOptions options;
options.test_name = "cast";
options.create_spec = {
{"driver", "cast"},
{"base",
{
{"driver", "array"},
{"dtype", "float32"},
{"array", {{1, 2, 3}, {4, 5, 6}}},
}},
{"dtype", "uint32"},
};
options.full_spec = {
{"driver", "cast"},
{"base",
{
{"driver", "array"},
{"array", {{1, 2, 3}, {4, 5, 6}}},
{"dtype", "float32"},
}},
{"dtype", "uint32"},
{"transform",
{{"input_inclusive_min", {0, 0}}, {"input_exclusive_max", {2, 3}}}},
};
options.full_base_spec = {
{"driver", "array"},
{"array", {{1, 2, 3}, {4, 5, 6}}},
{"dtype", "float32"},
{"transform",
{{"input_inclusive_min", {0, 0}}, {"input_exclusive_max", {2, 3}}}},
};
options.minimal_spec = options.full_spec;
options.check_not_found_before_create = false;
options.check_not_found_before_commit = false;
options.supported_transaction_modes = {};
tensorstore::internal::RegisterTensorStoreDriverSpecRoundtripTest(
std::move(options));
}
} |
523 | cpp | google/tensorstore | index | python/tensorstore/index.cc | python/tensorstore/index_test.cc |
#ifndef TENSORSTORE_INDEX_H_
#define TENSORSTORE_INDEX_H_
#include <stddef.h>
#include <stdint.h>
namespace tensorstore {
using Index = int64_t;
using DimensionIndex = ptrdiff_t;
constexpr Index kMinFiniteIndex = -0x3ffffffffffffffe;
constexpr Index kInfIndex = 0x3fffffffffffffff;
constexpr Index kMaxFiniteIndex = 0x3ffffffffffffffe;
constexpr Index kInfSize = 0x7fffffffffffffff;
constexpr Index kMaxFiniteSize = 0x7ffffffffffffffd;
static_assert(-kInfIndex + kInfSize - 1 == kInfIndex, "");
static_assert(kMinFiniteIndex + kMaxFiniteSize - 1 == kMaxFiniteIndex, "");
constexpr Index kImplicit = -0x8000000000000000;
}
#endif
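// Implementation of the Python index-conversion helpers: any
// OptionallyImplicitIndex equal to kImplicit is replaced by the supplied
// implicit_value when materializing a concrete index vector or scalar.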
#include <pybind11/pybind11.h>
#include "python/tensorstore/index.h"
#include <string>
#include <variant>
#include <vector>
#include "python/tensorstore/sequence_parameter.h"
#include "tensorstore/index.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_python {
IndexVectorOrScalarContainer ToIndexVectorOrScalarContainer(
const OptionallyImplicitIndexVectorOrScalarContainer& x,
Index implicit_value) {
if (auto* index = std::get_if<OptionallyImplicitIndex>(&x)) {
return index->value_or(implicit_value);
}
const auto& v = std::get<SequenceParameter<OptionallyImplicitIndex>>(x);
std::vector<Index> out_v;
out_v.reserve(v.size());
for (size_t i = 0; i < v.size(); ++i) {
out_v.push_back(v[i].value_or(implicit_value));
}
return out_v;
}
internal_index_space::IndexVectorOrScalarView ToIndexVectorOrScalar(
const IndexVectorOrScalarContainer& x) {
constexpr static Index temp = 0;
if (auto* index = std::get_if<Index>(&x)) {
return *index;
} else {
const auto& v = std::get<std::vector<Index>>(x);
if (v.empty()) {
return span(&temp, 0);
}
return span(v);
}
}
std::string IndexVectorRepr(const IndexVectorOrScalarContainer& x,
bool implicit, bool subscript) {
return internal::IndexVectorRepr(ToIndexVectorOrScalar(x), implicit,
subscript);
}
}
}
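// pybind11 type casters: Python ints convert via PyNumber_AsSsize_t (with
// the pending IndexError cleared on failure), and None maps to kImplicit
// for OptionallyImplicitIndex.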
namespace pybind11 {
namespace detail {
handle type_caster<tensorstore::internal_python::PythonDimensionIndex>::cast(
tensorstore::internal_python::PythonDimensionIndex x,
    return_value_policy, handle) {
return int_(x.value).release();
}
bool type_caster<tensorstore::internal_python::PythonDimensionIndex>::load(
handle src, bool convert) {
value.value = PyNumber_AsSsize_t(src.ptr(), PyExc_IndexError);
if (value.value == -1 && PyErr_Occurred()) {
PyErr_Clear();
return false;
}
return true;
}
handle type_caster<tensorstore::internal_python::OptionallyImplicitIndex>::cast(
tensorstore::internal_python::OptionallyImplicitIndex x,
    return_value_policy, handle) {
if (x.value == tensorstore::kImplicit) return none().release();
return int_(x.value).release();
}
bool type_caster<tensorstore::internal_python::OptionallyImplicitIndex>::load(
handle src, bool convert) {
if (src.is_none()) {
value.value = tensorstore::kImplicit;
return true;
}
value.value = PyNumber_AsSsize_t(src.ptr(), PyExc_IndexError);
if (value.value == -1 && PyErr_Occurred()) {
PyErr_Clear();
return false;
}
return true;
}
}
} |
#include "python/tensorstore/index.h"
#include <vector>
#include <gtest/gtest.h>
#include "tensorstore/index.h"
namespace {
TEST(OptionallyImplicitIndexReprTest, Basic) {
using tensorstore::kImplicit;
using tensorstore::internal_python::OptionallyImplicitIndexRepr;
EXPECT_EQ("None", OptionallyImplicitIndexRepr(kImplicit));
EXPECT_EQ("3", OptionallyImplicitIndexRepr(3));
EXPECT_EQ("-3", OptionallyImplicitIndexRepr(-3));
}
TEST(IndexVectorReprTest, Basic) {
using tensorstore::Index;
using tensorstore::kImplicit;
using tensorstore::internal_python::IndexVectorRepr;
for (bool subscript : {false, true}) {
EXPECT_EQ("None", IndexVectorRepr(kImplicit, true, subscript));
for (bool implicit : {false, true}) {
EXPECT_EQ("1", IndexVectorRepr(1, implicit, subscript));
EXPECT_EQ("-1", IndexVectorRepr(-1, implicit, subscript));
}
}
for (bool implicit : {false, true}) {
EXPECT_EQ("[1,2,3]", IndexVectorRepr(std::vector<Index>{1, 2, 3}, implicit,
false));
EXPECT_EQ("1,2,3", IndexVectorRepr(std::vector<Index>{1, 2, 3}, implicit,
true));
EXPECT_EQ("[]", IndexVectorRepr(std::vector<Index>{}, implicit,
false));
EXPECT_EQ("()", IndexVectorRepr(std::vector<Index>{}, implicit,
true));
}
EXPECT_EQ("[1,2,None]", IndexVectorRepr(std::vector<Index>{1, 2, kImplicit},
true,
false));
EXPECT_EQ("1,2,None", IndexVectorRepr(std::vector<Index>{1, 2, kImplicit},
true,
true));
}
TEST(ToIndexVectorOrScalarContainerTest, Basic) {
using tensorstore::Index;
using tensorstore::kImplicit;
using tensorstore::internal_python::IndexVectorOrScalarContainer;
using tensorstore::internal_python::OptionallyImplicitIndex;
using tensorstore::internal_python::ToIndexVectorOrScalarContainer;
EXPECT_EQ(
IndexVectorOrScalarContainer{Index{3}},
ToIndexVectorOrScalarContainer(OptionallyImplicitIndex{3}, kImplicit));
EXPECT_EQ(IndexVectorOrScalarContainer{3},
ToIndexVectorOrScalarContainer(OptionallyImplicitIndex{3}, 4));
EXPECT_EQ(
IndexVectorOrScalarContainer{Index{3}},
ToIndexVectorOrScalarContainer(OptionallyImplicitIndex{kImplicit}, 3));
EXPECT_EQ(IndexVectorOrScalarContainer{kImplicit},
ToIndexVectorOrScalarContainer(OptionallyImplicitIndex{kImplicit},
kImplicit));
EXPECT_EQ(IndexVectorOrScalarContainer{std::vector<Index>({1, 2, 3})},
ToIndexVectorOrScalarContainer(
std::vector<OptionallyImplicitIndex>{
OptionallyImplicitIndex{1},
OptionallyImplicitIndex{2},
OptionallyImplicitIndex{3},
},
kImplicit));
EXPECT_EQ(IndexVectorOrScalarContainer{std::vector<Index>({1, 2, kImplicit})},
ToIndexVectorOrScalarContainer(
std::vector<OptionallyImplicitIndex>{
OptionallyImplicitIndex{1},
OptionallyImplicitIndex{2},
OptionallyImplicitIndex{kImplicit},
},
kImplicit));
EXPECT_EQ(IndexVectorOrScalarContainer{std::vector<Index>({1, 2, 3})},
ToIndexVectorOrScalarContainer(
std::vector<OptionallyImplicitIndex>{
OptionallyImplicitIndex{1},
OptionallyImplicitIndex{2},
OptionallyImplicitIndex{kImplicit},
},
3));
}
} |
524 | cpp | google/tensorstore | kvstore | tensorstore/kvstore/kvstore.cc | tensorstore/kvstore/kvstore_test.cc |
#ifndef TENSORSTORE_KVSTORE_KVSTORE_H_
#define TENSORSTORE_KVSTORE_KVSTORE_H_
#include <string>
#include <string_view>
#include <type_traits>
#include <utility>
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/context.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/json_serialization_options.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/serialization/fwd.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/option.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace kvstore {
struct SpecRequestOptions : public DriverSpecOptions {
ContextBindingMode context_binding_mode = ContextBindingMode::unspecified;
template <typename T>
constexpr static bool IsOption = DriverSpecOptions::IsOption<T>;
using DriverSpecOptions::Set;
void Set(ContextBindingMode value) {
if (value > context_binding_mode) context_binding_mode = value;
}
};
template <>
constexpr inline bool SpecRequestOptions::IsOption<ContextBindingMode> = true;
class KvStore {
public:
KvStore() = default;
KvStore(DriverPtr driver) : driver(std::move(driver)) {}
explicit KvStore(DriverPtr driver, Transaction transaction)
: driver(std::move(driver)), transaction(std::move(transaction)) {}
explicit KvStore(DriverPtr driver, std::string path,
Transaction transaction = no_transaction)
: driver(std::move(driver)),
path(std::move(path)),
transaction(std::move(transaction)) {}
void AppendSuffix(std::string_view suffix) { path += suffix; }
void AppendPathComponent(std::string_view component) {
internal::AppendPathComponent(path, component);
}
KvStore WithPathSuffix(std::string_view suffix) && {
AppendSuffix(suffix);
return std::move(*this);
}
KvStore WithPathSuffix(std::string_view suffix) const& {
return KvStore(*this).WithPathSuffix(suffix);
}
bool valid() const { return static_cast<bool>(driver); }
DriverPtr driver;
std::string path;
template <typename... Option>
std::enable_if_t<IsCompatibleOptionSequence<SpecRequestOptions, Option...>,
Result<Spec>>
spec(Option&&... option) const {
SpecRequestOptions options;
(options.Set(std::move(option)), ...);
return spec(std::move(options));
}
Result<Spec> spec(SpecRequestOptions&& options) const;
Result<std::string> ToUrl() const;
friend bool operator==(const KvStore& a, const KvStore& b);
friend bool operator!=(const KvStore& a, const KvStore& b) {
return !(a == b);
}
KvStore non_transactional() const& { return KvStore(driver, path); }
KvStore non_transactional() && {
return KvStore(std::move(driver), std::move(path));
}
Result<KvStore> base() const;
friend Result<KvStore> ApplyTensorStoreTransaction(KvStore store,
Transaction transaction) {
TENSORSTORE_RETURN_IF_ERROR(
internal::ChangeTransaction(store.transaction, std::move(transaction)));
return store;
}
template <typename Func>
PipelineResultType<const KvStore&, Func> operator|(Func&& func) const& {
return std::forward<Func>(func)(*this);
}
template <typename Func>
PipelineResultType<KvStore&&, Func> operator|(Func&& func) && {
return std::forward<Func>(func)(std::move(*this));
}
Transaction transaction = no_transaction;
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.driver, x.path, x.transaction);
};
};
struct DriverOpenOptions {
Context context;
template <typename T>
constexpr static bool IsOption = false;
void Set(Context value) { context = std::move(value); }
};
struct OpenOptions : public DriverOpenOptions {
Transaction transaction = no_transaction;
template <typename T>
constexpr static bool IsOption = DriverOpenOptions::IsOption<T>;
using DriverOpenOptions::Set;
void Set(Transaction transaction) {
this->transaction = std::move(transaction);
}
};
template <>
constexpr inline bool DriverOpenOptions::IsOption<Context> = true;
template <>
constexpr inline bool OpenOptions::IsOption<Transaction> = true;
Future<KvStore> Open(Spec spec, OpenOptions&& options);
Future<KvStore> Open(::nlohmann::json json_spec, OpenOptions&& options);
template <typename... Option>
static std::enable_if_t<IsCompatibleOptionSequence<OpenOptions, Option...>,
Future<KvStore>>
Open(Spec spec, Option&&... option) {
OpenOptions options;
(options.Set(option), ...);
return kvstore::Open(std::move(spec), std::move(options));
}
template <typename... Option>
static std::enable_if_t<IsCompatibleOptionSequence<OpenOptions, Option...>,
Future<KvStore>>
Open(::nlohmann::json j, Option&&... option) {
OpenOptions options;
(options.Set(option), ...);
return kvstore::Open(std::move(j), std::move(options));
}
}
using KvStore = kvstore::KvStore;
}
TENSORSTORE_DECLARE_SERIALIZER_SPECIALIZATION(tensorstore::kvstore::KvStore)
TENSORSTORE_DECLARE_GARBAGE_COLLECTION_SPECIALIZATION(
tensorstore::kvstore::KvStore)
#endif
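// Illustrative usage sketch (not part of the original sources): open a
// memory-backed KvStore from a JSON spec, write a value, and read it back.
// Assumes the "memory" kvstore driver is linked into the binary.
//
//   auto store = tensorstore::kvstore::Open({{"driver", "memory"}})
//                    .result()
//                    .value();
//   TENSORSTORE_CHECK_OK(
//       tensorstore::kvstore::Write(store, "a", absl::Cord("value")).result());
//   auto read_result = tensorstore::kvstore::Read(store, "a").result();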
#include "tensorstore/kvstore/kvstore.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <atomic>
#include <cassert>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/no_destructor.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/internal/cache_key/cache_key.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/internal/source_location.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/registry.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/supported_features.h"
#include "tensorstore/serialization/fwd.h"
#include "tensorstore/serialization/registry.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/future_sender.h"
#include "tensorstore/util/execution/sender.h"
#include "tensorstore/util/execution/sender_util.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/garbage_collection/garbage_collection.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
using ::tensorstore::internal::IntrusivePtr;
namespace tensorstore {
namespace kvstore {
namespace {
ABSL_CONST_INIT internal_log::VerboseFlag kvstore_cache_logging(
"kvstore_cache");
}
void intrusive_ptr_increment(Driver* p) {
p->reference_count_.fetch_add(1, std::memory_order_relaxed);
}
void intrusive_ptr_decrement(Driver* p) {
if (!internal::DecrementReferenceCountIfGreaterThanOne(p->reference_count_)) {
p->DestroyLastReference();
}
}
void EncodeCacheKeyAdl(std::string* out, const DriverPtr& ptr) {
return ptr->EncodeCacheKey(out);
}
Result<Spec> KvStore::spec(SpecRequestOptions&& options) const {
TENSORSTORE_ASSIGN_OR_RETURN(auto driver_spec,
driver->spec(std::move(options)));
return Spec(std::move(driver_spec), path);
}
Result<std::string> KvStore::ToUrl() const {
TENSORSTORE_ASSIGN_OR_RETURN(auto spec, this->spec());
return spec.ToUrl();
}
Result<KvStore> KvStore::base() const {
return driver->GetBase(path, transaction);
}
Result<DriverSpecPtr> Driver::spec(SpecRequestOptions&& options) const {
TENSORSTORE_ASSIGN_OR_RETURN(auto spec, GetBoundSpec());
internal::ApplyContextBindingMode(spec, options.context_binding_mode,
ContextBindingMode::strip);
TENSORSTORE_RETURN_IF_ERROR(spec.Set(std::move(options)));
return spec;
}
Result<DriverSpecPtr> Driver::GetBoundSpec() const {
return absl::UnimplementedError(
"KeyValueStore does not support JSON representation");
}
SupportedFeatures Driver::GetSupportedFeatures(
const KeyRange& key_range) const {
return SupportedFeatures::kNone;
}
void Driver::EncodeCacheKey(std::string* out) const {
internal::EncodeCacheKey(out, reinterpret_cast<uintptr_t>(this));
}
Result<KvStore> Driver::GetBase(std::string_view path,
const Transaction& transaction) const {
return {std::in_place};
}
}
namespace internal_kvstore {
DriverRegistry& GetDriverRegistry() {
static absl::NoDestructor<DriverRegistry> registry;
return *registry;
}
}
template serialization::Registry&
serialization::GetRegistry<internal::IntrusivePtr<const kvstore::DriverSpec>>();
namespace kvstore {
Driver::~Driver() = default;
Future<KvStore> Open(Spec spec, OpenOptions&& options) {
if (!spec.valid()) {
return absl::InvalidArgumentError("Cannot open null kvstore spec");
}
return MapFutureValue(
InlineExecutor{},
[path = std::move(spec.path),
transaction =
std::move(options.transaction)](DriverPtr& driver) mutable {
return KvStore(std::move(driver), std::move(path),
std::move(transaction));
},
kvstore::Open(std::move(spec.driver),
static_cast<DriverOpenOptions&&>(options)));
}
Future<KvStore> Open(::nlohmann::json json_spec, OpenOptions&& options) {
TENSORSTORE_ASSIGN_OR_RETURN(auto spec, Spec::FromJson(std::move(json_spec)));
return Open(std::move(spec), std::move(options));
}
namespace {
struct OpenDriverCache {
absl::Mutex mutex;
absl::flat_hash_map<std::string, Driver*> map ABSL_GUARDED_BY(mutex);
};
OpenDriverCache& GetOpenDriverCache() {
static absl::NoDestructor<OpenDriverCache> cache_;
return *cache_;
}
}
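// Open() first binds the spec's context resources, then consults a
// process-wide cache keyed by the spec's cache key so that identical specs
// share a single Driver instance.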
Future<DriverPtr> Open(DriverSpecPtr spec, DriverOpenOptions&& options) {
TENSORSTORE_RETURN_IF_ERROR(spec.BindContext(options.context));
std::string cache_identifier;
spec->EncodeCacheKey(&cache_identifier);
{
auto& open_cache = GetOpenDriverCache();
absl::MutexLock lock(&open_cache.mutex);
auto it = open_cache.map.find(cache_identifier);
if (it != open_cache.map.end()) {
ABSL_LOG_IF(INFO, kvstore_cache_logging)
<< "Reusing cached kvstore: " << QuoteString(cache_identifier);
return DriverPtr(it->second);
}
}
return MapFutureValue(
InlineExecutor{},
[cache_identifier =
std::move(cache_identifier)](DriverPtr driver) mutable {
auto& open_cache = GetOpenDriverCache();
absl::MutexLock lock(&open_cache.mutex);
auto p = open_cache.map.emplace(cache_identifier, driver.get());
if (p.second) {
driver->cache_identifier_ = std::move(cache_identifier);
ABSL_LOG_IF(INFO, kvstore_cache_logging)
<< "Inserted kvstore into cache: "
<< QuoteString(driver->cache_identifier_);
} else {
ABSL_LOG_IF(INFO, kvstore_cache_logging)
<< "Reusing cached kvstore: " << QuoteString(cache_identifier);
}
return DriverPtr(p.first->second);
},
spec->DoOpen());
}
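// Dropping the last reference must also remove the driver from the open
// cache; the decrement happens under the cache mutex so a concurrent Open()
// cannot resurrect a driver that is being destroyed.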
void Driver::DestroyLastReference() {
auto& open_cache = GetOpenDriverCache();
if (!cache_identifier_.empty()) {
absl::MutexLock lock(&open_cache.mutex);
if (reference_count_.fetch_sub(1, std::memory_order_acq_rel) != 1) {
return;
}
auto it = open_cache.map.find(cache_identifier_);
if (it != open_cache.map.end()) {
assert(it->second == this);
open_cache.map.erase(it);
ABSL_LOG_IF(INFO, kvstore_cache_logging)
<< "Removed kvstore from open cache: "
<< QuoteString(cache_identifier_);
}
} else {
if (reference_count_.fetch_sub(1, std::memory_order_acq_rel) != 1) {
return;
}
}
delete this;
}
Future<ReadResult> Driver::Read(Key key, ReadOptions options) {
return absl::UnimplementedError("KeyValueStore does not support reading");
}
Future<TimestampedStorageGeneration> Driver::Write(Key key,
std::optional<Value> value,
WriteOptions options) {
return absl::UnimplementedError("KeyValueStore does not support writing");
}
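// ExperimentalCopyRangeFrom is currently unimplemented; the list-receiver
// based implementation below is compiled out with #if 0.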
#if 0
namespace {
struct CopyRangeListReceiver
: public internal::AtomicReferenceCount<CopyRangeListReceiver> {
using Ptr = internal::IntrusivePtr<CopyRangeListReceiver>;
internal::OpenTransactionPtr target_transaction;
DriverPtr source_driver;
absl::Time source_staleness_bound;
DriverPtr target_driver;
size_t source_prefix_length;
std::string target_prefix;
Promise<void> promise;
FutureCallbackRegistration cancel_registration;
template <typename Cancel>
friend void set_starting(const Ptr& self, Cancel&& cancel) {
self->cancel_registration =
self->promise.ExecuteWhenNotNeeded(std::forward<Cancel>(cancel));
}
friend void set_stopping(const Ptr& self) {
self->cancel_registration.Unregister();
}
friend void set_error(const Ptr& self, absl::Status&& error) {
SetDeferredResult(self->promise, std::move(error));
}
friend void set_done(const Ptr& self) {}
friend void set_value(const Ptr& self, ListEntry&& entry) {
ReadOptions options;
options.staleness_bound = self->source_staleness_bound;
std::string target_key =
absl::StrCat(self->target_prefix,
std::string_view(entry.key).substr(std::min(
self->source_prefix_length, entry.key.size())));
auto read_future =
self->source_driver->Read(std::move(entry.key), std::move(options));
Link(
[self, target_key = std::move(target_key)](
Promise<void> promise, ReadyFuture<ReadResult> future) {
TENSORSTORE_ASSIGN_OR_RETURN(auto read_result,
std::move(future.result()),
SetDeferredResult(self->promise, _));
if (!read_result.has_value()) return;
Link(
[](Promise<void> promise,
ReadyFuture<TimestampedStorageGeneration> future) {
TENSORSTORE_RETURN_IF_ERROR(future.result(),
SetDeferredResult(promise, _));
},
std::move(promise),
kvstore::Write(KvStore(self->target_driver, std::move(target_key),
internal::TransactionState::ToTransaction(
self->target_transaction)),
"", read_result.value));
},
self->promise, std::move(read_future));
}
};
}
#endif
Future<const void> Driver::ExperimentalCopyRangeFrom(
const internal::OpenTransactionPtr& transaction, const KvStore& source,
Key target_prefix, CopyRangeOptions options) {
return absl::UnimplementedError("CopyRange not supported");
#if 0
auto receiver = internal::MakeIntrusivePtr<CopyRangeListReceiver>();
if (source.transaction != no_transaction) {
return absl::UnimplementedError(
"CopyRange does not support a source KvStore with a transaction");
}
receiver->target_transaction = transaction;
receiver->target_driver.reset(this);
receiver->source_driver = source.driver;
receiver->source_staleness_bound = options.source_staleness_bound;
receiver->source_prefix_length = source.path.size();
receiver->target_prefix = std::move(target_prefix);
auto [promise, future] = PromiseFuturePair<void>::Make(std::in_place);
receiver->promise = std::move(promise);
ListOptions list_options;
list_options.staleness_bound = options.source_staleness_bound;
list_options.range = KeyRange::AddPrefix(source.path, options.source_range);
source.driver->ListImpl(std::move(list_options), std::move(receiver));
return std::move(future);
#endif
}
Future<const void> Driver::DeleteRange(KeyRange range) {
return absl::UnimplementedError(
"KeyValueStore does not support deleting by range");
}
void Driver::ListImpl(ListOptions options, ListReceiver receiver) {
execution::submit(FlowSingleSender{ErrorSender{absl::UnimplementedError(
"KeyValueStore does not support listing")}},
std::move(receiver));
}
ListSender Driver::List(ListOptions options) {
struct ListSender {
IntrusivePtr<Driver> self;
ListOptions options;
void submit(ListReceiver receiver) {
self->ListImpl(options, std::move(receiver));
}
};
return ListSender{IntrusivePtr<Driver>(this), std::move(options)};
}
std::string Driver::DescribeKey(std::string_view key) {
return tensorstore::QuoteString(key);
}
absl::Status Driver::AnnotateError(std::string_view key,
std::string_view action,
const absl::Status& error,
SourceLocation loc) {
return AnnotateErrorWithKeyDescription(DescribeKey(key), action, error, loc);
}
absl::Status Driver::AnnotateErrorWithKeyDescription(
std::string_view key_description, std::string_view action,
const absl::Status& error, SourceLocation loc) {
if (absl::StrContains(error.message(), key_description)) {
return error;
}
return tensorstore::MaybeAnnotateStatus(
error, absl::StrCat("Error ", action, " ", key_description), loc);
}
bool operator==(const KvStore& a, const KvStore& b) {
return a.driver == b.driver && a.path == b.path &&
a.transaction == b.transaction;
}
}
namespace serialization {
namespace {
using DriverSpecPtrNonNullDirectSerializer =
RegistrySerializer<internal::IntrusivePtr<const kvstore::DriverSpec>>;
using DriverSpecPtrNonNullSerializer = NonNullIndirectPointerSerializer<
internal::IntrusivePtr<const kvstore::DriverSpec>,
DriverSpecPtrNonNullDirectSerializer>;
struct DriverPtrNonNullDirectSerializer {
[[nodiscard]] static bool Encode(EncodeSink& sink,
const kvstore::DriverPtr& value) {
TENSORSTORE_ASSIGN_OR_RETURN(auto driver_spec, value->spec(retain_context),
(sink.Fail(_), false));
return DriverSpecPtrNonNullSerializer().Encode(sink, driver_spec);
}
[[nodiscard]] static bool Decode(DecodeSource& source,
kvstore::DriverPtr& value) {
kvstore::DriverSpecPtr driver_spec;
if (!DriverSpecPtrNonNullSerializer().Decode(source, driver_spec)) {
return false;
}
TENSORSTORE_ASSIGN_OR_RETURN(value,
kvstore::Open(std::move(driver_spec)).result(),
(source.Fail(_), false));
return true;
}
};
using DriverPtrSerializer =
IndirectPointerSerializer<kvstore::DriverPtr,
DriverPtrNonNullDirectSerializer>;
}
}
}
TENSORSTORE_DEFINE_SERIALIZER_SPECIALIZATION(
tensorstore::kvstore::DriverPtr,
tensorstore::serialization::DriverPtrSerializer())
TENSORSTORE_DEFINE_SERIALIZER_SPECIALIZATION(
tensorstore::kvstore::KvStore,
tensorstore::serialization::ApplyMembersSerializer<
tensorstore::kvstore::KvStore>())
TENSORSTORE_DEFINE_GARBAGE_COLLECTION_SPECIALIZATION(
tensorstore::kvstore::Driver,
tensorstore::garbage_collection::PolymorphicGarbageCollection<
tensorstore::kvstore::Driver>)
TENSORSTORE_DEFINE_GARBAGE_COLLECTION_SPECIALIZATION(
tensorstore::kvstore::KvStore,
tensorstore::garbage_collection::ApplyMembersGarbageCollection<
    tensorstore::kvstore::KvStore>) |
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/context.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
namespace kvstore = tensorstore::kvstore;
using ::tensorstore::MatchesStatus;
TEST(KeyValueStoreTest, OpenInvalid) {
auto context = tensorstore::Context::Default();
EXPECT_THAT(kvstore::Open({{"driver", "invalid"}}, context).result(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"driver\": "
"\"invalid\" is not registered"));
}
} |
525 | cpp | google/tensorstore | future | tensorstore/util/future.cc | tensorstore/util/future_test.cc | #ifndef TENSORSTORE_UTIL_FUTURE_H_
#define TENSORSTORE_UTIL_FUTURE_H_
#include <stddef.h>
#include <atomic>
#include <cassert>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/status/status.h"
#include "absl/time/time.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future_impl.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
class [[nodiscard]] AnyFuture;
template <typename T>
class [[nodiscard]] Future;
template <typename T>
class [[nodiscard]] Promise;
template <typename T>
class [[nodiscard]] PromiseFuturePair;
template <typename T>
class [[nodiscard]] ReadyFuture;
template <typename T>
constexpr inline bool IsFuture = false;
template <typename T>
constexpr inline bool IsFuture<Future<T>> = true;
template <typename T>
constexpr inline bool IsFuture<ReadyFuture<T>> = true;
template <typename SourceT, typename DestT>
constexpr inline bool IsFutureConvertible =
internal::IsConstConvertible<SourceT, DestT>;
template <typename T>
using UnwrapFutureType = typename internal_future::UnwrapFutureHelper<T>::type;
class FutureCallbackRegistration {
public:
FutureCallbackRegistration() = default;
void Unregister() noexcept {
if (!rep_) {
return;
}
rep_->Unregister(true);
rep_.reset();
}
void UnregisterNonBlocking() noexcept {
if (!rep_) {
return;
}
rep_->Unregister(false);
rep_.reset();
}
void operator()() noexcept { this->Unregister(); }
private:
friend class internal_future::FutureAccess;
explicit FutureCallbackRegistration(internal_future::CallbackPointer pointer)
: rep_(std::move(pointer)) {}
internal_future::CallbackPointer rep_;
};
template <typename T>
class Promise {
static_assert(!std::is_reference_v<T>, "T must not be a reference type.");
static_assert(!IsFuture<T>, "T may not be a Future type.");
static_assert(!IsResult<T>, "T may not be a Result type.");
using SharedState = internal_future::FutureStateType<T>;
public:
using result_type = internal_future::ResultType<T>;
using value_type = T;
Promise() = default;
template <typename U, std::enable_if_t<IsFutureConvertible<U, T>>* = nullptr>
Promise(Promise<U> x) noexcept
: rep_(std::move(internal_future::FutureAccess::rep_pointer(x))) {}
template <typename U, std::enable_if_t<IsFutureConvertible<U, T>>* = nullptr>
Promise& operator=(Promise<U> x) noexcept {
rep_ = std::move(internal_future::FutureAccess::rep_pointer(x));
return *this;
}
void reset() noexcept { rep_.reset(); }
bool null() const noexcept { return rep_ == nullptr; }
bool ready() const noexcept { return rep().ready(); }
bool result_needed() const noexcept { return rep().result_needed(); }
std::add_lvalue_reference_t<result_type> raw_result() const {
return rep().result;
}
template <typename... U,
bool SfinaeNotConst = !std::is_const_v<T>>
std::enable_if_t<
(SfinaeNotConst && std::is_constructible_v<result_type, U...>), bool>
SetResult(U&&... u) const noexcept {
return rep().SetResult(std::forward<U>(u)...);
}
template <typename U = T>
std::enable_if_t<!std::is_const_v<U>, bool> SetReady() const noexcept {
return rep().SetReady();
}
template <typename Callback>
FutureCallbackRegistration ExecuteWhenForced(Callback&& callback) const {
auto& rep = this->rep();
if (rep.has_future()) {
const auto value = rep.state_.load(std::memory_order_acquire);
if ((value & (internal_future::FutureStateBase::kReady |
internal_future::FutureStateBase::kForcing)) == 0) {
using Impl =
internal_future::ForceCallback<T,
internal::remove_cvref_t<Callback>>;
return internal_future::FutureAccess::Construct<
FutureCallbackRegistration>(
internal_future::PromiseStatePointer(rep_)
.release()
->RegisterForceCallback(
new Impl(&rep, std::forward<Callback>(callback))));
}
if ((value & (internal_future::FutureStateBase::kReady |
internal_future::FutureStateBase::kForcing)) ==
internal_future::FutureStateBase::kForcing) {
std::forward<Callback>(callback)(*this);
}
}
return {};
}
template <typename Callback>
FutureCallbackRegistration ExecuteWhenNotNeeded(Callback&& callback) const {
auto& rep = this->rep();
if (rep.result_needed()) {
using Impl = internal_future::ResultNotNeededCallback<
internal::remove_cvref_t<Callback>>;
return internal_future::FutureAccess::Construct<
FutureCallbackRegistration>(rep.RegisterNotNeededCallback(
new Impl(&rep, std::forward<Callback>(callback))));
}
std::forward<Callback>(callback)();
return {};
}
Future<T> future() const {
auto& rep = this->rep();
if (!rep.AcquireFutureReference()) return {};
return internal_future::FutureAccess::Construct<Future<T>>(
internal_future::FutureStatePointer(&rep, internal::adopt_object_ref));
}
private:
explicit Promise(internal_future::PromiseStatePointer rep)
: rep_(std::move(rep)) {}
friend class internal_future::FutureAccess;
constexpr SharedState& rep() const {
return static_cast<SharedState&>(*rep_);
}
internal_future::PromiseStatePointer rep_;
};
template <typename T, typename... U>
std::enable_if_t<std::is_constructible_v<Result<T>, U...>, ReadyFuture<T>>
MakeReadyFuture(U&&... u) {
auto pair = PromiseFuturePair<T>::Make(std::forward<U>(u)...);
pair.promise.reset();
return ReadyFuture<T>(pair.future);
}
ReadyFuture<const void> MakeReadyFuture();
class AnyFuture {
using SharedState = internal_future::FutureStateBase;
public:
explicit AnyFuture() = default;
AnyFuture(const AnyFuture&) = default;
AnyFuture(AnyFuture&&) = default;
AnyFuture& operator=(const AnyFuture&) = default;
AnyFuture& operator=(AnyFuture&&) = default;
inline void IgnoreFuture() const {}
void reset() noexcept { rep_.reset(); }
bool null() const noexcept { return rep_ == nullptr; }
bool ready() const noexcept { return rep().ready(); }
void Wait() const noexcept { rep().Wait(); }
bool WaitFor(absl::Duration duration) const noexcept {
return rep().WaitFor(duration);
}
bool WaitUntil(absl::Time deadline) const noexcept {
return rep().WaitUntil(deadline);
}
void Force() const noexcept { return rep().Force(); }
const absl::Status& status() const& noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
Wait();
return rep().status();
}
template <class Callback>
FutureCallbackRegistration UntypedExecuteWhenReady(Callback&& callback) {
static_assert(std::is_invocable_v<Callback, AnyFuture>);
if (!rep_->ready()) {
using Impl =
internal_future::ReadyCallback<AnyFuture,
internal::remove_cvref_t<Callback>>;
return internal_future::FutureAccess::Construct<
FutureCallbackRegistration>(rep_->RegisterReadyCallback(
new Impl(rep_.release(), std::forward<Callback>(callback))));
}
std::forward<Callback>(callback)(std::move(*this));
return FutureCallbackRegistration();
}
protected:
friend class internal_future::FutureAccess;
explicit AnyFuture(internal_future::FutureStatePointer rep)
: rep_(std::move(rep)) {}
constexpr internal_future::FutureStateBase& rep() const { return *rep_; }
internal_future::FutureStatePointer rep_;
};
template <typename T>
class Future : public AnyFuture {
static_assert(!std::is_reference_v<T>, "T must not be a reference type.");
static_assert(!IsFuture<T>, "T may not be a Future type.");
static_assert(!IsResult<T>, "T may not be a Result type.");
using SharedState = internal_future::FutureStateType<T>;
public:
using result_type = internal_future::ResultType<T>;
using value_type = T;
Future() = default;
Future(const Future&) = default;
Future(Future&&) = default;
Future& operator=(const Future& src) = default;
Future& operator=(Future&& src) = default;
template <typename U,
std::enable_if_t<(!std::is_same_v<U, T> &&
IsFutureConvertible<U, T>)>* = nullptr>
Future(Future<U> x) noexcept : AnyFuture(std::move(x)) {}
template <typename U, std::enable_if_t<!std::is_same_v<U, T> &&
IsFutureConvertible<U, T>>* = nullptr>
Future& operator=(Future<U> x) noexcept {
rep_ = std::move(internal_future::FutureAccess::rep_pointer(x));
return *this;
}
template <typename U, std::enable_if_t<IsFutureConvertible<U, T>>* = nullptr>
Future(const Result<Future<U>>& result) {
if (result) {
*this = *result;
} else {
*this = MakeReadyFuture<std::remove_const_t<T>>(result.status());
}
}
Future(const absl::Status& status)
: Future(MakeReadyFuture<std::remove_const_t<T>>(status)) {}
Future(absl::Status&& status)
: Future(MakeReadyFuture<std::remove_const_t<T>>(std::move(status))) {}
template <typename U, std::enable_if_t<IsFutureConvertible<U, T>>* = nullptr>
Future(const Result<U>& result)
: Future(MakeReadyFuture<std::remove_const_t<T>>(result)) {}
template <typename U, std::enable_if_t<IsFutureConvertible<U, T>>* = nullptr>
Future(Result<U>&& result)
: Future(MakeReadyFuture<std::remove_const_t<T>>(std::move(result))) {}
template <typename V,
std::enable_if_t<(internal_future::value_conversion<T, V> &&
                              std::is_convertible_v<V&&, result_type>)>* = nullptr>
Future(V&& value)
: Future(
MakeReadyFuture<std::remove_const_t<T>>(std::forward<V>(value))) {}
inline void IgnoreFuture() const {}
using AnyFuture::reset;
using AnyFuture::null;
using AnyFuture::ready;
using AnyFuture::Wait;
using AnyFuture::WaitFor;
using AnyFuture::WaitUntil;
using AnyFuture::Force;
template <class Callback>
FutureCallbackRegistration ExecuteWhenReady(Callback&& callback) && {
static_assert(std::is_invocable_v<Callback, ReadyFuture<T>>);
if (!rep_->ready()) {
using Impl =
internal_future::ReadyCallback<ReadyFuture<T>,
internal::remove_cvref_t<Callback>>;
return internal_future::FutureAccess::Construct<
FutureCallbackRegistration>(rep_->RegisterReadyCallback(
new Impl(rep_.release(), std::forward<Callback>(callback))));
}
std::forward<Callback>(callback)(ReadyFuture<T>(std::move(*this)));
return FutureCallbackRegistration();
}
template <class Callback>
FutureCallbackRegistration ExecuteWhenReady(Callback&& callback) const& {
return Future<T>(*this).ExecuteWhenReady(std::forward<Callback>(callback));
}
std::add_lvalue_reference_t<result_type> result() const
ABSL_ATTRIBUTE_LIFETIME_BOUND {
this->Wait();
return rep().result;
}
std::add_lvalue_reference_t<T> value() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
return result().value();
}
using AnyFuture::status;
private:
explicit Future(internal_future::FutureStatePointer rep)
: AnyFuture(std::move(rep)) {}
friend class internal_future::FutureAccess;
constexpr SharedState& rep() const {
return static_cast<SharedState&>(*rep_);
}
};
template <typename T>
Future(Result<T>&& result) -> Future<T>;
template <typename T>
Future(const Result<T>& result) -> Future<T>;
inline bool HaveSameSharedState(const AnyFuture& a, const AnyFuture& b) {
return internal_future::FutureAccess::rep_pointer(a).get() ==
internal_future::FutureAccess::rep_pointer(b).get();
}
template <typename T>
inline bool HaveSameSharedState(const Promise<T>& a, const AnyFuture& b) {
return internal_future::FutureAccess::rep_pointer(a).get() ==
internal_future::FutureAccess::rep_pointer(b).get();
}
template <typename T>
inline bool HaveSameSharedState(const AnyFuture& a, const Promise<T>& b) {
return internal_future::FutureAccess::rep_pointer(a).get() ==
internal_future::FutureAccess::rep_pointer(b).get();
}
template <typename T, typename U>
inline bool HaveSameSharedState(const Promise<T>& a, const Promise<U>& b) {
return internal_future::FutureAccess::rep_pointer(a).get() ==
internal_future::FutureAccess::rep_pointer(b).get();
}
template <typename |
#include "tensorstore/util/future.h"
#include <stddef.h>
#include <atomic>
#include <chrono>
#include <functional>
#include <memory>
#include <thread>
#include <type_traits>
#include <utility>
#include <benchmark/benchmark.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/internal/metrics/registry.h"
#include "tensorstore/internal/testing/concurrent.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future_impl.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::AnyFuture;
using ::tensorstore::Future;
using ::tensorstore::FutureCallbackRegistration;
using ::tensorstore::InlineExecutor;
using ::tensorstore::IsFutureConvertible;
using ::tensorstore::MakeReadyFuture;
using ::tensorstore::MakeResult;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Promise;
using ::tensorstore::PromiseFuturePair;
using ::tensorstore::ReadyFuture;
using ::tensorstore::Result;
using ::tensorstore::internal_future::FutureAccess;
using ::tensorstore::internal_testing::TestConcurrent;
static_assert(IsFutureConvertible<int, const int>);
static_assert(!IsFutureConvertible<const int, int>);
static_assert(
std::is_same_v<
decltype(FutureAccess::rep_pointer(std::declval<Future<void>&>())),
tensorstore::internal_future::FutureStatePointer&>);
static_assert(
std::is_same_v<decltype(FutureAccess::rep_pointer(
std::declval<const Future<void>&>())),
const tensorstore::internal_future::FutureStatePointer&>);
static_assert(
std::is_same_v<
decltype(FutureAccess::rep_pointer(std::declval<Future<void>&&>())),
tensorstore::internal_future::FutureStatePointer&&>);
static_assert(
std::is_same_v<
decltype(FutureAccess::rep_pointer(std::declval<Promise<void>&>())),
tensorstore::internal_future::PromiseStatePointer&>);
static_assert(
std::is_same_v<decltype(FutureAccess::rep_pointer(
std::declval<const Promise<void>&>())),
const tensorstore::internal_future::PromiseStatePointer&>);
static_assert(
std::is_same_v<
decltype(FutureAccess::rep_pointer(std::declval<Promise<void>&&>())),
tensorstore::internal_future::PromiseStatePointer&&>);
static_assert(!std::is_constructible_v<Result<int>, Result<Future<int>>>);
static_assert(!std::is_convertible_v<Result<int>, Result<Future<int>>>);
static_assert(!std::is_assignable_v<Result<int>, Result<Future<int>>>);
static_assert(std::is_same_v<
Result<Future<void>>,
tensorstore::FlatResult<std::invoke_result_t<Future<void>()>>>);
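// Basic lifetime invariants: default-constructed Future/Promise are null,
// and PromiseFuturePair yields two linked, non-null halves.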
TEST(FutureTest, Valid) {
EXPECT_TRUE(Future<int>().null());
EXPECT_TRUE(Promise<int>().null());
auto pair = PromiseFuturePair<int>::Make();
EXPECT_FALSE(pair.future.null());
EXPECT_FALSE(pair.promise.null());
auto future2 = pair.promise.future();
EXPECT_FALSE(future2.null());
}
TEST(FutureTest, MakeReadyFuture) {
Future<int> future = MakeReadyFuture<int>(3);
EXPECT_EQ(true, future.ready());
EXPECT_EQ(3, future.result().value());
Result<int> result{tensorstore::in_place};
bool got_result = false;
future.ExecuteWhenReady([&](ReadyFuture<int> r) {
got_result = true;
result = r.result();
});
EXPECT_TRUE(got_result);
EXPECT_EQ(result, future.result());
}
TEST(FutureTest, MakeInPlace) {
auto pair = PromiseFuturePair<int>::Make(tensorstore::in_place, 4);
pair.promise.reset();
EXPECT_EQ(4, pair.future.value());
}
TEST(FutureTest, ConstructFromValue) {
Future<int> x = 3;
EXPECT_EQ(3, x.value());
}
TEST(FutureTest, ConstructFromValueConst) {
Future<const int> x = 3;
EXPECT_EQ(3, x.value());
}
TEST(FutureTest, FlattenResultError) {
Future<int> x = MakeResult<Future<int>>(absl::UnknownError("Error"));
EXPECT_THAT(x.result(), MatchesStatus(absl::StatusCode::kUnknown, "Error"));
}
TEST(FutureTest, FlattenResultErrorConst) {
Future<const int> x = MakeResult<Future<int>>(absl::UnknownError("Error"));
EXPECT_THAT(x.result(), MatchesStatus(absl::StatusCode::kUnknown, "Error"));
}
TEST(FutureTest, FlattenResultSuccess) {
auto pair = PromiseFuturePair<int>::Make();
Future<int> x = MakeResult(pair.future);
EXPECT_TRUE(HaveSameSharedState(pair.future, x));
}
TEST(FutureTest, FlattenResultSuccessConstConvert) {
auto pair = PromiseFuturePair<int>::Make();
Future<const int> x = MakeResult(pair.future);
EXPECT_TRUE(HaveSameSharedState(pair.future, x));
}
TEST(FutureTest, FlattenResultLvalue) {
Result<Future<int>> f1 = absl::UnknownError("");
Future<int> f2 = f1;
EXPECT_EQ(absl::UnknownError(""), GetStatus(f2.result()));
}
TEST(FutureTest, SetResult) {
{
auto pair = PromiseFuturePair<int>::Make();
EXPECT_FALSE(pair.promise.ready());
EXPECT_TRUE(pair.promise.result_needed());
EXPECT_FALSE(pair.future.ready());
Result<int> result{tensorstore::in_place};
bool got_result = false;
pair.future.ExecuteWhenReady([&](ReadyFuture<int> r) {
got_result = true;
result = r.result();
});
EXPECT_FALSE(got_result);
EXPECT_TRUE(pair.promise.SetResult(5));
EXPECT_FALSE(pair.promise.result_needed());
EXPECT_TRUE(pair.future.ready());
EXPECT_TRUE(pair.promise.ready());
EXPECT_EQ(result, 5);
}
{
auto pair = PromiseFuturePair<int>::Make();
pair.promise.SetResult(std::in_place, 6);
EXPECT_TRUE(pair.future.ready());
EXPECT_TRUE(pair.promise.ready());
EXPECT_TRUE(pair.future.result().ok());
}
{
auto pair = PromiseFuturePair<int>::Make();
pair.promise.SetResult(MakeResult(7));
EXPECT_TRUE(pair.future.ready());
EXPECT_TRUE(pair.promise.ready());
EXPECT_TRUE(pair.future.result().ok());
}
{
auto pair = PromiseFuturePair<int>::Make();
pair.promise.SetResult(absl::InternalError("error"));
EXPECT_TRUE(pair.future.ready());
EXPECT_TRUE(pair.promise.ready());
EXPECT_FALSE(pair.future.result().ok());
}
{
auto pair = PromiseFuturePair<int>::Make();
pair.promise.SetResult(MakeResult<int>(absl::InternalError("error")));
EXPECT_TRUE(pair.future.ready());
EXPECT_TRUE(pair.promise.ready());
EXPECT_FALSE(pair.future.result().ok());
}
}
TEST(FutureTest, SetResultVoid) {
{
auto pair = PromiseFuturePair<void>::Make();
EXPECT_FALSE(pair.promise.ready());
EXPECT_TRUE(pair.promise.result_needed());
EXPECT_FALSE(pair.future.ready());
EXPECT_TRUE(pair.promise.SetResult(absl::OkStatus()));
EXPECT_FALSE(pair.promise.result_needed());
EXPECT_TRUE(pair.future.ready());
EXPECT_TRUE(pair.promise.ready());
EXPECT_TRUE(pair.future.result().ok());
}
{
auto pair = PromiseFuturePair<void>::Make();
pair.promise.SetResult(std::in_place);
EXPECT_TRUE(pair.future.ready());
EXPECT_TRUE(pair.promise.ready());
EXPECT_TRUE(pair.future.result().ok());
}
{
auto pair = PromiseFuturePair<void>::Make();
pair.promise.SetResult(MakeResult<void>(absl::OkStatus()));
EXPECT_TRUE(pair.future.ready());
EXPECT_TRUE(pair.promise.ready());
EXPECT_TRUE(pair.future.result().ok());
}
{
auto pair = PromiseFuturePair<void>::Make();
pair.promise.SetResult(absl::InternalError("error"));
EXPECT_TRUE(pair.future.ready());
EXPECT_TRUE(pair.promise.ready());
EXPECT_FALSE(pair.future.result().ok());
}
{
auto pair = PromiseFuturePair<void>::Make();
pair.promise.SetResult(MakeResult<void>(absl::InternalError("error")));
EXPECT_TRUE(pair.future.ready());
EXPECT_TRUE(pair.promise.ready());
EXPECT_FALSE(pair.future.result().ok());
}
}
TEST(FutureTest, Wait) {
auto pair = PromiseFuturePair<int>::Make();
std::thread thread(
[](Promise<int> promise) {
absl::SleepFor(absl::Milliseconds(20));
EXPECT_TRUE(promise.SetResult(5));
},
std::move(pair.promise));
pair.future.Wait();
EXPECT_EQ(5, pair.future.result());
thread.join();
}
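// The WaitFor/WaitUntil tests race a timed wait against a delayed
// SetResult; each retries up to 100 times and passes on the first run with
// the expected timing, tolerating scheduler jitter.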
TEST(FutureTest, WaitForFailure) {
for (size_t i = 0; i < 100; ++i) {
auto pair = PromiseFuturePair<int>::Make();
EXPECT_FALSE(pair.future.WaitFor(absl::Milliseconds(10)));
std::thread thread(
[](Promise<int> promise) {
absl::SleepFor(absl::Milliseconds(20));
EXPECT_TRUE(promise.SetResult(5));
},
pair.promise);
const bool ready = pair.future.WaitFor(absl::Milliseconds(5));
thread.join();
if (!ready) {
return;
}
}
FAIL();
}
TEST(FutureTest, WaitForSuccess) {
for (size_t i = 0; i < 100; ++i) {
auto pair = PromiseFuturePair<int>::Make();
std::thread thread(
[](Promise<int> promise) {
absl::SleepFor(absl::Milliseconds(5));
EXPECT_TRUE(promise.SetResult(5));
},
pair.promise);
const bool ready1 = pair.future.WaitFor(absl::Milliseconds(20));
const bool ready2 = pair.future.WaitFor(absl::Milliseconds(10));
thread.join();
if (ready1 && ready2) {
return;
}
}
FAIL();
}
TEST(FutureTest, WaitUntilFailure) {
for (size_t i = 0; i < 100; ++i) {
auto pair = PromiseFuturePair<int>::Make();
EXPECT_FALSE(pair.future.WaitUntil(absl::Now() - absl::Milliseconds(10)));
EXPECT_FALSE(pair.future.WaitUntil(absl::Now() + absl::Milliseconds(10)));
std::thread thread(
[](Promise<int> promise) {
absl::SleepFor(absl::Milliseconds(20));
EXPECT_TRUE(promise.SetResult(5));
},
pair.promise);
const bool ready =
pair.future.WaitUntil(absl::Now() + absl::Milliseconds(5));
thread.join();
if (!ready) {
return;
}
}
FAIL();
}
TEST(FutureTest, WaitUntilSuccess) {
for (size_t i = 0; i < 100; ++i) {
auto pair = PromiseFuturePair<int>::Make();
std::thread thread(
[](Promise<int> promise) {
absl::SleepFor(absl::Milliseconds(5));
EXPECT_TRUE(promise.SetResult(5));
},
pair.promise);
const bool ready1 =
pair.future.WaitUntil(absl::Now() + absl::Milliseconds(20));
const bool ready2 =
pair.future.WaitUntil(absl::Now() + absl::Milliseconds(10));
thread.join();
if (ready1 && ready2) {
return;
}
}
FAIL();
}
TEST(FutureTest, SetResultTwice) {
auto pair = PromiseFuturePair<int>::Make();
EXPECT_TRUE(pair.promise.SetResult(3));
EXPECT_EQ(3, pair.future.result());
EXPECT_EQ(false, pair.promise.SetResult(5));
EXPECT_EQ(3, pair.future.result());
}
TEST(FutureTest, ExecuteWhenNotNeeded) {
auto pair = PromiseFuturePair<int>::Make();
bool no_future = false;
pair.promise.ExecuteWhenNotNeeded([&] { no_future = true; });
EXPECT_FALSE(no_future);
pair.future.reset();
EXPECT_FALSE(pair.promise.result_needed());
EXPECT_TRUE(no_future);
}
TEST(FutureTest, ExecuteWhenNotNeededBeforeForced) {
auto pair = PromiseFuturePair<int>::Make();
bool no_future = false;
pair.promise.ExecuteWhenNotNeeded([&] { no_future = true; });
EXPECT_FALSE(no_future);
pair.future.reset();
EXPECT_FALSE(pair.promise.result_needed());
EXPECT_TRUE(no_future);
}
TEST(FutureTest, ExecuteWhenNotNeededUnregister) {
auto pair = PromiseFuturePair<int>::Make();
bool no_future = false;
auto registration =
pair.promise.ExecuteWhenNotNeeded([&] { no_future = true; });
EXPECT_FALSE(no_future);
registration.Unregister();
pair.future.reset();
EXPECT_FALSE(no_future);
}
TEST(FutureTest, ExecuteWhenNotNeededImmediate) {
auto pair = PromiseFuturePair<int>::Make();
bool no_future = false;
pair.future.reset();
auto registration =
pair.promise.ExecuteWhenNotNeeded([&] { no_future = true; });
EXPECT_TRUE(no_future);
registration.Unregister();
}
TEST(FutureTest, ExecuteWhenReadyUnregisterTwice) {
auto pair = PromiseFuturePair<int>::Make();
bool invoked = false;
auto registration =
pair.future.ExecuteWhenReady([&](ReadyFuture<int>) { invoked = true; });
EXPECT_FALSE(invoked);
auto registration2 = registration;
registration.Unregister();
registration2.Unregister();
pair.promise.SetResult(3);
EXPECT_FALSE(invoked);
}
TEST(FutureTest, ExecuteWhenNotNeededThenForce) {
auto pair = PromiseFuturePair<int>::Make();
bool no_future = false;
auto registration =
pair.promise.ExecuteWhenNotNeeded([&] { no_future = true; });
pair.future.Force();
pair.future.reset();
EXPECT_TRUE(no_future);
registration.Unregister();
}
TEST(FutureTest, ExecuteWhenReadyUnregisterSelf) {
auto pair = PromiseFuturePair<int>::Make();
bool invoked = false;
FutureCallbackRegistration registration;
registration = pair.future.ExecuteWhenReady([&](ReadyFuture<int>) {
invoked = true;
registration();
});
pair.promise.SetResult(3);
EXPECT_TRUE(invoked);
}
TEST(FutureTest, ExecuteWhenReadyUnregisterSelfTwice) {
auto pair = PromiseFuturePair<int>::Make();
bool invoked = false;
FutureCallbackRegistration registration;
registration = pair.future.ExecuteWhenReady([&](ReadyFuture<int>) {
invoked = true;
auto registration_copy = registration;
registration();
registration_copy();
});
pair.promise.SetResult(3);
EXPECT_TRUE(invoked);
}
TEST(FutureTest, Destructor) {
auto pair = PromiseFuturePair<int>::Make();
static_cast<void>(pair);
}
TEST(FutureTest, DestructorExecuteWhenReady) {
auto pair = PromiseFuturePair<int>::Make();
pair.future.ExecuteWhenReady([&](ReadyFuture<int>) {});
}
TEST(FutureTest, ExecuteWhenReadyUnregisterOther) {
auto pair = PromiseFuturePair<int>::Make();
bool invoked = false;
FutureCallbackRegistration registration;
pair.future.ExecuteWhenReady([&](ReadyFuture<int>) { registration(); });
registration =
pair.future.ExecuteWhenReady([&](ReadyFuture<int>) { invoked = true; });
pair.promise.SetResult(3);
EXPECT_FALSE(invoked);
}
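// The *Concurrent tests below use TestConcurrent to repeatedly interleave
// callback invocation with unregistration, checking that Unregister() blocks
// until an in-progress callback completes while UnregisterNonBlocking() does
// not.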
TEST(FutureTest, ExecuteWhenReadyUnregisterConcurrent) {
PromiseFuturePair<int> pair;
std::atomic<bool> unregistered;
FutureCallbackRegistration registration;
TestConcurrent(
1000,
[&] {
unregistered = false;
pair = PromiseFuturePair<int>::Make();
registration = pair.future.ExecuteWhenReady([&](ReadyFuture<int>) {
for (int i = 0; i < 100; ++i) {
EXPECT_FALSE(unregistered.load());
}
});
},
[&] { EXPECT_TRUE(unregistered.load()); },
[&] { pair.promise.SetResult(3); },
[&] {
registration.Unregister();
unregistered = true;
});
}
TEST(FutureTest, ExecuteWhenReadyUnregisterNonBlockingConcurrent) {
PromiseFuturePair<int> pair;
std::atomic<bool> callback_started, unregister_returned, callback_finished;
FutureCallbackRegistration registration;
TestConcurrent(
1,
[&] {
callback_started = false;
callback_finished = false;
unregister_returned = false;
pair = PromiseFuturePair<int>::Make();
registration = pair.future.ExecuteWhenReady([&](ReadyFuture<int>) {
callback_started = true;
while (unregister_returned == false) {
}
callback_finished = true;
});
},
[&] {
EXPECT_TRUE(callback_started);
EXPECT_TRUE(unregister_returned);
EXPECT_TRUE(callback_finished);
},
[&] { pair.promise.SetResult(3); },
[&] {
while (!callback_started) {
}
EXPECT_FALSE(callback_finished);
registration.UnregisterNonBlocking();
unregister_returned = true;
});
}
TEST(FutureTest, ExecuteWhenNotNeededUnregisterConcurrent) {
PromiseFuturePair<int> pair;
std::atomic<bool> unregistered;
FutureCallbackRegistration registration;
TestConcurrent(
1000,
[&] {
unregistered = false;
pair = PromiseFuturePair<int>::Make();
registration = pair.promise.ExecuteWhenNotNeeded([&] {
for (int i = 0; i < 100; ++i) {
EXPECT_FALSE(unregistered.load());
}
});
},
[&] { EXPECT_TRUE(unregistered.load()); },
[&] { pair.promise.SetResult(3); },
[&] {
registration.Unregister();
unregistered = true;
});
}
TEST(FutureTest, ExecuteWhenForcedUnregisterConcurrent) {
PromiseFuturePair<int> pair;
std::atomic<bool> unregistered;
FutureCallbackRegistration registration;
TestConcurrent(
1000,
[&] {
unregistered = false;
pair = PromiseFuturePair<int>::Make();
registration = pair.promise.ExecuteWhenForced([&](Promise<int>) {
for (int i = 0; i < 100; ++i) {
EXPECT_FALSE(unregistered.load());
}
});
},
[&] { EXPECT_TRUE(unregistered.load()); },
[&] { pair.future.Force(); },
[&] {
registration.Unregister();
unregistered = true;
});
}
TEST(FutureTest, SetResultInForceCallback) {
auto pair = PromiseFuturePair<int>::Make();
pair.promise.ExecuteWhenForced([](Promise<int> p) { p.SetResult(5); });
EXPECT_FALSE(pair.future.ready());
pair.future.Force();
  EXPECT_TRUE(pair.future.ready());
EXPECT_EQ(5, pair.future.result());
}
TEST(FutureTest, ForceCallbackAddedAfterForced) {
auto pair = PromiseFuturePair<int>::Make();
auto sentinel = std::make_shared<int>();
pair.future.Force();
bool callback_ran = false;
pair.promise.ExecuteWhenForced(
[sentinel, &callback_ran](Promise<int> p) { callback_ran = true; });
EXPECT_TRUE(callback_ran);
EXPECT_EQ(1, sentinel.use_count());
EXPECT_FALSE(pair.future.ready());
}
TEST(FutureTest, ForceCallbackAddedAfterForcedWithNoFuturesRemaining) {
auto pair = PromiseFuturePair<int>::Make();
auto sentinel = std::make_shared<int>();
pair.future.Force();
pair.future.reset();
bool callback_ran = false;
pair.promise.ExecuteWhenForced(
[sentinel, &callback_ran](Promise<int> p) { callback_ran = true; });
EXPECT_FALSE(callback_ran);
EXPECT_EQ(1, sentinel.use_count());
EXPECT_FALSE(pair.promise.result_needed());
}
TEST(FutureTest, ForceCallbackDestroyedAfterForce) {
auto pair = PromiseFuturePair<int>::Make();
auto sentinel = std::make_shared<int>();
pair.promise.ExecuteWhenForced(
[sentinel](Promise<int> p) { p.SetResult(5); });
EXPECT_EQ(2, sentinel.use_count());
EXPECT_FALSE(pair.future.ready());
pair.future.Force();
EXPECT_EQ(1, sentinel.use_count());
  EXPECT_TRUE(pair.future.ready());
EXPECT_EQ(5, pair.future.result());
}
TEST(FutureTest, ForceAfterReady) {
auto pair = PromiseFuturePair<int>::Make();
bool forced = false;
auto sentinel = std::make_shared<int>();
pair.promise.ExecuteWhenForced(
[&forced, sentinel](Promise<int> p) { forced = true; });
EXPECT_EQ(2, sentinel.use_count());
pair.promise.SetResult(3);
EXPECT_FALSE(forced);
EXPECT_EQ(1, sentinel.use_count());
pair.future.Force();
EXPECT_FALSE(forced);
}
TEST(FutureTest, ForceCallbacksDestroyedWhenNoFuturesRemain) {
auto pair = PromiseFuturePair<int>::Make();
bool forced = false;
auto sentinel = std::make_shared<int>();
pair.promise.ExecuteWhenForced(
[&forced, sentinel](Promise<int> p) { forced = true; });
EXPECT_EQ(2, sentinel.use_count());
pair.future.reset();
EXPECT_EQ(1, sentinel.use_count());
EXPECT_FALSE(forced);
}
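// Helper that runs `call_when_copied` when it is copied (i.e. at callback
// registration time) and `call_when_invoked` when invoked, simulating a state
// change that races with registration.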
struct CallOnCopy {
CallOnCopy(const CallOnCopy& x)
: call_when_copied(x.call_when_copied),
call_when_invoked(x.call_when_invoked) {
call_when_copied();
}
CallOnCopy(std::function<void()> call_when_copied,
std::function<void()> call_when_invoked)
: call_when_copied(call_when_copied),
call_when_invoked(call_when_invoked) {}
template <typename... Arg>
void operator()(Arg&&...) {
call_when_invoked();
}
std::function<void()> call_when_copied, call_when_invoked;
};
TEST(FutureTest, SetReadyCalledConcurrentlyWithExecuteWhenReady) {
bool was_called = false;
auto pair = PromiseFuturePair<int>::Make();
pair.future.ExecuteWhenReady(CallOnCopy{[&] { pair.promise.SetResult(5); },
[&] { was_called = true; }});
EXPECT_TRUE(was_called);
EXPECT_EQ(5, pair.future.result().value());
}
TEST(FutureTest, ForceCalledConcurrentlyWithExecuteWhenForced) {
bool was_called = false;
auto sentinel = std::make_shared<int>();
auto pair = PromiseFuturePair<int>::Make();
pair.promise.ExecuteWhenForced(CallOnCopy{
[&] { pair.future.Force(); }, [&, sentinel] { was_called = true; }});
EXPECT_TRUE(was_called);
EXPECT_EQ(1, sentinel.use_count());
}
TEST(FutureTest, ForceAndThenSetResultCalledConcurrentlyWithExecuteWhenForced) {
bool was_called = false;
auto sentinel = std::make_shared<int>();
auto pair = PromiseFuturePair<int>::Make();
pair.promise.ExecuteWhenForced(CallOnCopy{[&] { pair.future.Force(); },
[&, sentinel] {
was_called = true;
pair.promise.SetResult(5);
}});
EXPECT_TRUE(was_called);
EXPECT_EQ(1, sentinel.use_count());
EXPECT_EQ(5, pair.future.result().value());
}
TEST(FutureTest, LastFutureReleasedConcurrentlyWithExecuteWhenNotNeeded) {
bool was_called = false;
auto sentinel = std::make_shared<int>();
auto pair = PromiseFuturePair<int>::Make();
pair.promise.ExecuteWhenNotNeeded(CallOnCopy{
[&] { pair.future.reset(); }, [&, sentinel] { was_called = true; }});
EXPECT_TRUE(was_called);
EXPECT_EQ(1, sentinel.use_count());
}
TEST(FutureTest, LastFutureReleasedConcurrentlyWithExecuteWhenForced) {
bool was_called = false;
auto sentinel = std::make_shared<int>();
auto pair = PromiseFuturePair<int>::Make();
pair.promise.ExecuteWhenForced(CallOnCopy{
[&] { pair.future.reset(); }, [&, sentinel] { was_called = true; }});
EXPECT_FALSE(was_called);
EXPECT_EQ(1, sentinel.use_count());
}
TEST(FutureTest, SetResultCalledConcurrentlyWithExecuteWhenForced) {
bool was_called = false;
auto sentinel = std::make_shared<int>();
auto pair = PromiseFuturePair<int>::Make();
pair.promise.ExecuteWhenForced(
CallOnCopy{[&] { pair.promise.SetResult(5); },
[&, sentinel] { was_called = true; }});
EXPECT_FALSE(was_called);
EXPECT_EQ(1, sentinel.use_count());
EXPECT_EQ(5, pair.future.result().value());
}
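// Releasing the last promise without setting a result makes the future ready
// with an error (a default-constructed UnknownError).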
TEST(FutureTest, PromiseBroken) {
auto pair = PromiseFuturePair<int>::Make();
pair.promise = {};
EXPECT_TRUE(pair.future.ready());
EXPECT_FALSE(pair.future.result().has_value());
EXPECT_EQ(absl::UnknownError(""), pair.future.result().status());
}
TEST(FutureTest, ConvertInt) {
auto pair = PromiseFuturePair<int>::Make();
Future<const int> f = pair.future;
Promise<const int> p = pair.promise;
}
TEST(FutureTest, ConvertVoid) {
auto pair = PromiseFuturePair<void>::Make();
Future<const void> f = pair.future;
Promise<const void> p = pair.promise;
pair.promise.SetResult(tensorstore::MakeResult());
f.value();
}
TEST(FutureTest, ConvertVoid2) {
Future<const void> f;
Promise<const void> p;
auto pair = PromiseFuturePair<void>::Make();
f = pair.future;
p = pair.promise;
pair.promise.SetResult(std::in_place);
f.value();
}
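// Verifies that non-copyable, non-movable result types can be constructed in
// place, either via Make(args...) or via raw_result().emplace().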
struct NonMovable {
NonMovable(int value) : value(value) {}
NonMovable(NonMovable const&) = delete;
NonMovable(NonMovable&&) = delete;
int value;
};
TEST(FutureTest, NonMovableTypeInitialize) {
auto pair = PromiseFuturePair<NonMovable>::Make(3);
pair.promise.SetReady();
EXPECT_EQ(3, pair.future.value().value);
}
TEST(FutureTest, NonMovableTypeSetReady) {
auto pair = PromiseFuturePair<NonMovable>::Make();
pair.promise.raw_result().emplace(5);
pair.promise.SetReady();
EXPECT_EQ(5, pair.future.value().value);
}
TEST(HaveSameSharedStateTest, Invalid) {
Future<int> fa, fb;
Future<const int> cf;
Promise<int> pa, pb;
  Promise<const int> cp;
EXPECT_TRUE(HaveSameSharedState(fa, fb));
EXPECT_TRUE(HaveSameSharedState(fa, cf));
EXPECT_TRUE(HaveSameSharedState(pa, pb));
EXPECT_TRUE(HaveSameSharedState(pa, fa));
EXPECT_TRUE(HaveSameSharedState(fa, pb));
EXPECT_TRUE(HaveSameSharedState(pa, cf));
}
TEST(HaveSameSharedStateTest, Valid) {
auto pair1 = PromiseFuturePair<void>::Make();
auto pair2 = PromiseFuturePair<void>::Make();
EXPECT_TRUE(HaveSameSharedState(pair1.future, pair1.future));
EXPECT_TRUE(HaveSameSharedState(pair1.future, pair1.promise));
EXPECT_TRUE(HaveSameSharedState(pair1.promise, pair1.future));
EXPECT_TRUE(HaveSameSharedState(pair1.promise, pair1.promise));
EXPECT_FALSE(HaveSameSharedState(pair1.promise, pair2.promise));
EXPECT_FALSE(HaveSameSharedState(pair1.promise, pair2.future));
EXPECT_FALSE(HaveSameSharedState(pair1.future, pair2.future));
EXPECT_FALSE(HaveSameSharedState(pair1.future, pair2.promise));
}
TEST(AcquireFutureReferenceTest, ExistingFutureNotReady) {
auto pair = PromiseFuturePair<void>::Make();
auto future2 = pair.promise.future();
EXPECT_TRUE(HaveSameSharedState(future2, pair.future));
}
TEST(AcquireFutureReferenceTest, ExistingFutureReady) {
auto pair = PromiseFuturePair<void>::Make();
pair.promise.SetReady();
auto future2 = pair.promise.future();
EXPECT_TRUE(HaveSameSharedState(future2, pair.future));
}
TEST(AcquireFutureReferenceTest, NoExistingFutureNotReady) {
auto pair = PromiseFuturePair<void>::Make();
pair.future.reset();
auto future2 = pair.promise.future();
  EXPECT_TRUE(future2.null());
}
TEST(AcquireFutureReferenceTest, NoExistingFutureReady) {
auto pair = PromiseFuturePair<void>::Make();
pair.future.reset();
pair.promise.SetReady();
auto future2 = pair.promise.future();
EXPECT_TRUE(HaveSameSharedState(future2, pair.promise));
}
TEST(LinkTest, MultipleSimple) {
auto a_pair = PromiseFuturePair<int>::Make();
auto b_pair = PromiseFuturePair<int |
526 | cpp | google/tensorstore | context | tensorstore/context.cc | tensorstore/context_test.cc | #ifndef TENSORSTORE_CONTEXT_H_
#define TENSORSTORE_CONTEXT_H_
#include <stddef.h>
#include <stdint.h>
#include <cassert>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context_impl_base.h"
#include "tensorstore/internal/cache_key/cache_key.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json/same.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/json_serialization_options.h"
#include "tensorstore/serialization/fwd.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
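// Immutable collection of lazily-constructed shared resources, specified via
// JSON.  A minimal usage sketch (the "int_resource" provider referenced here
// is defined and registered in the unit test further below):
//
//   auto context = Context::Default();
//   auto resource = context.GetResource<IntResource>();  // Result<...>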
class Context {
public:
using ToJsonOptions = JsonSerializationOptions;
using FromJsonOptions = JsonSerializationOptions;
class Spec {
public:
Spec() = default;
TENSORSTORE_DECLARE_JSON_DEFAULT_BINDER(Spec, FromJsonOptions,
ToJsonOptions)
private:
friend class internal_context::Access;
friend class Context;
friend class internal::ContextSpecBuilder;
internal_context::ContextSpecImplPtr impl_;
};
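  // Handle to a resource from the given `Provider`.  May hold either an
  // unbound resource spec or a bound resource; `valid()` and `has_resource()`
  // distinguish the states.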
template <typename Provider>
class Resource {
using ResourceImpl = internal_context::ResourceImpl<Provider>;
public:
using ToJsonOptions = JsonSerializationOptions;
using FromJsonOptions = JsonSerializationOptions;
Resource() = default;
static Resource<Provider> DefaultSpec() {
Resource<Provider> r;
r.impl_ = internal_context::DefaultResourceSpec(Provider::id);
return r;
}
auto* get() const noexcept {
return has_resource()
? &(static_cast<ResourceImpl*>(impl_.get().get())->value_)
: nullptr;
}
auto* operator->() const noexcept { return get(); }
auto& operator*() const noexcept {
assert(has_resource());
return *get();
}
bool has_resource() const { return impl_.get() && impl_.get().tag() == 0; }
bool valid() const { return !!impl_; }
friend bool operator==(const Resource& a, const Resource& b) {
return a.impl_ == b.impl_;
}
friend bool operator!=(const Resource& a, const Resource& b) {
return !(a == b);
}
Result<::nlohmann::json> ToJson(
const ToJsonOptions& options = ToJsonOptions{}) const {
return internal_json_binding::ToJson(
static_cast<const Resource&>(*this),
internal_json_binding::DefaultBinder<>, options);
}
static Result<Resource<Provider>> FromJson(
::nlohmann::json j,
const FromJsonOptions& options = FromJsonOptions{}) {
return internal_json_binding::FromJson<Resource<Provider>>(
std::move(j), internal_json_binding::DefaultBinder<>, options);
}
static constexpr internal_context::ResourceJsonBinderImpl<Provider,
Resource>
default_json_binder = {};
absl::Status BindContext(const Context& context) {
TENSORSTORE_ASSIGN_OR_RETURN(*this, context.GetResource(*this));
return absl::OkStatus();
}
absl::Status BindContext(internal::ContextResourceCreationContext context);
void UnbindContext(
const internal::ContextSpecBuilder& context_spec_builder);
void StripContext() { internal_context::StripContext(impl_); }
private:
friend class Context;
friend class internal::ContextSpecBuilder;
friend class internal_context::Access;
friend struct internal_context::ResourceJsonBinderImpl<Provider, Resource>;
internal_context::ResourceOrSpecPtr impl_;
};
Context() = default;
static Context Default();
explicit Context(const Spec& spec, Context parent = {});
static Result<Context> FromJson(::nlohmann::json json_spec,
Context parent = {},
FromJsonOptions options = {});
template <typename Provider>
Result<Resource<Provider>> GetResource(
const Resource<Provider>& resource_spec) const {
Resource<Provider> resource;
TENSORSTORE_RETURN_IF_ERROR(internal_context::GetOrCreateResource(
impl_.get(), resource_spec.impl_.get(), nullptr,
resource.impl_));
return resource;
}
template <typename Provider>
Result<Resource<Provider>> GetResource(
const ::nlohmann::json& json_spec) const {
TENSORSTORE_ASSIGN_OR_RETURN(auto spec,
Resource<Provider>::FromJson(json_spec));
return GetResource(spec);
}
template <typename Provider>
Result<Resource<Provider>> GetResource() const {
return GetResource<Provider>(Provider::id);
}
explicit operator bool() const { return static_cast<bool>(impl_); }
friend bool operator==(const Context& a, const Context& b) {
return a.impl_ == b.impl_;
}
friend bool operator!=(const Context& a, const Context& b) {
return !(a == b);
}
Context::Spec spec() const;
Context parent() const;
private:
friend class internal_context::Access;
internal_context::ContextImplPtr impl_;
};
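// Specifies how bound context resources in a spec are handled when the spec
// is copied or serialized (see ApplyContextBindingMode below): `retain` keeps
// them bound, `unbind` converts them back to unbound resource specs, and
// `strip` resets them to default specs.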
enum class ContextBindingMode : unsigned char {
unspecified,
retain,
unbind,
strip,
};
enum class ContextBindingState : unsigned char {
unbound,
unknown,
bound
};
constexpr ContextBindingMode retain_context = ContextBindingMode::retain;
constexpr ContextBindingMode unbind_context = ContextBindingMode::unbind;
constexpr ContextBindingMode strip_context = ContextBindingMode::strip;
namespace internal {
class ContextSpecBuilder {
public:
ContextSpecBuilder() = default;
explicit operator bool() const { return static_cast<bool>(impl_); }
static ContextSpecBuilder Make(ContextSpecBuilder parent = {},
Context::Spec existing_spec = {});
template <typename Provider>
Context::Resource<Provider> AddResource(
const Context::Resource<Provider>& resource) const {
Context::Resource<Provider> resource_spec;
resource_spec.impl_ =
internal_context::AddResourceOrSpec(*this, resource.impl_.get());
return resource_spec;
}
Context::Spec spec() const;
private:
friend class internal_context::Access;
internal_context::BuilderImplPtr impl_;
internal_context::ContextSpecImplPtr spec_impl_;
};
TENSORSTORE_DECLARE_JSON_BINDER(ContextSpecDefaultableJsonBinder, Context::Spec,
JsonSerializationOptions,
JsonSerializationOptions)
bool IsPartialBindingContext(const Context& context);
inline bool GetRecordBindingState(const internal::ContextSpecBuilder& builder) {
return internal_context::Access::impl(builder).get().tag() != 0;
}
void SetRecordBindingState(internal::ContextSpecBuilder& builder,
bool record_binding_state);
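// Copy-on-write helpers for spec types that hold a nested context spec and
// binding state: if the pointee is shared (use_count() != 1), it is cloned
// before its binding state is changed.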
template <typename Ptr>
absl::Status BindContextCopyOnWriteWithNestedContext(Ptr& ptr,
const Context& context) {
if (!ptr) return absl::OkStatus();
using internal_context::Access;
{
auto& orig_obj = *ptr;
if (Access::context_binding_state(orig_obj) == ContextBindingState::bound) {
return absl::OkStatus();
}
if (orig_obj.use_count() != 1) ptr = orig_obj.Clone();
}
using T = internal::remove_cvref_t<decltype(*ptr)>;
auto& obj = const_cast<T&>(*ptr);
Access::context_binding_state(obj) = ContextBindingState::unknown;
if (context && IsPartialBindingContext(context)) {
TENSORSTORE_RETURN_IF_ERROR(obj.BindContext(context));
} else {
Context child_context(Access::context_spec(obj),
context ? context : Context::Default());
TENSORSTORE_RETURN_IF_ERROR(obj.BindContext(child_context));
Access::context_spec(obj) = {};
Access::context_binding_state(obj) = ContextBindingState::bound;
}
return absl::OkStatus();
}
template <typename Ptr>
void UnbindContextCopyOnWriteWithNestedContext(
Ptr& ptr, const ContextSpecBuilder& context_builder) {
if (!ptr) return;
using internal_context::Access;
{
auto& orig_obj = *ptr;
if (Access::context_binding_state(orig_obj) ==
ContextBindingState::unbound) {
return;
}
if (orig_obj.use_count() != 1) ptr = orig_obj.Clone();
}
using T = internal::remove_cvref_t<decltype(*ptr)>;
auto& obj = const_cast<T&>(*ptr);
auto child_builder = internal::ContextSpecBuilder::Make(
context_builder, std::move(Access::context_spec(obj)));
Access::context_spec(obj) = child_builder.spec();
obj.UnbindContext(
const_cast<const internal::ContextSpecBuilder&>(child_builder));
Access::context_binding_state(obj) = ContextBindingState::unbound;
}
template <typename Ptr>
void StripContextCopyOnWriteWithNestedContext(Ptr& ptr) {
if (!ptr) return;
using internal_context::Access;
{
auto& orig_obj = *ptr;
if (orig_obj.use_count() != 1) ptr = orig_obj.Clone();
}
using T = internal::remove_cvref_t<decltype(*ptr)>;
auto& obj = const_cast<T&>(*ptr);
Access::context_spec(obj) = {};
obj.StripContext();
Access::context_binding_state(obj) = ContextBindingState::unbound;
}
template <typename Ptr>
void ApplyContextBindingMode(Ptr& ptr, ContextBindingMode mode,
ContextBindingMode default_mode) {
if (mode == ContextBindingMode::unspecified) mode = default_mode;
switch (mode) {
case ContextBindingMode::unbind:
ptr.UnbindContext();
break;
case ContextBindingMode::strip:
ptr.StripContext();
break;
case ContextBindingMode::retain:
case ContextBindingMode::unspecified:
break;
}
}
template <typename SpecType>
bool ContextBindableSpecsSameViaJson(const SpecType& a, const SpecType& b) {
SpecType a_unbound, b_unbound;
{
auto spec_builder = internal::ContextSpecBuilder::Make();
internal::SetRecordBindingState(spec_builder, true);
a_unbound = a;
a_unbound.UnbindContext(spec_builder);
b_unbound = b;
b_unbound.UnbindContext(spec_builder);
}
JsonSerializationOptions json_serialization_options;
json_serialization_options.preserve_bound_context_resources_ = true;
auto a_json = a_unbound.ToJson(json_serialization_options);
auto b_json = b_unbound.ToJson(json_serialization_options);
if (!a_json.ok() || !b_json.ok()) return false;
return internal_json::JsonSame(*a_json, *b_json);
}
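// Passed to resource providers during resource creation, giving access to the
// creating context and to the container of the resource being created.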
class ContextResourceCreationContext {
public:
internal_context::ContextImpl* context_ = nullptr;
internal_context::ResourceContainer* trigger_ = nullptr;
};
}
namespace internal_json_binding {
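// Wraps `binder` so that, when saving, an object with bound context resources
// is first copied and unbound (optionally recording binding state) before
// being converted to JSON.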
template <typename Binder>
auto NestedContextJsonBinder(Binder binder) {
return [binder = std::move(binder)](auto is_loading,
const JsonSerializationOptions& options,
auto* obj, auto* j) {
if constexpr (!is_loading) {
if (obj->context_binding_state() != ContextBindingState::unbound) {
auto copy = *obj;
internal::ContextSpecBuilder spec_builder;
if (options.preserve_bound_context_resources_) {
internal::SetRecordBindingState(spec_builder, true);
}
copy.UnbindContext(spec_builder);
        return binder(is_loading, options, &copy, j);
}
}
return binder(is_loading, options, obj, j);
};
}
}
template <typename Provider>
absl::Status Context::Resource<Provider>::BindContext(
internal::ContextResourceCreationContext context) {
return internal_context::GetOrCreateResource(context.context_, impl_.get(),
context.trigger_, impl_);
}
template <typename Provider>
void Context::Resource<Provider>::UnbindContext(
const internal::ContextSpecBuilder& context_spec_builder) {
*this = context_spec_builder.AddResource(*this);
}
namespace serialization {
template <typename Provider>
struct Serializer<Context::Resource<Provider>> {
[[nodiscard]] static bool Encode(EncodeSink& sink,
const Context::Resource<Provider>& value) {
return internal_context::EncodeContextResourceOrSpec(
sink, internal_context::Access::impl(value));
}
[[nodiscard]] static bool Decode(DecodeSource& source,
Context::Resource<Provider>& value) {
return internal_context::DecodeContextResourceOrSpec(
source, Provider::id, internal_context::Access::impl(value));
}
};
}
namespace garbage_collection {
template <typename Provider>
struct GarbageCollection<Context::Resource<Provider>> {
constexpr static bool required() { return false; }
};
}
namespace internal {
template <typename Provider>
struct CacheKeyEncoder<Context::Resource<Provider>> {
static void Encode(std::string* out,
const Context::Resource<Provider>& value) {
auto& ptr = internal_context::Access::impl(value);
if (!ptr) {
internal::EncodeCacheKey(out,
internal_context::ResourceOrSpecBase::kNull);
} else {
ptr->EncodeCacheKey(out);
}
}
};
}
}
TENSORSTORE_DECLARE_SERIALIZER_SPECIALIZATION(tensorstore::Context::Spec)
TENSORSTORE_DECLARE_SERIALIZER_SPECIALIZATION(tensorstore::Context)
TENSORSTORE_DECLARE_GARBAGE_COLLECTION_NOT_REQUIRED(tensorstore::Context::Spec)
TENSORSTORE_DECLARE_GARBAGE_COLLECTION_NOT_REQUIRED(tensorstore::Context)
namespace std {
template <typename Provider>
struct pointer_traits<tensorstore::Context::Resource<Provider>> {
using pointer = tensorstore::Context::Resource<Provider>;
using element_type = typename Provider::Resource;
using difference_type = ptrdiff_t;
};
}
#endif
#include "tensorstore/context.h"
#include <stddef.h>
#include <algorithm>
#include <cassert>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/base/no_destructor.h"
#include "absl/base/thread_annotations.h"
#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/str_join.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context_impl.h"
#include "tensorstore/context_resource_provider.h"
#include "tensorstore/internal/cache_key/cache_key.h"
#include "tensorstore/internal/container/heterogeneous_container.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json/value_as.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/mutex.h"
#include "tensorstore/internal/riegeli/delimited.h"
#include "tensorstore/json_serialization_options.h"
#include "tensorstore/serialization/fwd.h"
#include "tensorstore/serialization/json.h"
#include "tensorstore/serialization/json_bindable.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_context {
ResourceProviderImplBase::~ResourceProviderImplBase() = default;
ResourceOrSpecBase::~ResourceOrSpecBase() = default;
ResourceImplBase::~ResourceImplBase() = default;
ResourceSpecImplBase::~ResourceSpecImplBase() = default;
ContextImplPtr GetCreator(ResourceImplBase& resource) {
absl::MutexLock lock(&resource.mutex_);
auto* creator_ptr = resource.weak_creator_;
if (!creator_ptr ||
!internal::IncrementReferenceCountIfNonZero(*creator_ptr)) {
return {};
}
return ContextImplPtr(creator_ptr, internal::adopt_object_ref);
}
void ResourceOrSpecPtrTraits::increment(ResourceOrSpecBase* p) {
intrusive_ptr_increment(p);
}
void ResourceOrSpecPtrTraits::decrement(ResourceOrSpecBase* p) {
intrusive_ptr_decrement(p);
}
void ResourceImplWeakPtrTraits::increment(ResourceOrSpecBase* p) {
intrusive_ptr_increment(p);
}
void ResourceImplWeakPtrTraits::decrement(ResourceOrSpecBase* p) {
intrusive_ptr_decrement(p);
}
void ResourceImplStrongPtrTraits::increment(ResourceImplBase* p) {
intrusive_ptr_increment(p);
p->spec_->provider_->AcquireContextReference(*p);
}
void ResourceImplStrongPtrTraits::decrement(ResourceImplBase* p) {
p->spec_->provider_->ReleaseContextReference(*p);
intrusive_ptr_decrement(p);
}
void intrusive_ptr_increment(ContextSpecImpl* p) {
intrusive_ptr_increment(
static_cast<internal::AtomicReferenceCount<ContextSpecImpl>*>(p));
}
void intrusive_ptr_decrement(ContextSpecImpl* p) {
intrusive_ptr_decrement(
static_cast<internal::AtomicReferenceCount<ContextSpecImpl>*>(p));
}
void intrusive_ptr_increment(ContextImpl* p) {
intrusive_ptr_increment(
static_cast<internal::AtomicReferenceCount<ContextImpl>*>(p));
}
void intrusive_ptr_decrement(ContextImpl* p) {
intrusive_ptr_decrement(
static_cast<internal::AtomicReferenceCount<ContextImpl>*>(p));
}
ContextImpl::ContextImpl() = default;
ContextImpl::~ContextImpl() {
for (const auto& resource_container : resources_) {
auto& result = resource_container->result_;
if (!result.ok()) continue;
auto& resource = **result;
absl::MutexLock lock(&resource.mutex_); | #include "tensorstore/context.h"
#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
#include <tuple>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context_impl.h"
#include "tensorstore/context_resource_provider.h"
#include "tensorstore/internal/cache_key/cache_key.h"
#include "tensorstore/internal/cache_key/std_optional.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_optional.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/internal/testing/concurrent.h"
#include "tensorstore/json_serialization_options.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/std_tuple.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace tensorstore {
template <typename Provider>
auto* GetRawPointer(const Context::Resource<Provider>& resource) {
return resource.get();
}
}
namespace {
namespace jb = tensorstore::internal_json_binding;
using ::tensorstore::Context;
using ::tensorstore::IncludeDefaults;
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Result;
using ::tensorstore::internal::ContextResourceCreationContext;
using ::tensorstore::internal::ContextResourceRegistration;
using ::tensorstore::internal::ContextResourceTraits;
using ::tensorstore::internal::ContextSpecBuilder;
using ::tensorstore::internal_testing::TestConcurrent;
using ::tensorstore::serialization::SerializationRoundTrip;
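// Minimal test resource provider: the resource is an int64 value with a
// default of 42, configured via a JSON object {"value": ...}.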
struct IntResource : public ContextResourceTraits<IntResource> {
struct Spec {
std::int64_t value;
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.value);
};
};
using Resource = std::int64_t;
static constexpr char id[] = "int_resource";
static Spec Default() { return {42}; }
static constexpr auto JsonBinder() {
return jb::Object(jb::Member(
"value", jb::Projection(&Spec::value,
jb::DefaultValue([](auto* v) { *v = 42; }))));
}
static Result<Resource> Create(Spec v,
ContextResourceCreationContext context) {
return v.value;
}
static Spec GetSpec(Resource v, const ContextSpecBuilder& builder) {
return {v};
}
};
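// Like IntResource, but marked config_only: the test below verifies that
// resources with equal values produce equal cache keys, i.e. identity is
// determined by configuration value.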
struct IntConfigResource : public ContextResourceTraits<IntConfigResource> {
constexpr static bool config_only = true;
struct Spec {
std::int64_t value;
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.value);
};
};
using Resource = std::int64_t;
static constexpr char id[] = "int_config_resource";
static Spec Default() { return {42}; }
static constexpr auto JsonBinder() { return jb::Projection(&Spec::value); }
static Result<Resource> Create(Spec v,
ContextResourceCreationContext context) {
return v.value;
}
static Spec GetSpec(Resource v, const ContextSpecBuilder& builder) {
return {v};
}
};
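// Resource that counts outstanding strong context references, used to verify
// AcquireContextReference/ReleaseContextReference bookkeeping.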
struct StrongRefResource : public ContextResourceTraits<StrongRefResource> {
struct Spec {
std::int64_t value;
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.value);
};
};
struct Resource {
size_t num_strong_references = 0;
};
static constexpr char id[] = "strongref";
static Spec Default() { return Spec{42}; }
static constexpr auto JsonBinder() {
namespace jb = tensorstore::internal_json_binding;
return jb::Object(jb::Member(
"value", jb::Projection(&Spec::value, jb::DefaultValue([](auto* obj) {
*obj = 7;
}))));
}
static Result<Resource> Create(Spec v,
ContextResourceCreationContext context) {
return Resource{};
}
static Spec GetSpec(const Resource& v, const ContextSpecBuilder& builder) {
return {42};
}
static void AcquireContextReference(Resource& v) {
++v.num_strong_references;
}
static void ReleaseContextReference(Resource& v) {
--v.num_strong_references;
}
};
struct OptionalResource : public ContextResourceTraits<OptionalResource> {
using Spec = std::optional<size_t>;
using Resource = Spec;
static constexpr char id[] = "optional_resource";
static Spec Default() { return {}; }
static constexpr auto JsonBinder() {
namespace jb = tensorstore::internal_json_binding;
return jb::Object(jb::Member(
"limit", jb::DefaultInitializedValue(jb::Optional(
jb::Integer<size_t>(1), [] { return "shared"; }))));
}
static Result<Resource> Create(Spec v,
ContextResourceCreationContext context) {
return v;
}
static Spec GetSpec(Resource v, const ContextSpecBuilder& builder) {
return v;
}
};
const ContextResourceRegistration<IntResource> int_resource_registration;
const ContextResourceRegistration<IntConfigResource>
int_config_resource_registration;
const ContextResourceRegistration<StrongRefResource>
strong_ref_resource_registration;
const ContextResourceRegistration<OptionalResource>
optional_resource_registration;
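// Registration makes each provider available by its `id` to Context,
// Context::Spec, and Context::Resource JSON parsing in the tests below.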
TEST(IntResourceTest, InvalidDirectSpec) {
EXPECT_THAT(Context::Resource<IntResource>::FromJson(nullptr),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected non-null value, but received: null"));
EXPECT_THAT(Context::Resource<IntResource>::FromJson(3),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected object, but received: 3"));
EXPECT_THAT(
Context::Resource<IntResource>::FromJson("foo"),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Invalid reference to \"int_resource\" resource: \"foo\""));
}
TEST(IntResourceTest, Default) {
auto context = Context::Default();
EXPECT_EQ(context, context);
EXPECT_FALSE(context.parent());
auto context2 = Context::Default();
EXPECT_NE(context, context2);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec,
Context::Resource<IntResource>::FromJson("int_resource"));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource,
context.GetResource(resource_spec));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource2,
context.GetResource(resource_spec));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource3,
context2.GetResource(resource_spec));
EXPECT_EQ(resource, resource);
EXPECT_EQ(resource, resource2);
EXPECT_NE(resource, resource3);
EXPECT_THAT(context.GetResource<IntResource>(),
::testing::Optional(resource));
EXPECT_THAT(context.GetResource<IntResource>("int_resource"),
::testing::Optional(resource));
EXPECT_THAT(resource, ::testing::Pointee(42));
EXPECT_THAT(context.GetResource<IntResource>({{"value", 50}}),
::testing::Optional(::testing::Pointee(50)));
}
TEST(IntResourceTest, ValidDirectSpec) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec,
Context::Resource<IntResource>::FromJson({{"value", 7}}));
EXPECT_THAT(context.GetResource(resource_spec),
::testing::Optional(::testing::Pointee(7)));
}
TEST(IntResourceTest, ValidIndirectSpecDefaultId) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto spec, Context::Spec::FromJson({{"int_resource", {{"value", 7}}}}));
auto context = Context(spec);
auto resource_spec = Context::Resource<IntResource>::DefaultSpec();
EXPECT_THAT(context.GetResource(resource_spec),
::testing::Optional(::testing::Pointee(7)));
}
TEST(IntResourceTest, ContextFromJson) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto context, Context::FromJson({{"int_resource", {{"value", 7}}}}));
EXPECT_THAT(context.GetResource<IntResource>(),
::testing::Optional(::testing::Pointee(7)));
}
TEST(IntResourceTest, ValidIndirectSpecDefault) {
auto context = Context::Default();
auto resource_spec = Context::Resource<IntResource>::DefaultSpec();
EXPECT_THAT(context.GetResource(resource_spec),
::testing::Optional(::testing::Pointee(42)));
}
TEST(IntResourceTest, ValidIndirectSpecIdentifier) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto spec, Context::Spec::FromJson({{"int_resource#x", {{"value", 7}}}}));
auto context = Context(spec);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec,
Context::Resource<IntResource>::FromJson("int_resource#x"));
EXPECT_THAT(context.GetResource(resource_spec),
::testing::Optional(::testing::Pointee(7)));
}
TEST(IntResourceTest, UndefinedIndirectReference) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec,
Context::Resource<IntResource>::FromJson("int_resource#x"));
EXPECT_THAT(context.GetResource(resource_spec),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Resource not defined: \"int_resource#x\""));
}
TEST(IntResourceTest, SimpleReference) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec,
Context::Spec::FromJson({
{"int_resource#x", {{"value", 7}}},
{"int_resource#y", "int_resource#x"},
}));
auto context = Context(spec);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec,
Context::Resource<IntResource>::FromJson("int_resource#y"));
EXPECT_THAT(context.GetResource(resource_spec),
::testing::Optional(::testing::Pointee(7)));
}
TEST(IntResourceTest, ReferenceCycle1) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto spec, Context::Spec::FromJson({{"int_resource", "int_resource"}}));
auto context = Context(spec);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec,
Context::Resource<IntResource>::FromJson("int_resource"));
EXPECT_THAT(context.GetResource(resource_spec),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Context resource reference cycle: "
"\"int_resource\":\"int_resource\""));
}
TEST(IntResourceTest, ReferenceCycle2) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec,
Context::Spec::FromJson({
{"int_resource#a", "int_resource#b"},
{"int_resource#b", "int_resource#a"},
}));
auto context = Context(spec);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec,
Context::Resource<IntResource>::FromJson("int_resource#a"));
EXPECT_THAT(context.GetResource(resource_spec),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Context resource reference cycle: "
"\"int_resource#b\":\"int_resource#a\" -> "
"\"int_resource#a\":\"int_resource#b\""));
}
TEST(IntResourceTest, Inherit) {
const ::nlohmann::json json_spec1{
{"int_resource", {{"value", 7}}},
{"int_resource#a", {{"value", 9}}},
{"int_resource#d", {{"value", 42}}},
{"int_resource#c", nullptr},
};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec1,
Context::Spec::FromJson(json_spec1));
EXPECT_THAT(spec1.ToJson(IncludeDefaults{false}),
::testing::Optional(MatchesJson({
{"int_resource", {{"value", 7}}},
{"int_resource#a", {{"value", 9}}},
{"int_resource#d", ::nlohmann::json::object_t{}},
{"int_resource#c", nullptr},
})));
EXPECT_THAT(spec1.ToJson(IncludeDefaults{true}),
::testing::Optional(MatchesJson({
{"int_resource", {{"value", 7}}},
{"int_resource#a", {{"value", 9}}},
{"int_resource#d", {{"value", 42}}},
{"int_resource#c", nullptr},
})));
::nlohmann::json json_spec2{
{"int_resource", {{"value", 8}}},
{"int_resource#b", nullptr},
};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec2,
Context::Spec::FromJson(json_spec2));
auto context1 = Context(spec1);
auto context2 = Context(spec2, context1);
EXPECT_EQ(context1, context2.parent());
EXPECT_THAT(context1.spec().ToJson(IncludeDefaults{true}),
::testing::Optional(MatchesJson(json_spec1)));
EXPECT_THAT(context2.spec().ToJson(),
::testing::Optional(MatchesJson(json_spec2)));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource1,
context2.GetResource(
Context::Resource<IntResource>::FromJson("int_resource").value()));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource2,
context2.GetResource(
Context::Resource<IntResource>::FromJson("int_resource#a").value()));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource3,
context2.GetResource(
Context::Resource<IntResource>::FromJson("int_resource#b").value()));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource4,
context2.GetResource(
Context::Resource<IntResource>::FromJson("int_resource#c").value()));
EXPECT_EQ(8, *resource1);
EXPECT_EQ(9, *resource2);
EXPECT_EQ(7, *resource3);
EXPECT_EQ(42, *resource4);
}
TEST(IntResourceTest, Unknown) {
EXPECT_THAT(Context::Spec::FromJson({
{"foo", {{"value", 7}}},
}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Invalid context resource identifier: \"foo\""));
}
TEST(IntConfigResourceTest, ContextSpec) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto context, Context::FromJson({{"int_config_resource", 111}}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource1,
context.GetResource<IntConfigResource>());
EXPECT_THAT(resource1, ::testing::Pointee(111));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource2,
context.GetResource<IntConfigResource>(222));
EXPECT_THAT(resource2, ::testing::Pointee(222));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource3,
context.GetResource<IntConfigResource>(222));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource4,
context.GetResource<IntConfigResource>(111));
std::string cache_key1, cache_key2, cache_key3, cache_key4;
tensorstore::internal::EncodeCacheKey(&cache_key1, resource1);
tensorstore::internal::EncodeCacheKey(&cache_key2, resource2);
tensorstore::internal::EncodeCacheKey(&cache_key3, resource3);
tensorstore::internal::EncodeCacheKey(&cache_key4, resource4);
EXPECT_EQ(cache_key1, cache_key4);
EXPECT_EQ(cache_key2, cache_key3);
}
TEST(StrongRefResourceTest, DirectSpec) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec, Context::Resource<StrongRefResource>::FromJson(
::nlohmann::json::object_t{}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource,
context.GetResource(resource_spec));
EXPECT_EQ(0, resource->num_strong_references);
}
TEST(StrongRefResourceTest, IndirectSpec) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto spec,
Context::Spec::FromJson({{"strongref", ::nlohmann::json::object_t{}}}));
auto context = Context(spec);
auto resource_spec = Context::Resource<StrongRefResource>::DefaultSpec();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource,
context.GetResource(resource_spec));
EXPECT_EQ(1, resource->num_strong_references);
context = Context();
EXPECT_EQ(0, resource->num_strong_references);
}
TEST(ContextSpecBuilderTest, Simple) {
auto spec =
Context::Spec::FromJson({{"int_resource", {{"value", 5}}}}).value();
auto context = Context(spec);
auto resource_spec = Context::Resource<IntResource>::DefaultSpec();
auto resource = context.GetResource(resource_spec).value();
Context::Spec new_spec;
Context::Resource<IntResource> new_resource_spec;
{
auto builder = ContextSpecBuilder::Make();
new_spec = builder.spec();
new_resource_spec = builder.AddResource(resource);
}
EXPECT_THAT(
new_spec.ToJson(),
::testing::Optional(MatchesJson({{"int_resource", {{"value", 5}}}})));
EXPECT_THAT(new_resource_spec.ToJson(IncludeDefaults{true}),
::testing::Optional(MatchesJson("int_resource")));
auto new_context = Context(new_spec);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto new_resource,
new_context.GetResource(new_resource_spec));
EXPECT_EQ(5, *new_resource);
}
TEST(ContextSpecBuilderTest, Default) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource, Context::Default().GetResource<IntResource>());
Context::Spec new_spec;
Context::Resource<IntResource> new_resource_spec;
{
auto builder = ContextSpecBuilder::Make();
new_spec = builder.spec();
new_resource_spec = builder.AddResource(resource);
}
EXPECT_THAT(
jb::ToJson(new_spec,
tensorstore::internal::ContextSpecDefaultableJsonBinder,
IncludeDefaults{false}),
::testing::Optional(
MatchesJson({{"int_resource", ::nlohmann::json::object_t()}})));
EXPECT_THAT(
new_spec.ToJson(IncludeDefaults{true}),
::testing::Optional(MatchesJson({{"int_resource", {{"value", 42}}}})));
EXPECT_THAT(new_resource_spec.ToJson(IncludeDefaults{true}),
::testing::Optional(MatchesJson("int_resource")));
auto new_context = Context(new_spec);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto new_resource,
new_context.GetResource(new_resource_spec));
EXPECT_THAT(new_resource, ::testing::Pointee(42));
}
TEST(ContextSpecBuilderTest, MultipleContexts) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto spec1, Context::Spec::FromJson({{"int_resource", {{"value", 5}}}}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto spec2, Context::Spec::FromJson({{"int_resource", {{"value", 6}}}}));
auto context1 = Context(spec1);
auto context2 = Context(spec2);
auto resource_spec = Context::Resource<IntResource>::DefaultSpec();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource1,
context1.GetResource(resource_spec));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource2,
context2.GetResource(resource_spec));
Context::Spec new_spec;
Context::Resource<IntResource> new_resource_spec1;
Context::Resource<IntResource> new_resource_spec2;
{
auto builder = ContextSpecBuilder::Make();
new_spec = builder.spec();
new_resource_spec1 = builder.AddResource(resource1);
new_resource_spec2 = builder.AddResource(resource2);
}
EXPECT_THAT(new_spec.ToJson(), ::testing::Optional(MatchesJson({
{"int_resource#0", {{"value", 5}}},
{"int_resource#1", {{"value", 6}}},
})));
EXPECT_EQ("int_resource#0", new_resource_spec1.ToJson());
EXPECT_EQ("int_resource#1", new_resource_spec2.ToJson());
}
TEST(ContextSpecBuilderTest, Inline) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec,
Context::Resource<IntResource>::FromJson({{"value", 5}}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource,
context.GetResource(resource_spec));
Context::Spec new_spec;
Context::Resource<IntResource> new_resource_spec;
{
auto builder = ContextSpecBuilder::Make();
new_spec = builder.spec();
new_resource_spec = builder.AddResource(resource);
}
EXPECT_THAT(new_spec.ToJson(),
::testing::Optional(MatchesJson(::nlohmann::json::object_t())));
EXPECT_THAT(new_resource_spec.ToJson(),
::testing::Optional(MatchesJson({{"value", 5}})));
}
TEST(ContextSpecBuilderTest, InlineEqualToDefault) {
auto context = Context::Default();
auto resource_spec =
Context::Resource<IntResource>::FromJson({{"value", 42}}).value();
auto resource = context.GetResource(resource_spec).value();
Context::Spec new_spec;
Context::Resource<IntResource> new_resource_spec;
{
auto builder = ContextSpecBuilder::Make();
new_spec = builder.spec();
new_resource_spec = builder.AddResource(resource);
}
EXPECT_EQ(::nlohmann::json({}), new_spec.ToJson());
EXPECT_EQ(::nlohmann::json::object_t{},
new_resource_spec.ToJson(IncludeDefaults{false}));
}
TEST(ContextSpecBuilderTest, InlineShared) {
auto context = Context::Default();
auto resource_spec =
Context::Resource<IntResource>::FromJson({{"value", 5}}).value();
auto resource = context.GetResource(resource_spec).value();
Context::Spec new_spec;
Context::Resource<IntResource> new_resource_spec1;
Context::Resource<IntResource> new_resource_spec2;
{
auto builder = ContextSpecBuilder::Make();
new_spec = builder.spec();
new_resource_spec1 = builder.AddResource(resource);
new_resource_spec2 = builder.AddResource(resource);
}
EXPECT_EQ(::nlohmann::json({{"int_resource#0", {{"value", 5}}}}),
new_spec.ToJson());
EXPECT_EQ("int_resource#0", new_resource_spec1.ToJson());
EXPECT_EQ("int_resource#0", new_resource_spec2.ToJson());
}
TEST(ContextSpecBuilderTest, ExcludeDefaultsJson) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto context, Context::FromJson({
{"optional_resource", {{"limit", "shared"}}},
{"optional_resource#a", {{"limit", 5}}},
}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource1,
context.GetResource<OptionalResource>());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource2,
context.GetResource<OptionalResource>("optional_resource#a"));
Context::Spec new_spec;
Context::Resource<OptionalResource> new_resource_spec1;
Context::Resource<OptionalResource> new_resource_spec2;
{
auto builder = ContextSpecBuilder::Make();
new_spec = builder.spec();
new_resource_spec1 = builder.AddResource(resource1);
new_resource_spec2 = builder.AddResource(resource2);
}
EXPECT_THAT(new_spec.ToJson(tensorstore::IncludeDefaults{false}),
::testing::Optional(MatchesJson({
{"optional_resource#a", {{"limit", 5}}},
{"optional_resource", ::nlohmann::json::object_t()},
})));
EXPECT_THAT(new_spec.ToJson(tensorstore::IncludeDefaults{true}),
::testing::Optional(MatchesJson({
{"optional_resource#a", {{"limit", 5}}},
{"optional_resource", {{"limit", "shared"}}},
})));
}
TEST(ContextTest, WeakCreator) {
using ::tensorstore::internal_context::Access;
using ::tensorstore::internal_context::GetCreator;
using ::tensorstore::internal_context::ResourceImplBase;
const ::nlohmann::json json_spec1{
{"int_resource", {{"value", 7}}},
{"int_resource#a", {{"value", 9}}},
{"int_resource#d", {{"value", 42}}},
{"int_resource#c", nullptr},
};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec1,
Context::Spec::FromJson(json_spec1));
::nlohmann::json json_spec2{
{"int_resource", {{"value", 8}}},
{"int_resource#b", nullptr},
};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec2,
Context::Spec::FromJson(json_spec2));
auto context1 = Context(spec1);
auto context2 = Context(spec2, context1);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource1,
context1.GetResource<IntResource>());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource2,
context2.GetResource<IntResource>());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource2_a, context2.GetResource<IntResource>("int_resource#a"));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource2_b, context2.GetResource<IntResource>("int_resource#b"));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource2_c, context2.GetResource<IntResource>("int_resource#c"));
EXPECT_EQ(
Access::impl(context1),
GetCreator(static_cast<ResourceImplBase&>(*Access::impl(resource1))));
EXPECT_EQ(
Access::impl(context1),
GetCreator(static_cast<ResourceImplBase&>(*Access::impl(resource2_a))));
EXPECT_EQ(
Access::impl(context1),
GetCreator(static_cast<ResourceImplBase&>(*Access::impl(resource2_b))));
EXPECT_EQ(
Access::impl(context1),
GetCreator(static_cast<ResourceImplBase&>(*Access::impl(resource2_c))));
EXPECT_EQ(
Access::impl(context2),
GetCreator(static_cast<ResourceImplBase&>(*Access::impl(resource2))));
context2 = Context();
EXPECT_EQ(
Access::impl(context1),
GetCreator(static_cast<ResourceImplBase&>(*Access::impl(resource1))));
EXPECT_FALSE(
GetCreator(static_cast<ResourceImplBase&>(*Access::impl(resource2))));
}
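// Resource whose spec can refer to another resource of the same provider,
// exercising nested binding/unbinding and reference-cycle detection.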
struct NestedResource : public ContextResourceTraits<NestedResource> {
struct Spec {
int value;
Context::Resource<NestedResource> parent;
int GetTotal() const {
int total = value;
if (parent.has_resource()) total += parent->GetTotal();
return total;
}
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.value, x.parent);
};
};
using Resource = Spec;
static constexpr char id[] = "nested_resource";
static Spec Default() { return {42}; }
static constexpr auto JsonBinder() {
return jb::Object(
jb::Member("value",
jb::Projection(&Spec::value,
jb::DefaultValue([](auto* v) { *v = 42; }))),
jb::Member(
"parent",
jb::Projection(
&Spec::parent,
jb::DefaultInitializedPredicate<jb::kNeverIncludeDefaults>(
[](auto* obj) { return !obj->valid(); }))));
}
static Result<Resource> Create(const Spec& spec,
ContextResourceCreationContext context) {
Resource resource = spec;
TENSORSTORE_RETURN_IF_ERROR(resource.parent.BindContext(context));
return resource;
}
static Spec GetSpec(const Resource& resource,
const ContextSpecBuilder& builder) {
Spec spec = resource;
UnbindContext(spec, builder);
return spec;
}
static void UnbindContext(Spec& spec, const ContextSpecBuilder& builder) {
spec.parent.UnbindContext(builder);
}
};
const ContextResourceRegistration<NestedResource> nested_resource_registration;
TEST(NestedResourceTest, Basic) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto context, Context::FromJson({
{"nested_resource#a", {{"value", 1}}},
{"nested_resource#b",
{{"value", 3}, {"parent", "nested_resource#a"}}},
{"nested_resource#c",
{{"value", 5}, {"parent", "nested_resource#b"}}},
{"nested_resource#d",
{{"value", 10}, {"parent", "nested_resource#e"}}},
{"nested_resource#e",
{{"value", 15}, {"parent", "nested_resource#d"}}},
}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto a, context.GetResource<NestedResource>("nested_resource#a"));
EXPECT_FALSE(a->parent.valid());
EXPECT_EQ(1, a->GetTotal());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto b, context.GetResource<NestedResource>("nested_resource#b"));
EXPECT_EQ(a, b->parent);
EXPECT_EQ(4, b->GetTotal());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto c, context.GetResource<NestedResource>("nested_resource#c"));
EXPECT_EQ(b, c->parent);
EXPECT_EQ(9, c->GetTotal());
EXPECT_THAT(
context.GetResource<NestedResource>("nested_resource#d"),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Context resource reference cycle: "
"\"nested_resource#d\" -> "
"\"nested_resource#d\":"
"\\{\"parent\":\"nested_resource#e\",\"value\":10\\} -> "
"\"nested_resource#e\" -> "
"\"nested_resource#e\":"
"\\{\"parent\":\"nested_resource#d\",\"value\":15\\}"));
EXPECT_THAT(context.GetResource<NestedResource>("nested_resource#e"),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Context resource reference cycle: .*"));
}
TEST(ContextSpecBuilderTest, PartiallyBound) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto context_spec, Context::Spec::FromJson({
{"nested_resource#a", {{"value", 2}}},
{"nested_resource#b",
{{"value", 3}, {"parent", "nested_resource#a"}}},
}));
auto context = Context(context_spec);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec,
Context::Resource<NestedResource>::FromJson("nested_resource#b"));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource,
context.GetResource(resource_spec));
Context::Spec new_spec;
Context::Resource<NestedResource> new_resource_spec1;
Context::Resource<NestedResource> new_resource_spec2;
{
auto builder = ContextSpecBuilder::Make({}, context_spec);
new_spec = builder.spec();
new_resource_spec1 = builder.AddResource(resource_spec);
new_resource_spec2 = builder.AddResource(resource);
}
EXPECT_THAT(new_spec.ToJson(),
::testing::Optional(MatchesJson({
{"nested_resource#a", {{"value", 2}}},
{"nested_resource#b",
{{"value", 3}, {"parent", "nested_resource#a"}}},
{"nested_resource#1", {{"value", 2}}},
{"nested_resource#0",
{{"value", 3}, {"parent", "nested_resource#1"}}},
})));
EXPECT_THAT(new_resource_spec1.ToJson(),
::testing::Optional(MatchesJson("nested_resource#b")));
EXPECT_THAT(new_resource_spec2.ToJson(), |
527 | cpp | google/tensorstore | status | tensorstore/util/status.cc | tensorstore/util/status_test.cc | #ifndef TENSORSTORE_STATUS_H_
#define TENSORSTORE_STATUS_H_
#include <optional>
#include <string>
#include <string_view>
#include <type_traits>
#include <utility>
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "tensorstore/internal/preprocessor/expand.h"
#include "tensorstore/internal/source_location.h"
#include "tensorstore/internal/type_traits.h"
namespace tensorstore {
namespace internal {
void MaybeAddSourceLocationImpl(absl::Status& status, SourceLocation loc);
absl::Status MaybeAnnotateStatusImpl(absl::Status source,
std::string_view prefix_message,
std::optional<absl::StatusCode> new_code,
std::optional<SourceLocation> loc);
[[noreturn]] void FatalStatus(const char* message, const absl::Status& status,
SourceLocation loc);
inline absl::Status MaybeConvertStatusTo(
absl::Status status, absl::StatusCode code,
SourceLocation loc = tensorstore::SourceLocation::current()) {
if (status.code() == code) {
if (!status.message().empty()) MaybeAddSourceLocationImpl(status, loc);
return status;
}
return MaybeAnnotateStatusImpl(std::move(status), {}, code, loc);
}
inline absl::Status ConvertInvalidArgumentToFailedPrecondition(
absl::Status status,
SourceLocation loc = tensorstore::SourceLocation::current()) {
if (status.code() == absl::StatusCode::kInvalidArgument ||
status.code() == absl::StatusCode::kOutOfRange) {
return MaybeAnnotateStatusImpl(std::move(status), {},
absl::StatusCode::kFailedPrecondition, loc);
}
return status;
}
template <typename F, typename... Args>
inline absl::Status InvokeForStatus(F&& f, Args&&... args) {
using R = std::invoke_result_t<F&&, Args&&...>;
static_assert(std::is_void_v<R> ||
std::is_same_v<internal::remove_cvref_t<R>, absl::Status>);
if constexpr (std::is_void_v<R>) {
std::invoke(static_cast<F&&>(f), static_cast<Args&&>(args)...);
return absl::OkStatus();
} else {
return std::invoke(static_cast<F&&>(f), static_cast<Args&&>(args)...);
}
}
}
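// Records `loc` as a "source locations" payload on `status`; statuses with an
// empty message are left unchanged.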
inline void MaybeAddSourceLocation(
absl::Status& status,
SourceLocation loc = tensorstore::SourceLocation::current()) {
if (status.message().empty()) return;
internal::MaybeAddSourceLocationImpl(status, loc);
}
std::optional<std::string> AddStatusPayload(absl::Status& status,
std::string_view prefix,
absl::Cord value);
inline absl::Status MaybeAnnotateStatus(
absl::Status source, std::string_view message,
SourceLocation loc = tensorstore::SourceLocation::current()) {
return internal::MaybeAnnotateStatusImpl(std::move(source), message,
std::nullopt, loc);
}
inline absl::Status MaybeAnnotateStatus(
absl::Status source, std::string_view message, absl::StatusCode new_code,
SourceLocation loc = tensorstore::SourceLocation::current()) {
return internal::MaybeAnnotateStatusImpl(std::move(source), message, new_code,
loc);
}
inline const absl::Status& GetStatus(const absl::Status& status) {
return status;
}
inline absl::Status GetStatus(absl::Status&& status) {
return std::move(status);
}
}
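// TENSORSTORE_RETURN_IF_ERROR(expr [, error_expr]) evaluates `expr` to an
// absl::Status; if it is not OK, the enclosing function returns `error_expr`
// (by default the status itself, bound to `_`).  The for-statement provides a
// scope for `_` while still letting the macro be used like a statement.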
#define TENSORSTORE_RETURN_IF_ERROR(...) \
TENSORSTORE_PP_EXPAND( \
TENSORSTORE_INTERNAL_RETURN_IF_ERROR_IMPL(__VA_ARGS__, _))
#define TENSORSTORE_INTERNAL_RETURN_IF_ERROR_IMPL(expr, error_expr, ...) \
for (absl::Status _ = ::tensorstore::GetStatus(expr); \
ABSL_PREDICT_FALSE(!_.ok());) \
return ::tensorstore::MaybeAddSourceLocation(_), error_expr
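// Editor's note (not part of the original source): the single-iteration `for`
// loop above binds the failing status to `_` so that `error_expr` can refer
// to it. Assuming a hypothetical `absl::Status DoWork()`:
//
//     TENSORSTORE_RETURN_IF_ERROR(DoWork());
//     TENSORSTORE_RETURN_IF_ERROR(DoWork(),
//                                 MaybeAnnotateStatus(_, "While working"));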
#define TENSORSTORE_CHECK_OK(...) \
do { \
[](const ::absl::Status& tensorstore_check_ok_condition) { \
if (ABSL_PREDICT_FALSE(!tensorstore_check_ok_condition.ok())) { \
::tensorstore::internal::FatalStatus( \
"Status not ok: " #__VA_ARGS__, tensorstore_check_ok_condition, \
tensorstore::SourceLocation::current()); \
} \
}(::tensorstore::GetStatus((__VA_ARGS__))); \
} while (false)
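// Editor's note (illustrative): unlike TENSORSTORE_RETURN_IF_ERROR, this
// macro does not propagate failures; it prints the status via FatalStatus and
// terminates the process, e.g. TENSORSTORE_CHECK_OK(DoWork()) with the same
// hypothetical DoWork as above.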
#endif
#if !defined(TENSORSTORE_INTERNAL_STATUS_TEST_HACK)
#include "tensorstore/util/status.h"
#endif
#include <array>
#include <cstdio>
#include <exception>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "tensorstore/internal/source_location.h"
namespace tensorstore {
namespace internal {
void MaybeAddSourceLocationImpl(absl::Status& status, SourceLocation loc) {
constexpr const char kSourceLocationKey[] = "source locations";
#if TENSORSTORE_HAVE_SOURCE_LOCATION_CURRENT
if (loc.line() <= 1) return;
std::string_view filename(loc.file_name());
if (auto idx = filename.find("tensorstore"); idx != std::string::npos) {
filename.remove_prefix(idx);
}
auto payload = status.GetPayload(kSourceLocationKey);
if (!payload.has_value()) {
status.SetPayload(kSourceLocationKey, absl::Cord(absl::StrFormat(
"%s:%d", filename, loc.line())));
} else {
payload->Append(absl::StrFormat("\n%s:%d", filename, loc.line()));
status.SetPayload(kSourceLocationKey, std::move(*payload));
}
#endif
}
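// Editor's note (not part of the original source): each annotation appends
// one "file:line" entry to the "source locations" payload, so a status that
// crosses several TENSORSTORE_RETURN_IF_ERROR boundaries accumulates a
// newline-separated trace (file names trimmed to start at "tensorstore").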
absl::Status MaybeAnnotateStatusImpl(absl::Status source,
std::string_view prefix_message,
std::optional<absl::StatusCode> new_code,
std::optional<SourceLocation> loc) {
if (source.ok()) return source;
if (!new_code) new_code = source.code();
size_t index = 0;
std::array<std::string_view, 3> to_join = {};
if (!prefix_message.empty()) {
to_join[index++] = prefix_message;
}
if (!source.message().empty()) {
to_join[index++] = source.message();
}
absl::Status dest(*new_code, (index > 1) ? std::string_view(absl::StrJoin(
to_join.begin(),
to_join.begin() + index, ": "))
: to_join[0]);
source.ForEachPayload([&](auto name, const absl::Cord& value) {
dest.SetPayload(name, value);
});
if (loc) {
MaybeAddSourceLocation(dest, *loc);
}
return dest;
}
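// Editor's sketch (illustrative): the prefix and the original message are
// joined with ": " and payloads are preserved, e.g.
//
//     MaybeAnnotateStatusImpl(absl::UnknownError("Boo"), "Annotated",
//                             std::nullopt, std::nullopt)
//
// yields an UNKNOWN status with message "Annotated: Boo".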
[[noreturn]] void FatalStatus(const char* message, const absl::Status& status,
SourceLocation loc) {
std::fprintf(stderr, "%s:%d: %s: %s\n", loc.file_name(), loc.line(), message,
status.ToString().c_str());
std::terminate();
}
}
std::optional<std::string> AddStatusPayload(absl::Status& status,
std::string_view prefix,
absl::Cord value) {
std::string payload_id(prefix);
int i = 1;
while (true) {
auto p = status.GetPayload(payload_id);
if (!p.has_value()) {
break;
}
if (p.value() == value) return std::nullopt;
payload_id = absl::StrFormat("%s[%d]", prefix, i++);
}
status.SetPayload(payload_id, std::move(value));
return payload_id;
}
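// Editor's sketch (illustrative): repeated calls with the same prefix but
// distinct values get distinct payload ids, while exact duplicates are
// dropped, e.g.
//
//     AddStatusPayload(status, "p", absl::Cord("a"));  // -> "p"
//     AddStatusPayload(status, "p", absl::Cord("b"));  // -> "p[1]"
//     AddStatusPayload(status, "p", absl::Cord("a"));  // -> std::nullopt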
} | #include "tensorstore/util/status.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::MaybeAnnotateStatus;
using ::tensorstore::internal::InvokeForStatus;
using ::tensorstore::internal::MaybeAnnotateStatusImpl;
using ::tensorstore::internal::MaybeConvertStatusTo;
TEST(StatusTest, StrCat) {
const absl::Status s = absl::UnknownError("Message");
EXPECT_THAT(s.ToString(), testing::HasSubstr("UNKNOWN: Message"));
EXPECT_THAT(tensorstore::StrCat(s), testing::HasSubstr("UNKNOWN: Message"));
}
TEST(StatusTest, MaybeAnnotateStatusImpl) {
EXPECT_THAT(MaybeAnnotateStatusImpl(absl::UnknownError("Boo"), {},
absl::StatusCode::kInternal,
tensorstore::SourceLocation::current()),
MatchesStatus(absl::StatusCode::kInternal, "Boo"));
EXPECT_THAT(
MaybeAnnotateStatusImpl(absl::UnknownError("Boo"), "Annotated", {},
tensorstore::SourceLocation::current()),
MatchesStatus(absl::StatusCode::kUnknown, "Annotated: Boo"));
EXPECT_THAT(MaybeAnnotateStatusImpl(absl::UnknownError("Boo"), "Annotated",
absl::StatusCode::kInternal,
tensorstore::SourceLocation::current()),
MatchesStatus(absl::StatusCode::kInternal, "Annotated: Boo"));
}
TEST(StatusTest, MaybeAnnotateStatus) {
EXPECT_THAT(MaybeAnnotateStatus(absl::OkStatus(), "Annotated"),
tensorstore::IsOk());
EXPECT_THAT(MaybeAnnotateStatus(absl::OkStatus(), "Annotated",
tensorstore::SourceLocation::current()),
::tensorstore::IsOk());
auto bar_status = absl::UnknownError("Bar");
bar_status.SetPayload("a", absl::Cord("b"));
auto status = MaybeAnnotateStatus(bar_status, "Annotated");
EXPECT_TRUE(status.GetPayload("a").has_value());
EXPECT_THAT(status,
MatchesStatus(absl::StatusCode::kUnknown, "Annotated: Bar"));
EXPECT_THAT(tensorstore::StrCat(status), testing::HasSubstr("a='b'"));
}
TEST(StatusTest, MaybeConvertStatusTo) {
EXPECT_EQ(absl::OkStatus(),
MaybeConvertStatusTo(absl::OkStatus(),
absl::StatusCode::kDeadlineExceeded));
EXPECT_THAT(MaybeConvertStatusTo(absl::UnknownError("Boo"),
absl::StatusCode::kInternal),
MatchesStatus(absl::StatusCode::kInternal, "Boo"));
}
TEST(StatusTest, InvokeForStatus) {
int count = 0;
auto a = [&](int i) { count += i; };
EXPECT_THAT(InvokeForStatus(a, 1), ::tensorstore::IsOk());
EXPECT_EQ(1, count);
auto b = [&](int i, absl::Status s) {
count += i;
return s;
};
EXPECT_THAT(InvokeForStatus(b, 2, absl::OkStatus()), ::tensorstore::IsOk());
EXPECT_EQ(3, count);
EXPECT_THAT(InvokeForStatus(b, 4, absl::UnknownError("A")),
MatchesStatus(absl::StatusCode::kUnknown, "A"));
EXPECT_EQ(7, count);
auto c = [](int& i, int j) { i += j; };
EXPECT_THAT(InvokeForStatus(std::move(c), std::ref(count), 8),
::tensorstore::IsOk());
EXPECT_EQ(15, count);
}
TEST(StatusTest, ReturnIfError) {
const auto Helper = [](absl::Status s) {
TENSORSTORE_RETURN_IF_ERROR(s);
return absl::UnknownError("No error");
};
EXPECT_THAT(Helper(absl::Status()),
MatchesStatus(absl::StatusCode::kUnknown, "No error"));
EXPECT_THAT(Helper(absl::UnknownError("Got error")),
MatchesStatus(absl::StatusCode::kUnknown, "Got error"));
}
TEST(StatusTest, ReturnIfErrorAnnotate) {
const auto Helper = [](absl::Status s) {
TENSORSTORE_RETURN_IF_ERROR(s, MaybeAnnotateStatus(_, "Annotated"));
return absl::UnknownError("No error");
};
EXPECT_THAT(Helper(absl::Status()),
MatchesStatus(absl::StatusCode::kUnknown, "No error"));
EXPECT_THAT(
Helper(absl::UnknownError("Got error")),
MatchesStatus(absl::StatusCode::kUnknown, "Annotated: Got error"));
}
} |
528 | cpp | google/tensorstore | codec_spec | tensorstore/driver/zarr3/codec/codec_spec.cc | tensorstore/codec_spec_test.cc | #ifndef TENSORSTORE_DRIVER_ZARR3_CODEC_CODEC_SPEC_H_
#define TENSORSTORE_DRIVER_ZARR3_CODEC_CODEC_SPEC_H_
#include <stddef.h>
#include <stdint.h>
#include <array>
#include <optional>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/data_type.h"
#include "tensorstore/driver/zarr3/codec/codec.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_zarr3 {
enum class ZarrCodecKind {
kArrayToArray,
kArrayToBytes,
kBytesToBytes,
};
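// Editor's note (not part of the original source): a zarr v3 chunk codec
// chain is composed of zero or more array->array codecs, exactly one
// array->bytes codec, and zero or more bytes->bytes codecs, which is the
// distinction the three kinds above encode (e.g. transpose -> bytes -> gzip).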
class ZarrCodecSpec : public internal::AtomicReferenceCount<ZarrCodecSpec> {
public:
using Ptr = internal::IntrusivePtr<const ZarrCodecSpec>;
virtual ~ZarrCodecSpec();
virtual ZarrCodecKind kind() const = 0;
virtual absl::Status MergeFrom(const ZarrCodecSpec& other, bool strict) = 0;
virtual Ptr Clone() const = 0;
struct FromJsonOptions {
bool constraints = false;
};
struct ToJsonOptions : public IncludeDefaults {
constexpr ToJsonOptions() = default;
constexpr ToJsonOptions(IncludeDefaults include_defaults)
: IncludeDefaults(include_defaults) {}
bool constraints = false;
};
};
struct ArrayDataTypeAndShapeInfo {
DataType dtype;
DimensionIndex rank = dynamic_rank;
std::optional<std::array<Index, kMaxRank>> shape;
};
struct ArrayCodecChunkLayoutInfo {
std::optional<std::array<DimensionIndex, kMaxRank>> inner_order;
std::optional<std::array<Index, kMaxRank>> read_chunk_shape;
std::optional<std::array<Index, kMaxRank>> codec_chunk_shape;
};
struct ArrayCodecResolveParameters {
DataType dtype;
DimensionIndex rank;
SharedArray<const void> fill_value;
std::optional<std::array<Index, kMaxRank>> read_chunk_shape;
std::optional<std::array<Index, kMaxRank>> codec_chunk_shape;
std::optional<std::array<DimensionIndex, kMaxRank>> inner_order;
};
class ZarrArrayToArrayCodecSpec : public ZarrCodecSpec {
public:
using Ptr = internal::IntrusivePtr<const ZarrArrayToArrayCodecSpec>;
ZarrCodecKind kind() const final;
virtual absl::Status PropagateDataTypeAndShape(
const ArrayDataTypeAndShapeInfo& decoded,
ArrayDataTypeAndShapeInfo& encoded) const = 0;
virtual absl::Status GetDecodedChunkLayout(
const ArrayDataTypeAndShapeInfo& encoded_info,
const ArrayCodecChunkLayoutInfo& encoded,
const ArrayDataTypeAndShapeInfo& decoded_info,
ArrayCodecChunkLayoutInfo& decoded) const = 0;
virtual Result<internal::IntrusivePtr<const ZarrArrayToArrayCodec>> Resolve(
ArrayCodecResolveParameters&& decoded,
ArrayCodecResolveParameters& encoded,
ZarrArrayToArrayCodecSpec::Ptr* resolved_spec) const = 0;
};
struct BytesCodecResolveParameters {
int64_t item_bits = -1;
};
class ZarrArrayToBytesCodecSpec : public ZarrCodecSpec {
public:
using Ptr = internal::IntrusivePtr<const ZarrArrayToBytesCodecSpec>;
ZarrCodecKind kind() const final;
virtual absl::Status GetDecodedChunkLayout(
const ArrayDataTypeAndShapeInfo& array_info,
ArrayCodecChunkLayoutInfo& decoded) const = 0;
virtual bool SupportsInnerOrder(
const ArrayCodecResolveParameters& decoded,
span<DimensionIndex> preferred_inner_order) const = 0;
virtual Result<internal::IntrusivePtr<const ZarrArrayToBytesCodec>> Resolve(
ArrayCodecResolveParameters&& decoded,
BytesCodecResolveParameters& encoded,
ZarrArrayToBytesCodecSpec::Ptr* resolved_spec) const = 0;
virtual size_t sharding_height() const;
};
class ZarrBytesToBytesCodecSpec : public ZarrCodecSpec {
public:
using Ptr = internal::IntrusivePtr<const ZarrBytesToBytesCodecSpec>;
ZarrCodecKind kind() const final;
virtual Result<internal::IntrusivePtr<const ZarrBytesToBytesCodec>> Resolve(
BytesCodecResolveParameters&& decoded,
BytesCodecResolveParameters& encoded,
ZarrBytesToBytesCodecSpec::Ptr* resolved_spec) const = 0;
};
}
}
#endif
#include "tensorstore/driver/zarr3/codec/codec_spec.h"
#include <stddef.h>
#include "absl/base/no_destructor.h"
#include "tensorstore/driver/zarr3/codec/registry.h"
namespace tensorstore {
namespace internal_zarr3 {
ZarrCodecSpec::~ZarrCodecSpec() = default;
ZarrCodecKind ZarrArrayToArrayCodecSpec::kind() const {
return ZarrCodecKind::kArrayToArray;
}
ZarrCodecKind ZarrArrayToBytesCodecSpec::kind() const {
return ZarrCodecKind::kArrayToBytes;
}
size_t ZarrArrayToBytesCodecSpec::sharding_height() const { return 0; }
ZarrCodecKind ZarrBytesToBytesCodecSpec::kind() const {
return ZarrCodecKind::kBytesToBytes;
}
CodecRegistry& GetCodecRegistry() {
static absl::NoDestructor<CodecRegistry> registry;
return *registry;
}
}
} | #include "tensorstore/codec_spec.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
namespace {
using ::tensorstore::serialization::TestSerializationRoundTrip;
TEST(CodecSpecSerializationTest, SerializationRoundTrip) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto codec,
tensorstore::CodecSpec::FromJson({
{"driver", "zarr"},
{"compressor", nullptr},
{"filters", nullptr},
}));
TestSerializationRoundTrip(codec);
}
} |
529 | cpp | google/tensorstore | box | tensorstore/box.cc | tensorstore/box_test.cc | #ifndef TENSORSTORE_BOX_H_
#define TENSORSTORE_BOX_H_
#include <stddef.h>
#include <cassert>
#include <iosfwd>
#include <string>
#include <type_traits>
#include "absl/base/attributes.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/internal/gdb_scripting.h"
#include "tensorstore/internal/multi_vector.h"
#include "tensorstore/internal/multi_vector_view.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/rank.h"
#include "tensorstore/serialization/fwd.h"
#include "tensorstore/static_cast.h"
#include "tensorstore/util/constant_vector.h"
#include "tensorstore/util/extents.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/span.h"
TENSORSTORE_GDB_AUTO_SCRIPT("multi_vector_gdb.py")
namespace tensorstore {
template <DimensionIndex Rank, bool Mutable>
class BoxView;
template <DimensionIndex Rank>
class Box;
namespace internal_box {
template <typename T>
constexpr inline int IsBoxLikeHelper = 0;
template <DimensionIndex Rank>
constexpr inline int IsBoxLikeHelper<const Box<Rank>> = 1;
template <DimensionIndex Rank>
constexpr inline int IsBoxLikeHelper<Box<Rank>> = 2;
template <DimensionIndex Rank, bool Mutable>
constexpr inline int IsBoxLikeHelper<BoxView<Rank, Mutable>> = Mutable ? 2 : 1;
template <DimensionIndex Rank, bool Mutable>
constexpr inline int IsBoxLikeHelper<const BoxView<Rank, Mutable>> =
Mutable ? 2 : 1;
std::string DescribeForCast(DimensionIndex rank);
}
template <typename T>
constexpr inline bool IsBoxLike =
internal_box::IsBoxLikeHelper<internal::remove_cvref_t<T>> != 0;
template <typename T>
constexpr inline bool IsMutableBoxLike =
internal_box::IsBoxLikeHelper<std::remove_reference_t<T>> == 2;
template <typename T, DimensionIndex Rank, typename = void>
constexpr inline bool IsBoxLikeImplicitlyConvertibleToRank = false;
template <typename T, DimensionIndex Rank>
constexpr inline bool IsBoxLikeImplicitlyConvertibleToRank<
T, Rank, std::enable_if_t<IsBoxLike<T>>> =
RankConstraint::Implies(internal::remove_cvref_t<T>::static_rank, Rank);
template <typename T, DimensionIndex Rank, typename = void>
constexpr inline bool IsBoxLikeExplicitlyConvertibleToRank = false;
template <typename T, DimensionIndex Rank>
constexpr inline bool IsBoxLikeExplicitlyConvertibleToRank<
T, Rank, std::enable_if_t<IsBoxLike<T>>> =
RankConstraint::EqualOrUnspecified(internal::remove_cvref_t<T>::static_rank,
Rank);
namespace internal_box {
std::ostream& PrintToOstream(std::ostream& os,
const BoxView<dynamic_rank, false>& view);
bool AreEqual(const BoxView<dynamic_rank, false>& box_a,
const BoxView<dynamic_rank, false>& box_b);
template <DimensionIndex Rank>
bool IsEmpty(span<const Index, Rank> shape) {
for (const Index size : shape) {
if (size == 0) return true;
}
return false;
}
template <bool Const>
using MaybeConstIndex = std::conditional_t<Const, const Index, Index>;
template <DimensionIndex Rank, bool Mutable>
using BoxViewStorage =
internal::MultiVectorViewStorage<Rank, MaybeConstIndex<!Mutable>,
MaybeConstIndex<!Mutable>>;
template <DimensionIndex Rank>
using BoxStorage = internal::MultiVectorStorage<Rank, Index, Index>;
}
template <DimensionIndex Rank = dynamic_rank>
class Box : public internal_box::BoxStorage<Rank> {
using Storage = internal_box::BoxStorage<Rank>;
using Access = internal::MultiVectorAccess<Storage>;
static_assert(IsValidInlineRank(Rank));
public:
constexpr static DimensionIndex static_rank =
(Rank < 0) ? dynamic_rank : Rank;
using RankType = StaticOrDynamicRank<Rank>;
Box() : Box(RankType{}) {}
explicit Box(RankType rank) { set_rank(rank); }
template <typename OriginVec, typename ShapeVec,
typename = std::enable_if_t<(
IsImplicitlyCompatibleFullIndexVector<static_rank, OriginVec> &&
IsImplicitlyCompatibleFullIndexVector<static_rank, ShapeVec>)>>
explicit Box(OriginVec origin, ShapeVec shape) {
Access::Assign(this, span(origin), span(shape));
}
template <size_t N, typename = std::enable_if_t<
RankConstraint::Implies(N, static_rank)>>
explicit Box(const Index (&origin)[N], const Index (&shape)[N]) {
Access::Assign(this, StaticRank<N>{}, origin, shape);
}
template <typename OriginT, typename ShapeT,
typename = std::enable_if_t<internal::IsIndexPack<OriginT, ShapeT>>>
explicit Box(RankType rank, OriginT* origin, ShapeT* shape) {
Access::Assign(this, rank, origin, shape);
}
template <typename ShapeVec,
typename = std::enable_if_t<
IsImplicitlyCompatibleFullIndexVector<static_rank, ShapeVec>>>
explicit Box(const ShapeVec& shape)
: Box(GetStaticOrDynamicExtent(span(shape)),
GetConstantVector<Index, 0>(GetStaticOrDynamicExtent(span(shape)))
.data(),
shape.data()) {}
template <size_t N, typename = std::enable_if_t<
RankConstraint::Implies(N, static_rank)>>
explicit Box(const Index (&shape)[N]) {
Access::Assign(this, StaticRank<N>{},
GetConstantVector<Index, 0, N>().data(), shape);
}
template <typename BoxType,
std::enable_if_t<IsBoxLikeImplicitlyConvertibleToRank<
BoxType, static_rank>>* = nullptr>
explicit Box(const BoxType& other)
: Box(other.rank(), other.origin().data(), other.shape().data()) {}
template <typename BoxType,
typename = std::enable_if_t<
IsBoxLikeExplicitlyConvertibleToRank<BoxType, static_rank>>>
explicit Box(unchecked_t, const BoxType& other)
: Box(StaticRankCast<static_rank, unchecked>(other.rank()),
other.origin().data(), other.shape().data()) {}
explicit Box(unchecked_t, Box&& other) : Box(std::move(other)) {}
template <typename BoxType>
std::enable_if_t<IsBoxLikeImplicitlyConvertibleToRank<BoxType, static_rank>,
Box&>
operator=(const BoxType& other) {
Access::Assign(this, other.rank(), other.origin().data(),
other.shape().data());
return *this;
}
RankType rank() const { return Access::GetExtent(*this); }
IndexInterval operator[](DimensionIndex i) const {
return IndexInterval::UncheckedSized(origin()[i], shape()[i]);
}
IndexIntervalRef operator[](DimensionIndex i) {
return IndexIntervalRef::UncheckedSized(origin()[i], shape()[i]);
}
span<const Index, static_rank> origin() const {
return Access::template get<0>(this);
}
span<Index, static_rank> origin() { return Access::template get<0>(this); }
span<const Index, static_rank> shape() const {
return Access::template get<1>(this);
}
span<Index, static_rank> shape() { return Access::template get<1>(this); }
Index num_elements() const { return ProductOfExtents(shape()); }
bool is_empty() const { return internal_box::IsEmpty(shape()); }
void set_rank(RankType rank) {
Access::Resize(this, rank);
Fill();
}
void Fill(IndexInterval interval = {}) {
std::fill_n(origin().begin(), rank(), interval.inclusive_min());
std::fill_n(shape().begin(), rank(), interval.size());
}
friend std::ostream& operator<<(std::ostream& os, const Box& box) {
return internal_box::PrintToOstream(os, box);
}
template <typename Transformable>
decltype(ApplyIndexTransform(
std::declval<BoxView<RankConstraint::FromInlineRank(Rank), false>>(),
std::declval<Transformable>()))
operator()(Transformable&& transformable) const {
return ApplyIndexTransform(
BoxView<RankConstraint::FromInlineRank(Rank), false>(*this),
std::forward<Transformable>(transformable));
}
};
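// Editor's sketch (illustrative, not part of the original source):
//
//     Box<3> a({1, 2, 3}, {4, 5, 6});  // origin {1,2,3}, shape {4,5,6}
//     Box<> b(a);                      // dynamic rank, copies the bounds
//     b.set_rank(2);                   // both dims reset to the infinite
//                                      // interval, per set_rank()/Fill()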
Box(DimensionIndex rank) -> Box<>;
template <DimensionIndex Rank>
Box(std::integral_constant<DimensionIndex, Rank> rank) -> Box<Rank>;
template <typename Shape,
std::enable_if_t<IsIndexConvertibleVector<Shape>>* = nullptr>
Box(const Shape& shape) -> Box<SpanStaticExtent<Shape>::value>;
template <DimensionIndex Rank>
Box(const Index (&shape)[Rank]) -> Box<Rank>;
template <typename Origin, typename Shape,
std::enable_if_t<(IsIndexConvertibleVector<Origin> &&
IsIndexConvertibleVector<Shape>)>* = nullptr>
Box(const Origin& origin, const Shape& shape)
-> Box<SpanStaticExtent<Origin, Shape>::value>;
template <DimensionIndex Rank>
Box(const Index (&origin)[Rank], const Index (&shape)[Rank]) -> Box<Rank>;
template <DimensionIndex Rank>
Box(const Box<Rank>& box) -> Box<Rank>;
template <DimensionIndex Rank, bool Mutable>
Box(BoxView<Rank, Mutable> box) -> Box<Rank>;
template <DimensionIndex Rank = dynamic_rank, bool Mutable = false>
class BoxView : public internal_box::BoxViewStorage<Rank, Mutable> {
using Storage = internal_box::BoxViewStorage<Rank, Mutable>;
using Access = internal::MultiVectorAccess<Storage>;
static_assert(RankConstraint(Rank).valid());
public:
constexpr static DimensionIndex static_rank = Rank;
using RankType = StaticOrDynamicRank<Rank>;
using IndexType = internal_box::MaybeConstIndex<!Mutable>;
using IndexIntervalType =
std::conditional_t<Mutable, IndexIntervalRef, IndexInterval>;
template <bool SfinaeM = Mutable,
typename = std::enable_if_t<SfinaeM == false>>
BoxView() : BoxView(RankType()) {}
template <bool SfinaeM = Mutable,
typename = std::enable_if_t<SfinaeM == false>>
explicit BoxView(RankType rank) {
Access::Assign(this, GetConstantVector<Index, -kInfIndex>(rank),
GetConstantVector<Index, kInfSize>(rank));
}
template <bool SfinaeM = Mutable,
typename = std::enable_if_t<SfinaeM == false>>
explicit BoxView(
span<const Index, Rank> shape ABSL_ATTRIBUTE_LIFETIME_BOUND) {
const auto rank = GetStaticOrDynamicExtent(shape);
Access::Assign(this, rank, GetConstantVector<Index, 0>(rank).data(),
shape.data());
}
template <size_t N, bool SfinaeM = Mutable,
typename = std::enable_if_t<
(RankConstraint::Implies(N, static_rank) && SfinaeM == false)>>
explicit BoxView(IndexType (&shape ABSL_ATTRIBUTE_LIFETIME_BOUND)[N]) {
const auto rank = std::integral_constant<ptrdiff_t, N>{};
Access::Assign(this, rank, GetConstantVector<Index, 0>(rank).data(), shape);
}
explicit BoxView(span<IndexType, Rank> origin, span<IndexType, Rank> shape) {
Access::Assign(this, origin, shape);
}
template <size_t N, typename = std::enable_if_t<
RankConstraint::Implies(N, static_rank)>>
explicit BoxView(IndexType (&origin ABSL_ATTRIBUTE_LIFETIME_BOUND)[N],
IndexType (&shape ABSL_ATTRIBUTE_LIFETIME_BOUND)[N]) {
const auto rank = std::integral_constant<ptrdiff_t, N>{};
Access::Assign(this, rank, origin, shape);
}
explicit BoxView(RankType rank, IndexType* origin, IndexType* shape) {
Access::Assign(this, rank, origin, shape);
}
template <
typename BoxType,
typename = std::enable_if_t<
(IsBoxLike<BoxType> &&
           (!Mutable || IsMutableBoxLike<BoxType>) &&
           RankConstraint::Implies(
               internal::remove_cvref_t<BoxType>::static_rank, Rank))>>
BoxView(BoxType&& other)
: BoxView(other.rank(), other.origin().data(), other.shape().data()) {}
template <typename BoxType,
typename = std::enable_if_t<
(IsBoxLikeExplicitlyConvertibleToRank<BoxType, Rank> &&
(!Mutable || IsMutableBoxLike<BoxType>))>>
explicit BoxView(unchecked_t, BoxType&& other)
: BoxView(StaticRankCast<Rank, unchecked>(other.rank()),
other.origin().data(), other.shape().data()) {}
template <
typename BoxType,
std::enable_if_t<
(IsBoxLike<internal::remove_cvref_t<BoxType>> &&
           (!Mutable || IsMutableBoxLike<std::remove_reference_t<BoxType>>) &&
RankConstraint::Implies(
internal::remove_cvref_t<BoxType>::static_rank, Rank))>* =
nullptr>
BoxView& operator=(BoxType&& other) {
*this = BoxView(other);
return *this;
}
RankType rank() const { return Access::GetExtent(*this); }
IndexIntervalType operator[](DimensionIndex i) const {
return IndexIntervalType::UncheckedSized(origin()[i], shape()[i]);
}
span<IndexType, Rank> origin() const { return Access::template get<0>(this); }
span<IndexType, Rank> shape() const { return Access::template get<1>(this); }
Index num_elements() const { return ProductOfExtents(shape()); }
bool is_empty() const {
return internal_box::IsEmpty(span<const Index, Rank>(shape()));
}
template <typename BoxType,
bool SfinaeMutable = Mutable>
std::enable_if_t<(SfinaeMutable &&
IsBoxLikeImplicitlyConvertibleToRank<BoxType, Rank>)>
DeepAssign(const BoxType& other) const {
assert(other.rank() == rank());
std::copy_n(other.origin().begin(), rank(), origin().begin());
std::copy_n(other.shape().begin(), rank(), shape().begin());
}
template <int&... ExplicitArgumentBarrier, bool SfinaeM = Mutable>
std::enable_if_t<SfinaeM == true> Fill(IndexInterval interval = {}) const {
std::fill_n(origin().begin(), rank(), interval.inclusive_min());
std::fill_n(shape().begin(), rank(), interval.size());
}
friend std::ostream& operator<<(std::ostream& os, const BoxView& view) {
return internal_box::PrintToOstream(os, view);
}
template <typename Transformable>
decltype(ApplyIndexTransform(std::declval<BoxView>(),
std::declval<Transformable>()))
operator()(Transformable&& transformable) const {
return ApplyIndexTransform(*this,
std::forward<Transformable>(transformable));
}
};
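// Editor's sketch (illustrative): a BoxView aliases externally owned origin
// and shape arrays, and a mutable view writes through to them:
//
//     Index origin[] = {1, 2};
//     Index shape[] = {3, 4};
//     MutableBoxView<> v(origin, shape);
//     v[0] = IndexInterval::UncheckedSized(5, 6);  // origin[0]=5, shape[0]=6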
BoxView(DimensionIndex rank) -> BoxView<>;
template <DimensionIndex Rank>
BoxView(std::integral_constant<DimensionIndex, Rank> rank) -> BoxView<Rank>;
template <DimensionIndex Rank = dynamic_rank>
using MutableBoxView = BoxView<Rank, true>;
template <DimensionIndex Rank>
BoxView(Box<Rank>& box) -> BoxView<RankConstraint::FromInlineRank(Rank), true>;
template <DimensionIndex Rank>
BoxView(const Box<Rank>& box) -> BoxView<RankConstraint::FromInlineRank(Rank)>;
template <typename Shape, std::enable_if_t<IsIndexVector<Shape>>* = nullptr>
BoxView(Shape&& shape)
-> BoxView<SpanStaticExtent<Shape>::value, IsMutableIndexVector<Shape>>;
template <DimensionIndex Rank>
BoxView(const Index (&shape)[Rank]) -> BoxView<Rank>;
template <DimensionIndex Rank>
BoxView(Index (&shape)[Rank]) -> BoxView<Rank, true>;
template <typename Origin, typename Shape,
std::enable_if_t<(IsIndexVector<Origin> && IsIndexVector<Shape>)>* =
nullptr>
BoxView(Origin&& origin, Shape&& shape)
-> BoxView<SpanStaticExtent<Origin, Shape>::value,
(IsMutableIndexVector<Origin> && IsMutableIndexVector<Shape>)>;
template <DimensionIndex Rank>
BoxView(const Index (&origin)[Rank], const Index (&shape)[Rank])
-> BoxView<Rank>;
template <DimensionIndex Rank, bool Mutable>
struct StaticCastTraits<BoxView<Rank, Mutable>>
: public DefaultStaticCastTraits<BoxView<Rank, Mutable>> {
template <typename BoxType>
constexpr static bool IsCompatible(const BoxType& box) {
return RankConstraint::Implies(box.rank(), Rank);
}
static std::string Describe() { return internal_box::DescribeForCast(Rank); }
static std::string Describe(const BoxView<Rank, Mutable>& box) {
return internal_box::DescribeForCast(box.rank());
}
template <DimensionIndex TargetRank>
using RebindRank = BoxView<TargetRank, Mutable>;
};
template <DimensionIndex Rank>
struct StaticCastTraits<Box<Rank>> : public DefaultStaticCastTraits<Box<Rank>> {
template <typename BoxType>
constexpr static bool IsCompatible(const BoxType& box) {
return RankConstraint::Implies(box.rank(),
RankConstraint::FromInlineRank(Rank));
}
static std::string Describe() {
return internal_box::DescribeForCast(RankConstraint::FromInlineRank(Rank));
}
static std::string Describe(const Box<Rank>& box) {
return internal_box::DescribeForCast(box.rank());
}
template <DimensionIndex TargetRank>
using RebindRank = Box<TargetRank>;
};
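// Editor's sketch (illustrative, assuming the StaticRankCast machinery from
// "tensorstore/static_cast.h"): these traits let rank casts be checked or
// unchecked, e.g.
//
//     Box<> dyn({1, 2, 3}, {4, 5, 6});
//     auto fixed = StaticRankCast<3, unchecked>(dyn);  // Box<3>
//     auto bad = StaticRankCast<2>(dyn);               // error Result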
template <typename BoxA, typename BoxB>
std::enable_if_t<(IsBoxLike<BoxA> && IsBoxLike<BoxB> &&
RankConstraint::EqualOrUnspecified(BoxA::static_rank,
BoxB::static_rank)),
bool>
operator==(const BoxA& box_a, const BoxB& box_b) {
return internal_box::AreEqual(box_a, box_b);
}
template <typename BoxA, typename BoxB>
std::enable_if_t<(IsBoxLike<BoxA> && IsBoxLike<BoxB> &&
RankConstraint::EqualOrUnspecified(BoxA::static_rank,
BoxB::static_rank)),
bool>
operator!=(const BoxA& box_a, const BoxB& box_b) {
return !internal_box::AreEqual(box_a, box_b);
}
template <typename T>
constexpr inline bool HasBoxDomain = false;
template <DimensionIndex Rank>
constexpr inline bool HasBoxDomain<Box<Rank>> = true;
template <DimensionIndex Rank, bool Mutable>
constexpr inline bool HasBoxDomain<BoxView<Rank, Mutable>> = true;
template <DimensionIndex Rank>
inline BoxView<RankConstraint::FromInlineRank(Rank)> GetBoxDomainOf(
const Box<Rank>& box) {
return box;
}
template <DimensionIndex Rank, bool Mutable>
inline BoxView<Rank> GetBoxDomainOf(const BoxView<Rank, Mutable>& box) {
return box;
}
namespace internal_box {
bool IsFinite(BoxView<> box);
template <DimensionIndex BoxRank, DimensionIndex VectorRank, typename IndexType>
bool Contains(const BoxView<BoxRank>& box,
span<const IndexType, VectorRank> indices) {
if (indices.size() != box.rank()) return false;
for (DimensionIndex i = 0; i < box.rank(); ++i) {
if (!Contains(box[i], indices[i])) return false;
}
return true;
}
template <DimensionIndex OuterRank, DimensionIndex InnerRank>
bool Contains(const BoxView<OuterRank>& outer,
const BoxView<InnerRank>& inner) {
if (inner.rank() != outer.rank()) return false;
for (DimensionIndex i = 0; i < outer.rank(); ++i) {
if (!Contains(outer[i], inner[i])) return false;
}
return true;
}
template <DimensionIndex BoxRank, DimensionIndex VectorRank, typename IndexType>
bool ContainsPartial(const BoxView<BoxRank>& box,
span<const IndexType, VectorRank> indices) {
if (indices.size() > box.rank()) return false;
for (DimensionIndex i = 0; i < indices.size(); ++i) {
if (!Contains(box[i], indices[i])) return false;
}
return true;
}
}
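// Editor's note (illustrative): the helpers above distinguish full and
// partial containment; ContainsPartial checks only the leading dimensions,
// so for a rank-3 box the indices {1, 2} can satisfy ContainsPartial but
// never Contains, which requires a rank-matching vector.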
template <typename BoxType>
std::enable_if_t<HasBoxDomain<BoxType>, bool> IsFinite(const BoxType& box) {
return internal_box::IsFinite(GetBoxDomainOf(box));
}
template <typename BoxType, typename Indices>
std::enable_if_t<(HasBoxDomain<BoxType> && IsIndexConvertibleVector<Indices>),
bool>
Contains(const BoxType& box, const Indices& indices) {
return internal_box::Contains(
BoxView<BoxType::static_rank>(GetBoxDomainOf(box)), span(indices));
}
template <typename BoxType, DimensionIndex IndicesRank>
std::enable_if_t<HasBoxDomain<BoxType>, bool> Contains(
const BoxType& box, const Index (&indices)[IndicesRank]) {
return internal_box::Contains(
BoxView<BoxType::static_rank>(GetBoxDomainOf(box)), span(indices));
}
template <typename OuterBox, typename InnerBox>
std::enable_if_t<(HasBoxDomain<OuterBox> && IsBoxLike<InnerBox>), bool>
Contains(const OuterBox& outer, const InnerBox& inner) {
return internal_box::Contains(
BoxView<OuterBox::static_rank>(GetBoxDomainOf(outer)),
BoxView<InnerBox::static_rank>(inner));
} | #include "tensorstore/box.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/box.h"
#include "tensorstore/rank.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::dynamic_rank;
using ::tensorstore::HasBoxDomain;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IsStaticCastConstructible;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::MatchesStatus;
using ::tensorstore::MutableBoxView;
using ::tensorstore::span;
using ::tensorstore::StaticRankCast;
using ::tensorstore::SubBoxView;
using ::tensorstore::unchecked;
using ::tensorstore::serialization::TestSerializationRoundTrip;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
static_assert(std::is_convertible_v<BoxView<3>, BoxView<>>);
static_assert(!std::is_constructible_v<BoxView<3>, BoxView<>>);
static_assert(!std::is_assignable_v<BoxView<3>, BoxView<>>);
static_assert(!std::is_assignable_v<Box<3>, Box<>>);
static_assert(!std::is_constructible_v<Box<3>, Box<>>);
static_assert(!std::is_constructible_v<BoxView<3>, Box<>>);
static_assert(!std::is_constructible_v<MutableBoxView<3>, MutableBoxView<>>);
static_assert(!std::is_constructible_v<MutableBoxView<3>, Box<>>);
static_assert(std::is_constructible_v<MutableBoxView<3>, Box<3>&>);
static_assert(IsStaticCastConstructible<BoxView<3>, BoxView<>>);
static_assert(IsStaticCastConstructible<Box<3>, BoxView<>>);
static_assert(IsStaticCastConstructible<Box<3>, Box<>>);
static_assert(IsStaticCastConstructible<BoxView<>, BoxView<3>>);
static_assert(IsStaticCastConstructible<MutableBoxView<3>, Box<3>&>);
static_assert(!IsStaticCastConstructible<MutableBoxView<>, const Box<3>&>);
static_assert(!IsStaticCastConstructible<BoxView<2>, BoxView<3>>);
static_assert(!IsStaticCastConstructible<BoxView<2>, Box<3>>);
static_assert(!IsStaticCastConstructible<Box<3>, Box<2>>);
TEST(BoxTest, DefaultConstructDynamic) {
Box<> box;
EXPECT_EQ(0, box.rank());
}
TEST(BoxTest, DefaultConstructStatic) {
Box<3> box;
EXPECT_THAT(box.origin(), ElementsAre(-kInfIndex, -kInfIndex, -kInfIndex));
EXPECT_THAT(box.shape(), ElementsAre(kInfSize, kInfSize, kInfSize));
}
TEST(BoxTest, RankPointersConstruct) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {4, 5, 6};
Box<> box(3, origin, shape);
EXPECT_THAT(box.origin(), ElementsAre(1, 2, 3));
EXPECT_THAT(box.shape(), ElementsAre(4, 5, 6));
}
TEST(BoxTest, SizeConstruct) {
Box<> box(3);
EXPECT_THAT(box.origin(), ElementsAre(-kInfIndex, -kInfIndex, -kInfIndex));
EXPECT_THAT(box.shape(), ElementsAre(kInfSize, kInfSize, kInfSize));
}
TEST(BoxTest, ShapeArrayConstruct) {
std::array<Index, 3> shape{{1, 2, 3}};
Box<> box(shape);
EXPECT_THAT(box.origin(), ElementsAre(0, 0, 0));
EXPECT_THAT(box.shape(), ElementsAre(1, 2, 3));
}
TEST(BoxTest, DynamicRankSpanConstruct) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
Box<> box{span(origin), span(shape)};
EXPECT_EQ(3, box.rank());
  EXPECT_THAT(box.origin(), ElementsAreArray(origin));
  EXPECT_THAT(box.shape(), ElementsAreArray(shape));
}
TEST(BoxTest, ConstructFromArrays) {
Box<> box({1, 2, 3}, {4, 5, 6});
EXPECT_THAT(box.origin(), ElementsAre(1, 2, 3));
EXPECT_THAT(box.shape(), ElementsAre(4, 5, 6));
}
TEST(BoxTest, ConstructFromBoxView) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {4, 5, 6};
BoxView<> view(origin, shape);
Box<> box(view);
EXPECT_EQ(3, box.rank());
EXPECT_THAT(box.shape(), ElementsAreArray(shape));
EXPECT_THAT(box.origin(), ElementsAreArray(origin));
}
TEST(BoxTest, DeduceFromShapeArray) {
const Index shape[] = {3, 4, 5};
auto box = Box(shape);
static_assert(std::is_same_v<decltype(box), Box<3>>);
EXPECT_THAT(box.origin(), ElementsAre(0, 0, 0));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxTest, DeduceFromShapeSpanStatic) {
const Index shape[] = {3, 4, 5};
auto box = Box(span(shape));
static_assert(std::is_same_v<decltype(box), Box<3>>);
EXPECT_THAT(box.origin(), ElementsAre(0, 0, 0));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxTest, DeduceFromShapeSpanDynamic) {
const Index shape[] = {3, 4, 5};
auto box = Box(span<const Index>(shape));
static_assert(std::is_same_v<decltype(box), Box<>>);
EXPECT_THAT(box.origin(), ElementsAre(0, 0, 0));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxTest, DeduceFromOriginAndShapeArrays) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
auto box = Box(origin, shape);
static_assert(std::is_same_v<decltype(box), Box<3>>);
EXPECT_THAT(box.origin(), ElementsAre(1, 2, 3));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxTest, DeduceFromOriginAndShapeSpansStatic) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
auto box = Box(span(origin), span(shape));
static_assert(std::is_same_v<decltype(box), Box<3>>);
EXPECT_THAT(box.origin(), ElementsAre(1, 2, 3));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxTest, DeduceFromOriginAndShapeDynamic) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
auto box = Box(span<const Index>(origin), span<const Index>(shape));
static_assert(std::is_same_v<decltype(box), Box<>>);
EXPECT_THAT(box.origin(), ElementsAre(1, 2, 3));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxTest, DeduceFromBoxView) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
BoxView<3> box(origin, shape);
auto box2 = Box(box);
static_assert(std::is_same_v<decltype(box2), Box<3>>);
EXPECT_THAT(box2.shape(), ElementsAreArray(shape));
EXPECT_THAT(box2.origin(), ElementsAreArray(origin));
}
TEST(BoxTest, DeduceFromBox) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
Box<3> box(origin, shape);
auto box2 = Box(box);
static_assert(std::is_same_v<decltype(box2), Box<3>>);
EXPECT_THAT(box2.shape(), ElementsAreArray(shape));
EXPECT_THAT(box2.origin(), ElementsAreArray(origin));
}
TEST(BoxTest, AssignFromBoxView) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {4, 5, 6};
BoxView<> view(origin, shape);
Box<> box;
box = view;
EXPECT_EQ(3, box.rank());
EXPECT_THAT(box.shape(), ElementsAreArray(shape));
EXPECT_THAT(box.origin(), ElementsAreArray(origin));
}
TEST(BoxTest, AssignFromBox) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {4, 5, 6};
Box<> other(origin, shape);
Box<> box;
box = other;
EXPECT_EQ(3, box.rank());
EXPECT_THAT(box.shape(), ElementsAreArray(shape));
EXPECT_THAT(box.origin(), ElementsAreArray(origin));
}
TEST(BoxTest, AssignDynamicBoxFromStaticBox) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {4, 5, 6};
Box<3> other(origin, shape);
Box<> box;
box = other;
EXPECT_EQ(3, box.rank());
EXPECT_THAT(box.shape(), ElementsAreArray(shape));
EXPECT_THAT(box.origin(), ElementsAreArray(origin));
box.Fill();
box = BoxView<3>(other);
EXPECT_EQ(3, box.rank());
EXPECT_THAT(box.shape(), ElementsAreArray(shape));
EXPECT_THAT(box.origin(), ElementsAreArray(origin));
}
TEST(BoxTest, AssignStaticBoxFromDynamic) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {4, 5, 6};
Box<> other(origin, shape);
Box<3> box;
box = StaticRankCast<3, unchecked>(other);
EXPECT_EQ(3, box.rank());
EXPECT_THAT(box.shape(), ElementsAreArray(shape));
EXPECT_THAT(box.origin(), ElementsAreArray(origin));
}
TEST(BoxTest, SetRank) {
Box<> box;
box.set_rank(3);
EXPECT_EQ(3, box.rank());
EXPECT_THAT(box.origin(), ElementsAre(-kInfIndex, -kInfIndex, -kInfIndex));
EXPECT_THAT(box.shape(), ElementsAre(kInfSize, kInfSize, kInfSize));
}
TEST(BoxTest, Accessors) {
Box<> box({1, 2, 3}, {4, 5, 6});
EXPECT_EQ(4 * 5 * 6, box.num_elements());
EXPECT_EQ(IndexInterval::UncheckedSized(1, 4), box[0]);
EXPECT_EQ(IndexInterval::UncheckedSized(2, 5), box[1]);
EXPECT_EQ(IndexInterval::UncheckedSized(3, 6), box[2]);
}
TEST(BoxTest, ConstAccessors) {
const Box<> box({1, 2, 3}, {4, 5, 6});
EXPECT_EQ(4 * 5 * 6, box.num_elements());
EXPECT_EQ(IndexInterval::UncheckedSized(1, 4), box[0]);
EXPECT_EQ(IndexInterval::UncheckedSized(2, 5), box[1]);
EXPECT_EQ(IndexInterval::UncheckedSized(3, 6), box[2]);
}
TEST(BoxTest, SubscriptAssignment) {
Box<> box(2);
box[1] = IndexInterval::UncheckedSized(1, 5);
EXPECT_THAT(box.origin(), ElementsAre(-kInfIndex, 1));
EXPECT_THAT(box.shape(), ElementsAre(kInfSize, 5));
}
TEST(BoxTest, Fill) {
Box<> box(2);
box.Fill(IndexInterval::UncheckedSized(1, 5));
EXPECT_THAT(box.origin(), ElementsAre(1, 1));
EXPECT_THAT(box.shape(), ElementsAre(5, 5));
}
TEST(BoxTest, IsEmpty) {
Box<> box(3);
EXPECT_FALSE(box.is_empty());
box.Fill(IndexInterval::UncheckedSized(0, 0));
EXPECT_TRUE(box.is_empty());
}
TEST(BoxViewTest, StaticRankDefaultConstruct) {
BoxView<3> box;
EXPECT_THAT(box.origin(), ElementsAre(-kInfIndex, -kInfIndex, -kInfIndex));
EXPECT_THAT(box.shape(), ElementsAre(kInfSize, kInfSize, kInfSize));
}
TEST(BoxViewTest, DynamicRankDefaultConstruct) {
BoxView<> box;
EXPECT_EQ(0, box.rank());
}
TEST(BoxViewTest, DynamicRankSizeConstruct) {
BoxView<> box(3);
EXPECT_EQ(3, box.rank());
EXPECT_THAT(box.origin(), ElementsAre(-kInfIndex, -kInfIndex, -kInfIndex));
EXPECT_THAT(box.shape(), ElementsAre(kInfSize, kInfSize, kInfSize));
}
TEST(BoxViewTest, DynamicRankSpanConstruct) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
BoxView<> box{span(origin), span(shape)};
EXPECT_EQ(3, box.rank());
EXPECT_EQ(&origin[0], box.origin().data());
EXPECT_EQ(&shape[0], box.shape().data());
}
TEST(BoxViewTest, DeduceFromShapeArray) {
const Index shape[] = {3, 4, 5};
auto box = BoxView(shape);
static_assert(std::is_same_v<decltype(box), BoxView<3>>);
EXPECT_THAT(box.origin(), ElementsAre(0, 0, 0));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxViewTest, DeduceFromShapeSpanStatic) {
const Index shape[] = {3, 4, 5};
auto box = BoxView(span(shape));
static_assert(std::is_same_v<decltype(box), BoxView<3>>);
EXPECT_THAT(box.origin(), ElementsAre(0, 0, 0));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxViewTest, DeduceFromShapeSpanDynamic) {
const Index shape[] = {3, 4, 5};
auto box = BoxView(span<const Index>(shape));
static_assert(std::is_same_v<decltype(box), BoxView<>>);
EXPECT_THAT(box.origin(), ElementsAre(0, 0, 0));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxViewTest, DeduceFromOriginAndShapeArrays) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
auto box = BoxView(origin, shape);
static_assert(std::is_same_v<decltype(box), BoxView<3>>);
EXPECT_THAT(box.origin(), ElementsAre(1, 2, 3));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxViewTest, DeduceFromOriginAndShapeSpansStatic) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
auto box = BoxView(span(origin), span(shape));
static_assert(std::is_same_v<decltype(box), BoxView<3>>);
EXPECT_THAT(box.origin(), ElementsAre(1, 2, 3));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxViewTest, DeduceFromOriginAndShapeDynamic) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
auto box = BoxView(span<const Index>(origin), span<const Index>(shape));
static_assert(std::is_same_v<decltype(box), BoxView<>>);
EXPECT_THAT(box.origin(), ElementsAre(1, 2, 3));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxViewTest, DeduceFromBoxView) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
BoxView<3> box(origin, shape);
auto box2 = BoxView(box);
static_assert(std::is_same_v<decltype(box2), BoxView<3>>);
EXPECT_THAT(box2.shape(), ElementsAreArray(shape));
EXPECT_THAT(box2.origin(), ElementsAreArray(origin));
}
TEST(BoxViewTest, DeduceFromBox) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
const Box<3> box(origin, shape);
auto box2 = BoxView(box);
static_assert(std::is_same_v<decltype(box2), BoxView<3>>);
EXPECT_THAT(box2.shape(), ElementsAreArray(shape));
EXPECT_THAT(box2.origin(), ElementsAreArray(origin));
}
TEST(BoxViewTest, Subscript) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
BoxView<> box(origin, shape);
EXPECT_EQ(IndexInterval::UncheckedSized(1, 3), box[0]);
EXPECT_EQ(IndexInterval::UncheckedSized(2, 4), box[1]);
EXPECT_EQ(IndexInterval::UncheckedSized(3, 5), box[2]);
}
TEST(BoxViewTest, NumElements) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
BoxView<> box(origin, shape);
EXPECT_EQ(3 * 4 * 5, box.num_elements());
}
TEST(BoxViewTest, StaticToDynamicConversion) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
BoxView<3> box(origin, shape);
BoxView<> dynamic_box = box;
EXPECT_EQ(3, dynamic_box.rank());
EXPECT_THAT(dynamic_box.shape(), ElementsAreArray(shape));
EXPECT_THAT(dynamic_box.origin(), ElementsAreArray(origin));
}
TEST(BoxViewTest, DefaultAssignment) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
BoxView<3> box(origin, shape);
BoxView<3> box2;
box2 = box;
EXPECT_EQ(3, box2.rank());
EXPECT_THAT(box2.shape(), ElementsAreArray(shape));
EXPECT_THAT(box2.origin(), ElementsAreArray(origin));
}
TEST(BoxViewTest, DefaultAssignmentStaticToDynamic) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
BoxView<3> box(origin, shape);
BoxView<> box2;
box2 = box;
EXPECT_EQ(3, box2.rank());
EXPECT_THAT(box2.shape(), ElementsAreArray(shape));
EXPECT_THAT(box2.origin(), ElementsAreArray(origin));
}
TEST(BoxViewTest, StaticRankCast) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
BoxView<> box(origin, shape);
auto box2 = StaticRankCast<3, unchecked>(box);
EXPECT_THAT(
StaticRankCast<2>(box),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot cast box with rank of 3 to box with rank of 2"));
static_assert(std::is_same_v<decltype(box2), BoxView<3>>);
EXPECT_THAT(box2.shape(), ElementsAreArray(shape));
EXPECT_THAT(box2.origin(), ElementsAreArray(origin));
}
TEST(BoxViewTest, ConstructFromDynamicBox) {
Box<> box({1, 2}, {3, 4});
BoxView<> box_view = box;
EXPECT_EQ(2, box_view.rank());
EXPECT_EQ(box.shape().data(), box_view.shape().data());
EXPECT_EQ(box.origin().data(), box_view.origin().data());
}
TEST(BoxViewTest, ConstructFromStaticBox) {
Box<2> box({1, 2}, {3, 4});
BoxView<> box_view = box;
EXPECT_EQ(2, box_view.rank());
EXPECT_EQ(box.shape().data(), box_view.shape().data());
EXPECT_EQ(box.origin().data(), box_view.origin().data());
}
TEST(MutableBoxViewTest, RankPointersConstruct) {
Index origin[] = {1, 2, 3};
Index shape[] = {4, 5, 6};
MutableBoxView<> box(3, origin, shape);
EXPECT_EQ(3, box.rank());
EXPECT_EQ(box.origin().data(), origin);
EXPECT_EQ(box.shape().data(), shape);
}
TEST(MutableBoxViewTest, DynamicRankSpanConstruct) {
Index origin[] = {1, 2, 3};
Index shape[] = {3, 4, 5};
MutableBoxView<> box{span(origin), span(shape)};
EXPECT_EQ(3, box.rank());
EXPECT_EQ(box.origin().data(), origin);
EXPECT_EQ(box.shape().data(), shape);
}
TEST(MutableBoxViewTest, DeduceFromOriginAndShapeArrays) {
Index origin[] = {1, 2, 3};
Index shape[] = {3, 4, 5};
auto box = BoxView(origin, shape);
static_assert(std::is_same_v<decltype(box), MutableBoxView<3>>);
EXPECT_EQ(3, box.rank());
EXPECT_EQ(box.origin().data(), origin);
EXPECT_EQ(box.shape().data(), shape);
}
TEST(MutableBoxViewTest, DeduceFromOriginAndShapeSpansStatic) {
Index origin[] = {1, 2, 3};
Index shape[] = {3, 4, 5};
auto box = BoxView(span(origin), span(shape));
static_assert(std::is_same_v<decltype(box), MutableBoxView<3>>);
EXPECT_EQ(3, box.rank());
EXPECT_EQ(box.origin().data(), origin);
EXPECT_EQ(box.shape().data(), shape);
}
TEST(MutableBoxViewTest, DeduceFromOriginAndShapeDynamic) {
Index origin[] = {1, 2, 3};
Index shape[] = {3, 4, 5};
auto box = BoxView(span<Index>(origin), span<Index>(shape));
static_assert(std::is_same_v<decltype(box), MutableBoxView<>>);
EXPECT_EQ(3, box.rank());
EXPECT_EQ(box.origin().data(), origin);
EXPECT_EQ(box.shape().data(), shape);
}
TEST(MutableBoxViewTest, DeduceFromBox) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
Box<3> box(origin, shape);
auto box2 = BoxView(box);
static_assert(std::is_same_v<decltype(box2), MutableBoxView<3>>);
EXPECT_EQ(box2.shape().data(), box.shape().data());
EXPECT_EQ(box2.origin().data(), box.origin().data());
}
TEST(MutableBoxViewTest, DeduceFromMutableBoxView) {
Index origin[] = {1, 2, 3};
Index shape[] = {3, 4, 5};
MutableBoxView<3> box(origin, shape);
auto box2 = BoxView(box);
static_assert(std::is_same_v<decltype(box2), MutableBoxView<3>>);
EXPECT_EQ(box2.shape().data(), box.shape().data());
EXPECT_EQ(box2.origin().data(), box.origin().data());
}
TEST(MutableBoxViewTest, AssignFromBoxView) {
Index origin1[] = {1, 2, 3};
Index shape1[] = {4, 5, 6};
const Index origin2[] = {10, 20, 30};
const Index shape2[] = {40, 50, 60};
MutableBoxView<> box(origin1, shape1);
box.DeepAssign(BoxView(origin2, shape2));
EXPECT_EQ(3, box.rank());
EXPECT_THAT(origin1, ElementsAreArray(origin2));
EXPECT_THAT(shape1, ElementsAreArray(shape2));
}
TEST(MutableBoxViewTest, AssignFromBox) {
Index origin1[] = {1, 2, 3};
Index shape1[] = {4, 5, 6};
const Index origin2[] = {10, 20, 30};
const Index shape2[] = {40, 50, 60};
MutableBoxView<> box(origin1, shape1);
box.DeepAssign(Box(origin2, shape2));
EXPECT_EQ(3, box.rank());
EXPECT_THAT(origin1, ElementsAreArray(origin2));
EXPECT_THAT(shape1, ElementsAreArray(shape2));
}
TEST(MutableBoxViewTest, CopyAssign) {
Index origin1[] = {1, 2, 3};
Index shape1[] = {4, 5, 6};
Index origin2[] = {10, 20, 30};
Index shape2[] = {40, 50, 60};
MutableBoxView<> box(origin1, shape1);
box.DeepAssign(MutableBoxView<>(origin2, shape2));
EXPECT_EQ(3, box.rank());
EXPECT_THAT(origin1, ElementsAreArray(origin2));
EXPECT_THAT(shape1, ElementsAreArray(shape2));
}
TEST(MutableBoxViewTest, SubscriptAssignment) {
Index origin[] = {1, 2, 3};
Index shape[] = {4, 5, 6};
MutableBoxView<> box(origin, shape);
box[1] = IndexInterval::UncheckedSized(1, 7);
EXPECT_THAT(origin, ElementsAre(1, 1, 3));
EXPECT_THAT(shape, ElementsAre(4, 7, 6));
}
TEST(MutableBoxViewTest, Fill) {
Index origin[] = {1, 2, 3};
Index shape[] = {4, 5, 6};
MutableBoxView<> box(origin, shape);
box.Fill(IndexInterval::UncheckedSized(1, 5));
EXPECT_THAT(box.origin(), ElementsAre(1, 1, 1));
EXPECT_THAT(box.shape(), ElementsAre(5, 5, 5));
box.Fill();
EXPECT_THAT(box.origin(), ElementsAre(-kInfIndex, -kInfIndex, -kInfIndex));
EXPECT_THAT(box.shape(), ElementsAre(kInfSize, kInfSize, kInfSize));
}
TEST(MutableBoxViewTest, StaticRankCast) {
Index origin[] = {1, 2, 3};
Index shape[] = {3, 4, 5};
MutableBoxView<> box(origin, shape);
auto box2 = StaticRankCast<3, unchecked>(box);
static_assert(std::is_same_v<decltype(box2), MutableBoxView<3>>);
EXPECT_THAT(box2.shape(), ElementsAreArray(shape));
EXPECT_THAT(box2.origin(), ElementsAreArray(origin));
}
TEST(BoxTest, Comparison) {
const Index origin1[] = {1, 2, 3};
const Index shape1[] = {4, 5, 6};
const Index origin2[] = {1, 2, 3};
const Index shape2[] = {4, 5, 6};
const Index origin3[] = {1, 2, 4};
const Index shape3[] = {4, 5, 7};
const Index origin4[] = {1, 2};
const Index shape4[] = {4, 5};
BoxView<> view1(origin1, shape1);
Box<> box1(view1);
BoxView<> view2(origin2, shape2);
Box<> box2(view2);
BoxView<> view3(origin3, shape3);
Box<> box3(view3);
BoxView<> view4(origin4, shape4);
Box<> box4(view4);
EXPECT_EQ(box1, view1);
EXPECT_EQ(box2, view2);
EXPECT_EQ(box3, view3);
EXPECT_EQ(box4, view4);
EXPECT_EQ(view1, view2);
EXPECT_EQ(view1, box2);
EXPECT_EQ(box1, view2);
EXPECT_EQ(box1, box2);
EXPECT_NE(view1, view3);
EXPECT_NE(view1, box3);
EXPECT_NE(box1, view3);
EXPECT_NE(box1, box3);
EXPECT_NE(view1, view4);
EXPECT_NE(view1, box4);
EXPECT_NE(box1, view4);
EXPECT_NE(box1, box4);
}
TEST(BoxTest, Print) {
Index origin[] = {1, 2, 3};
Index shape[] = {3, 4, 5};
EXPECT_EQ("{origin={1, 2, 3}, shape={3, 4, 5}}",
tensorstore::StrCat(BoxView<>(origin, shape)));
EXPECT_EQ("{origin={1, 2, 3}, shape={3, 4, 5}}",
tensorstore::StrCat(Box<>(origin, shape)));
EXPECT_EQ("{origin={1, 2, 3}, shape={3, 4, 5}}",
tensorstore::StrCat(MutableBoxView<>(origin, shape)));
}
TEST(BoxTest, Contains) {
const Index origin1[] = {1, 2};
const Index shape1[] = {4, 5};
const Index origin2[] = {2, 2};
const Index shape2[] = {3, 5};
const Index origin3[] = {1, 2};
const Index shape3[] = {4, 6};
const Index origin4[] = {1};
const Index shape4[] = {4};
const Index indices1[] = {2, 3};
const Index indices2[] = {0, 3};
const Index indices3[] = {0};
Index indices4[] = {2};
auto span1 = span(indices1);
auto span2 = span(indices2);
auto span3 = span(indices3);
auto span4 = span(indices4);
BoxView<> view1(origin1, shape1);
BoxView<> view2(origin2, shape2);
BoxView<> view3(origin3, shape3);
BoxView<> view4(origin4, shape4);
Box<> box1(origin1, shape1);
Box<> box2(origin2, shape2);
Box<> box3(origin3, shape3);
Box<> box4(origin4, shape4);
EXPECT_TRUE(Contains(view1, indices1));
EXPECT_TRUE(ContainsPartial(view1, indices1));
EXPECT_TRUE(ContainsPartial(view1, indices4));
EXPECT_FALSE(Contains(view1, indices2));
EXPECT_FALSE(Contains(view1, indices3));
EXPECT_FALSE(ContainsPartial(view1, indices2));
EXPECT_FALSE(ContainsPartial(view1, indices3));
EXPECT_TRUE(Contains(view1, span1));
EXPECT_TRUE(ContainsPartial(view1, span1));
EXPECT_FALSE(Contains(view1, span2));
EXPECT_FALSE(ContainsPartial(view1, span2));
EXPECT_FALSE(Contains(view1, span3));
EXPECT_FALSE(ContainsPartial(view1, span3));
EXPECT_TRUE(ContainsPartial(view1, span4));
EXPECT_TRUE(Contains(box1, indices1));
EXPECT_TRUE(ContainsPartial(box1, indices1));
EXPECT_FALSE(Contains(box1, indices2));
EXPECT_FALSE(Contains(box1, indices3));
EXPECT_TRUE(Contains(box1, span1));
EXPECT_FALSE(Contains(box1, span2));
EXPECT_FALSE(Contains(box1, span3));
EXPECT_TRUE(Contains(view1, view2));
EXPECT_FALSE(Contains(view1, view3));
EXPECT_FALSE(Contains(view1, view4));
EXPECT_TRUE(Contains(view1, box2));
EXPECT_FALSE(Contains(view1, box3));
EXPECT_FALSE(Contains(view1, box4));
EXPECT_TRUE(Contains(box1, view2));
EXPECT_FALSE(Contains(box1, view3));
EXPECT_FALSE(Contains(box1, view4));
EXPECT_TRUE(Contains(box1, box2));
EXPECT_FALSE(Contains(box1, box3));
EXPECT_FALSE(Contains(box1, box4));
}
TEST(BoxTest, GetBoxDomainOf) {
static_assert(!HasBoxDomain<int>);
static_assert(HasBoxDomain<BoxView<>>);
static_assert(HasBoxDomain<Box<>>);
static_assert(HasBoxDomain<MutableBoxView<>>);
Box<> box({1, 2}, {3, 4});
BoxView<> view = box;
EXPECT_EQ(box, GetBoxDomainOf(box));
EXPECT_EQ(box, GetBoxDomainOf(view));
}
TEST(BoxTest, InlineSize) {
Box<dynamic_rank(2)> box({1, 2}, {3, 4});
BoxView<dynamic_rank> v = box;
EXPECT_EQ(v, box);
MutableBoxView<dynamic_rank> v2 = box;
EXPECT_EQ(v2, box);
}
TEST(BoxTest, DeductionGuides) {
auto box = Box({1, 2}, {3, 4});
static_assert(std::is_same_v<decltype(box), Box<2>>);
static_assert(std::is_same_v<decltype(BoxView({1, 2}, {3, 4})), BoxView<2>>);
static_assert(decltype(box)::static_rank == 2);
auto box_view = BoxView(box);
static_assert(std::is_same_v<decltype(box_view), MutableBoxView<2>>);
}
TEST(BoxTest, IsFinite) {
EXPECT_TRUE(IsFinite(Box<>()));
EXPECT_TRUE(IsFinite(BoxView<>()));
EXPECT_FALSE(IsFinite(Box<>(1)));
EXPECT_FALSE(IsFinite(Box<1>()));
EXPECT_FALSE(IsFinite(BoxView<>(1)));
EXPECT_FALSE(IsFinite(BoxView<>(2)));
EXPECT_FALSE(IsFinite(BoxView<2>()));
EXPECT_TRUE(IsFinite(Box<3>({1, 2, 3}, {4, 5, 6})));
EXPECT_TRUE(IsFinite(BoxView<3>({1, 2, 3}, {4, 5, 6})));
EXPECT_TRUE(IsFinite(Box<>({1, 2, 3}, {4, 5, 6})));
EXPECT_TRUE(IsFinite(BoxView<>({1, 2, 3}, {4, 5, 6})));
EXPECT_TRUE(IsFinite(Box<1>({1}, {4})));
EXPECT_FALSE(IsFinite(Box<3>({1, -kInfIndex, 3}, {4, 5, 6})));
EXPECT_FALSE(IsFinite(Box<3>({1, kInfIndex - 5, 3}, {4, 6, 6})));
}
TEST(BoxSerializationTest, StaticRank) {
TestSerializationRoundTrip(Box<0>());
TestSerializationRoundTrip(Box<3>({1, 2, 3}, {4, 5, 6}));
}
TEST(BoxSerializationTest, DynamicRank) {
TestSerializationRoundTrip(Box<>());
TestSerializationRoundTrip(Box({1, 2, 3}, {4, 5, 6}));
}
TEST(BoxTest, SubBoxView) {
Box<> b({1, 2, 3}, {4, 5, 6});
const Box<>& b_const = b;
BoxView<> b_view = b;
MutableBoxView<> b_mut_view = b;
EXPECT_EQ(Box<>({2, 3}, {5, 6}), SubBoxView(b, 1));
EXPECT_EQ(Box<>({2}, {5}), SubBoxView(b, 1, 2));
static_assert(std::is_same_v<decltype(SubBoxView(b, 1)), MutableBoxView<>>);
static_assert(std::is_same_v<decltype(SubBoxView(b_const, 1)), BoxView<>>);
static_assert(std::is_same_v<decltype(SubBoxView(b_view, 1)), BoxView<>>);
static_assert(
std::is_same_v<decltype(SubBoxView(b_mut_view, 1)), MutableBoxView<>>);
}
} |
530 | cpp | google/tensorstore | index_interval | tensorstore/index_interval.cc | tensorstore/index_interval_test.cc | #ifndef TENSORSTORE_INDEX_INTERVAL_H_
#define TENSORSTORE_INDEX_INTERVAL_H_
#include <cassert>
#include <iosfwd>
#include <string_view>
#include <type_traits>
#include <utility>
#include "absl/status/status.h"
#include "tensorstore/container_kind.h"
#include "tensorstore/index.h"
#include "tensorstore/serialization/fwd.h"
#include "tensorstore/util/division.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
inline constexpr bool IsFiniteIndex(Index index) {
return index >= kMinFiniteIndex && index <= kMaxFiniteIndex;
}
inline constexpr bool IsValidIndex(Index index) {
return index >= -kInfIndex && index <= +kInfIndex;
}
class IndexInterval {
public:
constexpr IndexInterval() noexcept
: inclusive_min_(-kInfIndex), size_(kInfSize) {}
constexpr static IndexInterval Infinite() noexcept { return {}; }
constexpr static bool ValidClosed(Index inclusive_min, Index inclusive_max) {
return inclusive_min >= -kInfIndex && inclusive_min < kInfIndex &&
inclusive_max > -kInfIndex && inclusive_max >= inclusive_min - 1 &&
inclusive_max <= kInfIndex;
}
static constexpr IndexInterval UncheckedClosed(Index inclusive_min,
Index inclusive_max) noexcept {
assert(ValidClosed(inclusive_min, inclusive_max));
return IndexInterval(inclusive_min, inclusive_max - inclusive_min + 1);
}
static Result<IndexInterval> Closed(Index inclusive_min, Index inclusive_max);
constexpr static bool ValidHalfOpen(Index inclusive_min,
Index exclusive_max) {
return inclusive_min >= -kInfIndex && inclusive_min < kInfIndex &&
exclusive_max > -kInfIndex + 1 && exclusive_max >= inclusive_min &&
exclusive_max <= kInfIndex + 1;
}
static constexpr IndexInterval UncheckedHalfOpen(
Index inclusive_min, Index exclusive_max) noexcept {
assert(ValidHalfOpen(inclusive_min, exclusive_max));
return IndexInterval(inclusive_min, exclusive_max - inclusive_min);
}
static Result<IndexInterval> HalfOpen(Index inclusive_min,
Index exclusive_max);
constexpr static bool ValidSized(Index inclusive_min, Index size) {
return inclusive_min >= -kInfIndex && size >= 0 && size <= kInfSize &&
inclusive_min < kInfIndex && inclusive_min <= kInfIndex + 1 - size &&
inclusive_min + size > -kInfIndex + 1;
}
static constexpr IndexInterval UncheckedSized(Index inclusive_min,
Index size) {
assert(ValidSized(inclusive_min, size));
return IndexInterval(inclusive_min, size);
}
static Result<IndexInterval> Sized(Index inclusive_min, Index size);
constexpr Index inclusive_min() const { return inclusive_min_; }
constexpr Index exclusive_min() const { return inclusive_min_ - 1; }
constexpr Index exclusive_max() const { return inclusive_min_ + size_; }
constexpr Index inclusive_max() const { return inclusive_min_ + size_ - 1; }
constexpr Index size() const { return size_; }
constexpr bool empty() const { return size_ == 0; }
friend std::ostream& operator<<(std::ostream& os, IndexInterval x);
friend constexpr bool operator==(IndexInterval a, IndexInterval b) {
return a.inclusive_min() == b.inclusive_min() && a.size() == b.size();
}
friend constexpr bool operator!=(IndexInterval a, IndexInterval b) {
return !(a == b);
}
constexpr IndexInterval operator-() const {
if (size_ == 0) return IndexInterval(-inclusive_min_, 0);
return IndexInterval(-inclusive_max(), size());
}
template <typename H>
friend H AbslHashValue(H h, IndexInterval x) {
return H::combine(std::move(h), x.inclusive_min(), x.size());
}
static constexpr IndexInterval FiniteRange() {
return UncheckedClosed(kMinFiniteIndex, kMaxFiniteIndex);
}
private:
explicit constexpr IndexInterval(Index inclusive_min, Index size) noexcept
: inclusive_min_(inclusive_min), size_(size) {}
friend class IndexIntervalRef;
Index inclusive_min_;
Index size_;
};
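// Illustrative sketch: the checked and unchecked construction forms agree on
// the interval they describe; these equivalences are also exercised by the
// Equality unit test below.
static_assert(IndexInterval::UncheckedClosed(2, 4) ==
              IndexInterval::UncheckedSized(2, 3));
static_assert(IndexInterval::UncheckedHalfOpen(2, 5) ==
              IndexInterval::UncheckedSized(2, 3));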
constexpr inline bool Contains(IndexInterval interval, Index index) {
return index >= kMinFiniteIndex && index <= kMaxFiniteIndex &&
index >= interval.inclusive_min() && index <= interval.inclusive_max();
}
constexpr inline bool Contains(IndexInterval outer, IndexInterval inner) {
return inner.size() == 0 || (inner.inclusive_min() >= outer.inclusive_min() &&
inner.inclusive_max() <= outer.inclusive_max());
}
constexpr inline bool IsFinite(IndexInterval interval) {
return interval.inclusive_min() != -kInfIndex &&
interval.inclusive_max() != kInfIndex;
}
class IndexIntervalRef {
public:
constexpr explicit IndexIntervalRef(IndexInterval& other)
: IndexIntervalRef(other.inclusive_min_, other.size_) {}
constexpr operator IndexInterval() const {
return IndexInterval::UncheckedSized(inclusive_min(), size());
}
constexpr IndexIntervalRef& operator=(IndexInterval interval) noexcept {
inclusive_min_ = interval.inclusive_min();
size_ = interval.size();
return *this;
}
constexpr IndexIntervalRef& operator=(IndexIntervalRef interval) noexcept {
inclusive_min_ = interval.inclusive_min();
size_ = interval.size();
return *this;
}
constexpr Index inclusive_min() const { return inclusive_min_; }
constexpr Index size() const { return size_; }
constexpr bool empty() const { return size_ == 0; }
constexpr Index exclusive_min() const { return inclusive_min_ - 1; }
constexpr Index exclusive_max() const { return inclusive_min_ + size_; }
constexpr Index inclusive_max() const { return inclusive_min_ + size_ - 1; }
  static constexpr IndexIntervalRef UncheckedSized(Index& inclusive_min,
                                                   Index& size) {
    return IndexIntervalRef(inclusive_min, size);
  }
friend std::ostream& operator<<(std::ostream& os, IndexIntervalRef x) {
return os << static_cast<IndexInterval>(x);
}
private:
explicit constexpr IndexIntervalRef(Index& inclusive_min,
Index& size)
: inclusive_min_(inclusive_min), size_(size) {}
Index& inclusive_min_;
Index& size_;
};
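// Example (illustrative; the names below are local to this sketch):
// UncheckedSized binds references, so assigning through the ref mutates the
// referenced storage:
//   Index min = 1, size = 5;
//   IndexIntervalRef ref = IndexIntervalRef::UncheckedSized(min, size);
//   ref = IndexInterval::UncheckedSized(2, 3);  // now min == 2, size == 3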
IndexInterval Hull(IndexInterval a, IndexInterval b);
IndexInterval Intersect(IndexInterval a, IndexInterval b);
inline IndexInterval FiniteSubset(IndexInterval interval) {
return Intersect(interval, IndexInterval::FiniteRange());
}
bool AreCompatibleOrUnbounded(IndexInterval a, IndexInterval b);
bool ContainsOrUnbounded(IndexInterval outer, IndexInterval inner);
Result<IndexInterval> ShiftInterval(IndexInterval interval, Index min_offset,
Index max_offset);
Result<IndexInterval> ShiftInterval(IndexInterval interval, Index offset);
Result<IndexInterval> ShiftIntervalBackward(IndexInterval interval,
Index min_offset, Index max_offset);
Result<IndexInterval> ShiftIntervalBackward(IndexInterval interval,
Index offset);
Result<IndexInterval> ShiftIntervalTo(IndexInterval interval, Index origin);
absl::Status CheckContains(IndexInterval interval, Index index);
enum class IntervalForm {
sized,
closed,
half_open,
};
class OptionallyImplicitIndexInterval : public IndexInterval {
public:
constexpr OptionallyImplicitIndexInterval() noexcept = default;
constexpr OptionallyImplicitIndexInterval(IndexInterval interval,
bool implicit_lower,
bool implicit_upper) noexcept
: IndexInterval(interval),
implicit_lower_(implicit_lower),
implicit_upper_(implicit_upper) {}
const IndexInterval& interval() const { return *this; }
IndexInterval& interval() { return *this; }
bool implicit_lower() const { return implicit_lower_; }
bool& implicit_lower() { return implicit_lower_; }
bool implicit_upper() const { return implicit_upper_; }
bool& implicit_upper() { return implicit_upper_; }
IndexInterval effective_interval() const {
return IndexInterval::UncheckedClosed(
implicit_lower() ? -kInfIndex : inclusive_min(),
implicit_upper() ? +kInfIndex : inclusive_max());
}
friend std::ostream& operator<<(std::ostream& os,
const OptionallyImplicitIndexInterval& x);
friend bool operator==(const OptionallyImplicitIndexInterval& a,
const OptionallyImplicitIndexInterval& b) {
return a.interval() == b.interval() &&
a.implicit_lower() == b.implicit_lower() &&
a.implicit_upper() == b.implicit_upper();
}
friend bool operator!=(const OptionallyImplicitIndexInterval& a,
const OptionallyImplicitIndexInterval& b) {
return !(a == b);
}
template <typename H>
friend H AbslHashValue(H h, const OptionallyImplicitIndexInterval& x) {
return H::combine(std::move(h), x.interval(), x.implicit_lower(),
x.implicit_upper());
}
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.interval(), x.implicit_lower(), x.implicit_upper());
};
private:
bool implicit_lower_ = true;
bool implicit_upper_ = true;
};
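// Example (illustrative): implicit bounds resolve to infinite bounds in
// effective_interval(); {IndexInterval::UncheckedClosed(0, 10),
// /*implicit_lower=*/true, /*implicit_upper=*/false} has
// effective_interval() == IndexInterval::UncheckedClosed(-kInfIndex, 10).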
OptionallyImplicitIndexInterval Hull(OptionallyImplicitIndexInterval a,
OptionallyImplicitIndexInterval b);
OptionallyImplicitIndexInterval Intersect(OptionallyImplicitIndexInterval a,
OptionallyImplicitIndexInterval b);
OptionallyImplicitIndexInterval IntersectPreferringExplicit(
OptionallyImplicitIndexInterval a, OptionallyImplicitIndexInterval b);
template <ContainerKind LabelCKind = container>
class IndexDomainDimension : public OptionallyImplicitIndexInterval {
public:
using Label = std::conditional_t<LabelCKind == container, std::string,
std::string_view>;
IndexDomainDimension() = default;
IndexDomainDimension(const OptionallyImplicitIndexInterval& interval)
: OptionallyImplicitIndexInterval(interval) {}
IndexDomainDimension(const OptionallyImplicitIndexInterval& interval,
Label label)
: OptionallyImplicitIndexInterval(interval), label_(std::move(label)) {}
template <ContainerKind OtherCKind>
IndexDomainDimension(const IndexDomainDimension<OtherCKind>& other)
: IndexDomainDimension(other.optionally_implicit_interval(),
Label(other.label())) {}
template <ContainerKind OtherCKind>
IndexDomainDimension& operator=(
const IndexDomainDimension<OtherCKind>& other) {
optionally_implicit_interval() = other.optionally_implicit_interval();
label_ = Label(other.label());
return *this;
}
const OptionallyImplicitIndexInterval& optionally_implicit_interval() const {
return *this;
}
OptionallyImplicitIndexInterval& optionally_implicit_interval() {
return *this;
}
std::string_view label() const { return label_; }
Label& label() { return label_; }
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.optionally_implicit_interval(), x.label_);
};
#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wnon-template-friend"
#endif
friend std::ostream& operator<<(std::ostream& os,
const IndexDomainDimension& x);
#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC diagnostic pop
#endif
friend bool operator==(const IndexDomainDimension<container>& a,
const IndexDomainDimension<container>& b);
friend bool operator==(const IndexDomainDimension<view>& a,
const IndexDomainDimension<view>& b);
friend bool operator==(const IndexDomainDimension<view>& a,
const IndexDomainDimension<container>& b);
friend bool operator==(const IndexDomainDimension<container>& a,
const IndexDomainDimension<view>& b);
template <ContainerKind OtherCKind>
friend bool operator!=(const IndexDomainDimension<LabelCKind>& a,
const IndexDomainDimension<OtherCKind>& b) {
return !(a == b);
}
template <typename H>
friend H AbslHashValue(H h, const IndexDomainDimension& x) {
return H::combine(std::move(h), x.optionally_implicit_interval(),
x.label());
}
private:
Label label_;
};
template <ContainerKind LabelCKind>
bool operator==(const IndexDomainDimension<LabelCKind>& a,
const OptionallyImplicitIndexInterval& b) = delete;
template <ContainerKind LabelCKind>
bool operator!=(const IndexDomainDimension<LabelCKind>& a,
const OptionallyImplicitIndexInterval& b) = delete;
template <ContainerKind LabelCKind>
bool operator==(const OptionallyImplicitIndexInterval& a,
const IndexDomainDimension<LabelCKind>& b) = delete;
template <ContainerKind LabelCKind>
bool operator!=(const OptionallyImplicitIndexInterval& a,
const IndexDomainDimension<LabelCKind>& b) = delete;
template <ContainerKind LabelCKind>
bool operator==(const IndexDomainDimension<LabelCKind>& a,
const IndexInterval& b) = delete;
template <ContainerKind LabelCKind>
bool operator!=(const IndexDomainDimension<LabelCKind>& a,
const IndexInterval& b) = delete;
template <ContainerKind LabelCKind>
bool operator==(const IndexInterval& a,
const IndexDomainDimension<LabelCKind>& b) = delete;
template <ContainerKind LabelCKind>
bool operator!=(const IndexInterval& a,
const IndexDomainDimension<LabelCKind>& b) = delete;
Result<std::string_view> MergeDimensionLabels(std::string_view a,
std::string_view b);
Result<OptionallyImplicitIndexInterval> MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval a, OptionallyImplicitIndexInterval b); | #include "tensorstore/index_interval.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/hash/hash_testing.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::AreCompatibleOrUnbounded;
using ::tensorstore::ComputeStridedSliceMap;
using ::tensorstore::container;
using ::tensorstore::DividePositiveRoundOut;
using ::tensorstore::ExplicitIndexOr;
using ::tensorstore::ExtractClosedStridedSlice;
using ::tensorstore::ExtractHalfOpenStridedSlice;
using ::tensorstore::ExtractSizedStridedSlice;
using ::tensorstore::GetAffineTransformInverseDomain;
using ::tensorstore::ImplicitOrEqual;
using ::tensorstore::Index;
using ::tensorstore::IndexDomainDimension;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexIntervalRef;
using ::tensorstore::Intersect;
using ::tensorstore::IntervalForm;
using ::tensorstore::kImplicit;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::kMaxFiniteIndex;
using ::tensorstore::kMinFiniteIndex;
using ::tensorstore::MatchesStatus;
using ::tensorstore::MergeDimensionLabels;
using ::tensorstore::MergeOptionallyImplicitIndexIntervals;
using ::tensorstore::OptionallyImplicitIndexInterval;
using ::tensorstore::ShiftInterval;
using ::tensorstore::ShiftIntervalBackward;
using ::tensorstore::ShiftIntervalTo;
using ::tensorstore::StrCat;
using ::tensorstore::view;
using ::tensorstore::serialization::TestSerializationRoundTrip;
using ::testing::Optional;
using ::testing::Pair;
TEST(IndexIntervalTest, DefaultConstruct) {
IndexInterval x;
EXPECT_EQ(-kInfIndex, x.inclusive_min());
EXPECT_EQ(-kInfIndex - 1, x.exclusive_min());
EXPECT_EQ(kInfIndex, x.inclusive_max());
EXPECT_EQ(kInfIndex + 1, x.exclusive_max());
EXPECT_EQ(kInfSize, x.size());
EXPECT_FALSE(x.empty());
}
TEST(IndexIntervalTest, Empty) {
EXPECT_TRUE(IndexInterval::UncheckedSized(1, 0).empty());
}
TEST(IndexIntervalTest, ValidSized) {
EXPECT_TRUE(IndexInterval::ValidSized(0, 0));
EXPECT_TRUE(IndexInterval::ValidSized(-kInfIndex, kInfSize));
EXPECT_TRUE(IndexInterval::ValidSized(-kInfIndex, 100));
EXPECT_TRUE(IndexInterval::ValidSized(kInfIndex - 5, 6));
EXPECT_TRUE(IndexInterval::ValidSized(-kInfIndex, 2));
EXPECT_FALSE(IndexInterval::ValidSized(-kInfIndex - 1, 0));
EXPECT_FALSE(IndexInterval::ValidSized(5, -1));
EXPECT_FALSE(IndexInterval::ValidSized(kInfIndex - 5, 7));
EXPECT_FALSE(IndexInterval::ValidSized(-kInfIndex, 0));
EXPECT_FALSE(IndexInterval::ValidSized(-kInfIndex, 1));
EXPECT_FALSE(IndexInterval::ValidSized(kInfIndex, 1));
EXPECT_FALSE(IndexInterval::ValidSized(kInfIndex, 0));
}
TEST(IndexIntervalTest, ValidClosed) {
EXPECT_TRUE(IndexInterval::ValidClosed(0, 0));
EXPECT_TRUE(IndexInterval::ValidClosed(0, -1));
EXPECT_TRUE(IndexInterval::ValidClosed(-kInfIndex, kInfIndex));
EXPECT_TRUE(IndexInterval::ValidClosed(-5, kInfIndex));
EXPECT_TRUE(IndexInterval::ValidClosed(-kInfIndex, -kInfIndex + 1));
EXPECT_FALSE(IndexInterval::ValidClosed(0, -2));
EXPECT_FALSE(IndexInterval::ValidClosed(-kInfIndex - 1, 0));
EXPECT_FALSE(IndexInterval::ValidClosed(0, kInfIndex + 1));
EXPECT_FALSE(IndexInterval::ValidClosed(-kInfIndex, -kInfIndex));
EXPECT_FALSE(IndexInterval::ValidClosed(+kInfIndex, +kInfIndex));
}
TEST(IndexIntervalTest, ValidHalfOpen) {
EXPECT_TRUE(IndexInterval::ValidHalfOpen(0, 0));
EXPECT_FALSE(IndexInterval::ValidHalfOpen(0, -1));
EXPECT_TRUE(IndexInterval::ValidHalfOpen(-kInfIndex, kInfIndex + 1));
EXPECT_TRUE(IndexInterval::ValidHalfOpen(-5, kInfIndex + 1));
EXPECT_TRUE(IndexInterval::ValidHalfOpen(-kInfIndex, -kInfIndex + 2));
EXPECT_FALSE(IndexInterval::ValidHalfOpen(-kInfIndex - 1, 0));
EXPECT_FALSE(IndexInterval::ValidHalfOpen(0, kInfIndex + 2));
EXPECT_FALSE(IndexInterval::ValidHalfOpen(-kInfIndex, -kInfIndex));
EXPECT_FALSE(IndexInterval::ValidHalfOpen(-kInfIndex, -kInfIndex + 1));
EXPECT_FALSE(IndexInterval::ValidHalfOpen(kInfIndex, kInfIndex));
EXPECT_FALSE(IndexInterval::ValidHalfOpen(kInfIndex, kInfIndex + 1));
}
TEST(IndexIntervalTest, Sized) {
EXPECT_EQ(IndexInterval::UncheckedSized(0, 5), IndexInterval::Sized(0, 5));
EXPECT_THAT(IndexInterval::Sized(0, -1),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(IndexIntervalTest, UncheckedSized) {
auto x = IndexInterval::UncheckedSized(1, 5);
EXPECT_EQ(1, x.inclusive_min());
EXPECT_EQ(0, x.exclusive_min());
EXPECT_EQ(5, x.size());
EXPECT_EQ(5, x.inclusive_max());
EXPECT_EQ(6, x.exclusive_max());
}
TEST(IndexIntervalTest, Equality) {
EXPECT_TRUE(IndexInterval::UncheckedSized(1, 2) ==
IndexInterval::UncheckedSized(1, 2));
EXPECT_FALSE(IndexInterval::UncheckedSized(1, 2) !=
IndexInterval::UncheckedSized(1, 2));
EXPECT_FALSE(IndexInterval::UncheckedSized(1, 3) ==
IndexInterval::UncheckedSized(1, 2));
EXPECT_FALSE(IndexInterval::UncheckedSized(2, 2) ==
IndexInterval::UncheckedSized(1, 2));
EXPECT_TRUE(IndexInterval::UncheckedSized(2, 3) ==
IndexInterval::UncheckedClosed(2, 4));
EXPECT_TRUE(IndexInterval::UncheckedSized(2, 3) ==
IndexInterval::UncheckedHalfOpen(2, 5));
}
TEST(IndexIntervalTest, UncheckedClosed) {
EXPECT_EQ(IndexInterval::UncheckedSized(2, 3),
IndexInterval::UncheckedClosed(2, 4));
}
TEST(IndexIntervalTest, Closed) {
EXPECT_EQ(IndexInterval::UncheckedClosed(2, 4), IndexInterval::Closed(2, 4));
EXPECT_THAT(IndexInterval::Closed(2, 0),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(IndexIntervalTest, UncheckedHalfOpen) {
EXPECT_EQ(IndexInterval::UncheckedSized(2, 2),
IndexInterval::UncheckedHalfOpen(2, 4));
}
TEST(IndexIntervalTest, HalfOpen) {
EXPECT_EQ(IndexInterval::UncheckedHalfOpen(2, 4),
IndexInterval::HalfOpen(2, 4));
EXPECT_THAT(IndexInterval::HalfOpen(2, 0),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(IndexIntervalTest, ContainsIndex) {
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(3, 15), 5));
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(3, 15), 3));
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(3, 15), 15));
EXPECT_FALSE(Contains(IndexInterval::UncheckedClosed(3, 15), 2));
EXPECT_FALSE(Contains(IndexInterval::UncheckedClosed(3, 15), 16));
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(-kInfIndex, 15),
kMinFiniteIndex));
EXPECT_FALSE(
Contains(IndexInterval::UncheckedClosed(-kInfIndex, 15), -kInfIndex));
EXPECT_FALSE(Contains(IndexInterval::UncheckedClosed(-kInfIndex, 15), 16));
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(3, kInfIndex), 16));
EXPECT_TRUE(
Contains(IndexInterval::UncheckedClosed(3, kInfIndex), kMaxFiniteIndex));
EXPECT_FALSE(
Contains(IndexInterval::UncheckedClosed(3, kInfIndex), kInfIndex));
EXPECT_FALSE(Contains(IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex),
-kInfIndex));
EXPECT_FALSE(Contains(IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex),
kInfIndex));
EXPECT_TRUE(
Contains(IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex), 3));
}
TEST(IndexIntervalTest, ContainsInterval) {
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(3, 15),
IndexInterval::UncheckedClosed(3, 15)));
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(3, 15),
IndexInterval::UncheckedClosed(4, 15)));
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(3, 15),
IndexInterval::UncheckedClosed(3, 14)));
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(3, 15),
IndexInterval::UncheckedClosed(6, 8)));
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(3, 15),
IndexInterval::UncheckedSized(20, 0)));
EXPECT_FALSE(Contains(IndexInterval::UncheckedClosed(3, 15),
IndexInterval::UncheckedClosed(2, 10)));
EXPECT_FALSE(Contains(IndexInterval::UncheckedClosed(3, 15),
IndexInterval::UncheckedClosed(3, 16)));
EXPECT_FALSE(Contains(IndexInterval::UncheckedClosed(3, 15),
IndexInterval::UncheckedClosed(5, 16)));
}
TEST(IndexIntervalTest, IsFinite) {
EXPECT_TRUE(IsFinite(IndexInterval::UncheckedClosed(3, 15)));
EXPECT_FALSE(IsFinite(IndexInterval::UncheckedClosed(-kInfIndex, 15)));
EXPECT_FALSE(IsFinite(IndexInterval::UncheckedClosed(3, kInfIndex)));
EXPECT_FALSE(IsFinite(IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex)));
}
TEST(IndexIntervalTest, Intersect) {
EXPECT_EQ(IndexInterval::UncheckedClosed(3, 5),
Intersect(IndexInterval::UncheckedClosed(-3, 5),
IndexInterval::UncheckedClosed(3, 10)));
EXPECT_EQ(IndexInterval::UncheckedClosed(3, 5),
Intersect(IndexInterval::UncheckedClosed(3, 10),
IndexInterval::UncheckedClosed(-3, 5)));
EXPECT_EQ(IndexInterval::UncheckedClosed(3, 10),
Intersect(IndexInterval::UncheckedClosed(3, 10),
IndexInterval::UncheckedClosed(-3, 11)));
EXPECT_EQ(IndexInterval::UncheckedSized(3, 0),
Intersect(IndexInterval::UncheckedClosed(-3, 0),
IndexInterval::UncheckedClosed(3, 5)));
}
TEST(IndexIntervalTest, IntersectOptionallyImplicit) {
using OIII = OptionallyImplicitIndexInterval;
EXPECT_THAT(
Intersect(OIII{IndexInterval::UncheckedClosed(1, 5), false, false},
OIII{IndexInterval::UncheckedClosed(2, 6), false, true}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(2, 5), false, false}));
EXPECT_THAT(
Intersect(OIII{IndexInterval::UncheckedClosed(2, 5), false, true},
OIII{IndexInterval::UncheckedClosed(1, 6), true, true}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(2, 5), false, true}));
for (int x = 0; x < 16; x++) {
const bool a = ((x & 1) != 0);
const bool b = ((x & 2) != 0);
const bool c = ((x & 4) != 0);
const bool d = ((x & 8) != 0);
EXPECT_THAT(Intersect(OIII{IndexInterval::UncheckedClosed(1, 5), a, b},
OIII{IndexInterval::UncheckedClosed(1, 5), c, d}),
::testing::Eq(
OIII{IndexInterval::UncheckedClosed(1, 5), a && c, b && d}))
<< x;
EXPECT_THAT(Intersect(OIII{IndexInterval::UncheckedClosed(-3, 5), a, b},
OIII{IndexInterval::UncheckedClosed(3, 10), c, d}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(3, 5), c, b}))
<< x;
}
EXPECT_THAT(
Intersect(
OIII{IndexInterval::UncheckedClosed(-kInfIndex, kMaxFiniteIndex),
true, true},
OIII{IndexInterval::UncheckedClosed(0, 10), false, false}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(0, 10), false, false}));
EXPECT_THAT(
Intersect(
OIII{IndexInterval::UncheckedClosed(-kInfIndex, kMaxFiniteIndex),
true, true},
OIII{IndexInterval::UncheckedClosed(kMinFiniteIndex, kInfIndex),
false, false}),
::testing::Eq(
OIII{IndexInterval::UncheckedClosed(kMinFiniteIndex, kMaxFiniteIndex),
false, true}));
EXPECT_THAT(
Intersect(
OIII{IndexInterval::UncheckedClosed(-kInfIndex, kMaxFiniteIndex),
false, false},
OIII{IndexInterval::UncheckedClosed(0, 10), true, true}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(0, 10), true, true}));
}
TEST(IndexIntervalTest, IntersectPreferringExplicit) {
using OIII = OptionallyImplicitIndexInterval;
for (int x = 0; x < 16; x++) {
const bool a = ((x & 1) != 0);
const bool b = ((x & 2) != 0);
const bool c = ((x & 4) != 0);
const bool d = ((x & 8) != 0);
EXPECT_THAT(Intersect(OIII{IndexInterval::UncheckedClosed(1, 5), a, b},
OIII{IndexInterval::UncheckedClosed(1, 5), c, d}),
::testing::Eq(
OIII{IndexInterval::UncheckedClosed(1, 5), a && c, b && d}))
<< x;
EXPECT_THAT(Intersect(OIII{IndexInterval::UncheckedClosed(-3, 5), a, b},
OIII{IndexInterval::UncheckedClosed(3, 10), a, b}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(3, 5), a, b}))
<< x;
}
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(1, 5), false, false},
OIII{IndexInterval::UncheckedClosed(2, 6), false, true}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(2, 5), false, false}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(2, 5), false, true},
OIII{IndexInterval::UncheckedClosed(1, 6), true, true}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(2, 5), false, true}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-3, 5), true, false},
OIII{IndexInterval::UncheckedClosed(3, 10), true, false}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(3, 5), true, false}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-3, 5), false, false},
OIII{IndexInterval::UncheckedClosed(3, 10), false, false}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(3, 5), false, false}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-3, 5), false, true},
OIII{IndexInterval::UncheckedClosed(3, 10), false, true}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(3, 5), false, true}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-3, 5), true, true},
OIII{IndexInterval::UncheckedClosed(3, 10), true, true}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(3, 5), true, true}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-3, 5), true, false},
OIII{IndexInterval::UncheckedClosed(-5, 10), false, false}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(-5, 5), false, false}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-5, 10), false, false},
OIII{IndexInterval::UncheckedClosed(-3, 5), true, false}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(-5, 5), false, false}));
EXPECT_THAT(IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-3, 12), true, false},
OIII{IndexInterval::UncheckedClosed(-5, 10), false, true}),
::testing::Eq(
OIII{IndexInterval::UncheckedClosed(-5, 12), false, false}));
EXPECT_THAT(IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-5, 10), false, true},
OIII{IndexInterval::UncheckedClosed(-3, 12), true, false}),
::testing::Eq(
OIII{IndexInterval::UncheckedClosed(-5, 12), false, false}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-kInfIndex, kMaxFiniteIndex),
true, true},
OIII{IndexInterval::UncheckedClosed(0, 10), false, false}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(0, 10), false, false}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-kInfIndex, kMaxFiniteIndex),
true, true},
OIII{IndexInterval::UncheckedClosed(kMinFiniteIndex, kInfIndex),
false, false}),
::testing::Eq(
OIII{IndexInterval::UncheckedClosed(kMinFiniteIndex, kInfIndex),
false, false}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-kInfIndex, kMaxFiniteIndex),
false, false},
OIII{IndexInterval::UncheckedClosed(0, 10), true, true}),
::testing::Eq(
OIII{IndexInterval::UncheckedClosed(-kInfIndex, kMaxFiniteIndex),
false, false}));
}
TEST(IndexIntervalTest, Hull) {
EXPECT_EQ(IndexInterval::UncheckedClosed(3, 15),
Hull(IndexInterval::UncheckedClosed(3, 5),
IndexInterval::UncheckedClosed(10, 15)));
EXPECT_EQ(IndexInterval::UncheckedClosed(5, 15),
Hull(IndexInterval::UncheckedClosed(0, -1),
IndexInterval::UncheckedClosed(5, 15)));
EXPECT_EQ(IndexInterval::UncheckedClosed(5, 15),
Hull(IndexInterval::UncheckedClosed(5, 15),
IndexInterval::UncheckedClosed(0, -1)));
EXPECT_EQ(IndexInterval::UncheckedClosed(0, -1),
Hull(IndexInterval::UncheckedClosed(5, 4),
IndexInterval::UncheckedClosed(0, -1)));
}
TEST(IndexIntervalTest, HullOptionallyImplicit) {
using OIII = OptionallyImplicitIndexInterval;
EXPECT_THAT(
Hull(OIII{IndexInterval::UncheckedClosed(1, 5), false, true},
OIII{IndexInterval::UncheckedClosed(2, 6), false, true}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(1, 6), false, true}));
for (int x = 0; x < 16; x++) {
const bool a = ((x & 1) != 0);
const bool b = ((x & 2) != 0);
const bool c = ((x & 4) != 0);
const bool d = ((x & 8) != 0);
EXPECT_THAT(Hull(OIII{IndexInterval::UncheckedClosed(1, 5), a, b},
OIII{IndexInterval::UncheckedClosed(1, 5), c, d}),
::testing::Eq(
OIII{IndexInterval::UncheckedClosed(1, 5), a && c, b && d}))
<< x;
EXPECT_THAT(
Hull(OIII{IndexInterval::UncheckedClosed(-3, 5), a, b},
OIII{IndexInterval::UncheckedClosed(3, 10), c, d}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(-3, 10), a, d}))
<< x;
}
EXPECT_THAT(
Hull(OIII{IndexInterval::UncheckedClosed(-kInfIndex, kMaxFiniteIndex),
true, true},
OIII{IndexInterval::UncheckedClosed(kMinFiniteIndex, kInfIndex),
false, false}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex),
true, false}));
}
TEST(IndexIntervalTest, ContainsOrUnbounded) {
EXPECT_TRUE(
ContainsOrUnbounded(IndexInterval::UncheckedClosed(5, 10),
IndexInterval::UncheckedClosed(-kInfIndex, 10)));
EXPECT_TRUE(ContainsOrUnbounded(IndexInterval::UncheckedClosed(5, 10),
IndexInterval::UncheckedClosed(6, 9)));
EXPECT_FALSE(ContainsOrUnbounded(IndexInterval::UncheckedClosed(5, 10),
IndexInterval::UncheckedClosed(4, 10)));
EXPECT_TRUE(
ContainsOrUnbounded(IndexInterval::UncheckedClosed(5, 10),
IndexInterval::UncheckedClosed(5, kInfIndex)));
EXPECT_FALSE(ContainsOrUnbounded(IndexInterval::UncheckedClosed(5, 10),
IndexInterval::UncheckedClosed(5, 11)));
EXPECT_TRUE(ContainsOrUnbounded(
IndexInterval::UncheckedClosed(5, 10),
IndexInterval::UncheckedClosed(-kInfIndex, +kInfIndex)));
}
TEST(IndexIntervalTest, AreCompatibleOrUnbounded) {
EXPECT_TRUE(AreCompatibleOrUnbounded(IndexInterval(), IndexInterval()));
EXPECT_TRUE(AreCompatibleOrUnbounded(IndexInterval(),
IndexInterval::UncheckedSized(1, 4)));
EXPECT_TRUE(AreCompatibleOrUnbounded(IndexInterval::UncheckedSized(1, 4),
IndexInterval()));
EXPECT_FALSE(AreCompatibleOrUnbounded(IndexInterval::UncheckedSized(1, 4),
IndexInterval::UncheckedSized(1, 5)));
EXPECT_FALSE(AreCompatibleOrUnbounded(IndexInterval::UncheckedSized(1, 4),
IndexInterval::UncheckedSized(2, 3)));
EXPECT_TRUE(
AreCompatibleOrUnbounded(IndexInterval::UncheckedClosed(1, 4),
IndexInterval::UncheckedClosed(-kInfIndex, 4)));
EXPECT_TRUE(
AreCompatibleOrUnbounded(IndexInterval::UncheckedClosed(1, 4),
IndexInterval::UncheckedClosed(1, kInfIndex)));
}
TEST(IndexIntervalTest, Ostream) {
EXPECT_EQ("[1, 3)", StrCat(IndexInterval::UncheckedClosed(1, 2)));
EXPECT_EQ("(-inf, 3)", StrCat(IndexInterval::UncheckedClosed(-kInfIndex, 2)));
EXPECT_EQ("[7, +inf)", StrCat(IndexInterval::UncheckedClosed(7, kInfIndex)));
}
TEST(IndexIntervalTest, Hash) {
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({
IndexInterval(),
IndexInterval::UncheckedSized(0, 1),
IndexInterval::UncheckedSized(0, 0),
IndexInterval::UncheckedSized(0, 2),
IndexInterval::UncheckedSized(1, 2),
}));
}
TEST(IndexIntervalTest, ShiftInterval) {
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(1, 8), 2),
Optional(IndexInterval::UncheckedClosed(3, 10)));
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(-kInfIndex, 8), 2),
Optional(IndexInterval::UncheckedClosed(-kInfIndex, 10)));
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(1, kInfIndex), 2),
Optional(IndexInterval::UncheckedClosed(3, kInfIndex)));
EXPECT_THAT(ShiftInterval(
IndexInterval::UncheckedClosed(kMinFiniteIndex + 1, 101), -1),
Optional(IndexInterval::Closed(kMinFiniteIndex, 100)));
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(5, 10), -kInfIndex),
Optional(IndexInterval::UncheckedClosed(-kInfIndex + 5,
-kInfIndex + 10)));
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(5, 10), kInfIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"5 \\+ [0-9]+ is outside valid range .*"));
EXPECT_THAT(
ShiftInterval(IndexInterval::UncheckedClosed(5, 10), kMaxFiniteIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"5 \\+ [0-9]+ is outside valid range .*"));
EXPECT_THAT(
ShiftInterval(IndexInterval::UncheckedClosed(-1, 10), kMinFiniteIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"-1 \\+ -[0-9]+ is outside valid range .*"));
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(-kInfIndex, -5),
kMinFiniteIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"-5 \\+ -[0-9]+ is outside valid range .*"));
}
TEST(IndexIntervalTest, ShiftIntervalBackward) {
EXPECT_THAT(
ShiftIntervalBackward(IndexInterval(), std::numeric_limits<Index>::min()),
Optional(IndexInterval()));
EXPECT_THAT(ShiftIntervalBackward(IndexInterval::UncheckedClosed(1, 8), -2),
Optional(IndexInterval::UncheckedClosed(3, 10)));
EXPECT_THAT(
ShiftIntervalBackward(IndexInterval::UncheckedClosed(-kInfIndex, 8), -2),
Optional(IndexInterval::UncheckedClosed(-kInfIndex, 10)));
EXPECT_THAT(
ShiftIntervalBackward(IndexInterval::UncheckedClosed(1, kInfIndex), -2),
Optional(IndexInterval::UncheckedClosed(3, kInfIndex)));
EXPECT_THAT(ShiftIntervalBackward(
IndexInterval::UncheckedClosed(kMinFiniteIndex + 1, 101), 1),
Optional(IndexInterval::Closed(kMinFiniteIndex, 100)));
EXPECT_THAT(
ShiftIntervalBackward(IndexInterval::UncheckedClosed(5, 10), kInfIndex),
Optional(
IndexInterval::UncheckedClosed(-kInfIndex + 5, -kInfIndex + 10)));
EXPECT_THAT(
ShiftIntervalBackward(IndexInterval::UncheckedClosed(5, 10), -kInfIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"5 \\+ [0-9]+ is outside valid range .*"));
EXPECT_THAT(ShiftIntervalBackward(IndexInterval::UncheckedClosed(5, 10),
kMinFiniteIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"5 \\+ [0-9]+ is outside valid range .*"));
EXPECT_THAT(ShiftIntervalBackward(IndexInterval::UncheckedClosed(-1, 10),
kMaxFiniteIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"-1 \\+ -[0-9]+ is outside valid range .*"));
EXPECT_THAT(
ShiftIntervalBackward(IndexInterval::UncheckedClosed(-kInfIndex, -5),
kMaxFiniteIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"-5 \\+ -[0-9]+ is outside valid range .*"));
}
TEST(IndexIntervalTest, ShiftIntervalSeparateOffsets) {
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(1, 8), 2, 5),
Optional(IndexInterval::UncheckedClosed(3, 13)));
EXPECT_THAT(
ShiftInterval(IndexInterval::UncheckedClosed(-kMaxFiniteIndex, 8), 0, 5),
Optional(IndexInterval::UncheckedClosed(-kMaxFiniteIndex, 13)));
EXPECT_THAT(
ShiftInterval(IndexInterval::UncheckedClosed(-kMaxFiniteIndex, 8), 1, 5),
Optional(IndexInterval::UncheckedClosed(-kMaxFiniteIndex + 1, 13)));
EXPECT_THAT(
ShiftInterval(IndexInterval::UncheckedClosed(-kMaxFiniteIndex, 8), -1, 5),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"-[0-9]+ \\+ -1 is outside valid range .*"));
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(-1, 8),
std::numeric_limits<Index>::min(), 5),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"-1 \\+ -[0-9]+ is outside valid range .*"));
EXPECT_THAT(
ShiftInterval(IndexInterval::UncheckedClosed(2, kMaxFiniteIndex), -1, 0),
Optional(IndexInterval::UncheckedClosed(1, kMaxFiniteIndex)));
EXPECT_THAT(
ShiftInterval(IndexInterval::UncheckedClosed(2, kMaxFiniteIndex), -1, 1),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"[0-9]+ \\+ 1 is outside valid range .*"));
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(2, 1), -1,
std::numeric_limits<Index>::max()),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"1 \\+ [0-9]+ is outside valid range .*"));
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(0, 8),
std::numeric_limits<Index>::min(), 5),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"0 \\+ -[0-9]+ is outside valid range .*"));
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(1, 8), 2, 5),
Optional(IndexInterval::UncheckedClosed(3, 13)));
EXPECT_THAT(
ShiftInterval(IndexInterval::UncheckedClosed(-kInfIndex, 8), 2, 5),
Optional(IndexInterval::UncheckedClosed(-kInfIndex, 13)));
EXPECT_THAT(
ShiftInterval(IndexInterval::UncheckedClosed(1, +kInfIndex), 2, 5),
Optional(IndexInterval::UncheckedClosed(3, +kInfIndex)));
EXPECT_THAT(ShiftInterval(
IndexInterval::UncheckedClosed(-kInfIndex, +kInfIndex), 2, 5),
Optional(IndexInterval::UncheckedClosed(-kInfIndex, +kInfIndex)));
}
TEST(IndexIntervalTest, ShiftIntervalBackwardSeparateOffsets) {
EXPECT_THAT(
ShiftIntervalBackward(IndexInterval::UncheckedClosed(1, 8), -2, -5),
Optional(IndexInterval::UncheckedClosed(3, 13)));
EXPECT_THAT(ShiftIntervalBackward(
IndexInterval::UncheckedClosed(-kMaxFiniteIndex, 8), 0, -5),
Optional(IndexInterval::UncheckedClosed(-kMaxFiniteIndex, 13)));
EXPECT_THAT(
ShiftIntervalBackward(IndexInterval::UncheckedClosed(-kMaxFiniteIndex, 8),
-1, -5),
Optional(IndexInterval::UncheckedClosed(-kMaxFiniteIndex + 1, 13)));
EXPECT_THAT(ShiftIntervalBackward(
IndexInterval::UncheckedClosed(-kMaxFiniteIndex, 8), 1, -5),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"-[0-9]+ \\+ -1 is outside valid range .*"));
EXPECT_THAT(ShiftIntervalBackward(IndexInterval::UncheckedClosed(-1, 8),
std::numeric_limits<Index>::max(), -5),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"-1 \\+ -[0-9]+ is outside valid range .*"));
EXPECT_THAT(ShiftIntervalBackward(
IndexInterval::UncheckedClosed(2, kMaxFiniteIndex), 1, 0),
Optional(IndexInterval::UncheckedClosed(1, kMaxFiniteIndex)));
EXPECT_THAT(ShiftIntervalBackward(
IndexInterval::UncheckedClosed(2, kMaxFiniteIndex), 1, -1),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"[0-9]+ \\+ 1 is outside valid range .*"));
EXPECT_THAT(ShiftIntervalBackward(IndexInterval::UncheckedClosed(2, 1), 1,
std::numeric_limits<Index>::min()),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"1 \\+ -[0-9]+ is outside valid range .*"));
EXPECT_THAT(ShiftIntervalBackward(IndexInterval::UncheckedClosed(0, 8),
std::numeric_limits<Index>::max(), -5),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"0 \\+ -[0-9]+ is outside valid range .*"));
EXPECT_THAT(
ShiftIntervalBackward(IndexInterval::UncheckedClosed(1, 8), -2, -5),
Optional(IndexInterval::UncheckedClosed(3, 13)));
EXPECT_THAT(ShiftIntervalBackward(
IndexInterval::UncheckedClosed(-kInfIndex, 8), -2, -5),
Optional(IndexInterval::UncheckedClosed(-kInfIndex, 13)));
EXPECT_THAT(ShiftIntervalBackward(
IndexInterval::UncheckedClosed(1, +kInfIndex), -2, -5),
Optional(IndexInterval::UncheckedClosed(3, +kInfIndex)));
EXPECT_THAT(
ShiftIntervalBackward(
IndexInterval::UncheckedClosed(-kInfIndex, +kInfIndex), -2, -5),
Optional(IndexInterval::UncheckedClosed(-kInfIndex, +kInfIndex)));
}
TEST(IndexIntervalTest, ShiftIntervalTo) {
EXPECT_THAT(ShiftIntervalTo(IndexInterval::UncheckedClosed(1, 8), 3),
Optional(IndexInterval::UncheckedClosed(3, 10)));
EXPECT_THAT(ShiftIntervalTo(IndexInterval::UncheckedClosed(-kInfIndex, 8), 2),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Interval .* is not bounded below"));
EXPECT_THAT(ShiftIntervalTo(IndexInterval::UncheckedClosed(1, kInfIndex), 3),
Optional(IndexInterval::UncheckedClosed(3, kInfIndex)));
EXPECT_THAT(
ShiftIntervalTo(IndexInterval::UncheckedClosed(kMinFiniteIndex + 1, 101),
kMinFiniteIndex),
Optional(IndexInterval::Closed(kMinFiniteIndex, 100)));
EXPECT_THAT(
ShiftIntervalTo(IndexInterval::UncheckedClosed(5, 10), -kInfIndex),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Origin -[0-9]+ is outside valid range .*"));
EXPECT_THAT(ShiftIntervalTo(IndexInterval::UncheckedClosed(5, 10), kInfIndex),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Origin [0-9]+ is outside valid range .*"));
EXPECT_THAT(
ShiftIntervalTo(IndexInterval::UncheckedClosed(5, 10), kMaxFiniteIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"10 \\+ [0-9]+ is outside valid range .*"));
}
TEST(ExtractStridedSliceTest, Closed) {
using OIII = tensorstore::OptionallyImplicitIndexInterval;
EXPECT_THAT(
ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 6, |
531 | cpp | google/tensorstore | schema | tensorstore/proto/schema.cc | tensorstore/proto/schema_test.cc | #ifndef TENSORSTORE_PROTO_SCHEMA_H_
#define TENSORSTORE_PROTO_SCHEMA_H_
#include "absl/status/status.h"
#include "tensorstore/proto/schema.pb.h"
#include "tensorstore/schema.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
Result<Schema> ParseSchemaFromProto(const ::tensorstore::proto::Schema& proto);
void EncodeToProto(::tensorstore::proto::Schema& proto,
const Schema& schema);
}
#endif
#include "tensorstore/proto/schema.h"
#include <stddef.h>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/chunk_layout.h"
#include "tensorstore/codec_spec.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dimension_units.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/json.h"
#include "tensorstore/proto/array.h"
#include "tensorstore/proto/index_transform.h"
#include "tensorstore/proto/schema.pb.h"
#include "tensorstore/rank.h"
#include "tensorstore/schema.h"
#include "tensorstore/serialization/batch.h"
#include "tensorstore/serialization/fwd.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/unit.h"
namespace tensorstore {
namespace {
void EncodeToProto(::tensorstore::proto::OptionalUnit& proto,
const std::optional<Unit>& unit) {
if (unit.has_value()) {
proto.set_base_unit(unit->base_unit);
proto.set_multiplier(unit->multiplier);
}
}
bool IsValidGridView(ChunkLayout::GridView view) {
return (view.aspect_ratio().valid() || view.elements().valid() ||
view.shape().valid());
}
void EncodeToProto(::tensorstore::proto::ChunkLayout& proto,
const ChunkLayout& chunk_layout) {
auto encode_grid =
[](::tensorstore::proto::ChunkLayout::Grid& proto,
ChunkLayout::GridView grid_view) {
{
DimensionSet soft_constraints(false);
auto shape = grid_view.shape();
for (size_t i = 0; i < shape.size(); i++) {
proto.add_shape(shape[i]);
soft_constraints[i] = !shape.hard_constraint[i];
}
if (soft_constraints) {
proto.set_shape_soft_constraint_bitset(soft_constraints.to_uint());
}
}
{
DimensionSet soft_constraints(false);
auto aspect_ratio = grid_view.aspect_ratio();
for (size_t i = 0; i < aspect_ratio.size(); i++) {
proto.add_aspect_ratio(aspect_ratio[i]);
soft_constraints[i] = !aspect_ratio.hard_constraint[i];
}
if (soft_constraints) {
proto.set_aspect_ratio_soft_constraint_bitset(
soft_constraints.to_uint());
}
}
if (grid_view.elements().valid()) {
proto.set_elements(grid_view.elements().value);
if (!grid_view.elements().hard_constraint) {
proto.set_elements_soft_constraint(true);
}
}
};
{
DimensionSet grid_origin_soft_constraint_bitset(false);
auto grid_origin = chunk_layout.grid_origin();
for (size_t i = 0; i < grid_origin.size(); i++) {
proto.add_grid_origin(grid_origin[i]);
grid_origin_soft_constraint_bitset[i] = !grid_origin.hard_constraint[i];
}
if (grid_origin_soft_constraint_bitset) {
proto.set_grid_origin_soft_constraint_bitset(
grid_origin_soft_constraint_bitset.to_uint());
}
}
{
auto inner_order = chunk_layout.inner_order();
if (!inner_order.hard_constraint) {
proto.set_inner_order_soft_constraint(true);
}
for (size_t i = 0; i < inner_order.size(); i++) {
proto.add_inner_order(inner_order[i]);
}
}
if (IsValidGridView(chunk_layout.read_chunk())) {
encode_grid(*proto.mutable_read_chunk(), chunk_layout.read_chunk());
}
if (IsValidGridView(chunk_layout.write_chunk())) {
encode_grid(*proto.mutable_write_chunk(), chunk_layout.write_chunk());
}
if (IsValidGridView(chunk_layout.codec_chunk())) {
encode_grid(*proto.mutable_codec_chunk(), chunk_layout.codec_chunk());
}
}
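// Example (values mirror the round-trip unit test): a rank-3 grid origin with
// the first two dimensions unset (kImplicit) and soft-constrained, and a hard
// constraint of 11 on the third, encodes as
//   grid_origin: [ -9223372036854775808, -9223372036854775808, 11 ]
//   grid_origin_soft_constraint_bitset: 3
// since bit i of the bitset marks dimension i as a soft constraint.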
Result<ChunkLayout> ParseChunkLayoutFromProto(
const ::tensorstore::proto::ChunkLayout& proto) {
auto parse_grid = [](const ::tensorstore::proto::ChunkLayout::Grid& proto)
-> Result<ChunkLayout::Grid> {
ChunkLayout::Grid grid;
if (proto.shape_size() > 0) {
DimensionSet soft_constraints =
DimensionSet::FromUint(proto.shape_soft_constraint_bitset());
TENSORSTORE_RETURN_IF_ERROR(grid.Set(ChunkLayout::Grid::Shape(
tensorstore::span(proto.shape()), ~soft_constraints)));
}
if (proto.aspect_ratio_size() > 0) {
DimensionSet soft_constraints =
DimensionSet::FromUint(proto.aspect_ratio_soft_constraint_bitset());
TENSORSTORE_RETURN_IF_ERROR(grid.Set(ChunkLayout::Grid::AspectRatio(
tensorstore::span(proto.aspect_ratio()), ~soft_constraints)));
}
if (proto.has_elements()) {
TENSORSTORE_RETURN_IF_ERROR(grid.Set(ChunkLayout::Grid::Elements(
proto.elements(), !proto.elements_soft_constraint())));
}
return grid;
};
ChunkLayout chunk_layout;
if (proto.grid_origin_size() > 0) {
DimensionSet soft_constraints =
DimensionSet::FromUint(proto.grid_origin_soft_constraint_bitset());
TENSORSTORE_RETURN_IF_ERROR(chunk_layout.Set(ChunkLayout::GridOrigin(
tensorstore::span(proto.grid_origin()), ~soft_constraints)));
}
if (proto.inner_order_size() > 0) {
std::vector<DimensionIndex> inner_order(proto.inner_order().begin(),
proto.inner_order().end());
TENSORSTORE_RETURN_IF_ERROR(chunk_layout.Set(ChunkLayout::InnerOrder(
inner_order, !proto.inner_order_soft_constraint())));
}
if (proto.has_read_chunk()) {
TENSORSTORE_ASSIGN_OR_RETURN(auto grid, parse_grid(proto.read_chunk()));
TENSORSTORE_RETURN_IF_ERROR(chunk_layout.Set(
ChunkLayout::GridViewFor<ChunkLayout::Usage::kRead>(grid)));
}
if (proto.has_write_chunk()) {
TENSORSTORE_ASSIGN_OR_RETURN(auto grid, parse_grid(proto.write_chunk()));
TENSORSTORE_RETURN_IF_ERROR(chunk_layout.Set(
ChunkLayout::GridViewFor<ChunkLayout::Usage::kWrite>(grid)));
}
if (proto.has_codec_chunk()) {
TENSORSTORE_ASSIGN_OR_RETURN(auto grid, parse_grid(proto.codec_chunk()));
TENSORSTORE_RETURN_IF_ERROR(chunk_layout.Set(
ChunkLayout::GridViewFor<ChunkLayout::Usage::kCodec>(grid)));
}
return chunk_layout;
}
}
void EncodeToProto(::tensorstore::proto::Schema& proto,
const Schema& schema) {
if (DimensionIndex rank = schema.rank(); rank != dynamic_rank) {
proto.set_rank(rank);
}
if (DataType dtype = schema.dtype(); dtype.valid()) {
proto.set_dtype(std::string(dtype.name()));
}
if (IndexDomain<> domain = schema.domain(); domain.valid()) {
EncodeToProto(*proto.mutable_domain(), domain);
}
EncodeToProto(*proto.mutable_chunk_layout(), schema.chunk_layout());
if (Schema::FillValue fill_value = schema.fill_value(); fill_value.valid()) {
EncodeToProto(*proto.mutable_fill_value(), fill_value);
}
if (CodecSpec codec = schema.codec(); codec.valid()) {
auto serialized = tensorstore::serialization::EncodeBatch(schema.codec());
proto.set_codec(serialized.value());
}
if (Schema::DimensionUnits dimension_units = schema.dimension_units();
dimension_units.valid()) {
for (const auto& unit : dimension_units) {
EncodeToProto(*proto.add_dimension_unit(), unit);
}
}
}
Result<Schema> ParseSchemaFromProto(const ::tensorstore::proto::Schema& proto) {
Schema schema;
if (proto.has_rank()) {
TENSORSTORE_RETURN_IF_ERROR(schema.Set(RankConstraint(proto.rank())));
}
if (proto.has_dtype() && !proto.dtype().empty()) {
auto dtype = GetDataType(proto.dtype());
if (!dtype.valid()) {
return absl::InvalidArgumentError("dtype is not valid");
}
TENSORSTORE_RETURN_IF_ERROR(schema.Set(dtype));
}
if (proto.has_domain()) {
TENSORSTORE_ASSIGN_OR_RETURN(auto domain,
ParseIndexDomainFromProto(proto.domain()))
TENSORSTORE_RETURN_IF_ERROR(schema.Set(domain));
}
if (proto.has_chunk_layout()) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto chunk_layout, ParseChunkLayoutFromProto(proto.chunk_layout()))
TENSORSTORE_RETURN_IF_ERROR(schema.Set(chunk_layout));
}
if (proto.has_codec()) {
CodecSpec codec;
TENSORSTORE_RETURN_IF_ERROR(
tensorstore::serialization::DecodeBatch(proto.codec(), codec));
TENSORSTORE_RETURN_IF_ERROR(schema.Set(codec));
}
if (proto.has_fill_value()) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto array, ParseArrayFromProto(proto.fill_value(), zero_origin));
TENSORSTORE_ASSIGN_OR_RETURN(auto fill_value,
ArrayOriginCast<zero_origin>(array));
TENSORSTORE_RETURN_IF_ERROR(schema.Set(Schema::FillValue(fill_value)));
}
if (!proto.dimension_unit().empty()) {
DimensionUnitsVector dimensions;
for (size_t i = 0; i < proto.dimension_unit_size(); i++) {
auto& unit = proto.dimension_unit(i);
if (unit.has_multiplier() || !unit.base_unit().empty()) {
dimensions.emplace_back(std::in_place, unit.multiplier(),
unit.base_unit());
} else {
dimensions.emplace_back(std::nullopt);
}
}
TENSORSTORE_RETURN_IF_ERROR(schema.Set(Schema::DimensionUnits(dimensions)));
}
return schema;
}
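// Usage sketch (illustrative): EncodeToProto and ParseSchemaFromProto round
// trip, i.e. for a schema `s` with only supported constraints,
//   ::tensorstore::proto::Schema p;
//   EncodeToProto(p, s);
//   assert(ParseSchemaFromProto(p).value() == s);
// as exercised by the unit test below.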
} | #include "tensorstore/proto/schema.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/proto/protobuf_matchers.h"
#include "tensorstore/proto/schema.pb.h"
#include "tensorstore/schema.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::protobuf_matchers::EqualsProto;
using ::tensorstore::MatchesStatus;
using ::tensorstore::ParseSchemaFromProto;
using ::tensorstore::Schema;
template <typename Proto>
Proto ParseProtoOrDie(const std::string& asciipb) {
return protobuf_matchers::internal::MakePartialProtoFromAscii<Proto>(asciipb);
}
auto DoEncode(const Schema& schema) {
::tensorstore::proto::Schema proto;
::tensorstore::EncodeToProto(proto, schema);
return proto;
}
TEST(SchemaProtoTest, Basic) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto schema,
Schema::FromJson(
{
{"rank", 3},
{"dtype", "uint8"},
{"domain",
{{"labels", {"x", "y", "z"}},
{"inclusive_min", {1, 2, 3}},
{"exclusive_max", {5, 6, 7}}}},
{"chunk_layout",
{
{"codec_chunk",
{
{"elements_soft_constraint", 20},
{"aspect_ratio", {1, 2, 3}},
{"shape", {nullptr, 4, 5}},
}},
{"read_chunk",
{
{"elements", 30},
{"aspect_ratio", {4, 5, 6}},
{"shape_soft_constraint", {6, nullptr, 7}},
}},
{"write_chunk",
{
{"elements", 40},
{"aspect_ratio_soft_constraint", {7, 8, 9}},
{"shape", {8, 9, nullptr}},
}},
{"grid_origin", {nullptr, nullptr, 11}},
{"inner_order_soft_constraint", {2, 0, 1}},
}},
{"fill_value", 5},
{"dimension_units", {{4, "nm"}, nullptr, {30, "nm"}}},
}));
auto proto = ParseProtoOrDie<::tensorstore::proto::Schema>(R"pb(
rank: 3
dtype: "uint8"
domain {
origin: [ 1, 2, 3 ]
shape: [ 4, 4, 4 ]
labels: [ "x", "y", "z" ]
}
chunk_layout {
grid_origin: [ -9223372036854775808, -9223372036854775808, 11 ]
grid_origin_soft_constraint_bitset: 3
inner_order: [ 2, 0, 1 ]
inner_order_soft_constraint: true
write_chunk {
aspect_ratio: [ 7, 8, 9 ]
shape: [ 8, 9, 0 ]
elements: 40
aspect_ratio_soft_constraint_bitset: 7
shape_soft_constraint_bitset: 4
}
read_chunk {
shape: [ 6, 0, 7 ]
elements: 30
aspect_ratio: [ 4, 5, 6 ]
shape_soft_constraint_bitset: 7
}
codec_chunk {
elements: 20
shape: [ 0, 4, 5 ]
aspect_ratio: [ 1, 2, 3 ]
elements_soft_constraint: true
shape_soft_constraint_bitset: 1
}
}
fill_value { dtype: "uint8" void_data: "\x05" }
dimension_unit { multiplier: 4 base_unit: "nm" }
dimension_unit {}
dimension_unit { multiplier: 30 base_unit: "nm" }
)pb");
EXPECT_THAT(DoEncode(schema), EqualsProto(proto));
EXPECT_THAT(ParseSchemaFromProto(proto), testing::Eq(schema));
}
TEST(SchemaProtoTest, Empty) {
tensorstore::Schema schema;
EXPECT_THAT(
ParseSchemaFromProto(ParseProtoOrDie<::tensorstore::proto::Schema>(R"pb(
)pb")),
testing::Eq(schema));
}
TEST(SchemaProtoTest, RankFromDimensionUnit) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto schema,
ParseSchemaFromProto(ParseProtoOrDie<::tensorstore::proto::Schema>(R"pb(
rank: 1
dimension_unit {}
)pb")));
EXPECT_THAT(
ParseSchemaFromProto(ParseProtoOrDie<::tensorstore::proto::Schema>(R"pb(
dimension_unit {}
)pb")),
testing::Eq(schema));
}
TEST(SchemaProtoTest, Errors) {
EXPECT_THAT(
ParseSchemaFromProto(ParseProtoOrDie<::tensorstore::proto::Schema>(R"pb(
rank: -2
)pb")),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
ParseSchemaFromProto(ParseProtoOrDie<::tensorstore::proto::Schema>(R"pb(
dtype: "foo"
)pb")),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
ParseSchemaFromProto(ParseProtoOrDie<::tensorstore::proto::Schema>(R"pb(
codec: "12345"
)pb")),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
} |
532 | cpp | google/tensorstore | contiguous_layout | tensorstore/contiguous_layout.cc | tensorstore/contiguous_layout_test.cc | #ifndef TENSORSTORE_CONTIGUOUS_LAYOUT_H_
#define TENSORSTORE_CONTIGUOUS_LAYOUT_H_
#include <stddef.h>
#include <cassert>
#include <iosfwd>
#include "tensorstore/index.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
enum class ContiguousLayoutOrder {
right = 0,
c = 0,
row_major = 0,
left = 1,
fortran = 1,
column_major = 1
};
std::ostream& operator<<(std::ostream& os, ContiguousLayoutOrder order);
constexpr ContiguousLayoutOrder c_order = ContiguousLayoutOrder::c;
constexpr ContiguousLayoutOrder row_major_order =
ContiguousLayoutOrder::row_major;
constexpr ContiguousLayoutOrder fortran_order = ContiguousLayoutOrder::fortran;
constexpr ContiguousLayoutOrder column_major_order =
ContiguousLayoutOrder::column_major;
void ComputeStrides(ContiguousLayoutOrder order, ptrdiff_t element_stride,
span<const Index> shape, span<Index> strides);
template <ContiguousLayoutOrder Order = c_order, typename I = Index>
inline I GetContiguousOffset(span<const I> shape, span<const I> indices) {
assert(shape.size() == indices.size());
I offset = 0;
for (ptrdiff_t i = (Order == c_order) ? 0 : (indices.size() - 1);
(Order == c_order) ? (i < indices.size()) : (i >= 0);
(Order == c_order) ? ++i : --i) {
assert(indices[i] >= 0 && indices[i] < shape[i]);
offset *= shape[i];
offset += indices[i];
}
return offset;
}
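// Worked example (values mirror the unit test): for shape {5, 7, 11} in C
// order, indices {2, 3, 4} map to offset
//   ((2 * 7) + 3) * 11 + 4 == 2 * (7 * 11) + 3 * 11 + 4 == 191,
// while in Fortran order they map to 2 + 5 * 3 + (5 * 7) * 4 == 157.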
template <ContiguousLayoutOrder Order = c_order, typename I = Index>
inline void GetContiguousIndices(I offset, span<const I> shape,
span<I> indices) {
assert(shape.size() == indices.size());
assert(offset >= 0);
ptrdiff_t rank = shape.size();
for (ptrdiff_t i = (Order == c_order) ? (rank - 1) : 0;
(Order == c_order) ? (i >= 0) : (i < rank);
(Order == c_order) ? --i : ++i) {
const I size = shape[i];
indices[i] = offset % size;
offset /= size;
}
assert(offset == 0);
}
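// Example (mirrors the unit test): GetContiguousIndices inverts
// GetContiguousOffset; decoding offset 3 * 11 + 4 for shape {7, 11} in C
// order yields indices {3, 4}.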
}
#endif
#include "tensorstore/contiguous_layout.h"
#include <stddef.h>
#include <cassert>
#include <ostream>
#include "tensorstore/index.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
void ComputeStrides(ContiguousLayoutOrder order, ptrdiff_t element_stride,
span<const Index> shape, span<Index> strides) {
const DimensionIndex rank = shape.size();
assert(strides.size() == rank);
if (order == ContiguousLayoutOrder::right) {
for (DimensionIndex i = rank - 1; i >= 0; --i) {
strides[i] = element_stride;
element_stride *= shape[i];
}
} else {
for (DimensionIndex i = 0; i < rank; ++i) {
strides[i] = element_stride;
element_stride *= shape[i];
}
}
}
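// Example (values mirror the unit test): for shape {3, 4, 5} with
// element_stride 1, C order yields strides {20, 5, 1} and Fortran order
// yields strides {1, 3, 12}.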
std::ostream& operator<<(std::ostream& os, ContiguousLayoutOrder order) {
return os << (order == ContiguousLayoutOrder::c ? 'C' : 'F');
}
} | #include "tensorstore/contiguous_layout.h"
#include <array>
#include <sstream>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index.h"
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::ComputeStrides;
using ::tensorstore::ContiguousLayoutOrder;
using ::tensorstore::GetContiguousIndices;
using ::tensorstore::GetContiguousOffset;
using ::tensorstore::Index;
using ::tensorstore::span;
TEST(ContiguousLayoutOrderTest, PrintToOstream) {
{
std::ostringstream ostr;
ostr << ContiguousLayoutOrder::c;
EXPECT_EQ("C", ostr.str());
}
{
std::ostringstream ostr;
ostr << ContiguousLayoutOrder::fortran;
EXPECT_EQ("F", ostr.str());
}
}
TEST(ComputeStridesTest, COrder) {
{
std::array<Index, 3> strides;
ComputeStrides(ContiguousLayoutOrder::c, 1,
span<const Index>({3l, 4l, 5l}), strides);
EXPECT_THAT(strides, ::testing::ElementsAre(20, 5, 1));
}
{
std::array<Index, 3> strides;
ComputeStrides(ContiguousLayoutOrder::c, 2,
span<const Index>({3l, 4l, 5l}), strides);
EXPECT_THAT(strides, ::testing::ElementsAre(40, 10, 2));
}
}
TEST(ComputeStridesTest, FOrder) {
std::array<Index, 3> strides;
ComputeStrides(ContiguousLayoutOrder::fortran, 1,
span<const Index>({3l, 4l, 5l}), strides);
EXPECT_THAT(strides, ::testing::ElementsAre(1, 3, 12));
}
TEST(GetContiguousOffsetTest, Basic) {
Index indices[2];
EXPECT_EQ(3 * 11 + 4,
GetContiguousOffset<ContiguousLayoutOrder::c>({{7, 11}}, {{3, 4}}));
GetContiguousIndices<ContiguousLayoutOrder::c, Index>(3 * 11 + 4, {{7, 11}},
indices);
EXPECT_THAT(indices, ::testing::ElementsAre(3, 4));
EXPECT_EQ(3 + 4 * 7, GetContiguousOffset<ContiguousLayoutOrder::fortran>(
{{7, 11}}, {{3, 4}}));
GetContiguousIndices<ContiguousLayoutOrder::fortran, Index>(
3 + 4 * 7, {{7, 11}}, indices);
EXPECT_THAT(indices, ::testing::ElementsAre(3, 4));
EXPECT_EQ(
2 * (7 * 11) + 3 * 11 + 4,
GetContiguousOffset<ContiguousLayoutOrder::c>({{5, 7, 11}}, {{2, 3, 4}}));
EXPECT_EQ(2 + 5 * 3 + (5 * 7) * 4,
GetContiguousOffset<ContiguousLayoutOrder::fortran>({{5, 7, 11}},
{{2, 3, 4}}));
EXPECT_EQ(0, GetContiguousOffset<ContiguousLayoutOrder::c>({}, {}));
EXPECT_EQ(0, GetContiguousOffset<ContiguousLayoutOrder::fortran>({}, {}));
}
} |
533 | cpp | google/tensorstore | array | tensorstore/proto/array.cc | tensorstore/proto/array_test.cc | #ifndef TENSORSTORE_PROTO_ARRAY_H_
#define TENSORSTORE_PROTO_ARRAY_H_
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/index.h"
#include "tensorstore/proto/array.pb.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
void EncodeToProtoImpl(::tensorstore::proto::Array& proto,
OffsetArrayView<const void> array);
Result<SharedArray<void, dynamic_rank, offset_origin>> ParseArrayFromProto(
const ::tensorstore::proto::Array& proto,
ArrayOriginKind origin_kind = offset_origin,
DimensionIndex rank_constraint = dynamic_rank);
template <typename Element, DimensionIndex Rank, ArrayOriginKind OriginKind,
ContainerKind LayoutCKind>
void EncodeToProto(
::tensorstore::proto::Array& proto,
const Array<Shared<Element>, Rank, OriginKind, LayoutCKind>& value) {
EncodeToProtoImpl(proto, value);
}
}
#endif
#include "tensorstore/proto/array.h"
#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <string>
#include <type_traits>
#include "absl/status/status.h"
#include "riegeli/bytes/string_reader.h"
#include "riegeli/bytes/string_writer.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/unaligned_data_type_functions.h"
#include "tensorstore/proto/array.pb.h"
#include "tensorstore/rank.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace {
struct WriteProtoImpl {
tensorstore::proto::Array& proto;
void operator()(const double* item, void*) { proto.add_double_data(*item); }
void operator()(const float* item, void*) { proto.add_float_data(*item); }
void operator()(const int16_t* item, void*) { proto.add_int_data(*item); }
void operator()(const int32_t* item, void*) { proto.add_int_data(*item); }
void operator()(const int64_t* item, void*) { proto.add_int_data(*item); }
void operator()(const uint16_t* item, void*) { proto.add_uint_data(*item); }
void operator()(const uint32_t* item, void*) { proto.add_uint_data(*item); }
void operator()(const uint64_t* item, void*) { proto.add_uint_data(*item); }
};
struct ReadProtoImpl {
const tensorstore::proto::Array& proto;
size_t index = 0;
size_t error_count = 0;
void operator()(double* item, void*) { *item = proto.double_data(index++); }
void operator()(float* item, void*) { *item = proto.float_data(index++); }
void operator()(int16_t* item, void*) {
auto i = proto.int_data(index++);
*item = static_cast<int16_t>(i);
if (i > std::numeric_limits<int16_t>::max() ||
i < std::numeric_limits<int16_t>::min()) {
error_count++;
}
}
void operator()(int32_t* item, void*) {
auto i = proto.int_data(index++);
*item = static_cast<int32_t>(i);
if (i > std::numeric_limits<int32_t>::max() ||
i < std::numeric_limits<int32_t>::min()) {
error_count++;
}
}
void operator()(int64_t* item, void*) { *item = proto.int_data(index++); }
void operator()(uint16_t* item, void*) {
auto i = proto.uint_data(index++);
*item = static_cast<uint16_t>(i);
if (i > std::numeric_limits<uint16_t>::max()) {
error_count++;
}
}
void operator()(uint32_t* item, void*) {
auto i = proto.uint_data(index++);
*item = static_cast<uint32_t>(i);
if (i > std::numeric_limits<uint32_t>::max()) {
error_count++;
}
}
void operator()(uint64_t* item, void*) { *item = proto.uint_data(index++); }
};
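// Per-data-type dispatch table built below; entries stay null for data types
// without a typed repeated field, which instead use the raw void_data
// encoding.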
struct ProtoArrayDataTypeFunctions {
const internal::ElementwiseFunction<1, void*>* write_fn = nullptr;
const internal::ElementwiseFunction<1, void*>* read_fn = nullptr;
};
const std::array<ProtoArrayDataTypeFunctions, kNumDataTypeIds> kProtoFunctions =
MapCanonicalDataTypes([](auto dtype) {
using T = typename decltype(dtype)::Element;
ProtoArrayDataTypeFunctions functions;
if constexpr (std::is_invocable_v<ReadProtoImpl, T*, void*>) {
functions.write_fn =
internal::SimpleElementwiseFunction<WriteProtoImpl(const T),
void*>();
functions.read_fn =
internal::SimpleElementwiseFunction<ReadProtoImpl(T), void*>();
}
return functions;
});
}
void EncodeToProtoImpl(::tensorstore::proto::Array& proto,
OffsetArrayView<const void> array) {
const auto dtype = array.dtype();
proto.set_dtype(std::string(dtype.name()));
{
bool all_zero = true;
for (Index x : array.origin()) {
proto.add_origin(x);
all_zero &= (x == 0);
}
if (all_zero) proto.clear_origin();
}
for (Index x : array.shape()) {
proto.add_shape(x);
}
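  // Record dimensions that have a zero byte stride but an extent other than 1
  // in a bitset, so that repeated elements need not be serialized.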
{
DimensionSet zero_byte_strides(false);
for (DimensionIndex i = 0; i < array.rank(); i++) {
zero_byte_strides[i] =
(array.byte_strides()[i] == 0 && array.shape()[i] != 1);
}
if (zero_byte_strides) {
proto.set_zero_byte_strides_bitset(zero_byte_strides.to_uint());
}
}
const size_t index = static_cast<size_t>(dtype.id());
if (kProtoFunctions[index].write_fn) {
if (dtype.id() == DataTypeIdOf<int16_t> ||
dtype.id() == DataTypeIdOf<int32_t> ||
dtype.id() == DataTypeIdOf<int64_t>) {
proto.mutable_int_data()->Reserve(array.num_elements());
} else if (dtype.id() == DataTypeIdOf<uint16_t> ||
dtype.id() == DataTypeIdOf<uint32_t> ||
dtype.id() == DataTypeIdOf<uint64_t>) {
proto.mutable_uint_data()->Reserve(array.num_elements());
} else if (dtype.id() == DataTypeIdOf<double>) {
proto.mutable_double_data()->Reserve(array.num_elements());
} else if (dtype.id() == DataTypeIdOf<float>) {
proto.mutable_float_data()->Reserve(array.num_elements());
}
WriteProtoImpl impl{proto};
internal::IterateOverArrays({kProtoFunctions[index].write_fn, &impl},
nullptr,
{c_order, skip_repeated_elements}, array);
} else {
proto.mutable_void_data()->reserve(dtype.size() * array.num_elements());
riegeli::StringWriter writer(proto.mutable_void_data());
internal::IterateOverArrays(
{&internal::kUnalignedDataTypeFunctions[index].write_native_endian,
&writer},
nullptr, {c_order, skip_repeated_elements}, array);
writer.Close();
}
}
Result<SharedArray<void, dynamic_rank, offset_origin>> ParseArrayFromProto(
const ::tensorstore::proto::Array& proto, ArrayOriginKind origin_kind,
DimensionIndex rank_constraint) {
SharedArray<void, dynamic_rank, offset_origin> array;
DataType dtype = GetDataType(proto.dtype());
if (!dtype.valid()) {
return absl::DataLossError(
"Cannot deserialize array with unspecified data type");
}
const size_t rank = proto.shape_size();
if (rank_constraint != dynamic_rank && rank != rank_constraint) {
return absl::InvalidArgumentError("Proto array rank mismatch");
}
if (rank > kMaxRank) {
return absl::InvalidArgumentError("Proto rank exceeds maximum rank");
}
array.layout().set_rank(rank);
std::copy(proto.shape().begin(), proto.shape().end(),
array.layout().shape().begin());
std::fill(array.layout().origin().begin(), array.layout().origin().end(),
Index(0));
if (proto.origin_size() > 0 &&
std::any_of(proto.origin().begin(), proto.origin().end(),
[](auto x) { return x != 0; })) {
if (origin_kind == zero_origin) {
return absl::InvalidArgumentError(
"Proto zero_origin array has non-zero origin");
}
if (proto.origin_size() != rank) {
return absl::InvalidArgumentError("Proto origin/rank mismatch");
}
std::copy(proto.origin().begin(), proto.origin().end(),
array.layout().origin().begin());
}
Index num_elements = 1;
{
DimensionSet zero_byte_strides =
(proto.has_zero_byte_strides_bitset())
? DimensionSet::FromUint(proto.zero_byte_strides_bitset())
: DimensionSet(false);
for (DimensionIndex i = rank - 1; i >= 0; --i) {
if (!IndexInterval::ValidSized(array.origin()[i], array.shape()[i])) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Proto origin and shape of {", array.origin()[i], ", ",
array.shape()[i],
"} do not specify a valid IndexInterval for rank ", i));
}
if (zero_byte_strides[i]) {
array.layout().byte_strides()[i] = 0;
} else {
array.layout().byte_strides()[i] = 1;
if (internal::MulOverflow(num_elements, array.shape()[i],
&num_elements)) {
return absl::DataLossError(
tensorstore::StrCat("Invalid array shape ", array.shape()));
}
}
}
}
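  // The byte strides assigned above are placeholders (0 marks a repeated
  // dimension, 1 a stored one); AllocateArrayElementsLike replaces them with
  // the strides of a compact c_order allocation.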
array.element_pointer() = tensorstore::AllocateArrayElementsLike<void>(
array.layout(), array.byte_strides().data(),
{c_order, skip_repeated_elements}, default_init, dtype);
const size_t index = static_cast<size_t>(dtype.id());
if (kProtoFunctions[index].read_fn && proto.void_data().empty()) {
    if ((dtype.id() == DataTypeIdOf<int16_t> ||
         dtype.id() == DataTypeIdOf<int32_t> ||
         dtype.id() == DataTypeIdOf<int64_t>) &&
        proto.int_data_size() != num_elements) {
      return absl::DataLossError("proto int_data incomplete");
    }
    if ((dtype.id() == DataTypeIdOf<uint16_t> ||
         dtype.id() == DataTypeIdOf<uint32_t> ||
         dtype.id() == DataTypeIdOf<uint64_t>) &&
        proto.uint_data_size() != num_elements) {
      return absl::DataLossError("proto uint_data incomplete");
    }
if (dtype.id() == DataTypeIdOf<double> &&
proto.double_data_size() != num_elements) {
return absl::DataLossError("proto double_data incomplete");
}
if (dtype.id() == DataTypeIdOf<float> &&
proto.float_data_size() != num_elements) {
return absl::DataLossError("proto float_data incomplete");
}
ReadProtoImpl impl{proto};
internal::IterateOverArrays({kProtoFunctions[index].read_fn, &impl},
nullptr,
{c_order, skip_repeated_elements}, array);
if (impl.error_count > 0) {
return absl::DataLossError("Array element truncated");
}
} else {
riegeli::StringReader reader(proto.void_data());
internal::IterateOverArrays(
{&internal::kUnalignedDataTypeFunctions[index].read_native_endian,
&reader},
nullptr, {c_order, skip_repeated_elements}, array);
if (!reader.VerifyEndAndClose()) return reader.status();
}
return array;
}
} | #include "tensorstore/proto/array.h"
#include <memory>
#include <random>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_transform_testutil.h"
#include "tensorstore/internal/data_type_random_generator.h"
#include "tensorstore/internal/testing/random_seed.h"
#include "tensorstore/proto/array.pb.h"
#include "tensorstore/proto/protobuf_matchers.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::protobuf_matchers::EqualsProto;
using ::tensorstore::Index;
using ::tensorstore::kInfIndex;
using ::tensorstore::MatchesStatus;
using ::tensorstore::ParseArrayFromProto;
using ::tensorstore::StridedLayout;
template <typename Proto>
Proto ParseProtoOrDie(const std::string& asciipb) {
return protobuf_matchers::internal::MakePartialProtoFromAscii<Proto>(asciipb);
}
template <typename T>
auto DoEncode(const T& array) {
::tensorstore::proto::Array proto;
::tensorstore::EncodeToProto(proto, array);
return proto;
}
TEST(ArrayProtoTest, Basic) {
auto array = tensorstore::MakeArray<Index>({{{1, 0, 2, 2}, {4, 5, 6, 7}}});
auto proto = ParseProtoOrDie<::tensorstore::proto::Array>(R"pb(
dtype: "int64"
shape: [ 1, 2, 4 ]
int_data: [ 1, 0, 2, 2, 4, 5, 6, 7 ]
)pb");
EXPECT_THAT(DoEncode(array), EqualsProto(proto));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto copy, ParseArrayFromProto(proto));
ASSERT_TRUE(copy.valid());
EXPECT_EQ(copy.layout(), array.layout());
EXPECT_THAT(copy, testing::Eq(array));
}
TEST(ArrayProtoTest, BasicVoidData) {
auto array =
tensorstore::MakeOffsetArray<bool>({3, 1, -2}, {{{true}}, {{false}}});
auto proto = ParseProtoOrDie<::tensorstore::proto::Array>(R"pb(
dtype: "bool"
shape: [ 2, 1, 1 ]
origin: [ 3, 1, -2 ]
void_data: "\x01\x00"
)pb");
EXPECT_THAT(DoEncode(array), EqualsProto(proto));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto copy, ParseArrayFromProto(proto));
ASSERT_TRUE(copy.valid());
EXPECT_EQ(copy.layout(), array.layout());
EXPECT_THAT(copy, testing::Eq(array));
}
TEST(ArrayProtoTest, DecodeRank0) {
auto proto = ParseProtoOrDie<::tensorstore::proto::Array>(R"pb(
dtype: "int64"
int_data: [ 3 ]
)pb");
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto copy, ParseArrayFromProto(proto));
EXPECT_TRUE(copy.valid());
EXPECT_THAT(copy.rank(), testing::Eq(0));
}
TEST(ArrayProtoTest, ZeroStrides) {
int data[] = {1, 2, 3, 4, 5, 6};
tensorstore::SharedArray<int> array(
std::shared_ptr<int>(std::shared_ptr<void>(), &data[0]),
tensorstore::StridedLayout<>({kInfIndex + 1, 2, 3, kInfIndex + 1},
{0, 3 * sizeof(int), sizeof(int), 0}));
auto proto = ParseProtoOrDie<::tensorstore::proto::Array>(R"pb(
dtype: "int32"
shape: [ 4611686018427387904, 2, 3, 4611686018427387904 ]
zero_byte_strides_bitset: 9
int_data: [ 1, 2, 3, 4, 5, 6 ]
)pb");
EXPECT_THAT(DoEncode(array), EqualsProto(proto));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto copy, ParseArrayFromProto(proto));
ASSERT_TRUE(copy.valid());
ASSERT_EQ(copy.layout(), array.layout());
EXPECT_EQ(array, copy);
}
TEST(ArrayProtoTest, Errors) {
EXPECT_THAT(
ParseArrayFromProto(ParseProtoOrDie<::tensorstore::proto::Array>(R"pb(
dtype: "foo"
int_data: [ 3 ]
)pb")),
MatchesStatus(absl::StatusCode::kDataLoss));
EXPECT_THAT(
ParseArrayFromProto(ParseProtoOrDie<::tensorstore::proto::Array>(R"pb(
dtype: "int32"
int_data: [ 3 ]
)pb"),
tensorstore::offset_origin, 2),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
ParseArrayFromProto(ParseProtoOrDie<::tensorstore::proto::Array>(R"pb(
dtype: "int32"
shape: [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
]
)pb")),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
ParseArrayFromProto(ParseProtoOrDie<::tensorstore::proto::Array>(R"pb(
dtype: "int32"
shape: [ 1, 2, 3 ]
origin: [ 1, 2, 3 ]
)pb"),
tensorstore::zero_origin),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
ParseArrayFromProto(ParseProtoOrDie<::tensorstore::proto::Array>(R"pb(
dtype: "int32"
shape: [ 1, 2, 3 ]
origin: [ 1, 2 ]
)pb")),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
ParseArrayFromProto(ParseProtoOrDie<::tensorstore::proto::Array>(R"pb(
dtype: "int32"
shape: [ 1, -2, 3 ]
)pb")),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
ParseArrayFromProto(ParseProtoOrDie<::tensorstore::proto::Array>(R"pb(
dtype: "int32"
shape: [ 2147483647, 2147483647, 2147483647 ]
)pb")),
MatchesStatus(absl::StatusCode::kDataLoss));
EXPECT_THAT(
ParseArrayFromProto(ParseProtoOrDie<::tensorstore::proto::Array>(R"pb(
dtype: "int64"
int_data: [ 3, 4 ]
)pb")),
MatchesStatus(absl::StatusCode::kDataLoss));
EXPECT_THAT(
ParseArrayFromProto(ParseProtoOrDie<::tensorstore::proto::Array>(R"pb(
dtype: "int64"
shape: 2
int_data: [ 3 ]
)pb")),
MatchesStatus(absl::StatusCode::kDataLoss));
EXPECT_THAT(
ParseArrayFromProto(ParseProtoOrDie<::tensorstore::proto::Array>(R"pb(
dtype: "uint64"
shape: 2
)pb")),
MatchesStatus(absl::StatusCode::kDataLoss));
EXPECT_THAT(
ParseArrayFromProto(ParseProtoOrDie<::tensorstore::proto::Array>(R"pb(
dtype: "double"
shape: 2
)pb")),
MatchesStatus(absl::StatusCode::kDataLoss));
EXPECT_THAT(
ParseArrayFromProto(ParseProtoOrDie<::tensorstore::proto::Array>(R"pb(
dtype: "float"
shape: 2
)pb")),
MatchesStatus(absl::StatusCode::kDataLoss));
}
class RandomArrayProtoTest
: public ::testing::TestWithParam<tensorstore::DataType> {};
INSTANTIATE_TEST_SUITE_P(DataTypes, RandomArrayProtoTest,
::testing::ValuesIn(tensorstore::kDataTypes));
TEST_P(RandomArrayProtoTest, COrder) {
auto dtype = GetParam();
for (int iteration = 0; iteration < 100; ++iteration) {
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_PROTO_ARRAY_TEST_SEED")};
auto box = tensorstore::internal::MakeRandomBox(gen);
auto array = tensorstore::internal::MakeRandomArray(gen, box, dtype,
tensorstore::c_order);
auto proto = DoEncode(array);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto copy, ParseArrayFromProto(proto));
EXPECT_THAT(copy, testing::Eq(array));
}
}
TEST_P(RandomArrayProtoTest, FOrder) {
auto dtype = GetParam();
for (int iteration = 0; iteration < 100; ++iteration) {
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_PROTO_ARRAY_TEST_SEED")};
auto box = tensorstore::internal::MakeRandomBox(gen);
auto array = tensorstore::internal::MakeRandomArray(
gen, box, dtype, tensorstore::fortran_order);
auto proto = DoEncode(array);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto copy, ParseArrayFromProto(proto));
EXPECT_THAT(copy, testing::Eq(array));
}
}
} |
534 | cpp | google/tensorstore | static_cast | tensorstore/static_cast.cc | tensorstore/static_cast_test.cc | #ifndef TENSORSTORE_STATIC_CAST_H_
#define TENSORSTORE_STATIC_CAST_H_
#include <cassert>
#include <string_view>
#include <type_traits>
#include "absl/status/status.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
enum class CastChecking { checked = 0, unchecked = 1 };
struct unchecked_t {
explicit constexpr unchecked_t() = default;
constexpr operator CastChecking() const { return CastChecking::unchecked; }
};
constexpr unchecked_t unchecked{};
template <typename T>
struct StaticCastTraits;
template <typename T>
struct DefaultStaticCastTraits {
template <typename SourceRef>
static std::enable_if_t<std::is_constructible_v<T, unchecked_t, SourceRef>, T>
Construct(SourceRef&& source) {
return T(unchecked, std::forward<SourceRef>(source));
}
template <typename SourceRef>
static bool IsCompatible(SourceRef&& source) = delete;
static std::string Describe() = delete;
static std::string Describe(const T& value) = delete;
};
template <typename T>
using StaticCastTraitsType = StaticCastTraits<internal::remove_cvref_t<T>>;
namespace internal_cast {
template <CastChecking Checking, bool IsNoOp>
struct CastImpl;
absl::Status CastError(std::string_view source_description,
std::string_view target_description);
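// Three specializations follow: checked casts return a Result and validate
// via IsCompatible, unchecked casts assert compatibility in debug builds, and
// an unchecked cast to the same type is a no-op perfect forward.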
template <>
struct CastImpl<CastChecking::checked, false> {
template <typename SourceRef, typename Target>
using ResultType = Result<Target>;
template <typename Target, typename SourceRef>
static Result<Target> StaticCast(SourceRef&& source) {
if (!StaticCastTraits<Target>::IsCompatible(source)) {
return CastError(StaticCastTraitsType<SourceRef>::Describe(source),
StaticCastTraits<Target>::Describe());
}
return StaticCastTraits<Target>::Construct(std::forward<SourceRef>(source));
}
};
template <>
struct CastImpl<CastChecking::unchecked, false> {
template <typename SourceRef, typename Target>
using ResultType = Target;
template <typename Target, typename SourceRef>
static Target StaticCast(SourceRef&& source) {
assert(StaticCastTraits<Target>::IsCompatible(source) &&
"StaticCast is not valid");
return StaticCastTraits<Target>::Construct(std::forward<SourceRef>(source));
}
};
template <>
struct CastImpl<CastChecking::unchecked, true> {
template <typename SourceRef, typename Target>
using ResultType = SourceRef&&;
template <typename Target, typename SourceRef>
static SourceRef&& StaticCast(SourceRef&& source) {
return std::forward<SourceRef>(source);
}
};
template <typename Target, typename SourceRef, CastChecking Checking,
bool IsSame =
std::is_same_v<Target, internal::remove_cvref_t<SourceRef>>>
using CastImplType =
internal_cast::CastImpl<Checking,
(Checking == CastChecking::unchecked && IsSame)>;
template <typename Target, typename SourceRef, typename ReturnType = Target>
constexpr inline bool IsStaticCastConstructible = false;
template <typename Target, typename SourceRef>
constexpr inline bool IsStaticCastConstructible<
Target, SourceRef,
decltype(StaticCastTraits<Target>::Construct(std::declval<SourceRef>()))> =
true;
}
template <typename Target, typename SourceRef>
constexpr inline bool IsStaticCastConstructible =
internal_cast::IsStaticCastConstructible<Target, SourceRef>;
template <typename Target, typename SourceRef,
CastChecking Checking = CastChecking::unchecked>
using StaticCastResultType = std::enable_if_t<
IsStaticCastConstructible<Target, SourceRef>,
typename internal_cast::CastImplType<
Target, SourceRef, Checking>::template ResultType<SourceRef, Target>>;
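// Minimal usage sketch (with a hypothetical type `Foo<N>` that defines
// StaticCastTraits):
//   Result<Foo<3>> checked = StaticCast<Foo<3>>(foo_dynamic);
//   Foo<3> fast = StaticCast<Foo<3>, unchecked>(foo_dynamic);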
template <typename Target, CastChecking Checking = CastChecking::checked,
typename SourceRef>
std::enable_if_t<IsStaticCastConstructible<Target, SourceRef>,
StaticCastResultType<Target, SourceRef, Checking>>
StaticCast(SourceRef&& source) {
return internal_cast::CastImplType<Target, SourceRef, Checking>::
template StaticCast<Target>(std::forward<SourceRef>(source));
}
}
#endif
#include "tensorstore/static_cast.h"
#include "absl/status/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_cast {
absl::Status CastError(std::string_view source_description,
std::string_view target_description) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Cannot cast ", source_description, " to ", target_description));
}
}
} | #include "tensorstore/static_cast.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::dynamic_extent;
using ::tensorstore::IsStaticCastConstructible;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Result;
using ::tensorstore::span;
using ::tensorstore::StaticCast;
using ::tensorstore::unchecked;
using ::tensorstore::unchecked_t;
template <std::ptrdiff_t Extent>
struct X {
X(span<int, Extent> data) : data(data) {}
template <std::ptrdiff_t OtherExtent,
std::enable_if_t<(OtherExtent == Extent ||
OtherExtent == dynamic_extent ||
Extent == dynamic_extent)>* = nullptr>
explicit X(unchecked_t, X<OtherExtent> other)
: data(other.data.data(), other.data.size()) {}
span<int, Extent> data;
};
template <std::ptrdiff_t Extent>
struct Y {
Y(span<int, Extent> data) : data(data) {}
span<int, Extent> data;
};
}
namespace tensorstore {
template <std::ptrdiff_t Extent>
struct StaticCastTraits<X<Extent>> : public DefaultStaticCastTraits<X<Extent>> {
template <typename Other>
static bool IsCompatible(const Other& other) {
return other.data.size() == Extent || Extent == tensorstore::dynamic_extent;
}
static std::string Describe() { return StrCat("X with extent of ", Extent); }
static std::string Describe(const X<Extent>& value) {
return StrCat("X with extent of ", value.data.size());
}
};
template <std::ptrdiff_t Extent>
struct StaticCastTraits<Y<Extent>> {
template <std::ptrdiff_t OtherExtent,
std::enable_if_t<(OtherExtent == Extent ||
OtherExtent == dynamic_extent ||
Extent == dynamic_extent)>* = nullptr>
static Y<Extent> Construct(Y<OtherExtent> other) {
return Y<Extent>(span<int, Extent>(other.data.data(), other.data.size()));
}
template <typename Other>
static bool IsCompatible(const Other& other) {
return other.data.size() == Extent || Extent == tensorstore::dynamic_extent;
}
static std::string Describe() { return StrCat("Y with extent of ", Extent); }
static std::string Describe(const Y<Extent>& value) {
return StrCat("Y with extent of ", value.data.size());
}
};
}
namespace {
static_assert(IsStaticCastConstructible<X<3>, X<dynamic_extent>>);
static_assert(IsStaticCastConstructible<X<dynamic_extent>, X<3>>);
static_assert(IsStaticCastConstructible<X<3>, X<3>>);
static_assert(!IsStaticCastConstructible<X<3>, X<2>>);
static_assert(IsStaticCastConstructible<Y<3>, Y<dynamic_extent>>);
static_assert(IsStaticCastConstructible<Y<dynamic_extent>, Y<3>>);
static_assert(IsStaticCastConstructible<Y<3>, Y<3>>);
static_assert(!IsStaticCastConstructible<Y<3>, Y<2>>);
static_assert(std::is_same_v<const X<3>&, decltype(StaticCast<X<3>, unchecked>(
std::declval<const X<3>&>()))>);
static_assert(std::is_same_v<X<3>&, decltype(StaticCast<X<3>, unchecked>(
std::declval<X<3>&>()))>);
static_assert(std::is_same_v<X<3>&&, decltype(StaticCast<X<3>, unchecked>(
std::declval<X<3>&&>()))>);
static_assert(
std::is_same_v<X<3>, decltype(StaticCast<X<3>, unchecked>(
std::declval<const X<dynamic_extent>&>()))>);
static_assert(std::is_same_v<X<3>, decltype(StaticCast<X<3>, unchecked>(
std::declval<X<dynamic_extent>&>()))>);
static_assert(std::is_same_v<Result<X<3>>, decltype(StaticCast<X<3>>(
std::declval<const X<3>&>()))>);
static_assert(std::is_same_v<
Result<X<3>>, decltype(StaticCast<X<3>>(std::declval<X<3>&>()))>);
static_assert(std::is_same_v<Result<X<3>>,
decltype(StaticCast<X<3>>(
std::declval<const X<dynamic_extent>&>()))>);
static_assert(
std::is_same_v<Result<X<3>>, decltype(StaticCast<X<3>>(
std::declval<X<dynamic_extent>&>()))>);
TEST(DefaultCastTraitsTest, Success) {
std::vector<int> vec{1, 2, 3};
X<dynamic_extent> x(vec);
auto cast_result = StaticCast<X<3>>(x);
static_assert(std::is_same_v<decltype(cast_result), Result<X<3>>>);
ASSERT_TRUE(cast_result);
EXPECT_EQ(vec.data(), cast_result->data.data());
auto& noop_cast_result = StaticCast<X<dynamic_extent>, unchecked>(x);
EXPECT_EQ(&noop_cast_result, &x);
auto unchecked_cast_result = StaticCast<X<3>, unchecked>(x);
static_assert(std::is_same_v<decltype(unchecked_cast_result), X<3>>);
}
TEST(DefaultCastTraitsTest, CheckedFailure) {
std::vector<int> vec{1, 2, 3};
X<dynamic_extent> x(vec);
EXPECT_THAT(
StaticCast<X<2>>(x),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot cast X with extent of 3 to X with extent of 2"));
}
TEST(DefaultCastTraitsDeathTest, UncheckedFailure) {
std::vector<int> vec{1, 2, 3};
X<dynamic_extent> x(vec);
EXPECT_DEBUG_DEATH((StaticCast<X<2>, unchecked>(x)),
"StaticCast is not valid");
}
TEST(CustomTraitsTest, Success) {
std::vector<int> vec{1, 2, 3};
Y<dynamic_extent> x(vec);
auto cast_result = StaticCast<Y<3>>(x);
static_assert(std::is_same_v<decltype(cast_result), Result<Y<3>>>);
ASSERT_TRUE(cast_result);
EXPECT_EQ(vec.data(), cast_result->data.data());
auto& noop_cast_result = StaticCast<Y<dynamic_extent>, unchecked>(x);
EXPECT_EQ(&noop_cast_result, &x);
auto unchecked_cast_result = StaticCast<Y<3>, unchecked>(x);
static_assert(std::is_same_v<decltype(unchecked_cast_result), Y<3>>);
}
TEST(CustomTraitsTest, CheckedFailure) {
std::vector<int> vec{1, 2, 3};
Y<dynamic_extent> x(vec);
EXPECT_THAT(
StaticCast<Y<2>>(x),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot cast Y with extent of 3 to Y with extent of 2"));
}
TEST(CustomTraitsDeathTest, UncheckedFailure) {
std::vector<int> vec{1, 2, 3};
Y<dynamic_extent> x(vec);
EXPECT_DEBUG_DEATH((StaticCast<Y<2>, unchecked>(x)),
"StaticCast is not valid");
}
} |
535 | cpp | google/tensorstore | array_testutil | tensorstore/array_testutil.cc | tensorstore/array_testutil_test.cc | #ifndef TENSORSTORE_ARRAY_TESTUTIL_H_
#define TENSORSTORE_ARRAY_TESTUTIL_H_
#include <cstddef>
#include <ostream>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/array.h"
#include "tensorstore/container_kind.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/rank.h"
#include "tensorstore/static_cast.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/iterate_over_index_range.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_array {
template <typename Element>
class ArrayElementMatcherImpl
: public ::testing::MatcherInterface<OffsetArrayView<const void>> {
public:
ArrayElementMatcherImpl(
SharedOffsetArray<const ::testing::Matcher<Element>> element_matchers)
: element_matchers_(std::move(element_matchers)) {}
bool MatchAndExplain(
OffsetArrayView<const void> value_untyped,
::testing::MatchResultListener* listener) const override {
const bool listener_interested = listener->IsInterested();
if (value_untyped.dtype() != dtype_v<Element>) {
if (listener_interested) {
*listener << "which has a data type of " << value_untyped.dtype();
}
return false;
}
if (element_matchers_.domain() != value_untyped.domain()) {
return false;
}
auto value = StaticDataTypeCast<const Element, unchecked>(value_untyped);
SharedOffsetArray<std::string> explanations;
if (listener_interested) {
explanations = AllocateArray<std::string>(value.domain());
}
std::vector<Index> mismatch_indices;
bool matches =
IterateOverIndexRange(value.domain(), [&](span<const Index> indices) {
const Element& element = value(indices);
const auto& matcher = element_matchers_(indices);
bool element_matches;
if (listener_interested) {
::testing::StringMatchResultListener s;
element_matches = matcher.MatchAndExplain(element, &s);
explanations(indices) = s.str();
} else {
element_matches = matcher.Matches(element);
}
if (!element_matches) {
mismatch_indices.assign(indices.begin(), indices.end());
}
return element_matches;
});
if (!matches) {
if (listener_interested) {
*listener << "whose element at " << span(mismatch_indices)
<< " doesn't match";
const auto& explanation = explanations(mismatch_indices);
if (!explanation.empty()) {
*listener << ", " << explanation;
}
}
return false;
}
if (listener_interested) {
bool reason_printed = false;
IterateOverIndexRange(value.domain(), [&](span<const Index> indices) {
const std::string& explanation = explanations(indices);
if (explanation.empty()) return;
if (reason_printed) *listener << ",\nand ";
*listener << "whose element at " << span(indices) << " matches, "
<< explanation;
reason_printed = true;
});
}
return true;
}
void DescribeTo(std::ostream* os) const override {
*os << "has a data type of " << dtype_v<Element> << " and a domain of "
<< element_matchers_.domain();
if (!element_matchers_.domain().is_empty()) {
*os << " where\n";
bool is_first = true;
IterateOverIndexRange(element_matchers_.domain(),
[&](span<const Index> indices) {
if (!is_first) {
*os << ",\n";
}
is_first = false;
*os << "element at " << indices << " ";
element_matchers_(indices).DescribeTo(os);
});
}
}
void DescribeNegationTo(std::ostream* os) const override {
*os << "doesn't have a data type of "
<< dtype_v<Element> << ", or\ndoesn't have a domain of "
<< element_matchers_.domain();
IterateOverIndexRange(element_matchers_.domain(),
[&](span<const Index> indices) {
*os << ", or\nelement at " << indices << " ";
element_matchers_(indices).DescribeNegationTo(os);
});
}
private:
SharedOffsetArray<const ::testing::Matcher<Element>> element_matchers_;
};
}
using ArrayMatcher = ::testing::Matcher<OffsetArrayView<const void>>;
ArrayMatcher MatchesArray(
SharedOffsetArray<const void> expected,
EqualityComparisonKind comparison_kind = EqualityComparisonKind::equal);
inline ArrayMatcher MatchesArrayIdentically(
SharedOffsetArray<const void> expected) {
return MatchesArray(std::move(expected), EqualityComparisonKind::identical);
}
template <typename Element>
ArrayMatcher MatchesArray(
SharedOffsetArray<const ::testing::Matcher<Element>> matcher_array) {
return ::testing::MakeMatcher(
new internal_array::ArrayElementMatcherImpl<Element>(
std::move(matcher_array)));
}
template <typename Element>
ArrayMatcher MatchesScalarArray(const ::testing::Matcher<Element>& matcher) {
return MatchesArray<Element>(
MakeScalarArray<::testing::Matcher<Element>>(matcher));
}
template <typename Element, Index N0>
ArrayMatcher MatchesArray(
const ::testing::Matcher<Element> (&element_matchers)[N0]) {
return MatchesArray<Element>(MakeArray(element_matchers));
}
template <typename Element, Index N0>
ArrayMatcher MatchesArray(
span<const Index, 1> origin,
const ::testing::Matcher<Element> (&element_matchers)[N0]) {
return MatchesArray<Element>(MakeOffsetArray(origin, element_matchers));
}
template <typename Element, Index N0, std::ptrdiff_t OriginRank>
ArrayMatcher MatchesArray(
const Index (&origin)[OriginRank],
const ::testing::Matcher<Element> (&element_matchers)[N0]) {
static_assert(OriginRank == 1, "Origin vector must have length 1.");
return MatchesArray<Element>(MakeOffsetArray(origin, element_matchers));
}
#include "tensorstore/array_testutil_matches_array.inc"
namespace internal_array {
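// A dimension with extent <= 1 and zero origin contributes nothing to
// addressing, so its byte stride is normalized to zero before layouts are
// compared.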
inline StridedLayout<dynamic_rank, offset_origin>
NormalizeStridedLayoutForComparison(
StridedLayoutView<dynamic_rank, offset_origin> layout) {
StridedLayout<dynamic_rank, offset_origin> normalized(layout);
for (DimensionIndex i = 0; i < normalized.rank(); ++i) {
if (normalized.shape()[i] <= 1 && normalized.origin()[i] == 0) {
normalized.byte_strides()[i] = 0;
}
}
return normalized;
}
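// Like NormalizeStridedLayoutForComparison, but additionally folds the origin
// offset of size<=1 dimensions into the element pointer so that equivalent
// views compare equal.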
template <typename ElementTag, DimensionIndex Rank, ArrayOriginKind OriginKind,
ContainerKind LayoutCKind>
Array<ElementTag, Rank, OriginKind> NormalizeArrayForComparison(
const Array<ElementTag, Rank, OriginKind, LayoutCKind>& array) {
Array<ElementTag, Rank, OriginKind> normalized(array);
for (DimensionIndex i = 0; i < normalized.rank(); ++i) {
if (normalized.shape()[i] <= 1) {
auto& byte_stride = normalized.layout().byte_strides()[i];
const Index origin_value = normalized.origin()[i];
if (origin_value != 0) {
normalized.element_pointer() = AddByteOffset(
std::move(normalized.element_pointer()),
internal::wrap_on_overflow::Multiply(byte_stride, origin_value));
}
byte_stride = 0;
}
}
return normalized;
}
}
template <typename ElementTag, DimensionIndex Rank, ArrayOriginKind OriginKind,
ContainerKind LayoutCKind>
inline ArrayMatcher ReferencesSameDataAs(
const Array<ElementTag, Rank, OriginKind, LayoutCKind>& array) {
if (array.num_elements() == 0) {
return ::testing::AllOf(
::testing::ResultOf(
"dtype", [](const auto& a) { return a.dtype(); },
::testing::Eq(array.dtype())),
::testing::ResultOf(
"domain", [](const auto& a) { return a.domain(); },
::testing::Eq(array.domain())));
}
auto normalized_array = internal_array::NormalizeArrayForComparison(array);
return ::testing::ResultOf(
"normalized array",
[](const auto& a) {
return internal_array::NormalizeArrayForComparison(a);
},
::testing::AllOf(::testing::ResultOf(
"dtype", [](const auto& a) { return a.dtype(); },
::testing::Eq(normalized_array.dtype())),
::testing::ResultOf(
"data", [](const auto& a) { return a.data(); },
::testing::Eq(normalized_array.data())),
::testing::ResultOf(
"layout", [](const auto& a) { return a.layout(); },
::testing::Eq(normalized_array.layout()))));
}
}
#endif
#include "tensorstore/array_testutil.h"
#include <ostream>
#include <utility>
#include <gtest/gtest.h>
#include "tensorstore/array.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/util/iterate_over_index_range.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_array {
class ArrayMatcherImpl
: public ::testing::MatcherInterface<OffsetArrayView<const void>> {
public:
ArrayMatcherImpl(SharedOffsetArray<const void> expected,
EqualityComparisonKind comparison_kind)
: expected_(std::move(expected)), comparison_kind_(comparison_kind) {}
bool MatchAndExplain(
OffsetArrayView<const void> value,
::testing::MatchResultListener* listener) const override {
const bool listener_interested = listener->IsInterested();
if (value.dtype() != expected_.dtype()) {
if (listener_interested) {
*listener << "which has a data type of " << value.dtype();
}
return false;
}
if (expected_.domain() != value.domain()) {
if (listener_interested) {
*listener << "which has a domain of " << value.domain();
}
return false;
}
if (AreArraysEqual(expected_, value, comparison_kind_)) {
return true;
}
if (!listener_interested) return false;
bool reason_printed = false;
IterateOverIndexRange(value.domain(), [&](span<const Index> indices) {
if (!AreArraysEqual(value[indices], expected_[indices],
comparison_kind_)) {
if (reason_printed) {
*listener << ", ";
}
*listener << "whose element at " << indices
<< " doesn't match, expected=" << expected_[indices]
<< ", actual=" << value[indices];
reason_printed = true;
}
});
return false;
}
void DescribeTo(std::ostream* os) const override {
*os << "has a data type of " << expected_.dtype() << " and a domain of "
<< expected_.domain() << " and is "
<< (comparison_kind_ == EqualityComparisonKind::equal ? "equal"
: "identical")
<< " to " << expected_;
}
private:
SharedOffsetArray<const void> expected_;
EqualityComparisonKind comparison_kind_;
};
}
ArrayMatcher MatchesArray(SharedOffsetArray<const void> expected,
EqualityComparisonKind comparison_kind) {
return ::testing::MakeMatcher(new internal_array::ArrayMatcherImpl(
std::move(expected), comparison_kind));
}
} | #include "tensorstore/array_testutil.h"
#include <sstream>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::Index;
using ::tensorstore::MakeArray;
using ::tensorstore::MakeOffsetArray;
using ::tensorstore::MakeScalarArray;
using ::tensorstore::MatchesArray;
using ::tensorstore::MatchesScalarArray;
using ::tensorstore::span;
TEST(MatchesArrayTest, Describe) {
std::ostringstream ss;
MatchesArray<std::int32_t>({1, 2}).DescribeTo(&ss);
EXPECT_EQ(
R"(has a data type of int32 and a domain of {origin={0}, shape={2}} where
element at {0} is equal to 1,
element at {1} is equal to 2)",
ss.str());
}
TEST(MatchesArrayTest, DescribeNegation) {
std::ostringstream ss;
MatchesArray<std::int32_t>({1, 2}).DescribeNegationTo(&ss);
EXPECT_EQ(R"(doesn't have a data type of int32, or
doesn't have a domain of {origin={0}, shape={2}}, or
element at {0} isn't equal to 1, or
element at {1} isn't equal to 2)",
ss.str());
}
TEST(MatchesArrayTest, ExplainDataTypeMismatch) {
::testing::StringMatchResultListener listener;
::testing::ExplainMatchResult(MatchesArray<std::int32_t>({1, 2, 3}),
MakeArray<float>({1, 2}), &listener);
EXPECT_EQ("which has a data type of float32", listener.str());
}
TEST(MatchesArrayTest, ExplainDomainMismatch) {
::testing::StringMatchResultListener listener;
::testing::ExplainMatchResult(MatchesArray<int>({1, 2, 3}),
MakeArray<int>({1, 2}), &listener);
EXPECT_EQ("", listener.str());
}
TEST(MatchesArrayTest, ExplainElementMismatch) {
::testing::StringMatchResultListener listener;
::testing::ExplainMatchResult(MatchesArray<int>({1, 2}),
MakeArray<int>({1, 3}), &listener);
EXPECT_EQ("whose element at {1} doesn't match", listener.str());
}
TEST(MatchesArrayTest, ExplainElementMatch) {
::testing::StringMatchResultListener listener;
::testing::ExplainMatchResult(
MatchesArray<std::string>(
{::testing::Not(::testing::ElementsAre('d')),
::testing::Not(::testing::ElementsAre('a', 'b'))}),
MakeArray<std::string>({"x", "ac"}), &listener);
EXPECT_EQ(
"whose element at {0} matches, whose element #0 doesn't match,\n"
"and whose element at {1} matches, whose element #1 doesn't match",
listener.str());
}
TEST(MatchesArrayTest, ExplainElementMismatchExplanation) {
::testing::StringMatchResultListener listener;
::testing::ExplainMatchResult(
MatchesScalarArray<std::string>(::testing::ElementsAre('a', 'b')),
MakeScalarArray<std::string>("ac"), &listener);
EXPECT_EQ("whose element at {} doesn't match, whose element #1 doesn't match",
listener.str());
}
TEST(MatchesArrayTest, Matches) {
EXPECT_THAT(MakeScalarArray<int>(1), MatchesScalarArray<int>(1));
EXPECT_THAT(MakeArray<int>({1, 2}), MatchesArray<int>({1, 2}));
EXPECT_THAT(MakeArray<int>({{1, 2}}), MatchesArray<int>({{1, 2}}));
EXPECT_THAT(MakeArray<int>({{{1, 2}}}), MatchesArray<int>({{{1, 2}}}));
EXPECT_THAT(MakeArray<int>({{{{1, 2}}}}), MatchesArray<int>({{{{1, 2}}}}));
EXPECT_THAT(MakeArray<int>({{{{{1, 2}}}}}),
MatchesArray<int>({{{{{1, 2}}}}}));
EXPECT_THAT(MakeArray<int>({{{{{{1, 2}}}}}}),
MatchesArray<int>({{{{{{1, 2}}}}}}));
EXPECT_THAT(MakeOffsetArray<int>({3}, {1, 2}),
MatchesArray<int>({3}, {1, 2}));
EXPECT_THAT(MakeOffsetArray<int>({3, 4}, {{1, 2}}),
MatchesArray<int>({3, 4}, {{1, 2}}));
EXPECT_THAT(MakeOffsetArray<int>({3, 4, 5}, {{{1, 2}}}),
MatchesArray<int>({3, 4, 5}, {{{1, 2}}}));
EXPECT_THAT(MakeOffsetArray<int>({3, 4, 5, 6}, {{{{1, 2}}}}),
MatchesArray<int>({3, 4, 5, 6}, {{{{1, 2}}}}));
EXPECT_THAT(MakeOffsetArray<int>({3, 4, 5, 6, 7}, {{{{{1, 2}}}}}),
MatchesArray<int>({3, 4, 5, 6, 7}, {{{{{1, 2}}}}}));
EXPECT_THAT(MakeOffsetArray<int>({3, 4, 5, 6, 7, 8}, {{{{{{1, 2}}}}}}),
MatchesArray<int>({3, 4, 5, 6, 7, 8}, {{{{{{1, 2}}}}}}));
EXPECT_THAT(MakeOffsetArray<int>({3}, {1, 2}),
MatchesArray<int>(span<const Index, 1>({3}), {1, 2}));
EXPECT_THAT(MakeOffsetArray<int>({3, 4}, {{1, 2}}),
MatchesArray<int>(span<const Index, 2>({3, 4}), {{1, 2}}));
EXPECT_THAT(MakeOffsetArray<int>({3, 4, 5}, {{{1, 2}}}),
MatchesArray<int>(span<const Index, 3>({3, 4, 5}), {{{1, 2}}}));
EXPECT_THAT(
MakeOffsetArray<int>({3, 4, 5, 6}, {{{{1, 2}}}}),
MatchesArray<int>(span<const Index, 4>({3, 4, 5, 6}), {{{{1, 2}}}}));
EXPECT_THAT(
MakeOffsetArray<int>({3, 4, 5, 6, 7}, {{{{{1, 2}}}}}),
MatchesArray<int>(span<const Index, 5>({3, 4, 5, 6, 7}), {{{{{1, 2}}}}}));
EXPECT_THAT(MakeOffsetArray<int>({3, 4, 5, 6, 7, 8}, {{{{{{1, 2}}}}}}),
MatchesArray<int>(span<const Index, 6>({3, 4, 5, 6, 7, 8}),
{{{{{{1, 2}}}}}}));
EXPECT_THAT(MakeArray<int>({1, 3}),
::testing::Not(MatchesArray<int>({1, 2})));
EXPECT_THAT(MakeArray<int>({1}), ::testing::Not(MatchesArray<int>({1, 2})));
}
} |
536 | cpp | google/tensorstore | strided_layout | tensorstore/strided_layout.cc | tensorstore/strided_layout_test.cc | #ifndef TENSORSTORE_STRIDED_LAYOUT_H_
#define TENSORSTORE_STRIDED_LAYOUT_H_
#include <stddef.h>
#include <algorithm>
#include <cassert>
#include <iosfwd>
#include <string>
#include <type_traits>
#include <utility>
#include "tensorstore/box.h"
#include "tensorstore/container_kind.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/attributes.h"
#include "tensorstore/internal/gdb_scripting.h"
#include "tensorstore/internal/multi_vector.h"
#include "tensorstore/internal/multi_vector_view.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/rank.h"
#include "tensorstore/static_cast.h"
#include "tensorstore/util/constant_vector.h"
#include "tensorstore/util/extents.h"
#include "tensorstore/util/span.h"
TENSORSTORE_GDB_AUTO_SCRIPT("multi_vector_gdb.py")
namespace tensorstore {
enum class ArrayOriginKind {
zero,
offset
};
constexpr ArrayOriginKind zero_origin = ArrayOriginKind::zero;
constexpr ArrayOriginKind offset_origin = ArrayOriginKind::offset;
std::ostream& operator<<(std::ostream& os, ArrayOriginKind origin_kind);
constexpr inline bool IsArrayOriginKindConvertible(ArrayOriginKind source,
ArrayOriginKind target) {
return static_cast<int>(source) <= static_cast<int>(target);
}
template <DimensionIndex Rank = dynamic_rank,
ArrayOriginKind OriginKind = zero_origin,
ContainerKind CKind = container>
class StridedLayout;
template <DimensionIndex Rank = dynamic_rank,
ArrayOriginKind OriginKind = zero_origin>
using StridedLayoutView = StridedLayout<Rank, OriginKind, view>;
template <typename X>
constexpr inline bool IsStridedLayout = false;
template <DimensionIndex Rank, ArrayOriginKind OriginKind, ContainerKind CKind>
constexpr inline bool IsStridedLayout<StridedLayout<Rank, OriginKind, CKind>> =
true;
template <typename T0, typename T1>
inline std::enable_if_t<internal::IsIndexPack<T0, T1>, Index> IndexInnerProduct(
DimensionIndex n, const T0* a, const T1* b) {
return internal::wrap_on_overflow::InnerProduct<Index>(n, a, b);
}
template <DimensionIndex N, typename T0, typename T1>
inline std::enable_if_t<internal::IsIndexPack<T0, T1>, Index> IndexInnerProduct(
const T0* a, const T1* b) {
return internal::wrap_on_overflow::InnerProduct<N, Index>(a, b);
}
template <DimensionIndex Rank, typename T0, typename T1>
inline std::enable_if_t<internal::IsIndexPack<T0, T1>, Index> IndexInnerProduct(
span<T0, Rank> a, span<T1, Rank> b) {
assert(a.size() == b.size());
if constexpr (Rank == -1) {
return IndexInnerProduct(a.size(), a.data(), b.data());
} else {
return IndexInnerProduct<Rank>(a.data(), b.data());
}
}
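// Fills `layout->byte_strides()` with the strides of a contiguous layout of
// the given order and element stride; the shape (and origin, for the
// offset_origin overloads) must already be set or is copied from the
// arguments.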
template <DimensionIndex Rank, ArrayOriginKind OriginKind>
void InitializeContiguousLayout(ContiguousLayoutOrder order,
Index element_stride,
StridedLayout<Rank, OriginKind>* layout) {
ComputeStrides(order, element_stride, layout->shape(),
layout->byte_strides());
}
template <DimensionIndex Rank>
void InitializeContiguousLayout(
ContiguousLayoutOrder order, Index element_stride,
BoxView<RankConstraint::FromInlineRank(Rank)> domain,
StridedLayout<Rank, offset_origin>* layout) {
const auto rank = domain.rank();
layout->set_rank(rank);
std::copy_n(domain.origin().begin(), rank, layout->origin().begin());
std::copy_n(domain.shape().begin(), rank, layout->shape().begin());
InitializeContiguousLayout(order, element_stride, layout);
}
template <DimensionIndex Rank, ArrayOriginKind OriginKind>
void InitializeContiguousLayout(
ContiguousLayoutOrder order, Index element_stride,
internal::type_identity_t<
span<const Index, RankConstraint::FromInlineRank(Rank)>>
shape,
StridedLayout<Rank, OriginKind>* layout) {
layout->set_rank(GetStaticOrDynamicExtent(shape));
std::copy(shape.begin(), shape.end(), layout->shape().begin());
if constexpr (OriginKind == offset_origin) {
std::fill(layout->origin().begin(), layout->origin().end(), Index(0));
}
InitializeContiguousLayout(order, element_stride, layout);
}
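// Example: for shape {2, 3} with element_stride 4 (e.g. int32), c_order
// yields byte_strides {12, 4} and fortran_order yields {4, 8}.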
namespace internal_strided_layout {
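// LayoutAccess abstracts how origin/shape/byte_strides are stored: the
// zero_origin specialization synthesizes a constant zero origin vector, while
// offset_origin stores the origin explicitly.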
template <ArrayOriginKind OriginKind, typename StorageT>
struct LayoutAccess;
template <typename StorageT>
struct LayoutAccess<zero_origin, StorageT>
: public internal::MultiVectorAccess<StorageT> {
using Base = internal::MultiVectorAccess<StorageT>;
using Base::static_extent;
using MaybeConstOriginIndex = const Index;
using MaybeConstIndex = typename Base::template ElementType<0>;
static span<const Index, static_extent> origin(const StorageT* storage) {
return GetConstantVector<Index, 0>(Base::GetExtent(*storage));
}
static span<MaybeConstIndex, static_extent> shape(StorageT* storage) {
return Base::template get<0>(storage);
}
static span<MaybeConstIndex, static_extent> byte_strides(StorageT* storage) {
return Base::template get<1>(storage);
}
using Base::Assign;
template <typename Other>
static void AssignFrom(StorageT* storage, const Other& other) {
Assign(storage, StaticRankCast<static_extent, unchecked>(other.rank()),
other.shape().data(), other.byte_strides().data());
}
};
template <typename StorageT>
struct LayoutAccess<offset_origin, StorageT>
: public internal::MultiVectorAccess<StorageT> {
using Base = internal::MultiVectorAccess<StorageT>;
using RankType = typename Base::ExtentType;
using MaybeConstIndex = typename Base::template ElementType<0>;
using MaybeConstOriginIndex = MaybeConstIndex;
using Base::static_extent;
static span<MaybeConstIndex, static_extent> origin(StorageT* storage) {
return Base::template get<0>(storage);
}
static span<MaybeConstIndex, static_extent> shape(StorageT* storage) {
return Base::template get<1>(storage);
}
static span<MaybeConstIndex, static_extent> byte_strides(StorageT* storage) {
return Base::template get<2>(storage);
}
using Base::Assign;
static void Assign(StorageT* storage, RankType rank, const Index* shape,
const Index* byte_strides) {
Base::Assign(storage, rank, GetConstantVector<Index, 0>(rank).data(), shape,
byte_strides);
}
template <typename Other>
static void AssignFrom(StorageT* storage, const Other& other) {
Assign(storage, StaticRankCast<static_extent, unchecked>(other.rank()),
other.origin().data(), other.shape().data(),
other.byte_strides().data());
}
};
template <DimensionIndex Rank, ArrayOriginKind OriginKind, ContainerKind CKind>
struct LayoutStorageSelector;
template <DimensionIndex Rank>
struct LayoutStorageSelector<Rank, zero_origin, container> {
using Storage = internal::MultiVectorStorage<Rank, Index, Index>;
using Access = LayoutAccess<zero_origin, Storage>;
};
template <DimensionIndex Rank>
struct LayoutStorageSelector<Rank, zero_origin, view> {
using Storage =
internal::MultiVectorViewStorage<Rank, const Index, const Index>;
using Access = LayoutAccess<zero_origin, Storage>;
};
template <DimensionIndex Rank>
struct LayoutStorageSelector<Rank, offset_origin, container> {
using Storage = internal::MultiVectorStorage<Rank, Index, Index, Index>;
using Access = LayoutAccess<offset_origin, Storage>;
};
template <DimensionIndex Rank>
struct LayoutStorageSelector<Rank, offset_origin, view> {
using Storage = internal::MultiVectorViewStorage<Rank, const Index,
const Index, const Index>;
using Access = LayoutAccess<offset_origin, Storage>;
};
void PrintToOstream(
std::ostream& os,
const StridedLayout<dynamic_rank, offset_origin, view>& layout);
std::string DescribeForCast(DimensionIndex rank);
bool StridedLayoutsEqual(StridedLayoutView<dynamic_rank, offset_origin> a,
StridedLayoutView<dynamic_rank, offset_origin> b);
}
template <DimensionIndex Rank, ArrayOriginKind OriginKind, ContainerKind CKind>
class StridedLayout
: public internal_strided_layout::LayoutStorageSelector<Rank, OriginKind,
CKind>::Storage {
private:
static_assert(IsValidInlineRank(Rank));
using Selector =
internal_strided_layout::LayoutStorageSelector<Rank, OriginKind, CKind>;
using Storage = typename Selector::Storage;
using Access = typename Selector::Access;
public:
constexpr static ArrayOriginKind array_origin_kind = OriginKind;
constexpr static ContainerKind container_kind = CKind;
static_assert(CKind == container || Rank >= dynamic_rank);
constexpr static DimensionIndex static_rank =
RankConstraint::FromInlineRank(Rank);
template <DimensionIndex R, ArrayOriginKind O = OriginKind>
using Rebind = StridedLayout<R, O, CKind>;
using RankType = StaticOrDynamicRank<RankConstraint::FromInlineRank(Rank)>;
using MaybeConstIndex = typename Access::MaybeConstIndex;
using MaybeConstOriginIndex = typename Access::MaybeConstOriginIndex;
RankType rank() const { return Access::GetExtent(*this); }
StridedLayout() noexcept {
if (container_kind == view) {
const Index* zero_vec = GetConstantVector<Index, 0>(RankType{}).data();
Access::Assign(this, RankType{}, zero_vec, zero_vec);
}
}
template <ContainerKind SfinaeC = CKind,
typename = std::enable_if_t<SfinaeC == container>>
explicit StridedLayout(RankType rank) {
set_rank(rank);
}
explicit StridedLayout(
span<const Index, RankConstraint::FromInlineRank(Rank)> shape,
span<const Index, RankConstraint::FromInlineRank(Rank)> byte_strides) {
assert(shape.size() == byte_strides.size());
Access::Assign(this, GetStaticOrDynamicExtent(shape), shape.data(),
byte_strides.data());
}
template <size_t N, typename = std::enable_if_t<
RankConstraint::Implies(N, static_rank)>>
explicit StridedLayout(const Index (&shape)[N],
const Index (&byte_strides)[N]) {
Access::Assign(this, StaticRank<N>{}, shape, byte_strides);
}
template <ArrayOriginKind SfinaeOKind = array_origin_kind,
typename = std::enable_if_t<SfinaeOKind == offset_origin>>
explicit StridedLayout(
span<const Index, RankConstraint::FromInlineRank(Rank)> origin,
span<const Index, RankConstraint::FromInlineRank(Rank)> shape,
span<const Index, RankConstraint::FromInlineRank(Rank)> byte_strides) {
assert(origin.size() == shape.size());
assert(origin.size() == byte_strides.size());
Access::Assign(this, GetStaticOrDynamicExtent(origin), origin.data(),
shape.data(), byte_strides.data());
}
template <
size_t N, ArrayOriginKind SfinaeOKind = OriginKind,
typename = std::enable_if_t<SfinaeOKind == offset_origin &&
RankConstraint::Implies(N, static_rank)>>
explicit StridedLayout(const Index (&origin)[N], const Index (&shape)[N],
const Index (&byte_strides)[N]) {
Access::Assign(this, StaticRank<N>{}, origin, shape, byte_strides);
}
template <ArrayOriginKind SfinaeOKind = array_origin_kind,
typename = std::enable_if_t<SfinaeOKind == offset_origin>>
explicit StridedLayout(
BoxView<RankConstraint::FromInlineRank(Rank)> domain,
span<const Index, RankConstraint::FromInlineRank(Rank)> byte_strides) {
assert(domain.rank() == byte_strides.size());
Access::Assign(this, domain.rank(), domain.origin().data(),
domain.shape().data(), byte_strides.data());
}
template <
DimensionIndex R, ArrayOriginKind O, ContainerKind C,
ContainerKind SfinaeC = CKind,
typename = std::enable_if_t<
(ExplicitRequires(SfinaeC == container && C != container && R != 0) &&
RankConstraint::Implies(RankConstraint::FromInlineRank(R),
static_rank) &&
IsArrayOriginKindConvertible(O, OriginKind))>>
explicit StridedLayout(const StridedLayout<R, O, C>& source) {
Access::AssignFrom(this, source);
}
template <DimensionIndex R, ArrayOriginKind O, ContainerKind C,
typename = std::enable_if_t<
((CKind == view || C == container || R == 0) &&
RankConstraint::Implies(RankConstraint::FromInlineRank(R),
static_rank) &&
(R == 0 || IsArrayOriginKindConvertible(O, OriginKind)))>>
StridedLayout(const StridedLayout<R, O, C>& source) {
Access::AssignFrom(this, source);
}
template <DimensionIndex R, ArrayOriginKind O, ContainerKind C,
typename = std::enable_if_t<
(RankConstraint::EqualOrUnspecified(
RankConstraint::FromInlineRank(R), static_rank) &&
(R == 0 || IsArrayOriginKindConvertible(O, OriginKind)))>>
explicit StridedLayout(unchecked_t, const StridedLayout<R, O, C>& source) {
assert(RankConstraint::EqualOrUnspecified(source.rank(), static_rank));
Access::AssignFrom(this, source);
}
explicit StridedLayout(unchecked_t, StridedLayout&& source)
: StridedLayout(std::move(source)) {}
explicit StridedLayout(RankType rank, const Index* shape,
const Index* byte_strides) {
Access::Assign(this, rank, shape, byte_strides);
}
template <ArrayOriginKind OKind = array_origin_kind,
typename = std::enable_if_t<OKind == offset_origin>>
explicit StridedLayout(RankType rank, const Index* origin, const Index* shape,
const Index* byte_strides) {
Access::Assign(this, rank, origin, shape, byte_strides);
}
template <ArrayOriginKind SfinaeOKind = array_origin_kind,
typename = std::enable_if_t<(SfinaeOKind == offset_origin &&
container_kind == container)>>
explicit StridedLayout(ContiguousLayoutOrder order, Index element_stride,
BoxView<RankConstraint::FromInlineRank(Rank)> domain) {
InitializeContiguousLayout(order, element_stride, domain, this);
}
template <ContainerKind SfinaeC = container_kind,
typename = std::enable_if_t<(SfinaeC == container)>>
explicit StridedLayout(
ContiguousLayoutOrder order, Index element_stride,
span<const Index, RankConstraint::FromInlineRank(Rank)> shape) {
InitializeContiguousLayout(order, element_stride, shape, this);
}
template <
DimensionIndex R, ContainerKind SfinaeC = CKind,
typename = std::enable_if_t<(SfinaeC == container &&
RankConstraint::Implies(R, static_rank))>>
explicit StridedLayout(ContiguousLayoutOrder order, Index element_stride,
const Index (&shape)[R]) {
InitializeContiguousLayout(order, element_stride, span(shape), this);
}
template <DimensionIndex R, ArrayOriginKind O, ContainerKind C>
std::enable_if_t<(RankConstraint::Implies(RankConstraint::FromInlineRank(R),
static_rank) &&
(R == 0 || IsArrayOriginKindConvertible(O, OriginKind))),
StridedLayout&>
operator=(const StridedLayout<R, O, C>& other) {
Access::AssignFrom(this, other);
return *this;
}
template <int&... ExplicitArgumentBarrier, ContainerKind SfinaeC = CKind>
std::enable_if_t<SfinaeC == container> set_rank(RankType rank) {
Access::Resize(this, rank);
}
span<const Index, RankConstraint::FromInlineRank(Rank)> origin() const {
return const_cast<StridedLayout*>(this)->origin();
}
span<MaybeConstOriginIndex, RankConstraint::FromInlineRank(Rank)> origin() {
return Access::origin(this);
}
span<const Index, RankConstraint::FromInlineRank(Rank)> byte_strides() const {
return const_cast<StridedLayout*>(this)->byte_strides();
}
span<MaybeConstIndex, RankConstraint::FromInlineRank(Rank)> byte_strides() {
return Access::byte_strides(this);
}
span<const Index, RankConstraint::FromInlineRank(Rank)> shape() const {
return const_cast<StridedLayout*>(this)->shape();
}
span<MaybeConstIndex, RankConstraint::FromInlineRank(Rank)> shape() {
return Access::shape(this);
}
Index origin_byte_offset() const {
return array_origin_kind == zero_origin
? 0
: IndexInnerProduct(this->rank(), this->origin().data(),
this->byte_strides().data());
}
template <typename Indices>
std::enable_if_t<IsCompatiblePartialIndexVector<static_rank, Indices>, Index>
operator[](const Indices& indices) const {
const auto indices_span = span(indices);
assert(indices_span.size() <= rank() &&
"Length of index vector is greater than rank of array");
assert(ContainsPartial(*this, indices_span) &&
"Array index out of bounds.");
return IndexInnerProduct(indices_span.size(), byte_strides().data(),
indices_span.data());
}
template <typename IndexType, size_t N>
std::enable_if_t<
IsCompatiblePartialIndexVector<static_rank, const IndexType (&)[N]>,
Index>
operator[](const IndexType (&indices)[N]) const {
return (*this)[span<const IndexType, N>(indices)];
}
template <typename Indices>
std::enable_if_t<IsCompatibleFullIndexVector<static_rank, Indices>, Index>
operator()(const Indices& indices) const {
const auto indices_span = span(indices);
assert(indices_span.size() == rank() &&
"Length of index vector must match rank of array.");
return (*this)[indices_span];
}
template <size_t N>
std::enable_if_t<RankConstraint::EqualOrUnspecified(static_rank, N), Index>
operator()(const Index (&indices)[N]) const {
return (*this)(span<const Index, N>(indices));
}
template <typename... IndexType>
std::enable_if_t<IsCompatibleFullIndexPack<static_rank, IndexType...>, Index>
operator()(IndexType... index) const {
constexpr size_t N = sizeof...(IndexType);
if constexpr (N == 0) {
assert(rank() == 0);
return 0;
} else {
const Index indices[N] = {index...};
return (*this)(span<const Index, N>(indices));
}
}
Index num_elements() const { return ProductOfExtents(this->shape()); }
BoxView<RankConstraint::FromInlineRank(Rank)> domain() const {
return BoxView<static_rank>(this->origin(), this->shape());
}
friend std::ostream& operator<<(std::ostream& os,
const StridedLayout& layout) {
internal_strided_layout::PrintToOstream(os, layout);
return os;
}
template <DimensionIndex R, ArrayOriginKind O, ContainerKind C>
friend bool operator==(const StridedLayout& a,
const StridedLayout<R, O, C>& b) {
return internal_strided_layout::StridedLayoutsEqual(a, b);
}
template <DimensionIndex R, ArrayOriginKind O, ContainerKind C>
friend bool operator!=(const StridedLayout& a,
const StridedLayout<R, O, C>& b) {
return !internal_strided_layout::StridedLayoutsEqual(a, b);
}
};
template <DimensionIndex Rank>
explicit StridedLayout(const Index (&shape)[Rank],
const Index (&byte_strides)[Rank])
-> StridedLayout<Rank>;
template <DimensionIndex Rank>
explicit StridedLayout(const Index (&origin)[Rank], const Index (&shape)[Rank],
const Index (&byte_strides)[Rank])
-> StridedLayout<Rank, offset_origin>;
template <typename Shape, typename ByteStrides,
std::enable_if_t<(IsIndexConvertibleVector<Shape> &&
IsIndexConvertibleVector<ByteStrides>)>* = nullptr>
explicit StridedLayout(const Shape& shape, const ByteStrides& byte_strides)
    -> StridedLayout<SpanStaticExtent<Shape, ByteStrides>::value>; | #include "tensorstore/strided_layout.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
#ifdef NDEBUG
#define TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY(stmt, pattern)
#else
#define TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY(stmt, pattern) \
EXPECT_DEATH(stmt, pattern)
#endif
namespace {
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::ContiguousLayoutOrder;
using ::tensorstore::DimensionIndex;
using ::tensorstore::dynamic_rank;
using ::tensorstore::GetSubLayoutView;
using ::tensorstore::Index;
using ::tensorstore::IndexInnerProduct;
using ::tensorstore::IsStridedLayout;
using ::tensorstore::MatchesStatus;
using ::tensorstore::offset_origin;
using ::tensorstore::span;
using ::tensorstore::StaticCast;
using ::tensorstore::StaticRankCast;
using ::tensorstore::StrCat;
using ::tensorstore::StridedLayout;
using ::tensorstore::StridedLayoutView;
using ::tensorstore::unchecked;
using ::tensorstore::zero_origin;
using ::tensorstore::internal::remove_cvref_t;
static_assert(!IsStridedLayout<int>);
static_assert(IsStridedLayout<StridedLayout<>>);
static_assert(IsStridedLayout<StridedLayout<2, offset_origin>>);
static_assert(IsStridedLayout<StridedLayoutView<>>);
static_assert(IsStridedLayout<StridedLayoutView<2, offset_origin>>);
namespace dynamic_layout_cast_tests {
template <typename T>
constexpr inline bool NoOpCheck =
std::is_same_v<T, decltype(StaticCast<remove_cvref_t<T>, unchecked>(
std::declval<T>()))>;
static_assert(NoOpCheck<const StridedLayout<2>&>);
static_assert(NoOpCheck<StridedLayout<2>&>);
static_assert(NoOpCheck<StridedLayout<2>&&>);
static_assert(NoOpCheck<const StridedLayout<2, offset_origin>&>);
static_assert(NoOpCheck<StridedLayout<2, offset_origin>&>);
static_assert(NoOpCheck<StridedLayout<2, offset_origin>&&>);
}
namespace dynamic_rank_cast_tests {
template <typename T>
constexpr inline bool NoOpCheck =
std::is_same_v<T, decltype(StaticRankCast<remove_cvref_t<T>::static_rank,
unchecked>(std::declval<T>()))>;
static_assert(NoOpCheck<const StridedLayout<2>&>);
static_assert(NoOpCheck<StridedLayout<2>&>);
static_assert(NoOpCheck<StridedLayout<2>&&>);
static_assert(NoOpCheck<const StridedLayout<2, offset_origin>&>);
static_assert(NoOpCheck<StridedLayout<2, offset_origin>&>);
static_assert(NoOpCheck<StridedLayout<2, offset_origin>&&>);
}
static_assert(std::is_empty_v<StridedLayout<0>>);
static_assert(std::is_empty_v<StridedLayoutView<0>>);
static_assert(sizeof(Index) * 2 == sizeof(StridedLayout<1>));
static_assert(sizeof(Index) * 4 == sizeof(StridedLayout<2>));
static_assert(sizeof(Index*) * 2 == sizeof(StridedLayout<>));
static_assert(sizeof(Index*) * 3 == sizeof(StridedLayoutView<>));
static_assert(sizeof(Index*) * 2 == sizeof(StridedLayoutView<2>));
static_assert(sizeof(Index*) * 3 ==
sizeof(StridedLayoutView<2, offset_origin>));
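// Expected storage sizes: static-rank layouts store shape and byte_strides
// inline (2 * rank Index values), while dynamic-rank layouts and views are
// small pointer-based handles.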
TEST(IndexInnerProductTest, Basic) {
const Index a[] = {1, 2, 3};
const Index b[] = {4, 5, 6};
EXPECT_EQ(1 * 4 + 2 * 5 + 3 * 6, IndexInnerProduct(3, a, b));
}
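// IndexInnerProduct is defined with wrap-on-overflow (two's-complement)
// arithmetic, so terms that overflow to a multiple of 2^64 contribute zero.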
TEST(IndexInnerProductTest, WrapOnOverflowMultiply) {
const Index a[] = {Index(1) << 62, 2, 3};
const Index b[] = {4, 5, 6};
EXPECT_EQ(2 * 5 + 3 * 6, IndexInnerProduct(3, a, b));
}
TEST(IndexInnerProductTest, WrapOnOverflowAdd) {
const Index a[] = {Index(1) << 62, Index(1) << 62};
const Index b[] = {2, 2};
EXPECT_EQ(0, IndexInnerProduct(2, a, b));
}
TEST(IndexInnerProductTest, Span) {
const Index a[] = {1, 2, 3};
const Index b[] = {4, 5, 6};
EXPECT_EQ(1 * 4 + 2 * 5 + 3 * 6, IndexInnerProduct(span(a), span(b)));
}
namespace conversion_tests {
using ::tensorstore::internal::IsOnlyExplicitlyConvertible;
static_assert(IsOnlyExplicitlyConvertible<
StridedLayoutView<dynamic_rank, offset_origin>,
StridedLayout<dynamic_rank, offset_origin>>);
static_assert(IsOnlyExplicitlyConvertible<
StridedLayoutView<2, offset_origin>,
StridedLayout<dynamic_rank, offset_origin>>);
static_assert(
IsOnlyExplicitlyConvertible<
StridedLayoutView<2, offset_origin>, StridedLayout<2, offset_origin>>);
static_assert(IsOnlyExplicitlyConvertible<
StridedLayoutView<dynamic_rank, zero_origin>,
StridedLayout<dynamic_rank, offset_origin>>);
static_assert(
IsOnlyExplicitlyConvertible<
StridedLayoutView<2, zero_origin>, StridedLayout<2, offset_origin>>);
static_assert(IsOnlyExplicitlyConvertible<
StridedLayoutView<dynamic_rank, zero_origin>,
StridedLayout<dynamic_rank, zero_origin>>);
static_assert(IsOnlyExplicitlyConvertible<
StridedLayoutView<2, zero_origin>,
StridedLayout<dynamic_rank, zero_origin>>);
static_assert(!std::is_constructible_v<
StridedLayout<dynamic_rank, zero_origin>,
StridedLayoutView<dynamic_rank, offset_origin>>);
static_assert(!std::is_constructible_v<
StridedLayout<2, zero_origin>,
StridedLayoutView<dynamic_rank, zero_origin>>);
static_assert(!std::is_constructible_v<
StridedLayout<2, zero_origin>,
StridedLayoutView<3, zero_origin>>);
static_assert(std::is_convertible_v<
StridedLayoutView<0, offset_origin>,
StridedLayout<dynamic_rank, zero_origin>>);
static_assert(
std::is_convertible_v<
StridedLayoutView<0, offset_origin>, StridedLayout<0, zero_origin>>);
static_assert(std::is_convertible_v<
StridedLayout<2, zero_origin>,
StridedLayoutView<2, zero_origin>>);
static_assert(std::is_convertible_v<
StridedLayout<2, zero_origin>,
StridedLayoutView<dynamic_rank, zero_origin>>);
static_assert(std::is_convertible_v<
StridedLayout<2, zero_origin>,
StridedLayoutView<2, offset_origin>>);
static_assert(std::is_convertible_v<
StridedLayout<2, zero_origin>,
StridedLayoutView<dynamic_rank, offset_origin>>);
static_assert(std::is_convertible_v<
StridedLayout<2, offset_origin>,
StridedLayoutView<dynamic_rank, offset_origin>>);
static_assert(std::is_convertible_v<
StridedLayout<dynamic_rank, offset_origin>,
StridedLayoutView<dynamic_rank, offset_origin>>);
static_assert(std::is_convertible_v<
StridedLayout<2, zero_origin>,
StridedLayout<2, offset_origin>>);
static_assert(std::is_convertible_v<
StridedLayout<2, zero_origin>,
StridedLayout<dynamic_rank, zero_origin>>);
static_assert(std::is_convertible_v<
StridedLayout<2, zero_origin>,
StridedLayout<2, offset_origin>>);
static_assert(std::is_convertible_v<
StridedLayout<2, zero_origin>,
StridedLayout<dynamic_rank, offset_origin>>);
static_assert(std::is_convertible_v<
StridedLayout<2, offset_origin>,
StridedLayout<dynamic_rank, offset_origin>>);
static_assert(std::is_convertible_v<
StridedLayout<0, offset_origin>,
StridedLayout<dynamic_rank, zero_origin>>);
static_assert(std::is_convertible_v<
StridedLayout<0, offset_origin>,
StridedLayout<0, zero_origin>>);
}
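// In summary: view-to-owning conversions are explicit (they copy), while
// owning-to-view, rank-erasing (static -> dynamic), and zero_origin ->
// offset_origin conversions are implicit. Rank-0 offset_origin converts
// freely to zero_origin because there is no origin information to lose.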
TEST(StridedLayoutTest, DynamicRank0) {
StridedLayout<> layout;
EXPECT_EQ(0, layout.rank());
EXPECT_EQ(1, layout.num_elements());
EXPECT_TRUE(layout.shape().empty());
EXPECT_TRUE(layout.byte_strides().empty());
EXPECT_EQ(0, layout());
}
TEST(StridedLayoutDeathTest, DynamicRank0) {
StridedLayout<> layout;
TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY(
layout[{1}], "Length of index vector is greater than rank of array");
TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY(
layout({1}), "Length of index vector must match rank of array.");
TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY(
layout(1), "Length of index vector must match rank of array.");
}
TEST(StridedLayoutTest, DynamicRankCopyAndMove) {
StridedLayout<> layout;
layout.set_rank(3);
EXPECT_EQ(3, layout.rank());
layout.shape()[0] = 7;
layout.shape()[1] = 8;
layout.shape()[2] = 9;
layout.byte_strides()[0] = 4;
layout.byte_strides()[1] = 5;
layout.byte_strides()[2] = 6;
EXPECT_EQ(7 * 8 * 9, layout.num_elements());
EXPECT_EQ(8 + 5, (layout[{2, 1}]));
EXPECT_EQ(8 + 5 + 6, (layout[{2, 1, 1}]));
EXPECT_EQ(8 + 5 + 6, (layout({2, 1, 1})));
EXPECT_EQ(8 + 5 + 6, layout(span({2, 1, 1})));
EXPECT_EQ(8 + 5 + 6, layout(2, 1, 1));
auto layout2 = layout;
EXPECT_EQ(3, layout2.rank());
EXPECT_THAT(layout2.shape(), ::testing::ElementsAreArray({7, 8, 9}));
EXPECT_THAT(layout2.byte_strides(), ::testing::ElementsAreArray({4, 5, 6}));
EXPECT_TRUE(layout == layout2);
EXPECT_FALSE(layout != layout2);
layout.shape()[0] = 1;
EXPECT_FALSE(layout == layout2);
EXPECT_TRUE(layout != layout2);
const auto* shape = layout2.shape().data();
const auto* byte_strides = layout2.byte_strides().data();
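  // Moving a dynamic-rank layout transfers its heap storage: the source is
  // left empty (rank 0) and the destination reuses the same buffers.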
auto layout3 = std::move(layout2);
EXPECT_EQ(0, layout2.rank());
EXPECT_EQ(3, layout3.rank());
EXPECT_EQ(shape, layout3.shape().data());
EXPECT_EQ(byte_strides, layout3.byte_strides().data());
StridedLayout<> layout4 = layout;
layout4 = std::move(layout3);
EXPECT_EQ(3, layout4.rank());
EXPECT_EQ(shape, layout4.shape().data());
EXPECT_EQ(byte_strides, layout4.byte_strides().data());
}
TEST(StridedLayoutTest, ConstructDynamicFromShapeAndByteStrides) {
const Index shape_arr[] = {1, 2};
const Index byte_strides_arr[] = {3, 4};
span<const Index> shape(shape_arr);
span<const Index> byte_strides(byte_strides_arr);
StridedLayout<> layout5(shape, byte_strides);
EXPECT_EQ(2, layout5.rank());
EXPECT_THAT(layout5.shape(), ::testing::ElementsAreArray({1, 2}));
EXPECT_THAT(layout5.byte_strides(), ::testing::ElementsAreArray({3, 4}));
}
TEST(StridedLayoutDeathTest, ConstructDynamicFromShapeAndByteStrides) {
const Index shape_arr[] = {1, 2};
const Index byte_strides_arr[] = {3};
span<const Index> shape(shape_arr);
span<const Index> byte_strides(byte_strides_arr);
TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY((StridedLayout<>(shape, byte_strides)),
"shape");
}
TEST(StridedLayoutTest, ConstructDynamicFromStridedLayoutView) {
const Index shape_arr[] = {1, 2};
const Index byte_strides_arr[] = {3, 4};
StridedLayoutView<> layout_ref(shape_arr, byte_strides_arr);
StridedLayout<> layout(layout_ref);
EXPECT_EQ(2, layout.rank());
EXPECT_THAT(layout.shape(), ::testing::ElementsAreArray({1, 2}));
EXPECT_THAT(layout.byte_strides(), ::testing::ElementsAreArray({3, 4}));
EXPECT_NE(layout_ref.shape().data(), layout.shape().data());
EXPECT_NE(layout_ref.byte_strides().data(), layout.byte_strides().data());
}
TEST(StridedLayoutTest, ConstructDynamicFromStatic) {
StridedLayout<2> layout_s({1, 2}, {3, 4});
StridedLayout<> layout_d(layout_s);
EXPECT_EQ(2, layout_d.rank());
EXPECT_THAT(layout_d.shape(), ::testing::ElementsAreArray({1, 2}));
EXPECT_THAT(layout_d.byte_strides(), ::testing::ElementsAreArray({3, 4}));
}
TEST(StridedLayoutTest, AssignDynamicFromDynamic) {
StridedLayout<> layout1({1, 2}, {3, 4});
StridedLayout<> layout2;
layout2 = layout1;
EXPECT_EQ(2, layout2.rank());
EXPECT_THAT(layout2.shape(), ::testing::ElementsAreArray({1, 2}));
EXPECT_THAT(layout2.byte_strides(), ::testing::ElementsAreArray({3, 4}));
}
TEST(StridedLayoutTest, AssignDynamicFromDynamicRef) {
StridedLayout<> layout1({1, 2}, {3, 4});
StridedLayoutView<> layout_ref = layout1;
StridedLayout<> layout2;
layout2 = layout_ref;
EXPECT_EQ(2, layout2.rank());
EXPECT_THAT(layout2.shape(), ::testing::ElementsAreArray({1, 2}));
EXPECT_THAT(layout2.byte_strides(), ::testing::ElementsAreArray({3, 4}));
}
TEST(StridedLayoutTest, AssignDynamicFromStatic) {
StridedLayout<2> layout_s({1, 2}, {3, 4});
StridedLayout<> layout_d;
layout_d = layout_s;
EXPECT_EQ(2, layout_d.rank());
EXPECT_THAT(layout_d.shape(), ::testing::ElementsAreArray({1, 2}));
EXPECT_THAT(layout_d.byte_strides(), ::testing::ElementsAreArray({3, 4}));
}
TEST(StridedLayoutDeathTest, DynamicRankIndexing) {
StridedLayout<> layout(3);
layout.shape()[0] = 7;
layout.shape()[1] = 8;
layout.shape()[2] = 9;
layout.byte_strides()[0] = 4;
layout.byte_strides()[1] = 5;
layout.byte_strides()[2] = 6;
EXPECT_EQ(4 * 6, (layout[{6}]));
TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY((layout[{7}]),
"Array index out of bounds");
TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY((layout[{-1}]),
"Array index out of bounds");
TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY((layout[{1, 2, 10}]),
"Array index out of bounds");
TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY(
(layout[{1, 2, 3, 4}]),
"Length of index vector is greater than rank of array");
TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY(
layout({1, 2}), "Length of index vector must match rank of array");
TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY(
(StridedLayout<>(span<const Index>({1}), span<const Index>({1, 2}))),
"shape");
}
TEST(StridedLayoutTest, StaticRank0) {
StridedLayout<0> layout;
EXPECT_EQ(1, layout.num_elements());
EXPECT_EQ(0, layout.rank());
EXPECT_TRUE(layout.shape().empty());
EXPECT_TRUE(layout.byte_strides().empty());
static_assert(!std::is_assignable_v<StridedLayout<0>, StridedLayout<>>);
static_assert(!std::is_assignable_v<StridedLayout<0>, StridedLayoutView<>>);
static_assert(!std::is_constructible_v<StridedLayout<0>, StridedLayout<1>>);
static_assert(
!std::is_constructible_v<StridedLayout<0>, StridedLayoutView<1>>);
StridedLayout<0> layout3(span<const Index, 0>{}, span<const Index, 0>{});
[[maybe_unused]] StridedLayout<0> layout2 = layout;
layout3 = layout;
StridedLayout<0> layout5{StridedLayoutView<0>{}};
EXPECT_EQ(0, layout());
EXPECT_EQ(0, (layout[std::array<int, 0>{}]));
EXPECT_EQ(0, (layout(std::array<int, 0>{})));
}
TEST(StridedLayoutTest, DefaultConstructStatic) {
StridedLayout<2> layout;
EXPECT_EQ(2, layout.rank());
}
TEST(StridedLayoutTest, ConstructStaticFromArrays) {
StridedLayout<2> layout({1, 2}, {3, 4});
EXPECT_THAT(layout.shape(), ::testing::ElementsAreArray({1, 2}));
EXPECT_THAT(layout.byte_strides(), ::testing::ElementsAreArray({3, 4}));
}
TEST(StridedLayoutTest, ConstructDynamicFromArrays) {
StridedLayout<> layout({1, 2}, {3, 4});
EXPECT_EQ(2, layout.rank());
EXPECT_THAT(layout.shape(), ::testing::ElementsAreArray({1, 2}));
EXPECT_THAT(layout.byte_strides(), ::testing::ElementsAreArray({3, 4}));
}
TEST(StridedLayoutTest, ConstructStaticFromDynamic) {
StridedLayout<> layout_d({1, 2}, {3, 4});
auto layout_s = StaticRankCast<2>(layout_d).value();
static_assert(std::is_same_v<decltype(layout_s), StridedLayout<2>>);
EXPECT_THAT(layout_s.shape(), ::testing::ElementsAreArray({1, 2}));
EXPECT_THAT(layout_s.byte_strides(), ::testing::ElementsAreArray({3, 4}));
static_assert(!std::is_constructible_v<StridedLayout<2>, StridedLayout<3>>);
static_assert(!std::is_assignable_v<StridedLayout<2>, StridedLayout<3>>);
StridedLayout<2> layout_s2(layout_s);
EXPECT_THAT(layout_s2.shape(), ::testing::ElementsAreArray({1, 2}));
EXPECT_THAT(layout_s2.byte_strides(), ::testing::ElementsAreArray({3, 4}));
static_assert(!std::is_constructible_v<StridedLayout<2>, StridedLayout<>>);
}
TEST(StridedLayoutTest, ConstructStaticFromDynamicStridedLayoutView) {
StridedLayout<> layout_d({1, 2}, {3, 4});
StridedLayoutView<> layout_ref = layout_d;
auto layout_s = StaticCast<StridedLayout<2>>(layout_ref).value();
static_assert(std::is_same_v<decltype(layout_s), StridedLayout<2>>);
EXPECT_THAT(layout_s.shape(), ::testing::ElementsAreArray({1, 2}));
EXPECT_THAT(layout_s.byte_strides(), ::testing::ElementsAreArray({3, 4}));
auto layout_ref2 = StaticCast<StridedLayoutView<2>>(layout_d).value();
StridedLayout<2> layout_s2(layout_ref2);
EXPECT_THAT(layout_s2.shape(), ::testing::ElementsAreArray({1, 2}));
EXPECT_THAT(layout_s2.byte_strides(), ::testing::ElementsAreArray({3, 4}));
static_assert(
!std::is_constructible_v<StridedLayout<2>, StridedLayoutView<3>>);
}
TEST(StridedLayoutTest, AssignStatic) {
StridedLayout<> layout_d({1, 2}, {3, 4});
static_assert(!std::is_assignable_v<StridedLayout<2>, StridedLayout<>>);
static_assert(!std::is_assignable_v<StridedLayout<2>, StridedLayoutView<>>);
{
StridedLayout<2> layout_s;
layout_s = StaticRankCast<2>(layout_d).value();
EXPECT_THAT(layout_s.shape(), ::testing::ElementsAreArray({1, 2}));
EXPECT_THAT(layout_s.byte_strides(), ::testing::ElementsAreArray({3, 4}));
}
{
StridedLayout<2> layout_s;
layout_s = StaticCast<StridedLayoutView<2>>(layout_d).value();
EXPECT_THAT(layout_s.shape(), ::testing::ElementsAreArray({1, 2}));
EXPECT_THAT(layout_s.byte_strides(), ::testing::ElementsAreArray({3, 4}));
}
}
TEST(StridedLayoutTest, StaticIndexing) {
StridedLayout<2> layout({3, 5}, {3, 4});
EXPECT_EQ(6 + 4, layout(2, 1));
}
TEST(StridedLayoutViewTest, StaticConstructDefault) {
StridedLayoutView<2> ref;
EXPECT_EQ(2, ref.rank());
EXPECT_EQ(0, ref.shape()[0]);
EXPECT_EQ(0, ref.shape()[1]);
EXPECT_EQ(0, ref.byte_strides()[0]);
EXPECT_EQ(0, ref.byte_strides()[1]);
}
TEST(StridedLayoutViewTest, StaticConstructFromSpans) {
const Index shape[] = {5, 3};
const Index byte_strides[] = {3, 4};
StridedLayoutView<2> ref(shape, byte_strides);
EXPECT_EQ(&shape[0], ref.shape().data());
EXPECT_EQ(&byte_strides[0], ref.byte_strides().data());
}
TEST(StridedLayoutViewTest, StaticConstructAndAssign) {
const Index shape[] = {5, 3};
const Index byte_strides[] = {3, 4};
StridedLayoutView<2> ref(shape, byte_strides);
{
StridedLayoutView<2> ref2 = ref;
EXPECT_EQ(&shape[0], ref2.shape().data());
EXPECT_EQ(&byte_strides[0], ref2.byte_strides().data());
}
{
StridedLayoutView<2> ref2 =
StaticRankCast<2>(StridedLayoutView<>{ref}).value();
EXPECT_EQ(&shape[0], ref2.shape().data());
EXPECT_EQ(&byte_strides[0], ref2.byte_strides().data());
}
static_assert(
!std::is_convertible_v<StridedLayoutView<>, StridedLayoutView<2>>);
static_assert(!std::is_convertible_v<StridedLayout<>, StridedLayoutView<2>>);
static_assert(
!std::is_constructible_v<StridedLayoutView<2>, StridedLayoutView<3>>);
static_assert(
!std::is_constructible_v<StridedLayoutView<2>, StridedLayoutView<>>);
static_assert(
!std::is_constructible_v<StridedLayoutView<2>, StridedLayout<>>);
static_assert(
!std::is_assignable_v<StridedLayoutView<2>, StridedLayoutView<>>);
static_assert(!std::is_assignable_v<StridedLayoutView<2>, StridedLayout<>>);
static_assert(!std::is_assignable_v<StridedLayoutView<2>, StridedLayout<3>>);
static_assert(
!std::is_assignable_v<StridedLayoutView<2>, StridedLayoutView<3>>);
{
StridedLayoutView<2> ref2;
ref2 = ref;
EXPECT_EQ(&shape[0], ref2.shape().data());
EXPECT_EQ(&byte_strides[0], ref2.byte_strides().data());
}
{
StridedLayout<2> layout(ref);
StridedLayoutView<2> ref2;
ref2 = layout;
EXPECT_EQ(layout.shape().data(), ref2.shape().data());
EXPECT_EQ(layout.byte_strides().data(), ref2.byte_strides().data());
}
StridedLayout<2> layout(std::integral_constant<DimensionIndex, 2>{});
}
TEST(StridedLayoutViewTest, CastError) {
const Index shape[] = {5, 3};
const Index byte_strides[] = {3, 4};
StridedLayoutView<> ref(shape, byte_strides);
EXPECT_THAT(StaticCast<StridedLayout<1>>(ref),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot cast strided layout with rank of 2 to "
"strided layout with rank of 1"));
}
TEST(StridedLayoutViewTest, DynamicConstructAndAssign) {
const Index shape[] = {5, 3};
const Index byte_strides[] = {3, 4};
StridedLayoutView<2> ref(shape, byte_strides);
{
StridedLayoutView<> r;
EXPECT_EQ(0, r.rank());
EXPECT_TRUE(r.shape().empty());
EXPECT_TRUE(r.byte_strides().empty());
}
{
StridedLayoutView<> r(shape, byte_strides);
EXPECT_EQ(2, r.rank());
EXPECT_EQ(&shape[0], r.shape().data());
EXPECT_EQ(&byte_strides[0], r.byte_strides().data());
EXPECT_EQ(2, r.shape().size());
EXPECT_EQ(2, r.byte_strides().size());
{
StridedLayoutView<> r2 = r;
EXPECT_EQ(2, r2.rank());
EXPECT_EQ(&shape[0], r2.shape().data());
EXPECT_EQ(&byte_strides[0], r2.byte_strides().data());
}
{
StridedLayoutView<> r2;
r2 = r;
EXPECT_EQ(2, r2.rank());
EXPECT_EQ(&shape[0], r2.shape().data());
EXPECT_EQ(&byte_strides[0], r2.byte_strides().data());
}
}
{
StridedLayoutView<> r = ref;
EXPECT_EQ(2, r.rank());
EXPECT_EQ(&shape[0], r.shape().data());
EXPECT_EQ(&byte_strides[0], r.byte_strides().data());
}
{
StridedLayoutView<> r;
r = ref;
EXPECT_EQ(2, r.rank());
EXPECT_EQ(&shape[0], r.shape().data());
EXPECT_EQ(&byte_strides[0], r.byte_strides().data());
}
{
StridedLayout<> layout(ref);
{
StridedLayoutView<> r = layout;
EXPECT_EQ(2, r.rank());
EXPECT_EQ(layout.shape().data(), r.shape().data());
EXPECT_EQ(layout.byte_strides().data(), r.byte_strides().data());
}
{
StridedLayoutView<> r;
r = layout;
EXPECT_EQ(2, r.rank());
EXPECT_EQ(layout.shape().data(), r.shape().data());
EXPECT_EQ(layout.byte_strides().data(), r.byte_strides().data());
}
}
{
StridedLayout<2> layout(ref);
{
StridedLayoutView<> r = layout;
EXPECT_EQ(2, r.rank());
EXPECT_EQ(layout.shape().data(), r.shape().data());
EXPECT_EQ(layout.byte_strides().data(), r.byte_strides().data());
}
{
StridedLayoutView<> r;
r = layout;
EXPECT_EQ(2, r.rank());
EXPECT_EQ(layout.shape().data(), r.shape().data());
EXPECT_EQ(layout.byte_strides().data(), r.byte_strides().data());
}
}
}
TEST(StridedLayoutViewTest, Static0) {
{
StridedLayoutView<0> r;
EXPECT_EQ(0, r.rank());
EXPECT_EQ(nullptr, r.shape().data());
EXPECT_EQ(nullptr, r.byte_strides().data());
}
{
StridedLayoutView<0> r;
[[maybe_unused]] StridedLayoutView<0> r2 = r;
}
{ StridedLayoutView<0> r(span<const Index, 0>{}, span<const Index, 0>{}); }
{
StridedLayout<0> layout;
StridedLayoutView<0> r = layout;
r = layout;
}
}
TEST(StridedLayoutViewDeathTest, DynamicConstruct) {
[[maybe_unused]] const Index shape[] = {5, 3};
[[maybe_unused]] const Index byte_strides[] = {3};
TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY(
(StridedLayoutView<>(shape, byte_strides)), "shape");
StridedLayout<> x;
x.set_rank(2);
EXPECT_THAT(StaticCast<StridedLayoutView<0>>(StridedLayoutView<>(x)),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(StaticCast<StridedLayoutView<0>>(x),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(StridedLayoutViewTest, Compare) {
StridedLayout<> r1(span<const Index>({1, 2}), span<const Index>({3, 4}));
StridedLayout<> r2(span<const Index>({1, 2}), span<const Index>({3, 4}));
StridedLayout<> r3(span<const Index>({1, 2, 3}),
span<const Index>({3, 4, 5}));
EXPECT_TRUE(r1 == r2);
EXPECT_FALSE(r1 != r2);
r1.shape()[0] = 2;
EXPECT_FALSE(r1 == r2);
EXPECT_TRUE(r1 != r2);
EXPECT_FALSE(r1 == StridedLayoutView<>{});
EXPECT_TRUE(r1 != StridedLayoutView<>{});
EXPECT_TRUE(StridedLayout<0>() == StridedLayoutView<0>());
EXPECT_FALSE(r3 == r2);
EXPECT_FALSE(r2 == r3);
EXPECT_TRUE(r2 != r3);
}
TEST(StridedLayoutViewTest, SubLayout) {
{
StridedLayout<3> r({1, 2, 3}, {3, 4, 5});
{
auto s = GetSubLayoutView<0>(r);
static_assert(std::is_same_v<decltype(s), StridedLayoutView<3>>);
EXPECT_EQ(r.rank(), s.rank());
EXPECT_EQ(r.shape().data(), s.shape().data());
EXPECT_EQ(r.byte_strides().data(), s.byte_strides().data());
}
{
auto s = GetSubLayoutView<1>(r);
static_assert(std::is_same_v<decltype(s), StridedLayoutView<2>>);
EXPECT_EQ(2, s.rank());
EXPECT_EQ(r.shape().data() + 1, s.shape().data());
EXPECT_EQ(r.byte_strides().data() + 1, s.byte_strides().data());
}
{
auto s = GetSubLayoutView(r, tensorstore::StaticRank<1>{});
static_assert(std::is_same_v<decltype(s), StridedLayoutView<2>>);
EXPECT_EQ(2, s.rank());
EXPECT_EQ(r.shape().data() + 1, s.shape().data());
EXPECT_EQ(r.byte_strides().data() + 1, s.byte_strides().data());
}
{
auto s = GetSubLayoutView<2>(r);
static_assert(std::is_same_v<decltype(s), StridedLayoutView<1>>);
EXPECT_EQ(1, s.rank());
EXPECT_EQ(r.shape().data() + 2, s.shape().data());
EXPECT_EQ(r.byte_strides().data() + 2, s.byte_strides().data());
}
{
auto s = GetSubLayoutView<3>(r);
static_assert(std::is_same_v<decltype(s), StridedLayoutView<0>>);
EXPECT_EQ(0, s.rank());
}
}
{
StridedLayout<3> r({1, 2, 3}, {3, 4, 5});
{
auto s = GetSubLayoutView(r, 0);
static_assert(std::is_same_v<decltype(s), StridedLayoutView<>>);
EXPECT_EQ(r.rank(), s.rank());
EXPECT_EQ(r.shape().data(), s.shape().data());
EXPECT_EQ(r.byte_strides().data(), s.byte_strides().data());
}
{
auto s = GetSubLayoutView(r, 1);
EXPECT_EQ(2, s.rank());
EXPECT_EQ(r.shape().data() + 1, s.shape().data());
EXPECT_EQ(r.byte_strides().data() + 1, s.byte_strides().data());
}
{
auto s = GetSubLayoutView(r, 2);
EXPECT_EQ(1, s.rank());
EXPECT_EQ(r.shape().data() + 2, s.shape().data());
EXPECT_EQ(r.byte_strides().data() + 2, s.byte_strides().data());
}
{
auto s = GetSubLayoutView(r, 3);
EXPECT_EQ(0, s.rank());
}
}
{
StridedLayout<> r({1, 2, 3}, {3, 4, 5});
{
auto s = GetSubLayoutView<0>(r);
static_assert(std::is_same_v<decltype(s), StridedLayoutView<>>);
EXPECT_EQ(r.rank(), s.rank());
EXPECT_EQ(r.shape().data(), s.shape().data());
EXPECT_EQ(r.byte_strides().data(), s.byte_strides().data());
}
{
auto s = GetSubLayoutView<1>(r);
static_assert(std::is_same_v<decltype(s), StridedLayoutView<>>);
EXPECT_EQ(2, s.rank());
EXPECT_EQ(r.shape().data() + 1, s.shape().data());
EXPECT_EQ(r.byte_strides().data() + 1, s.byte_strides().data());
}
{
auto s = GetSubLayoutView<2>(r);
static_assert(std::is_same_v<decltype(s), StridedLayoutView<>>);
EXPECT_EQ(1, s.rank());
EXPECT_EQ(r.shape().data() + 2, s.shape().data());
EXPECT_EQ(r.byte_strides().data() + 2, s.byte_strides().data());
}
{
auto s = GetSubLayoutView<3>(r);
static_assert(std::is_same_v<decltype(s), StridedLayoutView<>>);
EXPECT_EQ(0, s.rank());
}
}
{
StridedLayout<> r({1, 2, 3}, {3, 4, 5});
{
auto s = GetSubLayoutView(r, 0);
static_assert(std::is_same_v<decltype(s), StridedLayoutView<>>);
EXPECT_EQ(r.rank(), s.rank());
EXPECT_EQ(r.shape().data(), s.shape().data());
EXPECT_EQ(r.byte_strides().data(), s.byte_strides().data());
}
{
auto s = GetSubLayoutView(r, 1);
EXPECT_EQ(2, s.rank());
EXPECT_EQ(r.shape().data() + 1, s.shape().data());
EXPECT_EQ(r.byte_strides().data() + 1, s.byte_strides().data());
}
{
auto s = GetSubLayoutView(r, 2);
EXPECT_EQ(1, s.rank());
EXPECT_EQ(r.shape().data() + 2, s.shape().data());
EXPECT_EQ(r.byte_strides().data() + 2, s.byte_strides().data());
}
{
auto s = GetSubLayoutView(r, 3);
EXPECT_EQ(0, s.rank());
}
}
}
TEST(StridedLayoutViewDeathTest, SubLayout) {
StridedLayout<> r({1, 2, 3}, {3, 4, 5});
TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY(GetSubLayoutView(r, -1), "sub_rank");
TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY(GetSubLayoutView(r, 4), "sub_rank");
TENSORSTORE_EXPECT_DEATH_DEBUG_ONLY(GetSubLayoutView<4>(r), "sub_rank");
}
TEST(StridedLayoutTest, COrderStatic) {
auto layout = StridedLayout(ContiguousLayoutOrder::c, 2,
span<const Index, 3>({3, 4, 5}));
static_assert(std::is_same_v<decltype(layout), StridedLayout<3>>);
EXPECT_EQ(StridedLayout<3>({3, 4, 5}, {4 * 5 * 2, 5 * 2, 2}), layout);
StridedLayout<3, offset_origin> layout_offset_origin(ContiguousLayoutOrder::c,
2, {3, 4, 5}) |
537 | cpp | google/tensorstore | resize_options | tensorstore/resize_options.cc | tensorstore/resize_options_test.cc | #ifndef TENSORSTORE_RESIZE_OPTIONS_H_
#define TENSORSTORE_RESIZE_OPTIONS_H_
#include <iosfwd>
#include <type_traits>
#include <utility>
#include "absl/meta/type_traits.h"
#include "tensorstore/batch.h"
namespace tensorstore {
enum class ResolveBoundsMode {
fix_resizable_bounds = 1,
};
constexpr ResolveBoundsMode fix_resizable_bounds =
ResolveBoundsMode::fix_resizable_bounds;
constexpr inline ResolveBoundsMode operator&(ResolveBoundsMode a,
ResolveBoundsMode b) {
return static_cast<ResolveBoundsMode>(static_cast<int>(a) &
static_cast<int>(b));
}
constexpr inline ResolveBoundsMode operator|(ResolveBoundsMode a,
ResolveBoundsMode b) {
return static_cast<ResolveBoundsMode>(static_cast<int>(a) |
static_cast<int>(b));
}
constexpr inline bool operator!(ResolveBoundsMode a) {
return !static_cast<int>(a);
}
std::ostream& operator<<(std::ostream& os, ResolveBoundsMode mode);
struct ResolveBoundsOptions {
template <typename T>
constexpr static inline bool IsOption = false;
template <typename... T, typename = std::enable_if_t<
(IsOption<absl::remove_cvref_t<T>> && ...)>>
ResolveBoundsOptions(T&&... option) {
(Set(std::forward<T>(option)), ...);
}
void Set(ResolveBoundsMode value) { this->mode = value; }
void Set(Batch value) { this->batch = std::move(value); }
ResolveBoundsMode mode = ResolveBoundsMode{};
Batch batch{no_batch};
};
template <>
constexpr inline bool ResolveBoundsOptions::IsOption<ResolveBoundsMode> = true;
template <>
constexpr inline bool ResolveBoundsOptions::IsOption<Batch> = true;
template <>
constexpr inline bool ResolveBoundsOptions::IsOption<Batch::View> = true;
enum class ResizeMode {
resize_metadata_only = 1,
resize_tied_bounds = 2,
expand_only = 4,
shrink_only = 8,
};
constexpr ResizeMode resize_metadata_only = ResizeMode::resize_metadata_only;
constexpr ResizeMode resize_tied_bounds = ResizeMode::resize_tied_bounds;
constexpr ResizeMode expand_only = ResizeMode::expand_only;
constexpr ResizeMode shrink_only = ResizeMode::shrink_only;
constexpr inline ResizeMode operator&(ResizeMode a, ResizeMode b) {
return static_cast<ResizeMode>(static_cast<int>(a) & static_cast<int>(b));
}
constexpr inline ResizeMode operator|(ResizeMode a, ResizeMode b) {
return static_cast<ResizeMode>(static_cast<int>(a) | static_cast<int>(b));
}
constexpr inline bool operator!(ResizeMode a) { return !static_cast<int>(a); }
std::ostream& operator<<(std::ostream& os, ResizeMode mode);
struct ResizeOptions {
template <typename T>
constexpr static inline bool IsOption = false;
template <typename... T, typename = std::enable_if_t<
(IsOption<absl::remove_cvref_t<T>> && ...)>>
ResizeOptions(T&&... option) {
(Set(std::forward<T>(option)), ...);
}
void Set(ResizeMode value) { this->mode = value; }
ResizeMode mode = ResizeMode{};
};
template <>
constexpr inline bool ResizeOptions::IsOption<ResizeMode> = true;
}
#endif
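// Example (sketch, not part of the header): options are constructed from any
// mix of the supported option types, e.g.
//   tensorstore::ResolveBoundsOptions options(
//       tensorstore::fix_resizable_bounds, tensorstore::Batch::New());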
#include "tensorstore/resize_options.h"
#include <stddef.h>
#include <ostream>
#include "absl/base/macros.h"
namespace tensorstore {
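// The operator<< overloads below stream the names of the set mode bits joined
// by '|'; a default-constructed mode prints as the empty string.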
std::ostream& operator<<(std::ostream& os, ResolveBoundsMode mode) {
constexpr const char* kModeNames[] = {
"fix_resizable_bounds",
};
const char* sep = "";
constexpr const char* kSep = "|";
for (size_t i = 0; i < ABSL_ARRAYSIZE(kModeNames); ++i) {
if (static_cast<int>(mode) & (1 << i)) {
os << sep << kModeNames[i];
sep = kSep;
}
}
return os;
}
std::ostream& operator<<(std::ostream& os, ResizeMode mode) {
constexpr const char* kModeNames[] = {
"resize_metadata_only",
"resize_tied_bounds",
"expand_only",
"shrink_only",
};
const char* sep = "";
constexpr const char* kSep = "|";
for (size_t i = 0; i < ABSL_ARRAYSIZE(kModeNames); ++i) {
if (static_cast<int>(mode) & (1 << i)) {
os << sep << kModeNames[i];
sep = kSep;
}
}
return os;
}
} | #include "tensorstore/resize_options.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::ResizeMode;
using ::tensorstore::ResolveBoundsMode;
using ::tensorstore::StrCat;
TEST(ResolveBoundsModeTest, PrintToOstream) {
EXPECT_EQ("fix_resizable_bounds",
StrCat(ResolveBoundsMode::fix_resizable_bounds));
EXPECT_EQ("", StrCat(ResolveBoundsMode{}));
}
TEST(ResolveBoundsModeTest, BitwiseOr) {
EXPECT_EQ(ResolveBoundsMode::fix_resizable_bounds,
ResolveBoundsMode::fix_resizable_bounds | ResolveBoundsMode{});
EXPECT_EQ(ResolveBoundsMode::fix_resizable_bounds,
ResolveBoundsMode{} | ResolveBoundsMode::fix_resizable_bounds);
}
TEST(ResizeModeTest, PrintToOstream) {
EXPECT_EQ(
"resize_metadata_only|resize_tied_bounds|expand_only|shrink_only",
StrCat(ResizeMode::resize_metadata_only | ResizeMode::resize_tied_bounds |
ResizeMode::expand_only | ResizeMode::shrink_only));
EXPECT_EQ("", StrCat(ResizeMode{}));
}
TEST(ResizeModeTest, BitwiseOr) {
EXPECT_EQ(ResizeMode::resize_metadata_only,
ResizeMode::resize_metadata_only | ResizeMode{});
EXPECT_EQ(ResizeMode::resize_metadata_only,
ResizeMode{} | ResizeMode::resize_metadata_only);
}
} |
538 | cpp | google/tensorstore | progress | tensorstore/progress.cc | tensorstore/progress_test.cc | #ifndef TENSORSTORE_PROGRESS_H_
#define TENSORSTORE_PROGRESS_H_
#include <iosfwd>
#include <utility>
#include "absl/status/status.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/poly/poly.h"
#include "tensorstore/util/future.h"
namespace tensorstore {
struct ReadProgress {
Index total_elements;
Index copied_elements;
friend bool operator==(const ReadProgress& a, const ReadProgress& b);
friend bool operator!=(const ReadProgress& a, const ReadProgress& b);
friend std::ostream& operator<<(std::ostream& os, const ReadProgress& a);
};
struct WriteProgress {
Index total_elements;
Index copied_elements;
Index committed_elements;
friend bool operator==(const WriteProgress& a, const WriteProgress& b);
friend bool operator!=(const WriteProgress& a, const WriteProgress& b);
friend std::ostream& operator<<(std::ostream& os, const WriteProgress& a);
};
struct CopyProgress {
Index total_elements;
Index read_elements;
Index copied_elements;
Index committed_elements;
friend bool operator==(const CopyProgress& a, const CopyProgress& b);
friend bool operator!=(const CopyProgress& a, const CopyProgress& b);
friend std::ostream& operator<<(std::ostream& os, const CopyProgress& a);
};
struct [[nodiscard]] WriteFutures {
WriteFutures() = default;
WriteFutures(Future<void> copy_future, Future<void> commit_future)
: copy_future(std::move(copy_future)),
commit_future(std::move(commit_future)) {}
WriteFutures(absl::Status status)
: copy_future(status), commit_future(copy_future) {}
WriteFutures(Result<WriteFutures> result) {
if (result) {
*this = *result;
} else {
*this = WriteFutures(result.status());
}
}
Result<void>& result() const { return commit_future.result(); }
absl::Status status() const { return commit_future.status(); }
void value() const { return commit_future.value(); }
void Force() const { commit_future.Force(); }
Future<void> copy_future;
Future<void> commit_future;
};
inline absl::Status GetStatus(const WriteFutures& future) {
return future.status();
}
struct ReadProgressFunction {
using Function =
poly::Poly<sizeof(void*) * 2, false, void(ReadProgress)>;
Function value;
};
struct WriteProgressFunction {
using Function =
poly::Poly<sizeof(void*) * 2, false, void(WriteProgress)>;
Function value;
};
struct CopyProgressFunction {
using Function =
poly::Poly<sizeof(void*) * 2, false, void(CopyProgress)>;
Function value;
};
}
#endif
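// Example (sketch): WriteFutures separates the two phases of a write.
// Assuming `dest` is a writable TensorStore and `source` a compatible array:
//   tensorstore::WriteFutures futures = tensorstore::Write(source, dest);
//   futures.copy_future.Wait();    // `source` may now be modified
//   futures.commit_future.Wait();  // the write is durable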
#include "tensorstore/progress.h"
#include <ostream>
namespace tensorstore {
bool operator==(const ReadProgress& a, const ReadProgress& b) {
return a.total_elements == b.total_elements &&
a.copied_elements == b.copied_elements;
}
bool operator!=(const ReadProgress& a, const ReadProgress& b) {
return !(a == b);
}
std::ostream& operator<<(std::ostream& os, const ReadProgress& a) {
return os << "{ total_elements=" << a.total_elements
<< ", copied_elements=" << a.copied_elements << " }";
}
bool operator==(const WriteProgress& a, const WriteProgress& b) {
return a.total_elements == b.total_elements &&
a.copied_elements == b.copied_elements &&
a.committed_elements == b.committed_elements;
}
bool operator!=(const WriteProgress& a, const WriteProgress& b) {
return !(a == b);
}
std::ostream& operator<<(std::ostream& os, const WriteProgress& a) {
return os << "{ total_elements=" << a.total_elements
<< ", copied_elements=" << a.copied_elements
<< ", committed_elements=" << a.committed_elements << " }";
}
bool operator==(const CopyProgress& a, const CopyProgress& b) {
return a.total_elements == b.total_elements &&
a.read_elements == b.read_elements &&
a.copied_elements == b.copied_elements &&
a.committed_elements == b.committed_elements;
}
bool operator!=(const CopyProgress& a, const CopyProgress& b) {
return !(a == b);
}
std::ostream& operator<<(std::ostream& os, const CopyProgress& a) {
return os << "{ total_elements=" << a.total_elements
<< ", read_elements=" << a.read_elements
<< ", copied_elements=" << a.copied_elements
<< ", committed_elements=" << a.committed_elements << " }";
}
} | #include "tensorstore/progress.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::CopyProgress;
using ::tensorstore::ReadProgress;
using ::tensorstore::WriteProgress;
TEST(ReadProgressTest, Comparison) {
ReadProgress a{1, 1};
ReadProgress b{2, 2};
ReadProgress c{2, 1};
EXPECT_EQ(a, a);
EXPECT_EQ(b, b);
EXPECT_EQ(c, c);
EXPECT_NE(a, b);
EXPECT_NE(a, c);
EXPECT_NE(b, c);
}
TEST(ReadProgressTest, Ostream) {
EXPECT_EQ("{ total_elements=2, copied_elements=1 }",
tensorstore::StrCat(ReadProgress{2, 1}));
}
TEST(WriteProgressTest, Comparison) {
WriteProgress a{1, 1, 1};
WriteProgress b{2, 2, 2};
WriteProgress c{2, 1, 1};
WriteProgress d{2, 1, 2};
EXPECT_EQ(a, a);
EXPECT_EQ(b, b);
EXPECT_EQ(c, c);
EXPECT_EQ(d, d);
EXPECT_NE(a, b);
EXPECT_NE(a, c);
EXPECT_NE(a, d);
EXPECT_NE(b, d);
EXPECT_NE(b, c);
EXPECT_NE(c, d);
}
TEST(WriteProgressTest, Ostream) {
EXPECT_EQ("{ total_elements=3, copied_elements=2, committed_elements=1 }",
tensorstore::StrCat(WriteProgress{3, 2, 1}));
}
TEST(CopyProgressTest, Comparison) {
CopyProgress a{1, 1, 1, 1};
CopyProgress b{2, 1, 1, 1};
CopyProgress c{1, 2, 1, 1};
CopyProgress d{1, 1, 2, 1};
CopyProgress e{1, 1, 1, 2};
EXPECT_EQ(a, a);
EXPECT_EQ(b, b);
EXPECT_EQ(c, c);
EXPECT_EQ(d, d);
EXPECT_EQ(e, e);
EXPECT_NE(a, b);
EXPECT_NE(a, c);
EXPECT_NE(a, d);
EXPECT_NE(a, e);
}
TEST(CopyProgressTest, Ostream) {
EXPECT_EQ(
"{ total_elements=4, read_elements=3, copied_elements=2, "
"committed_elements=1 }",
tensorstore::StrCat(CopyProgress{4, 3, 2, 1}));
}
} |
539 | cpp | google/tensorstore | rank | tensorstore/rank.cc | tensorstore/rank_test.cc | #ifndef TENSORSTORE_RANK_H_
#define TENSORSTORE_RANK_H_
#include <cassert>
#include <initializer_list>
#include <string>
#include <type_traits>
#include "tensorstore/index.h"
#include "tensorstore/static_cast.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
constexpr DimensionIndex kMaxRank = 32;
constexpr inline bool IsValidRank(DimensionIndex rank) {
return 0 <= rank && rank <= kMaxRank;
}
struct DynamicRank {
constexpr operator DimensionIndex() const { return -1; }
constexpr DimensionIndex operator()(DimensionIndex inline_buffer_size) const {
assert(inline_buffer_size >= 0);
assert(inline_buffer_size <= kMaxRank);
return -1 - inline_buffer_size;
}
};
constexpr inline DynamicRank dynamic_rank = {};
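// `dynamic_rank` converts to -1; `dynamic_rank(n)` encodes "dynamic rank with
// inline storage for up to n dimensions" as -1 - n.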
using InlineRank = DimensionIndex;
struct RankConstraint {
constexpr RankConstraint() = default;
constexpr RankConstraint(DynamicRank) {}
constexpr explicit RankConstraint(DimensionIndex rank) : rank(rank) {}
static constexpr RankConstraint FromInlineRank(InlineRank value) {
return RankConstraint(value < 0 ? dynamic_rank : value);
}
DimensionIndex rank = dynamic_rank;
constexpr operator DimensionIndex() const { return rank; }
constexpr bool valid() const {
return rank == -1 || (rank >= 0 && rank <= kMaxRank);
}
static constexpr RankConstraint And(DimensionIndex a, DimensionIndex b) {
assert(EqualOrUnspecified(a, b));
return RankConstraint(a == dynamic_rank ? b : a);
}
static constexpr RankConstraint And(
std::initializer_list<DimensionIndex> constraints) {
assert(EqualOrUnspecified(constraints));
for (DimensionIndex x : constraints) {
if (x == dynamic_rank) continue;
return RankConstraint(x);
}
return dynamic_rank;
}
static constexpr RankConstraint Add(DimensionIndex a, DimensionIndex b) {
if (a == dynamic_rank || b == dynamic_rank) return dynamic_rank;
return RankConstraint(a + b);
}
static constexpr RankConstraint Add(
std::initializer_list<DimensionIndex> constraints) {
DimensionIndex result = 0;
for (auto x : constraints) {
if (x == dynamic_rank) return dynamic_rank;
result += x;
}
return RankConstraint(result);
}
static constexpr RankConstraint Subtract(DimensionIndex a, DimensionIndex b) {
if (a == dynamic_rank || b == dynamic_rank) return dynamic_rank;
assert(a >= b);
return RankConstraint(a - b);
}
static constexpr bool Implies(DimensionIndex inner, DimensionIndex outer) {
return outer == dynamic_rank || outer == inner;
}
static constexpr bool EqualOrUnspecified(DimensionIndex a, DimensionIndex b) {
return a == dynamic_rank || b == dynamic_rank || a == b;
}
static constexpr bool EqualOrUnspecified(
std::initializer_list<DimensionIndex> constraints) {
DimensionIndex common = dynamic_rank;
for (auto x : constraints) {
if (x == dynamic_rank) continue;
if (x != common && common != dynamic_rank) {
return false;
}
common = x;
}
return true;
}
static constexpr bool LessOrUnspecified(DimensionIndex a, DimensionIndex b) {
return a == dynamic_rank || b == dynamic_rank || a < b;
}
static constexpr bool LessEqualOrUnspecified(DimensionIndex a,
DimensionIndex b) {
return a == dynamic_rank || b == dynamic_rank || a <= b;
}
static constexpr bool GreaterOrUnspecified(DimensionIndex a,
DimensionIndex b) {
return a == dynamic_rank || b == dynamic_rank || a > b;
}
static constexpr bool GreaterEqualOrUnspecified(DimensionIndex a,
DimensionIndex b) {
return a == dynamic_rank || b == dynamic_rank || a >= b;
}
};
constexpr inline bool IsValidInlineRank(InlineRank inline_rank) {
return inline_rank >= (-kMaxRank - 1) && inline_rank <= kMaxRank;
}
constexpr inline DimensionIndex InlineRankLimit(InlineRank rank_spec) {
return (rank_spec <= -1) ? -1 - rank_spec : 0;
}
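// InlineRankLimit recovers the inline storage capacity n from the -1 - n
// encoding produced by dynamic_rank(n); static rank specs have no inline
// buffer, so the limit is 0.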
template <DimensionIndex Rank>
using StaticRank =
std::enable_if_t<(Rank >= 0), std::integral_constant<DimensionIndex, Rank>>;
template <DimensionIndex Rank>
using StaticOrDynamicRank =
std::conditional_t<(Rank <= dynamic_rank), DimensionIndex,
std::integral_constant<DimensionIndex, Rank>>;
template <DimensionIndex Rank>
inline constexpr StaticOrDynamicRank<Rank> GetDefaultRank() {
if constexpr (Rank == dynamic_rank) {
return dynamic_rank;
} else {
return {};
}
}
template <typename SourceRef, DimensionIndex TargetRank>
using RebindRank =
typename StaticCastTraitsType<SourceRef>::template RebindRank<TargetRank>;
template <DimensionIndex TargetRank,
CastChecking Checking = CastChecking::checked, typename SourceRef>
StaticCastResultType<RebindRank<SourceRef, TargetRank>, SourceRef, Checking>
StaticRankCast(SourceRef&& source) {
return StaticCast<RebindRank<SourceRef, TargetRank>, Checking>(
std::forward<SourceRef>(source));
}
template <>
struct StaticCastTraits<DimensionIndex>
: public DefaultStaticCastTraits<DimensionIndex> {
static constexpr DimensionIndex Construct(DimensionIndex rank) {
return rank;
}
template <DimensionIndex Rank>
static constexpr DimensionIndex Construct(
std::integral_constant<DimensionIndex, Rank> rank) {
return rank;
}
template <typename SourceRef>
static constexpr bool IsCompatible(SourceRef&& source) {
return true;
}
static std::string Describe(DimensionIndex value);
static std::string Describe() { return Describe(dynamic_rank); }
template <DimensionIndex TargetRank>
using RebindRank = StaticOrDynamicRank<TargetRank>;
};
namespace internal_rank {
std::string DescribeStaticRank(DimensionIndex rank);
}
template <DimensionIndex Rank>
struct StaticCastTraits<std::integral_constant<DimensionIndex, Rank>> {
static constexpr StaticRank<Rank> Construct(StaticRank<Rank>) { return {}; }
static constexpr StaticRank<Rank> Construct(DimensionIndex) { return {}; }
static constexpr bool IsCompatible(DimensionIndex source) {
return RankConstraint::EqualOrUnspecified(source, Rank);
}
static std::string Describe() {
return StaticCastTraits<DimensionIndex>::Describe(Rank);
}
static std::string Describe(StaticRank<Rank>) { return Describe(); }
template <DimensionIndex TargetRank>
using RebindRank = StaticOrDynamicRank<TargetRank>;
};
absl::Status ValidateRank(DimensionIndex rank);
}
#endif
#include "tensorstore/rank.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
std::string StaticCastTraits<DimensionIndex>::Describe(DimensionIndex value) {
if (value == dynamic_rank) return "dynamic rank";
return tensorstore::StrCat("rank of ", value);
}
absl::Status ValidateRank(DimensionIndex rank) {
if (!IsValidRank(rank)) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Rank ", rank, " is outside valid range [0, ", kMaxRank, "]"));
}
return absl::OkStatus();
}
} | #include "tensorstore/rank.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::dynamic_rank;
using ::tensorstore::InlineRankLimit;
using ::tensorstore::MatchesStatus;
using ::tensorstore::RankConstraint;
using ::tensorstore::StaticRankCast;
using ::tensorstore::unchecked;
static_assert(RankConstraint::Implies(3, 3));
static_assert(RankConstraint::Implies(3, dynamic_rank));
static_assert(RankConstraint::Implies(dynamic_rank, dynamic_rank));
static_assert(!RankConstraint::Implies(3, 2));
static_assert(!RankConstraint::Implies(dynamic_rank, 3));
static_assert(RankConstraint::EqualOrUnspecified(3, 3));
static_assert(RankConstraint::EqualOrUnspecified(dynamic_rank, dynamic_rank));
static_assert(RankConstraint::EqualOrUnspecified(dynamic_rank, 3));
static_assert(RankConstraint::EqualOrUnspecified(3, dynamic_rank));
static_assert(!RankConstraint::EqualOrUnspecified(3, 2));
static_assert(RankConstraint::Add(2, 3) == 5);
static_assert(RankConstraint::Add({2, 3, 4}) == 9);
static_assert(RankConstraint::Add({2}) == 2);
static_assert(RankConstraint::Add({}) == 0);
static_assert(RankConstraint::Add(dynamic_rank, 3) == dynamic_rank);
static_assert(RankConstraint::Add(3, dynamic_rank) == dynamic_rank);
static_assert(RankConstraint::Add(dynamic_rank, dynamic_rank) == dynamic_rank);
static_assert(RankConstraint::Subtract(5, 2) == 3);
static_assert(RankConstraint::Subtract(dynamic_rank, 3) == dynamic_rank);
static_assert(RankConstraint::Subtract(3, dynamic_rank) == dynamic_rank);
static_assert(RankConstraint::Subtract(dynamic_rank, dynamic_rank) ==
dynamic_rank);
static_assert(RankConstraint::And(dynamic_rank, 5) == 5);
static_assert(RankConstraint::And(5, dynamic_rank) == 5);
static_assert(RankConstraint::And(dynamic_rank, dynamic_rank) == dynamic_rank);
static_assert(RankConstraint::And({5, 5, dynamic_rank}) == 5);
static_assert(RankConstraint::And({3}) == 3);
static_assert(RankConstraint::And({}) == dynamic_rank);
static_assert(RankConstraint::LessOrUnspecified(1, 2) == true);
static_assert(RankConstraint::LessOrUnspecified(1, 1) == false);
static_assert(RankConstraint::LessOrUnspecified(dynamic_rank, 2) == true);
static_assert(RankConstraint::LessOrUnspecified(1, dynamic_rank) == true);
static_assert(RankConstraint::LessOrUnspecified(dynamic_rank, dynamic_rank) ==
true);
static_assert(RankConstraint::LessEqualOrUnspecified(1, 2) == true);
static_assert(RankConstraint::LessEqualOrUnspecified(1, 1) == true);
static_assert(RankConstraint::LessEqualOrUnspecified(1, 0) == false);
static_assert(RankConstraint::LessEqualOrUnspecified(dynamic_rank, 2) == true);
static_assert(RankConstraint::LessEqualOrUnspecified(1, dynamic_rank) == true);
static_assert(RankConstraint::LessEqualOrUnspecified(dynamic_rank,
dynamic_rank) == true);
static_assert(RankConstraint::GreaterOrUnspecified(2, 1) == true);
static_assert(RankConstraint::GreaterOrUnspecified(1, 1) == false);
static_assert(RankConstraint::GreaterOrUnspecified(dynamic_rank, 2) == true);
static_assert(RankConstraint::GreaterOrUnspecified(1, dynamic_rank) == true);
static_assert(RankConstraint::GreaterOrUnspecified(dynamic_rank,
dynamic_rank) == true);
static_assert(RankConstraint::GreaterEqualOrUnspecified(2, 1) == true);
static_assert(RankConstraint::GreaterEqualOrUnspecified(1, 1) == true);
static_assert(RankConstraint::GreaterEqualOrUnspecified(0, 1) == false);
static_assert(RankConstraint::GreaterEqualOrUnspecified(dynamic_rank, 2) ==
true);
static_assert(RankConstraint::GreaterEqualOrUnspecified(1, dynamic_rank) ==
true);
static_assert(RankConstraint::GreaterEqualOrUnspecified(dynamic_rank,
dynamic_rank) == true);
TEST(RankCastTest, Basic) {
auto x =
StaticRankCast<3>(std::integral_constant<DimensionIndex, 3>()).value();
static_assert(
std::is_same_v<decltype(x), std::integral_constant<DimensionIndex, 3>>);
auto y = StaticRankCast<dynamic_rank>(x).value();
EXPECT_EQ(3, y);
static_assert(std::is_same_v<decltype(y), DimensionIndex>);
auto a = StaticRankCast<3>(DimensionIndex(3)).value();
auto b = StaticRankCast<dynamic_rank>(DimensionIndex(3)).value();
static_assert(
std::is_same_v<decltype(a), std::integral_constant<DimensionIndex, 3>>);
static_assert(std::is_same_v<decltype(b), DimensionIndex>);
EXPECT_THAT((StaticRankCast<3>(DimensionIndex(2))),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot cast rank of 2 to rank of 3"));
EXPECT_THAT((StaticRankCast<3>(DimensionIndex(3))),
::testing::Optional(tensorstore::StaticRank<3>()));
EXPECT_THAT((StaticRankCast<3>(DimensionIndex(dynamic_rank))),
::testing::Optional(tensorstore::StaticRank<3>()));
}
TEST(RankCastDeathTest, DynamicToStatic) {
EXPECT_DEBUG_DEATH((StaticRankCast<3, unchecked>(DimensionIndex(1))),
"StaticCast is not valid");
}
static_assert(InlineRankLimit(dynamic_rank(0)) == 0);
static_assert(InlineRankLimit(dynamic_rank(1)) == 1);
static_assert(InlineRankLimit(dynamic_rank(2)) == 2);
static_assert(RankConstraint::FromInlineRank(dynamic_rank(0)) == -1);
static_assert(RankConstraint::FromInlineRank(dynamic_rank(1)) == -1);
static_assert(RankConstraint::FromInlineRank(dynamic_rank(2)) == -1);
static_assert(RankConstraint::FromInlineRank(0) == 0);
static_assert(RankConstraint::FromInlineRank(1) == 1);
static_assert(RankConstraint::FromInlineRank(2) == 2);
} |
540 | cpp | google/tensorstore | open_mode | tensorstore/open_mode.cc | tensorstore/open_mode_test.cc | #ifndef TENSORSTORE_OPEN_MODE_H_
#define TENSORSTORE_OPEN_MODE_H_
#include <iosfwd>
#include <string_view>
#include <type_traits>
#include "absl/status/status.h"
namespace tensorstore {
enum class OpenMode {
unknown = 0,
open = 1,
create = 2,
delete_existing = 4,
open_or_create = open + create,
assume_metadata = 8,
assume_cached_metadata = 16,
};
constexpr inline OpenMode operator&(OpenMode a, OpenMode b) {
return static_cast<OpenMode>(static_cast<int>(a) & static_cast<int>(b));
}
constexpr inline OpenMode operator|(OpenMode a, OpenMode b) {
return static_cast<OpenMode>(static_cast<int>(a) | static_cast<int>(b));
}
constexpr inline bool operator!(OpenMode a) { return !static_cast<int>(a); }
constexpr inline OpenMode operator~(OpenMode a) {
return static_cast<OpenMode>(
~static_cast<std::underlying_type_t<OpenMode>>(a));
}
std::ostream& operator<<(std::ostream& os, OpenMode mode);
enum class ReadWriteMode {
dynamic = 0,
read = 1,
write = 2,
read_write = 3,
};
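// ReadWriteMode is a bitmask: read_write == (read | write), and dynamic (0)
// leaves the mode unspecified so that it can be determined at open time.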
constexpr inline ReadWriteMode operator&(ReadWriteMode a, ReadWriteMode b) {
return static_cast<ReadWriteMode>(static_cast<int>(a) & static_cast<int>(b));
}
constexpr inline ReadWriteMode& operator&=(ReadWriteMode& a, ReadWriteMode b) {
return a = (a & b);
}
constexpr inline ReadWriteMode operator|(ReadWriteMode a, ReadWriteMode b) {
return static_cast<ReadWriteMode>(static_cast<int>(a) | static_cast<int>(b));
}
constexpr inline ReadWriteMode& operator|=(ReadWriteMode& a, ReadWriteMode b) {
return a = (a | b);
}
constexpr inline bool operator!(ReadWriteMode a) {
return !static_cast<int>(a);
}
constexpr inline ReadWriteMode operator~(ReadWriteMode a) {
return static_cast<ReadWriteMode>(
~static_cast<std::underlying_type_t<ReadWriteMode>>(a));
}
constexpr inline bool IsModeExplicitlyConvertible(ReadWriteMode source,
ReadWriteMode target) {
return (target == ReadWriteMode::dynamic ||
source == ReadWriteMode::dynamic || (target & source) == target);
}
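// Explicit conversion is permitted when either mode is dynamic or the
// target's bits are a subset of the source's.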
std::string_view to_string(ReadWriteMode mode);
std::ostream& operator<<(std::ostream& os, ReadWriteMode mode);
class MinimalSpec {
public:
constexpr explicit MinimalSpec(bool minimal_spec = true)
: minimal_spec_(minimal_spec) {}
bool minimal_spec() const { return minimal_spec_; }
private:
bool minimal_spec_;
};
namespace internal {
constexpr ReadWriteMode StaticReadWriteMask(ReadWriteMode mode) {
return mode == ReadWriteMode::dynamic ? ReadWriteMode::read_write : mode;
}
constexpr bool IsModePossible(ReadWriteMode mode, ReadWriteMode constraint) {
return constraint == ReadWriteMode::dynamic ? mode != ReadWriteMode::dynamic
: mode == constraint;
}
absl::Status ValidateSupportsRead(ReadWriteMode mode);
absl::Status ValidateSupportsWrite(ReadWriteMode mode);
absl::Status ValidateSupportsModes(ReadWriteMode mode,
ReadWriteMode required_modes);
}
}
#endif
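// Example (sketch): open modes combine as bit flags.
//   tensorstore::OpenMode mode =
//       tensorstore::OpenMode::open | tensorstore::OpenMode::create;
//   assert(mode == tensorstore::OpenMode::open_or_create);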
#include "tensorstore/open_mode.h"
#include <ostream>
#include "absl/status/status.h"
namespace tensorstore {
std::string_view to_string(ReadWriteMode mode) {
switch (mode) {
case ReadWriteMode::dynamic:
return "dynamic";
case ReadWriteMode::read:
return "read";
case ReadWriteMode::write:
return "write";
case ReadWriteMode::read_write:
return "read_write";
default:
return "<unknown>";
}
}
std::ostream& operator<<(std::ostream& os, ReadWriteMode mode) {
return os << to_string(mode);
}
std::ostream& operator<<(std::ostream& os, OpenMode mode) {
const char* sep = "";
constexpr const char* kSep = "|";
if (!!(mode & OpenMode::open)) {
os << "open";
sep = kSep;
}
if (!!(mode & OpenMode::create)) {
os << sep << "create";
sep = kSep;
}
if (!!(mode & OpenMode::delete_existing)) {
os << sep << "delete_existing";
sep = kSep;
}
if (!!(mode & OpenMode::assume_metadata)) {
os << sep << "assume_metadata";
sep = kSep;
}
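  // (OpenMode::assume_cached_metadata has no printed representation here.)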
return os;
}
namespace internal {
absl::Status ValidateSupportsRead(ReadWriteMode mode) {
return !(mode & ReadWriteMode::read)
? absl::InvalidArgumentError("Source does not support reading.")
: absl::Status();
}
absl::Status ValidateSupportsWrite(ReadWriteMode mode) {
return !(mode & ReadWriteMode::write)
? absl::InvalidArgumentError(
"Destination does not support writing.")
: absl::Status();
}
absl::Status ValidateSupportsModes(ReadWriteMode mode,
ReadWriteMode required_modes) {
if ((mode & required_modes) != required_modes) {
if (!!(required_modes & ReadWriteMode::read) &&
!(mode & ReadWriteMode::read)) {
return absl::InvalidArgumentError("Read mode not supported");
}
if (!!(required_modes & ReadWriteMode::write) &&
!(mode & ReadWriteMode::write)) {
return absl::InvalidArgumentError("Write mode not supported");
}
}
return absl::OkStatus();
}
}
} | #include "tensorstore/open_mode.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::OpenMode;
using ::tensorstore::ReadWriteMode;
using ::tensorstore::StrCat;
static_assert(ReadWriteMode::read_write ==
(ReadWriteMode::read | ReadWriteMode::write));
static_assert((ReadWriteMode::read_write & ReadWriteMode::read) ==
ReadWriteMode::read);
static_assert(!ReadWriteMode::dynamic);
static_assert(tensorstore::internal::StaticReadWriteMask(ReadWriteMode::read) ==
ReadWriteMode::read);
static_assert(tensorstore::internal::StaticReadWriteMask(
ReadWriteMode::write) == ReadWriteMode::write);
static_assert(tensorstore::internal::StaticReadWriteMask(
ReadWriteMode::dynamic) == ReadWriteMode::read_write);
static_assert(tensorstore::internal::IsModePossible(ReadWriteMode::read,
ReadWriteMode::dynamic));
static_assert(tensorstore::internal::IsModePossible(ReadWriteMode::read,
ReadWriteMode::read));
static_assert(tensorstore::internal::IsModePossible(ReadWriteMode::write,
ReadWriteMode::dynamic));
static_assert(tensorstore::internal::IsModePossible(ReadWriteMode::write,
ReadWriteMode::write));
static_assert(tensorstore::internal::IsModePossible(ReadWriteMode::read_write,
ReadWriteMode::dynamic));
static_assert(tensorstore::internal::IsModePossible(ReadWriteMode::read_write,
ReadWriteMode::read_write));
static_assert(!tensorstore::internal::IsModePossible(ReadWriteMode::dynamic,
ReadWriteMode::dynamic));
static_assert(!tensorstore::internal::IsModePossible(
ReadWriteMode::read, ReadWriteMode::read_write));
static_assert(!tensorstore::internal::IsModePossible(ReadWriteMode::read,
ReadWriteMode::write));
static_assert(!tensorstore::internal::IsModePossible(ReadWriteMode::write,
ReadWriteMode::read));
static_assert(!tensorstore::internal::IsModePossible(
ReadWriteMode::write, ReadWriteMode::read_write));
static_assert(!tensorstore::internal::IsModePossible(ReadWriteMode::read_write,
ReadWriteMode::read));
static_assert(!tensorstore::internal::IsModePossible(ReadWriteMode::read_write,
ReadWriteMode::write));
TEST(ReadWriteModeTest, PrintToOstream) {
EXPECT_EQ("dynamic", StrCat(ReadWriteMode::dynamic));
EXPECT_EQ("read", StrCat(ReadWriteMode::read));
EXPECT_EQ("write", StrCat(ReadWriteMode::write));
EXPECT_EQ("read_write", StrCat(ReadWriteMode::read_write));
EXPECT_EQ("<unknown>", StrCat(static_cast<ReadWriteMode>(10)));
}
TEST(OpenTest, PrintToOstream) {
EXPECT_EQ("", StrCat(OpenMode{}));
EXPECT_EQ("open", StrCat(OpenMode::open));
EXPECT_EQ("create", StrCat(OpenMode::create));
EXPECT_EQ("open|create", StrCat(OpenMode::open | OpenMode::create));
EXPECT_EQ("open|assume_metadata",
StrCat(OpenMode::open | OpenMode::assume_metadata));
EXPECT_EQ("create|delete_existing",
StrCat(OpenMode::create | OpenMode::delete_existing));
}
} |
541 | cpp | google/tensorstore | index_transform_builder | tensorstore/index_space/index_transform_builder.cc | tensorstore/index_space/index_transform_builder_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_INDEX_TRANSFORM_BUILDER_H_
#define TENSORSTORE_INDEX_SPACE_INDEX_TRANSFORM_BUILDER_H_
#include <stddef.h>
#include <algorithm>
#include <optional>
#include <string>
#include <string_view>
#include <type_traits>
#include <utility>
#include "absl/container/inlined_vector.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/deep_copy_transform_rep_ptr.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/output_index_map.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/rank.h"
#include "tensorstore/static_cast.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal_index_space {
enum class BuilderFlags : unsigned int {
kDefault = 0,
kSetLower = 1,
kSetImplicitLower = 2,
kSetUpper = 4,
kSetImplicitUpper = 8
};
inline BuilderFlags operator|(BuilderFlags a, BuilderFlags b) {
return static_cast<BuilderFlags>(static_cast<unsigned int>(a) |
static_cast<unsigned int>(b));
}
inline BuilderFlags& operator|=(BuilderFlags& a, BuilderFlags b) {
return a = a | b;
}
inline BuilderFlags operator&(BuilderFlags a, BuilderFlags b) {
return static_cast<BuilderFlags>(static_cast<unsigned int>(a) &
static_cast<unsigned int>(b));
}
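// Deferred representation of a single output index map. Exactly one case
// applies: `input_dimension` is set (single-input-dimension map), the
// `index_array` is non-null (index array map), or neither (constant map).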
class OutputIndexMapInitializer {
public:
OutputIndexMapInitializer() {}
OutputIndexMapInitializer(DimensionIndex input_dimension)
: input_dimension(input_dimension) {}
OutputIndexMapInitializer(const SharedArrayView<const Index, dynamic_rank,
offset_origin>& index_array,
Result<IndexInterval> bounds)
: index_array(index_array), index_array_bounds(bounds) {}
std::optional<DimensionIndex> input_dimension;
SharedArray<const Index, dynamic_rank, offset_origin> index_array;
Result<IndexInterval> index_array_bounds{in_place};
};
template <typename Range, typename Element>
void AssignRange(const Range& range, span<Element> dest);
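// True when `Range` is either not span-compatible (primary template) or has
// a static extent compatible with `StaticExtent`; used below to constrain
// the builder's range-accepting overloads via SFINAE.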
template <std::ptrdiff_t StaticExtent, typename Range, typename = void>
constexpr inline bool IsStaticExtentCompatibleWithRange = true;
template <std::ptrdiff_t StaticExtent, typename Range>
constexpr inline bool IsStaticExtentCompatibleWithRange<
StaticExtent, Range, std::void_t<internal::ConstSpanType<Range>>> =
RankConstraint::EqualOrUnspecified(StaticExtent,
internal::ConstSpanType<Range>::extent);
}
template <DimensionIndex InputRank = dynamic_rank,
DimensionIndex OutputRank = dynamic_rank>
class IndexTransformBuilder {
static_assert(RankConstraint(InputRank).valid());
static_assert(RankConstraint(OutputRank).valid());
public:
IndexTransformBuilder(std::nullptr_t) {}
template <DimensionIndex IRank = InputRank, DimensionIndex ORank = OutputRank,
typename = std::enable_if_t<(IRank == dynamic_rank &&
ORank == dynamic_rank)>>
IndexTransformBuilder(DimensionIndex input_rank, DimensionIndex output_rank)
: IndexTransformBuilder(std::true_type{}, input_rank, output_rank) {}
template <DimensionIndex IRank = InputRank, DimensionIndex ORank = OutputRank,
typename = std::enable_if_t<(IRank != dynamic_rank &&
ORank != dynamic_rank)>>
IndexTransformBuilder(
std::integral_constant<DimensionIndex, IRank> input_rank = {},
std::integral_constant<DimensionIndex, ORank> output_rank = {})
: IndexTransformBuilder(std::true_type{}, input_rank, output_rank) {}
template <DimensionIndex IRank = InputRank, DimensionIndex ORank = OutputRank,
typename = std::enable_if_t<(IRank == dynamic_rank &&
ORank != dynamic_rank)>>
IndexTransformBuilder(
DimensionIndex input_rank,
std::integral_constant<DimensionIndex, ORank> output_rank = {})
: IndexTransformBuilder(std::true_type{}, input_rank, output_rank) {}
IndexTransformBuilder(const IndexTransformBuilder&) = default;
IndexTransformBuilder(IndexTransformBuilder&&) = default;
IndexTransformBuilder& operator=(const IndexTransformBuilder&) = default;
IndexTransformBuilder& operator=(IndexTransformBuilder&&) = default;
bool valid() const { return static_cast<bool>(rep_); }
StaticOrDynamicRank<InputRank> input_rank() const {
return StaticRankCast<InputRank, unchecked>(
static_cast<DimensionIndex>(rep_->input_rank));
}
StaticOrDynamicRank<OutputRank> output_rank() const {
return StaticRankCast<OutputRank, unchecked>(
static_cast<DimensionIndex>(rep_->output_rank));
}
span<Index, InputRank> input_origin() {
flags_ |= internal_index_space::BuilderFlags::kSetLower;
return {rep_->input_origin().data(), input_rank()};
}
template <typename Indices>
std::enable_if_t<internal_index_space::IsStaticExtentCompatibleWithRange<
InputRank, Indices>,
IndexTransformBuilder&>
input_origin(const Indices& indices) {
internal_index_space::AssignRange(indices, span<Index>(input_origin()));
return *this;
}
template <size_t N>
IndexTransformBuilder& input_origin(const Index (&indices)[N]) {
static_assert(InputRank == dynamic_rank || InputRank == N, "");
return input_origin(span(indices));
}
span<Index, InputRank> input_shape() {
flags_ |= internal_index_space::BuilderFlags::kSetUpper;
interval_form_ = IntervalForm::sized;
return {rep_->input_shape().data(), input_rank()};
}
template <typename Indices>
std::enable_if_t<internal_index_space::IsStaticExtentCompatibleWithRange<
InputRank, Indices>,
IndexTransformBuilder&>
input_shape(const Indices& indices) {
internal_index_space::AssignRange(indices, span<Index>(input_shape()));
return *this;
}
template <size_t N>
IndexTransformBuilder& input_shape(const Index (&indices)[N]) {
static_assert(InputRank == dynamic_rank || InputRank == N, "");
return input_shape(span(indices));
}
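  // Note: input_shape(), input_exclusive_max(), and input_inclusive_max()
  // all write into the same rep_->input_shape() storage; interval_form_
  // records whether the stored values are sizes, exclusive maxima, or
  // inclusive maxima, so the last of these setters to be called wins.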
span<Index, InputRank> input_exclusive_max() {
flags_ |= internal_index_space::BuilderFlags::kSetUpper;
interval_form_ = IntervalForm::half_open;
return {rep_->input_shape().data(), input_rank()};
}
template <typename Indices>
std::enable_if_t<internal_index_space::IsStaticExtentCompatibleWithRange<
InputRank, Indices>,
IndexTransformBuilder&>
input_exclusive_max(const Indices& indices) {
internal_index_space::AssignRange(indices,
span<Index>(input_exclusive_max()));
return *this;
}
template <size_t N>
IndexTransformBuilder& input_exclusive_max(const Index (&indices)[N]) {
static_assert(InputRank == dynamic_rank || InputRank == N, "");
return input_exclusive_max(span(indices));
}
span<Index, InputRank> input_inclusive_max() {
flags_ |= internal_index_space::BuilderFlags::kSetUpper;
interval_form_ = IntervalForm::closed;
return {rep_->input_shape().data(), input_rank()};
}
template <typename Indices>
std::enable_if_t<internal_index_space::IsStaticExtentCompatibleWithRange<
InputRank, Indices>,
IndexTransformBuilder&>
input_inclusive_max(const Indices& indices) {
internal_index_space::AssignRange(indices,
span<Index>(input_inclusive_max()));
return *this;
}
template <size_t N>
IndexTransformBuilder& input_inclusive_max(const Index (&indices)[N]) {
static_assert(InputRank == dynamic_rank || InputRank == N, "");
return input_inclusive_max(span(indices));
}
template <typename BoxLike>
std::enable_if_t<IsBoxLikeImplicitlyConvertibleToRank<BoxLike, InputRank>,
IndexTransformBuilder&>
input_bounds(const BoxLike& box) {
this->input_bounds().DeepAssign(box);
return *this;
}
MutableBoxView<InputRank> input_bounds() {
flags_ |= (internal_index_space::BuilderFlags::kSetLower |
internal_index_space::BuilderFlags::kSetUpper);
interval_form_ = IntervalForm::sized;
return MutableBoxView<InputRank>(input_rank(), rep_->input_origin().data(),
rep_->input_shape().data());
}
IndexTransformBuilder& input_domain(IndexDomainView<InputRank> domain) {
input_origin(domain.origin());
input_shape(domain.shape());
input_labels(domain.labels());
implicit_lower_bounds(domain.implicit_lower_bounds());
implicit_upper_bounds(domain.implicit_upper_bounds());
return *this;
}
span<std::string, InputRank> input_labels() {
return {rep_->input_labels().data(), input_rank()};
}
template <typename Labels>
std::enable_if_t<internal_index_space::IsStaticExtentCompatibleWithRange<
InputRank, Labels>,
IndexTransformBuilder&>
input_labels(const Labels& labels) {
internal_index_space::AssignRange(labels,
span<std::string>(input_labels()));
return *this;
}
template <size_t N>
IndexTransformBuilder& input_labels(const std::string_view (&labels)[N]) {
static_assert(InputRank == dynamic_rank || InputRank == N, "");
return input_labels(span(labels));
}
DimensionSet& implicit_lower_bounds() {
flags_ |= internal_index_space::BuilderFlags::kSetImplicitLower;
return rep_->implicit_lower_bounds;
}
IndexTransformBuilder& implicit_lower_bounds(DimensionSet x) {
implicit_lower_bounds() = x;
return *this;
}
template <size_t N>
IndexTransformBuilder& implicit_lower_bounds(const bool (&x)[N]) {
static_assert(InputRank == dynamic_rank || InputRank == N);
ABSL_CHECK_EQ(N, input_rank()) << "range size mismatch";
return implicit_lower_bounds(DimensionSet::FromBools(x));
}
DimensionSet& implicit_upper_bounds() {
flags_ |= internal_index_space::BuilderFlags::kSetImplicitUpper;
return rep_->implicit_upper_bounds;
}
IndexTransformBuilder& implicit_upper_bounds(DimensionSet x) {
implicit_upper_bounds() = x;
return *this;
}
template <size_t N>
IndexTransformBuilder& implicit_upper_bounds(const bool (&x)[N]) {
static_assert(InputRank == dynamic_rank || InputRank == N);
ABSL_CHECK_EQ(N, input_rank()) << "range size mismatch";
return implicit_upper_bounds(DimensionSet::FromBools(x));
}
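  // Copies an existing output index map (e.g. one obtained from another
  // transform's output_index_maps()) into this builder, dispatching on its
  // method.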
template <DimensionIndex OtherInputRank>
IndexTransformBuilder& output_map(DimensionIndex output_dim,
OutputIndexMapRef<OtherInputRank> map) {
switch (map.method()) {
case OutputIndexMethod::constant:
AssignOutput(output_dim, map.offset(), 0,
internal_index_space::OutputIndexMapInitializer());
break;
case OutputIndexMethod::single_input_dimension:
AssignOutput(output_dim, map.offset(), map.stride(),
internal_index_space::OutputIndexMapInitializer(
map.input_dimension()));
break;
case OutputIndexMethod::array: {
auto index_array = map.index_array();
AssignOutput(
output_dim, map.offset(), map.stride(),
internal_index_space::OutputIndexMapInitializer(
index_array.shared_array_ref(), index_array.index_range()));
break;
}
}
return *this;
}
IndexTransformBuilder& output_constant(DimensionIndex output_dim,
Index offset) {
AssignOutput(output_dim, offset, 0,
internal_index_space::OutputIndexMapInitializer());
return *this;
}
IndexTransformBuilder& output_single_input_dimension(
DimensionIndex output_dim, Index offset, Index stride,
DimensionIndex input_dim) {
AssignOutput(output_dim, offset, stride,
internal_index_space::OutputIndexMapInitializer(input_dim));
return *this;
}
IndexTransformBuilder& output_single_input_dimension(
DimensionIndex output_dim, DimensionIndex input_dim) {
return output_single_input_dimension(output_dim, 0, 1, input_dim);
}
IndexTransformBuilder& output_index_array(
DimensionIndex output_dim, Index offset, Index stride,
const SharedArrayView<const Index, dynamic_rank, offset_origin>&
index_array,
Result<IndexInterval> index_range = IndexInterval()) {
AssignOutput(output_dim, offset, stride,
internal_index_space::OutputIndexMapInitializer(
index_array, std::move(index_range)));
return * | #include "tensorstore/index_space/index_transform_builder.h"
#include <array>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::DimensionSet;
using ::tensorstore::Index;
using ::tensorstore::IndexDomainBuilder;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransform;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::OutputIndexMethod;
using ::tensorstore::span;
using ::tensorstore::internal_index_space::TransformAccess;
TEST(IndexTransformTest, BuilderValid) {
auto index_array = MakeArray<Index>({{{1, 0, 2, 2}}});
auto t =
IndexTransformBuilder<3, 4>()
.input_origin({1, 2, 3})
.input_shape({2, 2, 4})
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({1, 0, 0})
.input_labels({"x", "y", "z"})
.output_constant(0, 4)
.output_single_input_dimension(1, 5, 7, 2)
.output_constant(2, 6)
.output_index_array(3, 7, 9, index_array, IndexInterval::Closed(0, 3))
.Finalize()
.value();
static_assert(std::is_same_v<decltype(t), IndexTransform<3, 4>>);
EXPECT_THAT(t.input_origin(), ::testing::ElementsAre(1, 2, 3));
EXPECT_THAT(t.input_shape(), ::testing::ElementsAre(2, 2, 4));
EXPECT_THAT(t.input_labels(), ::testing::ElementsAre("x", "y", "z"));
EXPECT_THAT(t.implicit_lower_bounds(), DimensionSet::FromBools({0, 1, 0}));
EXPECT_THAT(t.implicit_upper_bounds(), DimensionSet::FromBools({1, 0, 0}));
EXPECT_EQ(IndexInterval::UncheckedSized(1, 2),
t.input_domain()[0].interval());
EXPECT_EQ(IndexInterval::UncheckedSized(2, 2),
t.input_domain()[1].interval());
EXPECT_EQ(IndexInterval::UncheckedSized(3, 4),
t.input_domain()[2].interval());
{
auto map = t.output_index_map(0);
EXPECT_EQ(OutputIndexMethod::constant, map.method());
EXPECT_EQ(4, map.offset());
EXPECT_EQ(0, map.stride());
}
{
auto map = t.output_index_map(1);
EXPECT_EQ(OutputIndexMethod::single_input_dimension, map.method());
EXPECT_EQ(2, map.input_dimension());
EXPECT_EQ(5, map.offset());
EXPECT_EQ(7, map.stride());
}
{
auto map = t.output_index_map(2);
EXPECT_EQ(OutputIndexMethod::constant, map.method());
EXPECT_EQ(6, map.offset());
EXPECT_EQ(0, map.stride());
}
{
auto map = t.output_index_map(3);
EXPECT_EQ(OutputIndexMethod::array, map.method());
EXPECT_EQ(7, map.offset());
EXPECT_EQ(9, map.stride());
auto index_array_ref = map.index_array();
EXPECT_EQ(&index_array(0, 0, 0), &index_array_ref.array_ref()(1, 2, 3));
EXPECT_THAT(index_array_ref.layout().byte_strides(),
::testing::ElementsAre(0, 0, sizeof(Index)));
}
{
std::array<Index, 4> output_indices;
ASSERT_EQ(
absl::OkStatus(),
t.TransformIndices(span<const Index, 3>({1, 2, 3}), output_indices));
EXPECT_THAT(output_indices, ::testing::ElementsAre(4, 26, 6, 16));
}
}
TEST(IndexTransformBuilderTest, Nullptr) {
IndexTransformBuilder<> builder(nullptr);
EXPECT_FALSE(builder.valid());
{
IndexTransformBuilder<> other_builder(builder);
EXPECT_FALSE(other_builder.valid());
}
{
IndexTransformBuilder<> other_builder(nullptr);
other_builder = builder;
EXPECT_FALSE(other_builder.valid());
}
}
TEST(IndexTransformBuilderTest, Move) {
IndexTransformBuilder<> builder(1, 1);
EXPECT_TRUE(builder.valid());
builder.input_origin({1});
auto builder2 = std::move(builder);
EXPECT_TRUE(builder2.valid());
EXPECT_FALSE(builder.valid());
builder2.output_constant(0, 5);
EXPECT_THAT(builder2.Finalize().value(), IndexTransformBuilder<>(1, 1)
.input_origin({1})
.output_constant(0, 5)
.Finalize()
.value());
}
TEST(IndexTransformBuilderTest, Copy) {
IndexTransformBuilder<> builder(1, 1);
EXPECT_TRUE(builder.valid());
builder.input_origin({1});
auto builder2 = builder;
EXPECT_TRUE(builder.valid());
EXPECT_TRUE(builder2.valid());
builder.output_constant(0, 4);
builder2.output_constant(0, 5);
EXPECT_THAT(builder.Finalize().value(), IndexTransformBuilder<>(1, 1)
.input_origin({1})
.output_constant(0, 4)
.Finalize()
.value());
EXPECT_THAT(builder2.Finalize().value(), IndexTransformBuilder<>(1, 1)
.input_origin({1})
.output_constant(0, 5)
.Finalize()
.value());
}
TEST(IndexTransformBuilderTest, Default) {
auto t = IndexTransformBuilder<>(2, 1).Finalize().value();
EXPECT_THAT(t.input_origin(), ::testing::ElementsAre(-kInfIndex, -kInfIndex));
EXPECT_THAT(t.input_shape(), ::testing::ElementsAre(kInfSize, kInfSize));
EXPECT_THAT(t.implicit_lower_bounds(), DimensionSet::FromBools({1, 1}));
EXPECT_THAT(t.implicit_upper_bounds(), DimensionSet::FromBools({1, 1}));
EXPECT_THAT(t.input_labels(), ::testing::ElementsAre("", ""));
auto map = t.output_index_map(0);
EXPECT_EQ(0, map.offset());
EXPECT_EQ(0, map.stride());
EXPECT_EQ(OutputIndexMethod::constant, map.method());
}
TEST(IndexTransformBuilderTest, InputOriginSpecified) {
auto t =
IndexTransformBuilder<>(2, 0).input_origin({1, 2}).Finalize().value();
EXPECT_EQ(t.domain()[0].interval(),
IndexInterval::UncheckedClosed(1, kInfIndex));
EXPECT_EQ(t.domain()[1].interval(),
IndexInterval::UncheckedClosed(2, kInfIndex));
EXPECT_THAT(t.implicit_lower_bounds(), DimensionSet::FromBools({0, 0}));
EXPECT_THAT(t.implicit_upper_bounds(), DimensionSet::FromBools({1, 1}));
EXPECT_THAT(t.input_labels(), ::testing::ElementsAre("", ""));
}
TEST(IndexTransformBuilderTest, ImplicitLowerBoundsSpecified) {
auto t = IndexTransformBuilder<>(2, 0)
.implicit_lower_bounds({1, 0})
.Finalize()
.value();
EXPECT_EQ(t.domain()[0].interval(),
IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex));
EXPECT_EQ(t.domain()[1].interval(),
IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex));
EXPECT_THAT(t.implicit_lower_bounds(), DimensionSet::FromBools({1, 0}));
EXPECT_THAT(t.implicit_upper_bounds(), DimensionSet::FromBools({1, 1}));
EXPECT_THAT(t.input_labels(), ::testing::ElementsAre("", ""));
}
TEST(IndexTransformBuilderTest, InputShapeSpecified) {
auto t =
IndexTransformBuilder<>(2, 0).input_shape({5, 10}).Finalize().value();
EXPECT_EQ(t.domain()[0].interval(), IndexInterval::UncheckedSized(0, 5));
EXPECT_EQ(t.domain()[1].interval(), IndexInterval::UncheckedSized(0, 10));
EXPECT_THAT(t.implicit_lower_bounds(), DimensionSet::FromBools({0, 0}));
EXPECT_THAT(t.implicit_upper_bounds(), DimensionSet::FromBools({0, 0}));
EXPECT_THAT(t.input_labels(), ::testing::ElementsAre("", ""));
}
TEST(IndexTransformBuilderTest, InputInclusiveMaxSpecified) {
auto t = IndexTransformBuilder<>(2, 0)
.input_inclusive_max({5, 10})
.Finalize()
.value();
EXPECT_EQ(t.domain()[0].interval(),
IndexInterval::UncheckedClosed(-kInfIndex, 5));
EXPECT_EQ(t.domain()[1].interval(),
IndexInterval::UncheckedClosed(-kInfIndex, 10));
EXPECT_THAT(t.implicit_lower_bounds(), DimensionSet::FromBools({1, 1}));
EXPECT_THAT(t.implicit_upper_bounds(), DimensionSet::FromBools({0, 0}));
EXPECT_THAT(t.input_labels(), ::testing::ElementsAre("", ""));
}
TEST(IndexTransformBuilderTest, InputExclusiveMaxSpecified) {
auto t = IndexTransformBuilder<>(2, 0)
.input_exclusive_max({5, 10})
.Finalize()
.value();
EXPECT_EQ(t.domain()[0].interval(),
IndexInterval::UncheckedHalfOpen(-kInfIndex, 5));
EXPECT_EQ(t.domain()[1].interval(),
IndexInterval::UncheckedHalfOpen(-kInfIndex, 10));
EXPECT_THAT(t.implicit_lower_bounds(), DimensionSet::FromBools({1, 1}));
EXPECT_THAT(t.implicit_upper_bounds(), DimensionSet::FromBools({0, 0}));
EXPECT_THAT(t.input_labels(), ::testing::ElementsAre("", ""));
}
TEST(IndexTransformBuilderTest, ImplicitUpperBoundsSpecified) {
auto t = IndexTransformBuilder<>(2, 0)
.implicit_upper_bounds({1, 0})
.Finalize()
.value();
EXPECT_EQ(t.domain()[0].interval(),
IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex));
EXPECT_EQ(t.domain()[1].interval(),
IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex));
EXPECT_THAT(t.implicit_lower_bounds(), DimensionSet::FromBools({1, 1}));
EXPECT_THAT(t.implicit_upper_bounds(), DimensionSet::FromBools({1, 0}));
EXPECT_THAT(t.input_labels(), ::testing::ElementsAre("", ""));
}
TEST(IndexTransformBuilderTest, SingleInputDimensionDefaults) {
EXPECT_EQ(IndexTransformBuilder<>(3, 1)
.output_single_input_dimension(0, 2)
.Finalize()
.value(),
IndexTransformBuilder<>(3, 1)
                .output_single_input_dimension(0, 0, 1, 2)
.Finalize()
.value());
}
TEST(IndexTransformBuilderTest, InputOriginOutOfRange) {
EXPECT_THAT(
IndexTransformBuilder<>(2, 1)
.input_origin({-kInfIndex - 1, -kInfIndex})
.Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".* do not specify a valid half-open index interval"));
}
TEST(IndexTransformBuilderTest, InputShapeOutOfRange) {
EXPECT_THAT(
IndexTransformBuilder<>(2, 1).input_shape({1, -1}).Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"\\(0, -1\\) do not specify a valid sized index interval"));
}
TEST(IndexTransformBuilderTest, InvalidInputDimensionNegative) {
EXPECT_THAT(
IndexTransformBuilder<>(2, 1)
.output_single_input_dimension(0, 0, 1, -1)
.Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Input dimension -1 specified for output dimension 0 "
"is outside valid range \\[0, 2\\)"));
}
TEST(IndexTransformBuilderTest, InvalidInputDimensionPositive) {
EXPECT_THAT(
IndexTransformBuilder<>(2, 1)
.output_single_input_dimension(0, 2)
.Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Input dimension 2 specified for output dimension 0 "
"is outside valid range \\[0, 2\\)"));
}
TEST(IndexTransformBuilderTest, InvalidIndexArrayRank) {
EXPECT_THAT(IndexTransformBuilder<>(2, 1)
.output_index_array(0, 0, 1, MakeArray<Index>({1}))
.Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Index array for output dimension 0 "
"has rank 1 but must have rank 2"));
}
TEST(IndexTransformBuilderTest, InvalidIndexArrayShape) {
EXPECT_THAT(
IndexTransformBuilder<>(2, 1)
.input_shape({2, 2})
.output_index_array(0, 0, 1,
MakeArray<Index>({{1, 2}, {3, 4}, {5, 6}}))
.Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Index array for output dimension 0 has shape \\{3, 2\\} "
"which does not match input_shape \\{2, 2\\}"));
}
TEST(IndexTransformBuilderTest, InvalidIndexArrayImplicitLowerBound) {
EXPECT_THAT(
IndexTransformBuilder<>(2, 1)
.input_shape({3, 2})
.implicit_lower_bounds({1, 0})
.output_index_array(0, 0, 1,
MakeArray<Index>({{1, 2}, {3, 4}, {5, 6}}))
.Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Index array for output dimension 0 "
"depends on input dimension 0 with implicit bounds"));
}
TEST(IndexTransformBuilderTest, InvalidIndexArrayImplicitUpperBound) {
EXPECT_THAT(
IndexTransformBuilder<>(2, 1)
.input_shape({3, 2})
.implicit_upper_bounds({1, 0})
.output_index_array(0, 0, 1,
MakeArray<Index>({{1, 2}, {3, 4}, {5, 6}}))
.Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Index array for output dimension 0 "
"depends on input dimension 0 with implicit bounds"));
}
TEST(IndexTransformBuilderTest, InvalidIndexArrayIndexRange) {
EXPECT_THAT(
IndexTransformBuilder<>(2, 1)
.input_shape({2, 2})
.output_index_array(0, 0, 1, MakeArray<Index>({{1, 2}, {3, 4}}),
IndexInterval::Sized(3, -1))
.Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"\\(3, -1\\) do not specify a valid sized index interval"));
}
TEST(IndexTransformBuilderTest, InvalidIndexArrayWithUnboundedDomain) {
EXPECT_THAT(
IndexTransformBuilder(1, 1)
.input_origin({tensorstore::kMaxFiniteIndex})
.input_shape({2})
.output_index_array(0, 0, 1, MakeArray<Index>({1, 2}))
.Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Index array for output dimension 0 "
"depends on input dimension 0 with infinite bounds"));
}
TEST(IndexTransformBuilderDeathTest, InvalidArguments) {
EXPECT_DEATH((IndexTransformBuilder<>(2, 1).input_origin({1, 2, 3})),
"range size mismatch");
EXPECT_DEATH((IndexTransformBuilder<>(2, 1).input_shape({1, 2, 3})),
"range size mismatch");
EXPECT_DEATH((IndexTransformBuilder<>(2, 1).implicit_lower_bounds({1, 1, 0})),
"range size mismatch");
EXPECT_DEATH((IndexTransformBuilder<>(2, 1).implicit_upper_bounds({1, 1, 0})),
"range size mismatch");
EXPECT_DEATH((IndexTransformBuilder<>(2, 1).input_labels({"a"})),
"range size mismatch");
EXPECT_DEATH((IndexTransformBuilder<>(2, 1).output_constant(1, 0)),
"invalid output dimension");
}
TEST(IndexTransformBuilderTest, OutputStrideZero) {
auto t = IndexTransformBuilder<>(1, 1)
.output_single_input_dimension(0, 1, 0, 0)
.Finalize()
.value();
auto map = t.output_index_map(0);
EXPECT_EQ(1, map.offset());
EXPECT_EQ(0, map.stride());
EXPECT_EQ(OutputIndexMethod::constant, map.method());
}
TEST(IndexTransformBuilderTest, InclusiveMax) {
auto t = IndexTransformBuilder<>(2, 2)
.input_origin({1, 2})
.input_inclusive_max({3, 5})
.Finalize()
.value();
EXPECT_THAT(t.input_origin(), ::testing::ElementsAre(1, 2));
EXPECT_THAT(t.input_shape(), ::testing::ElementsAre(3, 4));
}
TEST(IndexTransformBuilderTest, InputShapeInfSize) {
auto t = IndexTransformBuilder<>(2, 2)
.input_origin({1, 2})
.input_shape({3, kInfSize})
.Finalize()
.value();
EXPECT_THAT(t.input_origin(), ::testing::ElementsAre(1, 2));
EXPECT_THAT(t.input_shape(), ::testing::ElementsAre(3, kInfIndex + 1 - 2));
}
TEST(IndexTransformBuilderTest, ExclusiveMax) {
auto t = IndexTransformBuilder<>(2, 2)
.input_origin({1, 2})
.input_exclusive_max({3, 5})
.Finalize()
.value();
EXPECT_THAT(t.input_origin(), ::testing::ElementsAre(1, 2));
EXPECT_THAT(t.input_shape(), ::testing::ElementsAre(2, 3));
}
TEST(IndexTransformBuilderTest, ExclusiveMaxAfterShape) {
auto t = IndexTransformBuilder<>(2, 2)
.input_origin({1, 2})
.input_shape({15, 16})
.input_exclusive_max({3, 5})
.Finalize()
.value();
EXPECT_THAT(t.input_origin(), ::testing::ElementsAre(1, 2));
EXPECT_THAT(t.input_shape(), ::testing::ElementsAre(2, 3));
}
TEST(IndexTransformBuilderTest, InputDomainBox) {
auto t = IndexTransformBuilder<>(2, 2)
.input_bounds(tensorstore::BoxView({1, 2}, {2, 3}))
.Finalize()
.value();
EXPECT_THAT(t.input_origin(), ::testing::ElementsAre(1, 2));
EXPECT_THAT(t.input_shape(), ::testing::ElementsAre(2, 3));
}
TEST(IndexTransformBuilderTest, InputDomain) {
tensorstore::IndexDomain<2> domain(IndexTransformBuilder<2, 0>()
.input_origin({1, 2})
.input_shape({3, 4})
.implicit_lower_bounds({0, 1})
.implicit_upper_bounds({1, 0})
.input_labels({"x", "y"})
.Finalize()
.value()
.domain());
auto t =
IndexTransformBuilder<>(2, 2).input_domain(domain).Finalize().value();
EXPECT_EQ(domain, t.domain());
}
TEST(IndexTransformBuilderTest, OutputIdentityTransform) {
EXPECT_THAT(
IndexTransformBuilder(2, 2).output_identity_transform().Finalize(),
::testing::Optional(tensorstore::IdentityTransform(2)));
EXPECT_EQ(IndexTransformBuilder(3, 2)
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
IndexTransformBuilder(3, 2)
.output_identity_transform()
.Finalize()
.value());
EXPECT_EQ(IndexTransformBuilder(2, 3)
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_constant(2, 0)
.Finalize()
.value(),
IndexTransformBuilder(2, 3)
.output_identity_transform()
.Finalize()
.value());
}
TEST(IndexTransformBuilderTest, CopyOutputMap) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto t,
IndexTransformBuilder(3, 4)
.input_origin({1, 2, 3})
.input_shape({2, 2, 4})
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({1, 0, 0})
.input_labels({"x", "y", "z"})
.output_constant(0, 4)
.output_single_input_dimension(1, 5, 7, 2)
.output_constant(2, 6)
.output_index_array(3, 7, 9, MakeArray<Index>({{{1, 0, 2, 2}}}),
IndexInterval::Closed(0, 3))
.Finalize());
EXPECT_THAT(IndexTransformBuilder(3, 4)
.input_domain(t.domain())
.output_maps(t.output_index_maps())
.Finalize(),
::testing::Optional(t));
EXPECT_THAT(IndexTransformBuilder(3, 4)
.input_domain(t.domain())
.output_constant(0, 4)
.output_map(1, t.output_index_maps()[1])
.output_map(2, t.output_index_maps()[2])
.output_map(3, t.output_index_maps()[3])
.Finalize(),
::testing::Optional(t));
}
TEST(InitializeTransformRepForBuilder, Basic) {
auto source = tensorstore::internal_index_space::TransformRep::Allocate(1, 2);
source->output_rank = 2;
tensorstore::internal_index_space::InitializeTransformRepForBuilder(
source.get());
EXPECT_EQ(0, source->output_index_maps()[0].offset());
EXPECT_EQ(0, source->output_index_maps()[0].stride());
EXPECT_EQ(0, source->output_index_maps()[1].offset());
EXPECT_EQ(0, source->output_index_maps()[1].stride());
}
TEST(IndexTransformBuilder, NonUniqueLabels) {
EXPECT_THAT(
IndexTransformBuilder<>(3, 0).input_labels({"a", "", "a"}).Finalize(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Dimension label\\(s\\) \"a\" not unique"));
}
TEST(IndexTransformBuilderTest, IndexArrayWithEmptyExplicitDomain) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected,
IndexTransformBuilder(2, 2)
.input_shape({0, 2})
.output_constant(0, 0)
.output_constant(1, 1)
.Finalize());
EXPECT_THAT(IndexTransformBuilder(2, 2)
.input_shape({0, 2})
.output_index_array(0, 0, 1, MakeArray<Index>({{2, 3}}))
.output_constant(1, 1)
.Finalize(),
::testing::Optional(expected));
}
TEST(IndexDomainBuilderTest, Null) {
IndexDomainBuilder builder(nullptr);
EXPECT_FALSE(builder.valid());
}
TEST(IndexDomainBuilderTest, Basic) {
IndexDomainBuilder builder(3);
EXPECT_EQ(3, builder.rank());
builder.origin(span<const Index, 3>({1, 2, 3}));
EXPECT_THAT(builder.origin(), ::testing::ElementsAre(1, 2, 3));
builder.shape(span<const Index, 3>({4, 5, 6}));
EXPECT_THAT(builder.shape(), ::testing::ElementsAre(4, 5, 6));
builder.exclusive_max(span<const Index, 3>({4, 5, 6}));
EXPECT_THAT(builder.exclusive_max(), ::testing::ElementsAre(4, 5, 6));
builder.inclusive_max(span<const Index, 3>({4, 5, 6}));
EXPECT_THAT(builder.inclusive_max(), ::testing::ElementsAre(4, 5, 6));
builder.implicit_lower_bounds({0, 1, 1});
builder.implicit_upper_bounds({1, 0, 1});
EXPECT_THAT(builder.implicit_lower_bounds(),
DimensionSet::FromBools({0, 1, 1}));
EXPECT_THAT(builder.implicit_upper_bounds(),
DimensionSet::FromBools({1, 0, 1}));
builder.labels(std::vector<std::string>{"x", "y", "z"});
EXPECT_THAT(builder.labels(), ::testing::ElementsAre("x", "y", "z"));
}
TEST(IndexDomainBuilderTest, Labels) {
auto d = IndexDomainBuilder(2).labels({"x", "y"}).Finalize().value();
EXPECT_THAT(d.labels(), ::testing::ElementsAre("x", "y"));
}
TEST(IndexDomainBuilderTest, InclusiveMax) {
auto d = IndexDomainBuilder(2)
.origin({1, 2})
.inclusive_max({3, 5})
.Finalize()
.value();
EXPECT_THAT(d.origin(), ::testing::ElementsAre(1, 2));
EXPECT_THAT(d.shape(), ::testing::ElementsAre(3, 4));
}
TEST(IndexDomainBuilderTest, Shape) {
auto d =
IndexDomainBuilder(2).origin({1, 2}).shape({3, 5}).Finalize().value();
EXPECT_THAT(d.origin(), ::testing::ElementsAre(1, 2));
EXPECT_THAT(d.shape(), ::testing::ElementsAre(3, 5));
}
TEST(IndexDomainBuilderTest, ExclusiveMax) {
auto d = IndexDomainBuilder(2)
.origin({1, 2})
.exclusive_max({3, 5})
.Finalize()
.value();
EXPECT_THAT(d.origin(), ::testing::ElementsAre(1, 2));
EXPECT_THAT(d.shape(), ::testing::ElementsAre(2, 3));
}
TEST(IndexDomainBuilderTest, InputDomainBox) {
auto d = IndexDomainBuilder(2)
.bounds(tensorstore::BoxView({1, 2}, {2, 3}))
.Finalize()
.value();
EXPECT_THAT(d.origin(), ::testing::ElementsAre(1, 2));
EXPECT_THAT(d.shape(), ::testing::ElementsAre(2, 3));
}
TEST(IndexDomainBuilderTest, InputDomain) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(tensorstore::IndexDomain<2> domain,
IndexDomainBuilder<2>()
.origin({1, 2})
.shape({3, 4})
.implicit_lower_bounds({0, 1})
.implicit_upper_bounds({1, 0})
.labels({"x", "y"})
.Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto d, IndexDomainBuilder<>(2).domain(domain).Finalize());
EXPECT_EQ(domain, d);
}
} |
542 | cpp | google/tensorstore | index_transform | tensorstore/proto/index_transform.cc | tensorstore/proto/index_transform_test.cc | #ifndef TENSORSTORE_PROTO_INDEX_TRANSFORM_H_
#define TENSORSTORE_PROTO_INDEX_TRANSFORM_H_
#include "absl/status/status.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/proto/index_transform.pb.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
Result<IndexTransform<>> ParseIndexTransformFromProto(
const ::tensorstore::proto::IndexTransform& proto,
DimensionIndex input_rank_constraint = dynamic_rank,
DimensionIndex output_rank_constraint = dynamic_rank);
Result<IndexDomain<>> ParseIndexDomainFromProto(
const ::tensorstore::proto::IndexDomain& proto,
DimensionIndex rank_constraint = dynamic_rank);
void EncodeToProto(::tensorstore::proto::IndexTransform& proto,
IndexTransformView<> t);
void EncodeToProto(::tensorstore::proto::IndexDomain& proto,
IndexDomainView<> d);
}
#endif
#include "tensorstore/proto/index_transform.h"
#include <algorithm>
#include <cstddef>
#include <vector>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/internal/json/array.h"
#include "tensorstore/internal/json_binding/dimension_indexed.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/json_serialization_options.h"
#include "tensorstore/proto/index_transform.pb.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/element_pointer.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
Result<IndexDomain<>> ParseIndexDomainFromProto(
const ::tensorstore::proto::IndexDomain& proto,
DimensionIndex rank_constraint) {
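  // Deduce the rank: an explicit `rank` field wins, otherwise fall back to
  // the sizes of the origin, shape, and labels fields, in that order.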
const DimensionIndex rank = [&]() -> DimensionIndex {
if (proto.has_rank()) return proto.rank();
if (proto.origin_size() > 0) return proto.origin_size();
if (proto.shape_size() > 0) return proto.shape_size();
return proto.labels_size();
}();
if (rank < 0 || rank > kMaxRank) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Expected rank to be in the range [0, ", kMaxRank,
"], but is: ", rank));
}
if (!RankConstraint::EqualOrUnspecified(rank_constraint, rank)) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Expected rank to be ", rank_constraint, ", but is: ", rank));
}
if (proto.origin_size() > 0 && proto.origin_size() != rank) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Proto origin must include ", rank, " items"));
}
if (proto.shape_size() > 0 && proto.shape_size() != rank) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Proto shape must include ", rank, " items"));
}
if (proto.labels_size() > 0 && proto.labels_size() != rank) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Proto labels must include ", rank, " items"));
}
if (proto.implicit_lower_bound_size() > 0 &&
proto.implicit_lower_bound_size() != rank) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Proto implicit_lower_bound must include ", rank, " items"));
}
if (proto.implicit_upper_bound_size() > 0 &&
proto.implicit_upper_bound_size() != rank) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Proto implicit_upper_bound must include ", rank, " items"));
}
IndexDomainBuilder builder(rank);
if (proto.origin_size() > 0) {
std::copy(proto.origin().begin(), proto.origin().end(),
builder.origin().begin());
if (proto.implicit_lower_bound_size() > 0) {
std::copy(proto.implicit_lower_bound().begin(),
proto.implicit_lower_bound().end(),
builder.implicit_lower_bounds().bools_view().begin());
}
}
if (proto.shape_size() > 0) {
std::copy(proto.shape().begin(), proto.shape().end(),
builder.shape().begin());
if (proto.implicit_upper_bound_size() > 0) {
std::copy(proto.implicit_upper_bound().begin(),
proto.implicit_upper_bound().end(),
builder.implicit_upper_bounds().bools_view().begin());
}
}
if (!proto.labels().empty()) {
std::copy(proto.labels().begin(), proto.labels().end(),
builder.labels().begin());
}
return builder.Finalize();
}
Result<IndexTransform<>> ParseIndexTransformFromProto(
const proto::IndexTransform& proto, DimensionIndex input_rank_constraint,
DimensionIndex output_rank_constraint) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto input_domain,
ParseIndexDomainFromProto(proto.input_domain(), input_rank_constraint));
const DimensionIndex rank = input_domain.rank();
const DimensionIndex output_rank = [&]() -> DimensionIndex {
if (proto.has_output_rank()) return proto.output_rank();
if (proto.output_size() == 0) return rank;
return proto.output_size();
}();
if (output_rank < 0 || output_rank > kMaxRank) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Expected output_rank to be in the range [0, ",
kMaxRank, "], but is: ", output_rank));
}
if (!RankConstraint::EqualOrUnspecified(output_rank_constraint,
output_rank)) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Expected output_rank to be ",
output_rank_constraint, ", but is: ", output_rank));
}
IndexTransformBuilder builder(rank, output_rank);
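  // An empty `output` list with output_rank == input rank denotes the
  // identity transform (mirroring the omission in EncodeToProto below).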
if (proto.output().empty() && output_rank == rank) {
builder.output_identity_transform();
} else {
if (proto.output_size() != output_rank) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Proto output expected ", output_rank, " items"));
}
for (DimensionIndex output_dim = 0; output_dim < output_rank;
++output_dim) {
const auto& output_proto = proto.output(output_dim);
if (output_proto.has_index_array()) {
const auto& array = output_proto.index_array();
std::vector<tensorstore::Index> shape(array.shape().cbegin(),
array.shape().cend());
auto a = MakeCopy(
tensorstore::Array(tensorstore::ElementPointer<const int64_t>(
&array.data()[0], dtype_v<int64_t>),
shape));
Result<IndexInterval> index_range = IndexInterval();
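        // The explicit index range is honored only when both bounds are
        // present in the proto; otherwise the array values are unconstrained.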
if (output_proto.has_index_array_inclusive_min() &&
output_proto.has_index_array_exclusive_max()) {
index_range =
IndexInterval::HalfOpen(output_proto.index_array_inclusive_min(),
output_proto.index_array_exclusive_max());
}
builder.output_index_array(output_dim, output_proto.offset(),
output_proto.stride(), a, index_range);
continue;
}
if (output_proto.stride() == 0) {
builder.output_constant(output_dim, output_proto.offset());
continue;
}
builder.output_single_input_dimension(output_dim, output_proto.offset(),
output_proto.stride(),
output_proto.input_dimension());
}
}
return builder.input_domain(input_domain).Finalize();
}
void EncodeToProto(proto::IndexDomain& proto,
IndexDomainView<> d) {
const DimensionIndex rank = d.rank();
bool all_implicit_lower = true;
bool all_implicit_upper = true;
size_t implicit_lower_count = 0;
size_t implicit_upper_count = 0;
bool has_labels = false;
for (DimensionIndex i = 0; i < rank; ++i) {
implicit_lower_count += d.implicit_lower_bounds()[i];
all_implicit_lower = all_implicit_lower && d.implicit_lower_bounds()[i] &&
(d[i].inclusive_min() == -kInfIndex);
implicit_upper_count += d.implicit_upper_bounds()[i];
all_implicit_upper = all_implicit_upper && d.implicit_upper_bounds()[i] &&
(d[i].exclusive_max() == (+kInfIndex + 1));
has_labels |= !d.labels()[i].empty();
}
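  // Compact encoding: if every bound is implicit and infinite and no
  // dimension is labeled, the domain is fully described by its rank alone;
  // otherwise emit only the per-dimension fields that carry information.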
if (all_implicit_lower && all_implicit_upper && !has_labels) {
proto.set_rank(rank);
}
for (DimensionIndex i = 0; i < rank; i++) {
if (!all_implicit_lower) {
proto.add_origin(d.origin()[i]);
if (implicit_lower_count > 0) {
proto.add_implicit_lower_bound(d.implicit_lower_bounds()[i]);
}
}
if (!all_implicit_upper) {
proto.add_shape(d.shape()[i]);
if (implicit_upper_count > 0) {
proto.add_implicit_upper_bound(d.implicit_upper_bounds()[i]);
}
}
if (has_labels) {
proto.add_labels(d.labels()[i]);
}
}
}
void EncodeToProto(proto::IndexTransform& proto,
IndexTransformView<> t) {
EncodeToProto(*proto.mutable_input_domain(), t.input_domain());
const DimensionIndex input_rank = t.input_rank();
bool all_identity = true;
for (DimensionIndex i = 0; i < t.output_rank(); ++i) {
const auto map = t.output_index_map(i);
auto* out_proto = proto.add_output();
if (map.offset() != 0) {
out_proto->set_offset(map.offset());
all_identity = false;
}
if (map.method() != OutputIndexMethod::constant) {
out_proto->set_stride(map.stride());
if (map.stride() != 1) all_identity = false;
}
switch (map.method()) {
case OutputIndexMethod::constant:
all_identity = false;
break;
case OutputIndexMethod::single_input_dimension: {
const DimensionIndex input_dim = map.input_dimension();
out_proto->set_input_dimension(input_dim);
if (input_dim != i) all_identity = false;
break;
}
case OutputIndexMethod::array: {
all_identity = false;
const auto index_array_data = map.index_array();
auto index_array =
UnbroadcastArrayPreserveRank(index_array_data.array_ref());
auto* out_array = out_proto->mutable_index_array();
for (Index size : index_array.shape()) {
out_array->add_shape(size);
}
IndexInterval index_range = index_array_data.index_range();
if (index_range != IndexInterval::Infinite() &&
!ValidateIndexArrayBounds(index_range, index_array).ok()) {
out_proto->set_index_array_inclusive_min(index_range.inclusive_min());
out_proto->set_index_array_exclusive_max(index_range.exclusive_max());
}
IterateOverArrays(
[&](const Index* value) { out_array->add_data(*value); }, c_order,
index_array);
break;
}
}
}
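  // Identity transforms round-trip as an empty output list;
  // ParseIndexTransformFromProto regenerates the identity maps on parse.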
if (all_identity) {
proto.clear_output();
}
if (t.output_rank() != input_rank && t.output_rank() == 0) {
proto.set_output_rank(t.output_rank());
}
}
} | #include "tensorstore/proto/index_transform.h"
#include <string>
#include <type_traits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/proto/index_transform.pb.h"
#include "tensorstore/proto/protobuf_matchers.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::protobuf_matchers::EqualsProto;
using ::tensorstore::DimensionIndex;
using ::tensorstore::dynamic_rank;
using ::tensorstore::EncodeToProto;
using ::tensorstore::Index;
using ::tensorstore::IndexDomainView;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransform;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::IndexTransformView;
using ::tensorstore::kInfIndex;
using ::tensorstore::MatchesStatus;
using ::tensorstore::ParseIndexDomainFromProto;
using ::tensorstore::ParseIndexTransformFromProto;
template <typename Proto>
Proto ParseProtoOrDie(const std::string& asciipb) {
return protobuf_matchers::internal::MakePartialProtoFromAscii<Proto>(asciipb);
}
IndexTransform<> MakeLabeledExampleTransform() {
return tensorstore::IndexTransformBuilder<4, 3>()
.input_origin({-kInfIndex, 7, -kInfIndex, 8})
.input_exclusive_max({kInfIndex + 1, 10, kInfIndex + 1, 17})
.implicit_lower_bounds({0, 0, 1, 1})
.implicit_upper_bounds({0, 0, 1, 1})
.input_labels({"x", "y", "z", "t"})
.output_constant(0, 3)
.output_single_input_dimension(1, 0, 2, 2)
.output_index_array(2, 7, 1,
tensorstore::MakeArray<Index>({{
{{1}},
{{2}},
{{3}},
}}))
.Finalize()
.value();
}
IndexTransform<> MakeUnlabeledExampleTransform() {
return tensorstore::IndexTransformBuilder<4, 3>()
.input_origin({-kInfIndex, 7, -kInfIndex, 8})
.input_exclusive_max({kInfIndex + 1, 10, kInfIndex + 1, 17})
.implicit_lower_bounds({0, 0, 1, 1})
.implicit_upper_bounds({0, 0, 1, 1})
.output_constant(0, 3)
.output_single_input_dimension(1, 0, 2, 2)
.output_index_array(2, 7, 1,
tensorstore::MakeArray<Index>({{
{{1}},
{{2}},
{{3}},
}}),
IndexInterval::Closed(1, 2))
.Finalize()
.value();
}
::tensorstore::proto::IndexTransform MakeUnlabeledExampleProto() {
return ParseProtoOrDie<::tensorstore::proto::IndexTransform>(R"pb(
input_domain {
# rank: 4
origin: [ -4611686018427387903, 7, -4611686018427387903, 8 ]
implicit_lower_bound: [ 0, 0, 1, 1 ]
shape: [ 9223372036854775807, 3, 9223372036854775807, 9 ]
implicit_upper_bound: [ 0, 0, 1, 1 ]
}
output { offset: 3 }
output { stride: 2 input_dimension: 2 }
output {
offset: 7
stride: 1
index_array {
shape: [ 1, 3, 1, 1 ]
data: [ 1, 2, 3 ]
}
index_array_inclusive_min: 1
index_array_exclusive_max: 3
}
)pb");
}
::tensorstore::proto::IndexTransform MakeLabeledExampleProto() {
return ParseProtoOrDie<::tensorstore::proto::IndexTransform>(R"pb(
input_domain {
# rank: 4
origin: [ -4611686018427387903, 7, -4611686018427387903, 8 ]
implicit_lower_bound: [ 0, 0, 1, 1 ]
shape: [ 9223372036854775807, 3, 9223372036854775807, 9 ]
implicit_upper_bound: [ 0, 0, 1, 1 ]
labels: "x"
labels: "y"
labels: "z"
labels: "t"
}
output { offset: 3 }
output { stride: 2 input_dimension: 2 }
output {
offset: 7
stride: 1
index_array {
shape: [ 1, 3, 1, 1 ]
data: [ 1, 2, 3 ]
}
}
)pb");
}
auto DoEncode(IndexTransformView<> t) {
::tensorstore::proto::IndexTransform proto;
EncodeToProto(proto, t);
return proto;
}
TEST(IndexTransformProtoTest, Unlabeled) {
EXPECT_THAT(DoEncode(MakeUnlabeledExampleTransform()),
EqualsProto(MakeUnlabeledExampleProto()));
EXPECT_THAT(ParseIndexTransformFromProto(MakeUnlabeledExampleProto()),
testing::Eq(MakeUnlabeledExampleTransform()));
}
TEST(IndexTransformProtoTest, Labeled) {
EXPECT_THAT(DoEncode(MakeLabeledExampleTransform()),
EqualsProto(MakeLabeledExampleProto()));
EXPECT_THAT(ParseIndexTransformFromProto(MakeLabeledExampleProto()),
testing::Eq(MakeLabeledExampleTransform()));
}
TEST(IndexTransformProtoTest, IdentityTransform) {
auto transform = tensorstore::IdentityTransform(tensorstore::BoxView({3, 4}));
auto proto = ParseProtoOrDie<::tensorstore::proto::IndexTransform>(R"pb(
input_domain {
# rank: 2
origin: [ 0, 0 ]
shape: [ 3, 4 ]
}
)pb");
EXPECT_THAT(DoEncode(transform), EqualsProto(proto));
EXPECT_THAT(ParseIndexTransformFromProto(proto), testing::Eq(transform));
}
TEST(IndexTransformProtoTest, IndexArrayOutOfBounds) {
EXPECT_THAT(
DoEncode(IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 0, 1,
tensorstore::MakeArray<Index>({1, 2, 3}))
.Finalize()
.value()),
EqualsProto(R"pb(
input_domain {
# rank: 1
origin: 0
shape: 3
}
output {
stride: 1
index_array {
shape: 3
data: [ 1, 2, 3 ]
}
}
)pb"));
EXPECT_THAT(
DoEncode(IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 0, 1,
tensorstore::MakeArray<Index>({1, 2, 3}),
IndexInterval::UncheckedClosed(1, 2))
.Finalize()
.value()),
EqualsProto(R"pb(
input_domain {
# rank: 1
origin: 0
shape: 3
}
output {
stride: 1
index_array {
shape: 3
data: [ 1, 2, 3 ]
}
index_array_inclusive_min: 1
index_array_exclusive_max: 3
}
)pb"));
EXPECT_THAT(DoEncode(IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 0, 1,
tensorstore::MakeArray<Index>(
{1, kInfIndex + 1, 3}))
.Finalize()
.value()),
EqualsProto(R"pb(
input_domain {
# rank: 1
origin: 0
shape: 3
}
output {
stride: 1
index_array {
shape: 3
data: [ 1, 4611686018427387904, 3 ]
}
}
)pb"));
EXPECT_THAT(
DoEncode(IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 0, 1,
tensorstore::MakeArray<Index>({1, 2, 3}),
IndexInterval::Closed(1, 3))
.Finalize()
.value()),
EqualsProto(R"pb(
input_domain {
# rank: 1
origin: 0
shape: 3
}
output {
stride: 1
index_array {
shape: 3
data: [ 1, 2, 3 ]
}
}
)pb"));
}
TEST(IndexTransformProtoTest, Translation) {
auto transform =
ChainResult(tensorstore::IdentityTransform(tensorstore::BoxView({3, 4})),
tensorstore::AllDims().TranslateTo({1, 2}))
.value();
auto proto = ParseProtoOrDie<::tensorstore::proto::IndexTransform>(R"pb(
input_domain {
# rank: 2
origin: [ 1, 2 ]
shape: [ 3, 4 ]
}
output { offset: -1 input_dimension: 0 stride: 1 }
output { offset: -2 input_dimension: 1 stride: 1 }
)pb");
EXPECT_THAT(DoEncode(transform), EqualsProto(proto));
EXPECT_THAT(ParseIndexTransformFromProto(proto), testing::Eq(transform));
}
TEST(IndexTransformProtoTest, Labels) {
auto transform =
ChainResult(tensorstore::IdentityTransform(tensorstore::BoxView({3, 4})),
tensorstore::AllDims().Label("x", "y"))
.value();
auto proto = ParseProtoOrDie<::tensorstore::proto::IndexTransform>(R"pb(
input_domain {
# rank: 2
origin: [ 0, 0 ]
shape: [ 3, 4 ]
labels: [ "x", "y" ]
}
)pb");
EXPECT_THAT(DoEncode(transform), EqualsProto(proto));
EXPECT_THAT(ParseIndexTransformFromProto(proto), testing::Eq(transform));
}
TEST(IndexTransformProtoTest, Rank0) {
auto transform = IndexTransformBuilder(0, 0).Finalize().value();
auto proto = ParseProtoOrDie<::tensorstore::proto::IndexTransform>(R"pb(
input_domain { rank: 0 }
)pb");
EXPECT_THAT(DoEncode(transform), EqualsProto(proto));
EXPECT_THAT(ParseIndexTransformFromProto(proto), testing::Eq(transform));
}
TEST(IndexTransformProtoTest, Rank0EmptyProto) {
::tensorstore::proto::IndexTransform proto;
EXPECT_THAT(ParseIndexTransformFromProto(proto),
testing::Eq(IndexTransformBuilder(0, 0).Finalize().value()));
}
TEST(IndexTransformProtoTest, Input1Output0) {
auto transform = IndexTransformBuilder(1, 0).Finalize().value();
auto proto = ParseProtoOrDie<::tensorstore::proto::IndexTransform>(R"pb(
input_domain { rank: 1 }
output_rank: 0
)pb");
EXPECT_THAT(DoEncode(transform), EqualsProto(proto));
EXPECT_THAT(ParseIndexTransformFromProto(proto), testing::Eq(transform));
}
TEST(IndexTransformProtoTest, LabelsOnly) {
auto proto = ParseProtoOrDie<::tensorstore::proto::IndexTransform>(
R"pb(
input_domain { labels: [ "x", "y" ] }
)pb");
EXPECT_THAT(DoEncode(ParseIndexTransformFromProto(proto).value()),
EqualsProto(proto));
}
TEST(IndexTransformProtoTest, MinOnlyNotImplicit) {
auto proto = ParseProtoOrDie<::tensorstore::proto::IndexTransform>(R"pb(
input_domain { origin: -4611686018427387903 }
)pb");
EXPECT_THAT(DoEncode(ParseIndexTransformFromProto(proto).value()),
EqualsProto(proto));
}
TEST(IndexTransformProtoTest, SingleInfiniteMaxNotImplicit) {
auto transform = IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_exclusive_max({kInfIndex + 1})
.output_identity_transform()
.Finalize()
.value();
auto proto = ParseProtoOrDie<::tensorstore::proto::IndexTransform>(R"pb(
input_domain { shape: 4611686018427387904 }
)pb");
EXPECT_THAT(ParseIndexTransformFromProto(proto), testing::Eq(transform));
EXPECT_THAT(DoEncode(transform), EqualsProto(R"pb(
input_domain { origin: 0 shape: 4611686018427387904 }
)pb"));
}
TEST(IndexTransformProtoTest, IdentityTransformWithInf) {
auto transform = IndexTransformBuilder<>(2, 2)
.input_origin({1, 2})
.input_exclusive_max({5, kInfIndex + 1})
.output_identity_transform()
.Finalize()
.value();
auto proto = ParseProtoOrDie<::tensorstore::proto::IndexTransform>(R"pb(
input_domain {
origin: [ 1, 2 ]
shape: [ 4, 4611686018427387902 ]
}
)pb");
EXPECT_THAT(DoEncode(transform), EqualsProto(proto));
EXPECT_THAT(ParseIndexTransformFromProto(proto), testing::Eq(transform));
}
TEST(IndexTransformProtoTest, BadOutputRank) {
auto proto = ParseProtoOrDie<::tensorstore::proto::IndexTransform>(R"pb(
input_domain {
origin: [ 1, 2 ]
shape: [ 4, 5 ]
}
output_rank: 1
)pb");
EXPECT_THAT(ParseIndexTransformFromProto(proto),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(IndexTransformProtoTest, RankMismatch) {
EXPECT_THAT(ParseIndexTransformFromProto(MakeLabeledExampleProto(), 3),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected rank to be 3, but is: 4"));
}
TEST(IndexTransformProtoTest, MissingInputRank) {
auto proto = ParseProtoOrDie<::tensorstore::proto::IndexTransform>(R"pb(
output { offset: 3 stride: 1 }
output { stride: 2 input_dimension: 1 }
)pb");
EXPECT_THAT(ParseIndexTransformFromProto(proto),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Input dimension 0 specified for output dimension "
"0 is outside valid range .*"));
}
TEST(IndexTransformProtoTest, InvalidShape) {
auto proto = ParseProtoOrDie<::tensorstore::proto::IndexTransform>(R"pb(
input_domain {
origin: [ 1, 2 ]
shape: [ 3, 4, 5 ]
}
)pb");
EXPECT_THAT(ParseIndexTransformFromProto(proto),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(IndexTransformProtoTest, MissingOutputs) {
auto proto = ParseProtoOrDie<::tensorstore::proto::IndexTransform>(R"pb(
input_domain {
origin: [ 1, 2 ]
shape: [ 3, 4 ]
}
)pb");
EXPECT_THAT(ParseIndexTransformFromProto(proto, dynamic_rank, 2),
testing::Eq(tensorstore::IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({3, 4})
.output_identity_transform()
.Finalize()
.value()));
EXPECT_THAT(ParseIndexTransformFromProto(proto, dynamic_rank, 3),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected output_rank to be 3, but is: 2"));
}
TEST(IndexTransformProtoTest, DuplicateLabels) {
auto proto = ParseProtoOrDie<::tensorstore::proto::IndexTransform>(R"pb(
input_domain {
origin: [ 1, 2 ]
labels: [ "a", "a" ]
}
)pb");
EXPECT_THAT(ParseIndexTransformFromProto(proto),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Dimension label.*not unique"));
}
auto DoEncode(IndexDomainView<> t) {
::tensorstore::proto::IndexDomain proto;
EncodeToProto(proto, t);
return proto;
}
TEST(IndexDomainProtoTest, Simple) {
auto domain = tensorstore::IndexDomainBuilder<4>()
.origin({-kInfIndex, 7, -kInfIndex, 8})
.exclusive_max({kInfIndex + 1, 10, kInfIndex + 1, 17})
.implicit_lower_bounds({0, 0, 1, 1})
.implicit_upper_bounds({0, 0, 1, 1})
.labels({"x", "y", "z", "t"})
.Finalize()
.value();
auto proto = ParseProtoOrDie<::tensorstore::proto::IndexDomain>(R"pb(
# rank: 4
origin: [ -4611686018427387903, 7, -4611686018427387903, 8 ]
implicit_upper_bound: [ 0, 0, 1, 1 ]
shape: [ 9223372036854775807, 3, 9223372036854775807, 9 ]
implicit_lower_bound: [ 0, 0, 1, 1 ]
labels: [ "x", "y", "z", "t" ]
)pb");
EXPECT_THAT(DoEncode(domain), EqualsProto(proto));
EXPECT_THAT(ParseIndexDomainFromProto(proto), testing::Eq(domain));
}
TEST(IndexDomainProtoTest, NoImplicit) {
auto domain = tensorstore::IndexDomainBuilder<3>()
.origin({1, 2, 3})
.exclusive_max({100, 200, 300})
.labels({"x", "y", "z"})
.Finalize()
.value();
auto proto = ParseProtoOrDie<::tensorstore::proto::IndexDomain>(R"pb(
# rank: 3
origin: [ 1, 2, 3 ]
shape: [ 99, 198, 297 ]
labels: [ "x", "y", "z" ]
)pb");
EXPECT_THAT(DoEncode(domain), EqualsProto(proto));
EXPECT_THAT(ParseIndexDomainFromProto(proto), testing::Eq(domain));
}
TEST(IndexDomainProtoTest, Errors) {
EXPECT_THAT(ParseIndexDomainFromProto(
ParseProtoOrDie<::tensorstore::proto::IndexDomain>(R"pb(
rank: 33
)pb")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected rank .*: 33"));
EXPECT_THAT(ParseIndexDomainFromProto(
ParseProtoOrDie<::tensorstore::proto::IndexDomain>(R"pb(
origin: [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
]
)pb")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected rank .*: 34"));
EXPECT_THAT(ParseIndexDomainFromProto(
ParseProtoOrDie<::tensorstore::proto::IndexDomain>(R"pb(
shape: [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
]
)pb")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected rank .*: 34"));
EXPECT_THAT(ParseIndexDomainFromProto(
ParseProtoOrDie<::tensorstore::proto::IndexDomain>(R"pb(
labels: [
"", "", "", "", "", "", "", "", "", "", "",
"", "", "", "", "", "", "", "", "", "", "",
"", "", "", "", "", "", "", "", "", "", ""
]
)pb")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected rank .*: 33"));
EXPECT_THAT(ParseIndexDomainFromProto(
ParseProtoOrDie<::tensorstore::proto::IndexDomain>(R"pb(
origin: [ 1, 2, 3 ]
implicit_lower_bound: [ 1 ]
)pb")),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseIndexDomainFromProto(
ParseProtoOrDie<::tensorstore::proto::IndexDomain>(R"pb(
shape: [ 1, 2, 3 ]
implicit_upper_bound: [ 1 ]
)pb")),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
} |
543 | cpp | google/tensorstore | alignment | tensorstore/index_space/alignment.cc | tensorstore/index_space/alignment_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_ALIGNMENT_H_
#define TENSORSTORE_INDEX_SPACE_ALIGNMENT_H_
#include "absl/status/status.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
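// Bitmask of alignment mechanisms that AlignDomainTo/AlignDimensionsTo may
// use: dimension permutation by label, origin translation, and broadcasting
// of size-1 or missing dimensions. `all` (== permute | translate | broadcast)
// enables everything.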
enum class DomainAlignmentOptions {
none = 0,
permute = 1,
translate = 2,
broadcast = 4,
all = 7,
};
constexpr inline DomainAlignmentOptions operator|(DomainAlignmentOptions a,
DomainAlignmentOptions b) {
return static_cast<DomainAlignmentOptions>(static_cast<int>(a) |
static_cast<int>(b));
}
constexpr inline DomainAlignmentOptions operator&(DomainAlignmentOptions a,
DomainAlignmentOptions b) {
return static_cast<DomainAlignmentOptions>(static_cast<int>(a) &
static_cast<int>(b));
}
constexpr inline bool operator!(DomainAlignmentOptions a) {
return !static_cast<int>(a);
}
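// Computes a transform whose input domain is `target` and whose outputs
// index into `source`, using only the mechanisms permitted by `options`.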
Result<IndexTransform<>> AlignDomainTo(
IndexDomainView<> source, IndexDomainView<> target,
DomainAlignmentOptions options = DomainAlignmentOptions::all);
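// Fills `source_matches[i]` with the target dimension matched to source
// dimension `i`, or -1 if that dimension must be broadcast. Illustrative
// example (not from the original source): with source labels {"x", "y"} and
// target labels {"z", "y", "x"}, label matching yields
// source_matches = {2, 1}.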
absl::Status AlignDimensionsTo(
IndexDomainView<> source, IndexDomainView<> target,
span<DimensionIndex> source_matches,
DomainAlignmentOptions options = DomainAlignmentOptions::all);
Result<IndexTransform<>> AlignTransformTo(
IndexTransform<> source_transform, IndexDomainView<> target,
DomainAlignmentOptions options = DomainAlignmentOptions::all);
}
#endif
#include "tensorstore/index_space/alignment.h"
#include <algorithm>
#include <cassert>
#include <numeric>
#include "absl/status/status.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
absl::Status AlignDimensionsTo(IndexDomainView<> source,
IndexDomainView<> target,
span<DimensionIndex> source_matches,
DomainAlignmentOptions options) {
assert(source.valid());
assert(target.valid());
const DimensionIndex source_rank = source.rank();
const DimensionIndex target_rank = target.rank();
if (!(options & DomainAlignmentOptions::broadcast) &&
source_rank != target_rank) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Aligning source domain of rank ", source_rank,
" to target domain of rank ", target_rank, " requires broadcasting"));
}
assert(source_matches.size() == source_rank);
const auto source_labels = source.labels();
const auto target_labels = target.labels();
if (!(options & DomainAlignmentOptions::permute) ||
internal_index_space::IsUnlabeled(source_labels) ||
internal_index_space::IsUnlabeled(target_labels)) {
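    // Positional matching: align the trailing dimensions of source and
    // target; any leading source dimensions beyond the match are left
    // unmatched (-1).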
const DimensionIndex match_rank = std::min(source_rank, target_rank);
const DimensionIndex source_match_start = source_rank - match_rank;
const DimensionIndex target_match_start = target_rank - match_rank;
std::fill_n(source_matches.begin(), source_match_start, DimensionIndex(-1));
std::iota(source_matches.begin() + source_match_start, source_matches.end(),
target_match_start);
} else {
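    // Label-based matching, scanning from the last dimension backwards: a
    // labeled source dimension matches the target dimension with the same
    // label, while unlabeled source dimensions consume unlabeled target
    // dimensions right-to-left.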
DimensionIndex next_potentially_unlabeled_target = target_rank - 1;
for (DimensionIndex i = source_rank - 1; i >= 0; --i) {
std::string_view source_label = source_labels[i];
DimensionIndex j;
if (source_label.empty()) {
while (true) {
if (next_potentially_unlabeled_target < 0) {
j = -1;
break;
}
if (target_labels[next_potentially_unlabeled_target].empty()) {
j = next_potentially_unlabeled_target--;
break;
}
--next_potentially_unlabeled_target;
}
} else {
for (j = target_rank - 1; j >= 0; --j) {
if (target_labels[j] == source_label) break;
}
}
source_matches[i] = j;
}
}
std::string mismatch_error;
const auto source_shape = source.shape();
const auto target_shape = target.shape();
for (DimensionIndex i = 0; i < source_rank; ++i) {
DimensionIndex& j = source_matches[i];
const DimensionIndex source_size = source_shape[i];
if (j != -1) {
if (!(options & DomainAlignmentOptions::translate)
? source[i] != target[j]
: source_size != target_shape[j]) {
if (!(options & DomainAlignmentOptions::broadcast) ||
source_size != 1) {
tensorstore::StrAppend(&mismatch_error, "source dimension ", i, " ",
source[i], " mismatch with target dimension ",
j, " ", target[j], ", ");
}
j = -1;
}
} else {
if (!(options & DomainAlignmentOptions::broadcast)) {
tensorstore::StrAppend(&mismatch_error, "unmatched source dimension ",
i, " ", source[i], ", ");
}
if (source_size != 1) {
tensorstore::StrAppend(&mismatch_error, "unmatched source dimension ",
i, " ", source[i],
" does not have a size of 1, ");
}
}
}
if (!mismatch_error.empty()) {
mismatch_error.resize(mismatch_error.size() - 2);
return absl::InvalidArgumentError(
tensorstore::StrCat("Error aligning dimensions: ", mismatch_error));
}
return absl::OkStatus();
}
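// Editorial note (added commentary, not part of the original source): on
// success, source_matches[i] == j means source dimension i was matched to
// target dimension j, and source_matches[i] == -1 means source dimension i is
// broadcast (it must then have size 1).  For example, aligning an unlabeled
// source {[3, 7), [5, 6), [4, 10)} to target {[2, 6), [0, 4), [6, 12)} with
// the default options yields matches {0, -1, 2}.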
Result<IndexTransform<>> AlignDomainTo(IndexDomainView<> source,
IndexDomainView<> target,
DomainAlignmentOptions options) {
using internal_index_space::TransformAccess;
assert(source.valid());
assert(target.valid());
const DimensionIndex source_rank = source.rank();
DimensionIndex source_matches[kMaxRank];
TENSORSTORE_RETURN_IF_ERROR(AlignDimensionsTo(
source, target, span(source_matches).first(source_rank), options));
const DimensionIndex target_rank = target.rank();
auto alignment =
internal_index_space::TransformRep::Allocate(target_rank, source_rank);
CopyTransformRepDomain(TransformAccess::rep(target), alignment.get());
alignment->output_rank = source_rank;
const auto maps = alignment->output_index_maps();
span<const Index> source_origin = source.origin();
span<const Index> target_origin = target.origin();
for (DimensionIndex i = 0; i < source_rank; ++i) {
auto& map = maps[i];
const DimensionIndex j = source_matches[i];
const Index source_origin_value = source_origin[i];
if (j == -1) {
map.SetConstant();
map.offset() = source_origin_value;
map.stride() = 0;
} else {
map.SetSingleInputDimension(j);
map.offset() = source_origin_value - target_origin[j];
map.stride() = 1;
}
}
internal_index_space::DebugCheckInvariants(alignment.get());
return TransformAccess::Make<IndexTransform<>>(std::move(alignment));
}
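// Editorial note (added commentary): in the transform built above, each
// matched source dimension i becomes a single_input_dimension output map from
// target dimension j with stride 1 and offset
// source_origin[i] - target_origin[j], while each broadcast source dimension
// becomes a constant output map fixed at source_origin[i].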
Result<IndexTransform<>> AlignTransformTo(IndexTransform<> source_transform,
IndexDomainView<> target,
DomainAlignmentOptions options) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto alignment,
AlignDomainTo(source_transform.domain(), target, options));
return ComposeTransforms(source_transform, alignment);
}
} | #include "tensorstore/index_space/alignment.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::Index;
using ::tensorstore::IndexDomain;
using ::tensorstore::IndexDomainBuilder;
using ::tensorstore::IndexTransform;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::MatchesStatus;
using Dao = tensorstore::DomainAlignmentOptions;
TEST(AlignDimensionsToTest, AllUnlabeled) {
auto source = IndexDomainBuilder(3)
.origin({3, 5, 4})
.exclusive_max({7, 6, 10})
.Finalize()
.value();
auto target = IndexDomainBuilder(3)
.origin({2, 0, 6})
.exclusive_max({6, 4, 12})
.Finalize()
.value();
for (auto options : {Dao::all, Dao::translate | Dao::broadcast}) {
std::vector<DimensionIndex> source_matches(source.rank());
EXPECT_EQ(absl::OkStatus(),
AlignDimensionsTo(source, target, source_matches, options));
EXPECT_THAT(source_matches, ::testing::ElementsAre(0, -1, 2));
}
{
std::vector<DimensionIndex> source_matches(source.rank());
EXPECT_EQ(absl::OkStatus(),
AlignDimensionsTo(source, source, source_matches, Dao::none));
EXPECT_THAT(source_matches, ::testing::ElementsAre(0, 1, 2));
}
{
std::vector<DimensionIndex> source_matches(source.rank());
EXPECT_THAT(
AlignDimensionsTo(source, target, source_matches, Dao::translate),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error aligning dimensions: "
"source dimension 1 \\[5, 6\\) mismatch with target "
"dimension 1 \\[0, 4\\)"));
}
{
std::vector<DimensionIndex> source_matches(source.rank());
EXPECT_THAT(
AlignDimensionsTo(source, target, source_matches, Dao::broadcast),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error aligning dimensions: "
"source dimension 0 \\[3, 7\\) mismatch with target "
"dimension 0 \\[2, 6\\), "
"source dimension 2 \\[4, 10\\) mismatch with target "
"dimension 2 \\[6, 12\\)"));
}
}
TEST(AlignDimensionsToTest, MismatchedLabelsNoPermute) {
auto source = IndexDomainBuilder(3)
.labels({"x", "y", "z"})
.origin({3, 5, 4})
.exclusive_max({7, 6, 10})
.Finalize()
.value();
auto target = IndexDomainBuilder(3)
.labels({"a", "b", "c"})
.origin({2, 0, 6})
.exclusive_max({6, 4, 12})
.Finalize()
.value();
std::vector<DimensionIndex> source_matches(source.rank());
EXPECT_EQ(absl::OkStatus(),
AlignDimensionsTo(source, target, source_matches,
Dao::translate | Dao::broadcast));
EXPECT_THAT(source_matches, ::testing::ElementsAre(0, -1, 2));
EXPECT_THAT(AlignDimensionsTo(source, target, source_matches, Dao::all),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error aligning dimensions: "
"unmatched source dimension 0 \"x\": \\[3, 7\\) "
"does not have a size of 1, "
"unmatched source dimension 2 \"z\": \\[4, 10\\) "
"does not have a size of 1"));
}
TEST(AlignDimensionsToTest, SourceUnlabeled) {
auto source = IndexDomainBuilder(3)
.origin({3, 5, 4})
.exclusive_max({7, 6, 10})
.Finalize()
.value();
auto target = IndexDomainBuilder(3)
.origin({4, 0, 6})
.labels({"x", "y", "z"})
.exclusive_max({8, 4, 12})
.Finalize()
.value();
for (auto options : {Dao::all, Dao::translate | Dao::broadcast}) {
std::vector<DimensionIndex> source_matches(source.rank());
EXPECT_EQ(absl::OkStatus(),
AlignDimensionsTo(source, target, source_matches, options));
EXPECT_THAT(source_matches, ::testing::ElementsAre(0, -1, 2));
}
}
TEST(AlignDimensionsToTest, TargetUnlabeled) {
auto source = IndexDomainBuilder(3)
.origin({3, 5, 4})
.labels({"x", "y", "z"})
.exclusive_max({7, 6, 10})
.Finalize()
.value();
auto target = IndexDomainBuilder(3)
.origin({4, 0, 6})
.exclusive_max({8, 4, 12})
.Finalize()
.value();
for (auto options : {Dao::all, Dao::translate | Dao::broadcast}) {
std::vector<DimensionIndex> source_matches(source.rank());
EXPECT_EQ(absl::OkStatus(),
AlignDimensionsTo(source, target, source_matches, options));
EXPECT_THAT(source_matches, ::testing::ElementsAre(0, -1, 2));
}
}
TEST(AlignDimensionsToTest, AllLabeled) {
auto source = IndexDomainBuilder(3)
.labels({"x", "y", "z"})
.origin({3, 5, 4})
.exclusive_max({7, 6, 10})
.Finalize()
.value();
auto target = IndexDomainBuilder(3)
.labels({"z", "x", "y"})
.origin({6, 4, 0})
.exclusive_max({12, 8, 4})
.Finalize()
.value();
std::vector<DimensionIndex> source_matches(source.rank());
EXPECT_EQ(absl::OkStatus(),
AlignDimensionsTo(source, target, source_matches));
EXPECT_THAT(source_matches, ::testing::ElementsAre(1, -1, 0));
}
TEST(AlignDimensionsToTest, AllLabeledPermuteOnly) {
auto source = IndexDomainBuilder(3)
.labels({"x", "y", "z"})
.origin({3, 5, 4})
.exclusive_max({7, 6, 10})
.Finalize()
.value();
auto target = IndexDomainBuilder(3)
.labels({"z", "x", "y"})
.origin({4, 3, 5})
.exclusive_max({10, 7, 6})
.Finalize()
.value();
for (auto options : {Dao::permute, Dao::permute | Dao::translate,
Dao::permute | Dao::broadcast, Dao::all}) {
std::vector<DimensionIndex> source_matches(source.rank());
EXPECT_EQ(absl::OkStatus(),
AlignDimensionsTo(source, target, source_matches, options));
EXPECT_THAT(source_matches, ::testing::ElementsAre(1, 2, 0));
}
for (auto options : {Dao::none, Dao::translate, Dao::broadcast,
Dao::translate | Dao::broadcast}) {
std::vector<DimensionIndex> source_matches(source.rank());
EXPECT_THAT(AlignDimensionsTo(source, target, source_matches, options),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error aligning dimensions: .*"));
}
}
TEST(AlignDimensionsToTest, AllLabeledPermuteTranslateOnly) {
auto source = IndexDomainBuilder(3)
.labels({"x", "y", "z"})
.origin({3, 5, 4})
.exclusive_max({7, 9, 10})
.Finalize()
.value();
auto target = IndexDomainBuilder(3)
.labels({"z", "x", "y"})
.origin({6, 4, 0})
.exclusive_max({12, 8, 4})
.Finalize()
.value();
std::vector<DimensionIndex> source_matches(source.rank());
EXPECT_EQ(absl::OkStatus(),
AlignDimensionsTo(source, target, source_matches));
EXPECT_THAT(source_matches, ::testing::ElementsAre(1, 2, 0));
}
TEST(AlignDimensionsToTest, PartiallyLabeled) {
auto source = IndexDomainBuilder(3)
.labels({"x", "y", ""})
.origin({3, 5, 4})
.exclusive_max({7, 6, 10})
.Finalize()
.value();
auto target = IndexDomainBuilder(4)
.labels({"", "", "x", "y"})
.origin({0, 6, 4, 0})
.exclusive_max({10, 12, 8, 4})
.Finalize()
.value();
std::vector<DimensionIndex> source_matches(source.rank());
EXPECT_EQ(absl::OkStatus(),
AlignDimensionsTo(source, target, source_matches));
EXPECT_THAT(source_matches, ::testing::ElementsAre(2, -1, 1));
EXPECT_THAT(AlignDimensionsTo(source, target, source_matches, Dao::none),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Aligning source domain of rank 3 to target "
"domain of rank 4 requires broadcasting"));
}
TEST(AlignDomainToTest, PartiallyLabeled) {
auto source = IndexDomainBuilder(3)
.labels({"x", "y", ""})
.origin({3, 5, 4})
.exclusive_max({7, 6, 10})
.Finalize()
.value();
auto target = IndexDomainBuilder(4)
.labels({"", "", "x", "y"})
.origin({0, 6, 4, 0})
.exclusive_max({10, 12, 8, 4})
.Finalize()
.value();
IndexTransform<> alignment = IndexTransformBuilder<>(4, 3)
.input_domain(target)
.output_single_input_dimension(0, -1, 1, 2)
.output_constant(1, 5)
.output_single_input_dimension(2, -2, 1, 1)
.Finalize()
.value();
EXPECT_EQ(alignment, AlignDomainTo(source, target));
}
TEST(AlignDimensionsToTest, BroadcastOnly) {
auto source = IndexDomainBuilder(2)
.origin({2, 3})
.exclusive_max({5, 6})
.Finalize()
.value();
auto target = IndexDomainBuilder(3)
.origin({1, 2, 3})
.exclusive_max({4, 5, 6})
.Finalize()
.value();
for (auto options : {Dao::broadcast, Dao::broadcast | Dao::translate,
Dao::broadcast | Dao::permute, Dao::all}) {
std::vector<DimensionIndex> source_matches(source.rank());
EXPECT_EQ(absl::OkStatus(),
AlignDimensionsTo(source, target, source_matches, options));
EXPECT_THAT(source_matches, ::testing::ElementsAre(1, 2));
}
for (auto options : {Dao::none, Dao::permute, Dao::translate,
Dao::permute | Dao::translate}) {
std::vector<DimensionIndex> source_matches(source.rank());
EXPECT_THAT(AlignDimensionsTo(source, target, source_matches, options),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Aligning source domain of rank 2 to target "
"domain of rank 3 requires broadcasting"));
}
}
TEST(AlignDimensionsToTest, PermuteAndBroadcast) {
auto source = IndexDomainBuilder(2)
.origin({2, 3})
.exclusive_max({5, 4})
.labels({"x", "y"})
.Finalize()
.value();
auto target = IndexDomainBuilder(2)
.origin({2, 5})
.exclusive_max({5, 10})
.labels({"x", "z"})
.Finalize()
.value();
for (auto options : {Dao::permute | Dao::broadcast, Dao::all}) {
std::vector<DimensionIndex> source_matches(source.rank());
EXPECT_EQ(absl::OkStatus(),
AlignDimensionsTo(source, target, source_matches, options));
EXPECT_THAT(source_matches, ::testing::ElementsAre(0, -1));
}
for (auto options : {Dao::permute, Dao::permute | Dao::translate}) {
std::vector<DimensionIndex> source_matches(source.rank());
EXPECT_THAT(
AlignDimensionsTo(source, target, source_matches, options),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error aligning dimensions: "
"unmatched source dimension 1 \"y\": \\[3, 4\\)"));
}
}
TEST(AlignDimensionsToTest, UnmatchedUnlabeledSourceDimension) {
auto source = IndexDomainBuilder(4)
.labels({"x", "y", "", ""})
.origin({3, 5, 7, 4})
.exclusive_max({7, 9, 8, 10})
.Finalize()
.value();
auto target = IndexDomainBuilder(3)
.labels({"", "x", "y"})
.origin({0, 4, 0})
.exclusive_max({6, 8, 4})
.Finalize()
.value();
std::vector<DimensionIndex> source_matches(source.rank());
EXPECT_EQ(absl::OkStatus(),
AlignDimensionsTo(source, target, source_matches));
EXPECT_THAT(source_matches, ::testing::ElementsAre(1, 2, -1, 0));
}
TEST(AlignDimensionsToTest, MismatchedLabeled) {
auto source = IndexDomainBuilder(3)
.labels({"x", "y", "z"})
.origin({3, 5, 4})
.exclusive_max({7, 6, 10})
.Finalize()
.value();
auto target = IndexDomainBuilder(3)
.labels({"z", "w", "y"})
.origin({6, 4, 0})
.exclusive_max({12, 8, 4})
.Finalize()
.value();
std::vector<DimensionIndex> source_matches(source.rank());
EXPECT_THAT(AlignDimensionsTo(source, target, source_matches),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error aligning dimensions: "
"unmatched source dimension 0 \"x\": \\[3, 7\\) "
"does not have a size of 1"));
}
TEST(AlignDomainToTest, MismatchedLabeled) {
auto source = IndexDomainBuilder(3)
.labels({"x", "y", "z"})
.origin({3, 5, 4})
.exclusive_max({7, 6, 10})
.Finalize()
.value();
auto target = IndexDomainBuilder(3)
.labels({"z", "w", "y"})
.origin({6, 4, 0})
.exclusive_max({12, 8, 4})
.Finalize()
.value();
EXPECT_THAT(AlignDomainTo(source, target),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(AlignDimensionsToTest, MismatchedSizeLabeled) {
auto source = IndexDomainBuilder(3)
.labels({"x", "y", "z"})
.origin({3, 5, 4})
.exclusive_max({7, 7, 10})
.Finalize()
.value();
auto target = IndexDomainBuilder(3)
.labels({"z", "x", "y"})
.origin({6, 4, 0})
.exclusive_max({12, 8, 4})
.Finalize()
.value();
std::vector<DimensionIndex> source_matches(source.rank());
EXPECT_THAT(AlignDimensionsTo(source, target, source_matches),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error aligning dimensions: "
"source dimension 1 \"y\": \\[5, 7\\) mismatch "
"with target dimension 2 \"y\": \\[0, 4\\)"));
}
TEST(AlignDimensionsToTest, MismatchedSizeUnlabeled) {
auto source = IndexDomainBuilder(3)
.origin({3, 5, 4})
.exclusive_max({7, 7, 10})
.Finalize()
.value();
auto target = IndexDomainBuilder(3)
.origin({4, 0, 6})
.exclusive_max({8, 4, 12})
.Finalize()
.value();
std::vector<DimensionIndex> source_matches(source.rank());
EXPECT_THAT(AlignDimensionsTo(source, target, source_matches),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error aligning dimensions: "
"source dimension 1 \\[5, 7\\) mismatch with "
"target dimension 1 \\[0, 4\\)"));
}
} |
544 | cpp | google/tensorstore | dimension_identifier | tensorstore/index_space/dimension_identifier.cc | tensorstore/index_space/dimension_identifier_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_DIMENSION_IDENTIFIER_H_
#define TENSORSTORE_INDEX_SPACE_DIMENSION_IDENTIFIER_H_
#include <cassert>
#include <cstddef>
#include <limits>
#include <optional>
#include <ostream>
#include <string>
#include <string_view>
#include <variant>
#include "absl/status/status.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dimension_index_buffer.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
class DimensionIdentifier {
public:
DimensionIdentifier() = default;
constexpr DimensionIdentifier(DimensionIndex index) : index_(index) {}
constexpr DimensionIdentifier(int index) : index_(index) {}
constexpr DimensionIdentifier(std::string_view label) : label_(label) {
assert(label.data() != nullptr);
}
DimensionIdentifier(const std::string& label) : label_(label) {}
constexpr DimensionIdentifier(const char* label) : label_(label) {
assert(label != nullptr);
}
DimensionIdentifier(std::nullptr_t) = delete;
constexpr DimensionIndex index() const { return index_; }
constexpr std::string_view label() const { return label_; }
friend bool operator==(const DimensionIdentifier& a,
const DimensionIdentifier& b) {
return a.index_ == b.index_ && a.label_ == b.label_;
}
friend bool operator!=(const DimensionIdentifier& a,
const DimensionIdentifier& b) {
return !(a == b);
}
friend std::ostream& operator<<(std::ostream& os,
const DimensionIdentifier& x);
private:
DimensionIndex index_ = std::numeric_limits<DimensionIndex>::max();
std::string_view label_;
};
Result<DimensionIndex> NormalizeDimensionIndex(DimensionIndex index,
DimensionIndex rank);
Result<DimensionIndex> NormalizeDimensionLabel(std::string_view label,
span<const std::string> labels);
Result<DimensionIndex> NormalizeDimensionLabel(
std::string_view label, span<const std::string_view> labels);
Result<DimensionIndex> NormalizeDimensionIdentifier(
DimensionIdentifier identifier, span<const std::string> labels);
struct DimRangeSpec {
std::optional<DimensionIndex> inclusive_start;
std::optional<DimensionIndex> exclusive_stop;
DimensionIndex step = 1;
friend std::ostream& operator<<(std::ostream& os, const DimRangeSpec& spec);
friend bool operator==(const DimRangeSpec& a, const DimRangeSpec& b);
friend bool operator!=(const DimRangeSpec& a, const DimRangeSpec& b) {
return !(a == b);
}
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.inclusive_start, x.exclusive_stop, x.step);
};
};
absl::Status NormalizeDimRangeSpec(const DimRangeSpec& spec,
DimensionIndex rank,
DimensionIndexBuffer* result);
using DynamicDimSpec = std::variant<DimensionIndex, std::string, DimRangeSpec>;
absl::Status NormalizeDynamicDimSpec(const DynamicDimSpec& spec,
span<const std::string> labels,
DimensionIndexBuffer* result);
absl::Status NormalizeDynamicDimSpecs(span<const DynamicDimSpec> specs,
span<const std::string> labels,
DimensionIndexBuffer* result);
}
#endif
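// Editorial usage sketch (added commentary, not part of the original source):
// DimRangeSpec follows Python-style slice semantics over dimension indices.
// For a rank-5 index space:
//
//     DimRangeSpec{1, 4, 1}                         // dimensions 1, 2, 3
//     DimRangeSpec{std::nullopt, std::nullopt, -1}  // dimensions 4, 3, 2, 1, 0
//
// Negative scalar indices count from the end, so an index of -1 normalizes
// to 4.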
#include "tensorstore/index_space/dimension_identifier.h"
#include <algorithm>
#include <cassert>
#include <ostream>
#include <string>
#include <system_error>
#include <variant>
#include "absl/status/status.h"
#include "absl/strings/str_join.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dimension_index_buffer.h"
#include "tensorstore/util/division.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
std::ostream& operator<<(std::ostream& os, const DimensionIdentifier& x) {
if (x.label().data()) {
return os << QuoteString(x.label());
}
return os << x.index();
}
Result<DimensionIndex> NormalizeDimensionIndex(DimensionIndex index,
DimensionIndex rank) {
assert(rank >= 0);
if (index < -rank || index >= rank) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Dimension index ", index, " is outside valid range [-", rank, ", ",
rank, ")"));
}
return index >= 0 ? index : index + rank;
}
Result<DimensionIndex> NormalizeDimensionExclusiveStopIndex(
DimensionIndex index, DimensionIndex rank) {
assert(rank >= 0);
if (index < -rank - 1 || index > rank) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Dimension exclusive stop index ", index, " is outside valid range [-",
rank + 1, ", ", rank, "]"));
}
return index >= 0 ? index : index + rank;
}
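// Editorial note (added commentary): unlike a regular dimension index, an
// exclusive stop index for rank 5 may range over [-6, 5]; for example, a stop
// of -6 normalizes to -1, which lets a negative-step range extend through
// dimension 0.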
namespace {
template <typename Label>
Result<DimensionIndex> NormalizeDimensionLabelImpl(std::string_view label,
span<const Label> labels) {
if (label.empty()) {
return absl::InvalidArgumentError(
"Dimension cannot be specified by empty label");
}
const DimensionIndex dim =
std::find(labels.begin(), labels.end(), label) - labels.begin();
if (dim == labels.size()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Label ", QuoteString(label), " does not match one of {",
absl::StrJoin(labels, ", ",
[](std::string* out, std::string_view x) {
*out += QuoteString(x);
}),
"}"));
}
return dim;
}
}
Result<DimensionIndex> NormalizeDimensionLabel(std::string_view label,
span<const std::string> labels) {
return NormalizeDimensionLabelImpl(label, labels);
}
Result<DimensionIndex> NormalizeDimensionLabel(
std::string_view label, span<const std::string_view> labels) {
return NormalizeDimensionLabelImpl(label, labels);
}
Result<DimensionIndex> NormalizeDimensionIdentifier(
DimensionIdentifier identifier, span<const std::string> labels) {
if (identifier.label().data()) {
return NormalizeDimensionLabel(identifier.label(), labels);
} else {
return NormalizeDimensionIndex(identifier.index(), labels.size());
}
}
std::ostream& operator<<(std::ostream& os, const DimRangeSpec& spec) {
if (spec.inclusive_start) os << *spec.inclusive_start;
os << ':';
if (spec.exclusive_stop) os << *spec.exclusive_stop;
if (spec.step != 1) os << ':' << spec.step;
return os;
}
bool operator==(const DimRangeSpec& a, const DimRangeSpec& b) {
return a.inclusive_start == b.inclusive_start &&
a.exclusive_stop == b.exclusive_stop && a.step == b.step;
}
absl::Status NormalizeDimRangeSpec(const DimRangeSpec& spec,
DimensionIndex rank,
DimensionIndexBuffer* result) {
const DimensionIndex step = spec.step;
if (step == 0) {
return absl::InvalidArgumentError("step must not be 0");
}
DimensionIndex inclusive_start;
if (spec.inclusive_start) {
TENSORSTORE_ASSIGN_OR_RETURN(
inclusive_start, NormalizeDimensionIndex(*spec.inclusive_start, rank));
} else if (step > 0) {
inclusive_start = 0;
} else {
inclusive_start = rank - 1;
}
DimensionIndex exclusive_stop;
if (spec.exclusive_stop) {
TENSORSTORE_ASSIGN_OR_RETURN(
exclusive_stop,
NormalizeDimensionExclusiveStopIndex(*spec.exclusive_stop, rank));
if ((step > 0 && exclusive_stop < inclusive_start) ||
(step < 0 && exclusive_stop > inclusive_start)) {
return absl::InvalidArgumentError(
tensorstore::StrCat(spec, " is not a valid range"));
}
} else if (step > 0) {
exclusive_stop = rank;
} else {
exclusive_stop = -1;
}
const DimensionIndex size =
CeilOfRatio(exclusive_stop - inclusive_start, step);
result->reserve(result->size() + size);
for (DimensionIndex i = 0; i < size; ++i) {
result->push_back(inclusive_start + step * i);
}
return absl::OkStatus();
}
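// Editorial worked example (added commentary): for
// NormalizeDimRangeSpec(DimRangeSpec{9, 1, -2}, /*rank=*/20, &buffer), the
// normalized bounds are inclusive_start = 9 and exclusive_stop = 1, so
// size = CeilOfRatio(1 - 9, -2) = 4 and {9, 7, 5, 3} is appended to buffer.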
absl::Status NormalizeDynamicDimSpec(const DynamicDimSpec& spec,
span<const std::string> labels,
DimensionIndexBuffer* result) {
struct Visitor {
span<const std::string> labels;
DimensionIndexBuffer* result;
absl::Status operator()(DimensionIndex i) const {
TENSORSTORE_ASSIGN_OR_RETURN(DimensionIndex index,
NormalizeDimensionIndex(i, labels.size()));
result->push_back(index);
return absl::OkStatus();
}
absl::Status operator()(const std::string& label) const {
TENSORSTORE_ASSIGN_OR_RETURN(DimensionIndex index,
NormalizeDimensionLabel(label, labels));
result->push_back(index);
return absl::OkStatus();
}
absl::Status operator()(const DimRangeSpec& s) const {
return NormalizeDimRangeSpec(s, labels.size(), result);
}
};
return std::visit(Visitor{labels, result}, spec);
}
absl::Status NormalizeDynamicDimSpecs(span<const DynamicDimSpec> specs,
span<const std::string> labels,
DimensionIndexBuffer* result) {
for (const auto& spec : specs) {
TENSORSTORE_RETURN_IF_ERROR(NormalizeDynamicDimSpec(spec, labels, result));
}
return absl::OkStatus();
}
} | #include "tensorstore/index_space/dimension_identifier.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::DimensionIdentifier;
using ::tensorstore::DimensionIndexBuffer;
using ::tensorstore::DimRangeSpec;
using ::tensorstore::DynamicDimSpec;
using ::tensorstore::Index;
using ::tensorstore::MatchesStatus;
using ::tensorstore::NormalizeDimensionIdentifier;
using ::tensorstore::NormalizeDimensionIndex;
using ::tensorstore::span;
using ::tensorstore::StrCat;
TEST(DimensionIdentifierTest, ConstructDefault) {
DimensionIdentifier d;
EXPECT_EQ(std::numeric_limits<Index>::max(), d.index());
EXPECT_EQ(nullptr, d.label().data());
}
TEST(DimensionIdentifierTest, ConstructDimensionIndex) {
DimensionIdentifier d(5);
EXPECT_EQ(5, d.index());
EXPECT_EQ(nullptr, d.label().data());
}
TEST(DimensionIdentifierTest, ConstructStringView) {
DimensionIdentifier d(std::string_view("hello"));
EXPECT_EQ(std::numeric_limits<Index>::max(), d.index());
EXPECT_EQ("hello", d.label());
}
TEST(DimensionIdentifierTest, ConstructCString) {
DimensionIdentifier d("hello");
EXPECT_EQ(std::numeric_limits<Index>::max(), d.index());
EXPECT_EQ("hello", d.label());
}
TEST(DimensionIdentifierTest, ConstructStdString) {
std::string s = "hello";
DimensionIdentifier d(s);
EXPECT_EQ(std::numeric_limits<Index>::max(), d.index());
EXPECT_EQ("hello", d.label());
}
TEST(DimensionIdentifierTest, Compare) {
EXPECT_EQ(DimensionIdentifier(3), DimensionIdentifier(3));
EXPECT_EQ(DimensionIdentifier("a"), DimensionIdentifier("a"));
EXPECT_NE(DimensionIdentifier("a"), DimensionIdentifier(2));
EXPECT_NE(DimensionIdentifier("a"), DimensionIdentifier("b"));
EXPECT_NE(DimensionIdentifier(2), DimensionIdentifier(3));
}
TEST(DimensionIdentifierTest, PrintToOstream) {
EXPECT_EQ("3", StrCat(DimensionIdentifier(3)));
EXPECT_EQ("\"a\"", StrCat(DimensionIdentifier("a")));
}
TEST(NormalizeDimensionIndexTest, ValidNonNegative) {
EXPECT_EQ(0, NormalizeDimensionIndex(0, 5));
EXPECT_EQ(3, NormalizeDimensionIndex(3, 5));
EXPECT_EQ(4, NormalizeDimensionIndex(4, 5));
}
TEST(NormalizeDimensionIndexTest, ValidNegative) {
EXPECT_EQ(0, NormalizeDimensionIndex(-5, 5));
EXPECT_EQ(2, NormalizeDimensionIndex(-3, 5));
EXPECT_EQ(4, NormalizeDimensionIndex(-1, 5));
}
TEST(NormalizeDimensionIndexTest, InvalidNegative) {
EXPECT_THAT(NormalizeDimensionIndex(-6, 5),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(NormalizeDimensionIndex(-7, 5),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(NormalizeDimensionIndexTest, InvalidNonNegative) {
EXPECT_THAT(NormalizeDimensionIndex(5, 5),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(NormalizeDimensionIndex(6, 5),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(NormalizeDimensionLabelTest, ValidLabel) {
EXPECT_EQ(2, NormalizeDimensionLabel(
"x", span<const std::string>({"a", "b", "x", "y"})));
}
TEST(NormalizeDimensionLabelTest, MissingLabel) {
EXPECT_THAT(NormalizeDimensionLabel(
"w", span<const std::string>({"a", "b", "x", "y"})),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(NormalizeDimensionLabelTest, EmptyLabel) {
EXPECT_THAT(NormalizeDimensionLabel(
"", span<const std::string>({"a", "b", "x", "y"})),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(NormalizeDimensionIdentifierTest, ValidLabel) {
EXPECT_EQ(2, NormalizeDimensionIdentifier(
"x", span<const std::string>({"a", "b", "x", "y"})));
}
TEST(NormalizeDimensionIdentifierTest, ValidPositiveIndex) {
EXPECT_EQ(2, NormalizeDimensionIdentifier(
2, span<const std::string>({"a", "b", "x", "y"})));
EXPECT_EQ(0, NormalizeDimensionIdentifier(
0, span<const std::string>({"a", "b", "x", "y"})));
EXPECT_EQ(3, NormalizeDimensionIdentifier(
3, span<const std::string>({"a", "b", "x", "y"})));
}
TEST(NormalizeDimensionIdentifierTest, ValidNegativeIndex) {
EXPECT_EQ(2, NormalizeDimensionIdentifier(
-2, span<const std::string>({"a", "b", "x", "y"})));
EXPECT_EQ(3, NormalizeDimensionIdentifier(
-1, span<const std::string>({"a", "b", "x", "y"})));
EXPECT_EQ(0, NormalizeDimensionIdentifier(
-4, span<const std::string>({"a", "b", "x", "y"})));
}
TEST(NormalizeDimensionIdentifierTest, InvalidIndex) {
EXPECT_THAT(NormalizeDimensionIdentifier(
4, span<const std::string>({"a", "b", "x", "y"})),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(NormalizeDimensionIdentifier(
-5, span<const std::string>({"a", "b", "x", "y"})),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(DimRangeSpecTest, Comparison) {
DimRangeSpec a{1, 5, 1};
DimRangeSpec b{0, 5, 1};
DimRangeSpec c{1, 6, 1};
DimRangeSpec d{1, 6, 2};
EXPECT_EQ(a, a);
EXPECT_NE(a, b);
EXPECT_NE(a, c);
EXPECT_NE(a, d);
}
TEST(DimRangeSpecTest, PrintToOstream) {
EXPECT_EQ("1:5", StrCat(DimRangeSpec{1, 5, 1}));
EXPECT_EQ("1:5:2", StrCat(DimRangeSpec{1, 5, 2}));
EXPECT_EQ(":5", StrCat(DimRangeSpec{std::nullopt, 5, 1}));
EXPECT_EQ("1:", StrCat(DimRangeSpec{1, std::nullopt, 1}));
EXPECT_EQ(":", StrCat(DimRangeSpec{std::nullopt, std::nullopt, 1}));
EXPECT_EQ("::-1", StrCat(DimRangeSpec{std::nullopt, std::nullopt, -1}));
}
TEST(NormalizeDimRangeSpecTest, ValidFullySpecifiedStep1) {
DimensionIndexBuffer buffer;
EXPECT_EQ(absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{2, 10, 1}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(2, 3, 4, 5, 6, 7, 8, 9));
}
TEST(NormalizeDimRangeSpecTest, ValidFullySpecifiedStep2) {
DimensionIndexBuffer buffer;
EXPECT_EQ(absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{2, 10, 2}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(2, 4, 6, 8));
}
TEST(NormalizeDimRangeSpecTest, ValidFullySpecifiedStep2Floor) {
DimensionIndexBuffer buffer;
EXPECT_EQ(absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{2, 7, 3}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(2, 5));
}
TEST(NormalizeDimRangeSpecTest, ValidFullySpecifiedStepNeg1) {
DimensionIndexBuffer buffer;
EXPECT_EQ(absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{9, 1, -1}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(9, 8, 7, 6, 5, 4, 3, 2));
}
TEST(NormalizeDimRangeSpecTest, ValidFullySpecifiedStepNeg2) {
DimensionIndexBuffer buffer;
EXPECT_EQ(absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{9, 1, -2}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(9, 7, 5, 3));
}
TEST(NormalizeDimRangeSpecTest, ValidStartOnlyStep1) {
DimensionIndexBuffer buffer;
EXPECT_EQ(
absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{15, std::nullopt, 1}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(15, 16, 17, 18, 19));
}
TEST(NormalizeDimRangeSpecTest, ValidStartOnlyStepNegative1) {
DimensionIndexBuffer buffer;
EXPECT_EQ(
absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{5, std::nullopt, -1}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(5, 4, 3, 2, 1, 0));
}
TEST(NormalizeDimRangeSpecTest, ValidNegativeStartOnlyStep1) {
DimensionIndexBuffer buffer;
EXPECT_EQ(
absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{-5, std::nullopt, 1}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(15, 16, 17, 18, 19));
}
TEST(NormalizeDimRangeSpecTest, ValidStopOnlyStep1) {
DimensionIndexBuffer buffer;
EXPECT_EQ(
absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{std::nullopt, 5, 1}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(0, 1, 2, 3, 4));
}
TEST(NormalizeDimRangeSpecTest, ValidNegativeStopOnlyStep1) {
DimensionIndexBuffer buffer;
EXPECT_EQ(
absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{std::nullopt, -15, 1}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(0, 1, 2, 3, 4));
}
TEST(NormalizeDimRangeSpecTest, ValidStopOnlyStepNeg1) {
DimensionIndexBuffer buffer;
EXPECT_EQ(
absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{std::nullopt, 15, -1}, 20, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(19, 18, 17, 16));
}
TEST(NormalizeDimRangeSpecTest, ValidNoBoundsStep1) {
DimensionIndexBuffer buffer;
EXPECT_EQ(absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{std::nullopt, std::nullopt, 1},
5, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(0, 1, 2, 3, 4));
}
TEST(NormalizeDimRangeSpecTest, ValidNoBoundsStep2) {
DimensionIndexBuffer buffer;
EXPECT_EQ(absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{std::nullopt, std::nullopt, 2},
5, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(0, 2, 4));
}
TEST(NormalizeDimRangeSpecTest, ValidMaxStop) {
DimensionIndexBuffer buffer;
EXPECT_EQ(absl::OkStatus(),
NormalizeDimRangeSpec(DimRangeSpec{1, 5, 1}, 5, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(1, 2, 3, 4));
}
TEST(NormalizeDimRangeSpecTest, InvalidStep0) {
DimensionIndexBuffer buffer;
EXPECT_THAT(
NormalizeDimRangeSpec(DimRangeSpec{std::nullopt, std::nullopt, 0}, 5,
&buffer),
MatchesStatus(absl::StatusCode::kInvalidArgument, "step must not be 0"));
}
TEST(NormalizeDimRangeSpecTest, InvalidIntervalStep1) {
DimensionIndexBuffer buffer;
EXPECT_THAT(NormalizeDimRangeSpec(DimRangeSpec{3, 1, 1}, 5, &buffer),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"3:1 is not a valid range"));
}
TEST(NormalizeDimRangeSpecTest, InvalidIntervalStepNeg1) {
DimensionIndexBuffer buffer;
EXPECT_THAT(NormalizeDimRangeSpec(DimRangeSpec{1, 3, -1}, 5, &buffer),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"1:3:-1 is not a valid range"));
}
TEST(NormalizeDimRangeSpecTest, InvalidIndex) {
DimensionIndexBuffer buffer;
EXPECT_THAT(NormalizeDimRangeSpec(DimRangeSpec{1, 8, 1}, 5, &buffer),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Dimension exclusive stop index 8 is outside valid "
"range \\[-6, 5\\]"));
}
} |
545 | cpp | google/tensorstore | index_vector_or_scalar | tensorstore/index_space/index_vector_or_scalar.cc | tensorstore/index_space/index_vector_or_scalar_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_INDEX_VECTOR_OR_SCALAR_H_
#define TENSORSTORE_INDEX_SPACE_INDEX_VECTOR_OR_SCALAR_H_
#include <cstddef>
#include <type_traits>
#include <variant>
#include <vector>
#include "absl/status/status.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
template <typename T, typename = std::true_type>
struct IsIndexVectorOrScalar {
static constexpr bool value = false;
static constexpr DimensionIndex extent = -1;
using normalized_type = void;
};
template <typename T>
struct IsIndexVectorOrScalar<
T,
std::integral_constant<bool, static_cast<bool>(internal::IsIndexPack<T>)>>
: public std::true_type {
using normalized_type = Index;
constexpr static std::ptrdiff_t extent = dynamic_extent;
};
template <typename T>
struct IsIndexVectorOrScalar<
T,
std::integral_constant<
bool, static_cast<bool>(
std::is_same_v<
typename internal::ConstSpanType<T>::value_type, Index>)>>
: public std::true_type {
using normalized_type = internal::ConstSpanType<T>;
constexpr static std::ptrdiff_t extent = normalized_type::extent;
};
namespace internal_index_space {
using IndexVectorOrScalarContainer = std::variant<std::vector<Index>, Index>;
class IndexVectorOrScalarView {
public:
IndexVectorOrScalarView(const IndexVectorOrScalarContainer& c) {
if (auto* v = std::get_if<std::vector<Index>>(&c)) {
pointer = v->data();
size_or_scalar = v->size();
} else {
pointer = nullptr;
size_or_scalar = *std::get_if<Index>(&c);
}
}
IndexVectorOrScalarView(span<const Index> s)
: pointer(s.data()), size_or_scalar(s.size()) {}
IndexVectorOrScalarView(const Index scalar)
: pointer(nullptr), size_or_scalar(scalar) {}
Index operator[](DimensionIndex i) const {
return pointer ? pointer[i] : size_or_scalar;
}
const Index* pointer;
Index size_or_scalar;
};
absl::Status CheckIndexVectorSize(IndexVectorOrScalarView indices,
DimensionIndex size);
}
}
#endif
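// Editorial usage sketch (added commentary, not part of the original source):
// IndexVectorOrScalarView lets a scalar stand in for an implicitly-broadcast
// index vector:
//
//     IndexVectorOrScalarView scalar(5);        // scalar[i] == 5 for any i
//     const Index arr[] = {1, 2, 3};
//     IndexVectorOrScalarView vec{span(arr)};   // vec[1] == 2
//
// CheckIndexVectorSize accepts any size for a scalar view and requires an
// exact length match for a vector view.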
#include "tensorstore/index_space/index_vector_or_scalar.h"
#include <system_error>
#include "absl/status/status.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
absl::Status CheckIndexVectorSize(IndexVectorOrScalarView indices,
DimensionIndex size) {
if (indices.pointer && indices.size_or_scalar != size)
return absl::InvalidArgumentError(tensorstore::StrCat(
"Number of dimensions (", size, ") does not match number of indices (",
indices.size_or_scalar, ")"));
return absl::OkStatus();
}
}
} | #include "tensorstore/index_space/index_vector_or_scalar.h"
#include <cstdint>
#include <system_error>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::dynamic_extent;
using ::tensorstore::Index;
using ::tensorstore::IsIndexVectorOrScalar;
using ::tensorstore::MatchesStatus;
using ::tensorstore::span;
using ::tensorstore::internal_index_space::CheckIndexVectorSize;
using ::tensorstore::internal_index_space::IndexVectorOrScalarView;
static_assert(IsIndexVectorOrScalar<Index>::value == true);
static_assert(IsIndexVectorOrScalar<std::int32_t>::value == true);
static_assert(IsIndexVectorOrScalar<float>::value == false);
static_assert(
std::is_same_v<
typename IsIndexVectorOrScalar<std::int32_t>::normalized_type, Index>);
static_assert(IsIndexVectorOrScalar<std::int32_t>::extent == dynamic_extent);
static_assert(IsIndexVectorOrScalar<std::vector<std::int32_t>>::value == false);
static_assert(IsIndexVectorOrScalar<const std::vector<Index>>::value == true);
static_assert(std::is_same_v<typename IsIndexVectorOrScalar<
const std::vector<Index>>::normalized_type,
span<const Index>>);
static_assert(IsIndexVectorOrScalar<const std::vector<Index>>::extent ==
dynamic_extent);
static_assert(IsIndexVectorOrScalar<span<const Index>>::value == true);
static_assert(
std::is_same_v<typename IsIndexVectorOrScalar<span<Index>>::normalized_type,
span<const Index>>);
static_assert(IsIndexVectorOrScalar<span<const Index>>::extent ==
dynamic_extent);
static_assert(IsIndexVectorOrScalar<span<const Index, 5>>::value == true);
static_assert(std::is_same_v<
typename IsIndexVectorOrScalar<span<Index, 5>>::normalized_type,
span<const Index, 5>>);
static_assert(IsIndexVectorOrScalar<span<Index, 5>>::extent == 5);
TEST(IndexVectorOrScalarTest, Scalar) {
IndexVectorOrScalarView v(5);
EXPECT_EQ(5, v.size_or_scalar);
EXPECT_EQ(nullptr, v.pointer);
EXPECT_EQ(5, v[0]);
EXPECT_EQ(5, v[1]);
EXPECT_TRUE(CheckIndexVectorSize(v, 3).ok());
}
TEST(IndexVectorOrScalarTest, Vector) {
const Index arr[] = {1, 2, 3};
IndexVectorOrScalarView v{span(arr)};
EXPECT_EQ(3, v.size_or_scalar);
EXPECT_EQ(&arr[0], v.pointer);
EXPECT_EQ(1, v[0]);
EXPECT_EQ(2, v[1]);
EXPECT_EQ(3, v[2]);
EXPECT_TRUE(CheckIndexVectorSize(v, 3).ok());
EXPECT_THAT(CheckIndexVectorSize(v, 5),
tensorstore::MatchesStatus(absl::StatusCode::kInvalidArgument));
}
} |
546 | cpp | google/tensorstore | dimension_permutation | tensorstore/index_space/dimension_permutation.cc | tensorstore/index_space/dimension_permutation_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_DIMENSION_PERMUTATION_H_
#define TENSORSTORE_INDEX_SPACE_DIMENSION_PERMUTATION_H_
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
void SetPermutation(ContiguousLayoutOrder order,
span<DimensionIndex> permutation);
bool IsValidPermutation(span<const DimensionIndex> permutation);
bool PermutationMatchesOrder(span<const DimensionIndex> permutation,
ContiguousLayoutOrder order);
void InvertPermutation(DimensionIndex rank, const DimensionIndex* perm,
DimensionIndex* inverse_perm);
void SetPermutationFromStridedLayout(StridedLayoutView<> layout,
span<DimensionIndex> permutation);
void TransformOutputDimensionOrder(IndexTransformView<> transform,
span<const DimensionIndex> output_perm,
span<DimensionIndex> input_perm);
void TransformInputDimensionOrder(IndexTransformView<> transform,
span<const DimensionIndex> input_perm,
span<DimensionIndex> output_perm);
}
#endif
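// Editorial usage sketch (added commentary, not part of the original source):
// for rank 3, SetPermutation(c_order, p) sets p = {0, 1, 2} and
// SetPermutation(fortran_order, p) sets p = {2, 1, 0}; these are exactly the
// permutations accepted by PermutationMatchesOrder for the respective order.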
#include "tensorstore/index_space/dimension_permutation.h"
#include <algorithm>
#include <cassert>
#include <numeric>
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
void SetPermutation(ContiguousLayoutOrder order,
span<DimensionIndex> permutation) {
if (order == c_order) {
for (DimensionIndex i = 0; i < permutation.size(); ++i) {
permutation[i] = i;
}
} else {
for (DimensionIndex i = 0; i < permutation.size(); ++i) {
permutation[i] = permutation.size() - 1 - i;
}
}
}
bool IsValidPermutation(span<const DimensionIndex> permutation) {
DimensionSet seen_dims;
const DimensionIndex rank = permutation.size();
if (rank > kMaxRank) return false;
for (DimensionIndex i = 0; i < rank; ++i) {
DimensionIndex dim = permutation[i];
if (dim < 0 || dim >= rank || seen_dims[dim]) {
return false;
}
seen_dims[dim] = true;
}
return true;
}
bool PermutationMatchesOrder(span<const DimensionIndex> permutation,
ContiguousLayoutOrder order) {
if (order == c_order) {
for (DimensionIndex i = 0; i < permutation.size(); ++i) {
if (permutation[i] != i) return false;
}
} else {
for (DimensionIndex i = 0; i < permutation.size(); ++i) {
if (permutation[i] != permutation.size() - i - 1) return false;
}
}
return true;
}
void InvertPermutation(DimensionIndex rank, const DimensionIndex* perm,
DimensionIndex* inverse_perm) {
assert(IsValidPermutation(span(perm, rank)));
for (DimensionIndex i = 0; i < rank; ++i) {
inverse_perm[perm[i]] = i;
}
}
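// Editorial worked example (added commentary): the inverse of the permutation
// {1, 2, 0} is {2, 0, 1}, since inverse_perm[perm[i]] == i for every i.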
void SetPermutationFromStridedLayout(StridedLayoutView<> layout,
span<DimensionIndex> permutation) {
assert(layout.rank() == permutation.size());
std::iota(permutation.begin(), permutation.end(), DimensionIndex(0));
const auto get_effective_byte_stride_nabs = [&](DimensionIndex i) -> Index {
const Index byte_stride = layout.byte_strides()[i];
if (byte_stride > 0) return -byte_stride;
return byte_stride;
};
std::stable_sort(permutation.begin(), permutation.end(),
[&](DimensionIndex a, DimensionIndex b) {
return get_effective_byte_stride_nabs(a) <
get_effective_byte_stride_nabs(b);
});
}
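// Editorial worked example (added commentary): for byte strides {10, 5, 6, 6}
// the sort orders dimensions by descending absolute stride, stable for ties,
// yielding the permutation {0, 2, 3, 1}.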
void TransformOutputDimensionOrder(IndexTransformView<> transform,
span<const DimensionIndex> output_perm,
span<DimensionIndex> input_perm) {
assert(transform.valid());
assert(IsValidPermutation(output_perm));
const DimensionIndex output_rank = transform.output_rank();
const DimensionIndex input_rank = transform.input_rank();
assert(input_rank == input_perm.size());
assert(output_rank == output_perm.size());
DimensionIndex min_output_dim[kMaxRank];
std::fill_n(min_output_dim, input_rank, kMaxRank);
for (DimensionIndex orig_perm_i = 0; orig_perm_i < output_rank;
++orig_perm_i) {
const DimensionIndex output_dim = output_perm[orig_perm_i];
const auto map = transform.output_index_maps()[output_dim];
if (map.method() != OutputIndexMethod::single_input_dimension) continue;
const DimensionIndex input_dim = map.input_dimension();
min_output_dim[input_dim] =
std::min(min_output_dim[input_dim], orig_perm_i);
}
std::iota(input_perm.begin(), input_perm.end(), DimensionIndex(0));
std::sort(input_perm.begin(), input_perm.end(),
[&](DimensionIndex a, DimensionIndex b) {
DimensionIndex a_ordinal = min_output_dim[a];
DimensionIndex b_ordinal = min_output_dim[b];
if (a_ordinal != b_ordinal) return a_ordinal < b_ordinal;
return a < b;
});
assert(IsValidPermutation(input_perm));
}
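// Editorial worked example (added commentary): for a rank-2 transpose
// transform, an output dimension order of {0, 1} corresponds to the input
// dimension order {1, 0}; TransformInputDimensionOrder performs the inverse
// computation.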
void TransformInputDimensionOrder(IndexTransformView<> transform,
span<const DimensionIndex> input_perm,
span<DimensionIndex> output_perm) {
assert(transform.valid());
assert(IsValidPermutation(input_perm));
[[maybe_unused]] const DimensionIndex output_rank = transform.output_rank();
const DimensionIndex input_rank = transform.input_rank();
assert(input_rank == input_perm.size());
assert(output_rank == output_perm.size());
DimensionIndex inverse_input_perm[kMaxRank];
InvertPermutation(input_rank, input_perm.data(), inverse_input_perm);
std::iota(output_perm.begin(), output_perm.end(), DimensionIndex(0));
const auto get_output_dim_ordinal = [&](DimensionIndex output_dim) {
const auto map = transform.output_index_maps()[output_dim];
if (map.method() != OutputIndexMethod::single_input_dimension) {
return kMaxRank;
}
return inverse_input_perm[map.input_dimension()];
};
std::sort(output_perm.begin(), output_perm.end(),
[&](DimensionIndex a, DimensionIndex b) {
DimensionIndex a_ordinal = get_output_dim_ordinal(a);
DimensionIndex b_ordinal = get_output_dim_ordinal(b);
if (a_ordinal != b_ordinal) return a_ordinal < b_ordinal;
return a < b;
});
assert(IsValidPermutation(output_perm));
}
} | #include "tensorstore/index_space/dimension_permutation.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::Dims;
using ::tensorstore::IsValidPermutation;
using ::tensorstore::PermutationMatchesOrder;
using ::tensorstore::span;
TEST(SetPermutationTest, Rank0) {
std::vector<DimensionIndex> permutation(0);
tensorstore::SetPermutation(tensorstore::c_order, permutation);
tensorstore::SetPermutation(tensorstore::fortran_order, permutation);
}
TEST(SetPermutationTest, Rank1COrder) {
std::vector<DimensionIndex> permutation(1, 42);
tensorstore::SetPermutation(tensorstore::c_order, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(0));
}
TEST(SetPermutationTest, Rank1FortranOrder) {
std::vector<DimensionIndex> permutation(1, 42);
tensorstore::SetPermutation(tensorstore::fortran_order, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(0));
}
TEST(SetPermutationTest, Rank2COrder) {
std::vector<DimensionIndex> permutation(2, 42);
tensorstore::SetPermutation(tensorstore::c_order, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(0, 1));
}
TEST(SetPermutationTest, Rank2FortranOrder) {
std::vector<DimensionIndex> permutation(2, 42);
tensorstore::SetPermutation(tensorstore::fortran_order, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(1, 0));
}
TEST(SetPermutationTest, Rank3COrder) {
std::vector<DimensionIndex> permutation(3, 42);
tensorstore::SetPermutation(tensorstore::c_order, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(0, 1, 2));
}
TEST(SetPermutationTest, Rank3FortranOrder) {
std::vector<DimensionIndex> permutation(3, 42);
tensorstore::SetPermutation(tensorstore::fortran_order, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(2, 1, 0));
}
TEST(IsValidPermutationTest, Basic) {
EXPECT_TRUE(IsValidPermutation(span<const DimensionIndex>()));
EXPECT_TRUE(IsValidPermutation(span<const DimensionIndex>({0})));
EXPECT_FALSE(IsValidPermutation(span<const DimensionIndex>({1})));
EXPECT_FALSE(IsValidPermutation(span<const DimensionIndex>({-1})));
EXPECT_TRUE(IsValidPermutation(span<const DimensionIndex>({0, 1})));
EXPECT_TRUE(IsValidPermutation(span<const DimensionIndex>({1, 0})));
EXPECT_FALSE(IsValidPermutation(span<const DimensionIndex>({1, 1})));
EXPECT_FALSE(IsValidPermutation(span<const DimensionIndex>({0, 0})));
EXPECT_TRUE(IsValidPermutation(span<const DimensionIndex>({1, 2, 0})));
EXPECT_FALSE(IsValidPermutation(span<const DimensionIndex>({1, 2, 1})));
}
TEST(PermutationMatchesOrderTest, Basic) {
EXPECT_TRUE(PermutationMatchesOrder({}, tensorstore::c_order));
EXPECT_TRUE(PermutationMatchesOrder({}, tensorstore::fortran_order));
EXPECT_TRUE(PermutationMatchesOrder({{0}}, tensorstore::c_order));
EXPECT_TRUE(PermutationMatchesOrder({{0}}, tensorstore::fortran_order));
EXPECT_TRUE(PermutationMatchesOrder({{0, 1}}, tensorstore::c_order));
EXPECT_FALSE(PermutationMatchesOrder({{0, 1}}, tensorstore::fortran_order));
EXPECT_TRUE(PermutationMatchesOrder({{0, 1, 2}}, tensorstore::c_order));
EXPECT_FALSE(PermutationMatchesOrder({{1}}, tensorstore::c_order));
EXPECT_FALSE(PermutationMatchesOrder({{1}}, tensorstore::fortran_order));
EXPECT_FALSE(PermutationMatchesOrder({{1, 0}}, tensorstore::c_order));
EXPECT_TRUE(PermutationMatchesOrder({{1, 0}}, tensorstore::fortran_order));
}
TEST(InvertPermutationTest, Rank0) {
std::vector<DimensionIndex> source;
std::vector<DimensionIndex> dest;
tensorstore::InvertPermutation(0, source.data(), dest.data());
}
TEST(InvertPermutationTest, Rank1) {
std::vector<DimensionIndex> source{0};
std::vector<DimensionIndex> dest(1, 42);
tensorstore::InvertPermutation(1, source.data(), dest.data());
EXPECT_THAT(dest, ::testing::ElementsAre(0));
}
TEST(InvertPermutationTest, Rank2Identity) {
std::vector<DimensionIndex> source{0, 1};
std::vector<DimensionIndex> dest(2, 42);
tensorstore::InvertPermutation(2, source.data(), dest.data());
EXPECT_THAT(dest, ::testing::ElementsAre(0, 1));
}
TEST(InvertPermutationTest, Rank2Transpose) {
std::vector<DimensionIndex> source{1, 0};
std::vector<DimensionIndex> dest(2, 42);
tensorstore::InvertPermutation(2, source.data(), dest.data());
EXPECT_THAT(dest, ::testing::ElementsAre(1, 0));
}
TEST(InvertPermutationTest, Rank3) {
std::vector<DimensionIndex> source{1, 2, 0};
std::vector<DimensionIndex> dest(3, 42);
tensorstore::InvertPermutation(3, source.data(), dest.data());
EXPECT_THAT(dest, ::testing::ElementsAre(2, 0, 1));
std::vector<DimensionIndex> source2(3, 42);
tensorstore::InvertPermutation(3, dest.data(), source2.data());
EXPECT_EQ(source, source2);
}
TEST(SetPermutationFromStridedLayoutTest, Rank0) {
tensorstore::StridedLayout<> layout(0);
std::vector<DimensionIndex> permutation(0);
tensorstore::SetPermutationFromStridedLayout(layout, permutation);
}
TEST(SetPermutationFromStridedLayoutTest, Rank1) {
tensorstore::StridedLayout<> layout({5}, {10});
std::vector<DimensionIndex> permutation(1, 42);
tensorstore::SetPermutationFromStridedLayout(layout, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(0));
}
TEST(SetPermutationFromStridedLayoutTest, Rank2COrder) {
tensorstore::StridedLayout<> layout({5, 6}, {10, 5});
std::vector<DimensionIndex> permutation(2, 42);
tensorstore::SetPermutationFromStridedLayout(layout, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(0, 1));
}
TEST(SetPermutationFromStridedLayoutTest, Rank2FortranOrder) {
tensorstore::StridedLayout<> layout({5, 6}, {5, 10});
std::vector<DimensionIndex> permutation(2, 42);
tensorstore::SetPermutationFromStridedLayout(layout, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(1, 0));
}
TEST(SetPermutationFromStridedLayoutTest, Rank2ZeroStride) {
tensorstore::StridedLayout<> layout({5, 6}, {0, 0});
std::vector<DimensionIndex> permutation(2, 42);
tensorstore::SetPermutationFromStridedLayout(layout, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(0, 1));
}
TEST(SetPermutationFromStridedLayoutTest, Rank4) {
tensorstore::StridedLayout<> layout({5, 6, 7, 8}, {10, 5, 6, 6});
std::vector<DimensionIndex> permutation(4, 42);
tensorstore::SetPermutationFromStridedLayout(layout, permutation);
EXPECT_THAT(permutation, ::testing::ElementsAre(0, 2, 3, 1));
}
TEST(TransformOutputDimensionOrderTest, Rank0) {
std::vector<DimensionIndex> source;
std::vector<DimensionIndex> dest;
tensorstore::TransformOutputDimensionOrder(tensorstore::IdentityTransform(0),
source, dest);
}
TEST(TransformOutputDimensionOrderTest, Rank1Identity) {
std::vector<DimensionIndex> source{0};
std::vector<DimensionIndex> dest(1, 42);
tensorstore::TransformOutputDimensionOrder(tensorstore::IdentityTransform(1),
source, dest);
EXPECT_THAT(dest, ::testing::ElementsAre(0));
}
TEST(TransformOutputDimensionOrderTest, Rank2COrderIdentity) {
std::vector<DimensionIndex> source{0, 1};
std::vector<DimensionIndex> dest(2, 42);
std::vector<DimensionIndex> source2(2, 42);
auto transform = tensorstore::IdentityTransform(2);
tensorstore::TransformOutputDimensionOrder(transform, source, dest);
EXPECT_THAT(dest, ::testing::ElementsAre(0, 1));
tensorstore::TransformInputDimensionOrder(transform, dest, source2);
EXPECT_EQ(source, source2);
}
TEST(TransformOutputDimensionOrderTest, Rank2FortranOrderIdentity) {
std::vector<DimensionIndex> source{1, 0};
std::vector<DimensionIndex> dest(2, 42);
std::vector<DimensionIndex> source2(2, 42);
auto transform = tensorstore::IdentityTransform(2);
tensorstore::TransformOutputDimensionOrder(transform, source, dest);
EXPECT_THAT(dest, ::testing::ElementsAre(1, 0));
tensorstore::TransformInputDimensionOrder(transform, dest, source2);
EXPECT_EQ(source, source2);
}
TEST(TransformOutputDimensionOrderTest, Rank2COrderTranspose) {
std::vector<DimensionIndex> source{0, 1};
std::vector<DimensionIndex> dest(2, 42);
std::vector<DimensionIndex> source2(2, 42);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
tensorstore::IdentityTransform(2) | Dims(1, 0).Transpose());
tensorstore::TransformOutputDimensionOrder(transform, source, dest);
EXPECT_THAT(dest, ::testing::ElementsAre(1, 0));
tensorstore::TransformInputDimensionOrder(transform, dest, source2);
EXPECT_EQ(source, source2);
}
TEST(TransformOutputDimensionOrderTest, Rank2FortranOrderTranspose) {
std::vector<DimensionIndex> source{1, 0};
std::vector<DimensionIndex> dest(2, 42);
std::vector<DimensionIndex> source2(2, 42);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
tensorstore::IdentityTransform(2) | Dims(1, 0).Transpose());
tensorstore::TransformOutputDimensionOrder(transform, source, dest);
EXPECT_THAT(dest, ::testing::ElementsAre(0, 1));
tensorstore::TransformInputDimensionOrder(transform, dest, source2);
EXPECT_EQ(source, source2);
}
} |
547 | cpp | google/tensorstore | transformed_array | tensorstore/index_space/transformed_array.cc | tensorstore/index_space/transformed_array_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_TRANSFORMED_ARRAY_H_
#define TENSORSTORE_INDEX_SPACE_TRANSFORMED_ARRAY_H_
#include <stddef.h>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/container_kind.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/transform_array_constraints.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/internal/void_wrapper.h"
#include "tensorstore/rank.h"
#include "tensorstore/static_cast.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/element_pointer.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
template <typename ElementTagType, DimensionIndex Rank = dynamic_rank,
ContainerKind LayoutCKind = container>
class TransformedArray;
template <typename ElementTagType, DimensionIndex Rank = dynamic_rank>
using TransformedArrayView = TransformedArray<ElementTagType, Rank>;
template <typename Element, DimensionIndex Rank = dynamic_rank,
ContainerKind LayoutCKind = container>
using TransformedSharedArray =
TransformedArray<Shared<Element>, Rank, LayoutCKind>;
template <typename Element, DimensionIndex Rank = dynamic_rank>
using TransformedSharedArrayView =
TransformedArray<Shared<Element>, Rank, view>;
template <typename T>
constexpr inline bool IsTransformedArray = false;
template <typename ElementTagType, DimensionIndex Rank,
ContainerKind LayoutCKind>
constexpr inline bool
IsTransformedArray<TransformedArray<ElementTagType, Rank, LayoutCKind>> =
true;
template <typename T>
constexpr inline bool IsTransformedArrayLike =
IsArray<T> || IsTransformedArray<T>;
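// Editorial note (added commentary, not part of the original source): the
// aliases above expand as, e.g.,
//
//     TransformedSharedArray<int, 2>   ==  TransformedArray<Shared<int>, 2>
//     TransformedSharedArrayView<int>  ==  TransformedArray<Shared<int>,
//                                                           dynamic_rank, view>
//
// and IsTransformedArrayLike<T> holds for both Array and TransformedArray
// instantiations.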
namespace internal_index_space {
TransformRep::Ptr<> MakeTransformFromStridedLayout(
StridedLayoutView<dynamic_rank, offset_origin> layout);
Result<TransformRep::Ptr<>> MakeTransformFromStridedLayoutAndTransform(
StridedLayoutView<dynamic_rank, offset_origin> layout,
TransformRep::Ptr<> transform);
StridedLayoutView<dynamic_rank, offset_origin> GetUnboundedLayout(
DimensionIndex rank);
template <typename A, typename Func>
using TransformedArrayMapTransformResultType = FlatMapResultType<
A::template RebindTransform,
internal::remove_cvref_t<std::invoke_result_t<
Func, const typename internal::remove_cvref_t<A>::Transform&>>>;
template <typename A, typename Func>
static TransformedArrayMapTransformResultType<internal::remove_cvref_t<A>, Func>
TransformedArrayMapTransform(A&& a, Func&& func) {
using ResultType =
TransformedArrayMapTransformResultType<internal::remove_cvref_t<A>, Func>;
using AX = internal::remove_cvref_t<A>;
using MappedTransform = UnwrapResultType<
std::invoke_result_t<Func, const typename AX::Transform&>>;
return MapResult(
[&](MappedTransform transform) {
return typename ResultType::value_type{
std::forward<A>(a).element_pointer(), std::move(transform)};
},
std::forward<Func>(func)(std::forward<A>(a).transform()));
}
template <bool Condition>
struct ConditionalTransformedArrayMapTransformResultType {
template <typename A, typename Func>
using type = TransformedArrayMapTransformResultType<A, Func>;
};
template <>
struct ConditionalTransformedArrayMapTransformResultType<false> {};
template <bool Condition, typename A, typename Func>
using EnableIfTransformedArrayMapTransformResultType =
typename ConditionalTransformedArrayMapTransformResultType<
Condition>::template type<A, Func>;
std::string DescribeTransformedArrayForCast(DataType dtype,
DimensionIndex rank);
}
template <typename ElementTagType, DimensionIndex Rank,
ContainerKind LayoutCKind>
class TransformedArray {
static_assert(IsElementTag<ElementTagType>,
"ElementTagType must be an ElementTag type.");
static_assert(Rank == dynamic_rank || Rank >= 0,
"Rank must be dynamic_rank or >= 0.");
public:
using ElementTag = ElementTagType;
using ElementPointer = tensorstore::ElementPointer<ElementTag>;
using Pointer = typename ElementPointer::Pointer;
using Transform = IndexTransform<Rank, dynamic_rank, LayoutCKind>;
using Element = typename ElementPointer::Element;
using DataType = dtype_t<Element>;
constexpr static DimensionIndex static_rank = Transform::static_input_rank;
constexpr static ContainerKind layout_container_kind = LayoutCKind;
using RankType = StaticOrDynamicRank<static_rank>;
template <ContainerKind CKind>
using RebindContainerKind = TransformedArray<ElementTagType, Rank, CKind>;
template <typename OtherTransform>
using RebindTransform =
TransformedArray<ElementTagType, OtherTransform::static_input_rank>;
TransformedArray() = default;
template <typename P, typename T,
std::enable_if_t<internal::IsPairImplicitlyConvertible<
P, T, ElementPointer, Transform>>* = nullptr>
TransformedArray(P&& element_pointer, T&& transform) noexcept
: element_pointer_(std::forward<P>(element_pointer)),
transform_(std::forward<T>(transform)) {}
template <typename A, ContainerKind SfinaeC = LayoutCKind,
typename = std::enable_if_t<
(SfinaeC == container && IsArray<internal::remove_cvref_t<A>> &&
std::is_convertible_v<
typename internal::remove_cvref_t<A>::ElementPointer,
ElementPointer> &&
RankConstraint::Implies(
internal::remove_cvref_t<A>::static_rank, Rank))>>
TransformedArray(A&& array)
: element_pointer_(std::forward<A>(array).element_pointer()),
transform_(internal_index_space::TransformAccess::Make<Transform>(
internal_index_space::MakeTransformFromStridedLayout(
array.layout()))) {}
template <typename Other,
std::enable_if_t<
(IsTransformedArray<internal::remove_cvref_t<Other>> &&
internal::IsPairImplicitlyConvertible<
typename internal::remove_cvref_t<Other>::ElementPointer,
typename internal::remove_cvref_t<Other>::Transform,
ElementPointer, Transform>)>* = nullptr>
TransformedArray(Other&& other) noexcept
: element_pointer_(std::forward<Other>(other).element_pointer()),
transform_(std::forward<Other>(other).transform()) {}
template <typename Other,
std::enable_if_t<(
IsTransformedArray<internal::remove_cvref_t<Other>> &&
IsStaticCastConstructible<
ElementPointer,
typename internal::remove_cvref_t<Other>::ElementPointer> &&
IsStaticCastConstructible<Transform,
typename internal::remove_cvref_t<
Other>::Transform>)>* = nullptr>
explicit TransformedArray(unchecked_t, Other&& other) noexcept
: element_pointer_(unchecked,
std::forward<Other>(other).element_pointer()),
transform_(unchecked, std::forward<Other>(other).transform()) {}
template <
typename A, ContainerKind SfinaeC = LayoutCKind,
std::enable_if_t<
(SfinaeC == container && IsArray<internal::remove_cvref_t<A>> &&
IsStaticCastConstructible<
ElementPointer,
typename internal::remove_cvref_t<A>::ElementPointer> &&
RankConstraint::EqualOrUnspecified(
internal::remove_cvref_t<A>::static_rank, Rank))>* = nullptr>
explicit TransformedArray(unchecked_t, A&& array) noexcept
: element_pointer_(unchecked, std::forward<A>(array).element_pointer()),
transform_(unchecked,
internal_index_space::TransformAccess::Make<Transform>(
internal_index_space::MakeTransformFromStridedLayout(
array.layout()))) {}
template <typename Other,
std::enable_if_t<
(IsTransformedArray<internal::remove_cvref_t<Other>> &&
internal::IsPairImplicitlyConvertible<
typename internal::remove_cvref_t<Other>::ElementPointer,
typename internal::remove_cvref_t<Other>::Transform,
ElementPointer, Transform>)>* = nullptr>
TransformedArray& operator=(Other&& other) noexcept {
element_pointer_ = std::forward<Other>(other).element_pointer();
transform_ = std::forward<Other>(other).transform();
return *this;
}
template <typename A, ContainerKind SfinaeC = LayoutCKind,
typename = std::enable_if_t<
(SfinaeC == container && IsArray<internal::remove_cvref_t<A>> &&
std::is_assignable_v<
ElementPointer,
typename internal::remove_cvref_t<A>::ElementPointer> &&
RankConstraint::Implies(
internal::remove_cvref_t<A>::static_rank, Rank))>>
TransformedArray& operator=(A&& array) noexcept {
element_pointer_ = std::forward<A>(array).element_pointer();
transform_ = internal_index_space::TransformAccess::Make<Transform>(
internal_index_space::MakeTransformFromStridedLayout(array.layout()));
return *this;
}
RankType rank() const { return transform_.input_rank(); }
IndexDomainView<static_rank> domain() const { return transform_.domain(); }
span<const Index, static_rank> origin() const { return domain().origin(); }
span<const Index, static_rank> shape() const { return domain().shape(); }
span<const std::string, static_rank> labels() const {
return transform_.input_labels();
}
DataType dtype() const { return element_pointer_.dtype(); }
const ElementPointer& element_pointer() const& { return element_pointer_; }
ElementPointer& element_pointer() & { return element_pointer_; }
ElementPointer&& element_pointer() && { return std::move(element_pointer_); }
Element* data() const { return element_pointer_.data(); }
const Transform& transform() const& { return transform_; }
Transform& transform() & { return transform_; }
Transform&& transform() && { return std::move(transform_); }
ArrayView<ElementTag, dynamic_rank, offset_origin> base_array() const {
return {element_pointer(),
internal_index_space::GetUnboundedLayout(transform_.output_rank())};
}
template <ArrayOriginKind OriginKind = offset_origin>
Result<SharedArray<const Element, Rank, OriginKind>> Materialize(
TransformArrayConstraints constraints = skip_repeated_elements) const {
return TransformArray<OriginKind>(UnownedToShared(base_array()),
transform(), constraints);
}
template <typename Func>
PipelineResultType<const TransformedArray&, Func> operator|(
Func&& func) const& {
return static_cast<Func&&>(func)(*this);
}
template <typename Func>
PipelineResultType<TransformedArray&&, Func> operator|(Func&& func) && {
return static_cast<Func&&>(func)(std::move(*this));
}
private:
ElementPointer element_pointer_;
Transform transform_;
};
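// Editor's note: an illustrative usage sketch, not part of the original
// source; it assumes only the public API declared above and is left as a
// comment so the header itself is unchanged.
//
//   auto arr = tensorstore::MakeOffsetArray<int>({10}, {1, 2, 3});
//   tensorstore::TransformedSharedArray<int, 1> ta = arr;  // identity view
//   // Materialize() resolves the transform into a concrete strided array,
//   // copying only when the constraints require it.
//   auto resolved = ta.Materialize().value();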
template <typename Element, DimensionIndex Rank, ContainerKind LayoutCKind>
std::enable_if_t<!IsShared<Element>,
TransformedArray<Shared<Element>, Rank, LayoutCKind>>
UnownedToShared(TransformedArray<Element, Rank, LayoutCKind> array) {
return TransformedArray<Shared<Element>, Rank, LayoutCKind>(
UnownedToShared(array.element_pointer()), std::move(array.transform()));
}
template <typename T, typename Element, DimensionIndex Rank,
ContainerKind LayoutCKind>
std::enable_if_t<!IsShared<Element>,
TransformedArray<Shared<Element>, Rank, LayoutCKind>>
UnownedToShared(const std::shared_ptr<T>& owned,
TransformedArray<Element, Rank, LayoutCKind> array) {
return TransformedArray<Shared<Element>, Rank, LayoutCKind>(
UnownedToShared(owned, array.element_pointer()),
std::move(array.transform()));
}
template <typename Element, DimensionIndex Rank, ContainerKind LayoutCKind>
TransformedArray<Shared<Element>, Rank, LayoutCKind> UnownedToShared(
TransformedArray<Shared<Element>, Rank, LayoutCKind> array) {
return array;
}
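// Editor's note: illustrative sketch of the `UnownedToShared` overloads
// above, not part of the original source. The one-argument unowned form
// does not take ownership (the caller must guarantee the data outlives the
// result); the two-argument form ties the view's lifetime to `owned`, as
// exercised by the UnownedToSharedAliasing unit test.
//
//   auto owned = tensorstore::MakeArray<int>({1, 2, 3});
//   tensorstore::TransformedArray<int, 1> view = owned;      // non-owning
//   auto aliased = UnownedToShared(owned.pointer(), view);   // shares `owned`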
template <typename ElementTagType, DimensionIndex Rank,
ContainerKind LayoutContainerKind>
struct StaticCastTraits<
TransformedArray<ElementTagType, Rank, LayoutContainerKind>>
: public DefaultStaticCastTraits<
TransformedArray<ElementTagType, Rank, LayoutContainerKind>> {
using type = TransformedArray<ElementTagType, Rank, LayoutContainerKind>;
template <typename TargetElement>
using RebindDataType = TransformedArray<
typename ElementTagTraits<ElementTagType>::template rebind<TargetElement>,
Rank, LayoutContainerKind>;
template <DimensionIndex TargetRank>
using RebindRank =
TransformedArray<ElementTagType, TargetRank, LayoutContainerKind>;
template <typename Other>
static bool IsCompatible(const Other& other) {
return RankConstraint::EqualOrUnspecified(other.rank(), Rank) &&
IsPossiblySameDataType(other.dtype(), typename type::DataType());
}
static std::string Describe() {
return internal_index_space::DescribeTransformedArrayForCast(
typename type::DataType(), Rank);
}
static std::string Describe(const type& value) {
return internal_index_space::DescribeTransformedArrayForCast(value.dtype(),
value.rank());
}
};
template <typename ElementTagType, DimensionIndex Rank,
ContainerKind LayoutCKind>
constexpr inline bool
HasBoxDomain<TransformedArray<ElementTagType, Rank, LayoutCKind>> = true;
template <typename ElementTagType, DimensionIndex Rank,
ContainerKind LayoutCKind>
BoxView<Rank> GetBoxDomainOf(
const TransformedArray<ElementTagType, Rank, LayoutCKind>& array) {
return array.domain().box();
}
template <typename A>
using TransformedArrayTypeFromArray =
std::enable_if_t<IsArray<A>,
TransformedArray<typename A::ElementTag, A::static_rank,
A::layout_container_kind>>;
template <typename ElementTag, DimensionIndex Rank, ArrayOriginKind OriginKind,
ContainerKind LayoutCKind>
TransformedArray(Array<ElementTag, Rank, OriginKind, LayoutCKind> array)
-> TransformedArray<ElementTag, RankConstraint::FromInlineRank(Rank)>;
template <typename A, typename T>
using TransformedArrayTypeFromArrayAndTransform = std::enable_if_t<
(IsArray<A> && IsIndexTransform<T> &&
A::static_rank == T::static_output_rank),
TransformedArray<typename A::ElementTag, T::static_input_rank, container>>;
template <DimensionIndex R, ArrayOriginKind O, ContainerKind AC, typename T>
inline std::enable_if_t<
(IsIndexTransform<internal::remove_cvref_t<T>>),
Result<IndexTransform<internal::remove_cvref_t<T>::static_input_rank,
RankConstraint::FromInlineRank(R)>>>
ComposeLayoutAndTransform(const StridedLayout<R, O, AC>& layout,
T&& transform) {
static_assert(RankConstraint::FromInlineRank(R) ==
internal::remove_cvref_t<T>::static_output_rank);
using TX = internal::remove_cvref_t<T>;
using internal_index_space::TransformAccess;
TENSORSTORE_ASSIGN_OR_RETURN(auto transform_ptr,
MakeTransformFromStridedLayoutAndTransform(
layout, TransformAccess::rep_ptr<container>(
std::forward<T>(transform))));
return TransformAccess::Make<
IndexTransform<TX::static_input_rank, RankConstraint::FromInlineRank(R)>>(
std::move(transform_ptr));
}
template <typename A, typename T>
inline Result<TransformedArrayTypeFromArrayAndTransform<
internal::remove_cvref_t<A>, internal::remove_cvref_t<T>>>
MakeTransformedArray(A&& array, T&& transform) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto composed_transform,
ComposeLayoutAndTransform(array.layout(), std::forward<T>(transform)));
return {std::in_place, std::forward<A>(array).element_pointer(),
std::move(composed_transform)};
}
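// Editor's note: illustrative sketch, not part of the original source.
// `MakeTransformedArray` composes the array's strided layout with an index
// transform whose output rank equals the array rank; offsets and strides of
// the composed transform are expressed in bytes.
//
//   auto arr = tensorstore::MakeArray<int>({{1, 2}, {3, 4}});
//   auto t = tensorstore::IdentityTransform(arr.domain());
//   auto ta = tensorstore::MakeTransformedArray(arr, t).value();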
template <ArrayOriginKind OriginKind = offset_origin, typename A>
inline std::enable_if_t<
IsTransformedArray<A>,
Result<SharedArray<std::remove_const_t<typename A::Element>, A::static_rank,
OriginKind>>>
MakeCopy(const A& transformed_array, IterationConstraints constraints = {
c_order, include_repeated_elements}) {
return MakeCopy<OriginKind>(transformed_array.base_array(),
transformed_array.transform(), constraints);
}
namespace internal_index_space {
absl::Status CopyTransformedArrayImpl(TransformedArrayView<const void> source,
TransformedArrayView<void> dest);
}
template <typename SourceResult, typename DestResult>
std::enable_if_t<(IsTransformedArrayLike<UnwrapResultType<SourceResult>> &&
IsTransformedArrayLike<UnwrapResultType<DestResult>>),
absl::Status>
CopyTransformedArray(const SourceResult& source, const DestResult& dest) {
using Source = UnwrapResultType<SourceResult>;
using Dest = UnwrapResultType<DestResult>;
static_assert(RankConstraint::EqualOrUnspecified(Dest::static_rank,
Source::static_rank),
"Arrays must have compatible ranks.");
static_assert(!std::is_const_v<typename Dest::Element>,
"Dest array must have a non-const element type.");
if constexpr (IsResult<SourceResult>) {
if (!source.ok()) return source.status();
}
if constexpr (IsResult<DestResult>) {
if (!dest.ok()) return dest.status();
}
return internal_index_space::CopyTransformedArrayImpl(UnwrapResult(source),
UnwrapResult(dest));
}
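// Editor's note: illustrative sketch, not part of the original source.
// Either argument may be a bare array, a transformed array, or a `Result`
// of one; error states propagate, and element types are converted where a
// conversion exists (e.g. int32 -> float32, per the unit tests).
//
//   auto src = tensorstore::MakeArray<int32_t>({1, 2, 3});
//   auto dst = tensorstore::AllocateArray<float>({3});
//   absl::Status status = CopyTransformedArray(src, dst);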
template <typename Expr, typename T>
internal_index_space::EnableIfTransformedArrayMapTransformResultType<
IsTransformedArray<internal::remove_cvref_t<T>>,
internal::remove_cvref_t<T>, Expr>
ApplyIndexTransform(Expr&& expr, T&& t) {
return internal_index_space::TransformedArrayMapTransform(
std::forward<T>(t), std::forward<Expr>(expr));
}
template <typename Expr, typename T>
internal_index_space::EnableIfTransformedArrayMapTransformResultType<
    IsArray<internal::remove_cvref_t<T>>,
    TransformedArrayTypeFromArray<internal::remove_cvref_t<T>>, Expr>
ApplyIndexTransform(Expr&& expr, T&& t) {
  // Editor's note: the source row was truncated mid-identifier at this
  // point; this overload is reconstructed from the parallel overload above
  // and is an inference, not verbatim source. Any further declarations and
  // the corresponding .cc implementation are not recoverable here.
  return internal_index_space::TransformedArrayMapTransform(
      TransformedArray(std::forward<T>(t)), std::forward<Expr>(expr));
}
}
#endif | #include "tensorstore/index_space/transformed_array.h"
#include <stddef.h>
#include <stdint.h>
#include <random>
#include <type_traits>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/array_testutil.h"
#include "tensorstore/box.h"
#include "tensorstore/container_kind.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/index_transform_testutil.h"
#include "tensorstore/index_space/transform_array_constraints.h"
#include "tensorstore/internal/testing/random_seed.h"
#include "tensorstore/rank.h"
#include "tensorstore/static_cast.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::dynamic_rank;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::kImplicit;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::MakeArray;
using ::tensorstore::MakeOffsetArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Result;
using ::tensorstore::Shared;
using ::tensorstore::StaticDataTypeCast;
using ::tensorstore::StaticRankCast;
using ::tensorstore::TransformedArray;
using ::tensorstore::dtypes::float32_t;
static_assert(std::is_convertible_v<tensorstore::TransformedSharedArray<int, 1>,
tensorstore::TransformedArrayView<int, 1>>);
static_assert(
!std::is_convertible_v<tensorstore::TransformedArrayView<int, 1>,
tensorstore::TransformedSharedArray<int, 1>>);
static_assert(std::is_convertible_v<tensorstore::TransformedArrayView<int, 1>,
tensorstore::TransformedArray<int, 1>>);
static_assert(
std::is_same_v<typename tensorstore::TransformedArrayView<int, 1>::
template RebindContainerKind<tensorstore::container>,
tensorstore::TransformedArray<int, 1>>);
static_assert(tensorstore::HasBoxDomain<tensorstore::TransformedArray<int, 1>>);
template <typename TA>
std::vector<const typename TA::Element*> GetPointers(const TA& a) {
using Element = const typename TA::Element;
std::vector<Element*> pointers;
auto result = IterateOverTransformedArrays(
[&](Element* x) { pointers.push_back(x); },
tensorstore::skip_repeated_elements, a);
EXPECT_TRUE(result);
return pointers;
}
using TransformedArrayTestTypes =
::testing::Types<tensorstore::TransformedSharedArray<int>,
tensorstore::TransformedSharedArray<int, 1>>;
template <typename T>
class TransformedArrayConstructorTest : public ::testing::Test {};
TYPED_TEST_SUITE(TransformedArrayConstructorTest, TransformedArrayTestTypes);
template <typename TransformedArrayType, typename SourceArray>
void TestCopyAndMove(SourceArray&& source,
std::vector<const int*> expected_pointers) {
{
TransformedArrayType tb(source);
EXPECT_EQ(GetBoxDomainOf(source), GetBoxDomainOf(tb));
EXPECT_EQ(expected_pointers, GetPointers(tb));
}
{
auto source_copy = source;
TransformedArrayType tc(std::move(source_copy));
EXPECT_EQ(GetBoxDomainOf(source), GetBoxDomainOf(tc));
EXPECT_EQ(expected_pointers, GetPointers(tc));
}
{
TransformedArrayType td;
td = source;
EXPECT_EQ(GetBoxDomainOf(source), GetBoxDomainOf(td));
EXPECT_EQ(expected_pointers, GetPointers(td));
}
{
auto source_copy = source;
TransformedArrayType td;
td = std::move(source_copy);
EXPECT_EQ(expected_pointers, GetPointers(td));
EXPECT_EQ(GetBoxDomainOf(source), GetBoxDomainOf(td));
}
}
TYPED_TEST(TransformedArrayConstructorTest, DefaultConstruct) {
TypeParam ta;
EXPECT_FALSE(ta.transform());
EXPECT_EQ(nullptr, ta.element_pointer());
}
template <typename TransformedArrayType, typename Array>
void TestConstructFromArray(Array&& array,
std::vector<const int*> expected_pointers) {
auto array_copy = array;
TransformedArrayType ta(std::forward<Array>(array));
EXPECT_EQ(array_copy.domain(), ta.domain().box());
EXPECT_EQ(array_copy.domain(), GetBoxDomainOf(ta));
auto pointers = GetPointers(ta);
EXPECT_EQ(expected_pointers, pointers);
TestCopyAndMove<TransformedArrayType>(ta, expected_pointers);
TestCopyAndMove<typename TransformedArrayType::template RebindContainerKind<
tensorstore::container>>(ta, expected_pointers);
}
TYPED_TEST(TransformedArrayConstructorTest, ConstructFromZeroOriginArray) {
auto a = MakeArray<int>({1, 2, 3});
const std::vector<const int*> expected_pointers{&a(0), &a(1), &a(2)};
TestConstructFromArray<TypeParam>(a, expected_pointers);
TestConstructFromArray<TypeParam>(tensorstore::SharedArrayView<int, 1>(a),
expected_pointers);
}
TYPED_TEST(TransformedArrayConstructorTest, ConstructFromOffsetOriginArray) {
auto a = MakeOffsetArray<int>({3}, {1, 2, 3});
const std::vector<const int*> expected_pointers{&a(3), &a(4), &a(5)};
TestConstructFromArray<TypeParam>(a, expected_pointers);
TestConstructFromArray<TypeParam>(
tensorstore::SharedOffsetArrayView<int, 1>(a), expected_pointers);
}
template <typename TransformedArrayType, typename ElementPointer,
typename Transform>
void TestConstructFromElementPointerAndTransform(
ElementPointer&& element_pointer, Transform&& transform,
std::vector<const int*> expected_pointers) {
auto element_pointer_copy = element_pointer;
auto transform_copy = transform;
TransformedArrayType ta(std::forward<ElementPointer>(element_pointer),
std::forward<Transform>(transform));
EXPECT_EQ(GetBoxDomainOf(transform_copy), GetBoxDomainOf(ta));
EXPECT_EQ(transform_copy, ta.transform());
EXPECT_EQ(element_pointer_copy, ta.element_pointer());
auto pointers = GetPointers(ta);
EXPECT_EQ(expected_pointers, pointers);
TestCopyAndMove<TransformedArrayType>(ta, expected_pointers);
TestCopyAndMove<typename TransformedArrayType::template RebindContainerKind<
tensorstore::container>>(ta, expected_pointers);
}
TYPED_TEST(TransformedArrayConstructorTest,
ConstructFromElementPointerAndTransform) {
auto a = MakeArray<int>({1, 2, 3});
const std::vector<const int*> expected_pointers{&a(0), &a(1), &a(2)};
auto t = tensorstore::IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({3})
.output_single_input_dimension(0, 0, sizeof(int), 0)
.Finalize()
.value();
TestConstructFromElementPointerAndTransform<TypeParam>(a.element_pointer(), t,
expected_pointers);
auto element_pointer = a.element_pointer();
auto t_copy = t;
TestConstructFromElementPointerAndTransform<TypeParam>(
std::move(element_pointer), std::move(t_copy), expected_pointers);
tensorstore::IndexTransformView<1, 1> t_view = t;
TestConstructFromElementPointerAndTransform<TypeParam>(
a.element_pointer(), t_view, expected_pointers);
}
TEST(TransformedArrayTest, Array) {
auto a = MakeOffsetArray<int>({3}, {1, 2, 3});
auto ta = tensorstore::TransformedArray(a);
static_assert(std::is_same_v<decltype(ta),
tensorstore::TransformedSharedArray<int, 1>>);
auto a_copy = a;
EXPECT_EQ(3, a.pointer().use_count());
auto tb = tensorstore::TransformedArray(std::move(a_copy));
static_assert(std::is_same_v<decltype(tb),
tensorstore::TransformedSharedArray<int, 1>>);
EXPECT_EQ(3, a.pointer().use_count());
EXPECT_FALSE(a_copy.valid());
}
TEST(TransformedArrayTest, TransformedArray) {
auto a = MakeOffsetArray<int>({3}, {1, 2, 3});
auto ta = tensorstore::TransformedArray(a);
auto tb = tensorstore::TransformedArray(ta);
static_assert(std::is_same_v<decltype(tb),
tensorstore::TransformedSharedArray<int, 1>>);
auto ta_copy = ta;
EXPECT_EQ(4, a.pointer().use_count());
auto tc = tensorstore::TransformedArray(std::move(ta_copy));
static_assert(std::is_same_v<decltype(tc),
tensorstore::TransformedSharedArray<int, 1>>);
EXPECT_EQ(a.element_pointer(), tc.element_pointer());
EXPECT_EQ(4, a.pointer().use_count());
EXPECT_FALSE(ta_copy.element_pointer());
}
TEST(TransformedArrayTest, MapTransform) {
auto array = MakeArray<int>({1, 2, 3});
tensorstore::TransformedArray<int, 1> tarray(array);
auto tarray2 =
ChainResult(tarray, tensorstore::Dims(0).SizedInterval(1, 2)).value();
EXPECT_EQ(MakeOffsetArray<int>({1}, {2, 3}), tarray2.Materialize().value());
}
TEST(TransformedArrayTest, ArrayAndTransform) {
auto a = MakeOffsetArray<int>({3}, {1, 2, 3});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto t, (tensorstore::IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({3})
.input_labels({"a"})
.output_single_input_dimension(0, 3, 1, 0)
.Finalize()));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto ta,
tensorstore::MakeTransformedArray(a, t));
static_assert(std::is_same_v<decltype(ta),
tensorstore::TransformedSharedArray<int, 1>>);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto expected_transform, (tensorstore::IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({3})
.input_labels({"a"})
.output_single_input_dimension(
0, 3 * sizeof(int), 1 * sizeof(int), 0)
.Finalize()));
EXPECT_EQ(expected_transform, ta.transform());
}
TEST(TransformedArrayTest, DimExpression) {
auto a = MakeOffsetArray<int>({10, 20}, {{1, 2, 3}, {4, 5, 6}});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto ta, a |
tensorstore::Dims(0, 1).IndexVectorArraySlice(
MakeArray<Index>({{10, 22}, {11, 21}, {11, 22}})) |
tensorstore::Dims(0).Label("a"));
EXPECT_EQ(ta.transform(),
(tensorstore::IndexTransformBuilder<1, 2>()
.input_origin({0})
.input_shape({3})
.input_labels({"a"})
.output_index_array(0, 0, sizeof(int) * 3,
MakeArray<Index>({10, 11, 11}),
IndexInterval::Sized(10, 2))
.output_index_array(1, 0, sizeof(int),
MakeArray<Index>({22, 21, 22}),
IndexInterval::Sized(20, 3))
.Finalize()
.value()));
EXPECT_EQ(a.element_pointer(), ta.element_pointer());
EXPECT_EQ(ta.domain().box(), tensorstore::BoxView<1>({3}));
}
TEST(TransformedArrayTest, MaterializeWithOffsetOrigin) {
EXPECT_EQ(MakeOffsetArray<int>({2}, {3, 5, 6}),
ChainResult(MakeOffsetArray<int>({10, 20}, {{1, 2, 3}, {4, 5, 6}}),
tensorstore::Dims(0, 1)
.IndexVectorArraySlice(MakeArray<Index>(
{{10, 22}, {11, 21}, {11, 22}}))
.TranslateTo(2))
.value()
.Materialize());
}
TEST(TransformedArrayTest, MaterializeWithZeroOrigin) {
EXPECT_EQ(MakeArray<int>({3, 5, 6}),
ChainResult(MakeOffsetArray<int>({10, 20}, {{1, 2, 3}, {4, 5, 6}}),
tensorstore::Dims(0, 1)
.IndexVectorArraySlice(MakeArray<Index>(
{{10, 22}, {11, 21}, {11, 22}}))
.TranslateTo(2))
.value()
.template Materialize<tensorstore::zero_origin>()
.value());
}
TEST(TransformedArrayTest, MaterializeConstraints) {
auto array = MakeOffsetArray<int>({2, 3}, {{3, 4, 5}, {6, 7, 8}});
auto transformed_array =
ChainResult(array,
tensorstore::Dims(1)
.ClosedInterval(kImplicit, kImplicit, 2)
.MoveToFront(),
tensorstore::Dims(2).AddNew().SizedInterval(5, 3))
.value();
auto expected_array = MakeOffsetArray<int>(
{1, 2, 5}, {{{3, 3, 3}, {6, 6, 6}}, {{5, 5, 5}, {8, 8, 8}}});
{
auto new_array = transformed_array.Materialize().value();
EXPECT_EQ(GetPointers(transformed_array), GetPointers(new_array));
}
const auto ValidateCopy =
[&](const Result<tensorstore::SharedOffsetArray<const int, 3>>& new_array,
const std::vector<Index>& expected_byte_strides) {
TENSORSTORE_ASSERT_OK(new_array);
EXPECT_NE(GetPointers(transformed_array), GetPointers(*new_array));
EXPECT_EQ(expected_array, *new_array);
EXPECT_THAT(new_array->byte_strides(),
::testing::ElementsAreArray(expected_byte_strides));
};
const auto TestCopyAndMaterialize =
[&](tensorstore::TransformArrayConstraints constraints,
std::vector<Index> expected_byte_strides) {
SCOPED_TRACE(tensorstore::StrCat("TestCopyAndMaterialize: constraints=",
constraints.value()));
{
SCOPED_TRACE("Materialize");
auto new_array = transformed_array.Materialize(constraints);
static_assert(std::is_same_v<
decltype(new_array),
Result<tensorstore::SharedOffsetArray<const int, 3>>>);
ValidateCopy(new_array, expected_byte_strides);
}
{
SCOPED_TRACE("MakeCopy");
auto new_array =
MakeCopy(transformed_array, constraints.iteration_constraints());
static_assert(
std::is_same_v<decltype(new_array),
Result<tensorstore::SharedOffsetArray<int, 3>>>);
ValidateCopy(new_array, expected_byte_strides);
}
};
TestCopyAndMaterialize(
{tensorstore::skip_repeated_elements, tensorstore::must_allocate},
{sizeof(int), sizeof(int) * 2, 0});
TestCopyAndMaterialize(
{tensorstore::c_order, tensorstore::skip_repeated_elements,
tensorstore::must_allocate},
{sizeof(int) * 2, sizeof(int), 0});
TestCopyAndMaterialize(
{tensorstore::fortran_order, tensorstore::skip_repeated_elements,
tensorstore::must_allocate},
{sizeof(int), sizeof(int) * 2, 0});
TestCopyAndMaterialize(
{tensorstore::fortran_order, tensorstore::include_repeated_elements,
tensorstore::must_allocate},
{sizeof(int), sizeof(int) * 2, sizeof(int) * 2 * 2});
TestCopyAndMaterialize(
{tensorstore::c_order, tensorstore::include_repeated_elements,
tensorstore::must_allocate},
{sizeof(int) * 2 * 3, sizeof(int) * 3, sizeof(int)});
}
TEST(TransformedArrayTest, MaterializeError) {
EXPECT_THAT(
ChainResult(MakeArray<int>({1, 2}), tensorstore::Dims(0).IndexArraySlice(
MakeArray<Index>({3, 4})))
.value()
.Materialize(),
MatchesStatus(absl::StatusCode::kOutOfRange));
}
TEST(TransformedArrayTest, MakeCopy) {
EXPECT_THAT(MakeCopy(ChainResult(MakeArray<int>({1, 2}),
tensorstore::Dims(0).IndexArraySlice(
MakeArray<Index>({3, 4})))
.value()),
MatchesStatus(absl::StatusCode::kOutOfRange));
}
TEST(TransformedArrayTest, MoveConstructViewFromContainer) {
MapResult(
[](tensorstore::TransformedSharedArrayView<const void> x) {
EXPECT_EQ(tensorstore::BoxView({2, 3}, {2, 2}), GetBoxDomainOf(x));
return absl::OkStatus();
},
tensorstore::MakeTransformedArray(
tensorstore::MakeOffsetArray<int>({2, 3}, {{1, 2}, {3, 4}}),
tensorstore::IdentityTransform(tensorstore::BoxView({2, 3}, {2, 2}))))
.value();
}
TEST(ComposeLayoutAndTransformTest, NoTransform) {
tensorstore::StridedLayout<tensorstore::dynamic_rank,
tensorstore::offset_origin>
layout({1, 2}, {3, 4}, {5, 6});
auto transform = tensorstore::ComposeLayoutAndTransform(
layout, tensorstore::IndexTransform<>())
.value();
EXPECT_EQ(transform, tensorstore::IndexTransformBuilder<>(2, 2)
.input_origin({1, 2})
.input_shape({3, 4})
.output_single_input_dimension(0, 0, 5, 0)
.output_single_input_dimension(1, 0, 6, 1)
.Finalize()
.value());
}
TEST(ComposeLayoutAndTransformTest, ExistingTransform) {
tensorstore::StridedLayout<tensorstore::dynamic_rank,
tensorstore::offset_origin>
layout({1, 2}, {3, 4}, {5, 6});
auto transform = tensorstore::ComposeLayoutAndTransform(
layout, tensorstore::IndexTransformBuilder<>(2, 2)
.input_origin({11, 12})
.input_shape({3, 2})
.input_labels({"x", "y"})
.output_single_input_dimension(0, -10, 1, 0)
.output_single_input_dimension(1, -22, 2, 1)
.Finalize()
.value())
.value();
EXPECT_EQ(transform, tensorstore::IndexTransformBuilder<>(2, 2)
.input_origin({11, 12})
.input_shape({3, 2})
.input_labels({"x", "y"})
.output_single_input_dimension(0, -10 * 5, 1 * 5, 0)
.output_single_input_dimension(1, -22 * 6, 2 * 6, 1)
.Finalize()
.value());
}
TEST(ComposeLayoutAndTransformTest, RankMismatch) {
tensorstore::StridedLayout<tensorstore::dynamic_rank,
tensorstore::offset_origin>
layout({1, 2}, {3, 4}, {5, 6});
EXPECT_THAT(tensorstore::ComposeLayoutAndTransform(
layout, tensorstore::IdentityTransform(3)),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Transform output rank \\(3\\) does not equal "
"array rank \\(2\\)"));
}
TEST(MakeTransformedArrayTest, TwoArgumentBaseArrayAndTransform) {
auto array = MakeOffsetArray<int>({2, 3}, {{3, 4, 5}, {6, 7, 8}});
auto t = tensorstore::IndexTransformBuilder<1, 2>()
.implicit_lower_bounds({1})
.implicit_upper_bounds({1})
.output_single_input_dimension(0, 1, 1, 0)
.output_single_input_dimension(1, 2, 1, 0)
.Finalize()
.value();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto ta,
tensorstore::MakeTransformedArray(array, t));
EXPECT_EQ(array.element_pointer(), ta.element_pointer());
EXPECT_EQ(
tensorstore::IndexTransformBuilder<>(1, 2)
.input_origin({1})
.input_shape({2})
.output_single_input_dimension(0, sizeof(int) * 3, sizeof(int) * 3, 0)
.output_single_input_dimension(1, sizeof(int) * 2, sizeof(int), 0)
.Finalize()
.value(),
ta.transform());
}
TEST(GetUnboundedLayoutTest, Basic) {
EXPECT_EQ((tensorstore::StridedLayout<tensorstore::dynamic_rank,
tensorstore::offset_origin>(
{-kInfIndex, -kInfIndex}, {kInfSize, kInfSize}, {1, 1})),
tensorstore::internal_index_space::GetUnboundedLayout(2));
}
TEST(TransformedArrayTest, StaticDataTypeCast) {
TransformedArray<int32_t, 1> ta_orig = MakeArray<int32_t>({3, 4});
TransformedArray<void, 1> ta = ta_orig;
auto ta_int = StaticDataTypeCast<int32_t>(ta);
static_assert(
std::is_same_v<decltype(ta_int), Result<TransformedArray<int, 1>>>);
ASSERT_TRUE(ta_int);
EXPECT_THAT(GetPointers(*ta_int),
::testing::ElementsAreArray(GetPointers(ta_orig)));
}
TEST(TransformedArrayTest, CastArrayToTransformedArray) {
tensorstore::SharedArray<int32_t> a = MakeArray<int32_t>({1, 2});
auto ta_result =
tensorstore::StaticCast<tensorstore::TransformedArrayView<int32_t, 1>>(a);
TENSORSTORE_ASSERT_OK(ta_result);
EXPECT_THAT(GetPointers(*ta_result), ::testing::ElementsAre(&a(0), &a(1)));
}
TEST(TransformedArrayTest, StaticDataTypeCastShared) {
auto ta_orig = tensorstore::TransformedArray(MakeArray<int32_t>({3, 4}));
TransformedArray<Shared<void>, 1> ta = ta_orig;
auto ta_int = StaticDataTypeCast<int32_t>(ta);
static_assert(std::is_same_v<decltype(ta_int),
Result<TransformedArray<Shared<int32_t>, 1>>>);
ASSERT_TRUE(ta_int);
EXPECT_THAT(GetPointers(*ta_int),
::testing::ElementsAreArray(GetPointers(ta_orig)));
}
TEST(TransformedArrayTest, StaticRankCast) {
TransformedArray<Shared<int32_t>, dynamic_rank> ta =
MakeArray<int32_t>({3, 4});
auto ta1 = StaticRankCast<1>(ta);
static_assert(std::is_same_v<decltype(ta1),
Result<TransformedArray<Shared<int32_t>, 1>>>);
ASSERT_TRUE(ta1);
EXPECT_THAT(GetPointers(*ta1), ::testing::ElementsAreArray(GetPointers(ta)));
EXPECT_THAT(
StaticRankCast<2>(ta),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Cannot cast transformed array with data type of int32 and rank of 1 "
"to transformed array with data type of int32 and rank of 2"));
}
TEST(TransformedArrayTest, ApplyIndexTransform) {
auto array = MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
auto result = ChainResult(array, tensorstore::IdentityTransform<2>());
TENSORSTORE_ASSERT_OK(result);
EXPECT_EQ(array, MakeCopy(*result));
}
TEST(CopyTransformedArrayTest, Int32ToUint32) {
auto a = MakeArray<int32_t>({{1, 2, 3}, {4, 5, 6}});
auto b = tensorstore::AllocateArray<uint32_t>({3, 2});
EXPECT_EQ(absl::OkStatus(),
CopyTransformedArray(
a, ChainResult(b, tensorstore::Dims(1, 0).Transpose())));
EXPECT_EQ(b, MakeArray<uint32_t>({{1, 4}, {2, 5}, {3, 6}}));
}
TEST(CopyTransformedArrayTest, Int32ToInt32) {
auto a = MakeArray<int32_t>({{1, 2, 3}, {4, 5, 6}});
auto b = tensorstore::AllocateArray<int32_t>({3, 2});
EXPECT_EQ(absl::OkStatus(),
CopyTransformedArray(
a, ChainResult(b, tensorstore::Dims(1, 0).Transpose())));
EXPECT_EQ(b, MakeArray<int32_t>({{1, 4}, {2, 5}, {3, 6}}));
}
TEST(CopyTransformedArrayTest, Int32ToFloat32) {
auto a = MakeArray<int32_t>({{1, 2, 3}, {4, 5, 6}});
auto b = tensorstore::AllocateArray<float32_t>({3, 2});
EXPECT_EQ(absl::OkStatus(),
CopyTransformedArray(
ChainResult(a, tensorstore::Dims(1, 0).Transpose()), b));
EXPECT_EQ(b, MakeArray<float32_t>({{1.0, 4.0}, {2.0, 5.0}, {3.0, 6.0}}));
}
TEST(CopyTransformedArrayTest, InvalidDataType) {
auto a = MakeArray<::tensorstore::dtypes::string_t>({"x", "y"});
auto b = tensorstore::AllocateArray<float32_t>({2});
EXPECT_THAT(CopyTransformedArray(a, b),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot convert string -> float32"));
}
TEST(TransformedArrayTest, UnownedToShared) {
auto a = MakeArray<int>({1, 2, 3});
TransformedArray<int> ta = a;
auto shared_ta = UnownedToShared(ta);
static_assert(
std::is_same_v<decltype(shared_ta), TransformedArray<Shared<int>>>);
}
TEST(TransformedArrayTest, UnownedToSharedAliasing) {
auto a = MakeArray<int>({1, 2, 3});
TransformedArray<int> ta = a;
EXPECT_EQ(1, a.pointer().use_count());
{
auto shared_ta = UnownedToShared(a.pointer(), ta);
EXPECT_EQ(2, a.pointer().use_count());
static_assert(
std::is_same_v<decltype(shared_ta), TransformedArray<Shared<int>>>);
auto shared_ta_copy = UnownedToShared(shared_ta);
static_assert(
std::is_same_v<decltype(shared_ta), TransformedArray<Shared<int>>>);
EXPECT_EQ(3, a.pointer().use_count());
}
EXPECT_EQ(1, a.pointer().use_count());
}
TEST(TryConvertToArrayTest, Basic) {
auto array = tensorstore::AllocateArray<int32_t>({2, 3}, tensorstore::c_order,
tensorstore::value_init);
EXPECT_THAT(array | tensorstore::IdentityTransform<2>() |
tensorstore::TryConvertToArray(),
::testing::Optional(tensorstore::ReferencesSameDataAs(array)));
EXPECT_THAT(array | tensorstore::Dims(0).IndexSlice(1) |
tensorstore::TryConvertToArray(),
::testing::Optional(tensorstore::ReferencesSameDataAs(array[1])));
EXPECT_THAT(array | tensorstore::Dims(0).TranslateTo(1) |
tensorstore::TryConvertToArray<tensorstore::zero_origin>(),
::testing::Optional(tensorstore::ReferencesSameDataAs(array)));
EXPECT_THAT(array |
tensorstore::Dims(0).OuterIndexArraySlice(
tensorstore::MakeArray<Index>({0, 1, 1})) |
tensorstore::TryConvertToArray(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(TryConvertToArrayTest, Random) {
tensorstore::SharedArray<const void> array =
tensorstore::AllocateArray<int32_t>({2, 3}, tensorstore::c_order,
tensorstore::value_init);
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_INTERNAL_VIEW_AS_ARRAY")};
constexpr size_t kNumIterations = 10;
for (size_t iter_i = 0; iter_i < kNumIterations; ++iter_i) {
tensorstore::internal::MakeStridedIndexTransformForOutputSpaceParameters p;
p.max_stride = 2;
auto transform =
tensorstore::internal::MakeRandomStridedIndexTransformForOutputSpace(
gen, tensorstore::IndexDomain<>(array.domain()), p);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto materialized_zero_origin,
array | transform |
tensorstore::Materialize<tensorstore::zero_origin>());
EXPECT_THAT(array | transform |
tensorstore::TryConvertToArray<tensorstore::zero_origin>(),
::testing::Optional(materialized_zero_origin));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto materialized_offset_origin,
array | transform | tensorstore::Materialize());
EXPECT_THAT(array | transform | tensorstore::TryConvertToArray(),
::testing::Optional(materialized_offset_origin));
}
}
} |
548 | cpp | google/tensorstore | dimension_units | tensorstore/index_space/dimension_units.cc | tensorstore/index_space/dimension_units_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_DIMENSION_UNITS_H_
#define TENSORSTORE_INDEX_SPACE_DIMENSION_UNITS_H_
#include <optional>
#include <vector>
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/unit.h"
namespace tensorstore {
using DimensionUnitsVector = std::vector<std::optional<Unit>>;
std::string DimensionUnitsToString(span<const std::optional<Unit>> u);
absl::Status MergeDimensionUnits(DimensionUnitsVector& existing_units,
span<const std::optional<Unit>> new_units);
Result<DimensionUnitsVector> TransformInputDimensionUnits(
IndexTransformView<> transform, DimensionUnitsVector input_units);
DimensionUnitsVector TransformOutputDimensionUnits(
IndexTransformView<> transform, DimensionUnitsVector output_units);
}
#endif
#include "tensorstore/index_space/dimension_units.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
Result<DimensionUnitsVector> TransformInputDimensionUnits(
IndexTransformView<> transform, DimensionUnitsVector input_units) {
if (!transform.valid()) return input_units;
const DimensionIndex input_rank = transform.input_rank(),
output_rank = transform.output_rank();
assert(input_units.size() == input_rank);
std::optional<Unit> output_units[kMaxRank];
DimensionSet seen_input_dims;
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
const auto map = transform.output_index_maps()[output_dim];
if (map.method() != OutputIndexMethod::single_input_dimension) continue;
const Index stride = map.stride();
if (stride == 0) continue;
const DimensionIndex input_dim = map.input_dimension();
const auto& input_unit = input_units[input_dim];
if (!input_unit) continue;
seen_input_dims[input_dim] = true;
auto& output_unit = output_units[output_dim];
output_unit = input_unit;
*output_unit /= std::abs(static_cast<double>(stride));
}
for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
if (!input_units[input_dim] || seen_input_dims[input_dim]) continue;
return absl::InvalidArgumentError(tensorstore::StrCat(
"No output dimension corresponds to input dimension ", input_dim,
" with unit ", *input_units[input_dim]));
}
input_units.resize(output_rank);
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
input_units[output_dim] = std::move(output_units[output_dim]);
}
return input_units;
}
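// Editor's note: illustrative sketch, not part of the original source
// (`transform` stands for any valid transform containing a stride-2
// single_input_dimension map). Input units are divided by the absolute
// output stride, so an input unit of "4 nm" yields an output unit of
// "2 nm"; an input unit with no corresponding output dimension is an
// error, as implemented above.
//
//   DimensionUnitsVector input_units{Unit("4nm")};
//   auto output_units = TransformInputDimensionUnits(transform, input_units);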
DimensionUnitsVector TransformOutputDimensionUnits(
IndexTransformView<> transform, DimensionUnitsVector output_units) {
if (!transform.valid()) return output_units;
const DimensionIndex input_rank = transform.input_rank(),
output_rank = transform.output_rank();
assert(output_units.size() == output_rank);
DimensionSet one_to_one_input_dims =
internal::GetOneToOneInputDimensions(transform).one_to_one;
std::optional<Unit> input_units[kMaxRank];
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
const auto& output_unit = output_units[output_dim];
if (!output_unit) continue;
const auto map = transform.output_index_maps()[output_dim];
if (map.method() != OutputIndexMethod::single_input_dimension) continue;
const Index stride = map.stride();
if (stride == 0) continue;
const DimensionIndex input_dim = map.input_dimension();
if (!one_to_one_input_dims[input_dim]) continue;
auto& input_unit = input_units[input_dim];
input_unit = output_unit;
*input_unit *= std::abs(static_cast<double>(stride));
}
output_units.resize(input_rank);
for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
output_units[input_dim] = std::move(input_units[input_dim]);
}
return output_units;
}
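// Editor's note: illustrative sketch, not part of the original source.
// The output-to-input direction multiplies by the stride instead, and only
// propagates units through output dimensions that map one-to-one onto input
// dimensions: an output unit of "4 nm" through a stride-2 map becomes an
// input unit of "8 nm" (`transform` again stands for any such transform).
//
//   DimensionUnitsVector output_units{Unit("4nm")};
//   auto input_units = TransformOutputDimensionUnits(transform, output_units);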
absl::Status MergeDimensionUnits(DimensionUnitsVector& existing_units,
span<const std::optional<Unit>> new_units) {
assert(existing_units.empty() || existing_units.size() == new_units.size());
existing_units.resize(new_units.size());
for (size_t i = 0; i < new_units.size(); ++i) {
auto& existing_unit = existing_units[i];
auto& new_unit = new_units[i];
if (!new_unit) continue;
if (existing_unit && existing_unit != new_unit) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Cannot merge dimension units ", DimensionUnitsToString(new_units),
" and ", DimensionUnitsToString(existing_units)));
}
}
for (size_t i = 0; i < new_units.size(); ++i) {
auto& existing_unit = existing_units[i];
auto& new_unit = new_units[i];
if (!new_unit || existing_unit) continue;
existing_unit = new_unit;
}
return absl::OkStatus();
}
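// Editor's note: illustrative sketch, not part of the original source,
// mirroring the unit tests: merging fills in unset units and rejects
// conflicting ones.
//
//   DimensionUnitsVector existing{std::nullopt, Unit("4nm")};
//   DimensionUnitsVector incoming{Unit("8nm"), std::nullopt};
//   absl::Status status = MergeDimensionUnits(existing, incoming);
//   // existing == {"8 nm", "4 nm"}; a conflicting non-null pair instead
//   // returns InvalidArgumentError and leaves assigned units unchanged.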
std::string DimensionUnitsToString(span<const std::optional<Unit>> u) {
std::string result = "[";
std::string_view sep = "";
for (const auto& unit : u) {
result += sep;
sep = ", ";
if (!unit) {
result += "null";
} else {
result += tensorstore::QuoteString(tensorstore::StrCat(*unit));
}
}
result += "]";
return result;
}
} | #include "tensorstore/index_space/dimension_units.h"
#include <stddef.h>
#include <iterator>
#include <optional>
#include <random>
#include <string>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/random/bit_gen_ref.h"
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/index_transform_testutil.h"
#include "tensorstore/internal/testing/random_seed.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/unit.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::DimensionUnitsToString;
using ::tensorstore::DimensionUnitsVector;
using ::tensorstore::MatchesStatus;
using ::tensorstore::MergeDimensionUnits;
using ::tensorstore::TransformInputDimensionUnits;
using ::tensorstore::TransformOutputDimensionUnits;
using ::tensorstore::Unit;
TEST(DimensionUnitsToStringTest, Basic) {
EXPECT_EQ("[null, \"4 nm\"]", DimensionUnitsToString(DimensionUnitsVector{
std::nullopt, Unit("4nm")}));
}
TEST(MergeDimensionUnitsTest, BothUnspecified) {
DimensionUnitsVector existing_units{std::nullopt, std::nullopt};
DimensionUnitsVector new_units{std::nullopt, std::nullopt};
TENSORSTORE_EXPECT_OK(MergeDimensionUnits(existing_units, new_units));
EXPECT_THAT(existing_units,
::testing::ElementsAre(std::nullopt, std::nullopt));
}
TEST(MergeDimensionUnitsTest, OneSpecifiedOneUnspecified) {
DimensionUnitsVector existing_units{std::nullopt, Unit("4nm")};
DimensionUnitsVector new_units{Unit("8nm"), std::nullopt};
TENSORSTORE_EXPECT_OK(MergeDimensionUnits(existing_units, new_units));
EXPECT_THAT(existing_units, ::testing::ElementsAre(Unit("8nm"), Unit("4nm")));
}
TEST(MergeDimensionUnitsTest, BothSpecifiedSame) {
DimensionUnitsVector existing_units{Unit("8nm"), Unit("4nm")};
DimensionUnitsVector new_units{Unit("8nm"), std::nullopt};
TENSORSTORE_EXPECT_OK(MergeDimensionUnits(existing_units, new_units));
EXPECT_THAT(existing_units, ::testing::ElementsAre(Unit("8nm"), Unit("4nm")));
}
TEST(MergeDimensionUnitsTest, BothSpecifiedDistinct) {
DimensionUnitsVector existing_units{std::nullopt, Unit("4nm")};
DimensionUnitsVector new_units{Unit("8nm"), Unit("5nm")};
EXPECT_THAT(
MergeDimensionUnits(existing_units, new_units),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot merge dimension units \\[\"8 nm\", \"5 nm\"\\] "
"and \\[null, \"4 nm\"\\]"));
EXPECT_THAT(existing_units,
::testing::ElementsAre(std::nullopt, Unit("4nm")));
}
std::optional<Unit> MakeRandomUnit(absl::BitGenRef gen) {
constexpr std::string_view kBaseUnits[] = {
"",
"nm",
"um",
};
if (absl::Bernoulli(gen, 0.2)) return std::nullopt;
const double multiplier = absl::Uniform<int>(gen, 5, 20);
const auto base_unit =
kBaseUnits[absl::Uniform<size_t>(gen, 0, std::size(kBaseUnits))];
return Unit(multiplier, std::string(base_unit));
}
DimensionUnitsVector MakeRandomDimensionUnits(DimensionIndex rank,
absl::BitGenRef gen) {
DimensionUnitsVector units(rank);
for (auto& unit : units) {
unit = MakeRandomUnit(gen);
}
return units;
}
TEST(TransformOutputDimensionUnitsTest, InvertibleRoundTrip) {
constexpr size_t kNumIterations = 100;
for (size_t i = 0; i < kNumIterations; ++i) {
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_INTERNAL_TRANSFORM_DIMENSION_UNITS_TEST_SEED")};
auto box = tensorstore::internal::MakeRandomBox(gen);
auto domain = tensorstore::IndexDomain(box);
auto transform =
tensorstore::internal::MakeRandomStridedIndexTransformForOutputSpace(
gen, domain);
auto output_units = MakeRandomDimensionUnits(domain.rank(), gen);
auto input_units = TransformOutputDimensionUnits(transform, output_units);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto inv_transform,
InverseTransform(transform));
EXPECT_THAT(TransformInputDimensionUnits(transform, input_units),
::testing::Optional(::testing::ElementsAreArray(output_units)));
EXPECT_THAT(TransformOutputDimensionUnits(inv_transform, input_units),
::testing::ElementsAreArray(output_units));
}
}
TEST(TransformOutputDimensionUnitsTest, StridedNonInvertibleRoundTrip) {
constexpr size_t kNumIterations = 100;
for (size_t i = 0; i < kNumIterations; ++i) {
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_INTERNAL_TRANSFORM_DIMENSION_UNITS_TEST_SEED")};
auto box = tensorstore::internal::MakeRandomBox(gen);
auto domain = tensorstore::IndexDomain(box);
tensorstore::internal::MakeStridedIndexTransformForOutputSpaceParameters p;
p.max_stride = 4;
auto transform =
tensorstore::internal::MakeRandomStridedIndexTransformForOutputSpace(
gen, domain, p);
auto output_units = MakeRandomDimensionUnits(domain.rank(), gen);
auto input_units = TransformOutputDimensionUnits(transform, output_units);
EXPECT_THAT(TransformInputDimensionUnits(transform, input_units),
::testing::Optional(::testing::ElementsAreArray(output_units)));
}
}
TEST(TransformInputDimensionUnitsTest, NoCorrespondingOutputDimension) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
tensorstore::IndexTransformBuilder(1, 0).Finalize());
DimensionUnitsVector input_units{"4nm"};
EXPECT_THAT(TransformInputDimensionUnits(transform, input_units),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"No output dimension corresponds to "
"input dimension 0 with unit 4 nm"));
}
TEST(TransformOutputDimensionUnitsTest, NonUnique) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
tensorstore::IndexTransformBuilder(2, 3)
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 0)
.output_single_input_dimension(2, 1)
.Finalize());
DimensionUnitsVector output_units{"4nm", "5nm", "6nm"};
EXPECT_THAT(TransformOutputDimensionUnits(transform, output_units),
::testing::ElementsAre(std::nullopt, Unit("6nm")));
}
} |
549 | cpp | google/tensorstore | json | tensorstore/serialization/json.cc | tensorstore/serialization/json_test.cc | #ifndef TENSORSTORE_UTIL_GARBAGE_COLLECTION_JSON_H_
#define TENSORSTORE_UTIL_GARBAGE_COLLECTION_JSON_H_
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/util/garbage_collection/fwd.h"
TENSORSTORE_DECLARE_GARBAGE_COLLECTION_NOT_REQUIRED(::nlohmann::json)
#endif
#include "tensorstore/serialization/json.h"
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/riegeli/json_input.h"
#include "tensorstore/internal/riegeli/json_output.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace serialization {
bool Serializer<::nlohmann::json>::Encode(EncodeSink& sink,
const ::nlohmann::json& value) {
return internal::WriteCbor(sink.writer(), value);
}
bool Serializer<::nlohmann::json>::Decode(DecodeSource& source,
::nlohmann::json& value) {
return internal::ReadCbor(source.reader(), value, false);
}
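// Editor's note: illustrative sketch, not part of the original source.
// JSON values round-trip through CBOR via the riegeli writer/reader above;
// the "discarded" sentinel cannot be encoded, as the adjacent unit test
// demonstrates.
//
//   ::nlohmann::json j{{"a", 1}, {"b", "text"}};
//   // Round-trippable, e.g. via serialization::SerializationRoundTrip(j)
//   // from serialization/test_util.h (used in the test below).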
}
} | #include "tensorstore/serialization/json.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::serialization::SerializationRoundTrip;
using ::tensorstore::serialization::TestSerializationRoundTrip;
TEST(SerializationTest, Valid) {
TestSerializationRoundTrip(::nlohmann::json(5));
TestSerializationRoundTrip(::nlohmann::json("abc"));
}
TEST(SerializationTest, Invalid) {
EXPECT_THAT(SerializationRoundTrip(
::nlohmann::json(::nlohmann::json::value_t::discarded)),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot encode discarded json value.*"));
}
} |
550 | cpp | google/tensorstore | transform_broadcastable_array | tensorstore/index_space/transform_broadcastable_array.cc | tensorstore/index_space/transform_broadcastable_array_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_TRANSFORM_BROADCASTABLE_ARRAY_H_
#define TENSORSTORE_INDEX_SPACE_TRANSFORM_BROADCASTABLE_ARRAY_H_
#include "tensorstore/array.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
Result<SharedArray<const void>> TransformOutputBroadcastableArray(
IndexTransformView<> transform, SharedArrayView<const void> output_array,
IndexDomainView<> output_domain);
Result<SharedArray<const void>> TransformInputBroadcastableArray(
IndexTransformView<> transform, SharedArrayView<const void> input_array);
}
#endif
#include "tensorstore/index_space/transform_broadcastable_array.h"
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
Result<SharedArray<const void>> TransformOutputBroadcastableArray(
IndexTransformView<> transform, SharedArrayView<const void> output_array,
IndexDomainView<> output_domain) {
assert(transform.valid());
Box<dynamic_rank(kMaxRank)> broadcast_domain(transform.output_rank());
if (output_domain.valid()) {
broadcast_domain = output_domain.box();
} else {
TENSORSTORE_RETURN_IF_ERROR(
tensorstore::GetOutputRange(transform, broadcast_domain));
const DimensionIndex output_rank = transform.output_rank();
for (DimensionIndex output_dim = 0; output_dim < output_rank;
++output_dim) {
const auto map = transform.output_index_maps()[output_dim];
switch (map.method()) {
case OutputIndexMethod::constant:
break;
case OutputIndexMethod::array: {
broadcast_domain[output_dim] = IndexInterval();
break;
}
case OutputIndexMethod::single_input_dimension: {
const DimensionIndex input_dim = map.input_dimension();
if (map.stride() != 1 && map.stride() != -1) {
broadcast_domain[output_dim] = IndexInterval::Infinite();
} else {
const DimensionIndex output_array_dim =
output_dim + output_array.rank() - output_rank;
if (output_array_dim >= 0 &&
transform.domain()[input_dim].optionally_implicit_interval() ==
OptionallyImplicitIndexInterval{IndexInterval::Infinite(),
true, true}) {
broadcast_domain[output_dim] =
output_array.domain()[output_array_dim];
}
}
break;
}
}
}
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto broadcast_output_array,
tensorstore::BroadcastArray(std::move(output_array), broadcast_domain));
TENSORSTORE_ASSIGN_OR_RETURN(auto input_array,
std::move(broadcast_output_array) | transform |
tensorstore::Materialize());
return UnbroadcastArray(std::move(input_array));
}
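// Editor's note: illustrative sketch, not part of the original source.
// An array defined over the transform's output space is broadcast to the
// computed output domain and then pulled back through the transform: with
// the map x -> 5 - x and output values {1, 2, 3}, the resulting input
// array is {3, 2, 1} (cf. the ConstantMap unit test). `transform` below
// stands for any suitable transform.
//
//   auto out = tensorstore::MakeArray<int>({1, 2, 3});
//   auto in =
//       TransformOutputBroadcastableArray(transform, out, IndexDomain())
//           .value();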
Result<SharedArray<const void>> TransformInputBroadcastableArray(
IndexTransformView<> transform, SharedArrayView<const void> input_array) {
assert(transform.valid());
SharedArray<const void> output_array;
output_array.layout().set_rank(transform.output_rank());
DimensionSet seen_input_dims;
ByteStridedPointer<const void> data_pointer =
input_array.byte_strided_pointer();
const DimensionIndex input_rank = transform.input_rank();
for (DimensionIndex output_dim = 0; output_dim < output_array.rank();
++output_dim) {
const auto map = transform.output_index_maps()[output_dim];
if (map.method() != OutputIndexMethod::single_input_dimension) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Cannot transform input array through ",
map.method(), " output index map"));
}
const DimensionIndex input_dim = map.input_dimension();
if (seen_input_dims[input_dim]) {
return absl::InvalidArgumentError(
"Cannot transform input array with multiple "
"output dimensions mapping to the same input dimension");
}
if (std::abs(map.stride()) != 1) {
return absl::InvalidArgumentError(
"Cannot transform input array through "
"non-unit-stride output index map");
}
seen_input_dims[input_dim] = true;
const DimensionIndex input_array_dim =
input_array.rank() - input_rank + input_dim;
if (input_array_dim < 0) {
output_array.shape()[output_dim] = 1;
output_array.byte_strides()[output_dim] = 0;
} else {
const Index size = input_array.shape()[input_array_dim];
output_array.shape()[output_dim] = size;
const Index byte_stride = input_array.byte_strides()[input_array_dim];
const Index stride = map.stride();
output_array.byte_strides()[output_dim] =
internal::wrap_on_overflow::Multiply(byte_stride, stride);
if (stride == -1 && size != 0) {
data_pointer +=
internal::wrap_on_overflow::Multiply(byte_stride, size - 1);
}
}
}
for (DimensionIndex input_array_dim = 0; input_array_dim < input_array.rank();
++input_array_dim) {
if (input_array.shape()[input_array_dim] == 1 ||
input_array.byte_strides()[input_array_dim] == 0) {
continue;
}
const DimensionIndex input_dim =
input_rank - input_array.rank() + input_array_dim;
if (input_dim < 0 || !seen_input_dims[input_dim]) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Cannot transform input array; "
"dimension ",
input_array_dim, " cannot be mapped"));
}
}
output_array.element_pointer() = SharedElementPointer<const void>(
std::shared_ptr<const void>(std::move(input_array.pointer()),
data_pointer.get()),
input_array.dtype());
return UnbroadcastArray(std::move(output_array));
}
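// Editor's note: illustrative sketch, not part of the original source.
// The input-to-output direction is only defined for pure relabelings:
// every output map must be single_input_dimension with stride +/-1, and no
// two output dimensions may share an input dimension; anything else
// returns InvalidArgumentError, as the ConstantMap and NonUnitStrideMap
// tests below verify (`transform` and `input_array` are placeholders).
//
//   auto round_tripped =
//       TransformInputBroadcastableArray(transform, input_array);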
} | #include "tensorstore/index_space/transform_broadcastable_array.h"
#include <stddef.h>
#include <random>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/index_transform_testutil.h"
#include "tensorstore/internal/testing/random_seed.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::Index;
using ::tensorstore::IndexDomain;
using ::tensorstore::IndexDomainView;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::IndexTransformView;
using ::tensorstore::MakeArray;
using ::tensorstore::MakeScalarArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::SharedArray;
using ::tensorstore::SharedArrayView;
using ::tensorstore::span;
using ::tensorstore::TransformInputBroadcastableArray;
using ::tensorstore::TransformOutputBroadcastableArray;
void TestRoundTrip(IndexTransformView<> transform,
SharedArrayView<const void> input_array,
SharedArrayView<const void> output_array,
IndexDomainView<> output_domain) {
SCOPED_TRACE(tensorstore::StrCat(
"transform=", transform, ", output_domain=", output_domain,
", input_array.shape=", input_array.shape(),
", output_array.shape=", output_array.shape()));
EXPECT_THAT(
TransformOutputBroadcastableArray(transform, output_array, output_domain),
::testing::Optional(input_array));
EXPECT_THAT(TransformInputBroadcastableArray(transform, input_array),
::testing::Optional(output_array));
}
void TestRoundTrip(IndexTransformView<> transform,
SharedArrayView<const void> output_array,
IndexDomainView<> output_domain = IndexDomainView<>(),
bool test_inverse = false) {
SCOPED_TRACE(tensorstore::StrCat(
"transform=", transform, ", output_domain=", output_domain,
", output_array.shape=", output_array.shape()));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto input_array,
TransformOutputBroadcastableArray(
transform, output_array, output_domain));
EXPECT_THAT(TransformInputBroadcastableArray(transform, input_array),
::testing::Optional(output_array));
if (test_inverse) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto inverse_transform,
tensorstore::InverseTransform(transform));
EXPECT_THAT(
TransformInputBroadcastableArray(inverse_transform, output_array),
::testing::Optional(input_array));
}
}
SharedArray<int> MakeTestArray(span<const Index> shape) {
auto array = tensorstore::AllocateArray<int>(shape);
for (Index i = 0, num_elements = array.num_elements(); i < num_elements;
++i) {
array.data()[i] = i;
}
return array;
}
TEST(RoundTripTest, IdentityTransform) {
for (DimensionIndex rank = 0; rank <= 3; ++rank) {
SCOPED_TRACE(tensorstore::StrCat("rank=", rank));
std::vector<Index> shape(rank);
for (DimensionIndex dim = 0; dim < rank; ++dim) {
shape[dim] = dim + 2;
}
auto array = MakeTestArray(shape);
TestRoundTrip(tensorstore::IdentityTransform(shape), array, array,
tensorstore::IndexDomain<>());
TestRoundTrip(tensorstore::IdentityTransform(rank), array, array,
tensorstore::IndexDomain<>());
TestRoundTrip(tensorstore::IdentityTransform(shape), array, array,
tensorstore::IdentityTransform(shape).domain());
}
}
TEST(RoundTripTest, RandomInvertibleTransform) {
constexpr size_t kNumIterations = 100;
for (size_t i = 0; i < kNumIterations; ++i) {
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_INTERNAL_TRANSFORM_BROADCASTABLE_ARRAY_TEST_SEED")};
auto box = tensorstore::internal::MakeRandomBox(gen);
auto array = tensorstore::UnbroadcastArray(MakeTestArray(box.shape()));
auto domain = IndexDomain(box);
auto transform =
tensorstore::internal::MakeRandomStridedIndexTransformForOutputSpace(
gen, domain);
TestRoundTrip(transform, array);
TestRoundTrip(transform, array, domain);
}
}
TEST(RoundTripTest, RandomInvertibleTransformNoNewDims) {
constexpr size_t kNumIterations = 100;
for (size_t i = 0; i < kNumIterations; ++i) {
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_INTERNAL_TRANSFORM_BROADCASTABLE_ARRAY_TEST_SEED")};
auto box = tensorstore::internal::MakeRandomBox(gen);
auto array = tensorstore::UnbroadcastArray(MakeTestArray(box.shape()));
auto domain = IndexDomain(box);
tensorstore::internal::MakeStridedIndexTransformForOutputSpaceParameters p;
p.max_new_dims = 0;
auto transform =
tensorstore::internal::MakeRandomStridedIndexTransformForOutputSpace(
gen, domain, p);
TestRoundTrip(transform, array, IndexDomain(), true);
TestRoundTrip(transform, array, domain, true);
}
}
TEST(TransformOutputBroadcastableArrayTest, ConstantMap) {
auto array = MakeArray<int>({{1}, {2}, {3}});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform, IndexTransformBuilder(1, 2)
.output_single_input_dimension(0, 5, -1, 0)
.output_constant(1, 42)
.Finalize());
EXPECT_THAT(
TransformOutputBroadcastableArray(transform, array, IndexDomain()),
::testing::Optional(MakeArray<int>({3, 2, 1})));
}
TEST(TransformOutputBroadcastableArrayTest, NonUnitStrideMap) {
auto array = MakeArray<int>({{1}, {2}, {3}});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform, IndexTransformBuilder(2, 2)
.output_single_input_dimension(0, 5, -1, 0)
.output_single_input_dimension(1, 42, 2, 1)
.Finalize());
EXPECT_THAT(
TransformOutputBroadcastableArray(transform, array, IndexDomain()),
::testing::Optional(MakeArray<int>({{3}, {2}, {1}})));
}
TEST(TransformOutputBroadcastableArrayTest, ArrayMap) {
auto array = MakeArray<int>({{1}, {2}, {3}});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
IndexTransformBuilder(1, 2)
.input_shape({3})
.output_single_input_dimension(0, 5, -1, 0)
.output_index_array(1, 20, 1, MakeArray<Index>({0, 5, 10}))
.Finalize());
EXPECT_THAT(
TransformOutputBroadcastableArray(transform, array, IndexDomain()),
::testing::Optional(MakeArray<int>({3, 2, 1})));
}
TEST(TransformInputBroadcastableArrayTest, ConstantMap) {
auto array = MakeScalarArray<int>(42);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
IndexTransformBuilder(0, 1).output_constant(0, 42).Finalize());
EXPECT_THAT(
TransformInputBroadcastableArray(transform, array),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Cannot transform input array through constant output index map"));
}
TEST(TransformInputBroadcastableArrayTest, NonUnitStrideMap) {
auto array = MakeArray<int>({1, 2, 3});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform, IndexTransformBuilder(1, 1)
.output_single_input_dimension(0, 5, 2, 0)
.Finalize());
EXPECT_THAT(TransformInputBroadcastableArray(transform, array),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot transform input array through "
"non-unit-stride output index map"));
}
TEST(TransformInputBroadcastableArrayTest, ArrayMap) {
auto array = MakeArray<int>({1, 2, 3});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 20, 1, MakeArray<Index>({0, 5, 10}))
.Finalize());
EXPECT_THAT(
TransformInputBroadcastableArray(transform, array),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Cannot transform input array through array output index map"));
}
TEST(TransformInputBroadcastableArrayTest, Diagonal) {
auto array = MakeArray<int>({1, 2, 3});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto transform,
IndexTransformBuilder(1, 2)
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 0)
.Finalize());
EXPECT_THAT(
TransformInputBroadcastableArray(transform, array),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot transform input array with multiple "
"output dimensions mapping to the same input dimension"));
}
TEST(TransformInputBroadcastableArrayTest, UnmappedNoError) {
auto array = MakeArray<int>({1, 2, 3});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto transform,
IndexTransformBuilder(2, 1)
.output_single_input_dimension(0, 1)
.Finalize());
EXPECT_THAT(TransformInputBroadcastableArray(transform, array),
::testing::Optional(array));
}
TEST(TransformInputBroadcastableArrayTest, UnmappedError) {
auto array = MakeArray<int>({1, 2, 3});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto transform,
IndexTransformBuilder(2, 1)
.output_single_input_dimension(0, 0)
.Finalize());
EXPECT_THAT(
TransformInputBroadcastableArray(transform, array),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Cannot transform input array; dimension 0 cannot be mapped"));
}
TEST(TransformInputBroadcastableArrayTest, ExtraDimensionError) {
auto array = MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
EXPECT_THAT(
TransformInputBroadcastableArray(tensorstore::IdentityTransform(1),
array),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Cannot transform input array; dimension 0 cannot be mapped"));
}
TEST(TransformInputBroadcastableArrayTest, ExtraDimensionNoError) {
auto array = MakeArray<int>({{1, 2, 3}});
EXPECT_THAT(TransformInputBroadcastableArray(
tensorstore::IdentityTransform(1), array),
::testing::Optional(MakeArray<int>({1, 2, 3})));
}
} |
551 | cpp | google/tensorstore | transpose_op | tensorstore/index_space/internal/transpose_op.cc | tensorstore/index_space/transpose_op_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_INTERNAL_TRANSPOSE_OP_H_
#define TENSORSTORE_INDEX_SPACE_INTERNAL_TRANSPOSE_OP_H_
#include "tensorstore/index_space/dimension_identifier.h"
#include "tensorstore/index_space/dimension_index_buffer.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/internal/meta.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_index_space {
Result<IndexTransform<>> ApplyTransposeTo(
IndexTransform<> transform, DimensionIndexBuffer* dimensions,
span<const DimensionIndex> target_dimensions, bool domain_only);
Result<IndexTransform<>> ApplyTransposeToDynamic(
IndexTransform<> transform, DimensionIndexBuffer* dimensions,
span<const DynamicDimSpec> target_dim_specs, bool domain_only);
Result<IndexTransform<>> ApplyTranspose(
IndexTransform<> transform, span<const DynamicDimSpec> source_dim_specs,
bool domain_only);
template <typename Container>
struct TransposeToOp {
static constexpr bool selected_dimensions_are_new = false;
static constexpr DimensionIndex static_selection_rank =
internal::ConstSpanType<Container>::extent;
constexpr static DimensionIndex GetNewStaticInputRank(
DimensionIndex input_rank, DimensionIndex num_input_dims) {
TENSORSTORE_CONSTEXPR_ASSERT(
(input_rank == dynamic_rank || input_rank >= static_selection_rank) &&
"Number of dimensions must not exceed input rank.");
return input_rank;
}
constexpr static DimensionIndex GetStaticSelectionRank(
DimensionIndex num_input_dims) {
TENSORSTORE_CONSTEXPR_ASSERT(
RankConstraint::EqualOrUnspecified(num_input_dims,
static_selection_rank) &&
"Number of selected dimensions must match number of target "
"dimensions.");
return num_input_dims == dynamic_rank ? static_selection_rank
: num_input_dims;
}
Result<IndexTransform<>> Apply(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
bool domain_only) const {
return ApplyTransposeTo(std::move(transform), dimensions, target_dimensions,
domain_only);
}
Container target_dimensions;
};
Result<IndexTransform<>> ApplyTranspose(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
bool domain_only);
struct TransposeOp {
static constexpr bool selected_dimensions_are_new = false;
constexpr static DimensionIndex GetNewStaticInputRank(
DimensionIndex input_rank, DimensionIndex num_input_dims) {
TENSORSTORE_CONSTEXPR_ASSERT(
RankConstraint::EqualOrUnspecified(input_rank, num_input_dims) &&
"Number of selected dimensions must equal input rank.");
return input_rank == dynamic_rank ? num_input_dims : input_rank;
}
constexpr static DimensionIndex GetStaticSelectionRank(
DimensionIndex num_input_dims) {
return num_input_dims;
}
Result<IndexTransform<>> Apply(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
bool domain_only) const {
return ApplyTranspose(std::move(transform), dimensions, domain_only);
}
};
Result<IndexTransform<>> ApplyMoveDimsTo(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
DimensionIndex target,
bool domain_only);
struct MoveToOp {
static constexpr bool selected_dimensions_are_new = false;
constexpr static DimensionIndex GetNewStaticInputRank(
DimensionIndex input_rank, DimensionIndex num_input_dims) {
return input_rank;
}
constexpr static DimensionIndex GetStaticSelectionRank(
DimensionIndex num_input_dims) {
return num_input_dims;
}
Result<IndexTransform<>> Apply(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
bool domain_only) const {
return ApplyMoveDimsTo(std::move(transform), dimensions, target,
domain_only);
}
DimensionIndex target;
};
}
}
#endif
#include "tensorstore/index_space/internal/transpose_op.h"
#include <cassert>
#include <numeric>
#include "absl/status/status.h"
#include "tensorstore/index_space/dimension_identifier.h"
#include "tensorstore/index_space/dimension_permutation.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/transpose.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
namespace {
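// Computes the permutation that moves the selected dimensions in
// `*dimensions` to consecutive positions starting at (normalized) `target`,
// preserving the relative order of the unselected dimensions. On return,
// `permutation[i]` is the original index of the dimension placed at position
// `i`, and `*dimensions` holds the new positions of the selected dimensions.
// For example, with rank 4, dimensions {2}, and target 0, the resulting
// permutation is {2, 0, 1, 3}.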
absl::Status MakePermutationFromMoveDimsTarget(
DimensionIndexBuffer* dimensions, DimensionIndex target,
span<DimensionIndex> permutation) {
if (dimensions->empty()) {
std::iota(permutation.begin(), permutation.end(),
static_cast<DimensionIndex>(0));
return absl::OkStatus();
}
const DimensionIndex input_rank = permutation.size();
const DimensionIndex num_dims = dimensions->size();
TENSORSTORE_ASSIGN_OR_RETURN(
target, NormalizeDimensionIndex(target, input_rank - num_dims + 1));
std::fill(permutation.begin(), permutation.end(),
static_cast<DimensionIndex>(-1));
DimensionSet moved_dims = false;
for (DimensionIndex i = 0; i < num_dims; ++i) {
DimensionIndex& input_dim = (*dimensions)[i];
moved_dims[input_dim] = true;
permutation[target + i] = input_dim;
input_dim = target + i;
}
for (DimensionIndex i = 0, orig_input_dim = 0; i < input_rank; ++i) {
if (permutation[i] != -1) continue;
while (moved_dims[orig_input_dim]) ++orig_input_dim;
permutation[i] = orig_input_dim++;
}
return absl::OkStatus();
}
}
Result<IndexTransform<>> ApplyMoveDimsTo(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
DimensionIndex target,
bool domain_only) {
const DimensionIndex input_rank = transform.input_rank();
DimensionIndex permutation[kMaxRank];
TENSORSTORE_RETURN_IF_ERROR(MakePermutationFromMoveDimsTarget(
dimensions, target, span<DimensionIndex>(&permutation[0], input_rank)));
return TransformAccess::Make<IndexTransform<>>(TransposeInputDimensions(
TransformAccess::rep_ptr<container>(std::move(transform)),
span<const DimensionIndex>(&permutation[0], input_rank), domain_only));
}
Result<IndexTransform<>> ApplyTranspose(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
bool domain_only) {
if (static_cast<DimensionIndex>(dimensions->size()) !=
transform.input_rank()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Number of dimensions (", dimensions->size(),
") must equal input_rank (", transform.input_rank(), ")."));
}
TransformRep::Ptr<> rep = TransposeInputDimensions(
TransformAccess::rep_ptr<container>(std::move(transform)), *dimensions,
domain_only);
std::iota(dimensions->begin(), dimensions->end(),
static_cast<DimensionIndex>(0));
return TransformAccess::Make<IndexTransform<>>(std::move(rep));
}
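// Transposes so that the i-th selected dimension becomes dimension
// `target_dimensions[i]`; the unselected dimensions keep their relative
// order and fill the remaining positions. Fails if a target dimension is
// repeated or if the selection and target counts differ.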
Result<IndexTransform<>> ApplyTransposeTo(
IndexTransform<> transform, DimensionIndexBuffer* dimensions,
span<const DimensionIndex> target_dimensions, bool domain_only) {
const DimensionIndex input_rank = transform.input_rank();
if (static_cast<DimensionIndex>(dimensions->size()) !=
target_dimensions.size()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Number of selected dimensions (", dimensions->size(),
") must equal number of target dimensions (", target_dimensions.size(),
")"));
}
DimensionSet seen_existing_dim = false;
DimensionIndex permutation[kMaxRank];
std::fill_n(permutation, input_rank, -1);
for (DimensionIndex i = 0; i < target_dimensions.size(); ++i) {
DimensionIndex& orig_dim = (*dimensions)[i];
TENSORSTORE_ASSIGN_OR_RETURN(
const DimensionIndex target_dim,
NormalizeDimensionIndex(target_dimensions[i], input_rank));
if (permutation[target_dim] != -1) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Target dimension ", target_dim, " occurs more than once"));
}
seen_existing_dim[orig_dim] = true;
permutation[target_dim] = orig_dim;
orig_dim = target_dim;
}
for (DimensionIndex orig_dim = 0, target_dim = 0; orig_dim < input_rank;
++orig_dim) {
if (seen_existing_dim[orig_dim]) continue;
while (permutation[target_dim] != -1) ++target_dim;
permutation[target_dim] = orig_dim;
}
return TransformAccess::Make<IndexTransform<>>(TransposeInputDimensions(
TransformAccess::rep_ptr<container>(std::move(transform)),
span<const DimensionIndex>(&permutation[0], input_rank), domain_only));
}
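// Resolves dynamic target specs before delegating: a single bare dimension
// index is treated as a `MoveDimsTo` target, dimension indices and ranges
// are flattened into an explicit target list, and labels are rejected.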
Result<IndexTransform<>> ApplyTransposeToDynamic(
IndexTransform<> transform, DimensionIndexBuffer* dimensions,
span<const DynamicDimSpec> target_dim_specs, bool domain_only) {
if (target_dim_specs.size() == 1) {
if (auto* target = std::get_if<DimensionIndex>(&target_dim_specs.front())) {
return ApplyMoveDimsTo(std::move(transform), dimensions, *target,
domain_only);
}
}
DimensionIndexBuffer target_dimensions;
const DimensionIndex input_rank = transform.input_rank();
for (const auto& s : target_dim_specs) {
if (auto* index = std::get_if<DimensionIndex>(&s)) {
target_dimensions.push_back(*index);
} else if (auto* r = std::get_if<DimRangeSpec>(&s)) {
TENSORSTORE_RETURN_IF_ERROR(
NormalizeDimRangeSpec(*r, input_rank, &target_dimensions));
} else {
return absl::InvalidArgumentError(
"Target dimensions cannot be specified by label");
}
}
return ApplyTransposeTo(std::move(transform), dimensions, target_dimensions,
domain_only);
}
Result<IndexTransform<>> ApplyTranspose(
IndexTransform<> transform, span<const DynamicDimSpec> source_dim_specs,
bool domain_only) {
DimensionIndexBuffer source_dimensions;
source_dimensions.reserve(transform.input_rank());
TENSORSTORE_RETURN_IF_ERROR(NormalizeDynamicDimSpecs(
source_dim_specs, transform.input_labels(), &source_dimensions));
if (!IsValidPermutation(source_dimensions)) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Source dimension list ", span(source_dimensions),
" is not a valid dimension permutation for rank ",
transform.input_rank()));
}
return TransformAccess::Make<IndexTransform<>>(TransposeInputDimensions(
TransformAccess::rep_ptr<container>(std::move(transform)),
source_dimensions, domain_only));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::Dims;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::MakeArray;
using ::tensorstore::internal_index_space::EquivalentIndices;
using ::tensorstore::internal_index_space::TestDimExpression;
using ::tensorstore::internal_index_space::TestDimExpressionError;
TEST(TransposeTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({1, 0, 0})
.implicit_upper_bounds({0, 1, 0})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform = IndexTransformBuilder<3, 3>()
.input_origin({3, 1, 2})
.input_shape({2, 3, 4})
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({0, 0, 1})
.input_labels({"z", "x", "y"})
.output_single_input_dimension(0, 1)
.output_single_input_dimension(1, 2)
.output_single_input_dimension(2, 0)
.Finalize()
.value();
const EquivalentIndices equivalent_indices = {{{2, 3, 4}, {4, 2, 3}}};
TestDimExpression(original_transform,
Dims(2, 0, 1).Transpose(),
{0, 1, 2},
expected_new_transform,
expected_new_transform,
equivalent_indices);
TestDimExpression(original_transform,
Dims("z", "x", "y").Transpose(),
{0, 1, 2},
expected_new_transform,
expected_new_transform,
equivalent_indices);
}
TEST(TransposeTest, Simple) {
TestDimExpression(
IndexTransformBuilder<4, 2>()
.input_origin({1, 2, 3, 4})
.input_shape({5, 6, 4, 8})
.output_single_input_dimension(0, 1, 2, 1)
.output_index_array(
1, 2, 3, MakeArray<Index>({{{{1}, {2}, {3}, {4}}}}),
IndexInterval::Closed(-3, 10))
.Finalize()
.value(),
Dims(2, 0, 1, 3).Transpose(),
{0, 1, 2, 3},
IndexTransformBuilder<4, 4>()
.input_origin({3, 1, 2, 4})
.input_shape({4, 5, 6, 8})
.output_single_input_dimension(0, 1)
.output_single_input_dimension(1, 2)
.output_single_input_dimension(2, 0)
.output_single_input_dimension(3, 3)
.Finalize()
.value(),
IndexTransformBuilder<4, 2>()
.input_origin({3, 1, 2, 4})
.input_shape({4, 5, 6, 8})
.output_single_input_dimension(0, 1, 2, 2)
.output_index_array(
1, 2, 3,
MakeArray<Index>(
{{{{1}}}, {{{2}}}, {{{3}}}, {{{4}}}}),
IndexInterval::Closed(-3, 10))
.Finalize()
.value(),
{{{2, 4, 3, 5}, {3, 2, 4, 5}}});
}
TEST(TransposeTest, Constant) {
TestDimExpression(IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({5, 6})
.output_constant(0, 1)
.output_constant(1, 2)
.Finalize()
.value(),
Dims(1, 0).Transpose(),
{0, 1},
IndexTransformBuilder<2, 2>()
.input_origin({2, 1})
.input_shape({6, 5})
.output_single_input_dimension(0, 1)
.output_single_input_dimension(1, 0)
.Finalize()
.value(),
IndexTransformBuilder<2, 2>()
.input_origin({2, 1})
.input_shape({6, 5})
.output_constant(0, 1)
.output_constant(1, 2)
.Finalize()
.value(),
{});
}
TEST(TransposeTest, ErrorHandling) {
TestDimExpressionError(
IndexTransformBuilder<>(2, 2)
.input_origin({1, 2})
.input_shape({5, 6})
.output_constant(0, 1)
.output_constant(1, 2)
.Finalize()
.value(),
Dims(1).Transpose(), absl::StatusCode::kInvalidArgument,
"Number of dimensions \\(1\\) must equal input_rank \\(2\\)\\.");
}
TEST(TransposeTest, Labeled) {
TestDimExpression(
IndexTransformBuilder<4, 2>()
.input_origin({1, 2, 3, 4})
.input_shape({5, 6, 4, 8})
.input_labels({"a", "b", "c", "d"})
.output_single_input_dimension(0, 1, 2, 1)
.output_index_array(
1, 2, 3, MakeArray<Index>({{{{1}, {2}, {3}, {4}}}}),
IndexInterval::Closed(-3, 10))
.Finalize()
.value(),
Dims(2, 0, 1, 3).Transpose(),
{0, 1, 2, 3},
IndexTransformBuilder<4, 4>()
.input_origin({3, 1, 2, 4})
.input_shape({4, 5, 6, 8})
.input_labels({"c", "a", "b", "d"})
.output_single_input_dimension(0, 1)
.output_single_input_dimension(1, 2)
.output_single_input_dimension(2, 0)
.output_single_input_dimension(3, 3)
.Finalize()
.value(),
IndexTransformBuilder<4, 2>()
.input_origin({3, 1, 2, 4})
.input_shape({4, 5, 6, 8})
.input_labels({"c", "a", "b", "d"})
.output_single_input_dimension(0, 1, 2, 2)
.output_index_array(
1, 2, 3,
MakeArray<Index>(
{{{{1}}}, {{{2}}}, {{{3}}}, {{{4}}}}),
IndexInterval::Closed(-3, 10))
.Finalize()
.value(),
{{{2, 4, 3, 5}, {3, 2, 4, 5}}});
}
} |
552 | cpp | google/tensorstore | transpose | tensorstore/driver/zarr3/codec/transpose.cc | tensorstore/driver/zarr3/codec/transpose_test.cc | #ifndef TENSORSTORE_DRIVER_ZARR3_CODEC_TRANSPOSE_H_
#define TENSORSTORE_DRIVER_ZARR3_CODEC_TRANSPOSE_H_
#include <utility>
#include <variant>
#include <vector>
#include "absl/status/status.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/driver/zarr3/codec/codec.h"
#include "tensorstore/driver/zarr3/codec/codec_spec.h"
#include "tensorstore/index.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_zarr3 {
class TransposeCodecSpec : public ZarrArrayToArrayCodecSpec {
public:
using Order =
std::variant<std::vector<DimensionIndex>, ContiguousLayoutOrder>;
struct Options {
Order order;
};
TransposeCodecSpec() = default;
explicit TransposeCodecSpec(Options&& options)
: options(std::move(options)) {}
absl::Status MergeFrom(const ZarrCodecSpec& other, bool strict) override;
ZarrCodecSpec::Ptr Clone() const override;
absl::Status PropagateDataTypeAndShape(
const ArrayDataTypeAndShapeInfo& decoded,
ArrayDataTypeAndShapeInfo& encoded) const override;
absl::Status GetDecodedChunkLayout(
const ArrayDataTypeAndShapeInfo& encoded_info,
const ArrayCodecChunkLayoutInfo& encoded,
const ArrayDataTypeAndShapeInfo& decoded_info,
ArrayCodecChunkLayoutInfo& decoded) const override;
Result<ZarrArrayToArrayCodec::Ptr> Resolve(
ArrayCodecResolveParameters&& decoded,
ArrayCodecResolveParameters& encoded,
ZarrArrayToArrayCodecSpec::Ptr* resolved_spec) const override;
Options options;
};
}
}
#endif
#include "tensorstore/driver/zarr3/codec/transpose.h"
#include <array>
#include <cassert>
#include <optional>
#include <string_view>
#include <utility>
#include <variant>
#include <vector>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/driver/chunk.h"
#include "tensorstore/driver/zarr3/codec/codec.h"
#include "tensorstore/driver/zarr3/codec/codec_spec.h"
#include "tensorstore/driver/zarr3/codec/registry.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dimension_permutation.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/internal/global_initializer.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/dimension_indexed.h"
#include "tensorstore/internal/json_binding/enum.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_variant.h"
#include "tensorstore/internal/storage_statistics.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_zarr3 {
namespace {
namespace jb = ::tensorstore::internal_json_binding;
absl::Status InvalidPermutationError(span<const DimensionIndex> order,
DimensionIndex rank) {
return absl::InvalidArgumentError(tensorstore::StrCat(
order, " is not a valid dimension permutation for a rank ", rank,
" array"));
}
constexpr auto OrderJsonBinder() {
return jb::Variant(
jb::Validate(
[](const auto& options, auto* obj) {
if (!IsValidPermutation(*obj)) {
return absl::InvalidArgumentError(
tensorstore::StrCat(span<const DimensionIndex>(*obj),
" is not a valid permutation"));
}
return absl::OkStatus();
},
jb::DimensionIndexedVector(
nullptr, jb::Integer<DimensionIndex>(0, kMaxRank - 1))),
jb::Enum<ContiguousLayoutOrder, std::string_view>({
{c_order, "C"},
{fortran_order, "F"},
}));
}
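// Attempts to merge two "order" constraints: an explicit permutation is
// compatible with a "C"/"F" layout order when it matches that order (the
// explicit permutation is kept); otherwise two permutations or two layout
// orders must be equal. Returns false on conflict.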
bool TryMergeOrder(TransposeCodecSpec::Order& a,
const TransposeCodecSpec::Order& b) {
struct Visitor {
TransposeCodecSpec::Order& merged;
bool operator()(const std::vector<DimensionIndex>& a,
ContiguousLayoutOrder b) const {
return PermutationMatchesOrder(a, b);
}
bool operator()(ContiguousLayoutOrder a,
const std::vector<DimensionIndex>& b) const {
if (PermutationMatchesOrder(b, a)) {
merged = b;
return true;
}
return false;
}
bool operator()(ContiguousLayoutOrder a, ContiguousLayoutOrder b) {
return a == b;
}
bool operator()(const std::vector<DimensionIndex>& a,
const std::vector<DimensionIndex>& b) {
return a == b;
}
};
return std::visit(Visitor{a}, a, b);
}
}
absl::Status TransposeCodecSpec::MergeFrom(const ZarrCodecSpec& other,
bool strict) {
using Self = TransposeCodecSpec;
const auto& other_options = static_cast<const Self&>(other).options;
return MergeConstraint<&Options::order>("order", options, other_options,
OrderJsonBinder(), &TryMergeOrder);
}
ZarrCodecSpec::Ptr TransposeCodecSpec::Clone() const {
return internal::MakeIntrusivePtr<TransposeCodecSpec>(*this);
}
namespace {
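// Array-to-array codec that permutes dimensions without copying element
// data: `inverse_order_[decoded_dim]` gives the corresponding encoded
// dimension, and encode/decode simply permute shapes and byte strides.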
class TransposeCodec : public ZarrArrayToArrayCodec {
public:
explicit TransposeCodec(std::vector<DimensionIndex> inverse_order)
: inverse_order_(std::move(inverse_order)) {}
class State : public ZarrArrayToArrayCodec::PreparedState {
public:
span<const Index> encoded_shape() const final { return encoded_shape_; }
Result<SharedArray<const void>> EncodeArray(
SharedArrayView<const void> decoded) const final {
span<const DimensionIndex> inverse_order = codec_->inverse_order_;
assert(decoded.rank() == inverse_order.size());
SharedArray<const void> encoded;
encoded.layout().set_rank(inverse_order.size());
encoded.element_pointer() = std::move(decoded.element_pointer());
for (DimensionIndex decoded_dim = 0; decoded_dim < encoded.rank();
++decoded_dim) {
const DimensionIndex encoded_dim = inverse_order[decoded_dim];
encoded.shape()[encoded_dim] = decoded.shape()[decoded_dim];
encoded.byte_strides()[encoded_dim] =
decoded.byte_strides()[decoded_dim];
}
return encoded;
}
Result<SharedArray<const void>> DecodeArray(
SharedArrayView<const void> encoded,
span<const Index> decoded_shape) const final {
span<const DimensionIndex> inverse_order = codec_->inverse_order_;
assert(encoded.rank() == inverse_order.size());
SharedArray<const void> decoded;
decoded.layout().set_rank(inverse_order.size());
decoded.element_pointer() = std::move(encoded.element_pointer());
for (DimensionIndex decoded_dim = 0; decoded_dim < encoded.rank();
++decoded_dim) {
const DimensionIndex encoded_dim = inverse_order[decoded_dim];
decoded.shape()[decoded_dim] = encoded.shape()[encoded_dim];
decoded.byte_strides()[decoded_dim] =
encoded.byte_strides()[encoded_dim];
}
assert(internal::RangesEqual(decoded_shape, decoded.shape()));
return decoded;
}
void Read(const NextReader& next, span<const Index> decoded_shape,
IndexTransform<> transform,
AnyFlowReceiver<absl::Status, internal::ReadChunk,
IndexTransform<>>&& receiver) const final {
next(std::move(transform).TransposeOutput(codec_->inverse_order_),
std::move(receiver));
}
void Write(const NextWriter& next, span<const Index> decoded_shape,
IndexTransform<> transform,
AnyFlowReceiver<absl::Status, internal::WriteChunk,
IndexTransform<>>&& receiver) const final {
next(std::move(transform).TransposeOutput(codec_->inverse_order_),
std::move(receiver));
}
void GetStorageStatistics(
const NextGetStorageStatistics& next, span<const Index> decoded_shape,
IndexTransform<> transform,
internal::IntrusivePtr<
internal::GetStorageStatisticsAsyncOperationState>
state) const final {
next(std::move(transform).TransposeOutput(codec_->inverse_order_),
std::move(state));
}
const TransposeCodec* codec_;
std::vector<Index> encoded_shape_;
};
Result<PreparedState::Ptr> Prepare(
span<const Index> decoded_shape) const final {
if (decoded_shape.size() != inverse_order_.size()) {
std::vector<DimensionIndex> order(inverse_order_.size());
InvertPermutation(order.size(), inverse_order_.data(), order.data());
return InvalidPermutationError(order, decoded_shape.size());
}
auto state = internal::MakeIntrusivePtr<State>();
state->codec_ = this;
state->encoded_shape_.resize(decoded_shape.size());
for (DimensionIndex decoded_dim = 0; decoded_dim < decoded_shape.size();
++decoded_dim) {
const DimensionIndex encoded_dim = inverse_order_[decoded_dim];
state->encoded_shape_[encoded_dim] = decoded_shape[decoded_dim];
}
return state;
}
private:
std::vector<DimensionIndex> inverse_order_;
};
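// Normalizes an "order" constraint to an explicit permutation of length
// `rank`: an explicit permutation is validated against `rank`, while a
// "C"/"F" layout order is expanded into `temp_permutation`.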
Result<span<const DimensionIndex>> ResolveOrder(
const TransposeCodecSpec::Order& order, DimensionIndex rank,
span<DimensionIndex, kMaxRank> temp_permutation) {
if (auto* permutation = std::get_if<std::vector<DimensionIndex>>(&order)) {
if (!RankConstraint::Implies(permutation->size(), rank)) {
return InvalidPermutationError(*permutation, rank);
}
return {std::in_place, *permutation};
}
auto perm = temp_permutation.first(rank);
SetPermutation(std::get<ContiguousLayoutOrder>(order), perm);
return perm;
}
}
absl::Status TransposeCodecSpec::PropagateDataTypeAndShape(
const ArrayDataTypeAndShapeInfo& decoded,
ArrayDataTypeAndShapeInfo& encoded) const {
DimensionIndex temp_perm[kMaxRank];
TENSORSTORE_ASSIGN_OR_RETURN(
auto order, ResolveOrder(options.order, decoded.rank, temp_perm));
encoded.dtype = decoded.dtype;
encoded.rank = order.size();
if (decoded.shape) {
auto& encoded_shape = encoded.shape.emplace();
const auto& decoded_shape = *decoded.shape;
for (DimensionIndex encoded_dim = 0; encoded_dim < order.size();
++encoded_dim) {
const DimensionIndex decoded_dim = order[encoded_dim];
encoded_shape[encoded_dim] = decoded_shape[decoded_dim];
}
}
return absl::OkStatus();
}
namespace {
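// Helpers for translating chunk layout constraints between the encoded and
// decoded dimension spaces; `order` maps each encoded dimension to the
// decoded dimension it came from.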
void PropagateInnerOrderToDecoded(
span<const DimensionIndex> order,
const std::optional<std::array<DimensionIndex, kMaxRank>>&
encoded_inner_order,
std::optional<std::array<DimensionIndex, kMaxRank>>& decoded_inner_order) {
if (!encoded_inner_order) return;
auto& encoded = *encoded_inner_order;
auto& decoded = decoded_inner_order.emplace();
for (DimensionIndex i = 0; i < order.size(); ++i) {
decoded[i] = order[encoded[i]];
}
}
void PropagateShapeToDecoded(
span<const DimensionIndex> order,
const std::optional<std::array<Index, kMaxRank>>& encoded_shape,
std::optional<std::array<Index, kMaxRank>>& decoded_shape) {
if (!encoded_shape) return;
auto& encoded = *encoded_shape;
auto& decoded = decoded_shape.emplace();
for (DimensionIndex encoded_dim = 0; encoded_dim < order.size();
++encoded_dim) {
const DimensionIndex decoded_dim = order[encoded_dim];
decoded[decoded_dim] = encoded[encoded_dim];
}
}
}
absl::Status TransposeCodecSpec::GetDecodedChunkLayout(
const ArrayDataTypeAndShapeInfo& encoded_info,
const ArrayCodecChunkLayoutInfo& encoded,
const ArrayDataTypeAndShapeInfo& decoded_info,
ArrayCodecChunkLayoutInfo& decoded) const {
DimensionIndex temp_perm[kMaxRank];
TENSORSTORE_ASSIGN_OR_RETURN(
auto order, ResolveOrder(options.order, decoded_info.rank, temp_perm));
assert(encoded_info.rank == order.size());
assert(decoded_info.rank == order.size());
PropagateInnerOrderToDecoded(order, encoded.inner_order, decoded.inner_order);
PropagateShapeToDecoded(order, encoded.read_chunk_shape,
decoded.read_chunk_shape);
PropagateShapeToDecoded(order, encoded.codec_chunk_shape,
decoded.codec_chunk_shape);
return absl::OkStatus();
}
Result<ZarrArrayToArrayCodec::Ptr> TransposeCodecSpec::Resolve(
ArrayCodecResolveParameters&& decoded, ArrayCodecResolveParameters& encoded,
ZarrArrayToArrayCodecSpec::Ptr* resolved_spec) const {
DimensionIndex temp_perm[kMaxRank];
TENSORSTORE_ASSIGN_OR_RETURN(
auto order, ResolveOrder(options.order, decoded.rank, temp_perm));
encoded.dtype = decoded.dtype;
encoded.rank = decoded.rank;
assert(decoded.fill_value.rank() == 0);
encoded.fill_value = std::move(decoded.fill_value);
std::vector<DimensionIndex> inverse_order(order.size());
InvertPermutation(order.size(), order.data(), inverse_order.data());
PropagateInnerOrderToDecoded(inverse_order, decoded.inner_order,
encoded.inner_order);
PropagateShapeToDecoded(inverse_order, decoded.read_chunk_shape,
encoded.read_chunk_shape);
PropagateShapeToDecoded(inverse_order, decoded.codec_chunk_shape,
encoded.codec_chunk_shape);
if (resolved_spec) {
resolved_spec->reset(new TransposeCodecSpec({TransposeCodecSpec::Order(
std::vector<DimensionIndex>(order.begin(), order.end()))}));
}
return internal::MakeIntrusivePtr<TransposeCodec>(std::move(inverse_order));
}
TENSORSTORE_GLOBAL_INITIALIZER {
using Self = TransposeCodecSpec;
using Options = Self::Options;
RegisterCodec<Self>(
"transpose",
jb::Projection<&Self::options>(jb::Sequence(jb::Member(
"order", jb::Projection<&Options::order>(OrderJsonBinder())))));
}
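// Example codec spec accepted by the binder registered above (these exact
// forms appear in the unit tests): {"name": "transpose", "configuration":
// {"order": [2, 1, 0]}}; "order" may also be the string "C" or "F".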
}
} | #include <cstdint>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/data_type.h"
#include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"
#include "tensorstore/driver/zarr3/codec/codec_spec.h"
#include "tensorstore/driver/zarr3/codec/codec_test_util.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::dtype_v;
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_zarr3::ArrayCodecResolveParameters;
using ::tensorstore::internal_zarr3::CodecRoundTripTestParams;
using ::tensorstore::internal_zarr3::CodecSpecRoundTripTestParams;
using ::tensorstore::internal_zarr3::GetDefaultBytesCodecJson;
using ::tensorstore::internal_zarr3::TestCodecMerge;
using ::tensorstore::internal_zarr3::TestCodecRoundTrip;
using ::tensorstore::internal_zarr3::TestCodecSpecResolve;
using ::tensorstore::internal_zarr3::TestCodecSpecRoundTrip;
using ::tensorstore::internal_zarr3::ZarrCodecChainSpec;
TEST(TransposeTest, Basic) {
CodecSpecRoundTripTestParams p;
p.orig_spec = {
{{"name", "transpose"}, {"configuration", {{"order", {2, 1, 0}}}}},
};
p.resolve_params.rank = 3;
p.expected_spec = {
{{"name", "transpose"}, {"configuration", {{"order", {2, 1, 0}}}}},
GetDefaultBytesCodecJson(),
};
TestCodecSpecRoundTrip(p);
}
TEST(TransposeTest, C) {
CodecSpecRoundTripTestParams p;
p.orig_spec = {
{{"name", "transpose"}, {"configuration", {{"order", "C"}}}},
};
p.resolve_params.rank = 3;
p.expected_spec = {
{{"name", "transpose"}, {"configuration", {{"order", {0, 1, 2}}}}},
GetDefaultBytesCodecJson(),
};
TestCodecSpecRoundTrip(p);
}
TEST(TransposeTest, F) {
CodecSpecRoundTripTestParams p;
p.orig_spec = {
{{"name", "transpose"}, {"configuration", {{"order", "F"}}}},
};
p.resolve_params.rank = 3;
p.expected_spec = {
{{"name", "transpose"}, {"configuration", {{"order", {2, 1, 0}}}}},
GetDefaultBytesCodecJson(),
};
TestCodecSpecRoundTrip(p);
}
TEST(TransposeTest, InvalidPermutation) {
EXPECT_THAT(
ZarrCodecChainSpec::FromJson(
{{{"name", "transpose"}, {"configuration", {{"order", {2, 1, 2}}}}}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*is not a valid permutation.*"));
}
TEST(TransposeTest, RoundTrip) {
CodecRoundTripTestParams p;
p.spec = {{{"name", "transpose"}, {"configuration", {{"order", {2, 1, 0}}}}}};
TestCodecRoundTrip(p);
}
TEST(TransposeTest, RankMismatch) {
ArrayCodecResolveParameters p;
p.dtype = dtype_v<uint16_t>;
p.rank = 2;
EXPECT_THAT(
TestCodecSpecResolve(
{{{"name", "transpose"}, {"configuration", {{"order", {2, 1, 0}}}}}},
p),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error resolving codec spec .* is not a valid dimension "
"permutation for a rank 2 array"));
}
TEST(TransposeTest, AttributeMismatch) {
ArrayCodecResolveParameters p;
p.dtype = dtype_v<uint16_t>;
p.rank = 2;
EXPECT_THAT(
TestCodecSpecResolve(
{{{"name", "transpose"},
{"configuration", {{"order", {0, 1}}, {"extra", 1}}}}},
p),
MatchesStatus(absl::StatusCode::kInvalidArgument, ".* \"extra\""));
}
TEST(TransposeTest, Merge) {
::nlohmann::json perm_012 = {
{{"name", "transpose"}, {"configuration", {{"order", {0, 1, 2}}}}}};
::nlohmann::json perm_210 = {
{{"name", "transpose"}, {"configuration", {{"order", {2, 1, 0}}}}}};
::nlohmann::json perm_C = {
{{"name", "transpose"}, {"configuration", {{"order", "C"}}}}};
::nlohmann::json perm_F = {
{{"name", "transpose"}, {"configuration", {{"order", "F"}}}}};
EXPECT_THAT(TestCodecMerge(perm_012, perm_C,
false),
::testing::Optional(MatchesJson(perm_012)));
EXPECT_THAT(TestCodecMerge(perm_C, perm_012,
false),
::testing::Optional(MatchesJson(perm_012)));
EXPECT_THAT(TestCodecMerge(perm_210, perm_F,
false),
::testing::Optional(MatchesJson(perm_210)));
EXPECT_THAT(TestCodecMerge(perm_F, perm_210,
false),
::testing::Optional(MatchesJson(perm_210)));
EXPECT_THAT(TestCodecMerge(perm_C, perm_C,
false),
::testing::Optional(MatchesJson(perm_C)));
EXPECT_THAT(TestCodecMerge(perm_F, perm_F,
false),
::testing::Optional(MatchesJson(perm_F)));
EXPECT_THAT(TestCodecMerge(perm_012, perm_210, false),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
EXPECT_THAT(TestCodecMerge(perm_C, perm_F, false),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
} |
553 | cpp | google/tensorstore | index_array_slice_op | tensorstore/index_space/internal/index_array_slice_op.cc | tensorstore/index_space/index_array_slice_op_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_INTERNAL_INDEX_ARRAY_SLICE_OP_H_
#define TENSORSTORE_INDEX_SPACE_INTERNAL_INDEX_ARRAY_SLICE_OP_H_
#include "tensorstore/array.h"
#include "tensorstore/index_space/dimension_index_buffer.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/internal/meta.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_index_space {
Result<IndexTransform<>> ApplyIndexArraySlice(
IndexTransform<> transform, DimensionIndexBuffer* dimensions,
span<const SharedArrayView<const Index>> index_arrays, bool outer_indexing,
bool domain_only = false);
template <bool OuterIndexing, DimensionIndex IndexArrayInputRank,
typename IndexArrays>
struct IndexArraySliceOp {
static constexpr bool selected_dimensions_are_new = false;
using IndexArrayType = typename IndexArrays::value_type;
constexpr static DimensionIndex static_selection_rank =
internal::ConstSpanType<IndexArrays>::extent;
constexpr static DimensionIndex GetNewStaticInputRank(
DimensionIndex input_rank, DimensionIndex num_input_dims) {
TENSORSTORE_CONSTEXPR_ASSERT(
(input_rank == dynamic_rank || input_rank >= static_selection_rank) &&
"Number of dimensions must not exceed input rank.");
return RankConstraint::Add(
RankConstraint::Subtract(input_rank, num_input_dims),
IndexArrayInputRank);
}
constexpr static DimensionIndex GetStaticSelectionRank(
DimensionIndex num_input_dims) {
TENSORSTORE_CONSTEXPR_ASSERT(
RankConstraint::EqualOrUnspecified(num_input_dims,
static_selection_rank) &&
"Number of selected dimensions must match number of indices.");
return IndexArrayInputRank;
}
Result<IndexTransform<>> Apply(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
bool domain_only) const {
return ApplyIndexArraySlice(std::move(transform), dimensions, index_arrays,
OuterIndexing, domain_only);
}
IndexArrays index_arrays;
};
Result<IndexTransform<>> ApplyIndexVectorArraySlice(
IndexTransform<> transform, DimensionIndexBuffer* dimensions,
DimensionIndex vector_dimension,
const SharedArrayView<const Index>& index_vector_array,
bool domain_only = false);
template <DimensionIndex IndexVectorArrayRank>
struct IndexVectorArraySliceOp {
static_assert(IndexVectorArrayRank >= 1,
"Index vector array must have rank >= 1.");
static constexpr bool selected_dimensions_are_new = false;
constexpr static DimensionIndex GetNewStaticInputRank(
DimensionIndex input_rank, DimensionIndex num_input_dims) {
return RankConstraint::Add(
RankConstraint::Subtract(input_rank, num_input_dims),
RankConstraint::Subtract(IndexVectorArrayRank, 1));
}
constexpr static DimensionIndex GetStaticSelectionRank(
DimensionIndex num_input_dims) {
return RankConstraint::Subtract(IndexVectorArrayRank, 1);
}
Result<IndexTransform<>> Apply(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
bool domain_only) const {
return ApplyIndexVectorArraySlice(std::move(transform), dimensions,
vector_dimension, index_vector_array,
domain_only);
}
SharedArrayView<const Index, IndexVectorArrayRank> index_vector_array;
DimensionIndex vector_dimension;
};
}
}
#endif
#include "tensorstore/index_space/internal/index_array_slice_op.h"
#include <algorithm>
#include <numeric>
#include "tensorstore/index_space/dimension_identifier.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
namespace {
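// Broadcasting helpers: two extents are compatible if they are equal or
// either is 1, in which case `*result` is updated to the non-1 extent.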
bool BroadcastSizes(Index source, Index* result) {
if (source == *result) return true;
if (*result == 1) {
*result = source;
return true;
} else if (source == 1) {
return true;
}
return false;
}
bool BroadcastShapes(span<const Index> source_shape, span<Index> result_shape) {
if (source_shape.size() != result_shape.size()) return false;
for (DimensionIndex i = 0; i < source_shape.size(); ++i) {
if (!BroadcastSizes(source_shape[i], &result_shape[i])) return false;
}
return true;
}
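// Shared implementation for joint indexing: the selected output dimensions
// are replaced by index-array output maps over `num_new_dims` new leading
// input dimensions, whose bounds, base pointers, and byte strides are
// supplied by the three callbacks; unselected dimensions are preserved
// after the new ones.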
template <typename GetNewDimensionShapeFn, typename GetIndexArrayBasePointerFn,
typename GetIndexArrayByteStrideFn>
Result<TransformRep::Ptr<>> MakeTransformFromJointIndexArrays(
DimensionIndex num_new_dims, TransformRep* orig_transform,
DimensionIndexBuffer* dimensions,
GetNewDimensionShapeFn get_new_dimension_bounds,
GetIndexArrayBasePointerFn get_index_array_base_pointer,
GetIndexArrayByteStrideFn get_index_array_byte_stride) {
const DimensionIndex num_indexed_dims = dimensions->size();
const DimensionIndex output_rank = orig_transform->input_rank;
const DimensionIndex input_rank =
output_rank - dimensions->size() + num_new_dims;
TENSORSTORE_RETURN_IF_ERROR(ValidateRank(input_rank));
auto result = TransformRep::Allocate(input_rank, output_rank);
result->input_rank = input_rank;
result->output_rank = output_rank;
result->implicit_lower_bounds = false;
result->implicit_upper_bounds = false;
span<OutputIndexMap> maps = result->output_index_maps().first(output_rank);
const DimensionIndex num_preserved_dims = output_rank - num_indexed_dims;
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
maps[output_dim].SetSingleInputDimension(0);
}
const auto input_domain = result->input_domain(input_rank);
for (DimensionIndex new_dim = 0; new_dim < num_new_dims; ++new_dim) {
input_domain[new_dim] = get_new_dimension_bounds(new_dim);
}
for (DimensionIndex indexed_dim = 0; indexed_dim < num_indexed_dims;
++indexed_dim) {
const DimensionIndex output_dim = (*dimensions)[indexed_dim];
auto& map = maps[output_dim];
map.offset() = 0;
map.stride() = 1;
auto& index_array_data = map.SetArrayIndexing(input_rank);
std::fill_n(index_array_data.byte_strides + num_new_dims,
num_preserved_dims, 0);
for (DimensionIndex new_dim = 0; new_dim < num_new_dims; ++new_dim) {
index_array_data.byte_strides[new_dim] =
get_index_array_byte_stride(indexed_dim, new_dim);
}
index_array_data.element_pointer =
get_index_array_base_pointer(indexed_dim);
}
for (DimensionIndex output_dim = 0, input_dim = num_new_dims;
output_dim < output_rank; ++output_dim) {
auto& map = maps[output_dim];
if (map.method() != OutputIndexMethod::single_input_dimension) continue;
map.SetSingleInputDimension(input_dim);
map.offset() = 0;
map.stride() = 1;
result->input_dimension(input_dim) =
orig_transform->input_dimension(output_dim);
++input_dim;
}
if (IsDomainExplicitlyEmpty(result.get())) {
ReplaceAllIndexArrayMapsWithConstantMaps(result.get());
}
dimensions->resize(num_new_dims);
std::iota(dimensions->begin(), dimensions->end(),
static_cast<DimensionIndex>(0));
internal_index_space::DebugCheckInvariants(result.get());
return result;
}
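// Joint ("vectorized", in NumPy terms) indexing: all index arrays are
// broadcast to a common shape, which becomes the leading input dimensions
// of the resulting transform.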
Result<TransformRep::Ptr<>> MakeTransformFromIndexArrays(
TransformRep* orig_transform, DimensionIndexBuffer* dimensions,
span<const SharedArrayView<const Index>> index_arrays) {
const DimensionIndex num_indexed_dims = dimensions->size();
if (index_arrays.size() != num_indexed_dims) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Number of selected dimensions (", num_indexed_dims,
") does not equal number of index arrays (", index_arrays.size(), ")"));
}
if (index_arrays.empty()) {
return absl::InvalidArgumentError(
tensorstore::StrCat("At least one index array must be specified"));
}
Index shape[kMaxRank];
const DimensionIndex num_new_dims = index_arrays[0].rank();
std::fill_n(&shape[0], num_new_dims, Index(1));
bool error = false;
for (DimensionIndex i = 0; i < index_arrays.size(); ++i) {
if (!BroadcastShapes(index_arrays[i].shape(),
span<Index>(&shape[0], num_new_dims))) {
error = true;
}
}
if (error) {
std::string shape_msg;
for (DimensionIndex i = 0; i < index_arrays.size(); ++i) {
tensorstore::StrAppend(&shape_msg, (shape_msg.empty() ? "" : ", "),
index_arrays[i].shape());
}
return absl::InvalidArgumentError(
tensorstore::StrCat("Index arrays with shapes ", shape_msg,
" cannot be broadcast to a common shape"));
}
const auto get_new_dimension_bounds = [&](DimensionIndex new_dim) {
return IndexInterval::UncheckedSized(0, shape[new_dim]);
};
const auto get_index_array_base_pointer = [&](DimensionIndex indexed_dim) {
return index_arrays[indexed_dim].pointer();
};
const auto get_index_array_byte_stride = [&](DimensionIndex indexed_dim,
DimensionIndex new_dim) {
return index_arrays[indexed_dim].shape()[new_dim] == 1
? 0
: index_arrays[indexed_dim].byte_strides()[new_dim];
};
return MakeTransformFromJointIndexArrays(
num_new_dims, orig_transform, dimensions, get_new_dimension_bounds,
get_index_array_base_pointer, get_index_array_byte_stride);
}
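// Outer indexing: each index array contributes its own block of consecutive
// input dimensions, placed at the position of the corresponding selected
// output dimension; a single-element index array collapses to a constant
// output map.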
Result<TransformRep::Ptr<>> MakeTransformFromOuterIndexArrays(
TransformRep* orig_transform, DimensionIndexBuffer* dimensions,
span<const SharedArrayView<const Index>> index_arrays) {
const DimensionIndex num_indexed_dims = dimensions->size();
if (index_arrays.size() != num_indexed_dims) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Number of selected dimensions (", num_indexed_dims,
") does not equal number of index arrays (", index_arrays.size(), ")"));
}
const DimensionIndex output_rank = orig_transform->input_rank;
DimensionIndex input_rank = output_rank - num_indexed_dims;
for (const auto& index_array : index_arrays) {
input_rank += index_array.rank();
}
TENSORSTORE_RETURN_IF_ERROR(ValidateRank(input_rank));
auto result = TransformRep::Allocate(input_rank, output_rank);
result->input_rank = input_rank;
result->output_rank = output_rank;
result->implicit_lower_bounds = false;
result->implicit_upper_bounds = false;
DimensionIndex index_array_start_dim[kMaxRank];
DimensionIndex index_array_order[kMaxRank];
std::iota(&index_array_order[0], &index_array_order[num_indexed_dims],
static_cast<DimensionIndex>(0));
std::sort(&index_array_order[0], &index_array_order[num_indexed_dims],
[&](DimensionIndex a, DimensionIndex b) {
return (*dimensions)[a] < (*dimensions)[b];
});
span<Index> input_origin = result->input_origin().first(input_rank);
span<Index> input_shape = result->input_shape().first(input_rank);
span<OutputIndexMap> maps = result->output_index_maps().first(output_rank);
for (DimensionIndex output_dim = 0, reordered_indexed_dim = 0, input_dim = 0;
output_dim < output_rank; ++output_dim) {
auto& map = maps[output_dim];
map.stride() = 1;
map.offset() = 0;
if (reordered_indexed_dim < num_indexed_dims) {
const DimensionIndex indexed_dim =
index_array_order[reordered_indexed_dim];
if ((*dimensions)[indexed_dim] == output_dim) {
index_array_start_dim[indexed_dim] = input_dim;
const auto& array = index_arrays[indexed_dim];
MutableBoxView<>(input_origin.subspan(input_dim, array.rank()),
input_shape.subspan(input_dim, array.rank()))
.DeepAssign(array.domain());
const DimensionIndex end_input_dim = input_dim + array.rank();
if (array.num_elements() == 1) {
map.SetConstant();
map.offset() = *array.data();
map.stride() = 0;
} else {
auto& index_array_data = map.SetArrayIndexing(input_rank);
index_array_data.element_pointer = array.element_pointer();
std::fill_n(index_array_data.byte_strides, input_dim, 0);
std::copy(array.byte_strides().begin(), array.byte_strides().end(),
index_array_data.byte_strides + input_dim);
std::fill(index_array_data.byte_strides + end_input_dim,
index_array_data.byte_strides + input_rank, 0);
}
input_dim = end_input_dim;
++reordered_indexed_dim;
continue;
}
}
result->input_dimension(input_dim) =
orig_transform->input_dimension(output_dim);
map.SetSingleInputDimension(input_dim);
++input_dim;
}
if (IsDomainExplicitlyEmpty(result.get())) {
ReplaceAllIndexArrayMapsWithConstantMaps(result.get());
}
dimensions->clear();
dimensions->reserve(input_rank - output_rank);
for (DimensionIndex indexed_dim = 0; indexed_dim < num_indexed_dims;
++indexed_dim) {
const DimensionIndex start_input_dim = index_array_start_dim[indexed_dim];
for (DimensionIndex
input_dim = start_input_dim,
end_input_dim = start_input_dim + index_arrays[indexed_dim].rank();
input_dim != end_input_dim; ++input_dim) {
dimensions->push_back(input_dim);
}
}
internal_index_space::DebugCheckInvariants(result.get());
return result;
}
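// Treats dimension `vector_dimension` of `index_vector_array` as a vector
// of indices, one per selected dimension; the remaining array dimensions
// become the new leading input dimensions of the transform.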
Result<TransformRep::Ptr<>> MakeTransformFromIndexVectorArray(
TransformRep* orig_transform, DimensionIndexBuffer* dimensions,
DimensionIndex vector_dimension,
const SharedArrayView<const Index>& index_vector_array) {
TENSORSTORE_ASSIGN_OR_RETURN(
vector_dimension,
NormalizeDimensionIndex(vector_dimension, index_vector_array.rank()));
const DimensionIndex num_indexed_dims = dimensions->size();
if (index_vector_array.shape()[vector_dimension] != num_indexed_dims) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Number of selected dimensions (", num_indexed_dims,
") does not equal index vector length (",
index_vector_array.shape()[vector_dimension], ")"));
}
const DimensionIndex num_new_dims = index_vector_array.rank() - 1;
const auto get_index_vector_array_dim = [&](DimensionIndex new_dim) {
return new_dim >= vector_dimension ? new_dim + 1 : new_dim;
};
const auto get_new_dimension_bounds = [&](DimensionIndex new_dim) {
return index_vector_array.domain()[get_index_vector_array_dim(new_dim)];
};
const auto get_index_array_base_pointer = [&](DimensionIndex indexed_dim) {
return std::shared_ptr<const Index>(
index_vector_array.pointer(),
index_vector_array.byte_strided_pointer() +
index_vector_array.byte_strides()[vector_dimension] * indexed_dim);
};
const auto get_index_array_byte_stride = [&](DimensionIndex indexed_dim,
DimensionIndex new_dim) {
const DimensionIndex index_vector_array_dim =
get_index_vector_array_dim(new_dim);
return index_vector_array.shape()[index_vector_array_dim] == 1
? 0
: index_vector_array.byte_strides()[index_vector_array_dim];
};
return MakeTransformFromJointIndexArrays(
num_new_dims, orig_transform, dimensions, get_new_dimension_bounds,
get_index_array_base_pointer, get_index_array_byte_stride);
}
}
Result<IndexTransform<>> ApplyIndexArraySlice(
IndexTransform<> transform, DimensionIndexBuffer* dimensions,
span<const SharedArrayView<const Index>> index_arrays, bool outer_indexing,
bool domain_only) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto other_transform,
outer_indexing
? MakeTransformFromOuterIndexArrays(TransformAccess::rep(transform),
dimensions, index_arrays)
: MakeTransformFromIndexArrays(TransformAccess::rep(transform),
dimensions, index_arrays));
TENSORSTORE_ASSIGN_OR_RETURN(
auto new_rep,
ComposeTransforms(TransformAccess::rep(transform),
false, other_transform.get(),
true, domain_only));
return TransformAccess::Make<IndexTransform<>>(std::move(new_rep));
}
Result<IndexTransform<>> ApplyIndexVectorArraySlice(
IndexTransform<> transform, DimensionIndexBuffer* dimensions,
DimensionIndex vector_dimension,
const SharedArrayView<const Index>& index_vector_array, bool domain_only) {
TENSORSTORE_ASSIGN_OR_RETURN(auto other_transform,
MakeTransformFromIndexVectorArray(
TransformAccess::rep(transform), dimensions,
vector_dimension, index_vector_array));
TENSORSTORE_ASSIGN_OR_RETURN(
auto new_rep,
ComposeTransforms(TransformAccess::rep(transform),
false, other_transform.get(),
true, domain_only));
return TransformAccess::Make<IndexTransform<>>(std::move(new_rep));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::AllDims;
using ::tensorstore::DimensionIndex;
using ::tensorstore::Dims;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::MakeArray;
using ::tensorstore::SharedArrayView;
using ::tensorstore::span;
using ::tensorstore::internal_index_space::EquivalentIndices;
using ::tensorstore::internal_index_space::TestDimExpression;
TEST(IndexArraySliceTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({0, 2, 0})
.input_shape({7, 4, 10})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform =
IndexTransformBuilder<3, 3>()
.input_origin({0, 0, 2})
.input_shape({2, 3, 4})
.input_labels({"", "", "y"})
.output_index_array(
0, 0, 1, MakeArray<Index>({{{1}, {2}, {3}}, {{4}, {5}, {6}}}),
IndexInterval::Sized(0, 7))
.output_single_input_dimension(1, 2)
.output_index_array(
2, 0, 1, MakeArray<Index>({{{7}, {8}, {9}}, {{0}, {1}, {2}}}),
IndexInterval::Sized(0, 10))
.Finalize()
.value();
const EquivalentIndices equivalent_indices = {
{{1, 3, 7}, {0, 0, 3}},
{{2, 3, 8}, {0, 1, 3}},
{{3, 3, 9}, {0, 2, 3}},
{{6, 3, 2}, {1, 2, 3}},
};
TestDimExpression(
original_transform,
Dims(0, 2).IndexArraySlice(MakeArray<Index>({{1, 2, 3}, {4, 5, 6}}),
MakeArray<Index>({{7, 8, 9}, {0, 1, 2}})),
{0, 1},
expected_new_transform,
expected_new_transform,
equivalent_indices,
false);
TestDimExpression(
original_transform,
Dims("x", "z").IndexArraySlice(MakeArray<Index>({{1, 2, 3}, {4, 5, 6}}),
MakeArray<Index>({{7, 8, 9}, {0, 1, 2}})),
{0, 1},
expected_new_transform,
expected_new_transform,
equivalent_indices,
false);
}
TEST(IndexVectorArraySliceTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({0, 2, 0})
.input_shape({7, 4, 10})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform =
IndexTransformBuilder<3, 3>()
.input_origin({0, 0, 2})
.input_shape({2, 3, 4})
.input_labels({"", "", "y"})
.output_index_array(
0, 0, 1, MakeArray<Index>({{{1}, {2}, {3}}, {{4}, {5}, {6}}}),
IndexInterval::Sized(0, 7))
.output_single_input_dimension(1, 2)
.output_index_array(
2, 0, 1, MakeArray<Index>({{{7}, {8}, {9}}, {{0}, {1}, {2}}}),
IndexInterval::Sized(0, 10))
.Finalize()
.value();
const EquivalentIndices equivalent_indices = {
{{2, 3, 8}, {0, 1, 3}},
{{6, 3, 2}, {1, 2, 3}},
};
TestDimExpression(original_transform,
Dims(0, 2).IndexVectorArraySlice(
MakeArray<Index>({{{1, 7}, {2, 8}, {3, 9}},
{{4, 0}, {5, 1}, {6, 2}}}),
-1),
{0, 1},
expected_new_transform,
expected_new_transform,
equivalent_indices,
false);
TestDimExpression(original_transform,
Dims("x", "z").IndexVectorArraySlice(
MakeArray<Index>({{{1, 7}, {2, 8}, {3, 9}},
{{4, 0}, {5, 1}, {6, 2}}}),
-1),
{0, 1},
expected_new_transform,
expected_new_transform,
equivalent_indices,
false);
}
TEST(IndexArrayOuterIndexArraySliceTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({4, 2, 0})
.input_shape({5, 4, 10})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform =
IndexTransformBuilder<4, 3>()
.input_origin({0, 2, 0, 0})
.input_shape({2, 4, 2, 2})
.input_labels({"", "y", "", ""})
.output_index_array(0, 0, 1, MakeArray<Index>({{{{6}}}, {{{7}}}}),
IndexInterval::Sized(4, 5))
.output_single_input_dimension(1, 1)
.output_index_array(2, 0, 1, MakeArray<Index>({{{{2, 3}, {4, 5}}}}),
IndexInterval::Sized(0, 10))
.Finalize()
.value();
const EquivalentIndices equivalent_indices = {
{{6, 3, 3}, {0, 3, 0, 1}},
{{7, 3, 4}, {1, 3, 1, 0}},
};
TestDimExpression(
original_transform,
Dims(2, 0).OuterIndexArraySlice(MakeArray<Index>({{2, 3}, {4, 5}}),
MakeArray<Index>({6, 7})),
{2, 3, 0},
expected_new_transform,
expected_new_transform,
equivalent_indices,
false);
TestDimExpression(
original_transform,
Dims("z", "x").OuterIndexArraySlice(MakeArray<Index>({{2, 3}, {4, 5}}),
MakeArray<Index>({6, 7})),
{2, 3, 0},
expected_new_transform,
expected_new_transform,
equivalent_indices,
false);
}
TEST(IndexArraySliceTest, OneDOutputOneDArray) {
TestDimExpression(
IndexTransformBuilder<2, 2>()
.input_origin({-10, -100})
.input_shape({20, 200})
.output_single_input_dimension(0, -2, -3, 0)
.output_single_input_dimension(1, 10, 11, 1)
.Finalize()
.value(),
Dims(0).IndexArraySlice(MakeArray<Index>({1, 2})),
{0},
IndexTransformBuilder<2, 2>()
.input_origin({0, -100})
.input_shape({2, 200})
.output_index_array(0, 0, 1, MakeArray<Index>({{1}, {2}}),
IndexInterval::Sized(-10, 20))
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
IndexTransformBuilder<2, 2>()
.input_origin({0, -100})
.input_shape({2, 200})
.output_index_array(0, -2, -3, MakeArray<Index>({{1}, {2}}),
IndexInterval::Sized(-10, 20))
.output_single_input_dimension(1, 10, 11, 1)
.Finalize()
.value(),
{{{1, 5}, {0, 5}}, {{2, 5}, {1, 5}}},
false);
}
TEST(IndexArraySliceTest, ZeroElementIndexArray) {
TestDimExpression(
IndexTransformBuilder<2, 2>()
.input_origin({-10, -100})
.input_shape({20, 200})
.output_single_input_dimension(0, -2, -3, 0)
.output_single_input_dimension(1, 10, 11, 1)
.Finalize()
.value(),
Dims(0).IndexArraySlice(
tensorstore::AllocateArray<Index>({5, 0, 3})),
{0, 1, 2},
IndexTransformBuilder<4, 2>()
.input_origin({0, 0, 0, -100})
.input_shape({5, 0, 3, 200})
.output_constant(0, 0)
.output_single_input_dimension(1, 3)
.Finalize()
.value(),
IndexTransformBuilder<4, 2>()
.input_origin({0, 0, 0, -100})
.input_shape({5, 0, 3, 200})
.output_constant(0, -2)
.output_single_input_dimension(1, 10, 11, 3)
.Finalize()
.value(),
{},
false);
}
TEST(IndexArraySliceTest, OneElementIndexArray) {
TestDimExpression(
IndexTransformBuilder<2, 2>()
.input_origin({-10, -100})
.input_shape({20, 200})
.output_single_input_dimension(0, -2, -3, 0)
.output_single_input_dimension(1, 10, 11, 1)
.Finalize()
.value(),
Dims(0).IndexArraySlice(MakeArray<Index>({{5}})),
{0, 1},
IndexTransformBuilder<3, 2>()
.input_origin({0, 0, -100})
.input_shape({1, 1, 200})
.output_constant(0, 5)
.output_single_input_dimension(1, 2)
.Finalize()
.value(),
IndexTransformBuilder<3, 2>()
.input_origin({0, 0, -100})
.input_shape({1, 1, 200})
.output_constant(0, -17)
.output_single_input_dimension(1, 10, 11, 2)
.Finalize()
.value(),
{{{5, 6}, {0, 0, 6}}},
false);
}
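// A single-element index array carries no per-position information, so the
// implementation lowers it to a constant output map; the -17 above is the
// array value 5 folded through the composed map y = -2 - 3*x.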
TEST(IndexArraySliceTest, OneDOutputOneDArrayLabeled) {
TestDimExpression(
IndexTransformBuilder<2, 2>()
.input_origin({-10, -100})
.input_shape({20, 200})
.input_labels({"x", "y"})
.output_single_input_dimension(0, -2, -3, 0)
.output_single_input_dimension(1, 10, 11, 1)
.Finalize()
.value(),
Dims(0)
.IndexArraySlice(MakeArray<Index>({1, 2}))
.Label("index"),
{0},
IndexTransformBuilder<2, 2>()
.input_origin({0, -100})
.input_shape({2, 200})
.input_labels({"index", "y"})
.output_index_array(0, 0, 1,
MakeArray<Index>({{1}, {2}}),
IndexInterval::Sized(-10, 20))
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
IndexTransformBuilder<2, 2>()
.input_origin({0, -100})
.input_shape({2, 200})
.input_labels({"index", "y"})
.output_index_array(0, -2, -3,
MakeArray<Index>({{1}, {2}}),
IndexInterval::Sized(-10, 20))
.output_single_input_dimension(1, 10, 11, 1)
.Finalize()
.value(),
{{{1, 5}, {0, 5}}, {{2, 5}, {1, 5}}},
false);
}
TEST(IndexArraySliceTest, TwoDOutputOneDArray) {
TestDimExpression(
IndexTransformBuilder<2, 2>()
.input_origin({-10, -2})
.input_shape({20, 15})
.output_single_input_dimension(0, -2, -3, 0)
.output_single_input_dimension(1, -4, 2, 1)
.Finalize()
.value(),
AllDims()
.IndexArraySlice(MakeArray<Index>({1, 2}),
MakeArray<Index>({3, 4}))
.Label("index"),
{0},
IndexTransformBuilder<1, 2>()
.input_origin({0})
.input_shape({2})
.input_labels({"index"})
.output_index_array(0, 0, 1, MakeArray<Index>({1, 2}),
IndexInterval::Sized(-10, 20))
.output_index_array(1, 0, 1, MakeArray<Index>({3, 4}),
IndexInterval::Sized(-2, 15))
.Finalize()
.value(),
IndexTransformBuilder<1, 2>()
.input_origin({0})
.input_shape({2})
.input_labels({"index"})
.output_index_array(0, -2, -3, MakeArray<Index>({1, 2}),
IndexInterval::Sized(-10, 20))
.output_index_array(1, -4, 2, MakeArray<Index>({3, 4}),
IndexInterval::Sized(-2, 15))
.Finalize()
.value(),
{{{1, 3}, {0}}, {{2, 4}, {1}}},
false);
}
TEST(IndexArraySliceTest, TwoDOutputOneDArrayBroadcast) {
TestDimExpression(
IndexTransformBuilder<2, 2>()
.input_origin({-10, -2})
.input_shape({20, 15})
.output_single_input_dimension(0, -2, -3, 0)
.output_single_input_dimension(1, -4, 2, 1)
.Finalize()
.value(),
AllDims().IndexArraySlice(MakeArray<Index>({{1, 2}}),
MakeArray<Index>({{3}, {4}})),
{0, 1},
IndexTransformBuilder<2, 2>()
.input_origin({0, 0})
.input_shape({2, 2})
.output_index_array(0, 0, 1, MakeArray<Index>({{1, 2}}),
IndexInterval::Sized(-10, 20))
.output_index_array(1, 0, 1, MakeArray<Index>({{3}, {4}}),
IndexInterval::Sized(-2, 15))
.Finalize()
.value(),
IndexTransformBuilder<2, 2>()
.input_origin({0, 0})
.input_shape({2, 2})
.output_index_array(0, -2, -3, MakeArray<Index>({{1, 2}}),
IndexInterval::Sized(-10, 20))
.output_index_array(1, -4, 2, MakeArray<Index>({{3}, {4}}),
IndexInterval::Sized(-2, 15))
.Finalize()
.value(),
{{{1, 3}, {0, 0}}, {{1, 4}, {1, 0}}, {{2, 4}, {1, 1}}},
false);
}
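// The two index arrays above have shapes {1, 2} and {2, 1} and, as in
// NumPy, broadcast to the common shape {2, 2} that becomes the domain of
// the new input dimensions. A minimal sketch of the same call, assuming
// the operator| composition used elsewhere in TensorStore:
//
//   auto sliced = tensorstore::IdentityTransform(2) |
//                 tensorstore::AllDims().IndexArraySlice(
//                     MakeArray<Index>({{1, 2}}),     // shape {1, 2}
//                     MakeArray<Index>({{3}, {4}}));  // shape {2, 1}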
TEST(IndexArraySliceTest, ErrorHandling) {
TestDimExpressionError(
IndexTransformBuilder<2, 0>().Finalize().value(),
Dims(span<const DimensionIndex>({0}))
.IndexArraySlice(MakeArray<Index>({1, 2}), MakeArray<Index>({3, 4})),
absl::StatusCode::kInvalidArgument,
"Number of selected dimensions \\(1\\) does not equal number of index "
"arrays \\(2\\)");
TestDimExpressionError(
IndexTransformBuilder<1, 0>().Finalize().value(),
Dims(span<const DimensionIndex>())
.IndexArraySlice(span<const SharedArrayView<const Index>>()),
absl::StatusCode::kInvalidArgument,
"At least one index array must be specified");
TestDimExpressionError(
IndexTransformBuilder<2, 0>().Finalize().value(),
Dims(0, 1).IndexArraySlice(MakeArray<Index>({1, 2}),
MakeArray<Index>({3, 4, 5})),
absl::StatusCode::kInvalidArgument,
"Index arrays with shapes \\{2\\}, \\{3\\} cannot be broadcast "
"to a common shape");
}
TEST(IndexArraySliceTest, InvalidRank) {
auto index_array = tensorstore::AllocateArray<Index>(
std::vector<Index>(32, 1), tensorstore::c_order, tensorstore::value_init);
TestDimExpressionError(tensorstore::IdentityTransform(2),
Dims(0).IndexArraySlice(index_array),
absl::StatusCode::kInvalidArgument,
"Rank 33 is outside valid range \\[0, 32\\]");
}
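// The rank-32 index array replaces the single selected dimension, so the
// resulting input rank would be 32 + (2 - 1) = 33, one past the supported
// maximum of 32 reported in the error message.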
TEST(IndexVectorArraySliceTest, OneDOutputOneDArray) {
TestDimExpression(
IndexTransformBuilder<2, 2>()
.input_origin({-10, -100})
.input_shape({20, 200})
.output_single_input_dimension(0, -2, -3, 0)
.output_single_input_dimension(1, 10, 11, 1)
.Finalize()
.value(),
Dims(0).IndexVectorArraySlice(MakeArray<Index>({{1, 2}}),
0),
{0},
IndexTransformBuilder<2, 2>()
.input_origin({0, -100})
.input_shape({2, 200})
.output_index_array(0, 0, 1,
MakeArray<Index>({{1}, {2}}),
IndexInterval::Sized(-10, 20))
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
IndexTransformBuilder<2, 2>()
.input_origin({0, -100})
.input_shape({2, 200})
.output_index_array(0, -2, -3,
MakeArray<Index>({{1}, {2}}),
IndexInterval::Sized(-10, 20))
.output_single_input_dimension(1, 10, 11, 1)
.Finalize()
.value(),
{{{1, 5}, {0, 5}}, {{2, 5}, {1, 5}}},
false);
}
TEST(IndexVectorArraySliceTest, OneElementIndexArray) {
TestDimExpression(
IndexTransformBuilder<2, 2>()
.input_origin({-10, -100})
.input_shape({20, 200})
.output_single_input_dimension(0, -2, -3, 0)
.output_single_input_dimension(1, 10, 11, 1)
.Finalize()
.value(),
Dims(0).IndexVectorArraySlice(MakeArray<Index>({{1}}), 0),
{0},
IndexTransformBuilder<2, 2>()
.input_origin({0, -100})
.input_shape({1, 200})
.output_constant(0, 1)
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
IndexTransformBuilder<2, 2>()
.input_origin({0, -100})
.input_shape({1, 200})
.output_constant(0, -5)
.output_single_input_dimension(1, 10, 11, 1)
.Finalize()
.value(),
{{{1, 5}, {0, 5}}},
false);
}
TEST(IndexVectorArraySliceTest, OneDOutputOneDArrayLabeled) {
TestDimExpression(
IndexTransformBuilder<2, 2>()
.input_origin({-10, -100})
.input_shape({20, 200})
.input_labels({"x", "y"})
.output_single_input_dimension(0, -2, -3, 0)
.output_single_input_dimension(1, 10, 11, 1)
.Finalize()
.value(),
Dims(0)
.IndexVectorArraySlice(MakeArray<Index>({{1, 2}}), 0)
.Label("index"),
{0},
IndexTransformBuilder<2, 2>()
.input_origin({0, -100})
.input_shape({2, 200})
.input_labels({"index", "y"})
.output_index_array(0, 0, 1, MakeArray<Index>({{1}, {2}}),
IndexInterval::Sized(-10, 20))
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
IndexTransformBuilder<2, 2>()
.input_origin({0, -100})
.input_shape({2, 200})
.input_labels({"index", "y"})
.output_index_array(0, -2, -3, MakeArray<Index>({{1}, {2}}),
IndexInterval::Sized(-10, 20))
.output_single_input_dimension(1, 10, 11, 1)
.Finalize()
.value(),
{{{1, 5}, {0, 5}}, {{2, 5}, {1, 5}}},
false);
}
TEST(IndexVectorArraySliceTest, TwoDOutputOneDArray) {
TestDimExpression(
IndexTransformBuilder<2, 2>()
.input_origin({-10, -2})
.input_shape({20, 15})
.output_single_input_dimension(0, -2, -3, 0)
.output_single_input_dimension(1, -4, 2, 1)
.Finalize()
.value(),
AllDims()
.IndexVectorArraySlice(
MakeArray<Index>({{1, 3}, {2, 4}}), -1)
.Label("index"),
{0},
IndexTransformBuilder<1, 2>()
.input_origin({0})
.input_shape({2})
.input_labels({"index"})
.output_index_array(0, 0, 1, MakeArray<Index>({1, 2}),
IndexInterval::Sized(-10, 20))
.output_index_array(1, 0, 1, MakeArray<Index>({3, 4}),
IndexInterval::Sized(-2, 15))
.Finalize()
.value(),
IndexTransformBuilder<1, 2>()
.input_origin({0})
.input_shape({2})
.input_labels({"index"})
.output_index_array(0, -2, -3, MakeArray<Index>({1, 2}),
IndexInterval::Sized(-10, 20))
.output_index_array(1, -4, 2, MakeArray<Index>({3, 4}),
IndexInterval::Sized(-2, 15))
.Finalize()
.value(),
{{{1, 3}, {0}}, {{2, 4}, {1}}},
false);
}
TEST(IndexVectorArraySliceTest, ErrorHandling) {
TestDimExpressionError(
IndexTransformBuilder<2, 0>().Finalize().value(),
Dims(0).IndexVectorArraySlice(MakeArray<Index>({1, 2}), 0),
absl::StatusCode::kInvalidArgument,
"Number of selected dimensions \\(1\\) does not equal index vector "
"length \\(2\\)");
TestDimExpressionError(
IndexTransformBuilder<2, 0>().Finalize().value(),
Dims(0).IndexVectorArraySlice(MakeArray<Index>({1, 2}), 1),
absl::StatusCode::kInvalidArgument,
"Dimension index 1 is outside valid range \\[-1, 1\\)");
}
TEST(IndexVectorArraySliceTest, InvalidRank) {
TestDimExpressionError(
tensorstore::IdentityTransform(4),
Dims(0, 1).IndexVectorArraySlice(
tensorstore::AllocateArray<Index>({1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 2},
tensorstore::c_order,
tensorstore::default_init),
-1),
absl::StatusCode::kInvalidArgument,
"Rank 33 is outside valid range \\[0, 32\\]");
}
TEST(OuterIndexArraySliceTest, Integration) {
TestDimExpression(
IndexTransformBuilder<3, 3>()
.input_origin({-10, -100, -2})
.input_shape({21, 200, 15})
.output_single_input_dimension(0, -2, -3, 0)
.output_single_input_dimension(1, 6, 5, 1)
.output_single_input_dimension(2, -4, 2, 2)
.Finalize()
.value(),
Dims(0, 2).OuterIndexArraySlice(
MakeArray<Index>({{3, 4, 5}, {8, 9, 10}}),
MakeArray<Index>({1, 2})),
{0, 1, 3},
IndexTransformBuilder<4, 3>()
.input_origin({0, 0, -100, 0})
.input_shape({2, 3, 200, 2})
.output_index_array(
0, 0, 1,
MakeArray<Index>({{{{3}}, {{4}}, {{5}}},
{{{8}}, {{9}}, {{10}}}}),
IndexInterval::Sized(-10, 21))
.output_single_input_dimension(1, 2)
.output_index_array(2, 0, 1,
MakeArray<Index>({{{{1, 2}}}}),
IndexInterval::Sized(-2, 15))
.Finalize()
.value(),
IndexTransformBuilder<4, 3>()
.input_origin({0, 0, -100, 0})
.input_shape({2, 3, 200, 2})
.output_index_array(
0, -2, -3,
MakeArray<Index>({{{{3}}, {{4}}, {{5}}},
{{{8}}, {{9}}, {{10}}}}),
IndexInterval::Sized(-10, 21))
.output_single_input_dimension(1, 6, 5, 2)
.output_index_array(2, -4, 2,
MakeArray<Index>({{{{1, 2}}}}),
IndexInterval::Sized(-2, 15))
.Finalize()
.value(),
{{{3, 5, 1}, {0, 0, 5, 0}},
{{9, 5, 2}, {1, 1, 5, 1}},
{{8, 5, 2}, {1, 0, 5, 1}},
{{10, 5, 2}, {1, 2, 5, 1}}},
false);
}
TEST(OuterIndexArraySliceTest, OneElementIndexArray) {
TestDimExpression(
IndexTransformBuilder<3, 3>()
.input_origin({-10, -100, -2})
.input_shape({21, 200, 15})
.output_single_input_dimension(0, -2, -3, 0)
.output_single_input_dimension(1, 6, 5, 1)
.output_single_input_dimension(2, -4, 2, 2)
.Finalize()
.value(),
Dims(0, 2).OuterIndexArraySlice(
MakeArray<Index>({{3, 4, 5}, {8, 9, 10}}),
MakeArray<Index>({1})),
{0, 1, 3},
IndexTransformBuilder<4, 3>()
.input_origin({0, 0, -100, 0})
.input_shape({2, 3, 200, 1})
.output_index_array(
0, 0, 1,
MakeArray<Index>({{{{3}}, {{4}}, {{5}}},
{{{8}}, {{9}}, {{10}}}}),
IndexInterval::Sized(-10, 21))
.output_single_input_dimension(1, 2)
.output_constant(2, 1)
.Finalize()
.value(),
IndexTransformBuilder<4, 3>()
.input_origin({0, 0, -100, 0})
.input_shape({2, 3, 200, 1})
.output_index_array(
0, -2, -3,
MakeArray<Index>({{{{3}}, {{4}}, {{5}}},
{{{8}}, {{9}}, {{10}}}}),
IndexInterval::Sized(-10, 21))
.output_single_input_dimension(1, 6, 5, 2)
.output_constant(2, -2)
.Finalize()
.value(),
{{{3, 5, 1}, {0, 0, 5, 0}}},
false);
}
TEST(OuterIndexArraySliceTest, ErrorHandling) {
TestDimExpre |
554 | cpp | google/tensorstore | identity_transform | tensorstore/index_space/internal/identity_transform.cc | tensorstore/index_space/identity_transform_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_INTERNAL_IDENTITY_TRANSFORM_H_
#define TENSORSTORE_INDEX_SPACE_INTERNAL_IDENTITY_TRANSFORM_H_
#include "tensorstore/box.h"
#include "tensorstore/index_space/internal/transform_rep.h"
namespace tensorstore {
namespace internal_index_space {
void SetToIdentityTransform(span<OutputIndexMap> maps);
TransformRep::Ptr<> MakeIdentityTransform(DimensionIndex rank,
bool domain_only = false);
TransformRep::Ptr<> MakeIdentityTransform(internal::StringLikeSpan labels,
bool domain_only = false);
TransformRep::Ptr<> MakeIdentityTransform(BoxView<> domain,
bool domain_only = false);
TransformRep::Ptr<> MakeIdentityTransformLike(TransformRep* data,
bool domain_only = false);
TransformRep::Ptr<> MakeIdentityTransform(span<const Index> shape,
bool domain_only = false);
}
}
#endif
#include "tensorstore/index_space/internal/identity_transform.h"
namespace tensorstore {
namespace internal_index_space {
void SetToIdentityTransform(span<OutputIndexMap> maps) {
for (DimensionIndex i = 0; i < maps.size(); ++i) {
auto& map = maps[i];
map.SetSingleInputDimension(i);
map.offset() = 0;
map.stride() = 1;
}
}
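// For example, with maps.size() == 3 this produces the three maps
//   output[0] = 0 + 1 * input[0]
//   output[1] = 0 + 1 * input[1]
//   output[2] = 0 + 1 * input[2]
// i.e. every output dimension becomes a single_input_dimension map with
// zero offset and unit stride.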
namespace {
void SetUnboundedDomain(TransformRep* data, DimensionIndex rank) {
assert(data->input_rank_capacity >= rank);
data->input_rank = rank;
std::fill_n(data->input_origin().begin(), rank, -kInfIndex);
std::fill_n(data->input_shape().begin(), rank, kInfSize);
const auto mask = DimensionSet::UpTo(rank);
data->implicit_lower_bounds = mask;
data->implicit_upper_bounds = mask;
}
void SetIdentityOutputOrDomainOnly(TransformRep* data, DimensionIndex rank,
bool domain_only) {
if (domain_only) {
data->output_rank = 0;
} else {
assert(data->output_rank_capacity >= rank);
data->output_rank = rank;
SetToIdentityTransform(data->output_index_maps().first(rank));
}
}
void SetToIdentityTransform(TransformRep* data, DimensionIndex rank,
bool domain_only) {
SetUnboundedDomain(data, rank);
SetIdentityOutputOrDomainOnly(data, rank, domain_only);
}
}
TransformRep::Ptr<> MakeIdentityTransform(DimensionIndex rank,
bool domain_only) {
auto data = TransformRep::Allocate(rank, domain_only ? 0 : rank);
SetToIdentityTransform(data.get(), rank, domain_only);
internal_index_space::DebugCheckInvariants(data.get());
return data;
}
TransformRep::Ptr<> MakeIdentityTransform(internal::StringLikeSpan labels,
bool domain_only) {
const DimensionIndex rank = labels.size();
auto data = TransformRep::Allocate(rank, domain_only ? 0 : rank);
SetToIdentityTransform(data.get(), rank, domain_only);
span<std::string> input_labels = data->input_labels().first(rank);
for (DimensionIndex i = 0; i < rank; ++i) {
std::string_view label = labels[i];
input_labels[i].assign(label.data(), label.size());
}
internal_index_space::DebugCheckInvariants(data.get());
return data;
}
TransformRep::Ptr<> MakeIdentityTransformLike(TransformRep* data,
bool domain_only) {
assert(data != nullptr);
const DimensionIndex rank = data->input_rank;
auto result = TransformRep::Allocate(rank, domain_only ? 0 : rank);
CopyTransformRepDomain(data, result.get());
SetIdentityOutputOrDomainOnly(result.get(), rank, domain_only);
internal_index_space::DebugCheckInvariants(result.get());
return result;
}
TransformRep::Ptr<> MakeIdentityTransform(span<const Index> shape,
bool domain_only) {
const DimensionIndex rank = shape.size();
auto result = TransformRep::Allocate(rank, domain_only ? 0 : rank);
result->input_rank = rank;
std::fill_n(result->input_origin().begin(), rank, 0);
std::copy_n(shape.begin(), rank, result->input_shape().begin());
result->implicit_lower_bounds = false;
result->implicit_upper_bounds = false;
SetIdentityOutputOrDomainOnly(result.get(), rank, domain_only);
internal_index_space::DebugCheckInvariants(result.get());
return result;
}
TransformRep::Ptr<> MakeIdentityTransform(BoxView<> domain, bool domain_only) {
const DimensionIndex rank = domain.rank();
auto result = TransformRep::Allocate(rank, domain_only ? 0 : rank);
result->input_rank = rank;
result->input_domain(rank).DeepAssign(domain);
result->implicit_lower_bounds = false;
result->implicit_upper_bounds = false;
SetIdentityOutputOrDomainOnly(result.get(), rank, domain_only);
internal_index_space::DebugCheckInvariants(result.get());
return result;
}
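// Summary of the MakeIdentityTransform overloads above: the rank overload
// produces an unbounded domain with implicit bounds, the labels overload
// additionally assigns dimension labels, the shape overload produces an
// explicit zero-origin domain, and the Box overload copies the given
// origin and shape verbatim with explicit bounds.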
}
} | #include <type_traits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::AllocateArray;
using ::tensorstore::Box;
using ::tensorstore::IdentityTransform;
using ::tensorstore::Index;
using ::tensorstore::IndexDomain;
using ::tensorstore::IndexTransform;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::span;
TEST(IdentityTransformTest, Static) {
auto t = IdentityTransform<2>();
static_assert(std::is_same_v<decltype(t), IndexTransform<2, 2>>);
EXPECT_EQ(IndexTransformBuilder<>(2, 2)
.implicit_lower_bounds({1, 1})
.implicit_upper_bounds({1, 1})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
t);
auto d = IndexDomain(t.input_rank());
static_assert(std::is_same_v<decltype(d), IndexDomain<2>>);
EXPECT_EQ(t.domain(), d);
}
TEST(IdentityTransformTest, Dynamic) {
auto t = IdentityTransform(2);
static_assert(std::is_same_v<decltype(t), IndexTransform<>>);
EXPECT_EQ(IndexTransformBuilder<>(2, 2)
.implicit_lower_bounds({1, 1})
.implicit_upper_bounds({1, 1})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
t);
auto d = IndexDomain(t.input_rank());
static_assert(std::is_same_v<decltype(d), IndexDomain<>>);
EXPECT_EQ(t.domain(), d);
}
TEST(IdentityTransformTest, LabeledCString) {
auto t = IdentityTransform({"x", "y"});
static_assert(std::is_same_v<decltype(t), IndexTransform<2, 2>>);
EXPECT_EQ(IndexTransformBuilder<>(2, 2)
.implicit_lower_bounds({1, 1})
.implicit_upper_bounds({1, 1})
.input_labels({"x", "y"})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
t);
auto d = IndexDomain({"x", "y"});
static_assert(std::is_same_v<decltype(d), IndexDomain<2>>);
EXPECT_EQ(t.domain(), d);
}
TEST(IdentityTransformTest, LabeledStdString) {
auto t = IdentityTransform({std::string("x"), std::string("y")});
static_assert(std::is_same_v<decltype(t), IndexTransform<2, 2>>);
EXPECT_EQ(IndexTransformBuilder<>(2, 2)
.implicit_lower_bounds({1, 1})
.implicit_upper_bounds({1, 1})
.input_labels({"x", "y"})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
t);
auto d = IndexDomain({std::string("x"), std::string("y")});
static_assert(std::is_same_v<decltype(d), IndexDomain<2>>);
EXPECT_EQ(t.domain(), d);
}
TEST(IndexTransformTest, LabeledStringView) {
auto t = IdentityTransform({std::string_view("x"), std::string_view("y")});
static_assert(std::is_same_v<decltype(t), IndexTransform<2, 2>>);
EXPECT_EQ(IndexTransformBuilder<>(2, 2)
.implicit_lower_bounds({1, 1})
.implicit_upper_bounds({1, 1})
.input_labels({"x", "y"})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
t);
auto d = IndexDomain({std::string_view("x"), std::string_view("y")});
static_assert(std::is_same_v<decltype(d), IndexDomain<2>>);
EXPECT_EQ(t.domain(), d);
}
TEST(IdentityTransformLikeTest, IndexTransform) {
EXPECT_EQ((IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({3, 4})
.implicit_lower_bounds({0, 1})
.implicit_upper_bounds({1, 0})
.input_labels({"x", "y"})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value()),
IdentityTransformLike(IndexTransformBuilder<2, 3>()
.input_origin({1, 2})
.input_shape({3, 4})
.implicit_lower_bounds({0, 1})
.implicit_upper_bounds({1, 0})
.input_labels({"x", "y"})
.output_single_input_dimension(0, 5, 7, 1)
.output_single_input_dimension(1, 6, 8, 0)
.output_single_input_dimension(2, 7, 9, 0)
.Finalize()
.value()));
}
TEST(IdentityTransformLikeTest, Array) {
EXPECT_EQ((IndexTransformBuilder<2, 2>()
.input_origin({0, 0})
.input_shape({3, 5})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value()),
IdentityTransformLike(AllocateArray<float>({3, 5})));
}
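// IdentityTransformLike preserves only the *input* domain of its argument:
// the rank-2 -> rank-3 transform above collapses to a rank-2 identity with
// the same origin, shape, labels, and implicit bounds, and for an array
// the domain is the zero-origin box given by the array shape.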
TEST(IdentityTransformTest, StaticBox) {
auto box = Box({1, 2}, {3, 4});
auto t = IdentityTransform(box);
static_assert(std::is_same_v<decltype(t), IndexTransform<2, 2>>);
EXPECT_EQ(IndexTransformBuilder<>(2, 2)
.input_origin({1, 2})
.input_shape({3, 4})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
t);
EXPECT_EQ(box, t.domain().box());
static_assert(tensorstore::HasBoxDomain<IndexTransform<2, 2>>);
EXPECT_EQ(box, GetBoxDomainOf(t));
auto d = IndexDomain(box);
static_assert(std::is_same_v<decltype(d), IndexDomain<2>>);
EXPECT_EQ(t.domain(), d);
}
TEST(IdentityTransformTest, DynamicBox) {
auto t = IdentityTransform(Box<>({1, 2}, {3, 4}));
static_assert(std::is_same_v<decltype(t), IndexTransform<>>);
EXPECT_EQ(IndexTransformBuilder<>(2, 2)
.input_origin({1, 2})
.input_shape({3, 4})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
t);
auto d = IndexDomain(Box<>({1, 2}, {3, 4}));
static_assert(std::is_same_v<decltype(d), IndexDomain<>>);
EXPECT_EQ(t.domain(), d);
}
TEST(IdentityTransformTest, FromShape) {
auto t = IdentityTransform(span<const Index, 2>({2, 3}));
static_assert(std::is_same_v<decltype(t), IndexTransform<2, 2>>);
EXPECT_EQ(IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({2, 3})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
t);
auto d = IndexDomain(span<const Index, 2>({2, 3}));
static_assert(std::is_same_v<decltype(d), IndexDomain<2>>);
EXPECT_EQ(t.domain(), d);
}
TEST(IdentityTransformTest, FromShapeBracedList) {
auto t = IdentityTransform({2, 3});
static_assert(std::is_same_v<decltype(t), IndexTransform<2, 2>>);
EXPECT_EQ(IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({2, 3})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
t);
auto d = IndexDomain({2, 3});
static_assert(std::is_same_v<decltype(d), IndexDomain<2>>);
EXPECT_EQ(t.domain(), d);
}
} |
555 | cpp | google/tensorstore | inverse_transform | tensorstore/index_space/internal/inverse_transform.cc | tensorstore/index_space/inverse_transform_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_INTERNAL_INVERSE_TRANSFORM_H_
#define TENSORSTORE_INDEX_SPACE_INTERNAL_INVERSE_TRANSFORM_H_
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_index_space {
Result<TransformRep::Ptr<>> InverseTransform(TransformRep* transform);
}
}
#endif
#include "tensorstore/index_space/internal/inverse_transform.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
Result<TransformRep::Ptr<>> InverseTransform(TransformRep* transform) {
if (!transform) {
return TransformRep::Ptr<>();
}
const DimensionIndex input_rank = transform->input_rank;
const DimensionIndex output_rank = transform->output_rank;
auto new_transform = TransformRep::Allocate(output_rank, input_rank);
new_transform->input_rank = output_rank;
new_transform->output_rank = input_rank;
new_transform->implicit_lower_bounds = false;
new_transform->implicit_upper_bounds = false;
const auto maps = transform->output_index_maps().first(output_rank);
const auto new_maps = new_transform->output_index_maps().first(input_rank);
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
const auto& map = maps[output_dim];
const auto new_d = new_transform->input_dimension(output_dim);
switch (map.method()) {
case OutputIndexMethod::array:
return absl::InvalidArgumentError(tensorstore::StrCat(
"Transform is not invertible due to index array "
"map for output dimension ",
output_dim));
case OutputIndexMethod::constant: {
if (!IsFiniteIndex(map.offset())) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Transform is not invertible due to offset ", map.offset(),
" outside valid range ", IndexInterval::FiniteRange(),
" for output dimension ", output_dim));
}
new_d.domain() = IndexInterval::UncheckedSized(map.offset(), 1);
new_d.implicit_lower_bound() = false;
new_d.implicit_upper_bound() = false;
break;
}
case OutputIndexMethod::single_input_dimension: {
if (map.stride() != 1 && map.stride() != -1) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Transform is not invertible due to "
"stride of ",
map.stride(), " for output dimension ", output_dim));
}
const DimensionIndex input_dim = map.input_dimension();
auto& new_map = new_maps[input_dim];
if (new_map.method() == OutputIndexMethod::single_input_dimension) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Transform is not invertible because input dimension ", input_dim,
" maps to output dimensions ", new_map.input_dimension(), " and ",
output_dim));
}
new_map.SetSingleInputDimension(output_dim);
auto new_domain_result = GetAffineTransformRange(
transform->input_dimension(input_dim).optionally_implicit_domain(),
map.offset(), map.stride());
if (!new_domain_result.ok()) {
return MaybeAnnotateStatus(
new_domain_result.status(),
tensorstore::StrCat("Error inverting map from input dimension ",
input_dim, " -> output dimension ",
output_dim));
}
if (map.offset() == std::numeric_limits<Index>::min()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Integer overflow occurred while inverting map from "
"input dimension ",
input_dim, " -> output dimension ", output_dim));
}
new_map.offset() = -map.offset() * map.stride();
new_map.stride() = map.stride();
new_d.domain() = new_domain_result->interval();
new_d.label() = transform->input_dimension(input_dim).label();
new_d.implicit_lower_bound() = new_domain_result->implicit_lower();
new_d.implicit_upper_bound() = new_domain_result->implicit_upper();
break;
}
}
}
for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
auto& new_map = new_maps[input_dim];
if (new_map.method() == OutputIndexMethod::single_input_dimension) {
continue;
}
auto input_domain =
transform->input_dimension(input_dim).optionally_implicit_domain();
if (input_domain.implicit_lower() || input_domain.implicit_upper() ||
input_domain.size() != 1) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Transform is not invertible due to non-singleton input dimension ",
input_dim, " with domain ", input_domain,
" that is not mapped by an output dimension"));
}
new_map.offset() = input_domain.inclusive_min();
new_map.stride() = 0;
}
internal_index_space::DebugCheckInvariants(new_transform.get());
return new_transform;
}
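// Worked example for the single_input_dimension case above: a map
//   output = offset + stride * input, with stride in {+1, -1},
// inverts to
//   input = (-offset * stride) + stride * output,
// since stride * stride == 1. E.g. y = 5 - x (offset 5, stride -1) inverts
// to x = 5 - y, matching new_map.offset() = -5 * -1 = 5 and
// new_map.stride() = -1.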
}
} | #include <cstddef>
#include <limits>
#include <random>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/index_transform_testutil.h"
#include "tensorstore/internal/testing/random_seed.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::IndexDomainBuilder;
using ::tensorstore::IndexTransform;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::InverseTransform;
using ::tensorstore::kMaxFiniteIndex;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
TEST(InverseTransformTest, Null) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto inv,
InverseTransform(IndexTransform<>()));
EXPECT_FALSE(inv.valid());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto inv_static,
InverseTransform(IndexTransform<3, 3>()));
EXPECT_FALSE(inv_static.valid());
}
TEST(InverseTransformTest, Example) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto t,
IndexTransformBuilder(3, 3)
.input_labels({"x", "", "y"})
.input_origin({1, 3, 2})
.input_exclusive_max({5, 4, 8})
.implicit_lower_bounds({1, 0, 0})
.implicit_upper_bounds({0, 0, 1})
.output_single_input_dimension(0, 5, -1, 2)
.output_single_input_dimension(1, 3, 1, 0)
.output_constant(2, 7)
.Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto expected_inv,
IndexTransformBuilder(3, 3)
.input_labels({"y", "x", ""})
.input_origin({-2, 4, 7})
.input_exclusive_max({4, 8, 8})
.implicit_lower_bounds({1, 1, 0})
.implicit_upper_bounds({0, 0, 0})
.output_single_input_dimension(0, -3, 1, 1)
.output_constant(1, 3)
.output_single_input_dimension(2, 5, -1, 0)
.Finalize());
EXPECT_EQ(expected_inv, InverseTransform(t));
EXPECT_EQ(t, InverseTransform(expected_inv));
}
TEST(InverseTransformTest, IdentityRank3) {
auto t =
IndexTransformBuilder<>(3, 3)
.input_labels({"x", "y", "z"})
.input_origin({3, 4, 5})
.input_shape({10, 11, 12})
.implicit_lower_bounds({1, 0, 1})
.implicit_upper_bounds({0, 1, 1})
.output_identity_transform()
.Finalize()
.value();
EXPECT_EQ(t, InverseTransform(t));
}
TEST(InverseTransformTest, Offsets) {
auto t =
IndexTransformBuilder<>(3, 3)
.input_labels({"x", "y", "z"})
.input_origin({3, 4, 5})
.input_shape({10, 11, 12})
.implicit_lower_bounds({1, 0, 1})
.implicit_upper_bounds({0, 1, 1})
.output_single_input_dimension(0, 6, 1, 0)
.output_single_input_dimension(1, 7, 1, 1)
.output_single_input_dimension(2, 8, 1, 2)
.Finalize()
.value();
auto expected_inv =
IndexTransformBuilder<>(3, 3)
.input_labels({"x", "y", "z"})
.input_origin({9, 11, 13})
.input_shape({10, 11, 12})
.implicit_lower_bounds({1, 0, 1})
.implicit_upper_bounds({0, 1, 1})
.output_single_input_dimension(0, -6, 1, 0)
.output_single_input_dimension(1, -7, 1, 1)
.output_single_input_dimension(2, -8, 1, 2)
.Finalize()
.value();
EXPECT_EQ(expected_inv, InverseTransform(t));
EXPECT_EQ(t, InverseTransform(expected_inv));
}
TEST(InverseTransformTest, Strides) {
auto t = IndexTransformBuilder<>(3, 3)
.input_labels({"x", "y", "z"})
.input_origin({3, 4, 5})
.input_shape({10, 11, 12})
.implicit_lower_bounds({1, 0, 1})
.implicit_upper_bounds({0, 1, 1})
.output_single_input_dimension(0, 0, -1, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 0, -1, 2)
.Finalize()
.value();
auto expected_inv = IndexTransformBuilder<>(3, 3)
.input_labels({"x", "y", "z"})
.input_origin({-12, 4, -16})
.input_shape({10, 11, 12})
.implicit_lower_bounds({0, 0, 1})
.implicit_upper_bounds({1, 1, 1})
.output_single_input_dimension(0, 0, -1, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 0, -1, 2)
.Finalize()
.value();
EXPECT_EQ(expected_inv, InverseTransform(t));
EXPECT_EQ(t, InverseTransform(expected_inv));
}
TEST(InverseTransformTest, Permutation) {
auto t = IndexTransformBuilder<>(3, 3)
.input_labels({"x", "y", "z"})
.input_origin({3, 4, 5})
.input_shape({10, 11, 12})
.implicit_lower_bounds({1, 0, 1})
.implicit_upper_bounds({0, 1, 1})
.output_single_input_dimension(0, 1)
.output_single_input_dimension(1, 2)
.output_single_input_dimension(2, 0)
.Finalize()
.value();
auto expected_inv = IndexTransformBuilder<>(3, 3)
.input_labels({"y", "z", "x"})
.input_origin({4, 5, 3})
.input_shape({11, 12, 10})
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({1, 1, 0})
.output_single_input_dimension(1, 0)
.output_single_input_dimension(2, 1)
.output_single_input_dimension(0, 2)
.Finalize()
.value();
EXPECT_EQ(expected_inv, InverseTransform(t));
EXPECT_EQ(t, InverseTransform(expected_inv));
}
TEST(InverseTransformTest, OffsetsAndStrides) {
auto t = IndexTransformBuilder<>(3, 3)
.input_labels({"x", "y", "z"})
.input_origin({9, 11, 13})
.input_shape({10, 11, 12})
.implicit_lower_bounds({1, 0, 1})
.implicit_upper_bounds({0, 1, 1})
.output_single_input_dimension(0, -6, -1, 0)
.output_single_input_dimension(1, -7, 1, 1)
.output_single_input_dimension(2, -8, -1, 2)
.Finalize()
.value();
auto expected_inv = IndexTransformBuilder<>(3, 3)
.input_labels({"x", "y", "z"})
.input_origin({-24, 4, -32})
.input_shape({10, 11, 12})
.implicit_lower_bounds({0, 0, 1})
.implicit_upper_bounds({1, 1, 1})
.output_single_input_dimension(0, -6, -1, 0)
.output_single_input_dimension(1, 7, 1, 1)
.output_single_input_dimension(2, -8, -1, 2)
.Finalize()
.value();
EXPECT_EQ(expected_inv, InverseTransform(t));
EXPECT_EQ(t, InverseTransform(expected_inv));
}
TEST(InverseTransformTest, OffsetsAndStridesAndPermutation) {
auto t = IndexTransformBuilder<>(3, 3)
.input_labels({"x", "y", "z"})
.input_origin({9, 11, 13})
.input_shape({10, 11, 12})
.implicit_lower_bounds({1, 0, 1})
.implicit_upper_bounds({0, 1, 1})
.output_single_input_dimension(0, -6, -1, 1)
.output_single_input_dimension(1, -7, 1, 2)
.output_single_input_dimension(2, -8, -1, 0)
.Finalize()
.value();
auto expected_inv = IndexTransformBuilder<>(3, 3)
.input_labels({"y", "z", "x"})
.input_origin({-27, 6, -26})
.input_shape({11, 12, 10})
.implicit_lower_bounds({1, 1, 0})
.implicit_upper_bounds({0, 1, 1})
.output_single_input_dimension(1, -6, -1, 0)
.output_single_input_dimension(2, 7, 1, 1)
.output_single_input_dimension(0, -8, -1, 2)
.Finalize()
.value();
EXPECT_EQ(expected_inv, InverseTransform(t));
EXPECT_EQ(t, InverseTransform(expected_inv));
}
TEST(InverseTransformTest, ErrorNonSingletonUnmappedInputDimension) {
EXPECT_THAT(
InverseTransform(IndexTransformBuilder<>(3, 2)
.output_identity_transform()
.Finalize()
.value()),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Transform is not invertible due to non-singleton "
"input dimension 2 with domain \\(-inf\\*, \\+inf\\*\\) "
"that is not mapped by an output dimension"));
EXPECT_THAT(InverseTransform(IndexTransformBuilder(1, 0)
.input_origin({0})
.input_shape({2})
.Finalize()
.value()),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Transform is not invertible due to non-singleton "
"input dimension 0 with domain \\[0, 2\\) "
"that is not mapped by an output dimension"));
EXPECT_THAT(InverseTransform(IndexTransformBuilder(1, 0)
.input_origin({0})
.input_shape({1})
.implicit_lower_bounds({1})
.Finalize()
.value()),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Transform is not invertible due to non-singleton "
"input dimension 0 with domain \\[0\\*, 1\\) "
"that is not mapped by an output dimension"));
EXPECT_THAT(InverseTransform(IndexTransformBuilder(1, 0)
.input_origin({0})
.input_shape({1})
.implicit_upper_bounds({1})
.Finalize()
.value()),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Transform is not invertible due to non-singleton "
"input dimension 0 with domain \\[0, 1\\*\\) "
"that is not mapped by an output dimension"));
}
TEST(InverseTransformTest, ConstantMap) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto t, IndexTransformBuilder(0, 1).output_constant(0, 42).Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected_inv,
IndexTransformBuilder(1, 0)
.input_origin({42})
.input_shape({1})
.Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected_inv_with_label,
IndexTransformBuilder(1, 0)
.input_origin({42})
.input_labels({"x"})
.input_shape({1})
.Finalize());
EXPECT_THAT(InverseTransform(t), ::testing::Optional(expected_inv));
EXPECT_THAT(InverseTransform(expected_inv), ::testing::Optional(t));
EXPECT_THAT(InverseTransform(expected_inv_with_label),
::testing::Optional(t));
}
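// Note that inverting the labeled variant also yields the unlabeled t: a
// constant output map has nowhere to record the input label "x", so the
// labeled and unlabeled singleton domains invert to the same rank-0 ->
// rank-1 transform.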
TEST(InverseTransformTest, IndexArrayMap) {
EXPECT_THAT(InverseTransform(
IndexTransformBuilder<>(1, 1)
.input_shape({2})
.output_index_array(0, 0, 1, MakeArray<Index>({0, 1}))
.Finalize()
.value()),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Transform is not invertible due to "
"index array map for output dimension 0"));
}
TEST(InverseTransformTest, NonUnitStride) {
EXPECT_THAT(InverseTransform(IndexTransformBuilder<>(1, 1)
.output_single_input_dimension(0, 0, 2, 0)
.Finalize()
.value()),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Transform is not invertible due to stride of 2 "
"for output dimension 0"));
}
TEST(InverseTransformTest, Diagonal) {
EXPECT_THAT(InverseTransform(IndexTransformBuilder<>(2, 2)
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 0)
.Finalize()
.value()),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Transform is not invertible because input "
"dimension 0 maps to output dimensions 0 and 1"));
}
TEST(InverseTransformTest, DomainOverflow) {
EXPECT_THAT(InverseTransform(
IndexTransformBuilder<>(1, 1)
.input_origin({10})
.input_shape({5})
.output_single_input_dimension(0, kMaxFiniteIndex, 1, 0)
.Finalize()
.value()),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error inverting map from input dimension 0 -> "
"output dimension 0: Integer overflow .*"));
}
TEST(InverseTransformTest, OffsetOverflow) {
EXPECT_THAT(
InverseTransform(IndexTransformBuilder<>(1, 1)
.output_single_input_dimension(
0, std::numeric_limits<Index>::min(), 1, 0)
.Finalize()
.value()),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Integer overflow occurred while inverting map from input "
"dimension 0 -> output dimension 0"));
}
TEST(InverseTransformTest, RandomFromOutputSpace) {
constexpr size_t kNumIterations = 100;
for (size_t i = 0; i < kNumIterations; ++i) {
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_INTERNAL_INVERSE_TRANSFORM_TEST_SEED")};
auto box = tensorstore::internal::MakeRandomBox(gen);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain, IndexDomainBuilder(box.rank()).bounds(box).Finalize());
auto transform =
tensorstore::internal::MakeRandomStridedIndexTransformForOutputSpace(
gen, domain);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto inv_transform,
InverseTransform(transform));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto inv_inv_transform,
InverseTransform(inv_transform));
EXPECT_EQ(transform, inv_inv_transform);
}
}
TEST(InverseTransformTest, RandomFromInputSpace) {
constexpr size_t kNumIterations = 100;
for (size_t i = 0; i < kNumIterations; ++i) {
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_INTERNAL_INVERSE_TRANSFORM_TEST_SEED")};
auto box = tensorstore::internal::MakeRandomBox(gen);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain, IndexDomainBuilder(box.rank()).bounds(box).Finalize());
auto transform =
tensorstore::internal::MakeRandomStridedIndexTransformForInputSpace(
gen, domain);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto inv_transform,
InverseTransform(transform));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto inv_inv_transform,
InverseTransform(inv_transform));
EXPECT_EQ(transform, inv_inv_transform);
}
}
} |
556 | cpp | google/tensorstore | compose_transforms | tensorstore/index_space/internal/compose_transforms.cc | tensorstore/index_space/compose_transforms_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_INTERNAL_COMPOSE_TRANSFORMS_H_
#define TENSORSTORE_INDEX_SPACE_INTERNAL_COMPOSE_TRANSFORMS_H_
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_index_space {
Result<TransformRep::Ptr<>> ComposeTransforms(TransformRep* b_to_c,
bool can_move_from_b_to_c,
TransformRep* a_to_b,
bool can_move_from_a_to_b,
bool domain_only = false);
Result<IndexTransform<dynamic_rank, dynamic_rank, container>> ComposeTransforms(
IndexTransform<dynamic_rank, dynamic_rank, container> b_to_c,
IndexTransform<dynamic_rank, dynamic_rank, container> a_to_b,
bool domain_only);
}
}
#endif
#include "tensorstore/index_space/internal/compose_transforms.h"
#include <cassert>
#include <sstream>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_replace.h"
#include "tensorstore/box.h"
#include "tensorstore/container_kind.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/propagate_bounds.h"
#include "tensorstore/index_space/internal/transform_array.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/internal/transform_rep_impl.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/rank.h"
#include "tensorstore/static_cast.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/element_pointer.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
namespace {
bool IsSingletonIndexArrayMap(StridedLayoutView<> layout) {
for (DimensionIndex dim = 0, rank = layout.rank(); dim < rank; ++dim) {
if (layout.byte_strides()[dim] == 0) continue;
if (layout.shape()[dim] != 1) return false;
}
return true;
}
absl::Status ComposeTransformsImpl(TransformRep* b_to_c,
bool can_move_from_b_to_c,
TransformRep* a_to_b,
bool can_move_from_a_to_b,
TransformRep* a_to_c, bool domain_only) {
assert(b_to_c != nullptr && a_to_b != nullptr && a_to_c != nullptr);
const DimensionIndex a_to_c_output_rank =
domain_only ? 0 : b_to_c->output_rank;
assert(a_to_c_output_rank <= a_to_c->output_rank_capacity &&
a_to_b->output_rank == b_to_c->input_rank &&
a_to_b->input_rank <= a_to_c->input_rank_capacity);
assert(a_to_c != b_to_c && a_to_c != a_to_b);
const DimensionIndex a_rank = a_to_b->input_rank;
const DimensionIndex b_rank = a_to_b->output_rank;
const DimensionIndex c_rank = b_to_c->output_rank;
a_to_c->input_rank = a_rank;
a_to_c->output_rank = a_to_c_output_rank;
CopyInputLabels(a_to_b, a_to_c,
can_move_from_a_to_b);
BoxView<> b_to_c_domain = b_to_c->input_domain(b_rank);
MutableBoxView<> a_to_c_domain = a_to_c->input_domain(a_rank);
TENSORSTORE_RETURN_IF_ERROR(PropagateBounds(
b_to_c_domain, b_to_c->implicit_lower_bounds,
b_to_c->implicit_upper_bounds, a_to_b, a_to_c_domain,
a_to_c->implicit_lower_bounds, a_to_c->implicit_upper_bounds));
if (domain_only) {
internal_index_space::DebugCheckInvariants(a_to_c);
return absl::OkStatus();
}
span<const OutputIndexMap> b_to_c_output_index_maps =
b_to_c->output_index_maps().first(c_rank);
span<const OutputIndexMap> a_to_b_output_index_maps =
a_to_b->output_index_maps().first(b_rank);
span<OutputIndexMap> a_to_c_output_index_maps =
a_to_c->output_index_maps().first(c_rank);
const bool a_to_c_domain_is_explicitly_empty =
IsDomainExplicitlyEmpty(a_to_c);
for (DimensionIndex c_dim = 0; c_dim < c_rank; ++c_dim) {
auto& b_to_c_map = b_to_c_output_index_maps[c_dim];
auto& a_to_c_map = a_to_c_output_index_maps[c_dim];
const OutputIndexMethod b_to_c_method = b_to_c_map.stride() == 0
? OutputIndexMethod::constant
: b_to_c_map.method();
switch (b_to_c_method) {
case OutputIndexMethod::constant: {
a_to_c_map.SetConstant();
a_to_c_map.stride() = 0;
a_to_c_map.offset() = b_to_c_map.offset();
break;
}
case OutputIndexMethod::single_input_dimension: {
const DimensionIndex b_dim = b_to_c_map.input_dimension();
assert(b_dim >= 0 && b_dim < b_rank);
auto& a_to_b_map = a_to_b_output_index_maps[b_dim];
const OutputIndexMethod a_to_b_method =
a_to_b_map.stride() == 0 ? OutputIndexMethod::constant
: a_to_b_map.method();
Index new_output_offset;
if (internal::MulOverflow(a_to_b_map.offset(), b_to_c_map.stride(),
&new_output_offset) ||
internal::AddOverflow(b_to_c_map.offset(), new_output_offset,
&a_to_c_map.offset())) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Integer overflow computing output "
"offset for output dimension ",
c_dim, "."));
}
if (a_to_b_method == OutputIndexMethod::constant) {
a_to_c_map.SetConstant();
a_to_c_map.stride() = 0;
break;
}
if (internal::MulOverflow(a_to_b_map.stride(), b_to_c_map.stride(),
&a_to_c_map.stride())) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Integer overflow computing output_strides[", c_dim,
"] = ", a_to_b_map.stride(), " * ", b_to_c_map.stride(), "."));
}
if (a_to_b_method == OutputIndexMethod::single_input_dimension) {
const DimensionIndex a_dim = a_to_b_map.input_dimension();
assert(a_dim >= 0 && a_dim < a_rank);
a_to_c_map.SetSingleInputDimension(a_dim);
break;
}
assert(a_to_b_method == OutputIndexMethod::array);
if (a_to_c_domain_is_explicitly_empty) {
a_to_c_map.SetConstant();
a_to_c_map.offset() = 0;
a_to_c_map.stride() = 0;
break;
}
const auto& a_to_b_index_array_data = a_to_b_map.index_array_data();
IndexInterval index_range;
{
TENSORSTORE_ASSIGN_OR_RETURN(
const IndexInterval propagated_bounds,
GetAffineTransformDomain(
OptionallyImplicitIndexInterval{
b_to_c_domain[b_dim],
b_to_c->implicit_lower_bounds[b_dim],
b_to_c->implicit_upper_bounds[b_dim]}
.effective_interval(),
a_to_b_map.offset(), a_to_b_map.stride()));
index_range =
Intersect(a_to_b_index_array_data.index_range, propagated_bounds);
}
if (IsSingletonIndexArrayMap(
StridedLayoutView<>(a_rank, a_to_c_domain.shape().data(),
a_to_b_index_array_data.byte_strides))) {
a_to_c_map.SetConstant();
TENSORSTORE_RETURN_IF_ERROR(ReplaceZeroRankIndexArrayIndexMap(
*a_to_b_index_array_data.array_view(a_to_b->input_domain(a_rank))
.byte_strided_origin_pointer(),
index_range, &a_to_c_map.offset(), &a_to_c_map.stride()));
} else {
auto& index_array =
a_to_c_map.SetArrayIndexing(a_rank, a_to_b_index_array_data);
index_array.index_range = index_range;
}
break;
}
case OutputIndexMethod::array: {
auto& a_to_c_map = a_to_c_output_index_maps[c_dim];
if (a_to_c_domain_is_explicitly_empty) {
a_to_c_map.SetConstant();
a_to_c_map.offset() = 0;
a_to_c_map.stride() = 0;
break;
}
auto& index_array_data = b_to_c_map.index_array_data();
auto& result_array_data = a_to_c_map.SetArrayIndexing(a_rank);
result_array_data.index_range = index_array_data.index_range;
TENSORSTORE_ASSIGN_OR_RETURN(
auto transformed_element_pointer,
TransformArraySubRegion(
index_array_data.shared_array_view(b_to_c_domain), a_to_b,
a_to_c_domain.origin().data(), a_to_c_domain.shape().data(),
result_array_data.byte_strides,
{skip_repeated_elements}));
auto new_index_array_origin_pointer =
StaticDataTypeCast<const Index, unchecked>(
std::move(transformed_element_pointer));
result_array_data.element_pointer = AddByteOffset(
new_index_array_origin_pointer,
-IndexInnerProduct(a_rank, result_array_data.byte_strides,
a_to_c_domain.origin().data()));
Index output_offset = b_to_c_map.offset();
Index output_stride = b_to_c_map.stride();
if (IsSingletonIndexArrayMap(
StridedLayoutView<>(a_rank, a_to_c_domain.shape().data(),
result_array_data.byte_strides))) {
TENSORSTORE_RETURN_IF_ERROR(ReplaceZeroRankIndexArrayIndexMap(
*new_index_array_origin_pointer.data(),
result_array_data.index_range, &output_offset, &output_stride));
a_to_c_map.SetConstant();
}
a_to_c_map.offset() = output_offset;
a_to_c_map.stride() = output_stride;
break;
}
}
}
internal_index_space::DebugCheckInvariants(a_to_c);
return absl::OkStatus();
}
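// Worked example of the affine composition in the single_input_dimension
// case above: given b = b_off + b_str * a and c = c_off + c_str * b,
//   c = (c_off + c_str * b_off) + (c_str * b_str) * a,
// which is what the MulOverflow/AddOverflow-checked arithmetic computes.
// E.g. composing c = 20 + b with b = 10 + a yields c = 30 + a, as the
// FunctionCallOperator case in the unit tests verifies.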
}
Result<TransformRep::Ptr<>> ComposeTransforms(TransformRep* b_to_c,
bool can_move_from_b_to_c,
TransformRep* a_to_b,
bool can_move_from_a_to_b,
bool domain_only) {
assert(b_to_c);
assert(a_to_b);
const DimensionIndex a_rank = a_to_b->input_rank;
const DimensionIndex b_rank = a_to_b->output_rank;
const DimensionIndex c_rank = b_to_c->output_rank;
absl::Status status;
if (b_rank == b_to_c->input_rank) {
auto data = TransformRep::Allocate(a_rank, domain_only ? 0 : c_rank);
status =
ComposeTransformsImpl(b_to_c, can_move_from_b_to_c, a_to_b,
can_move_from_a_to_b, data.get(), domain_only);
if (status.ok()) {
return data;
}
} else {
status = absl::InvalidArgumentError(
tensorstore::StrCat("Rank ", b_to_c->input_rank, " -> ", c_rank,
" transform cannot be composed with rank ", a_rank,
" -> ", b_rank, " transform."));
}
assert(!status.ok());
auto format_transform = [](TransformRep* rep) {
std::ostringstream os;
internal_index_space::PrintToOstream(os, rep);
std::string str = os.str();
absl::StrReplaceAll({{"\n", " "}}, &str);
return absl::Cord(str);
};
AddStatusPayload(status, "transform", format_transform(a_to_b));
if (!status.GetPayload("domain").has_value()) {
AddStatusPayload(status, "left_transform", format_transform(b_to_c));
}
return status;
}
Result<IndexTransform<dynamic_rank, dynamic_rank, container>> ComposeTransforms(
IndexTransform<dynamic_rank, dynamic_rank, container> b_to_c,
IndexTransform<dynamic_rank, dynamic_rank, container> a_to_b,
bool domain_only) {
auto b_to_c_rep = TransformAccess::rep(b_to_c);
auto a_to_b_rep = TransformAccess::rep(a_to_b);
TENSORSTORE_ASSIGN_OR_RETURN(
auto a_to_c_rep,
internal_index_space::ComposeTransforms(
b_to_c_rep,
b_to_c_rep->is_unique(), a_to_b_rep,
a_to_b_rep->is_unique(), domain_only));
return TransformAccess::Make<IndexTransform<>>(std::move(a_to_c_rep));
}
}
} | #include "tensorstore/index_space/internal/compose_transforms.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/array.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::IdentityTransform;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::IndexTransformView;
using ::tensorstore::kMaxFiniteIndex;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
TEST(ComposeTransformsTest, EmptyDomain) {
auto b_to_c = IndexTransformBuilder<3, 2>()
.input_origin({1, 2, 3})
.input_shape({5, 6, 7})
.output_identity_transform()
.Finalize()
.value();
auto a_to_b = IndexTransformBuilder<2, 3>()
.input_origin({1, 2})
.input_shape({5, 0})
.output_identity_transform()
.output_constant(2, 5)
.Finalize()
.value();
auto a_to_c = ComposeTransforms(b_to_c, a_to_b).value();
auto expected_a_to_c = IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({5, 0})
.output_identity_transform()
.Finalize()
.value();
EXPECT_EQ(expected_a_to_c, a_to_c);
}
TEST(ComposeTransformsTest, TransformArrayError) {
auto b_to_c = IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({2})
.output_index_array(0, 0, 1, MakeArray<Index>({1, 2}))
.Finalize()
.value();
auto a_to_b = IndexTransformBuilder<1, 1>()
.input_origin({1})
.input_shape({1})
.output_index_array(0, 0, 1, MakeArray<Index>({1}),
IndexInterval::Closed(4, 6))
.Finalize()
.value();
EXPECT_THAT(ComposeTransforms(b_to_c, a_to_b),
MatchesStatus(absl::StatusCode::kOutOfRange));
}
TEST(ComposeTransformsTest, BtoCIndexArrayWithSingleIndex) {
auto b_to_c = IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({2})
.output_index_array(0, 0, 1, MakeArray<Index>({7, 8}))
.Finalize()
.value();
auto a_to_b = IndexTransformBuilder<1, 1>()
.input_origin({1})
.input_shape({1})
.output_identity_transform()
.Finalize()
.value();
auto a_to_c = ComposeTransforms(b_to_c, a_to_b).value();
auto expected_a_to_c = IndexTransformBuilder<1, 1>()
.input_origin({1})
.input_shape({1})
.output_constant(0, 8)
.Finalize()
.value();
EXPECT_EQ(expected_a_to_c, a_to_c);
}
TEST(ComposeTransformsTest, BtoCIndexArrayWithInvalidSingleIndex) {
auto b_to_c = IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({2})
.output_index_array(0, 0, 1, MakeArray<Index>({7, 8}),
IndexInterval::Closed(2, 3))
.Finalize()
.value();
auto a_to_b = IndexTransformBuilder<1, 1>()
.input_origin({1})
.input_shape({1})
.output_identity_transform()
.Finalize()
.value();
EXPECT_THAT(ComposeTransforms(b_to_c, a_to_b),
MatchesStatus(absl::StatusCode::kOutOfRange));
}
TEST(ComposeTransformsTest, AtoBIndexArrayWithSingleIndex) {
auto b_to_c = IndexTransformBuilder<1, 1>()
.output_identity_transform()
.Finalize()
.value();
auto a_to_b = IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({1})
.output_index_array(0, 0, 1, MakeArray<Index>({7}))
.Finalize()
.value();
auto a_to_c = ComposeTransforms(b_to_c, a_to_b).value();
auto expected_a_to_c = IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({1})
.output_constant(0, 7)
.Finalize()
.value();
EXPECT_EQ(expected_a_to_c, a_to_c);
}
TEST(ComposeTransformsTest, AtoBIndexArrayWithInvalidSingleIndex) {
auto b_to_c = IndexTransformBuilder<1, 1>()
.output_identity_transform()
.Finalize()
.value();
auto a_to_b = IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({1})
.output_index_array(0, 0, 1, MakeArray<Index>({7}),
IndexInterval::Closed(2, 3))
.Finalize()
.value();
EXPECT_THAT(ComposeTransforms(b_to_c, a_to_b),
MatchesStatus(absl::StatusCode::kOutOfRange));
}
TEST(ComposeTransformsTest, ConstantOutOfDomain) {
auto b_to_c = IndexTransformBuilder<3, 2>()
.input_origin({1, 2, 3})
.input_shape({5, 6, 7})
.output_identity_transform()
.Finalize()
.value();
auto a_to_b = IndexTransformBuilder<2, 3>()
.input_origin({1, 2})
.input_shape({5, 4})
.output_identity_transform()
.output_constant(2, 2)
.Finalize()
.value();
EXPECT_THAT(ComposeTransforms(b_to_c, a_to_b).status(),
MatchesStatus(absl::StatusCode::kOutOfRange,
".*Index 2 is outside valid range \\[3, 10\\)"));
}
TEST(ComposeTransformsTest, ConstantOverflow) {
EXPECT_THAT(ComposeTransforms(IndexTransformBuilder<1, 1>()
.output_single_input_dimension(0, 0, 100, 0)
.Finalize()
.value(),
IndexTransformBuilder<0, 1>()
.output_constant(0, kMaxFiniteIndex)
.Finalize()
.value())
.status(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
ComposeTransforms(IndexTransformBuilder<1, 1>()
.output_single_input_dimension(
0, std::numeric_limits<Index>::max(), 1, 0)
.Finalize()
.value(),
IndexTransformBuilder<0, 1>()
.output_constant(0, 100)
.Finalize()
.value())
.status(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(ComposeTransformsTest, SingleInputDimensionOverflow) {
EXPECT_THAT(
ComposeTransforms(IndexTransformBuilder<1, 1>()
.output_single_input_dimension(
0, std::numeric_limits<Index>::max(), 1, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({10})
.output_single_input_dimension(0, 100, 1, 0)
.Finalize()
.value())
.status(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ComposeTransforms(
IndexTransformBuilder<1, 1>()
.output_single_input_dimension(0, 0, 100, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({10})
.output_single_input_dimension(0, kMaxFiniteIndex, 1, 0)
.Finalize()
.value())
.status(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
ComposeTransforms(IndexTransformBuilder<1, 1>()
.output_single_input_dimension(0, 0, 100, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({10})
.output_single_input_dimension(
0, 0, std::numeric_limits<Index>::max() - 1, 0)
.Finalize()
.value())
.status(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(ComposeTransformsTest, IndexArrayBoundsOverflow) {
EXPECT_THAT(ComposeTransforms(
IndexTransformBuilder<1, 1>()
.input_origin({2})
.input_shape({100})
.output_identity_transform()
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({2})
.output_index_array(0, std::numeric_limits<Index>::min(),
1, MakeArray<Index>({1, 2}),
IndexInterval::Closed(0, 100))
.Finalize()
.value())
.status(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(ComposeTransformsTest, RankMismatch) {
EXPECT_THAT(
ComposeTransforms(IdentityTransform(2), IdentityTransform(3)).status(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Rank 2 -> 2 transform cannot be composed with rank 3 -> 3 "
"transform\\."));
}
TEST(ComposeTransformsTest, FunctionCallOperator) {
const auto t0 = IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({3})
.output_single_input_dimension(0, 10, 1, 0)
.Finalize()
.value();
const auto t1 = IndexTransformBuilder<1, 1>()
.input_origin({10})
.input_shape({5})
.output_single_input_dimension(0, 20, 1, 0)
.Finalize()
.value();
const auto expected_composed = IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({3})
.output_single_input_dimension(0, 30, 1, 0)
.Finalize()
.value();
const auto composed = t0(t1).value();
EXPECT_EQ(expected_composed, composed);
EXPECT_EQ(expected_composed, ComposeTransforms(t1, t0).value());
}
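// Note the argument order: t0(t1) applies t0 first and then t1, so it is
// equivalent to ComposeTransforms(t1, t0) with t1 as the outer (b_to_c)
// transform.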
TEST(ComposeTransformsTest, RankZero) {
auto t0 = IdentityTransform(0);
EXPECT_EQ(t0, ComposeTransforms(t0, t0).value());
}
TEST(ComposeTransformsTest, ImplicitOutOfBounds) {
const auto t0 = IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({4})
.implicit_lower_bounds({1})
.output_identity_transform()
.Finalize()
.value();
const auto t1 = IndexTransformBuilder<1, 1>()
.input_origin({-1})
.input_exclusive_max({2})
.output_identity_transform()
.Finalize()
.value();
EXPECT_THAT(ComposeTransforms(t0, t1), ::testing::Optional(t1));
}
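// Implicit bounds do not constrain composition: although t1's domain extends
// below t0's lower bound, that bound is implicit, so the composed transform
// simply adopts t1's domain, as verified above.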
TEST(ComposeTransformsTest, TransformIndexArraySkipRepeatedElements) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto t0, IndexTransformBuilder(2, 2)
.input_shape({5, 2})
.output_index_array(
0, 0, 1, MakeArray<Index>({{0}, {1}, {2}, {3}, {4}}))
.output_single_input_dimension(1, 1)
.Finalize());
EXPECT_THAT(t0.output_index_maps()[0].index_array().byte_strides(),
::testing::ElementsAre(8, 0));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto t1, ComposeTransforms(t0, t0));
EXPECT_EQ(t0, t1);
EXPECT_THAT(t1.output_index_maps()[0].index_array().byte_strides(),
::testing::ElementsAre(8, 0));
}
} |
557 | cpp | google/tensorstore | transform_rep | tensorstore/index_space/internal/transform_rep.cc | tensorstore/index_space/transform_rep_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_INTERNAL_TRANSFORM_REP_H_
#define TENSORSTORE_INDEX_SPACE_INTERNAL_TRANSFORM_REP_H_
#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iosfwd>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/index_space/transform_array_constraints.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/string_like.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/division.h"
#include "tensorstore/util/element_pointer.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
template <DimensionIndex InputRank, DimensionIndex OutputRank,
ContainerKind CKind>
class IndexTransform;
template <DimensionIndex Rank, ContainerKind CKind>
class IndexDomain;
namespace internal_index_space {
struct IndexArrayData {
SharedElementPointer<const Index> element_pointer;
IndexInterval index_range;
DimensionIndex rank_capacity;
Index byte_strides[];
StridedLayoutView<dynamic_rank, offset_origin> layout(
BoxView<> input_domain) const {
assert(rank_capacity >= input_domain.rank());
return StridedLayoutView<dynamic_rank, offset_origin>(
input_domain.rank(), input_domain.origin().data(),
input_domain.shape().data(), byte_strides);
}
ArrayView<const Index, dynamic_rank, offset_origin> array_view(
BoxView<> input_domain) const {
return {element_pointer, layout(input_domain)};
}
SharedArrayView<const Index, dynamic_rank, offset_origin> shared_array_view(
BoxView<> input_domain) const {
return {element_pointer, layout(input_domain)};
}
};
static_assert(alignof(IndexArrayData) >= 2,
"Platform has unsupported alignment.");
class OutputIndexMap {
public:
OutputIndexMap() = default;
OutputIndexMap(OutputIndexMap&& other)
: value_(0), offset_(other.offset_), stride_(other.stride_) {
std::swap(other.value_, value_);
}
OutputIndexMap& operator=(OutputIndexMap&& other) {
std::swap(value_, other.value_);
offset_ = other.offset_;
stride_ = other.stride_;
return *this;
}
OutputIndexMethod method() const {
return value_ == 0 ? OutputIndexMethod::constant
: value_ & 1 ? OutputIndexMethod::single_input_dimension
: OutputIndexMethod::array;
}
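  // Tagged encoding of `value_`, relied on by method() above: 0 denotes a
  // constant map; an odd value stores a single input dimension as
  // `(input_dim << 1) | 1` (see SetSingleInputDimension); any other non-zero
  // value is a pointer to heap-allocated IndexArrayData, whose low bit is
  // guaranteed clear by the `alignof(IndexArrayData) >= 2` assertion above.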
DimensionIndex input_dimension() const {
assert(method() == OutputIndexMethod::single_input_dimension);
return static_cast<DimensionIndex>(value_ >> 1);
}
const IndexArrayData& index_array_data() const {
assert(method() == OutputIndexMethod::array);
return *reinterpret_cast<IndexArrayData*>(value_);
}
IndexArrayData& index_array_data() {
assert(method() == OutputIndexMethod::array);
return *reinterpret_cast<IndexArrayData*>(value_);
}
Result<Index> operator()(span<const Index> input_indices) const;
void SetConstant();
void SetSingleInputDimension(DimensionIndex input_dim);
IndexArrayData& SetArrayIndexing(DimensionIndex rank);
IndexArrayData& SetArrayIndexing(DimensionIndex rank,
const IndexArrayData& other);
void Assign(DimensionIndex rank, const OutputIndexMap& other);
~OutputIndexMap() { SetConstant(); }
constexpr Index offset() const { return offset_; }
constexpr Index stride() const { return stride_; }
Index& offset() { return offset_; }
Index& stride() { return stride_; }
private:
std::uintptr_t value_ = 0;
Index offset_, stride_;
};
class InputDimensionsView;
class InputDimensionRef;
struct TransformRep {
std::int16_t input_rank;
std::int16_t output_rank;
std::int16_t input_rank_capacity;
std::int16_t output_rank_capacity;
DimensionSet implicit_lower_bounds;
DimensionSet implicit_upper_bounds;
DimensionSet implicit_dimensions() const {
return implicit_lower_bounds | implicit_upper_bounds;
}
static_assert(kMaxRank * 2 <= 64);
std::atomic<std::uint64_t> reference_count;
bool is_unique() const {
return reference_count.load(std::memory_order_acquire) == 1;
}
InputDimensionsView all_input_dimensions(DimensionIndex rank);
InputDimensionRef input_dimension(DimensionIndex i);
span<Index> input_origin() {
return span(reinterpret_cast<Index*>(this + 1), input_rank_capacity);
}
span<Index> input_shape() {
return span(reinterpret_cast<Index*>(this + 1) + input_rank_capacity,
input_rank_capacity);
}
MutableBoxView<> input_domain(DimensionIndex rank) {
assert(0 <= rank && rank <= input_rank_capacity);
return MutableBoxView<>(rank, input_origin().data(), input_shape().data());
}
span<OutputIndexMap> output_index_maps() {
return span(reinterpret_cast<OutputIndexMap*>(this) - output_rank_capacity,
output_rank_capacity);
}
span<std::string> input_labels() {
return span(reinterpret_cast<std::string*>(input_shape().end()),
input_rank_capacity);
}
static void Free(TransformRep* ptr);
template <typename PointerType = TransformRep*>
struct IntrusivePtrTraits {
template <typename>
using pointer = PointerType;
static void increment(TransformRep* rep) {
rep->reference_count.fetch_add(1, std::memory_order_acq_rel);
}
static void decrement(TransformRep* rep) {
if (rep->reference_count.fetch_sub(1, std::memory_order_acq_rel) == 1) {
Free(rep);
}
}
};
template <ContainerKind CKind = container>
using Ptr = std::conditional_t<
CKind == view, TransformRep*,
internal::IntrusivePtr<TransformRep, IntrusivePtrTraits<>>>;
static Ptr<> Allocate(DimensionIndex input_rank_capacity,
DimensionIndex output_rank_capacity);
};
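// Layout note: a TransformRep lives in a single allocation with the
// OutputIndexMap array placed immediately *before* the header (hence
// output_index_maps() indexes backwards from `this`), followed by the
// input_origin and input_shape Index arrays and finally the input_labels
// strings; see Allocate() in the .cc file for the arithmetic.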
#ifdef NDEBUG
inline void DebugCheckInvariants(TransformRep* rep) {}
#else
void DebugCheckInvariants(TransformRep* rep);
#endif
inline void NormalizeImplicitBounds(TransformRep& rep) {
const auto mask = DimensionSet::UpTo(rep.input_rank);
rep.implicit_lower_bounds &= mask;
rep.implicit_upper_bounds &= mask;
}
static_assert(alignof(OutputIndexMap) <= sizeof(Index),
"Platform has unsupported alignment.");
static_assert(alignof(std::string) <= sizeof(Index),
"Platform has unsupported alignment.");
class InputDimensionRef {
public:
explicit InputDimensionRef(TransformRep* rep, DimensionIndex input_dim)
: rep_(rep), input_dim_(input_dim) {}
IndexIntervalRef domain() const {
return rep_->input_domain(rep_->input_rank_capacity)[input_dim_];
}
OptionallyImplicitIndexInterval optionally_implicit_domain() const {
return {domain(), implicit_lower_bound(), implicit_upper_bound()};
}
template <ContainerKind LabelCKind = view>
IndexDomainDimension<LabelCKind> index_domain_dimension() const {
return {optionally_implicit_domain(), label()};
}
DimensionSet::reference implicit_lower_bound() const {
return rep_->implicit_lower_bounds[input_dim_];
}
DimensionSet::reference implicit_upper_bound() const {
return rep_->implicit_upper_bounds[input_dim_];
}
std::string& label() const { return rep_->input_labels()[input_dim_]; }
const InputDimensionRef& operator=(const InputDimensionRef& other) const {
domain() = other.domain();
implicit_lower_bound() = other.implicit_lower_bound();
implicit_upper_bound() = other.implicit_upper_bound();
label() = other.label();
return *this;
}
void SetEmptyLabel() const { label().clear(); }
template <ContainerKind LabelCKind>
const InputDimensionRef& operator=(
const IndexDomainDimension<LabelCKind>& other) const {
domain() = other.interval();
implicit_lower_bound() = other.implicit_lower();
implicit_upper_bound() = other.implicit_upper();
label().assign(other.label().begin(), other.label().end());
return *this;
}
template <ContainerKind LabelCKind = view>
operator IndexDomainDimension<LabelCKind>() const {
return index_domain_dimension<LabelCKind>();
}
private:
TransformRep* const rep_;
const DimensionIndex input_dim_;
};
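// InputDimensionRef is a proxy reference (in the spirit of
// std::vector<bool>::reference): assigning through it writes the domain,
// implicit-bound flags, and label into the underlying TransformRep rather
// than rebinding the reference itself.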
class InputDimensionsView {
public:
explicit InputDimensionsView(TransformRep* rep, DimensionIndex input_rank)
: rep_(rep), size_(input_rank) {}
DimensionIndex size() const { return size_; }
InputDimensionRef operator[](DimensionIndex i) const {
assert(i >= 0 && i < size_);
return InputDimensionRef(rep_, i);
}
private:
TransformRep* rep_;
DimensionIndex size_;
};
inline ABSL_ATTRIBUTE_ALWAYS_INLINE InputDimensionsView
TransformRep::all_input_dimensions(DimensionIndex rank) {
assert(rank >= 0 && rank <= input_rank_capacity);
return InputDimensionsView(this, rank);
}
inline ABSL_ATTRIBUTE_ALWAYS_INLINE InputDimensionRef
TransformRep::input_dimension(DimensionIndex i) {
assert(i >= 0 && i < input_rank_capacity);
return InputDimensionRef(this, i);
}
void CopyTransformRep(TransformRep* source, TransformRep* dest);
void CopyTransformRepDomain(TransformRep* source, TransformRep* dest);
void MoveTransformRep(TransformRep* source, TransformRep* dest);
TransformRep::Ptr<> MutableRep(TransformRep::Ptr<> ptr,
bool domain_only = false);
void ResetOutputIndexMaps(TransformRep* ptr);
TransformRep::Ptr<> NewOrMutableRep(TransformRep* ptr,
DimensionIndex input_rank_capacity,
DimensionIndex output_rank_capacity,
bool domain_only = false);
bool IsDomainExplicitlyEmpty(TransformRep* ptr);
void ReplaceAllIndexArrayMapsWithConstantMaps(TransformRep* ptr);
bool AreDomainsEqual(TransformRep* a, TransformRep* b);
bool AreEqual(TransformRep* a, TransformRep* b);
void PrintToOstream(std::ostream& os, TransformRep* transform);
void PrintDomainToOstream(std::ostream& os, TransformRep* transform);
DimensionSet GetIndexArrayInputDimensions(TransformRep* transform);
TransformRep::Ptr<> WithImplicitDimensions(TransformRep::Ptr<> transform,
DimensionSet implicit_lower_bounds,
DimensionSet implicit_upper_bounds,
bool domain_only);
absl::Status TransformIndices(TransformRep* data,
span<const Index> input_indices,
span<Index> output_indices);
TransformRep::Ptr<> GetSubDomain(TransformRep* rep,
span<const DimensionIndex> dims);
bool IsUnlabeled(span<const std::string> labels);
class TransformAccess {
public:
template <typename T>
static TransformRep* rep(const T& x) {
return internal::to_address(x.rep_);
}
template <ContainerKind TargetCKind, typename T>
static auto rep_ptr(T&& x) {
if constexpr (TargetCKind == view) {
return rep(x);
} else {
return TransformRep::Ptr<>(std::forward<T>(x).rep_);
}
}
template <typename T>
static decltype(auto) rep_ptr(T&& x) {
return (std::forward<T>(x).rep_);
}
template <DimensionIndex Rank, ContainerKind CKind>
static IndexTransform<Rank, dynamic_rank, view> transform(
const IndexDomain<Rank, CKind>& x) {
return Make<IndexTransform<Rank, dynamic_rank, view>>(rep(x));
}
template <DimensionIndex Rank>
static IndexTransform<Rank, dynamic_rank, container> transform(
IndexDomain<Rank, container>&& x) {
return Make<IndexTransform<Rank, dynamic_rank, container>>(
std::move(x.rep_));
}
template <typename T>
static T Make(TransformRep::Ptr<T::container_kind> ptr) {
T t;
t.rep_ = std::move(ptr);
return t;
}
};
}
}
#endif
#include "tensorstore/index_space/internal/transform_rep.h"
#include <algorithm>
#include <cstdlib>
#include <cstring>
#include <iterator>
#include <memory>
#include <new>
#include <utility>
#include "absl/base/optimization.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/transform_rep_impl.h"
#include "tensorstore/internal/dimension_labels.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/division.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
namespace {
void FreeIndexArrayData(IndexArrayData* data) {
std::destroy_at(data);
std::free(data);
}
void CopyTrivialFields(TransformRep* source, TransformRep* dest) {
assert(dest->input_rank_capacity >= source->input_rank &&
dest->output_rank_capacity >= source->output_rank);
const DimensionIndex input_rank = dest->input_rank = source->input_rank;
dest->output_rank = source->output_rank;
std::copy_n(source->input_origin().begin(), input_rank,
dest->input_origin().begin());
std::copy_n(source->input_shape().begin(), input_rank,
dest->input_shape().begin());
dest->implicit_lower_bounds = source->implicit_lower_bounds;
dest->implicit_upper_bounds = source->implicit_upper_bounds;
}
}
void CopyInputLabels(TransformRep* source, TransformRep* dest, bool can_move) {
assert(dest->input_rank_capacity >= source->input_rank);
const DimensionIndex input_rank = source->input_rank;
if (can_move) {
std::copy_n(std::make_move_iterator(source->input_labels().begin()),
input_rank, dest->input_labels().begin());
} else {
std::copy_n(source->input_labels().begin(), input_rank,
dest->input_labels().begin());
}
}
void OutputIndexMap::SetConstant() {
if (method() == OutputIndexMethod::array) {
FreeIndexArrayData(&index_array_data());
}
value_ = 0;
}
void OutputIndexMap::SetSingleInputDimension(DimensionIndex input_dim) {
if (method() == OutputIndexMethod::array) {
FreeIndexArrayData(&index_array_data());
}
value_ = (input_dim << 1) | 1;
}
IndexArrayData& OutputIndexMap::SetArrayIndexing(DimensionIndex rank) {
IndexArrayData* data;
if (method() == OutputIndexMethod::array) {
data = &index_array_data();
if (data->rank_capacity >= rank) return *data;
SharedElementPointer<const Index> element_pointer =
std::move(data->element_pointer);
auto bounds = data->index_range;
std::destroy_at(data);
IndexArrayData* new_data = static_cast<IndexArrayData*>(
std::realloc(static_cast<void*>(data),
sizeof(IndexArrayData) + sizeof(Index) * rank));
if (new_data) data = new_data;
new (data) IndexArrayData;
data->element_pointer = std::move(element_pointer);
data->index_range = bounds;
if (!new_data) TENSORSTORE_THROW_BAD_ALLOC;
data->rank_capacity = rank;
} else {
data = static_cast<IndexArrayData*>(
std::malloc(sizeof(IndexArrayData) + sizeof(Index) * rank));
if (!data) {
TENSORSTORE_THROW_BAD_ALLOC;
}
new (data) IndexArrayData;
data->rank_capacity = rank;
}
value_ = reinterpret_cast<std::uintptr_t>(data);
return *data;
}
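// Note on the growth path above: element_pointer and index_range are moved
// out before std::destroy_at/std::realloc, so even if realloc fails and the
// original block is kept, the placement-new restores the map to a safely
// destructible state before TENSORSTORE_THROW_BAD_ALLOC is reached.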
IndexArrayData& OutputIndexMap::SetArrayIndexing(DimensionIndex rank,
const IndexArrayData& other) {
assert(other.rank_capacity >= rank);
auto& data = SetArrayIndexing(rank);
data.element_pointer = other.element_pointer;
data.index_range = other.index_range;
std::memcpy(data.byte_strides, other.byte_strides, sizeof(Index) * rank);
return data;
}
void OutputIndexMap::Assign(DimensionIndex rank, const OutputIndexMap& other) {
if (other.method() == OutputIndexMethod::array) {
SetArrayIndexing(rank, other.index_array_data());
} else {
value_ = other.value_;
}
offset_ = other.offset_;
stride_ = other.stride_;
}
TransformRep::Ptr<> TransformRep::Allocate(
DimensionIndex input_rank_capacity, DimensionIndex output_rank_capacity) {
ABSL_CHECK(input_rank_capacity >= 0 && output_rank_capacity >= 0 &&
input_rank_capacity <= kMaxRank &&
output_rank_capacity <= kMaxRank);
const size_t total_size =
sizeof(TransformRep) +
sizeof(OutputIndexMap) * output_rank_capacity +
input_rank_capacity * (sizeof(Index) * 2 + sizeof(std::string));
char* base_ptr = static_cast<char*>(::operator new(total_size));
TransformRep* ptr =
new (base_ptr + sizeof(OutputIndexMap) * output_rank_capacity)
TransformRep;
ptr->reference_count.store(1, std::memory_order_relaxed);
ptr->input_rank_capacity = input_rank_capacity;
ptr->output_rank_capacity = output_rank_capacity;
std::uninitialized_default_construct_n(ptr->output_index_maps().begin(),
output_rank_capacity);
std::uninitialized_default_construct_n(ptr->input_labels().begin(),
input_rank_capacity);
return TransformRep::Ptr<>(ptr, internal::adopt_object_ref);
}
void DestroyLabelFields(TransformRep* ptr) {
std::destroy_n(ptr->input_labels().begin(), ptr->input_rank_capacity);
}
void TransformRep::Free(TransformRep* ptr) {
assert(ptr->reference_count == 0);
DestroyLabelFields(ptr);
std::destroy_n(ptr->output_index_maps().begin(), ptr->output_rank_capacity);
::operator delete(static_cast<void*>(ptr->output_index_maps().data()));
}
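// Free() passes output_index_maps().data() rather than `this` to
// operator delete because that address is the base of the block obtained
// from operator new in Allocate().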
void CopyTransformRep(TransformRep* source, TransformRep* dest) {
assert(source != nullptr);
assert(dest != nullptr);
assert(dest->output_rank_capacity >= source->output_rank);
CopyTransformRepDomain(source, dest);
const DimensionIndex input_rank = source->input_rank;
const DimensionIndex output_rank = dest->output_rank = source->output_rank;
span<const OutputIndexMap> source_maps =
source->output_index_maps().first(output_rank);
span<OutputIndexMap> dest_maps = dest->output_index_maps().first(output_rank);
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
dest_maps[output_dim].Assign(input_rank, source_maps[output_dim]);
}
}
void CopyTransformRepDomain(TransformRep* source, TransformRep* dest) {
assert(source != nullptr);
assert(dest != nullptr);
assert(dest->input_rank_capacity >= source->input_rank);
const DimensionIndex input_rank = dest->input_rank = source->input_rank; | #include "tensorstore/index_space/internal/transform_rep.h"
#include <cstddef>
#include <limits>
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/macros.h"
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/container_kind.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/transform_rep_impl.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/internal/testing/concurrent.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
#if ABSL_HAVE_EXCEPTIONS
#define TENSORSTORE_EXPECT_OOM(expr) EXPECT_THROW(expr, std::bad_alloc);
#else
#define TENSORSTORE_EXPECT_OOM(expr) EXPECT_DEATH(expr, "Out of memory");
#endif
using ::tensorstore::Box;
using ::tensorstore::DimensionIndex;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::kMaxFiniteIndex;
using ::tensorstore::kMinFiniteIndex;
using ::tensorstore::MatchesStatus;
using ::tensorstore::OutputIndexMethod;
using ::tensorstore::internal_index_space::CopyTransformRep;
using ::tensorstore::internal_index_space::MoveTransformRep;
using ::tensorstore::internal_index_space::MutableRep;
using ::tensorstore::internal_index_space::NewOrMutableRep;
using ::tensorstore::internal_index_space::OutputIndexMap;
using ::tensorstore::internal_index_space::ReplaceZeroRankIndexArrayIndexMap;
using ::tensorstore::internal_index_space::TransformAccess;
using ::tensorstore::internal_index_space::TransformRep;
using ::tensorstore::internal_index_space::ValidateAndIntersectBounds;
using ::tensorstore::internal_testing::TestConcurrent;
TEST(OutputIndexMapTest, Basic) {
OutputIndexMap map;
EXPECT_EQ(OutputIndexMethod::constant, map.method());
map.SetSingleInputDimension(2);
EXPECT_EQ(OutputIndexMethod::single_input_dimension, map.method());
EXPECT_EQ(2, map.input_dimension());
map.SetSingleInputDimension(3);
EXPECT_EQ(OutputIndexMethod::single_input_dimension, map.method());
EXPECT_EQ(3, map.input_dimension());
map.SetConstant();
EXPECT_EQ(OutputIndexMethod::constant, map.method());
{
auto& index_array_data = map.SetArrayIndexing(3);
EXPECT_EQ(OutputIndexMethod::array, map.method());
EXPECT_EQ(3, index_array_data.rank_capacity);
EXPECT_EQ(IndexInterval(), index_array_data.index_range);
EXPECT_EQ(nullptr, index_array_data.element_pointer);
EXPECT_EQ(&index_array_data, &map.SetArrayIndexing(1));
EXPECT_EQ(3, index_array_data.rank_capacity);
auto ptr = std::make_shared<Index>();
index_array_data.element_pointer = ptr;
index_array_data.index_range = IndexInterval::UncheckedClosed(1, 10);
index_array_data.byte_strides[0] = 1;
index_array_data.byte_strides[1] = 2;
index_array_data.byte_strides[2] = 3;
auto& new_index_array_data = map.SetArrayIndexing(4);
EXPECT_EQ(4, new_index_array_data.rank_capacity);
EXPECT_EQ(ptr, new_index_array_data.element_pointer.pointer());
EXPECT_EQ(IndexInterval::UncheckedClosed(1, 10),
new_index_array_data.index_range);
EXPECT_EQ(1, new_index_array_data.byte_strides[0]);
EXPECT_EQ(2, new_index_array_data.byte_strides[1]);
EXPECT_EQ(3, new_index_array_data.byte_strides[2]);
}
map.SetSingleInputDimension(3);
EXPECT_EQ(OutputIndexMethod::single_input_dimension, map.method());
EXPECT_EQ(3, map.input_dimension());
{
auto& index_array_data = map.SetArrayIndexing(3);
EXPECT_EQ(OutputIndexMethod::array, map.method());
EXPECT_EQ(3, index_array_data.rank_capacity);
}
}
TEST(OutputIndexMapDeathTest, Basic) {
OutputIndexMap map;
TENSORSTORE_EXPECT_OOM(
map.SetArrayIndexing(static_cast<DimensionIndex>(1) << 60));
map.SetArrayIndexing(5);
TENSORSTORE_EXPECT_OOM(
map.SetArrayIndexing(static_cast<DimensionIndex>(1) << 60));
}
TEST(ReplaceZeroRankIndexArrayIndexMapTest, Basic) {
Index output_offset = 5, output_stride = 3;
EXPECT_EQ(absl::OkStatus(), ReplaceZeroRankIndexArrayIndexMap(
10, IndexInterval::UncheckedClosed(3, 15),
&output_offset, &output_stride));
EXPECT_EQ(5 + 10 * 3, output_offset);
EXPECT_EQ(0, output_stride);
}
TEST(ReplaceZeroRankIndexArrayIndexMapTest, OutOfBounds) {
Index output_offset = 5, output_stride = 3;
EXPECT_THAT(ReplaceZeroRankIndexArrayIndexMap(
10, IndexInterval::UncheckedClosed(11, 15), &output_offset,
&output_stride),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Index 10 is outside valid range \\[11, 16\\)"));
}
TEST(ReplaceZeroRankIndexArrayIndexMapTest, OverflowOffset) {
Index output_offset = std::numeric_limits<Index>::max(), output_stride = 3;
EXPECT_THAT(
ReplaceZeroRankIndexArrayIndexMap(10,
IndexInterval::UncheckedClosed(5, 15),
&output_offset, &output_stride),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
".*Integer overflow computing offset for output dimension.*"));
}
TEST(ReplaceZeroRankIndexArrayIndexMapTest, OverflowStride) {
Index output_offset = 5, output_stride = 100;
EXPECT_THAT(
ReplaceZeroRankIndexArrayIndexMap(kMaxFiniteIndex, IndexInterval(),
&output_offset, &output_stride),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
".*Integer overflow computing offset for output dimension.*"));
}
TEST(Allocate, Basic) {
auto ptr = TransformRep::Allocate(3, 2);
EXPECT_EQ(3, ptr->input_rank_capacity);
EXPECT_EQ(2, ptr->output_rank_capacity);
EXPECT_EQ(OutputIndexMethod::constant, ptr->output_index_maps()[0].method());
EXPECT_EQ(OutputIndexMethod::constant, ptr->output_index_maps()[1].method());
EXPECT_TRUE(ptr->input_labels()[0].empty());
EXPECT_TRUE(ptr->input_labels()[1].empty());
EXPECT_TRUE(ptr->input_labels()[2].empty());
}
TEST(CopyTransformRep, Basic) {
auto source = TransformRep::Allocate(1, 2);
source->input_rank = 1;
source->output_rank = 2;
source->input_origin()[0] = 5;
source->input_shape()[0] = 2;
auto& source_map = source->output_index_maps()[0];
source_map.offset() = 3;
source_map.stride() = 4;
auto index_array_ptr = std::make_shared<Index>();
auto& source_index_array_data = source_map.SetArrayIndexing(1);
source_index_array_data.element_pointer = index_array_ptr;
source_index_array_data.byte_strides[0] = 0;
source->input_labels()[0] = "source";
auto dest = TransformRep::Allocate(1, 2);
dest->input_rank = 0;
dest->output_rank = 0;
dest->input_origin()[0] = 6;
dest->input_shape()[0] = 7;
dest->input_labels()[0] = "dest";
auto& dest_map = dest->output_index_maps()[0];
dest_map.offset() = 10;
dest_map.stride() = 11;
CopyTransformRep(source.get(), dest.get());
EXPECT_EQ(5, source->input_origin()[0]);
EXPECT_EQ(2, source->input_shape()[0]);
EXPECT_EQ(3, source_map.offset());
EXPECT_EQ(4, source_map.stride());
EXPECT_EQ(OutputIndexMethod::array, source_map.method());
EXPECT_EQ(&source_index_array_data, &source_map.index_array_data());
EXPECT_EQ(index_array_ptr, source_index_array_data.element_pointer.pointer());
EXPECT_EQ(0, source_index_array_data.byte_strides[0]);
EXPECT_EQ("source", source->input_labels()[0]);
EXPECT_EQ(1, dest->input_rank);
EXPECT_EQ(2, dest->output_rank);
EXPECT_EQ(5, dest->input_origin()[0]);
EXPECT_EQ(2, dest->input_shape()[0]);
EXPECT_EQ(3, dest_map.offset());
EXPECT_EQ(4, dest_map.stride());
EXPECT_EQ(OutputIndexMethod::array, dest_map.method());
auto& dest_index_array_data = dest_map.index_array_data();
EXPECT_EQ(index_array_ptr, dest_index_array_data.element_pointer.pointer());
EXPECT_EQ(0, dest_index_array_data.byte_strides[0]);
EXPECT_EQ(3, index_array_ptr.use_count());
EXPECT_EQ("source", dest->input_labels()[0]);
}
TEST(MoveTransformRep, Basic) {
using ::tensorstore::DimensionSet;
auto source = TransformRep::Allocate(1, 2);
source->input_rank = 1;
source->output_rank = 2;
source->implicit_lower_bounds = DimensionSet::UpTo(source->input_rank);
source->implicit_upper_bounds = DimensionSet::UpTo(source->input_rank);
source->input_origin()[0] = 5;
source->input_shape()[0] = 2;
auto& source_map = source->output_index_maps()[0];
source_map.SetSingleInputDimension(0);
source_map.offset() = 3;
source_map.stride() = 4;
auto index_array_ptr = std::make_shared<Index>();
auto& source_index_array_data = source_map.SetArrayIndexing(1);
source_index_array_data.element_pointer = index_array_ptr;
source_index_array_data.byte_strides[0] = 0;
source->input_labels()[0] = "source";
auto dest = TransformRep::Allocate(1, 2);
dest->input_rank = 0;
dest->output_rank = 0;
dest->input_origin()[0] = 6;
dest->input_shape()[0] = 7;
dest->input_labels()[0] = "dest";
auto& dest_map = dest->output_index_maps()[0];
dest_map.offset() = 10;
dest_map.stride() = 11;
MoveTransformRep(source.get(), dest.get());
EXPECT_EQ(5, source->input_origin()[0]);
EXPECT_EQ(2, source->input_shape()[0]);
EXPECT_EQ(3, source_map.offset());
EXPECT_EQ(4, source_map.stride());
EXPECT_EQ(OutputIndexMethod::constant, source_map.method());
EXPECT_EQ(1, dest->input_rank);
EXPECT_EQ(2, dest->output_rank);
EXPECT_EQ(5, dest->input_origin()[0]);
EXPECT_EQ(2, dest->input_shape()[0]);
EXPECT_EQ(3, dest_map.offset());
EXPECT_EQ(4, dest_map.stride());
EXPECT_EQ(OutputIndexMethod::array, dest_map.method());
auto& dest_index_array_data = dest_map.index_array_data();
EXPECT_EQ(&dest_index_array_data, &source_index_array_data);
EXPECT_EQ(index_array_ptr, dest_index_array_data.element_pointer.pointer());
EXPECT_EQ(0, dest_index_array_data.byte_strides[0]);
EXPECT_EQ(2, index_array_ptr.use_count());
EXPECT_EQ("source", dest->input_labels()[0]);
}
tensorstore::IndexTransform<> MakeTestTransform() {
return IndexTransformBuilder<>(3, 3)
.input_origin({1, 2, 3})
.input_shape({2, 3, 4})
.input_labels({"a", "b", "c"})
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({0, 1, 1})
.output_constant(2, 5)
.output_single_input_dimension(1, 5, 7, 2)
.output_index_array(0, 8, 11,
tensorstore::MakeArray<Index>({{{8}}, {{9}}}),
tensorstore::IndexInterval::Sized(7, 3))
.Finalize()
.value();
}
TEST(MutableRepTest, Basic) {
auto transform = MakeTestTransform();
EXPECT_TRUE(TransformAccess::rep(transform)->is_unique());
auto rep1 = TransformAccess::rep_ptr<tensorstore::container>(transform);
EXPECT_FALSE(TransformAccess::rep(transform)->is_unique());
auto rep2 = MutableRep(std::move(rep1));
EXPECT_NE(TransformAccess::rep(transform), rep2.get());
EXPECT_EQ(transform, TransformAccess::Make<tensorstore::IndexTransformView<>>(
rep2.get()));
EXPECT_TRUE(rep2->is_unique());
TransformRep* rep2_ptr = rep2.get();
auto rep3 = MutableRep(std::move(rep2));
EXPECT_EQ(rep2_ptr, rep3.get());
}
TEST(MutableRepTest, Concurrent) {
auto orig = IndexTransformBuilder<>(1, 1)
.input_origin({1})
.input_shape({2})
.input_labels({"a"})
.implicit_lower_bounds({0})
.implicit_upper_bounds({0})
.output_constant(0, 5)
.Finalize()
.value();
TransformRep* orig_ptr;
TransformRep::Ptr<> write_ptr = TransformAccess::rep_ptr(orig);
write_ptr->output_rank = 0;
TransformRep::Ptr<> read_ptr;
[[maybe_unused]] size_t num_reads_before_write = 0;
const size_t num_iterations = 1000;
TestConcurrent(
num_iterations,
[&] {
write_ptr->input_rank = 1;
orig_ptr = write_ptr.get();
read_ptr = write_ptr;
},
[&] { EXPECT_EQ(0, write_ptr->input_rank); },
[&] {
write_ptr = MutableRep(std::move(write_ptr));
if (orig_ptr == write_ptr.get()) {
++num_reads_before_write;
}
write_ptr->input_rank = 0;
},
[&] {
EXPECT_EQ(1, read_ptr->input_rank);
read_ptr.reset();
});
#if 0
EXPECT_LT(0, num_reads_before_write);
EXPECT_LT(num_reads_before_write, num_iterations);
#endif
}
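// The #if 0 checks above are presumably disabled because whether a copy
// happens before the read thread releases its reference is timing-dependent,
// which would make these bounds flaky across platforms.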
TEST(NewOrMutableRepTest, Basic) {
auto transform = MakeTestTransform();
{
auto mutable_rep = NewOrMutableRep(TransformAccess::rep(transform), 3, 3);
EXPECT_EQ(TransformAccess::rep(transform), mutable_rep.get());
}
{
auto mutable_rep = NewOrMutableRep(TransformAccess::rep(transform), 2, 2);
EXPECT_EQ(TransformAccess::rep(transform), mutable_rep.get());
}
{
auto transform_copy = transform;
auto mutable_rep = NewOrMutableRep(TransformAccess::rep(transform), 3, 3);
EXPECT_NE(TransformAccess::rep(transform), mutable_rep.get());
EXPECT_EQ(3, mutable_rep->input_rank_capacity);
EXPECT_EQ(3, mutable_rep->output_rank_capacity);
}
{
auto transform_copy = transform;
auto mutable_rep = NewOrMutableRep(TransformAccess::rep(transform), 1, 2);
EXPECT_NE(TransformAccess::rep(transform), mutable_rep.get());
EXPECT_EQ(1, mutable_rep->input_rank_capacity);
EXPECT_EQ(2, mutable_rep->output_rank_capacity);
}
{
auto mutable_rep = NewOrMutableRep(TransformAccess::rep(transform), 3, 4);
EXPECT_NE(TransformAccess::rep(transform), mutable_rep.get());
EXPECT_EQ(3, mutable_rep->input_rank_capacity);
EXPECT_EQ(4, mutable_rep->output_rank_capacity);
}
{
auto mutable_rep = NewOrMutableRep(TransformAccess::rep(transform), 4, 3);
EXPECT_NE(TransformAccess::rep(transform), mutable_rep.get());
EXPECT_EQ(4, mutable_rep->input_rank_capacity);
EXPECT_EQ(3, mutable_rep->output_rank_capacity);
}
{
auto transform_copy = transform;
auto mutable_rep = NewOrMutableRep(TransformAccess::rep(transform), 3, 4);
EXPECT_NE(TransformAccess::rep(transform), mutable_rep.get());
EXPECT_EQ(3, mutable_rep->input_rank_capacity);
EXPECT_EQ(4, mutable_rep->output_rank_capacity);
}
}
TEST(ValidateAndIntersectBoundsTest, Success) {
const Box<> inner({-kInfIndex, 6}, {kInfIndex + 8, 3});
Box<> combined({1, 5}, {9, kInfIndex - 5 + 1});
auto status = ValidateAndIntersectBounds(
inner, combined, [](IndexInterval outer, IndexInterval inner) {
return ContainsOrUnbounded(outer, inner);
});
TENSORSTORE_CHECK_OK(status);
EXPECT_EQ(Box<>({1, 6}, {7, 3}), combined);
}
TEST(ValidateAndIntersectBoundsTest, Failure) {
const Box<> inner({-kInfIndex, 4}, {kInfIndex + 8, 3});
Box<> combined({1, 5}, {9, kInfIndex - 5 + 1});
auto status = ValidateAndIntersectBounds(
inner, combined, [](IndexInterval outer, IndexInterval inner) {
return ContainsOrUnbounded(outer, inner);
});
EXPECT_THAT(
status,
MatchesStatus(
absl::StatusCode::kOutOfRange,
".*Propagated bounds are incompatible with existing bounds in "
"dimension 1 bounds .* vs. propagated bounds.*"));
}
} |
558 | cpp | google/tensorstore | propagate_bounds | tensorstore/index_space/internal/propagate_bounds.cc | tensorstore/index_space/propagate_bounds_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_INTERNAL_PROPAGATE_BOUNDS_H_
#define TENSORSTORE_INDEX_SPACE_INTERNAL_PROPAGATE_BOUNDS_H_
#include "absl/status/status.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/util/dimension_set.h"
namespace tensorstore {
namespace internal_index_space {
absl::Status PropagateBounds(BoxView<> b, DimensionSet b_implicit_lower_bounds,
DimensionSet b_implicit_upper_bounds,
TransformRep* a_to_b, MutableBoxView<> a);
absl::Status PropagateExplicitBounds(BoxView<> b, TransformRep* a_to_b,
MutableBoxView<> a);
absl::Status PropagateBounds(BoxView<> b, DimensionSet b_implicit_lower_bounds,
DimensionSet b_implicit_upper_bounds,
TransformRep* a_to_b, MutableBoxView<> a,
DimensionSet& a_implicit_lower_bounds,
DimensionSet& a_implicit_upper_bounds);
Result<TransformRep::Ptr<>> PropagateBoundsToTransform(
BoxView<> b_domain, DimensionSet b_implicit_lower_bounds,
DimensionSet b_implicit_upper_bounds, TransformRep::Ptr<> a_to_b);
Result<TransformRep::Ptr<>> PropagateExplicitBoundsToTransform(
BoxView<> b_domain, TransformRep::Ptr<> a_to_b);
}
}
#endif
#include "tensorstore/index_space/internal/propagate_bounds.h"
#include <algorithm>
#include <cassert>
#include <sstream>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_replace.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/internal/identity_transform.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
namespace {
absl::Status PropagateBoundsImpl(BoxView<> b,
DimensionSet b_implicit_lower_bounds,
DimensionSet b_implicit_upper_bounds,
TransformRep* a_to_b, MutableBoxView<> a) {
if (!a_to_b) {
assert(a.rank() == b.rank());
a.DeepAssign(b);
return absl::OkStatus();
}
assert(a_to_b->input_rank == a.rank());
assert(a_to_b->output_rank == b.rank());
a.Fill();
span<const OutputIndexMap> maps = a_to_b->output_index_maps().first(b.rank());
DimensionSet propagated_to_a;
DimensionSet inferred_implicit_lower_bounds(true);
DimensionSet inferred_implicit_upper_bounds(true);
auto& implicit_lower_bounds = a_to_b->implicit_lower_bounds;
auto& implicit_upper_bounds = a_to_b->implicit_upper_bounds;
const auto existing_input_domain = a_to_b->input_domain(a.rank());
bool is_domain_empty = false;
for (DimensionIndex a_dim = 0; a_dim < a.rank(); ++a_dim) {
if (!implicit_lower_bounds[a_dim] && !implicit_upper_bounds[a_dim] &&
existing_input_domain[a_dim].empty()) {
is_domain_empty = true;
break;
}
}
for (DimensionIndex b_dim = 0; b_dim < b.rank(); ++b_dim) {
auto& map = maps[b_dim];
const Index output_stride = map.stride();
if (map.method() == OutputIndexMethod::array) continue;
OptionallyImplicitIndexInterval b_bounds_oi{b[b_dim],
b_implicit_lower_bounds[b_dim],
b_implicit_upper_bounds[b_dim]};
if (output_stride == 0 || map.method() == OutputIndexMethod::constant) {
if (!is_domain_empty) {
TENSORSTORE_RETURN_IF_ERROR(
CheckContains(b_bounds_oi.effective_interval(), map.offset()),
MaybeAnnotateStatus(
_, tensorstore::StrCat("Checking bounds of constant output "
"index map for dimension ",
b_dim)));
}
continue;
}
const DimensionIndex a_dim = map.input_dimension();
assert(a_dim >= 0 && a_dim < a.rank());
TENSORSTORE_ASSIGN_OR_RETURN(
OptionallyImplicitIndexInterval propagated_a_bounds,
GetAffineTransformDomain(b_bounds_oi, map.offset(), map.stride()),
MaybeAnnotateStatus(
_, tensorstore::StrCat("Propagating bounds from dimension ", b_dim,
" to input dimension ", a_dim)));
propagated_a_bounds = IntersectPreferringExplicit(
propagated_a_bounds,
OptionallyImplicitIndexInterval{a[a_dim],
inferred_implicit_lower_bounds[a_dim],
inferred_implicit_upper_bounds[a_dim]});
a[a_dim] = propagated_a_bounds.interval();
inferred_implicit_lower_bounds[a_dim] =
propagated_a_bounds.implicit_lower();
inferred_implicit_upper_bounds[a_dim] =
propagated_a_bounds.implicit_upper();
propagated_to_a[a_dim] = true;
}
for (DimensionIndex a_dim = 0; a_dim < a.rank(); ++a_dim) {
IndexInterval existing = existing_input_domain[a_dim];
IndexIntervalRef inferred = a[a_dim];
if (!propagated_to_a[a_dim]) {
inferred = existing;
continue;
}
const Index inclusive_min = implicit_lower_bounds[a_dim]
? inferred.inclusive_min()
: existing.inclusive_min();
const Index inclusive_max =
std::max(inclusive_min - 1, implicit_upper_bounds[a_dim]
? inferred.inclusive_max()
: existing.inclusive_max());
const IndexInterval combined =
IndexInterval::UncheckedClosed(inclusive_min, inclusive_max);
const OptionallyImplicitIndexInterval inferred_oi{
inferred, inferred_implicit_lower_bounds[a_dim],
inferred_implicit_upper_bounds[a_dim]};
if (!is_domain_empty &&
!Contains(inferred_oi.effective_interval(), combined)) {
std::ostringstream os;
os << "Propagated bounds " << inferred_oi;
if (inferred_oi.size() != kInfSize) {
os << ", with size=" << inferred_oi.size() << ", ";
}
os << "for dimension " << a_dim
<< " are incompatible with existing bounds " << combined;
if (combined.size() != kInfSize) {
os << ", with size=" << combined.size();
}
os << ".";
return absl::OutOfRangeError(os.str());
}
inferred = combined;
}
return absl::OkStatus();
}
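// In summary, PropagateBoundsImpl (1) validates constant output maps against
// the effective bounds of `b`, (2) inverts each single_input_dimension map
// via GetAffineTransformDomain and intersects the result into the inferred
// bounds of the corresponding input dimension, and (3) merges inferred and
// existing bounds, taking the inferred bound on whichever sides the
// transform marks implicit. Index array maps are skipped here; their
// index_range is narrowed separately in PropagateBoundsToTransform.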
void PropagateImplicitBoundState(DimensionIndex b_rank,
DimensionSet b_implicit_lower_bounds,
DimensionSet b_implicit_upper_bounds,
TransformRep* a_to_b, DimensionIndex a_rank,
DimensionSet& a_implicit_lower_bounds,
DimensionSet& a_implicit_upper_bounds) {
if (!a_to_b) {
a_implicit_lower_bounds = b_implicit_lower_bounds;
a_implicit_upper_bounds = b_implicit_upper_bounds;
return;
}
a_implicit_lower_bounds = a_to_b->implicit_lower_bounds;
a_implicit_upper_bounds = a_to_b->implicit_upper_bounds;
span<const OutputIndexMap> maps = a_to_b->output_index_maps().first(b_rank);
for (DimensionIndex b_dim = 0; b_dim < b_rank; ++b_dim) {
auto& map = maps[b_dim];
if (map.method() != OutputIndexMethod::single_input_dimension ||
map.stride() == 0) {
continue;
}
const DimensionIndex a_dim = map.input_dimension();
assert(a_dim >= 0 && a_dim < a_rank);
bool implicit_lower = b_implicit_lower_bounds[b_dim];
bool implicit_upper = b_implicit_upper_bounds[b_dim];
if (map.stride() < 0) {
std::swap(implicit_lower, implicit_upper);
}
if (!implicit_lower) a_implicit_lower_bounds[a_dim] = false;
if (!implicit_upper) a_implicit_upper_bounds[a_dim] = false;
}
}
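// Because a negative stride mirrors the interval, an explicit lower bound of
// b constrains a's *upper* bound and vice versa; the swap above ensures the
// implicit flags are cleared on the correct side of a's domain.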
}
absl::Status PropagateBounds(BoxView<> b, DimensionSet b_implicit_lower_bounds,
DimensionSet b_implicit_upper_bounds,
TransformRep* a_to_b, MutableBoxView<> a) {
auto status = PropagateBoundsImpl(b, b_implicit_lower_bounds,
b_implicit_upper_bounds, a_to_b, a);
if (!status.ok()) {
std::ostringstream os;
internal_index_space::PrintToOstream(os, a_to_b);
std::string str = os.str();
absl::StrReplaceAll({{"\n", " "}}, &str);
AddStatusPayload(status, "transform", absl::Cord(str));
AddStatusPayload(status, "domain", absl::Cord(tensorstore::StrCat(b)));
}
return status;
}
absl::Status PropagateExplicitBounds(BoxView<> b, TransformRep* a_to_b,
MutableBoxView<> a) {
return PropagateBounds(b, false, false, a_to_b, a);
}
absl::Status PropagateBounds(BoxView<> b, DimensionSet b_implicit_lower_bounds,
DimensionSet b_implicit_upper_bounds,
TransformRep* a_to_b, MutableBoxView<> a,
DimensionSet& a_implicit_lower_bounds,
DimensionSet& a_implicit_upper_bounds) {
PropagateImplicitBoundState(b.rank(), b_implicit_lower_bounds,
b_implicit_upper_bounds, a_to_b, a.rank(),
a_implicit_lower_bounds, a_implicit_upper_bounds);
return PropagateBounds(b, b_implicit_lower_bounds, b_implicit_upper_bounds,
a_to_b, a);
}
Result<TransformRep::Ptr<>> PropagateBoundsToTransform(
BoxView<> b_domain, DimensionSet b_implicit_lower_bounds,
DimensionSet b_implicit_upper_bounds, TransformRep::Ptr<> a_to_b) {
const DimensionIndex b_rank = b_domain.rank();
if (!a_to_b) {
a_to_b = TransformRep::Allocate(b_rank, b_rank);
a_to_b->input_rank = a_to_b->output_rank = b_rank;
SetToIdentityTransform(a_to_b->output_index_maps().first(b_rank));
a_to_b->input_domain(b_rank).DeepAssign(b_domain);
a_to_b->implicit_lower_bounds = b_implicit_lower_bounds;
a_to_b->implicit_upper_bounds = b_implicit_upper_bounds;
internal_index_space::DebugCheckInvariants(a_to_b.get());
return a_to_b;
}
const DimensionIndex a_rank = a_to_b->input_rank;
Box<dynamic_rank(internal::kNumInlinedDims)> bounds_temp(a_rank);
TENSORSTORE_RETURN_IF_ERROR(PropagateBounds(b_domain, b_implicit_lower_bounds,
b_implicit_upper_bounds,
a_to_b.get(), bounds_temp));
a_to_b = MutableRep(std::move(a_to_b));
a_to_b->input_domain(a_rank).DeepAssign(bounds_temp);
PropagateImplicitBoundState(
b_rank, b_implicit_lower_bounds, b_implicit_upper_bounds, a_to_b.get(),
a_rank, a_to_b->implicit_lower_bounds, a_to_b->implicit_upper_bounds);
const bool domain_is_explicitly_empty = IsDomainExplicitlyEmpty(a_to_b.get());
const auto output_index_maps = a_to_b->output_index_maps().first(b_rank);
for (DimensionIndex b_dim = 0; b_dim < b_rank; ++b_dim) {
auto& map = output_index_maps[b_dim];
if (map.method() != OutputIndexMethod::array) continue;
if (domain_is_explicitly_empty) {
map.SetConstant();
map.offset() = 0;
map.stride() = 0;
continue;
}
auto& index_array_data = map.index_array_data();
TENSORSTORE_ASSIGN_OR_RETURN(
const IndexInterval propagated_bounds,
GetAffineTransformDomain(
OptionallyImplicitIndexInterval(b_domain[b_dim],
b_implicit_lower_bounds[b_dim],
b_implicit_upper_bounds[b_dim])
.effective_interval(),
map.offset(), map.stride()));
index_array_data.index_range =
Intersect(propagated_bounds, index_array_data.index_range);
}
internal_index_space::DebugCheckInvariants(a_to_b.get());
return a_to_b;
}
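// Two post-processing steps above are worth noting: if the propagated input
// domain is explicitly empty, every index array map is replaced by a
// constant map (its values can never be observed); otherwise each index
// array's index_range is intersected with the interval obtained by inverting
// the affine offset/stride against b's effective bounds.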
Result<TransformRep::Ptr<>> PropagateExplicitBoundsToTransform(
BoxView<> b_domain, TransformRep::Ptr<> a_to_b) {
return PropagateBoundsToTransform(b_domain, false, false, std::move(a_to_b));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::DimensionIndex;
using ::tensorstore::DimensionSet;
using ::tensorstore::Index;
using ::tensorstore::IndexDomainBuilder;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransform;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kInfIndex;
using ::tensorstore::kMinFiniteIndex;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::PropagateBounds;
using ::tensorstore::PropagateBoundsToTransform;
using ::tensorstore::PropagateExplicitBounds;
using ::tensorstore::PropagateExplicitBoundsToTransform;
TEST(PropagateExplicitBoundsTest, IdentityTransform) {
DimensionIndex rank = 2;
const Box<> b({2, 3}, {4, 5});
Box<> a(rank);
TENSORSTORE_ASSERT_OK(PropagateExplicitBounds(b, IndexTransform<>(), a));
EXPECT_EQ(a, b);
}
TEST(PropagateBoundsTest, IdentityTransform) {
auto b = Box({2, 3}, {4, 5});
Box<2> a;
DimensionSet a_implicit_lower_bounds, a_implicit_upper_bounds;
auto b_implicit_lower_bounds = DimensionSet::FromBools({0, 1});
auto b_implicit_upper_bounds = DimensionSet::FromBools({1, 0});
TENSORSTORE_ASSERT_OK(
PropagateBounds(b, b_implicit_lower_bounds, b_implicit_upper_bounds,
IndexTransform<2, 2>(), a, a_implicit_lower_bounds,
a_implicit_upper_bounds));
EXPECT_EQ(a, b);
EXPECT_EQ(b_implicit_lower_bounds, a_implicit_lower_bounds);
EXPECT_EQ(b_implicit_upper_bounds, a_implicit_upper_bounds);
}
TEST(PropagateBoundsTest, ValidateOnly) {
auto transform = IndexTransformBuilder<2, 3>()
.input_origin({2, 3})
.input_shape({5, 10})
.output_single_input_dimension(0, 15, 2, 0)
.output_single_input_dimension(1, 30, 3, 1)
.output_single_input_dimension(2, 45, 4, 1)
.Finalize()
.value();
const Box<3> b({2, 3, 4}, {50, 66, 100});
Box<2> a;
DimensionSet b_implicit_lower_bounds = DimensionSet::FromBools({0, 1, 0});
DimensionSet b_implicit_upper_bounds = DimensionSet::FromBools({1, 0, 0});
DimensionSet a_implicit_lower_bounds, a_implicit_upper_bounds;
TENSORSTORE_ASSERT_OK(PropagateBounds(
b, b_implicit_lower_bounds, b_implicit_upper_bounds, transform, a,
a_implicit_lower_bounds, a_implicit_upper_bounds));
EXPECT_EQ(BoxView({2, 3}, {5, 10}), a);
EXPECT_THAT(a_implicit_lower_bounds, DimensionSet());
EXPECT_THAT(a_implicit_upper_bounds, DimensionSet());
}
TEST(PropagateBoundsTest, Constant) {
auto transform = IndexTransformBuilder<0, 2>()
.output_constant(0, 1)
.output_constant(1, 2)
.Finalize()
.value();
Box<0> a;
TENSORSTORE_ASSERT_OK(PropagateBounds(
Box({2, 1}, {2, 3}),
DimensionSet::FromBools({1, 0}),
DimensionSet::FromBools({0, 0}), transform,
a));
}
TEST(PropagateBoundsTest, ConstantError) {
auto transform = IndexTransformBuilder<0, 2>()
.output_constant(0, 1)
.output_constant(1, 2)
.Finalize()
.value();
Box<0> a;
EXPECT_THAT(PropagateBounds(
Box({2, 1}, {2, 3}),
DimensionSet::FromBools({0, 1}),
DimensionSet::FromBools({0, 0}),
transform, a),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Checking bounds of constant output index map for "
"dimension 0: Index 1 is outside valid range .*"));
}
TEST(PropagateBoundsTest, ConstantEmptyDomain) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto transform,
(IndexTransformBuilder<2, 1>()
.input_shape({0, 2})
.output_constant(0, 42)
.Finalize()));
Box<2> a;
TENSORSTORE_EXPECT_OK(PropagateBounds(
Box<1>({5}),
DimensionSet(),
DimensionSet(), transform, a));
EXPECT_EQ(a, BoxView({0, 2}));
}
TEST(PropagateBoundsTest, Propagate0Upper1Lower) {
auto transform = IndexTransformBuilder<2, 3>()
.input_origin({2, 3})
.implicit_lower_bounds({0, 1})
.implicit_upper_bounds({1, 0})
.input_shape({5, 10})
.output_single_input_dimension(0, 15, 2, 0)
.output_single_input_dimension(1, 30, 3, 1)
.output_single_input_dimension(2, 45, 4, 1)
.Finalize()
.value();
Box<2> a;
DimensionSet a_implicit_lower_bounds, a_implicit_upper_bounds;
TENSORSTORE_ASSERT_OK(PropagateBounds(
Box({2, 3, 4}, {50, 66, 100}),
DimensionSet::FromBools({0, 1, 1}),
DimensionSet::FromBools({1, 0, 1}), transform,
a, a_implicit_lower_bounds, a_implicit_upper_bounds));
EXPECT_EQ(BoxView({2, -9}, {19 - 2, 13 - -9}), a);
EXPECT_THAT(a_implicit_lower_bounds, DimensionSet::FromBools({0, 1}));
EXPECT_THAT(a_implicit_upper_bounds, DimensionSet::FromBools({1, 0}));
}
TEST(PropagateBoundsTest, PropagateImplicitConstraints1) {
const auto transform = IndexTransformBuilder<1, 1>()
.input_origin({-1})
.input_exclusive_max({2})
.implicit_upper_bounds({1})
.output_identity_transform()
.Finalize()
.value();
Box<1> a;
DimensionSet a_implicit_lower_bounds, a_implicit_upper_bounds;
TENSORSTORE_ASSERT_OK(
PropagateBounds(Box({0}, {4}),
DimensionSet::FromBools({1}),
DimensionSet(), transform, a,
a_implicit_lower_bounds, a_implicit_upper_bounds));
EXPECT_EQ(BoxView({-1}, {5}), a);
EXPECT_THAT(a_implicit_lower_bounds, DimensionSet());
EXPECT_THAT(a_implicit_upper_bounds, DimensionSet());
}
TEST(PropagateBoundsTest, PropagateImplicitConstraints2) {
const auto transform = IndexTransformBuilder<1, 2>()
.input_origin({-1})
.input_exclusive_max({2})
.implicit_upper_bounds({1})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 0)
.Finalize()
.value();
Box<1> a;
DimensionSet a_implicit_lower_bounds, a_implicit_upper_bounds;
TENSORSTORE_ASSERT_OK(PropagateBounds(
Box({-1, 0}, {3, 4}),
DimensionSet::FromBools({1, 1}),
DimensionSet::FromBools({1, 0}), transform, a,
a_implicit_lower_bounds, a_implicit_upper_bounds));
EXPECT_EQ(BoxView({-1}, {5}), a);
EXPECT_THAT(a_implicit_lower_bounds, DimensionSet());
EXPECT_THAT(a_implicit_upper_bounds, DimensionSet());
}
TEST(PropagateBoundsTest, PropagateNegativeStride) {
auto transform = IndexTransformBuilder<2, 1>()
.input_origin({2, 3})
.implicit_lower_bounds({0, 1})
.implicit_upper_bounds({1, 0})
.input_shape({4, 10})
.output_single_input_dimension(0, 15, -2, 0)
.Finalize()
.value();
const Box<1> b({2}, {50});
Box<2> a;
DimensionSet b_implicit_lower_bounds;
DimensionSet b_implicit_upper_bounds = DimensionSet::FromBools({1});
DimensionSet a_implicit_lower_bounds, a_implicit_upper_bounds;
TENSORSTORE_ASSERT_OK(PropagateBounds(
b, b_implicit_lower_bounds, b_implicit_upper_bounds, transform, a,
a_implicit_lower_bounds, a_implicit_upper_bounds));
EXPECT_EQ(BoxView({2, 3}, {7 - 2, 10}), a);
EXPECT_THAT(a_implicit_lower_bounds, DimensionSet::FromBools({0, 1}));
EXPECT_THAT(a_implicit_upper_bounds, DimensionSet::FromBools({0, 0}));
}
TEST(PropagateExplicitBoundsTest, Propagate0Upper1Upper) {
auto transform = IndexTransformBuilder<2, 3>()
.input_origin({2, 10})
.input_shape({5, 11})
.implicit_lower_bounds({0, 1})
.implicit_upper_bounds({0, 1})
.output_single_input_dimension(0, 15, 2, 0)
.output_single_input_dimension(1, 30, 3, 1)
.output_single_input_dimension(2, 45, 4, 1)
.Finalize()
.value();
const Box<3> b({2, 3, 4}, {50, 66, 100});
Box<2> a;
TENSORSTORE_ASSERT_OK(PropagateExplicitBounds(b, transform, a));
EXPECT_EQ(Box<>({2, -9}, {5, 22}), a);
}
TEST(PropagateExplicitBoundsTest, PropagateExtraExplicit) {
auto transform = IndexTransformBuilder<3, 3>()
.input_origin({2, 10, 7})
.input_shape({5, 11, 8})
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({0, 1, 0})
.output_single_input_dimension(0, 15, 2, 0)
.output_single_input_dimension(1, 30, 3, 1)
.output_single_input_dimension(2, 45, 4, 1)
.Finalize()
.value();
const Box<3> b({2, 3, 4}, {50, 66, 100});
Box<3> a;
TENSORSTORE_ASSERT_OK(PropagateExplicitBounds(b, transform, a));
EXPECT_EQ(Box<>({2, -9, 7}, {5, 22, 8}), a);
}
TEST(PropagateExplicitBoundsTest, PropagateExtraImplicitLower) {
auto transform = IndexTransformBuilder<3, 3>()
.input_origin({2, 10, 7})
.input_shape({5, 11, 8})
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({0, 1, 0})
.output_single_input_dimension(0, 15, 2, 0)
.output_single_input_dimension(1, 30, 3, 1)
.output_single_input_dimension(2, 45, 4, 1)
.Finalize()
.value();
const Box<3> b({2, 3, 4}, {50, 66, 100});
Box<3> a;
TENSORSTORE_ASSERT_OK(PropagateExplicitBounds(b, transform, a));
EXPECT_EQ(Box<>({2, -9, 7}, {5, 22, 8}), a);
}
TEST(PropagateExplicitBoundsTest, PropagateExtraImplicitUpper) {
auto transform = IndexTransformBuilder<3, 3>()
.input_origin({2, 10, 7})
.input_shape({5, 11, 8})
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({0, 1, 1})
.output_single_input_dimension(0, 15, 2, 0)
.output_single_input_dimension(1, 30, 3, 1)
.output_single_input_dimension(2, 45, 4, 1)
.Finalize()
.value();
const Box<3> b({2, 3, 4}, {50, 66, 100});
Box<3> a;
TENSORSTORE_ASSERT_OK(PropagateExplicitBounds(b, transform, a));
EXPECT_EQ(Box<>({2, -9, 7}, {5, 22, 8}), a);
}
TEST(PropagateExplicitBoundsTest, OutOfBounds) {
auto transform = IndexTransformBuilder<2, 3>()
.input_origin({2, 3})
.input_shape({5, 10})
.output_single_input_dimension(0, 15, 2, 0)
.output_single_input_dimension(1, 30, 3, 1)
.output_single_input_dimension(2, 45, 4, 1)
.Finalize()
.value();
const Box<3> b({2, 3, 4}, {50, 60, 100});
Box<2> a;
EXPECT_THAT(
PropagateExplicitBounds(b, transform, a),
MatchesStatus(
absl::StatusCode::kOutOfRange,
"Propagated bounds \\[-9, 11\\), with size=20, for dimension 1 are "
"incompatible with existing bounds \\[3, 13\\), with size=10.*"));
}
TEST(PropagateExplicitBoundsTest, OutOfBoundsEmptyDomain) {
auto transform = IndexTransformBuilder<2, 3>()
.input_origin({2, 3})
.input_shape({0, 10})
.output_single_input_dimension(0, 15, 2, 0)
.output_single_input_dimension(1, 30, 3, 1)
.output_single_input_dimension(2, 45, 4, 1)
.Finalize()
.value();
const Box<3> b({2, 3, 4}, {50, 60, 100});
Box<2> a;
TENSORSTORE_EXPECT_OK(PropagateExplicitBounds(b, transform, a));
}
TEST(PropagateExplicitBoundsTest, OutOfBoundsInfLower) {
auto transform = IndexTransformBuilder<2, 3>()
.input_origin({2, -kInfIndex})
.input_shape({5, kInfIndex + 4})
.output_single_input_dimension(0, 15, 2, 0)
.output_single_input_dimension(1, 30, 3, 1)
.output_single_input_dimension(2, 45, 4, 1)
.Finalize()
.value();
const Box<3> b({2, 3, 4}, {50, 60, 100});
Box<2> a;
EXPECT_THAT(PropagateExplicitBounds(b, transform, a),
MatchesStatus(
absl::StatusCode::kOutOfRange,
"Propagated bounds \\[-9, 11\\), with size=20, for dimension "
"1 are incompatible with existing bounds \\(-inf, 4\\).*"));
}
TEST(PropagateExplicitBoundsTest, OutOfBoundsInfUpper) {
auto transform = IndexTransformBuilder<2, 3>()
.input_origin({2, 2})
.input_shape({5, kInfIndex + 1 - 2})
.output_single_input_dimension(0, 15, 2, 0)
.output_single_input_dimension(1, 30, 3, 1)
.output_single_input_dimension(2, 45, 4, 1)
.Finalize()
.value();
const Box<3> b({2, 3, 4}, {50, 60, 100});
Box<2> a;
EXPECT_THAT(
PropagateExplicitBounds(b, transform, a),
MatchesStatus(
absl::StatusCode::kOutOfRange,
"Propagated bounds \\[-9, 11\\), with size=20, for dimension 1 are "
"incompatible with existing bounds \\[2, \\+inf\\).*"));
}
TEST(PropagateExplicitBoundsTest, Overflow) {
auto transform = IndexTransformBuilder<2, 3>()
.input_origin({2, -kInfIndex})
.input_shape({5, kInfIndex + 10})
.output_single_input_dimension(0, 15, 2, 0)
.output_single_input_dimension(1, 30, 1, 1)
.output_single_input_dimension(2, 45, 4, 1)
.Finalize()
.value();
const Box<3> b({2, kMinFiniteIndex, 4}, {50, -kMinFiniteIndex + 69, 100});
Box<2> a;
EXPECT_THAT(PropagateExplicitBounds(b, transform, a),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Propagating bounds from dimension 1 to input "
"dimension 1: Integer overflow propagating .*"));
}
TEST(PropagateExplicitBoundsTest, ZeroSize) {
auto transform = IndexTransformBuilder<2, 3>()
.input_origin({2, 3})
.input_shape({5, 0})
.output_single_input_dimension(0, 15, 2, 0)
.output_single_input_dimension(1, 30, 3, 1)
.output_single_input_dimension(2, 45, 4, 1)
.Finalize()
.value();
const Box<3> b({2, 3, 4}, {50, 66, 100});
Box<2> a;
TENSORSTORE_ASSERT_OK(PropagateExplicitBounds(b, transform, a));
EXPECT_EQ(BoxView({2, 3}, {5, 0}), a);
}
TEST(PropagateExplicitBoundsToTransformTest,
InvalidTransformTreatedAsIdentityTransformDefaultImplicit) {
IndexTransform<2, 2> t;
Box<2> output_domain({1, 2}, {3, 4});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto t_expected,
IndexTransformBuilder(2, 2)
.input_bounds(output_domain)
.implicit_lower_bounds({0, 0})
.implicit_upper_bounds({0, 0})
.output_identity_transform()
.Finalize());
EXPECT_THAT(PropagateExplicitBoundsToTransform(output_domain, t),
::testing::Optional(t_expected));
}
TEST(PropagateBoundsToTransformTest,
InvalidTransformTreatedAsIdentityTransformImplicit) {
IndexTransform<2, 2> t;
Box<2> output_domain({1, 2}, {3, 4});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto t_expected,
IndexTransformBuilder(2, 2)
.input_bounds(output_domain)
.implicit_lower_bounds({1, 0})
.implicit_upper_bounds({0, 1})
.output_identity_transform()
.Finalize());
EXPECT_THAT(
PropagateBoundsToTransform(output_domain, DimensionSet::FromBools({1, 0}),
DimensionSet::FromBools({0, 1}), t),
::testing::Optional(t_expected));
}
TEST(PropagateExplicitBoundsToTransformTest, IndexArrayNoPropagationNeeded) {
Box<1> output_domain({1}, {10});
auto t = IndexTransformBuilder<1, 1>()
.input_origin({11})
.input_shape({3})
.output_index_array(0, 2, 3, MakeArray<Index>({1, 2, 1}),
IndexInterval::Closed(1, 2))
.Finalize()
.value();
EXPECT_THAT(PropagateExplicitBoundsToTransform(output_domain, t),
::testing::Optional(t));
}
TEST(PropagateExplicitBoundsToTransformTest, IndexArrayZeroElements) {
Box<2> output_domain({0, 2});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto t,
IndexTransformBuilder(2, 2)
.input_shape({3, 2})
.implicit_upper_bounds({1, 0})
.output_single_input_dimension(0, 0)
.output_index_array(1, 0, 1, MakeArray<Index>({{1, 2}}))
.Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto t_expected,
IndexTransformBuilder(2, 2)
.input_shape({0, 2})
.output_single_input_dimension(0, 0)
.output_constant(1, 0)
.Finalize());
EXPECT_THAT(PropagateExplicitBoundsToTransform(output_domain, t),
::testing::Optional(t_expected));
}
TEST(PropagateExplicitBoundsToTransformTest,
SingleInputDimensionNoPropagationNeeded) {
Box<1> output_domain({1}, {10});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto t,
IndexTransformBuilder(1, 1)
.input_origin({11})
.input_shape({3})
.output_single_input_dimension(0, -32, 3, 0)
.Finalize());
EXPECT_THAT(PropagateExplicitBoundsToTransform(output_domain, t),
::testing::Optional(t));
}
TEST(PropagateExplicitBoundsToTransformTest, PropagateToIndexRange) {
Box<1> output_domain({1}, {10});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto t,
IndexTransformBuilder(1, 1)
.input_origin({11})
.input_shape({3})
.output_index_array(0, 2, 3, MakeArray<Index>({1, 2, 1}))
.Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto t_expected,
IndexTransformBuilder(1, 1)
.input_origin({11})
.input_shape({3})
.output_index_array(0, 2, 3, MakeArray<Index>({1, 2, 1}),
IndexInterval::Closed(0, 2))
.Finalize());
EXPECT_THAT(PropagateExplicitBoundsToTransform(output_domain, t),
::testing::Optional(t_expected));
}
TEST(PropagateBoundsToTransformTest, PropagateToIndexRange) {
Box<1> output_domain({1}, {10});
const auto get_transform =
[](tensorstore::Result<IndexInterval> index_range) {
return IndexTransformBuilder<1, 1>()
.input_origin({11})
.input_shape({3})
.output_index_array(0, 2, 3, MakeArray<Index>({1, 2, 1}),
index_range)
.Finalize()
.value();
};
EXPECT_THAT(
PropagateBoundsToTransform(output_domain, DimensionSet::FromBools({0}),
DimensionSet::FromBools({0}),
get_transform(IndexInterval())),
::testing::Optional(get_transform(IndexInterval::Closed(0, 2))));
EXPECT_THAT(
PropagateBoundsToTransform(output_domain, DimensionSet::FromBools({1}),
DimensionSet::FromBools({0}),
get_transform(IndexInterval())),
::testing::Optional(get_transform(IndexInterval::Closed(-kInfIndex, 2))));
EXPECT_THAT(
PropagateBoundsToTransform(output_domain, DimensionSet::FromBools({0}),
DimensionSet::FromBools({1}),
get_transform(IndexInterval())),
::testing::Optional(get_transform(IndexInterval::Closed(0, kInfIndex))));
EXPECT_THAT(
PropagateBoundsToTransform(output_domain, DimensionSet::FromBools({1}),
DimensionSet::FromBools({1}),
get_transform(IndexInterval())),
::testing::Optional(get_transform(IndexInterval())));
}
TEST(PropagateBoundsToTransformTest, PropagateToInputDomain) {
Box<1> output_bounds({1}, {10});
auto t = IndexTransformBuilder<1, 1>()
.implicit_lower_bounds({1})
.implicit_upper_bounds({1})
.output_single_input_dimension(0, -32, 3, 0)
.Finalize()
.value();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto propagated_transform,
PropagateBoundsToTransform(output_bounds, DimensionSet::FromBools({1}),
DimensionSet::FromBools({0}), t));
auto expected_transform = IndexTransformBuilder<1, 1>()
.input_origin({11})
.input_shape({4})
.implicit_lower_bounds({1})
.implicit_upper_bounds({0})
.output_single_input_dimension(0, -32, 3, 0)
.Finalize()
.value();
EXPECT_EQ(expected_transform, propagated_transform);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto output_domain,
IndexDomainBuilder<1>()
.bounds(output_bounds)
.implicit_lower_bounds({1})
.implicit_upper_bounds({0})
.Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto propagated_transform2, PropagateBoundsToTransform(output_domain, t));
EXPECT_EQ(expected_transform, propagated_transform2);
}
TEST(PropagateExplicitBoundsToTransformTest, OutOfBounds) {
auto t = IndexTransformBuilder<2, 3>()
.input_origin({2, 3})
.input_shape({5, 10})
.output_single_input_dimension(0, 15, 2, 0)
.output_single_input_dimension(1, 30, 3, 1)
.output_single_input_dimension(2, 45, 4, 1)
.Finalize()
.value();
const Box<3> output_domain({2, 3, 4}, {50, 60, 100});
EXPECT_THAT(
PropagateExplicitBoundsToTransform(output_domain, t),
MatchesStatus(
absl::StatusCode::kOutOfRange,
"Propagated bounds \\[-9, 11\\), with size=20, for dimension 1 are "
"incompatible with existing bounds \\[3, 13\\), with size=10.*"));
}
TEST(PropagateExplicitBoundsToTransformTest, Overflow) {
auto t = IndexTransformBuilder<2, 3>()
.input_origin({2, -kInfIndex})
.input_shape({5, kInfIndex + 10})
.output_single_input_dimension(0, 15, 2, 0)
.output_single_input_dimension(1, 30, 1, 1)
.output_single_input_dimension(2, 45, 4, 1)
.Finalize()
.value();
const Box<3> output_domain({2, kMinFiniteIndex, 4},
{50, -kMinFiniteIndex + 69, 100});
EXPECT_THAT(PropagateExplicitBoundsToTransform(output_domain, t),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Propagating bounds from dimension 1 to input "
"dimension 1: Integer overflow propagating .*"));
}
} |
559 | cpp | google/tensorstore | interval_slice_op | tensorstore/index_space/internal/interval_slice_op.cc | tensorstore/index_space/interval_slice_op_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_INTERNAL_INTERVAL_SLICE_OP_H_
#define TENSORSTORE_INDEX_SPACE_INTERNAL_INTERVAL_SLICE_OP_H_
#include "tensorstore/index_space/dimension_index_buffer.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_vector_or_scalar.h"
#include "tensorstore/internal/meta.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_index_space {
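// Applies an interval slice to the dimensions in `*dimensions`: the domain of
// each selected dimension is restricted to the interval specified by the
// corresponding elements of `start_vector`, `stop_or_size_vector`, and
// `stride_vector`, interpreted according to `interval_form`.  If `translate`
// is `true`, the new domains are additionally translated to have a zero
// origin.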
Result<IndexTransform<>> ApplyIntervalSliceOp(
IndexTransform<> transform, DimensionIndexBuffer* dimensions,
IntervalForm interval_form, bool translate,
IndexVectorOrScalarView start_vector,
IndexVectorOrScalarView stop_or_size_vector,
IndexVectorOrScalarView stride_vector, bool domain_only = false);
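// Type representing the {Translate,}{Closed,HalfOpen,Sized}Interval
// operations for use with `DimExpression`.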
template <typename StartVector, typename StopOrSizeVector,
typename StrideVector>
struct IntervalSliceOp {
static constexpr bool selected_dimensions_are_new = false;
static constexpr DimensionIndex static_selection_rank =
RankConstraint::And({IsIndexVectorOrScalar<StartVector>::extent,
IsIndexVectorOrScalar<StopOrSizeVector>::extent,
IsIndexVectorOrScalar<StrideVector>::extent});
constexpr static DimensionIndex GetNewStaticInputRank(
DimensionIndex input_rank, DimensionIndex num_input_dims) {
TENSORSTORE_CONSTEXPR_ASSERT(
(input_rank == dynamic_rank || input_rank >= static_selection_rank) &&
"Number of dimensions must not exceed input rank.");
return input_rank;
}
constexpr static DimensionIndex GetStaticSelectionRank(
DimensionIndex num_input_dims) {
TENSORSTORE_CONSTEXPR_ASSERT(
RankConstraint::EqualOrUnspecified(num_input_dims,
static_selection_rank) &&
"Number of selected dimensions must match number of indices.");
return num_input_dims == dynamic_rank ? static_selection_rank
: num_input_dims;
}
Result<IndexTransform<>> Apply(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
bool domain_only) const {
return ApplyIntervalSliceOp(
std::move(transform), dimensions, interval_form, translate,
IndexVectorOrScalarView(start_vector),
IndexVectorOrScalarView(stop_or_size_vector),
IndexVectorOrScalarView(stride_vector), domain_only);
}
IntervalForm interval_form;
bool translate;
StartVector start_vector;
StopOrSizeVector stop_or_size_vector;
StrideVector stride_vector;
};
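// Applies a striding operation to the dimensions in `*dimensions`: index `x`
// in a strided dimension corresponds to index `x * stride` in the original
// dimension.  A stride of 0 is an error; a negative stride reverses the
// dimension.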
Result<IndexTransform<>> ApplyStrideOp(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
IndexVectorOrScalarView strides,
bool domain_only);
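// Type representing the `DimExpression::Stride` operation.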
template <typename StrideVector>
struct StrideOp {
static constexpr bool selected_dimensions_are_new = false;
static constexpr DimensionIndex static_selection_rank =
IsIndexVectorOrScalar<StrideVector>::extent;
constexpr static DimensionIndex GetNewStaticInputRank(
DimensionIndex input_rank, DimensionIndex num_input_dims) {
TENSORSTORE_CONSTEXPR_ASSERT(
(input_rank == dynamic_rank || input_rank >= static_selection_rank) &&
"Number of dimensions must not exceed input rank.");
return input_rank;
}
constexpr static DimensionIndex GetStaticSelectionRank(
DimensionIndex num_input_dims) {
TENSORSTORE_CONSTEXPR_ASSERT(
RankConstraint::EqualOrUnspecified(num_input_dims,
static_selection_rank) &&
"Number of selected dimensions must match number of strides.");
return num_input_dims == dynamic_rank ? static_selection_rank
: num_input_dims;
}
Result<IndexTransform<>> Apply(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
bool domain_only) const {
return ApplyStrideOp(std::move(transform), dimensions,
IndexVectorOrScalarView(stride_vector), domain_only);
}
StrideVector stride_vector;
};
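// Type representing the {Translate,}BoxSlice operations, implemented as a
// sized interval slice over the origin and shape of `box` with unit stride.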
template <DimensionIndex Rank>
struct BoxSliceOp {
static constexpr bool selected_dimensions_are_new = false;
static constexpr DimensionIndex static_selection_rank = Rank;
constexpr static DimensionIndex GetNewStaticInputRank(
DimensionIndex input_rank, DimensionIndex num_input_dims) {
TENSORSTORE_CONSTEXPR_ASSERT(
(input_rank == dynamic_rank || input_rank >= static_selection_rank) &&
"Number of dimensions must not exceed input rank.");
return input_rank;
}
constexpr static DimensionIndex GetStaticSelectionRank(
DimensionIndex num_input_dims) {
TENSORSTORE_CONSTEXPR_ASSERT(
RankConstraint::EqualOrUnspecified(num_input_dims,
static_selection_rank) &&
"Number of selected dimensions must match number of strides.");
return num_input_dims == dynamic_rank ? static_selection_rank
: num_input_dims;
}
Result<IndexTransform<>> Apply(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
bool domain_only) const {
return ApplyIntervalSliceOp(
std::move(transform), dimensions, IntervalForm::sized, translate,
IndexVectorOrScalarView(box.origin()),
IndexVectorOrScalarView(box.shape()), 1, domain_only);
}
BoxView<Rank> box;
bool translate;
};
}
}
#endif
#include "tensorstore/index_space/internal/interval_slice_op.h"
#include <algorithm>
#include "absl/status/status.h"
#include "tensorstore/index_space/internal/transform_rep_impl.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/util/division.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
namespace {
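// Describes how a sliced input dimension maps back to the original input
// dimension: `original_index = offset + stride * new_index`.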
struct InputDimensionIntervalSliceInfo {
Index offset;
Index stride;
};
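// Computes the new (sliced) domain of each selected input dimension of
// `transform` in place, and records in `dimension_info` the offset/stride
// mapping from each new domain back to the original domain.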
absl::Status GetIntervalSliceInfo(
span<InputDimensionIntervalSliceInfo> dimension_info,
TransformRep* transform, span<const DimensionIndex> dimensions,
IntervalForm interval_form, bool translate,
IndexVectorOrScalarView start_vector,
IndexVectorOrScalarView stop_or_size_vector,
IndexVectorOrScalarView stride_vector) {
const DimensionIndex input_rank = dimension_info.size();
assert(input_rank == transform->input_rank);
for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
dimension_info[input_dim] = InputDimensionIntervalSliceInfo{0, 1};
}
auto compute_input_domain_slice = [&](DimensionIndex i,
DimensionIndex input_dim) {
const Index stride = stride_vector[i];
const InputDimensionRef d = transform->input_dimension(input_dim);
auto& info = dimension_info[input_dim];
info.stride = stride;
OptionallyImplicitIndexInterval new_domain;
TENSORSTORE_RETURN_IF_ERROR(ComputeStridedSliceMap(
d.optionally_implicit_domain(), interval_form,
translate ? 0 : kImplicit, start_vector[i], stop_or_size_vector[i],
stride, &new_domain, &info.offset));
d.domain() = new_domain.interval();
d.implicit_lower_bound() = new_domain.implicit_lower();
d.implicit_upper_bound() = new_domain.implicit_upper();
return absl::OkStatus();
};
for (DimensionIndex i = 0; i < dimensions.size(); ++i) {
const DimensionIndex input_dim = dimensions[i];
TENSORSTORE_RETURN_IF_ERROR(
compute_input_domain_slice(i, input_dim),
MaybeAnnotateStatus(
_,
tensorstore::StrCat("Computing interval slice for input dimension ",
input_dim)));
}
return absl::OkStatus();
}
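// Updates the output index maps of `rep` to account for the slicing described
// by `input_dimension_info`: single-input-dimension maps have their offsets
// and strides composed with the slice mapping (with overflow checks), while
// index array maps have their element pointers and byte strides adjusted,
// collapsing to constant maps when the domain is explicitly empty or the
// index array reduces to a singleton.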
absl::Status ApplyOffsetsAndStridesToOutputIndexMaps(
TransformRep* rep,
span<const InputDimensionIntervalSliceInfo> input_dimension_info) {
const DimensionIndex input_rank = input_dimension_info.size();
const DimensionIndex output_rank = rep->output_rank;
BoxView<> input_domain = rep->input_domain(input_rank);
const bool domain_is_explicitly_empty = IsDomainExplicitlyEmpty(rep);
span<OutputIndexMap> maps = rep->output_index_maps().first(output_rank);
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
auto& map = maps[output_dim];
switch (map.method()) {
case OutputIndexMethod::constant:
break;
case OutputIndexMethod::single_input_dimension: {
const DimensionIndex input_dim = map.input_dimension();
const auto& slice_info = input_dimension_info[input_dim];
Index offset;
if (internal::MulOverflow(slice_info.offset, map.stride(), &offset) ||
internal::AddOverflow(offset, map.offset(), &map.offset())) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Integer overflow computing offset for output dimension ",
output_dim));
}
if (internal::MulOverflow(slice_info.stride, map.stride(),
&map.stride())) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Integer overflow computing stride for output dimension ",
output_dim));
}
break;
}
case OutputIndexMethod::array: {
if (domain_is_explicitly_empty) {
map.SetConstant();
map.offset() = 0;
map.stride() = 0;
break;
}
auto& index_array_data = map.index_array_data();
Index element_pointer_byte_offset = 0;
bool array_is_singleton = true;
for (DimensionIndex input_dim = 0; input_dim < input_rank;
++input_dim) {
const auto& slice_info = input_dimension_info[input_dim];
Index& byte_stride = index_array_data.byte_strides[input_dim];
element_pointer_byte_offset = internal::wrap_on_overflow::Add(
element_pointer_byte_offset, internal::wrap_on_overflow::Multiply(
byte_stride, slice_info.offset));
byte_stride = internal::wrap_on_overflow::Multiply(byte_stride,
slice_info.stride);
if (input_domain.shape()[input_dim] == 1) {
element_pointer_byte_offset = internal::wrap_on_overflow::Add(
element_pointer_byte_offset,
internal::wrap_on_overflow::Multiply(
byte_stride, input_domain.origin()[input_dim]));
byte_stride = 0;
} else if (byte_stride != 0) {
array_is_singleton = false;
}
}
index_array_data.element_pointer =
AddByteOffset(std::move(index_array_data.element_pointer),
element_pointer_byte_offset);
if (array_is_singleton) {
const Index index = *index_array_data.array_view(input_domain)
.byte_strided_origin_pointer();
const IndexInterval index_range = index_array_data.index_range;
map.SetConstant();
TENSORSTORE_RETURN_IF_ERROR(ReplaceZeroRankIndexArrayIndexMap(
index, index_range, &map.offset(), &map.stride()));
}
break;
}
}
}
internal_index_space::DebugCheckInvariants(rep);
return absl::OkStatus();
}
}
Result<IndexTransform<>> ApplyIntervalSliceOp(
IndexTransform<> transform, DimensionIndexBuffer* dimensions,
IntervalForm interval_form, bool translate,
IndexVectorOrScalarView start_vector,
IndexVectorOrScalarView stop_or_size_vector,
IndexVectorOrScalarView stride_vector, bool domain_only) {
const DimensionIndex num_dims = dimensions->size();
const DimensionIndex input_rank = transform.input_rank();
TENSORSTORE_RETURN_IF_ERROR(CheckIndexVectorSize(start_vector, num_dims));
TENSORSTORE_RETURN_IF_ERROR(
CheckIndexVectorSize(stop_or_size_vector, num_dims));
TENSORSTORE_RETURN_IF_ERROR(CheckIndexVectorSize(stride_vector, num_dims));
TransformRep::Ptr<> rep = MutableRep(
TransformAccess::rep_ptr<container>(std::move(transform)), domain_only);
InputDimensionIntervalSliceInfo input_dimension_info[kMaxRank];
TENSORSTORE_RETURN_IF_ERROR(
GetIntervalSliceInfo(span(input_dimension_info).first(input_rank),
rep.get(), *dimensions, interval_form, translate,
start_vector, stop_or_size_vector, stride_vector));
TENSORSTORE_RETURN_IF_ERROR(ApplyOffsetsAndStridesToOutputIndexMaps(
rep.get(), span(input_dimension_info).first(input_rank)));
return TransformAccess::Make<IndexTransform<>>(std::move(rep));
}
Result<IndexTransform<>> ApplyStrideOp(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
IndexVectorOrScalarView strides,
bool domain_only) {
const DimensionIndex num_dims = dimensions->size();
const DimensionIndex input_rank = transform.input_rank();
TENSORSTORE_RETURN_IF_ERROR(CheckIndexVectorSize(strides, num_dims));
TransformRep::Ptr<> rep = MutableRep(
TransformAccess::rep_ptr<container>(std::move(transform)), domain_only);
InputDimensionIntervalSliceInfo input_dimension_info[kMaxRank];
std::fill_n(&input_dimension_info[0], input_rank,
InputDimensionIntervalSliceInfo{0, 1});
const auto compute_input_domain = [&](DimensionIndex i,
DimensionIndex input_dim) {
const Index stride = strides[i];
if (stride == 0) {
return absl::InvalidArgumentError("Stride must be non-zero");
}
input_dimension_info[input_dim].stride = stride;
const InputDimensionRef d = rep->input_dimension(input_dim);
TENSORSTORE_ASSIGN_OR_RETURN(
auto new_domain,
GetAffineTransformDomain(d.optionally_implicit_domain(), 0,
stride));
d.domain() = new_domain.interval();
d.implicit_lower_bound() = new_domain.implicit_lower();
d.implicit_upper_bound() = new_domain.implicit_upper();
return absl::OkStatus();
};
for (DimensionIndex i = 0; i < num_dims; ++i) {
const DimensionIndex input_dim = (*dimensions)[i];
TENSORSTORE_RETURN_IF_ERROR(
compute_input_domain(i, input_dim),
MaybeAnnotateStatus(
_, tensorstore::StrCat("Applying stride to input dimension ",
input_dim)));
}
TENSORSTORE_RETURN_IF_ERROR(ApplyOffsetsAndStridesToOutputIndexMaps(
rep.get(), span(input_dimension_info).first(input_rank)));
return TransformAccess::Make<IndexTransform<>>(std::move(rep));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/box.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::AllDims;
using ::tensorstore::BoxView;
using ::tensorstore::Dims;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kImplicit;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::MakeArray;
using ::tensorstore::StrCat;
using ::tensorstore::internal_index_space::EquivalentIndices;
using ::tensorstore::internal_index_space::TestDimExpression;
using ::tensorstore::internal_index_space::TestDimExpressionError;
TEST(ClosedIntervalTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({0, 2, 0})
.input_shape({7, 4, 10})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform =
IndexTransformBuilder<3, 3>()
.input_origin({1, 2, -4})
.input_shape({4, 4, 3})
.input_labels({"x", "y", "z"})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 0, -2, 2)
.Finalize()
.value();
const EquivalentIndices equivalent_indices = {
{{2, 3, 6}, {2, 3, -3}},
};
TestDimExpression(
original_transform,
Dims(0, 2).ClosedInterval({1, 8}, {4, 3}, {1, -2}),
{0, 2},
expected_new_transform,
expected_new_transform,
equivalent_indices);
TestDimExpression(
original_transform,
Dims("x", "z").ClosedInterval({1, 8}, {4, 3}, {1, -2}),
{0, 2},
expected_new_transform,
expected_new_transform,
equivalent_indices);
}
TEST(ClosedIntervalTest, ExampleWithOffset) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({0, 2, 0})
.input_shape({7, 4, 10})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform =
IndexTransformBuilder<3, 3>()
.input_origin({1, 2, -4})
.input_shape({4, 4, 4})
.input_labels({"x", "y", "z"})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 1, -2, 2)
.Finalize()
.value();
const EquivalentIndices equivalent_indices = {
{{2, 3, 7}, {2, 3, -3}},
};
TestDimExpression(
original_transform,
Dims(0, 2).ClosedInterval({1, 9}, {4, 3}, {1, -2}),
{0, 2},
expected_new_transform,
expected_new_transform,
equivalent_indices);
}
TEST(HalfOpenIntervalTest, Example) {
const auto expected_new_transform =
IndexTransformBuilder<3, 3>()
.input_origin({1, 2, -4})
.input_shape({3, 4, 3})
.input_labels({"x", "y", "z"})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 0, -2, 2)
.Finalize()
.value();
TestDimExpression(
IndexTransformBuilder<3, 3>()
.input_origin({0, 2, 0})
.input_shape({7, 4, 10})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value(),
Dims(0, 2).HalfOpenInterval({1, 8}, {4, 3}, {1, -2}),
{0, 2},
expected_new_transform,
expected_new_transform,
{{{2, 3, 6}, {2, 3, -3}}});
}
TEST(SizedIntervalTest, Example) {
const auto expected_new_transform =
IndexTransformBuilder<3, 3>()
.input_origin({1, 2, -4})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 0, -2, 2)
.Finalize()
.value();
TestDimExpression(
IndexTransformBuilder<3, 3>()
.input_origin({0, 2, 0})
.input_shape({7, 4, 10})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value(),
Dims(0, 2).SizedInterval({1, 8}, {3, 2}, {1, -2}),
{0, 2},
expected_new_transform,
expected_new_transform,
{{{2, 3, 6}, {2, 3, -3}}});
}
TEST(ClosedIntervalTest, OneDimensionalConstantNonStrided) {
TestDimExpression(IndexTransformBuilder<1, 1>()
.input_origin({-5})
.input_shape({10})
.output_constant(0, 3)
.Finalize()
.value(),
AllDims().ClosedInterval(-3, 4),
{0},
IndexTransformBuilder<1, 1>()
.input_origin({-3})
.input_shape({8})
.output_identity_transform()
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.input_origin({-3})
.input_shape({8})
.output_constant(0, 3)
.Finalize()
.value(),
{{{1}, {1}}});
}
TEST(ClosedIntervalTest, OneDimensionalConstantPositiveStrided) {
TestDimExpression(IndexTransformBuilder<1, 1>()
.input_origin({-5})
.input_shape({12})
.output_constant(0, 3)
.Finalize()
.value(),
AllDims().ClosedInterval(-3, 5, 2),
{0},
IndexTransformBuilder<1, 1>()
.input_origin({-1})
.input_shape({5})
.output_single_input_dimension(0, -1, 2, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.input_origin({-1})
.input_shape({5})
.output_constant(0, 3)
.Finalize()
.value(),
{{{1}, {1}}});
}
TEST(ClosedIntervalTest, OneDimensionalConstantNegativeStrided) {
TestDimExpression(IndexTransformBuilder<1, 1>()
.input_origin({-5})
.input_shape({12})
.output_constant(0, 3)
.Finalize()
.value(),
AllDims().ClosedInterval(5, -3, -2),
{0},
IndexTransformBuilder<1, 1>()
.input_origin({-2})
.input_shape({5})
.output_single_input_dimension(0, 1, -2, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.input_origin({-2})
.input_shape({5})
.output_constant(0, 3)
.Finalize()
.value(),
{{{1}, {1}}});
}
TEST(ClosedIntervalTest, OneDimensionalSingleInputDimensionNonStrided) {
TestDimExpression(IndexTransformBuilder<1, 1>()
.input_origin({-5})
.input_shape({10})
.output_single_input_dimension(0, 3, 2, 0)
.Finalize()
.value(),
AllDims().ClosedInterval(-3, 4),
{0},
IndexTransformBuilder<1, 1>()
.input_origin({-3})
.input_shape({8})
.output_identity_transform()
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.input_origin({-3})
.input_shape({8})
.output_single_input_dimension(0, 3, 2, 0)
.Finalize()
.value(),
{{{1}, {1}}});
}
TEST(ClosedIntervalTest, OneDimensionalSingleInputDimensionPositiveStrided) {
TestDimExpression(IndexTransformBuilder<1, 1>()
.input_origin({-5})
.input_shape({12})
.output_single_input_dimension(0, 3, 2, 0)
.Finalize()
.value(),
AllDims().ClosedInterval(-3, 5, 2),
{0},
IndexTransformBuilder<1, 1>()
.input_origin({-1})
.input_shape({5})
.output_single_input_dimension(0, -1, 2, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.input_origin({-1})
.input_shape({5})
.output_single_input_dimension(0, 1, 4, 0)
.Finalize()
.value(),
{{{-3}, {-1}}, {{-1}, {0}}});
}
TEST(ClosedIntervalTest, OneDimensionalArrayNonStrided) {
TestDimExpression(
IndexTransformBuilder<1, 1>()
.input_origin({-2})
.input_shape({4})
.output_index_array(0, 3, 2, MakeArray<Index>({6, 5, 4, 3}))
.Finalize()
.value(),
AllDims().ClosedInterval(-1, 1),
{0},
IndexTransformBuilder<1, 1>()
.input_origin({-1})
.input_shape({3})
.output_identity_transform()
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.input_origin({-1})
.input_shape({3})
.output_index_array(0, 3, 2, MakeArray<Index>({5, 4, 3}))
.Finalize()
.value(),
{{{1}, {1}}});
}
TEST(ClosedIntervalTest, OneDimensionalArrayNonStridedZeroElements) {
TestDimExpression(
IndexTransformBuilder<1, 1>()
.input_origin({-2})
.input_shape({4})
.output_index_array(0, 3, 2, MakeArray<Index>({6, 5, 4, 3}))
.Finalize()
.value(),
AllDims().ClosedInterval(-1, -2),
{0},
IndexTransformBuilder<1, 1>()
.input_origin({-1})
.input_shape({0})
.output_identity_transform()
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.input_origin({-1})
.input_shape({0})
.output_constant(0, 0)
.Finalize()
.value(),
{});
}
TEST(ClosedIntervalTest, OneDimensionalArrayNonStridedOneElement) {
TestDimExpression(
IndexTransformBuilder<1, 1>()
.input_origin({-2})
.input_shape({4})
.output_index_array(0, 3, 2, MakeArray<Index>({6, 5, 4, 3}))
.Finalize()
.value(),
AllDims().ClosedInterval(-1, -1),
{0},
IndexTransformBuilder<1, 1>()
.input_origin({-1})
.input_shape({1})
.output_identity_transform()
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.input_origin({-1})
.input_shape({1})
.output_constant(0, 13)
.Finalize()
.value(),
{{{-1}, {-1}}});
}
TEST(ClosedIntervalTest, OneDimensionalArrayNonStridedInvalidOneElement) {
TestDimExpressionError(
IndexTransformBuilder<1, 1>()
.input_origin({-2})
.input_shape({4})
.output_index_array(0, 3, 2, MakeArray<Index>({6, 5, 4, 3}),
IndexInterval::Closed(3, 4))
.Finalize()
.value(),
AllDims().ClosedInterval(-1, -1), absl::StatusCode::kOutOfRange,
"Index 5 is outside valid range \\[3, 5\\)");
}
TEST(SliceTranslateClosedIntervalTest, OneDimensionalArrayNonStrided) {
TestDimExpression(
IndexTransformBuilder<1, 1>()
.input_origin({-2})
.input_shape({4})
.output_index_array(0, 3, 2, MakeArray<Index>({6, 5, 4, 3}))
.Finalize()
.value(),
AllDims().TranslateClosedInterval(-1, 1),
{0},
IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({3})
.output_single_input_dimension(0, -1, 1, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({3})
.output_index_array(0, 3, 2, MakeArray<Index>({5, 4, 3}))
.Finalize()
.value(),
{{{1}, {2}}});
}
TEST(SliceTranslateClosedIntervalTest, OneDimensionalArrayStrided) {
TestDimExpression(
IndexTransformBuilder<1, 1>()
.input_origin({-2})
.input_shape({4})
.output_index_array(0, 3, 2, MakeArray<Index>({6, 5, 4, 3}))
.Finalize()
.value(),
AllDims().TranslateClosedInterval(-1, 1, 2),
{0},
IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({2})
.output_single_input_dimension(0, -1, 2, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({2})
.output_index_array(0, 3, 2, MakeArray<Index>({5, 3}))
.Finalize()
.value(),
{{{-1}, {0}}});
}
TEST(ClosedIntervalTest, DimSubset) {
TestDimExpression(
IndexTransformBuilder<4, 4>()
.input_origin({-10, 1, 2, -kInfIndex})
.input_shape({kInfIndex + 1, 4, 5, kInfIndex + 7})
.output_single_input_dimension(0, 1, 4, 1)
.output_single_input_dimension(1, 2, 3, 3)
.output_constant(2, 3)
.output_index_array(
3, 4, 1,
MakeArray<Index>(
{{{{5}, {6}, {7}, {8}, {9}},
{{15}, {16}, {17}, {18}, {19}},
{{25}, {26}, {27}, {28}, {29}},
{{35}, {36}, {37}, {38}, {39}}}}))
.Finalize()
.value(),
Dims(1, 2, 0).ClosedInterval({2, 2, -5}, {3, 4, 10}),
{1, 2, 0},
IndexTransformBuilder<4, 4>()
.input_origin({-5, 2, 2, -kInfIndex})
.input_shape({16, 2, 3, kInfIndex + 7})
.output_identity_transform()
.Finalize()
.value(),
IndexTransformBuilder<4, 4>()
.input_origin({-5, 2, 2, -kInfIndex})
.input_shape({16, 2, 3, kInfIndex + 7})
.output_single_input_dimension(0, 1, 4, 1)
.output_single_input_dimension(1, 2, 3, 3)
.output_constant(2, 3)
.output_index_array(
3, 4, 1,
MakeArray<Index>(
{{{{15}, {16}, {17}}, {{25}, {26}, {27}}}}))
.Finalize()
.value(),
{{{1, 2, 3, 4}, {1, 2, 3, 4}}});
}
TEST(SliceClosedIntervalTest, DimSubsetStriding) {
TestDimExpression(
IndexTransformBuilder<4, 4>()
.input_origin({-10, 1, 2, -kInfIndex})
.input_shape({kInfIndex + 1, 4, 5, kInfIndex + 7})
.output_single_input_dimension(0, 1, 4, 1)
.output_single_input_dimension(1, 2, 3, 3)
.output_constant(2, 3)
.output_index_array(
3, 4, 1,
MakeArray<Index>(
{{{{5}, {6}, {7}, {8}, {9}},
{{15}, {16}, {17}, {18}, {19}},
{{25}, {26}, {27}, {28}, {29}},
{{35}, {36}, {37}, {38}, {39}}}}))
.Finalize()
.value(),
Dims(1, 2, 0, 3)
.ClosedInterval({3, 2, 10, 1}, {2, 4, -5, kImplicit},
{-1, 2, -2, 4}),
{1, 2, 0, 3},
IndexTransformBuilder<4, 4>()
.input_origin({-5, -3, 1, 0})
.input_shape({8, 2, 2, 2})
.output_single_input_dimension(0, 0, -2, 0)
.output_single_input_dimension(1, 0, -1, 1)
.output_single_input_dimension(2, 0, 2, 2)
.output_single_input_dimension(3, 1, 4, 3)
.Finalize()
.value(),
IndexTransformBuilder<4, 4>()
.input_origin({-5, -3, 1, 0})
.input_shape({8, 2, 2, 2})
.output_single_input_dimension(0, 1, -4, 1)
.output_single_input_dimension(1, 5, 12, 3)
.output_constant(2, 3)
.output_index_array(
3, 4, 1,
MakeArray<Index>({{{{25}, {27}}, {{15}, {17}}}}))
.Finalize()
.value(),
{{{2, 2, 2, 5}, {-1, -2, 1, 1}}});
}
TEST(SliceClosedIntervalTest, UnboundedStart) {
TestDimExpression(IndexTransformBuilder<1, 0>()
.input_origin({5})
.input_shape({10})
.Finalize()
.value(),
Dims(0).ClosedInterval(kImplicit, 9),
{0},
IndexTransformBuilder<1, 1>()
.input_origin({5})
.input_shape({5})
.output_identity_transform()
.Finalize()
.value(),
IndexTransformBuilder<1, 0>()
.input_origin({5})
.input_shape({5})
.Finalize()
.value(),
{});
}
TEST(SliceClosedIntervalTest, OneDimensionalNegativeStridedUnboundedStop) {
TestDimExpression(IndexTransformBuilder<1, 0>()
.input_origin({5})
.input_shape({10})
.Finalize()
.value(),
Dims(0).ClosedInterval(12, kImplicit, -1),
{0},
IndexTransformBuilder<1, 1>()
.input_origin({-12})
.input_shape({8})
.output_single_input_dimension(0, 0, -1, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 0>()
.input_origin({-12})
.input_shape({8})
.Finalize()
.value(),
{});
}
TEST(SliceHalfOpenIntervalTest, OneDimensionalUnstrided) {
TestDimExpression(IndexTransformBuilder<1, 0>()
.input_origin({5})
.input_shape({10})
.Finalize()
.value(),
Dims(0).HalfOpenInterval(6, 10),
{0},
IndexTransformBuilder<1, 1>()
.input_origin({6})
.input_shape({4})
.output_identity_transform()
.Finalize()
.value(),
IndexTransformBuilder<1, 0>()
.input_origin({6})
.input_shape({4})
.Finalize()
.value(),
{});
}
TEST(SliceHalfOpenIntervalTest, OneDimensionalUnstridedUnboundedStart) {
TestDimExpression(IndexTransformBuilder<1, 0>()
.input_origin({5})
.input_shape({10})
.Finalize()
.value(),
Dims(0).HalfOpenInterval(kImplicit, 10),
{0},
IndexTransformBuilder<1, 1>()
.input_origin({5})
.input_shape({5})
.output_identity_transform()
.Finalize()
.value(),
IndexTransformBuilder<1, 0>()
.input_origin({5})
.input_shape({5})
.Finalize()
.value(),
{});
}
TEST(SliceHalfOpenIntervalTest, OneDimensionalUnstridedUnboundedStop) {
TestDimExpression(IndexTransformBuilder<1, 0>()
.input_origin({5})
.input_shape({10})
.Finalize()
.value(),
Dims(0).HalfOpenInterval(6, kImplicit),
{0},
IndexTransformBuilder<1, 1>()
.input_origin({6})
.input_shape({9})
.output_identity_transform()
.Finalize()
.value(),
IndexTransformBuilder<1, 0>()
.input_origin({6})
.input_shape({9})
.Finalize()
.value(),
{});
}
TEST(SliceHalfOpenIntervalTest, OneDimensionalNegativeStridedUnboundedStop) {
TestDimExpression(IndexTransformBuilder<1, 0>()
.input_origin({5})
.input_shape({10})
.Finalize()
.value(),
Dims(0).HalfOpenInterval(12, kImplicit, -1),
{0},
IndexTransformBuilder<1, 1>()
.input_origin({-12})
.input_shape({8})
.output_single_input_dimension(0, 0, -1, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 0>()
.input_origin({-12})
.input_shape({8})
.Finalize()
.value(),
{});
}
TEST(SliceHalfOpenIntervalTest, ErrorHandling) {
TestDimExpressionError(
IndexTransformBuilder<1, 0>().Finalize().value(),
Dims(0).HalfOpenInterval(6, std::numeric_limits<Index>::min() + 1),
absl::StatusCode::kInvalidArgument,
StrCat(".* do not specify a valid closed index interval"));
}
TEST(SliceClosedIntervalTest, ErrorHandling) {
TestDimExpressionError(IndexTransformBuilder<1, 0>()
.input_origin({5})
.input_shape({10})
.Finalize()
.value(),
Dims(0).ClosedInterval(6, 10, 0),
absl::StatusCode::kInvalidArgument,
".*Invalid stride 0");
TestDimExpressionError(
IndexTransformBuilder<1, 0>()
.input_origin({5})
.input_shape({10})
.Finalize()
.value(),
Dims(0).ClosedInterval(6, 4), absl::StatusCode::kInvalidArgument,
".*\\(6, 4\\) do not specify a valid closed index interval");
TestDimExpressionError(
IndexTransformBuilder<1, 0>().input_shape({10}).Finalize().value(),
Dims(0).ClosedInterval(-kInfIndex, 4, 2),
absl::StatusCode::kInvalidArgument,
".*Slicing with non-unit stride of 2 requires a finite start index");
TestDimExpressionError(
IndexTransformBuilder<1, 0>()
.input_origin({2})
.input_shape({kInfIndex - 2 + 1})
.Finalize()
.value(),
Dims(0).ClosedInterval(kInfIndex, 4, -2),
absl::StatusCode::kInvalidArgument,
".*Slicing with non-unit stride of -2 requires a finite start index");
TestDimExpressionError(IndexTransformBuilder<1, 0>()
.input_origin({5})
.input_shape({10})
.Finalize()
.value(),
Dims(0).ClosedInterval(6, 15),
absl::StatusCode::kOutOfRange,
".*Slice interval \\[6, 16\\) is not "
"contained within domain \\[5, 15\\)");
TestDimExpressionError(
IndexTransformBuilder<1, 1>()
.input_origin({5})
.input_shape({10})
.output_single_input_dimension(0, 0,
std::numeric_limits<Index>::max(), 0)
.Finalize()
.value(),
Dims(0).ClosedInterval(5, 10, 3), absl::StatusCode::kInvalidArgument,
"Integer overflow computing offset for output dimension 0");
TestDimExpressionError(
IndexTransformBuilder<1, 1>()
.input_origin({5})
.input_shape({10})
.output_single_input_dimension(0, std::numeric_limits<Index>::max(),
1, 0)
.Finalize()
.value(),
Dims(0).ClosedInterval(5, 10, 3), absl::StatusCode::kInvalidArgument,
"Integer overflow computing offset for output dimension 0");
TestDimExpressionError(
IndexTransformBuilder<1, 1>()
.input_origin({5})
.input_shape({10})
.output_single_input_dimension(0, 0,
std::numeric_limits<Index>::max(), 0)
.Finalize()
.value(),
Dims(0).ClosedInterval(5, 10, 2), absl::StatusCode::kInvalidArgument,
"Integer overflow computing stride for output dimension 0");
}
TEST(SliceTranslateClosedIntervalTest, ErrorHandling) {
TestDimExpressionError(IndexTransformBuilder<1, 0>().Finalize().value(),
Dims(0).TranslateClosedInterval(-kInfIndex, 100),
absl::StatusCode::kInvalidArgument,
".*Interval \\(-inf, 101\\) is not bounded below");
}
TEST(SliceSizedIntervalTest, ErrorHandling) {
TestDimExpressionError(IndexTransformBuilder<1, 0>()
.input_origin({5})
.input_shape({10})
.Finalize()
.value(),
Dims(0).SizedInterval(6, -2),
absl::StatusCode::kInvalidArgument, |
560 | cpp | google/tensorstore | transform_array | tensorstore/index_space/internal/transform_array.cc | tensorstore/index_space/transform_array_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_INTERNAL_TRANSFORM_ARRAY_H_
#define TENSORSTORE_INDEX_SPACE_INTERNAL_TRANSFORM_ARRAY_H_
#include "tensorstore/array.h"
#include "tensorstore/index_space/internal/transform_rep.h"
namespace tensorstore {
namespace internal_index_space {
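// Evaluates `transform` over the input sub-region specified by
// `result_origin` and `result_shape`, producing either a strided view of
// `array` or a newly allocated copy, subject to `constraints`.  On success,
// `result_byte_strides` is filled and the returned element pointer refers to
// the element at position `result_origin`.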
Result<SharedElementPointer<const void>> TransformArraySubRegion(
const SharedArrayView<const void, dynamic_rank, offset_origin>& array,
TransformRep* transform, const Index* result_origin,
const Index* result_shape, Index* result_byte_strides,
TransformArrayConstraints constraints);
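// Like `TransformArraySubRegion`, but operates on the full input domain
// obtained by propagating the bounds of `array` through `transform`, and
// offsets the returned element pointer so that the byte strides apply to
// absolute (origin-preserving) input indices.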
Result<SharedElementPointer<const void>> TransformArrayPreservingOrigin(
SharedArrayView<const void, dynamic_rank, offset_origin> array,
TransformRep* transform, Index* result_origin, Index* result_shape,
Index* result_byte_strides, TransformArrayConstraints constraints);
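// Like `TransformArrayPreservingOrigin`, but discards the propagated input
// origin: the returned element pointer refers to the first element of the
// domain, so the result can be treated as a zero-origin array.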
Result<SharedElementPointer<const void>> TransformArrayDiscardingOrigin(
SharedArrayView<const void, dynamic_rank, offset_origin> array,
TransformRep* transform, Index* result_shape, Index* result_byte_strides,
TransformArrayConstraints constraints);
}
}
#endif
#include "tensorstore/index_space/internal/transform_array.h"
#include "absl/status/status.h"
#include "tensorstore/index_space/internal/iterate_impl.h"
#include "tensorstore/index_space/internal/propagate_bounds.h"
#include "tensorstore/index_space/internal/transform_rep_impl.h"
namespace tensorstore {
namespace internal_index_space {
Result<SharedElementPointer<const void>> TransformArraySubRegion(
const SharedArrayView<const void, dynamic_rank, offset_origin>& array,
TransformRep* transform, const Index* result_origin,
const Index* result_shape, Index* result_byte_strides,
TransformArrayConstraints constraints) {
const DimensionIndex input_rank =
transform ? transform->input_rank : array.rank();
for (DimensionIndex i = 0; i < input_rank; ++i) {
if (result_shape[i] == 0) {
std::fill_n(result_byte_strides, input_rank, 0);
return SharedElementPointer<const void>(std::shared_ptr<const void>(),
array.dtype());
}
}
namespace flags = input_dimension_iteration_flags;
flags::Bitmask input_dimension_flags[kMaxRank];
std::fill_n(
&input_dimension_flags[0], input_rank,
flags::GetDefaultBitmask(constraints.repeated_elements_constraint()));
SingleArrayIterationState single_array_states[2];
TENSORSTORE_RETURN_IF_ERROR(
internal_index_space::InitializeSingleArrayIterationState(
array,
transform,
result_origin,
result_shape, &single_array_states[0],
&input_dimension_flags[0]));
if (single_array_states[0].num_array_indexed_output_dimensions == 0) {
if (constraints.allocate_constraint() != must_allocate) {
std::copy_n(&single_array_states[0].input_byte_strides[0], input_rank,
result_byte_strides);
return SharedElementPointer<void>(
std::shared_ptr<void>(array.pointer(),
single_array_states[0].base_pointer),
array.element_pointer().dtype());
}
const StridedLayoutView<> source_layout(
input_rank, result_shape,
&single_array_states[0].input_byte_strides[0]);
const StridedLayoutView<> new_layout(input_rank, result_shape,
result_byte_strides);
auto element_pointer = internal::AllocateArrayLike(
array.element_pointer().dtype(), source_layout, result_byte_strides,
constraints.iteration_constraints(), default_init);
CopyArray(ArrayView<const void>(
ElementPointer<void>(single_array_states[0].base_pointer,
array.element_pointer().dtype()),
source_layout),
ArrayView<void>(element_pointer, new_layout));
return element_pointer;
}
MarkSingletonDimsAsSkippable(span(result_shape, input_rank),
&input_dimension_flags[0]);
SharedElementPointer<void> new_element_pointer;
if (constraints.order_constraint()) {
Index new_shape[kMaxRank];
for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
new_shape[input_dim] = input_dimension_flags[input_dim] == flags::can_skip
? 1
: result_shape[input_dim];
}
ComputeStrides(constraints.order_constraint().order(), array.dtype()->size,
span<const Index>(&new_shape[0], input_rank),
span(result_byte_strides, input_rank));
for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
if (new_shape[input_dim] <= 1) result_byte_strides[input_dim] = 0;
}
const Index new_origin_offset =
IndexInnerProduct(input_rank, result_byte_strides, result_origin);
new_element_pointer = internal::AllocateAndConstructSharedElements(
ProductOfExtents(span<const Index>(new_shape, input_rank)),
default_init, array.dtype());
const absl::Status init_status =
internal_index_space::InitializeSingleArrayIterationState(
ArrayView<void, dynamic_rank, offset_origin>(
AddByteOffset(ElementPointer<void>(new_element_pointer),
-new_origin_offset),
StridedLayoutView<dynamic_rank, offset_origin>(
input_rank, result_origin, &new_shape[0],
result_byte_strides)),
nullptr,
result_origin,
result_shape, &single_array_states[1],
&input_dimension_flags[0]);
assert(init_status.ok());
}
DimensionIterationOrder base_layout =
constraints.order_constraint()
? ComputeDimensionIterationOrder<2>(
single_array_states,
span(input_dimension_flags).first(input_rank),
{})
: ComputeDimensionIterationOrder<1>(
{&single_array_states[0], 1},
span(input_dimension_flags).first(input_rank),
{});
if (!constraints.order_constraint()) {
Index new_shape[kMaxRank];
Index new_byte_strides[kMaxRank];
for (DimensionIndex i = 0; i < base_layout.pure_strided_end_dim; ++i) {
const DimensionIndex input_dim = base_layout.input_dimension_order[i];
new_shape[i] = result_shape[input_dim];
}
std::fill_n(result_byte_strides, input_rank, 0);
ComputeStrides(
ContiguousLayoutOrder::c, array.dtype()->size,
span<const Index>(&new_shape[0], base_layout.pure_strided_end_dim),
span<Index>(&new_byte_strides[0], base_layout.pure_strided_end_dim));
for (DimensionIndex i = 0; i < base_layout.pure_strided_end_dim; ++i) {
const DimensionIndex input_dim = base_layout.input_dimension_order[i];
result_byte_strides[input_dim] = new_byte_strides[i];
}
new_element_pointer = internal::AllocateAndConstructSharedElements(
ProductOfExtents(
span<const Index>(&new_shape[0], base_layout.pure_strided_end_dim)),
default_init, array.dtype());
const Index new_origin_offset =
IndexInnerProduct(input_rank, result_byte_strides, result_origin);
const absl::Status init_status =
internal_index_space::InitializeSingleArrayIterationState(
ArrayView<void, dynamic_rank, offset_origin>(
AddByteOffset(ElementPointer<void>(new_element_pointer),
-new_origin_offset),
StridedLayoutView<dynamic_rank, offset_origin>(
input_rank, result_origin, &new_shape[0],
result_byte_strides)),
nullptr,
result_origin,
result_shape, &single_array_states[1],
&input_dimension_flags[0]);
assert(init_status.ok());
}
SimplifiedDimensionIterationOrder layout = SimplifyDimensionIterationOrder<2>(
base_layout, span(result_shape, input_rank), single_array_states);
const std::array<std::ptrdiff_t, 2> element_sizes{array.dtype()->size,
array.dtype()->size};
[[maybe_unused]] const bool success = IterateUsingSimplifiedLayout<2>(
layout, span(result_shape, input_rank),
{&array.dtype()->copy_assign, nullptr},
nullptr, single_array_states, element_sizes);
assert(success);
return new_element_pointer;
}
Result<SharedElementPointer<const void>> TransformArrayPreservingOrigin(
SharedArrayView<const void, dynamic_rank, offset_origin> array,
TransformRep* transform, Index* result_origin, Index* result_shape,
Index* result_byte_strides, TransformArrayConstraints constraints) {
const DimensionIndex input_rank =
transform ? transform->input_rank : array.rank();
TENSORSTORE_RETURN_IF_ERROR(PropagateExplicitBounds(
array.domain(),
transform,
MutableBoxView<>(input_rank, result_origin, result_shape)));
TENSORSTORE_ASSIGN_OR_RETURN(
auto element_pointer,
TransformArraySubRegion(array, transform, result_origin, result_shape,
result_byte_strides, constraints));
return AddByteOffset(std::move(element_pointer),
-IndexInnerProduct(transform->input_rank,
result_byte_strides, result_origin));
}
Result<SharedElementPointer<const void>> TransformArrayDiscardingOrigin(
SharedArrayView<const void, dynamic_rank, offset_origin> array,
TransformRep* transform, Index* result_shape, Index* result_byte_strides,
TransformArrayConstraints constraints) {
const DimensionIndex input_rank =
transform ? transform->input_rank : array.rank();
Index result_origin[kMaxRank];
TENSORSTORE_RETURN_IF_ERROR(PropagateExplicitBounds(
array.domain(),
transform,
MutableBoxView<>(input_rank, &result_origin[0], result_shape)));
return TransformArraySubRegion(array, transform, &result_origin[0],
result_shape, result_byte_strides,
constraints);
}
}
} | #include "tensorstore/index_space/internal/transform_array.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::IdentityTransform;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::IndexTransformView;
using ::tensorstore::MakeArray;
using ::tensorstore::MakeOffsetArray;
using ::tensorstore::MatchesStatus;
TEST(TransformArrayTest, OneDimensionalIdentity) {
auto original_array = tensorstore::MakeArray<int>({1, 2, 3, 4});
auto new_array =
tensorstore::TransformArray(original_array, IdentityTransform<1>())
.value();
EXPECT_EQ(original_array, new_array);
}
TEST(TransformArrayTest, OneDimensionalIdentityWithOrigin) {
auto original_array = tensorstore::MakeOffsetArray<int>({5}, {1, 2, 3, 4});
auto new_array =
tensorstore::TransformArray(original_array, IdentityTransform<1>())
.value();
EXPECT_EQ(original_array, new_array);
}
TEST(TransformArrayTest, OneDimensionalSliceUnstrided) {
auto original_array = tensorstore::MakeArray<int>({1, 2, 3, 4});
auto new_array = tensorstore::TransformArray(
original_array, IndexTransformBuilder<1, 1>()
.input_origin({1})
.input_shape({2})
.output_single_input_dimension(0, 0)
.Finalize()
.value())
.value();
EXPECT_EQ(&original_array(1), &new_array(1));
EXPECT_EQ(MakeOffsetArray<int>({1}, {2, 3}), new_array);
}
TEST(TransformArrayTest, OneDimensionalSliceUnstridedWithOrigin) {
auto original_array = tensorstore::MakeOffsetArray<int>({5}, {1, 2, 3, 4});
auto new_array =
tensorstore::TransformArray(original_array,
IndexTransformBuilder<1, 1>()
.input_origin({1})
.input_shape({2})
.output_single_input_dimension(0, 5, 1, 0)
.Finalize()
.value())
.value();
EXPECT_EQ(&original_array(6), &new_array(1));
EXPECT_EQ(MakeOffsetArray<int>({1}, {2, 3}), new_array);
}
TEST(TransformArrayTest, OneDimensionalSliceStrided) {
auto original_array = tensorstore::MakeArray<int>({1, 2, 3, 4});
auto new_array =
tensorstore::TransformArray(
original_array, IndexTransformBuilder<1, 1>()
.input_origin({1})
.input_shape({2})
.output_single_input_dimension(0, -1, 2, 0)
.Finalize()
.value())
.value();
EXPECT_EQ(&original_array(1), &new_array(1));
EXPECT_EQ(MakeOffsetArray<int>({1}, {2, 4}), new_array);
}
TEST(TransformArrayTest, OneDimensionalSliceStridedWithOrigin) {
auto original_array = tensorstore::MakeOffsetArray<int>({5}, {1, 2, 3, 4});
auto new_array =
tensorstore::TransformArray(original_array,
IndexTransformBuilder<1, 1>()
.input_origin({1})
.input_shape({2})
.output_single_input_dimension(0, 4, 2, 0)
.Finalize()
.value())
.value();
EXPECT_EQ(&original_array(6), &new_array(1));
EXPECT_EQ(MakeOffsetArray<int>({1}, {2, 4}), new_array);
}
TEST(TransformArrayTest, OneDArrayOneDIndexArray) {
auto original_array = tensorstore::MakeArray<int>({1, 2, 3, 4});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<1, 1>()
.input_origin({2})
.input_shape({4})
.output_index_array(0, 1, 1, MakeArray<Index>({0, 2, 2, 1}))
.Finalize()
.value())
.value();
EXPECT_EQ(MakeOffsetArray<int>({2}, {2, 4, 4, 3}), new_array);
}
TEST(TransformArrayTest, OneDArrayOneDIndexArray1025) {
constexpr Index kSize = 1025;
auto index_array = tensorstore::AllocateArray<Index>({kSize});
for (Index i = 0; i < kSize; ++i) index_array(i) = i;
auto new_array =
tensorstore::TransformArray(index_array,
IndexTransformBuilder<1, 1>()
.input_shape({kSize})
.output_index_array(0, 0, 1, index_array)
.Finalize()
.value())
.value();
EXPECT_EQ(index_array, new_array);
}
TEST(TransformArrayTest, TwoDArrayOneDIndexArrayRetainZeroStride) {
auto index_array = tensorstore::MakeArray<Index>({0, 1, 2, 3, 4});
tensorstore::SharedArray<Index, 2> index_array2;
index_array2.element_pointer() = index_array.element_pointer();
index_array2.shape()[0] = 5;
index_array2.shape()[1] = 2;
index_array2.byte_strides()[0] = index_array.byte_strides()[0];
index_array2.byte_strides()[1] = 0;
EXPECT_EQ(index_array2,
MakeArray<Index>({{0, 0}, {1, 1}, {2, 2}, {3, 3}, {4, 4}}));
auto new_array =
tensorstore::TransformArray(index_array2,
IndexTransformBuilder<2, 2>()
.input_shape({5, 2})
.output_index_array(0, 0, 1, index_array2)
.output_single_input_dimension(1, 1)
.Finalize()
.value())
.value();
EXPECT_EQ(index_array2, new_array);
EXPECT_EQ(index_array2.layout(), new_array.layout());
}
TEST(TransformArrayTest, IndexArrayBoundsOverflow) {
auto original_array = tensorstore::MakeOffsetArray<int>({5}, {1, 2, 3, 4});
EXPECT_THAT(tensorstore::TransformArray(
original_array,
IndexTransformBuilder<1, 1>()
.input_origin({2})
.input_shape({4})
.output_index_array(0, std::numeric_limits<Index>::min(),
1, MakeArray<Index>({0, 2, 2, 1}))
.Finalize()
.value())
.status(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*Integer overflow propagating range.*"));
}
TEST(TransformArrayTest, OneDArrayOneDIndexArrayWithOrigin) {
auto original_array = tensorstore::MakeOffsetArray<int>({5}, {1, 2, 3, 4});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<1, 1>()
.input_origin({2})
.input_shape({4})
.output_index_array(0, 6, 1, MakeArray<Index>({0, 2, 2, 1}))
.Finalize()
.value())
.value();
EXPECT_EQ(MakeOffsetArray<int>({2}, {2, 4, 4, 3}), new_array);
}
TEST(TransformArrayTest, TwoDArrayOneDIndexArray) {
auto original_array =
tensorstore::MakeArray<int>({{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({2, 4})
.output_single_input_dimension(0, -1, 1, 0)
.output_index_array(1, 1, 1, MakeArray<Index>({{0, 2, 2, 1}}))
.Finalize()
.value())
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2}, {{2, 4, 4, 3}, {6, 8, 8, 7}}),
new_array);
}
TEST(TransformArrayTest, TwoDArrayOneDIndexArrayWithOrigin) {
auto original_array =
tensorstore::MakeOffsetArray<int>({5, 6}, {{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({2, 4})
.output_single_input_dimension(0, 4, 1, 0)
.output_index_array(1, 7, 1, MakeArray<Index>({{0, 2, 2, 1}}))
.Finalize()
.value())
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2}, {{2, 4, 4, 3}, {6, 8, 8, 7}}),
new_array);
}
TEST(TransformArrayTest, TwoDArrayOneDIndexArrayStrided) {
auto original_array =
tensorstore::MakeArray<int>({{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({2, 4})
.output_single_input_dimension(0, 2, -1, 0)
.output_index_array(1, 1, 2, MakeArray<Index>({{0, 1, 1, 0}}))
.Finalize()
.value())
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2}, {{6, 8, 8, 6}, {2, 4, 4, 2}}),
new_array);
}
TEST(TransformArrayTest, ArrayIndexOutOfBounds) {
auto original_array =
tensorstore::MakeArray<int>({{1, 2, 3, 4}, {5, 6, 7, 8}});
EXPECT_THAT(
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({2, 4})
.output_single_input_dimension(0, 2, -1, 0)
.output_index_array(1, 1, 2, MakeArray<Index>({{0, 2, 1, 0}}))
.Finalize()
.value())
.status(),
MatchesStatus(absl::StatusCode::kOutOfRange,
".*Index 2 is outside valid range \\[0, 2\\).*"));
EXPECT_THAT(
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({2, 4})
.output_single_input_dimension(0, 2, -1, 0)
.output_index_array(1, 1, 2, MakeArray<Index>({{0, -1, 1, 0}}))
.Finalize()
.value())
.status(),
MatchesStatus(absl::StatusCode::kOutOfRange,
".*Index -1 is outside valid range \\[0, 2\\).*"));
}
TEST(TransformArrayTest, TwoDArrayOneDIndexArrayStridedWithOrigin) {
auto original_array =
tensorstore::MakeOffsetArray<int>({5, 6}, {{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({2, 4})
.output_single_input_dimension(0, 7, -1, 0)
.output_index_array(1, 7, 2, MakeArray<Index>({{0, 1, 1, 0}}))
.Finalize()
.value())
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2}, {{6, 8, 8, 6}, {2, 4, 4, 2}}),
new_array);
EXPECT_THAT(new_array.byte_strides(),
::testing::ElementsAre(sizeof(int), sizeof(int) * 2));
}
TEST(TransformArrayTest, IncludeRepeated) {
auto original_array =
tensorstore::MakeOffsetArray<int>({5, 6}, {{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<3, 2>()
.input_origin({1, 2, 3})
.input_shape({2, 4, 2})
.output_single_input_dimension(0, 7, -1, 0)
.output_index_array(1, 7, 2,
MakeArray<Index>({{{0}, {1}, {1}, {0}}}))
.Finalize()
.value(),
tensorstore::include_repeated_elements)
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2, 3}, {{{6, 6}, {8, 8}, {8, 8}, {6, 6}},
{{2, 2}, {4, 4}, {4, 4}, {2, 2}}}),
new_array);
EXPECT_THAT(
new_array.byte_strides(),
::testing::ElementsAre(sizeof(int) * 2, sizeof(int) * 4, sizeof(int)));
}
TEST(TransformArrayTest, SkipSingleton) {
auto original_array =
tensorstore::MakeOffsetArray<int>({5, 6}, {{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<3, 2>()
.input_origin({1, 2, 3})
.input_shape({2, 4, 1})
.output_single_input_dimension(0, 7, -1, 0)
.output_index_array(1, 7, 2,
MakeArray<Index>({{{0}, {1}, {1}, {0}}}))
.Finalize()
.value(),
tensorstore::skip_repeated_elements)
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2, 3},
{{{6}, {8}, {8}, {6}}, {{2}, {4}, {4}, {2}}}),
new_array);
EXPECT_THAT(new_array.byte_strides(),
::testing::ElementsAre(sizeof(int), sizeof(int) * 2, 0));
}
TEST(TransformArrayTest, SkipRepeated) {
auto original_array =
tensorstore::MakeOffsetArray<int>({5, 6}, {{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<3, 2>()
.input_origin({1, 2, 3})
.input_shape({2, 4, 2})
.output_single_input_dimension(0, 7, -1, 0)
.output_index_array(1, 7, 2,
MakeArray<Index>({{{0}, {1}, {1}, {0}}}))
.Finalize()
.value(),
tensorstore::skip_repeated_elements)
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2, 3}, {{{6, 6}, {8, 8}, {8, 8}, {6, 6}},
{{2, 2}, {4, 4}, {4, 4}, {2, 2}}}),
new_array);
EXPECT_THAT(new_array.byte_strides(),
::testing::ElementsAre(sizeof(int), sizeof(int) * 2, 0));
}
TEST(TransformArrayTest, OrderConstraint) {
auto original_array =
tensorstore::MakeOffsetArray<int>({5, 6}, {{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({2, 4})
.output_single_input_dimension(0, 7, -1, 0)
.output_index_array(1, 7, 2, MakeArray<Index>({{0, 1, 1, 0}}))
.Finalize()
.value(),
tensorstore::c_order)
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2}, {{6, 8, 8, 6}, {2, 4, 4, 2}}),
new_array);
EXPECT_THAT(new_array.byte_strides(),
::testing::ElementsAre(sizeof(int) * 4, sizeof(int)));
}
TEST(TransformArrayTest, OrderConstraintIncludeRepeated) {
auto original_array =
tensorstore::MakeOffsetArray<int>({5, 6}, {{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<3, 2>()
.input_origin({1, 2, 3})
.input_shape({2, 4, 2})
.output_single_input_dimension(0, 7, -1, 0)
.output_index_array(1, 7, 2,
MakeArray<Index>({{{0}, {1}, {1}, {0}}}))
.Finalize()
.value(),
{tensorstore::c_order, tensorstore::include_repeated_elements})
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2, 3}, {{{6, 6}, {8, 8}, {8, 8}, {6, 6}},
{{2, 2}, {4, 4}, {4, 4}, {2, 2}}}),
new_array);
EXPECT_THAT(
new_array.byte_strides(),
::testing::ElementsAre(sizeof(int) * 8, sizeof(int) * 2, sizeof(int)));
}
TEST(TransformArrayTest, OrderConstraintSkipRepeated) {
auto original_array =
tensorstore::MakeOffsetArray<int>({5, 6}, {{1, 2, 3, 4}, {5, 6, 7, 8}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<3, 2>()
.input_origin({1, 2, 3})
.input_shape({2, 4, 2})
.output_single_input_dimension(0, 7, -1, 0)
.output_index_array(1, 7, 2,
MakeArray<Index>({{{0}, {1}, {1}, {0}}}))
.Finalize()
.value(),
{tensorstore::c_order, tensorstore::skip_repeated_elements})
.value();
EXPECT_EQ(MakeOffsetArray<int>({1, 2, 3}, {{{6, 6}, {8, 8}, {8, 8}, {6, 6}},
{{2, 2}, {4, 4}, {4, 4}, {2, 2}}}),
new_array);
EXPECT_THAT(new_array.byte_strides(),
::testing::ElementsAre(sizeof(int) * 4, sizeof(int), 0));
}
TEST(TransformArrayTest, MultipleArrayIndexedDimensions) {
auto original_array = tensorstore::MakeArray<int>({{1, 2}, {5, 6}});
auto new_array =
tensorstore::TransformArray(
original_array,
IndexTransformBuilder<2, 2>()
.input_origin({0, 0})
.input_shape({2, 2})
.output_index_array(0, 0, 1, MakeArray<Index>({{0, 1}}))
.output_index_array(1, 0, 1, MakeArray<Index>({{0}, {1}}))
.Finalize()
.value())
.value();
EXPECT_EQ(MakeArray<int>({{1, 5}, {2, 6}}), new_array);
}
TEST(TransformArrayTest, EmptyDomain) {
auto original_array = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
(IndexTransformBuilder<2, 2>()
.input_shape({0, 3})
.implicit_upper_bounds({1, 0})
.output_single_input_dimension(0, 0)
.output_index_array(0, 0, 1, MakeArray<Index>({{0, 1, 2}}))
.Finalize()));
EXPECT_THAT(tensorstore::TransformArray(original_array, transform),
::testing::Optional(tensorstore::AllocateArray<int>({0, 3})));
}
} |
561 | cpp | google/tensorstore | mark_explicit_op | tensorstore/index_space/internal/mark_explicit_op.cc | tensorstore/index_space/mark_explicit_op_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_INTERNAL_MARK_EXPLICIT_OP_H_
#define TENSORSTORE_INDEX_SPACE_INTERNAL_MARK_EXPLICIT_OP_H_
#include "tensorstore/index_space/dimension_index_buffer.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_index_space {
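// Sets the implicit state of the lower and/or upper bounds of the dimensions
// in `*dimensions` to `implicit`.  Fails if a bound would become implicit on
// a dimension that indexes an index array output map.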
Result<IndexTransform<>> ApplyChangeImplicitState(
IndexTransform<> transform, DimensionIndexBuffer* dimensions, bool implicit,
bool lower, bool upper, bool domain_only = false);
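// DimExpression operation that backs `MarkBoundsExplicit` and
// `UnsafeMarkBoundsImplicit`: sets the implicit indicator of the lower
// and/or upper bounds of each selected dimension to `implicit`.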
struct ChangeImplicitStateOp {
static constexpr bool selected_dimensions_are_new = false;
constexpr static DimensionIndex GetNewStaticInputRank(
DimensionIndex input_rank, DimensionIndex num_input_dims) {
return input_rank;
}
constexpr static DimensionIndex GetStaticSelectionRank(
DimensionIndex num_input_dims) {
return num_input_dims;
}
Result<IndexTransform<>> Apply(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
bool domain_only) const {
return ApplyChangeImplicitState(std::move(transform), dimensions,
implicit, lower, upper,
domain_only);
}
bool implicit;
bool lower;
bool upper;
};
}
}
#endif
#include "tensorstore/index_space/internal/mark_explicit_op.h"
#include "absl/status/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
Result<IndexTransform<>> ApplyChangeImplicitState(
IndexTransform<> transform, DimensionIndexBuffer* dimensions, bool implicit,
bool lower, bool upper, bool domain_only) {
if (!lower && !upper) {
return transform;
}
TransformRep::Ptr<> rep = MutableRep(
TransformAccess::rep_ptr<container>(std::move(transform)), domain_only);
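  // Marking a bound implicit is disallowed for a dimension that indexes an
  // index array: the array has only been validated over the current explicit
  // domain, and implicit bounds could later allow the domain to grow past it.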
if (implicit) {
for (DimensionIndex output_dim = 0, output_rank = rep->output_rank;
output_dim < output_rank; ++output_dim) {
auto& map = rep->output_index_maps()[output_dim];
if (map.method() != OutputIndexMethod::array) continue;
auto& index_array_data = map.index_array_data();
for (DimensionIndex input_dim : *dimensions) {
if (index_array_data.byte_strides[input_dim] != 0) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Cannot mark input dimension ", input_dim,
" as having implicit bounds because it indexes the index array "
"map for output dimension ",
output_dim));
}
}
}
}
for (DimensionIndex input_dim : *dimensions) {
const auto d = rep->input_dimension(input_dim);
if (lower) d.implicit_lower_bound() = implicit;
if (upper) d.implicit_upper_bound() = implicit;
}
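  // Marking bounds explicit may make the domain explicitly empty; index
  // arrays then contain no accessible entries and are replaced with constant
  // maps to keep the representation valid.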
if (!implicit && IsDomainExplicitlyEmpty(rep.get())) {
ReplaceAllIndexArrayMapsWithConstantMaps(rep.get());
}
internal_index_space::DebugCheckInvariants(rep.get());
return TransformAccess::Make<IndexTransform<>>(std::move(rep));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::Dims;
using ::tensorstore::Index;
using ::tensorstore::IndexDomainBuilder;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::MakeArray;
using ::tensorstore::internal_index_space::TestDimExpression;
using ::tensorstore::internal_index_space::TestDimExpressionErrorTransformOnly;
TEST(MarkBoundsExplicitTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({1, 0, 0})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({0, 0, 0})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
TestDimExpression(original_transform,
Dims(0, 2).MarkBoundsExplicit(),
{0, 2},
expected_new_transform,
expected_new_transform,
{});
TestDimExpression(original_transform,
Dims(0, 2).MarkBoundsExplicit(true, true),
{0, 2},
expected_new_transform,
expected_new_transform,
{});
TestDimExpression(original_transform,
Dims("x", "z").MarkBoundsExplicit(),
{0, 2},
expected_new_transform,
expected_new_transform,
{});
}
TEST(MarkBoundsExplicitTest, IndexArray) {
TestDimExpression(
IndexTransformBuilder(2, 1)
.input_shape({2, 3})
.implicit_upper_bounds({1, 0})
.output_index_array(0, 0, 1, MakeArray<Index>({{1, 2, 3}}))
.Finalize()
.value(),
Dims(0).MarkBoundsExplicit(),
{0},
IndexTransformBuilder(2, 2)
.input_shape({2, 3})
.output_identity_transform()
.Finalize()
.value(),
IndexTransformBuilder(2, 1)
.input_shape({2, 3})
.output_index_array(0, 0, 1, MakeArray<Index>({{1, 2, 3}}))
.Finalize()
.value(),
{});
}
TEST(MarkBoundsExplicitTest, IndexArrayZeroSize) {
TestDimExpression(
IndexTransformBuilder(2, 1)
.input_shape({0, 3})
.implicit_upper_bounds({1, 0})
.output_index_array(0, 0, 1, MakeArray<Index>({{1, 2, 3}}))
.Finalize()
.value(),
Dims(0).MarkBoundsExplicit(),
{0},
IndexTransformBuilder(2, 2)
.input_shape({0, 3})
.output_identity_transform()
.Finalize()
.value(),
IndexTransformBuilder(2, 1)
.input_shape({0, 3})
.output_constant(0, 0)
.Finalize()
.value(),
{});
}
TEST(UnsafeMarkBoundsImplicitTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({0, 0, 0})
.implicit_upper_bounds({0, 0, 0})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({1, 0, 1})
.implicit_upper_bounds({1, 0, 1})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
TestDimExpression(original_transform,
Dims(0, 2).UnsafeMarkBoundsImplicit(),
{0, 2},
expected_new_transform,
expected_new_transform,
{},
true,
false);
TestDimExpression(
original_transform,
Dims(0, 2).UnsafeMarkBoundsImplicit(true, true),
{0, 2},
expected_new_transform,
expected_new_transform,
{},
true,
false);
TestDimExpression(original_transform,
Dims("x", "z").UnsafeMarkBoundsImplicit(),
{0, 2},
expected_new_transform,
expected_new_transform,
{},
true,
false);
}
TEST(UnsafeMarkBoundsImplicitTest, IndexArray) {
TestDimExpression(
IndexTransformBuilder(2, 1)
.input_shape({2, 3})
.output_index_array(0, 0, 1, MakeArray<Index>({{1, 2, 3}}))
.Finalize()
.value(),
Dims(0).UnsafeMarkBoundsImplicit(false, true),
{0},
IndexTransformBuilder(2, 2)
.input_shape({2, 3})
.implicit_upper_bounds({1, 0})
.output_identity_transform()
.Finalize()
.value(),
IndexTransformBuilder(2, 1)
.input_shape({2, 3})
.implicit_upper_bounds({1, 0})
.output_index_array(0, 0, 1, MakeArray<Index>({{1, 2, 3}}))
.Finalize()
.value(),
{},
true,
false);
}
TEST(UnsafeMarkBoundsImplicitTest, IndexArrayInvalid) {
TestDimExpressionErrorTransformOnly(
IndexTransformBuilder(2, 1)
.input_shape({2, 3})
.output_index_array(0, 0, 1, MakeArray<Index>({{1, 2, 3}}))
.Finalize()
.value(),
Dims(1).UnsafeMarkBoundsImplicit(false, true),
absl::StatusCode::kInvalidArgument,
"Cannot mark input dimension 1 as having implicit bounds because it "
"indexes the index array map for output dimension 0",
IndexDomainBuilder(2)
.shape({2, 3})
.implicit_upper_bounds({0, 1})
.Finalize()
.value());
}
TEST(MarkBoundsExplicitTest, LowerOnly) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({1, 0, 0})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({1, 0, 0})
.output_identity_transform()
.Finalize()
.value();
TestDimExpression(original_transform,
Dims(0, 2).MarkBoundsExplicit(true, false),
{0, 2},
expected_new_transform,
expected_new_transform,
{});
}
TEST(MarkBoundsExplicitTest, UpperOnly) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({1, 0, 0})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({0, 0, 0})
.output_identity_transform()
.Finalize()
.value();
TestDimExpression(original_transform,
Dims(0, 2).MarkBoundsExplicit(false, true),
{0, 2},
expected_new_transform,
expected_new_transform,
{});
}
TEST(MarkBoundsExplicitTest, None) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({1, 0, 0})
.output_identity_transform()
.Finalize()
.value();
TestDimExpression(original_transform,
Dims(0, 2).MarkBoundsExplicit(false, false),
{0, 2},
original_transform,
original_transform,
{});
}
} |
562 | cpp | google/tensorstore | add_new_dims_op | tensorstore/index_space/internal/add_new_dims_op.cc | tensorstore/index_space/add_new_dims_op_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_INTERNAL_ADD_NEW_DIMS_OP_H_
#define TENSORSTORE_INDEX_SPACE_INTERNAL_ADD_NEW_DIMS_OP_H_
#include "tensorstore/index_space/dimension_index_buffer.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/internal/string_like.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_index_space {
Result<IndexTransform<>> ApplyAddNewDims(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
bool domain_only);
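// DimExpression operation implementing `AddNew()`: each selected dimension
// position becomes a new unbounded dimension with implicit bounds.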
struct AddNewDimsOp {
static constexpr bool selected_dimensions_are_new = true;
constexpr static DimensionIndex GetNewStaticInputRank(
DimensionIndex input_rank, DimensionIndex num_input_dims) {
return RankConstraint::Add(input_rank, num_input_dims);
}
constexpr static DimensionIndex GetStaticSelectionRank(
DimensionIndex num_input_dims) {
return num_input_dims;
}
Result<IndexTransform<>> Apply(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
bool domain_only) const {
return ApplyAddNewDims(std::move(transform), dimensions, domain_only);
}
};
}
}
#endif
#include "tensorstore/index_space/internal/add_new_dims_op.h"
#include <cassert>
#include <utility>
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/dimension_index_buffer.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal_index_space {
namespace {
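// Copies `original` into `result`, inserting the dimensions listed in
// `*dimensions` as new inert dimensions that do not affect any output
// index map.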
void AddNewDims(TransformRep* original, TransformRep* result,
DimensionIndexBuffer* dimensions, bool domain_only) {
const DimensionIndex orig_input_rank = original->input_rank;
const DimensionIndex new_input_rank = orig_input_rank + dimensions->size();
assert(result->input_rank_capacity >= new_input_rank);
const DimensionIndex output_rank = domain_only ? 0 : original->output_rank;
assert(result->output_rank_capacity >= output_rank);
DimensionSet newly_added_input_dims;
for (DimensionIndex new_input_dim : *dimensions) {
newly_added_input_dims[new_input_dim] = true;
}
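  // Map each original input dimension to its position once the new
  // dimensions have been inserted.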
DimensionIndex orig_to_new_input_dim[kMaxRank];
for (DimensionIndex new_input_dim = 0, orig_input_dim = 0;
new_input_dim < new_input_rank; ++new_input_dim) {
if (newly_added_input_dims[new_input_dim]) continue;
orig_to_new_input_dim[orig_input_dim] = new_input_dim;
++orig_input_dim;
}
span<const OutputIndexMap> orig_maps =
original->output_index_maps().first(output_rank);
span<OutputIndexMap> result_maps =
result->output_index_maps().first(output_rank);
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
const auto& orig_map = orig_maps[output_dim];
auto& result_map = result_maps[output_dim];
result_map.stride() = orig_map.stride();
result_map.offset() = orig_map.offset();
switch (orig_map.method()) {
case OutputIndexMethod::constant:
result_map.SetConstant();
break;
case OutputIndexMethod::single_input_dimension: {
const DimensionIndex orig_input_dim = orig_map.input_dimension();
assert(orig_input_dim >= 0 && orig_input_dim < orig_input_rank);
const DimensionIndex new_input_dim =
orig_to_new_input_dim[orig_input_dim];
result_map.SetSingleInputDimension(new_input_dim);
break;
}
case OutputIndexMethod::array: {
auto& result_index_array = result_map.SetArrayIndexing(new_input_rank);
const auto& orig_index_array = orig_map.index_array_data();
for (DimensionIndex orig_input_dim = orig_input_rank - 1;
orig_input_dim >= 0; --orig_input_dim) {
const DimensionIndex new_input_dim =
orig_to_new_input_dim[orig_input_dim];
assert(new_input_dim >= orig_input_dim);
result_index_array.byte_strides[new_input_dim] =
orig_index_array.byte_strides[orig_input_dim];
}
for (const DimensionIndex new_input_dim : *dimensions) {
result_index_array.byte_strides[new_input_dim] = 0;
}
result_index_array.index_range = orig_index_array.index_range;
result_index_array.element_pointer = orig_index_array.element_pointer;
break;
}
}
}
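  // Copy existing input dimensions to their new, possibly shifted positions,
  // iterating backward since `result` may alias `original` and the new
  // positions are never smaller than the original ones.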
for (DimensionIndex orig_input_dim = orig_input_rank - 1; orig_input_dim >= 0;
--orig_input_dim) {
const DimensionIndex new_input_dim = orig_to_new_input_dim[orig_input_dim];
result->input_dimension(new_input_dim) =
original->input_dimension(orig_input_dim);
}
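  // Initialize each newly added dimension with an unbounded domain, implicit
  // bounds, and an empty label.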
for (DimensionIndex new_input_dim : *dimensions) {
const auto d = result->input_dimension(new_input_dim);
d.domain() = IndexInterval::UncheckedSized(-kInfIndex, kInfSize);
d.implicit_lower_bound() = true;
d.implicit_upper_bound() = true;
d.SetEmptyLabel();
}
result->input_rank = new_input_rank;
result->output_rank = output_rank;
}
}
Result<IndexTransform<>> ApplyAddNewDims(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
bool domain_only) {
const DimensionIndex new_input_rank =
transform.input_rank() + dimensions->size();
TENSORSTORE_RETURN_IF_ERROR(ValidateRank(new_input_rank));
auto new_rep =
NewOrMutableRep(TransformAccess::rep(transform), new_input_rank,
transform.output_rank(), domain_only);
AddNewDims(TransformAccess::rep(transform), new_rep.get(), dimensions,
domain_only);
internal_index_space::DebugCheckInvariants(new_rep.get());
return TransformAccess::Make<IndexTransform<>>(std::move(new_rep));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
namespace {
using ::tensorstore::Dims;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::MakeArray;
using ::tensorstore::internal_index_space::TestDimExpression;
using ::tensorstore::internal_index_space::TestDimExpressionError;
TEST(AddNewTest, Example) {
const auto expected_new_transform =
IndexTransformBuilder<3, 1>()
.input_origin({-kInfIndex, 1, -kInfIndex})
.input_shape({kInfSize, 5, kInfSize})
.implicit_lower_bounds({1, 0, 1})
.implicit_upper_bounds({1, 0, 1})
.input_labels({"", "x", ""})
.output_single_input_dimension(0, 1)
.Finalize()
.value();
TestDimExpression(
IndexTransformBuilder<1, 1>()
.input_origin({1})
.input_shape({5})
.input_labels({"x"})
.output_single_input_dimension(0, 0)
.Finalize()
.value(),
Dims(0, -1).AddNew(),
{0, 2},
expected_new_transform,
expected_new_transform,
{
{{2}, {1, 2, 8}},
{{2}, {5, 2, 9}},
},
false);
}
TEST(AddNewTest, Simple) {
TestDimExpression(
IndexTransformBuilder<2, 3>()
.input_origin({2, 3})
.input_shape({3, 4})
.output_single_input_dimension(0, 1, 3, 1)
.output_single_input_dimension(1, 2, 4, 0)
.output_index_array(2, 3, 5,
MakeArray<Index>({{1, 2, 3, 4}}),
IndexInterval::Closed(-1, 10))
.Finalize()
.value(),
Dims(0, -1).AddNew(),
{0, 3},
IndexTransformBuilder<4, 2>()
.input_origin({-kInfIndex, 2, 3, -kInfIndex})
.input_shape({kInfSize, 3, 4, kInfSize})
.implicit_lower_bounds({1, 0, 0, 1})
.implicit_upper_bounds({1, 0, 0, 1})
.output_single_input_dimension(0, 1)
.output_single_input_dimension(1, 2)
.Finalize()
.value(),
IndexTransformBuilder<4, 3>()
.input_origin({-kInfIndex, 2, 3, -kInfIndex})
.input_shape({kInfSize, 3, 4, kInfSize})
.implicit_lower_bounds({1, 0, 0, 1})
.implicit_upper_bounds({1, 0, 0, 1})
.output_single_input_dimension(0, 1, 3, 2)
.output_single_input_dimension(1, 2, 4, 1)
.output_index_array(
2, 3, 5, MakeArray<Index>({{{{1}, {2}, {3}, {4}}}}),
IndexInterval::Closed(-1, 10))
.Finalize()
.value(),
{
{{3, 4}, {100, 3, 4, 500}},
{{3, 4}, {-100, 3, 4, -500}},
},
false);
}
TEST(AddNewTest, Constant) {
TestDimExpression(IndexTransformBuilder<1, 1>()
.input_origin({1})
.input_shape({5})
.output_constant(0, 1)
.Finalize()
.value(),
Dims(0).AddNew(),
{0},
IndexTransformBuilder<2, 1>()
.input_origin({-kInfIndex, 1})
.input_shape({kInfSize, 5})
.implicit_lower_bounds({1, 0})
.implicit_upper_bounds({1, 0})
.output_single_input_dimension(0, 1)
.Finalize()
.value(),
IndexTransformBuilder<2, 1>()
.input_origin({-kInfIndex, 1})
.input_shape({kInfSize, 5})
.implicit_lower_bounds({1, 0})
.implicit_upper_bounds({1, 0})
.output_constant(0, 1)
.Finalize()
.value(),
{
{{1}, {-100, 1}},
{{1}, {100, 1}},
},
false);
}
TEST(AddNewTest, Labeled) {
TestDimExpression(IndexTransformBuilder<1, 1>()
.input_origin({1})
.input_shape({5})
.input_labels({"a"})
.output_constant(0, 1)
.Finalize()
.value(),
Dims(-1, 0).AddNew().Label("x", "y"),
{2, 0},
IndexTransformBuilder<3, 1>()
.input_origin({-kInfIndex, 1, -kInfIndex})
.input_shape({kInfSize, 5, kInfSize})
.implicit_lower_bounds({1, 0, 1})
.implicit_upper_bounds({1, 0, 1})
.input_labels({"y", "a", "x"})
.output_single_input_dimension(0, 1)
.Finalize()
.value(),
IndexTransformBuilder<3, 1>()
.input_origin({-kInfIndex, 1, -kInfIndex})
.input_shape({kInfSize, 5, kInfSize})
.implicit_lower_bounds({1, 0, 1})
.implicit_upper_bounds({1, 0, 1})
.input_labels({"y", "a", "x"})
.output_constant(0, 1)
.Finalize()
.value(),
{
{{2}, {1, 2, 8}},
{{2}, {5, 2, 9}},
},
false);
}
TEST(AddNewTest, EmptyDimensionSelection) {
const auto transform = IndexTransformBuilder<1, 1>()
.input_origin({1})
.input_shape({5})
.input_labels({"x"})
.output_single_input_dimension(0, 0)
.Finalize()
.value();
TestDimExpression(
transform,
Dims().AddNew(),
{},
transform,
transform,
{
{{2}, {2}},
{{3}, {3}},
},
true);
}
TEST(AddNewTest, InvalidRank) {
TestDimExpressionError(tensorstore::IdentityTransform(31),
Dims(0, 1).AddNew(),
absl::StatusCode::kInvalidArgument,
".*Rank 33 is outside valid range \\[0, 32\\]");
}
} |
563 | cpp | google/tensorstore | translate_op | tensorstore/index_space/internal/translate_op.cc | tensorstore/index_space/translate_op_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_INTERNAL_TRANSLATE_OP_H_
#define TENSORSTORE_INDEX_SPACE_INTERNAL_TRANSLATE_OP_H_
#include "tensorstore/index_space/dimension_index_buffer.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_vector_or_scalar.h"
#include "tensorstore/internal/meta.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_index_space {
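// Distinguishes the three translation operations: TranslateTo shifts the
// origin to a given value, TranslateBy shifts by an offset, and
// TranslateBackwardBy shifts by the negated offset.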
enum class TranslateOpKind {
kTranslateTo,
kTranslateBy,
kTranslateBackwardBy,
};
Result<IndexTransform<>> ApplyTranslate(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
IndexVectorOrScalarView offsets,
TranslateOpKind kind,
bool domain_only = false);
template <typename OffsetOrOriginVector, TranslateOpKind Kind>
struct TranslateOp {
static constexpr bool selected_dimensions_are_new = false;
static constexpr DimensionIndex static_selection_rank =
IsIndexVectorOrScalar<OffsetOrOriginVector>::extent;
constexpr static DimensionIndex GetNewStaticInputRank(
DimensionIndex input_rank, DimensionIndex num_input_dims) {
TENSORSTORE_CONSTEXPR_ASSERT(
(input_rank == dynamic_rank || input_rank >= static_selection_rank) &&
"Number of dimensions must not exceed input rank.");
return input_rank;
}
constexpr static DimensionIndex GetStaticSelectionRank(
DimensionIndex num_input_dims) {
TENSORSTORE_CONSTEXPR_ASSERT(
RankConstraint::EqualOrUnspecified(num_input_dims,
static_selection_rank) &&
"Number of selected dimensions must match number of offsets.");
return num_input_dims == dynamic_rank ? static_selection_rank
: num_input_dims;
}
Result<IndexTransform<>> Apply(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
bool domain_only) const {
return ApplyTranslate(std::move(transform), dimensions,
IndexVectorOrScalarView(offset_or_origin_vector),
Kind, domain_only);
}
OffsetOrOriginVector offset_or_origin_vector;
};
template <typename Offsets>
using TranslateToOp = TranslateOp<Offsets, TranslateOpKind::kTranslateTo>;
template <typename Offsets>
using TranslateByOp = TranslateOp<Offsets, TranslateOpKind::kTranslateBy>;
template <typename Offsets>
using TranslateBackwardByOp =
TranslateOp<Offsets, TranslateOpKind::kTranslateBackwardBy>;
}
}
#endif
#include "tensorstore/index_space/internal/translate_op.h"
#include <algorithm>
#include "absl/status/status.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
namespace {
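// Adjusts the output index maps of `transform` to compensate for shifting
// each input dimension by `input_offsets[input_dim]`, so that corresponding
// input points continue to map to the same output indices.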
absl::Status TranslateOutputOffsetsUsingInputOffsets(
TransformRep* transform, const Index* input_offsets) {
const DimensionIndex output_rank = transform->output_rank;
const DimensionIndex input_rank = transform->input_rank;
span<OutputIndexMap> maps = transform->output_index_maps().first(output_rank);
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
auto& map = maps[output_dim];
switch (map.method()) {
case OutputIndexMethod::single_input_dimension: {
const DimensionIndex input_dim = map.input_dimension();
const Index offset_change = input_offsets[input_dim];
Index new_offset;
if (internal::MulOverflow(offset_change, map.stride(), &new_offset) ||
internal::SubOverflow(map.offset(), new_offset, &map.offset())) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Integer overflow computing output offset for dimension ",
output_dim, "."));
}
break;
}
case OutputIndexMethod::array: {
auto& index_array_data = map.index_array_data();
index_array_data.element_pointer = AddByteOffset(
std::move(index_array_data.element_pointer),
-IndexInnerProduct(input_rank, index_array_data.byte_strides,
input_offsets));
break;
}
case OutputIndexMethod::constant:
break;
}
}
return absl::OkStatus();
}
}
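// Shifts the domains of the selected dimensions according to `kind` and then
// fixes up the output index maps; an offset of `kImplicit` leaves the
// corresponding dimension unchanged.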
Result<IndexTransform<>> ApplyTranslate(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
IndexVectorOrScalarView offsets,
TranslateOpKind kind,
bool domain_only) {
const DimensionIndex num_dims = dimensions->size();
const DimensionIndex input_rank = transform.input_rank();
TENSORSTORE_RETURN_IF_ERROR(CheckIndexVectorSize(offsets, num_dims));
TransformRep::Ptr<> rep = MutableRep(
TransformAccess::rep_ptr<container>(std::move(transform)), domain_only);
const auto input_domain = rep->input_domain(input_rank);
Index input_offsets[kMaxRank];
std::fill_n(&input_offsets[0], input_rank, static_cast<Index>(0));
for (DimensionIndex i = 0; i < num_dims; ++i) {
const DimensionIndex input_dim = (*dimensions)[i];
Index offset = offsets[i];
if (offset == kImplicit) continue;
const IndexInterval old_interval = input_domain[input_dim];
IndexInterval new_interval;
switch (kind) {
case TranslateOpKind::kTranslateTo: {
TENSORSTORE_ASSIGN_OR_RETURN(new_interval,
ShiftIntervalTo(old_interval, offset));
offset = new_interval.inclusive_min() - old_interval.inclusive_min();
break;
}
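      // TranslateBackwardBy negates the offset and then behaves exactly like
      // TranslateBy (note the fallthrough below).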
case TranslateOpKind::kTranslateBackwardBy: {
offset = -offset;
}
[[fallthrough]];
case TranslateOpKind::kTranslateBy: {
TENSORSTORE_ASSIGN_OR_RETURN(new_interval,
ShiftInterval(old_interval, offset));
break;
}
}
input_domain[input_dim] = new_interval;
input_offsets[input_dim] = offset;
}
TENSORSTORE_RETURN_IF_ERROR(
TranslateOutputOffsetsUsingInputOffsets(rep.get(), &input_offsets[0]));
internal_index_space::DebugCheckInvariants(rep.get());
return TransformAccess::Make<IndexTransform<>>(std::move(rep));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <limits>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::AllDims;
using ::tensorstore::DimensionIndex;
using ::tensorstore::Dims;
using ::tensorstore::Index;
using ::tensorstore::IndexDomainBuilder;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kImplicit;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::kMaxFiniteIndex;
using ::tensorstore::kMinFiniteIndex;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::span;
using ::tensorstore::internal_index_space::EquivalentIndices;
using ::tensorstore::internal_index_space::TestDimExpression;
using ::tensorstore::internal_index_space::TestDimExpressionError;
TEST(TranslateByTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform =
IndexTransformBuilder<3, 3>()
.input_origin({11, 2, 23})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_single_input_dimension(0, -10, 1, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, -20, 1, 2)
.Finalize()
.value();
const EquivalentIndices equivalent_indices = {
{{2, 3, 3}, {12, 3, 23}},
};
TestDimExpression(original_transform,
Dims(0, 2).TranslateBy({10, 20}),
{0, 2},
expected_new_transform,
expected_new_transform,
equivalent_indices);
TestDimExpression(original_transform,
Dims("x", "z").TranslateBy({10, 20}),
{0, 2},
expected_new_transform,
expected_new_transform,
equivalent_indices);
}
TEST(TranslateBackwardByTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform =
IndexTransformBuilder<3, 3>()
.input_origin({-9, 2, -17})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_single_input_dimension(0, 10, 1, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 20, 1, 2)
.Finalize()
.value();
const EquivalentIndices equivalent_indices = {
{{2, 3, 3}, {-8, 3, -17}},
};
TestDimExpression(original_transform,
Dims(0, 2).TranslateBackwardBy({10, 20}),
{0, 2},
expected_new_transform,
expected_new_transform,
equivalent_indices);
TestDimExpression(original_transform,
Dims("x", "z").TranslateBackwardBy({10, 20}),
{0, 2},
expected_new_transform,
expected_new_transform,
equivalent_indices);
}
TEST(TranslateToTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform =
IndexTransformBuilder<3, 3>()
.input_origin({10, 2, 20})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_single_input_dimension(0, -9, 1, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, -17, 1, 2)
.Finalize()
.value();
const EquivalentIndices equivalent_indices = {
{{2, 3, 3}, {11, 3, 20}},
};
TestDimExpression(original_transform,
Dims(0, 2).TranslateTo({10, 20}),
{0, 2},
expected_new_transform,
expected_new_transform,
equivalent_indices);
TestDimExpression(original_transform,
Dims(0, 2).TranslateTo({10, 20}),
{0, 2},
expected_new_transform,
expected_new_transform,
equivalent_indices);
}
TEST(TranslateByTest, OneDimensionalConstant) {
TestDimExpression(
IndexTransformBuilder<1, 1>()
.output_constant(0, 2)
.Finalize()
.value(),
AllDims().TranslateBy(5),
{0},
IndexTransformBuilder<1, 1>()
.output_single_input_dimension(0, -5, 1, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.output_constant(0, 2)
.Finalize()
.value(),
{{{4}, {9}}});
}
TEST(TranslateByTest, OneDimensionalSingleInputDimension) {
TestDimExpression(
IndexTransformBuilder<1, 1>()
.output_single_input_dimension(0, 2, 3, 0)
.Finalize()
.value(),
AllDims().TranslateBy(5),
{0},
IndexTransformBuilder<1, 1>()
.output_single_input_dimension(0, -5, 1, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.output_single_input_dimension(0, 2 - 3 * 5, 3, 0)
.Finalize()
.value(),
{{{4}, {9}}});
}
TEST(TranslateByTest, OneDimensionalSingleInputDimensionImplicit) {
TestDimExpression(
IndexTransformBuilder<1, 1>()
.output_single_input_dimension(0, 2, 3, 0)
.Finalize()
.value(),
AllDims().TranslateBy(kImplicit),
{0},
IndexTransformBuilder<1, 1>()
.output_identity_transform()
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.output_single_input_dimension(0, 2, 3, 0)
.Finalize()
.value(),
{{{4}, {4}}});
}
TEST(TranslateByTest, OneDimensionalIndexArray) {
TestDimExpression(
IndexTransformBuilder<1, 1>()
.input_origin({-2})
.input_shape({5})
.output_index_array(0, 2, 3, MakeArray<Index>({6, 7, 8, 9, 10}))
.Finalize()
.value(),
AllDims().TranslateBy(5),
{0},
IndexTransformBuilder<1, 1>()
.input_origin({3})
.input_shape({5})
.output_single_input_dimension(0, -5, 1, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.input_origin({3})
.input_shape({5})
.output_index_array(0, 2, 3, MakeArray<Index>({6, 7, 8, 9, 10}))
.Finalize()
.value(),
{{{1}, {6}}});
}
TEST(TranslateByTest, AllDimsUniform) {
TestDimExpression(
IndexTransformBuilder<3, 5>()
.input_origin({-kInfIndex, 5, -kInfIndex})
.input_shape({kInfSize, 30, kInfIndex + 10})
.output_single_input_dimension(0, 1, 4, 0)
.output_single_input_dimension(1, 2, 5, 0)
.output_constant(2, 3)
.output_single_input_dimension(3, 4, 7, 1)
.output_single_input_dimension(4, 5, 8, 2)
.Finalize()
.value(),
AllDims().TranslateBy(5),
{0, 1, 2},
IndexTransformBuilder<3, 3>()
.input_origin({-kInfIndex, 10, -kInfIndex})
.input_shape({kInfSize, 30, kInfIndex + 15})
.output_single_input_dimension(0, -5, 1, 0)
.output_single_input_dimension(1, -5, 1, 1)
.output_single_input_dimension(2, -5, 1, 2)
.Finalize()
.value(),
IndexTransformBuilder<3, 5>()
.input_origin({-kInfIndex, 10, -kInfIndex})
.input_shape({kInfSize, 30, kInfIndex + 15})
.output_single_input_dimension(0, 1 - 4 * 5, 4, 0)
.output_single_input_dimension(1, 2 - 5 * 5, 5, 0)
.output_constant(2, 3)
.output_single_input_dimension(3, 4 - 7 * 5, 7, 1)
.output_single_input_dimension(4, 5 - 8 * 5, 8, 2)
.Finalize()
.value(),
{{{4, 5, 6}, {4 + 5, 5 + 5, 6 + 5}}});
}
TEST(TranslateByTest, ErrorHandling) {
TestDimExpressionError(
IndexTransformBuilder<1, 1>().Finalize().value(),
AllDims().TranslateBy(span<const Index>({1, 2})),
absl::StatusCode::kInvalidArgument,
"Number of dimensions \\(1\\) does not match number of "
"indices \\(2\\)");
TestDimExpressionError(IndexTransformBuilder<1, 1>()
.input_origin({kMinFiniteIndex})
.input_shape({10})
.Finalize()
.value(),
AllDims().TranslateBy(-kInfIndex),
absl::StatusCode::kInvalidArgument,
".* is outside valid range .*");
TestDimExpressionError(IndexTransformBuilder<1, 1>()
.input_origin({kMinFiniteIndex})
.input_shape({10})
.Finalize()
.value(),
AllDims().TranslateBy(-1),
absl::StatusCode::kInvalidArgument,
".* is outside valid range .*");
TestDimExpressionError(IndexTransformBuilder<1, 1>()
.input_origin({kMaxFiniteIndex - 1})
.input_shape({2})
.Finalize()
.value(),
AllDims().TranslateBy(1),
absl::StatusCode::kInvalidArgument,
".* is outside valid range .*");
TestDimExpressionError(IndexTransformBuilder<1, 1>()
.output_single_input_dimension(
0, std::numeric_limits<Index>::min(), 1, 0)
.Finalize()
.value(),
AllDims().TranslateBy(1),
absl::StatusCode::kInvalidArgument,
"Integer overflow computing output offset .*");
}
TEST(TranslateByTest, DimSubsetUniform) {
TestDimExpression(IndexTransformBuilder<3, 2>()
.input_origin({1, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2, 2, 2)
.Finalize()
.value(),
Dims(0, 2).TranslateBy(5),
{0, 2},
IndexTransformBuilder<3, 3>()
.input_origin({6, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7 + 5})
.output_single_input_dimension(0, -5, 1, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, -5, 1, 2)
.Finalize()
.value(),
IndexTransformBuilder<3, 2>()
.input_origin({6, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7 + 5})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2 - 2 * 5, 2, 2)
.Finalize()
.value(),
{{{4, 5, 6}, {4 + 5, 5, 6 + 5}}});
}
TEST(TranslateByTest, DimSubsetNonUniform) {
TestDimExpression(IndexTransformBuilder<3, 2>()
.input_origin({1, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2, 2, 2)
.Finalize()
.value(),
Dims(0, 2).TranslateBy({5, 6}),
{0, 2},
IndexTransformBuilder<3, 3>()
.input_origin({6, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7 + 6})
.output_single_input_dimension(0, -5, 1, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, -6, 1, 2)
.Finalize()
.value(),
IndexTransformBuilder<3, 2>()
.input_origin({6, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7 + 6})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2 - 2 * 6, 2, 2)
.Finalize()
.value(),
{{{3, 4, 5}, {3 + 5, 4, 5 + 6}}});
}
TEST(TranslateToTest, OneDimensionalConstant) {
TestDimExpression(IndexTransformBuilder<1, 1>()
.input_origin({5})
.input_shape({10})
.output_constant(0, 2)
.Finalize()
.value(),
AllDims().TranslateTo(8),
{0},
IndexTransformBuilder<1, 1>()
.input_origin({8})
.input_shape({10})
.output_single_input_dimension(0, -3, 1, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.input_origin({8})
.input_shape({10})
.output_constant(0, 2)
.Finalize()
.value(),
{{{7}, {10}}});
}
TEST(TranslateToTest, OneDimensionalSingleInputDimension) {
TestDimExpression(IndexTransformBuilder<1, 1>()
.input_origin({4})
.input_shape({10})
.output_single_input_dimension(0, 2, 3, 0)
.Finalize()
.value(),
AllDims().TranslateTo(5),
{0},
IndexTransformBuilder<1, 1>()
.input_origin({5})
.input_shape({10})
.output_single_input_dimension(0, -1, 1, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.input_origin({5})
.input_shape({10})
.output_single_input_dimension(0, 2 - 3, 3, 0)
.Finalize()
.value(),
{{{6}, {7}}});
}
TEST(TranslateToTest, OneDimensionalSingleInputDimensionImplicit) {
TestDimExpression(IndexTransformBuilder<1, 1>()
.input_origin({4})
.input_shape({10})
.output_single_input_dimension(0, 2, 3, 0)
.Finalize()
.value(),
AllDims().TranslateTo(kImplicit),
{0},
IndexTransformBuilder<1, 1>()
.input_origin({4})
.input_shape({10})
.output_single_input_dimension(0, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 1>()
.input_origin({4})
.input_shape({10})
.output_single_input_dimension(0, 2, 3, 0)
.Finalize()
.value(),
{{{6}, {6}}});
}
TEST(TranslateToTest, TwoDimensionalSingleInputDimensionOneImplicit) {
TestDimExpression(IndexTransformBuilder<2, 2>()
.input_origin({4, 5})
.input_shape({10, 11})
.output_single_input_dimension(0, 2, 3, 0)
.output_single_input_dimension(1, 4, 5, 1)
.Finalize()
.value(),
AllDims().TranslateTo({kImplicit, 10}),
{0, 1},
IndexTransformBuilder<2, 2>()
.input_origin({4, 10})
.input_shape({10, 11})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, -5, 1, 1)
.Finalize()
.value(),
IndexTransformBuilder<2, 2>()
.input_origin({4, 10})
.input_shape({10, 11})
.output_single_input_dimension(0, 2, 3, 0)
.output_single_input_dimension(1, -25 + 4, 5, 1)
.Finalize()
.value(),
{{{6, 7}, {6, 12}}});
}
TEST(TranslateToTest, ErrorHandling) {
TestDimExpressionError(IndexTransformBuilder<1, 1>().Finalize().value(),
AllDims().TranslateTo(1),
absl::StatusCode::kInvalidArgument,
"Interval \\(-inf, \\+inf\\) is not bounded below");
TestDimExpressionError(
IndexTransformBuilder<1, 1>()
.input_origin({-5})
.input_shape({10})
.Finalize()
.value(),
AllDims().TranslateTo(std::numeric_limits<Index>::max()),
absl::StatusCode::kOutOfRange, "Origin [0-9]+ is outside valid range .*");
}
TEST(TranslateToTest, IndexDomain) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain,
IndexDomainBuilder<3>().origin({1, 2, 3}).shape({6, 7, 8}).Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto translated_domain,
IndexDomainBuilder<3>().origin({4, 5, 6}).shape({6, 7, 8}).Finalize());
EXPECT_THAT(domain | AllDims().TranslateTo({4, 5, 6}),
::testing::Optional(translated_domain));
}
TEST(TranslateToTest, IndexDomainOverflow) {
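  // Translating the full transform must adjust the output offset, which
  // overflows here; translating just the domain skips the output index maps
  // and therefore succeeds.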
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
IndexTransformBuilder(1, 1)
.input_shape({10})
.output_single_input_dimension(0, kMaxFiniteIndex, kMaxFiniteIndex, 0)
.Finalize());
auto domain = transform.domain();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto translated_domain,
IndexDomainBuilder(1).origin({-5}).shape({10}).Finalize());
EXPECT_THAT(transform | AllDims().TranslateTo({-5}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(domain | AllDims().TranslateTo({-5}),
::testing::Optional(translated_domain));
}
} |
564 | cpp | google/tensorstore | single_index_slice_op | tensorstore/index_space/internal/single_index_slice_op.cc | tensorstore/index_space/single_index_slice_op_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_INTERNAL_SINGLE_INDEX_SLICE_OP_H_
#define TENSORSTORE_INDEX_SPACE_INTERNAL_SINGLE_INDEX_SLICE_OP_H_
#include "tensorstore/index_space/dimension_index_buffer.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_vector_or_scalar.h"
#include "tensorstore/internal/meta.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_index_space {
Result<IndexTransform<>> ApplySingleIndexSlice(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
IndexVectorOrScalarView indices,
bool domain_only);
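// DimExpression operation implementing `IndexSlice()`: each selected
// dimension is fixed to a single index and removed from the domain.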
template <typename Indices>
struct SingleIndexSliceOp {
static constexpr bool selected_dimensions_are_new = false;
static constexpr DimensionIndex static_selection_rank =
IsIndexVectorOrScalar<Indices>::extent;
constexpr static DimensionIndex GetNewStaticInputRank(
DimensionIndex input_rank, DimensionIndex NumSelectedDims) {
TENSORSTORE_CONSTEXPR_ASSERT(
(input_rank == dynamic_rank || input_rank >= static_selection_rank) &&
"Number of dimensions must not exceed input rank.");
return RankConstraint::Subtract(
input_rank,
RankConstraint::And(NumSelectedDims, static_selection_rank));
}
constexpr static DimensionIndex GetStaticSelectionRank(
DimensionIndex num_input_dims) {
TENSORSTORE_CONSTEXPR_ASSERT(
RankConstraint::EqualOrUnspecified(num_input_dims,
static_selection_rank) &&
"Number of selected dimensions must match number of indices.");
return 0;
}
Result<IndexTransform<>> Apply(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
bool domain_only) const {
return ApplySingleIndexSlice(std::move(transform), dimensions,
IndexVectorOrScalarView(indices), domain_only);
}
Indices indices;
};
}
}
#endif
#include "tensorstore/index_space/internal/single_index_slice_op.h"
#include "absl/status/status.h"
#include "tensorstore/index_space/internal/transform_rep_impl.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
namespace {
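// Per-dimension slicing record: `new_input_dim` is the dimension's position
// in the sliced transform, or -1 if the dimension is being sliced away, in
// which case `offset` holds the slice index.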
struct InputDimensionSingletonSliceInfo {
DimensionIndex new_input_dim;
Index offset;
};
struct SingletonSlicingInfo {
explicit SingletonSlicingInfo(DimensionIndex original_input_rank,
DimensionIndex new_input_rank)
: original_input_rank(original_input_rank),
new_input_rank(new_input_rank) {
std::fill_n(&original_input_dimension_info[0], original_input_rank,
InputDimensionSingletonSliceInfo{0, 0});
}
DimensionIndex original_input_rank;
DimensionIndex new_input_rank;
InputDimensionSingletonSliceInfo original_input_dimension_info[kMaxRank];
};
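// Validates the slice indices against the effective domains of the selected
// dimensions and computes the mapping from original to new input dimensions.
// Clears `dimensions_buffer` on success, since no selected dimensions remain.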
Result<SingletonSlicingInfo> GetSingletonSlicingInfo(
TransformRep* original, DimensionIndexBuffer* dimensions_buffer,
IndexVectorOrScalarView indices) {
const span<const DimensionIndex> dimensions(*dimensions_buffer);
const DimensionIndex num_dims = dimensions.size();
const DimensionIndex original_input_rank = original->input_rank;
const DimensionIndex new_input_rank = original_input_rank - num_dims;
TENSORSTORE_RETURN_IF_ERROR(CheckIndexVectorSize(indices, num_dims));
Result<SingletonSlicingInfo> result(tensorstore::in_place,
original_input_rank, new_input_rank);
const Index* indices_pointer =
indices.pointer ? indices.pointer : &indices.size_or_scalar;
const Index indices_stride = indices.pointer ? 1 : 0;
std::string slice_error;
for (DimensionIndex i = 0; i < num_dims; ++i) {
const DimensionIndex original_input_dim = dimensions[i];
const Index index = indices_pointer[i * indices_stride];
const auto domain = original->input_dimension(original_input_dim)
.optionally_implicit_domain();
if (!Contains(domain.effective_interval(), index)) {
tensorstore::StrAppend(&slice_error, (slice_error.empty() ? "" : ", "),
"in input dimension ", original_input_dim,
" index ", index, " is outside valid domain ",
domain);
}
result->original_input_dimension_info[original_input_dim] =
InputDimensionSingletonSliceInfo{-1, index};
}
if (!slice_error.empty()) {
result = absl::OutOfRangeError(
tensorstore::StrCat("Slice mismatch: ", slice_error));
return result;
}
for (DimensionIndex original_input_dim = 0, new_input_dim = 0;
original_input_dim < original_input_rank; ++original_input_dim) {
auto& new_dim =
result->original_input_dimension_info[original_input_dim].new_input_dim;
if (new_dim == -1) continue;
new_dim = new_input_dim;
++new_input_dim;
}
dimensions_buffer->clear();
return result;
}
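// Rewrites the output index maps for the sliced transform: maps that depend
// only on sliced dimensions fold the slice index into a constant offset
// (with overflow checks), and index array maps are rebased by the byte
// offset of the slice or replaced by constants when possible.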
absl::Status PerformSingleIndexSlice(TransformRep* original_transform,
TransformRep* new_transform,
const SingletonSlicingInfo& info,
bool domain_only) {
const DimensionIndex original_input_rank = original_transform->input_rank;
const DimensionIndex new_input_rank = info.new_input_rank;
span<const InputDimensionSingletonSliceInfo> original_input_dimension_info =
info.original_input_dimension_info;
bool domain_is_explicitly_empty = false;
for (DimensionIndex original_input_dim = 0, new_input_dim = 0;
original_input_dim < original_input_rank; ++original_input_dim) {
if (original_input_dimension_info[original_input_dim].new_input_dim < 0)
continue;
const InputDimensionRef new_dim_ref =
new_transform->input_dimension(new_input_dim);
new_dim_ref = original_transform->input_dimension(original_input_dim);
if (new_dim_ref.domain().empty() && !new_dim_ref.implicit_lower_bound() &&
!new_dim_ref.implicit_upper_bound()) {
domain_is_explicitly_empty = true;
}
++new_input_dim;
}
const DimensionIndex output_rank =
domain_only ? 0 : original_transform->output_rank;
span<const OutputIndexMap> original_maps =
original_transform->output_index_maps().first(output_rank);
span<OutputIndexMap> new_maps =
new_transform->output_index_maps().first(output_rank);
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
const OutputIndexMap& original_map = original_maps[output_dim];
OutputIndexMap& new_map = new_maps[output_dim];
switch (original_map.method()) {
case OutputIndexMethod::constant: {
new_map.offset() = original_map.offset();
new_map.SetConstant();
new_map.stride() = 0;
break;
}
case OutputIndexMethod::single_input_dimension: {
const DimensionIndex original_input_dim =
original_map.input_dimension();
assert(original_input_dim >= 0 &&
original_input_dim < original_input_rank);
const auto slice_info =
original_input_dimension_info[original_input_dim];
const Index output_stride = original_map.stride();
const Index output_offset = original_map.offset();
if (slice_info.new_input_dim == -1) {
Index new_offset;
if (internal::MulOverflow(slice_info.offset, output_stride,
&new_offset) ||
internal::AddOverflow(new_offset, output_offset,
&new_map.offset())) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Integer overflow computing offset for output dimension ",
output_dim, "."));
}
new_map.SetConstant();
new_map.stride() = 0;
} else {
new_map.SetSingleInputDimension(slice_info.new_input_dim);
new_map.stride() = output_stride;
new_map.offset() = output_offset;
}
break;
}
case OutputIndexMethod::array: {
if (domain_is_explicitly_empty) {
new_map.SetConstant();
new_map.offset() = 0;
new_map.stride() = 0;
break;
}
const IndexArrayData& original_index_array_data =
original_map.index_array_data();
IndexArrayData& new_index_array_data =
new_map.SetArrayIndexing(new_input_rank);
new_index_array_data.index_range =
original_index_array_data.index_range;
Index array_byte_offset = 0;
bool has_non_zero_byte_strides = false;
for (DimensionIndex original_input_dim = 0;
original_input_dim < original_input_rank; ++original_input_dim) {
const auto slice_info =
original_input_dimension_info[original_input_dim];
const Index byte_stride =
original_index_array_data.byte_strides[original_input_dim];
if (slice_info.new_input_dim == -1) {
array_byte_offset = internal::wrap_on_overflow::Add(
array_byte_offset, internal::wrap_on_overflow::Multiply(
byte_stride, slice_info.offset));
} else {
new_index_array_data.byte_strides[slice_info.new_input_dim] =
byte_stride;
if (byte_stride != 0) has_non_zero_byte_strides = true;
}
}
Index output_stride = original_map.stride();
Index output_offset = original_map.offset();
if (has_non_zero_byte_strides) {
new_index_array_data.element_pointer = AddByteOffset(
original_index_array_data.element_pointer, array_byte_offset);
} else {
TENSORSTORE_RETURN_IF_ERROR(ReplaceZeroRankIndexArrayIndexMap(
original_index_array_data.element_pointer
.byte_strided_pointer()[array_byte_offset],
new_index_array_data.index_range, &output_offset,
&output_stride));
new_map.SetConstant();
}
new_map.stride() = output_stride;
new_map.offset() = output_offset;
break;
}
}
}
new_transform->input_rank = new_input_rank;
new_transform->output_rank = output_rank;
NormalizeImplicitBounds(*new_transform);
internal_index_space::DebugCheckInvariants(new_transform);
return absl::OkStatus();
}
}
Result<IndexTransform<>> ApplySingleIndexSlice(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
IndexVectorOrScalarView indices,
bool domain_only) {
TransformRep* rep = TransformAccess::rep(transform);
auto slicing_info = GetSingletonSlicingInfo(rep, dimensions, indices);
if (!slicing_info) return slicing_info.status();
auto new_rep = NewOrMutableRep(rep, slicing_info->new_input_rank,
rep->output_rank, domain_only);
TENSORSTORE_RETURN_IF_ERROR(
PerformSingleIndexSlice(rep, new_rep.get(), *slicing_info, domain_only));
return TransformAccess::Make<IndexTransform<>>(new_rep);
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <limits>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::AllDims;
using ::tensorstore::DimensionIndex;
using ::tensorstore::Dims;
using ::tensorstore::Index;
using ::tensorstore::IndexDomainBuilder;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kInfIndex;
using ::tensorstore::MakeArray;
using ::tensorstore::span;
using ::tensorstore::internal_index_space::EquivalentIndices;
using ::tensorstore::internal_index_space::TestDimExpression;
using ::tensorstore::internal_index_space::TestDimExpressionError;
using ::tensorstore::internal_index_space::TestDimExpressionErrorTransformOnly;
TEST(SingleIndexSliceTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform = IndexTransformBuilder<1, 3>()
.input_origin({2})
.input_shape({4})
.input_labels({"y"})
.output_constant(0, 2)
.output_single_input_dimension(1, 0)
.output_constant(2, 4)
.Finalize()
.value();
const EquivalentIndices equivalent_indices = {
{{2, 3, 4}, {3}},
};
TestDimExpression(original_transform,
Dims(0, 2).IndexSlice({2, 4}),
{},
expected_new_transform,
expected_new_transform,
equivalent_indices);
TestDimExpression(original_transform,
Dims("x", "z").IndexSlice({2, 4}),
{},
expected_new_transform,
expected_new_transform,
equivalent_indices);
}
TEST(SingleIndexSliceTest, ImplicitLowerBound) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.implicit_lower_bounds({1, 1, 0})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform = IndexTransformBuilder<1, 3>()
.input_origin({2})
.implicit_lower_bounds({1})
.input_shape({4})
.input_labels({"y"})
.output_constant(0, -7)
.output_single_input_dimension(1, 0)
.output_constant(2, 4)
.Finalize()
.value();
const EquivalentIndices equivalent_indices = {
{{-7, 3, 4}, {3}},
};
TestDimExpression(original_transform,
Dims(0, 2).IndexSlice({-7, 4}),
{},
expected_new_transform,
expected_new_transform,
equivalent_indices);
}
TEST(SingleIndexSliceTest, DimSubsetUniformIndexArrayRetained) {
TestDimExpression(
IndexTransformBuilder<3, 4>()
.input_origin({1, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7})
.output_single_input_dimension(0, 1, 4, 0)
.output_single_input_dimension(1, 2, 3, 2)
.output_constant(2, 3)
.output_index_array(
3, 4, 1,
MakeArray<Index>({{{5}, {6}, {7}, {8}, {9}},
{{15}, {16}, {17}, {18}, {19}},
{{25}, {26}, {27}, {28}, {29}},
{{35}, {36}, {37}, {38}, {39}}}))
.Finalize()
.value(),
Dims(1, 2).IndexSlice(3),
{},
IndexTransformBuilder<1, 3>()
.input_origin({1})
.input_shape({4})
.output_single_input_dimension(0, 0)
.output_constant(1, 3)
.output_constant(2, 3)
.Finalize()
.value(),
IndexTransformBuilder<1, 4>()
.input_origin({1})
.input_shape({4})
.output_single_input_dimension(0, 1, 4, 0)
.output_constant(1, 2 + 3 * 3)
.output_constant(2, 3)
.output_index_array(3, 4, 1,
MakeArray<Index>({6, 16, 26, 36}))
.Finalize()
.value(),
{{{4, 3, 3}, {4}}});
}
TEST(SingleIndexSliceTest, DimSubsetUniformIndexArrayEliminated) {
TestDimExpression(
IndexTransformBuilder<3, 4>()
.input_origin({1, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7})
.output_single_input_dimension(0, 1, 4, 0)
.output_single_input_dimension(1, 2, 3, 2)
.output_constant(2, 3)
.output_index_array(
3, 4, 1,
MakeArray<Index>({{{5}, {6}, {7}, {8}, {9}}}))
.Finalize()
.value(),
Dims(1, 2).IndexSlice(3),
{},
IndexTransformBuilder<1, 3>()
.input_origin({1})
.input_shape({4})
.output_single_input_dimension(0, 0)
.output_constant(1, 3)
.output_constant(2, 3)
.Finalize()
.value(),
IndexTransformBuilder<1, 4>()
.input_origin({1})
.input_shape({4})
.output_single_input_dimension(0, 1, 4, 0)
.output_constant(1, 2 + 3 * 3)
.output_constant(2, 3)
.output_constant(3, 4 + 1 * 6)
.Finalize()
.value(),
{{{4, 3, 3}, {4}}});
}
TEST(SingleIndexSliceTest, DimSubsetNonUniform) {
TestDimExpression(
IndexTransformBuilder<3, 4>()
.input_origin({1, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7})
.output_single_input_dimension(0, 1, 4, 0)
.output_single_input_dimension(1, 2, 3, 2)
.output_constant(2, 3)
.output_index_array(
3, 4, 1,
MakeArray<Index>({{{5}, {6}, {7}, {8}, {9}},
{{15}, {16}, {17}, {18}, {19}},
{{25}, {26}, {27}, {28}, {29}},
{{35}, {36}, {37}, {38}, {39}}}))
.Finalize()
.value(),
Dims(1, 2).IndexSlice({3, 4}),
{},
IndexTransformBuilder<1, 3>()
.input_origin({1})
.input_shape({4})
.output_single_input_dimension(0, 0)
.output_constant(1, 3)
.output_constant(2, 4)
.Finalize()
.value(),
IndexTransformBuilder<1, 4>()
.input_origin({1})
.input_shape({4})
.output_single_input_dimension(0, 1, 4, 0)
.output_constant(1, 2 + 4 * 3)
.output_constant(2, 3)
.output_index_array(3, 4, 1,
MakeArray<Index>({6, 16, 26, 36}))
.Finalize()
.value(),
{{{4, 3, 4}, {4}}});
}
TEST(SingleIndexSliceTest, DimSubsetNonUniformLabeled) {
TestDimExpression(
IndexTransformBuilder<3, 4>()
.input_origin({1, 2, -kInfIndex})
.input_shape({4, 5, kInfIndex + 7})
.input_labels({"x", "y", "z"})
.output_single_input_dimension(0, 1, 4, 0)
.output_single_input_dimension(1, 2, 3, 2)
.output_constant(2, 3)
.output_index_array(
3, 4, 1,
MakeArray<Index>({{{5}, {6}, {7}, {8}, {9}},
{{15}, {16}, {17}, {18}, {19}},
{{25}, {26}, {27}, {28}, {29}},
{{35}, {36}, {37}, {38}, {39}}}))
.Finalize()
.value(),
Dims(1, 2).IndexSlice({3, 4}),
{},
IndexTransformBuilder<1, 3>()
.input_origin({1})
.input_shape({4})
.input_labels({"x"})
.output_single_input_dimension(0, 0)
.output_constant(1, 3)
.output_constant(2, 4)
.Finalize()
.value(),
IndexTransformBuilder<1, 4>()
.input_origin({1})
.input_shape({4})
.input_labels({"x"})
.output_single_input_dimension(0, 1, 4, 0)
.output_constant(1, 2 + 4 * 3)
.output_constant(2, 3)
.output_index_array(3, 4, 1,
MakeArray<Index>({6, 16, 26, 36}))
.Finalize()
.value(),
{{{4, 3, 4}, {4}}});
}
TEST(SingleIndexSliceTest, EmptyDomain) {
TestDimExpression(
IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({0, 3})
.input_labels({"x", "y"})
.output_single_input_dimension(0, 2, 7, 0)
.output_index_array(1, 4, 3,
MakeArray<Index>({{1, 2, 3}}))
.Finalize()
.value(),
Dims(1).IndexSlice({3}),
{},
IndexTransformBuilder<1, 2>()
.input_origin({1})
.input_shape({0})
.input_labels({"x"})
.output_single_input_dimension(0, 0)
.output_constant(1, 3)
.Finalize()
.value(),
IndexTransformBuilder<1, 2>()
.input_origin({1})
.input_shape({0})
.input_labels({"x"})
.output_single_input_dimension(0, 2, 7, 0)
.output_constant(1, 0)
.Finalize()
.value(),
{});
}
TEST(ErrorHandlingTest, DimensionSelectionRankMismatch) {
TestDimExpressionError(IndexTransformBuilder<1, 1>().Finalize().value(),
AllDims().IndexSlice(span<const Index>({1, 2})),
absl::StatusCode::kInvalidArgument,
"Number of dimensions .* does not match number of "
"indices .*");
}
TEST(ErrorHandlingTest, OutOfBounds) {
TestDimExpressionError(IndexTransformBuilder<1, 1>()
.input_origin({-10})
.input_shape({15})
.Finalize()
.value(),
AllDims().IndexSlice({5}),
absl::StatusCode::kOutOfRange,
"Slice mismatch: .* is outside valid domain .*");
}
TEST(ErrorHandlingTest, OutOfBoundsInfinity) {
TestDimExpressionError(IndexTransformBuilder<1, 1>()
.input_origin({-kInfIndex})
.input_shape({15})
.Finalize()
.value(),
AllDims().IndexSlice({-kInfIndex}),
absl::StatusCode::kOutOfRange,
"Slice mismatch: .* is outside valid domain .*");
}
TEST(ErrorHandlingTest, SingleInputDimensionMapIntegerOverflow) {
TestDimExpressionErrorTransformOnly(
IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({10})
.output_single_input_dimension(0, std::numeric_limits<Index>::max(),
1, 0)
.Finalize()
.value(),
AllDims().IndexSlice({1}), absl::StatusCode::kInvalidArgument,
"Integer overflow computing offset for output dimension.*",
IndexDomainBuilder<0>().Finalize().value());
}
TEST(ErrorHandlingTest, IndexArrayMapIntegerOverflow) {
TestDimExpressionErrorTransformOnly(
IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({3})
.output_index_array(0, std::numeric_limits<Index>::max(), 1,
MakeArray<Index>({0, 1, 2}))
.Finalize()
.value(),
AllDims().IndexSlice({1}), absl::StatusCode::kInvalidArgument,
"Integer overflow computing offset for output dimension.*",
IndexDomainBuilder<0>().Finalize().value());
}
TEST(ErrorHandlingTest, IndexArrayMapOutOfBounds) {
TestDimExpressionErrorTransformOnly(
IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({3})
.output_index_array(0, 0, 1, MakeArray<Index>({0, 1, 2}),
IndexInterval::Closed(-5, -3))
.Finalize()
.value(),
AllDims().IndexSlice({1}), absl::StatusCode::kOutOfRange,
"Index .* is outside valid range .*",
IndexDomainBuilder<0>().Finalize().value());
}
} |
565 | cpp | google/tensorstore | iterate | tensorstore/util/iterate.cc | tensorstore/util/iterate_test.cc | #ifndef TENSORSTORE_INTERNAL_ITERATE_H_
#define TENSORSTORE_INTERNAL_ITERATE_H_
#include <array>
#include <cstddef>
#include "absl/container/inlined_vector.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/util/byte_strided_pointer.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_iterate {
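// Size and per-array byte strides for one iteration dimension over N arrays.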
template <size_t N>
struct DimensionSizeAndStrides {
Index size;
std::array<Index, N> strides;
};
template <size_t N>
using StridedIterationLayout =
absl::InlinedVector<DimensionSizeAndStrides<N>, internal::kNumInlinedDims>;
template <size_t N, DimensionIndex InnerRank>
struct InnerShapeAndStrides {
std::array<Index, InnerRank> shape;
std::array<std::array<Index, InnerRank>, N> strides;
};
}
namespace internal {
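// Precomputes an iteration plan for applying an elementwise function over
// `Arity` strided arrays that share a common shape.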
template <size_t Arity>
class StridedLayoutFunctionApplyer {
static_assert(Arity > 0 && Arity <= kMaxSupportedIterationArity,
"Invalid arity.");
public:
explicit StridedLayoutFunctionApplyer(
span<const Index> shape, std::array<const Index*, Arity> strides,
IterationConstraints constraints,
ElementwiseClosure<Arity, void*> function,
std::array<std::ptrdiff_t, Arity> element_sizes);
explicit StridedLayoutFunctionApplyer(
const Index* shape, span<const DimensionIndex> dimension_order,
std::array<const Index*, Arity> strides,
ElementwiseClosure<Arity, void*> function,
std::array<std::ptrdiff_t, Arity> element_sizes);
bool operator()(std::array<ByteStridedPointer<void>, Arity> pointers,
void* arg) const;
DimensionIndex outer_rank() const {
return static_cast<DimensionIndex>(iteration_layout_.size());
}
Index inner_size() const {
return inner_layout_.shape[0] * inner_layout_.shape[1];
}
private:
struct WrappedFunction;
internal_iterate::StridedIterationLayout<Arity> iteration_layout_;
internal_iterate::InnerShapeAndStrides<Arity, 2> inner_layout_;
void* context_;
SpecializedElementwiseFunctionPointer<Arity, void*> callback_;
};
}
}
#endif
#include "tensorstore/util/iterate.h"
#include <stddef.h>
#include <algorithm>
#include <array>
#include <ostream>
#include <type_traits>
#include "absl/container/inlined_vector.h"
#include "absl/utility/utility.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/util/byte_strided_pointer.h"
#include "tensorstore/util/internal/iterate.h"
#include "tensorstore/util/internal/iterate_impl.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_iterate {
template <size_t Arity>
static bool AreStridesContiguous(
const InnerShapeAndStrides<Arity, 2>& inner_layout,
const std::array<ptrdiff_t, Arity>& element_sizes) {
if (inner_layout.shape[1] > 1) {
for (size_t i = 0; i < Arity; ++i) {
if (inner_layout.strides[i][1] != element_sizes[i]) return false;
}
}
return true;
}
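// Editor's note (not in the original source): this predicate treats the
// layout as contiguous only when, for every array, the innermost byte
// stride equals that array's element size. For example, int32 data with an
// inner byte stride of 4 qualifies as contiguous, while a stride of 8
// (every other element) forces the strided code path.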
absl::InlinedVector<DimensionIndex, internal::kNumInlinedDims>
ComputeStridedLayoutDimensionIterationOrder(IterationConstraints constraints,
span<const Index> shape,
span<const Index* const> strides) {
const DimensionIndex rank = shape.size();
absl::InlinedVector<DimensionIndex, internal::kNumInlinedDims>
dimension_order(rank);
{
DimensionIndex num_dims_preserved = 0;
for (DimensionIndex dim_i = 0; dim_i < rank; ++dim_i) {
const Index size = shape[dim_i];
if (size == 1) continue;
if (size != 0 && constraints.repeated_elements_constraint() ==
skip_repeated_elements) {
for (std::ptrdiff_t i = 0; i < strides.size(); ++i) {
if (strides[i][dim_i] != 0) goto cannot_skip_dimension;
}
continue;
}
cannot_skip_dimension:
dimension_order[num_dims_preserved++] = dim_i;
}
dimension_order.resize(num_dims_preserved);
}
if (constraints.order_constraint()) {
if (constraints.order_constraint() == ContiguousLayoutOrder::fortran) {
std::reverse(dimension_order.begin(), dimension_order.end());
}
} else {
std::sort(dimension_order.begin(), dimension_order.end(),
[&](DimensionIndex a, DimensionIndex b) {
for (ptrdiff_t j = 0; j < strides.size(); ++j) {
const Index stride_a = strides[j][a];
const Index stride_b = strides[j][b];
if (stride_a > stride_b) return true;
if (stride_a < stride_b) return false;
}
return false;
});
}
return dimension_order;
}
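// Worked example (editor's sketch): for shape {3, 1, 4, 5} and byte strides
// {20, 5, 1, 4} with no order constraint, the singleton dimension 1 is
// dropped and the remaining dimensions are sorted by decreasing stride,
// yielding the iteration order {0, 3, 2}; this matches the
// Unconstrained1D1LayoutSingletonDims expectation in the unit test.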
}
namespace internal {
template <size_t Arity>
static SpecializedElementwiseFunctionPointer<Arity, void*>
PickElementwiseFunction(
const internal_iterate::InnerShapeAndStrides<Arity, 2>& inner_layout,
const ElementwiseFunction<Arity, void*>& function,
std::array<std::ptrdiff_t, Arity> element_sizes) {
return function[internal_iterate::AreStridesContiguous(inner_layout,
element_sizes)
? IterationBufferKind::kContiguous
: IterationBufferKind::kStrided];
}
template <size_t Arity>
StridedLayoutFunctionApplyer<Arity>::StridedLayoutFunctionApplyer(
span<const Index> shape, std::array<const Index*, Arity> strides,
IterationConstraints constraints, ElementwiseClosure<Arity, void*> closure,
std::array<std::ptrdiff_t, Arity> element_sizes)
: iteration_layout_(internal_iterate::SimplifyStridedIterationLayout(
constraints, shape, strides)),
inner_layout_(
internal_iterate::ExtractInnerShapeAndStrides<2>(&iteration_layout_)),
context_(closure.context),
callback_(PickElementwiseFunction(inner_layout_, *closure.function,
element_sizes)) {}
template <size_t Arity>
StridedLayoutFunctionApplyer<Arity>::StridedLayoutFunctionApplyer(
const Index* shape, span<const DimensionIndex> dimension_order,
std::array<const Index*, Arity> strides,
ElementwiseClosure<Arity, void*> closure,
std::array<std::ptrdiff_t, Arity> element_sizes)
: iteration_layout_(
internal_iterate::PermuteAndSimplifyStridedIterationLayout(
shape, dimension_order, strides)),
inner_layout_(
internal_iterate::ExtractInnerShapeAndStrides<2>(&iteration_layout_)),
context_(closure.context),
callback_(PickElementwiseFunction(inner_layout_, *closure.function,
element_sizes)) {}
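// Editor's note: both constructors reduce the simplified layout to at most
// two innermost dimensions via ExtractInnerShapeAndStrides<2>, and then
// select the kContiguous specialization only when the innermost strides
// equal the element sizes; all other layouts take the kStrided fallback.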
template <size_t Arity>
struct StridedLayoutFunctionApplyer<Arity>::WrappedFunction {
template <typename... Pointer>
bool operator()(Pointer... pointer) const {
return CallHelper(std::index_sequence_for<Pointer...>(), pointer...);
}
template <size_t... Is>
static bool OuterCallHelper(
const StridedLayoutFunctionApplyer& data, std::index_sequence<Is...>,
std::array<ByteStridedPointer<void>, Arity> pointers, void* arg) {
return internal_iterate::IterateHelper<
WrappedFunction,
std::enable_if_t<true || Is, ByteStridedPointer<void>>...>::
Start(WrappedFunction{data, arg}, data.iteration_layout_,
pointers[Is]...);
}
template <size_t... Is, typename... Pointer>
bool CallHelper(std::index_sequence<Is...>, Pointer... pointer) const {
return data_.callback_(
data_.context_, data_.inner_layout_.shape,
IterationBufferPointer{pointer, data_.inner_layout_.strides[Is][0],
data_.inner_layout_.strides[Is][1]}...,
arg_);
}
const StridedLayoutFunctionApplyer& data_;
void* arg_;
};
template <size_t Arity>
bool StridedLayoutFunctionApplyer<Arity>::operator()(
std::array<ByteStridedPointer<void>, Arity> pointers, void* arg) const {
return WrappedFunction::OuterCallHelper(
*this, std::make_index_sequence<Arity>(), pointers, arg);
}
template <size_t Arity>
bool IterateOverStridedLayouts(
ElementwiseClosure<Arity, void*> closure, void* arg,
span<const Index> shape,
std::array<ByteStridedPointer<void>, Arity> pointers,
std::array<const Index*, Arity> strides, IterationConstraints constraints,
std::array<std::ptrdiff_t, Arity> element_sizes) {
return StridedLayoutFunctionApplyer<Arity>(
shape, strides, constraints, closure, element_sizes)(pointers, arg);
}
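// Minimal usage sketch (editor's addition; the closure construction is
// elided because ElementwiseClosure instances are normally produced by
// higher-level helpers, so treat this as illustrative pseudocode only):
//
//   const Index shape[] = {2, 3};
//   const Index byte_strides[] = {12, 4};  // row-major int32
//   IterateOverStridedLayouts<1>(closure, /*arg=*/nullptr, shape,
//                                {{data}}, {{byte_strides}},
//                                /*constraints=*/{}, {{sizeof(int32_t)}});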
#define TENSORSTORE_DO_INSTANTIATE_ITERATE(Arity) \
template class StridedLayoutFunctionApplyer<Arity>; \
template bool IterateOverStridedLayouts<Arity>( \
ElementwiseClosure<Arity, void*> closure, void* arg, \
span<const Index> shape, \
std::array<ByteStridedPointer<void>, Arity> pointers, \
std::array<const Index*, Arity> strides, \
IterationConstraints constraints, \
std::array<std::ptrdiff_t, Arity> element_sizes);
TENSORSTORE_INTERNAL_FOR_EACH_ARITY(TENSORSTORE_DO_INSTANTIATE_ITERATE)
#undef TENSORSTORE_DO_INSTANTIATE_ITERATE
}
} | #include "tensorstore/util/internal/iterate.h"
#include <array>
#include <tuple>
#include <type_traits>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/inlined_vector.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/index.h"
#include "tensorstore/util/internal/iterate_impl.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::c_order;
using ::tensorstore::ContiguousLayoutOrder;
using ::tensorstore::DimensionIndex;
using ::tensorstore::fortran_order;
using ::tensorstore::include_repeated_elements;
using ::tensorstore::Index;
using ::tensorstore::IterationConstraints;
using ::tensorstore::LayoutOrderConstraint;
using ::tensorstore::skip_repeated_elements;
using ::tensorstore::span;
using ::tensorstore::internal::AdvanceIndices;
using ::tensorstore::internal::DefaultIterationResult;
using ::tensorstore::internal_iterate::
ComputeStridedLayoutDimensionIterationOrder;
using ::tensorstore::internal_iterate::ExtractInnerShapeAndStrides;
using ::tensorstore::internal_iterate::InnerShapeAndStrides;
using ::tensorstore::internal_iterate::PermuteAndSimplifyStridedIterationLayout;
using ::tensorstore::internal_iterate::SimplifyStridedIterationLayout;
using ::tensorstore::internal_iterate::StridedIterationLayout;
using ::testing::ElementsAre;
TEST(LayoutOrderConstraint, Basic) {
static_assert(!LayoutOrderConstraint{}, "");
static_assert(!LayoutOrderConstraint(tensorstore::unspecified_order), "");
static_assert(LayoutOrderConstraint(ContiguousLayoutOrder::c), "");
static_assert(LayoutOrderConstraint(ContiguousLayoutOrder::fortran), "");
static_assert(
0 == LayoutOrderConstraint(tensorstore::unspecified_order).value(), "");
static_assert(2 == LayoutOrderConstraint(ContiguousLayoutOrder::c).value(),
"");
static_assert(
3 == LayoutOrderConstraint(ContiguousLayoutOrder::fortran).value(), "");
static_assert(ContiguousLayoutOrder::c ==
LayoutOrderConstraint(ContiguousLayoutOrder::c).order(),
"");
static_assert(
ContiguousLayoutOrder::fortran ==
LayoutOrderConstraint(ContiguousLayoutOrder::fortran).order(),
"");
}
TEST(IterationConstraintsTest, Basic) {
static_assert(!IterationConstraints().order_constraint(), "");
static_assert(
!IterationConstraints(tensorstore::unspecified_order).order_constraint(),
"");
static_assert(
IterationConstraints(ContiguousLayoutOrder::c).order_constraint(), "");
static_assert(
IterationConstraints(ContiguousLayoutOrder::fortran).order_constraint(),
"");
static_assert(
ContiguousLayoutOrder::c == IterationConstraints(ContiguousLayoutOrder::c)
.order_constraint()
.order(),
"");
static_assert(ContiguousLayoutOrder::fortran ==
IterationConstraints(ContiguousLayoutOrder::fortran)
.order_constraint()
.order(),
"");
static_assert(include_repeated_elements ==
IterationConstraints().repeated_elements_constraint(),
"");
static_assert(include_repeated_elements ==
IterationConstraints(include_repeated_elements)
.repeated_elements_constraint(),
"");
static_assert(
skip_repeated_elements == IterationConstraints(skip_repeated_elements)
.repeated_elements_constraint(),
"");
static_assert(
skip_repeated_elements ==
IterationConstraints(ContiguousLayoutOrder::c, skip_repeated_elements)
.repeated_elements_constraint(),
"");
static_assert(include_repeated_elements ==
IterationConstraints(ContiguousLayoutOrder::c,
include_repeated_elements)
.repeated_elements_constraint(),
"");
static_assert(ContiguousLayoutOrder::c ==
IterationConstraints(ContiguousLayoutOrder::c,
include_repeated_elements)
.order_constraint()
.order(),
"");
static_assert(ContiguousLayoutOrder::fortran ==
IterationConstraints(ContiguousLayoutOrder::fortran,
include_repeated_elements)
.order_constraint()
.order(),
"");
static_assert(3 == IterationConstraints(ContiguousLayoutOrder::fortran,
include_repeated_elements)
.value(),
"");
}
TEST(PermuteAndSimplifyStridedIterationLayoutTest, Fortran1D) {
const Index shape[] = {3, 4, 5};
const DimensionIndex dimension_order[] = {2, 1, 0};
const Index strides[] = {1, 3, 12};
auto layout = PermuteAndSimplifyStridedIterationLayout<1>(
shape, dimension_order, {{strides}});
StridedIterationLayout<1> expected_layout{{60, {{1}}}};
EXPECT_EQ(expected_layout, layout);
}
TEST(PermuteAndSimplifyStridedIterationLayoutTest, C1D) {
const Index shape[] = {3, 4, 5};
const DimensionIndex dimension_order[] = {0, 1, 2};
const Index strides[] = {20, 5, 1};
auto layout = PermuteAndSimplifyStridedIterationLayout<1>(
shape, dimension_order, {{strides}});
StridedIterationLayout<1> expected_layout{{60, {{1}}}};
EXPECT_EQ(expected_layout, layout);
}
TEST(PermuteAndSimplifyStridedIterationLayoutTest, C2D) {
const Index shape[] = {3, 4, 5};
const DimensionIndex dimension_order[] = {0, 1, 2};
const Index strides[] = {40, 5, 1};
auto layout = PermuteAndSimplifyStridedIterationLayout<1>(
shape, dimension_order, {{strides}});
StridedIterationLayout<1> expected_layout{{3, {{40}}}, {20, {{1}}}};
EXPECT_EQ(expected_layout, layout);
}
TEST(PermuteAndSimplifyStridedIterationLayoutTest, C2D2Layouts) {
const Index shape[] = {3, 4, 5};
  const DimensionIndex dimension_order[] = {0, 1, 2};
const Index strides0[] = {40, 5, 1};
const Index strides1[] = {40, 10, 2};
auto layout = PermuteAndSimplifyStridedIterationLayout<2>(
shape, dimension_order, {{strides0, strides1}});
StridedIterationLayout<2> expected_layout{{3, {{40, 40}}}, {20, {{1, 2}}}};
EXPECT_EQ(expected_layout, layout);
}
TEST(PermuteAndSimplifyStridedIterationLayoutTest, C3D2Layouts) {
const Index shape[] = {3, 4, 5};
  const DimensionIndex dimension_order[] = {0, 1, 2};
const Index strides0[] = {40, 5, 1};
const Index strides1[] = {40, 10, 1};
auto layout = PermuteAndSimplifyStridedIterationLayout<2>(
shape, dimension_order, {{strides0, strides1}});
StridedIterationLayout<2> expected_layout{
{3, {{40, 40}}}, {4, {{5, 10}}}, {5, {{1, 1}}}};
EXPECT_EQ(expected_layout, layout);
}
TEST(ComputeStridedLayoutDimensionIterationOrderTest, Unconstrained1D1Layout) {
const Index shape[] = {3, 4, 5};
const Index strides0[] = {20, 1, 4};
EXPECT_THAT(ComputeStridedLayoutDimensionIterationOrder(
include_repeated_elements, shape, span({strides0})),
ElementsAre(0, 2, 1));
}
TEST(ComputeStridedLayoutDimensionIterationOrderTest,
Unconstrained1D1LayoutSkipRepeated) {
const Index shape[] = {3, 5, 4, 5};
const Index strides0[] = {20, 0, 1, 4};
EXPECT_THAT(ComputeStridedLayoutDimensionIterationOrder(
include_repeated_elements, shape, span({strides0})),
ElementsAre(0, 3, 2, 1));
EXPECT_THAT(ComputeStridedLayoutDimensionIterationOrder(
skip_repeated_elements, shape, span({strides0})),
ElementsAre(0, 3, 2));
}
TEST(ComputeStridedLayoutDimensionIterationOrderTest,
Unconstrained1D1LayoutSingletonDims) {
const Index shape[] = {3, 1, 4, 5};
const Index strides0[] = {20, 5, 1, 4};
EXPECT_THAT(ComputeStridedLayoutDimensionIterationOrder(
include_repeated_elements, shape, span({strides0})),
ElementsAre(0, 3, 2));
}
TEST(ComputeStridedLayoutDimensionIterationOrderTest, Unconstrained1D2Layouts) {
const Index shape[] = {3, 4, 5};
const Index strides0[] = {20, 1, 4};
const Index strides1[] = {40, 2, 8};
EXPECT_THAT(ComputeStridedLayoutDimensionIterationOrder(
include_repeated_elements, shape, span({strides0, strides1})),
ElementsAre(0, 2, 1));
}
TEST(ComputeStridedLayoutDimensionIterationOrderTest,
Unconstrained1D2LayoutsSkipRepeated) {
const Index shape[] = {3, 5, 4, 5, 2};
const Index strides0[] = {20, 0, 1, 4, 71};
const Index strides1[] = {40, 0, 2, 8, 0};
EXPECT_THAT(ComputeStridedLayoutDimensionIterationOrder(
include_repeated_elements, shape, span({strides0, strides1})),
ElementsAre(4, 0, 3, 2, 1));
EXPECT_THAT(ComputeStridedLayoutDimensionIterationOrder(
skip_repeated_elements, shape, span({strides0, strides1})),
ElementsAre(4, 0, 3, 2));
EXPECT_THAT(ComputeStridedLayoutDimensionIterationOrder(
skip_repeated_elements, shape, span({strides1, strides0})),
ElementsAre(0, 3, 2, 4));
}
TEST(ComputeStridedLayoutDimensionIterationOrderTest, Fortran1D) {
const Index shape[] = {3, 4, 5};
const Index strides[] = {1, 3, 12};
EXPECT_THAT(ComputeStridedLayoutDimensionIterationOrder(
{ContiguousLayoutOrder::fortran, include_repeated_elements},
shape, span({strides})),
ElementsAre(2, 1, 0));
}
TEST(ComputeStridedLayoutDimensionIterationOrderTest, Fortran1DSkipRepeated) {
const Index shape[] = {3, 4, 2, 5};
const Index strides[] = {1, 3, 0, 12};
EXPECT_THAT(ComputeStridedLayoutDimensionIterationOrder(
{ContiguousLayoutOrder::fortran, include_repeated_elements},
shape, span({strides})),
ElementsAre(3, 2, 1, 0));
EXPECT_THAT(ComputeStridedLayoutDimensionIterationOrder(
{ContiguousLayoutOrder::fortran, skip_repeated_elements},
shape, span({strides})),
ElementsAre(3, 1, 0));
}
TEST(ComputeStridedLayoutDimensionIterationOrderTest, C3D) {
const Index shape[] = {3, 4, 5};
const Index strides[] = {1, 3, 12};
EXPECT_THAT(ComputeStridedLayoutDimensionIterationOrder(
{ContiguousLayoutOrder::c, include_repeated_elements}, shape,
span({strides})),
ElementsAre(0, 1, 2));
}
TEST(SimplifyStridedIterationLayoutTest, Unconstrained1D1Layout) {
const Index shape[] = {3, 4, 5};
const Index strides0[] = {20, 1, 4};
auto layout = SimplifyStridedIterationLayout<1>(include_repeated_elements,
shape, {{strides0}});
StridedIterationLayout<1> expected_layout{{60, {{1}}}};
EXPECT_EQ(expected_layout, layout);
}
TEST(SimplifyStridedIterationLayoutTest, Unconstrained1D1LayoutSkipRepeated) {
const Index shape[] = {3, 5, 4, 5};
const Index strides0[] = {20, 0, 1, 4};
{
auto layout = SimplifyStridedIterationLayout<1>(include_repeated_elements,
shape, {{strides0}});
StridedIterationLayout<1> expected_layout{{{60, {{1}}}, {5, {{0}}}}};
EXPECT_EQ(expected_layout, layout);
}
{
auto layout = SimplifyStridedIterationLayout<1>(skip_repeated_elements,
shape, {{strides0}});
StridedIterationLayout<1> expected_layout{{60, {{1}}}};
EXPECT_EQ(expected_layout, layout);
}
}
TEST(SimplifyStridedIterationLayoutTest, Unconstrained1D1LayoutSingletonDims) {
const Index shape[] = {3, 1, 4, 5};
const Index strides0[] = {20, 5, 1, 4};
auto layout = SimplifyStridedIterationLayout<1>(include_repeated_elements,
shape, {{strides0}});
StridedIterationLayout<1> expected_layout{{60, {{1}}}};
EXPECT_EQ(expected_layout, layout);
}
TEST(SimplifyStridedIterationLayoutTest, Unconstrained1D2Layouts) {
const Index shape[] = {3, 4, 5};
const Index strides0[] = {20, 1, 4};
const Index strides1[] = {40, 2, 8};
auto layout = SimplifyStridedIterationLayout<2>(
include_repeated_elements, shape, {{strides0, strides1}});
StridedIterationLayout<2> expected_layout{{60, {{1, 2}}}};
EXPECT_EQ(expected_layout, layout);
}
TEST(SimplifyStridedIterationLayoutTest, Unconstrained1D2LayoutsSkipRepeated) {
const Index shape[] = {3, 5, 4, 5, 2};
const Index strides0[] = {20, 0, 1, 4, 71};
const Index strides1[] = {40, 0, 2, 8, 0};
{
auto layout = SimplifyStridedIterationLayout<2>(
include_repeated_elements, shape, {{strides0, strides1}});
StridedIterationLayout<2> expected_layout{
{2, {{71, 0}}}, {60, {{1, 2}}}, {5, {{0, 0}}}};
EXPECT_EQ(expected_layout, layout);
}
{
auto layout = SimplifyStridedIterationLayout<2>(
skip_repeated_elements, shape, {{strides0, strides1}});
StridedIterationLayout<2> expected_layout{{2, {{71, 0}}}, {60, {{1, 2}}}};
EXPECT_EQ(expected_layout, layout);
}
{
auto layout = SimplifyStridedIterationLayout<2>(
skip_repeated_elements, shape, {{strides1, strides0}});
StridedIterationLayout<2> expected_layout{{60, {{2, 1}}}, {2, {{0, 71}}}};
EXPECT_EQ(expected_layout, layout);
}
}
TEST(SimplifyStridedIterationLayoutTest, Fortran1D) {
const Index shape[] = {3, 4, 5};
const Index strides[] = {1, 3, 12};
auto layout = SimplifyStridedIterationLayout<1>(
{ContiguousLayoutOrder::fortran, include_repeated_elements}, shape,
{{strides}});
StridedIterationLayout<1> expected_layout{{60, {{1}}}};
EXPECT_EQ(expected_layout, layout);
}
TEST(SimplifyStridedIterationLayoutTest, Fortran1DSkipRepeated) {
const Index shape[] = {3, 4, 2, 5};
const Index strides[] = {1, 3, 0, 12};
{
auto layout = SimplifyStridedIterationLayout<1>(
{ContiguousLayoutOrder::fortran, include_repeated_elements}, shape,
{{strides}});
StridedIterationLayout<1> expected_layout{
{5, {{12}}}, {2, {{0}}}, {12, {{1}}}};
EXPECT_EQ(expected_layout, layout);
}
{
auto layout = SimplifyStridedIterationLayout<1>(
{ContiguousLayoutOrder::fortran, skip_repeated_elements}, shape,
{{strides}});
StridedIterationLayout<1> expected_layout{{60, {{1}}}};
EXPECT_EQ(expected_layout, layout);
}
}
TEST(SimplifyStridedIterationLayoutTest, C3D) {
const Index shape[] = {3, 4, 5};
const Index strides[] = {1, 3, 12};
auto layout = SimplifyStridedIterationLayout<1>(
{ContiguousLayoutOrder::c, include_repeated_elements}, shape,
{{strides}});
StridedIterationLayout<1> expected_layout{
{3, {{1}}}, {4, {{3}}}, {5, {{12}}}};
EXPECT_EQ(expected_layout, layout);
}
TEST(ExtractInnerShapeAndStridesTest, N2Rank2Inner0) {
StridedIterationLayout<2> iteration_layout{{3, {{1, 2}}}, {4, {{4, 5}}}};
auto inner_layout = ExtractInnerShapeAndStrides<0>(&iteration_layout);
InnerShapeAndStrides<2, 0> expected_inner;
StridedIterationLayout<2> expected_outer{{3, {{1, 2}}}, {4, {{4, 5}}}};
EXPECT_EQ(expected_inner, inner_layout);
EXPECT_EQ(expected_outer, iteration_layout);
}
TEST(ExtractInnerShapeAndStridesTest, N2Rank2Inner1) {
StridedIterationLayout<2> iteration_layout{{3, {{1, 2}}}, {4, {{4, 5}}}};
auto inner_layout = ExtractInnerShapeAndStrides<1>(&iteration_layout);
InnerShapeAndStrides<2, 1> expected_inner{{{4}}, {{{{4}}, {{5}}}}};
StridedIterationLayout<2> expected_outer{{3, {{1, 2}}}};
EXPECT_EQ(expected_inner, inner_layout);
EXPECT_EQ(expected_outer, iteration_layout);
}
TEST(ExtractInnerShapeAndStridesTest, N2Rank2Inner2) {
StridedIterationLayout<2> iteration_layout{{3, {{1, 2}}}, {4, {{4, 5}}}};
auto inner_layout = ExtractInnerShapeAndStrides<2>(&iteration_layout);
InnerShapeAndStrides<2, 2> expected_inner{{{3, 4}}, {{{{1, 4}}, {{2, 5}}}}};
StridedIterationLayout<2> expected_outer;
EXPECT_EQ(expected_inner, inner_layout);
EXPECT_EQ(expected_outer, iteration_layout);
}
TEST(ExtractInnerShapeAndStridesTest, N2Rank2Inner3) {
StridedIterationLayout<2> iteration_layout{{3, {{1, 2}}}, {4, {{4, 5}}}};
auto inner_layout = ExtractInnerShapeAndStrides<3>(&iteration_layout);
InnerShapeAndStrides<2, 3> expected_inner{{{1, 3, 4}},
{{{{0, 1, 4}}, {{0, 2, 5}}}}};
StridedIterationLayout<2> expected_outer;
EXPECT_EQ(expected_inner, inner_layout);
EXPECT_EQ(expected_outer, iteration_layout);
}
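// Editor's note: when the requested inner rank exceeds the number of
// available dimensions, as in Inner3 above, the inner shape is left-padded
// with size-1 dimensions whose byte strides are 0.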
template <typename Func, typename... Pointer>
std::invoke_result_t<Func&, Pointer...> IterateOverStridedLayouts(
span<const Index> shape,
std::array<const Index*, sizeof...(Pointer)> strides, Func&& func,
tensorstore::IterationConstraints constraints, Pointer... pointer) {
auto iteration_layout =
SimplifyStridedIterationLayout(constraints, shape, strides);
return tensorstore::internal_iterate::IterateHelper<Func&, Pointer...>::Start(
func, iteration_layout, pointer...);
}
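// Editor's note: this test-local helper simplifies the layout and then
// walks it with IterateHelper, forwarding func's boolean result so the
// tests below can exercise early termination; the Pointer values passed in
// are plain ints acting as flat offsets rather than real addresses.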
TEST(IterateOverStridedLayoutsTest, InnerRank0ContiguousC) {
const Index shape[] = {2, 3};
const Index strides0[] = {3, 1};
const Index strides1[] = {3 * 4, 1 * 4};
using R = std::tuple<int, int>;
std::vector<R> result;
auto func = [&](int a, int b) {
result.emplace_back(a, b);
return true;
};
IterateOverStridedLayouts(shape, {{strides0, strides1}}, func,
ContiguousLayoutOrder::c, 0, 0);
std::vector<R> expected_result{R{0, 0}, R{1, 4}, R{2, 8},
R{3, 12}, R{4, 16}, R{5, 20}};
EXPECT_EQ(expected_result, result);
}
TEST(IterateOverStridedLayoutsTest, EmptyDomain) {
const Index shape[] = {0, 3};
const Index strides[] = {0, 1};
std::vector<int> result;
auto func = [&](int a) {
result.emplace_back(a);
return true;
};
IterateOverStridedLayouts(shape, {{strides}}, func,
{ContiguousLayoutOrder::c, skip_repeated_elements},
0);
EXPECT_THAT(result, ::testing::ElementsAre());
}
TEST(IterateOverStridedLayoutsTest, InnerRank0ContiguousCStop) {
const Index shape[] = {2, 3};
const Index strides0[] = {3, 1};
const Index strides1[] = {3 * 4, 1 * 4};
using R = std::tuple<int, int>;
std::vector<R> result;
auto func = [&](int a, int b) {
result.emplace_back(a, b);
return a != 2;
};
EXPECT_EQ(false,
IterateOverStridedLayouts(shape, {{strides0, strides1}}, func,
ContiguousLayoutOrder::c, 0, 0));
std::vector<R> expected_result{R{0, 0}, R{1, 4}, R{2, 8}};
EXPECT_EQ(expected_result, result);
}
TEST(IterateOverStridedLayoutsTest, InnerRank0NonContiguousFortran) {
const Index shape[] = {2, 3};
const Index strides0[] = {3, 1};
const Index strides1[] = {3 * 4, 1 * 4};
using R = std::tuple<int, int>;
std::vector<R> result;
auto func = [&](int a, int b) {
result.emplace_back(a, b);
return true;
};
IterateOverStridedLayouts(shape, {{strides0, strides1}}, func,
ContiguousLayoutOrder::fortran, 0, 0);
std::vector<R> expected_result{R{0, 0}, R{3, 12}, R{1, 4},
R{4, 16}, R{2, 8}, R{5, 20}};
EXPECT_EQ(expected_result, result);
}
TEST(IterateOverStridedLayoutsTest, InnerRank0NonContiguousFortranStop) {
const Index shape[] = {2, 3};
const Index strides0[] = {3, 1};
const Index strides1[] = {3 * 4, 1 * 4};
using R = std::tuple<int, int>;
std::vector<R> result;
auto func = [&](int a, int b) {
result.emplace_back(a, b);
return a != 3;
};
IterateOverStridedLayouts(shape, {{strides0, strides1}}, func,
ContiguousLayoutOrder::fortran, 0, 0);
std::vector<R> expected_result{R{0, 0}, R{3, 12}};
EXPECT_EQ(expected_result, result);
}
template <ContiguousLayoutOrder Order>
std::vector<std::vector<int>> GetIndexVectors(std::vector<int> shape) {
std::vector<std::vector<int>> result;
std::vector<int> indices(shape.size());
do {
result.push_back(indices);
} while (AdvanceIndices<Order>(indices.size(), indices.data(), shape.data()));
return result;
}
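// Example (editor's sketch): GetIndexVectors<c_order>({2, 2}) produces
// {0,0}, {0,1}, {1,0}, {1,1}; AdvanceIndices returns false exactly once,
// after the final index combination has been recorded.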
template <ContiguousLayoutOrder Order>
std::vector<std::vector<int>> GetIndexVectors(std::vector<int> inclusive_min,
std::vector<int> exclusive_max) {
std::vector<std::vector<int>> result;
std::vector<int> indices = inclusive_min;
do {
result.push_back(indices);
} while (AdvanceIndices<Order>(indices.size(), indices.data(),
inclusive_min.data(), exclusive_max.data()));
return result;
}
TEST(AdvanceIndicesTest, COrderRank0) {
EXPECT_THAT(GetIndexVectors<c_order>({}), ElementsAre(ElementsAre()));
}
TEST(AdvanceIndicesTest, FortranOrderRank0) {
EXPECT_THAT(GetIndexVectors<fortran_order>({}), ElementsAre(ElementsAre()));
}
TEST(AdvanceIndicesTest, COrderShape) {
EXPECT_THAT(GetIndexVectors<c_order>({2, 3}),
ElementsAre(
ElementsAre(0, 0), ElementsAre(0, 1), ElementsAre(0, 2),
ElementsAre(1, 0), ElementsAre(1, 1), ElementsAre(1, 2)));
}
TEST(AdvanceIndicesTest, FortranOrderShape) {
EXPECT_THAT(GetIndexVectors<fortran_order>({2, 3}),
ElementsAre(ElementsAre(0, 0), ElementsAre(1, 0),
ElementsAre(0, 1), ElementsAre(1, 1),
ElementsAre(0, 2), ElementsAre(1, 2)));
}
TEST(AdvanceIndicesTest, COrderInclusiveMinExclusiveMax) {
EXPECT_THAT(
GetIndexVectors<c_order>({1, 2}, {3, 5}),
ElementsAre(ElementsAre(1, 2), ElementsAre(1, 3), ElementsAre(1, 4),
ElementsAre(2, 2), ElementsAre(2, 3), ElementsAre(2, 4)));
}
TEST(AdvanceIndicesTest, FortranOrderInclusiveMinExclusiveMax) {
EXPECT_THAT(GetIndexVectors<fortran_order>({1, 2}, {3, 5}),
ElementsAre(ElementsAre(1, 2), ElementsAre(2, 2),
ElementsAre(1, 3), ElementsAre(2, 3),
ElementsAre(1, 4), ElementsAre(2, 4)));
}
static_assert(DefaultIterationResult<bool>::value() == true, "");
static_assert(DefaultIterationResult<int>::value() == 0, "");
} |
566 | cpp | google/tensorstore | label_op | tensorstore/index_space/internal/label_op.cc | tensorstore/index_space/label_op_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_INTERNAL_LABEL_OP_H_
#define TENSORSTORE_INDEX_SPACE_INTERNAL_LABEL_OP_H_
#include "tensorstore/index_space/dimension_index_buffer.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/internal/meta.h"
#include "tensorstore/internal/string_like.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_index_space {
Result<IndexTransform<>> ApplyLabel(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
internal::StringLikeSpan labels,
bool domain_only);
template <typename Labels>
struct LabelOp {
static constexpr DimensionIndex num_required_dims =
internal::ConstSpanType<Labels>::extent;
static constexpr bool selected_dimensions_are_new = false;
constexpr static DimensionIndex GetNewStaticInputRank(
DimensionIndex input_rank, DimensionIndex num_input_dims) {
TENSORSTORE_CONSTEXPR_ASSERT(
(input_rank == dynamic_rank || input_rank >= num_required_dims) &&
"Number of dimensions must not exceed input rank.");
return input_rank;
}
constexpr static DimensionIndex GetStaticSelectionRank(
DimensionIndex num_input_dims) {
TENSORSTORE_CONSTEXPR_ASSERT(
RankConstraint::EqualOrUnspecified(num_input_dims, num_required_dims) &&
"Number of selected dimensions must match number of indices.");
return num_input_dims == dynamic_rank ? num_required_dims : num_input_dims;
}
Result<IndexTransform<>> Apply(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
bool domain_only) const {
return ApplyLabel(std::move(transform), dimensions,
internal::StringLikeSpan(labels), domain_only);
}
Labels labels;
};
}
}
#endif
#include "tensorstore/index_space/internal/label_op.h"
#include <stddef.h>
#include <string>
#include <string_view>
#include <utility>
#include "absl/status/status.h"
#include "tensorstore/container_kind.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dimension_index_buffer.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/internal/dimension_labels.h"
#include "tensorstore/internal/string_like.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
Result<IndexTransform<>> ApplyLabel(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
internal::StringLikeSpan labels,
bool domain_only) {
if (dimensions->size() != static_cast<size_t>(labels.size())) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Number of dimensions (", dimensions->size(),
") does not match number of labels (", labels.size(), ")."));
}
auto rep = MutableRep(
TransformAccess::rep_ptr<container>(std::move(transform)), domain_only);
const DimensionIndex input_rank = rep->input_rank;
span<std::string> input_labels = rep->input_labels().first(input_rank);
for (DimensionIndex i = 0;
i < static_cast<DimensionIndex>(dimensions->size()); ++i) {
const DimensionIndex input_dim = (*dimensions)[i];
std::string_view label = labels[i];
input_labels[input_dim].assign(label.begin(), label.end());
}
TENSORSTORE_RETURN_IF_ERROR(
internal::ValidateDimensionLabelsAreUnique(input_labels));
internal_index_space::DebugCheckInvariants(rep.get());
return TransformAccess::Make<IndexTransform<>>(std::move(rep));
}
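// Usage sketch (editor's addition): on a rank-3 transform labeled
// {"x", "y", "z"}, applying Dims(0, 2).Label("a", "b") rewrites the labels
// to {"a", "y", "b"}; the LabelTest.Example test below shows the full
// expected transform.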
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::Dims;
using ::tensorstore::IdentityTransform;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::span;
using ::tensorstore::internal_index_space::TestDimExpression;
TEST(LabelTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.input_labels({"a", "y", "b"})
.output_identity_transform()
.Finalize()
.value();
TestDimExpression(original_transform,
Dims(0, 2).Label("a", "b"),
{0, 2},
expected_new_transform,
expected_new_transform,
{});
TestDimExpression(original_transform,
Dims("x", "z").Label("a", "b"),
{0, 2},
expected_new_transform,
expected_new_transform,
{});
}
TEST(LabelTest, MultipleArguments) {
TestDimExpression(
IndexTransformBuilder<3, 1>()
.output_constant(0, 1)
.Finalize()
.value(),
Dims(1, 0).Label("x", "y"),
{1, 0},
IndexTransformBuilder<3, 3>()
.input_labels({"y", "x", ""})
.output_identity_transform()
.Finalize()
.value(),
IndexTransformBuilder<3, 1>()
.input_labels({"y", "x", ""})
.output_constant(0, 1)
.Finalize()
.value(),
{});
}
TEST(LabelTest, ErrorHandling) {
TestDimExpressionError(
IdentityTransform(1),
Dims(span<const DimensionIndex>({0})).Label("x", "y"),
absl::StatusCode::kInvalidArgument,
"Number of dimensions \\(1\\) does not match number of "
"labels \\(2\\)\\.");
}
} |
567 | cpp | google/tensorstore | dimension_selection | tensorstore/index_space/internal/dimension_selection.cc | tensorstore/index_space/dimension_selection_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_INTERNAL_DIMENSION_SELECTION_H_
#define TENSORSTORE_INDEX_SPACE_INTERNAL_DIMENSION_SELECTION_H_
#include "absl/status/status.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dimension_identifier.h"
#include "tensorstore/index_space/dimension_index_buffer.h"
#include "tensorstore/index_space/index_transform.h"
namespace tensorstore {
namespace internal_index_space {
absl::Status GetDimensions(IndexTransformView<> transform,
span<const DimensionIndex> dimensions,
DimensionIndexBuffer* result);
absl::Status GetNewDimensions(DimensionIndex input_rank,
span<const DimensionIndex> dimensions,
DimensionIndexBuffer* result);
absl::Status GetDimensions(IndexTransformView<> transform,
span<const DimensionIdentifier> dimensions,
DimensionIndexBuffer* result);
absl::Status GetDimensions(span<const std::string> labels,
span<const DynamicDimSpec> dimensions,
DimensionIndexBuffer* result);
absl::Status GetNewDimensions(DimensionIndex input_rank,
span<const DynamicDimSpec> dimensions,
DimensionIndexBuffer* result);
absl::Status GetAllDimensions(DimensionIndex input_rank,
DimensionIndexBuffer* result);
template <typename Container>
class DimensionList {
public:
absl::Status GetDimensions(IndexTransformView<> transform,
DimensionIndexBuffer* buffer) const {
if constexpr (std::is_same_v<typename Container::value_type,
DynamicDimSpec>) {
return internal_index_space::GetDimensions(transform.input_labels(),
container, buffer);
} else {
return internal_index_space::GetDimensions(transform, container, buffer);
}
}
absl::Status GetNewDimensions(DimensionIndex input_rank,
DimensionIndexBuffer* buffer) const {
static_assert(
!std::is_same_v<typename Container::value_type, DimensionIdentifier>,
"New dimensions must be specified by index.");
return internal_index_space::GetNewDimensions(input_rank, container,
buffer);
}
constexpr static DimensionIndex GetStaticSelectionRank(
DimensionIndex input_rank) {
if constexpr (std::is_same_v<typename Container::value_type,
DynamicDimSpec>) {
return dynamic_rank;
} else {
return internal::ConstSpanType<Container>::extent;
}
}
constexpr static DimensionIndex GetNewStaticInputRank(
DimensionIndex input_rank, DimensionIndex selection_rank) {
return input_rank;
}
Container container;
};
class AllDims {
public:
absl::Status GetDimensions(IndexTransformView<> transform,
DimensionIndexBuffer* buffer) const {
return internal_index_space::GetAllDimensions(transform.input_rank(),
buffer);
}
constexpr static DimensionIndex GetStaticSelectionRank(
DimensionIndex input_rank) {
return input_rank;
}
constexpr static DimensionIndex GetNewStaticInputRank(
DimensionIndex input_rank, DimensionIndex selection_rank) {
return input_rank;
}
};
template <typename T>
constexpr inline bool IsDimensionIdentifier = false;
template <>
constexpr inline bool IsDimensionIdentifier<DimensionIndex> = true;
template <>
constexpr inline bool IsDimensionIdentifier<DimensionIdentifier> = true;
template <>
constexpr inline bool IsDimensionIdentifier<DynamicDimSpec> = true;
template <typename Dimensions,
typename DimensionsSpan = internal::ConstSpanType<Dimensions>>
using DimensionListFromSpanType =
std::enable_if_t<IsDimensionIdentifier<typename DimensionsSpan::value_type>,
DimensionList<DimensionsSpan>>;
template <typename... DimensionId>
using DimensionsFromPackType = std::conditional_t<
internal::IsPackConvertibleWithoutNarrowing<DimensionIndex, DimensionId...>,
DimensionList<std::array<DimensionIndex, sizeof...(DimensionId)>>,
std::conditional_t<
internal::IsPackConvertibleWithoutNarrowing<DimensionIdentifier,
DimensionId...>,
DimensionList<std::array<DimensionIdentifier, sizeof...(DimensionId)>>,
std::enable_if_t<internal::IsPackConvertibleWithoutNarrowing<
DynamicDimSpec, DimensionId...>,
DimensionList<std::array<DynamicDimSpec,
sizeof...(DimensionId)>>>>>;
}
}
#endif
#include "tensorstore/index_space/internal/dimension_selection.h"
#include <numeric>
#include "absl/status/status.h"
#include "absl/strings/str_join.h"
#include "tensorstore/index_space/dimension_identifier.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
absl::Status CheckAndNormalizeDimensions(DimensionIndex input_rank,
span<DimensionIndex> dimensions) {
if (dimensions.size() > input_rank) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Number of dimensions (", dimensions.size(),
") exceeds input rank (", input_rank, ")."));
}
std::vector<DimensionIndex> error_dimensions;
for (DimensionIndex i = 0; i < dimensions.size(); ++i) {
TENSORSTORE_ASSIGN_OR_RETURN(
const DimensionIndex dim,
NormalizeDimensionIndex(dimensions[i], input_rank));
dimensions[i] = dim;
for (DimensionIndex j = 0; j < i; ++j) {
if (dimensions[j] == dim) {
error_dimensions.push_back(dim);
}
}
}
if (!error_dimensions.empty()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Input dimensions {", absl::StrJoin(error_dimensions, ", "),
"} specified more than once"));
}
return absl::OkStatus();
}
absl::Status GetDimensions(DimensionIndex input_rank,
span<const DimensionIndex> dimensions,
DimensionIndexBuffer* result) {
result->assign(dimensions.begin(), dimensions.end());
return CheckAndNormalizeDimensions(input_rank, *result);
}
absl::Status GetDimensions(IndexTransformView<> transform,
span<const DimensionIndex> dimensions,
DimensionIndexBuffer* result) {
return GetDimensions(transform.input_rank(), dimensions, result);
}
absl::Status GetDimensions(IndexTransformView<> transform,
span<const DimensionIdentifier> dimensions,
DimensionIndexBuffer* result) {
const DimensionIndex input_rank = transform.input_rank();
result->resize(dimensions.size());
span<const std::string> input_labels = transform.input_labels();
for (DimensionIndex i = 0; i < dimensions.size(); ++i) {
TENSORSTORE_ASSIGN_OR_RETURN(
(*result)[i],
NormalizeDimensionIdentifier(dimensions[i], input_labels));
}
return CheckAndNormalizeDimensions(input_rank, *result);
}
absl::Status GetNewDimensions(DimensionIndex input_rank,
span<const DimensionIndex> dimensions,
DimensionIndexBuffer* result) {
return GetDimensions(input_rank + dimensions.size(), dimensions, result);
}
absl::Status GetAllDimensions(DimensionIndex input_rank,
DimensionIndexBuffer* result) {
result->resize(input_rank);
std::iota(result->begin(), result->end(), static_cast<DimensionIndex>(0));
return absl::OkStatus();
}
absl::Status GetDimensions(span<const std::string> labels,
span<const DynamicDimSpec> dimensions,
DimensionIndexBuffer* result) {
result->clear();
TENSORSTORE_RETURN_IF_ERROR(
NormalizeDynamicDimSpecs(dimensions, labels, result));
return CheckAndNormalizeDimensions(labels.size(), *result);
}
namespace {
Result<DimensionIndex> GetNumNewDimensions(const DimRangeSpec& spec) {
const DimensionIndex step = spec.step;
if (step == 0) return absl::InvalidArgumentError("step must not be 0");
if (spec.inclusive_start) {
const DimensionIndex inclusive_start = *spec.inclusive_start;
if (spec.exclusive_stop) {
const DimensionIndex exclusive_stop = *spec.exclusive_stop;
if ((exclusive_stop < 0) == (inclusive_start < 0) &&
((step > 0 && exclusive_stop >= inclusive_start) ||
(step < 0 && exclusive_stop <= inclusive_start))) {
return CeilOfRatio(*spec.exclusive_stop - inclusive_start, step);
}
} else if (step > 0) {
if (inclusive_start < 0) {
return CeilOfRatio(-inclusive_start, step);
}
} else {
if (inclusive_start >= 0) {
return CeilOfRatio(inclusive_start + 1, -step);
}
}
} else if (spec.exclusive_stop) {
const DimensionIndex exclusive_stop = *spec.exclusive_stop;
if (step > 0) {
if (exclusive_stop >= 0) {
return CeilOfRatio(exclusive_stop, step);
}
} else {
if (exclusive_stop < 0) {
return CeilOfRatio(-(exclusive_stop + 1), -step);
}
}
}
return absl::InvalidArgumentError(tensorstore::StrCat(
"`", spec, "` is not a valid specification for new dimensions"));
}
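// Worked examples (editor's sketch): for new dimensions, the spec -6:-3:2
// yields CeilOfRatio(-3 - (-6), 2) = 2 dimensions, and 1::-1 yields
// CeilOfRatio(1 + 1, 1) = 2, while specs such as ":" or "5:" are rejected
// because the number of new dimensions cannot be inferred from them.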
}
absl::Status GetNewDimensions(DimensionIndex input_rank,
span<const DynamicDimSpec> dimensions,
DimensionIndexBuffer* result) {
DimensionIndex new_rank = input_rank;
for (const auto& spec : dimensions) {
if (auto* r = std::get_if<DimRangeSpec>(&spec)) {
TENSORSTORE_ASSIGN_OR_RETURN(DimensionIndex x, GetNumNewDimensions(*r));
new_rank += x;
} else {
new_rank += 1;
}
}
result->clear();
result->reserve(new_rank);
struct Visitor {
DimensionIndex new_rank;
DimensionIndexBuffer* result;
absl::Status operator()(DimensionIndex i) const {
TENSORSTORE_ASSIGN_OR_RETURN(DimensionIndex index,
NormalizeDimensionIndex(i, new_rank));
result->push_back(index);
return absl::OkStatus();
}
absl::Status operator()(const std::string& label) const {
return absl::InvalidArgumentError(
"New dimensions cannot be specified by label");
}
absl::Status operator()(const DimRangeSpec& s) const {
return NormalizeDimRangeSpec(s, new_rank, result);
}
};
for (const auto& spec : dimensions) {
TENSORSTORE_RETURN_IF_ERROR(std::visit(Visitor{new_rank, result}, spec));
}
return CheckAndNormalizeDimensions(new_rank, *result);
}
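// Worked example (editor's sketch): with input_rank 4 and specs
// {DimRangeSpec{1, 4, 2}, 0, -1}, new_rank becomes 4 + 2 + 1 + 1 = 8 and
// the buffer resolves to {1, 3, 0, 7}, matching the
// DynamicDimsTest.CombinedNew expectation in the unit test.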
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::DimensionIndexBuffer;
using ::tensorstore::DimRangeSpec;
using ::tensorstore::Dims;
using ::tensorstore::dynamic_rank;
using ::tensorstore::DynamicDims;
using ::tensorstore::IndexDomainBuilder;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::span;
using ::tensorstore::internal_index_space::TestDimExpressionError;
TEST(DimsTest, ErrorHandling) {
TestDimExpressionError(
IndexTransformBuilder<2, 0>().Finalize().value(),
Dims(span<const DimensionIndex>({0, 0, 1})).IndexSlice(0),
absl::StatusCode::kInvalidArgument,
"Number of dimensions .* exceeds input rank .*");
TestDimExpressionError(IndexTransformBuilder<2, 0>().Finalize().value(),
Dims(2).Label("b"), absl::StatusCode::kInvalidArgument,
"Dimension index 2 is outside valid range .*");
TestDimExpressionError(IndexTransformBuilder<2, 0>().Finalize().value(),
Dims(1, 1).Label("b", "c"),
absl::StatusCode::kInvalidArgument,
"Input dimensions \\{1\\} specified more than once.*");
}
TEST(DimsTest, SelectUsingLabels) {
TestDimExpression(
IndexTransformBuilder<2, 0>()
.input_labels({"x", "y"})
.Finalize()
.value(),
Dims("x").Label("a"),
{0},
IndexTransformBuilder<2, 2>()
.input_labels({"a", "y"})
.output_identity_transform()
.Finalize()
.value(),
IndexTransformBuilder<2, 0>().input_labels({"a", "y"}).Finalize().value(),
{});
TestDimExpressionError(
IndexTransformBuilder<2, 0>().input_labels({"x", "y"}).Finalize().value(),
Dims("a").Label("z"), absl::StatusCode::kInvalidArgument,
"Label \"a\" does not match one of \\{\"x\", \"y\"\\}");
TestDimExpressionError(
IndexTransformBuilder<2, 0>().input_labels({"", ""}).Finalize().value(),
Dims("").Label("z"), absl::StatusCode::kInvalidArgument,
"Dimension cannot be specified by empty label");
TestDimExpression(
IndexTransformBuilder<2, 0>()
.input_labels({"x", "y"})
.Finalize()
.value(),
Dims({"x", -1}).Label("a", "b"),
{0, 1},
IndexTransformBuilder<2, 2>()
.input_labels({"a", "b"})
.output_identity_transform()
.Finalize()
.value(),
IndexTransformBuilder<2, 0>().input_labels({"a", "b"}).Finalize().value(),
{});
}
TEST(DynamicDimsTest, Existing) {
const auto original_transform = IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value();
const auto expected_identity_new_transform =
IndexTransformBuilder<4, 4>()
.input_labels({"a1", "b1", "c1", "d1"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform =
IndexTransformBuilder<4, 0>()
.input_labels({"a1", "b1", "c1", "d1"})
.Finalize()
.value();
TestDimExpression(
original_transform,
Dims(DimRangeSpec{1, 4, 2}, 0, "c").Label("b1", "d1", "a1", "c1"),
{1, 3, 0, 2},
expected_identity_new_transform,
expected_new_transform,
{});
TestDimExpression(
original_transform,
DynamicDims({DimRangeSpec{1, 4, 2}, 0, "c"})
.Label("b1", "d1", "a1", "c1"),
{1, 3, 0, 2},
expected_identity_new_transform,
expected_new_transform,
{});
}
TEST(DynamicDimsTest, CombinedNew) {
TestDimExpression(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{1, 4, 2}, 0, -1).AddNew().Label("e", "f", "g", "h"),
{1, 3, 0, 7},
IndexTransformBuilder<dynamic_rank, 4>(8, tensorstore::StaticRank<4>{})
.input_labels({"g", "e", "a", "f", "b", "c", "d", "h"})
.output_single_input_dimension(0, 2)
.output_single_input_dimension(1, 4)
.output_single_input_dimension(2, 5)
.output_single_input_dimension(3, 6)
.Finalize()
.value(),
IndexTransformBuilder<dynamic_rank, 0>(8)
.input_labels({"g", "e", "a", "f", "b", "c", "d", "h"})
.Finalize()
.value(),
{},
false);
}
TEST(DynamicDimsTest, InvalidNewLabel) {
TestDimExpressionError(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{1, 4, 2}, "x").AddNew(),
absl::StatusCode::kInvalidArgument,
"New dimensions cannot be specified by label");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewUnbounded) {
TestDimExpressionError(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{std::nullopt, std::nullopt, 1}).AddNew(),
absl::StatusCode::kInvalidArgument,
"`:` is not a valid specification for new dimensions");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewMissingStop) {
TestDimExpressionError(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{5, std::nullopt, 1}).AddNew(),
absl::StatusCode::kInvalidArgument,
"`5:` is not a valid specification for new dimensions");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewNegativeStop) {
TestDimExpressionError(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{std::nullopt, -3, 1}).AddNew(),
absl::StatusCode::kInvalidArgument,
"`:-3` is not a valid specification for new dimensions");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewNegativeStartNegativeStep) {
TestDimExpressionError(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{-5, std::nullopt, -1}).AddNew(),
absl::StatusCode::kInvalidArgument,
"`-5::-1` is not a valid specification for new dimensions");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewMissingStart) {
TestDimExpressionError(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{std::nullopt, 5, -1}).AddNew(),
absl::StatusCode::kInvalidArgument,
"`:5:-1` is not a valid specification for new dimensions");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewInvalidInterval) {
TestDimExpressionError(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{6, 5, 1}).AddNew(), absl::StatusCode::kInvalidArgument,
"`6:5` is not a valid specification for new dimensions");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewInvalidMixedSigns) {
TestDimExpressionError(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{-1, 4, 1}).AddNew(), absl::StatusCode::kInvalidArgument,
"`-1:4` is not a valid specification for new dimensions");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewZeroStep) {
TestDimExpressionError(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{1, 4, 0}).AddNew(), absl::StatusCode::kInvalidArgument,
"step must not be 0");
}
TEST(DynamicDimsTest, InvalidDimRangeSpecNewInvalidIntervalNegativeStep) {
TestDimExpressionError(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{5, 6, -1}).AddNew(), absl::StatusCode::kInvalidArgument,
"`5:6:-1` is not a valid specification for new dimensions");
}
TEST(DimsTest, DimRangeSpecNegativeStep) {
TestDimExpression(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{-4, -7, -2}).AddNew().Label("e", "f"),
{2, 0},
IndexTransformBuilder<dynamic_rank, 4>(6)
.input_labels({"f", "a", "e", "b", "c", "d"})
.output_single_input_dimension(0, 1)
.output_single_input_dimension(1, 3)
.output_single_input_dimension(2, 4)
.output_single_input_dimension(3, 5)
.Finalize()
.value(),
IndexTransformBuilder<dynamic_rank, 0>(6)
.input_labels({"f", "a", "e", "b", "c", "d"})
.Finalize()
.value(),
{},
false);
}
TEST(DimsTest, DimRangeSpecNegativeIndicesNew) {
TestDimExpression(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{-6, -3, 2}).AddNew().Label("e", "f"),
{0, 2},
IndexTransformBuilder<dynamic_rank, 4>(6)
.input_labels({"e", "a", "f", "b", "c", "d"})
.output_single_input_dimension(0, 1)
.output_single_input_dimension(1, 3)
.output_single_input_dimension(2, 4)
.output_single_input_dimension(3, 5)
.Finalize()
.value(),
IndexTransformBuilder<dynamic_rank, 0>(6)
.input_labels({"e", "a", "f", "b", "c", "d"})
.Finalize()
.value(),
{},
false);
}
TEST(DimsTest, DimRangeSpecImplicitStopNew) {
TestDimExpression(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{-3, std::nullopt, 2}).AddNew().Label("e", "f"),
{3, 5},
IndexTransformBuilder<dynamic_rank, 4>(6)
.input_labels({"a", "b", "c", "e", "d", "f"})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 2)
.output_single_input_dimension(3, 4)
.Finalize()
.value(),
IndexTransformBuilder<dynamic_rank, 0>(6)
.input_labels({"a", "b", "c", "e", "d", "f"})
.Finalize()
.value(),
{},
false);
}
TEST(DimsTest, DimRangeSpecImplicitStopNegativeStepNew) {
TestDimExpression(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{1, std::nullopt, -1}).AddNew().Label("e", "f"),
{1, 0},
IndexTransformBuilder<dynamic_rank, 4>(6)
.input_labels({"f", "e", "a", "b", "c", "d"})
.output_single_input_dimension(0, 2)
.output_single_input_dimension(1, 3)
.output_single_input_dimension(2, 4)
.output_single_input_dimension(3, 5)
.Finalize()
.value(),
IndexTransformBuilder<dynamic_rank, 0>(6)
.input_labels({"f", "e", "a", "b", "c", "d"})
.Finalize()
.value(),
{},
false);
}
TEST(DimsTest, DimRangeSpecImplicitStartNegativeStepNew) {
TestDimExpression(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{std::nullopt, -4, -2}).AddNew().Label("e", "f"),
{5, 3},
IndexTransformBuilder<dynamic_rank, 4>(6)
.input_labels({"a", "b", "c", "f", "d", "e"})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 2)
.output_single_input_dimension(3, 4)
.Finalize()
.value(),
IndexTransformBuilder<dynamic_rank, 0>(6)
.input_labels({"a", "b", "c", "f", "d", "e"})
.Finalize()
.value(),
{},
false);
}
TEST(DimsTest, DimRangeSpecImplicitStartNew) {
TestDimExpression(
IndexTransformBuilder<4, 0>()
.input_labels({"a", "b", "c", "d"})
.Finalize()
.value(),
Dims(DimRangeSpec{std::nullopt, 3, 2}).AddNew().Label("e", "f"),
{0, 2},
IndexTransformBuilder<dynamic_rank, 4>(6)
.input_labels({"e", "a", "f", "b", "c", "d"})
.output_single_input_dimension(0, 1)
.output_single_input_dimension(1, 3)
.output_single_input_dimension(2, 4)
.output_single_input_dimension(3, 5)
.Finalize()
.value(),
IndexTransformBuilder<dynamic_rank, 0>(6)
.input_labels({"e", "a", "f", "b", "c", "d"})
.Finalize()
.value(),
{},
false);
}
TEST(ResolveTest, Example) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain, IndexDomainBuilder<3>().labels({"x", "y", "z"}).Finalize());
DimensionIndexBuffer buffer;
TENSORSTORE_EXPECT_OK(Dims("x", "z").Resolve(domain, &buffer));
EXPECT_THAT(buffer, ::testing::ElementsAre(0, 2));
}
} |
568 | cpp | google/tensorstore | diagonal_op | tensorstore/index_space/internal/diagonal_op.cc | tensorstore/index_space/diagonal_op_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_INTERNAL_DIAGONAL_OP_H_
#define TENSORSTORE_INDEX_SPACE_INTERNAL_DIAGONAL_OP_H_
#include "tensorstore/index_space/dimension_index_buffer.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/internal/string_like.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_index_space {
Result<IndexTransform<>> ApplyDiagonal(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
bool domain_only);
struct DiagonalOp {
static constexpr bool selected_dimensions_are_new = false;
constexpr static DimensionIndex GetNewStaticInputRank(
DimensionIndex input_rank, DimensionIndex num_input_dims) {
return RankConstraint::Add(
RankConstraint::Subtract(input_rank, num_input_dims), 1);
}
constexpr static DimensionIndex GetStaticSelectionRank(
DimensionIndex num_input_dims) {
return 1;
}
Result<IndexTransform<>> Apply(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
bool domain_only) const {
return ApplyDiagonal(std::move(transform), dimensions, domain_only);
}
};
}
}
#endif
#include "tensorstore/index_space/internal/diagonal_op.h"
#include <algorithm>
namespace tensorstore {
namespace internal_index_space {
namespace {
template <typename R>
void ShiftRangeForwardByOne(R range) {
for (DimensionIndex i = range.size() - 1; i > 0; --i) {
range[i] = range[i - 1];
}
}
void ExtractDiagonal(TransformRep* original, TransformRep* result,
DimensionIndexBuffer* dimensions, bool domain_only) {
const DimensionIndex orig_input_rank = original->input_rank;
const DimensionIndex output_rank = domain_only ? 0 : original->output_rank;
const DimensionIndex new_input_rank =
orig_input_rank - dimensions->size() + 1;
assert(result->input_rank_capacity >= new_input_rank);
const DimensionIndex diag_input_dim = 0;
DimensionIndex orig_to_new_input_dim[kMaxRank];
std::fill_n(&orig_to_new_input_dim[0], orig_input_rank,
static_cast<DimensionIndex>(-1));
bool lower_diagonal_bound_implicit = true,
upper_diagonal_bound_implicit = true;
IndexInterval diagonal_bounds;
for (DimensionIndex orig_input_dim : *dimensions) {
orig_to_new_input_dim[orig_input_dim] = diag_input_dim;
const auto d = original->input_dimension(orig_input_dim);
diagonal_bounds = Intersect(diagonal_bounds, d.domain());
if (!d.implicit_lower_bound()) {
lower_diagonal_bound_implicit = false;
}
if (!d.implicit_upper_bound()) {
upper_diagonal_bound_implicit = false;
}
}
for (DimensionIndex orig_input_dim = 0, new_input_dim = 1;
orig_input_dim < orig_input_rank; ++orig_input_dim) {
if (orig_to_new_input_dim[orig_input_dim] == -1) {
orig_to_new_input_dim[orig_input_dim] = new_input_dim++;
}
}
const bool domain_is_explicitly_empty = !lower_diagonal_bound_implicit &&
!upper_diagonal_bound_implicit &&
diagonal_bounds.empty();
span<const OutputIndexMap> orig_maps =
original->output_index_maps().first(output_rank);
span<OutputIndexMap> result_maps =
result->output_index_maps().first(output_rank);
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
const auto& orig_map = orig_maps[output_dim];
auto& result_map = result_maps[output_dim];
result_map.stride() = orig_map.stride();
result_map.offset() = orig_map.offset();
switch (orig_map.method()) {
case OutputIndexMethod::constant:
result_map.SetConstant();
break;
case OutputIndexMethod::single_input_dimension: {
const DimensionIndex orig_input_dim = orig_map.input_dimension();
assert(orig_input_dim >= 0 && orig_input_dim < orig_input_rank);
const DimensionIndex new_input_dim =
orig_to_new_input_dim[orig_input_dim];
result_map.SetSingleInputDimension(new_input_dim);
break;
}
case OutputIndexMethod::array: {
if (domain_is_explicitly_empty) {
result_map.SetConstant();
result_map.stride() = 0;
result_map.offset() = 0;
break;
}
auto& result_index_array = result_map.SetArrayIndexing(new_input_rank);
const auto& orig_index_array = orig_map.index_array_data();
assert(orig_index_array.rank_capacity >= orig_input_rank);
Index diag_byte_stride = 0;
for (DimensionIndex orig_input_dim : *dimensions) {
diag_byte_stride += orig_index_array.byte_strides[orig_input_dim];
}
for (DimensionIndex orig_input_dim = 0;
orig_input_dim < orig_input_rank; ++orig_input_dim) {
const DimensionIndex new_input_dim =
orig_to_new_input_dim[orig_input_dim];
if (new_input_dim == diag_input_dim) continue;
assert(new_input_dim - 1 <= orig_input_dim);
result_index_array.byte_strides[new_input_dim - 1] =
orig_index_array.byte_strides[orig_input_dim];
}
ShiftRangeForwardByOne(
span(result_index_array.byte_strides, new_input_rank));
result_index_array.byte_strides[diag_input_dim] = diag_byte_stride;
result_index_array.index_range = orig_index_array.index_range;
result_index_array.element_pointer =
orig_index_array.element_pointer.pointer();
break;
}
}
}
for (DimensionIndex orig_input_dim = 0; orig_input_dim < orig_input_rank;
++orig_input_dim) {
const DimensionIndex new_input_dim = orig_to_new_input_dim[orig_input_dim];
if (new_input_dim == diag_input_dim) continue;
assert(new_input_dim - 1 <= orig_input_dim);
result->input_dimension(new_input_dim - 1) =
original->input_dimension(orig_input_dim);
}
ShiftRangeForwardByOne(result->all_input_dimensions(new_input_rank));
{
const auto d = result->input_dimension(diag_input_dim);
d.domain() = diagonal_bounds;
d.implicit_lower_bound() = lower_diagonal_bound_implicit;
d.implicit_upper_bound() = upper_diagonal_bound_implicit;
d.SetEmptyLabel();
}
result->input_rank = new_input_rank;
result->output_rank = output_rank;
dimensions->clear();
dimensions->push_back(diag_input_dim);
NormalizeImplicitBounds(*result);
}
}
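// Applies the Diagonal() dimension expression: allocates (or reuses, when
// possible) a representation with the reduced input rank, extracts the
// diagonal, and verifies invariants in debug builds.  A minimal usage
// sketch through the public API (the `transform | expression` composition
// is assumed here for illustration; it is not defined in this file):
//
//   TENSORSTORE_ASSIGN_OR_RETURN(
//       auto new_transform, transform | tensorstore::Dims(0, 2).Diagonal());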
Result<IndexTransform<>> ApplyDiagonal(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
bool domain_only) {
TransformRep* rep = TransformAccess::rep(transform);
const DimensionIndex new_input_rank =
rep->input_rank - dimensions->size() + 1;
TransformRep::Ptr<> new_rep =
NewOrMutableRep(rep, new_input_rank, rep->output_rank, domain_only);
ExtractDiagonal(rep, new_rep.get(), dimensions, domain_only);
internal_index_space::DebugCheckInvariants(new_rep.get());
return TransformAccess::Make<IndexTransform<>>(std::move(new_rep));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
namespace {
using ::tensorstore::Dims;
using ::tensorstore::Index;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::MakeArray;
using ::tensorstore::internal_index_space::EquivalentIndices;
using ::tensorstore::internal_index_space::TestDimExpression;
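// Argument roles for TestDimExpression, as inferred from the calls below
// (the helper's declaration in dim_expression_testutil.h is not shown
// here): the original transform, the dimension expression under test, the
// expected new dimension selection, the expected transform when applied to
// the domain alone, the expected composed transform, and pairs of
// {original, new} input indices that must address the same element; some
// calls pass a trailing flag that is not examined further here.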
TEST(DiagonalTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({5, 4, 5})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform = IndexTransformBuilder<2, 3>()
.input_origin({3, 2})
.input_shape({3, 4})
.input_labels({"", "y"})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 0)
.Finalize()
.value();
const EquivalentIndices equivalent_indices = {
{{4, 3, 4}, {4, 3}},
};
TestDimExpression(original_transform,
Dims(0, 2).Diagonal(),
{0},
expected_new_transform,
expected_new_transform,
equivalent_indices);
TestDimExpression(original_transform,
Dims("x", "z").Diagonal(),
{0},
expected_new_transform,
expected_new_transform,
equivalent_indices);
}
TEST(DiagonalTest, ZeroDimensional) {
TestDimExpression(IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({5, 4})
.input_labels({"x", "y"})
.output_single_input_dimension(0, 5, 1, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
Dims().Diagonal(),
{0},
IndexTransformBuilder<3, 2>()
.input_origin({-kInfIndex, 1, 2})
.input_shape({kInfSize, 5, 4})
.implicit_lower_bounds({1, 0, 0})
.implicit_upper_bounds({1, 0, 0})
.input_labels({"", "x", "y"})
.output_single_input_dimension(0, 1)
.output_single_input_dimension(1, 2)
.Finalize()
.value(),
IndexTransformBuilder<3, 2>()
.input_origin({-kInfIndex, 1, 2})
.input_shape({kInfSize, 5, 4})
.implicit_lower_bounds({1, 0, 0})
.implicit_upper_bounds({1, 0, 0})
.input_labels({"", "x", "y"})
.output_single_input_dimension(0, 5, 1, 1)
.output_single_input_dimension(1, 2)
.Finalize()
.value(),
{{{3, 4}, {8, 3, 4}}},
false);
}
TEST(DiagonalTest, OneDimensional) {
TestDimExpression(IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({5, 4, 5})
.input_labels({"x", "y", "z"})
.output_single_input_dimension(0, 5, 1, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 2)
.Finalize()
.value(),
Dims(1).Diagonal(),
{0},
IndexTransformBuilder<3, 3>()
.input_origin({2, 1, 3})
.input_shape({4, 5, 5})
.input_labels({"", "x", "z"})
.output_single_input_dimension(0, 1)
.output_single_input_dimension(1, 0)
.output_single_input_dimension(2, 2)
.Finalize()
.value(),
IndexTransformBuilder<3, 3>()
.input_origin({2, 1, 3})
.input_shape({4, 5, 5})
.input_labels({"", "x", "z"})
.output_single_input_dimension(0, 5, 1, 1)
.output_single_input_dimension(1, 0)
.output_single_input_dimension(2, 2)
.Finalize()
.value(),
{{{4, 3, 5}, {3, 4, 5}}});
}
TEST(DiagonalTest, TwoDimensionalSimple) {
TestDimExpression(IndexTransformBuilder<3, 3>()
.input_origin({5, 6, 7})
.input_shape({10, 9, 15})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2, 2, 0)
.output_single_input_dimension(2, 3, 3, 2)
.Finalize()
.value(),
Dims(2, 0).Diagonal(),
{0},
IndexTransformBuilder<2, 3>()
.input_origin({7, 6})
.input_shape({8, 9})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 0)
.Finalize()
.value(),
IndexTransformBuilder<2, 3>()
.input_origin({7, 6})
.input_shape({8, 9})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2, 2, 0)
.output_single_input_dimension(2, 3, 3, 0)
.Finalize()
.value(),
{{{10, 11, 10}, {10, 11}}});
}
TEST(DiagonalTest, TwoDimensionalSimpleImplicitLower) {
TestDimExpression(
IndexTransformBuilder<3, 3>()
.input_origin({5, 6, 7})
.input_shape({10, 9, 15})
.implicit_lower_bounds({1, 0, 1})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2, 2, 0)
.output_single_input_dimension(2, 3, 3, 2)
.Finalize()
.value(),
Dims(2, 0).Diagonal(),
{0},
IndexTransformBuilder<2, 3>()
.input_origin({7, 6})
.input_shape({8, 9})
.implicit_lower_bounds({1, 0})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 0)
.Finalize()
.value(),
IndexTransformBuilder<2, 3>()
.input_origin({7, 6})
.input_shape({8, 9})
.implicit_lower_bounds({1, 0})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2, 2, 0)
.output_single_input_dimension(2, 3, 3, 0)
.Finalize()
.value(),
{{{10, 11, 10}, {10, 11}}});
}
TEST(DiagonalTest, TwoDimensionalSimpleImplicitUpper) {
TestDimExpression(
IndexTransformBuilder<3, 3>()
.input_origin({5, 6, 7})
.input_shape({10, 9, 15})
.implicit_upper_bounds({1, 0, 1})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2, 2, 0)
.output_single_input_dimension(2, 3, 3, 2)
.Finalize()
.value(),
Dims(2, 0).Diagonal(),
{0},
IndexTransformBuilder<2, 3>()
.input_origin({7, 6})
.input_shape({8, 9})
.implicit_upper_bounds({1, 0})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 0)
.Finalize()
.value(),
IndexTransformBuilder<2, 3>()
.input_origin({7, 6})
.input_shape({8, 9})
.implicit_upper_bounds({1, 0})
.output_single_input_dimension(0, 1, 1, 1)
.output_single_input_dimension(1, 2, 2, 0)
.output_single_input_dimension(2, 3, 3, 0)
.Finalize()
.value(),
{{{10, 11, 10}, {10, 11}}});
}
TEST(DiagonalTest, IndexArray) {
TestDimExpression(
IndexTransformBuilder<3, 2>()
.input_origin({5, 6, 6})
.input_shape({4, 5, 2})
.output_index_array(
0, 2, 3,
MakeArray<Index>(
{{{1, 4}}, {{2, 5}}, {{3, 6}}, {{4, 7}}}))
.output_constant(1, 0)
.Finalize()
.value(),
Dims(0, 2).Diagonal(),
{0},
IndexTransformBuilder<2, 3>()
.input_origin({6, 6})
.input_shape({2, 5})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 0)
.Finalize()
.value(),
IndexTransformBuilder<2, 2>()
.input_origin({6, 6})
.input_shape({2, 5})
.output_index_array(0, 2, 3,
MakeArray<Index>({{2}, {6}}))
.output_constant(1, 0)
.Finalize()
.value(),
{{{6, 8, 6}, {6, 8}}});
}
TEST(DiagonalTest, IndexArrayZeroSize) {
TestDimExpression(
IndexTransformBuilder<2, 2>()
.input_shape({0, 2})
.implicit_upper_bounds({1, 0})
.output_single_input_dimension(0, 0)
.output_index_array(1, 0, 1, MakeArray<Index>({{1, 2}}))
.Finalize()
.value(),
Dims(0, 1).Diagonal(),
{0},
IndexTransformBuilder<1, 2>()
.input_shape({0})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 0)
.Finalize()
.value(),
IndexTransformBuilder<1, 2>()
.input_shape({0})
.output_single_input_dimension(0, 0)
.output_constant(1, 0)
.Finalize()
.value(),
{});
}
TEST(DiagonalTest, Labeled) {
TestDimExpression(
IndexTransformBuilder<3, 2>()
.input_origin({5, 6, 6})
.input_shape({4, 5, 2})
.input_labels({"a", "b", "c"})
.output_index_array(
0, 2, 3,
MakeArray<Index>(
{{{1, 4}}, {{2, 5}}, {{3, 6}}, {{4, 7}}}))
.output_constant(1, 0)
.Finalize()
.value(),
Dims(0, 2).Diagonal().Label("diag"),
{0},
IndexTransformBuilder<2, 3>()
.input_origin({6, 6})
.input_shape({2, 5})
.input_labels({"diag", "b"})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.output_single_input_dimension(2, 0)
.Finalize()
.value(),
IndexTransformBuilder<2, 2>()
.input_origin({6, 6})
.input_shape({2, 5})
.input_labels({"diag", "b"})
.output_index_array(0, 2, 3,
MakeArray<Index>({{2}, {6}}))
.output_constant(1, 0)
.Finalize()
.value(),
{{{6, 8, 6}, {6, 8}}});
}
} |
569 | cpp | google/tensorstore | kvs_backed_chunk_driver | tensorstore/driver/kvs_backed_chunk_driver.cc | tensorstore/driver/kvs_backed_chunk_driver_test.cc | #ifndef TENSORSTORE_DRIVER_KVS_BACKED_CHUNK_DRIVER_H_
#define TENSORSTORE_DRIVER_KVS_BACKED_CHUNK_DRIVER_H_
#include <memory>
#include <string_view>
#include "absl/status/status.h"
#include "tensorstore/box.h"
#include "tensorstore/driver/chunk_cache_driver.h"
#include "tensorstore/driver/driver.h"
#include "tensorstore/driver/registry.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/internal/cache/aggregate_writeback_cache.h"
#include "tensorstore/internal/cache/async_cache.h"
#include "tensorstore/internal/cache/async_initialized_cache_mixin.h"
#include "tensorstore/internal/cache/cache_pool_resource.h"
#include "tensorstore/internal/cache/chunk_cache.h"
#include "tensorstore/internal/cache/kvs_backed_cache.h"
#include "tensorstore/internal/cache/kvs_backed_chunk_cache.h"
#include "tensorstore/internal/chunk_grid_specification.h"
#include "tensorstore/internal/context_binding.h"
#include "tensorstore/internal/data_copy_concurrency_resource.h"
#include "tensorstore/internal/estimate_heap_usage/std_vector.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/open_mode_spec.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/open_mode.h"
#include "tensorstore/serialization/absl_time.h"
#include "tensorstore/spec.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_kvs_backed_chunk_driver {
struct KvsDriverSpec : public internal::DriverSpec,
public internal::OpenModeSpec {
kvstore::Spec store;
Context::Resource<internal::DataCopyConcurrencyResource>
data_copy_concurrency;
Context::Resource<internal::CachePoolResource> cache_pool;
StalenessBounds staleness;
static constexpr auto ApplyMembers = [](auto& x, auto f) {
return f(internal::BaseCast<internal::DriverSpec>(x),
internal::BaseCast<internal::OpenModeSpec>(x), x.store,
x.data_copy_concurrency, x.cache_pool, x.staleness);
};
kvstore::Spec GetKvstore() const override;
OpenMode open_mode() const override;
absl::Status ApplyOptions(SpecOptions&& options) override;
};
TENSORSTORE_DECLARE_JSON_BINDER(SpecJsonBinder, KvsDriverSpec,
JsonSerializationOptions,
JsonSerializationOptions,
::nlohmann::json::object_t);
enum AtomicUpdateConstraint {
kNone,
kRequireExisting,
kRequireMissing,
};
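// Abstract writeback cache for the metadata associated with a chunked
// format.  Each entry corresponds to one metadata key in the underlying
// kvstore; derived classes define the storage key and the encoded /
// in-memory representations.  `Entry::RequestAtomicUpdate` queues an
// `UpdateFunction` that is applied atomically via read-modify-write when
// the transaction commits, subject to the given `AtomicUpdateConstraint`.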
class MetadataCache
: public internal::AggregateWritebackCache<
MetadataCache,
internal::KvsBackedCache<MetadataCache, internal::AsyncCache>>,
public internal::AsyncInitializedCacheMixin {
using Base = internal::AggregateWritebackCache<
MetadataCache,
internal::KvsBackedCache<MetadataCache, internal::AsyncCache>>;
public:
using MetadataPtr = std::shared_ptr<const void>;
struct Initializer {
Context::Resource<internal::DataCopyConcurrencyResource>
data_copy_concurrency;
Context::Resource<internal::CachePoolResource> cache_pool;
};
explicit MetadataCache(Initializer initializer);
virtual std::string GetMetadataStorageKey(std::string_view entry_key) = 0;
virtual Result<MetadataPtr> DecodeMetadata(std::string_view entry_key,
absl::Cord encoded_metadata) = 0;
virtual Result<absl::Cord> EncodeMetadata(std::string_view entry_key,
const void* metadata) = 0;
using UpdateFunction =
std::function<Result<MetadataPtr>(const MetadataPtr& existing_metadata)>;
struct PendingWrite {
UpdateFunction update;
AtomicUpdateConstraint update_constraint;
Promise<void> promise;
};
class Entry : public Base::Entry {
public:
using OwningCache = MetadataCache;
MetadataPtr GetMetadata() { return ReadLock<void>(*this).shared_data(); }
Result<MetadataPtr> GetMetadata(internal::OpenTransactionPtr transaction);
void DoDecode(std::optional<absl::Cord> value,
DecodeReceiver receiver) override;
void DoEncode(std::shared_ptr<const void> data,
EncodeReceiver receiver) override;
std::string GetKeyValueStoreKey() override;
Future<const void> RequestAtomicUpdate(
const internal::OpenTransactionPtr& transaction, UpdateFunction update,
AtomicUpdateConstraint update_constraint,
std::optional<absl::Time> read_time = {});
};
class TransactionNode : public Base::TransactionNode {
public:
using OwningCache = MetadataCache;
using MetadataCache::Base::TransactionNode::TransactionNode;
Result<MetadataPtr> GetUpdatedMetadata(MetadataPtr metadata);
Result<MetadataPtr> GetUpdatedMetadata();
void DoApply(ApplyOptions options, ApplyReceiver receiver) override;
void InvalidateReadState() override;
private:
friend class Entry;
MetadataPtr updated_metadata_base_state_;
Result<MetadataPtr> updated_metadata_ = nullptr;
};
Entry* DoAllocateEntry() final { return new Entry; }
size_t DoGetSizeofEntry() final { return sizeof(Entry); }
TransactionNode* DoAllocateTransactionNode(AsyncCache::Entry& entry) final {
return new TransactionNode(static_cast<Entry&>(entry));
}
kvstore::Driver* base_store() { return base_store_.get(); }
const Executor& executor() { return data_copy_concurrency_->executor; }
kvstore::DriverPtr base_store_;
Context::Resource<internal::DataCopyConcurrencyResource>
data_copy_concurrency_;
Context::Resource<internal::CachePoolResource> cache_pool_;
};
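// Abstract base class for the cache holding the actual chunk data.  A data
// cache is constructed from a pinned metadata cache entry plus the metadata
// snapshot it was opened with, and exposes metadata-dependent properties
// (component bounds, bound spec data, external-to-internal transforms) to
// the driver.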
class DataCacheBase {
public:
using MetadataPtr = MetadataCache::MetadataPtr;
struct Initializer {
internal::PinnedCacheEntry<MetadataCache> metadata_cache_entry;
MetadataPtr metadata;
};
explicit DataCacheBase(Initializer&& initializer);
virtual ~DataCacheBase();
virtual internal::Cache& cache() = 0;
using Ptr = internal::CachePtr<DataCacheBase>;
virtual absl::Status ValidateMetadataCompatibility(
const void* existing_metadata, const void* new_metadata) = 0;
virtual absl::Status GetBoundSpecData(KvsDriverSpec& spec,
const void* metadata,
size_t component_index) = 0;
virtual Result<IndexTransform<>> GetExternalToInternalTransform(
const void* metadata, size_t component_index);
virtual void GetComponentBounds(const void* metadata, size_t component_index,
Box<dynamic_rank(kMaxRank)>& bounds,
DimensionSet& implicit_lower_bounds,
DimensionSet& implicit_upper_bounds) = 0;
virtual std::string GetBaseKvstorePath() = 0;
MetadataCache* metadata_cache() const {
return &GetOwningCache(*metadata_cache_entry_);
}
const Executor& executor() const { return metadata_cache()->executor(); }
const internal::PinnedCacheEntry<MetadataCache>& metadata_cache_entry()
const {
return metadata_cache_entry_;
}
const MetadataPtr& initial_metadata() const { return initial_metadata_; }
const internal::PinnedCacheEntry<MetadataCache> metadata_cache_entry_;
const MetadataPtr initial_metadata_;
};
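// Extends DataCacheBase with a regular chunk grid: chunk storage keys,
// grid-aligned bounds, chunk layout queries, resize support via
// GetResizedMetadata, and per-cell deletion.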
class ChunkedDataCacheBase : public DataCacheBase {
public:
using Ptr = internal::CachePtr<ChunkedDataCacheBase>;
using DataCacheBase::DataCacheBase;
using DataCacheBase::executor;
virtual const internal::ChunkGridSpecification& grid() const = 0;
virtual std::string GetChunkStorageKey(span<const Index> cell_indices) = 0;
virtual void GetChunkGridBounds(const void* metadata, MutableBoxView<> bounds,
DimensionSet& implicit_lower_bounds,
DimensionSet& implicit_upper_bounds) = 0;
void GetComponentBounds(const void* metadata, size_t component_index,
Box<dynamic_rank(kMaxRank)>& bounds,
DimensionSet& implicit_lower_bounds,
DimensionSet& implicit_upper_bounds) override;
virtual Result<ChunkLayout> GetChunkLayoutFromMetadata(
const void* metadata_ptr, size_t component_index) = 0;
virtual Result<ChunkLayout> GetChunkLayout(size_t component_index);
virtual Result<std::shared_ptr<const void>> GetResizedMetadata(
const void* existing_metadata, span<const Index> new_inclusive_min,
span<const Index> new_exclusive_max) = 0;
virtual Future<const void> DeleteCell(
span<const Index> grid_cell_indices,
internal::OpenTransactionPtr transaction) = 0;
};
struct DataCacheInitializer : public ChunkedDataCacheBase::Initializer {
kvstore::DriverPtr store;
};
class DataCache : public internal::KvsBackedChunkCache,
public ChunkedDataCacheBase {
public:
using DataCacheBase::executor;
using Initializer = DataCacheInitializer;
explicit DataCache(Initializer&& initializer,
internal::ChunkGridSpecification&& grid);
const Executor& executor() const final {
return ChunkedDataCacheBase::executor();
}
internal::Cache& cache() final { return *this; }
const internal::ChunkGridSpecification& grid() const final { return grid_; }
Future<const void> DeleteCell(span<const Index> grid_cell_indices,
internal::OpenTransactionPtr transaction) final;
internal::ChunkGridSpecification grid_;
};
struct PrivateOpenState {
internal::OpenTransactionPtr transaction_;
Batch batch_{no_batch};
internal::DriverSpec::PtrT<const KvsDriverSpec> spec_;
ReadWriteMode read_write_mode_;
std::string metadata_cache_key_;
internal::PinnedCacheEntry<MetadataCache> metadata_cache_entry_;
absl::Time request_time_;
};
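// Driver base class for the metadata-handling portion: ResolveBounds and
// ResolveMetadata re-read the metadata subject to the metadata staleness
// bound, and GetBoundSpecData reconstructs a bound spec from the open
// driver.  `assumed_metadata_` / `assumed_metadata_time_` support opening
// with assumed rather than read metadata.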
class KvsMetadataDriverBase : public internal::Driver {
public:
Future<IndexTransform<>> ResolveBounds(ResolveBoundsRequest request) override;
Future<IndexTransform<>> ResolveBounds(
ResolveBoundsRequest request, StalenessBound metadata_staleness_bound);
Future<MetadataCache::MetadataPtr> ResolveMetadata(
internal::OpenTransactionPtr transaction,
absl::Time metadata_staleness_bound);
Result<IndexTransform<>> GetBoundSpecData(
internal::OpenTransactionPtr transaction, KvsDriverSpec& spec,
IndexTransformView<> transform);
KvStore GetKvstore(const Transaction& transaction) override;
struct GarbageCollectionBase {
static void Visit(garbage_collection::GarbageCollectionVisitor& visitor,
const KvsMetadataDriverBase& value);
};
virtual DataCacheBase* cache() const = 0;
virtual size_t component_index() const = 0;
const StalenessBound& metadata_staleness_bound() const {
return metadata_staleness_bound_;
}
virtual const StalenessBound& data_staleness_bound() const = 0;
StalenessBound metadata_staleness_bound_;
std::shared_ptr<const void> assumed_metadata_;
absl::Time assumed_metadata_time_ = absl::InfinitePast();
};
class KvsChunkedDriverBase : public KvsMetadataDriverBase {
public:
virtual ChunkedDataCacheBase* cache() const override = 0;
Result<ChunkLayout> GetChunkLayout(IndexTransformView<> transform) override;
Future<IndexTransform<>> Resize(
internal::Driver::ResizeRequest request) override;
};
using DriverInitializer = internal::ChunkCacheDriverInitializer<DataCacheBase>;
class MetadataOpenState
: public internal::AtomicReferenceCount<MetadataOpenState>,
private PrivateOpenState {
public:
using Ptr = internal::IntrusivePtr<MetadataOpenState>;
struct Initializer {
internal::DriverSpec::PtrT<const KvsDriverSpec> spec;
internal::DriverOpenRequest request;
};
explicit MetadataOpenState(Initializer initializer);
virtual ~MetadataOpenState();
virtual std::string GetPrefixForDeleteExisting() = 0;
virtual std::string GetMetadataCacheEntryKey() = 0;
virtual AtomicUpdateConstraint GetCreateConstraint();
struct CreateOptions {
bool assume_metadata = false;
};
virtual Result<std::shared_ptr<const void>> Create(
const void* existing_metadata, CreateOptions options) = 0;
virtual std::string GetMetadataCacheKey();
virtual std::unique_ptr<MetadataCache> GetMetadataCache(
MetadataCache::Initializer initializer) = 0;
virtual Result<kvstore::DriverPtr> GetMetadataKeyValueStore(
kvstore::DriverPtr base_kv_store);
virtual Result<internal::Driver::Handle> CreateDriverHandleFromMetadata(
std::shared_ptr<const void> metadata) = 0;
virtual ReadWriteMode GetReadWriteMode(const void* metadata);
const KvsDriverSpec& spec() const { return *spec_; }
const Executor& executor() const {
return spec_->data_copy_concurrency->executor;
}
const Context::Resource<internal::CachePoolResource>& cache_pool() const {
return spec_->cache_pool;
}
};
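// Open state that additionally creates the chunk data cache: derived
// classes supply the driver allocation, the data cache key, the cache
// itself, the data kvstore, and the component index to use within the
// cache.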
class OpenState : public MetadataOpenState {
public:
using Ptr = internal::IntrusivePtr<OpenState>;
using MetadataOpenState::MetadataOpenState;
Result<internal::Driver::Handle> CreateDriverHandleFromMetadata(
std::shared_ptr<const void> metadata) override;
virtual KvsMetadataDriverBase* AllocateDriver(
DriverInitializer&& initializer) = 0;
virtual std::string GetDataCacheKey(const void* metadata) = 0;
virtual std::unique_ptr<DataCacheBase> GetDataCache(
DataCacheInitializer&& initializer) = 0;
virtual Result<kvstore::DriverPtr> GetDataKeyValueStore(
kvstore::DriverPtr base_kv_store, const void* metadata);
virtual Result<size_t> GetComponentIndex(const void* metadata,
OpenMode open_mode) = 0;
}; | #include "tensorstore/driver/kvs_backed_chunk_driver.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/box.h"
#include "tensorstore/driver/kvs_backed_chunk_driver_impl.h"
#include "tensorstore/index.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Box;
using ::tensorstore::Index;
using ::tensorstore::kImplicit;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_kvs_backed_chunk_driver::
ValidateResizeConstraints;
using ISpan = ::tensorstore::span<const Index>;
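// Inferred parameter order for ValidateResizeConstraints (declared in
// kvs_backed_chunk_driver_impl.h, which is not shown here): the current
// domain, the requested new inclusive_min and exclusive_max (kImplicit
// means leave unchanged), the inclusive_min and exclusive_max constraints
// on the remaining dimensions, then expand_only and shrink_only.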
TEST(ValidateResizeConstraintsTest, Success) {
EXPECT_EQ(absl::OkStatus(),
ValidateResizeConstraints(
Box({0, 0}, {4, 5}),
ISpan({kImplicit, kImplicit}),
ISpan({kImplicit, 6}),
ISpan({0, 0}),
ISpan({kImplicit, kImplicit}),
false,
false));
EXPECT_EQ(absl::OkStatus(),
ValidateResizeConstraints(
Box({0, 0}, {4, 5}),
ISpan({kImplicit, kImplicit}),
ISpan({4, 6}),
ISpan({0, 0}),
ISpan({4, kImplicit}),
false,
false));
EXPECT_EQ(absl::OkStatus(),
ValidateResizeConstraints(
Box({0, 0}, {4, 5}),
ISpan({kImplicit, kImplicit}),
ISpan({kImplicit, 6}),
ISpan({0, 0}),
ISpan({kImplicit, kImplicit}),
true,
false));
EXPECT_EQ(absl::OkStatus(),
ValidateResizeConstraints(
Box({0, 0}, {4, 5}),
ISpan({kImplicit, kImplicit}),
ISpan({kImplicit, 3}),
ISpan({0, 0}),
ISpan({kImplicit, kImplicit}),
false,
true));
EXPECT_EQ(absl::OkStatus(),
ValidateResizeConstraints(
Box({0, 0}, {4, 5}),
ISpan({kImplicit, kImplicit}),
ISpan({kImplicit, 5}),
ISpan({0, 0}),
ISpan({kImplicit, kImplicit}),
true,
true));
EXPECT_EQ(absl::OkStatus(),
ValidateResizeConstraints(
Box({0, 0}, {4, 5}),
ISpan({kImplicit, kImplicit}),
ISpan({kImplicit, 5}),
ISpan({0, 0}),
ISpan({kImplicit, kImplicit}),
true,
true));
}
TEST(ValidateResizeConstraintsTest, Failure) {
EXPECT_THAT(
ValidateResizeConstraints(
Box({0, 0}, {4, 5}),
ISpan({kImplicit, kImplicit}),
ISpan({kImplicit, 6}),
ISpan({0, 0}),
ISpan({5, kImplicit}),
false,
false),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Resize operation would also affect output dimension 0 "
"over the out-of-bounds interval \\[4, 5\\)"));
EXPECT_THAT(
ValidateResizeConstraints(
Box({0, 0}, {4, 5}),
ISpan({kImplicit, kImplicit}),
ISpan({kImplicit, 6}),
ISpan({0, 0}),
ISpan({3, kImplicit}),
false,
false),
MatchesStatus(
absl::StatusCode::kFailedPrecondition,
"Resize operation would also affect output dimension 0 over the "
"interval \\[3, 4\\) but `resize_tied_bounds` was not specified"));
EXPECT_THAT(
ValidateResizeConstraints(
Box({0, 0}, {4, 5}),
ISpan({kImplicit, kImplicit}),
ISpan({kImplicit, 6}),
ISpan({0, 0}),
ISpan({kImplicit, kImplicit}),
false,
true),
MatchesStatus(
absl::StatusCode::kFailedPrecondition,
"Resize operation would expand output dimension 1 from "
"\\[0, 5\\) to \\[0, 6\\) but `shrink_only` was specified"));
EXPECT_THAT(
ValidateResizeConstraints(
Box({0, 0}, {4, 5}),
ISpan({kImplicit, kImplicit}),
ISpan({kImplicit, 4}),
ISpan({0, 0}),
ISpan({kImplicit, kImplicit}),
true,
false),
MatchesStatus(
absl::StatusCode::kFailedPrecondition,
"Resize operation would shrink output dimension 1 from "
"\\[0, 5\\) to \\[0, 4\\) but `expand_only` was specified"));
}
} |
570 | cpp | google/tensorstore | driver | tensorstore/kvstore/ocdbt/driver.cc | tensorstore/kvstore/ocdbt/distributed/driver_test.cc | #ifndef TENSORSTORE_KVSTORE_OCDBT_DRIVER_H_
#define TENSORSTORE_KVSTORE_OCDBT_DRIVER_H_
#include <stddef.h>
#include <optional>
#include <string>
#include <string_view>
#include "absl/status/status.h"
#include "absl/time/time.h"
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/context.h"
#include "tensorstore/context_resource_provider.h"
#include "tensorstore/internal/cache/cache_pool_resource.h"
#include "tensorstore/internal/concurrency_resource.h"
#include "tensorstore/internal/data_copy_concurrency_resource.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/ocdbt/btree_writer.h"
#include "tensorstore/kvstore/ocdbt/config.h"
#include "tensorstore/kvstore/ocdbt/distributed/rpc_security.h"
#include "tensorstore/kvstore/ocdbt/io/io_handle_impl.h"
#include "tensorstore/kvstore/ocdbt/io_handle.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/registry.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/supported_features.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/garbage_collection.h"
#include "tensorstore/util/result.h"
#include "tensorstore/internal/cache_key/absl_time.h"
#include "tensorstore/internal/cache_key/std_optional.h"
#include "tensorstore/internal/cache_key/std_variant.h"
#include "tensorstore/serialization/absl_time.h"
#include "tensorstore/serialization/std_optional.h"
#include "tensorstore/serialization/std_variant.h"
#include "tensorstore/util/garbage_collection/std_optional.h"
namespace tensorstore {
namespace internal_ocdbt {
struct OcdbtCoordinatorResource
: public internal::ContextResourceTraits<OcdbtCoordinatorResource> {
static constexpr char id[] = "ocdbt_coordinator";
struct Spec {
std::optional<std::string> address;
std::optional<absl::Duration> lease_duration;
RpcSecurityMethod::Ptr security;
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.address, x.lease_duration, x.security);
};
};
using Resource = Spec;
};
struct OcdbtDriverSpecData {
Context::Resource<internal::CachePoolResource> cache_pool;
Context::Resource<internal::DataCopyConcurrencyResource>
data_copy_concurrency;
kvstore::Spec base;
std::optional<kvstore::Spec> manifest;
ConfigConstraints config;
DataFilePrefixes data_file_prefixes;
std::optional<size_t> experimental_read_coalescing_threshold_bytes;
std::optional<size_t> experimental_read_coalescing_merged_bytes;
std::optional<absl::Duration> experimental_read_coalescing_interval;
std::optional<size_t> target_data_file_size;
bool assume_config = false;
Context::Resource<OcdbtCoordinatorResource> coordinator;
TENSORSTORE_DECLARE_JSON_DEFAULT_BINDER(OcdbtDriverSpecData,
internal_json_binding::NoOptions,
IncludeDefaults,
::nlohmann::json::object_t)
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.base, x.manifest, x.config, x.data_file_prefixes, x.cache_pool,
x.data_copy_concurrency,
x.experimental_read_coalescing_threshold_bytes,
x.experimental_read_coalescing_merged_bytes,
x.experimental_read_coalescing_interval, x.target_data_file_size,
x.coordinator);
};
};
class OcdbtDriverSpec
: public internal_kvstore::RegisteredDriverSpec<OcdbtDriverSpec,
OcdbtDriverSpecData> {
public:
static constexpr char id[] = "ocdbt";
Future<kvstore::DriverPtr> DoOpen() const override;
absl::Status ApplyOptions(kvstore::DriverSpecOptions&& options) override;
Result<kvstore::Spec> GetBase(std::string_view path) const override;
};
class OcdbtDriver
: public internal_kvstore::RegisteredDriver<OcdbtDriver, OcdbtDriverSpec> {
public:
Future<ReadResult> Read(Key key, ReadOptions options) override;
Future<TimestampedStorageGeneration> Write(Key key,
std::optional<Value> value,
WriteOptions options) override;
Future<const void> DeleteRange(KeyRange range) override;
Future<const void> ExperimentalCopyRangeFrom(
const internal::OpenTransactionPtr& transaction, const KvStore& source,
Key target_prefix, kvstore::CopyRangeOptions options) override;
std::string DescribeKey(std::string_view key) override;
void ListImpl(ListOptions options, ListReceiver receiver) override;
absl::Status GetBoundSpecData(OcdbtDriverSpecData& spec) const;
kvstore::SupportedFeatures GetSupportedFeatures(
const KeyRange& key_range) const final;
Result<KvStore> GetBase(std::string_view path,
const Transaction& transaction) const override;
absl::Status ReadModifyWrite(internal::OpenTransactionPtr& transaction,
size_t& phase, Key key,
ReadModifyWriteSource& source) override;
absl::Status TransactionalDeleteRange(
const internal::OpenTransactionPtr& transaction, KeyRange range) override;
const Executor& executor() { return data_copy_concurrency_->executor; }
IoHandle::Ptr io_handle_;
Context::Resource<internal::CachePoolResource> cache_pool_;
Context::Resource<internal::DataCopyConcurrencyResource>
data_copy_concurrency_;
kvstore::KvStore base_;
kvstore::KvStore manifest_kvstore_;
BtreeWriterPtr btree_writer_;
DataFilePrefixes data_file_prefixes_;
std::optional<size_t> experimental_read_coalescing_threshold_bytes_;
std::optional<size_t> experimental_read_coalescing_merged_bytes_;
std::optional<absl::Duration> experimental_read_coalescing_interval_;
std::optional<size_t> target_data_file_size_;
Context::Resource<OcdbtCoordinatorResource> coordinator_;
};
}
namespace garbage_collection {
template <>
struct GarbageCollection<internal_ocdbt::OcdbtDriver> {
static void Visit(GarbageCollectionVisitor& visitor,
const internal_ocdbt::OcdbtDriver& value) {
garbage_collection::GarbageCollectionVisit(visitor, value.base_);
garbage_collection::GarbageCollectionVisit(visitor,
value.manifest_kvstore_);
}
};
}
}
#endif
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/match.h"
#include "absl/strings/str_format.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/context.h"
#include "tensorstore/context_resource_provider.h"
#include "tensorstore/internal/concurrency_resource.h"
#include "tensorstore/internal/concurrency_resource_provider.h"
#include "tensorstore/internal/http/curl_transport.h"
#include "tensorstore/internal/http/http_header.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/internal/retries_context_resource.h"
#include "tensorstore/internal/retry.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/kvstore/batch_util.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/generic_coalescing_batch_util.h"
#include "tensorstore/kvstore/http/byte_range_util.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/registry.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/url_registry.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
#include "tensorstore/internal/cache_key/std_vector.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/serialization/std_vector.h"
using ::tensorstore::internal::IntrusivePtr;
using ::tensorstore::internal_http::HttpRequestBuilder;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_http::HttpTransport;
namespace tensorstore {
namespace {
namespace jb = tensorstore::internal_json_binding;
auto& http_read = internal_metrics::Counter<int64_t>::New(
"/tensorstore/kvstore/http/read", "http driver kvstore::Read calls");
auto& http_batch_read = internal_metrics::Counter<int64_t>::New(
"/tensorstore/kvstore/http/batch_read", "http driver reads after batching");
auto& http_bytes_read = internal_metrics::Counter<int64_t>::New(
"/tensorstore/kvstore/http/bytes_read",
"Bytes read by the http kvstore driver");
ABSL_CONST_INIT internal_log::VerboseFlag http_logging("http_kvstore");
struct HttpRequestConcurrencyResource : public internal::ConcurrencyResource {
static constexpr char id[] = "http_request_concurrency";
};
struct HttpRequestRetries
: public internal::RetriesResource<HttpRequestRetries> {
static constexpr char id[] = "http_request_retries";
};
struct HttpRequestConcurrencyResourceTraits
: public internal::ConcurrencyResourceTraits,
public internal::ContextResourceTraits<HttpRequestConcurrencyResource> {
HttpRequestConcurrencyResourceTraits() : ConcurrencyResourceTraits(32) {}
};
const internal::ContextResourceRegistration<
HttpRequestConcurrencyResourceTraits>
http_request_concurrency_registration;
const internal::ContextResourceRegistration<HttpRequestRetries>
http_request_retries_registration;
bool IsRetriable(const absl::Status& status) {
return (status.code() == absl::StatusCode::kDeadlineExceeded ||
status.code() == absl::StatusCode::kUnavailable);
}
absl::Status ValidateParsedHttpUrl(const internal::ParsedGenericUri& parsed) {
if (parsed.scheme != "http" && parsed.scheme != "https") {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Expected scheme of \"http\" or \"https\" but received: ",
tensorstore::QuoteString(parsed.scheme)));
}
if (!parsed.fragment.empty()) {
return absl::InvalidArgumentError("Fragment identifier not supported");
}
return absl::OkStatus();
}
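// Splits a parsed http/https URL into `base_url` (scheme, authority, and
// query, without the path) and the percent-decoded `path` component.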
void SplitParsedHttpUrl(const internal::ParsedGenericUri& parsed,
std::string& base_url, std::string& path) {
size_t end_of_authority = parsed.authority_and_path.find('/');
std::string_view authority =
parsed.authority_and_path.substr(0, end_of_authority);
std::string_view encoded_path =
(end_of_authority == std::string_view::npos)
? "/"
: parsed.authority_and_path.substr(end_of_authority);
  base_url = tensorstore::StrCat(parsed.scheme, "://", authority,
                                 parsed.query.empty() ? "" : "?", parsed.query);
path = internal::PercentDecode(encoded_path);
}
struct HttpKeyValueStoreSpecData {
std::string base_url;
Context::Resource<HttpRequestConcurrencyResource> request_concurrency;
Context::Resource<HttpRequestRetries> retries;
std::vector<std::string> headers;
constexpr static auto ApplyMembers = [](auto& x, auto f) {
return f(x.base_url, x.request_concurrency, x.retries, x.headers);
};
constexpr static auto default_json_binder = jb::Object(
jb::Member(
"base_url",
jb::Projection<&HttpKeyValueStoreSpecData::base_url>(
jb::Validate([](const auto& options, const std::string* x) {
return ValidateParsedHttpUrl(internal::ParseGenericUri(*x));
}))),
jb::Member("headers",
jb::Projection<&HttpKeyValueStoreSpecData::headers>(
jb::DefaultInitializedValue(jb::Array(jb::Validate(
[](const auto& options, const std::string* x) {
return internal_http::ValidateHttpHeader(*x);
}))))),
jb::Member(
HttpRequestConcurrencyResource::id,
jb::Projection<&HttpKeyValueStoreSpecData::request_concurrency>()),
jb::Member(HttpRequestRetries::id,
jb::Projection<&HttpKeyValueStoreSpecData::retries>()));
std::string GetUrl(std::string_view path) const {
auto parsed = internal::ParseGenericUri(base_url);
    return tensorstore::StrCat(parsed.scheme, "://", parsed.authority_and_path,
absl::StartsWith(path, "/") ? "" : "/",
internal::PercentEncodeUriPath(path),
parsed.query.empty() ? "" : "?", parsed.query);
}
};
class HttpKeyValueStoreSpec
: public internal_kvstore::RegisteredDriverSpec<HttpKeyValueStoreSpec,
HttpKeyValueStoreSpecData> {
public:
static constexpr char id[] = "http";
Future<kvstore::DriverPtr> DoOpen() const override;
Result<std::string> ToUrl(std::string_view path) const override {
return data_.GetUrl(path);
}
absl::Status NormalizeSpec(std::string& path) override {
auto parsed = internal::ParseGenericUri(data_.base_url);
std::string base_url;
std::string new_path;
SplitParsedHttpUrl(parsed, base_url, new_path);
if (path.empty()) {
path = std::move(new_path);
} else if (path[0] != '/') {
internal::AppendPathComponent(new_path, path);
path = std::move(new_path);
} else if (new_path != "/") {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Cannot specify absolute path ", tensorstore::QuoteString(path),
" in conjunction with base URL ",
tensorstore::QuoteString(data_.base_url),
" that includes a path component"));
}
data_.base_url = std::move(base_url);
return absl::OkStatus();
}
};
class HttpKeyValueStore
: public internal_kvstore::RegisteredDriver<HttpKeyValueStore,
HttpKeyValueStoreSpec> {
public:
internal_kvstore_batch::CoalescingOptions GetBatchReadCoalescingOptions()
const {
return internal_kvstore_batch::kDefaultRemoteStorageCoalescingOptions;
}
Future<ReadResult> Read(Key key, ReadOptions options) override;
Future<ReadResult> ReadImpl(Key&& key, ReadOptions&& options);
const Executor& executor() const {
return spec_.request_concurrency->executor;
}
absl::Status GetBoundSpecData(SpecData& spec) const {
spec = spec_;
return absl::OkStatus();
}
std::string DescribeKey(std::string_view key) override {
return spec_.GetUrl(key);
}
HttpKeyValueStoreSpecData spec_;
std::shared_ptr<HttpTransport> transport_;
};
Future<kvstore::DriverPtr> HttpKeyValueStoreSpec::DoOpen() const {
auto driver = internal::MakeIntrusivePtr<HttpKeyValueStore>();
driver->spec_ = data_;
driver->transport_ = internal_http::GetDefaultHttpTransport();
return driver;
}
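// Performs a single kvstore read as an HTTP GET (or HEAD for zero-length
// byte ranges): DoRead issues the request with conditional (if-match /
// if-none-match) and cache-control headers, HandleResult maps the HTTP
// status code and headers to a kvstore::ReadResult, and operator() retries
// retriable failures with exponential backoff.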
struct ReadTask {
IntrusivePtr<HttpKeyValueStore> owner;
std::string url;
kvstore::ReadOptions options;
HttpResponse httpresponse;
absl::Status DoRead() {
HttpRequestBuilder request_builder(
options.byte_range.size() == 0 ? "HEAD" : "GET", url);
for (const auto& header : owner->spec_.headers) {
request_builder.AddHeader(header);
}
if (options.byte_range.size() != 0) {
request_builder.MaybeAddRangeHeader(options.byte_range);
}
request_builder
.MaybeAddStalenessBoundCacheControlHeader(options.staleness_bound)
.EnableAcceptEncoding();
if (StorageGeneration::IsCleanValidValue(
options.generation_conditions.if_equal)) {
request_builder.AddHeader(absl::StrFormat(
"if-match: \"%s\"", StorageGeneration::DecodeString(
options.generation_conditions.if_equal)));
}
if (StorageGeneration::IsCleanValidValue(
options.generation_conditions.if_not_equal)) {
request_builder.AddHeader(
absl::StrFormat("if-none-match: \"%s\"",
StorageGeneration::DecodeString(
options.generation_conditions.if_not_equal)));
}
auto request = request_builder.BuildRequest();
ABSL_LOG_IF(INFO, http_logging) << "[http] Read: " << request;
auto response = owner->transport_->IssueRequest(request, {}).result();
if (!response.ok()) return response.status();
httpresponse = std::move(*response);
http_bytes_read.IncrementBy(httpresponse.payload.size());
ABSL_LOG_IF(INFO, http_logging.Level(1))
<< "[http] Read response: " << httpresponse;
switch (httpresponse.status_code) {
case 412:
case 404:
case 304:
return absl::OkStatus();
}
return HttpResponseCodeToStatus(httpresponse);
}
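  // Maps the HTTP response to a ReadResult.  The effective read timestamp
  // is moved back to the response "date" header when the server's clock is
  // behind the request start time (but never earlier than the requested
  // staleness bound); 204/404 become a missing value, 412 and 304 become
  // unspecified results, and a strong etag of the form "..." becomes the
  // storage generation (weak W/"..." etags yield an invalid generation).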
Result<kvstore::ReadResult> HandleResult(absl::Time start_time) {
absl::Time response_date;
if (auto date_it = httpresponse.headers.find("date");
date_it != httpresponse.headers.end()) {
if (!absl::ParseTime(internal_http::kHttpTimeFormat, date_it->second,
&response_date, nullptr) ||
response_date == absl::InfiniteFuture() ||
response_date == absl::InfinitePast()) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Invalid \"date\" response header: ",
tensorstore::QuoteString(date_it->second)));
}
if (response_date < start_time) {
if (options.staleness_bound < start_time &&
response_date < options.staleness_bound) {
start_time = options.staleness_bound;
} else {
start_time = response_date;
}
}
}
switch (httpresponse.status_code) {
case 204:
case 404:
return kvstore::ReadResult::Missing(start_time);
case 412:
return kvstore::ReadResult::Unspecified(TimestampedStorageGeneration{
StorageGeneration::Unknown(), start_time});
case 304:
return kvstore::ReadResult::Unspecified(TimestampedStorageGeneration{
options.generation_conditions.if_not_equal, start_time});
}
absl::Cord value;
if (options.byte_range.size() != 0) {
ByteRange byte_range;
int64_t total_size;
TENSORSTORE_RETURN_IF_ERROR(internal_http::ValidateResponseByteRange(
httpresponse, options.byte_range, value, byte_range, total_size));
}
StorageGeneration generation = StorageGeneration::Invalid();
{
auto it = httpresponse.headers.find("etag");
if (it != httpresponse.headers.end() && it->second.size() > 2 &&
it->second.front() == '"' && it->second.back() == '"') {
std::string_view etag(it->second);
etag.remove_prefix(1);
etag.remove_suffix(1);
generation = StorageGeneration::FromString(etag);
}
}
return kvstore::ReadResult::Value(
std::move(value),
TimestampedStorageGeneration{std::move(generation), start_time});
}
Result<kvstore::ReadResult> operator()() {
absl::Time start_time;
absl::Status status;
const int max_retries = owner->spec_.retries->max_retries;
int attempt = 0;
for (; attempt < max_retries; attempt++) {
start_time = absl::Now();
status = DoRead();
if (status.ok() || !IsRetriable(status)) break;
auto delay = internal::BackoffForAttempt(
attempt, owner->spec_.retries->initial_delay,
owner->spec_.retries->max_delay,
std::min(absl::Seconds(1), owner->spec_.retries->initial_delay));
ABSL_LOG_IF(INFO, http_logging)
<< "The operation failed and will be automatically retried in "
<< delay << " seconds (attempt " << attempt + 1 << " out of "
<< max_retries << "), caused by: " << status;
absl::SleepFor(delay);
}
if (!status.ok()) {
if (IsRetriable(status)) {
return MaybeAnnotateStatus(
std::move(status),
absl::StrFormat("All %d retry attempts failed", attempt),
absl::StatusCode::kAborted);
}
return status;
}
return HandleResult(start_time);
}
};
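// Read() funnels requests through generic byte-range coalescing, so that
// multiple batched reads of the same key can be served by a single ranged
// HTTP request; ReadImpl issues the actual (possibly coalesced) request.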
Future<kvstore::ReadResult> HttpKeyValueStore::Read(Key key,
ReadOptions options) {
http_read.Increment();
return internal_kvstore_batch::HandleBatchRequestByGenericByteRangeCoalescing(
*this, std::move(key), std::move(options));
}
Future<kvstore::ReadResult> HttpKeyValueStore::ReadImpl(Key&& key,
ReadOptions&& options) {
http_batch_read.Increment();
std::string url = spec_.GetUrl(key);
return MapFuture(executor(), ReadTask{IntrusivePtr<HttpKeyValueStore>(this),
std::move(url), std::move(options)});
}
Result<kvstore::Spec> ParseHttpUrl(std::string_view url) {
auto parsed = internal::ParseGenericUri(url);
TENSORSTORE_RETURN_IF_ERROR(ValidateParsedHttpUrl(parsed));
std::string path;
auto driver_spec = internal::MakeIntrusivePtr<HttpKeyValueStoreSpec>();
SplitParsedHttpUrl(parsed, driver_spec->data_.base_url, path);
driver_spec->data_.request_concurrency =
Context::Resource<HttpRequestConcurrencyResource>::DefaultSpec();
driver_spec->data_.retries =
Context::Resource<HttpRequestRetries>::DefaultSpec();
return {std::in_place, std::move(driver_spec), std::move(path)};
}
}
}
TENSORSTORE_DECLARE_GARBAGE_COLLECTION_NOT_REQUIRED(
tensorstore::HttpKeyValueStore)
TENSORSTORE_DECLARE_GARBAGE_COLLECTION_NOT_REQUIRED(
tensorstore::HttpKeyValueStoreSpecData)
namespace {
const tensorstore::internal_kvstore::DriverRegistration<
tensorstore::HttpKeyValueStoreSpec>
registration;
const tensorstore::internal_kvstore::UrlSchemeRegistration
http_url_scheme_registration{"http", tensorstore::ParseHttpUrl};
const tensorstore::internal_kvstore::UrlSchemeRegistration
https_url_scheme_registration{"https", tensorstore::ParseHttpUrl};
} | #include "tensorstore/kvstore/driver.h"
#include <functional>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/batch.h"
#include "tensorstore/internal/http/curl_transport.h"
#include "tensorstore/internal/http/http_header.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/http/mock_http_transport.h"
#include "tensorstore/internal/queue_testutil.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/test_matchers.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
namespace kvstore = ::tensorstore::kvstore;
using ::tensorstore::Batch;
using ::tensorstore::Future;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Result;
using ::tensorstore::StorageGeneration;
using ::tensorstore::internal::MatchesKvsReadResult;
using ::tensorstore::internal::MatchesKvsReadResultNotFound;
using ::tensorstore::internal_http::ApplyResponseToHandler;
using ::tensorstore::internal_http::HttpRequest;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_http::HttpResponseHandler;
using ::tensorstore::internal_http::HttpTransport;
using ::tensorstore::internal_http::IssueRequestOptions;
using ::tensorstore::internal_http::SetDefaultHttpTransport;
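// Mock transport that records each issued request in a concurrent queue;
// tests pop the request, assert on its URL, method, and headers, and then
// complete it with a canned HttpResponse.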
class MyMockTransport : public HttpTransport {
public:
void IssueRequestWithHandler(const HttpRequest& request,
IssueRequestOptions options,
HttpResponseHandler* response_handler) override {
requests_.push({request, [response_handler](Result<HttpResponse> response) {
ApplyResponseToHandler(response, response_handler);
}});
}
struct Request {
HttpRequest request;
std::function<void(tensorstore::Result<HttpResponse>)> set_result;
};
void Reset() { requests_.pop_all(); }
tensorstore::internal::ConcurrentQueue<Request> requests_;
};
class HttpKeyValueStoreTest : public ::testing::Test {
public:
~HttpKeyValueStoreTest() { mock_transport->Reset(); }
static void SetUpTestSuite() { SetDefaultHttpTransport(mock_transport); }
static void TearDownTestSuite() { SetDefaultHttpTransport(nullptr); }
static std::shared_ptr<MyMockTransport> mock_transport;
};
std::shared_ptr<MyMockTransport> HttpKeyValueStoreTest::mock_transport =
std::make_shared<MyMockTransport>();
TEST(DescribeKeyTest, Basic) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, kvstore::Open("https:
EXPECT_EQ("https:
store.driver->DescribeKey("/my/path/xyz"));
}
TEST_F(HttpKeyValueStoreTest, UnconditionalReadUncachedWithEtag) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, kvstore::Open("https:
auto read_future = kvstore::Read(store, "abc");
auto request = mock_transport->requests_.pop();
EXPECT_EQ("https:
EXPECT_THAT(request.request.headers,
::testing::ElementsAre("cache-control: no-cache"));
request.set_result(
HttpResponse{200, absl::Cord("value"), {{"etag", "\"xyz\""}}});
EXPECT_THAT(read_future.result(),
MatchesKvsReadResult(absl::Cord("value"),
StorageGeneration::FromString("xyz")));
}
TEST_F(HttpKeyValueStoreTest, ReadNotFound) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, kvstore::Open("https:
auto read_future = kvstore::Read(store, "abc");
auto request = mock_transport->requests_.pop();
EXPECT_EQ("https:
EXPECT_THAT(request.request.headers,
::testing::ElementsAre("cache-control: no-cache"));
request.set_result(HttpResponse{404, absl::Cord()});
EXPECT_THAT(read_future.result(), MatchesKvsReadResultNotFound());
}
TEST_F(HttpKeyValueStoreTest, UnconditionalReadWeakEtag) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, kvstore::Open("https:
auto read_future = kvstore::Read(store, "abc");
auto request = mock_transport->requests_.pop();
EXPECT_EQ("https:
EXPECT_THAT(request.request.headers,
::testing::ElementsAre("cache-control: no-cache"));
request.set_result(
HttpResponse{200, absl::Cord("value"), {{"etag", "W/\"xyz\""}}});
EXPECT_THAT(
read_future.result(),
MatchesKvsReadResult(absl::Cord("value"), StorageGeneration::Invalid()));
}
TEST_F(HttpKeyValueStoreTest, ReadByteRange) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, kvstore::Open("https:
kvstore::ReadOptions options;
options.byte_range.inclusive_min = 10;
options.byte_range.exclusive_max = 20;
auto read_future = kvstore::Read(store, "abc", options);
auto request = mock_transport->requests_.pop();
EXPECT_EQ("https:
EXPECT_THAT(request.request.method, "GET");
EXPECT_THAT(request.request.headers,
::testing::UnorderedElementsAre("cache-control: no-cache",
"Range: bytes=10-19"));
request.set_result(HttpResponse{
206, absl::Cord("valueabcde"), {{"content-range", "bytes 10-19/50"}}});
EXPECT_THAT(read_future.result(),
MatchesKvsReadResult(absl::Cord("valueabcde"),
StorageGeneration::Invalid()));
}
TEST_F(HttpKeyValueStoreTest, ReadBatch) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, kvstore::Open("https:
std::vector<Future<kvstore::ReadResult>> futures;
{
auto batch = Batch::New();
{
kvstore::ReadOptions options;
options.byte_range.inclusive_min = 10;
options.byte_range.exclusive_max = 20;
options.batch = batch;
futures.push_back(kvstore::Read(store, "abc", options));
}
{
kvstore::ReadOptions options;
options.byte_range.inclusive_min = 20;
options.byte_range.exclusive_max = 25;
options.batch = batch;
futures.push_back(kvstore::Read(store, "abc", options));
}
}
auto request = mock_transport->requests_.pop();
EXPECT_EQ("https:
EXPECT_THAT(request.request.method, "GET");
EXPECT_THAT(request.request.headers,
::testing::UnorderedElementsAre("cache-control: no-cache",
"Range: bytes=10-24"));
request.set_result(HttpResponse{206,
absl::Cord("valueabcde01234"),
{{"content-range", "bytes 10-24/50"}}});
EXPECT_THAT(futures[0].result(),
MatchesKvsReadResult(absl::Cord("valueabcde"),
StorageGeneration::Invalid()));
EXPECT_THAT(
futures[1].result(),
MatchesKvsReadResult(absl::Cord("01234"), StorageGeneration::Invalid()));
}
TEST_F(HttpKeyValueStoreTest, ReadZeroByteRange) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, kvstore::Open("https:
kvstore::ReadOptions options;
options.byte_range.inclusive_min = 10;
options.byte_range.exclusive_max = 10;
auto read_future = kvstore::Read(store, "abc", options);
auto request = mock_transport->requests_.pop();
EXPECT_EQ("https:
EXPECT_THAT(request.request.headers,
::testing::ElementsAre("cache-control: no-cache"));
request.set_result(HttpResponse{200, absl::Cord(), {}});
EXPECT_THAT(read_future.result(),
MatchesKvsReadResult(absl::Cord(), StorageGeneration::Invalid()));
}
TEST_F(HttpKeyValueStoreTest, ReadWithStalenessBound) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, kvstore::Open("https:
kvstore::ReadOptions options;
options.staleness_bound = absl::Now() - absl::Milliseconds(4900);
auto read_future = kvstore::Read(store, "abc", options);
auto request = mock_transport->requests_.pop();
EXPECT_EQ("https:
EXPECT_THAT(request.request.headers,
::testing::ElementsAre(::testing::AnyOf(
"cache-control: max-age=5", "cache-control: max-age=4",
"cache-control: max-age=3")));
request.set_result(HttpResponse{200, absl::Cord("value")});
EXPECT_THAT(
read_future.result(),
MatchesKvsReadResult(absl::Cord("value"), StorageGeneration::Invalid()));
}
TEST_F(HttpKeyValueStoreTest, IfEqualSatisfied) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, kvstore::Open("https:
kvstore::ReadOptions options;
options.generation_conditions.if_equal = StorageGeneration::FromString("xyz");
auto read_future = kvstore::Read(store, "abc", options);
auto request = mock_transport->requests_.pop();
EXPECT_EQ("https:
EXPECT_THAT(
request.request.headers,
::testing::ElementsAre("cache-control: no-cache", "if-match: \"xyz\""));
request.set_result(HttpResponse{200, absl::Cord("value")});
EXPECT_THAT(read_future.result(), MatchesKvsReadResult(absl::Cord("value")));
}
TEST_F(HttpKeyValueStoreTest, IfEqualNotSatisfied) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, kvstore::Open("https:
kvstore::ReadOptions options;
options.generation_conditions.if_equal = StorageGeneration::FromString("xyz");
auto read_future = kvstore::Read(store, "abc", options);
auto request = mock_transport->requests_.pop();
EXPECT_EQ("https:
EXPECT_THAT(
request.request.headers,
::testing::ElementsAre("cache-control: no-cache", "if-match: \"xyz\""));
request.set_result(HttpResponse{412});
EXPECT_THAT(read_future.result(),
MatchesKvsReadResult(kvstore::ReadResult::kUnspecified,
StorageGeneration::Unknown()));
}
TEST_F(HttpKeyValueStoreTest, IfNotEqualSatisfied) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, kvstore::Open("https:
kvstore::ReadOptions options;
options.generation_conditions.if_not_equal =
StorageGeneration::FromString("xyz");
auto read_future = kvstore::Read(store, "abc", options);
auto request = mock_transport->requests_.pop();
EXPECT_EQ("https:
EXPECT_THAT(request.request.headers,
::testing::ElementsAre("cache-control: no-cache",
"if-none-match: \"xyz\""));
request.set_result(HttpResponse{200, absl::Cord("value")});
EXPECT_THAT(
read_future.result(),
MatchesKvsReadResult(absl::Cord("value"), StorageGeneration::Invalid()));
}
TEST_F(HttpKeyValueStoreTest, IfNotEqualNotSatisfied) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, kvstore::Open("https:
kvstore::ReadOptions options;
options.generation_conditions.if_not_equal =
StorageGeneration::FromString("xyz");
auto read_future = kvstore::Read(store, "abc", options);
auto request = mock_transport->requests_.pop();
EXPECT_EQ("https:
EXPECT_THAT(request.request.headers,
::testing::ElementsAre("cache-control: no-cache",
"if-none-match: \"xyz\""));
request.set_result(HttpResponse{304});
EXPECT_THAT(read_future.result(),
MatchesKvsReadResult(kvstore::ReadResult::kUnspecified,
StorageGeneration::FromString("xyz")));
}
TEST_F(HttpKeyValueStoreTest, Retry) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, kvstore::Open("https:
auto read_future = kvstore::Read(store, "abc");
{
auto request = mock_transport->requests_.pop();
EXPECT_EQ("https:
EXPECT_THAT(request.request.headers,
::testing::ElementsAre("cache-control: no-cache"));
request.set_result(HttpResponse{503, absl::Cord()});
}
{
auto request = mock_transport->requests_.pop();
EXPECT_EQ("https:
EXPECT_THAT(request.request.headers,
::testing::ElementsAre("cache-control: no-cache"));
request.set_result(
HttpResponse{200, absl::Cord("value"), {{"etag", "\"xyz\""}}});
}
EXPECT_THAT(read_future.result(),
MatchesKvsReadResult(absl::Cord("value"),
StorageGeneration::FromString("xyz")));
}
TEST_F(HttpKeyValueStoreTest, RetryMax) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open(
{{"driver", "http"},
{"base_url", "https:
{"context", {{"http_request_retries", {{"max_retries", 1}}}}}})
.result());
auto read_future = kvstore::Read(store, "abc");
{
auto request = mock_transport->requests_.pop();
EXPECT_EQ("https:
EXPECT_THAT(request.request.headers,
::testing::ElementsAre("cache-control: no-cache"));
request.set_result(HttpResponse{503, absl::Cord()});
}
EXPECT_THAT(read_future.result(), MatchesStatus(absl::StatusCode::kAborted));
}
TEST_F(HttpKeyValueStoreTest, Date) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, kvstore::Open("https:
kvstore::ReadOptions options;
options.staleness_bound = absl::InfinitePast();
auto read_future = kvstore::Read(store, "abc", options);
auto response_date = absl::UnixEpoch() + absl::Seconds(100);
auto request = mock_transport->requests_.pop();
EXPECT_EQ("https:
EXPECT_THAT(request.request.headers, ::testing::ElementsAre());
request.set_result(HttpResponse{
200,
absl::Cord("value"),
{{"date", absl::FormatTime(tensorstore::internal_http::kHttpTimeFormat,
response_date, absl::UTCTimeZone())}}});
EXPECT_THAT(
read_future.result(),
MatchesKvsReadResult(absl::Cord("value"), StorageGeneration::Invalid(),
response_date));
}
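// A staleness bound in the recent past is translated into a
// "cache-control: max-age" header, and a response date older than the bound
// is clamped to the bound.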
TEST_F(HttpKeyValueStoreTest, DateSkew) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, kvstore::Open("https:
kvstore::ReadOptions options;
options.staleness_bound = absl::Now() - absl::Milliseconds(5900);
auto read_future = kvstore::Read(store, "abc", options);
auto response_date = absl::UnixEpoch() + absl::Seconds(100);
auto request = mock_transport->requests_.pop();
EXPECT_EQ("https:
EXPECT_THAT(request.request.headers,
::testing::ElementsAre(::testing::AnyOf(
"cache-control: max-age=5", "cache-control: max-age=4")));
request.set_result(HttpResponse{
200,
absl::Cord("value"),
{{"date", absl::FormatTime(tensorstore::internal_http::kHttpTimeFormat,
response_date, absl::UTCTimeZone())}}});
EXPECT_THAT(
read_future.result(),
MatchesKvsReadResult(absl::Cord("value"), StorageGeneration::Invalid(),
options.staleness_bound));
}
TEST_F(HttpKeyValueStoreTest, Query) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open("https:
auto read_future = kvstore::Read(store, "abc");
auto request = mock_transport->requests_.pop();
EXPECT_EQ("https:
EXPECT_THAT(request.request.headers,
::testing::ElementsAre("cache-control: no-cache"));
request.set_result(HttpResponse{200, absl::Cord("value")});
EXPECT_THAT(read_future.result(), MatchesKvsReadResult(absl::Cord("value")));
}
TEST_F(HttpKeyValueStoreTest, InvalidDate) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, kvstore::Open("https:
auto read_future = kvstore::Read(store, "abc");
auto request = mock_transport->requests_.pop();
EXPECT_EQ("https:
EXPECT_THAT(request.request.headers,
::testing::ElementsAre("cache-control: no-cache"));
request.set_result(HttpResponse{200, absl::Cord("value"), {{"date", "xyz"}}});
EXPECT_THAT(read_future.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Invalid \"date\" response header: \"xyz\""));
}
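// Headers listed in the spec are passed through verbatim alongside the
// generated cache-control header; the header name below exercises the full
// set of characters permitted in an HTTP token.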
TEST_F(HttpKeyValueStoreTest, ExtraHeaders) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open({{"driver", "http"},
{"base_url", "https:
{"headers", {"a!#$%&'*+-.^_`|~3X: b\xfe"}}})
.result());
auto read_future = kvstore::Read(store, "abc");
auto request = mock_transport->requests_.pop();
EXPECT_EQ("https:
EXPECT_THAT(request.request.headers,
::testing::ElementsAre("a!#$%&'*+-.^_`|~3X: b\xfe",
"cache-control: no-cache"));
request.set_result(HttpResponse{200, absl::Cord("value")});
EXPECT_THAT(read_future.result(), MatchesKvsReadResult(absl::Cord("value")));
}
TEST(UrlTest, UrlRoundtrip) {
tensorstore::internal::TestKeyValueStoreUrlRoundtrip(
{{"driver", "http"},
{"base_url", "https:
{"path", "/abc"}},
"https:
tensorstore::internal::TestKeyValueStoreUrlRoundtrip(
{{"driver", "http"},
{"base_url", "https:
{"path", "/abc def"}},
"https:
tensorstore::internal::TestKeyValueStoreUrlRoundtrip(
{{"driver", "http"},
{"base_url", "http:
{"path", "/abc def"}},
"http:
tensorstore::internal::TestKeyValueStoreUrlRoundtrip(
{{"driver", "http"},
{"base_url", "https:
{"path", "/abc def"}},
"https:
}
TEST(UrlTest, InvalidUri) {
EXPECT_THAT(kvstore::Spec::FromUrl("http:
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*: Fragment identifier not supported"));
}
TEST(SpecTest, InvalidScheme) {
EXPECT_THAT(
kvstore::Open({{"driver", "http"}, {"base_url", "file:
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(SpecTest, MissingScheme) {
EXPECT_THAT(kvstore::Open({{"driver", "http"}, {"base_url", "abc"}}).result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(SpecTest, InvalidFragment) {
EXPECT_THAT(kvstore::Open({{"driver", "http"},
{"base_url", "https:
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(SpecTest, InvalidHeader) {
EXPECT_THAT(kvstore::Open({{"driver", "http"},
{"base_url", "https:
{"headers", {"a"}}})
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(SpecTest, SpecRoundtrip) {
tensorstore::internal::KeyValueStoreSpecRoundtripOptions options;
options.check_write_read = false;
options.check_data_persists = false;
options.check_data_after_serialization = false;
options.full_spec = {{"driver", "http"},
{"base_url", "https:
{"headers", {"a: b"}},
{"path", "/abc"}};
tensorstore::internal::TestKeyValueStoreSpecRoundtrip(options);
}
TEST(SpecTest, NormalizeSpecRelativePath) {
tensorstore::internal::TestKeyValueStoreSpecRoundtripNormalize(
{{"driver", "http"},
{"base_url", "https:
{"path", "abc"}},
{{"driver", "http"},
{"base_url", "https:
{"path", "/my/path/abc"}});
}
TEST(SpecTest, NormalizeSpecAbsolutePath) {
tensorstore::internal::TestKeyValueStoreSpecRoundtripNormalize(
{{"driver", "http"},
{"base_url", "https:
{"path", "/abc"}},
{{"driver", "http"},
{"base_url", "https:
{"path", "/abc"}});
}
TEST(SpecTest, NormalizeSpecInvalidAbsolutePath) {
EXPECT_THAT(
kvstore::Open({{"driver", "http"},
{"base_url", "https:
{"path", "/abc"}})
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot specify absolute path \"/abc\" in conjunction with "
"base URL \".*\" that includes a path component"));
}
} |
571 | cpp | google/tensorstore | compressor | tensorstore/driver/n5/compressor.cc | tensorstore/driver/zarr/compressor_test.cc | #ifndef TENSORSTORE_DRIVER_N5_COMPRESSOR_H_
#define TENSORSTORE_DRIVER_N5_COMPRESSOR_H_
#include "tensorstore/internal/compression/json_specified_compressor.h"
#include "tensorstore/internal/json_binding/bindable.h"
namespace tensorstore {
namespace internal_n5 {
class Compressor : public internal::JsonSpecifiedCompressor::Ptr {
public:
TENSORSTORE_DECLARE_JSON_DEFAULT_BINDER(
Compressor, internal::JsonSpecifiedCompressor::FromJsonOptions,
internal::JsonSpecifiedCompressor::ToJsonOptions)
};
}
}
#endif
#include "tensorstore/driver/n5/compressor.h"
#include <utility>
#include "absl/base/no_destructor.h"
#include "tensorstore/driver/n5/compressor_registry.h"
#include "tensorstore/internal/compression/json_specified_compressor.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/enum.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_registry.h"
namespace tensorstore {
namespace internal_n5 {
using CompressorRegistry = internal::JsonSpecifiedCompressor::Registry;
CompressorRegistry& GetCompressorRegistry() {
static absl::NoDestructor<CompressorRegistry> registry;
return *registry;
}
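// The "type" member selects the registered compressor; a default (null)
// Compressor maps to "raw", i.e. no compression.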
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(Compressor, [](auto is_loading,
const auto& options,
auto* obj,
::nlohmann::json* j) {
namespace jb = tensorstore::internal_json_binding;
auto& registry = GetCompressorRegistry();
return jb::Object(
jb::Member("type",
jb::MapValue(registry.KeyBinder(),
std::make_pair(Compressor{}, "raw"))),
registry.RegisteredObjectBinder())(is_loading, options, obj, j);
})
}
} | #include "tensorstore/driver/zarr/compressor.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_zarr::Compressor;
TEST(ParseCompressorTest, Null) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto compressor,
Compressor::FromJson(nullptr));
EXPECT_EQ(nullptr, ::nlohmann::json(compressor));
}
TEST(ParseCompressorTest, ZlibSuccess) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto compressor, Compressor::FromJson({{"id", "zlib"}, {"level", 5}}));
EXPECT_EQ((::nlohmann::json{{"id", "zlib"}, {"level", 5}}),
::nlohmann::json(compressor));
}
TEST(ParseCompressorTest, ZlibFailure) {
EXPECT_THAT(
Compressor::FromJson(::nlohmann::json{{"id", "zlib"}, {"level", "a"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"level\": .*"));
}
TEST(ParseCompressorTest, UnsupportedId) {
EXPECT_THAT(
Compressor::FromJson(::nlohmann::json{{"id", "invalid"}, {"level", "a"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"id\": "
"\"invalid\" is not registered"));
}
TEST(ParseCompressorTest, InvalidId) {
EXPECT_THAT(Compressor::FromJson(::nlohmann::json{{"id", 5}, {"level", "a"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"id\": "
"Expected string, but received: 5"));
}
} |
572 | cpp | google/tensorstore | metadata | tensorstore/internal/metrics/metadata.cc | tensorstore/internal/metrics/metadata_test.cc | #ifndef TENSORSTORE_INTERNAL_METRICS_METADATA_H_
#define TENSORSTORE_INTERNAL_METRICS_METADATA_H_
#include <string_view>
namespace tensorstore {
namespace internal_metrics {
struct MetricMetadata {
MetricMetadata() = default;
MetricMetadata(const char* description) : description(description) {}
MetricMetadata(std::string_view description) : description(description) {}
std::string_view description;
};
bool IsValidMetricName(std::string_view name);
bool IsValidMetricLabel(std::string_view name);
}
}
#endif
#include "tensorstore/internal/metrics/metadata.h"
#include <string_view>
#include "absl/strings/ascii.h"
namespace tensorstore {
namespace internal_metrics {
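// A valid metric name is a slash-delimited path such as
// "/tensorstore/cache/hit": it starts with '/', the first component begins
// with a letter, every component is 1-63 characters drawn from
// [A-Za-z0-9_], and the name does not end with '/'.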
bool IsValidMetricName(std::string_view name) {
if (name.size() < 2) return false;
if (name[0] != '/') return false;
if (name[name.size() - 1] == '/') return false;
if (!absl::ascii_isalpha(name[1])) return false;
size_t last_slash = 0;
for (size_t i = 1; i < name.size(); i++) {
const auto ch = name[i];
if (ch == '/') {
if (i - last_slash == 1) return false;
if (i - last_slash > 63) return false;
last_slash = i;
} else if (ch != '_' && !absl::ascii_isalnum(ch)) {
return false;
}
}
return true;
}
bool IsValidMetricLabel(std::string_view name) {
if (name.empty()) return false;
if (!absl::ascii_isalpha(name[0])) return false;
for (auto ch : name) {
if (ch != '_' && !absl::ascii_isalnum(ch)) {
return false;
}
}
return true;
}
}
} | #include "tensorstore/internal/metrics/metadata.h"
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal_metrics::IsValidMetricLabel;
using ::tensorstore::internal_metrics::IsValidMetricName;
TEST(MetadataTest, IsValidMetricName) {
EXPECT_FALSE(IsValidMetricName(""));
EXPECT_FALSE(IsValidMetricName("/"));
EXPECT_FALSE(IsValidMetricName("
EXPECT_FALSE(IsValidMetricName("/foo/"));
EXPECT_FALSE(IsValidMetricName("/foo
EXPECT_FALSE(IsValidMetricName("/_foo"));
EXPECT_FALSE(IsValidMetricName("/foo%"));
EXPECT_FALSE(IsValidMetricName("/foo%"));
EXPECT_FALSE(IsValidMetricName("/foo.bar"));
EXPECT_FALSE(IsValidMetricName("foo_1"));
EXPECT_TRUE(IsValidMetricName("/foo/1_bar/Baz"));
}
TEST(MetadataTest, IsValidMetricLabel) {
EXPECT_FALSE(IsValidMetricLabel(""));
EXPECT_FALSE(IsValidMetricLabel("/"));
EXPECT_FALSE(IsValidMetricLabel("1_bar"));
EXPECT_FALSE(IsValidMetricLabel("_bar"));
EXPECT_FALSE(IsValidMetricLabel("foo/bar"));
EXPECT_FALSE(IsValidMetricLabel("foo-bar"));
EXPECT_FALSE(IsValidMetricLabel("foo.bar"));
EXPECT_TRUE(IsValidMetricLabel("a"));
EXPECT_TRUE(IsValidMetricLabel("foB_1"));
}
} |
573 | cpp | google/tensorstore | bzip2_compressor | tensorstore/internal/compression/bzip2_compressor.cc | tensorstore/driver/n5/bzip2_compressor_test.cc | #ifndef TENSORSTORE_INTERNAL_COMPRESSION_BZIP2_COMPRESSOR_H_
#define TENSORSTORE_INTERNAL_COMPRESSION_BZIP2_COMPRESSOR_H_
#include <cstddef>
#include <memory>
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/internal/compression/json_specified_compressor.h"
namespace tensorstore {
namespace internal {
struct Bzip2Options {
int level = 1;
};
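// "level" is the bzip2 compression level (1-9), which in bzip2 also selects
// the block size in 100 KiB units; the n5 driver exposes it as "blockSize"
// (see the accompanying test).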
class Bzip2Compressor : public internal::JsonSpecifiedCompressor,
public Bzip2Options {
public:
std::unique_ptr<riegeli::Writer> GetWriter(
std::unique_ptr<riegeli::Writer> base_writer,
size_t element_bytes) const override;
virtual std::unique_ptr<riegeli::Reader> GetReader(
std::unique_ptr<riegeli::Reader> base_reader,
size_t element_bytes) const override;
};
}
}
#endif
#include "tensorstore/internal/compression/bzip2_compressor.h"
#include <cstddef>
#include "riegeli/bzip2/bzip2_reader.h"
#include "riegeli/bzip2/bzip2_writer.h"
#include "tensorstore/internal/compression/json_specified_compressor.h"
namespace tensorstore {
namespace internal {
std::unique_ptr<riegeli::Writer> Bzip2Compressor::GetWriter(
std::unique_ptr<riegeli::Writer> base_writer, size_t element_bytes) const {
using Writer = riegeli::Bzip2Writer<std::unique_ptr<riegeli::Writer>>;
Writer::Options options;
options.set_compression_level(level);
return std::make_unique<Writer>(std::move(base_writer), options);
}
std::unique_ptr<riegeli::Reader> Bzip2Compressor::GetReader(
std::unique_ptr<riegeli::Reader> base_reader, size_t element_bytes) const {
using Reader = riegeli::Bzip2Reader<std::unique_ptr<riegeli::Reader>>;
return std::make_unique<Reader>(std::move(base_reader));
}
}
} | #include <cstdint>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/array.h"
#include "tensorstore/driver/n5/compressor.h"
#include "tensorstore/driver/n5/metadata.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::span;
using ::tensorstore::internal_n5::Compressor;
using ::tensorstore::internal_n5::DecodeChunk;
using ::tensorstore::internal_n5::N5Metadata;
TEST(Bzip2CompressionTest, Parse) {
tensorstore::TestJsonBinderRoundTripJsonOnlyInexact<Compressor>({
{{{"type", "bzip2"}}, {{"type", "bzip2"}, {"blockSize", 9}}},
{{{"type", "bzip2"}, {"blockSize", 3}},
{{"type", "bzip2"}, {"blockSize", 3}}},
});
EXPECT_THAT(Compressor::FromJson({{"type", "bzip2"}, {"blockSize", "x"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "bzip2"}, {"blockSize", 0}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "bzip2"}, {"blockSize", 10}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "bzip2"}, {"extra", "x"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(Bzip2CompressionTest, Golden) {
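  // n5 chunk layout: mode (uint16), number of dimensions (uint16), one
  // uint32 per dimension (all big-endian), followed by the compressed
  // payload -- note the bzip2 "BZh9" magic at offset 16.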
const unsigned char kData[] = {
0x00, 0x00,
0x00, 0x03,
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x03,
0x42, 0x5a, 0x68, 0x39,
0x31, 0x41, 0x59, 0x26,
0x53, 0x59, 0x02, 0x3e,
0x0d, 0xd2, 0x00, 0x00,
0x00, 0x40, 0x00, 0x7f,
0x00, 0x20, 0x00, 0x31,
0x0c, 0x01, 0x0d, 0x31,
0xa8, 0x73, 0x94, 0x33,
0x7c, 0x5d, 0xc9, 0x14,
0xe1, 0x42, 0x40, 0x08,
0xf8, 0x37, 0x48,
};
std::string encoded_data(std::begin(kData), std::end(kData));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto metadata,
N5Metadata::FromJson({{"dimensions", {10, 11, 12}},
{"blockSize", {1, 2, 3}},
{"dataType", "uint16"},
{"compression", {{"type", "bzip2"}}}}));
auto array = MakeArray<uint16_t>({{{1, 3, 5}, {2, 4, 6}}});
EXPECT_EQ(array, DecodeChunk(metadata, absl::Cord(encoded_data)));
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto buffer, EncodeChunk(metadata, array));
EXPECT_EQ(array, DecodeChunk(metadata, buffer));
}
}
} |
574 | cpp | google/tensorstore | blosc_compressor | tensorstore/internal/compression/blosc_compressor.cc | tensorstore/driver/n5/blosc_compressor_test.cc | #ifndef TENSORSTORE_INTERNAL_COMPRESSION_BLOSC_COMPRESSOR_H_
#define TENSORSTORE_INTERNAL_COMPRESSION_BLOSC_COMPRESSOR_H_
#include <cstddef>
#include <memory>
#include <string>
#include "absl/status/status.h"
#include <blosc.h>
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/internal/compression/json_specified_compressor.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal {
class BloscCompressor : public JsonSpecifiedCompressor {
public:
std::unique_ptr<riegeli::Writer> GetWriter(
std::unique_ptr<riegeli::Writer> base_writer,
size_t element_bytes) const override;
std::unique_ptr<riegeli::Reader> GetReader(
std::unique_ptr<riegeli::Reader> base_reader,
size_t element_bytes) const override;
static constexpr auto CodecBinder() {
namespace jb = tensorstore::internal_json_binding;
return jb::Validate([](const auto& options, std::string* cname) {
if (cname->find('\0') != std::string::npos ||
blosc_compname_to_compcode(cname->c_str()) == -1) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Expected one of ", blosc_list_compressors(),
" but received: ", QuoteString(*cname)));
}
return absl::OkStatus();
});
}
std::string codec;
int level;
int shuffle;
size_t blocksize;
};
}
}
#endif
#include "tensorstore/internal/compression/blosc_compressor.h"
#include <cstddef>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "riegeli/base/chain.h"
#include "riegeli/bytes/chain_reader.h"
#include "riegeli/bytes/cord_writer.h"
#include "riegeli/bytes/read_all.h"
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/write.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/internal/compression/blosc.h"
namespace tensorstore {
namespace internal {
namespace {
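// Blosc compresses a complete buffer at once, so this writer accumulates
// all data in an in-memory cord and only encodes and forwards it to the
// base writer when Done() is called.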
class BloscDeferredWriter : public riegeli::CordWriter<absl::Cord> {
public:
explicit BloscDeferredWriter(blosc::Options options,
std::unique_ptr<riegeli::Writer> base_writer)
: CordWriter(riegeli::CordWriterBase::Options().set_max_block_size(
std::numeric_limits<size_t>::max())),
options_(std::move(options)),
base_writer_(std::move(base_writer)) {}
void Done() override {
CordWriter::Done();
auto output = blosc::Encode(dest().Flatten(), options_);
if (!output.ok()) {
Fail(std::move(output).status());
return;
}
auto status = riegeli::Write(*std::move(output), std::move(base_writer_));
if (!status.ok()) {
Fail(std::move(status));
return;
}
}
private:
blosc::Options options_;
std::unique_ptr<riegeli::Writer> base_writer_;
};
}
std::unique_ptr<riegeli::Writer> BloscCompressor::GetWriter(
std::unique_ptr<riegeli::Writer> base_writer, size_t element_bytes) const {
return std::make_unique<BloscDeferredWriter>(
blosc::Options{codec.c_str(), level, shuffle, blocksize, element_bytes},
std::move(base_writer));
}
std::unique_ptr<riegeli::Reader> BloscCompressor::GetReader(
std::unique_ptr<riegeli::Reader> base_reader, size_t element_bytes) const {
auto output = riegeli::ReadAll(
std::move(base_reader),
[](absl::string_view input) -> absl::StatusOr<std::string> {
auto output = blosc::Decode(input);
if (!output.ok()) return std::move(output).status();
return *std::move(output);
});
auto reader = std::make_unique<riegeli::ChainReader<riegeli::Chain>>(
output.ok() ? riegeli::Chain(std::move(*output)) : riegeli::Chain());
if (!output.ok()) {
reader->Fail(std::move(output).status());
}
return reader;
}
}
} | #include <cstdint>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/array.h"
#include "tensorstore/driver/n5/compressor.h"
#include "tensorstore/driver/n5/metadata.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::span;
using ::tensorstore::internal_n5::Compressor;
using ::tensorstore::internal_n5::DecodeChunk;
using ::tensorstore::internal_n5::N5Metadata;
TEST(BloscCompressionTest, Parse) {
for (auto codec : {"lz4", "blosclz", "lz4hc", "snappy", "zlib", "zstd"}) {
for (int level = 0; level <= 9; ++level) {
for (int shuffle = 0; shuffle <= 2; ++shuffle) {
for (int blocksize : {0, 256}) {
::nlohmann::json j{{"type", "blosc"},
{"cname", codec},
{"shuffle", shuffle},
{"clevel", level},
{"blocksize", blocksize}};
tensorstore::TestJsonBinderRoundTripJsonOnly<Compressor>({j});
}
}
}
}
EXPECT_THAT(
Compressor::FromJson({{"type", "blosc"}, {"shuffle", 0}, {"clevel", 5}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson(
{{"type", "blosc"}, {"cname", "lz4"}, {"clevel", 5}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson(
{{"type", "blosc"}, {"cname", "lz4"}, {"shuffle", 0}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
Compressor::FromJson(
{{"type", "blosc"}, {"cname", 3}, {"shuffle", 0}, {"clevel", 5}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "blosc"},
{"cname", "invalid"},
{"shuffle", 0},
{"clevel", 5}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "blosc"},
{"cname", "lz4"},
{"shuffle", 0},
{"clevel", -1}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "blosc"},
{"cname", "lz4"},
{"shuffle", 0},
{"clevel", 10}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "blosc"},
{"cname", "lz4"},
{"shuffle", -1},
{"clevel", 3}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
Compressor::FromJson(
{{"type", "blosc"}, {"cname", "lz4"}, {"shuffle", 3}, {"clevel", 3}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "blosc"},
{"cname", "lz4"},
{"shuffle", 0},
{"clevel", 3},
{"extra", 5}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(BloscCompressionTest, RoundTrip) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto metadata, N5Metadata::FromJson({{"dimensions", {10, 11, 12}},
{"blockSize", {1, 2, 3}},
{"dataType", "uint16"},
{"compression",
{{"type", "blosc"},
{"cname", "lz4"},
{"clevel", 5},
{"shuffle", 0}}}}));
auto array = MakeArray<uint16_t>({{{1, 2, 3}, {4, 5, 6}}});
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto buffer, EncodeChunk(metadata, array));
EXPECT_EQ(array, DecodeChunk(metadata, buffer));
}
}
TEST(BloscCompressionTest, Golden) {
const unsigned char kData[] = {
0x00, 0x00,
0x00, 0x03,
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x03,
0x02, 0x01, 0x96, 0x02, 0x0c, 0x00, 0x00, 0x00, 0x0c, 0x00,
0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x02,
0x00, 0x03, 0x00, 0x04, 0x00, 0x05, 0x00, 0x06,
};
std::string encoded_data(std::begin(kData), std::end(kData));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto metadata,
N5Metadata::FromJson({
{"dimensions", {10, 11, 12}},
{"blockSize", {1, 2, 3}},
{"dataType", "uint16"},
{"compression",
{
{"type", "blosc"},
{"clevel", 3},
{"blocksize", 0},
{"cname", "zstd"},
{"shuffle", 2},
}},
}));
auto array = MakeArray<uint16_t>({{{1, 3, 5}, {2, 4, 6}}});
EXPECT_EQ(array, DecodeChunk(metadata, absl::Cord(encoded_data)));
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto buffer, EncodeChunk(metadata, array));
EXPECT_EQ(array, DecodeChunk(metadata, buffer));
}
}
} |
575 | cpp | google/tensorstore | dtype | tensorstore/driver/zarr/dtype.cc | tensorstore/driver/zarr/dtype_test.cc | #ifndef TENSORSTORE_DRIVER_ZARR_DTYPE_H_
#define TENSORSTORE_DRIVER_ZARR_DTYPE_H_
#include <nlohmann/json.hpp>
#include "tensorstore/data_type.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/util/endian.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_zarr {
struct ZarrDType {
struct BaseDType {
std::string encoded_dtype;
DataType dtype;
tensorstore::endian endian;
std::vector<Index> flexible_shape;
};
struct Field : public BaseDType {
std::vector<Index> outer_shape;
std::string name;
std::vector<Index> field_shape;
Index num_inner_elements;
Index byte_offset;
Index num_bytes;
};
bool has_fields;
std::vector<Field> fields;
Index bytes_per_outer_element;
TENSORSTORE_DECLARE_JSON_DEFAULT_BINDER(ZarrDType,
internal_json_binding::NoOptions)
friend void to_json(::nlohmann::json& out,
const ZarrDType& dtype);
};
Result<ZarrDType> ParseDType(const ::nlohmann::json& value);
absl::Status ValidateDType(ZarrDType& dtype);
Result<ZarrDType::BaseDType> ParseBaseDType(std::string_view dtype);
Result<ZarrDType::BaseDType> ChooseBaseDType(DataType dtype);
}
}
#endif
#include "tensorstore/driver/zarr/dtype.h"
#include <stddef.h>
#include "absl/base/optimization.h"
#include "tensorstore/data_type.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/util/extents.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_zarr {
constexpr char kDtypeBfloat16[] = "bfloat16";
constexpr char kDtypeFloat8e4m3fn[] = "float8_e4m3fn";
constexpr char kDtypeFloat8e4m3fnuz[] = "float8_e4m3fnuz";
constexpr char kDtypeFloat8e4m3b11fnuz[] = "float8_e4m3b11fnuz";
constexpr char kDtypeFloat8e5m2[] = "float8_e5m2";
constexpr char kDtypeFloat8e5m2fnuz[] = "float8_e5m2fnuz";
constexpr char kDtypeInt4[] = "int4";
Result<ZarrDType::BaseDType> ParseBaseDType(std::string_view dtype) {
using D = ZarrDType::BaseDType;
if (dtype == kDtypeBfloat16) {
return D{std::string(dtype), dtype_v<::tensorstore::dtypes::bfloat16_t>,
endian::little};
}
if (dtype == kDtypeFloat8e4m3fn) {
return D{std::string(dtype),
dtype_v<::tensorstore::dtypes::float8_e4m3fn_t>, endian::little};
}
if (dtype == kDtypeFloat8e4m3fnuz) {
return D{std::string(dtype),
dtype_v<::tensorstore::dtypes::float8_e4m3fnuz_t>, endian::little};
}
if (dtype == kDtypeFloat8e4m3b11fnuz) {
return D{std::string(dtype),
dtype_v<::tensorstore::dtypes::float8_e4m3b11fnuz_t>,
endian::little};
}
if (dtype == kDtypeFloat8e5m2) {
return D{std::string(dtype), dtype_v<::tensorstore::dtypes::float8_e5m2_t>,
endian::little};
}
if (dtype == kDtypeFloat8e5m2fnuz) {
return D{std::string(dtype),
dtype_v<::tensorstore::dtypes::float8_e5m2fnuz_t>, endian::little};
}
if (dtype == kDtypeInt4) {
return D{std::string(dtype), dtype_v<::tensorstore::dtypes::int4_t>,
endian::little};
}
if (dtype.size() < 3) goto error;
{
const char endian_indicator = dtype[0];
const char type_indicator = dtype[1];
const std::string_view suffix = dtype.substr(2);
endian endian_value;
switch (endian_indicator) {
case '<':
endian_value = endian::little;
break;
case '>':
endian_value = endian::big;
break;
case '|':
endian_value = endian::native;
break;
default:
goto error;
}
switch (type_indicator) {
case 'b':
if (suffix != "1") goto error;
ABSL_FALLTHROUGH_INTENDED;
case 'S':
case 'V':
endian_value = endian::native;
break;
case 'i':
case 'u':
if (endian_indicator == '|') {
if (suffix != "1") goto error;
endian_value = endian::native;
break;
} else if (suffix == "1") {
endian_value = endian::native;
break;
}
[[fallthrough]];
case 'f':
case 'c':
case 'm':
case 'M':
if (endian_indicator == '|') {
goto error;
}
break;
}
switch (type_indicator) {
case 'b':
return D{std::string(dtype), dtype_v<bool>, endian::native};
case 'i':
if (suffix == "1") {
return D{std::string(dtype), dtype_v<int8_t>, endian_value};
}
if (suffix == "2") {
return D{std::string(dtype), dtype_v<int16_t>, endian_value};
}
if (suffix == "4") {
return D{std::string(dtype), dtype_v<int32_t>, endian_value};
}
if (suffix == "8") {
return D{std::string(dtype), dtype_v<int64_t>, endian_value};
}
goto error;
case 'u':
if (suffix == "1") {
return D{std::string(dtype), dtype_v<uint8_t>, endian_value};
}
if (suffix == "2") {
return D{std::string(dtype), dtype_v<uint16_t>, endian_value};
}
if (suffix == "4") {
return D{std::string(dtype), dtype_v<uint32_t>, endian_value};
}
if (suffix == "8") {
return D{std::string(dtype), dtype_v<uint64_t>, endian_value};
}
goto error;
case 'f':
if (suffix == "2") {
return D{std::string(dtype),
dtype_v<::tensorstore::dtypes::float16_t>, endian_value};
}
if (suffix == "4") {
return D{std::string(dtype),
dtype_v<::tensorstore::dtypes::float32_t>, endian_value};
}
if (suffix == "8") {
return D{std::string(dtype),
dtype_v<::tensorstore::dtypes::float64_t>, endian_value};
}
goto error;
case 'c':
if (suffix == "8") {
return D{std::string(dtype),
dtype_v<::tensorstore::dtypes::complex64_t>, endian_value};
}
if (suffix == "16") {
return D{std::string(dtype),
dtype_v<::tensorstore::dtypes::complex128_t>, endian_value};
}
goto error;
case 'S':
case 'V': {
Index num_elements = 0;
for (char c : suffix) {
if (internal::MulOverflow(num_elements, Index(10), &num_elements))
goto error;
if (c < '0' || c > '9') goto error;
if (internal::AddOverflow(num_elements, Index(c - '0'),
&num_elements))
goto error;
}
return D{std::string(dtype),
(type_indicator == 'S')
? DataType(dtype_v<::tensorstore::dtypes::char_t>)
: DataType(dtype_v<::tensorstore::dtypes::byte_t>),
endian::native,
{num_elements}};
}
}
}
error:
return absl::InvalidArgumentError(
tensorstore::StrCat("Unsupported zarr dtype: ", QuoteString(dtype)));
}
namespace {
Result<ZarrDType> ParseDTypeNoDerived(const nlohmann::json& value) {
ZarrDType out;
if (value.is_string()) {
out.has_fields = false;
out.fields.resize(1);
TENSORSTORE_ASSIGN_OR_RETURN(
static_cast<ZarrDType::BaseDType&>(out.fields[0]),
ParseBaseDType(value.get<std::string>()));
return out;
}
out.has_fields = true;
auto parse_result = internal_json::JsonParseArray(
value,
[&](std::ptrdiff_t size) {
out.fields.resize(size);
return absl::OkStatus();
},
[&](const ::nlohmann::json& x, std::ptrdiff_t field_i) {
auto& field = out.fields[field_i];
return internal_json::JsonParseArray(
x,
[&](std::ptrdiff_t size) {
if (size < 2 || size > 3) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Expected array of size 2 or 3, but received: ", x.dump()));
}
return absl::OkStatus();
},
[&](const ::nlohmann::json& v, std::ptrdiff_t i) {
switch (i) {
case 0:
if (internal_json::JsonRequireValueAs(v, &field.name).ok()) {
if (!field.name.empty()) return absl::OkStatus();
}
return absl::InvalidArgumentError(tensorstore::StrCat(
"Expected non-empty string, but received: ", v.dump()));
case 1: {
std::string dtype_string;
TENSORSTORE_RETURN_IF_ERROR(
internal_json::JsonRequireValueAs(v, &dtype_string));
TENSORSTORE_ASSIGN_OR_RETURN(
static_cast<ZarrDType::BaseDType&>(field),
ParseBaseDType(dtype_string));
return absl::OkStatus();
}
case 2: {
return internal_json::JsonParseArray(
v,
[&](std::ptrdiff_t size) {
field.outer_shape.resize(size);
return absl::OkStatus();
},
[&](const ::nlohmann::json& x, std::ptrdiff_t j) {
return internal_json::JsonRequireInteger(
x, &field.outer_shape[j], true, 1,
kInfIndex);
});
}
default:
ABSL_UNREACHABLE();
}
});
});
if (!parse_result.ok()) return parse_result;
return out;
}
}
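// Checks field names for uniqueness and derives the byte layout of each
// field: field_shape is outer_shape followed by flexible_shape, and element
// counts, byte sizes, and offsets are accumulated into
// bytes_per_outer_element with overflow checks.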
absl::Status ValidateDType(ZarrDType& dtype) {
dtype.bytes_per_outer_element = 0;
for (size_t field_i = 0; field_i < dtype.fields.size(); ++field_i) {
auto& field = dtype.fields[field_i];
if (std::any_of(
dtype.fields.begin(), dtype.fields.begin() + field_i,
[&](const ZarrDType::Field& f) { return f.name == field.name; })) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Field name ", QuoteString(field.name), " occurs more than once"));
}
field.field_shape.resize(field.flexible_shape.size() +
field.outer_shape.size());
std::copy(field.flexible_shape.begin(), field.flexible_shape.end(),
std::copy(field.outer_shape.begin(), field.outer_shape.end(),
field.field_shape.begin()));
field.num_inner_elements = ProductOfExtents(span(field.field_shape));
if (field.num_inner_elements == std::numeric_limits<Index>::max()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Product of dimensions ", span(field.field_shape), " is too large"));
}
if (internal::MulOverflow(field.num_inner_elements,
static_cast<Index>(field.dtype->size),
&field.num_bytes)) {
return absl::InvalidArgumentError("Field size in bytes is too large");
}
field.byte_offset = dtype.bytes_per_outer_element;
if (internal::AddOverflow(dtype.bytes_per_outer_element, field.num_bytes,
&dtype.bytes_per_outer_element)) {
return absl::InvalidArgumentError(
"Total number of bytes per outer array element is too large");
}
}
return absl::OkStatus();
}
Result<ZarrDType> ParseDType(const nlohmann::json& value) {
TENSORSTORE_ASSIGN_OR_RETURN(ZarrDType dtype, ParseDTypeNoDerived(value));
TENSORSTORE_RETURN_IF_ERROR(ValidateDType(dtype));
return dtype;
}
void to_json(::nlohmann::json& out, const ZarrDType::Field& field) {
using array_t = ::nlohmann::json::array_t;
if (field.outer_shape.empty()) {
out = array_t{field.name, field.encoded_dtype};
} else {
out = array_t{field.name, field.encoded_dtype, field.outer_shape};
}
}
void to_json(::nlohmann::json& out,
const ZarrDType& dtype) {
if (!dtype.has_fields) {
out = dtype.fields[0].encoded_dtype;
} else {
out = dtype.fields;
}
}
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(ZarrDType, [](auto is_loading,
const auto& options,
auto* obj, auto* j) {
if constexpr (is_loading) {
TENSORSTORE_ASSIGN_OR_RETURN(*obj, ParseDType(*j));
} else {
to_json(*j, *obj);
}
return absl::OkStatus();
})
char EndianIndicator(tensorstore::endian e) {
return e == tensorstore::endian::little ? '<' : '>';
}
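// Chooses a canonical zarr encoding for a tensorstore data type: native
// endianness for multi-byte numeric types, a '|' prefix for single-byte
// types, and the named encodings for the low-precision float/int types.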
Result<ZarrDType::BaseDType> ChooseBaseDType(DataType dtype) {
ZarrDType::BaseDType base_dtype;
base_dtype.endian = endian::native;
base_dtype.dtype = dtype;
const auto set_typestr = [&](std::string_view typestr, int size) {
if (size > 1) {
base_dtype.encoded_dtype = tensorstore::StrCat(
EndianIndicator(base_dtype.endian), typestr, size);
} else {
base_dtype.encoded_dtype = tensorstore::StrCat("|", typestr, size);
}
};
switch (dtype.id()) {
case DataTypeId::bool_t:
set_typestr("b", 1);
break;
case DataTypeId::uint8_t:
set_typestr("u", 1);
break;
case DataTypeId::uint16_t:
set_typestr("u", 2);
break;
case DataTypeId::uint32_t:
set_typestr("u", 4);
break;
case DataTypeId::uint64_t:
set_typestr("u", 8);
break;
case DataTypeId::int4_t:
base_dtype.endian = endian::little;
base_dtype.encoded_dtype = kDtypeInt4;
break;
case DataTypeId::int8_t:
set_typestr("i", 1);
break;
case DataTypeId::int16_t:
set_typestr("i", 2);
break;
case DataTypeId::int32_t:
set_typestr("i", 4);
break;
case DataTypeId::int64_t:
set_typestr("i", 8);
break;
case DataTypeId::float8_e4m3fn_t:
base_dtype.endian = endian::little;
base_dtype.encoded_dtype = kDtypeFloat8e4m3fn;
break;
case DataTypeId::float8_e4m3fnuz_t:
base_dtype.endian = endian::little;
base_dtype.encoded_dtype = kDtypeFloat8e4m3fnuz;
break;
case DataTypeId::float8_e4m3b11fnuz_t:
base_dtype.endian = endian::little;
base_dtype.encoded_dtype = kDtypeFloat8e4m3b11fnuz;
break;
case DataTypeId::float8_e5m2_t:
base_dtype.endian = endian::little;
base_dtype.encoded_dtype = kDtypeFloat8e5m2;
break;
case DataTypeId::float8_e5m2fnuz_t:
base_dtype.endian = endian::little;
base_dtype.encoded_dtype = kDtypeFloat8e5m2fnuz;
break;
case DataTypeId::float16_t:
set_typestr("f", 2);
break;
case DataTypeId::bfloat16_t:
base_dtype.endian = endian::little;
base_dtype.encoded_dtype = kDtypeBfloat16;
break;
case DataTypeId::float32_t:
set_typestr("f", 4);
break;
case DataTypeId::float64_t:
set_typestr("f", 8);
break;
case DataTypeId::complex64_t:
set_typestr("c", 8);
break;
case DataTypeId::complex128_t:
set_typestr("c", 16);
break;
default:
return absl::InvalidArgumentError(
tensorstore::StrCat("Data type not supported: ", dtype));
}
return base_dtype;
}
}
} | #include "tensorstore/driver/zarr/dtype.h"
#include <stdint.h>
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/data_type.h"
#include "tensorstore/driver/zarr/metadata_testutil.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/endian.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::DataType;
using ::tensorstore::dtype_v;
using ::tensorstore::endian;
using ::tensorstore::Index;
using ::tensorstore::kInfIndex;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_zarr::ChooseBaseDType;
using ::tensorstore::internal_zarr::ParseBaseDType;
using ::tensorstore::internal_zarr::ParseDType;
using ::tensorstore::internal_zarr::ZarrDType;
void CheckBaseDType(std::string dtype, DataType r, endian e,
std::vector<Index> flexible_shape) {
EXPECT_THAT(ParseBaseDType(dtype), ::testing::Optional(ZarrDType::BaseDType{
dtype, r, e, flexible_shape}))
<< dtype;
}
TEST(ParseBaseDType, Success) {
CheckBaseDType("|b1", dtype_v<bool>, endian::native, {});
CheckBaseDType("<b1", dtype_v<bool>, endian::native, {});
CheckBaseDType(">b1", dtype_v<bool>, endian::native, {});
CheckBaseDType("|S150", dtype_v<char>, endian::native, {150});
CheckBaseDType(">S150", dtype_v<char>, endian::native, {150});
CheckBaseDType("<S150", dtype_v<char>, endian::native, {150});
CheckBaseDType("|S9223372036854775807", dtype_v<char>, endian::native,
{9223372036854775807});
CheckBaseDType("|V150", dtype_v<std::byte>, endian::native, {150});
CheckBaseDType("<V150", dtype_v<std::byte>, endian::native, {150});
CheckBaseDType(">V150", dtype_v<std::byte>, endian::native, {150});
CheckBaseDType("|i1", dtype_v<std::int8_t>, endian::native, {});
CheckBaseDType("<i1", dtype_v<std::int8_t>, endian::native, {});
CheckBaseDType(">i1", dtype_v<std::int8_t>, endian::native, {});
CheckBaseDType("|u1", dtype_v<std::uint8_t>, endian::native, {});
CheckBaseDType("<u1", dtype_v<std::uint8_t>, endian::native, {});
CheckBaseDType(">u1", dtype_v<std::uint8_t>, endian::native, {});
CheckBaseDType("<i2", dtype_v<std::int16_t>, endian::little, {});
CheckBaseDType("<i4", dtype_v<std::int32_t>, endian::little, {});
CheckBaseDType("<i8", dtype_v<std::int64_t>, endian::little, {});
CheckBaseDType("<u2", dtype_v<std::uint16_t>, endian::little, {});
CheckBaseDType("<u4", dtype_v<std::uint32_t>, endian::little, {});
CheckBaseDType("<u8", dtype_v<std::uint64_t>, endian::little, {});
CheckBaseDType(">i2", dtype_v<std::int16_t>, endian::big, {});
CheckBaseDType(">i4", dtype_v<std::int32_t>, endian::big, {});
CheckBaseDType(">i8", dtype_v<std::int64_t>, endian::big, {});
CheckBaseDType(">u2", dtype_v<std::uint16_t>, endian::big, {});
CheckBaseDType(">u4", dtype_v<std::uint32_t>, endian::big, {});
CheckBaseDType(">u8", dtype_v<std::uint64_t>, endian::big, {});
CheckBaseDType("float8_e4m3fn", dtype_v<tensorstore::dtypes::float8_e4m3fn_t>,
endian::little, {});
CheckBaseDType("float8_e4m3fnuz",
dtype_v<tensorstore::dtypes::float8_e4m3fnuz_t>,
endian::little, {});
CheckBaseDType("float8_e4m3b11fnuz",
dtype_v<tensorstore::dtypes::float8_e4m3b11fnuz_t>,
endian::little, {});
CheckBaseDType("float8_e5m2", dtype_v<tensorstore::dtypes::float8_e5m2_t>,
endian::little, {});
CheckBaseDType("float8_e5m2fnuz",
dtype_v<tensorstore::dtypes::float8_e5m2fnuz_t>,
endian::little, {});
CheckBaseDType("<f2", dtype_v<tensorstore::dtypes::float16_t>, endian::little,
{});
CheckBaseDType("bfloat16", dtype_v<tensorstore::dtypes::bfloat16_t>,
endian::little, {});
CheckBaseDType("<f4", dtype_v<tensorstore::dtypes::float32_t>, endian::little,
{});
CheckBaseDType("<f8", dtype_v<tensorstore::dtypes::float64_t>, endian::little,
{});
CheckBaseDType(">f2", dtype_v<tensorstore::dtypes::float16_t>, endian::big,
{});
CheckBaseDType(">f4", dtype_v<tensorstore::dtypes::float32_t>, endian::big,
{});
CheckBaseDType(">f8", dtype_v<tensorstore::dtypes::float64_t>, endian::big,
{});
CheckBaseDType("<c8", dtype_v<tensorstore::dtypes::complex64_t>,
endian::little, {});
CheckBaseDType("<c16", dtype_v<tensorstore::dtypes::complex128_t>,
endian::little, {});
CheckBaseDType(">c8", dtype_v<tensorstore::dtypes::complex64_t>, endian::big,
{});
CheckBaseDType(">c16", dtype_v<tensorstore::dtypes::complex128_t>,
endian::big, {});
}
TEST(ParseBaseDType, Failure) {
EXPECT_THAT(ParseBaseDType(""),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Unsupported zarr dtype: \"\""));
EXPECT_THAT(ParseBaseDType("|f4"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("|f8"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("|c8"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("|c16"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("|b2"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("|i2"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("<i9"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("<u9"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("<S"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("|S999999999999999999999999999"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("|S9223372036854775808"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("|Sa"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("|S "),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("<f5"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("<c5"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("<m8"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("<M8"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(ParseBaseDType("<X5"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
void CheckDType(const ::nlohmann::json& json, const ZarrDType& expected) {
SCOPED_TRACE(json.dump());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto dtype, ParseDType(json));
EXPECT_EQ(expected, dtype);
EXPECT_EQ(json, ::nlohmann::json(dtype));
}
TEST(ParseDType, SimpleStringBool) {
CheckDType("|b1", ZarrDType{
false,
{
{{
"|b1",
dtype_v<bool>,
endian::native,
{},
},
{},
"",
{},
1,
0,
1},
},
1,
});
}
TEST(ParseDType, SingleNamedFieldChar) {
CheckDType(::nlohmann::json::array_t{{"x", "|S10"}},
ZarrDType{
true,
{
{{
"|S10",
dtype_v<char>,
endian::native,
{10},
},
{},
"x",
{10},
10,
0,
10},
},
10,
});
}
TEST(ParseDType, TwoNamedFieldsCharAndInt) {
CheckDType(
::nlohmann::json::array_t{{"x", "|S10", {2, 3}}, {"y", "<i2", {5}}},
ZarrDType{
true,
{
{{
"|S10",
dtype_v<char>,
endian::native,
{10},
},
{2, 3},
"x",
{2, 3, 10},
10 * 2 * 3,
0,
10 * 2 * 3},
{{
"<i2",
dtype_v<std::int16_t>,
endian::little,
{},
},
{5},
"y",
{5},
5,
10 * 2 * 3,
2 * 5},
},
10 * 2 * 3 + 2 * 5,
});
}
TEST(ParseDType, FieldSpecTooShort) {
EXPECT_THAT(ParseDType(::nlohmann::json::array_t{{"x"}}),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Error parsing value at position 0: "
"Expected array of size 2 or 3, but received: \\[\"x\"\\]"));
}
TEST(ParseDType, FieldSpecTooLong) {
EXPECT_THAT(ParseDType(::nlohmann::json::array_t{{"x", "<i2", {2, 3}, 5}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing value at position 0: "
"Expected array of size 2 or 3, but received: "
"\\[\"x\",\"<i2\",\\[2,3\\],5\\]"));
}
TEST(ParseDType, InvalidFieldName) {
EXPECT_THAT(ParseDType(::nlohmann::json::array_t{{3, "<i2"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing value at position 0: "
"Error parsing value at position 0: "
"Expected non-empty string, but received: 3"));
}
TEST(ParseDType, EmptyFieldName) {
EXPECT_THAT(ParseDType(::nlohmann::json::array_t{{"", "<i2"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing value at position 0: "
"Error parsing value at position 0: "
"Expected non-empty string, but received: \"\""));
}
TEST(ParseDType, DuplicateFieldName) {
EXPECT_THAT(ParseDType(::nlohmann::json::array_t{{"x", "<i2"}, {"x", "<u2"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Field name \"x\" occurs more than once"));
}
TEST(ParseDType, NonStringFieldBaseDType) {
EXPECT_THAT(ParseDType(::nlohmann::json::array_t{{"x", 3}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing value at position 0: "
"Error parsing value at position 1: "
"Expected string, but received: 3"));
}
TEST(ParseDType, InvalidFieldBaseDType) {
EXPECT_THAT(ParseDType(::nlohmann::json::array_t{{"x", "<X2"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing value at position 0: "
"Error parsing value at position 1: "
"Unsupported zarr dtype: \"<X2\""));
}
TEST(ParseDType, ProductOfDimensionsOverflow) {
EXPECT_THAT(ParseDType(::nlohmann::json::array_t{
{"x", "|i1", {kInfIndex, kInfIndex}}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Product of dimensions .* is too large"));
}
TEST(ParseDType, FieldSizeInBytesOverflow) {
EXPECT_THAT(ParseDType(::nlohmann::json::array_t{{"x", "<f8", {kInfIndex}}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Field size in bytes is too large"));
}
TEST(ParseDType, BytesPerOuterElementOverflow) {
EXPECT_THAT(
ParseDType(::nlohmann::json::array_t{{"x", "<i2", {kInfIndex}},
{"y", "<i2", {kInfIndex}}}),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Total number of bytes per outer array element is too large"));
}
TEST(ChooseBaseDTypeTest, RoundTrip) {
constexpr tensorstore::DataType kSupportedDataTypes[] = {
dtype_v<bool>,
dtype_v<uint8_t>,
dtype_v<uint16_t>,
dtype_v<uint32_t>,
dtype_v<uint64_t>,
dtype_v<int8_t>,
dtype_v<int16_t>,
dtype_v<int32_t>,
dtype_v<int64_t>,
dtype_v<::tensorstore::dtypes::float8_e4m3fn_t>,
dtype_v<::tensorstore::dtypes::float8_e4m3fnuz_t>,
dtype_v<::tensorstore::dtypes::float8_e4m3b11fnuz_t>,
dtype_v<::tensorstore::dtypes::float8_e5m2_t>,
dtype_v<::tensorstore::dtypes::float8_e5m2fnuz_t>,
dtype_v<::tensorstore::dtypes::float16_t>,
dtype_v<::tensorstore::dtypes::bfloat16_t>,
dtype_v<::tensorstore::dtypes::float32_t>,
dtype_v<::tensorstore::dtypes::float64_t>,
dtype_v<::tensorstore::dtypes::complex64_t>,
dtype_v<::tensorstore::dtypes::complex128_t>,
};
for (auto dtype : kSupportedDataTypes) {
SCOPED_TRACE(tensorstore::StrCat("dtype=", dtype));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto base_zarr_dtype,
ChooseBaseDType(dtype));
EXPECT_EQ(dtype, base_zarr_dtype.dtype);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto parsed, ParseBaseDType(base_zarr_dtype.encoded_dtype));
EXPECT_EQ(dtype, parsed.dtype);
EXPECT_EQ(base_zarr_dtype.endian, parsed.endian);
EXPECT_EQ(base_zarr_dtype.flexible_shape, parsed.flexible_shape);
EXPECT_EQ(base_zarr_dtype.encoded_dtype, parsed.encoded_dtype);
}
}
TEST(ChooseBaseDTypeTest, Invalid) {
struct X {};
EXPECT_THAT(ChooseBaseDType(dtype_v<X>),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Data type not supported: .*"));
EXPECT_THAT(ChooseBaseDType(dtype_v<::tensorstore::dtypes::string_t>),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Data type not supported: string"));
}
} |
576 | cpp | google/tensorstore | zstd_compressor | tensorstore/internal/compression/zstd_compressor.cc | tensorstore/driver/n5/zstd_compressor_test.cc | #ifndef TENSORSTORE_INTERNAL_COMPRESSION_ZSTD_COMPRESSOR_H_
#define TENSORSTORE_INTERNAL_COMPRESSION_ZSTD_COMPRESSOR_H_
#include <cstddef>
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/internal/compression/json_specified_compressor.h"
namespace tensorstore {
namespace internal {
struct ZstdOptions {
int level = 0;
};
class ZstdCompressor : public JsonSpecifiedCompressor, public ZstdOptions {
public:
std::unique_ptr<riegeli::Writer> GetWriter(
std::unique_ptr<riegeli::Writer> base_writer,
size_t element_bytes) const override;
virtual std::unique_ptr<riegeli::Reader> GetReader(
std::unique_ptr<riegeli::Reader> base_reader,
size_t element_bytes) const override;
};
}
}
#endif
#include "tensorstore/internal/compression/zstd_compressor.h"
#include <cstddef>
#include "riegeli/zstd/zstd_reader.h"
#include "riegeli/zstd/zstd_writer.h"
#include "tensorstore/internal/compression/json_specified_compressor.h"
namespace tensorstore {
namespace internal {
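// The configured compression level is applied directly to the underlying
// riegeli zstd writer; decoding needs no options, and element_bytes is
// unused by this codec.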
std::unique_ptr<riegeli::Writer> ZstdCompressor::GetWriter(
std::unique_ptr<riegeli::Writer> base_writer, size_t element_bytes) const {
using Writer = riegeli::ZstdWriter<std::unique_ptr<riegeli::Writer>>;
Writer::Options options;
options.set_compression_level(level);
return std::make_unique<Writer>(std::move(base_writer), options);
}
std::unique_ptr<riegeli::Reader> ZstdCompressor::GetReader(
std::unique_ptr<riegeli::Reader> base_reader, size_t element_bytes) const {
using Reader = riegeli::ZstdReader<std::unique_ptr<riegeli::Reader>>;
return std::make_unique<Reader>(std::move(base_reader));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/driver/n5/compressor.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_n5::Compressor;
TEST(ZstdCompressorTest, SmallRoundtrip) {
auto compressor =
Compressor::FromJson({{"type", "zstd"}, {"level", 6}}).value();
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result, decode_result;
TENSORSTORE_ASSERT_OK(compressor->Encode(input, &encode_result, 1));
TENSORSTORE_ASSERT_OK(compressor->Decode(encode_result, &decode_result, 1));
EXPECT_EQ(input, decode_result);
}
TEST(ZstdCompressorTest, DefaultLevel) {
auto compressor1 = Compressor::FromJson({{"type", "zstd"}}).value();
auto compressor2 =
Compressor::FromJson({{"type", "zstd"}, {"level", 1}}).value();
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result1, encode_result2;
TENSORSTORE_ASSERT_OK(compressor1->Encode(input, &encode_result1, 1));
TENSORSTORE_ASSERT_OK(compressor2->Encode(input, &encode_result2, 1));
EXPECT_EQ(encode_result1, encode_result2);
}
TEST(ZstdCompressorTest, NonDefaultLevel) {
auto compressor =
Compressor::FromJson({{"type", "zstd"}, {"level", 9}}).value();
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result;
TENSORSTORE_ASSERT_OK(compressor->Encode(input, &encode_result, 1));
absl::Cord decode_result;
TENSORSTORE_ASSERT_OK(compressor->Decode(encode_result, &decode_result, 1));
EXPECT_EQ(input, decode_result);
}
TEST(ZstdCompressorTest, InvalidParameter) {
EXPECT_THAT(Compressor::FromJson({{"type", "zstd"}, {"level", "6"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"level\": .*"));
EXPECT_THAT(Compressor::FromJson({{"type", "zstd"}, {"level", -131073}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"level\": .*"));
EXPECT_THAT(Compressor::FromJson({{"type", "zstd"}, {"level", 23}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"level\": .*"));
EXPECT_THAT(Compressor::FromJson({{"type", "zstd"}, {"foo", 10}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Object includes extra members: \"foo\""));
}
TEST(ZstdCompressorTest, ToJson) {
auto compressor =
Compressor::FromJson({{"type", "zstd"}, {"level", 5}}).value();
EXPECT_EQ(nlohmann::json({{"type", "zstd"}, {"level", 5}}),
compressor.ToJson());
}
} |
577 | cpp | google/tensorstore | zlib_compressor | tensorstore/internal/compression/zlib_compressor.cc | tensorstore/driver/zarr/zlib_compressor_test.cc | #ifndef TENSORSTORE_INTERNAL_COMPRESSION_ZLIB_COMPRESSOR_H_
#define TENSORSTORE_INTERNAL_COMPRESSION_ZLIB_COMPRESSOR_H_
#include <cstddef>
#include <memory>
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/internal/compression/json_specified_compressor.h"
#include "tensorstore/internal/compression/zlib.h"
namespace tensorstore {
namespace internal {
class ZlibCompressor : public JsonSpecifiedCompressor, public zlib::Options {
public:
std::unique_ptr<riegeli::Writer> GetWriter(
std::unique_ptr<riegeli::Writer> base_writer,
size_t element_bytes) const override;
virtual std::unique_ptr<riegeli::Reader> GetReader(
std::unique_ptr<riegeli::Reader> base_reader,
size_t element_bytes) const override;
};
}
}
#endif
#include "tensorstore/internal/compression/zlib_compressor.h"
#include <cstddef>
#include <memory>
#include "riegeli/zlib/zlib_reader.h"
#include "riegeli/zlib/zlib_writer.h"
#include "tensorstore/internal/compression/json_specified_compressor.h"
namespace tensorstore {
namespace internal {
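// level == -1 keeps riegeli's default compression level; use_gzip_header
// selects gzip framing instead of raw zlib for both writing and reading.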
std::unique_ptr<riegeli::Writer> ZlibCompressor::GetWriter(
std::unique_ptr<riegeli::Writer> base_writer, size_t element_bytes) const {
using Writer = riegeli::ZlibWriter<std::unique_ptr<riegeli::Writer>>;
Writer::Options options;
if (level != -1) options.set_compression_level(level);
options.set_header(use_gzip_header ? Writer::Header::kGzip
: Writer::Header::kZlib);
return std::make_unique<Writer>(std::move(base_writer), options);
}
std::unique_ptr<riegeli::Reader> ZlibCompressor::GetReader(
std::unique_ptr<riegeli::Reader> base_reader, size_t element_bytes) const {
using Reader = riegeli::ZlibReader<std::unique_ptr<riegeli::Reader>>;
Reader::Options options;
options.set_header(use_gzip_header ? Reader::Header::kGzip
: Reader::Header::kZlib);
return std::make_unique<Reader>(std::move(base_reader), options);
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/driver/zarr/compressor.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_zarr::Compressor;
class ZlibCompressorTest : public ::testing::TestWithParam<const char*> {};
INSTANTIATE_TEST_SUITE_P(ZlibCompressorTestCases, ZlibCompressorTest,
::testing::Values("zlib", "gzip"));
TEST_P(ZlibCompressorTest, SmallRoundtrip) {
auto compressor =
Compressor::FromJson({{"id", GetParam()}, {"level", 6}}).value();
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result, decode_result;
TENSORSTORE_ASSERT_OK(compressor->Encode(input, &encode_result, 1));
TENSORSTORE_ASSERT_OK(compressor->Decode(encode_result, &decode_result, 1));
EXPECT_EQ(input, decode_result);
}
TEST_P(ZlibCompressorTest, DefaultLevel) {
auto compressor1 = Compressor::FromJson({{"id", GetParam()}}).value();
auto compressor2 =
Compressor::FromJson({{"id", GetParam()}, {"level", 1}}).value();
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result1, encode_result2;
TENSORSTORE_ASSERT_OK(compressor1->Encode(input, &encode_result1, 1));
TENSORSTORE_ASSERT_OK(compressor2->Encode(input, &encode_result2, 1));
EXPECT_EQ(encode_result1, encode_result2);
}
TEST_P(ZlibCompressorTest, NonDefaultLevel) {
auto compressor =
Compressor::FromJson({{"id", GetParam()}, {"level", 9}}).value();
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result;
TENSORSTORE_ASSERT_OK(compressor->Encode(input, &encode_result, 1));
absl::Cord decode_result;
TENSORSTORE_ASSERT_OK(compressor->Decode(encode_result, &decode_result, 1));
EXPECT_EQ(input, decode_result);
}
TEST_P(ZlibCompressorTest, InvalidParameter) {
EXPECT_THAT(Compressor::FromJson({{"id", GetParam()}, {"level", "6"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"level\": .*"));
EXPECT_THAT(Compressor::FromJson({{"id", GetParam()}, {"level", -1}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"level\": .*"));
EXPECT_THAT(Compressor::FromJson({{"id", GetParam()}, {"level", 10}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"level\": .*"));
EXPECT_THAT(Compressor::FromJson({{"id", GetParam()}, {"foo", 10}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Object includes extra members: \"foo\""));
}
TEST_P(ZlibCompressorTest, ToJson) {
auto compressor =
Compressor::FromJson({{"id", GetParam()}, {"level", 5}}).value();
EXPECT_EQ(nlohmann::json({{"id", GetParam()}, {"level", 5}}),
compressor.ToJson());
}
} |
578 | cpp | google/tensorstore | downsample_util | tensorstore/driver/downsample/downsample_util.cc | tensorstore/driver/downsample/downsample_util_test.cc | #ifndef TENSORSTORE_DRIVER_DOWNSAMPLE_DOWNSAMPLE_UTIL_H_
#define TENSORSTORE_DRIVER_DOWNSAMPLE_DOWNSAMPLE_UTIL_H_
#include <iosfwd>
#include "absl/container/inlined_vector.h"
#include "tensorstore/box.h"
#include "tensorstore/downsample_method.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_downsample {
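// Result of propagating a downsampling operation through an index transform:
// `transform` maps a (possibly augmented) input domain into the base
// (full-resolution) output space, and `input_downsample_factors` records, per
// input dimension of `transform`, the factor by which the materialized base
// data must be downsampled to recover the originally requested view.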
struct PropagatedIndexTransformDownsampling {
IndexTransform<> transform;
absl::InlinedVector<Index, internal::kNumInlinedDims>
input_downsample_factors;
friend bool operator==(const PropagatedIndexTransformDownsampling& a,
const PropagatedIndexTransformDownsampling& b) {
return a.transform == b.transform &&
a.input_downsample_factors == b.input_downsample_factors;
}
friend bool operator!=(const PropagatedIndexTransformDownsampling& a,
const PropagatedIndexTransformDownsampling& b) {
return !(a == b);
}
friend std::ostream& operator<<(
std::ostream& os, const PropagatedIndexTransformDownsampling& x);
};
absl::Status PropagateIndexTransformDownsampling(
IndexTransformView<> downsampled_transform, BoxView<> output_base_bounds,
span<const Index> output_downsample_factors,
PropagatedIndexTransformDownsampling& propagated);
absl::Status PropagateAndComposeIndexTransformDownsampling(
IndexTransformView<> downsampled_transform,
IndexTransformView<> base_transform,
span<const Index> base_downsample_factors,
PropagatedIndexTransformDownsampling& propagated);
Result<PropagatedIndexTransformDownsampling>
PropagateIndexTransformDownsampling(
IndexTransformView<> downsampled_transform, BoxView<> output_base_bounds,
span<const Index> output_downsample_factors);
IndexInterval DownsampleInterval(IndexInterval base_interval,
Index downsample_factor,
DownsampleMethod method);
void DownsampleBounds(BoxView<> base_bounds,
MutableBoxView<> downsampled_bounds,
span<const Index> downsample_factors,
DownsampleMethod method);
IndexDomain<> DownsampleDomain(IndexDomainView<> base_domain,
span<const Index> downsample_factors,
DownsampleMethod method);
IndexTransform<> GetDownsampledDomainIdentityTransform(
IndexDomainView<> base_domain, span<const Index> downsample_factors,
DownsampleMethod method);
bool CanDownsampleIndexTransform(IndexTransformView<> base_transform,
BoxView<> base_bounds,
span<const Index> downsample_factors);
}
}
#endif
#include "tensorstore/driver/downsample/downsample_util.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <limits>
#include <ostream>
#include <utility>
#include "absl/base/optimization.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/strings/str_join.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/downsample_method.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/identity_transform.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/byte_strided_pointer.h"
#include "tensorstore/util/division.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_downsample {
std::ostream& operator<<(std::ostream& os,
const PropagatedIndexTransformDownsampling& x) {
return os << "transform=" << x.transform << "\ninput_downsample_factors="
<< absl::StrJoin(x.input_downsample_factors, ",");
}
namespace {
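// Counts how many synthetic input dimensions must be added when propagating
// the downsampling: one for each downsampled (factor != 1) output dimension
// whose map is a constant (with non-empty domain), a single-input-dimension
// map with non-unit stride or a multiply-referenced input dimension, or an
// index array.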
DimensionIndex ComputeAdditionalInputDimensionsNeeded(
IndexTransformView<> downsampled_transform,
span<const Index> output_downsample_factors,
span<DimensionIndex> input_dimension_ref_counts, bool is_domain_empty) {
using internal_index_space::TransformAccess;
assert(downsampled_transform.valid());
const DimensionIndex output_rank = downsampled_transform.output_rank();
assert(input_dimension_ref_counts.size() ==
downsampled_transform.input_rank());
assert(output_downsample_factors.size() == output_rank);
DimensionIndex additional_input_dims = 0;
auto old_transform_rep = TransformAccess::rep(downsampled_transform);
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
assert(output_downsample_factors[output_dim] > 0);
if (output_downsample_factors[output_dim] == 1) {
continue;
}
const auto& output_map = old_transform_rep->output_index_maps()[output_dim];
switch (output_map.method()) {
case OutputIndexMethod::constant:
if (!is_domain_empty) {
++additional_input_dims;
}
break;
case OutputIndexMethod::single_input_dimension:
if ((std::abs(output_map.stride()) != 1 ||
input_dimension_ref_counts[output_map.input_dimension()] != 1) &&
!downsampled_transform.input_domain()
.box()[output_map.input_dimension()]
.empty()) {
++additional_input_dims;
}
break;
case OutputIndexMethod::array: {
++additional_input_dims;
break;
}
}
}
return additional_input_dims;
}
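// Copies `output_map` into `new_output_map`, widening index arrays from
// `input_rank` to `new_input_rank` input dimensions by zero-padding the byte
// strides (the added dimensions index nothing in the existing array).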
absl::Status ExtendOutputIndexMap(
const internal_index_space::OutputIndexMap& output_map,
internal_index_space::OutputIndexMap& new_output_map,
DimensionIndex input_rank, DimensionIndex new_input_rank) {
new_output_map.offset() = output_map.offset();
new_output_map.stride() = output_map.stride();
switch (output_map.method()) {
case OutputIndexMethod::constant:
new_output_map.SetConstant();
break;
case OutputIndexMethod::single_input_dimension:
new_output_map.SetSingleInputDimension(output_map.input_dimension());
break;
case OutputIndexMethod::array: {
const auto& index_array_data = output_map.index_array_data();
auto& new_index_array_data =
new_output_map.SetArrayIndexing(new_input_rank);
new_index_array_data.element_pointer = index_array_data.element_pointer;
new_index_array_data.index_range = index_array_data.index_range;
std::copy_n(index_array_data.byte_strides, input_rank,
new_index_array_data.byte_strides);
std::fill_n(new_index_array_data.byte_strides + input_rank,
new_input_rank - input_rank, Index(0));
break;
}
}
return absl::OkStatus();
}
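// Handles the simple case of a single-input-dimension map with stride +/-1
// whose input dimension is not otherwise referenced: the downsampling is
// propagated in place by scaling the offset by the downsample factor and
// widening the input interval, without introducing a new input dimension.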
absl::Status PropagateUnitStrideSingleInputDimensionMapDownsampling(
Index original_offset, Index original_stride, IndexInterval input_interval,
Index output_downsample_factor,
internal_index_space::OutputIndexMap& new_output_map,
IndexInterval output_base_bounds, MutableBoxView<> new_input_domain,
DimensionIndex new_input_dim,
PropagatedIndexTransformDownsampling& propagated) {
assert(original_stride == 1 || original_stride == -1);
if (internal::MulOverflow(original_offset, output_downsample_factor,
&new_output_map.offset())) {
return absl::OutOfRangeError(
tensorstore::StrCat("Integer overflow computing output offset ",
original_offset, " * ", output_downsample_factor));
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto bounds_interval,
GetAffineTransformDomain(output_base_bounds, new_output_map.offset(),
original_stride));
auto input_bounds = DownsampleInterval(
bounds_interval, output_downsample_factor, DownsampleMethod::kMean);
if (!Contains(input_bounds, input_interval)) {
return absl::OutOfRangeError(
tensorstore::StrCat("Propagated bounds interval ", input_bounds,
" does not contain ", input_interval));
}
propagated.input_downsample_factors[new_input_dim] = output_downsample_factor;
new_output_map.SetSingleInputDimension(new_input_dim);
TENSORSTORE_ASSIGN_OR_RETURN(
auto new_interval,
GetAffineTransformInverseDomain(
input_interval, 0, original_stride * output_downsample_factor));
new_interval = Intersect(new_interval, bounds_interval);
new_output_map.stride() = original_stride;
new_input_domain[new_input_dim] = new_interval;
return absl::OkStatus();
}
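// Handles a single-input-dimension map that cannot be downsampled in place
// (non-unit stride, or a shared input dimension): materializes the map as an
// index array over a new input dimension of extent `output_downsample_factor`,
// clamping each expanded index to `output_base_bounds`.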
absl::Status PropagateSingleInputDimensionMapDownsamplingAsNewDimension(
const internal_index_space::OutputIndexMap& output_map,
IndexInterval input_interval, Index output_downsample_factor,
internal_index_space::OutputIndexMap& new_output_map,
IndexInterval output_base_bounds, MutableBoxView<> new_input_domain,
DimensionIndex new_input_dim,
PropagatedIndexTransformDownsampling& propagated) {
if (input_interval.size() == 1 || output_map.stride() == 0) {
Index adjusted_offset;
if (internal::MulOverflow(input_interval.inclusive_min(),
output_map.stride(), &adjusted_offset) ||
internal::AddOverflow(adjusted_offset, output_map.offset(),
&adjusted_offset)) {
return absl::OutOfRangeError(tensorstore::StrCat(
"Integer overflow computing offset ", output_map.offset(), " + ",
input_interval.inclusive_min(), " * ", output_map.stride()));
}
return PropagateUnitStrideSingleInputDimensionMapDownsampling(
adjusted_offset, 1,
IndexInterval::UncheckedSized(0, 1),
output_downsample_factor, new_output_map, output_base_bounds,
new_input_domain, new_input_dim, propagated);
}
propagated.input_downsample_factors[new_input_dim] = output_downsample_factor;
if (output_downsample_factor > kInfIndex) {
return absl::OutOfRangeError("Downsample factor is out of range");
}
new_input_domain[new_input_dim] =
IndexInterval::UncheckedSized(0, output_downsample_factor);
new_output_map.offset() = 0;
new_output_map.stride() = 1;
auto& new_index_array_data =
new_output_map.SetArrayIndexing(new_input_domain.rank());
new_index_array_data.index_range = output_base_bounds;
Index adjusted_stride;
Index adjusted_offset;
if (internal::MulOverflow(output_map.stride(), output_downsample_factor,
&adjusted_stride)) {
return absl::OutOfRangeError(tensorstore::StrCat(
"Integer overflow computing stride ", output_map.stride(), " * ",
output_downsample_factor));
}
if (internal::MulOverflow(output_map.offset(), output_downsample_factor,
&adjusted_offset)) {
return absl::OutOfRangeError(tensorstore::StrCat(
"Integer overflow computing offset ", output_map.offset(), " * ",
output_downsample_factor));
}
if (!input_interval.empty()) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto output_range,
GetAffineTransformRange(input_interval, adjusted_offset,
adjusted_stride));
TENSORSTORE_ASSIGN_OR_RETURN(
output_range,
ShiftInterval(output_range, output_downsample_factor - 1, 0));
if (!Contains(output_base_bounds, output_range)) {
return absl::OutOfRangeError(tensorstore::StrCat(
"Output bounds interval ", output_base_bounds,
" does not contain output range interval ", output_range));
}
}
std::fill_n(new_index_array_data.byte_strides, new_input_domain.rank(),
Index(0));
new_index_array_data.byte_strides[output_map.input_dimension()] = 1;
new_index_array_data.byte_strides[new_input_dim] = 2;
new_index_array_data.element_pointer = AllocateArrayElementsLike<Index>(
new_index_array_data.layout(new_input_domain),
new_index_array_data.byte_strides, skip_repeated_elements);
Index* array_origin =
const_cast<Index*>(new_index_array_data.array_view(new_input_domain)
.byte_strided_origin_pointer()
.get());
for (Index j = 0; j < input_interval.size(); ++j) {
const Index base_index =
adjusted_offset +
adjusted_stride * (input_interval.inclusive_min() + j);
for (Index i = 0; i < output_downsample_factor; ++i) {
Index x;
if (internal::AddOverflow(base_index, i, &x) ||
x > output_base_bounds.inclusive_max()) {
x = output_base_bounds.inclusive_max();
} else if (x < output_base_bounds.inclusive_min()) {
x = output_base_bounds.inclusive_min();
}
array_origin[input_interval.size() * i + j] = x;
}
}
return absl::OkStatus();
}
absl::Status PropagateIndexMapThatRequiresNewInputDimensionForEmptyDomain(
Index output_downsample_factor,
internal_index_space::OutputIndexMap& new_output_map,
MutableBoxView<> new_input_domain, DimensionIndex new_input_dim,
PropagatedIndexTransformDownsampling& propagated) {
propagated.input_downsample_factors[new_input_dim] = output_downsample_factor;
if (output_downsample_factor > kInfIndex) {
return absl::OutOfRangeError("Downsample factor is out of range");
}
new_input_domain[new_input_dim] =
IndexInterval::UncheckedSized(0, output_downsample_factor);
new_output_map.SetConstant();
new_output_map.offset() = 0;
new_output_map.stride() = 0;
return absl::OkStatus();
}
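// Propagates downsampling through an index array output map by adding a new
// input dimension of extent `output_downsample_factor`: each original index i
// expands to the base indices i * factor ... i * factor + factor - 1, clamped
// to `output_base_bounds`.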
absl::Status PropagateIndexArrayMapDownsampling(
const internal_index_space::OutputIndexMap& output_map,
BoxView<> downsampled_input_domain, Index output_downsample_factor,
internal_index_space::OutputIndexMap& new_output_map,
IndexInterval output_base_bounds, MutableBoxView<> new_input_domain,
DimensionIndex new_input_dim,
PropagatedIndexTransformDownsampling& propagated) {
new_output_map.offset() = 0;
propagated.input_downsample_factors[new_input_dim] = output_downsample_factor;
if (output_downsample_factor > kInfIndex) {
return absl::OutOfRangeError("Downsample factor is out of range");
}
new_input_domain[new_input_dim] =
IndexInterval::UncheckedSized(0, output_downsample_factor);
const DimensionIndex input_rank = downsampled_input_domain.rank();
const auto& index_array_data = output_map.index_array_data();
new_output_map.stride() = 1;
auto& new_index_array_data =
new_output_map.SetArrayIndexing(new_input_domain.rank());
Index adjusted_stride;
Index adjusted_offset;
if (internal::MulOverflow(output_map.stride(), output_downsample_factor,
&adjusted_stride)) {
return absl::OutOfRangeError(tensorstore::StrCat(
"Integer overflow computing stride ", output_map.stride(), " * ",
output_downsample_factor));
}
if (internal::MulOverflow(output_map.offset(), output_downsample_factor,
&adjusted_offset)) {
return absl::OutOfRangeError(tensorstore::StrCat(
"Integer overflow computing offset ", output_map.offset(), " * ",
output_downsample_factor));
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto padded_output_interval,
ShiftInterval(output_base_bounds, -(output_downsample_factor - 1), 0));
TENSORSTORE_ASSIGN_OR_RETURN(
auto effective_index_range,
GetAffineTransformDomain(padded_output_interval, adjusted_offset,
adjusted_stride));
effective_index_range =
Intersect(effective_index_range, index_array_data.index_range);
new_index_array_data.index_range = output_base_bounds;
std::copy_n(index_array_data.byte_strides, input_rank,
new_index_array_data.byte_strides);
std::fill_n(new_index_array_data.byte_strides + input_rank,
new_input_domain.rank() - input_rank, Index(0));
new_index_array_data.byte_strides[new_input_dim] =
std::numeric_limits<Index>::max();
new_index_array_data.element_pointer = AllocateArrayElementsLike<Index>(
new_index_array_data.layout(new_input_domain),
new_index_array_data.byte_strides, skip_repeated_elements);
absl::Status status;
IterateOverArrays(
[&](const Index* existing_index,
ByteStridedPointer<const Index> new_index) {
const Index existing_index_value = *existing_index;
if (!Contains(effective_index_range, existing_index_value)) {
status = CheckContains(effective_index_range, existing_index_value);
return false;
}
Index base_index =
existing_index_value * adjusted_stride + adjusted_offset;
const Index byte_stride =
new_index_array_data.byte_strides[new_input_dim];
Index cur_index =
std::max(base_index, output_base_bounds.inclusive_min());
for (Index i = 0; i < output_downsample_factor; ++i) {
Index x;
if (!internal::AddOverflow(base_index, i, &x) &&
output_base_bounds.exclusive_max() > x) {
cur_index = std::max(cur_index, x);
}
assert(Contains(output_base_bounds, cur_index));
*const_cast<Index*>((new_index + i * byte_stride).get()) = cur_index;
}
return true;
},
skip_repeated_elements,
index_array_data.array_view(downsampled_input_domain),
new_index_array_data.array_view(downsampled_input_domain));
return status;
}
}
absl::Status PropagateIndexTransformDownsampling(
IndexTransformView<> downsampled_transform, BoxView<> output_base_bounds,
span<const Index> output_downsample_factors,
PropagatedIndexTransformDownsampling& propagated) {
using internal_index_space::TransformAccess;
using internal_index_space::TransformRep;
assert(downsampled_transform.valid());
const DimensionIndex output_rank = downsampled_transform.output_rank();
const DimensionIndex input_rank = downsampled_transform.input_rank();
assert(output_base_bounds.rank() == output_rank);
assert(output_downsample_factors.size() == output_rank);
DimensionIndex input_dimension_ref_counts[kMaxRank];
internal::ComputeInputDimensionReferenceCounts(
downsampled_transform, span(&input_dimension_ref_counts[0], input_rank));
const bool is_domain_empty = downsampled_transform.domain().box().is_empty();
Dime | #include "tensorstore/driver/downsample/downsample_util.h"
#include <stddef.h>
#include <stdint.h>
#include <limits>
#include <random>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/random/bit_gen_ref.h"
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/data_type.h"
#include "tensorstore/downsample_method.h"
#include "tensorstore/driver/downsample/downsample_array.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/dimension_identifier.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/index_transform_testutil.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/data_type_random_generator.h"
#include "tensorstore/internal/testing/random_seed.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::DimensionIndex;
using ::tensorstore::Dims;
using ::tensorstore::DownsampleMethod;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kInfIndex;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::span;
using ::tensorstore::internal_downsample::CanDownsampleIndexTransform;
using ::tensorstore::internal_downsample::DownsampleArray;
using ::tensorstore::internal_downsample::DownsampleBounds;
using ::tensorstore::internal_downsample::DownsampleInterval;
using ::tensorstore::internal_downsample::DownsampleTransformedArray;
using ::tensorstore::internal_downsample::PropagatedIndexTransformDownsampling;
using ::tensorstore::internal_downsample::PropagateIndexTransformDownsampling;
using ::testing::Optional;
TEST(PropagateIndexTransformDownsamplingTest, Rank0) {
EXPECT_THAT(PropagateIndexTransformDownsampling(
tensorstore::IdentityTransform(0), {}, {}),
Optional(PropagatedIndexTransformDownsampling{
tensorstore::IdentityTransform(0), {}}));
}
TEST(PropagateIndexTransformDownsamplingTest, Rank1SingleInputDimension) {
EXPECT_THAT(PropagateIndexTransformDownsampling(
tensorstore::IdentityTransform(BoxView({1}, {3})),
BoxView<1>({7}), span<const Index>({2})),
Optional(PropagatedIndexTransformDownsampling{
tensorstore::IdentityTransform(BoxView({2}, {5})), {2}}));
}
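// Worked example for the case above: downsampled indices {1, 2, 3} with
// factor 2 cover base indices [2, 7], which clipped to the base bounds [0, 7)
// yields the propagated domain origin 2, size 5, downsampled again by factor
// 2 on the way back.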
TEST(PropagateIndexTransformDownsamplingTest, InvalidRank) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_transform,
tensorstore::IdentityTransform(32) | Dims(0).Stride(2));
EXPECT_THAT(PropagateIndexTransformDownsampling(
downsampled_transform, Box(32), std::vector<Index>(32, 2)),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Rank 33 is outside valid range \\[0, 32\\]"));
}
TEST(PropagateIndexTransformDownsamplingTest, Rank1Constant) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(0, 1).output_constant(0, 2).Finalize().value(),
BoxView({7}, {2}), span<const Index>({3})),
Optional(PropagatedIndexTransformDownsampling{
IndexTransformBuilder(1, 1)
.input_origin({1})
.input_exclusive_max({3})
.output_single_input_dimension(0, 6, 1, 0)
.Finalize()
.value(),
{3}}));
}
TEST(PropagateIndexTransformDownsamplingTest,
Rank1SingleInputDimensionPartialStartBlock) {
EXPECT_THAT(PropagateIndexTransformDownsampling(
tensorstore::IdentityTransform(BoxView({0}, {4})),
BoxView({1}, {6}), span<const Index>({2})),
Optional(PropagatedIndexTransformDownsampling{
tensorstore::IdentityTransform(BoxView({1}, {6})), {2}}));
}
TEST(PropagateIndexTransformDownsamplingTest, Rank2WithIgnoredDimension) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
tensorstore::IdentityTransform(BoxView({1, 2}, {3, 5})),
BoxView({7, 10}), span<const Index>({2, 1})),
Optional(PropagatedIndexTransformDownsampling{
tensorstore::IdentityTransform(BoxView({2, 2}, {5, 5})), {2, 1}}));
}
TEST(PropagateIndexTransformDownsamplingTest, Rank1IndexArray) {
EXPECT_THAT(PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 0, 1, MakeArray<Index>({4, 7, 3}))
.Finalize()
.value(),
BoxView<1>({50}), span<const Index>({4})),
Optional(PropagatedIndexTransformDownsampling{
IndexTransformBuilder(2, 1)
.input_shape({3, 4})
.output_index_array(0, 0, 1,
MakeArray<Index>({{16, 17, 18, 19},
{28, 29, 30, 31},
{12, 13, 14, 15}}),
IndexInterval::Sized(0, 50))
.Finalize()
.value(),
{1, 4}}));
}
TEST(PropagateIndexTransformDownsamplingTest,
Rank3IndexArrayConstantNoDownsampling) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(2, 3)
.input_shape({3, 4})
.output_index_array(0, 0, 1, MakeArray<Index>({{4}, {7}, {3}}))
.output_single_input_dimension(1, 1)
.output_constant(2, 42)
.Finalize()
.value(),
BoxView({30, 50, 55}), span<const Index>({1, 2, 1})),
Optional(PropagatedIndexTransformDownsampling{
IndexTransformBuilder(2, 3)
.input_shape({3, 8})
.output_index_array(0, 0, 1, MakeArray<Index>({{4}, {7}, {3}}))
.output_single_input_dimension(1, 1)
.output_constant(2, 42)
.Finalize()
.value(),
{1, 2}}));
}
TEST(PropagateIndexTransformDownsamplingTest, Rank2IndexArray) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(2, 1)
.input_shape({2, 3})
.output_index_array(0, 0, 1,
MakeArray<Index>({{1, 2, 3}, {4, 5, 6}}))
.Finalize()
.value(),
BoxView<1>({50}), span<const Index>({4})),
Optional(PropagatedIndexTransformDownsampling{
IndexTransformBuilder(3, 1)
.input_shape({2, 3, 4})
.output_index_array(
0, 0, 1,
MakeArray<Index>(
{{{4, 5, 6, 7}, {8, 9, 10, 11}, {12, 13, 14, 15}},
{{16, 17, 18, 19}, {20, 21, 22, 23}, {24, 25, 26, 27}}}),
IndexInterval::Sized(0, 50))
.Finalize()
.value(),
{1, 1, 4}}));
}
TEST(PropagateIndexTransformDownsamplingTest,
Rank1SingleInputDimensionStrided) {
EXPECT_THAT(PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({3})
.output_single_input_dimension(0, 1, 5, 0)
.Finalize()
.value(),
BoxView<1>({50}), span<const Index>({4})),
Optional(PropagatedIndexTransformDownsampling{
IndexTransformBuilder(2, 1)
.input_shape({3, 4})
.output_index_array(0, 0, 1,
MakeArray<Index>({{4, 5, 6, 7},
{24, 25, 26, 27},
{44, 45, 46, 47}}),
IndexInterval::Sized(0, 50))
.Finalize()
.value(),
{1, 4}}));
}
TEST(PropagateIndexTransformDownsamplingTest, ErrorRank1ConstantOverflow) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(0, 1)
.output_constant(0, tensorstore::kMaxFiniteIndex)
.Finalize()
.value(),
BoxView<1>({0}, {kInfIndex}), span<const Index>({1000})),
MatchesStatus(absl::StatusCode::kOutOfRange, ".*Integer overflow.*"));
}
TEST(PropagateIndexTransformDownsamplingTest, ErrorRank1ConstantOutOfBounds) {
TENSORSTORE_EXPECT_OK(PropagateIndexTransformDownsampling(
IndexTransformBuilder(0, 1).output_constant(0, 4).Finalize().value(),
BoxView<1>({0}, {15}), span<const Index>({3})));
TENSORSTORE_EXPECT_OK(PropagateIndexTransformDownsampling(
IndexTransformBuilder(0, 1).output_constant(0, 4).Finalize().value(),
BoxView<1>({0}, {14}), span<const Index>({3})));
TENSORSTORE_EXPECT_OK(PropagateIndexTransformDownsampling(
IndexTransformBuilder(0, 1).output_constant(0, 4).Finalize().value(),
BoxView<1>({0}, {13}), span<const Index>({3})));
TENSORSTORE_EXPECT_OK(PropagateIndexTransformDownsampling(
IndexTransformBuilder(0, 1).output_constant(0, 0).Finalize().value(),
BoxView<1>({1}, {13}), span<const Index>({3})));
TENSORSTORE_EXPECT_OK(PropagateIndexTransformDownsampling(
IndexTransformBuilder(0, 1).output_constant(0, 0).Finalize().value(),
BoxView<1>({2}, {13}), span<const Index>({3})));
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(0, 1).output_constant(0, 5).Finalize().value(),
BoxView<1>({0}, {15}), span<const Index>({3})),
MatchesStatus(absl::StatusCode::kOutOfRange,
".*Propagated bounds interval .* does not contain .*"));
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(0, 1).output_constant(0, 0).Finalize().value(),
BoxView<1>({3}, {15}), span<const Index>({3})),
MatchesStatus(absl::StatusCode::kOutOfRange,
".*Propagated bounds interval .* does not contain .*"));
}
TEST(PropagateIndexTransformDownsamplingTest,
ErrorSingleInputDimensionStridedNonFiniteDomain) {
EXPECT_THAT(PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_origin({0})
.output_single_input_dimension(0, 0, 2, 0)
.Finalize()
.value(),
BoxView<1>({0}, {kInfIndex}), span<const Index>({1000})),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*Input domain .* is not finite"));
}
TEST(PropagateIndexTransformDownsamplingTest,
ErrorSingleInputDimensionSize1StridedOverflow) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_origin({100})
.input_shape({1})
.output_single_input_dimension(
0, std::numeric_limits<Index>::max(), 2, 0)
.Finalize()
.value(),
BoxView<1>({0}, {kInfIndex}), span<const Index>({1000})),
MatchesStatus(absl::StatusCode::kOutOfRange, ".*Integer overflow.*"));
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_origin({100})
.input_shape({1})
.output_single_input_dimension(
0, 0, std::numeric_limits<Index>::max(), 0)
.Finalize()
.value(),
BoxView<1>({0}, {kInfIndex}), span<const Index>({1000})),
MatchesStatus(absl::StatusCode::kOutOfRange, ".*Integer overflow.*"));
}
TEST(PropagateIndexTransformDownsamplingTest,
ErrorSingleInputDimStridedInvalidDownsampleFactor) {
EXPECT_THAT(PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({100})
.output_single_input_dimension(0, 0, 2, 0)
.Finalize()
.value(),
BoxView<1>({0}, {1000}),
span<const Index>({std::numeric_limits<Index>::max()})),
MatchesStatus(absl::StatusCode::kOutOfRange,
".*Downsample factor is out of range"));
}
TEST(PropagateIndexTransformDownsamplingTest,
ErrorSingleInputDimStridedOverflowMultiplyingStrideAndDownsampleFactor) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({100})
.output_single_input_dimension(0, 0, 100, 0)
.Finalize()
.value(),
BoxView<1>({0}, {1000}), span<const Index>({kInfIndex})),
MatchesStatus(absl::StatusCode::kOutOfRange, ".*Integer overflow.*"));
}
TEST(PropagateIndexTransformDownsamplingTest,
ErrorSingleInputDimStridedOverflowMultiplyingOffsetAndDownsampleFactor) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({100})
.output_single_input_dimension(
0, std::numeric_limits<Index>::max(), 2, 0)
.Finalize()
.value(),
BoxView<1>({0}, {1000}), span<const Index>({0xfffffffffffff})),
MatchesStatus(absl::StatusCode::kOutOfRange, ".*Integer overflow.*"));
}
TEST(PropagateIndexTransformDownsamplingTest,
ErrorSingleInputDimStridedOutOfRange) {
EXPECT_THAT(PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({100})
.output_single_input_dimension(0, 0, 2, 0)
.Finalize()
.value(),
BoxView<1>({0}, {199}), span<const Index>({2})),
MatchesStatus(absl::StatusCode::kOutOfRange,
".*Output bounds interval .* does not contain "
"output range interval .*"));
}
TEST(PropagateIndexTransformDownsamplingTest,
ErrorIndexArrayInvalidDownsampleFactor) {
EXPECT_THAT(PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 0, 1, MakeArray<Index>({3, 4, 5}))
.Finalize()
.value(),
BoxView<1>({0}, {100}),
span<const Index>({std::numeric_limits<Index>::max()})),
MatchesStatus(absl::StatusCode::kOutOfRange,
".*Downsample factor is out of range"));
}
TEST(PropagateIndexTransformDownsamplingTest,
ErrorIndexArrayOverflowMultiplyingStrideAndDownsampleFactor) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 0, 100, MakeArray<Index>({3, 4, 5}))
.Finalize()
.value(),
BoxView<1>({0}, {100}), span<const Index>({kInfIndex})),
MatchesStatus(absl::StatusCode::kOutOfRange, ".*Integer overflow.*"));
}
TEST(PropagateIndexTransformDownsamplingTest,
ErrorIndexArrayOverflowMultiplyingOffsetAndDownsampleFactor) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 100, 1, MakeArray<Index>({3, 4, 5}))
.Finalize()
.value(),
BoxView<1>({0}, {100}), span<const Index>({kInfIndex})),
MatchesStatus(absl::StatusCode::kOutOfRange, ".*Integer overflow.*"));
}
TEST(PropagateIndexTransformDownsamplingTest, ErrorIndexArrayOutOfRange) {
EXPECT_THAT(
PropagateIndexTransformDownsampling(
IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 0, 1, MakeArray<Index>({3, 4, 5}))
.Finalize()
.value(),
BoxView<1>({0}, {9}), span<const Index>({2})),
MatchesStatus(
absl::StatusCode::kOutOfRange,
"Propagating downsampling factor 2 through output dimension 0: "
"Index 5 is outside valid range \\[0, 5\\)"));
}
TEST(CanDownsampleIndexTransformTest, Rank0) {
EXPECT_TRUE(
CanDownsampleIndexTransform(tensorstore::IdentityTransform(0), {}, {}));
}
TEST(CanDownsampleIndexTransformTest, Constant) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
tensorstore::IdentityTransform(1) | Dims(0).IndexSlice(42));
EXPECT_TRUE(CanDownsampleIndexTransform(transform, BoxView<1>({42}, {1}),
span<const Index>({3})));
EXPECT_FALSE(CanDownsampleIndexTransform(transform, BoxView<1>({42}, {2}),
span<const Index>({3})));
EXPECT_FALSE(CanDownsampleIndexTransform(transform, BoxView<1>({41}, {3}),
span<const Index>({3})));
EXPECT_TRUE(CanDownsampleIndexTransform(transform, BoxView<1>({41}, {2}),
span<const Index>({3})));
EXPECT_FALSE(CanDownsampleIndexTransform(transform, BoxView<1>({100}),
span<const Index>({3})));
EXPECT_TRUE(CanDownsampleIndexTransform(transform, BoxView<1>({100}),
span<const Index>({1})));
}
TEST(CanDownsampleIndexTransformTest, SingleInputDimension) {
EXPECT_TRUE(CanDownsampleIndexTransform(
(tensorstore::IdentityTransform(1) | Dims(0).SizedInterval(9, 3)).value(),
BoxView<1>({9}, {10}), span<const Index>({3})));
EXPECT_TRUE(CanDownsampleIndexTransform(
(tensorstore::IdentityTransform(1) | Dims(0).SizedInterval(18, 1))
.value(),
BoxView<1>({9}, {10}), span<const Index>({3})));
EXPECT_FALSE(CanDownsampleIndexTransform(
(tensorstore::IdentityTransform(1) | Dims(0).SizedInterval(9, 2)).value(),
BoxView<1>({9}, {10}), span<const Index>({3})));
EXPECT_FALSE(CanDownsampleIndexTransform(
(tensorstore::IdentityTransform(1) | Dims(0).SizedInterval(9, 3, -1))
.value(),
BoxView<1>({9}, {10}), span<const Index>({3})));
EXPECT_FALSE(CanDownsampleIndexTransform(
(tensorstore::IdentityTransform(1) | Dims(0).SizedInterval(10, 2))
.value(),
BoxView<1>({9}, {10}), span<const Index>({3})));
EXPECT_FALSE(CanDownsampleIndexTransform(
(tensorstore::IdentityTransform(1) | Dims(0).SizedInterval(9, 3, 2))
.value(),
BoxView<1>({9}, {10}), span<const Index>({3})));
}
TEST(CanDownsampleIndexTransformTest, IndexArray) {
EXPECT_FALSE(CanDownsampleIndexTransform(
(tensorstore::IdentityTransform(1) |
Dims(0).IndexArraySlice(MakeArray<Index>({2, 5, 3})))
.value(),
BoxView<1>({0}, {100}), span<const Index>({2})));
}
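// Exercises the defining invariant of PropagateIndexTransformDownsampling on
// random transforms: applying `downsampled_transform` to the downsampled base
// data must produce the same result as applying the propagated transform to
// the base data and then downsampling by the propagated per-dimension
// factors.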
void TestPropagateIndexTransformDownsamplingInvariance(DimensionIndex rank) {
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_DOWNSAMPLE_PROPAGATE_INVARIANCE_SEED")};
tensorstore::internal::MakeRandomBoxParameters box_p;
box_p.min_rank = box_p.max_rank = rank;
auto base_bounds = tensorstore::internal::MakeRandomBox(gen, box_p);
SCOPED_TRACE(tensorstore::StrCat("base_bounds=", base_bounds));
auto base_data = tensorstore::internal::MakeRandomArray(
gen, base_bounds, tensorstore::dtype_v<uint8_t>);
SCOPED_TRACE(tensorstore::StrCat("base_data=", base_data));
std::vector<Index> downsample_factors(rank);
for (DimensionIndex i = 0; i < rank; ++i) {
downsample_factors[i] =
absl::Uniform<Index>(absl::IntervalClosedClosed, gen, 1, 2);
}
SCOPED_TRACE(tensorstore::StrCat("downsample_factors=",
tensorstore::span(downsample_factors)));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsampled_data,
DownsampleArray(base_data, downsample_factors, DownsampleMethod::kMean));
Box<> downsampled_bounds(rank);
DownsampleBounds(base_bounds, downsampled_bounds, downsample_factors,
DownsampleMethod::kMean);
SCOPED_TRACE(tensorstore::StrCat("downsampled_bounds=", downsampled_bounds));
auto downsampled_transform = tensorstore::internal::MakeRandomIndexTransform(
gen, downsampled_bounds, rank * 2);
SCOPED_TRACE(
tensorstore::StrCat("downsampled_transform=", downsampled_transform));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto propagated,
PropagateIndexTransformDownsampling(downsampled_transform, base_bounds,
downsample_factors));
SCOPED_TRACE(tensorstore::StrCat("propagated=", propagated));
SCOPED_TRACE(tensorstore::StrCat("downsampled_data=", downsampled_data));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto downsample_then_transform,
downsampled_data | downsampled_transform | tensorstore::Materialize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto transformed_base,
base_data | propagated.transform);
tensorstore::SharedOffsetArray<const void> transform_then_downsample;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
transform_then_downsample,
DownsampleTransformedArray(transformed_base,
propagated.input_downsample_factors,
DownsampleMethod::kMean));
if (downsampled_transform.input_rank() < propagated.transform.input_rank()) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
transform_then_downsample,
transform_then_downsample |
tensorstore::DynamicDims(
{tensorstore::DimRangeSpec{downsampled_transform.input_rank()}})
.IndexSlice(0) |
tensorstore::Materialize());
}
EXPECT_EQ(transform_then_downsample, downsample_then_transform);
}
constexpr size_t kNumRandomTests = 50;
TEST(PropagateIndexTransformDownsamplingTest, InvarianceRank0) {
for (size_t i = 0; i < kNumRandomTests; ++i) {
TestPropagateIndexTransformDownsamplingInvariance(0);
}
}
TEST(PropagateIndexTransformDownsamplingTest, InvarianceRank1) {
for (size_t i = 0; i < kNumRandomTests; ++i) {
TestPropagateIndexTransformDownsamplingInvariance(1);
}
}
TEST(PropagateIndexTransformDownsamplingTest, InvarianceRank2) {
for (size_t i = 0; i < kNumRandomTests; ++i) {
TestPropagateIndexTransformDownsamplingInvariance(2);
}
}
TEST(PropagateIndexTransformDownsamplingTest, InvarianceRank3) {
for (size_t i = 0; i < kNumRandomTests; ++i) {
TestPropagateIndexTransformDownsamplingInvariance(3);
}
}
TEST(DownsampleIntervalTest, UnboundedLower) {
EXPECT_EQ(IndexInterval::Closed(-kInfIndex, 10),
DownsampleInterval(IndexInterval::UncheckedClosed(-kInfIndex, 30),
3, DownsampleMethod::kMean));
}
TEST(DownsampleIntervalTest, UnboundedUpper) {
EXPECT_EQ(IndexInterval::Closed(-10, kInfIndex),
DownsampleInterval(IndexInterval::UncheckedClosed(-30, kInfIndex),
3, DownsampleMethod::kMean));
}
} |
579 | cpp | google/tensorstore | grid_occupancy_map | tensorstore/driver/downsample/grid_occupancy_map.cc | tensorstore/driver/downsample/grid_occupancy_map_test.cc | #ifndef TENSORSTORE_DRIVER_DOWNSAMPLE_GRID_OCCUPANCY_MAP_H_
#define TENSORSTORE_DRIVER_DOWNSAMPLE_GRID_OCCUPANCY_MAP_H_
#include <vector>
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_downsample {
class GridOccupancyTracker {
public:
std::vector<Index> occupied_chunks;
void MarkOccupied(BoxView<> box) {
occupied_chunks.insert(occupied_chunks.end(), box.origin().begin(),
box.origin().end());
occupied_chunks.insert(occupied_chunks.end(), box.shape().begin(),
box.shape().end());
}
};
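// Non-regular grid built from the occupied boxes recorded by a
// GridOccupancyTracker: `partition_points[dim]` holds the sorted cell
// boundaries along each dimension, and `occupied_chunk_mask` marks which of
// the resulting cells intersect an occupied box.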
class GridOccupancyMap {
public:
explicit GridOccupancyMap(GridOccupancyTracker&& tracker, BoxView<> domain);
DimensionIndex rank() const { return occupied_chunk_mask.rank(); }
bool GetGridCellDomain(span<const Index> grid_cell,
MutableBoxView<> grid_cell_domain) const;
void InitializeCellIterator(span<Index> grid_cell) const;
bool AdvanceCellIterator(span<Index> grid_cell) const;
std::vector<std::vector<Index>> partition_points;
SharedArray<bool> occupied_chunk_mask;
};
}
}
#endif
#include "tensorstore/driver/downsample/grid_occupancy_map.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_downsample {
GridOccupancyMap::GridOccupancyMap(GridOccupancyTracker&& tracker,
BoxView<> domain)
: partition_points(domain.rank()) {
const DimensionIndex rank = domain.rank();
span<Index> occupied_chunks = tracker.occupied_chunks;
{
absl::flat_hash_map<Index, Index> partition_map;
for (DimensionIndex dim = 0; dim < rank; ++dim) {
partition_map.clear();
IndexInterval bounds = domain[dim];
partition_map.emplace(bounds.inclusive_min(), 0);
partition_map.emplace(bounds.exclusive_max(), 0);
for (ptrdiff_t i = dim; i < occupied_chunks.size(); i += 2 * rank) {
Index begin = occupied_chunks[i];
Index end = begin + occupied_chunks[i + rank];
partition_map.emplace(begin, 0);
partition_map.emplace(end, 0);
}
auto& dim_partition_points = partition_points[dim];
dim_partition_points.reserve(partition_map.size());
for (const auto& p : partition_map) {
dim_partition_points.push_back(p.first);
}
std::sort(dim_partition_points.begin(), dim_partition_points.end());
for (size_t i = 0, size = dim_partition_points.size(); i < size; ++i) {
partition_map.at(dim_partition_points[i]) = i;
}
for (ptrdiff_t i = dim; i < occupied_chunks.size(); i += 2 * rank) {
Index& begin = occupied_chunks[i];
Index& end = occupied_chunks[i + rank];
end = partition_map.at(begin + end);
begin = partition_map.at(begin);
}
}
}
Index grid_cell[kMaxRank];
span<Index> grid_cell_span(&grid_cell[0], rank);
{
for (DimensionIndex dim = 0; dim < rank; ++dim) {
grid_cell[dim] = partition_points[dim].size() - 1;
}
occupied_chunk_mask =
AllocateArray<bool>(grid_cell_span, c_order, value_init);
}
for (ptrdiff_t i = 0; i < occupied_chunks.size(); i += 2 * rank) {
std::copy_n(&occupied_chunks[i], rank, &grid_cell[0]);
do {
occupied_chunk_mask(grid_cell_span) = true;
} while (internal::AdvanceIndices(rank, &grid_cell[0], &occupied_chunks[i],
&occupied_chunks[i + rank]));
}
}
bool GridOccupancyMap::GetGridCellDomain(
span<const Index> grid_cell, MutableBoxView<> grid_cell_domain) const {
assert(grid_cell.size() == grid_cell_domain.rank());
assert(grid_cell.size() == rank());
if (occupied_chunk_mask(grid_cell)) return false;
for (DimensionIndex dim = 0; dim < grid_cell.size(); ++dim) {
const Index partition_index = grid_cell[dim];
grid_cell_domain[dim] = IndexInterval::UncheckedHalfOpen(
partition_points[dim][partition_index],
partition_points[dim][partition_index + 1]);
}
return true;
}
void GridOccupancyMap::InitializeCellIterator(span<Index> grid_cell) const {
std::fill(grid_cell.begin(), grid_cell.end(), 0);
}
bool GridOccupancyMap::AdvanceCellIterator(span<Index> grid_cell) const {
assert(grid_cell.size() == occupied_chunk_mask.rank());
return internal::AdvanceIndices(grid_cell.size(), grid_cell.data(),
occupied_chunk_mask.shape().data());
}
}
} | #include "tensorstore/driver/downsample/grid_occupancy_map.h"
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
namespace {
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::Index;
using ::tensorstore::MakeArray;
using ::tensorstore::internal_downsample::GridOccupancyMap;
using ::tensorstore::internal_downsample::GridOccupancyTracker;
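// Test helper: iterates over every cell of the occupancy grid and collects
// the domains of the cells that are not marked occupied.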
std::vector<Box<>> GetUnoccupiedBoxes(const GridOccupancyMap& map) {
std::vector<Box<>> boxes;
std::vector<Index> grid_cell(map.rank());
map.InitializeCellIterator(grid_cell);
Box<> box(map.rank());
do {
if (map.GetGridCellDomain(grid_cell, box)) {
boxes.push_back(box);
}
} while (map.AdvanceCellIterator(grid_cell));
return boxes;
}
TEST(GridOccupancyMapTest, Rank1) {
GridOccupancyTracker tracker;
tracker.MarkOccupied(BoxView<1>({1}, {3}));
tracker.MarkOccupied(BoxView<1>({5}, {4}));
GridOccupancyMap map(std::move(tracker), BoxView<1>({-1}, {11}));
EXPECT_THAT(
map.partition_points,
::testing::ElementsAre(::testing::ElementsAre(-1, 1, 4, 5, 9, 10)));
EXPECT_EQ(map.occupied_chunk_mask, MakeArray<bool>({0, 1, 0, 1, 0}));
EXPECT_THAT(GetUnoccupiedBoxes(map),
::testing::ElementsAre(Box<>({-1}, {2}), Box<>({4}, {1}),
Box<>({9}, {1})));
}
TEST(GridOccupancyMapTest, Rank2) {
GridOccupancyTracker tracker;
tracker.MarkOccupied(BoxView<2>({0, 0}, {3, 2}));
tracker.MarkOccupied(BoxView<2>({3, 3}, {1, 3}));
tracker.MarkOccupied(BoxView<2>({0, 5}, {2, 3}));
GridOccupancyMap map(std::move(tracker), BoxView<2>({4, 10}));
EXPECT_THAT(
map.partition_points,
::testing::ElementsAre(::testing::ElementsAre(0, 2, 3, 4),
::testing::ElementsAre(0, 2, 3, 5, 6, 8, 10)));
EXPECT_EQ(map.occupied_chunk_mask, MakeArray<bool>({
{1, 0, 0, 1, 1, 0},
{1, 0, 0, 0, 0, 0},
{0, 0, 1, 1, 0, 0},
}));
EXPECT_THAT(
GetUnoccupiedBoxes(map),
::testing::ElementsAre(
Box<>({0, 2}, {2, 1}), Box<>({0, 3}, {2, 2}), Box<>({0, 8}, {2, 2}),
Box<>({2, 2}, {1, 1}), Box<>({2, 3}, {1, 2}), Box<>({2, 5}, {1, 1}),
Box<>({2, 6}, {1, 2}), Box<>({2, 8}, {1, 2}), Box<>({3, 0}, {1, 2}),
Box<>({3, 2}, {1, 1}), Box<>({3, 6}, {1, 2}), Box<>({3, 8}, {1, 2})));
}
} |
580 | cpp | google/tensorstore | downsample_array | tensorstore/driver/downsample/downsample_array.cc | tensorstore/driver/downsample/downsample_array_test.cc | #ifndef TENSORSTORE_DRIVER_DOWNSAMPLE_DOWNSAMPLE_ARRAY_H_
#define TENSORSTORE_DRIVER_DOWNSAMPLE_DOWNSAMPLE_ARRAY_H_
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/downsample_method.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_downsample {
absl::Status DownsampleArray(OffsetArrayView<const void> source,
OffsetArrayView<void> target,
span<const Index> downsample_factors,
DownsampleMethod method);
Result<SharedOffsetArray<void>> DownsampleArray(
OffsetArrayView<const void> source, span<const Index> downsample_factors,
DownsampleMethod method);
absl::Status DownsampleTransformedArray(TransformedArrayView<const void> source,
TransformedArrayView<void> target,
span<const Index> downsample_factors,
DownsampleMethod method);
Result<SharedOffsetArray<void>> DownsampleTransformedArray(
TransformedArrayView<const void> source,
span<const Index> downsample_factors, DownsampleMethod method);
}
}
#endif
#include "tensorstore/driver/downsample/downsample_array.h"
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/downsample_method.h"
#include "tensorstore/driver/downsample/downsample_nditerable.h"
#include "tensorstore/driver/downsample/downsample_util.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_array.h"
#include "tensorstore/internal/nditerable_copy.h"
#include "tensorstore/internal/nditerable_transformed_array.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal_downsample {
namespace {
absl::Status ValidateDownsampleDomain(BoxView<> base_domain,
BoxView<> downsampled_domain,
span<const Index> downsample_factors,
DownsampleMethod method) {
const DimensionIndex rank = base_domain.rank();
if (rank != downsampled_domain.rank()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Cannot downsample domain ", base_domain, " to domain ",
downsampled_domain, " with different rank"));
}
if (rank != downsample_factors.size()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Cannot downsample domain ", base_domain, " with downsample factors ",
downsample_factors, " of different rank"));
}
for (DimensionIndex i = 0; i < rank; ++i) {
const auto expected_interval =
DownsampleInterval(base_domain[i], downsample_factors[i], method);
if (expected_interval != downsampled_domain[i]) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Cannot downsample array with domain ", base_domain, " by factors ",
downsample_factors, " with method ", method, " to array with domain ",
downsampled_domain, ": expected target dimension ", i,
" to have domain ", expected_interval));
}
}
return absl::OkStatus();
}
}
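// Strided downsampling reduces to copying through a strided view of `source`;
// all other methods go through the DownsampleNDIterable machinery, which also
// reduces partial blocks at the domain boundary.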
absl::Status DownsampleArray(OffsetArrayView<const void> source,
OffsetArrayView<void> target,
span<const Index> downsample_factors,
DownsampleMethod method) {
if (source.dtype() != target.dtype()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Source data type (", source.dtype(),
") does not match target data type (", target.dtype(), ")"));
}
TENSORSTORE_RETURN_IF_ERROR(ValidateDownsampleMethod(source.dtype(), method));
TENSORSTORE_RETURN_IF_ERROR(ValidateDownsampleDomain(
source.domain(), target.domain(), downsample_factors, method));
if (method == DownsampleMethod::kStride) {
return CopyTransformedArray(
source | tensorstore::AllDims().Stride(downsample_factors), target);
}
internal::DefaultNDIterableArena arena;
auto base_iterable = GetArrayNDIterable(UnownedToShared(source), arena);
auto target_iterable = GetArrayNDIterable(UnownedToShared(target), arena);
auto downsampled_iterable = DownsampleNDIterable(
std::move(base_iterable), source.domain(), downsample_factors, method,
downsample_factors.size(), arena);
internal::NDIterableCopier copier(*downsampled_iterable, *target_iterable,
target.shape(), skip_repeated_elements,
arena);
return copier.Copy();
}
Result<SharedOffsetArray<void>> DownsampleArray(
OffsetArrayView<const void> source, span<const Index> downsample_factors,
DownsampleMethod method) {
SharedOffsetArray<void> target;
target.layout().set_rank(source.rank());
DownsampleBounds(source.domain(),
MutableBoxView<>(target.origin(), target.shape()),
downsample_factors, method);
target.element_pointer() = AllocateArrayElementsLike<void>(
StridedLayoutView<dynamic_rank, offset_origin>(
target.rank(), target.origin().data(), target.shape().data(),
source.byte_strides().data()),
target.byte_strides().data(), skip_repeated_elements, default_init,
source.dtype());
TENSORSTORE_RETURN_IF_ERROR(
DownsampleArray(source, target, downsample_factors, method));
return target;
}
absl::Status DownsampleTransformedArray(TransformedArrayView<const void> source,
TransformedArrayView<void> target,
span<const Index> downsample_factors,
DownsampleMethod method) {
if (source.dtype() != target.dtype()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Source data type (", source.dtype(),
") does not match target data type (", target.dtype(), ")"));
}
TENSORSTORE_RETURN_IF_ERROR(ValidateDownsampleMethod(source.dtype(), method));
TENSORSTORE_RETURN_IF_ERROR(
ValidateDownsampleDomain(source.domain().box(), target.domain().box(),
downsample_factors, method));
if (method == DownsampleMethod::kStride) {
return CopyTransformedArray(
std::move(source) | tensorstore::AllDims().Stride(downsample_factors),
target);
}
internal::DefaultNDIterableArena arena;
TENSORSTORE_ASSIGN_OR_RETURN(
auto base_iterable,
GetTransformedArrayNDIterable(UnownedToShared(source), arena));
TENSORSTORE_ASSIGN_OR_RETURN(
auto target_iterable,
GetTransformedArrayNDIterable(UnownedToShared(target), arena));
auto downsampled_iterable = DownsampleNDIterable(
std::move(base_iterable), source.domain().box(), downsample_factors,
method, downsample_factors.size(), arena);
internal::NDIterableCopier copier(*downsampled_iterable, *target_iterable,
target.shape(), skip_repeated_elements,
arena);
return copier.Copy();
}
Result<SharedOffsetArray<void>> DownsampleTransformedArray(
TransformedArrayView<const void> source,
span<const Index> downsample_factors, DownsampleMethod method) {
SharedOffsetArray<void> target;
target.layout().set_rank(source.rank());
DownsampleBounds(source.domain().box(),
MutableBoxView<>(target.origin(), target.shape()),
downsample_factors, method);
target =
AllocateArray(target.domain(), c_order, default_init, source.dtype());
TENSORSTORE_RETURN_IF_ERROR(DownsampleTransformedArray(
source, TransformedArray(target), downsample_factors, method));
return target;
}
}
} | #include "tensorstore/driver/downsample/downsample_array.h"
#include <stdint.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
#include "tensorstore/array.h"
#include "tensorstore/array_testutil.h"
#include "tensorstore/data_type.h"
#include "tensorstore/downsample_method.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::Dims;
using ::tensorstore::DownsampleMethod;
using ::tensorstore::Index;
using ::tensorstore::kImplicit;
using ::tensorstore::MakeArray;
using ::tensorstore::MakeOffsetArray;
using ::tensorstore::span;
using ::tensorstore::internal_downsample::DownsampleArray;
using ::tensorstore::internal_downsample::DownsampleTransformedArray;
using ::testing::Optional;
TEST(DownsampleArrayTest, MeanRank0) {
EXPECT_THAT(DownsampleArray(tensorstore::MakeScalarArray<float>(42.0),
span<const Index>(), DownsampleMethod::kMean),
Optional(tensorstore::MakeScalarArray<float>(42.0)));
}
TEST(DownsampleArrayTest, MeanRank1ExactMultiple) {
EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2, 5, 7}),
span<const Index>({2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({1.5, 6})));
EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2, 3, 5, 7, 12}),
span<const Index>({3}), DownsampleMethod::kMean),
Optional(MakeArray<float>({2, 8})));
}
TEST(DownsampleArrayTest, MeanRoundingUint8) {
EXPECT_THAT(DownsampleArray(MakeArray<uint8_t>({253, 254, 254}),
span<const Index>({3}), DownsampleMethod::kMean),
Optional(MakeArray<uint8_t>({254})));
}
TEST(DownsampleArrayTest, MeanRoundingInt16) {
EXPECT_THAT(DownsampleArray(MakeArray<int16_t>({-253, -254, -254}),
span<const Index>({3}), DownsampleMethod::kMean),
Optional(MakeArray<int16_t>({-254})));
}
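// Integer means use round-half-to-even: mean(3, 3, 2, 2) = 2.5 rounds down to
// 2, while mean(3, 3, 4, 4) = 3.5 rounds up to 4, as the next test verifies.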
TEST(DownsampleArrayTest, MeanRoundingToEvenInt16) {
EXPECT_THAT(DownsampleArray(MakeArray<int16_t>({3, 3, 2, 2}),
span<const Index>({4}), DownsampleMethod::kMean),
Optional(MakeArray<int16_t>({2})));
EXPECT_THAT(DownsampleArray(MakeArray<int16_t>({3, 3, 4, 4}),
span<const Index>({4}), DownsampleMethod::kMean),
Optional(MakeArray<int16_t>({4})));
EXPECT_THAT(DownsampleArray(MakeArray<int16_t>({-3, -3, -2, -2}),
span<const Index>({4}), DownsampleMethod::kMean),
Optional(MakeArray<int16_t>({-2})));
EXPECT_THAT(DownsampleArray(MakeArray<int16_t>({-3, -3, -4, -4}),
span<const Index>({4}), DownsampleMethod::kMean),
Optional(MakeArray<int16_t>({-4})));
}
TEST(DownsampleArrayTest, MeanRoundingUint64) {
EXPECT_THAT(DownsampleArray(MakeArray<uint64_t>({253, 254, 254}),
span<const Index>({3}), DownsampleMethod::kMean),
Optional(MakeArray<uint64_t>({254})));
}
TEST(DownsampleArrayTest, MeanRoundingBool) {
EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 0, 1}),
span<const Index>({3}), DownsampleMethod::kMean),
Optional(MakeArray<bool>({0})));
EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 1, 1}),
span<const Index>({3}), DownsampleMethod::kMean),
Optional(MakeArray<bool>({1})));
EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 1, 1, 0}),
span<const Index>({4}), DownsampleMethod::kMean),
Optional(MakeArray<bool>({0})));
}
TEST(DownsampleArrayTest, MeanRank1Offset) {
EXPECT_THAT(DownsampleArray(MakeOffsetArray<float>({1}, {1, 2, 5, 9}),
span<const Index>({2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({1, 3.5, 9})));
}
TEST(DownsampleArrayTest, MeanRank1SingleDownsampledElement) {
EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2}), span<const Index>({2}),
DownsampleMethod::kMean),
Optional(MakeArray<float>({1.5})));
}
TEST(DownsampleArrayTest, MeanRank1NotExactMultiple) {
EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2, 5, 7, 9}),
span<const Index>({2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({1.5, 6, 9})));
EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2, 6, 7, 9}),
span<const Index>({3}), DownsampleMethod::kMean),
Optional(MakeArray<float>({3, 8})));
}
TEST(DownsampleArrayTest, MeanRank1NoDownsampling) {
EXPECT_THAT(DownsampleArray(MakeArray<float>({1, 2, 5, 7}),
span<const Index>({1}), DownsampleMethod::kMean),
Optional(MakeArray<float>({1, 2, 5, 7})));
}
TEST(DownsampleArrayTest, MeanRank2SingleDownsampleDim1) {
EXPECT_THAT(
DownsampleArray(MakeArray<float>({
{1, 2, 5, 7},
{5, 6, 15, 25},
}),
span<const Index>({1, 2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({{1.5, 6}, {5.5, 20}})));
}
TEST(DownsampleArrayTest, MeanRank2SingleDownsampleDim0) {
EXPECT_THAT(
DownsampleArray(MakeArray<float>({
{1, 2, 5, 7},
{5, 6, 15, 25},
}),
span<const Index>({2, 1}), DownsampleMethod::kMean),
Optional(MakeArray<float>({{3, 4, 10, 16}})));
}
TEST(DownsampleArrayTest, MeanRank2TwoDownsampleDims) {
EXPECT_THAT(
DownsampleArray(MakeArray<float>({
{1, 2, 5, 7},
{5, 6, 15, 25},
}),
span<const Index>({2, 2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({{3.5, 13.0}})));
}
TEST(DownsampleArrayTest, MeanRank2NotExactMultiple) {
EXPECT_THAT(
DownsampleArray(MakeArray<float>({
{1, 2, 3, 4, 5},
{6, 7, 8, 9, 10},
{11, 12, 13, 14, 15},
}),
span<const Index>({2, 2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({
{4, 6, 7.5},
{11.5, 13.5, 15},
})));
}
TEST(DownsampleArrayTest, MeanRank2PartialStartBlock) {
EXPECT_THAT(
DownsampleArray(MakeOffsetArray<float>({3, 8}, {{1, 2, 3, 4, 5},
{6, 7, 8, 9, 10},
{11, 12, 13, 14, 15}}),
span<const Index>({2, 3}), DownsampleMethod::kMean),
Optional(MakeOffsetArray<float>({1, 2}, {{1, 3, 5}, {8.5, 10.5, 12.5}})));
}
TEST(DownsampleArrayTest, MedianRank2PartialStartBlock) {
EXPECT_THAT(
DownsampleArray(MakeOffsetArray<float>({3, 8}, {{1, 2, 3, 4, 5},
{6, 7, 8, 9, 10},
{11, 12, 13, 14, 15}}),
span<const Index>({2, 3}), DownsampleMethod::kMedian),
Optional(MakeOffsetArray<float>({1, 2}, {{1, 3, 5}, {6, 9, 10}})));
}
TEST(DownsampleArrayTest, ModeRank2PartialStartBlock) {
EXPECT_THAT(
DownsampleArray(MakeOffsetArray<float>({3, 8},
{
{1, 2, 3, 3, 5},
{6, 4, 5, 5, 10},
{11, 6, 6, 6, 15},
}),
span<const Index>({2, 3}), DownsampleMethod::kMode),
Optional(MakeOffsetArray<float>({1, 2}, {{1, 3, 5}, {6, 6, 10}})));
}
TEST(DownsampleArrayTest, StrideRank2PartialEndBlock) {
EXPECT_THAT(
DownsampleArray(MakeOffsetArray<float>({2, 6},
{
{1, 2, 3, 4, 5},
{6, 7, 8, 9, 10},
{11, 12, 13, 14, 15},
}),
span<const Index>({2, 3}), DownsampleMethod::kStride),
Optional(MakeOffsetArray<float>({1, 2}, {
{1, 4},
{11, 14},
})));
}
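// The stride method keeps only the elements whose base index is an exact
// multiple of the downsample factor: here rows {2, 4} and columns {6, 9}
// survive, landing at downsampled origin {2/2, 6/3} = {1, 2}.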
TEST(DownsampleArrayTest, StrideRank2PartialStartBlock) {
EXPECT_THAT(
DownsampleArray(MakeOffsetArray<float>({3, 8},
{
{1, 2, 3, 4, 5},
{6, 7, 8, 9, 10},
{11, 12, 13, 14, 15},
}),
span<const Index>({2, 3}), DownsampleMethod::kStride),
Optional(MakeOffsetArray<float>({2, 3}, {
{7, 10},
})));
}
TEST(DownsampleArrayTest, MeanRank3ThreeDownsampleDims) {
EXPECT_THAT(
DownsampleArray(MakeArray<float>({{
{1, 2, 3, 4},
{5, 6, 7, 8},
{9, 10, 11, 12},
},
{
{13, 14, 15, 16},
{17, 18, 19, 20},
{21, 22, 23, 24},
},
{
{25, 26, 27, 28},
{29, 30, 31, 32},
{33, 34, 35, 36},
}}),
span<const Index>({2, 2, 2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({{
{9.5, 11.5},
{15.5, 17.5},
},
{
{27.5, 29.5},
{33.5, 35.5},
}})));
}
TEST(DownsampleArrayTest, MeanRank1ReversedExactMultiple) {
EXPECT_THAT(DownsampleTransformedArray(
(MakeArray<float>({1, 2, 3, 4}) |
Dims(0).TranslateSizedInterval(kImplicit, kImplicit, -1))
.value(),
span<const Index>({2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({3.5, 1.5})));
}
TEST(DownsampleArrayTest, MeanRank1ReversedNotExactMultiple) {
EXPECT_THAT(DownsampleTransformedArray(
(MakeArray<float>({1, 2, 3, 4, 5}) |
Dims(0).TranslateSizedInterval(kImplicit, kImplicit, -1))
.value(),
span<const Index>({2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({4.5, 2.5, 1})));
}
TEST(DownsampleArrayTest, MeanRank2ReversedNotExactMultiple) {
EXPECT_THAT(DownsampleTransformedArray(
(MakeArray<float>({
{1, 2, 3, 4, 5},
{6, 7, 8, 9, 10},
{11, 12, 13, 14, 15},
}) |
Dims(0, 1).TranslateSizedInterval(kImplicit, kImplicit, -1))
.value(),
span<const Index>({2, 2}), DownsampleMethod::kMean),
Optional(MakeArray<float>({
{12, 10, 8.5},
{4.5, 2.5, 1},
})));
}
TEST(DownsampleArrayTest, MinRank1ExactMultiple) {
EXPECT_THAT(DownsampleArray(MakeArray<float>({2, 3, 5, 1}),
span<const Index>({2}), DownsampleMethod::kMin),
Optional(MakeArray<float>({2, 1})));
EXPECT_THAT(DownsampleArray(MakeArray<int>({2, 3, 8, 7, 1, 5}),
span<const Index>({3}), DownsampleMethod::kMin),
Optional(MakeArray<int>({2, 1})));
}
TEST(DownsampleArrayTest, MaxRank1ExactMultiple) {
EXPECT_THAT(DownsampleArray(MakeArray<float>({2, 3, 5, 1}),
span<const Index>({2}), DownsampleMethod::kMax),
Optional(MakeArray<float>({3, 5})));
EXPECT_THAT(DownsampleArray(MakeArray<int>({2, 3, 8, 7, 1, 5}),
span<const Index>({3}), DownsampleMethod::kMax),
Optional(MakeArray<int>({8, 7})));
}
TEST(DownsampleArrayTest, MedianRank1ExactMultiple) {
EXPECT_THAT(
DownsampleArray(MakeArray<float>({100, 3, 1, 2, 99, 98, 97, 5}),
span<const Index>({4}), DownsampleMethod::kMedian),
Optional(MakeArray<float>({2, 97})));
}
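// For an even number of elements per block, the median used here is the lower
// of the two middle values: {1, 2, 3, 100} -> 2 and {5, 97, 98, 99} -> 97.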
TEST(DownsampleArrayTest, MedianRank1Partial) {
EXPECT_THAT(
DownsampleArray(MakeArray<float>({100, 3, 1, 2, 99, 97, 98}),
span<const Index>({4}), DownsampleMethod::kMedian),
Optional(MakeArray<float>({2, 98})));
}
TEST(DownsampleArrayTest, ModeRank1ExactMultiple) {
EXPECT_THAT(DownsampleArray(MakeArray<float>({100, 99, 99, 99, 3, 3, 2, 2}),
span<const Index>({4}), DownsampleMethod::kMode),
Optional(MakeArray<float>({99, 2})));
}
TEST(DownsampleArrayTest, ModeRank1Partial) {
EXPECT_THAT(DownsampleArray(MakeArray<float>({100, 99, 99, 99, 3, 3, 2}),
span<const Index>({4}), DownsampleMethod::kMode),
Optional(MakeArray<float>({99, 3})));
}
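// Mode ties are broken toward the smaller value, as the boolean cases below
// demonstrate: {0, 0, 1, 1} yields 0.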
TEST(DownsampleArrayTest, ModeBool) {
EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 0, 1, 1}),
span<const Index>({4}), DownsampleMethod::kMode),
Optional(MakeArray<bool>({0})));
EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 1, 1, 1}),
span<const Index>({4}), DownsampleMethod::kMode),
Optional(MakeArray<bool>({1})));
EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 0, 1, 1, 1}),
span<const Index>({5}), DownsampleMethod::kMode),
Optional(MakeArray<bool>({1})));
}
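// The boolean mean is rounded to the nearest value; an exact 0.5 average
// yields false, as the first expectation below shows.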
TEST(DownsampleArrayTest, MeanBool) {
EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 0, 1, 1}),
span<const Index>({4}), DownsampleMethod::kMean),
Optional(MakeArray<bool>({0})));
EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 1, 1, 1}),
span<const Index>({4}), DownsampleMethod::kMean),
Optional(MakeArray<bool>({1})));
EXPECT_THAT(DownsampleArray(MakeArray<bool>({0, 0, 1, 1, 1}),
span<const Index>({5}), DownsampleMethod::kMean),
Optional(MakeArray<bool>({1})));
}
TEST(DownsampleArrayTest, MedianBool) {
EXPECT_THAT(
DownsampleArray(MakeArray<bool>({0, 0, 1, 1}), span<const Index>({4}),
DownsampleMethod::kMedian),
Optional(MakeArray<bool>({0})));
EXPECT_THAT(
DownsampleArray(MakeArray<bool>({0, 1, 1, 1}), span<const Index>({4}),
DownsampleMethod::kMedian),
Optional(MakeArray<bool>({1})));
EXPECT_THAT(
DownsampleArray(MakeArray<bool>({0, 0, 1, 1, 1}), span<const Index>({5}),
DownsampleMethod::kMedian),
Optional(MakeArray<bool>({1})));
}
TEST(DownsampleArrayTest, ModeJson) {
using ::tensorstore::dtypes::json_t;
EXPECT_THAT(DownsampleArray(MakeArray<json_t>({"a", "a", 3.0, 3, 3u}),
span<const Index>({5}), DownsampleMethod::kMode),
Optional(MakeArray<::nlohmann::json>({json_t(3)})));
}
TEST(DownsampleArrayTest, MultipleBlocks) {
auto source_array = tensorstore::AllocateArray<uint8_t>({128, 128});
auto expected_downsampled = tensorstore::AllocateArray<uint8_t>({64, 64});
for (int i = 0; i < 128; ++i) {
for (int j = 0; j < 128; ++j) {
source_array(i, j) = static_cast<uint8_t>(i);
}
}
for (int i = 0; i < 64; ++i) {
for (int j = 0; j < 64; ++j) {
expected_downsampled(i, j) = static_cast<uint8_t>(i * 2);
}
}
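  // Each 2x2 block averages rows 2*i and 2*i + 1, i.e. 2*i + 0.5, which
  // rounds to 2*i in the uint8_t output.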
EXPECT_THAT(DownsampleArray(source_array, {{2, 2}}, DownsampleMethod::kMean),
Optional(tensorstore::MatchesArray(expected_downsampled)));
}
} |
581 | cpp | google/tensorstore | xz_compressor | tensorstore/driver/n5/xz_compressor.cc | tensorstore/driver/n5/xz_compressor_test.cc | #ifndef TENSORSTORE_INTERNAL_COMPRESSION_XZ_COMPRESSOR_H_
#define TENSORSTORE_INTERNAL_COMPRESSION_XZ_COMPRESSOR_H_
#include <cstddef>
#include <lzma.h>
#include "tensorstore/internal/compression/json_specified_compressor.h"
namespace tensorstore {
namespace internal {
struct XzOptions {
int level = 6;
bool extreme = false;
::lzma_check check = LZMA_CHECK_CRC64;
};
class XzCompressor : public JsonSpecifiedCompressor, public XzOptions {
public:
std::unique_ptr<riegeli::Writer> GetWriter(
std::unique_ptr<riegeli::Writer> base_writer,
size_t element_bytes) const override;
std::unique_ptr<riegeli::Reader> GetReader(
std::unique_ptr<riegeli::Reader> base_reader,
size_t element_bytes) const override;
};
}
}
#endif
#include "tensorstore/internal/compression/xz_compressor.h"
#include "riegeli/bytes/cord_reader.h"
#include "riegeli/bytes/cord_writer.h"
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/writer.h"
#include "riegeli/xz/xz_reader.h"
#include "riegeli/xz/xz_writer.h"
namespace tensorstore {
namespace internal {
std::unique_ptr<riegeli::Writer> XzCompressor::GetWriter(
std::unique_ptr<riegeli::Writer> base_writer, size_t element_bytes) const {
using Writer = riegeli::XzWriter<std::unique_ptr<riegeli::Writer>>;
Writer::Options options;
options.set_container(Writer::Container::kXz);
options.set_check(static_cast<Writer::Check>(check));
options.set_compression_level(level);
options.set_extreme(extreme);
return std::make_unique<Writer>(std::move(base_writer), options);
}
std::unique_ptr<riegeli::Reader> XzCompressor::GetReader(
std::unique_ptr<riegeli::Reader> base_reader, size_t element_bytes) const {
using Reader = riegeli::XzReader<std::unique_ptr<riegeli::Reader>>;
Reader::Options options;
options.set_container(Reader::Container::kXzOrLzma);
options.set_concatenate(true);
return std::make_unique<Reader>(std::move(base_reader), options);
}
}
} | #include "tensorstore/internal/compression/xz_compressor.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "absl/strings/cord_test_helpers.h"
#include <lzma.h>
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal::XzCompressor;
TEST(XzCompressorTest, SmallRoundtrip) {
XzCompressor compressor;
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result("abc"), decode_result("def");
TENSORSTORE_ASSERT_OK(compressor.Encode(input, &encode_result, 0));
ASSERT_GE(encode_result.size(), 3);
EXPECT_EQ("abc", encode_result.Subcord(0, 3));
TENSORSTORE_ASSERT_OK(compressor.Decode(
encode_result.Subcord(3, encode_result.size() - 3), &decode_result, 0));
EXPECT_EQ("def" + std::string(input), decode_result);
}
TEST(XzCompressorTest, SmallRoundtripFragmented) {
XzCompressor compressor;
const absl::Cord input = absl::MakeFragmentedCord(
{"The quick", " brown fox", " jumped over", " ", "the lazy dog."});
absl::Cord encode_result("abc"), decode_result("def");
TENSORSTORE_ASSERT_OK(compressor.Encode(input, &encode_result, 0));
ASSERT_GE(encode_result.size(), 3);
EXPECT_EQ("abc", encode_result.Subcord(0, 3));
std::vector<std::string> encode_result_fragments;
for (size_t i = 3; i < encode_result.size(); ++i) {
encode_result_fragments.push_back(std::string(encode_result.Subcord(i, 1)));
}
TENSORSTORE_ASSERT_OK(compressor.Decode(
absl::MakeFragmentedCord(encode_result_fragments), &decode_result, 0));
EXPECT_EQ("def" + std::string(input), decode_result);
}
TEST(XzCompressorTest, LargeRoundtrip) {
std::string input(100000, '\0');
unsigned char x = 0;
for (auto& v : input) {
v = x;
x += 7;
}
XzCompressor compressor;
absl::Cord encode_result, decode_result;
TENSORSTORE_ASSERT_OK(
compressor.Encode(absl::Cord(input), &encode_result, 0));
TENSORSTORE_ASSERT_OK(compressor.Decode(encode_result, &decode_result, 0));
EXPECT_EQ(input, decode_result);
}
TEST(XzCompressorTest, NonDefaultLevel) {
XzCompressor compressor;
XzCompressor compressor2;
compressor2.level = 9;
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result1, encode_result2;
TENSORSTORE_ASSERT_OK(compressor.Encode(input, &encode_result1, 0));
TENSORSTORE_ASSERT_OK(compressor2.Encode(input, &encode_result2, 0));
EXPECT_NE(encode_result1, encode_result2);
absl::Cord decode_result;
TENSORSTORE_ASSERT_OK(compressor.Decode(encode_result2, &decode_result, 0));
EXPECT_EQ(input, decode_result);
}
TEST(XzCompressorTest, NonDefaultCheck) {
XzCompressor compressor;
XzCompressor compressor2;
compressor2.check = LZMA_CHECK_CRC32;
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result1, encode_result2;
TENSORSTORE_ASSERT_OK(compressor.Encode(input, &encode_result1, 0));
TENSORSTORE_ASSERT_OK(compressor2.Encode(input, &encode_result2, 0));
EXPECT_NE(encode_result1, encode_result2);
absl::Cord decode_result;
TENSORSTORE_ASSERT_OK(compressor.Decode(encode_result2, &decode_result, 0));
EXPECT_EQ(input, decode_result);
}
TEST(XzCompressorTest, DecodeCorruptData) {
XzCompressor compressor;
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
{
absl::Cord encode_result, decode_result;
TENSORSTORE_ASSERT_OK(compressor.Encode(input, &encode_result, 0));
ASSERT_GE(encode_result.size(), 1);
std::string corrupted(encode_result);
corrupted[0] = 0;
EXPECT_THAT(compressor.Decode(absl::Cord(corrupted), &decode_result, 0),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
{
absl::Cord encode_result, decode_result;
TENSORSTORE_ASSERT_OK(compressor.Encode(input, &encode_result, 0));
ASSERT_GE(encode_result.size(), 1);
EXPECT_THAT(
compressor.Decode(encode_result.Subcord(0, encode_result.size() - 1),
&decode_result, 0),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
}
} |
582 | cpp | google/tensorstore | chunk_cache | tensorstore/driver/zarr3/chunk_cache.cc | tensorstore/internal/cache/chunk_cache_test.cc | #ifndef TENSORSTORE_INTERNAL_CACHE_CHUNK_CACHE_H_
#define TENSORSTORE_INTERNAL_CACHE_CHUNK_CACHE_H_
#include <stddef.h>
#include <atomic>
#include <memory>
#include <string_view>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/data_type.h"
#include "tensorstore/driver/chunk.h"
#include "tensorstore/driver/read_request.h"
#include "tensorstore/driver/write_request.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/internal/async_write_array.h"
#include "tensorstore/internal/cache/async_cache.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/chunk_grid_specification.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/staleness_bound.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/sender.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal {
class ChunkCache : public AsyncCache {
public:
using ReadData = SharedArray<const void>;
static SharedArrayView<const void> GetReadComponent(
const ChunkCache::ReadData* components, size_t component_index) {
if (!components) return {};
return components[component_index];
}
class Entry : public AsyncCache::Entry {
public:
using OwningCache = ChunkCache;
span<const Index> cell_indices() {
return {reinterpret_cast<const Index*>(key().data()),
static_cast<ptrdiff_t>(key().size() / sizeof(Index))};
}
span<const ChunkGridSpecification::Component> component_specs() {
return GetOwningCache(*this).grid().components;
}
Future<const void> Delete(internal::OpenTransactionPtr transaction);
size_t ComputeReadDataSizeInBytes(const void* read_data) override;
};
class TransactionNode : public AsyncCache::TransactionNode {
public:
using OwningCache = ChunkCache;
explicit TransactionNode(Entry& entry);
using Component = AsyncWriteArray;
span<Component> components() { return components_; }
absl::Status Delete();
size_t ComputeWriteStateSizeInBytes() override;
span<const ChunkGridSpecification::Component> component_specs() {
return GetOwningCache(*this).grid().components;
}
bool IsUnconditional() const {
return unconditional_.load(std::memory_order_relaxed);
}
void SetUnconditional() {
unconditional_.store(true, std::memory_order_relaxed);
}
virtual absl::Status OnModified();
void DoApply(ApplyOptions options, ApplyReceiver receiver) override;
void InvalidateReadState() override;
virtual absl::Status RequireRepeatableRead(
const StorageGeneration& generation) {
return absl::OkStatus();
}
private:
friend class ChunkCache;
absl::InlinedVector<Component, 1> components_;
std::atomic<bool> unconditional_{false};
public:
bool is_modified{false};
};
class WritebackSnapshot {
public:
explicit WritebackSnapshot(TransactionNode& node,
AsyncCache::ReadView<ReadData> read_state);
bool equals_fill_value() const { return !new_read_data_; }
const std::shared_ptr<ReadData>& new_read_data() const {
return new_read_data_;
}
std::shared_ptr<ReadData>& new_read_data() { return new_read_data_; }
private:
std::shared_ptr<ReadData> new_read_data_;
};
virtual const ChunkGridSpecification& grid() const = 0;
virtual const Executor& executor() const = 0;
struct ReadRequest : public internal::DriverReadRequest {
size_t component_index;
absl::Time staleness_bound;
};
virtual void Read(
ReadRequest request,
AnyFlowReceiver<absl::Status, ReadChunk, IndexTransform<>> receiver);
struct WriteRequest : public internal::DriverWriteRequest {
size_t component_index;
};
virtual void Write(
WriteRequest request,
AnyFlowReceiver<absl::Status, WriteChunk, IndexTransform<>> receiver);
Future<const void> DeleteCell(span<const Index> grid_cell_indices,
internal::OpenTransactionPtr transaction);
};
class ConcreteChunkCache : public ChunkCache {
public:
explicit ConcreteChunkCache(ChunkGridSpecification grid, Executor executor)
: grid_(std::move(grid)), executor_(std::move(executor)) {}
const ChunkGridSpecification& grid() const override { return grid_; }
const Executor& executor() const override { return executor_; }
private:
internal::ChunkGridSpecification grid_;
Executor executor_;
};
}
}
#endif
#include "tensorstore/internal/cache/chunk_cache.h"
#include <stddef.h>
#include <algorithm>
#include <atomic>
#include <cassert>
#include <memory>
#include <mutex>
#include <numeric>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/driver/chunk.h"
#include "tensorstore/driver/chunk_receiver_utils.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/async_write_array.h"
#include "tensorstore/internal/cache/async_cache.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/grid_partition.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/memory.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/mutex.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/rank.h"
#include "tensorstore/read_write_options.h"
#include "tensorstore/staleness_bound.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/element_pointer.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender.h"
#include "tensorstore/util/extents.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
auto& num_writes = internal_metrics::Counter<int64_t>::New(
"/tensorstore/cache/chunk_cache/writes", "Number of writes to ChunkCache.");
auto& num_reads = internal_metrics::Counter<int64_t>::New(
"/tensorstore/cache/chunk_cache/reads", "Number of reads from ChunkCache.");
namespace {
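// Returns true if every component of the chunk has been fully overwritten by
// this transaction, in which case writeback does not depend on the existing
// stored value.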
bool IsFullyOverwritten(ChunkCache::TransactionNode& node) {
auto& entry = GetOwningEntry(node);
const auto& grid = GetOwningCache(entry).grid();
const auto& component_specs = grid.components;
const span<const Index> cell_indices = entry.cell_indices();
for (size_t component_index = 0, num_components = component_specs.size();
component_index != num_components; ++component_index) {
if (!node.components()[component_index].write_state.IsFullyOverwritten(
component_specs[component_index].array_spec,
grid.GetCellDomain(component_index, cell_indices))) {
return false;
}
}
return true;
}
struct ReadChunkImpl {
size_t component_index;
PinnedCacheEntry<ChunkCache> entry;
absl::Status operator()(internal::LockCollection& lock_collection) const {
return absl::OkStatus();
}
Result<NDIterable::Ptr> operator()(ReadChunk::BeginRead,
IndexTransform<> chunk_transform,
Arena* arena) const {
auto& grid = GetOwningCache(*entry).grid();
auto domain = grid.GetCellDomain(component_index, entry->cell_indices());
SharedArray<const void, dynamic_rank(kMaxRank)> read_array{
ChunkCache::GetReadComponent(
AsyncCache::ReadLock<ChunkCache::ReadData>(*entry).data(),
component_index)};
return grid.components[component_index].array_spec.GetReadNDIterable(
std::move(read_array), domain, std::move(chunk_transform), arena);
}
};
struct ReadChunkTransactionImpl {
size_t component_index;
OpenTransactionNodePtr<ChunkCache::TransactionNode> node;
absl::Status operator()(internal::LockCollection& lock_collection) const {
constexpr auto lock_chunk = [](void* data, bool lock)
ABSL_NO_THREAD_SAFETY_ANALYSIS -> bool {
auto& node = *static_cast<ChunkCache::TransactionNode*>(data);
if (lock) {
node.WriterLock();
} else {
node.WriterUnlock();
}
return true;
};
lock_collection.Register(node.get(), +lock_chunk, true);
return absl::OkStatus();
}
Result<NDIterable::Ptr> operator()(ReadChunk::BeginRead,
IndexTransform<> chunk_transform,
Arena* arena) const {
auto& entry = GetOwningEntry(*node);
auto& grid = GetOwningCache(entry).grid();
const auto& component_spec = grid.components[component_index];
auto& component = node->components()[component_index];
auto domain = grid.GetCellDomain(component_index, entry.cell_indices());
SharedArray<const void, dynamic_rank(kMaxRank)> read_array;
StorageGeneration read_generation;
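    // For transactions in repeatable-read mode, record the generation that
    // was observed so that a later conflicting update can be detected.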
{
AsyncCache::ReadLock<ChunkCache::ReadData> read_lock(*node);
read_array =
ChunkCache::GetReadComponent(read_lock.data(), component_index);
read_generation = read_lock.stamp().generation;
if (!node->IsUnconditional() &&
(node->transaction()->mode() & repeatable_read)) {
TENSORSTORE_RETURN_IF_ERROR(
node->RequireRepeatableRead(read_generation));
}
}
return component.GetReadNDIterable(component_spec.array_spec, domain,
std::move(read_array), read_generation,
std::move(chunk_transform), arena);
}
};
struct WriteChunkImpl {
size_t component_index;
OpenTransactionNodePtr<ChunkCache::TransactionNode> node;
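  // Registers the transaction node with the lock collection.  Acquisition
  // uses try_lock rather than a blocking lock so that the lock collection can
  // back off and retry when locking multiple chunks, avoiding deadlock.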
absl::Status operator()(internal::LockCollection& lock_collection) {
constexpr auto lock_chunk = [](void* data, bool lock)
ABSL_NO_THREAD_SAFETY_ANALYSIS -> bool {
auto& node = *static_cast<ChunkCache::TransactionNode*>(data);
if (lock) {
return node.try_lock();
} else {
node.WriterUnlock();
return true;
}
};
if (node->IsRevoked()) {
OpenTransactionPtr transaction(node->transaction());
TENSORSTORE_ASSIGN_OR_RETURN(
node, GetTransactionNode(GetOwningEntry(*node), transaction));
}
lock_collection.Register(node.get(), +lock_chunk, false);
return absl::OkStatus();
}
Result<NDIterable::Ptr> operator()(WriteChunk::BeginWrite,
IndexTransform<> chunk_transform,
Arena* arena) const {
auto& entry = GetOwningEntry(*node);
auto& grid = GetOwningCache(entry).grid();
const auto& component_spec = grid.components[component_index];
auto domain = grid.GetCellDomain(component_index, entry.cell_indices());
node->MarkSizeUpdated();
return node->components()[component_index].BeginWrite(
component_spec.array_spec, domain, std::move(chunk_transform), arena);
}
WriteChunk::EndWriteResult operator()(WriteChunk::EndWrite,
IndexTransformView<> chunk_transform,
bool success, Arena* arena) const {
auto& entry = GetOwningEntry(*node);
auto& grid = GetOwningCache(entry).grid();
const auto& component_spec = grid.components[component_index];
auto domain = grid.GetCellDomain(component_index, entry.cell_indices());
node->components()[component_index].EndWrite(
component_spec.array_spec, domain, chunk_transform, success, arena);
node->is_modified = true;
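    // Once every component is fully overwritten, the write no longer depends
    // on the existing stored value and can be committed unconditionally.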
if (IsFullyOverwritten(*node)) {
node->SetUnconditional();
}
return {node->OnModified(), node->transaction()->future()};
}
bool operator()(WriteChunk::WriteArray, IndexTransformView<> chunk_transform,
WriteChunk::GetWriteSourceArrayFunction get_source_array,
Arena* arena,
WriteChunk::EndWriteResult& end_write_result) const {
auto& entry = GetOwningEntry(*node);
auto& grid = GetOwningCache(entry).grid();
const auto& component_spec = grid.components[component_index];
auto domain = grid.GetCellDomain(component_index, entry.cell_indices());
using WriteArraySourceCapabilities =
AsyncWriteArray::WriteArraySourceCapabilities;
auto status = node->components()[component_index].WriteArray(
component_spec.array_spec, domain, chunk_transform,
[&]() -> Result<std::pair<TransformedSharedArray<const void>,
WriteArraySourceCapabilities>> {
TENSORSTORE_ASSIGN_OR_RETURN(auto info, get_source_array());
auto source_restriction = std::get<1>(info);
WriteArraySourceCapabilities source_capabilities;
switch (source_restriction) {
case cannot_reference_source_data:
source_capabilities = WriteArraySourceCapabilities::kCannotRetain;
break;
case can_reference_source_data_indefinitely:
source_capabilities = WriteArraySourceCapabilities::
kImmutableAndCanRetainIndefinitely;
break;
}
return {std::in_place, std::move(std::get<0>(info)),
source_capabilities};
});
if (!status.ok()) {
if (absl::IsCancelled(status)) return false;
end_write_result = {status};
return true;
}
node->is_modified = true;
node->SetUnconditional();
end_write_result = {node->OnModified(), node->transaction()->future()};
return true;
}
};
}
void ChunkCache::Read(
ReadRequest request,
AnyFlowReceiver<absl::Status, ReadChunk, IndexTransform<>> receiver) {
assert(request.component_index >= 0 &&
request.component_index < grid().components.size());
const auto& component_spec = grid().components[request.component_index];
using ReadOperationState = ChunkOperationState<ReadChunk>;
auto state = MakeIntrusivePtr<ReadOperationState>(std::move(receiver));
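  // Partition the requested transform over the regular chunk grid; each
  // intersecting grid cell produces one ReadChunk, emitted once its cache
  // entry has been read.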
auto status = PartitionIndexTransformOverRegularGrid(
component_spec.chunked_to_cell_dimensions, grid().chunk_shape,
request.transform,
[&](span<const Index> grid_cell_indices,
IndexTransformView<> cell_transform) {
if (state->cancelled()) {
return absl::CancelledError("");
}
num_reads.Increment();
TENSORSTORE_ASSIGN_OR_RETURN(
auto cell_to_source,
ComposeTransforms(request.transform, cell_transform));
auto entry = GetEntryForGridCell(*this, grid_cell_indices);
ReadChunk chunk;
chunk.transform = std::move(cell_to_source);
Future<const void> read_future;
const auto get_cache_read_request = [&] {
AsyncCache::AsyncCacheReadRequest cache_request;
cache_request.staleness_bound = request.staleness_bound;
cache_request.batch = request.batch;
return cache_request;
};
if (request.transaction) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto node, GetTransactionNode(*entry, request.transaction));
read_future = node->IsUnconditional()
? MakeReadyFuture()
: node->Read(get_cache_read_request());
chunk.impl = ReadChunkTransactionImpl{request.component_index,
std::move(node)};
} else {
read_future = entry->Read(get_cache_read_request());
chunk.impl = ReadChunkImpl{request.component_index, std::move(entry)};
}
LinkValue(
[state, chunk = std::move(chunk),
cell_transform = IndexTransform<>(cell_transform)](
Promise<void> promise, ReadyFuture<const void> future) mutable {
execution::set_value(state->shared_receiver->receiver,
std::move(chunk), std::move(cell_transform));
},
state->promise, std::move(read_future));
return absl::OkStatus();
});
if (!status.ok()) {
state->SetError(std::move(status));
}
}
void ChunkCache::Write(
WriteRequest request,
AnyFlowReceiver<absl::Status, WriteChunk, IndexTransform<>> receiver) {
assert(request.component_index >= 0 &&
request.component_index < grid().components.size());
const auto& component_spec = grid().components[request.component_index];
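  // Unlike Read, no I/O is issued here: each chunk write is staged in a
  // transaction node and only applied to storage on writeback.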
std::atomic<bool> cancelled{false};
execution::set_starting(receiver, [&cancelled] { cancelled = true; });
absl::Status status = PartitionIndexTransformOverRegularGrid(
component_spec.chunked_to_cell_dimensions, grid().chunk_shape,
request.transform,
[&](span<const Index> grid_cell_indices,
IndexTransformView<> cell_transform) {
if (cancelled) return absl::CancelledError("");
num_writes.Increment();
TENSORSTORE_ASSIGN_OR_RETURN(
auto cell_to_dest,
ComposeTransforms(request.transform, cell_transform));
auto entry = GetEntryForGridCell(*this, grid_cell_indices);
auto transaction_copy = request.transaction;
TENSORSTORE_ASSIGN_OR_RETURN(
auto node, GetTransactionNode(*entry, transaction_copy));
execution::set_value(
receiver,
WriteChunk{WriteChunkImpl{request.component_index, std::move(node)},
std::move(cell_to_dest)},
IndexTransform<>(cell_transform));
return absl::OkStatus();
});
if (!status.ok()) {
execution::set_error(receiver, status);
} else {
execution::set_done(receiver);
}
execution::set_stopping(receiver);
}
Future<const void> ChunkCache::DeleteCell(
span<const Index> grid_cell_indices,
internal::OpenTransactionPtr transaction) {
return GetEntryForGridCell(*this, grid_cell_indices)->Delete(transaction);
}
absl::Status ChunkCache::TransactionNode::Delete() {
  UniqueWriterLock lock(*this);
  this->MarkSizeUpdated();
  this->is_modified = true;
  auto& entry = GetOwningEntry(*this);
  const span<const Index> cell_indices = entry.cell_indices();
  const auto& grid = GetOwningCache(entry).grid();
  for (Index component_index = 0, num_components = components().size();
       component_index != num_components; ++component_index) {
    components()[component_index].write_state.WriteFillValue(
        grid.components[component_index].array_spec,
        grid.GetCellDomain(component_index, cell_indices));
  }
  SetUnconditional();
  return OnModified();
}
}
} | #include "tensorstore/internal/cache/chunk_cache.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <array>
#include <cmath>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/inlined_vector.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/time/time.h"
#include "riegeli/bytes/cord_reader.h"
#include "riegeli/bytes/cord_writer.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/context.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/driver/chunk.h"
#include "tensorstore/driver/chunk_cache_driver.h"
#include "tensorstore/driver/driver.h"
#include "tensorstore/driver/driver_handle.h"
#include "tensorstore/driver/driver_testutil.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/async_write_array.h"
#include "tensorstore/internal/cache/async_cache.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/cache/kvs_backed_cache.h"
#include "tensorstore/internal/chunk_grid_specification.h"
#include "tensorstore/internal/element_copy_function.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/global_initializer.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/memory.h"
#include "tensorstore/internal/queue_testutil.h"
#include "tensorstore/internal/riegeli/array_endian_codec.h"
#include "tensorstore/internal/thread/thread_pool.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/memory/memory_key_value_store.h"
#include "tensorstore/kvstore/mock_kvstore.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/open_mode.h"
#include "tensorstore/progress.h"
#include "tensorstore/read_write_options.h"
#include "tensorstore/staleness_bound.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/tensorstore.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/constant_vector.h"
#include "tensorstore/util/endian.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/garbage_collection.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
namespace kvstore = tensorstore::kvstore;
using ::tensorstore::ArrayView;
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::DimensionIndex;
using ::tensorstore::Executor;
using ::tensorstore::Future;
using ::tensorstore::Index;
using ::tensorstore::IndexTransform;
using ::tensorstore::MakeArray;
using ::tensorstore::MakeCopy;
using ::tensorstore::MatchesStatus;
using ::tensorstore::no_transaction;
using ::tensorstore::ReadProgressFunction;
using ::tensorstore::Result;
using ::tensorstore::SharedArray;
using ::tensorstore::span;
using ::tensorstore::StalenessBound;
using ::tensorstore::StorageGeneration;
using ::tensorstore::TensorStore;
using ::tensorstore::Transaction;
using ::tensorstore::WriteProgressFunction;
using ::tensorstore::internal::AsyncCache;
using ::tensorstore::internal::AsyncWriteArray;
using ::tensorstore::internal::CachePool;
using ::tensorstore::internal::CachePtr;
using ::tensorstore::internal::ChunkCache;
using ::tensorstore::internal::ChunkGridSpecification;
using ::tensorstore::internal::ConcreteChunkCache;
using ::tensorstore::internal::ElementCopyFunction;
using ::tensorstore::internal::GetCache;
using ::tensorstore::internal::GetEntryForGridCell;
using ::tensorstore::internal::MakeReadWritePtr;
using ::tensorstore::internal::MockKeyValueStore;
using ::tensorstore::internal::PinnedCacheEntry;
using ::tensorstore::internal::ReadWritePtr;
using ::tensorstore::internal::SimpleElementwiseFunction;
using ::testing::ElementsAre;
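// Decodes the raw chunk encoding used by this test (each component array
// stored consecutively in native-endian C order) back into per-component
// arrays.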
Result<std::shared_ptr<const ChunkCache::ReadData>> DecodeRaw(
const ChunkGridSpecification& grid, const absl::Cord* value) {
const auto& component_specs = grid.components;
std::shared_ptr<ChunkCache::ReadData> read_data;
if (value) {
read_data = tensorstore::internal::make_shared_for_overwrite<
ChunkCache::ReadData[]>(component_specs.size());
riegeli::CordReader<const absl::Cord*> reader{value};
for (size_t component_i = 0; component_i < component_specs.size();
++component_i) {
const auto& spec = component_specs[component_i];
TENSORSTORE_ASSIGN_OR_RETURN(
read_data.get()[component_i],
tensorstore::internal::DecodeArrayEndian(
reader, spec.dtype(), spec.shape(), tensorstore::endian::native,
tensorstore::c_order));
}
if (!reader.VerifyEndAndClose()) return reader.status();
}
return std::static_pointer_cast<ChunkCache::ReadData>(std::move(read_data));
}
template <typename ComponentArrays = std::vector<SharedArray<const void>>>
absl::Cord EncodeRaw(const ChunkGridSpecification& grid,
const ComponentArrays& component_arrays) {
absl::Cord value;
riegeli::CordWriter<absl::Cord*> writer{&value};
const auto& component_specs = grid.components;
for (size_t component_i = 0; component_i < component_specs.size();
++component_i) {
const auto& spec = component_specs[component_i];
auto& array = component_arrays[component_i];
ABSL_CHECK(tensorstore::internal::RangesEqual(array.shape(), spec.shape()));
ABSL_CHECK(array.dtype() == spec.dtype());
ABSL_CHECK(tensorstore::internal::EncodeArrayEndian(
array, tensorstore::endian::native, tensorstore::c_order, writer));
}
ABSL_CHECK(writer.Close());
return value;
}
std::string EncodeKey(span<const Index> indices) {
return absl::StrJoin(indices, ",");
}
class TestCache
: public tensorstore::internal::KvsBackedCache<TestCache,
ConcreteChunkCache> {
using Base =
tensorstore::internal::KvsBackedCache<TestCache, ConcreteChunkCache>;
public:
using Base::Base;
class Entry : public Base::Entry {
public:
using OwningCache = TestCache;
void DoDecode(std::optional<absl::Cord> value,
DecodeReceiver receiver) override {
GetOwningCache(*this).executor()([this, value = std::move(value),
receiver =
std::move(receiver)]() mutable {
TENSORSTORE_ASSIGN_OR_RETURN(
auto read_data,
DecodeRaw(GetOwningCache(*this).grid(), value ? &*value : nullptr),
tensorstore::execution::set_error(receiver, _));
tensorstore::execution::set_value(receiver, std::move(read_data));
});
}
void DoEncode(std::shared_ptr<const ReadData> data,
EncodeReceiver receiver) override {
std::optional<absl::Cord> encoded;
if (data) {
encoded = EncodeRaw(GetOwningCache(*this).grid(), data.get());
}
tensorstore::execution::set_value(receiver, std::move(encoded));
}
std::string GetKeyValueStoreKey() override {
return EncodeKey(this->cell_indices());
}
};
Entry* DoAllocateEntry() final { return new Entry; }
size_t DoGetSizeofEntry() final { return sizeof(Entry); }
TransactionNode* DoAllocateTransactionNode(AsyncCache::Entry& entry) final {
return new TransactionNode(static_cast<Entry&>(entry));
}
};
class TestDriver : public tensorstore::internal::ChunkCacheDriver {
public:
using ::tensorstore::internal::ChunkCacheDriver::ChunkCacheDriver;
void GarbageCollectionVisit(
tensorstore::garbage_collection::GarbageCollectionVisitor& visitor)
const final {
}
};
template <typename T>
ElementCopyFunction GetCopyFunction() {
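  // Only the lambda's type is used to instantiate the elementwise function;
  // the object itself is never invoked directly, hence [[maybe_unused]].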
[[maybe_unused]] const auto copy_func =
[](const T* source, T* dest, absl::Status* status) { *dest = *source; };
return SimpleElementwiseFunction<decltype(copy_func), const T, T>();
}
TEST(ChunkGridSpecificationTest, Basic) {
ChunkGridSpecification grid({ChunkGridSpecification::Component{
AsyncWriteArray::Spec{SharedArray<const void>(MakeArray<int>({1, 2})),
Box<>(1)},
{2}}});
EXPECT_EQ(1, grid.components[0].rank());
EXPECT_EQ(1, grid.components[0].chunked_to_cell_dimensions.size());
EXPECT_EQ(1, grid.chunk_shape.size());
absl::InlinedVector<Index, 1> origin;
origin.resize(grid.components[0].rank());
grid.GetComponentOrigin(0, span<const Index>({0}), origin);
EXPECT_THAT(origin, testing::ElementsAre(0));
grid.GetComponentOrigin(0, span<const Index>({1}), origin);
EXPECT_THAT(origin, testing::ElementsAre(2));
}
TEST(ChunkGridSpecificationTest, MoreComplicated) {
std::vector<Index> shape = {1, 2, 3, 4};
SharedArray<const void> fill_value(
tensorstore::internal::AllocateAndConstructSharedElements(
1, tensorstore::value_init, tensorstore::dtype_v<int>),
tensorstore::StridedLayout<>(
shape, tensorstore::GetConstantVector<Index, 0, 4>()));
ChunkGridSpecification grid({ChunkGridSpecification::Component{
AsyncWriteArray::Spec{fill_value, Box<>(shape)},
shape,
{3, 2, 1}}});
EXPECT_EQ(3, grid.chunk_shape.size());
EXPECT_THAT(grid.chunk_shape, testing::ElementsAre(4, 3, 2));
EXPECT_EQ(4, grid.components[0].array_spec.overall_fill_value.rank());
EXPECT_EQ(4, grid.components[0].rank());
EXPECT_EQ(3, grid.components[0].chunked_to_cell_dimensions.size());
EXPECT_THAT(grid.components[0].chunked_to_cell_dimensions,
testing::ElementsAre(3, 2, 1));
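  // The component origin is the grid cell index times the chunk size along
  // each chunked dimension, mapped through chunked_to_cell_dimensions.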
absl::InlinedVector<Index, 4> origin;
origin.resize(grid.components[0].rank());
grid.GetComponentOrigin(0, span<const Index>({0, 0, 0}), origin);
EXPECT_THAT(origin, testing::ElementsAre(0, 0, 0, 0));
grid.GetComponentOrigin(0, span<const Index>({1, 1, 1}), origin);
EXPECT_THAT(origin, testing::ElementsAre(0, 2, 3, 4));
grid.GetComponentOrigin(0, span<const Index>({3, 2, 1}), origin);
EXPECT_THAT(origin, testing::ElementsAre(0, 2, 6, 12));
}
std::vector<Index> ParseKey(std::string_view key) {
std::vector<Index> result;
for (auto s : absl::StrSplit(key, ',')) {
Index i = 0;
ABSL_CHECK(absl::SimpleAtoi(s, &i));
result.push_back(i);
}
return result;
}
ReadWritePtr<TestDriver> MakeDriver(CachePtr<ChunkCache> cache,
size_t component_index = 0,
StalenessBound data_staleness = {}) {
return MakeReadWritePtr<TestDriver>(
tensorstore::ReadWriteMode::read_write,
TestDriver::Initializer{std::move(cache), component_index,
data_staleness});
}
class ChunkCacheTest : public ::testing::Test {
public:
Executor thread_pool = tensorstore::internal::DetachedThreadPool(1);
std::optional<ChunkGridSpecification> grid;
kvstore::DriverPtr memory_store = tensorstore::GetMemoryKeyValueStore();
MockKeyValueStore::MockPtr mock_store = MockKeyValueStore::Make();
std::vector<ChunkCache::ReadData> GetChunk(
const std::vector<Index>& indices) {
auto read_result = memory_store->Read(EncodeKey(indices)).value();
const size_t num_components = grid->components.size();
std::vector<ChunkCache::ReadData> components(num_components);
if (auto read_data =
DecodeRaw(*grid,
read_result.has_value() ? &read_result.value : nullptr)
.value()) {
for (size_t i = 0; i < num_components; ++i) {
components[i] = read_data.get()[i];
}
}
return components;
}
bool HasChunk(const std::vector<Index>& indices) {
auto read_result = memory_store->Read(EncodeKey(indices)).value();
return read_result.has_value();
}
void SetChunk(
const std::vector<Index>& indices,
std::vector<tensorstore::SharedArrayView<const void>> components) {
TENSORSTORE_CHECK_OK(
memory_store->Write(EncodeKey(indices), EncodeRaw(*grid, components)));
}
CachePtr<ChunkCache> MakeChunkCache(
std::string_view cache_identifier = {},
std::optional<CachePool::StrongPtr> pool = {}) {
if (!pool) {
pool = CachePool::Make(CachePool::Limits{10000000});
}
return GetCache<TestCache>(pool->get(), cache_identifier, [&] {
return std::make_unique<TestCache>(mock_store, *grid, thread_pool);
});
}
TensorStore<> GetTensorStore(CachePtr<ChunkCache> cache = {},
StalenessBound data_staleness = {},
size_t component_index = 0,
Transaction transaction = no_transaction) {
if (!cache) cache = MakeChunkCache();
return tensorstore::internal::TensorStoreAccess::Construct<TensorStore<>>(
tensorstore::internal::Driver::Handle{
MakeDriver(cache, component_index, data_staleness),
tensorstore::IdentityTransform(
grid->components[component_index].rank()),
transaction});
}
};
template <typename T>
tensorstore::SharedOffsetArray<T> MakeSequentialArray(BoxView<> domain) {
auto array = tensorstore::AllocateArray<T>(domain);
T value = T{};
IterateOverArrays(
[&](T* ptr) {
*ptr = value;
++value;
},
tensorstore::c_order, array);
return array;
}
ChunkGridSpecification GetSimple1DGrid() {
return ChunkGridSpecification({ChunkGridSpecification::Component{
AsyncWriteArray::Spec{MakeSequentialArray<int>(BoxView<>{{0}, {10}}),
Box<>(1)},
{2}}});
}
TEST_F(ChunkCacheTest, ReadSingleComponentOneDimensionalFill) {
grid = GetSimple1DGrid();
auto cache = MakeChunkCache();
{
auto read_future =
tensorstore::Read(GetTensorStore(cache, absl::InfinitePast()) |
tensorstore::Dims(0).TranslateSizedInterval(3, 3));
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
EXPECT_EQ(StorageGeneration::Unknown(),
r.options.generation_conditions.if_not_equal);
r(memory_store);
}
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(2));
EXPECT_EQ(StorageGeneration::Unknown(),
r.options.generation_conditions.if_not_equal);
r(memory_store);
}
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeArray({3, 4, 5})));
}
{
auto read_future =
tensorstore::Read(GetTensorStore(cache, absl::InfinitePast()) |
tensorstore::Dims(0).TranslateSizedInterval(3, 3));
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeArray({3, 4, 5})));
}
{
auto read_future =
tensorstore::Read(GetTensorStore(cache, absl::InfiniteFuture()) |
tensorstore::Dims(0).TranslateSizedInterval(3, 3));
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
EXPECT_EQ(StorageGeneration::NoValue(),
r.options.generation_conditions.if_not_equal);
r(memory_store);
}
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(2));
EXPECT_EQ(StorageGeneration::NoValue(),
r.options.generation_conditions.if_not_equal);
r(memory_store);
}
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeArray({3, 4, 5})));
}
}
TEST_F(ChunkCacheTest, CancelRead) {
grid = GetSimple1DGrid();
auto cache = MakeChunkCache();
mock_store->forward_to = memory_store;
{
auto read_future =
tensorstore::Read(GetTensorStore(cache, absl::InfinitePast()) |
tensorstore::Dims(0).TranslateSizedInterval(3, 3));
}
}
struct CancelWriteReceiver {
friend void set_starting(CancelWriteReceiver& receiver,
tensorstore::AnyCancelReceiver cancel) {
receiver.cancel = std::move(cancel);
}
friend void set_value(CancelWriteReceiver& receiver,
tensorstore::internal::WriteChunk chunk,
tensorstore::IndexTransform<> cell_transform) {
EXPECT_FALSE(receiver.set_value_called);
receiver.set_value_called = true;
EXPECT_EQ(tensorstore::IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({1})
.output_single_input_dimension(0, 3, 1, 0)
.Finalize()
.value(),
chunk.transform);
EXPECT_EQ(tensorstore::IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({1})
.output_single_input_dimension(0, 0)
.Finalize()
.value(),
cell_transform);
receiver.cancel();
}
friend void set_done(CancelWriteReceiver& receiver) {}
friend void set_error(CancelWriteReceiver& receiver, absl::Status status) {}
friend void set_stopping(CancelWriteReceiver& receiver) {
receiver.cancel = nullptr;
}
bool set_value_called = false;
tensorstore::AnyCancelReceiver cancel;
};
TEST_F(ChunkCacheTest, CancelWrite) {
grid = GetSimple1DGrid();
CancelWriteReceiver receiver;
auto cache = MakeChunkCache();
cache->Write(
ChunkCache::WriteRequest{
{{},
(tensorstore::IdentityTransform(1) |
tensorstore::Dims(0).TranslateSizedInterval(3, 3))
.value()},
0},
std::ref(receiver));
EXPECT_TRUE(receiver.set_value_called);
}
TEST_F(ChunkCacheTest, DriverDataType) {
grid = ChunkGridSpecification({
ChunkGridSpecification::Component{
AsyncWriteArray::Spec{SharedArray<const void>(MakeArray<int>({1, 2})),
Box<>(1)},
{2}},
ChunkGridSpecification::Component{
AsyncWriteArray::Spec{
SharedArray<const void>(MakeArray<float>({{1, 2}, {3, 4}})),
Box<>(2)},
{2, 2},
{1}},
});
auto cache = MakeChunkCache();
EXPECT_EQ(tensorstore::dtype_v<int>, MakeDriver(cache, 0)->dtype());
EXPECT_EQ(tensorstore::dtype_v<float>, MakeDriver(cache, 1)->dtype());
}
TEST_F(ChunkCacheTest, ReadSingleComponentOneDimensionalExisting) {
grid = GetSimple1DGrid();
SetChunk({1}, {MakeArray<int>({42, 43})});
auto cache = MakeChunkCache();
{
auto read_future =
tensorstore::Read(GetTensorStore(cache, absl::InfinitePast()) |
tensorstore::Dims(0).TranslateSizedInterval(3, 3));
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
r(memory_store);
}
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(2));
r(memory_store);
}
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeArray({43, 4, 5})));
}
SetChunk({2}, {MakeArray<int>({44, 45})});
{
auto read_future =
tensorstore::Read(GetTensorStore(cache, absl::InfinitePast()) |
tensorstore::Dims(0).TranslateSizedInterval(3, 3));
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeArray({43, 4, 5})));
}
{
auto read_future =
tensorstore::Read(GetTensorStore(cache, absl::InfiniteFuture()) |
tensorstore::Dims(0).TranslateSizedInterval(3, 3));
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
r(memory_store);
}
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(2));
r(memory_store);
}
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeArray({43, 44, 45})));
}
}
TEST_F(ChunkCacheTest, TwoDimensional) {
grid = ChunkGridSpecification({ChunkGridSpecification::Component{
AsyncWriteArray::Spec{
MakeSequentialArray<int>(BoxView<>({0, 0}, {10, 100})), Box<>(2)},
{2, 3},
{1, 0}}});
auto cache = MakeChunkCache();
auto read_future = tensorstore::Read(
GetTensorStore(cache, absl::InfinitePast()) |
tensorstore::Dims(0, 1).TranslateSizedInterval({1, 5}, {6, 5}));
for (auto cell_indices : std::vector<std::vector<Index>>{{1, 0},
{1, 1},
{1, 2},
{1, 3},
{2, 0},
{2, 1},
{2, 2},
{2, 3},
{3, 0},
{3, 1},
{3, 2},
{3, 3}}) {
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ::testing::ElementsAreArray(cell_indices));
r(memory_store);
}
EXPECT_THAT(read_future.result(), ::testing::Optional(MakeArray<int>({
{105, 106, 107, 108, 109},
{205, 206, 207, 208, 209},
{305, 306, 307, 308, 309},
{405, 406, 407, 408, 409},
{505, 506, 507, 508, 509},
{605, 606, 607, 608, 609},
})));
}
TEST_F(ChunkCacheTest, ReadRequestErrorBasic) {
grid = GetSimple1DGrid();
auto cache = MakeChunkCache();
{
auto read_future =
tensorstore::Read(GetTensorStore(cache, absl::InfinitePast()) |
tensorstore::Dims(0).TranslateSizedInterval(3, 3));
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
r(memory_store);
}
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(2));
r.promise.SetResult(absl::UnknownError("Test read error"));
}
EXPECT_THAT(read_future.result(),
MatchesStatus(absl::StatusCode::kUnknown,
"Error reading .*: Test read error"));
}
{
auto read_future =
tensorstore::Read(GetTensorStore(cache, absl::InfinitePast()) |
tensorstore::Dims(0).TranslateSizedInterval(3, 3));
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(2));
r.promise.SetResult(absl::UnknownError("Test read error 2"));
}
EXPECT_THAT(read_future.result(),
MatchesStatus(absl::StatusCode::kUnknown,
"Error reading .*: Test read error 2"));
}
{
auto read_future =
tensorstore::Read(GetTensorStore(cache, absl::InfiniteFuture()) |
tensorstore::Dims(0).TranslateSizedInterval(3, 3));
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
r(memory_store);
}
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(2));
r(memory_store);
}
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeArray({3, 4, 5})));
}
}
TEST_F(ChunkCacheTest, WriteSingleComponentOneDimensional) {
grid = GetSimple1DGrid();
auto cache = MakeChunkCache();
{
auto read_future =
tensorstore::Read(GetTensorStore(cache, absl::InfinitePast()) |
tensorstore::Dims(0).TranslateSizedInterval(6, 2));
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(3));
r(memory_store);
}
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeArray({6, 7})));
}
auto write_future =
tensorstore::Write(MakeArray<int>({13, 14, 15, 16}),
GetTensorStore(cache) |
tensorstore::Dims(0).TranslateSizedInterval(3, 4));
write_future.Force();
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(1));
EXPECT_EQ(StorageGeneration::Unknown(),
r.options.generation_conditions.if_equal);
r(memory_store);
}
std::vector<std::pair<std::vector<Index>, StorageGeneration>> write_requests;
for (size_t i = 0; i < 3; ++i) {
auto r = mock_store->write_requests.pop();
write_requests.emplace_back(ParseKey(r.key),
r.options.generation_conditions.if_equal);
r(memory_store);
}
EXPECT_THAT(
write_requests,
::testing::UnorderedElementsAre(
::testing::Pair(ElementsAre(2), StorageGeneration::Unknown()),
::testing::Pair(ElementsAre(3), StorageGeneration::NoValue()),
::testing::Pair(ElementsAre(1), StorageGeneration::NoValue())));
EXPECT_THAT(GetChunk({1}), ElementsAre(MakeArray<int>({2, 13})));
EXPECT_THAT(GetChunk({2}), ElementsAre(MakeArray<int>({14, 15})));
EXPECT_THAT(GetChunk({3}), ElementsAre(MakeArray<int>({16, 7})));
TENSORSTORE_EXPECT_OK(write_future);
}
TEST_F(ChunkCacheTest, WriteSingleComponentOneDimensionalCacheDisabled) {
grid = GetSimple1DGrid();
auto cache = MakeChunkCache("", CachePool::StrongPtr{});
{
auto read_future =
tensorstore::Read(GetTensorStore(cache, absl::InfinitePast()) |
tensorstore::Dims(0).TranslateSizedInterval(6, 2));
{
auto r = mock_store->read_requests.pop();
EXPECT_THAT(ParseKey(r.key), ElementsAre(3));
r(memory_store);
}
EXPECT_THAT(read_future.result(),
::testing::Optional(tensorstore::MakeArray({6, 7})));
}
auto write_future =
tensorstore::Write(MakeArray<int>({13, 14, 15, 16}),
GetTensorStore(cache) |
tensorstore::Dims(0).TranslateSizedInterval(3, 4));
write_future.Force();
{
std::vector<std::vector<Index>> read_requests;
for (size_t i = 0; i < 2; ++i) {
auto r = mock_store->read_requests.pop();
read_requests.emplace_back(ParseKey(r.key));
EXPECT_EQ(StorageGeneration::Unknown(),
r.options.generation_conditions.if_equal);
r(memory_store);
}
EXPECT_THAT(read_requests, ::testing::UnorderedElementsAre(ElementsAre(1),
ElementsAre(3)));
}
{
std::vector<std::pair<std::vector<Index>, StorageGeneration>>
write_requests;
for (size_t i = 0; i < 3; ++i) {
auto r = mock_store->write_requests.pop();
write_requests.emplace_back(ParseKey(r.key),
r.options.generation_conditions.if_equal);
r(memory_store);
}
EXPECT_THAT(
write_requests,
::testing::UnorderedElementsAre(
            ::testing::Pair(ElementsAre(2), StorageGeneration::Unknown()),
            ::testing::Pair(ElementsAre(1), StorageGeneration::NoValue()),
            ::testing::Pair(ElementsAre(3), StorageGeneration::NoValue())));
  }
  EXPECT_THAT(GetChunk({1}), ElementsAre(MakeArray<int>({2, 13})));
  EXPECT_THAT(GetChunk({2}), ElementsAre(MakeArray<int>({14, 15})));
  EXPECT_THAT(GetChunk({3}), ElementsAre(MakeArray<int>({16, 7})));
  TENSORSTORE_EXPECT_OK(write_future);
}
}
583 | cpp | google/tensorstore | blosc | tensorstore/driver/zarr3/codec/blosc.cc | tensorstore/driver/zarr3/codec/blosc_test.cc | #ifndef TENSORSTORE_INTERNAL_COMPRESSION_BLOSC_H_
#define TENSORSTORE_INTERNAL_COMPRESSION_BLOSC_H_
#include <cstddef>
#include <string>
#include <string_view>
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace blosc {
struct Options {
const char* compressor;
int clevel;
int shuffle;
size_t blocksize;
size_t element_size;
};
Result<std::string> Encode(std::string_view input, const Options& options);
Result<std::string> Decode(std::string_view input);
}
}
#endif
#include "tensorstore/internal/compression/blosc.h"
#include <cstddef>
#include <string>
#include <string_view>
#include "absl/status/status.h"
#include <blosc.h>
#include "tensorstore/util/result.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace blosc {
Result<std::string> Encode(std::string_view input, const Options& options) {
if (input.size() > BLOSC_MAX_BUFFERSIZE) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Blosc compression input of ", input.size(),
" bytes exceeds maximum size of ", BLOSC_MAX_BUFFERSIZE));
}
std::string output(input.size() + BLOSC_MAX_OVERHEAD, '\0');
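  // A shuffle value of -1 selects an automatic default: bit-shuffle for
  // 1-byte elements, byte-shuffle otherwise.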
int shuffle = options.shuffle;
if (shuffle == -1) {
shuffle = options.element_size == 1 ? BLOSC_BITSHUFFLE : BLOSC_SHUFFLE;
}
const int n = blosc_compress_ctx(
options.clevel, shuffle, options.element_size, input.size(), input.data(),
output.data(), output.size(), options.compressor, options.blocksize,
1);
if (n < 0) {
return absl::InternalError(
tensorstore::StrCat("Internal blosc error: ", n));
}
output.erase(n);
return output;
}
Result<std::string> Decode(std::string_view input) {
size_t nbytes;
if (blosc_cbuffer_validate(input.data(), input.size(), &nbytes) != 0) {
return absl::InvalidArgumentError("Invalid blosc-compressed data");
}
std::string output(nbytes, '\0');
if (nbytes > 0) {
const int n =
blosc_decompress_ctx(input.data(), output.data(), output.size(),
1);
if (n <= 0) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Blosc error: ", n));
}
}
return output;
}
}
} | #include "tensorstore/internal/compression/blosc.h"
#include <cstddef>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <blosc.h>
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
namespace blosc = tensorstore::blosc;
std::vector<blosc::Options> GetTestOptions() {
return {
blosc::Options{"lz4", 5, -1, 0},
blosc::Options{"lz4", 5, BLOSC_SHUFFLE, 0},
blosc::Options{"lz4", 0, BLOSC_SHUFFLE, 0},
blosc::Options{"lz4hc", 5, BLOSC_SHUFFLE, 0},
blosc::Options{"lz4", 5, BLOSC_SHUFFLE, 0},
blosc::Options{"lz4", 1, BLOSC_NOSHUFFLE, 0},
blosc::Options{"lz4", 5, BLOSC_SHUFFLE, 0},
blosc::Options{"lz4", 9, BLOSC_BITSHUFFLE, 0},
blosc::Options{"zlib", 1, BLOSC_NOSHUFFLE, 0},
blosc::Options{"zstd", 1, BLOSC_SHUFFLE, 0},
blosc::Options{"blosclz", 1, BLOSC_BITSHUFFLE, 0},
blosc::Options{"snappy", 1, BLOSC_NOSHUFFLE, 0},
blosc::Options{"lz4", 5, BLOSC_SHUFFLE, 0},
blosc::Options{"lz4", 5, BLOSC_SHUFFLE, 256},
blosc::Options{"lz4", 1, BLOSC_NOSHUFFLE, 256},
};
}
std::vector<std::string> GetTestArrays() {
std::vector<std::string> arrays;
arrays.emplace_back();
{
std::string arr(100, '\0');
unsigned char v = 0;
for (auto& x : arr) {
x = (v += 7);
}
arrays.push_back(std::move(arr));
}
arrays.push_back("The quick brown fox jumped over the lazy dog.");
return arrays;
}
TEST(BloscTest, EncodeDecode) {
for (blosc::Options options : GetTestOptions()) {
for (const auto& array : GetTestArrays()) {
for (const size_t element_size : {1, 2, 10}) {
options.element_size = element_size;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded,
blosc::Encode(array, options));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto decoded, blosc::Decode(encoded));
EXPECT_EQ(array, decoded);
}
}
}
}
TEST(BloscTest, CheckComplib) {
const std::string_view array =
"The quick brown fox jumped over the lazy dog.";
const std::vector<std::pair<std::string, std::string>>
cnames_and_complib_names{{BLOSC_BLOSCLZ_COMPNAME, BLOSC_BLOSCLZ_LIBNAME},
{BLOSC_LZ4_COMPNAME, BLOSC_LZ4_LIBNAME},
{BLOSC_LZ4HC_COMPNAME, BLOSC_LZ4_LIBNAME},
{BLOSC_SNAPPY_COMPNAME, BLOSC_SNAPPY_LIBNAME},
{BLOSC_ZLIB_COMPNAME, BLOSC_ZLIB_LIBNAME},
{BLOSC_ZSTD_COMPNAME, BLOSC_ZSTD_LIBNAME}};
for (const auto& pair : cnames_and_complib_names) {
blosc::Options options{pair.first.c_str(), 5,
-1, 0,
1};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded,
blosc::Encode(array, options));
ASSERT_GE(encoded.size(), BLOSC_MIN_HEADER_LENGTH);
const char* complib = blosc_cbuffer_complib(encoded.data());
EXPECT_EQ(pair.second, complib);
}
}
TEST(BloscTest, CheckShuffleAndElementSize) {
const std::string_view array =
"The quick brown fox jumped over the lazy dog.";
for (int shuffle = -1; shuffle <= 2; ++shuffle) {
for (const size_t element_size : {1, 2, 10}) {
blosc::Options options{"lz4", 5,
shuffle, 0,
element_size};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded,
blosc::Encode(array, options));
ASSERT_GE(encoded.size(), BLOSC_MIN_HEADER_LENGTH);
size_t typesize;
int flags;
blosc_cbuffer_metainfo(encoded.data(), &typesize, &flags);
EXPECT_EQ(element_size, typesize);
const bool expected_byte_shuffle =
shuffle == 1 || (shuffle == -1 && element_size != 1);
const bool expected_bit_shuffle =
shuffle == 2 || (shuffle == -1 && element_size == 1);
EXPECT_EQ(expected_byte_shuffle,
static_cast<bool>(flags & BLOSC_DOSHUFFLE));
EXPECT_EQ(expected_bit_shuffle,
static_cast<bool>(flags & BLOSC_DOBITSHUFFLE));
}
}
}
TEST(BloscTest, CheckBlocksize) {
const std::string array(100000, '\0');
for (size_t blocksize : {256, 512, 1024}) {
blosc::Options options{"lz4", 0,
0, blocksize,
1};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded,
blosc::Encode(array, options));
ASSERT_GE(encoded.size(), BLOSC_MIN_HEADER_LENGTH);
size_t nbytes, cbytes, bsize;
blosc_cbuffer_sizes(encoded.data(), &nbytes, &cbytes, &bsize);
EXPECT_EQ(blocksize, bsize);
}
}
TEST(BloscTest, TooLong) {
blosc::Options options{"lz4", 5,
-1, 0,
1};
EXPECT_THAT(
blosc::Encode(std::string(BLOSC_MAX_BUFFERSIZE + 1, '\0'), options),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(BloscTest, DecodeHeaderCorrupted) {
const std::string_view input =
"The quick brown fox jumped over the lazy dog.";
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto encoded,
blosc::Encode(input, blosc::Options{"lz4", 1,
-1, 0,
1}));
ASSERT_GE(encoded.size(), 1);
std::string corrupted = std::move(encoded);
corrupted[0] = 0;
EXPECT_THAT(blosc::Decode(corrupted),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(BloscCompressorTest, DecodeHeaderTruncated) {
const std::string_view input =
"The quick brown fox jumped over the lazy dog.";
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto encoded,
blosc::Encode(input, blosc::Options{"lz4", 1,
-1, 0,
1}));
ASSERT_GE(encoded.size(), 5);
EXPECT_THAT(blosc::Decode(std::string_view(encoded).substr(0, 5)),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(BloscCompressorTest, DecodeDataTruncated) {
const std::string_view input =
"The quick brown fox jumped over the lazy dog.";
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto encoded,
blosc::Encode(input, blosc::Options{"lz4", 1,
-1, 0,
1}));
EXPECT_THAT(blosc::Decode(
std::string_view(encoded).substr(0, BLOSC_MIN_HEADER_LENGTH)),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
} |
584 | cpp | google/tensorstore | codec_chain_spec | tensorstore/driver/zarr3/codec/codec_chain_spec.cc | tensorstore/driver/zarr3/codec/codec_chain_spec_test.cc | #ifndef TENSORSTORE_DRIVER_ZARR3_CODEC_CODEC_CHAIN_SPEC_H_
#define TENSORSTORE_DRIVER_ZARR3_CODEC_CODEC_CHAIN_SPEC_H_
#include <stddef.h>
#include <optional>
#include <string>
#include <type_traits>
#include <vector>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/codec_spec.h"
#include "tensorstore/driver/zarr3/codec/codec.h"
#include "tensorstore/driver/zarr3/codec/codec_spec.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/cache_key/fwd.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/serialization/fwd.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_zarr3 {
struct ZarrCodecChainSpec {
std::vector<ZarrArrayToArrayCodecSpec::Ptr> array_to_array;
ZarrArrayToBytesCodecSpec::Ptr array_to_bytes;
std::vector<ZarrBytesToBytesCodecSpec::Ptr> bytes_to_bytes;
size_t sharding_height() const;
absl::Status MergeFrom(const ZarrCodecChainSpec& other, bool strict);
absl::Status GetDecodedChunkLayout(
const ArrayDataTypeAndShapeInfo& array_info,
ArrayCodecChunkLayoutInfo& decoded) const;
Result<internal::IntrusivePtr<const ZarrCodecChain>> Resolve(
ArrayCodecResolveParameters&& decoded,
BytesCodecResolveParameters& encoded,
ZarrCodecChainSpec* resolved_spec = nullptr) const;
using FromJsonOptions = ZarrCodecSpec::FromJsonOptions;
using ToJsonOptions = ZarrCodecSpec::ToJsonOptions;
TENSORSTORE_DECLARE_JSON_DEFAULT_BINDER(ZarrCodecChainSpec, FromJsonOptions,
ToJsonOptions);
};
class ZarrShardingCodecSpec : public ZarrArrayToBytesCodecSpec {
public:
bool SupportsInnerOrder(
const ArrayCodecResolveParameters& decoded,
span<DimensionIndex> preferred_inner_order) const override;
virtual absl::Status MergeSubChunkCodecsFrom(const ZarrCodecChainSpec& other,
bool strict) = 0;
virtual const ZarrCodecChainSpec* GetSubChunkCodecs() const = 0;
size_t sharding_height() const override;
};
template <bool Constraints>
constexpr auto ZarrCodecChainJsonBinder =
[](auto is_loading, const auto& orig_options, auto* obj, auto* j) {
using CodecOptions = std::conditional_t<decltype(is_loading)::value,
ZarrCodecSpec::FromJsonOptions,
ZarrCodecSpec::ToJsonOptions>;
CodecOptions codec_options;
codec_options.constraints = Constraints;
if constexpr (!is_loading) {
static_cast<IncludeDefaults&>(codec_options) = orig_options;
}
return ZarrCodecChainSpec::default_json_binder(is_loading, codec_options,
obj, j);
};
absl::Status MergeZarrCodecSpecs(
std::optional<ZarrCodecChainSpec>& target,
const std::optional<ZarrCodecChainSpec>& source, bool strict);
class TensorStoreCodecSpec : public internal::CodecDriverSpec {
public:
constexpr static char id[] = "zarr3";
CodecSpec Clone() const final;
absl::Status DoMergeFrom(const internal::CodecDriverSpec& other_base) final;
std::optional<ZarrCodecChainSpec> codecs;
TENSORSTORE_DECLARE_JSON_DEFAULT_BINDER(TensorStoreCodecSpec, FromJsonOptions,
ToJsonOptions,
::nlohmann::json::object_t)
};
}
namespace internal {
template <>
struct CacheKeyEncoder<internal_zarr3::ZarrCodecChainSpec> {
static void Encode(std::string* out,
const internal_zarr3::ZarrCodecChainSpec& value);
};
}
}
TENSORSTORE_DECLARE_SERIALIZER_SPECIALIZATION(
tensorstore::internal_zarr3::ZarrCodecChainSpec)
TENSORSTORE_DECLARE_GARBAGE_COLLECTION_NOT_REQUIRED(
tensorstore::internal_zarr3::ZarrCodecChainSpec)
#endif
#include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"
#include <stddef.h>
#include <cassert>
#include <optional>
#include <string>
#include <string_view>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/container/fixed_array.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include <nlohmann/json.hpp>
#include "tensorstore/codec_spec.h"
#include "tensorstore/codec_spec_registry.h"
#include "tensorstore/driver/zarr3/codec/bytes.h"
#include "tensorstore/driver/zarr3/codec/codec.h"
#include "tensorstore/driver/zarr3/codec/codec_spec.h"
#include "tensorstore/driver/zarr3/codec/registry.h"
#include "tensorstore/driver/zarr3/codec/transpose.h"
#include "tensorstore/driver/zarr3/name_configuration_json_binder.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/cache_key/cache_key.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/internal/json_binding/std_optional.h"
#include "tensorstore/internal/unaligned_data_type_functions.h"
#include "tensorstore/rank.h"
#include "tensorstore/serialization/fwd.h"
#include "tensorstore/serialization/json_bindable.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_zarr3 {
namespace jb = ::tensorstore::internal_json_binding;
namespace {
struct ZarrCodecJsonBinderImpl {
static absl::Status FromJson(const ZarrCodecSpec::FromJsonOptions& options,
ZarrCodecSpec::Ptr* obj, ::nlohmann::json* j);
static absl::Status ToJson(const ZarrCodecSpec::ToJsonOptions& options,
const ZarrCodecSpec* const* obj,
::nlohmann::json* j);
absl::Status operator()(std::true_type is_loading,
const ZarrCodecSpec::FromJsonOptions& options,
ZarrCodecSpec::Ptr* obj, ::nlohmann::json* j) const {
return FromJson(options, obj, j);
}
template <typename T>
absl::Status operator()(std::false_type is_loading,
const ZarrCodecSpec::ToJsonOptions& options, T* obj,
::nlohmann::json* j) const {
static_assert(
std::is_convertible_v<decltype(&**obj), const ZarrCodecSpec*>);
const ZarrCodecSpec* ptr = &**obj;
return ToJson(options, &ptr, j);
}
};
constexpr inline ZarrCodecJsonBinderImpl ZarrCodecJsonBinder{};
constexpr auto ZarrCodecJsonBinderImplBase =
[](auto is_loading, const auto& options, auto* obj, auto* j) {
const auto& registry = GetCodecRegistry();
if constexpr (is_loading) {
if (options.constraints && j->is_string()) {
::nlohmann::json::object_t j_obj;
j_obj.emplace("name", std::move(*j));
*j = std::move(j_obj);
}
}
return jb::Object(NameConfigurationJsonBinder(
registry.KeyBinder(), registry.RegisteredObjectBinder()))
(is_loading, options, obj, j);
};
absl::Status ZarrCodecJsonBinderImpl::FromJson(
const ZarrCodecSpec::FromJsonOptions& options, ZarrCodecSpec::Ptr* obj,
::nlohmann::json* j) {
return ZarrCodecJsonBinderImplBase(std::true_type{}, options, obj, j);
}
absl::Status ZarrCodecJsonBinderImpl::ToJson(
const ZarrCodecSpec::ToJsonOptions& options,
const ZarrCodecSpec* const* obj, ::nlohmann::json* j) {
return ZarrCodecJsonBinderImplBase(std::false_type{}, options, obj, j);
}
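// Binds the flat JSON codec array to the three typed sub-chains. When
// loading, the array must contain zero or more "array -> array" codecs,
// then (unless parsing constraints) exactly one "array -> bytes" codec,
// then zero or more "bytes -> bytes" codecs; any other ordering is an error.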
constexpr auto ZarrCodecChainSpecJsonBinderImpl = jb::Compose<
std::vector<ZarrCodecSpec::Ptr>>(
[](auto is_loading, const auto& options, auto* obj, auto* j) {
if constexpr (is_loading) {
auto it = j->begin(), end = j->end();
for (; it != end && (*it)->kind() == ZarrCodecKind::kArrayToArray;
++it) {
obj->array_to_array.push_back(
internal::static_pointer_cast<const ZarrArrayToArrayCodecSpec>(
std::move(*it)));
}
if (it != end && (*it)->kind() == ZarrCodecKind::kArrayToBytes) {
obj->array_to_bytes =
internal::static_pointer_cast<const ZarrArrayToBytesCodecSpec>(
std::move(*it));
++it;
} else if (!options.constraints) {
return absl::InvalidArgumentError(
"array -> bytes codec must be specified");
}
for (; it != end; ++it) {
if ((*it)->kind() != ZarrCodecKind::kBytesToBytes) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Expected bytes -> bytes codec, but received: ",
jb::ToJson(*it, ZarrCodecJsonBinder).value().dump()));
}
obj->bytes_to_bytes.push_back(
internal::static_pointer_cast<const ZarrBytesToBytesCodecSpec>(
std::move(*it)));
}
} else {
j->insert(j->end(), obj->array_to_array.begin(),
obj->array_to_array.end());
if (obj->array_to_bytes) {
j->push_back(obj->array_to_bytes);
}
j->insert(j->end(), obj->bytes_to_bytes.begin(),
obj->bytes_to_bytes.end());
}
return absl::OkStatus();
},
jb::Array(ZarrCodecJsonBinder));
}
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(ZarrCodecChainSpec,
ZarrCodecChainSpecJsonBinderImpl);
namespace {
Result<ZarrArrayToBytesCodecSpec::Ptr> GetDefaultArrayToBytesCodecSpec(
const ArrayCodecResolveParameters& decoded) {
if (internal::IsTrivialDataType(decoded.dtype)) {
return DefaultBytesCodec();
}
return absl::InternalError(tensorstore::StrCat(
"No default codec defined for data type ", decoded.dtype));
}
absl::Status CodecResolveError(const ZarrCodecSpec& codec_spec,
std::string_view message,
const absl::Status& status) {
return tensorstore::MaybeAnnotateStatus(
status, tensorstore::StrCat(
"Error ", message, " through ",
jb::ToJson(&codec_spec, ZarrCodecJsonBinder).value().dump()));
}
}
size_t ZarrCodecChainSpec::sharding_height() const {
return array_to_bytes ? array_to_bytes->sharding_height() : 0;
}
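// Computes the decoded chunk layout in two passes: data type and shape
// information is propagated forward through the "array -> array" codecs,
// then layout constraints are propagated backward from the "array -> bytes"
// codec. Two temporary slots are alternated so that only the final result
// lands in `decoded`.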
absl::Status ZarrCodecChainSpec::GetDecodedChunkLayout(
const ArrayDataTypeAndShapeInfo& array_info,
ArrayCodecChunkLayoutInfo& decoded) const {
absl::FixedArray<ArrayDataTypeAndShapeInfo, 2> array_infos(
array_to_array.size());
const ArrayDataTypeAndShapeInfo* decoded_array_info = &array_info;
for (size_t i = 0; i < array_to_array.size(); ++i) {
const auto& codec_spec = *array_to_array[i];
auto& encoded_array_info = array_infos[i];
TENSORSTORE_RETURN_IF_ERROR(
codec_spec.PropagateDataTypeAndShape(*decoded_array_info,
encoded_array_info),
CodecResolveError(codec_spec, "propagating data type and shape", _));
decoded_array_info = &encoded_array_info;
}
std::optional<ArrayCodecChunkLayoutInfo> temp_info[2];
const ArrayCodecChunkLayoutInfo* encoded_info;
if (array_to_bytes) {
auto& decoded_info = array_infos.empty() ? decoded : temp_info[0].emplace();
TENSORSTORE_RETURN_IF_ERROR(
array_to_bytes->GetDecodedChunkLayout(
array_infos.empty() ? array_info : array_infos.back(),
decoded_info),
CodecResolveError(*array_to_bytes, "propagating chunk layout", _));
encoded_info = &decoded_info;
} else if (!array_to_array.empty()) {
encoded_info = &temp_info[0].emplace();
}
for (size_t i = array_to_array.size(); i--;) {
auto& decoded_info =
i == 0 ? decoded : temp_info[(array_to_array.size() - i) % 2].emplace();
const auto& codec_spec = *array_to_array[i];
TENSORSTORE_RETURN_IF_ERROR(
codec_spec.GetDecodedChunkLayout(
array_infos[i], *encoded_info,
i == 0 ? array_info : array_infos[i - 1], decoded_info),
CodecResolveError(codec_spec, "propagating chunk layout", _));
encoded_info = &decoded_info;
}
return absl::OkStatus();
}
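// Resolves the spec chain into concrete codecs. If no "array -> bytes" codec
// was specified, a default "bytes" codec is substituted; and if the chosen
// codec cannot honor the requested inner order, a "transpose" codec is
// appended automatically to produce its preferred order.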
Result<internal::IntrusivePtr<const ZarrCodecChain>>
ZarrCodecChainSpec::Resolve(ArrayCodecResolveParameters&& decoded,
BytesCodecResolveParameters& encoded,
ZarrCodecChainSpec* resolved_spec) const {
auto chain = internal::MakeIntrusivePtr<ZarrCodecChain>();
std::optional<ArrayCodecResolveParameters> temp_array_resolve_params[2];
chain->array_to_array.reserve(array_to_array.size());
chain->bytes_to_bytes.reserve(bytes_to_bytes.size());
if (resolved_spec) {
assert(resolved_spec != this);
assert(resolved_spec->array_to_array.empty());
resolved_spec->array_to_array.reserve(array_to_array.size());
assert(!resolved_spec->array_to_bytes);
assert(resolved_spec->bytes_to_bytes.empty());
resolved_spec->bytes_to_bytes.reserve(bytes_to_bytes.size());
}
ArrayCodecResolveParameters* decoded_params = &decoded;
size_t temp_i = 0;
const auto resolve_array_to_array =
[&](const ZarrArrayToArrayCodecSpec& codec_spec) -> absl::Status {
auto& encoded_params = temp_array_resolve_params[(temp_i++) % 2].emplace();
TENSORSTORE_ASSIGN_OR_RETURN(
auto codec,
codec_spec.Resolve(std::move(*decoded_params), encoded_params,
resolved_spec
? &resolved_spec->array_to_array.emplace_back()
: nullptr),
CodecResolveError(codec_spec, "resolving codec spec", _));
chain->array_to_array.push_back(std::move(codec));
decoded_params = &encoded_params;
return absl::OkStatus();
};
for (size_t i = 0; i < array_to_array.size(); ++i) {
TENSORSTORE_RETURN_IF_ERROR(resolve_array_to_array(*array_to_array[i]));
}
std::optional<BytesCodecResolveParameters> temp_bytes_resolve_params[2];
auto* bytes_decoded_params = &temp_bytes_resolve_params[0].emplace();
ZarrArrayToBytesCodecSpec::Ptr temp_array_to_bytes_codec;
auto* array_to_bytes_codec_ptr = this->array_to_bytes.get();
if (!array_to_bytes_codec_ptr) {
TENSORSTORE_ASSIGN_OR_RETURN(
temp_array_to_bytes_codec,
GetDefaultArrayToBytesCodecSpec(*decoded_params));
array_to_bytes_codec_ptr = temp_array_to_bytes_codec.get();
}
DimensionIndex preferred_order[kMaxRank];
if (DimensionIndex rank = decoded_params->rank;
decoded_params->inner_order &&
!array_to_bytes_codec_ptr->SupportsInnerOrder(
*decoded_params, span<DimensionIndex>(&preferred_order[0], rank))) {
const auto& existing_inner_order = *decoded_params->inner_order;
std::vector<DimensionIndex> new_order(rank);
for (DimensionIndex i = 0; i < rank; ++i) {
new_order[preferred_order[i]] = existing_inner_order[i];
}
TENSORSTORE_RETURN_IF_ERROR(
resolve_array_to_array(*internal::MakeIntrusivePtr<TransposeCodecSpec>(
TransposeCodecSpec::Options{std::move(new_order)})));
}
TENSORSTORE_ASSIGN_OR_RETURN(
chain->array_to_bytes,
array_to_bytes_codec_ptr->Resolve(
std::move(*decoded_params), *bytes_decoded_params,
resolved_spec ? &resolved_spec->array_to_bytes : nullptr),
      CodecResolveError(*array_to_bytes_codec_ptr, "resolving codec spec", _));
if (chain->array_to_bytes->is_sharding_codec() && !bytes_to_bytes.empty()) {
return absl::InvalidArgumentError(absl::StrFormat(
"Sharding codec %s is not compatible with subsequent bytes -> "
"bytes codecs %s that apply to the entire shard. Instead, "
"bytes -> bytes codecs may be specified as inner codecs that apply "
"to each sub-chunk individually.",
jb::ToJson(array_to_bytes_codec_ptr, ZarrCodecJsonBinder)
.value()
.dump(),
jb::ToJson(bytes_to_bytes, jb::Array(ZarrCodecJsonBinder))
.value()
.dump()));
}
for (size_t i = 0; i < bytes_to_bytes.size(); ++i) {
auto& encoded_params = temp_bytes_resolve_params[(i + 1) % 2].emplace();
const auto& codec_spec = *bytes_to_bytes[i];
TENSORSTORE_ASSIGN_OR_RETURN(
auto codec,
codec_spec.Resolve(std::move(*bytes_decoded_params), encoded_params,
resolved_spec
? &resolved_spec->bytes_to_bytes.emplace_back()
: nullptr),
CodecResolveError(codec_spec, "resolving codec spec", _));
bytes_decoded_params = &encoded_params;
chain->bytes_to_bytes.push_back(std::move(codec));
}
encoded = std::move(*bytes_decoded_params);
return chain;
}
namespace {
template <typename T, typename Binder>
std::string MergeErrorMessage(const T& a, const T& b, const Binder& binder) {
return absl::StrFormat("Cannot merge zarr codec constraints %s and %s",
jb::ToJson(a, binder).value().dump(),
jb::ToJson(b, binder).value().dump());
}
std::string MergeErrorMessage(const ZarrCodecSpec& a, const ZarrCodecSpec& b) {
return MergeErrorMessage(ZarrCodecSpec::Ptr(&a), ZarrCodecSpec::Ptr(&b),
ZarrCodecJsonBinder);
}
template <typename T>
void EnsureMutableCodecSpec(internal::IntrusivePtr<const T>& ptr) {
static_assert(std::is_base_of_v<ZarrCodecSpec, T>);
assert(ptr);
if (ptr->use_count() > 1) {
ptr = internal::static_pointer_cast<const T>(ptr->Clone());
}
}
absl::Status MergeZarrCodecSpecs(ZarrCodecSpec::Ptr& target,
const ZarrCodecSpec* source, bool strict) {
if (!source) {
return absl::OkStatus();
}
if (!target) {
target.reset(source);
return absl::OkStatus();
}
absl::Status status;
const auto& target_ref = *target;
const auto& source_ref = *source;
if (typeid(target_ref) != typeid(source_ref)) {
status = absl::FailedPreconditionError("");
} else {
EnsureMutableCodecSpec(target);
status = const_cast<ZarrCodecSpec&>(*target).MergeFrom(*source, strict);
}
if (status.ok()) return absl::OkStatus();
return tensorstore::MaybeAnnotateStatus(status,
MergeErrorMessage(*target, *source));
}
template <typename T>
absl::Status MergeZarrCodecSpecs(typename T::Ptr& target, const T* source,
bool strict) {
static_assert(std::is_base_of_v<ZarrCodecSpec, T>);
ZarrCodecSpec::Ptr target_base = std::move(target);
auto status = MergeZarrCodecSpecs(target_base, source, strict);
target = internal::static_pointer_cast<const T>(std::move(target_base));
TENSORSTORE_RETURN_IF_ERROR(status);
return absl::OkStatus();
}
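// Merges two codec lists element-wise. In non-strict mode, a mismatch of
// exactly one trailing "transpose" codec is tolerated for the
// "array -> array" list, since such a transpose may have been inserted
// automatically during resolution.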
template <typename T>
absl::Status MergeZarrCodecSpecs(std::vector<T>& targets,
const std::vector<T>& sources, bool strict) {
constexpr bool kIsArrayToArray =
std::is_same_v<ZarrArrayToArrayCodecSpec::Ptr, T>;
size_t merge_count = targets.size();
bool size_mismatch = targets.size() != sources.size();
if constexpr (kIsArrayToArray) {
if (!strict) {
if (sources.size() == targets.size() + 1 &&
typeid(*sources.back()) == typeid(TransposeCodecSpec)) {
targets.push_back(sources.back());
size_mismatch = false;
} else if (sources.size() + 1 == targets.size() &&
typeid(*targets.back()) == typeid(TransposeCodecSpec)) {
--merge_count;
size_mismatch = false;
}
}
}
if (size_mismatch) {
return tensorstore::MaybeAnnotateStatus(
absl::FailedPreconditionError(absl::StrFormat(
"Mismatch in number of %s codecs (%d vs %d)",
kIsArrayToArray ? "array -> array" : "bytes -> bytes",
targets.size(), sources.size())),
MergeErrorMessage(targets, sources, jb::Array(ZarrCodecJsonBinder)));
}
for (size_t i = 0; i < merge_count; ++i) {
TENSORSTORE_RETURN_IF_ERROR(
MergeZarrCodecSpecs(targets[i], sources[i].get(), strict));
}
return absl::OkStatus();
}
}
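// In non-strict mode, when one chain has a greater sharding height and no
// other codecs, the shallower chain is merged into the sharding codec's
// sub-chunk codecs instead of being compared position by position;
// otherwise the three sub-chains are merged pairwise.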
absl::Status ZarrCodecChainSpec::MergeFrom(const ZarrCodecChainSpec& other,
bool strict) {
if (!strict) {
size_t self_sharding_height = sharding_height();
size_t other_sharding_height = other.sharding_height();
if (self_sharding_height > other_sharding_height &&
array_to_array.empty() && bytes_to_bytes.empty()) {
EnsureMutableCodecSpec(array_to_bytes);
return static_cast<ZarrShardingCodecSpec&>(
const_cast<ZarrArrayToBytesCodecSpec&>(*array_to_bytes))
.MergeSubChunkCodecsFrom(other, strict);
}
if (self_sharding_height < other_sharding_height &&
other.array_to_array.empty() && other.bytes_to_bytes.empty()) {
auto new_array_to_bytes_codec =
internal::static_pointer_cast<const ZarrShardingCodecSpec>(
other.array_to_bytes->Clone());
TENSORSTORE_RETURN_IF_ERROR(
const_cast<ZarrShardingCodecSpec&>(*new_array_to_bytes_codec)
.MergeSubChunkCodecsFrom(*this, strict));
array_to_array.clear();
bytes_to_bytes.clear();
array_to_bytes = std::move(new_array_to_bytes_codec);
return absl::OkStatus();
}
}
TENSORSTORE_RETURN_IF_ERROR(
MergeZarrCodecSpecs(array_to_array, other.array_to_array, strict));
TENSORSTORE_RETURN_IF_ERROR(
MergeZarrCodecSpecs(array_to_bytes, other.array_to_bytes.get(), strict));
TENSORSTORE_RETURN_IF_ERROR(
MergeZarrCodecSpecs(bytes_to_bytes, other.bytes_to_bytes, strict));
return absl::OkStatus();
}
absl::Status MergeZarrCodecSpecs(
std::optional<ZarrCodecChainSpec>& target,
const std::optional<ZarrCodecChainSpec>& source, bool strict) {
if (!target) {
if (source) {
target = *source;
}
return absl::OkStatus();
}
if (!source) {
return absl::OkStatus();
}
return target->MergeFrom(*source, strict);
}
bool ZarrShardingCodecSpec::SupportsInnerOrder(
const ArrayCodecResolveParameters& decoded,
span<DimensionIndex> preferred_inner_order) const {
return true;
}
size_t ZarrShardingCodecSpec::sharding_height() const {
auto* sub_chunk_codecs = this->GetSubChunkCodecs();
return 1 + (sub_chunk_codecs ? sub_chunk_codecs->sharding_height() : 0);
}
CodecSpec TensorStoreCodecSpec::Clone() const {
return internal::CodecDriverSpec::Make<TensorStoreCodecSpec>(*this);
}
absl::Status TensorStoreCodecSpec::DoMergeFrom(
const internal::CodecDriverSpec& other_base) {
if (typeid(other_base) != typeid(TensorStoreCodecSpec)) {
return absl::InvalidArgumentError("");
}
auto& other = static_cast<const TensorStoreCodecSpec&>(other_base);
return MergeZarrCodecSpecs(codecs, other.codecs, false);
}
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(
TensorStoreCodecSpec,
jb::Sequence(
jb::Member("codecs",
jb::Projection<&TensorStoreCodecSpec::codecs>(jb::Optional(
ZarrCodecChainJsonBinder<true>)))
))
namespace {
const internal::CodecSpecRegistration<TensorStoreCodecSpec>
encoding_registration;
}
}
namespace internal {
void CacheKeyEncoder<internal_zarr3::ZarrCodecChainSpec>::Encode(
std::string* out, const internal_zarr3::ZarrCodecChainSpec& value) {
internal::EncodeCacheKey(out, value.ToJson().value().dump());
}
}
}
TENSORSTORE_DEFINE_SERIALIZER_SPECIALIZATION(
tensorstore::internal_zarr3::ZarrCodecChainSpec,
tensorstore::serialization::JsonBindableSerializer<
tensorstore::internal_zarr3::ZarrCodecChainSpec>()) | #include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/codec_spec.h"
#include "tensorstore/driver/zarr3/codec/codec_test_util.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::CodecSpec;
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_zarr3::GetDefaultBytesCodecJson;
using ::tensorstore::internal_zarr3::TestCodecMerge;
using ::tensorstore::internal_zarr3::ZarrCodecChainSpec;
TEST(CodecMergeTest, Basic) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto a,
CodecSpec::FromJson({
{"driver", "zarr3"},
{"codecs",
{{
{"name", "sharding_indexed"},
{"configuration",
{
{"chunk_shape", {30, 40, 50}},
{"index_codecs",
{GetDefaultBytesCodecJson(), {{"name", "crc32c"}}}},
{"codecs",
{
{{"name", "transpose"},
{"configuration", {{"order", {2, 0, 1}}}}},
GetDefaultBytesCodecJson(),
{{"name", "gzip"}, {"configuration", {{"level", 6}}}},
}},
}},
}}},
}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto b, CodecSpec::FromJson(
{{"driver", "zarr3"},
{"codecs",
{{{"name", "gzip"}, {"configuration", {{"level", 5}}}}}}}));
EXPECT_THAT(a.MergeFrom(b),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
".*: Incompatible \"level\": 6 vs 5"));
}
TEST(CodecChainSpecTest, MissingArrayToBytes) {
EXPECT_THAT(ZarrCodecChainSpec::FromJson(::nlohmann::json::array_t()),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"array -> bytes codec must be specified"));
}
TEST(CodecChainSpecTest, MergeCodecNameMismatch) {
EXPECT_THAT(
TestCodecMerge({"gzip"}, {"crc32c"}, true),
MatchesStatus(absl::StatusCode::kFailedPrecondition, "Cannot merge .*"));
}
TEST(CodecChainSpecTest, MergeArrayToBytes) {
EXPECT_THAT(
TestCodecMerge(
{{{"name", "bytes"}, {"configuration", {{"endian", "little"}}}}},
::nlohmann::json::array_t(), true),
::testing::Optional(MatchesJson(
{{{"name", "bytes"}, {"configuration", {{"endian", "little"}}}}})));
}
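// The next two tests exercise the non-strict merge tolerances: an extra
// trailing "transpose" codec, or an extra sharding level, on one side is
// accepted when strict == false but rejected when strict == true.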
TEST(CodecChainSpecTest, ExtraTranspose) {
::nlohmann::json a = {
{{"name", "transpose"}, {"configuration", {{"order", {0, 2, 1}}}}},
{{"name", "bytes"}, {"configuration", {{"endian", "little"}}}},
};
::nlohmann::json b = {
{{"name", "bytes"}, {"configuration", {{"endian", "little"}}}},
};
EXPECT_THAT(TestCodecMerge(a, b, false),
::testing::Optional(MatchesJson(a)));
EXPECT_THAT(
TestCodecMerge(a, b, true),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
".*: Mismatch in number of array -> array codecs.*"));
}
TEST(CodecChainSpecTest, ExtraSharding) {
::nlohmann::json a = {{
{"name", "sharding_indexed"},
{"configuration",
{
{"chunk_shape", {30, 40, 50}},
{"index_codecs", {GetDefaultBytesCodecJson(), {{"name", "crc32c"}}}},
{"codecs",
{
{{"name", "transpose"},
{"configuration", {{"order", {2, 0, 1}}}}},
GetDefaultBytesCodecJson(),
{{"name", "gzip"}, {"configuration", {{"level", 6}}}},
}},
}},
}};
::nlohmann::json b = {
{{"name", "transpose"}, {"configuration", {{"order", {2, 0, 1}}}}},
GetDefaultBytesCodecJson(),
{{"name", "gzip"}, {"configuration", {{"level", 6}}}},
};
::nlohmann::json c = {
GetDefaultBytesCodecJson(),
{{"name", "gzip"}, {"configuration", {{"level", 6}}}},
};
EXPECT_THAT(TestCodecMerge(a, b, false),
::testing::Optional(MatchesJson(a)));
EXPECT_THAT(TestCodecMerge(a, c, false),
::testing::Optional(MatchesJson(a)));
EXPECT_THAT(
TestCodecMerge(a, b, true),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
".*: Mismatch in number of array -> array codecs.*"));
EXPECT_THAT(TestCodecMerge(a, c, true),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Cannot merge zarr codec constraints .*"));
}
} |
585 | cpp | google/tensorstore | sharding_indexed | tensorstore/driver/zarr3/codec/sharding_indexed.cc | tensorstore/driver/zarr3/codec/sharding_indexed_test.cc | #ifndef TENSORSTORE_DRIVER_ZARR3_CODEC_SHARDING_INDEXED_H_
#define TENSORSTORE_DRIVER_ZARR3_CODEC_SHARDING_INDEXED_H_
#include <optional>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "tensorstore/driver/zarr3/codec/codec.h"
#include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"
#include "tensorstore/driver/zarr3/codec/codec_spec.h"
#include "tensorstore/index.h"
#include "tensorstore/kvstore/zarr3_sharding_indexed/shard_format.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_zarr3 {
using ::tensorstore::zarr3_sharding_indexed::ShardIndexLocation;
class ShardingIndexedCodecSpec : public ZarrShardingCodecSpec {
public:
struct Options {
std::optional<std::vector<Index>> sub_chunk_shape;
std::optional<ZarrCodecChainSpec> index_codecs;
std::optional<ZarrCodecChainSpec> sub_chunk_codecs;
std::optional<ShardIndexLocation> index_location;
};
ShardingIndexedCodecSpec() = default;
explicit ShardingIndexedCodecSpec(Options&& options)
: options(std::move(options)) {}
absl::Status MergeFrom(const ZarrCodecSpec& other, bool strict) override;
ZarrCodecSpec::Ptr Clone() const override;
absl::Status MergeSubChunkCodecsFrom(const ZarrCodecChainSpec& other,
bool strict) override;
const ZarrCodecChainSpec* GetSubChunkCodecs() const override;
absl::Status GetDecodedChunkLayout(
const ArrayDataTypeAndShapeInfo& array_info,
ArrayCodecChunkLayoutInfo& decoded) const override;
Result<ZarrArrayToBytesCodec::Ptr> Resolve(
ArrayCodecResolveParameters&& decoded,
BytesCodecResolveParameters& encoded,
ZarrArrayToBytesCodecSpec::Ptr* resolved_spec) const override;
Options options;
};
}
}
#endif
#include "tensorstore/driver/zarr3/codec/sharding_indexed.h"
#include <stdint.h>
#include <algorithm>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/data_type.h"
#include "tensorstore/driver/zarr3/codec/bytes.h"
#include "tensorstore/driver/zarr3/codec/codec.h"
#include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"
#include "tensorstore/driver/zarr3/codec/codec_spec.h"
#include "tensorstore/driver/zarr3/codec/crc32c.h"
#include "tensorstore/driver/zarr3/codec/registry.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/internal/async_write_array.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/chunk_grid_specification.h"
#include "tensorstore/internal/global_initializer.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/internal/lexicographical_grid_index_key.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/zarr3_sharding_indexed/key.h"
#include "tensorstore/kvstore/zarr3_sharding_indexed/shard_format.h"
#include "tensorstore/kvstore/zarr3_sharding_indexed/zarr3_sharding_indexed.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_zarr3 {
absl::Status SubChunkRankMismatch(span<const Index> sub_chunk_shape,
DimensionIndex outer_rank) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"sharding_indexed sub-chunk shape of ", sub_chunk_shape,
" is not compatible with array of rank ", outer_rank));
}
absl::Status SubChunkShapeMismatch(span<const Index> sub_chunk_shape,
span<const Index> chunk_shape) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"sharding_indexed sub-chunk shape of ", sub_chunk_shape,
" does not evenly divide chunk shape of ", chunk_shape));
}
namespace {
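// Implements the "sharding_indexed" codec: each outer chunk acts as a small
// key-value store of sub-chunks addressed through a shard index. Reads and
// writes go through the sub-chunk kvstore adapter rather than
// EncodeArray/DecodeArray, which is why those two methods below are
// intentionally unreachable.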
class ShardingIndexedCodec : public ZarrShardingCodec {
public:
explicit ShardingIndexedCodec(
internal::ChunkGridSpecification&& sub_chunk_grid)
: sub_chunk_grid_(std::move(sub_chunk_grid)) {}
class State : public ZarrShardingCodec::PreparedState,
public internal::LexicographicalGridIndexKeyParser {
public:
absl::Status EncodeArray(SharedArrayView<const void> decoded,
riegeli::Writer& writer) const final {
return absl::InternalError("");
}
Result<SharedArray<const void>> DecodeArray(
span<const Index> decoded_shape, riegeli::Reader& reader) const final {
return absl::InternalError("");
}
kvstore::DriverPtr GetSubChunkKvstore(
kvstore::DriverPtr parent, std::string parent_key,
const Executor& executor,
internal::CachePool::WeakPtr cache_pool) const override {
zarr3_sharding_indexed::ShardedKeyValueStoreParameters params;
params.base_kvstore = std::move(parent);
params.base_kvstore_path = std::move(parent_key);
params.executor = executor;
params.cache_pool = std::move(cache_pool);
params.index_params = shard_index_params_;
return zarr3_sharding_indexed::GetShardedKeyValueStore(std::move(params));
}
const LexicographicalGridIndexKeyParser& GetSubChunkStorageKeyParser()
const final {
return *this;
}
std::string FormatKey(span<const Index> grid_indices) const final {
return zarr3_sharding_indexed::IndicesToKey(grid_indices);
}
bool ParseKey(std::string_view key, span<Index> grid_indices) const final {
return zarr3_sharding_indexed::KeyToIndices(key, grid_indices);
}
Index MinGridIndexForLexicographicalOrder(
DimensionIndex dim, IndexInterval grid_interval) const final {
return 0;
}
internal::IntrusivePtr<const ZarrShardingCodec> parent_codec_;
std::vector<Index> sub_chunk_grid_shape_;
ZarrCodecChain::PreparedState::Ptr codec_state_;
zarr3_sharding_indexed::ShardIndexParameters shard_index_params_;
};
Result<ZarrArrayToBytesCodec::PreparedState::Ptr> Prepare(
span<const Index> decoded_shape) const final {
span<const Index> sub_chunk_shape = sub_chunk_grid_.components[0].shape();
if (decoded_shape.size() != sub_chunk_shape.size()) {
return SubChunkRankMismatch(sub_chunk_shape, decoded_shape.size());
}
auto state = internal::MakeIntrusivePtr<State>();
state->parent_codec_.reset(this);
auto& sub_chunk_grid_shape = state->sub_chunk_grid_shape_;
sub_chunk_grid_shape.resize(decoded_shape.size());
for (DimensionIndex i = 0; i < sub_chunk_shape.size(); ++i) {
if (decoded_shape[i] % sub_chunk_shape[i] != 0) {
return SubChunkShapeMismatch(sub_chunk_shape, decoded_shape);
}
const int64_t grid_size = decoded_shape[i] / sub_chunk_shape[i];
sub_chunk_grid_shape[i] = grid_size;
}
TENSORSTORE_ASSIGN_OR_RETURN(
state->codec_state_, sub_chunk_codec_chain_->Prepare(sub_chunk_shape));
state->sub_chunk_grid = &sub_chunk_grid_;
state->sub_chunk_codec_chain = sub_chunk_codec_chain_.get();
state->sub_chunk_codec_state = state->codec_state_.get();
state->shard_index_params_.index_location = index_location_;
TENSORSTORE_RETURN_IF_ERROR(state->shard_index_params_.Initialize(
*index_codec_chain_, sub_chunk_grid_shape));
return {std::in_place, std::move(state)};
}
internal::ChunkGridSpecification sub_chunk_grid_;
ZarrCodecChain::Ptr sub_chunk_codec_chain_;
ZarrCodecChain::Ptr index_codec_chain_;
ShardIndexLocation index_location_;
};
}
absl::Status ShardingIndexedCodecSpec::MergeFrom(const ZarrCodecSpec& other,
bool strict) {
using Self = ShardingIndexedCodecSpec;
const auto& other_options = static_cast<const Self&>(other).options;
TENSORSTORE_RETURN_IF_ERROR(MergeConstraint<&Options::sub_chunk_shape>(
"chunk_shape", options, other_options));
TENSORSTORE_RETURN_IF_ERROR(
internal_zarr3::MergeZarrCodecSpecs(options.index_codecs,
other_options.index_codecs, strict),
tensorstore::MaybeAnnotateStatus(_, "Incompatible \"index_codecs\""));
TENSORSTORE_RETURN_IF_ERROR(
internal_zarr3::MergeZarrCodecSpecs(
options.sub_chunk_codecs, other_options.sub_chunk_codecs, strict),
tensorstore::MaybeAnnotateStatus(_, "Incompatible sub-chunk \"codecs\""));
TENSORSTORE_RETURN_IF_ERROR(MergeConstraint<&Options::index_location>(
"index_location", options, other_options));
return absl::OkStatus();
}
absl::Status ShardingIndexedCodecSpec::MergeSubChunkCodecsFrom(
const ZarrCodecChainSpec& other, bool strict) {
if (!options.sub_chunk_codecs) {
options.sub_chunk_codecs = other;
return absl::OkStatus();
}
return options.sub_chunk_codecs->MergeFrom(other, strict);
}
ZarrCodecSpec::Ptr ShardingIndexedCodecSpec::Clone() const {
return internal::MakeIntrusivePtr<ShardingIndexedCodecSpec>(*this);
}
const ZarrCodecChainSpec* ShardingIndexedCodecSpec::GetSubChunkCodecs() const {
return options.sub_chunk_codecs ? &*options.sub_chunk_codecs : nullptr;
}
absl::Status ShardingIndexedCodecSpec::GetDecodedChunkLayout(
const ArrayDataTypeAndShapeInfo& array_info,
ArrayCodecChunkLayoutInfo& decoded) const {
ArrayDataTypeAndShapeInfo sub_chunk_info;
if (options.sub_chunk_shape &&
!RankConstraint::Implies(options.sub_chunk_shape->size(),
array_info.rank)) {
return SubChunkRankMismatch(*options.sub_chunk_shape, array_info.rank);
}
sub_chunk_info.dtype = array_info.dtype;
sub_chunk_info.rank = array_info.rank;
if (options.sub_chunk_shape) {
std::copy(options.sub_chunk_shape->begin(), options.sub_chunk_shape->end(),
sub_chunk_info.shape.emplace().begin());
}
if (options.sub_chunk_codecs) {
TENSORSTORE_RETURN_IF_ERROR(options.sub_chunk_codecs->GetDecodedChunkLayout(
sub_chunk_info, decoded));
}
return absl::OkStatus();
}
namespace {
ZarrCodecChainSpec DefaultIndexCodecChainSpec() {
ZarrCodecChainSpec codecs;
codecs.array_to_bytes = DefaultBytesCodec();
codecs.bytes_to_bytes.push_back(
internal::MakeIntrusivePtr<const Crc32cCodecSpec>());
return codecs;
}
}
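// Resolves the sharding codec. The sub-chunk shape comes from the spec or,
// failing that, from the requested read chunk shape, and must later divide
// the outer chunk shape evenly (verified in Prepare). The fill value is
// broadcast to the sub-chunk rank, and separate codec chains are resolved
// for the sub-chunks and for the shard index (which defaults to "bytes"
// followed by "crc32c").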
Result<ZarrArrayToBytesCodec::Ptr> ShardingIndexedCodecSpec::Resolve(
ArrayCodecResolveParameters&& decoded, BytesCodecResolveParameters& encoded,
ZarrArrayToBytesCodecSpec::Ptr* resolved_spec) const {
ShardingIndexedCodecSpec::Options* resolved_options = nullptr;
if (resolved_spec) {
auto* resolved_spec_ptr = new ShardingIndexedCodecSpec;
resolved_options = &resolved_spec_ptr->options;
resolved_spec->reset(resolved_spec_ptr);
}
span<const Index> sub_chunk_shape;
if (options.sub_chunk_shape) {
sub_chunk_shape = *options.sub_chunk_shape;
} else if (decoded.read_chunk_shape) {
sub_chunk_shape =
span<const Index>(decoded.read_chunk_shape->data(), decoded.rank);
} else {
return absl::InvalidArgumentError("\"chunk_shape\" must be specified");
}
if (sub_chunk_shape.size() != decoded.rank) {
return SubChunkRankMismatch(sub_chunk_shape, decoded.rank);
}
internal::ChunkGridSpecification::ComponentList components;
TENSORSTORE_ASSIGN_OR_RETURN(
auto broadcast_fill_value,
BroadcastArray(decoded.fill_value, BoxView<>(sub_chunk_shape.size())));
components.emplace_back(
internal::AsyncWriteArray::Spec{std::move(broadcast_fill_value),
Box<>(sub_chunk_shape.size())},
std::vector<Index>(sub_chunk_shape.begin(), sub_chunk_shape.end()));
components.back().array_spec.fill_value_comparison_kind =
EqualityComparisonKind::identical;
auto codec = internal::MakeIntrusivePtr<ShardingIndexedCodec>(
internal::ChunkGridSpecification(std::move(components)));
codec->index_location_ =
options.index_location.value_or(ShardIndexLocation::kEnd);
if (resolved_options) {
resolved_options->sub_chunk_shape = codec->sub_chunk_grid_.chunk_shape;
resolved_options->index_location = codec->index_location_;
}
auto set_up_codecs =
[&](const ZarrCodecChainSpec& sub_chunk_codecs) -> absl::Status {
ArrayCodecResolveParameters sub_chunk_decoded;
sub_chunk_decoded.dtype = decoded.dtype;
sub_chunk_decoded.rank = decoded.rank;
sub_chunk_decoded.fill_value = std::move(decoded.fill_value);
if (decoded.read_chunk_shape) {
std::copy_n(decoded.read_chunk_shape->begin(), decoded.rank,
sub_chunk_decoded.read_chunk_shape.emplace().begin());
}
if (decoded.codec_chunk_shape) {
std::copy_n(decoded.codec_chunk_shape->begin(), decoded.rank,
sub_chunk_decoded.codec_chunk_shape.emplace().begin());
}
if (decoded.inner_order) {
std::copy_n(decoded.inner_order->begin(), decoded.rank,
sub_chunk_decoded.inner_order.emplace().begin());
}
TENSORSTORE_ASSIGN_OR_RETURN(
codec->sub_chunk_codec_chain_,
sub_chunk_codecs.Resolve(
std::move(sub_chunk_decoded), encoded,
resolved_options ? &resolved_options->sub_chunk_codecs.emplace()
: nullptr));
return absl::OkStatus();
};
TENSORSTORE_RETURN_IF_ERROR(
set_up_codecs(options.sub_chunk_codecs ? *options.sub_chunk_codecs
: ZarrCodecChainSpec{}),
tensorstore::MaybeAnnotateStatus(_, "Error resolving sub-chunk codecs"));
auto set_up_index_codecs =
[&](const ZarrCodecChainSpec& index_codecs) -> absl::Status {
TENSORSTORE_ASSIGN_OR_RETURN(
codec->index_codec_chain_,
zarr3_sharding_indexed::InitializeIndexCodecChain(
index_codecs, sub_chunk_shape.size(),
resolved_options ? &resolved_options->index_codecs.emplace()
: nullptr));
return absl::OkStatus();
};
TENSORSTORE_RETURN_IF_ERROR(
set_up_index_codecs(options.index_codecs ? *options.index_codecs
: DefaultIndexCodecChainSpec()),
tensorstore::MaybeAnnotateStatus(_, "Error resolving index_codecs"));
return {std::in_place, std::move(codec)};
}
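// Registers the JSON binding. "index_location" gets special treatment: when
// writing a resolved (non-constraints) spec, the default value "end" is
// elided from the output.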
TENSORSTORE_GLOBAL_INITIALIZER {
using Self = ShardingIndexedCodecSpec;
using Options = Self::Options;
namespace jb = ::tensorstore::internal_json_binding;
RegisterCodec<Self>(
"sharding_indexed",
jb::Projection<&Self::options>(jb::Sequence(
jb::Member("chunk_shape", jb::Projection<&Options::sub_chunk_shape>(
OptionalIfConstraintsBinder(
jb::Array(jb::Integer<Index>(1))))),
jb::Member("index_codecs", jb::Projection<&Options::index_codecs>(
OptionalIfConstraintsBinder())),
jb::Member("codecs", jb::Projection<&Options::sub_chunk_codecs>(
OptionalIfConstraintsBinder())),
jb::Member(
"index_location",
jb::Projection<&Options::index_location>(
[](auto is_loading, const auto& options, auto* obj, auto* j) {
if constexpr (!is_loading) {
if (!options.constraints &&
*obj == ShardIndexLocation::kEnd) {
return absl::OkStatus();
}
}
return jb::Validate([](const auto& options, auto* obj) {
if (!options.constraints) {
if (!obj->has_value()) *obj = ShardIndexLocation::kEnd;
}
return absl::OkStatus();
})(is_loading, options, obj, j);
}))
)));
}
}
} | #include <stdint.h>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/data_type.h"
#include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"
#include "tensorstore/driver/zarr3/codec/codec_spec.h"
#include "tensorstore/driver/zarr3/codec/codec_test_util.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_zarr3::ArrayCodecResolveParameters;
using ::tensorstore::internal_zarr3::BytesCodecResolveParameters;
using ::tensorstore::internal_zarr3::CodecSpecRoundTripTestParams;
using ::tensorstore::internal_zarr3::TestCodecSpecResolve;
using ::tensorstore::internal_zarr3::TestCodecSpecRoundTrip;
using ::tensorstore::internal_zarr3::ZarrCodecChainSpec;
TEST(ShardingIndexedTest, Basic) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto codec, ZarrCodecChainSpec::FromJson(
{{{"name", "sharding_indexed"},
{"configuration",
{
{"chunk_shape", {2, 3}},
{"codecs",
{{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
}}},
{"index_codecs",
{
{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
},
{
{"name", "crc32c"},
},
}},
}}}}));
}
TEST(ShardingIndexedTest, InvalidBytesToBytes) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto spec, ZarrCodecChainSpec::FromJson({
{{"name", "sharding_indexed"},
{"configuration",
{
{"chunk_shape", {2, 3}},
{"codecs",
{{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
}}},
{"index_codecs",
{
{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
},
{
{"name", "crc32c"},
},
}},
}}},
{
{"name", "gzip"},
{"configuration", {{"level", 5}}},
},
}));
ArrayCodecResolveParameters decoded_params;
decoded_params.dtype = tensorstore::dtype_v<uint32_t>;
decoded_params.rank = 2;
decoded_params.fill_value = tensorstore::MakeScalarArray<uint32_t>(42);
BytesCodecResolveParameters encoded_params;
EXPECT_THAT(
spec.Resolve(std::move(decoded_params), encoded_params, nullptr),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Sharding codec .* is not compatible with subsequent bytes "
"-> bytes .*"));
}
TEST(ShardingIndexedTest, DefaultIndexLocation) {
CodecSpecRoundTripTestParams p;
p.resolve_params.rank = 2;
p.orig_spec = {
{{"name", "sharding_indexed"},
{"configuration",
{
{"chunk_shape", {2, 3}},
{"codecs",
{{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
}}},
{"index_codecs",
{
{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
},
{
{"name", "crc32c"},
},
}},
}}},
};
p.expected_spec = {
{{"name", "sharding_indexed"},
{"configuration",
{
{"chunk_shape", {2, 3}},
{"codecs",
{{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
}}},
{"index_location", "end"},
{"index_codecs",
{
{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
},
{
{"name", "crc32c"},
},
}},
}}},
};
p.to_json_options.constraints = true;
TestCodecSpecRoundTrip(p);
p.expected_spec = {
{{"name", "sharding_indexed"},
{"configuration",
{
{"chunk_shape", {2, 3}},
{"codecs",
{{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
}}},
{"index_codecs",
{
{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
},
{
{"name", "crc32c"},
},
}},
}}},
};
p.to_json_options.constraints = false;
TestCodecSpecRoundTrip(p);
}
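// An explicit "index_location" of "end" equals the default, so the resolved
// (non-constraints) spec is expected to omit it when converted back to JSON.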
TEST(ShardingIndexedTest, IndexLocationEndNotStored) {
ArrayCodecResolveParameters p;
p.dtype = tensorstore::dtype_v<uint16_t>;
p.rank = 2;
EXPECT_THAT(TestCodecSpecResolve(
::nlohmann::json::array_t{
{{"name", "sharding_indexed"},
{"configuration",
{
{"chunk_shape", {2, 3}},
{"codecs",
{{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
}}},
{"index_codecs",
{
{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
},
{
{"name", "crc32c"},
},
}},
{"index_location", "end"},
}}}},
p,
false),
::testing::Optional(MatchesJson(::nlohmann::json::array_t{
{{"name", "sharding_indexed"},
{"configuration",
{
{"chunk_shape", {2, 3}},
{"codecs",
{{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
}}},
{"index_codecs",
{
{
{"name", "bytes"},
{"configuration", {{"endian", "little"}}},
},
{
{"name", "crc32c"},
},
}},
}}}})));
}
} |
586 | cpp | google/tensorstore | gzip | tensorstore/driver/zarr3/codec/gzip.cc | tensorstore/driver/zarr3/codec/gzip_test.cc | #ifndef TENSORSTORE_DRIVER_ZARR3_CODEC_GZIP_H_
#define TENSORSTORE_DRIVER_ZARR3_CODEC_GZIP_H_
#include <optional>
#include "absl/status/status.h"
#include "tensorstore/driver/zarr3/codec/codec.h"
#include "tensorstore/driver/zarr3/codec/codec_spec.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_zarr3 {
class GzipCodecSpec : public ZarrBytesToBytesCodecSpec {
public:
struct Options {
std::optional<int> level;
};
GzipCodecSpec() = default;
explicit GzipCodecSpec(const Options& options) : options(options) {}
absl::Status MergeFrom(const ZarrCodecSpec& other, bool strict) override;
ZarrCodecSpec::Ptr Clone() const override;
Result<ZarrBytesToBytesCodec::Ptr> Resolve(
BytesCodecResolveParameters&& decoded,
BytesCodecResolveParameters& encoded,
ZarrBytesToBytesCodecSpec::Ptr* resolved_spec) const final;
Options options;
};
}
}
#endif
#include "tensorstore/driver/zarr3/codec/gzip.h"
#include <stdint.h>
#include <memory>
#include "absl/status/status.h"
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/writer.h"
#include "riegeli/zlib/zlib_reader.h"
#include "riegeli/zlib/zlib_writer.h"
#include "tensorstore/driver/zarr3/codec/codec.h"
#include "tensorstore/driver/zarr3/codec/codec_spec.h"
#include "tensorstore/driver/zarr3/codec/registry.h"
#include "tensorstore/internal/global_initializer.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal_zarr3 {
namespace {
constexpr int kDefaultLevel = 6;
class GzipCodec : public ZarrBytesToBytesCodec {
public:
explicit GzipCodec(int level) : level_(level) {}
class State : public ZarrBytesToBytesCodec::PreparedState {
public:
Result<std::unique_ptr<riegeli::Writer>> GetEncodeWriter(
riegeli::Writer& encoded_writer) const final {
using Writer = riegeli::ZlibWriter<riegeli::Writer*>;
Writer::Options options;
options.set_compression_level(level_);
options.set_header(Writer::Header::kGzip);
return std::make_unique<Writer>(&encoded_writer, options);
}
Result<std::unique_ptr<riegeli::Reader>> GetDecodeReader(
riegeli::Reader& encoded_reader) const final {
using Reader = riegeli::ZlibReader<riegeli::Reader*>;
Reader::Options options;
options.set_header(Reader::Header::kGzip);
return std::make_unique<Reader>(&encoded_reader, options);
}
int level_;
};
Result<PreparedState::Ptr> Prepare(int64_t decoded_size) const final {
auto state = internal::MakeIntrusivePtr<State>();
state->level_ = level_;
return state;
}
private:
int level_;
};
}
absl::Status GzipCodecSpec::MergeFrom(const ZarrCodecSpec& other, bool strict) {
using Self = GzipCodecSpec;
const auto& other_options = static_cast<const Self&>(other).options;
TENSORSTORE_RETURN_IF_ERROR(
MergeConstraint<&Options::level>("level", options, other_options));
return absl::OkStatus();
}
ZarrCodecSpec::Ptr GzipCodecSpec::Clone() const {
return internal::MakeIntrusivePtr<GzipCodecSpec>(*this);
}
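// Resolves the gzip codec, substituting the default compression level (6)
// when none was given, e.g. {"name": "gzip"} resolves as if it were
// {"name": "gzip", "configuration": {"level": 6}}. The resolved spec records
// the concrete level so that it round-trips explicitly.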
Result<ZarrBytesToBytesCodec::Ptr> GzipCodecSpec::Resolve(
BytesCodecResolveParameters&& decoded, BytesCodecResolveParameters& encoded,
ZarrBytesToBytesCodecSpec::Ptr* resolved_spec) const {
auto resolved_level = options.level.value_or(kDefaultLevel);
if (resolved_spec) {
resolved_spec->reset(
options.level ? this : new GzipCodecSpec(Options{resolved_level}));
}
return internal::MakeIntrusivePtr<GzipCodec>(resolved_level);
}
TENSORSTORE_GLOBAL_INITIALIZER {
using Self = GzipCodecSpec;
using Options = Self::Options;
namespace jb = ::tensorstore::internal_json_binding;
RegisterCodec<Self>(
"gzip",
jb::Projection<&Self::options>(jb::Sequence(
jb::Member("level", jb::Projection<&Options::level>(
OptionalIfConstraintsBinder(
jb::Integer<int>(0, 9))))
)));
}
}
} | #include <gtest/gtest.h>
#include "tensorstore/driver/zarr3/codec/codec_test_util.h"
namespace {
using ::tensorstore::internal_zarr3::CodecRoundTripTestParams;
using ::tensorstore::internal_zarr3::CodecSpecRoundTripTestParams;
using ::tensorstore::internal_zarr3::GetDefaultBytesCodecJson;
using ::tensorstore::internal_zarr3::TestCodecRoundTrip;
using ::tensorstore::internal_zarr3::TestCodecSpecRoundTrip;
TEST(GzipTest, EndianInferred) {
CodecSpecRoundTripTestParams p;
p.orig_spec = {
{{"name", "gzip"}, {"configuration", {{"level", 7}}}},
};
p.expected_spec = {
GetDefaultBytesCodecJson(),
{{"name", "gzip"}, {"configuration", {{"level", 7}}}},
};
TestCodecSpecRoundTrip(p);
}
TEST(GzipTest, DefaultLevel) {
CodecSpecRoundTripTestParams p;
p.orig_spec = {
{{"name", "gzip"}},
};
p.expected_spec = {
GetDefaultBytesCodecJson(),
{{"name", "gzip"}, {"configuration", {{"level", 6}}}},
};
TestCodecSpecRoundTrip(p);
}
TEST(GzipTest, RoundTrip) {
CodecRoundTripTestParams p;
p.spec = {"gzip"};
TestCodecRoundTrip(p);
}
} |
587 | cpp | google/tensorstore | bytes | tensorstore/driver/zarr3/codec/bytes.cc | tensorstore/driver/zarr3/codec/bytes_test.cc | #ifndef TENSORSTORE_DRIVER_ZARR3_CODEC_BYTES_H_
#define TENSORSTORE_DRIVER_ZARR3_CODEC_BYTES_H_
#include <optional>
#include "absl/status/status.h"
#include "tensorstore/driver/zarr3/codec/codec.h"
#include "tensorstore/driver/zarr3/codec/codec_spec.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/util/endian.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_zarr3 {
class BytesCodecSpec : public ZarrArrayToBytesCodecSpec {
public:
struct Options {
std::optional<endian> endianness;
bool constraints = false;
};
BytesCodecSpec() = default;
explicit BytesCodecSpec(const Options& options) : options(options) {}
absl::Status MergeFrom(const ZarrCodecSpec& other, bool strict) override;
ZarrCodecSpec::Ptr Clone() const override;
absl::Status GetDecodedChunkLayout(
const ArrayDataTypeAndShapeInfo& array_info,
ArrayCodecChunkLayoutInfo& decoded) const override;
bool SupportsInnerOrder(
const ArrayCodecResolveParameters& decoded,
span<DimensionIndex> preferred_inner_order) const override;
Result<ZarrArrayToBytesCodec::Ptr> Resolve(
ArrayCodecResolveParameters&& decoded,
BytesCodecResolveParameters& encoded,
ZarrArrayToBytesCodecSpec::Ptr* resolved_spec) const override;
Options options;
};
internal::IntrusivePtr<const BytesCodecSpec> DefaultBytesCodec();
}
}
#endif
#include "tensorstore/driver/zarr3/codec/bytes.h"
#include <assert.h>
#include <stdint.h>
#include <optional>
#include <string_view>
#include <utility>
#include "absl/status/status.h"
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/driver/zarr3/codec/codec.h"
#include "tensorstore/driver/zarr3/codec/codec_spec.h"
#include "tensorstore/driver/zarr3/codec/registry.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dimension_permutation.h"
#include "tensorstore/internal/global_initializer.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/enum.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_optional.h"
#include "tensorstore/internal/riegeli/array_endian_codec.h"
#include "tensorstore/internal/unaligned_data_type_functions.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/endian.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_zarr3 {
namespace {
absl::Status InvalidDataTypeError(DataType dtype) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Data type ", dtype, " not compatible with \"bytes\" codec"));
}
class BytesCodec : public ZarrArrayToBytesCodec {
public:
explicit BytesCodec(DataType decoded_dtype, endian endianness)
: dtype_(decoded_dtype), endianness_(endianness) {}
Result<PreparedState::Ptr> Prepare(
span<const Index> decoded_shape) const final;
private:
DataType dtype_;
endian endianness_;
};
}
absl::Status BytesCodecSpec::GetDecodedChunkLayout(
const ArrayDataTypeAndShapeInfo& array_info,
ArrayCodecChunkLayoutInfo& decoded) const {
if (array_info.dtype.valid() &&
!internal::IsTrivialDataType(array_info.dtype)) {
return InvalidDataTypeError(array_info.dtype);
}
const DimensionIndex rank = array_info.rank;
if (rank != dynamic_rank) {
auto& inner_order = decoded.inner_order.emplace();
for (DimensionIndex i = 0; i < rank; ++i) {
inner_order[i] = i;
}
}
if (array_info.shape) {
auto& shape = *array_info.shape;
auto& read_chunk_shape = decoded.read_chunk_shape.emplace();
for (DimensionIndex i = 0; i < rank; ++i) {
read_chunk_shape[i] = shape[i];
}
}
return absl::OkStatus();
}
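// The "bytes" codec always stores elements in C (lexicographic) order. Any
// other requested inner order is reported as unsupported, with C order
// handed back as the preferred order; the caller compensates by inserting a
// "transpose" codec.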
bool BytesCodecSpec::SupportsInnerOrder(
const ArrayCodecResolveParameters& decoded,
span<DimensionIndex> preferred_inner_order) const {
if (!decoded.inner_order) return true;
if (PermutationMatchesOrder(span(decoded.inner_order->data(), decoded.rank),
c_order)) {
return true;
}
SetPermutation(c_order, preferred_inner_order);
return false;
}
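// Resolution requires a trivial (fixed-size, bitwise-copyable) data type.
// For multi-byte types an explicit "endian" option is mandatory unless the
// spec merely expresses constraints; endian-invariant types such as uint8
// omit the option from the resolved spec.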
Result<ZarrArrayToBytesCodec::Ptr> BytesCodecSpec::Resolve(
ArrayCodecResolveParameters&& decoded, BytesCodecResolveParameters& encoded,
ZarrArrayToBytesCodecSpec::Ptr* resolved_spec) const {
assert(decoded.dtype.valid());
if (!internal::IsTrivialDataType(decoded.dtype)) {
return InvalidDataTypeError(decoded.dtype);
}
const bool is_endian_invariant =
internal::IsEndianInvariantDataType(decoded.dtype);
if (!options.constraints && !is_endian_invariant && !options.endianness) {
return absl::InvalidArgumentError(
tensorstore::StrCat("\"bytes\" codec requires that \"endian\" option "
"is specified for data type ",
decoded.dtype));
}
encoded.item_bits = decoded.dtype.size() * 8;
DimensionIndex rank = decoded.rank;
if (decoded.codec_chunk_shape) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"\"bytes\" codec does not support codec_chunk_shape (",
span<const Index>(decoded.codec_chunk_shape->data(), rank),
" was specified"));
}
if (decoded.inner_order) {
auto& decoded_inner_order = *decoded.inner_order;
for (DimensionIndex i = 0; i < rank; ++i) {
if (decoded_inner_order[i] != i) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"\"bytes\" codec does not support inner_order of ",
span<const DimensionIndex>(decoded_inner_order.data(), rank)));
}
}
}
endian resolved_endianness = options.endianness.value_or(endian::native);
if (resolved_spec) {
resolved_spec->reset(new BytesCodecSpec(Options{
is_endian_invariant ? std::optional<endian>()
: std::optional<endian>(resolved_endianness)}));
}
return internal::MakeIntrusivePtr<BytesCodec>(decoded.dtype,
resolved_endianness);
}
namespace {
namespace jb = ::tensorstore::internal_json_binding;
constexpr auto EndiannessBinder() {
return jb::Enum<endian, std::string_view>({
{endian::little, "little"},
{endian::big, "big"},
});
}
}
absl::Status BytesCodecSpec::MergeFrom(const ZarrCodecSpec& other,
bool strict) {
using Self = BytesCodecSpec;
const auto& other_options = static_cast<const Self&>(other).options;
TENSORSTORE_RETURN_IF_ERROR(MergeConstraint<&Options::endianness>(
"endian", options, other_options, EndiannessBinder()));
return absl::OkStatus();
}
ZarrCodecSpec::Ptr BytesCodecSpec::Clone() const {
return internal::MakeIntrusivePtr<BytesCodecSpec>(*this);
}
namespace {
class BytesCodecPreparedState : public ZarrArrayToBytesCodec::PreparedState {
public:
int64_t encoded_size() const final { return encoded_size_; }
absl::Status EncodeArray(SharedArrayView<const void> decoded,
riegeli::Writer& writer) const final {
if (internal::EncodeArrayEndian(std::move(decoded), endianness_, c_order,
writer)) {
return absl::OkStatus();
}
assert(!writer.ok());
return writer.status();
}
Result<SharedArray<const void>> DecodeArray(
span<const Index> decoded_shape, riegeli::Reader& reader) const final {
return internal::DecodeArrayEndian(reader, dtype_, decoded_shape,
endianness_, c_order);
}
DataType dtype_;
endian endianness_;
int64_t encoded_size_;
};
}
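// Computes the fixed encoded size as the product of the extents and the
// element size, using overflow-checked multiplication so oversized shapes
// fail cleanly instead of wrapping around.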
Result<ZarrArrayToBytesCodec::PreparedState::Ptr> BytesCodec::Prepare(
span<const Index> decoded_shape) const {
int64_t bytes = dtype_.size();
for (auto size : decoded_shape) {
if (internal::MulOverflow(size, bytes, &bytes)) {
return absl::OutOfRangeError(tensorstore::StrCat(
"Integer overflow computing encoded size of array of shape ",
decoded_shape));
}
}
auto state = internal::MakeIntrusivePtr<BytesCodecPreparedState>();
state->dtype_ = dtype_;
state->endianness_ = endianness_;
state->encoded_size_ = bytes;
return state;
}
internal::IntrusivePtr<const BytesCodecSpec> DefaultBytesCodec() {
return internal::MakeIntrusivePtr<BytesCodecSpec>(
BytesCodecSpec::Options{endian::native});
}
TENSORSTORE_GLOBAL_INITIALIZER {
using Self = BytesCodecSpec;
using Options = Self::Options;
RegisterCodec<Self>(
"bytes",
jb::Projection<&Self::options>(jb::Sequence(
[](auto is_loading, const auto& options, auto* obj, auto* j) {
if constexpr (is_loading) {
obj->constraints = options.constraints;
}
return absl::OkStatus();
},
jb::Member("endian",
jb::Projection<&Options::endianness>(
jb::Optional(EndiannessBinder())))
)));
}
}
} | #include <stdint.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/data_type.h"
#include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"
#include "tensorstore/driver/zarr3/codec/codec_spec.h"
#include "tensorstore/driver/zarr3/codec/codec_test_util.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::dtype_v;
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_zarr3::ArrayCodecResolveParameters;
using ::tensorstore::internal_zarr3::CodecRoundTripTestParams;
using ::tensorstore::internal_zarr3::CodecSpecRoundTripTestParams;
using ::tensorstore::internal_zarr3::GetDefaultBytesCodecJson;
using ::tensorstore::internal_zarr3::TestCodecRoundTrip;
using ::tensorstore::internal_zarr3::TestCodecSpecResolve;
using ::tensorstore::internal_zarr3::TestCodecSpecRoundTrip;
using ::tensorstore::internal_zarr3::ZarrCodecChainSpec;
TEST(BytesTest, SpecRoundTrip) {
CodecSpecRoundTripTestParams p;
p.orig_spec = {"bytes"};
p.expected_spec = ::nlohmann::json::array_t{GetDefaultBytesCodecJson()};
TestCodecSpecRoundTrip(p);
}
TEST(BytesTest, DuplicateArrayToBytes) {
EXPECT_THAT(
ZarrCodecChainSpec::FromJson({
{{"name", "bytes"}, {"configuration", {{"endian", "little"}}}},
{{"name", "bytes"}, {"configuration", {{"endian", "little"}}}},
}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected bytes -> bytes codec, but received: .*"));
}
TEST(BytesTest, RoundTrip) {
CodecRoundTripTestParams p;
p.spec = {"bytes"};
TestCodecRoundTrip(p);
}
TEST(BytesTest, AutomaticTranspose) {
ArrayCodecResolveParameters p;
p.dtype = dtype_v<uint16_t>;
p.rank = 2;
auto& inner_order = p.inner_order.emplace();
inner_order[0] = 1;
inner_order[1] = 0;
EXPECT_THAT(
TestCodecSpecResolve(
::nlohmann::json::array_t{GetDefaultBytesCodecJson()}, p),
::testing::Optional(MatchesJson({
{{"name", "transpose"}, {"configuration", {{"order", {1, 0}}}}},
GetDefaultBytesCodecJson(),
})));
}
TEST(BytesTest, EndianInvariantDataType) {
ArrayCodecResolveParameters p;
p.dtype = dtype_v<uint8_t>;
p.rank = 2;
EXPECT_THAT(
TestCodecSpecResolve(::nlohmann::json::array_t{{{"name", "bytes"}}}, p,
false),
::testing::Optional(
MatchesJson(::nlohmann::json::array_t{{{"name", "bytes"}}})));
}
TEST(BytesTest, MissingEndianEndianInvariantDataType) {
ArrayCodecResolveParameters p;
p.dtype = dtype_v<uint16_t>;
p.rank = 2;
EXPECT_THAT(
TestCodecSpecResolve(::nlohmann::json::array_t{{{"name", "bytes"}}}, p,
false),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*: \"bytes\" codec requires that \"endian\" option is "
"specified for data type uint16"));
}
} |
588 | cpp | google/tensorstore | chunk_encoding | tensorstore/driver/neuroglancer_precomputed/chunk_encoding.cc | tensorstore/driver/neuroglancer_precomputed/chunk_encoding_test.cc | #ifndef TENSORSTORE_DRIVER_NEUROGLANCER_PRECOMPUTED_CHUNK_ENCODING_H_
#define TENSORSTORE_DRIVER_NEUROGLANCER_PRECOMPUTED_CHUNK_ENCODING_H_
#include <stddef.h>
#include "absl/strings/cord.h"
#include "tensorstore/array.h"
#include "tensorstore/driver/neuroglancer_precomputed/metadata.h"
#include "tensorstore/index.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_neuroglancer_precomputed {
Result<SharedArray<const void>> DecodeChunk(span<const Index> chunk_indices,
const MultiscaleMetadata& metadata,
size_t scale_index,
StridedLayoutView<4> chunk_layout,
absl::Cord buffer);
Result<absl::Cord> EncodeChunk(span<const Index> chunk_indices,
const MultiscaleMetadata& metadata,
size_t scale_index,
const SharedArrayView<const void>& array);
}
}
#endif
#include "tensorstore/driver/neuroglancer_precomputed/chunk_encoding.h"
#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <string>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "riegeli/bytes/cord_reader.h"
#include "riegeli/bytes/cord_writer.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/driver/neuroglancer_precomputed/metadata.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/compression/neuroglancer_compressed_segmentation.h"
#include "tensorstore/internal/data_type_endian_conversion.h"
#include "tensorstore/internal/flat_cord_builder.h"
#include "tensorstore/internal/image/image_info.h"
#include "tensorstore/internal/image/jpeg_reader.h"
#include "tensorstore/internal/image/jpeg_writer.h"
#include "tensorstore/internal/image/png_reader.h"
#include "tensorstore/internal/image/png_writer.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/endian.h"
#include "tensorstore/util/extents.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_neuroglancer_precomputed {
using ::tensorstore::internal_image::ImageInfo;
using ::tensorstore::internal_image::JpegWriterOptions;
using ::tensorstore::internal_image::PngWriterOptions;
Result<SharedArray<const void>> DecodeRawChunk(
DataType dtype, span<const Index, 4> shape,
StridedLayoutView<4> chunk_layout, absl::Cord buffer) {
const Index expected_bytes = ProductOfExtents(shape) * dtype.size();
if (expected_bytes != static_cast<Index>(buffer.size())) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Expected chunk length to be ", expected_bytes,
", but received ", buffer.size(), " bytes"));
}
auto flat_buffer = buffer.Flatten();
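  // Fast path for full (non-boundary) chunks: try to view the cord's memory
  // directly as the decoded array instead of copying it.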
if (absl::c_equal(shape, chunk_layout.shape())) {
auto decoded_array = internal::TryViewCordAsArray(
buffer, 0, dtype, endian::little, chunk_layout);
if (decoded_array.valid()) return {std::in_place, decoded_array};
}
Array<const void, 4> source(
{static_cast<const void*>(flat_buffer.data()), dtype}, shape);
SharedArray<void> full_decoded_array(
internal::AllocateAndConstructSharedElements(chunk_layout.num_elements(),
value_init, dtype),
chunk_layout);
ArrayView<void> partial_decoded_array(
full_decoded_array.element_pointer(),
StridedLayoutView<>{shape, chunk_layout.byte_strides()});
internal::DecodeArray(source, endian::little, partial_decoded_array);
return full_decoded_array;
}
template <typename ImageReader>
Result<SharedArray<const void>> DecodeImageChunk(
DataType dtype, span<const Index, 4> partial_shape,
StridedLayoutView<4> chunk_layout, absl::Cord encoded_input) {
auto array = AllocateArray(
{partial_shape[1], partial_shape[2], partial_shape[3], partial_shape[0]},
c_order, default_init, dtype);
{
riegeli::CordReader<> cord_reader(&encoded_input);
ImageReader reader;
TENSORSTORE_RETURN_IF_ERROR(reader.Initialize(&cord_reader));
auto info = reader.GetImageInfo();
const Index num_elements = ProductOfExtents(partial_shape.subspan<1>());
size_t total_pixels;
if (internal::MulOverflow(static_cast<size_t>(info.width),
static_cast<size_t>(info.height),
&total_pixels) ||
num_elements == std::numeric_limits<Index>::max() ||
static_cast<Index>(total_pixels) != num_elements ||
static_cast<Index>(info.num_components) != partial_shape[0]) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Image dimensions (", info.width, ", ", info.height, ", ",
info.num_components,
") are not compatible with expected chunk shape ", partial_shape));
}
TENSORSTORE_RETURN_IF_ERROR(reader.Decode(
tensorstore::span(reinterpret_cast<unsigned char*>(array.data()),
ImageRequiredBytes(info))));
if (!cord_reader.Close()) {
return cord_reader.status();
}
}
if (partial_shape[0] == 1 &&
absl::c_equal(partial_shape, chunk_layout.shape())) {
return SharedArray<const void>(array.element_pointer(), chunk_layout);
}
SharedArray<void> full_decoded_array(
internal::AllocateAndConstructSharedElements(chunk_layout.num_elements(),
default_init, dtype),
chunk_layout);
Array<void, 4> partial_decoded_array(
full_decoded_array.element_pointer(),
StridedLayout<4>(
{partial_shape[1], partial_shape[2], partial_shape[3],
partial_shape[0]},
{chunk_layout.byte_strides()[1], chunk_layout.byte_strides()[2],
chunk_layout.byte_strides()[3], chunk_layout.byte_strides()[0]}));
CopyArray(array, partial_decoded_array);
return full_decoded_array;
}
Result<SharedArray<const void>> DecodeJpegChunk(
DataType dtype, span<const Index, 4> partial_shape,
StridedLayoutView<4> chunk_layout, absl::Cord encoded_input) {
return DecodeImageChunk<internal_image::JpegReader>(
dtype, partial_shape, chunk_layout, std::move(encoded_input));
}
Result<SharedArray<const void>> DecodePngChunk(
DataType dtype, span<const Index, 4> partial_shape,
StridedLayoutView<4> chunk_layout, absl::Cord encoded_input) {
return DecodeImageChunk<internal_image::PngReader>(
dtype, partial_shape, chunk_layout, std::move(encoded_input));
}
Result<SharedArray<const void>> DecodeCompressedSegmentationChunk(
DataType dtype, span<const Index, 4> shape,
StridedLayoutView<4> chunk_layout, std::array<Index, 3> block_size,
absl::Cord buffer) {
auto flat_buffer = buffer.Flatten();
SharedArray<void> full_decoded_array(
internal::AllocateAndConstructSharedElements(chunk_layout.num_elements(),
default_init, dtype),
chunk_layout);
std::ptrdiff_t output_shape_ptrdiff_t[4] = {shape[0], shape[1], shape[2],
shape[3]};
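  // The metadata stores the block size as [x, y, z]; reverse it to match the
  // [z, y, x] order of the chunk's spatial dimensions.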
std::ptrdiff_t block_shape_ptrdiff_t[3] = {block_size[2], block_size[1],
block_size[0]};
std::ptrdiff_t output_byte_strides[4] = {
chunk_layout.byte_strides()[0], chunk_layout.byte_strides()[1],
chunk_layout.byte_strides()[2], chunk_layout.byte_strides()[3]};
bool success = false;
switch (dtype.id()) {
case DataTypeId::uint32_t:
success = neuroglancer_compressed_segmentation::DecodeChannels(
flat_buffer, block_shape_ptrdiff_t, output_shape_ptrdiff_t,
output_byte_strides,
static_cast<uint32_t*>(full_decoded_array.data()));
break;
case DataTypeId::uint64_t:
success = neuroglancer_compressed_segmentation::DecodeChannels(
flat_buffer, block_shape_ptrdiff_t, output_shape_ptrdiff_t,
output_byte_strides,
static_cast<uint64_t*>(full_decoded_array.data()));
break;
default:
ABSL_UNREACHABLE();
}
if (!success) {
return absl::InvalidArgumentError(
"Corrupted Neuroglancer compressed segmentation");
}
return full_decoded_array;
}
void GetChunkShape(span<const Index> chunk_indices,
const MultiscaleMetadata& metadata, size_t scale_index,
span<const Index, 4> full_chunk_shape,
span<Index, 4> partial_chunk_shape) {
const auto& scale = metadata.scales[scale_index];
partial_chunk_shape[0] = full_chunk_shape[0];
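  // Chunk dimensions are [channel, z, y, x] while `scale.box` is [x, y, z];
  // clamp each spatial extent so that chunks at the upper volume bound are
  // truncated to the portion that lies inside the volume.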
for (int i = 0; i < 3; ++i) {
const Index full_size = full_chunk_shape[3 - i];
partial_chunk_shape[3 - i] = std::min(
scale.box.shape()[i] - chunk_indices[i] * full_size, full_size);
}
}
Result<SharedArray<const void>> DecodeChunk(span<const Index> chunk_indices,
const MultiscaleMetadata& metadata,
size_t scale_index,
StridedLayoutView<4> chunk_layout,
absl::Cord buffer) {
const auto& scale_metadata = metadata.scales[scale_index];
std::array<Index, 4> chunk_shape;
GetChunkShape(chunk_indices, metadata, scale_index, chunk_layout.shape(),
chunk_shape);
switch (scale_metadata.encoding) {
case ScaleMetadata::Encoding::raw:
return DecodeRawChunk(metadata.dtype, chunk_shape, chunk_layout,
std::move(buffer));
case ScaleMetadata::Encoding::png:
return DecodePngChunk(metadata.dtype, chunk_shape, chunk_layout,
std::move(buffer));
case ScaleMetadata::Encoding::jpeg:
return DecodeJpegChunk(metadata.dtype, chunk_shape, chunk_layout,
std::move(buffer));
case ScaleMetadata::Encoding::compressed_segmentation:
return DecodeCompressedSegmentationChunk(
metadata.dtype, chunk_shape, chunk_layout,
scale_metadata.compressed_segmentation_block_size, std::move(buffer));
}
ABSL_UNREACHABLE();
}
absl::Cord EncodeRawChunk(DataType dtype, span<const Index, 4> shape,
const SharedArrayView<const void>& array) {
ArrayView<const void> partial_source(
array.element_pointer(),
StridedLayoutView<>(shape, array.byte_strides()));
internal::FlatCordBuilder buffer(ProductOfExtents(shape) * dtype.size());
Array<void, 4> encoded_array({static_cast<void*>(buffer.data()), dtype},
shape);
internal::EncodeArray(partial_source, encoded_array, endian::little);
return std::move(buffer).Build();
}
template <typename ImageWriter, typename Options>
Result<absl::Cord> EncodeImageChunk(Options options, DataType dtype,
span<const Index, 4> shape,
ArrayView<const void> array) {
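  // View the channel-first [c, z, y, x] chunk as [z, y, x, c] and make a
  // contiguous copy, since the image codecs consume interleaved pixels.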
Array<const void, 4> partial_source(
array.element_pointer(),
StridedLayout<4>({shape[1], shape[2], shape[3], shape[0]},
{array.byte_strides()[1], array.byte_strides()[2],
array.byte_strides()[3], array.byte_strides()[0]}));
auto contiguous_array = MakeCopy(partial_source, c_order);
absl::Cord buffer;
{
ImageWriter writer;
riegeli::CordWriter<> cord_writer(&buffer);
TENSORSTORE_RETURN_IF_ERROR(writer.Initialize(&cord_writer, options));
ImageInfo info{static_cast<int32_t>(shape[3]),
static_cast<int32_t>(shape[1] * shape[2]),
static_cast<int32_t>(shape[0]),
dtype};
TENSORSTORE_RETURN_IF_ERROR(writer.Encode(
info, tensorstore::span(reinterpret_cast<const unsigned char*>(
contiguous_array.data()),
contiguous_array.num_elements() *
contiguous_array.dtype().size())));
TENSORSTORE_RETURN_IF_ERROR(writer.Done());
}
return buffer;
}
Result<absl::Cord> EncodeJpegChunk(DataType dtype, int quality,
span<const Index, 4> shape,
ArrayView<const void> array) {
internal_image::JpegWriterOptions options;
options.quality = quality;
return EncodeImageChunk<internal_image::JpegWriter>(options, dtype, shape,
array);
}
Result<absl::Cord> EncodePngChunk(DataType dtype, int compression_level,
span<const Index, 4> shape,
ArrayView<const void> array) {
internal_image::PngWriterOptions options;
options.compression_level = compression_level;
return EncodeImageChunk<internal_image::PngWriter>(options, dtype, shape,
array);
}
Result<absl::Cord> EncodeCompressedSegmentationChunk(
DataType dtype, span<const Index, 4> shape, ArrayView<const void> array,
std::array<Index, 3> block_size) {
std::ptrdiff_t input_shape_ptrdiff_t[4] = {shape[0], shape[1], shape[2],
shape[3]};
std::ptrdiff_t block_shape_ptrdiff_t[3] = {block_size[2], block_size[1],
block_size[0]};
std::string out;
std::ptrdiff_t input_byte_strides[4] = {
array.byte_strides()[0], array.byte_strides()[1], array.byte_strides()[2],
array.byte_strides()[3]};
switch (dtype.id()) {
case DataTypeId::uint32_t:
neuroglancer_compressed_segmentation::EncodeChannels(
static_cast<const uint32_t*>(array.data()), input_shape_ptrdiff_t,
input_byte_strides, block_shape_ptrdiff_t, &out);
break;
case DataTypeId::uint64_t:
neuroglancer_compressed_segmentation::EncodeChannels(
static_cast<const uint64_t*>(array.data()), input_shape_ptrdiff_t,
input_byte_strides, block_shape_ptrdiff_t, &out);
break;
default:
ABSL_UNREACHABLE();
}
return absl::Cord(std::move(out));
}
Result<absl::Cord> EncodeChunk(span<const Index> chunk_indices,
const MultiscaleMetadata& metadata,
size_t scale_index,
const SharedArrayView<const void>& array) {
const auto& scale_metadata = metadata.scales[scale_index];
std::array<Index, 4> partial_chunk_shape;
GetChunkShape(chunk_indices, metadata, scale_index,
span<const Index, 4>(array.shape().data(), 4),
partial_chunk_shape);
switch (scale_metadata.encoding) {
case ScaleMetadata::Encoding::raw:
return EncodeRawChunk(metadata.dtype, partial_chunk_shape, array);
case ScaleMetadata::Encoding::jpeg:
return EncodeJpegChunk(metadata.dtype, scale_metadata.jpeg_quality,
partial_chunk_shape, array);
case ScaleMetadata::Encoding::png:
return EncodePngChunk(metadata.dtype, scale_metadata.png_level,
partial_chunk_shape, array);
case ScaleMetadata::Encoding::compressed_segmentation:
return EncodeCompressedSegmentationChunk(
metadata.dtype, partial_chunk_shape, array,
scale_metadata.compressed_segmentation_block_size);
}
ABSL_UNREACHABLE();
}
}
} | #include "tensorstore/driver/neuroglancer_precomputed/chunk_encoding.h"
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/driver/neuroglancer_precomputed/metadata.h"
#include "tensorstore/index.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_neuroglancer_precomputed::DecodeChunk;
using ::tensorstore::internal_neuroglancer_precomputed::EncodeChunk;
using ::tensorstore::internal_neuroglancer_precomputed::MultiscaleMetadata;
struct P {
::nlohmann::json metadata_json;
tensorstore::DataType dtype;
bool compare = true;
bool truncate = true;
};
class ChunkEncodingTest : public testing::TestWithParam<P> {
public:
template <typename T>
tensorstore::SharedArray<void> AllocateArrayImpl(Index num_channels) {
auto array = tensorstore::AllocateArray<T>({num_channels, 5, 4, 3});
for (Index i = 0, n = array.num_elements(); i < n; ++i) {
array.data()[i] = static_cast<T>(i);
}
return array;
}
tensorstore::SharedArray<void> GetArrayForDType(tensorstore::DataTypeId id,
Index num_channels) {
switch (id) {
case tensorstore::DataTypeId::uint8_t:
return AllocateArrayImpl<uint8_t>(num_channels);
case tensorstore::DataTypeId::uint16_t:
return AllocateArrayImpl<uint16_t>(num_channels);
case tensorstore::DataTypeId::uint32_t:
return AllocateArrayImpl<uint32_t>(num_channels);
case tensorstore::DataTypeId::uint64_t:
return AllocateArrayImpl<uint64_t>(num_channels);
default:
ABSL_UNREACHABLE();
}
}
};
TEST_P(ChunkEncodingTest, Roundtrip) {
auto metadata_json = GetParam().metadata_json;
auto dtype = GetParam().dtype;
metadata_json["data_type"] = dtype.name();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto metadata,
MultiscaleMetadata::FromJson(metadata_json));
auto array = GetArrayForDType(dtype.id(), metadata.num_channels);
std::vector<Index> chunk_indices{0, 0, 0};
const size_t scale_index = 0;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
absl::Cord out, EncodeChunk(chunk_indices, metadata, scale_index, array));
tensorstore::StridedLayout chunk_layout(tensorstore::c_order, dtype.size(),
{metadata.num_channels, 5, 4, 3});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto decode_result,
DecodeChunk(chunk_indices, metadata, scale_index, chunk_layout, out));
if (!out.empty() && GetParam().truncate) {
auto corrupt = out.Subcord(0, out.size() - 1);
EXPECT_THAT(
DecodeChunk(chunk_indices, metadata, scale_index, chunk_layout,
corrupt),
testing::AnyOf(MatchesStatus(absl::StatusCode::kDataLoss),
MatchesStatus(absl::StatusCode::kInvalidArgument)));
}
if (GetParam().compare) {
EXPECT_THAT(decode_result, array);
}
}
std::vector<P> GenerateParams() {
std::vector<P> result;
for (const int num_channels : {1, 2, 3, 4}) {
P param;
param.metadata_json =
::nlohmann::json{{"@type", "neuroglancer_multiscale_volume"},
{"num_channels", num_channels},
{"scales",
{{{"chunk_sizes", {{3, 4, 5}}},
{"encoding", "raw"},
{"key", "k"},
{"resolution", {5, 6, 7}},
{"size", {10, 11, 12}}}}},
{"type", "image"}};
param.dtype = tensorstore::dtype_v<uint16_t>;
result.push_back(param);
param.truncate = false;
if (num_channels >= 1 && num_channels <= 4) {
param.metadata_json["scales"][0]["encoding"] = "png";
param.dtype = tensorstore::dtype_v<uint8_t>;
result.push_back(param);
if (num_channels == 1) {
param.dtype = tensorstore::dtype_v<uint16_t>;
result.push_back(param);
}
}
param.truncate = true;
param.compare = false;
if (num_channels == 1 || num_channels == 3) {
param.metadata_json["scales"][0]["encoding"] = "jpeg";
param.dtype = tensorstore::dtype_v<uint8_t>;
result.push_back(param);
}
param.compare = true;
param.metadata_json["scales"][0]["encoding"] = "compressed_segmentation";
param.metadata_json["scales"][0]["compressed_segmentation_block_size"] = {
2, 3, 4};
param.dtype = tensorstore::dtype_v<uint32_t>;
result.push_back(param);
param.dtype = tensorstore::dtype_v<uint64_t>;
result.push_back(param);
}
return result;
}
INSTANTIATE_TEST_SUITE_P(
All, ChunkEncodingTest, testing::ValuesIn(GenerateParams()),
[](const testing::TestParamInfo<P>& info) {
const auto& p = info.param;
auto encoding =
p.metadata_json["scales"][0]["encoding"].get<std::string>();
return tensorstore::StrCat(encoding, "_", p.metadata_json["num_channels"],
"_", p.dtype.name());
});
} |
589 | cpp | google/tensorstore | json_change_map | tensorstore/driver/json/json_change_map.cc | tensorstore/driver/json/json_change_map_test.cc | #ifndef TENSORSTORE_DRIVER_JSON_JSON_CHANGE_MAP_H_
#define TENSORSTORE_DRIVER_JSON_JSON_CHANGE_MAP_H_
#include <string>
#include <string_view>
#include "absl/container/btree_map.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_pointer.h"
namespace tensorstore {
namespace internal_json_driver {
class JsonChangeMap {
private:
struct MapCompare {
using is_transparent = void;
bool operator()(std::string_view a, std::string_view b) const {
return json_pointer::Compare(a, b) < json_pointer::kEqual;
}
};
public:
using Map = absl::btree_map<std::string, ::nlohmann::json, MapCompare>;
Result<::nlohmann::json> Apply(const ::nlohmann::json& existing,
std::string_view sub_value_pointer = {}) const;
bool CanApplyUnconditionally(std::string_view sub_value_pointer) const;
absl::Status AddChange(std::string_view sub_value_pointer,
::nlohmann::json sub_value);
const Map& underlying_map() const { return map_; }
private:
Map map_;
};
}
}
#endif
#include "tensorstore/driver/json/json_change_map.h"
#include <cassert>
#include <string>
#include <string_view>
#include <utility>
#include "absl/container/btree_map.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_pointer.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal_json_driver {
Result<::nlohmann::json> JsonChangeMap::Apply(
const ::nlohmann::json& existing,
std::string_view sub_value_pointer) const {
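  // Case 1: an exact entry for `sub_value_pointer` replaces the existing
  // value entirely; `existing` is dereferenced only to validate the pointer.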
Map::const_iterator changes_it = map_.lower_bound(sub_value_pointer),
changes_end = map_.end();
if (changes_it != changes_end && changes_it->first == sub_value_pointer) {
TENSORSTORE_RETURN_IF_ERROR(
json_pointer::Dereference(existing, sub_value_pointer,
json_pointer::kSimulateCreate),
internal::ConvertInvalidArgumentToFailedPrecondition(_));
return {std::in_place, changes_it->second};
}
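  // Case 2: a preceding entry contains `sub_value_pointer`; extract the
  // requested sub-value from that entry's replacement value.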
if (changes_it != map_.begin()) {
auto prev_it = std::prev(changes_it);
if (json_pointer::Compare(prev_it->first, sub_value_pointer) ==
json_pointer::kContains) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto* modified_value,
json_pointer::Dereference(
prev_it->second, sub_value_pointer.substr(prev_it->first.size()),
json_pointer::kMustExist));
TENSORSTORE_RETURN_IF_ERROR(
json_pointer::Dereference(existing, prev_it->first,
json_pointer::kSimulateCreate),
internal::ConvertInvalidArgumentToFailedPrecondition(_));
return {std::in_place, *modified_value};
}
}
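  // Case 3: start from the existing value (or a "discarded" placeholder) and
  // apply every change entry nested within `sub_value_pointer`.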
::nlohmann::json new_value;
{
TENSORSTORE_ASSIGN_OR_RETURN(
const ::nlohmann::json* restricted_existing,
json_pointer::Dereference(existing, sub_value_pointer,
json_pointer::kSimulateCreate));
if (restricted_existing) {
new_value = *restricted_existing;
} else {
new_value = ::nlohmann::json(::nlohmann::json::value_t::discarded);
}
}
for (; changes_it != changes_end &&
json_pointer::Compare(changes_it->first, sub_value_pointer) ==
json_pointer::kContainedIn;
++changes_it) {
TENSORSTORE_RETURN_IF_ERROR(
json_pointer::Replace(new_value,
std::string_view(changes_it->first)
.substr(sub_value_pointer.size()),
changes_it->second),
internal::ConvertInvalidArgumentToFailedPrecondition(_));
}
return new_value;
}
bool JsonChangeMap::CanApplyUnconditionally(
std::string_view sub_value_pointer) const {
Map::const_iterator changes_it;
if (sub_value_pointer.empty()) {
changes_it = map_.begin();
} else {
changes_it = map_.lower_bound(sub_value_pointer);
}
if (changes_it != map_.end()) {
if (changes_it->first == sub_value_pointer) {
return true;
}
}
if (changes_it != map_.begin()) {
auto prev_it = std::prev(changes_it);
return json_pointer::Compare(prev_it->first, sub_value_pointer) ==
json_pointer::kContains;
}
return false;
}
absl::Status JsonChangeMap::AddChange(std::string_view sub_value_pointer,
::nlohmann::json sub_value) {
auto it = map_.lower_bound(sub_value_pointer);
if (it != map_.end()) {
auto compare_result = json_pointer::Compare(sub_value_pointer, it->first);
assert(compare_result <= json_pointer::kEqual);
if (compare_result == json_pointer::kEqual) {
it->second = std::move(sub_value);
return absl::OkStatus();
}
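    // The new change strictly contains the following entries; erase them,
    // since the new change supersedes them.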
while (compare_result == json_pointer::kContains) {
it = map_.erase(it);
if (it == map_.end()) break;
compare_result = json_pointer::Compare(sub_value_pointer, it->first);
}
}
if (it != map_.begin()) {
auto prev_it = std::prev(it);
if (json_pointer::Compare(prev_it->first, sub_value_pointer) ==
json_pointer::kContains) {
return json_pointer::Replace(
prev_it->second, sub_value_pointer.substr(prev_it->first.size()),
std::move(sub_value));
}
}
map_.try_emplace(it, std::string(sub_value_pointer), std::move(sub_value));
return absl::OkStatus();
}
}
} | #include "tensorstore/driver/json/json_change_map.h"
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_json_driver::JsonChangeMap;
using ::testing::ElementsAre;
using ::testing::Optional;
using ::testing::Pair;
TEST(JsonChangeMapTest, AddChangeValid) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b/c", 42));
EXPECT_THAT(changes.underlying_map(),
ElementsAre(Pair("/a/b/c", MatchesJson(42))));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b/a", false));
EXPECT_THAT(changes.underlying_map(),
ElementsAre(Pair("/a/b/a", MatchesJson(false)),
Pair("/a/b/c", MatchesJson(42))));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b/a", true));
EXPECT_THAT(changes.underlying_map(),
ElementsAre(Pair("/a/b/a", MatchesJson(true)),
Pair("/a/b/c", MatchesJson(42))));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b", {{"d", "xyz"}}));
EXPECT_THAT(
changes.underlying_map(),
ElementsAre(Pair("/a/b", MatchesJson(::nlohmann::json{{"d", "xyz"}}))));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b/c", 42));
EXPECT_THAT(changes.underlying_map(),
ElementsAre(Pair("/a/b", MatchesJson(::nlohmann::json{
{"d", "xyz"}, {"c", 42}}))));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b/a", false));
EXPECT_THAT(
changes.underlying_map(),
ElementsAre(Pair("/a/b", MatchesJson(::nlohmann::json{
{"d", "xyz"}, {"c", 42}, {"a", false}}))));
}
TEST(JsonChangeMapTest, AddChangeValidIndependent) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b/c", 42));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/e", "xx"));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/a", "yy"));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b/a", false));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b", {{"d", "xyz"}}));
EXPECT_THAT(
changes.underlying_map(),
ElementsAre(Pair("/a/a", MatchesJson("yy")),
Pair("/a/b", MatchesJson(::nlohmann::json{{"d", "xyz"}})),
Pair("/a/e", MatchesJson("xx"))));
}
TEST(JsonChangeMapTest, AddChangeInvalid) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a", 42));
EXPECT_THAT(changes.AddChange("/a/b", 43),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(JsonChangeMapTest, ApplyEmptyChangeMap) {
JsonChangeMap changes;
EXPECT_THAT(changes.Apply({{"x", "y"}, {"z", "w"}}),
Optional(MatchesJson(::nlohmann::json{{"x", "y"}, {"z", "w"}})));
EXPECT_THAT(changes.Apply({{"x", "y"}, {"z", "w"}}, "/x"),
Optional(MatchesJson(::nlohmann::json("y"))));
}
TEST(JsonChangeMapTest, ApplyContainingChangeMap1) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("", {{"a", {{"b", {{"c", 42}}}}}}));
EXPECT_THAT(changes.Apply("old", "/a/b/c"), Optional(MatchesJson(42)));
}
TEST(JsonChangeMapTest, ApplyInvalidContainingChangeMap) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a", {{"b", {{"c", 42}}}}));
EXPECT_THAT(changes.Apply(false, "/a/b/c"),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(JsonChangeMapTest, ApplyChangeMapPriorNonContaining) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a", 10));
EXPECT_THAT(changes.Apply({{"b", 42}}, "/b"), Optional(MatchesJson(42)));
}
TEST(JsonChangeMapTest, ApplyContainingChangeMap2) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a", {{"b", {{"c", 42}}}}));
EXPECT_THAT(changes.Apply({{"e", "f"}}, "/a/b/c"), Optional(MatchesJson(42)));
}
TEST(JsonChangeMapTest, ApplyChangeMap) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a", {{"b", {{"c", 42}}}}));
TENSORSTORE_EXPECT_OK(changes.AddChange("/e", 42));
EXPECT_THAT(changes.Apply({{"x", "y"}, {"e", "f"}}),
Optional(MatchesJson(::nlohmann::json{
{"a", {{"b", {{"c", 42}}}}}, {"e", 42}, {"x", "y"}})));
}
TEST(JsonChangeMapTest, ApplyInvalidChangeMap1) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/e", 42));
EXPECT_THAT(changes.Apply(42),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(JsonChangeMapTest, ApplyInvalidChangeMap2) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/4", 42));
EXPECT_THAT(changes.Apply({1, 2, 3}),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(JsonChangeMapTest, ApplyRequestInvalidJsonPointer) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b", 42));
EXPECT_THAT(changes.Apply(false, "/a"),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(JsonChangeMapTest, ApplyRequestInvalidJsonPointerNoChanges) {
JsonChangeMap changes;
EXPECT_THAT(changes.Apply(false, "/a"),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(JsonChangeMapTest, ApplyRequestNewMember) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b", 42));
EXPECT_THAT(changes.Apply(::nlohmann::json::object_t{}, "/a"),
Optional(MatchesJson(::nlohmann::json{{"b", 42}})));
}
TEST(JsonChangeMapTest, ApplyIncompatibleChangeExactRequest) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a", 42));
EXPECT_THAT(changes.Apply(false, "/a"),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(JsonChangeMapTest, AddIncompatibleChanges) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("", 42));
EXPECT_THAT(changes.AddChange("/a", 50),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"JSON Pointer reference \"/a\" cannot be applied "
"to number value: 42"));
}
TEST(JsonChangeMapTest, CanApplyUnconditionally) {
JsonChangeMap changes;
EXPECT_FALSE(changes.CanApplyUnconditionally(""));
EXPECT_FALSE(changes.CanApplyUnconditionally("/a/b/c"));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b", {{"c", 42}}));
EXPECT_TRUE(changes.CanApplyUnconditionally("/a/b/c"));
EXPECT_TRUE(changes.CanApplyUnconditionally("/a/b"));
EXPECT_TRUE(changes.CanApplyUnconditionally("/a/b/d"));
EXPECT_FALSE(changes.CanApplyUnconditionally("/a"));
EXPECT_FALSE(changes.CanApplyUnconditionally("/a/x"));
EXPECT_FALSE(changes.CanApplyUnconditionally(""));
TENSORSTORE_EXPECT_OK(changes.AddChange("", {{"a", false}}));
EXPECT_TRUE(changes.CanApplyUnconditionally(""));
EXPECT_TRUE(changes.CanApplyUnconditionally("/a"));
}
} |
590 | cpp | google/tensorstore | byte_range | tensorstore/kvstore/byte_range.cc | tensorstore/kvstore/byte_range_test.cc | #ifndef TENSORSTORE_KVSTORE_BYTE_RANGE_REQUEST_H_
#define TENSORSTORE_KVSTORE_BYTE_RANGE_REQUEST_H_
#include <stddef.h>
#include <stdint.h>
#include <cassert>
#include <ostream>
#include "absl/strings/cord.h"
#include "tensorstore/serialization/fwd.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
struct ByteRange {
int64_t inclusive_min;
int64_t exclusive_max;
constexpr bool SatisfiesInvariants() const {
return inclusive_min >= 0 && exclusive_max >= 0 &&
exclusive_max >= inclusive_min;
}
int64_t size() const {
assert(SatisfiesInvariants());
return exclusive_max - inclusive_min;
}
friend bool operator==(const ByteRange& a, const ByteRange& b) {
return a.inclusive_min == b.inclusive_min &&
a.exclusive_max == b.exclusive_max;
}
friend bool operator!=(const ByteRange& a, const ByteRange& b) {
return !(a == b);
}
friend std::ostream& operator<<(std::ostream& os, const ByteRange& r);
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.inclusive_min, x.exclusive_max);
};
};
struct OptionalByteRangeRequest {
constexpr OptionalByteRangeRequest() : inclusive_min(0), exclusive_max(-1) {}
explicit constexpr OptionalByteRangeRequest(int64_t inclusive_min,
int64_t exclusive_max = -1)
: inclusive_min(inclusive_min), exclusive_max(exclusive_max) {}
constexpr OptionalByteRangeRequest(ByteRange r)
: inclusive_min(r.inclusive_min), exclusive_max(r.exclusive_max) {}
bool IsFull() const { return inclusive_min == 0 && exclusive_max == -1; }
bool IsRange() const { return exclusive_max != -1; }
bool IsSuffixLength() const { return inclusive_min < 0; }
bool IsSuffix() const { return exclusive_max == -1 && inclusive_min > 0; }
static OptionalByteRangeRequest Range(int64_t inclusive_min,
int64_t exclusive_max) {
assert(inclusive_min >= 0);
assert(exclusive_max >= 0);
return OptionalByteRangeRequest{inclusive_min, exclusive_max};
}
static OptionalByteRangeRequest SuffixLength(int64_t length) {
assert(length >= 0);
return OptionalByteRangeRequest{-length, -1};
}
static OptionalByteRangeRequest Suffix(int64_t inclusive_min) {
assert(inclusive_min >= 0);
return OptionalByteRangeRequest{inclusive_min, -1};
}
int64_t inclusive_min = 0;
int64_t exclusive_max = -1;
int64_t size() const {
assert(SatisfiesInvariants());
if (inclusive_min < 0) return -inclusive_min;
if (exclusive_max != -1) return exclusive_max - inclusive_min;
return -1;
}
friend bool operator==(const OptionalByteRangeRequest& a,
const OptionalByteRangeRequest& b) {
return a.inclusive_min == b.inclusive_min &&
a.exclusive_max == b.exclusive_max;
}
friend bool operator!=(const OptionalByteRangeRequest& a,
const OptionalByteRangeRequest& b) {
return !(a == b);
}
ByteRange AsByteRange() const {
assert(IsRange());
return {inclusive_min, exclusive_max};
}
friend std::ostream& operator<<(std::ostream& os,
const OptionalByteRangeRequest& r);
constexpr bool SatisfiesInvariants() const {
return (exclusive_max == -1 ||
(exclusive_max >= inclusive_min && inclusive_min >= 0));
}
Result<ByteRange> Validate(int64_t size) const;
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.inclusive_min, x.exclusive_max);
};
};
namespace internal {
inline absl::Cord GetSubCord(const absl::Cord& s, ByteRange r) {
assert(r.SatisfiesInvariants());
const size_t size = s.size();
assert(r.exclusive_max <= size);
if (r.inclusive_min == 0 && r.size() == size) return s;
return s.Subcord(r.inclusive_min, r.size());
}
}
}
TENSORSTORE_DECLARE_SERIALIZER_SPECIALIZATION(tensorstore::ByteRange)
TENSORSTORE_DECLARE_SERIALIZER_SPECIALIZATION(
tensorstore::OptionalByteRangeRequest)
#endif
#include "tensorstore/kvstore/byte_range.h"
#include <cassert>
#include <optional>
#include <ostream>
#include <string>
#include "absl/status/status.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/std_optional.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
std::ostream& operator<<(std::ostream& os, const OptionalByteRangeRequest& r) {
os << "[" << r.inclusive_min << ", ";
if (r.exclusive_max != -1) {
os << r.exclusive_max;
} else {
os << "?";
}
os << ")";
return os;
}
std::ostream& operator<<(std::ostream& os, const ByteRange& r) {
return os << "[" << r.inclusive_min << ", " << r.exclusive_max << ")";
}
Result<ByteRange> OptionalByteRangeRequest::Validate(int64_t size) const {
assert(SatisfiesInvariants());
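  // exclusive_max == -1 denotes an open upper bound, and a negative
  // inclusive_min is a suffix length counted back from `size`.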
int64_t inclusive_min = this->inclusive_min;
int64_t exclusive_max = this->exclusive_max;
if (exclusive_max == -1) exclusive_max = size;
if (inclusive_min < 0) {
inclusive_min += size;
}
if (inclusive_min < 0 || exclusive_max > size ||
inclusive_min > exclusive_max) {
return absl::OutOfRangeError(
tensorstore::StrCat("Requested byte range ", *this,
" is not valid for value of size ", size));
}
return ByteRange{inclusive_min, exclusive_max};
}
}
TENSORSTORE_DEFINE_SERIALIZER_SPECIALIZATION(
tensorstore::ByteRange, tensorstore::serialization::ApplyMembersSerializer<
tensorstore::ByteRange>())
TENSORSTORE_DEFINE_SERIALIZER_SPECIALIZATION(
tensorstore::OptionalByteRangeRequest,
tensorstore::serialization::ApplyMembersSerializer<
tensorstore::OptionalByteRangeRequest>()) | #include "tensorstore/kvstore/byte_range.h"
#include <optional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::ByteRange;
using ::tensorstore::MatchesStatus;
using ::tensorstore::OptionalByteRangeRequest;
using ::tensorstore::StrCat;
using ::tensorstore::internal::GetSubCord;
using ::tensorstore::serialization::TestSerializationRoundTrip;
TEST(ByteRangeTest, SatisfiesInvariants) {
EXPECT_TRUE((ByteRange{0, 0}).SatisfiesInvariants());
EXPECT_TRUE((ByteRange{0, 1}).SatisfiesInvariants());
EXPECT_TRUE((ByteRange{0, 100}).SatisfiesInvariants());
EXPECT_TRUE((ByteRange{10, 100}).SatisfiesInvariants());
EXPECT_TRUE((ByteRange{100, 100}).SatisfiesInvariants());
EXPECT_FALSE((ByteRange{100, 99}).SatisfiesInvariants());
EXPECT_FALSE((ByteRange{100, 0}).SatisfiesInvariants());
EXPECT_FALSE((ByteRange{-100, 0}).SatisfiesInvariants());
}
TEST(ByteRangeTest, Size) {
EXPECT_EQ(5, (ByteRange{2, 7}.size()));
EXPECT_EQ(0, (ByteRange{2, 2}.size()));
}
TEST(ByteRangeTest, Comparison) {
ByteRange a{1, 2};
ByteRange b{1, 3};
ByteRange c{2, 3};
EXPECT_TRUE(a == a);
EXPECT_TRUE(b == b);
EXPECT_TRUE(c == c);
EXPECT_FALSE(a != a);
EXPECT_FALSE(b != b);
EXPECT_FALSE(c != c);
EXPECT_FALSE(a == b);
EXPECT_TRUE(a != b);
EXPECT_NE(a, c);
EXPECT_NE(b, c);
}
TEST(ByteRangeTest, Ostream) {
EXPECT_EQ("[1, 10)", tensorstore::StrCat(ByteRange{1, 10}));
}
TEST(OptionalByteRangeRequestTest, DefaultConstruct) {
OptionalByteRangeRequest r;
EXPECT_EQ(0, r.inclusive_min);
EXPECT_EQ(-1, r.exclusive_max);
}
TEST(OptionalByteRangeRequestTest, ConstructInclusiveMin) {
OptionalByteRangeRequest r(5);
EXPECT_EQ(5, r.inclusive_min);
EXPECT_EQ(-1, r.exclusive_max);
}
TEST(OptionalByteRangeRequestTest, ConstructInclusiveMinExclusiveMax) {
OptionalByteRangeRequest r(5, 10);
EXPECT_EQ(5, r.inclusive_min);
EXPECT_EQ(10, r.exclusive_max);
}
TEST(OptionalByteRangeRequestTest, ConstructByteRange) {
OptionalByteRangeRequest r(ByteRange{5, 10});
EXPECT_EQ(5, r.inclusive_min);
EXPECT_EQ(10, r.exclusive_max);
}
TEST(OptionalByteRangeRequestTest, Comparison) {
OptionalByteRangeRequest a{1, 2};
OptionalByteRangeRequest b{1, 3};
OptionalByteRangeRequest c{2, 3};
OptionalByteRangeRequest d{1, -1};
EXPECT_TRUE(a == a);
EXPECT_TRUE(b == b);
EXPECT_TRUE(c == c);
EXPECT_TRUE(d == d);
EXPECT_FALSE(a != a);
EXPECT_FALSE(a == b);
EXPECT_TRUE(a != b);
EXPECT_TRUE(a != c);
EXPECT_TRUE(a != d);
EXPECT_TRUE(b != d);
EXPECT_TRUE(c != d);
}
TEST(OptionalByteRangeRequestTest, SatisfiesInvariants) {
EXPECT_TRUE(OptionalByteRangeRequest().SatisfiesInvariants());
EXPECT_TRUE(OptionalByteRangeRequest(10).SatisfiesInvariants());
EXPECT_TRUE(OptionalByteRangeRequest(0, 1).SatisfiesInvariants());
EXPECT_TRUE(OptionalByteRangeRequest(0, 0).SatisfiesInvariants());
EXPECT_TRUE(OptionalByteRangeRequest(0, 100).SatisfiesInvariants());
EXPECT_TRUE(OptionalByteRangeRequest(10, 100).SatisfiesInvariants());
EXPECT_TRUE(OptionalByteRangeRequest(100, 100).SatisfiesInvariants());
EXPECT_FALSE(OptionalByteRangeRequest(100, 99).SatisfiesInvariants());
EXPECT_FALSE(OptionalByteRangeRequest(100, 0).SatisfiesInvariants());
EXPECT_FALSE(OptionalByteRangeRequest(-5, 0).SatisfiesInvariants());
EXPECT_FALSE(OptionalByteRangeRequest(-5, 3).SatisfiesInvariants());
EXPECT_FALSE(OptionalByteRangeRequest(3, -2).SatisfiesInvariants());
}
TEST(OptionalByteRangeRequestTest, Ostream) {
EXPECT_EQ("[5, 10)", StrCat(OptionalByteRangeRequest(5, 10)));
EXPECT_EQ("[5, ?)", StrCat(OptionalByteRangeRequest(5)));
}
TEST(OptionalByteRangeRequestTest, Validate) {
EXPECT_THAT(OptionalByteRangeRequest().Validate(0),
::testing::Optional(ByteRange{0, 0}));
EXPECT_THAT(OptionalByteRangeRequest().Validate(1),
::testing::Optional(ByteRange{0, 1}));
EXPECT_THAT(OptionalByteRangeRequest(5, 10).Validate(20),
::testing::Optional(ByteRange{5, 10}));
EXPECT_THAT(OptionalByteRangeRequest(5, 10).Validate(10),
::testing::Optional(ByteRange{5, 10}));
EXPECT_THAT(OptionalByteRangeRequest(5).Validate(10),
::testing::Optional(ByteRange{5, 10}));
EXPECT_THAT(OptionalByteRangeRequest(-3).Validate(10),
::testing::Optional(ByteRange{7, 10}));
EXPECT_THAT(OptionalByteRangeRequest(-10).Validate(10),
::testing::Optional(ByteRange{0, 10}));
EXPECT_THAT(OptionalByteRangeRequest(5, 10).Validate(9),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Requested byte range \\[5, 10\\) is not valid for "
"value of size 9"));
EXPECT_THAT(
OptionalByteRangeRequest(10, 15).Validate(9),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Requested byte range \\[10, 15\\) is not valid for "
"value of size 9"));
EXPECT_THAT(
OptionalByteRangeRequest(-10).Validate(9),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Requested byte range \\[-10, \\?\\) is not valid for "
"value of size 9"));
}
TEST(GetSubStringTest, Basic) {
EXPECT_EQ("bcd", GetSubCord(absl::Cord("abcde"), {1, 4}));
EXPECT_EQ("bcd", GetSubCord(absl::Cord("abcde"), {1, 4}));
EXPECT_EQ("abcde", GetSubCord(absl::Cord("abcde"), {0, 5}));
}
TEST(ByteRangeSerializationTest, Basic) {
TestSerializationRoundTrip(ByteRange{1, 5});
}
TEST(OptionalByteRangeRequestSerializationTest, Basic) {
TestSerializationRoundTrip(OptionalByteRangeRequest{1, 5});
TestSerializationRoundTrip(OptionalByteRangeRequest{1});
}
} |
591 | cpp | google/tensorstore | generation | tensorstore/kvstore/generation.cc | tensorstore/kvstore/generation_test.cc | #ifndef TENSORSTORE_KVSTORE_GENERATION_H_
#define TENSORSTORE_KVSTORE_GENERATION_H_
#include <cstring>
#include <iosfwd>
#include <string>
#include <string_view>
#include <utility>
#include "absl/time/time.h"
#include "tensorstore/serialization/fwd.h"
namespace tensorstore {
struct StorageGeneration {
std::string value;
explicit operator bool() const { return !value.empty(); }
constexpr static char kBaseGeneration = 1;
constexpr static char kDirty = 2;
constexpr static char kNewlyDirty = 16;
constexpr static char kNoValue = 4;
constexpr static char kInvalid = 8;
static StorageGeneration Unknown() { return {}; }
static StorageGeneration NoValue() {
return StorageGeneration{std::string(1, kBaseGeneration | kNoValue)};
}
static StorageGeneration Invalid() {
return StorageGeneration{std::string(1, kInvalid)};
}
static StorageGeneration FromUint64(uint64_t n);
static bool IsUint64(const StorageGeneration& generation) {
return generation.value.size() == 9 &&
generation.value.back() == kBaseGeneration;
}
static uint64_t ToUint64(const StorageGeneration& generation) {
uint64_t n = 0;
if (IsUint64(generation)) {
std::memcpy(&n, generation.value.data(), 8);
}
return n;
}
void MarkDirty();
static bool IsNewlyDirty(const StorageGeneration& generation) {
return !generation.value.empty() && (generation.value.back() & kNewlyDirty);
}
bool ClearNewlyDirty() {
bool is_newly_dirty = IsNewlyDirty(*this);
if (is_newly_dirty) {
value.back() &= ~kNewlyDirty;
}
return is_newly_dirty;
}
static StorageGeneration FromString(std::string_view s);
static bool IsCleanValidValue(const StorageGeneration& generation) {
return !generation.value.empty() &&
generation.value.back() == kBaseGeneration;
}
static std::string_view DecodeString(const StorageGeneration& generation);
template <typename... T>
static StorageGeneration FromValues(const T&... value) {
constexpr auto as_string_view = [](const auto& value) -> std::string_view {
using value_type = std::decay_t<decltype(value)>;
if constexpr (std::is_same_v<std::string_view, value_type> ||
std::is_same_v<std::string, value_type>) {
return value;
} else {
static_assert(std::is_trivial_v<value_type>);
return std::string_view(reinterpret_cast<const char*>(&value),
sizeof(value));
}
};
const size_t n = (as_string_view(value).size() + ...);
StorageGeneration gen;
gen.value.resize(n + 1);
size_t offset = 0;
const auto copy_value = [&](const auto& value) {
auto s = as_string_view(value);
std::memcpy(&gen.value[offset], s.data(), s.size());
offset += s.size();
};
(copy_value(value), ...);
gen.value[n] = kBaseGeneration;
return gen;
}
static StorageGeneration Dirty(StorageGeneration generation);
static StorageGeneration Clean(StorageGeneration generation);
static bool Equivalent(std::string_view a, std::string_view b);
static bool IsDirty(const StorageGeneration& generation);
static bool IsInnerLayerDirty(const StorageGeneration& generation);
static StorageGeneration Condition(const StorageGeneration& generation,
StorageGeneration condition);
static StorageGeneration AddLayer(StorageGeneration generation);
static bool IsConditional(const StorageGeneration& generation);
static bool IsConditionalOn(const StorageGeneration& generation,
const StorageGeneration& condition);
static bool EqualOrUnspecified(const StorageGeneration& generation,
const StorageGeneration& if_equal) {
return StorageGeneration::IsUnknown(if_equal) ||
generation.value == if_equal.value;
}
static bool NotEqualOrUnspecified(const StorageGeneration& generation,
const StorageGeneration& if_not_equal) {
return StorageGeneration::IsUnknown(if_not_equal) ||
generation.value != if_not_equal.value;
}
static bool IsUnknown(const StorageGeneration& generation) {
return generation.value.empty();
}
static bool IsClean(const StorageGeneration& generation) {
return !generation.value.empty() &&
(generation.value.back() & (kBaseGeneration | kDirty)) ==
kBaseGeneration;
}
static bool IsNoValue(const StorageGeneration& generation) {
return generation.value.size() == 1 &&
generation.value[0] == (kNoValue | kBaseGeneration);
}
friend inline bool operator==(const StorageGeneration& a,
const StorageGeneration& b) {
return Equivalent(a.value, b.value);
}
friend inline bool operator==(std::string_view a,
const StorageGeneration& b) {
return Equivalent(a, b.value);
}
friend inline bool operator==(const StorageGeneration& a,
std::string_view b) {
return Equivalent(a.value, b);
}
friend inline bool operator!=(const StorageGeneration& a,
const StorageGeneration& b) {
return !(a == b);
}
friend inline bool operator!=(const StorageGeneration& a,
std::string_view b) {
return !(a == b);
}
friend inline bool operator!=(std::string_view a,
const StorageGeneration& b) {
return !(a == b);
}
friend std::ostream& operator<<(std::ostream& os, const StorageGeneration& g);
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.value);
};
template <typename H>
friend H AbslHashValue(H h, const StorageGeneration& x) {
return H::combine(std::move(h), x.value);
}
};
struct TimestampedStorageGeneration {
TimestampedStorageGeneration() = default;
TimestampedStorageGeneration(StorageGeneration generation, absl::Time time)
: generation(std::move(generation)), time(std::move(time)) {}
StorageGeneration generation;
absl::Time time = absl::InfinitePast();
bool unconditional() const { return time == absl::InfiniteFuture(); }
static TimestampedStorageGeneration Unconditional() {
return {StorageGeneration::Unknown(), absl::InfiniteFuture()};
}
friend bool operator==(const TimestampedStorageGeneration& a,
const TimestampedStorageGeneration& b) {
return a.generation == b.generation && a.time == b.time;
}
friend bool operator!=(const TimestampedStorageGeneration& a,
const TimestampedStorageGeneration& b) {
return !(a == b);
}
friend std::ostream& operator<<(std::ostream& os,
const TimestampedStorageGeneration& x);
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.generation, x.time);
};
};
}
TENSORSTORE_DECLARE_SERIALIZER_SPECIALIZATION(tensorstore::StorageGeneration)
TENSORSTORE_DECLARE_SERIALIZER_SPECIALIZATION(
tensorstore::TimestampedStorageGeneration)
#endif
#include "tensorstore/kvstore/generation.h"
#include <stddef.h>
#include <stdint.h>
#include <cstring>
#include <ostream>
#include <string_view>
#include <utility>
#include "absl/time/time.h"
#include "tensorstore/serialization/absl_time.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/util/quote_string.h"
namespace tensorstore {
namespace {
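// Strips trailing zero bytes (empty outer layers produced by AddLayer) so
// that equivalent generations compare equal.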
std::string_view CanonicalGeneration(std::string_view generation) {
size_t new_size = generation.size();
while (new_size && generation[new_size - 1] == 0) {
--new_size;
}
return generation.substr(0, new_size);
}
}
std::ostream& operator<<(std::ostream& os, const StorageGeneration& g) {
return os << QuoteString(g.value);
}
std::ostream& operator<<(std::ostream& os,
const TimestampedStorageGeneration& x) {
return os << "{generation=" << x.generation << ", time=" << x.time << "}";
}
bool StorageGeneration::Equivalent(std::string_view a, std::string_view b) {
return CanonicalGeneration(a) == CanonicalGeneration(b);
}
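// Strips outer layers that carry no base generation, then clears the dirty
// bits on the outermost layer that does.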
StorageGeneration StorageGeneration::Clean(StorageGeneration generation) {
size_t new_size = generation.value.size();
while (new_size) {
if (generation.value[new_size - 1] & kBaseGeneration) {
generation.value[new_size - 1] &= ~(kDirty | kNewlyDirty);
break;
}
--new_size;
}
generation.value.resize(new_size);
return generation;
}
void StorageGeneration::MarkDirty() {
if (value.empty()) {
value = (kDirty | kNewlyDirty);
} else {
value.back() |= (kDirty | kNewlyDirty);
}
}
StorageGeneration StorageGeneration::Dirty(StorageGeneration generation) {
if (generation.value.empty()) {
return StorageGeneration{std::string(1, kDirty)};
}
generation.value.back() |= kDirty;
return generation;
}
StorageGeneration StorageGeneration::FromUint64(uint64_t n) {
StorageGeneration generation;
generation.value.resize(9);
std::memcpy(generation.value.data(), &n, 8);
generation.value[8] = kBaseGeneration;
return generation;
}
StorageGeneration StorageGeneration::FromString(std::string_view s) {
StorageGeneration generation;
generation.value.reserve(s.size() + 1);
generation.value += s;
generation.value += kBaseGeneration;
return generation;
}
StorageGeneration StorageGeneration::Condition(
const StorageGeneration& generation, StorageGeneration condition) {
if (IsDirty(generation)) {
return Dirty(Clean(std::move(condition)));
}
return Clean(std::move(condition));
}
bool StorageGeneration::IsDirty(const StorageGeneration& generation) {
auto canonical = CanonicalGeneration(generation.value);
return !canonical.empty() && (canonical.back() & kDirty);
}
bool StorageGeneration::IsInnerLayerDirty(const StorageGeneration& generation) {
return !generation.value.empty() && (generation.value.back() & kDirty);
}
StorageGeneration StorageGeneration::AddLayer(StorageGeneration generation) {
generation.value.resize(generation.value.size() + 1);
return generation;
}
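// True if any layer carries a base generation, i.e. the generation depends on
// a value previously read from storage.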
bool StorageGeneration::IsConditional(const StorageGeneration& generation) {
size_t new_size = generation.value.size();
while (new_size && !(generation.value[new_size - 1] & kBaseGeneration)) {
--new_size;
}
return (new_size != 0);
}
bool StorageGeneration::IsConditionalOn(const StorageGeneration& generation,
const StorageGeneration& condition) {
size_t size = generation.value.size();
return size != 0 && condition.value.size() == size &&
std::memcmp(generation.value.data(), condition.value.data(),
size - 1) == 0 &&
         (generation.value[size - 1] | kDirty | kNewlyDirty) ==
             (condition.value[size - 1] | kDirty | kNewlyDirty);
}
std::string_view StorageGeneration::DecodeString(
const StorageGeneration& generation) {
std::string_view s = generation.value;
if (s.empty()) return {};
while (true) {
bool start_of_tags = static_cast<bool>(s.back() & kBaseGeneration);
s.remove_suffix(1);
if (start_of_tags || s.empty()) break;
}
return s;
}
}
TENSORSTORE_DEFINE_SERIALIZER_SPECIALIZATION(
tensorstore::StorageGeneration,
tensorstore::serialization::ApplyMembersSerializer<
tensorstore::StorageGeneration>())
TENSORSTORE_DEFINE_SERIALIZER_SPECIALIZATION(
tensorstore::TimestampedStorageGeneration,
tensorstore::serialization::ApplyMembersSerializer<
tensorstore::TimestampedStorageGeneration>()) | #include "tensorstore/kvstore/generation.h"
#include <gtest/gtest.h>
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
namespace {
using ::tensorstore::StorageGeneration;
using ::tensorstore::TimestampedStorageGeneration;
using ::tensorstore::serialization::TestSerializationRoundTrip;
TEST(StorageGenerationTest, Basic) {
EXPECT_TRUE(StorageGeneration::IsUnknown(StorageGeneration::Unknown()));
EXPECT_FALSE(StorageGeneration::IsUnknown(StorageGeneration::NoValue()));
EXPECT_FALSE(StorageGeneration::IsNoValue(StorageGeneration::Unknown()));
EXPECT_TRUE(StorageGeneration::IsNoValue(StorageGeneration::NoValue()));
EXPECT_EQ(StorageGeneration{std::string{StorageGeneration::kDirty}},
StorageGeneration::Dirty(StorageGeneration::Unknown()));
StorageGeneration gen{
std::string{1, 2, 3, 4, 5, StorageGeneration::kBaseGeneration}};
StorageGeneration local_gen{std::string{
1, 2, 3, 4, 5,
StorageGeneration::kBaseGeneration | StorageGeneration::kDirty}};
EXPECT_FALSE(StorageGeneration::IsUnknown(gen));
EXPECT_FALSE(StorageGeneration::IsUnknown(local_gen));
EXPECT_TRUE(StorageGeneration::IsClean(gen));
EXPECT_FALSE(StorageGeneration::IsClean(local_gen));
EXPECT_FALSE(StorageGeneration::IsDirty(gen));
EXPECT_TRUE(StorageGeneration::IsDirty(local_gen));
EXPECT_EQ(local_gen, StorageGeneration::Dirty(gen));
EXPECT_EQ(gen, StorageGeneration::Clean(local_gen));
EXPECT_TRUE(StorageGeneration::IsClean(StorageGeneration::NoValue()));
EXPECT_FALSE(StorageGeneration::IsClean(StorageGeneration::Unknown()));
EXPECT_EQ(StorageGeneration::NoValue(),
StorageGeneration::Clean(StorageGeneration::NoValue()));
}
TEST(StorageGenerationTest, Uint64) {
auto g = StorageGeneration::FromUint64(12345);
EXPECT_TRUE(StorageGeneration::IsUint64(g));
EXPECT_EQ(12345, StorageGeneration::ToUint64(g));
EXPECT_FALSE(StorageGeneration::IsUint64(StorageGeneration::Unknown()));
EXPECT_FALSE(StorageGeneration::IsUint64(StorageGeneration::NoValue()));
EXPECT_FALSE(StorageGeneration::IsUint64(StorageGeneration::Invalid()));
}
TEST(StorageGenerationSerializationTest, Basic) {
TestSerializationRoundTrip(StorageGeneration::Unknown());
TestSerializationRoundTrip(StorageGeneration::FromUint64(12345));
}
TEST(TimestampedStorageGenerationSerializationTest, Basic) {
TestSerializationRoundTrip(TimestampedStorageGeneration(
StorageGeneration::FromUint64(12345), absl::InfinitePast()));
TestSerializationRoundTrip(TimestampedStorageGeneration(
StorageGeneration::FromUint64(12345), absl::InfiniteFuture()));
}
TEST(StorageGenerationTest, IsCleanValidValue) {
EXPECT_FALSE(
StorageGeneration::IsCleanValidValue(StorageGeneration::Unknown()));
EXPECT_FALSE(
StorageGeneration::IsCleanValidValue(StorageGeneration::NoValue()));
EXPECT_FALSE(
StorageGeneration::IsCleanValidValue(StorageGeneration::Invalid()));
EXPECT_TRUE(StorageGeneration::IsCleanValidValue(
StorageGeneration::FromString("abc")));
EXPECT_TRUE(
StorageGeneration::IsCleanValidValue(StorageGeneration::FromUint64(42)));
}
TEST(StorageGenerationTest, DecodeString) {
EXPECT_EQ("abc", StorageGeneration::DecodeString(
StorageGeneration::FromString("abc")));
}
} |
592 | cpp | google/tensorstore | key_range | tensorstore/kvstore/key_range.cc | tensorstore/kvstore/key_range_test.cc | #ifndef TENSORSTORE_KVSTORE_KEY_RANGE_H_
#define TENSORSTORE_KVSTORE_KEY_RANGE_H_
#include <stddef.h>
#include <iosfwd>
#include <string>
#include <string_view>
#include <utility>
#include "absl/types/compare.h"
#include "tensorstore/internal/compare.h"
namespace tensorstore {
class KeyRange {
public:
KeyRange() = default;
static KeyRange EmptyRange() {
return KeyRange(std::string(1, '\0'), std::string(1, '\0'));
}
explicit KeyRange(std::string inclusive_min, std::string exclusive_max)
: inclusive_min(std::move(inclusive_min)),
exclusive_max(std::move(exclusive_max)) {}
static KeyRange Prefix(std::string prefix);
static KeyRange AddPrefix(std::string_view prefix, KeyRange range);
static KeyRange RemovePrefix(std::string_view prefix, KeyRange range);
static KeyRange RemovePrefixLength(size_t n, const KeyRange& range);
static std::string Successor(std::string_view key);
static KeyRange Singleton(std::string key);
static std::string PrefixExclusiveMax(std::string_view prefix);
static absl::weak_ordering CompareKeyAndExclusiveMax(std::string_view key,
std::string_view bound);
static absl::weak_ordering CompareExclusiveMaxAndKey(std::string_view bound,
std::string_view key) {
return internal::InvertWeakOrdering(CompareKeyAndExclusiveMax(key, bound));
}
static absl::weak_ordering CompareExclusiveMax(std::string_view a,
std::string_view b);
bool empty() const {
return !exclusive_max.empty() && inclusive_min >= exclusive_max;
}
bool full() const { return exclusive_max.empty() && inclusive_min.empty(); }
bool is_singleton() const;
bool is_non_empty_prefix() const;
friend bool operator==(const KeyRange& a, const KeyRange& b) {
return a.inclusive_min == b.inclusive_min &&
a.exclusive_max == b.exclusive_max;
}
friend bool operator!=(const KeyRange& a, const KeyRange& b) {
return !(a == b);
}
friend std::ostream& operator<<(std::ostream& os, const KeyRange& range);
static constexpr auto ApplyMembers = [](auto&& x, auto f) {
return f(x.inclusive_min, x.exclusive_max);
};
std::string inclusive_min;
std::string exclusive_max;
};
bool Contains(const KeyRange& haystack, std::string_view needle);
bool Contains(const KeyRange& haystack, const KeyRange& needle);
bool ContainsPrefix(const KeyRange& haystack, std::string_view prefix);
KeyRange Intersect(const KeyRange& a, const KeyRange& b);
bool Intersects(const KeyRange& a, const KeyRange& b);
bool IntersectsPrefix(const KeyRange& a, std::string_view prefix);
std::string_view LongestPrefix(const KeyRange& range);
}
#endif
#include "tensorstore/kvstore/key_range.h"
#include <algorithm>
#include <cstddef>
#include <ostream>
#include <string>
#include <string_view>
#include <utility>
#include "absl/strings/match.h"
#include "absl/types/compare.h"
#include "tensorstore/internal/compare.h"
#include "tensorstore/util/quote_string.h"
namespace tensorstore {
namespace {
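// Strips trailing '\xff' bytes from `prefix`.  A prefix ending in '\xff'
// has no same-length exclusive max obtainable by incrementing its last
// byte, so only this partial prefix participates in `PrefixExclusiveMax`
// and `is_non_empty_prefix`.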
std::string_view PartialPrefix(std::string_view prefix) {
while (!prefix.empty() && prefix.back() == '\xff') {
prefix.remove_suffix(1);
}
return prefix;
}
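// Returns the smaller of two exclusive-max bounds, where an empty bound
// denotes +infinity.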
std::string_view MinExclusiveMax(std::string_view a, std::string_view b) {
return KeyRange::CompareExclusiveMax(a, b) < 0 ? a : b;
}
}
KeyRange KeyRange::Prefix(std::string prefix) {
KeyRange range;
range.exclusive_max = PrefixExclusiveMax(prefix);
range.inclusive_min = std::move(prefix);
return range;
}
std::string KeyRange::Successor(std::string_view key) {
std::string successor;
successor.reserve(key.size() + 1);
successor.append(key);
successor += '\x00';
return successor;
}
KeyRange KeyRange::Singleton(std::string key) {
auto exclusive_max = Successor(key);
return KeyRange(std::move(key), std::move(exclusive_max));
}
bool KeyRange::is_singleton() const {
return exclusive_max.size() == (inclusive_min.size() + 1) &&
exclusive_max.back() == '\x00' &&
std::string_view(exclusive_max).substr(0, inclusive_min.size()) ==
inclusive_min;
}
bool KeyRange::is_non_empty_prefix() const {
std::string_view prefix = PartialPrefix(inclusive_min);
return !full() && exclusive_max.size() == prefix.size() &&
(prefix.empty() ||
(exclusive_max.back() == (prefix.back() + 1) &&
std::string_view(exclusive_max).substr(0, prefix.size() - 1) ==
prefix.substr(0, prefix.size() - 1)));
}
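// Least key greater than every key starting with `prefix`: trailing '\xff'
// bytes are dropped and the last remaining byte is incremented.  An
// all-'\xff' (or empty) prefix yields the empty string, i.e. no upper
// bound.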
std::string KeyRange::PrefixExclusiveMax(std::string_view prefix) {
std::string prefix_copy(PartialPrefix(prefix));
if (!prefix_copy.empty()) {
auto& last_byte = prefix_copy.back();
last_byte = static_cast<unsigned char>(last_byte) + 1;
}
return prefix_copy;
}
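// An empty `bound` denotes +infinity, so every key orders strictly before
// it.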
absl::weak_ordering KeyRange::CompareKeyAndExclusiveMax(
std::string_view key, std::string_view bound) {
return bound.empty()
? absl::weak_ordering::less
: internal::CompareResultAsWeakOrdering(key.compare(bound));
}
absl::weak_ordering KeyRange::CompareExclusiveMax(std::string_view a,
std::string_view b) {
return a.empty() != b.empty()
? (a.empty() ? absl::weak_ordering::greater
: absl::weak_ordering::less)
: internal::CompareResultAsWeakOrdering(a.compare(b));
}
bool Contains(const KeyRange& haystack, std::string_view needle) {
return haystack.inclusive_min <= needle &&
KeyRange::CompareKeyAndExclusiveMax(needle, haystack.exclusive_max) <
0;
}
KeyRange Intersect(const KeyRange& a, const KeyRange& b) {
const auto* a_ptr = &a;
const auto* b_ptr = &b;
if (a_ptr->inclusive_min > b_ptr->inclusive_min) {
std::swap(a_ptr, b_ptr);
}
KeyRange result;
result.inclusive_min = b_ptr->inclusive_min;
result.exclusive_max =
std::string(MinExclusiveMax(a_ptr->exclusive_max, b_ptr->exclusive_max));
if (result.empty()) {
result.exclusive_max = result.inclusive_min;
}
return result;
}
bool Intersects(const KeyRange& a, const KeyRange& b) {
return !Intersect(a, b).empty();
}
bool Contains(const KeyRange& haystack, const KeyRange& needle) {
return haystack.inclusive_min <= needle.inclusive_min &&
KeyRange::CompareExclusiveMax(needle.exclusive_max,
haystack.exclusive_max) <= 0;
}
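// Longest prefix shared by all keys in `range`.  For an unbounded range the
// prefix is the run of leading '\xff' bytes of `inclusive_min`; when the
// bounds differ only by +1 in the last compared byte, that byte and any
// following '\xff' run are also common (e.g. ["ab\xff", "ac") yields
// "ab\xff").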
std::string_view LongestPrefix(const KeyRange& range) {
std::string_view inclusive_min = range.inclusive_min;
std::string_view exclusive_max = range.exclusive_max;
size_t i = 0;
if (exclusive_max.empty()) {
while (i < inclusive_min.size() && inclusive_min[i] == '\xff') {
++i;
}
} else {
size_t min_length = std::min(inclusive_min.size(), exclusive_max.size());
while (i < min_length && inclusive_min[i] == exclusive_max[i]) {
++i;
}
if (i + 1 == min_length && inclusive_min[i] != '\xff' &&
static_cast<unsigned char>(inclusive_min[i]) + 1 ==
static_cast<unsigned char>(exclusive_max[i])) {
++i;
while (i < inclusive_min.size() && inclusive_min[i] == '\xff') {
++i;
}
}
}
return inclusive_min.substr(0, i);
}
bool ContainsPrefix(const KeyRange& haystack, std::string_view prefix) {
return tensorstore::Contains(haystack, KeyRange::Prefix(std::string(prefix)));
}
bool IntersectsPrefix(const KeyRange& a, std::string_view prefix) {
return tensorstore::Intersects(a, KeyRange::Prefix(std::string(prefix)));
}
std::ostream& operator<<(std::ostream& os, const KeyRange& range) {
return os << "[" << tensorstore::QuoteString(range.inclusive_min) << ", "
<< tensorstore::QuoteString(range.exclusive_max) << ")";
}
KeyRange KeyRange::AddPrefix(std::string_view prefix, KeyRange range) {
if (prefix.empty()) return range;
range.inclusive_min.insert(0, prefix);
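  // An empty exclusive_max denoted an unbounded range; once prefixed, the
  // range is instead bounded by the successor of `prefix`.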
if (range.exclusive_max.empty()) {
range.exclusive_max = KeyRange::PrefixExclusiveMax(std::string(prefix));
} else {
range.exclusive_max.insert(0, prefix);
}
return range;
}
KeyRange KeyRange::RemovePrefix(std::string_view prefix, KeyRange range) {
if (prefix.empty()) return range;
if (prefix >= range.inclusive_min) {
range.inclusive_min.clear();
} else {
if (!absl::StartsWith(range.inclusive_min, prefix)) return EmptyRange();
range.inclusive_min.erase(0, prefix.size());
}
const auto c = CompareKeyAndExclusiveMax(prefix, range.exclusive_max);
if (c < 0) {
if (absl::StartsWith(range.exclusive_max, prefix)) {
range.exclusive_max.erase(0, prefix.size());
} else {
range.exclusive_max.clear();
}
} else {
return EmptyRange();
}
return range;
}
KeyRange KeyRange::RemovePrefixLength(size_t n, const KeyRange& range) {
std::string_view inclusive_min(range.inclusive_min);
if (n < inclusive_min.size()) {
inclusive_min.remove_prefix(n);
} else {
inclusive_min = {};
}
std::string_view exclusive_max(range.exclusive_max);
if (n < exclusive_max.size()) {
exclusive_max.remove_prefix(n);
} else {
exclusive_max = {};
}
return KeyRange(std::string(inclusive_min), std::string(exclusive_max));
}
} | #include "tensorstore/kvstore/key_range.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/compare.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::KeyRange;
TEST(KeyRangeTest, Comparison) {
KeyRange r1("a", "b");
EXPECT_EQ("a", r1.inclusive_min);
EXPECT_EQ("b", r1.exclusive_max);
KeyRange r2("a", "c");
KeyRange r3("", "b");
KeyRange r4("", "c");
EXPECT_EQ(r1, r1);
EXPECT_EQ(r2, r2);
EXPECT_EQ(r3, r3);
EXPECT_EQ(r4, r4);
EXPECT_NE(r1, r2);
EXPECT_NE(r1, r3);
EXPECT_NE(r1, r4);
EXPECT_NE(r2, r3);
EXPECT_NE(r2, r4);
EXPECT_NE(r3, r4);
}
TEST(KeyRangeTest, Full) {
KeyRange full;
EXPECT_TRUE(full.full());
EXPECT_EQ(std::string(), full.inclusive_min);
EXPECT_EQ(std::string(), full.exclusive_max);
EXPECT_EQ(full, KeyRange({}, {}));
EXPECT_NE(full, KeyRange("a", "b"));
EXPECT_NE(full, KeyRange("", "b"));
EXPECT_NE(full, KeyRange("a", ""));
EXPECT_FALSE(full.empty());
EXPECT_EQ("", tensorstore::LongestPrefix(full));
EXPECT_TRUE(tensorstore::Contains(full, "abc"));
EXPECT_EQ(KeyRange::Prefix(""), full);
}
TEST(KeyRangeTest, Empty) {
EXPECT_FALSE(KeyRange("a", "b").empty());
EXPECT_FALSE(KeyRange("a", "").empty());
EXPECT_TRUE(KeyRange("b", "a").empty());
EXPECT_TRUE(KeyRange("b", "b").empty());
}
TEST(KeyRangeTest, Prefix) {
EXPECT_EQ(KeyRange(), KeyRange::Prefix(""));
EXPECT_EQ(KeyRange("abc", "abd"), KeyRange::Prefix("abc"));
EXPECT_EQ(KeyRange("ab\xff", "ac"), KeyRange::Prefix("ab\xff"));
EXPECT_EQ(KeyRange("ab\xff\xff\xff", "ac"),
KeyRange::Prefix("ab\xff\xff\xff"));
EXPECT_EQ(KeyRange("\xff", ""), KeyRange::Prefix("\xff"));
EXPECT_EQ(KeyRange("\xff\xff\xff", ""), KeyRange::Prefix("\xff\xff\xff"));
EXPECT_FALSE(KeyRange::Prefix("").is_non_empty_prefix());
EXPECT_TRUE(KeyRange::Prefix("abc").is_non_empty_prefix());
EXPECT_TRUE(KeyRange::Prefix("ab\xff").is_non_empty_prefix());
EXPECT_TRUE(KeyRange::Prefix("ab\xff\xff\xff").is_non_empty_prefix());
EXPECT_TRUE(KeyRange::Prefix("\xff").is_non_empty_prefix());
EXPECT_TRUE(KeyRange::Prefix("\xff\xff\xff").is_non_empty_prefix());
EXPECT_FALSE(KeyRange::Prefix("ab\xff").full());
EXPECT_FALSE(KeyRange::Prefix("ab\xff").is_singleton());
}
TEST(KeyRangeTest, Successor) {
EXPECT_EQ(std::string({'a', 'b', 'c', '\x00'}), KeyRange::Successor("abc"));
EXPECT_EQ(std::string({'\x00'}), KeyRange::Successor(""));
}
TEST(KeyRangeTest, ContainsKey) {
EXPECT_TRUE(tensorstore::Contains(KeyRange("a", "c"), "a"));
EXPECT_TRUE(tensorstore::Contains(KeyRange("a", "c"), "ab"));
EXPECT_TRUE(tensorstore::Contains(KeyRange("a", "c"), "abc"));
EXPECT_TRUE(tensorstore::Contains(KeyRange("a", "c"), "b"));
EXPECT_TRUE(tensorstore::Contains(KeyRange("a", "c"), "ba"));
EXPECT_FALSE(tensorstore::Contains(KeyRange("a", "c"), "c"));
EXPECT_FALSE(tensorstore::Contains(KeyRange("a", "c"), "ca"));
EXPECT_FALSE(tensorstore::Contains(KeyRange("a", "c"), "d"));
}
TEST(KeyRangeTest, ContainsRange) {
EXPECT_TRUE(tensorstore::Contains(KeyRange(), KeyRange("ab", "cd")));
EXPECT_TRUE(tensorstore::Contains(KeyRange("a", "c"), KeyRange("a", "c")));
EXPECT_TRUE(tensorstore::Contains(KeyRange("a", "c"), KeyRange("ab", "c")));
EXPECT_TRUE(tensorstore::Contains(KeyRange("a", "c"), KeyRange("ab", "ba")));
EXPECT_TRUE(tensorstore::Contains(KeyRange("a", "c"), KeyRange("b", "ba")));
EXPECT_TRUE(tensorstore::Contains(KeyRange("a", "c"), KeyRange::Prefix("a")));
EXPECT_TRUE(
tensorstore::Contains(KeyRange("a", "c"), KeyRange::Prefix("ab")));
EXPECT_TRUE(tensorstore::Contains(KeyRange("a", "c"), KeyRange::Prefix("b")));
EXPECT_FALSE(tensorstore::Contains(KeyRange("a", "c"), KeyRange("a", "ca")));
EXPECT_FALSE(tensorstore::Contains(KeyRange("a", "c"), KeyRange("0", "a")));
EXPECT_FALSE(tensorstore::Contains(KeyRange("a", "c"), KeyRange()));
}
TEST(KeyRangeTest, Intersect) {
EXPECT_EQ(KeyRange("b", "b"),
tensorstore::Intersect(KeyRange("a", "b"), KeyRange("b", "c")));
EXPECT_EQ(KeyRange("c", "c"),
tensorstore::Intersect(KeyRange("a", "b"), KeyRange("c", "d")));
EXPECT_EQ(KeyRange("b", "b"),
tensorstore::Intersect(KeyRange("", "b"), KeyRange("b", "")));
EXPECT_EQ(KeyRange("a", "b"),
tensorstore::Intersect(KeyRange(), KeyRange("a", "b")));
EXPECT_EQ(KeyRange("a", "b"),
tensorstore::Intersect(KeyRange("a", "b"), KeyRange()));
EXPECT_EQ(KeyRange("a", "b"),
tensorstore::Intersect(KeyRange("a", "b"), KeyRange("a", "c")));
EXPECT_EQ(KeyRange("a", "b"),
tensorstore::Intersect(KeyRange("a", "c"), KeyRange("a", "b")));
EXPECT_EQ(KeyRange("b", "c"),
tensorstore::Intersect(KeyRange("a", "c"), KeyRange("b", "c")));
EXPECT_EQ(KeyRange("aa", "b"),
tensorstore::Intersect(KeyRange("aa", "c"), KeyRange("a", "b")));
EXPECT_EQ(KeyRange("aa", "b"),
tensorstore::Intersect(KeyRange("aa", ""), KeyRange("a", "b")));
}
TEST(KeyRangeTest, LongestPrefix) {
EXPECT_EQ("", tensorstore::LongestPrefix(KeyRange("a", "c")));
EXPECT_EQ("a", tensorstore::LongestPrefix(KeyRange("a", "b")));
EXPECT_EQ("a", tensorstore::LongestPrefix(KeyRange("aa", "b")));
EXPECT_EQ("abc", tensorstore::LongestPrefix(KeyRange("abc", "abcd")));
EXPECT_EQ("abc", tensorstore::LongestPrefix(KeyRange("abc", "abd")));
EXPECT_EQ("ab", tensorstore::LongestPrefix(KeyRange("abc", "abe")));
EXPECT_EQ("ab\xff", tensorstore::LongestPrefix(KeyRange("ab\xff", "ac")));
EXPECT_EQ("ab\xff\xff",
tensorstore::LongestPrefix(KeyRange("ab\xff\xff", "ac")));
EXPECT_EQ("\xff", tensorstore::LongestPrefix(KeyRange("\xff", "")));
EXPECT_EQ("\xff\xff", tensorstore::LongestPrefix(KeyRange("\xff\xff", "")));
}
TEST(KeyRangeTest, Ostream) {
EXPECT_EQ("[\"a\", \"b\")", tensorstore::StrCat(KeyRange("a", "b")));
EXPECT_EQ("[\"a\", \"ba\")", tensorstore::StrCat(KeyRange("a", "ba")));
}
TEST(KeyRangeTest, CompareKeyAndExclusiveMax) {
EXPECT_THAT(KeyRange::CompareKeyAndExclusiveMax("a", "a"),
::testing::Eq(absl::weak_ordering::equivalent));
EXPECT_THAT(KeyRange::CompareKeyAndExclusiveMax("a", "b"),
::testing::Eq(absl::weak_ordering::less));
EXPECT_THAT(KeyRange::CompareKeyAndExclusiveMax("b", "a"),
::testing::Eq(absl::weak_ordering::greater));
EXPECT_THAT(KeyRange::CompareKeyAndExclusiveMax("", ""),
::testing::Eq(absl::weak_ordering::less));
EXPECT_THAT(KeyRange::CompareKeyAndExclusiveMax("a", ""),
::testing::Eq(absl::weak_ordering::less));
EXPECT_THAT(KeyRange::CompareExclusiveMaxAndKey("a", "a"),
::testing::Eq(absl::weak_ordering::equivalent));
EXPECT_THAT(KeyRange::CompareExclusiveMaxAndKey("a", "b"),
::testing::Eq(absl::weak_ordering::less));
EXPECT_THAT(KeyRange::CompareExclusiveMaxAndKey("b", "a"),
::testing::Eq(absl::weak_ordering::greater));
EXPECT_THAT(KeyRange::CompareExclusiveMaxAndKey("", ""),
::testing::Eq(absl::weak_ordering::greater));
EXPECT_THAT(KeyRange::CompareExclusiveMaxAndKey("", "a"),
::testing::Eq(absl::weak_ordering::greater));
}
TEST(KeyRangeTest, CompareExclusiveMax) {
EXPECT_THAT(KeyRange::CompareExclusiveMax("", ""),
::testing::Eq(absl::weak_ordering::equivalent));
EXPECT_THAT(KeyRange::CompareExclusiveMax("a", "a"),
::testing::Eq(absl::weak_ordering::equivalent));
EXPECT_THAT(KeyRange::CompareExclusiveMax("a", "b"),
::testing::Eq(absl::weak_ordering::less));
EXPECT_THAT(KeyRange::CompareExclusiveMax("b", "a"),
::testing::Eq(absl::weak_ordering::greater));
EXPECT_THAT(KeyRange::CompareExclusiveMax("a", ""),
::testing::Eq(absl::weak_ordering::less));
EXPECT_THAT(KeyRange::CompareExclusiveMax("", "a"),
::testing::Eq(absl::weak_ordering::greater));
}
TEST(KeyRangeTest, AddPrefix) {
EXPECT_THAT(KeyRange::AddPrefix("", KeyRange("a", "b")),
::testing::Eq(KeyRange("a", "b")));
EXPECT_THAT(KeyRange::AddPrefix("x", KeyRange("a", "b")),
::testing::Eq(KeyRange("xa", "xb")));
EXPECT_THAT(KeyRange::AddPrefix("x", KeyRange("a", "")),
::testing::Eq(KeyRange("xa", "y")));
}
TEST(KeyRangeTest, EmptyRange) {
auto range = KeyRange::EmptyRange();
EXPECT_TRUE(range.empty());
EXPECT_EQ(range.inclusive_min, range.exclusive_max);
}
TEST(KeyRangeTest, RemovePrefix) {
EXPECT_THAT(KeyRange::RemovePrefix("", KeyRange("a", "b")),
::testing::Eq(KeyRange("a", "b")));
EXPECT_THAT(KeyRange::RemovePrefix("a/", KeyRange("a/b", "a/d")),
::testing::Eq(KeyRange("b", "d")));
EXPECT_THAT(KeyRange::RemovePrefix("a/b", KeyRange("a/b", "a/d")),
::testing::Eq(KeyRange()));
EXPECT_THAT(KeyRange::RemovePrefix("a/d", KeyRange("a/b", "a/d")),
::testing::Eq(KeyRange::EmptyRange()));
EXPECT_THAT(KeyRange::RemovePrefix("a/bc", KeyRange("a/b", "a/bcb")),
::testing::Eq(KeyRange("", "b")));
EXPECT_THAT(KeyRange::RemovePrefix("x", KeyRange("xa", "y")),
::testing::Eq(KeyRange("a", "")));
EXPECT_THAT(KeyRange::RemovePrefix("ab", KeyRange::Prefix("ab")),
::testing::Eq(KeyRange()));
EXPECT_THAT(KeyRange::RemovePrefix("ab", KeyRange::Prefix("ab\xff")),
::testing::Eq(KeyRange("\xff", "")));
}
TEST(KeyRangeTest, RemovePrefixLength) {
EXPECT_THAT(KeyRange::RemovePrefixLength(0, KeyRange("a", "b")),
::testing::Eq(KeyRange("a", "b")));
EXPECT_THAT(KeyRange::RemovePrefixLength(2, KeyRange("a/b", "a/d")),
::testing::Eq(KeyRange("b", "d")));
EXPECT_THAT(KeyRange::RemovePrefixLength(3, KeyRange("a/b", "a/d")),
::testing::Eq(KeyRange()));
EXPECT_THAT(KeyRange::RemovePrefixLength(4, KeyRange("a/b", "a/bcb")),
::testing::Eq(KeyRange("", "b")));
EXPECT_THAT(KeyRange::RemovePrefixLength(1, KeyRange("xa", "y")),
::testing::Eq(KeyRange("a", "")));
EXPECT_THAT(KeyRange::RemovePrefixLength(2, KeyRange::Prefix("ab")),
::testing::Eq(KeyRange()));
EXPECT_THAT(KeyRange::RemovePrefixLength(2, KeyRange::Prefix("ab\xff")),
::testing::Eq(KeyRange("\xff", "")));
}
TEST(KeyRangeTest, Singleton) {
auto r = KeyRange::Singleton("x");
EXPECT_TRUE(Contains(r, "x"));
EXPECT_FALSE(Contains(r, KeyRange::Successor("x")));
EXPECT_EQ(KeyRange("x", KeyRange::Successor("x")), r);
EXPECT_TRUE(KeyRange::Singleton("x").is_singleton());
EXPECT_FALSE(KeyRange::Singleton("y").full());
EXPECT_FALSE(KeyRange::Singleton("x").is_non_empty_prefix());
}
} |
593 | cpp | google/tensorstore | object_metadata | tensorstore/kvstore/gcs_http/object_metadata.cc | tensorstore/kvstore/gcs_http/object_metadata_test.cc | #ifndef TENSORSTORE_KVSTORE_GCS_HTTP_OBJECT_METADATA_H_
#define TENSORSTORE_KVSTORE_GCS_HTTP_OBJECT_METADATA_H_
#include <stdint.h>
#include <string>
#include <string_view>
#include "absl/container/btree_map.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_kvstore_gcs_http {
struct ObjectMetadata {
std::string name;
std::string md5_hash;
std::string crc32c;
uint64_t size = 0;
int64_t generation = 0;
int64_t metageneration = 0;
absl::Time time_created = absl::InfinitePast();
absl::Time updated = absl::InfinitePast();
absl::Time time_deleted = absl::InfinitePast();
using ToJsonOptions = IncludeDefaults;
using FromJsonOptions = internal_json_binding::NoOptions;
TENSORSTORE_DECLARE_JSON_DEFAULT_BINDER(
ObjectMetadata,
internal_kvstore_gcs_http::ObjectMetadata::FromJsonOptions,
internal_kvstore_gcs_http::ObjectMetadata::ToJsonOptions)
};
Result<ObjectMetadata> ParseObjectMetadata(std::string_view source);
void SetObjectMetadataFromHeaders(
const absl::btree_multimap<std::string, std::string>& headers,
ObjectMetadata* result);
}
}
#endif
#include "tensorstore/kvstore/gcs_http/object_metadata.h"
#include <stdint.h>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include "absl/container/btree_map.h"
#include "absl/status/status.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/http/http_header.h"
#include "tensorstore/internal/json/json.h"
#include "tensorstore/internal/json_binding/absl_time.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_kvstore_gcs_http {
using ::tensorstore::internal_http::TryParseIntHeader;
using ::tensorstore::internal_json_binding::DefaultInitializedValue;
namespace jb = tensorstore::internal_json_binding;
inline constexpr auto ObjectMetadataBinder = jb::Object(
jb::Member("name", jb::Projection(&ObjectMetadata::name)),
jb::Member("md5Hash", jb::Projection(&ObjectMetadata::md5_hash,
DefaultInitializedValue())),
jb::Member("crc32c", jb::Projection(&ObjectMetadata::crc32c,
DefaultInitializedValue())),
jb::Member("size", jb::Projection(&ObjectMetadata::size,
jb::DefaultInitializedValue(
jb::LooseValueAsBinder))),
jb::Member("generation", jb::Projection(&ObjectMetadata::generation,
jb::DefaultInitializedValue(
jb::LooseValueAsBinder))),
jb::Member("metageneration", jb::Projection(&ObjectMetadata::metageneration,
jb::DefaultInitializedValue(
jb::LooseValueAsBinder))),
jb::Member("timeCreated", jb::Projection(&ObjectMetadata::time_created,
jb::DefaultValue([](auto* x) {
*x = absl::InfinitePast();
}))),
jb::Member("updated", jb::Projection(&ObjectMetadata::updated,
jb::DefaultValue([](auto* x) {
*x = absl::InfinitePast();
}))),
jb::Member("timeDeleted", jb::Projection(&ObjectMetadata::time_deleted,
jb::DefaultValue([](auto* x) {
*x = absl::InfinitePast();
}))),
jb::DiscardExtraMembers);
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(ObjectMetadata,
[](auto is_loading, const auto& options,
auto* obj, ::nlohmann::json* j) {
return ObjectMetadataBinder(
is_loading, options, obj, j);
})
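// Populates `result` from GCS HTTP response headers.  The "x-goog-hash"
// header packs comma-separated "name=value" pairs (crc32c and/or md5).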
void SetObjectMetadataFromHeaders(
const absl::btree_multimap<std::string, std::string>& headers,
ObjectMetadata* result) {
result->size =
TryParseIntHeader<uint64_t>(headers, "content-length").value_or(0);
result->generation =
TryParseIntHeader<int64_t>(headers, "x-goog-generation").value_or(0);
result->metageneration =
TryParseIntHeader<uint64_t>(headers, "x-goog-metageneration").value_or(0);
auto it = headers.find("x-goog-hash");
if (it != headers.end()) {
for (std::string_view kv : absl::StrSplit(it->second, absl::ByChar(','))) {
std::pair<std::string_view, std::string_view> split =
absl::StrSplit(kv, absl::MaxSplits('=', 1));
if (split.first == "crc32c") {
result->crc32c = std::string(split.second);
} else if (split.first == "md5") {
result->md5_hash = std::string(split.second);
}
}
}
}
Result<ObjectMetadata> ParseObjectMetadata(std::string_view source) {
auto json = internal::ParseJson(source);
if (json.is_discarded()) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Failed to parse object metadata: ", source));
}
return jb::FromJson<ObjectMetadata>(std::move(json));
}
}
} | #include "tensorstore/kvstore/gcs_http/object_metadata.h"
#include <string>
#include <gtest/gtest.h>
#include "absl/time/time.h"
#include "tensorstore/util/result.h"
namespace {
using ::tensorstore::internal_kvstore_gcs_http::ParseObjectMetadata;
const char kObjectMetadata[] = R"""({
"acl": [{
"kind": "storage#objectAccessControl",
"id": "acl-id-0",
"selfLink": "https:
"bucket": "foo-bar",
"object": "foo",
"generation": 12345,
"entity": "user-qux",
"role": "OWNER",
"email": "[email protected]",
"entityId": "user-qux-id-123",
"domain": "example.com",
"projectTeam": {
"projectNumber": "4567",
"team": "owners"
},
"etag": "AYX="
}, {
"kind": "storage#objectAccessControl",
"id": "acl-id-1",
"selfLink": "https:
"bucket": "foo-bar",
"object": "foo",
"generation": 12345,
"entity": "user-quux",
"role": "READER",
"email": "[email protected]",
"entityId": "user-quux-id-123",
"domain": "example.com",
"projectTeam": {
"projectNumber": "4567",
"team": "viewers"
},
"etag": "AYX="
}
],
"bucket": "foo-bar",
"cacheControl": "no-cache",
"componentCount": 7,
"contentDisposition": "a-disposition",
"contentEncoding": "an-encoding",
"contentLanguage": "a-language",
"contentType": "application/octet-stream",
"crc32c": "deadbeef",
"customerEncryption": {
"encryptionAlgorithm": "some-algo",
"keySha256": "abc123"
},
"etag": "XYZ=",
"eventBasedHold": true,
"generation": "12345",
"id": "foo-bar/baz/12345",
"kind": "storage#object",
"kmsKeyName": "/foo/bar/baz/key",
"md5Hash": "deaderBeef=",
"mediaLink": "https:
"metadata": {
"foo": "bar",
"baz": "qux"
},
"metageneration": "4",
"name": "baz",
"owner": {
"entity": "user-qux",
"entityId": "user-qux-id-123"
},
"retentionExpirationTime": "2019-01-01T00:00:00Z",
"selfLink": "https:
"size": 102400,
"storageClass": "STANDARD",
"temporaryHold": true,
"timeCreated": "2018-05-19T19:31:14Z",
"timeDeleted": "2018-05-19T19:32:24Z",
"timeStorageClassUpdated": "2018-05-19T19:31:34Z",
"updated": "2018-05-19T19:31:24Z"
})""";
absl::Time AsTime(const std::string& time) {
absl::Time result;
if (absl::ParseTime(absl::RFC3339_full, time, &result, nullptr)) {
return result;
}
return absl::InfinitePast();
}
TEST(ParseObjectMetadata, Basic) {
EXPECT_FALSE(ParseObjectMetadata("").ok());
auto result = ParseObjectMetadata(kObjectMetadata);
ASSERT_TRUE(result.ok()) << result.status();
EXPECT_EQ("baz", result->name);
EXPECT_EQ("deaderBeef=", result->md5_hash);
EXPECT_EQ(102400u, result->size);
EXPECT_EQ(12345, result->generation);
EXPECT_EQ(4, result->metageneration);
EXPECT_EQ(AsTime("2018-05-19T12:31:14-07:00"), result->time_created);
EXPECT_EQ(AsTime("2018-05-19T12:31:24-07:00"), result->updated);
EXPECT_EQ(AsTime("2018-05-19T12:32:24-07:00"), result->time_deleted);
}
const char kObjectMetadata2[] = R"""({
"name": "fafb_v14/fafb_v14_clahe/128_128_160/0-64_1408-1472_896-960",
"kind": "storage#object",
"id": "neuroglancer-fafb-data/fafb_v14/fafb_v14_clahe/128_128_160/0-64_1408-1472_896-960/1540426531840872",
"bucket": "neuroglancer-fafb-data",
"generation": "1540426531840872",
"contentType": "image/jpeg",
"timeCreated": "2018-10-25T00:15:31.840Z",
"updated": "2018-10-25T00:15:31.840Z",
"timeStorageClassUpdated": "2018-10-25T00:15:31.840Z",
"size": "3404"
})""";
TEST(ParseObjectMetadata, Example2) {
EXPECT_FALSE(ParseObjectMetadata("").ok());
auto result = ParseObjectMetadata(kObjectMetadata2);
ASSERT_TRUE(result.ok()) << result.status();
EXPECT_EQ("fafb_v14/fafb_v14_clahe/128_128_160/0-64_1408-1472_896-960",
result->name);
EXPECT_EQ(3404u, result->size);
EXPECT_EQ(1540426531840872, result->generation);
EXPECT_EQ(AsTime("2018-10-24T17:15:31.84-07:00"), result->time_created);
EXPECT_EQ(AsTime("2018-10-24T17:15:31.84-07:00"), result->updated);
EXPECT_EQ(0, result->metageneration);
}
} |
594 | cpp | google/tensorstore | key | tensorstore/kvstore/zarr3_sharding_indexed/key.cc | tensorstore/kvstore/zarr3_sharding_indexed/key_test.cc | #ifndef TENSORSTORE_KVSTORE_ZARR_SHARDING_INDEXED_KEY_H_
#define TENSORSTORE_KVSTORE_ZARR_SHARDING_INDEXED_KEY_H_
#include <stdint.h>
#include <optional>
#include <string>
#include <string_view>
#include "tensorstore/index.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace zarr3_sharding_indexed {
using EntryId = uint32_t;
std::string IndicesToKey(span<const Index> grid_cell_indices);
bool KeyToIndices(std::string_view key, span<Index> grid_cell_indices);
std::optional<EntryId> KeyToEntryId(std::string_view key,
span<const Index> grid_shape);
Result<EntryId> KeyToEntryIdOrError(std::string_view key,
span<const Index> grid_shape);
std::string EntryIdToKey(EntryId entry_id, span<const Index> grid_shape);
EntryId LowerBoundToEntryId(std::string_view key, span<const Index> grid_shape);
std::pair<EntryId, EntryId> KeyRangeToEntryRange(std::string_view inclusive_min,
std::string_view exclusive_max,
span<const Index> grid_shape);
std::string EntryIdToInternalKey(EntryId entry_id);
EntryId InternalKeyToEntryId(std::string_view key);
EntryId InternalKeyLowerBoundToEntryId(std::string_view key,
int64_t num_entries_per_shard);
std::pair<EntryId, EntryId> InternalKeyRangeToEntryRange(
std::string_view inclusive_min, std::string_view exclusive_max,
int64_t num_entries_per_shard);
KeyRange KeyRangeToInternalKeyRange(const KeyRange& range,
span<const Index> grid_shape);
std::string DescribeEntryId(EntryId entry_id, span<const Index> grid_shape);
std::string DescribeKey(std::string_view key, span<const Index> grid_shape);
std::string DescribeInternalKey(std::string_view key,
span<const Index> grid_shape);
}
}
#endif
#include "tensorstore/kvstore/zarr3_sharding_indexed/key.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <cassert>
#include <cstring>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include "absl/base/internal/endian.h"
#include "absl/status/status.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/index.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/extents.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace zarr3_sharding_indexed {
std::string IndicesToKey(span<const Index> grid_cell_indices) {
std::string key;
key.resize(grid_cell_indices.size() * 4);
for (DimensionIndex i = 0; i < grid_cell_indices.size(); ++i) {
absl::big_endian::Store32(key.data() + i * 4, grid_cell_indices[i]);
}
return key;
}
bool KeyToIndices(std::string_view key, span<Index> grid_cell_indices) {
if (key.size() != grid_cell_indices.size() * 4) {
return false;
}
for (DimensionIndex i = 0; i < grid_cell_indices.size(); ++i) {
grid_cell_indices[i] = absl::big_endian::Load32(key.data() + i * 4);
}
return true;
}
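// A key encodes each grid cell index as a big-endian uint32; the entry id
// is the C-order (row-major) flattening of those indices over `grid_shape`.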
std::optional<EntryId> KeyToEntryId(std::string_view key,
span<const Index> grid_shape) {
const DimensionIndex rank = grid_shape.size();
if (rank * sizeof(uint32_t) != key.size()) return {};
EntryId id = 0;
for (DimensionIndex i = 0; i < rank; ++i) {
auto index = absl::big_endian::Load32(key.data() + i * 4);
if (index >= grid_shape[i]) return {};
id *= grid_shape[i];
id += index;
}
return id;
}
Result<EntryId> KeyToEntryIdOrError(std::string_view key,
span<const Index> grid_shape) {
if (auto entry_id = KeyToEntryId(key, grid_shape)) {
return *entry_id;
}
return absl::InvalidArgumentError(
tensorstore::StrCat("Invalid key (grid_shape=", grid_shape,
"): ", tensorstore::QuoteString(key)));
}
std::string EntryIdToKey(EntryId entry_id, span<const Index> grid_shape) {
std::string key;
key.resize(grid_shape.size() * 4);
for (DimensionIndex i = grid_shape.size(); i--;) {
const Index size = grid_shape[i];
absl::big_endian::Store32(key.data() + i * 4, entry_id % size);
entry_id /= size;
}
return key;
}
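// Maps an arbitrary byte string to the first entry id whose key orders at
// or after it.  Short keys are zero-padded; an out-of-range index rounds
// the result up past all entries sharing the more significant digits
// (`remaining_indices_mask` zeroes the remaining positions); extra trailing
// bytes bump the id by one.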
EntryId LowerBoundToEntryId(std::string_view key,
span<const Index> grid_shape) {
char key_padded[kMaxRank * 4];
const size_t full_key_size = grid_shape.size() * 4;
const size_t key_bytes_to_copy = std::min(full_key_size, key.size());
std::memcpy(key_padded, key.data(), key_bytes_to_copy);
std::memset(key_padded + key_bytes_to_copy, 0,
full_key_size - key_bytes_to_copy);
EntryId entry_id = 0;
EntryId remaining_indices_mask = ~static_cast<EntryId>(0);
EntryId max_entry_id = 1;
for (DimensionIndex i = 0; i < grid_shape.size(); ++i) {
const EntryId size = grid_shape[i];
max_entry_id *= size;
EntryId index = absl::big_endian::Load32(&key_padded[i * 4]);
entry_id *= size;
if (index >= size) {
entry_id += (size & remaining_indices_mask);
remaining_indices_mask = 0;
} else {
entry_id += (index & remaining_indices_mask);
}
}
assert(entry_id <= max_entry_id);
if (key.size() > full_key_size) {
if (entry_id < max_entry_id) {
++entry_id;
}
}
return entry_id;
}
std::pair<EntryId, EntryId> KeyRangeToEntryRange(std::string_view inclusive_min,
std::string_view exclusive_max,
span<const Index> grid_shape) {
EntryId lower_bound = LowerBoundToEntryId(inclusive_min, grid_shape);
EntryId upper_bound;
if (exclusive_max.empty()) {
upper_bound = static_cast<EntryId>(ProductOfExtents(grid_shape));
} else {
upper_bound = LowerBoundToEntryId(exclusive_max, grid_shape);
}
return {lower_bound, upper_bound};
}
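// Internal keys are the 4-byte big-endian encoding of the entry id, so the
// lower bound is the id itself (short keys are zero-padded), clamped to
// `num_entries_per_shard`, plus one when trailing bytes follow a full
// 4-byte prefix.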
EntryId InternalKeyLowerBoundToEntryId(std::string_view key,
int64_t num_entries_per_shard) {
char key_bytes[4] = {};
std::memcpy(key_bytes, key.data(),
std::min(static_cast<size_t>(4), key.size()));
EntryId entry_id = absl::big_endian::Load32(key_bytes);
if (entry_id > num_entries_per_shard) {
entry_id = num_entries_per_shard;
}
if (key.size() > 4 && entry_id < num_entries_per_shard) {
++entry_id;
}
return entry_id;
}
std::pair<EntryId, EntryId> InternalKeyRangeToEntryRange(
std::string_view inclusive_min, std::string_view exclusive_max,
int64_t num_entries_per_shard) {
return {InternalKeyLowerBoundToEntryId(inclusive_min, num_entries_per_shard),
exclusive_max.empty() ? EntryId(num_entries_per_shard)
: InternalKeyLowerBoundToEntryId(
exclusive_max, num_entries_per_shard)};
}
std::string EntryIdToInternalKey(EntryId entry_id) {
std::string key;
key.resize(4);
absl::big_endian::Store32(key.data(), entry_id);
return key;
}
EntryId InternalKeyToEntryId(std::string_view key) {
assert(key.size() == 4);
return static_cast<EntryId>(absl::big_endian::Load32(key.data()));
}
KeyRange KeyRangeToInternalKeyRange(const KeyRange& range,
span<const Index> grid_shape) {
auto [inclusive_min_entry, exclusive_max_entry] = KeyRangeToEntryRange(
range.inclusive_min, range.exclusive_max, grid_shape);
return KeyRange{EntryIdToInternalKey(inclusive_min_entry),
EntryIdToInternalKey(exclusive_max_entry)};
}
std::string DescribeEntryId(EntryId entry_id, span<const Index> grid_shape) {
Index indices[kMaxRank];
span<Index> indices_span(&indices[0], grid_shape.size());
GetContiguousIndices<c_order, Index>(entry_id, grid_shape, indices_span);
return tensorstore::StrCat("shard entry ", indices_span, "/", grid_shape);
}
std::string DescribeKey(std::string_view key, span<const Index> grid_shape) {
if (auto entry_id = KeyToEntryId(key, grid_shape)) {
return DescribeEntryId(*entry_id, grid_shape);
}
return tensorstore::StrCat("invalid shard entry ",
tensorstore::QuoteString(key), "/", grid_shape);
}
std::string DescribeInternalKey(std::string_view key,
span<const Index> grid_shape) {
return DescribeEntryId(InternalKeyToEntryId(key), grid_shape);
}
}
} | #include "tensorstore/kvstore/zarr3_sharding_indexed/key.h"
#include <optional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index.h"
#include "tensorstore/kvstore/key_range.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::KeyRange;
using ::tensorstore::zarr3_sharding_indexed::EntryId;
using ::tensorstore::zarr3_sharding_indexed::EntryIdToInternalKey;
using ::tensorstore::zarr3_sharding_indexed::EntryIdToKey;
using ::tensorstore::zarr3_sharding_indexed::IndicesToKey;
using ::tensorstore::zarr3_sharding_indexed::InternalKeyLowerBoundToEntryId;
using ::tensorstore::zarr3_sharding_indexed::InternalKeyRangeToEntryRange;
using ::tensorstore::zarr3_sharding_indexed::InternalKeyToEntryId;
using ::tensorstore::zarr3_sharding_indexed::KeyRangeToEntryRange;
using ::tensorstore::zarr3_sharding_indexed::KeyRangeToInternalKeyRange;
using ::tensorstore::zarr3_sharding_indexed::KeyToEntryId;
using ::tensorstore::zarr3_sharding_indexed::KeyToIndices;
using ::tensorstore::zarr3_sharding_indexed::LowerBoundToEntryId;
TEST(KeyToEntryIdTest, Basic) {
EntryId entry_id = 1 * 5 * 6 + 2 * 6 + 3;
std::string key{0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3};
Index grid_shape[] = {4, 5, 6};
EXPECT_THAT(KeyToEntryId(key, grid_shape), ::testing::Optional(entry_id));
EXPECT_THAT(EntryIdToKey(entry_id, grid_shape), ::testing::Eq(key));
}
TEST(KeyToEntryIdTest, OutOfRange) {
EXPECT_THAT(KeyToEntryId(std::string{0, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0, 3},
{{4, 5, 6}}),
::testing::Eq(std::nullopt));
}
TEST(KeyToEntryIdTest, Invalid) {
EXPECT_THAT(
KeyToEntryId(std::string{0, 0, 0, 4, 0, 0, 0, 2, 0, 0, 0}, {{4, 5, 6}}),
::testing::Eq(std::nullopt));
}
TEST(IndicesToKeyTest, Basic) {
const Index indices[] = {1, 2, 3};
std::string key{0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3};
EXPECT_THAT(IndicesToKey(indices), ::testing::Eq(key));
Index decoded_indices[3];
EXPECT_TRUE(KeyToIndices(key, decoded_indices));
EXPECT_THAT(decoded_indices, ::testing::ElementsAreArray(indices));
EXPECT_FALSE(KeyToIndices(key.substr(1), decoded_indices));
}
TEST(LowerBoundToEntryId, Exact) {
Index grid_shape[] = {4, 5, 6};
EXPECT_THAT(LowerBoundToEntryId(
std::string{0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3}, grid_shape),
::testing::Eq(1 * 5 * 6 + 2 * 6 + 3));
}
TEST(LowerBoundToEntryId, Longer) {
Index grid_shape[] = {4, 5, 6};
EXPECT_THAT(
LowerBoundToEntryId(std::string{0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0},
grid_shape),
::testing::Eq(1 * 5 * 6 + 2 * 6 + 4));
}
TEST(KeyRangeToEntryRange, Full) {
Index grid_shape[] = {4, 5, 6};
EXPECT_THAT(KeyRangeToEntryRange("", "", grid_shape),
::testing::Pair(0, 4 * 5 * 6));
}
TEST(KeyRangeToEntryRange, Partial) {
Index grid_shape[] = {4, 5, 6};
EXPECT_THAT(
KeyRangeToEntryRange(
std::string{
0, 0, 0, 2,
0, 0, 0, 3,
0, 0, 0, 4,
},
std::string{
0, 0, 0, 2,
0, 0, 0, 4,
0, 0, 0, 5,
},
grid_shape),
::testing::Pair(2 * (5 * 6) + 3 * 6 + 4, 2 * (5 * 6) + 4 * 6 + 5));
EXPECT_THAT(KeyRangeToInternalKeyRange(KeyRange{std::string{
0, 0, 0, 2,
0, 0, 0, 3,
0, 0, 0, 4,
},
std::string{
0, 0, 0, 2,
0, 0, 0, 4,
0, 0, 0, 5,
}},
grid_shape),
KeyRange(EntryIdToInternalKey(2 * (5 * 6) + 3 * 6 + 4),
EntryIdToInternalKey(2 * (5 * 6) + 4 * 6 + 5)));
}
TEST(EntryIdToInternalKeyTest, Basic) {
EntryId entry_id = 0x01020304;
std::string internal_key{0x01, 0x02, 0x03, 0x04};
EXPECT_THAT(EntryIdToInternalKey(entry_id), ::testing::Eq(internal_key));
EXPECT_THAT(InternalKeyToEntryId(internal_key), ::testing::Eq(entry_id));
}
TEST(InternalKeyLowerBoundToEntryIdTest, Basic) {
EXPECT_THAT(InternalKeyLowerBoundToEntryId(
std::string{0x01, 0x02, 0x03, 0x04}, 0x88888888),
::testing::Eq(0x01020304));
EXPECT_THAT(InternalKeyLowerBoundToEntryId(
std::string{0x01, 0x02, 0x03, 0x04, 0x0}, 0x88888888),
::testing::Eq(0x01020304 + 1));
EXPECT_THAT(
InternalKeyLowerBoundToEntryId(std::string{0x01, 0x02, 0x03}, 0x88888888),
::testing::Eq(0x01020300));
EXPECT_THAT(InternalKeyLowerBoundToEntryId(
std::string{0x01, 0x02, 0x03, 0x04}, 0x01020302),
::testing::Eq(0x01020302));
}
TEST(InternalKeyRangeToEntryRange, Basic) {
EXPECT_THAT(InternalKeyRangeToEntryRange(std::string{0x01, 0x02, 0x03, 0x04},
std::string{0x01, 0x02, 0x03, 0x07},
0x88888888),
::testing::Pair(0x01020304, 0x01020307));
EXPECT_THAT(InternalKeyRangeToEntryRange(std::string{0x01, 0x02, 0x03, 0x04},
{}, 0x88888888),
::testing::Pair(0x01020304, 0x88888888));
}
} |
595 | cpp | google/tensorstore | shard_format | tensorstore/kvstore/zarr3_sharding_indexed/shard_format.cc | tensorstore/kvstore/zarr3_sharding_indexed/shard_format_test.cc | #ifndef TENSORSTORE_KVSTORE_ZARR_SHARDING_INDEXED_SHARD_FORMAT_H_
#define TENSORSTORE_KVSTORE_ZARR_SHARDING_INDEXED_SHARD_FORMAT_H_
#include <stddef.h>
#include <stdint.h>
#include <cassert>
#include <limits>
#include <optional>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "tensorstore/array.h"
#include "tensorstore/driver/zarr3/codec/codec.h"
#include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/zarr3_sharding_indexed/key.h"
#include "tensorstore/util/extents.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace zarr3_sharding_indexed {
using internal_zarr3::ZarrCodecChain;
using internal_zarr3::ZarrCodecChainSpec;
constexpr int64_t kMaxNumEntries = 1024 * 1024 * 1024;
enum ShardIndexLocation {
kStart,
kEnd,
};
TENSORSTORE_DECLARE_JSON_BINDER(ShardIndexLocationJsonBinder,
ShardIndexLocation,
internal_json_binding::NoOptions,
internal_json_binding::NoOptions);
struct ShardIndexEntry {
uint64_t offset = std::numeric_limits<uint64_t>::max();
uint64_t length = std::numeric_limits<uint64_t>::max();
static constexpr ShardIndexEntry Missing() { return ShardIndexEntry{}; }
bool IsMissing() const {
return offset == std::numeric_limits<uint64_t>::max() &&
length == std::numeric_limits<uint64_t>::max();
}
absl::Status Validate(EntryId entry_id) const;
absl::Status Validate(EntryId entry_id, int64_t total_size) const;
ByteRange AsByteRange() const {
return ByteRange{static_cast<int64_t>(offset),
static_cast<int64_t>(offset + length)};
}
};
struct ShardIndex {
ShardIndexEntry operator[](int64_t i) const {
assert(0 <= i &&
i < ProductOfExtents(entries.shape().first(entries.rank() - 1)));
return ShardIndexEntry{entries.data()[i * 2], entries.data()[i * 2 + 1]};
}
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.entries);
};
SharedArray<const uint64_t> entries;
};
absl::Status ValidateGridShape(span<const Index> grid_shape);
Result<ZarrCodecChain::Ptr> InitializeIndexCodecChain(
const ZarrCodecChainSpec& codec_chain_spec, DimensionIndex grid_rank,
ZarrCodecChainSpec* resolved_codec_chain_spec = nullptr);
struct ShardIndexParameters {
span<const Index> grid_shape() const {
assert(index_shape.size() >= 0);
return {index_shape.data(), static_cast<ptrdiff_t>(index_shape.size() - 1)};
}
absl::Status InitializeIndexShape(span<const Index> grid_shape);
absl::Status Initialize(const ZarrCodecChain& codec_chain,
span<const Index> grid_shape);
absl::Status Initialize(
const ZarrCodecChainSpec& codec_chain_spec, span<const Index> grid_shape,
ZarrCodecChainSpec* resolved_codec_chain_spec = nullptr);
ShardIndexLocation index_location;
int64_t num_entries;
std::vector<Index> index_shape;
ZarrCodecChain::Ptr index_codec_chain;
ZarrCodecChain::PreparedState::Ptr index_codec_state;
};
Result<ShardIndex> DecodeShardIndex(const absl::Cord& input,
const ShardIndexParameters& parameters);
Result<ShardIndex> DecodeShardIndexFromFullShard(
const absl::Cord& shard_data,
const ShardIndexParameters& shard_index_parameters);
using ShardEntry = std::optional<absl::Cord>;
struct ShardEntries {
std::vector<ShardEntry> entries;
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.entries);
};
};
Result<ShardEntries> DecodeShard(
const absl::Cord& shard_data,
const ShardIndexParameters& shard_index_parameters);
Result<std::optional<absl::Cord>> EncodeShard(
const ShardEntries& entries,
const ShardIndexParameters& shard_index_parameters);
}
namespace internal_json_binding {
template <>
constexpr inline auto
DefaultBinder<zarr3_sharding_indexed::ShardIndexLocation> =
zarr3_sharding_indexed::ShardIndexLocationJsonBinder;
}
}
#endif
#include "tensorstore/kvstore/zarr3_sharding_indexed/shard_format.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <cassert>
#include <limits>
#include <optional>
#include <utility>
#include <vector>
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "riegeli/bytes/cord_writer.h"
#include "riegeli/bytes/wrapping_writer.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/driver/zarr3/codec/codec.h"
#include "tensorstore/driver/zarr3/codec/codec_spec.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/enum.h"
#include "tensorstore/internal/unowned_to_shared.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/zarr3_sharding_indexed/key.h"
#include "tensorstore/rank.h"
#include "tensorstore/static_cast.h"
#include "tensorstore/util/extents.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace zarr3_sharding_indexed {
namespace jb = ::tensorstore::internal_json_binding;
TENSORSTORE_DEFINE_JSON_BINDER(ShardIndexLocationJsonBinder,
jb::Enum<ShardIndexLocation, const char*>({
{ShardIndexLocation::kStart, "start"},
{ShardIndexLocation::kEnd, "end"},
}));
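// Rejects entries whose offset+length overflows uint64 or exceeds the
// int64 range used for byte ranges.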
absl::Status ShardIndexEntry::Validate(EntryId entry_id) const {
if (!IsMissing()) {
uint64_t exclusive_max;
if (internal::AddOverflow(offset, length, &exclusive_max) ||
exclusive_max > std::numeric_limits<int64_t>::max()) {
return absl::DataLossError(absl::StrFormat(
"Invalid shard index entry %d with offset=%d, length=%d", entry_id,
offset, length));
}
}
return absl::OkStatus();
}
absl::Status ShardIndexEntry::Validate(EntryId entry_id,
int64_t total_size) const {
if (auto status = Validate(entry_id); !status.ok()) return status;
auto byte_range = AsByteRange();
if (byte_range.exclusive_max > total_size) {
return absl::DataLossError(tensorstore::StrCat(
"Shard index entry ", entry_id, " with byte range ", byte_range,
" is invalid for shard of size ", total_size));
}
return absl::OkStatus();
}
Result<ShardIndex> DecodeShardIndex(const absl::Cord& input,
const ShardIndexParameters& parameters) {
assert(parameters.index_shape.back() == 2);
SharedArray<const void> entries;
TENSORSTORE_ASSIGN_OR_RETURN(
entries,
parameters.index_codec_state->DecodeArray(parameters.index_shape, input));
if (!IsContiguousLayout(entries, c_order)) {
entries = MakeCopy(entries);
}
return ShardIndex{
StaticDataTypeCast<const uint64_t, unchecked>(std::move(entries))};
}
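// The shard index occupies a fixed-size block at the start or end of the
// shard, as selected by `index_location`.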
Result<ShardIndex> DecodeShardIndexFromFullShard(
const absl::Cord& shard_data,
const ShardIndexParameters& shard_index_parameters) {
int64_t shard_index_size =
shard_index_parameters.index_codec_state->encoded_size();
if (shard_index_size > shard_data.size()) {
return absl::DataLossError(absl::StrFormat(
"Existing shard has size of %d bytes, but expected at least %d bytes",
shard_data.size(), shard_index_size));
}
absl::Cord encoded_shard_index;
switch (shard_index_parameters.index_location) {
case ShardIndexLocation::kStart:
encoded_shard_index = shard_data.Subcord(0, shard_index_size);
break;
case ShardIndexLocation::kEnd:
encoded_shard_index = shard_data.Subcord(
shard_data.size() - shard_index_size, shard_index_size);
break;
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto shard_index,
DecodeShardIndex(encoded_shard_index, shard_index_parameters),
tensorstore::MaybeAnnotateStatus(_, "Error decoding shard index"));
return shard_index;
}
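// Encodes the shard index through the index codec chain.  The
// riegeli::WrappingWriter lets the codec close its writer without closing
// the caller's `writer`.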
absl::Status EncodeShardIndex(riegeli::Writer& writer,
const ShardIndex& shard_index,
const ShardIndexParameters& parameters) {
riegeli::WrappingWriter wrapping_writer{&writer};
return parameters.index_codec_state->EncodeArray(shard_index.entries,
wrapping_writer);
}
absl::Status ValidateGridShape(span<const Index> grid_shape) {
if (grid_shape.size() > kMaxRank - 1) {
return absl::InvalidArgumentError(
absl::StrFormat("grid rank of %d exceeds maximum of %d",
grid_shape.size(), kMaxRank - 1));
}
if (ProductOfExtents(grid_shape) > kMaxNumEntries) {
return absl::InvalidArgumentError(
tensorstore::StrCat("grid shape of ", grid_shape, " has more than ",
kMaxNumEntries, " entries"));
}
return absl::OkStatus();
}
Result<ZarrCodecChain::Ptr> InitializeIndexCodecChain(
const ZarrCodecChainSpec& codec_chain_spec, DimensionIndex grid_rank,
ZarrCodecChainSpec* resolved_codec_chain_spec) {
if (grid_rank > kMaxRank - 1) {
return absl::InvalidArgumentError(absl::StrFormat(
"Rank of %d exceeds maximum ran of %d supported for sharding_indexed",
grid_rank, kMaxRank - 1));
}
static const uint64_t fill_value{std::numeric_limits<uint64_t>::max()};
internal_zarr3::ArrayCodecResolveParameters array_params;
array_params.dtype = dtype_v<uint64_t>;
array_params.rank = grid_rank + 1;
array_params.fill_value =
SharedArray<const void>(internal::UnownedToShared(&fill_value));
internal_zarr3::BytesCodecResolveParameters bytes_params;
return codec_chain_spec.Resolve(std::move(array_params), bytes_params,
resolved_codec_chain_spec);
}
absl::Status ShardIndexParameters::InitializeIndexShape(
span<const Index> grid_shape) {
TENSORSTORE_RETURN_IF_ERROR(ValidateGridShape(grid_shape));
num_entries = ProductOfExtents(grid_shape);
index_shape.resize(grid_shape.size() + 1);
std::copy(grid_shape.begin(), grid_shape.end(), index_shape.begin());
index_shape.back() = 2;
return absl::OkStatus();
}
absl::Status ShardIndexParameters::Initialize(
const ZarrCodecChainSpec& codec_chain_spec, span<const Index> grid_shape,
ZarrCodecChainSpec* resolved_codec_chain_spec) {
TENSORSTORE_ASSIGN_OR_RETURN(
index_codec_chain,
InitializeIndexCodecChain(codec_chain_spec, grid_shape.size(),
resolved_codec_chain_spec));
  return Initialize(*index_codec_chain, grid_shape);
}
absl::Status ShardIndexParameters::Initialize(const ZarrCodecChain& codec_chain,
span<const Index> grid_shape) {
if (index_codec_chain.get() != &codec_chain) {
index_codec_chain.reset(&codec_chain);
}
TENSORSTORE_RETURN_IF_ERROR(InitializeIndexShape(grid_shape));
TENSORSTORE_ASSIGN_OR_RETURN(index_codec_state,
index_codec_chain->Prepare(index_shape));
if (index_codec_state->encoded_size() == -1) {
return absl::InvalidArgumentError(
"Invalid index_codecs specified: only fixed-size encodings are "
"supported");
}
return absl::OkStatus();
}
Result<ShardEntries> DecodeShard(
const absl::Cord& shard_data,
const ShardIndexParameters& shard_index_parameters) {
const int64_t num_entries = shard_index_parameters.num_entries;
ShardEntries entries;
entries.entries.resize(num_entries);
TENSORSTORE_ASSIGN_OR_RETURN(
auto shard_index,
DecodeShardIndexFromFullShard(shard_data, shard_index_parameters));
for (int64_t i = 0; i < num_entries; ++i) {
const auto entry_index = shard_index[i];
if (entry_index.IsMissing()) continue;
TENSORSTORE_RETURN_IF_ERROR(entry_index.Validate(i, shard_data.size()));
entries.entries[i] =
internal::GetSubCord(shard_data, entry_index.AsByteRange());
}
return entries;
}
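// Serializes `entries` into a single shard: present entries are
// concatenated in order, and a shard index of (offset, length) pairs
// (missing entries store ~0 for both) is prepended or appended per
// `index_location`.  Returns std::nullopt if no entry is present.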
Result<std::optional<absl::Cord>> EncodeShard(
const ShardEntries& entries,
const ShardIndexParameters& shard_index_parameters) {
int64_t shard_index_size =
shard_index_parameters.index_codec_state->encoded_size();
absl::Cord shard_data;
riegeli::CordWriter writer{&shard_data};
auto shard_index_array = AllocateArray<uint64_t>(
shard_index_parameters.index_shape, c_order, default_init);
bool has_entry = false;
uint64_t offset =
shard_index_parameters.index_location == ShardIndexLocation::kStart
? shard_index_size
: 0;
for (size_t i = 0; i < entries.entries.size(); ++i) {
const auto& entry = entries.entries[i];
uint64_t entry_offset;
uint64_t length;
if (entry) {
has_entry = true;
length = entry->size();
entry_offset = offset;
offset += length;
ABSL_CHECK(writer.Write(*entry));
} else {
entry_offset = std::numeric_limits<uint64_t>::max();
length = std::numeric_limits<uint64_t>::max();
}
shard_index_array.data()[i * 2] = entry_offset;
shard_index_array.data()[i * 2 + 1] = length;
}
if (!has_entry) return std::nullopt;
switch (shard_index_parameters.index_location) {
case ShardIndexLocation::kStart: {
ABSL_CHECK(writer.Close());
absl::Cord encoded_shard_index;
riegeli::CordWriter index_writer{&encoded_shard_index};
TENSORSTORE_RETURN_IF_ERROR(EncodeShardIndex(
index_writer, ShardIndex{std::move(shard_index_array)},
shard_index_parameters));
ABSL_CHECK(index_writer.Close());
encoded_shard_index.Append(std::move(shard_data));
shard_data = std::move(encoded_shard_index);
break;
}
case ShardIndexLocation::kEnd: {
TENSORSTORE_RETURN_IF_ERROR(
EncodeShardIndex(writer, ShardIndex{std::move(shard_index_array)},
shard_index_parameters));
ABSL_CHECK(writer.Close());
break;
}
}
return shard_data;
}
}
} | #include "tensorstore/kvstore/zarr3_sharding_indexed/shard_format.h"
#include <optional>
#include <string_view>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include <nlohmann/json.hpp>
#include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"
#include "tensorstore/driver/zarr3/codec/codec_test_util.h"
#include "tensorstore/index.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Result;
using ::tensorstore::internal_zarr3::GetDefaultBytesCodecJson;
using ::tensorstore::internal_zarr3::ZarrCodecChainSpec;
using ::tensorstore::zarr3_sharding_indexed::DecodeShard;
using ::tensorstore::zarr3_sharding_indexed::EncodeShard;
using ::tensorstore::zarr3_sharding_indexed::ShardEntries;
using ::tensorstore::zarr3_sharding_indexed::ShardIndexLocation;
using ::tensorstore::zarr3_sharding_indexed::ShardIndexParameters;
Result<ShardIndexParameters> GetParams(
ShardIndexLocation index_location, std::vector<Index> grid_shape,
::nlohmann::json::array_t index_codecs_json = {GetDefaultBytesCodecJson(),
{{"name", "crc32c"}}}) {
TENSORSTORE_ASSIGN_OR_RETURN(auto index_codecs,
ZarrCodecChainSpec::FromJson(index_codecs_json));
ShardIndexParameters p;
p.index_location = index_location;
TENSORSTORE_RETURN_IF_ERROR(p.Initialize(index_codecs, grid_shape));
return p;
}
TEST(InitializeTest, Success) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto p,
GetParams(ShardIndexLocation::kEnd, {2, 3}));
EXPECT_EQ(6, p.num_entries);
EXPECT_THAT(p.index_shape, ::testing::ElementsAre(2, 3, 2));
}
TEST(InitializeTest, InvalidIndexCodecs) {
EXPECT_THAT(
GetParams(ShardIndexLocation::kEnd, {2, 3},
{GetDefaultBytesCodecJson(),
{{"name", "gzip"}, {"configuration", {{"level", 5}}}}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*: only fixed-size encodings are supported"));
}
TEST(InitializeTest, InvalidGridShape) {
EXPECT_THAT(
GetParams(ShardIndexLocation::kEnd, {1024 * 1024 * 1024 + 1}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"grid shape of .* has more than 1073741824 entries"));
}
TEST(EncodeShardTest, RoundTrip) {
for (auto index_location :
{ShardIndexLocation::kStart, ShardIndexLocation::kEnd}) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto p, GetParams(index_location, {2, 3}));
ShardEntries entries;
entries.entries = {
absl::Cord("(0, 0)"), absl::Cord("(0, 1)"), std::nullopt,
std::nullopt, absl::Cord("(1, 1)"), std::nullopt
};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded, EncodeShard(entries, p));
ASSERT_TRUE(encoded.has_value());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto decoded_entries,
DecodeShard(*encoded, p));
EXPECT_THAT(decoded_entries.entries,
::testing::ElementsAreArray(entries.entries));
}
}
TEST(EncodeShardTest, RoundTripEmpty) {
for (auto index_location :
{ShardIndexLocation::kStart, ShardIndexLocation::kEnd}) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto p, GetParams(index_location, {2, 3}));
ShardEntries entries;
entries.entries.resize(6);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto encoded, EncodeShard(entries, p));
ASSERT_FALSE(encoded.has_value());
}
}
TEST(DecodeShardTest, TooShort) {
absl::Cord encoded(std::string{1, 2, 3});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto p,
GetParams(ShardIndexLocation::kEnd, {2}));
EXPECT_THAT(DecodeShard(encoded, p),
MatchesStatus(absl::StatusCode::kDataLoss,
"Existing shard has size of 3 bytes, but expected "
"at least .* bytes"));
}
TEST(DecodeShardTest, ByteRangeOutOfRange) {
absl::Cord encoded(std::string{
0, 0, 0, 0, 0, 0, 0, 0,
17, 0, 0, 0, 0, 0, 0, 0,
});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto p, GetParams(ShardIndexLocation::kEnd, {1},
{{{"name", "bytes"},
{"configuration", {{"endian", "little"}}}}}));
EXPECT_THAT(
DecodeShard(encoded, p),
MatchesStatus(absl::StatusCode::kDataLoss,
"Shard index entry 0 with byte range .* is invalid .*"));
}
TEST(DecodeShardTest, ByteRangeInvalid) {
unsigned char data[] = {
0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1, 0, 0, 0, 0, 0, 0, 0,
};
absl::Cord encoded(
std::string_view(reinterpret_cast<const char*>(data), sizeof(data)));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto p, GetParams(ShardIndexLocation::kEnd, {1},
{{{"name", "bytes"},
{"configuration", {{"endian", "little"}}}}}));
EXPECT_THAT(DecodeShard(encoded, p),
MatchesStatus(absl::StatusCode::kDataLoss,
"Invalid shard index entry 0 with .*"));
}
} |
596 | cpp | google/tensorstore | zarr3_sharding_indexed | tensorstore/kvstore/zarr3_sharding_indexed/zarr3_sharding_indexed.cc | tensorstore/kvstore/zarr3_sharding_indexed/zarr3_sharding_indexed_test.cc | #ifndef TENSORSTORE_KVSTORE_ZARR_SHARDING_INDEXED_ZARR_SHARDING_INDEXED_H_
#define TENSORSTORE_KVSTORE_ZARR_SHARDING_INDEXED_ZARR_SHARDING_INDEXED_H_
#include <stdint.h>
#include <string>
#include <string_view>
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/zarr3_sharding_indexed/shard_format.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace zarr3_sharding_indexed {
struct ShardedKeyValueStoreParameters {
kvstore::DriverPtr base_kvstore;
std::string base_kvstore_path;
Executor executor;
internal::CachePool::WeakPtr cache_pool;
ShardIndexParameters index_params;
};
kvstore::DriverPtr GetShardedKeyValueStore(
ShardedKeyValueStoreParameters&& parameters);
}
}
#endif
#include "tensorstore/kvstore/zarr3_sharding_indexed/zarr3_sharding_indexed.h"
#include <stddef.h>
#include <stdint.h>
#include <cassert>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/batch.h"
#include "tensorstore/context.h"
#include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/cache/async_cache.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/cache/cache_pool_resource.h"
#include "tensorstore/internal/cache/kvs_backed_cache.h"
#include "tensorstore/internal/cache_key/cache_key.h"
#include "tensorstore/internal/data_copy_concurrency_resource.h"
#include "tensorstore/internal/estimate_heap_usage/estimate_heap_usage.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/dimension_indexed.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/mutex.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/kvstore/batch_util.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_modify_write.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/registry.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/supported_features.h"
#include "tensorstore/kvstore/transaction.h"
#include "tensorstore/kvstore/zarr3_sharding_indexed/key.h"
#include "tensorstore/kvstore/zarr3_sharding_indexed/shard_format.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/bit_vec.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/flow_sender_operation_state.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/garbage_collection/garbage_collection.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
#include "tensorstore/internal/cache_key/std_vector.h"
#include "tensorstore/internal/estimate_heap_usage/std_optional.h"
#include "tensorstore/internal/estimate_heap_usage/std_vector.h"
#include "tensorstore/serialization/std_vector.h"
#include "tensorstore/util/execution/result_sender.h"
#include "tensorstore/util/garbage_collection/std_vector.h"
namespace tensorstore {
namespace zarr3_sharding_indexed {
namespace {
using ::tensorstore::internal_kvstore::DeleteRangeEntry;
using ::tensorstore::internal_kvstore::kReadModifyWrite;
using ::tensorstore::kvstore::ListEntry;
using ::tensorstore::kvstore::ListReceiver;
class ShardIndexKeyValueStore : public kvstore::Driver {
public:
explicit ShardIndexKeyValueStore(kvstore::DriverPtr base,
ShardIndexLocation index_location,
int64_t index_size_in_bytes)
: base_(std::move(base)),
index_location_(index_location),
index_size_in_bytes_(index_size_in_bytes) {}
Future<kvstore::ReadResult> Read(kvstore::Key key,
kvstore::ReadOptions options) override {
assert(options.byte_range == OptionalByteRangeRequest{});
switch (index_location_) {
case ShardIndexLocation::kStart:
options.byte_range =
OptionalByteRangeRequest::Range(0, index_size_in_bytes_);
break;
case ShardIndexLocation::kEnd:
options.byte_range =
OptionalByteRangeRequest::SuffixLength(index_size_in_bytes_);
break;
}
return MapFutureError(
InlineExecutor{},
[](const absl::Status& status) {
return internal::ConvertInvalidArgumentToFailedPrecondition(status);
},
base_->Read(std::move(key), std::move(options)));
}
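// [Explanatory note, not in the original source] The shard index is a
// fixed-size block of `index_size_in_bytes_` stored at either the start or
// the end of the shard, so the dispatch above reduces to a two-way choice of
// byte range.  A minimal sketch, assuming n == index_size_in_bytes_:
//
//   kStart -> OptionalByteRangeRequest::Range(0, n)      (first n bytes)
//   kEnd   -> OptionalByteRangeRequest::SuffixLength(n)  (last n bytes)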
std::string DescribeKey(std::string_view key) override {
return tensorstore::StrCat("shard index in ", base_->DescribeKey(key));
}
void GarbageCollectionVisit(
garbage_collection::GarbageCollectionVisitor& visitor) const final {
}
kvstore::Driver* base() { return base_.get(); }
private:
kvstore::DriverPtr base_;
ShardIndexLocation index_location_;
int64_t index_size_in_bytes_;
};
class ShardIndexCache
: public internal::KvsBackedCache<ShardIndexCache, internal::AsyncCache> {
using Base = internal::KvsBackedCache<ShardIndexCache, internal::AsyncCache>;
public:
using ReadData = ShardIndex;
class Entry : public Base::Entry {
public:
using OwningCache = ShardIndexCache;
size_t ComputeReadDataSizeInBytes(const void* read_data) override {
const auto& cache = GetOwningCache(*this);
return read_data
? cache.shard_index_params().num_entries * sizeof(uint64_t) * 2
: 0;
}
std::string GetKeyValueStoreKey() override {
return GetOwningCache(*this).base_kvstore_path_;
}
void DoDecode(std::optional<absl::Cord> value,
DecodeReceiver receiver) override {
GetOwningCache(*this).executor()(
[this, value = std::move(value),
receiver = std::move(receiver)]() mutable {
std::shared_ptr<ReadData> read_data;
if (value) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto shard_index,
DecodeShardIndex(*value,
GetOwningCache(*this).shard_index_params()),
static_cast<void>(execution::set_error(receiver, _)));
read_data = std::make_shared<ReadData>(std::move(shard_index));
}
execution::set_value(receiver, std::move(read_data));
});
}
};
Entry* DoAllocateEntry() final { return new Entry; }
size_t DoGetSizeofEntry() final { return sizeof(Entry); }
TransactionNode* DoAllocateTransactionNode(AsyncCache::Entry& entry) final {
ABSL_UNREACHABLE();
}
explicit ShardIndexCache(kvstore::DriverPtr base_kvstore,
std::string base_kvstore_path, Executor executor,
ShardIndexParameters&& params)
: Base(kvstore::DriverPtr(new ShardIndexKeyValueStore(
std::move(base_kvstore), params.index_location,
params.index_codec_state->encoded_size()))),
base_kvstore_path_(std::move(base_kvstore_path)),
executor_(std::move(executor)),
shard_index_params_(std::move(params)) {}
ShardIndexKeyValueStore* shard_index_kvstore_driver() {
return static_cast<ShardIndexKeyValueStore*>(this->Base::kvstore_driver());
}
kvstore::Driver* base_kvstore_driver() {
return shard_index_kvstore_driver()->base();
}
const std::string& base_kvstore_path() const { return base_kvstore_path_; }
const Executor& executor() { return executor_; }
span<const Index> grid_shape() const {
return span<const Index>(shard_index_params_.index_shape.data(),
shard_index_params_.index_shape.size() - 1);
}
const ShardIndexParameters& shard_index_params() const {
return shard_index_params_;
}
std::string base_kvstore_path_;
Executor executor_;
ShardIndexParameters shard_index_params_;
};
class ShardedKeyValueStoreWriteCache
: public internal::KvsBackedCache<ShardedKeyValueStoreWriteCache,
internal::AsyncCache> {
using Base = internal::KvsBackedCache<ShardedKeyValueStoreWriteCache,
internal::AsyncCache>;
public:
using ReadData = ShardEntries;
explicit ShardedKeyValueStoreWriteCache(
internal::CachePtr<ShardIndexCache> shard_index_cache)
: Base(kvstore::DriverPtr(shard_index_cache->base_kvstore_driver())),
shard_index_cache_(std::move(shard_index_cache)) {}
class Entry : public Base::Entry {
public:
using OwningCache = ShardedKeyValueStoreWriteCache;
size_t ComputeReadDataSizeInBytes(const void* data) override {
return internal::EstimateHeapUsage(*static_cast<const ReadData*>(data));
}
void DoDecode(std::optional<absl::Cord> value,
DecodeReceiver receiver) override {
GetOwningCache(*this).executor()(
[this, value = std::move(value),
receiver = std::move(receiver)]() mutable {
ShardEntries entries;
const auto& shard_index_params =
GetOwningCache(*this).shard_index_params();
if (value) {
TENSORSTORE_ASSIGN_OR_RETURN(
entries, DecodeShard(*value, shard_index_params),
static_cast<void>(execution::set_error(receiver, _)));
} else {
entries.entries.resize(shard_index_params.num_entries);
}
execution::set_value(
receiver, std::make_shared<ShardEntries>(std::move(entries)));
});
}
void DoEncode(std::shared_ptr<const ShardEntries> data,
EncodeReceiver receiver) override {
TENSORSTORE_ASSIGN_OR_RETURN(
auto encoded_shard,
EncodeShard(*data, GetOwningCache(*this).shard_index_params()),
static_cast<void>(execution::set_error(receiver, _)));
execution::set_value(receiver, std::move(encoded_shard));
}
std::string GetKeyValueStoreKey() override {
return GetOwningCache(*this).base_kvstore_path();
}
};
class TransactionNode : public Base::TransactionNode,
public internal_kvstore::AtomicMultiPhaseMutation {
public:
using OwningCache = ShardedKeyValueStoreWriteCache;
using Base::TransactionNode::TransactionNode;
absl::Mutex& mutex() override { return this->mutex_; }
void PhaseCommitDone(size_t next_phase) override {}
internal::TransactionState::Node& GetTransactionNode() override {
return *this;
}
void Abort() override {
this->AbortRemainingPhases();
Base::TransactionNode::Abort();
}
std::string DescribeKey(std::string_view key) override {
auto& cache = GetOwningCache(*this);
return tensorstore::StrCat(
DescribeInternalKey(key, cache.shard_index_params().grid_shape()),
" in ",
cache.kvstore_driver()->DescribeKey(cache.base_kvstore_path()));
}
void DoApply(ApplyOptions options, ApplyReceiver receiver) override;
void StartApply();
void AllEntriesDone(
internal_kvstore::SinglePhaseMutation& single_phase_mutation) override;
void MergeForWriteback(bool conditional);
void RecordEntryWritebackError(
internal_kvstore::ReadModifyWriteEntry& entry,
absl::Status error) override {
absl::MutexLock lock(&mutex_);
if (apply_status_.ok()) {
apply_status_ = std::move(error);
}
}
void Revoke() override {
Base::TransactionNode::Revoke();
{ UniqueWriterLock(*this); }
this->RevokeAllEntries();
}
void WritebackSuccess(ReadState&& read_state) override;
void WritebackError() override;
void InvalidateReadState() override;
bool MultiPhaseReadsCommitted() override { return this->reads_committed_; }
void Read(
internal_kvstore::ReadModifyWriteEntry& entry,
kvstore::ReadModifyWriteTarget::TransactionalReadOptions&& options,
kvstore::ReadModifyWriteTarget::ReadReceiver&& receiver) override {
this->AsyncCache::TransactionNode::Read({options.staleness_bound})
.ExecuteWhenReady(WithExecutor(
GetOwningCache(*this).executor(),
[&entry,
if_not_equal =
std::move(options.generation_conditions.if_not_equal),
receiver = std::move(receiver)](
ReadyFuture<const void> future) mutable {
if (!future.result().ok()) {
execution::set_error(receiver, future.result().status());
return;
}
execution::submit(HandleShardReadSuccess(entry, if_not_equal),
receiver);
}));
}
static Result<kvstore::ReadResult> HandleShardReadSuccess(
internal_kvstore::ReadModifyWriteEntry& entry,
const StorageGeneration& if_not_equal) {
auto& self = static_cast<TransactionNode&>(entry.multi_phase());
TimestampedStorageGeneration stamp;
std::shared_ptr<const ShardEntries> entries;
{
AsyncCache::ReadLock<ShardEntries> lock{self};
stamp = lock.stamp();
entries = lock.shared_data();
}
if (!StorageGeneration::IsUnknown(stamp.generation) &&
stamp.generation == if_not_equal) {
return kvstore::ReadResult::Unspecified(std::move(stamp));
}
if (StorageGeneration::IsDirty(stamp.generation)) {
stamp.generation =
StorageGeneration::AddLayer(std::move(stamp.generation));
}
auto entry_id = InternalKeyToEntryId(entry.key_);
const auto& shard_entry = entries->entries[entry_id];
if (!shard_entry) {
return kvstore::ReadResult::Missing(std::move(stamp));
} else {
return kvstore::ReadResult::Value(*shard_entry, std::move(stamp));
}
}
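// [Explanatory note, not in the original source] When the cached shard state
// carries uncommitted local modifications, its generation is dirty; AddLayer
// above appears to wrap the dirty generation in an additional layer so that
// generations handed back to a read-modify-write entry stay distinguishable
// from the clean base-kvstore generation when later writes in the same
// transaction are made conditional on them.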
ApplyReceiver apply_receiver_;
ApplyOptions apply_options_;
absl::Status apply_status_;
};
Entry* DoAllocateEntry() final { return new Entry; }
size_t DoGetSizeofEntry() final { return sizeof(Entry); }
TransactionNode* DoAllocateTransactionNode(AsyncCache::Entry& entry) final {
return new TransactionNode(static_cast<Entry&>(entry));
}
const internal::CachePtr<ShardIndexCache>& shard_index_cache() const {
return shard_index_cache_;
}
const Executor& executor() { return shard_index_cache()->executor(); }
const ShardIndexParameters& shard_index_params() const {
return shard_index_cache_->shard_index_params();
}
int64_t num_entries_per_shard() const {
return shard_index_cache_->shard_index_params().num_entries;
}
const std::string& base_kvstore_path() const {
return shard_index_cache_->base_kvstore_path();
}
internal::CachePtr<ShardIndexCache> shard_index_cache_;
};
void ShardedKeyValueStoreWriteCache::TransactionNode::InvalidateReadState() {
Base::TransactionNode::InvalidateReadState();
internal_kvstore::InvalidateReadState(phases_);
}
void ShardedKeyValueStoreWriteCache::TransactionNode::DoApply(
ApplyOptions options, ApplyReceiver receiver) {
apply_receiver_ = std::move(receiver);
apply_options_ = options;
apply_status_ = absl::OkStatus();
GetOwningCache(*this).executor()([this] { this->StartApply(); });
}
void ShardedKeyValueStoreWriteCache::TransactionNode::StartApply() {
RetryAtomicWriteback(apply_options_.staleness_bound);
}
void ShardedKeyValueStoreWriteCache::TransactionNode::AllEntriesDone(
internal_kvstore::SinglePhaseMutation& single_phase_mutation) {
if (!apply_status_.ok()) {
execution::set_error(std::exchange(apply_receiver_, {}),
std::exchange(apply_status_, {}));
return;
}
auto& self = *this;
GetOwningCache(*this).executor()([&self] {
TimestampedStorageGeneration stamp;
bool mismatch = false;
bool modified = false;
int64_t num_entries = 0;
auto& cache = GetOwningCache(self);
const int64_t num_entries_per_shard = cache.num_entries_per_shard();
for (auto& entry : self.phases_.entries_) {
if (entry.entry_type() != kReadModifyWrite) {
auto& dr_entry = static_cast<DeleteRangeEntry&>(entry);
auto [begin_id, end_id] = InternalKeyRangeToEntryRange(
dr_entry.key_, dr_entry.exclusive_max_, num_entries_per_shard);
modified = true;
num_entries += end_id - begin_id;
continue;
}
auto& buffered_entry =
static_cast<AtomicMultiPhaseMutation::BufferedReadModifyWriteEntry&>(
entry);
if (buffered_entry.value_state_ != kvstore::ReadResult::kUnspecified) {
modified = true;
++num_entries;
}
auto& entry_stamp = buffered_entry.stamp();
if (StorageGeneration::IsConditional(entry_stamp.generation)) {
if (!StorageGeneration::IsUnknown(stamp.generation) &&
StorageGeneration::Clean(stamp.generation) !=
StorageGeneration::Clean(entry_stamp.generation)) {
mismatch = true;
break;
} else {
stamp = entry_stamp;
}
}
}
if (mismatch) {
self.apply_options_.staleness_bound = absl::Now();
self.StartApply();
return;
}
if (!modified && StorageGeneration::IsUnknown(stamp.generation) &&
self.apply_options_.apply_mode !=
ApplyOptions::ApplyMode::kSpecifyUnchanged) {
internal::AsyncCache::ReadState update;
update.stamp = TimestampedStorageGeneration::Unconditional();
execution::set_value(std::exchange(self.apply_receiver_, {}),
std::move(update));
return;
}
if (!StorageGeneration::IsUnknown(stamp.generation) ||
num_entries != num_entries_per_shard) {
self.internal::AsyncCache::TransactionNode::Read(
{self.apply_options_.staleness_bound})
.ExecuteWhenReady([&self](ReadyFuture<const void> future) {
if (!future.result().ok()) {
execution::set_error(std::exchange(self.apply_receiver_, {}),
future.result().status());
return;
}
GetOwningCache(self).executor()(
[&self] { self.MergeForWriteback(true); });
});
return;
}
self.MergeForWriteback(false);
});
}
void ShardedKeyValueStoreWriteCache::TransactionNode::MergeForWriteback(
bool conditional) {
TimestampedStorageGeneration stamp;
ShardEntries new_entries;
if (conditional) {
auto lock = internal::AsyncCache::ReadLock<ShardEntries>{*this};
stamp = lock.stamp();
new_entries = *lock.shared_data();
} else {
stamp = TimestampedStorageGeneration::Unconditional();
}
auto& cache = GetOwningCache(*this);
const int64_t num_entries_per_shard = cache.num_entries_per_shard();
const bool has_existing_entries = !new_entries.entries.empty();
new_entries.entries.resize(num_entries_per_shard);
bool mismatch = false;
bool changed = false;
for (auto& entry : phases_.entries_) {
if (entry.entry_type() != kReadModifyWrite) {
auto& dr_entry = static_cast<DeleteRangeEntry&>(entry);
auto [begin_id, end_id] = InternalKeyRangeToEntryRange(
dr_entry.key_, dr_entry.exclusive_max_, num_entries_per_shard);
if (has_existing_entries) {
for (EntryId id = begin_id; id < end_id; ++id) {
new_entries.entries[id] = std::nullopt;
}
}
changed = true;
continue;
}
auto& buffered_entry =
static_cast<internal_kvstore::AtomicMultiPhaseMutation::
BufferedReadModifyWriteEntry&>(entry);
auto& entry_stamp = buffered_entry.stamp();
if (StorageGeneration::IsConditional(entry_stamp.generation) &&
StorageGeneration::Clean(entry_stamp.generation) !=
StorageGeneration::Clean(stamp.generation)) {
mismatch = true;
break;
}
if (buffered_entry.value_state_ == kvstore::ReadResult::kUnspecified ||
!StorageGeneration::IsInnerLayerDirty(entry_stamp.generation)) {
continue;
}
auto entry_id = InternalKeyToEntryId(buffered_entry.key_);
auto& new_entry = new_entries.entries[entry_id];
if (buffered_entry.value_state_ == kvstore::ReadResult::kValue) {
new_entry = buffered_entry.value_;
changed = true;
} else if (new_entry) {
new_entry = std::nullopt;
changed = true;
} else if (!conditional) {
changed = true;
}
}
if (mismatch) {
apply_options_.staleness_bound = absl::Now();
this->StartApply();
return;
}
internal::AsyncCache::ReadState update;
update.stamp = std::move(stamp);
if (changed) {
update.stamp.generation.MarkDirty();
}
update.data = std::make_shared<ShardEntries>(std::move(new_entries));
execution::set_value(std::exchange(apply_receiver_, {}), std::move(update));
}
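// [Explanatory note, not in the original source] MergeForWriteback builds
// the shard to write back: when `conditional` is true it starts from the
// cached shard contents and their generation stamp, because at least one
// buffered entry depends on the existing state; otherwise it starts from an
// empty shard with an unconditional stamp.  Delete ranges and buffered
// writes are then overlaid entry by entry, and any generation mismatch
// restarts the apply loop with a fresh staleness bound rather than writing
// back stale data.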
void ShardedKeyValueStoreWriteCache::TransactionNode::WritebackSuccess(
ReadState&& read_state) {
for (auto& entry : phases_.entries_) {
if (entry.entry_type() != kReadModifyWrite) {
internal_kvstore::WritebackSuccess(static_cast<DeleteRangeEntry&>(entry));
} else {
internal_kvstore::WritebackSuccess(
static_cast<internal_kvstore::ReadModifyWriteEntry&>(entry),
read_state.stamp);
}
}
internal_kvstore::DestroyPhaseEntries(phases_);
Base::TransactionNode::WritebackSuccess(std::move(read_state));
}
void ShardedKeyValueStoreWriteCache::TransactionNode::WritebackError() {
internal_kvstore::WritebackError(phases_);
internal_kvstore::DestroyPhaseEntries(phases_);
Base::TransactionNode::WritebackError();
}
struct ShardedKeyValueStoreSpecData {
Context::Resource<internal::CachePoolResource> cache_pool;
Context::Resource<internal::DataCopyConcurrencyResource>
data_copy_concurrency;
kvstore::Spec base;
std::vector<Index> grid_shape;
internal_zarr3::ZarrCodecChainSpec index_codecs;
ShardIndexLocation index_location;
TENSORSTORE_DECLARE_JSON_DEFAULT_BINDER(ShardedKeyValueStoreSpecData,
internal_json_binding::NoOptions,
IncludeDefaults,
::nlohmann::json::object_t)
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.cache_pool, x.data_copy_concurrency, x.base, x.grid_shape,
x.index_codecs, x.index_location);
};
};
namespace jb = ::tensorstore::internal_json_binding;
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(
ShardedKeyValueStoreSpecData,
jb::Object(
jb::Member( | #include "tensorstore/kvstore/zarr3_sharding_indexed/zarr3_sharding_indexed.h"
#include <stddef.h>
#include <stdint.h>
#include <functional>
#include <initializer_list>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/log/absl_check.h"
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "re2/re2.h"
#include "riegeli/bytes/cord_writer.h"
#include "riegeli/bytes/write.h"
#include "riegeli/digests/crc32c_digester.h"
#include "tensorstore/batch.h"
#include "tensorstore/context.h"
#include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/cache/kvs_backed_cache_testutil.h"
#include "tensorstore/internal/global_initializer.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/riegeli/digest_suffixed_writer.h"
#include "tensorstore/internal/testing/scoped_directory.h"
#include "tensorstore/internal/thread/thread_pool.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/memory/memory_key_value_store.h"
#include "tensorstore/kvstore/mock_kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/test_matchers.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/kvstore/zarr3_sharding_indexed/key.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/extents.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
namespace kvstore = ::tensorstore::kvstore;
using ::tensorstore::Batch;
using ::tensorstore::Executor;
using ::tensorstore::Future;
using ::tensorstore::Index;
using ::tensorstore::KvStore;
using ::tensorstore::MatchesStatus;
using ::tensorstore::OptionalByteRangeRequest;
using ::tensorstore::Result;
using ::tensorstore::span;
using ::tensorstore::StorageGeneration;
using ::tensorstore::TimestampedStorageGeneration;
using ::tensorstore::Transaction;
using ::tensorstore::internal::CachePool;
using ::tensorstore::internal::GetCache;
using ::tensorstore::internal::KvsBackedTestCache;
using ::tensorstore::internal::MatchesKvsReadResult;
using ::tensorstore::internal::MatchesKvsReadResultNotFound;
using ::tensorstore::internal::MatchesTimestampedStorageGeneration;
using ::tensorstore::internal::MockKeyValueStore;
using ::tensorstore::internal::UniqueNow;
using ::tensorstore::internal_zarr3::ZarrCodecChainSpec;
using ::tensorstore::kvstore::ReadResult;
using ::tensorstore::zarr3_sharding_indexed::EntryId;
using ::tensorstore::zarr3_sharding_indexed::EntryIdToKey;
using ::tensorstore::zarr3_sharding_indexed::GetShardedKeyValueStore;
using ::tensorstore::zarr3_sharding_indexed::ShardedKeyValueStoreParameters;
using ::tensorstore::zarr3_sharding_indexed::ShardIndexLocation;
constexpr CachePool::Limits kSmallCacheLimits{10000000};
absl::Cord Bytes(std::initializer_list<unsigned char> x) {
return absl::Cord(std::string(x.begin(), x.end()));
}
absl::Cord WithCrc32c(absl::Cord input) {
absl::Cord output;
riegeli::CordWriter writer{&output};
TENSORSTORE_CHECK_OK(riegeli::Write(
input, tensorstore::internal::DigestSuffixedWriter<
riegeli::Crc32cDigester,
tensorstore::internal::LittleEndianDigestWriter>{&writer}));
ABSL_CHECK(writer.Close());
return output;
}
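// [Explanatory note, not in the original source] With the index codec chain
// used by these tests ("bytes", little-endian, followed by "crc32c"), the
// encoded shard index holds two little-endian uint64 values per entry
// (offset and length, 16 bytes) plus a 4-byte CRC-32C suffix.  A grid shape
// of {n} therefore yields an index of n * 16 + 4 bytes: 1604 bytes for the
// 100-entry shards and 5 * 16 + 4 = 84 bytes for the 5-entry shards that
// appear in the expectations below.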
class GetKey {
public:
GetKey(bool sequential, std::vector<Index> grid_shape)
: sequential_(sequential),
grid_shape_(std::move(grid_shape)),
num_entries_(
tensorstore::ProductOfExtents(span<const Index>(grid_shape_))) {}
std::string operator()(std::string key) const {
auto it = key_to_entry_id_.find(key);
if (it == key_to_entry_id_.end()) {
ABSL_CHECK_LT(entry_id_to_key_.size(), num_entries_);
while (true) {
auto x = sequential_ ? next_entry_id_++ : absl::Uniform<EntryId>(gen_);
x = x % num_entries_;
if (entry_id_to_key_.emplace(x, key).second) {
it = key_to_entry_id_.emplace(key, x).first;
break;
}
}
}
return EntryIdToKey(it->second, grid_shape_);
}
private:
bool sequential_;
std::vector<Index> grid_shape_;
EntryId num_entries_;
mutable EntryId next_entry_id_ = 0;
mutable absl::BitGen gen_;
mutable absl::flat_hash_map<std::string, EntryId> key_to_entry_id_;
mutable absl::flat_hash_map<EntryId, std::string> entry_id_to_key_;
};
kvstore::DriverPtr GetDefaultStore(kvstore::DriverPtr base_kvstore,
std::string base_kvstore_path,
Executor executor,
CachePool::StrongPtr cache_pool,
const std::vector<Index>& grid_shape) {
ShardedKeyValueStoreParameters params;
params.base_kvstore = base_kvstore;
params.base_kvstore_path = base_kvstore_path;
params.executor = executor;
params.cache_pool = CachePool::WeakPtr(cache_pool);
TENSORSTORE_CHECK_OK_AND_ASSIGN(
auto index_codecs,
ZarrCodecChainSpec::FromJson(
{{{"name", "bytes"}, {"configuration", {{"endian", "little"}}}},
{{"name", "crc32c"}}}));
params.index_params.index_location = ShardIndexLocation::kEnd;
TENSORSTORE_CHECK_OK(
params.index_params.Initialize(index_codecs, grid_shape));
return GetShardedKeyValueStore(std::move(params));
}
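// [Explanatory note, not in the original source] GetDefaultStore layers the
// sharded driver over the given base kvstore with the shard index placed at
// the end of the shard file, so a read of a sharded key first consults the
// cached shard index, fetching the index suffix of "shard_path" from the
// base store when it is missing or stale, and then reads the byte range the
// matching index entry references.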
TEST(ShardedKeyValueStoreTest, BasicFunctionality) {
std::vector<std::pair<std::string, tensorstore::Executor>> executors{
{"inline", tensorstore::InlineExecutor{}},
{"thread_pool", tensorstore::internal::DetachedThreadPool(2)}};
for (const auto& [executor_name, executor] : executors) {
for (const auto sequential_ids : {true, false}) {
auto cache_pool = CachePool::Make(kSmallCacheLimits);
auto base_kv_store = tensorstore::GetMemoryKeyValueStore();
const int64_t num_entries = 100;
SCOPED_TRACE(executor_name);
auto store = GetDefaultStore(base_kv_store, "shard_path", executor,
cache_pool, {num_entries});
GetKey get_key_fn(sequential_ids, {num_entries});
tensorstore::internal::TestKeyValueReadWriteOps(store, get_key_fn);
}
}
}
TEST(ShardedKeyValueStoreTest, DescribeKey) {
CachePool::StrongPtr cache_pool = CachePool::Make(kSmallCacheLimits);
kvstore::DriverPtr base_kv_store = tensorstore::GetMemoryKeyValueStore();
int64_t num_entries = 100;
std::vector<Index> grid_shape{num_entries};
kvstore::DriverPtr store =
GetDefaultStore(base_kv_store, "shard_path",
tensorstore::InlineExecutor{}, cache_pool, grid_shape);
for (const auto& [key, description] :
std::vector<std::pair<uint32_t, std::string>>{
{0, "shard entry {0}/{100} in \"shard_path\""},
{1, "shard entry {1}/{100} in \"shard_path\""},
}) {
EXPECT_EQ(description, store->DescribeKey(EntryIdToKey(key, grid_shape)));
}
}
class RawEncodingTest : public ::testing::Test {
protected:
CachePool::StrongPtr cache_pool = CachePool::Make(kSmallCacheLimits);
kvstore::DriverPtr base_kv_store = tensorstore::GetMemoryKeyValueStore();
kvstore::DriverPtr GetStore(const std::vector<Index>& grid_shape) {
return GetDefaultStore(base_kv_store, "shard_path",
tensorstore::InlineExecutor{}, cache_pool,
grid_shape);
}
};
TEST_F(RawEncodingTest, MultipleUnconditionalWrites) {
std::vector<Index> grid_shape{100};
kvstore::DriverPtr store = GetStore(grid_shape);
std::vector<absl::Cord> values{absl::Cord("abc"), absl::Cord("aaaaa"),
absl::Cord("efgh")};
std::vector<Future<TimestampedStorageGeneration>> futures;
auto key = EntryIdToKey(10, grid_shape);
tensorstore::Transaction txn(tensorstore::isolated);
for (auto value : values) {
futures.push_back(kvstore::WriteCommitted(KvStore{store, txn}, key, value));
}
txn.CommitAsync().IgnoreFuture();
std::vector<Result<TimestampedStorageGeneration>> results;
for (const auto& future : futures) {
results.push_back(future.result());
}
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto shard_read,
base_kv_store->Read("shard_path").result());
EXPECT_THAT(
results,
::testing::UnorderedElementsAre(
MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()),
MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()),
MatchesTimestampedStorageGeneration(shard_read.stamp.generation)));
for (size_t i = 0; i < results.size(); ++i) {
if (results[i] && results[i]->generation == shard_read.stamp.generation) {
EXPECT_THAT(store->Read(key).result(),
MatchesKvsReadResult(values[i], results[i]->generation));
}
}
}
TEST_F(RawEncodingTest, List) {
std::vector<Index> grid_shape{100};
kvstore::DriverPtr store = GetStore(grid_shape);
std::map<std::string, absl::Cord> values{
{EntryIdToKey(1, grid_shape), absl::Cord("a")},
{EntryIdToKey(2, grid_shape), absl::Cord("bc")},
{EntryIdToKey(3, grid_shape), absl::Cord("def")},
{EntryIdToKey(10, grid_shape), absl::Cord("xyz")}};
for (auto [key, value] : values) {
TENSORSTORE_EXPECT_OK(store->Write(key, value));
}
EXPECT_THAT(tensorstore::internal::GetMap(store),
::testing::Optional(::testing::ElementsAreArray(values)));
}
TEST_F(RawEncodingTest, WritesAndDeletes) {
std::vector<Index> grid_shape{100};
kvstore::DriverPtr store = GetStore(grid_shape);
StorageGeneration gen1, gen2, gen3;
{
tensorstore::Transaction txn(tensorstore::isolated);
auto init_future1 = kvstore::WriteCommitted(
KvStore{store, txn}, EntryIdToKey(1, grid_shape), absl::Cord("a"));
auto init_future2 = kvstore::WriteCommitted(
KvStore{store, txn}, EntryIdToKey(2, grid_shape), absl::Cord("bc"));
auto init_future3 = kvstore::WriteCommitted(
KvStore{store, txn}, EntryIdToKey(3, grid_shape), absl::Cord("def"));
txn.CommitAsync().IgnoreFuture();
gen1 = init_future1.value().generation;
gen2 = init_future2.value().generation;
gen3 = init_future3.value().generation;
}
tensorstore::Transaction txn(tensorstore::isolated);
auto future1 =
kvstore::DeleteCommitted(KvStore{store, txn}, EntryIdToKey(1, grid_shape),
{StorageGeneration::NoValue()});
auto future2 =
kvstore::WriteCommitted(KvStore{store, txn}, EntryIdToKey(2, grid_shape),
absl::Cord("ww"), {gen2});
auto future3 =
kvstore::WriteCommitted(KvStore{store, txn}, EntryIdToKey(2, grid_shape),
absl::Cord("xx"), {gen2});
auto future4 =
kvstore::WriteCommitted(KvStore{store, txn}, EntryIdToKey(4, grid_shape),
absl::Cord("zz"), {StorageGeneration::NoValue()});
auto future5 = kvstore::DeleteCommitted(KvStore{store, txn},
EntryIdToKey(3, grid_shape), {gen3});
txn.CommitAsync().IgnoreFuture();
EXPECT_THAT(future1.result(), MatchesTimestampedStorageGeneration(
StorageGeneration::Unknown()));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto shard_read,
base_kv_store->Read("shard_path").result());
EXPECT_THAT(
std::vector({future2.result(), future3.result()}),
::testing::UnorderedElementsAre(
MatchesTimestampedStorageGeneration(StorageGeneration::Unknown()),
MatchesTimestampedStorageGeneration(shard_read.stamp.generation)));
EXPECT_THAT(store->Read(EntryIdToKey(1, grid_shape)).result(),
MatchesKvsReadResult(absl::Cord("a")));
EXPECT_THAT(store->Read(EntryIdToKey(2, grid_shape)).result(),
MatchesKvsReadResult(
!StorageGeneration::IsUnknown(future2.result()->generation)
? absl::Cord("ww")
: absl::Cord("xx")));
EXPECT_THAT(store->Read(EntryIdToKey(3, grid_shape)).result(),
MatchesKvsReadResultNotFound());
EXPECT_THAT(store->Read(EntryIdToKey(4, grid_shape)).result(),
MatchesKvsReadResult(absl::Cord("zz")));
}
std::vector<std::vector<Result<TimestampedStorageGeneration>>>
TestOrderDependentWrites(
std::function<void()> init,
std::function<Future<TimestampedStorageGeneration>()> op0,
std::function<Future<TimestampedStorageGeneration>()> op1,
std::function<void()> finalize) {
std::vector<std::vector<Result<TimestampedStorageGeneration>>> all_results;
for (int i = 0; i < 2; ++i) {
std::vector<Future<TimestampedStorageGeneration>> futures(2);
init();
if (i == 0) {
futures[0] = op0();
futures[1] = op1();
} else {
futures[1] = op1();
futures[0] = op0();
}
finalize();
all_results.push_back({futures[0].result(), futures[1].result()});
}
return all_results;
}
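// [Explanatory note, not in the original source] TestOrderDependentWrites
// issues the same pair of transactional operations in both orders, so the
// callers below can assert on the full set of acceptable outcomes without
// depending on the order in which conflicting read-modify-write entries
// happen to be committed.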
TEST_F(RawEncodingTest, WriteThenDelete) {
std::vector<Index> grid_shape{100};
kvstore::DriverPtr store = GetStore(grid_shape);
TENSORSTORE_ASSERT_OK(
store->Write(EntryIdToKey(1, grid_shape), absl::Cord("a")).result());
EXPECT_THAT(store->Read(EntryIdToKey(1, grid_shape)).result(),
MatchesKvsReadResult(absl::Cord("a")));
TENSORSTORE_ASSERT_OK(store->Delete(EntryIdToKey(1, grid_shape)).result());
EXPECT_THAT(store->Read(EntryIdToKey(1, grid_shape)).result(),
MatchesKvsReadResultNotFound());
}
TEST_F(RawEncodingTest, MultipleDeleteExisting) {
std::vector<Index> grid_shape{100};
kvstore::DriverPtr store = GetStore(grid_shape);
StorageGeneration gen;
tensorstore::Transaction txn{tensorstore::no_transaction};
EXPECT_THAT(
TestOrderDependentWrites(
[&] {
gen = store->Write(EntryIdToKey(1, grid_shape), absl::Cord("a"))
.value()
.generation;
txn = tensorstore::Transaction(tensorstore::isolated);
},
[&] {
return kvstore::DeleteCommitted(KvStore{store, txn},
EntryIdToKey(1, grid_shape),
{gen});
},
[&] {
return kvstore::DeleteCommitted(
KvStore{store, txn}, EntryIdToKey(1, grid_shape),
{StorageGeneration::NoValue()});
},
[&] { txn.CommitAsync().IgnoreFuture(); }),
::testing::UnorderedElementsAre(
::testing::ElementsAre(
MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()),
MatchesTimestampedStorageGeneration(
StorageGeneration::NoValue())),
::testing::ElementsAre(
MatchesTimestampedStorageGeneration(StorageGeneration::NoValue()),
MatchesTimestampedStorageGeneration(
StorageGeneration::Unknown()))));
}
TEST_F(RawEncodingTest, WriteWithUnmatchedConditionAfterDelete) {
std::vector<Index> grid_shape{100};
kvstore::DriverPtr store = GetStore(grid_shape);
tensorstore::Transaction txn{tensorstore::no_transaction};
EXPECT_THAT(
TestOrderDependentWrites(
[&] {
store->Delete(EntryIdToKey(0, grid_shape)).value();
txn = tensorstore::Transaction(tensorstore::isolated);
},
[&] {
return kvstore::WriteCommitted(KvStore{store, txn},
EntryIdToKey(0, grid_shape),
absl::Cord("a"));
},
[&] {
return kvstore::WriteCommitted(
KvStore{store, txn}, EntryIdToKey(0, grid_shape),
absl::Cord("b"),
{StorageGeneration::FromString("g")});
},
[&] { txn.CommitAsync().IgnoreFuture(); }),
::testing::Each(::testing::ElementsAre(
MatchesTimestampedStorageGeneration(
::testing::AllOf(::testing::Not(StorageGeneration::NoValue()),
::testing::Not(StorageGeneration::Invalid()))),
MatchesTimestampedStorageGeneration(StorageGeneration::Unknown()))));
}
TEST_F(RawEncodingTest, MultipleDeleteNonExisting) {
std::vector<Index> grid_shape{100};
kvstore::DriverPtr store = GetStore(grid_shape);
tensorstore::Transaction txn(tensorstore::isolated);
std::vector futures{
kvstore::DeleteCommitted(KvStore{store, txn}, EntryIdToKey(1, grid_shape),
{StorageGeneration::NoValue()}),
kvstore::DeleteCommitted(KvStore{store, txn}, EntryIdToKey(1, grid_shape),
{StorageGeneration::NoValue()})};
txn.CommitAsync().IgnoreFuture();
std::vector results{futures[0].result(), futures[1].result()};
EXPECT_THAT(
results,
::testing::UnorderedElementsAre(
MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()),
MatchesTimestampedStorageGeneration(StorageGeneration::NoValue())));
}
TEST_F(RawEncodingTest, ShardIndexTooShort) {
std::vector<Index> grid_shape{100};
kvstore::DriverPtr store = GetStore(grid_shape);
base_kv_store->Write("shard_path", Bytes({1, 2, 3})).value();
EXPECT_THAT(store->Read(EntryIdToKey(1, grid_shape)).result(),
MatchesStatus(
absl::StatusCode::kFailedPrecondition,
RE2::QuoteMeta("Error reading shard index in \"shard_path\": "
"Requested byte range [-1604, ?) is not valid "
"for value of size 3")));
EXPECT_THAT(
store->Write(EntryIdToKey(10, grid_shape), absl::Cord("abc")).result(),
MatchesStatus(absl::StatusCode::kDataLoss,
"Error reading \"shard_path\": "
"Existing shard has size of 3 bytes, but expected at least "
"1604 bytes"));
}
TEST_F(RawEncodingTest, ShardIndexByteRangeOverflow) {
std::vector<Index> grid_shape{2};
kvstore::DriverPtr store = GetStore(grid_shape);
auto content = WithCrc32c(Bytes({
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
}));
TENSORSTORE_ASSERT_OK(base_kv_store->Write("shard_path", content));
EXPECT_THAT(
store->Read(EntryIdToKey(1, grid_shape)).result(),
MatchesStatus(absl::StatusCode::kDataLoss,
"Error reading shard index in \"shard_path\": "
"Invalid shard index entry 1 with offset=.*, length=.*"));
}
TEST_F(RawEncodingTest, ShardIndexEntryByteRangeOutOfRange) {
std::vector<Index> grid_shape{2};
kvstore::DriverPtr store = GetStore(grid_shape);
auto content = WithCrc32c(Bytes({
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0, 0, 0, 0, 0, 0, 0, 0,
37, 0, 0, 0, 0, 0, 0, 0,
}));
TENSORSTORE_ASSERT_OK(base_kv_store->Write("shard_path", content));
EXPECT_THAT(
store->Write(EntryIdToKey(1, grid_shape), absl::Cord("x")).result(),
MatchesStatus(absl::StatusCode::kDataLoss,
"Error reading \"shard_path\": "
"Shard index entry 1 with byte range .* is invalid "
"for shard of size .*"));
}
TEST_F(RawEncodingTest, ShardIndexInvalidChecksum) {
std::vector<Index> grid_shape{2};
kvstore::DriverPtr store = GetStore(grid_shape);
auto content = Bytes({
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0, 0, 0, 0, 0, 0, 0, 0,
5, 0, 0, 0, 0, 0, 0, 0,
});
content.Append("abcd");
TENSORSTORE_ASSERT_OK(base_kv_store->Write("shard_path", content));
EXPECT_THAT(store->Read(EntryIdToKey(1, grid_shape)).result(),
MatchesStatus(absl::StatusCode::kDataLoss,
"Error reading shard index in \"shard_path\": "
"Digest mismatch.*"));
}
class UnderlyingKeyValueStoreTest : public ::testing::Test {
protected:
CachePool::StrongPtr cache_pool = CachePool::Make(kSmallCacheLimits);
MockKeyValueStore::MockPtr mock_store = MockKeyValueStore::Make();
kvstore::DriverPtr GetStore(std::vector<Index> grid_shape) {
return GetDefaultStore(mock_store, "shard_path",
tensorstore::InlineExecutor{}, cache_pool,
grid_shape);
}
std::vector<Index> grid_shape{5};
kvstore::DriverPtr store = GetStore(grid_shape);
};
TEST_F(UnderlyingKeyValueStoreTest, Read) {
absl::Time init_time = UniqueNow();
absl::Time shard_index_time;
{
auto future = store->Read(EntryIdToKey(2, grid_shape), {});
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("shard_path", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest::SuffixLength(5 * 16 + 4),
req.options.byte_range);
EXPECT_THAT(req.options.staleness_bound, ::testing::Gt(init_time));
shard_index_time = absl::Now();
req.promise.SetResult(
ReadResult{ReadResult::kValue,
WithCrc32c(Bytes({
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
10, 0, 0, 0, 0, 0, 0, 0,
5, 0, 0, 0, 0, 0, 0, 0,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
})),
{StorageGeneration::FromString("g0"), shard_index_time}});
}
ASSERT_FALSE(future.ready()) << future.status();
absl::Time read_time;
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("shard_path", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::FromString("g0"),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest(10, 15), req.options.byte_range);
read_time = absl::Now();
req.promise.SetResult(
ReadResult{ReadResult::kValue,
Bytes({5, 6, 7, 8, 9}),
{StorageGeneration::FromString("g0"), read_time}});
}
ASSERT_EQ(0, mock_store->read_requests.size());
ASSERT_TRUE(future.ready());
EXPECT_THAT(
future.result(),
MatchesKvsReadResult(Bytes({5, 6, 7, 8, 9}),
StorageGeneration::FromString("g0"), read_time));
}
{
kvstore::ReadOptions options;
options.staleness_bound = init_time;
auto future = store->Read(EntryIdToKey(3, grid_shape), options);
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesKvsReadResultNotFound(shard_index_time));
}
{
auto req_time = UniqueNow();
auto future = store->Read(EntryIdToKey(3, grid_shape), {});
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("shard_path", req.key);
EXPECT_EQ(StorageGeneration::FromString("g0"),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest::SuffixLength(5 * 16 + 4),
req.options.byte_range);
EXPECT_THAT(req.options.staleness_bound, ::testing::Gt(req_time));
shard_index_time = absl::Now();
req.promise.SetResult(ReadResult::Unspecified(
{StorageGeneration::FromString("g0"), shard_index_time}));
}
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesKvsReadResultNotFound(shard_index_time));
}
{
kvstore::ReadOptions options;
options.staleness_bound = init_time;
auto future = store->Read(EntryIdToKey(2, grid_shape), options);
absl::Time read_time;
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("shard_path", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::FromString("g0"),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest(10, 15), req.options.byte_range);
EXPECT_EQ(init_time, req.options.staleness_bound);
read_time = absl::Now();
req.promise.SetResult(
ReadResult{ReadResult::kValue,
Bytes({5, 6, 7, 8, 9}),
{StorageGeneration::FromString("g0"), read_time}});
}
ASSERT_EQ(0, mock_store->read_requests.size());
ASSERT_TRUE(future.ready());
EXPECT_THAT(
future.result(),
MatchesKvsReadResult(Bytes({5, 6, 7, 8, 9}),
StorageGeneration::FromString("g0"), read_time));
}
{
kvstore::ReadOptions options;
options.staleness_bound = init_time;
auto future = store->Read(EntryIdToKey(2, grid_shape), options);
absl::Time abort_time;
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("shard_path", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(init_time, req.options.staleness_bound);
EXPECT_EQ(StorageGeneration::FromString("g0"),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest(10, 15), req.options.byte_range);
abort_time = absl::Now();
req.promise.SetResult(ReadResult::Unspecified(
{StorageGeneration::FromString("g0"), abort_time}));
}
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("shard_path", req.key);
EXPECT_EQ(StorageGeneration::FromString("g0"),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest::SuffixLength(5 * 16 + 4),
req.options.byte_range);
EXPECT_THAT(req.options.staleness_bound, ::testing::Ge(abort_time));
shard_index_time = absl::Now();
req.promise.SetResult(
ReadResult{ReadResult::kValue,
WithCrc32c(Bytes({
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
10, 0, 0, 0, 0, 0, 0, 0,
6, 0, 0, 0, 0, 0, 0, 0,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, |
597 | cpp | google/tensorstore | util | tensorstore/kvstore/file/util.cc | tensorstore/kvstore/file/util_test.cc | #ifndef TENSORSTORE_KVSTORE_FILE_UTIL_H_
#define TENSORSTORE_KVSTORE_FILE_UTIL_H_
#include <string_view>
#include "tensorstore/kvstore/key_range.h"
namespace tensorstore {
namespace internal_file_util {
bool IsKeyValid(std::string_view key, std::string_view lock_suffix);
std::string_view LongestDirectoryPrefix(const KeyRange& range);
}
}
#endif
#include "tensorstore/kvstore/file/util.h"
#include <stddef.h>
#include <string_view>
#include "absl/strings/match.h"
#include "tensorstore/kvstore/key_range.h"
namespace tensorstore {
namespace internal_file_util {
bool IsKeyValid(std::string_view key, std::string_view lock_suffix) {
if (key.find('\0') != std::string_view::npos) return false;
if (key.empty()) return false;
if (key.back() == '/') return false;
while (true) {
size_t next_delimiter = key.find('/');
std::string_view component = next_delimiter == std::string_view::npos
? key
: key.substr(0, next_delimiter);
if (component == ".") return false;
if (component == "..") return false;
if (!lock_suffix.empty() && component.size() >= lock_suffix.size() &&
absl::EndsWith(component, lock_suffix)) {
return false;
}
if (next_delimiter == std::string_view::npos) return true;
key.remove_prefix(next_delimiter + 1);
}
}
std::string_view LongestDirectoryPrefix(const KeyRange& range) {
std::string_view prefix = tensorstore::LongestPrefix(range);
const size_t i = prefix.rfind('/');
if (i == std::string_view::npos) return {};
return prefix.substr(0, i);
}
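// [Explanatory note, not in the original source] Taken together: IsKeyValid
// rejects empty keys, keys containing NUL, keys ending in '/', any "." or
// ".." path component, and any component ending in the lock suffix, while
// LongestDirectoryPrefix trims the longest common prefix of a key range back
// to its final '/' so that the result names a directory.  Illustrative
// values, which follow from the code above:
//
//   IsKeyValid("a/b", "")                      -> true
//   IsKeyValid("a/../b", "")                   -> false  (".." component)
//   IsKeyValid("a/x.lock", ".lock")            -> false  (lock suffix)
//   LongestDirectoryPrefix({"/a/a", "/a/b"})   -> "/a"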
}
} | #include "tensorstore/kvstore/file/util.h"
#include <gtest/gtest.h>
#include "tensorstore/kvstore/key_range.h"
namespace {
using ::tensorstore::KeyRange;
using ::tensorstore::internal_file_util::IsKeyValid;
using ::tensorstore::internal_file_util::LongestDirectoryPrefix;
TEST(IsKeyValid, Basic) {
EXPECT_TRUE(IsKeyValid("tmp/root", ""));
EXPECT_TRUE(IsKeyValid("a", ""));
EXPECT_TRUE(IsKeyValid("a/b", ""));
EXPECT_FALSE(IsKeyValid("", ""));
EXPECT_FALSE(IsKeyValid("/", ""));
EXPECT_TRUE(IsKeyValid("/tmp/root", ""));
EXPECT_FALSE(IsKeyValid("/tmp/root/", ""));
EXPECT_TRUE(IsKeyValid("tmp
EXPECT_FALSE(IsKeyValid("tmp/./root", ""));
EXPECT_FALSE(IsKeyValid("tmp/../root", ""));
EXPECT_FALSE(IsKeyValid("tmp/root/", ""));
EXPECT_FALSE(IsKeyValid("tmp/.lock/a", ".lock"));
EXPECT_FALSE(IsKeyValid("tmp/foo.lock/a", ".lock"));
EXPECT_FALSE(IsKeyValid(std::string_view("tmp/\0bar", 8), ""));
}
TEST(LongestDirectoryPrefix, Basic) {
EXPECT_EQ("", LongestDirectoryPrefix(KeyRange{"a", "b"}));
EXPECT_EQ("", LongestDirectoryPrefix(KeyRange{"/a", "/b"}));
EXPECT_EQ("/a", LongestDirectoryPrefix(KeyRange{"/a/a", "/a/b"}));
}
} |
598 | cpp | google/tensorstore | coordinator_server | tensorstore/kvstore/ocdbt/distributed/coordinator_server.cc | tensorstore/kvstore/ocdbt/distributed/coordinator_server_test.cc | #ifndef TENSORSTORE_KVSTORE_OCDBT_DISTRIBUTED_COORDINATOR_SERVER_H_
#define TENSORSTORE_KVSTORE_OCDBT_DISTRIBUTED_COORDINATOR_SERVER_H_
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include "absl/time/time.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/json_serialization_options.h"
#include "tensorstore/kvstore/ocdbt/distributed/rpc_security.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace ocdbt {
class CoordinatorServer {
public:
struct Spec {
TENSORSTORE_DECLARE_JSON_DEFAULT_BINDER(Spec, JsonSerializationOptions,
JsonSerializationOptions);
internal_ocdbt::RpcSecurityMethod::Ptr security;
std::vector<std::string> bind_addresses;
};
using Clock = std::function<absl::Time()>;
struct Options {
Spec spec;
Clock clock;
};
CoordinatorServer();
static Result<CoordinatorServer> Start(Options options);
CoordinatorServer(CoordinatorServer&&);
CoordinatorServer& operator=(CoordinatorServer&&);
~CoordinatorServer();
int port() const;
span<const int> ports() const;
private:
class Impl;
std::unique_ptr<Impl> impl_;
};
}
}
#endif
#include "tensorstore/kvstore/ocdbt/distributed/coordinator_server.h"
#include <stddef.h>
#include <stdint.h>
#include <functional>
#include <iterator>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/absl_log.h"
#include "absl/meta/type_traits.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "absl/types/compare.h"
#include "grpcpp/security/server_credentials.h"
#include "grpcpp/server.h"
#include "grpcpp/server_builder.h"
#include "grpcpp/server_context.h"
#include "grpcpp/support/server_callback.h"
#include "tensorstore/internal/container/heterogeneous_container.h"
#include "tensorstore/internal/container/intrusive_red_black_tree.h"
#include "tensorstore/internal/grpc/peer_address.h"
#include "tensorstore/internal/grpc/utils.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/kvstore/ocdbt/distributed/coordinator.grpc.pb.h"
#include "tensorstore/kvstore/ocdbt/distributed/coordinator.pb.h"
#include "tensorstore/kvstore/ocdbt/distributed/rpc_security.h"
#include "tensorstore/kvstore/ocdbt/distributed/rpc_security_registry.h"
#include "tensorstore/proto/encode_time.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace ocdbt {
namespace {
ABSL_CONST_INIT internal_log::VerboseFlag ocdbt_logging("ocdbt");
struct LeaseNode;
using LeaseTree = internal::intrusive_red_black_tree::Tree<LeaseNode>;
struct LeaseNode : public LeaseTree::NodeBase {
std::string key;
std::string owner;
absl::Time expiration_time;
uint64_t lease_id;
};
}
namespace jb = ::tensorstore::internal_json_binding;
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(
CoordinatorServer::Spec,
jb::Object(
jb::Member("security",
jb::Projection<&CoordinatorServer::Spec::security>(
internal_ocdbt::RpcSecurityMethodJsonBinder)),
jb::Member("bind_addresses",
jb::Projection<&CoordinatorServer::Spec::bind_addresses>(
jb::DefaultInitializedValue()))));
CoordinatorServer::CoordinatorServer() = default;
CoordinatorServer::~CoordinatorServer() = default;
CoordinatorServer::CoordinatorServer(CoordinatorServer&&) = default;
CoordinatorServer& CoordinatorServer::operator=(CoordinatorServer&&) = default;
class CoordinatorServer::Impl
: public internal_ocdbt::grpc_gen::Coordinator::CallbackService {
public:
std::vector<int> listening_ports_;
std::unique_ptr<grpc::Server> server_;
internal_ocdbt::RpcSecurityMethod::Ptr security_;
Clock clock_;
grpc::ServerUnaryReactor* RequestLease(
grpc::CallbackServerContext* context,
const internal_ocdbt::grpc_gen::LeaseRequest* request,
internal_ocdbt::grpc_gen::LeaseResponse* response) override;
void PurgeExpiredLeases() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
absl::Mutex mutex_;
LeaseTree leases_by_expiration_time_ ABSL_GUARDED_BY(mutex_);
using LeaseSet =
internal::HeterogeneousHashSet<std::unique_ptr<LeaseNode>,
std::string_view, &LeaseNode::key>;
LeaseSet leases_by_key_ ABSL_GUARDED_BY(mutex_);
};
span<const int> CoordinatorServer::ports() const {
return impl_->listening_ports_;
}
int CoordinatorServer::port() const { return impl_->listening_ports_.front(); }
void CoordinatorServer::Impl::PurgeExpiredLeases() {
auto now = clock_();
for (LeaseTree::iterator it = leases_by_expiration_time_.begin(), next;
it != leases_by_expiration_time_.end() && it->expiration_time < now;
it = next) {
next = std::next(it);
LeaseNode& node = *it;
leases_by_expiration_time_.Remove(node);
leases_by_key_.erase(node.key);
}
}
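// [Explanatory note, not in the original source] Leases are indexed twice
// over the same nodes: leases_by_key_ is a hash set keyed by lease key for
// O(1) lookup in RequestLease, and leases_by_expiration_time_ is a red-black
// tree ordered by expiration time, so the purge above can walk expired
// leases from the front and stop at the first unexpired node, touching only
// the nodes it removes.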
grpc::ServerUnaryReactor* CoordinatorServer::Impl::RequestLease(
grpc::CallbackServerContext* context,
const internal_ocdbt::grpc_gen::LeaseRequest* request,
internal_ocdbt::grpc_gen::LeaseResponse* response) {
auto* reactor = context->DefaultReactor();
if (auto status = security_->ValidateServerRequest(context); !status.ok()) {
reactor->Finish(internal::AbslStatusToGrpcStatus(status));
return reactor;
}
auto peer_address = internal::GetGrpcPeerAddressAndPort(context);
if (!peer_address.ok()) {
reactor->Finish(grpc::Status(grpc::StatusCode::INTERNAL,
std::string(peer_address.status().message())));
ABSL_LOG_IF(INFO, ocdbt_logging)
<< "Coordinator: internal error: request=" << *request;
return reactor;
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto lease_duration,
internal::ProtoToAbslDuration(request->lease_duration()),
(reactor->Finish(grpc::Status(
grpc::StatusCode::INVALID_ARGUMENT,
tensorstore::StrCat("Invalid lease duration: ", _.message()))),
reactor));
{
absl::MutexLock lock(&mutex_);
PurgeExpiredLeases();
LeaseNode* node;
bool assign_new_lease = false;
bool renew_lease = false;
if (auto it = leases_by_key_.find(request->key());
it != leases_by_key_.end()) {
node = it->get();
if (request->has_renew_lease_id() &&
request->renew_lease_id() == node->lease_id) {
leases_by_expiration_time_.Remove(*node);
renew_lease = true;
} else if (request->has_uncooperative_lease_id() &&
request->uncooperative_lease_id() == node->lease_id) {
leases_by_expiration_time_.Remove(*node);
assign_new_lease = true;
}
} else {
auto new_node = std::make_unique<LeaseNode>();
new_node->key = request->key();
node = new_node.get();
leases_by_key_.insert(std::move(new_node));
assign_new_lease = true;
}
if (assign_new_lease || renew_lease) {
auto cur_time = clock_();
node->expiration_time = cur_time + lease_duration;
if (assign_new_lease) {
node->lease_id = static_cast<uint64_t>(
absl::ToInt64Nanoseconds(cur_time - absl::UnixEpoch()));
node->owner = tensorstore::StrCat(peer_address->first, ":",
request->cooperator_port());
}
response->set_is_owner(true);
leases_by_expiration_time_.FindOrInsert(
[&](LeaseNode& other) {
return node->expiration_time > other.expiration_time
? absl::weak_ordering::greater
: absl::weak_ordering::less;
},
[&] { return node; });
}
response->set_owner(node->owner);
internal::AbslTimeToProto(node->expiration_time,
response->mutable_expiration_time());
response->set_lease_id(node->lease_id);
}
ABSL_LOG_IF(INFO, ocdbt_logging)
<< "Coordinator: request=" << *request << ", response=" << *response;
reactor->Finish(grpc::Status());
return reactor;
}
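// [Explanatory note, not in the original source] A freshly assigned lease id
// is the coordinator's clock reading in nanoseconds since the Unix epoch,
// which keeps ids distinct per key as long as successive grants observe
// distinct clock values.  A renewal (matching renew_lease_id) keeps the
// owner and id and only extends the expiration; a takeover (matching
// uncooperative_lease_id) assigns a new id and owner; any other request for
// an existing key returns the current lease, presumably leaving is_owner at
// its proto default of false.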
Result<CoordinatorServer> CoordinatorServer::Start(Options options) {
auto impl = std::make_unique<Impl>();
if (options.clock) {
impl->clock_ = std::move(options.clock);
} else {
impl->clock_ = [] { return absl::Now(); };
}
impl->security_ = options.spec.security;
if (!impl->security_) {
impl->security_ = internal_ocdbt::GetInsecureRpcSecurityMethod();
}
grpc::ServerBuilder builder;
builder.RegisterService(impl.get());
auto creds = impl->security_->GetServerCredentials();
if (options.spec.bind_addresses.empty()) {
options.spec.bind_addresses.push_back("[::]:0");
}
impl->listening_ports_.resize(options.spec.bind_addresses.size());
for (size_t i = 0; i < options.spec.bind_addresses.size(); ++i) {
builder.AddListeningPort(options.spec.bind_addresses[i], creds,
&impl->listening_ports_[i]);
}
impl->server_ = builder.BuildAndStart();
CoordinatorServer server;
server.impl_ = std::move(impl);
return server;
}
}
} | #include "tensorstore/kvstore/ocdbt/distributed/coordinator_server.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/absl_log.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "grpcpp/create_channel.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/ocdbt/distributed/btree_node_identifier.h"
#include "tensorstore/kvstore/ocdbt/distributed/coordinator.grpc.pb.h"
#include "tensorstore/kvstore/ocdbt/distributed/lease_cache_for_cooperator.h"
#include "tensorstore/kvstore/ocdbt/distributed/rpc_security.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::KeyRange;
using ::tensorstore::internal_ocdbt::BtreeNodeIdentifier;
using ::tensorstore::internal_ocdbt_cooperator::LeaseCacheForCooperator;
using ::tensorstore::ocdbt::CoordinatorServer;
class CoordinatorServerTest : public ::testing::Test {
protected:
absl::Time cur_time;
CoordinatorServer server_;
LeaseCacheForCooperator lease_cache;
void SetUp() override {
auto security =
::tensorstore::internal_ocdbt::GetInsecureRpcSecurityMethod();
CoordinatorServer::Options options;
options.spec.security = security;
options.spec.bind_addresses.push_back("localhost:0");
options.clock = [this] { return cur_time; };
TENSORSTORE_CHECK_OK_AND_ASSIGN(
server_, CoordinatorServer::Start(std::move(options)));
std::string address = tensorstore::StrCat("localhost:", server_.port());
auto channel =
::grpc::CreateChannel(address, security->GetClientCredentials());
if (!channel->WaitForConnected(
absl::ToChronoTime(absl::Now() + absl::Milliseconds(100)))) {
ABSL_LOG(WARNING) << "Failed to connect to coordinator after 100ms: "
<< address;
}
LeaseCacheForCooperator::Options lease_cache_options;
lease_cache_options.clock = {};
lease_cache_options.cooperator_port = 42;
lease_cache_options.coordinator_stub =
tensorstore::internal_ocdbt::grpc_gen::Coordinator::NewStub(
std::move(channel));
lease_cache_options.security = security;
lease_cache = LeaseCacheForCooperator(std::move(lease_cache_options));
}
};
TEST_F(CoordinatorServerTest, Basic) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto lease_info,
lease_cache
.GetLease("key", BtreeNodeIdentifier{1, KeyRange{"abc", "def"}})
.result());
EXPECT_FALSE(lease_info->peer_stub);
EXPECT_THAT(lease_info->peer_address, ::testing::MatchesRegex(".*:42"));
}
} |
599 | cpp | google/tensorstore | read_version | tensorstore/kvstore/ocdbt/non_distributed/read_version.cc | tensorstore/kvstore/ocdbt/read_version_test.cc | #ifndef TENSORSTORE_KVSTORE_OCDBT_NON_DISTRIBUTED_READ_VERSION_H_
#define TENSORSTORE_KVSTORE_OCDBT_NON_DISTRIBUTED_READ_VERSION_H_
#include <variant>
#include "absl/time/time.h"
#include "tensorstore/kvstore/ocdbt/format/version_tree.h"
#include "tensorstore/kvstore/ocdbt/io_handle.h"
#include "tensorstore/util/future.h"
namespace tensorstore {
namespace internal_ocdbt {
Future<BtreeGenerationReference> ReadVersion(
ReadonlyIoHandle::Ptr io_handle, VersionSpec version_spec,
absl::Time staleness_bound = absl::Now());
}
}
#endif
#include "tensorstore/kvstore/ocdbt/non_distributed/read_version.h"
#include <cassert>
#include <memory>
#include <utility>
#include <variant>
#include "absl/base/attributes.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/time/time.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/kvstore/ocdbt/format/indirect_data_reference.h"
#include "tensorstore/kvstore/ocdbt/format/manifest.h"
#include "tensorstore/kvstore/ocdbt/format/version_tree.h"
#include "tensorstore/kvstore/ocdbt/io_handle.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal_ocdbt {
namespace {
ABSL_CONST_INIT internal_log::VerboseFlag ocdbt_logging("ocdbt");
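// Reference-counted state for a single ReadVersion request.  The control flow
// is: Start -> RequestManifest -> ManifestReady, which either resolves the
// promise directly from the manifest's inline version list or descends the
// version tree via LookupNodeReference until a matching leaf entry is found.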
struct ReadVersionOperation
: public internal::AtomicReferenceCount<ReadVersionOperation> {
using Ptr = internal::IntrusivePtr<ReadVersionOperation>;
using PromiseType = Promise<BtreeGenerationReference>;
ReadonlyIoHandle::Ptr io_handle;
VersionSpec version_spec;
absl::Time staleness_bound;
static Future<BtreeGenerationReference> Start(ReadonlyIoHandle::Ptr io_handle,
VersionSpec version_spec,
absl::Time staleness_bound) {
auto op = internal::MakeIntrusivePtr<ReadVersionOperation>();
op->io_handle = std::move(io_handle);
op->version_spec = version_spec;
op->staleness_bound = staleness_bound;
auto [promise, future] =
PromiseFuturePair<BtreeGenerationReference>::Make();
RequestManifest(std::move(op), std::move(promise), absl::InfinitePast());
return std::move(future);
}
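  // Fetches a manifest no older than `staleness_bound` and invokes
  // ManifestReady on the I/O executor once it is available.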
static void RequestManifest(ReadVersionOperation::Ptr op, PromiseType promise,
absl::Time staleness_bound) {
auto* op_ptr = op.get();
LinkValue(
WithExecutor(op_ptr->io_handle->executor,
[op = std::move(op)](
PromiseType promise,
ReadyFuture<const ManifestWithTime> future) mutable {
ManifestReady(std::move(op), std::move(promise),
future.value());
}),
std::move(promise), op_ptr->io_handle->GetManifest(staleness_bound));
}
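  // Decides how to satisfy the request from `manifest_with_time`: if the
  // manifest is missing or predates the requested version, the manifest is
  // re-read at the caller's staleness bound before reporting NotFound;
  // otherwise the version is resolved from the manifest's inline version list
  // or by descending the version tree.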
static void ManifestReady(ReadVersionOperation::Ptr op, PromiseType promise,
const ManifestWithTime& manifest_with_time) {
if (!manifest_with_time.manifest ||
CompareVersionSpecToVersion(
op->version_spec, manifest_with_time.manifest->latest_version()) >
0) {
if (manifest_with_time.time < op->staleness_bound) {
auto staleness_bound = op->staleness_bound;
RequestManifest(std::move(op), std::move(promise), staleness_bound);
return;
}
if (!manifest_with_time.manifest ||
IsVersionSpecExact(op->version_spec)) {
op->VersionNotPresent(promise);
return;
}
}
const auto& manifest = *manifest_with_time.manifest;
if (CompareVersionSpecToVersion(op->version_spec,
manifest.versions.front()) >= 0) {
if (auto* ref = internal_ocdbt::FindVersion(manifest.versions,
op->version_spec)) {
promise.SetResult(*ref);
return;
}
op->VersionNotPresent(promise);
return;
}
auto* ref = internal_ocdbt::FindVersion(
manifest.config.version_tree_arity_log2, manifest.version_tree_nodes,
op->version_spec);
if (!ref) {
op->VersionNotPresent(promise);
return;
}
LookupNodeReference(std::move(op), std::move(promise), *ref);
}
void VersionNotPresent(const PromiseType& promise) {
promise.SetResult(absl::NotFoundError(absl::StrFormat(
"Version where %s not present", FormatVersionSpec(version_spec))));
}
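  // Reads the version tree node referenced by `node_ref` and continues the
  // descent on the I/O executor.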
static void LookupNodeReference(ReadVersionOperation::Ptr op,
PromiseType promise,
const VersionNodeReference& node_ref) {
ABSL_LOG_IF(INFO, ocdbt_logging)
<< "ReadVersion: " << FormatVersionSpec(op->version_spec)
<< ", node_ref=" << node_ref;
auto read_future = op->io_handle->GetVersionTreeNode(node_ref.location);
auto executor = op->io_handle->executor;
LinkValue(WithExecutor(std::move(executor),
NodeReadyCallback{std::move(op), node_ref}),
std::move(promise), std::move(read_future));
}
struct NodeReadyCallback {
ReadVersionOperation::Ptr op;
VersionNodeReference node_ref;
void operator()(
PromiseType promise,
ReadyFuture<const std::shared_ptr<const VersionTreeNode>> read_future) {
auto node = read_future.value();
auto* config = op->io_handle->config_state->GetExistingConfig();
assert(config);
TENSORSTORE_RETURN_IF_ERROR(
ValidateVersionTreeNodeReference(
*node, *config, node_ref.generation_number, node_ref.height),
static_cast<void>(promise.SetResult(_)));
if (node->height > 0) {
VisitInteriorNode(std::move(op), *node, std::move(promise));
} else {
VisitLeafNode(std::move(op), *node, std::move(promise));
}
}
};
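  // Interior nodes hold references to child version-tree nodes partitioned by
  // generation; leaf nodes hold the generation references themselves.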
static void VisitInteriorNode(ReadVersionOperation::Ptr op,
const VersionTreeNode& node,
PromiseType promise) {
auto& entries =
std::get<VersionTreeNode::InteriorNodeEntries>(node.entries);
auto* config = op->io_handle->config_state->GetExistingConfig();
assert(config);
auto* node_ref = internal_ocdbt::FindVersion(
config->version_tree_arity_log2, entries, op->version_spec);
if (!node_ref) {
op->VersionNotPresent(std::move(promise));
return;
}
LookupNodeReference(std::move(op), std::move(promise), *node_ref);
}
static void VisitLeafNode(ReadVersionOperation::Ptr op,
const VersionTreeNode& node, PromiseType promise) {
auto& entries = std::get<VersionTreeNode::LeafNodeEntries>(node.entries);
auto* ref = internal_ocdbt::FindVersion(entries, op->version_spec);
if (!ref) {
op->VersionNotPresent(std::move(promise));
return;
}
promise.SetResult(*ref);
}
};
}
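// Generation numbers are 1-based, so an exact request for generation 0 can be
// rejected synchronously without starting the asynchronous operation.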
Future<BtreeGenerationReference> ReadVersion(ReadonlyIoHandle::Ptr io_handle,
VersionSpec version_spec,
absl::Time staleness_bound) {
if (const GenerationNumber* generation_number =
std::get_if<GenerationNumber>(&version_spec)) {
if (*generation_number == 0) {
return absl::InvalidArgumentError("Generation number must be positive");
}
}
return ReadVersionOperation::Start(std::move(io_handle), version_spec,
std::move(staleness_bound));
}
}
} | #include "tensorstore/kvstore/ocdbt/non_distributed/read_version.h"
#include <stddef.h>
#include <algorithm>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/test_util.h"  // assumed location of UniqueNow
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/ocdbt/driver.h"
#include "tensorstore/kvstore/ocdbt/format/version_tree.h"
#include "tensorstore/kvstore/ocdbt/non_distributed/create_new_manifest.h"
#include "tensorstore/kvstore/ocdbt/non_distributed/list_versions.h"
#include "tensorstore/kvstore/ocdbt/test_util.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
namespace {
namespace kvstore = ::tensorstore::kvstore;
using ::tensorstore::MatchesStatus;
using ::tensorstore::span;
using ::tensorstore::internal::UniqueNow;
using ::tensorstore::internal_ocdbt::BtreeGenerationReference;
using ::tensorstore::internal_ocdbt::CommitTime;
using ::tensorstore::internal_ocdbt::CommitTimeUpperBound;
using ::tensorstore::internal_ocdbt::EnsureExistingManifest;
using ::tensorstore::internal_ocdbt::GenerationNumber;
using ::tensorstore::internal_ocdbt::GetOcdbtIoHandle;
using ::tensorstore::internal_ocdbt::ListVersionsFuture;
using ::tensorstore::internal_ocdbt::ListVersionsOptions;
using ::tensorstore::internal_ocdbt::OcdbtDriver;
using ::tensorstore::internal_ocdbt::ReadManifest;
using ::tensorstore::internal_ocdbt::ReadVersion;
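// Commits `num_writes` generations to an OCDBT store backed by an in-memory
// kvstore, then checks that every committed generation can be recovered by
// generation number, by exact commit time, and by commit-time upper bound,
// and that ListVersions honors its min/max filters at every boundary.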
void TestVersioning(::nlohmann::json config_json, size_t num_writes) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto ocdbt_store,
kvstore::Open(
{{"driver", "ocdbt"}, {"config", config_json}, {"base", "memory:
.result());
auto io_handle = GetOcdbtIoHandle(*ocdbt_store.driver);
std::vector<BtreeGenerationReference> generations;
TENSORSTORE_ASSERT_OK(EnsureExistingManifest(io_handle));
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto manifest,
ReadManifest(static_cast<OcdbtDriver&>(*ocdbt_store.driver)));
ASSERT_TRUE(manifest);
ASSERT_EQ(1, manifest->latest_generation());
generations.push_back(manifest->latest_version());
}
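  // Each write commits a new manifest generation; UniqueNow advances the
  // clock so that every generation gets a distinct commit time, making the
  // commit-time lookups below unambiguous.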
  for (size_t i = 0; i < num_writes; ++i) {
UniqueNow(absl::Nanoseconds(2));
TENSORSTORE_ASSERT_OK(
kvstore::Write(ocdbt_store, "a", absl::Cord(tensorstore::StrCat(i))));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto manifest,
ReadManifest(static_cast<OcdbtDriver&>(*ocdbt_store.driver)));
ASSERT_TRUE(manifest);
ASSERT_EQ(i + 2, manifest->latest_generation());
generations.push_back(manifest->latest_version());
}
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto manifest,
ReadManifest(static_cast<OcdbtDriver&>(*ocdbt_store.driver)));
ASSERT_TRUE(manifest);
SCOPED_TRACE(tensorstore::StrCat(*manifest));
{
ListVersionsOptions list_versions_options;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto final_generations,
ListVersionsFuture(io_handle, list_versions_options).result());
EXPECT_EQ(generations, final_generations);
}
for (size_t version_i = 0; version_i < generations.size(); ++version_i) {
const auto& version = generations[version_i];
EXPECT_THAT(ReadVersion(io_handle, version.generation_number).result(),
::testing::Optional(version));
EXPECT_THAT(ReadVersion(io_handle, version.commit_time).result(),
::testing::Optional(version));
EXPECT_THAT(
ReadVersion(io_handle, CommitTimeUpperBound{version.commit_time})
.result(),
::testing::Optional(version));
{
CommitTime newer_commit_time = version.commit_time;
newer_commit_time.value++;
EXPECT_THAT(
ReadVersion(io_handle, CommitTimeUpperBound{newer_commit_time})
.result(),
::testing::Optional(version));
}
}
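  // Lookups outside the committed range must fail: generation 0 is invalid,
  // and generations or commit times beyond either end report NotFound.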
EXPECT_THAT(ReadVersion(io_handle, GenerationNumber(0)).result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
ReadVersion(io_handle, generations.back().generation_number + 1).result(),
MatchesStatus(absl::StatusCode::kNotFound));
{
CommitTime newer_commit_time = generations.back().commit_time;
newer_commit_time.value++;
EXPECT_THAT(ReadVersion(io_handle, newer_commit_time).result(),
MatchesStatus(absl::StatusCode::kNotFound));
}
{
CommitTime older_commit_time = generations.front().commit_time;
older_commit_time.value--;
EXPECT_THAT(ReadVersion(io_handle, older_commit_time).result(),
MatchesStatus(absl::StatusCode::kNotFound));
EXPECT_THAT(ReadVersion(io_handle, CommitTimeUpperBound{older_commit_time})
.result(),
MatchesStatus(absl::StatusCode::kNotFound));
}
  for (ptrdiff_t version_i = -1;
       version_i <= static_cast<ptrdiff_t>(generations.size()); ++version_i) {
SCOPED_TRACE(absl::StrFormat("version_i=%d", version_i));
GenerationNumber generation_number =
static_cast<GenerationNumber>(version_i + 1);
CommitTime intermediate_commit_time, exact_commit_time;
if (version_i == -1) {
exact_commit_time = generations[0].commit_time;
--exact_commit_time.value;
intermediate_commit_time = exact_commit_time;
    } else if (version_i < static_cast<ptrdiff_t>(generations.size())) {
      exact_commit_time = generations[version_i].commit_time;
intermediate_commit_time = exact_commit_time;
intermediate_commit_time.value--;
} else {
exact_commit_time = generations.back().commit_time;
exact_commit_time.value++;
intermediate_commit_time = exact_commit_time;
}
{
auto expected_generations =
span(generations).subspan(std::max(ptrdiff_t(0), version_i));
{
ListVersionsOptions list_versions_options;
list_versions_options.min_generation_number = generation_number;
EXPECT_THAT(
ListVersionsFuture(io_handle, list_versions_options).result(),
::testing::Optional(
::testing::ElementsAreArray(expected_generations)));
}
{
ListVersionsOptions list_versions_options;
list_versions_options.min_commit_time = exact_commit_time;
EXPECT_THAT(
ListVersionsFuture(io_handle, list_versions_options).result(),
::testing::Optional(
::testing::ElementsAreArray(expected_generations)));
}
{
ListVersionsOptions list_versions_options;
list_versions_options.min_commit_time = intermediate_commit_time;
EXPECT_THAT(
ListVersionsFuture(io_handle, list_versions_options).result(),
::testing::Optional(
::testing::ElementsAreArray(expected_generations)));
}
}
{
auto expected_generations =
span(generations)
.subspan(0,
std::min(ptrdiff_t(generations.size()), version_i + 1));
{
ListVersionsOptions list_versions_options;
list_versions_options.max_generation_number = generation_number;
EXPECT_THAT(
ListVersionsFuture(io_handle, list_versions_options).result(),
::testing::Optional(
::testing::ElementsAreArray(expected_generations)));
}
{
ListVersionsOptions list_versions_options;
list_versions_options.max_commit_time = exact_commit_time;
EXPECT_THAT(
ListVersionsFuture(io_handle, list_versions_options).result(),
::testing::Optional(
::testing::ElementsAreArray(expected_generations)));
}
{
auto expected_generations =
span(generations).subspan(0, std::max(ptrdiff_t(0), version_i));
ListVersionsOptions list_versions_options;
list_versions_options.max_commit_time = intermediate_commit_time;
EXPECT_THAT(
ListVersionsFuture(io_handle, list_versions_options).result(),
::testing::Optional(
::testing::ElementsAreArray(expected_generations)));
}
}
}
}
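// Small arities (version_tree_arity_log2 of 1 and 2, i.e. arity 2 and 4) are
// presumably chosen so that only ten writes suffice to produce interior
// version-tree nodes and exercise the multi-level descent path.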
TEST(ReadVersionTest, VersionTreeArityLog2_1) {
TestVersioning({{"version_tree_arity_log2", 1}}, 10);
}
TEST(ReadVersionTest, VersionTreeArityLog2_2) {
TestVersioning({{"version_tree_arity_log2", 2}}, 10);
}
} |