Delete unused variants of rtc::tokenize
Bug: webrtc:6424
Change-Id: I16f3313e242e0e9ee2039a79d3a8b50c28190832
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/231129
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Commit-Queue: Niels Moller <nisse@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#34918}
Committed by: WebRTC LUCI CQ
Parent: 0bf94aaa7c
Commit: 2bd1fadb77
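
Callers that still relied on the removed rtc::tokenize_append can reproduce its append semantics with the surviving three-argument rtc::tokenize, which this change keeps. A minimal sketch (the helper name TokenizeAppend is hypothetical, not part of WebRTC):

    // Minimal sketch, assuming only the surviving three-argument
    // rtc::tokenize. TokenizeAppend is a hypothetical helper name.
    #include <string>
    #include <vector>

    #include "rtc_base/string_encode.h"

    size_t TokenizeAppend(const std::string& source,
                          char delimiter,
                          std::vector<std::string>* fields) {
      // Tokenize into a scratch vector, then splice onto the caller's
      // fields, mirroring what the deleted rtc::tokenize_append did.
      std::vector<std::string> new_fields;
      rtc::tokenize(source, delimiter, &new_fields);
      fields->insert(fields->end(), new_fields.begin(), new_fields.end());
      return fields->size();
    }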
rtc_base/string_encode.cc
@@ -186,52 +186,6 @@ size_t tokenize_with_empty_tokens(const std::string& source,
   return fields->size();
 }
 
-size_t tokenize_append(const std::string& source,
-                       char delimiter,
-                       std::vector<std::string>* fields) {
-  if (!fields)
-    return 0;
-
-  std::vector<std::string> new_fields;
-  tokenize(source, delimiter, &new_fields);
-  fields->insert(fields->end(), new_fields.begin(), new_fields.end());
-  return fields->size();
-}
-
-size_t tokenize(const std::string& source,
-                char delimiter,
-                char start_mark,
-                char end_mark,
-                std::vector<std::string>* fields) {
-  if (!fields)
-    return 0;
-  fields->clear();
-
-  std::string remain_source = source;
-  while (!remain_source.empty()) {
-    size_t start_pos = remain_source.find(start_mark);
-    if (std::string::npos == start_pos)
-      break;
-    std::string pre_mark;
-    if (start_pos > 0) {
-      pre_mark = remain_source.substr(0, start_pos - 1);
-    }
-
-    ++start_pos;
-    size_t end_pos = remain_source.find(end_mark, start_pos);
-    if (std::string::npos == end_pos)
-      break;
-
-    // We have found the matching marks. First tokenize the pre-mark. Then add
-    // the marked part as a single field. Finally, loop back for the post-mark.
-    tokenize_append(pre_mark, delimiter, fields);
-    fields->push_back(remain_source.substr(start_pos, end_pos - start_pos));
-    remain_source = remain_source.substr(end_pos + 1);
-  }
-
-  return tokenize_append(remain_source, delimiter, fields);
-}
-
 bool tokenize_first(const std::string& source,
                     const char delimiter,
                     std::string* token,
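
If a caller still needs the mark-aware splitting removed above, the same observable behavior can be written standalone. A self-contained sketch (illustrative code written for this note, not WebRTC API; the function name is hypothetical):

    // Sketch of the deleted five-argument tokenize's behavior: split on
    // delimiter, but treat a start_mark..end_mark span as one field.
    #include <string>
    #include <vector>

    std::vector<std::string> SplitRespectingMarks(const std::string& source,
                                                  char delimiter,
                                                  char start_mark,
                                                  char end_mark) {
      std::vector<std::string> fields;
      auto append_plain = [&](const std::string& s) {
        // Split s on delimiter, skipping empty tokens, so duplicate and
        // trailing delimiters are ignored as in the deleted code.
        size_t pos = 0;
        while (pos < s.size()) {
          size_t next = s.find(delimiter, pos);
          if (next == std::string::npos)
            next = s.size();
          if (next > pos)
            fields.push_back(s.substr(pos, next - pos));
          pos = next + 1;
        }
      };
      std::string remain = source;
      while (!remain.empty()) {
        size_t start = remain.find(start_mark);
        if (start == std::string::npos)
          break;
        size_t end = remain.find(end_mark, start + 1);
        if (end == std::string::npos)
          break;
        // Tokenize the text before the mark, add the marked span as one
        // field, then continue after the closing mark.
        append_plain(remain.substr(0, start));
        fields.push_back(remain.substr(start + 1, end - start - 1));
        remain = remain.substr(end + 1);
      }
      append_plain(remain);
      return fields;
    }

Note one nuance: the deleted code dropped the single character before start_mark, assuming it was a delimiter; skipping empty tokens here gives the same results for the inputs exercised by the deleted tests.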
rtc_base/string_encode.h
@@ -77,24 +77,6 @@ size_t tokenize_with_empty_tokens(const std::string& source,
                                   char delimiter,
                                   std::vector<std::string>* fields);
 
-// Tokenize and append the tokens to fields. Return the new size of fields.
-size_t tokenize_append(const std::string& source,
-                       char delimiter,
-                       std::vector<std::string>* fields);
-
-// Splits the source string into multiple fields separated by delimiter, with
-// duplicates of delimiter ignored. Trailing delimiter ignored. A substring in
-// between the start_mark and the end_mark is treated as a single field. Return
-// the size of fields. For example, if source is "filename
-// \"/Library/Application Support/media content.txt\"", delimiter is ' ', and
-// the start_mark and end_mark are '"', this method returns two fields:
-// "filename" and "/Library/Application Support/media content.txt".
-size_t tokenize(const std::string& source,
-                char delimiter,
-                char start_mark,
-                char end_mark,
-                std::vector<std::string>* fields);
-
 // Extract the first token from source as separated by delimiter, with
 // duplicates of delimiter ignored. Return false if the delimiter could not be
 // found, otherwise return true.
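
For reference, the removed header comment's own example as a usage sketch; this overload no longer exists after this change, so the snippet only compiles against WebRTC before #34918:

    // Usage sketch of the removed five-argument overload, following the
    // example in the deleted header comment.
    #include <string>
    #include <vector>

    #include "rtc_base/string_encode.h"

    void Demo() {
      std::vector<std::string> fields;
      rtc::tokenize(
          "filename \"/Library/Application Support/media content.txt\"",
          ' ', '"', '"', &fields);
      // fields.size() == 2:
      //   fields[0] == "filename"
      //   fields[1] == "/Library/Application Support/media content.txt"
    }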
rtc_base/string_encode_unittest.cc
@@ -169,57 +169,6 @@ TEST(TokenizeTest, CompareSubstrings) {
   ASSERT_EQ(0ul, fields.size());
 }
 
-TEST(TokenizeTest, TokenizeAppend) {
-  ASSERT_EQ(0ul, tokenize_append("A B C", ' ', nullptr));
-
-  std::vector<std::string> fields;
-
-  tokenize_append("A B C", ' ', &fields);
-  ASSERT_EQ(3ul, fields.size());
-  ASSERT_STREQ("B", fields.at(1).c_str());
-
-  tokenize_append("D E", ' ', &fields);
-  ASSERT_EQ(5ul, fields.size());
-  ASSERT_STREQ("B", fields.at(1).c_str());
-  ASSERT_STREQ("E", fields.at(4).c_str());
-}
-
-TEST(TokenizeTest, TokenizeWithMarks) {
-  ASSERT_EQ(0ul, tokenize("D \"A B", ' ', '(', ')', nullptr));
-
-  std::vector<std::string> fields;
-  tokenize("A B C", ' ', '"', '"', &fields);
-  ASSERT_EQ(3ul, fields.size());
-  ASSERT_STREQ("C", fields.at(2).c_str());
-
-  tokenize("\"A B\" C", ' ', '"', '"', &fields);
-  ASSERT_EQ(2ul, fields.size());
-  ASSERT_STREQ("A B", fields.at(0).c_str());
-
-  tokenize("D \"A B\" C", ' ', '"', '"', &fields);
-  ASSERT_EQ(3ul, fields.size());
-  ASSERT_STREQ("D", fields.at(0).c_str());
-  ASSERT_STREQ("A B", fields.at(1).c_str());
-
-  tokenize("D \"A B\" C \"E F\"", ' ', '"', '"', &fields);
-  ASSERT_EQ(4ul, fields.size());
-  ASSERT_STREQ("D", fields.at(0).c_str());
-  ASSERT_STREQ("A B", fields.at(1).c_str());
-  ASSERT_STREQ("E F", fields.at(3).c_str());
-
-  // No matching marks.
-  tokenize("D \"A B", ' ', '"', '"', &fields);
-  ASSERT_EQ(3ul, fields.size());
-  ASSERT_STREQ("D", fields.at(0).c_str());
-  ASSERT_STREQ("\"A", fields.at(1).c_str());
-
-  tokenize("D (A B) C (E F) G", ' ', '(', ')', &fields);
-  ASSERT_EQ(5ul, fields.size());
-  ASSERT_STREQ("D", fields.at(0).c_str());
-  ASSERT_STREQ("A B", fields.at(1).c_str());
-  ASSERT_STREQ("E F", fields.at(3).c_str());
-}
-
 TEST(TokenizeTest, TokenizeWithEmptyTokens) {
   std::vector<std::string> fields;
   EXPECT_EQ(3ul, tokenize_with_empty_tokens("a.b.c", '.', &fields));
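
Were equivalent coverage wanted after this change, the append case could be expressed against the surviving tokenize alone. A sketch in the style of the deleted tests (test name hypothetical, not added by this commit; assumes the same test-file context, where tokenize is callable unqualified):

    TEST(TokenizeTest, AppendViaPlainTokenize) {
      std::vector<std::string> fields;
      std::vector<std::string> more;
      tokenize("A B C", ' ', &fields);
      tokenize("D E", ' ', &more);
      // Splice the second batch onto the first, as tokenize_append used to.
      fields.insert(fields.end(), more.begin(), more.end());
      EXPECT_EQ(5ul, fields.size());
      EXPECT_STREQ("B", fields.at(1).c_str());
      EXPECT_STREQ("E", fields.at(4).c_str());
    }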