Skip to content

Commit

Permalink
⚡ added improvements (#365)
Browse files Browse the repository at this point in the history
  • Loading branch information
nlohmann committed Nov 26, 2016
1 parent 4bd3b1b commit 2773038
Show file tree
Hide file tree
Showing 4 changed files with 62 additions and 26 deletions.
2 changes: 1 addition & 1 deletion README.md
Expand Up @@ -500,7 +500,7 @@ I deeply appreciate the help of the following people.
- [Vladimir Petrigo](https://github.com/vpetrigo) made a SFINAE hack more readable. - [Vladimir Petrigo](https://github.com/vpetrigo) made a SFINAE hack more readable.
- [Denis Andrejew](https://github.com/seeekr) fixed a grammar issue in the README file. - [Denis Andrejew](https://github.com/seeekr) fixed a grammar issue in the README file.
- [Pierre-Antoine Lacaze](https://github.com/palacaze) found a subtle bug in the `dump()` function. - [Pierre-Antoine Lacaze](https://github.com/palacaze) found a subtle bug in the `dump()` function.
- [TurpentineDistillery](https://github.com/TurpentineDistillery) pointed to [`std::locale::classic()`](http://en.cppreference.com/w/cpp/locale/locale/classic) to avoid too much locale juggling and found some nice performance improvements in the parser. - [TurpentineDistillery](https://github.com/TurpentineDistillery) pointed to [`std::locale::classic()`](http://en.cppreference.com/w/cpp/locale/locale/classic) to avoid too much locale juggling, found some nice performance improvements in the parser and improved the benchmarking code.


Thanks a lot for helping out! Thanks a lot for helping out!


Expand Down
2 changes: 1 addition & 1 deletion benchmarks/benchmarks.cpp
Expand Up @@ -58,7 +58,7 @@ static void bench(benchpress::context& ctx,


for (size_t i = 0; i < ctx.num_iterations(); ++i) for (size_t i = 0; i < ctx.num_iterations(); ++i)
{ {
// clear flags and rewind // clear flags and rewind
istr.clear(); istr.clear();
istr.seekg(0); istr.seekg(0);
json j; json j;
Expand Down
42 changes: 30 additions & 12 deletions src/json.hpp
Expand Up @@ -8719,8 +8719,22 @@ class basic_json
*/ */
void fill_line_buffer(size_t n = 0) void fill_line_buffer(size_t n = 0)
{ {
// if line buffer is used, m_content points to its data
assert(m_line_buffer.empty()
or m_content == reinterpret_cast<const lexer_char_t*>(m_line_buffer.data()));

// if line buffer is used, m_limit is set past the end of its data
assert(m_line_buffer.empty()
or m_limit == m_content + m_line_buffer.size());

// pointer relationships
assert(m_content <= m_start);
assert(m_start <= m_cursor);
assert(m_cursor <= m_limit);
assert(m_marker == nullptr or m_marker <= m_limit);

// number of processed characters (p) // number of processed characters (p)
const auto offset_start = m_start - m_content; const size_t num_processed_chars = static_cast<size_t>(m_start - m_content);
// offset for m_marker wrt. to m_start // offset for m_marker wrt. to m_start
const auto offset_marker = (m_marker == nullptr) ? 0 : m_marker - m_start; const auto offset_marker = (m_marker == nullptr) ? 0 : m_marker - m_start;
// number of unprocessed characters (u) // number of unprocessed characters (u)
Expand All @@ -8729,23 +8743,23 @@ class basic_json
// no stream is used or end of file is reached // no stream is used or end of file is reached
if (m_stream == nullptr or m_stream->eof()) if (m_stream == nullptr or m_stream->eof())
{ {
// skip this part if we are already using the line buffer // m_start may or may not be pointing into m_line_buffer at
if (m_start != reinterpret_cast<const lexer_char_t*>(m_line_buffer.data())) // this point. We trust the standard library to do the right
{ // thing. See http://stackoverflow.com/q/28142011/266378
// copy unprocessed characters to line buffer m_line_buffer.assign(m_start, m_limit);
m_line_buffer.assign(m_start, m_limit);
m_cursor = m_limit;
}


// append n characters to make sure that there is sufficient // append n characters to make sure that there is sufficient
// space between m_cursor and m_limit // space between m_cursor and m_limit
m_line_buffer.append(1, '\x00'); m_line_buffer.append(1, '\x00');
m_line_buffer.append(n - 1, '\x01'); if (n > 0)
{
m_line_buffer.append(n - 1, '\x01');
}
} }
else else
{ {
// delete processed characters from line buffer // delete processed characters from line buffer
m_line_buffer.erase(0, static_cast<size_t>(offset_start)); m_line_buffer.erase(0, num_processed_chars);
// read next line from input stream // read next line from input stream
m_line_buffer_tmp.clear(); m_line_buffer_tmp.clear();
std::getline(*m_stream, m_line_buffer_tmp, '\n'); std::getline(*m_stream, m_line_buffer_tmp, '\n');
Expand All @@ -8756,7 +8770,7 @@ class basic_json
} }


// set pointers // set pointers
m_content = reinterpret_cast<const lexer_char_t*>(m_line_buffer.c_str()); m_content = reinterpret_cast<const lexer_char_t*>(m_line_buffer.data());
assert(m_content != nullptr); assert(m_content != nullptr);
m_start = m_content; m_start = m_content;
m_marker = m_start + offset_marker; m_marker = m_start + offset_marker;
Expand Down Expand Up @@ -8843,7 +8857,11 @@ class basic_json
auto e = std::find(i, m_cursor - 1, '\\'); auto e = std::find(i, m_cursor - 1, '\\');
if (e != i) if (e != i)
{ {
result.append(i, e); // see https://github.com/nlohmann/json/issues/365#issuecomment-262874705
for (auto k = i; k < e; k++)
{
result.push_back(static_cast<typename string_t::value_type>(*k));
}
i = e - 1; // -1 because of ++i i = e - 1; // -1 because of ++i
} }
else else
Expand Down
42 changes: 30 additions & 12 deletions src/json.hpp.re2c
Expand Up @@ -7868,8 +7868,22 @@ class basic_json
*/ */
void fill_line_buffer(size_t n = 0) void fill_line_buffer(size_t n = 0)
{ {
// if line buffer is used, m_content points to its data
assert(m_line_buffer.empty()
or m_content == reinterpret_cast<const lexer_char_t*>(m_line_buffer.data()));

// if line buffer is used, m_limit is set past the end of its data
assert(m_line_buffer.empty()
or m_limit == m_content + m_line_buffer.size());

// pointer relationships
assert(m_content <= m_start);
assert(m_start <= m_cursor);
assert(m_cursor <= m_limit);
assert(m_marker == nullptr or m_marker <= m_limit);

// number of processed characters (p) // number of processed characters (p)
const auto offset_start = m_start - m_content; const size_t num_processed_chars = static_cast<size_t>(m_start - m_content);
// offset for m_marker wrt. to m_start // offset for m_marker wrt. to m_start
const auto offset_marker = (m_marker == nullptr) ? 0 : m_marker - m_start; const auto offset_marker = (m_marker == nullptr) ? 0 : m_marker - m_start;
// number of unprocessed characters (u) // number of unprocessed characters (u)
Expand All @@ -7878,23 +7892,23 @@ class basic_json
// no stream is used or end of file is reached // no stream is used or end of file is reached
if (m_stream == nullptr or m_stream->eof()) if (m_stream == nullptr or m_stream->eof())
{ {
// skip this part if we are already using the line buffer // m_start may or may not be pointing into m_line_buffer at
if (m_start != reinterpret_cast<const lexer_char_t*>(m_line_buffer.data())) // this point. We trust the standard library to do the right
{ // thing. See http://stackoverflow.com/q/28142011/266378
// copy unprocessed characters to line buffer m_line_buffer.assign(m_start, m_limit);
m_line_buffer.assign(m_start, m_limit);
m_cursor = m_limit;
}


// append n characters to make sure that there is sufficient // append n characters to make sure that there is sufficient
// space between m_cursor and m_limit // space between m_cursor and m_limit
m_line_buffer.append(1, '\x00'); m_line_buffer.append(1, '\x00');
m_line_buffer.append(n - 1, '\x01'); if (n > 0)
{
m_line_buffer.append(n - 1, '\x01');
}
} }
else else
{ {
// delete processed characters from line buffer // delete processed characters from line buffer
m_line_buffer.erase(0, static_cast<size_t>(offset_start)); m_line_buffer.erase(0, num_processed_chars);
// read next line from input stream // read next line from input stream
m_line_buffer_tmp.clear(); m_line_buffer_tmp.clear();
std::getline(*m_stream, m_line_buffer_tmp, '\n'); std::getline(*m_stream, m_line_buffer_tmp, '\n');
Expand All @@ -7905,7 +7919,7 @@ class basic_json
} }


// set pointers // set pointers
m_content = reinterpret_cast<const lexer_char_t*>(m_line_buffer.c_str()); m_content = reinterpret_cast<const lexer_char_t*>(m_line_buffer.data());
assert(m_content != nullptr); assert(m_content != nullptr);
m_start = m_content; m_start = m_content;
m_marker = m_start + offset_marker; m_marker = m_start + offset_marker;
Expand Down Expand Up @@ -7992,7 +8006,11 @@ class basic_json
auto e = std::find(i, m_cursor - 1, '\\'); auto e = std::find(i, m_cursor - 1, '\\');
if (e != i) if (e != i)
{ {
result.append(i, e); // see https://github.com/nlohmann/json/issues/365#issuecomment-262874705
for (auto k = i; k < e; k++)
{
result.push_back(static_cast<typename string_t::value_type>(*k));
}
i = e - 1; // -1 because of ++i i = e - 1; // -1 because of ++i
} }
else else
Expand Down

0 comments on commit 2773038

Please sign in to comment.