From 5d4b3bc419fc5eb2bd78b684e695091ac0a6961b Mon Sep 17 00:00:00 2001
From: Johan Wikman
Date: Mon, 18 Jun 2018 12:07:56 +0300
Subject: [PATCH] MXS-1887 Prevent gwbuf_copy_data() performance hit

The cache filter walks through the resultset in order to detect when the
resultset ends. That is, it reads each packet header as it arrives. If the
resultset is large, the cache will have to read several packet headers. It
does that using gwbuf_copy_data().

However, as that was done using the first received GWBUF as the starting
point, the buffer chain was walked over and over again inside
gwbuf_copy_data(), resulting in a significant performance hit.

Now we separately store the last buffer received and its starting offset.
That way there will be no buffer chain walking.

As this is a common problem, GWBUF could cache the offset of the tail, thus
removing the performance penalty if you read from an offset that happens to
be in the tail. However, it's better to do that as part of a general
overhaul of GWBUF.
---
 .../filter/cache/cachefiltersession.cc | 39 +++++++++++++++----
 .../filter/cache/cachefiltersession.hh |  8 +++-
 2 files changed, 39 insertions(+), 8 deletions(-)

diff --git a/server/modules/filter/cache/cachefiltersession.cc b/server/modules/filter/cache/cachefiltersession.cc
index 32df9e29c..f04a92ec5 100644
--- a/server/modules/filter/cache/cachefiltersession.cc
+++ b/server/modules/filter/cache/cachefiltersession.cc
@@ -309,11 +309,15 @@ int CacheFilterSession::clientReply(GWBUF* pData)
     if (m_res.pData)
     {
         gwbuf_append(m_res.pData, pData);
+        m_res.pData_last = pData;
+        m_res.offset_last = m_res.length;
         m_res.length += gwbuf_length(pData); // pData may be a chain, so not GWBUF_LENGTH().
     }
     else
     {
         m_res.pData = pData;
+        m_res.pData_last = pData;
+        m_res.offset_last = 0;
         m_res.length = gwbuf_length(pData);
     }
 
@@ -406,7 +410,7 @@ int CacheFilterSession::handle_expecting_fields()
     while (!insufficient && (buflen - m_res.offset >= MYSQL_HEADER_LEN))
     {
         uint8_t header[MYSQL_HEADER_LEN + 1];
-        gwbuf_copy_data(m_res.pData, m_res.offset, MYSQL_HEADER_LEN + 1, header);
+        copy_command_header_at_offset(header);
 
         size_t packetlen = MYSQL_HEADER_LEN + MYSQL_GET_PAYLOAD_LEN(header);
 
@@ -491,7 +495,7 @@ int CacheFilterSession::handle_expecting_response()
         // Reserve enough space to accomodate for the largest length encoded integer,
         // which is type field + 8 bytes.
         uint8_t header[MYSQL_HEADER_LEN + 1 + 8];
-        gwbuf_copy_data(m_res.pData, 0, MYSQL_HEADER_LEN + 1, header);
+        copy_data(0, MYSQL_HEADER_LEN + 1, header);
 
         switch ((int)MYSQL_GET_COMMAND(header))
         {
@@ -524,8 +528,7 @@ int CacheFilterSession::handle_expecting_response()
         {
             // Now we can figure out how many fields there are, but first we
             // need to copy some more data.
-            gwbuf_copy_data(m_res.pData,
-                            MYSQL_HEADER_LEN + 1, n_bytes - 1, &header[MYSQL_HEADER_LEN + 1]);
+            copy_data(MYSQL_HEADER_LEN + 1, n_bytes - 1, &header[MYSQL_HEADER_LEN + 1]);
 
             m_res.nTotalFields = mxs_leint_value(&header[4]);
             m_res.offset = MYSQL_HEADER_LEN + n_bytes;
@@ -563,7 +566,7 @@ int CacheFilterSession::handle_expecting_rows()
     while (!insufficient && (buflen - m_res.offset >= MYSQL_HEADER_LEN))
     {
         uint8_t header[MYSQL_HEADER_LEN + 1];
-        gwbuf_copy_data(m_res.pData, m_res.offset, MYSQL_HEADER_LEN + 1, header);
+        copy_command_header_at_offset(header);
 
         size_t packetlen = MYSQL_HEADER_LEN + MYSQL_GET_PAYLOAD_LEN(header);
 
@@ -624,8 +627,7 @@ int CacheFilterSession::handle_expecting_use_response()
     if (buflen >= MYSQL_HEADER_LEN + 1) // We need the command byte.
     {
         uint8_t command;
-
-        gwbuf_copy_data(m_res.pData, MYSQL_HEADER_LEN, 1, &command);
+        copy_data(MYSQL_HEADER_LEN, 1, &command);
 
         switch (command)
         {
@@ -692,6 +694,8 @@ void CacheFilterSession::reset_response_state()
 {
     m_res.pData = NULL;
     m_res.length = 0;
+    m_res.pData_last = NULL;
+    m_res.offset_last = 0;
     m_res.nTotalFields = 0;
     m_res.nFields = 0;
     m_res.nRows = 0;
@@ -1042,3 +1046,24 @@ CacheFilterSession::routing_action_t CacheFilterSession::route_SELECT(cache_acti
 
     return routing_action;
 }
+
+void CacheFilterSession::copy_data(size_t offset, size_t nBytes, uint8_t* pTo) const
+{
+    if (offset >= m_res.offset_last)
+    {
+        gwbuf_copy_data(m_res.pData_last,
+                        offset - m_res.offset_last, nBytes, pTo);
+    }
+    else
+    {
+        // We do not expect this to happen.
+        ss_dassert(!true);
+        gwbuf_copy_data(m_res.pData, offset, nBytes, pTo);
+    }
+}
+
+void CacheFilterSession::copy_command_header_at_offset(uint8_t* pHeader) const
+{
+    copy_data(m_res.offset, MYSQL_HEADER_LEN + 1, pHeader);
+}
+
diff --git a/server/modules/filter/cache/cachefiltersession.hh b/server/modules/filter/cache/cachefiltersession.hh
index 7eea52ccd..fcd639fc5 100644
--- a/server/modules/filter/cache/cachefiltersession.hh
+++ b/server/modules/filter/cache/cachefiltersession.hh
@@ -35,11 +35,13 @@ public:
     struct CACHE_RESPONSE_STATE
     {
         GWBUF* pData;        /**< Response data, possibly incomplete. */
+        size_t offset;       /**< Where we are in the response buffer. */
         size_t length;       /**< Length of pData. */
+        GWBUF* pData_last;   /**< Last data received. */
+        size_t offset_last;  /**< Offset of last data. */
         size_t nTotalFields; /**< The number of fields a resultset contains. */
         size_t nFields;      /**< How many fields we have received, <= n_totalfields. */
         size_t nRows;        /**< How many rows we have received. */
-        size_t offset;       /**< Where we are in the response buffer. */
     };
 
@@ -138,6 +140,10 @@ private:
     routing_action_t route_COM_QUERY(GWBUF* pPacket);
    routing_action_t route_SELECT(cache_action_t action, GWBUF* pPacket);
 
+    void copy_data(size_t offset, size_t nBytes, uint8_t* pTo) const;
+
+    void copy_command_header_at_offset(uint8_t* pHeader) const;
+
 private:
     CacheFilterSession(MXS_SESSION* pSession, Cache* pCache, char* zDefaultDb);
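
The technique above can be illustrated with a minimal, self-contained C++
sketch. Buf, naive_copy and Reader below are hypothetical stand-ins for
GWBUF, gwbuf_copy_data() and the cached m_res.pData_last/m_res.offset_last
state added by this patch; this is only a model of the idea, not MaxScale
code. It shows why starting from the cached tail buffer avoids re-walking
the chain for offsets that fall in the most recently received data.

// Minimal sketch -- NOT MaxScale code. Buf, naive_copy and Reader are
// made-up stand-ins for GWBUF, gwbuf_copy_data() and the cached tail state.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

struct Buf                       // one link of a buffer chain
{
    std::vector<uint8_t> data;
    Buf* next = nullptr;
};

// Copies nBytes starting at 'offset' into pTo, walking the chain from pStart.
// When pStart is always the head, every call re-walks the whole chain.
static void naive_copy(const Buf* pStart, size_t offset, size_t nBytes, uint8_t* pTo)
{
    const Buf* p = pStart;

    while (p && offset >= p->data.size())    // skip buffers that lie before 'offset'
    {
        offset -= p->data.size();
        p = p->next;
    }

    while (p && nBytes > 0)
    {
        size_t n = std::min(nBytes, p->data.size() - offset);
        std::memcpy(pTo, p->data.data() + offset, n);
        pTo += n;
        nBytes -= n;
        offset = 0;
        p = p->next;
    }
}

// Remembers the most recently appended buffer and the chain offset at which
// it begins, mirroring m_res.pData_last and m_res.offset_last.
class Reader
{
public:
    // lengthSoFar = total number of bytes in the chain before pBuf was appended.
    void buffer_appended(const Buf* pBuf, size_t lengthSoFar)
    {
        m_pLast = pBuf;
        m_offsetLast = lengthSoFar;
    }

    void copy(const Buf* pHead, size_t offset, size_t nBytes, uint8_t* pTo) const
    {
        if (m_pLast && offset >= m_offsetLast)
        {
            // Start from the cached tail; the earlier buffers are not touched.
            naive_copy(m_pLast, offset - m_offsetLast, nBytes, pTo);
        }
        else
        {
            naive_copy(pHead, offset, nBytes, pTo);   // fallback, not expected
        }
    }

private:
    const Buf* m_pLast = nullptr;
    size_t m_offsetLast = 0;
};

int main()
{
    Buf a;
    a.data = {1, 2, 3};
    Buf b;
    b.data = {4, 5};
    Buf c;
    c.data = {6, 7, 8, 9};
    a.next = &b;
    b.next = &c;

    Reader reader;
    reader.buffer_appended(&c, a.data.size() + b.data.size());  // c starts at offset 5

    uint8_t header[3];
    reader.copy(&a, 6, sizeof(header), header);  // served from c alone
    assert(header[0] == 7 && header[1] == 8 && header[2] == 9);

    return 0;
}

With the naive call every read starts from the head of the chain, so reading
one header per received buffer does work that grows quadratically with the
number of buffers; starting from the cached tail keeps each read proportional
to the bytes actually copied, which is the effect the commit message describes.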