Merge branch '2.2' into develop

This commit is contained in:
Johan Wikman 2018-06-20 12:55:15 +03:00
commit 47b2036afa
2 changed files with 38 additions and 8 deletions

View File

@@ -354,11 +354,15 @@ int CacheFilterSession::clientReply(GWBUF* pData)
if (m_res.pData)
{
gwbuf_append(m_res.pData, pData);
m_res.pData_last = pData;
m_res.offset_last = m_res.length;
m_res.length += gwbuf_length(pData); // pData may be a chain, so not GWBUF_LENGTH().
}
else
{
m_res.pData = pData;
m_res.pData_last = pData;
m_res.offset_last = 0;
m_res.length = gwbuf_length(pData);
}
@@ -451,7 +455,7 @@ int CacheFilterSession::handle_expecting_fields()
while (!insufficient && (buflen - m_res.offset >= MYSQL_HEADER_LEN))
{
uint8_t header[MYSQL_HEADER_LEN + 1];
gwbuf_copy_data(m_res.pData, m_res.offset, MYSQL_HEADER_LEN + 1, header);
copy_command_header_at_offset(header);
size_t packetlen = MYSQL_HEADER_LEN + MYSQL_GET_PAYLOAD_LEN(header);
@@ -536,7 +540,7 @@ int CacheFilterSession::handle_expecting_response()
// Reserve enough space to accommodate the largest length encoded integer,
// which is type field + 8 bytes.
uint8_t header[MYSQL_HEADER_LEN + 1 + 8];
gwbuf_copy_data(m_res.pData, 0, MYSQL_HEADER_LEN + 1, header);
copy_data(0, MYSQL_HEADER_LEN + 1, header);
switch ((int)MYSQL_GET_COMMAND(header))
{
@@ -569,8 +573,7 @@ int CacheFilterSession::handle_expecting_response()
{
// Now we can figure out how many fields there are, but first we
// need to copy some more data.
gwbuf_copy_data(m_res.pData,
MYSQL_HEADER_LEN + 1, n_bytes - 1, &header[MYSQL_HEADER_LEN + 1]);
copy_data(MYSQL_HEADER_LEN + 1, n_bytes - 1, &header[MYSQL_HEADER_LEN + 1]);
m_res.nTotalFields = mxs_leint_value(&header[4]);
m_res.offset = MYSQL_HEADER_LEN + n_bytes;
@@ -608,7 +611,7 @@ int CacheFilterSession::handle_expecting_rows()
while (!insufficient && (buflen - m_res.offset >= MYSQL_HEADER_LEN))
{
uint8_t header[MYSQL_HEADER_LEN + 1];
gwbuf_copy_data(m_res.pData, m_res.offset, MYSQL_HEADER_LEN + 1, header);
copy_command_header_at_offset(header);
size_t packetlen = MYSQL_HEADER_LEN + MYSQL_GET_PAYLOAD_LEN(header);
@@ -669,8 +672,7 @@ int CacheFilterSession::handle_expecting_use_response()
if (buflen >= MYSQL_HEADER_LEN + 1) // We need the command byte.
{
uint8_t command;
gwbuf_copy_data(m_res.pData, MYSQL_HEADER_LEN, 1, &command);
copy_data(MYSQL_HEADER_LEN, 1, &command);
switch (command)
{
@@ -737,6 +739,8 @@ void CacheFilterSession::reset_response_state()
{
m_res.pData = NULL;
m_res.length = 0;
m_res.pData_last = NULL;
m_res.offset_last = 0;
m_res.nTotalFields = 0;
m_res.nFields = 0;
m_res.nRows = 0;
@@ -1393,3 +1397,23 @@ char* CacheFilterSession::set_cache_hard_ttl(void* pContext,
return pThis->set_cache_hard_ttl(zName, pValue_begin, pValue_end);
}
// Copies nBytes of response data, starting at the absolute offset @c offset,
// into @c pTo. When the requested offset falls inside the most recently
// appended buffer (the common case), the copy starts from m_res.pData_last
// so that the whole buffer chain need not be walked from the beginning.
void CacheFilterSession::copy_data(size_t offset, size_t nBytes, uint8_t* pTo) const
{
    if (offset < m_res.offset_last)
    {
        // The offset precedes the last appended buffer; we do not expect
        // this to happen, so fall back to copying from the chain start.
        ss_dassert(!true);
        gwbuf_copy_data(m_res.pData, offset, nBytes, pTo);
        return;
    }

    // Translate the absolute offset into an offset relative to the last buffer.
    size_t relative_offset = offset - m_res.offset_last;
    gwbuf_copy_data(m_res.pData_last, relative_offset, nBytes, pTo);
}
// Copies the packet header plus the command byte (MYSQL_HEADER_LEN + 1 bytes)
// of the packet located at the current response offset (m_res.offset) into
// @c pHeader. The destination must have room for MYSQL_HEADER_LEN + 1 bytes.
void CacheFilterSession::copy_command_header_at_offset(uint8_t* pHeader) const
{
    copy_data(m_res.offset, MYSQL_HEADER_LEN + 1, pHeader);
}

View File

@@ -38,11 +38,13 @@ public:
struct CACHE_RESPONSE_STATE
{
GWBUF* pData; /**< Response data, possibly incomplete. */
size_t offset; /**< Where we are in the response buffer. */
size_t length; /**< Length of pData. */
GWBUF* pData_last; /**< Last data received. */
size_t offset_last; /**< Offset of last data. */
size_t nTotalFields; /**< The number of fields a resultset contains. */
size_t nFields; /**< How many fields we have received, <= nTotalFields. */
size_t nRows; /**< How many rows we have received. */
size_t offset; /**< Where we are in the response buffer. */
};
/**
@@ -171,6 +173,10 @@ private:
const char* pValue_begin,
const char* pValue_end);
void copy_data(size_t offset, size_t nBytes, uint8_t* pTo) const;
void copy_command_header_at_offset(uint8_t* pHeader) const;
private:
CacheFilterSession(MXS_SESSION* pSession, Cache* pCache, char* zDefaultDb);