Code alignment with masstree third party code
@@ -25,6 +25,8 @@
#ifndef MOT_MASSTREE_HPP
#define MOT_MASSTREE_HPP

using namespace MOT;

#include "masstree.hh"

namespace Masstree {

@@ -32,11 +32,10 @@

namespace Masstree {
template <typename P>
-void basic_table<P>::find(MOT::Key const* const& key, void*& output, bool& found, const uint32_t& pid) const
+void basic_table<P>::find(
+    const uint8_t* key, const uint32_t key_len, void*& output, bool& found, const uint32_t& pid) const
{

-    unlocked_cursor_type lp(
-        *this, reinterpret_cast<const unsigned char*>(key->GetKeyBuf()), ALIGN8(key->GetKeyLength()));
+    unlocked_cursor_type lp(*this, reinterpret_cast<const unsigned char*>(key), ALIGN8(key_len));

    found = lp.find_unlocked(*mtSessionThreadInfo);

@@ -31,12 +31,13 @@

namespace Masstree {
template <typename P>
-void* basic_table<P>::insert(MOT::Key const* const& key, void* const& entry, bool& result, const uint32_t& pid)
+void* basic_table<P>::insert(
+    const uint8_t* key, const uint32_t key_len, void* const& entry, bool& result, const uint32_t& pid)
{

    MOT_LOG_DEBUG("table: %s", name_.c_str());
    // This should be optimized at compile time by bitshifts and using ctz
-    cursor_type lp(*this, key->GetKeyBuf(), ALIGN8(key->GetKeyLength()));
+    cursor_type lp(*this, key, ALIGN8(key_len));
    void* value_to_return = nullptr;

    /* The handler represents a thread and its main purpose is to work lockless

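These two hunks decouple basic_table<P>::find() and insert() from MOT::Key: the caller flattens the key into a raw buffer plus length before crossing into the masstree template code. A minimal caller-side sketch of the new contract (LookupRow and the surrounding wrapper are illustrative names, not part of the commit; the real call sites are the IndexReadImpl and IndexInsertImpl hunks further down):

    // Hypothetical MOT-side helper over the new find() signature. The MOT::Key
    // stays on the caller side; only raw bytes reach the masstree code.
    bool LookupRow(const MOT::Key* key, void*& row, uint32_t pid) const
    {
        bool found = false;
        m_index.find(key->GetKeyBuf(), key->GetKeyLength(), row, found, pid);
        return found;
    }
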
@@ -31,7 +31,7 @@

namespace Masstree {
template <typename P>
-void basic_table<P>::iteratorScan(const char* keybuf, uint32_t keylen, const bool& matchKey, Iterator* const& it,
+void basic_table<P>::iteratorScan(const char* keybuf, uint32_t keylen, const bool& matchKey, void* const& it,
    const bool& forwardDirection, bool& result, const uint32_t& pid)
{
    ForwardIterator* fit = nullptr;

@@ -90,7 +90,7 @@ void* threadinfo::allocate(size_t sz, memtag tag, size_t* actual_size)
    int size = sz;
    void* p = nullptr;
    if (likely(!use_pool())) {
-        p = cur_working_index->AllocateMem(size, tag);
+        p = ((MasstreePrimaryIndex*)cur_working_index)->AllocateMem(size, tag);
    } else {
        p = malloc(sz + memdebug_size);
    }

@@ -110,7 +110,7 @@ void threadinfo::deallocate(void* p, size_t sz, memtag tag)
    MOT_ASSERT(p);
    p = memdebug::check_free(p, sz, tag);
    if (likely(!use_pool())) {
-        cur_working_index->DeallocateMem(p, sz, tag);
+        ((MasstreePrimaryIndex*)cur_working_index)->DeallocateMem(p, sz, tag);
    } else {
        free(p);
    }

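In the two allocator hunks above, threadinfo keeps cur_working_index untyped and each use site casts it back to MOT::MasstreePrimaryIndex*, so the third-party masstree code compiles without the full MOT index definition. A self-contained sketch of that pattern under assumed names (IndexLike and ThreadInfoLike stand in for the real classes and are not part of the commit):

    #include <cstddef>
    #include <cstdlib>

    // IndexLike plays the role of MOT::MasstreePrimaryIndex.
    struct IndexLike {
        void* AllocateMem(int size, int tag)
        {
            (void)tag;  // the tag only matters to the real pool allocator
            return malloc(size);
        }
        void DeallocateMem(void* p, size_t sz, int tag)
        {
            (void)sz;
            (void)tag;
            free(p);
        }
    };

    // ThreadInfoLike mirrors how threadinfo stores the index as void* and
    // recovers the concrete type only at the call site.
    struct ThreadInfoLike {
        void* cur_working_index = nullptr;

        void* allocate(int size, int tag)
        {
            return ((IndexLike*)cur_working_index)->AllocateMem(size, tag);
        }

        void deallocate(void* p, size_t sz, int tag)
        {
            ((IndexLike*)cur_working_index)->DeallocateMem(p, sz, tag);
        }
    };
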
@@ -121,16 +121,17 @@ void threadinfo::ng_record_rcu(void* p, int sz, memtag tag)
{
    MOT_ASSERT(p);
    memdebug::check_rcu(p, sz, tag);
-    cur_working_index->RecordMemRcu(p, sz, tag);
+    ((MasstreePrimaryIndex*)cur_working_index)->RecordMemRcu(p, sz, tag);
    mark(threadcounter(tc_alloc + (tag > memtag_value)), -sz);
}

-void threadinfo::set_gc_session(MOT::GcManager* gc_session)
+// MOT is using MOT::GcManager class to manage gc_session
+void threadinfo::set_gc_session(void* gc_session)
{
    gc_session_ = gc_session;
}

-inline MOT::GcManager* threadinfo::get_gc_session()
+inline void* threadinfo::get_gc_session()
{
    return gc_session_;
}

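With set_gc_session()/get_gc_session() now trafficking in void*, the typed MOT::GcManager* is recovered on the MOT side before use (see the RecordMemRcu hunk at the end of the commit). A hedged sketch of the intended round trip; gcManager is an assumed session-owned MOT::GcManager*, and the surrounding setup is simplified:

    // Before a tree operation: hand the session to threadinfo untyped.
    mtSessionThreadInfo->set_gc_session(gcManager);  // gcManager: MOT::GcManager* (assumed, obtained elsewhere)

    // Inside MOT code that needs the typed object again:
    MOT::GcManager* gc = (MOT::GcManager*)mtSessionThreadInfo->get_gc_session();

    // After the operation the pointer is cleared, as IndexInsertImpl does below:
    mtSessionThreadInfo->set_gc_session(NULL);
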
@@ -28,6 +28,10 @@
#include "masstree_get.hh"
#include "btree_leaflink.hh"
#include "circular_int.hh"
+namespace MOT {
+class MasstreePrimaryIndex;
+}

namespace Masstree {

template <typename P>

@@ -39,7 +43,10 @@ struct gc_layer_rcu_callback_ng : public P::threadinfo_type::mrcu_callback {
    MOT::MasstreePrimaryIndex* index_;
    char s_[0];
    gc_layer_rcu_callback_ng(node_base<P>** root_ref, Str prefix, size_t size)
-        : root_ref_(root_ref), len_(prefix.length()), size_(size), index_(mtSessionThreadInfo->get_working_index())
+        : root_ref_(root_ref),
+          len_(prefix.length()),
+          size_(size),
+          index_((MOT::MasstreePrimaryIndex*)mtSessionThreadInfo->get_working_index())
    {
        errno_t erc = memcpy_s(s_, len_, prefix.data(), len_);
        securec_check(erc, "\0", "\0");

@@ -60,8 +67,8 @@ size_t gc_layer_rcu_callback_ng<P>::operator()(bool drop_index)
    // If drop_index == true, all index's pools are going to be cleaned, so we can skip gc_layer call (which might add
    // more elements into GC)
    if (drop_index == false) {
-        // GC layer remove might delete elements from tree and might create new gc layer removal requests and add them to GC.
-        // Index must be provided to allow access to the memory pools.
+        // GC layer remove might delete elements from tree and might create new gc layer removal requests and add them
+        // to GC. Index must be provided to allow access to the memory pools.
        mtSessionThreadInfo->set_working_index(index_);
        (*this)(*mtSessionThreadInfo);
        mtSessionThreadInfo->set_working_index(NULL);

@@ -92,9 +99,12 @@ void gc_layer_rcu_callback_ng<P>::make(node_base<P>** root_ref, Str prefix, thre
    void* data = ti.allocate(sz, memtag_masstree_gc, &sz /* IN/OUT PARAM */);
    if (!data) {
        // If allocation fails, gc layer removal command will not be added to GC and this layer wont be removed.
-        // We might deal with this issue in the future by replacing the current mechanism with one of the following options:
-        // 1. Use thread local GC layer removal object (per threadinfo) and keep list of key suffixes to clean (also in threadinfo)
-        // 2. Move this feature to VACUUM process: Create special iterator that adds GC Layer callbacks when it finds empty layers
+        // We might deal with this issue in the future by replacing the current mechanism with one of the following
+        // options:
+        // 1. Use thread local GC layer removal object (per threadinfo) and keep list of key suffixes to clean (also
+        // in threadinfo)
+        // 2. Move this feature to VACUUM process: Create special iterator that adds GC Layer callbacks when it finds
+        // empty layers
        ti.set_last_error(MT_MERR_GC_LAYER_REMOVAL_MAKE);
        return;
    }

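gc_layer_rcu_callback_ng keeps the key prefix in a trailing char s_[0] array, so make() asks ti.allocate() for the object plus the prefix bytes in a single block and the constructor copies the prefix in with memcpy_s. A self-contained sketch of that idiom under assumed names (PrefixHolder, MakePrefixHolder, plain malloc and memcpy are illustrative; the real code sizes the allocation just above this hunk and uses the secure-copy wrappers):

    #include <cstring>
    #include <cstdlib>
    #include <new>

    struct PrefixHolder {
        int len_;
        char s_[0];  // zero-length trailing array (GNU extension, as in the commit)
    };

    static PrefixHolder* MakePrefixHolder(const char* prefix, int len)
    {
        // one allocation covers the object and its trailing prefix bytes
        void* data = malloc(sizeof(PrefixHolder) + len);
        if (data == nullptr) {
            return nullptr;  // mirrors the "allocation failed" branch above
        }
        PrefixHolder* holder = new (data) PrefixHolder;
        holder->len_ = len;
        memcpy(holder->s_, prefix, len);
        return holder;
    }
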
@@ -71,7 +71,7 @@ Sentinel* MasstreePrimaryIndex::IndexInsertImpl(const Key* key, Sentinel* sentin

    mtSessionThreadInfo->set_last_error(MT_MERR_OK);

-    existingItem = m_index.insert(key, sentinel, inserted, pid);
+    existingItem = m_index.insert(key->GetKeyBuf(), key->GetKeyLength(), sentinel, inserted, pid);

    mtSessionThreadInfo->set_gc_session(NULL);
    mtSessionThreadInfo->set_working_index(NULL);

@@ -91,7 +91,7 @@ Sentinel* MasstreePrimaryIndex::IndexReadImpl(const Key* key, uint32_t pid) cons
    void* output = nullptr;

    // Operation does not allocate memory from pools nor remove nodes. No need to set index's ptr
-    m_index.find(key, output, result, pid);
+    m_index.find(key->GetKeyBuf(), key->GetKeyLength(), output, result, pid);

    if (result) {
        sentinel = reinterpret_cast<Sentinel*>(output);

@@ -39,6 +39,8 @@
#include <cmath>
#include "mot_engine.h"

+static_assert(MASSTREE_MAXKEYLEN == MAX_KEY_SIZE, "MASSTREE_MAXKEYLEN must be equal to MAX_KEY_SIZE");

namespace MOT {
/**
 * @class MasstreePrimaryIndex.

@@ -485,27 +487,28 @@ public:
    bool RecordMemRcu(void* ptr, int size, enum memtag tag)
    {
        void* ptrToFree = nullptr;
+        GcManager* gc_session = (GcManager*)mtSessionThreadInfo->get_gc_session();
+        MOT_ASSERT(gc_session);
        switch (tag) {
            case memtag_masstree_leaf:
-                mtSessionThreadInfo->get_gc_session()->GcRecordObject(
+                gc_session->GcRecordObject(
                    GetIndexId(), (void*)m_leafsPool, ptr, DeallocateFromPoolCallBack, m_leafsPool->m_size);
                return true;

            case memtag_masstree_internode:
-                mtSessionThreadInfo->get_gc_session()->GcRecordObject(
+                gc_session->GcRecordObject(
                    GetIndexId(), (void*)m_internodesPool, ptr, DeallocateFromPoolCallBack, m_internodesPool->m_size);
                return true;

            case memtag_masstree_ksuffixes:
                MOT_ASSERT((size >> 16) == 0); // validate that size using 2 bytes or less
                ptrToFree = (void*)((uint64_t)ptr | ((uint64_t)size << 48));
-                mtSessionThreadInfo->get_gc_session()->GcRecordObject(
+                gc_session->GcRecordObject(
                    GetIndexId(), (void*)m_ksuffixSlab, ptrToFree, DeallocateFromSlabCallBack, size);
                return true;

            case memtag_masstree_gc:
-                mtSessionThreadInfo->get_gc_session()->GcRecordObject(
-                    GetIndexId(), (void*)m_ksuffixSlab, ptr, DeallocateFromSlabGcCallBack, size);
+                gc_session->GcRecordObject(GetIndexId(), (void*)m_ksuffixSlab, ptr, DeallocateFromSlabGcCallBack, size);
                return true;

            default:

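In the memtag_masstree_ksuffixes case above, the 16-bit size is folded into the otherwise unused top bits of the 64-bit pointer so that a single value reaches the GC callback. A small sketch of the pack/unpack pair; the packing follows the hunk, while the unpacking side (inside DeallocateFromSlabCallBack, which this diff does not show) is an assumption:

    #include <cstdint>
    #include <cassert>

    static inline void* PackPtrAndSize(void* ptr, uint64_t size)
    {
        assert((size >> 16) == 0);  // size must fit in two bytes, as the MOT_ASSERT checks
        // heap pointers on common 64-bit platforms keep their top 16 bits clear
        return (void*)((uint64_t)ptr | (size << 48));
    }

    static inline void UnpackPtrAndSize(void* packed, void*& ptr, uint64_t& size)
    {
        size = (uint64_t)packed >> 48;
        ptr = (void*)((uint64_t)packed & 0x0000FFFFFFFFFFFFULL);
    }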