Merge branch 'blr' into develop
Conflicts:
	client/maxadmin.c
	server/core/CMakeLists.txt
	server/core/dcb.c
	server/core/gateway.c
	server/core/poll.c
	server/core/test/CMakeLists.txt
	server/core/test/makefile
	server/include/poll.h
	server/modules/routing/debugcmd.c
@ -4,9 +4,10 @@ if(BUILD_TESTS)
|
||||
target_link_libraries(fullcore log_manager utils pthread ${EMBEDDED_LIB} ssl aio rt crypt dl crypto inih z m stdc++)
|
||||
endif()
|
||||
add_executable(maxscale atomic.c buffer.c spinlock.c gateway.c
|
||||
gw_utils.c utils.c dcb.c load_utils.c session.c service.c server.c
|
||||
poll.c config.c users.c hashtable.c dbusers.c thread.c gwbitmask.c
|
||||
monitor.c adminusers.c secrets.c filter.c modutil.c hint.c housekeeper.c)
|
||||
gw_utils.c utils.c dcb.c load_utils.c session.c service.c server.c
|
||||
poll.c config.c users.c hashtable.c dbusers.c thread.c gwbitmask.c
|
||||
monitor.c adminusers.c secrets.c filter.c modutil.c hint.c
|
||||
housekeeper.c memlog.c)
|
||||
target_link_libraries(maxscale ${EMBEDDED_LIB} log_manager utils ssl aio pthread crypt dl crypto inih z rt m stdc++)
|
||||
install(TARGETS maxscale DESTINATION bin)
|
||||
|
||||
@ -20,4 +21,4 @@ install(TARGETS maxpasswd DESTINATION bin)
|
||||
|
||||
if(BUILD_TESTS)
|
||||
add_subdirectory(test)
|
||||
endif()
|
||||
endif()
|
||||
|
@ -65,7 +65,8 @@ include ../../makefile.inc
|
||||
SRCS= atomic.c buffer.c spinlock.c gateway.c \
|
||||
gw_utils.c utils.c dcb.c load_utils.c session.c service.c server.c \
|
||||
poll.c config.c users.c hashtable.c dbusers.c thread.c gwbitmask.c \
|
||||
monitor.c adminusers.c secrets.c filter.c modutil.c hint.c housekeeper.c
|
||||
monitor.c adminusers.c secrets.c filter.c modutil.c hint.c \
|
||||
housekeeper.c memlog.c
|
||||
|
||||
HDRS= ../include/atomic.h ../include/buffer.h ../include/dcb.h \
|
||||
../include/gw.h ../modules/include/mysql_client_server_protocol.h \
|
||||
@ -73,7 +74,8 @@ HDRS= ../include/atomic.h ../include/buffer.h ../include/dcb.h \
|
||||
../include/modules.h ../include/poll.h ../include/config.h \
|
||||
../include/users.h ../include/hashtable.h ../include/gwbitmask.h \
|
||||
../include/adminusers.h ../include/version.h ../include/maxscale.h \
|
||||
../include/filter.h ../include/modutil.h ../hint.h ../include/housekeeper.h
|
||||
../include/filter.h ../include/modutil.h ../hint.h \
|
||||
../include/housekeeper.h ../include/memlog.h
|
||||
|
||||
OBJ=$(SRCS:.c=.o)
|
||||
|
||||
|
@ -1150,6 +1150,31 @@ config_threadcount()
|
||||
return gateway.n_threads;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the number of non-blocking polls to be done before a blocking poll
|
||||
* is issued.
|
||||
*
|
||||
* @return The number of blocking poll calls to make before a blocking call
|
||||
*/
|
||||
unsigned int
|
||||
config_nbpolls()
|
||||
{
|
||||
return gateway.n_nbpoll;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the configured number of milliseconds for which we wait when we do
|
||||
* a blocking poll call.
|
||||
*
|
||||
* @return The number of milliseconds to sleep in a blocking poll call
|
||||
*/
|
||||
unsigned int
|
||||
config_pollsleep()
|
||||
{
|
||||
return gateway.pollsleep;
|
||||
}
|
||||
|
||||
|
||||
static struct {
|
||||
char *logname;
|
||||
logfile_id_t logfile;
|
||||
@ -1170,9 +1195,20 @@ static int
|
||||
handle_global_item(const char *name, const char *value)
|
||||
{
|
||||
int i;
|
||||
if (strcmp(name, "threads") == 0) {
|
||||
if (strcmp(name, "threads") == 0)
|
||||
{
|
||||
gateway.n_threads = atoi(value);
|
||||
} else {
|
||||
}
|
||||
else if (strcmp(name, "non_blocking_polls") == 0)
|
||||
{
|
||||
gateway.n_nbpoll = atoi(value);
|
||||
}
|
||||
else if (strcmp(name, "poll_sleep") == 0)
|
||||
{
|
||||
gateway.pollsleep = atoi(value);
|
||||
}
|
||||
else
|
||||
{
|
||||
for (i = 0; lognames[i].logname; i++)
|
||||
{
|
||||
if (strcasecmp(name, lognames[i].logname) == 0)
|
||||
@ -1194,6 +1230,8 @@ static void
|
||||
global_defaults()
|
||||
{
|
||||
gateway.n_threads = 1;
|
||||
gateway.n_nbpoll = DEFAULT_NBPOLLS;
|
||||
gateway.pollsleep = DEFAULT_POLLSLEEP;
|
||||
if (version_string != NULL)
|
||||
gateway.version_string = strdup(version_string);
|
||||
else
|
||||
|
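For reference, the two new global settings parsed above sit alongside "threads" in the configuration file. A minimal illustrative fragment, assuming the global section is the [maxscale] section used in the shipped MaxScale.cnf examples (the values are examples, not defaults taken from this commit):

[maxscale]
threads=4
# non-blocking epoll cycles made before a blocking call is issued
non_blocking_polls=3
# maximum time, in milliseconds, to sleep in a blocking epoll_wait call
poll_sleep=1000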
@ -206,6 +206,7 @@ DCB *rval;
|
||||
rval->low_water = 0;
|
||||
rval->next = NULL;
|
||||
rval->callbacks = NULL;
|
||||
rval->data = NULL;
|
||||
|
||||
rval->remote = NULL;
|
||||
rval->user = NULL;
|
||||
@ -240,7 +241,7 @@ dcb_free(DCB *dcb)
|
||||
{
|
||||
LOGIF(LE, (skygw_log_write_flush(
|
||||
LOGFILE_ERROR,
|
||||
"Error : Attempt to free a DCB via dcb_fee "
|
||||
"Error : Attempt to free a DCB via dcb_free "
|
||||
"that has been associated with a descriptor.")));
|
||||
}
|
||||
}
|
||||
@ -342,6 +343,15 @@ DCB_CALLBACK *cb;
|
||||
dcb->state == DCB_STATE_ALLOC,
|
||||
"dcb not in DCB_STATE_DISCONNECTED not in DCB_STATE_ALLOC state.");
|
||||
|
||||
if (DCB_POLL_BUSY(dcb))
|
||||
{
|
||||
/* Check if DCB has outstanding poll events */
|
||||
LOGIF(LE, (skygw_log_write_flush(
|
||||
LOGFILE_ERROR,
|
||||
"dcb_final_free: DCB %p has outstanding events",
|
||||
dcb)));
|
||||
}
|
||||
|
||||
/*< First remove this DCB from the chain */
|
||||
spinlock_acquire(&dcbspin);
|
||||
if (allDCBs == dcb)
|
||||
@ -413,6 +423,7 @@ DCB_CALLBACK *cb;
|
||||
}
|
||||
spinlock_release(&dcb->cb_lock);
|
||||
|
||||
|
||||
bitmask_free(&dcb->memdata.bitmask);
|
||||
free(dcb);
|
||||
}
|
||||
@ -1208,7 +1219,8 @@ dcb_close(DCB *dcb)
|
||||
*/
|
||||
if (dcb->state == DCB_STATE_POLLING)
|
||||
{
|
||||
rc = poll_remove_dcb(dcb);
|
||||
if (dcb->fd != -1)
|
||||
rc = poll_remove_dcb(dcb);
|
||||
|
||||
if (rc == 0) {
|
||||
LOGIF(LD, (skygw_log_write(
|
||||
@ -1276,9 +1288,9 @@ printDCB(DCB *dcb)
|
||||
dcb->stats.n_buffered);
|
||||
printf("\t\tNo. of Accepts: %d\n",
|
||||
dcb->stats.n_accepts);
|
||||
printf("\t\tNo. of High Water Events: %d\n",
|
||||
printf("\t\tNo. of High Water Events: %d\n",
|
||||
dcb->stats.n_high_water);
|
||||
printf("\t\tNo. of Low Water Events: %d\n",
|
||||
printf("\t\tNo. of Low Water Events: %d\n",
|
||||
dcb->stats.n_low_water);
|
||||
}
|
||||
/**
|
||||
@ -1463,6 +1475,12 @@ dprintDCB(DCB *pdcb, DCB *dcb)
|
||||
dcb->stats.n_high_water);
|
||||
dcb_printf(pdcb, "\t\tNo. of Low Water Events: %d\n",
|
||||
dcb->stats.n_low_water);
|
||||
if (DCB_POLL_BUSY(dcb))
|
||||
{
|
||||
dcb_printf(pdcb, "\t\tPending events in the queue: %x %s\n",
|
||||
dcb->evq.pending_events, dcb->evq.processing ? "(processing)" : "");
|
||||
|
||||
}
|
||||
if (dcb->flags & DCBF_CLONE)
|
||||
dcb_printf(pdcb, "\t\tDCB is a clone.\n");
|
||||
#if SPINLOCK_PROFILE
|
||||
|
@ -56,6 +56,7 @@
|
||||
#include <config.h>
|
||||
#include <poll.h>
|
||||
#include <housekeeper.h>
|
||||
#include <memlog.h>
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <unistd.h>
|
||||
@ -1548,13 +1549,10 @@ int main(int argc, char **argv)
|
||||
* instances of the gateway are beign run on the same
|
||||
* machine.
|
||||
*/
|
||||
sprintf(datadir, "%s/data%d", home_dir, getpid());
|
||||
if(mkdir(datadir, 0777) != 0){
|
||||
LOGIF(LE,(skygw_log_write_flush(
|
||||
LOGFILE_ERROR,
|
||||
"Error : Directory creation failed due to %s.",
|
||||
strerror(errno))));
|
||||
}
|
||||
sprintf(datadir, "%s/data", home_dir);
|
||||
mkdir(datadir, 0777);
|
||||
sprintf(datadir, "%s/data/data%d", home_dir, getpid());
|
||||
mkdir(datadir, 0777);
|
||||
|
||||
if (!daemon_mode)
|
||||
{
|
||||
@ -1758,6 +1756,7 @@ int main(int argc, char **argv)
|
||||
LOGFILE_MESSAGE,
|
||||
"MaxScale shutdown completed.")));
|
||||
|
||||
unload_all_modules();
|
||||
/* Remove Pidfile */
|
||||
unlink_pidfile();
|
||||
|
||||
@ -1776,10 +1775,11 @@ return_main:
|
||||
* Shutdown MaxScale server
|
||||
*/
|
||||
void
|
||||
shutdown_server()
|
||||
shutdown_server()
|
||||
{
|
||||
poll_shutdown();
|
||||
hkshutdown();
|
||||
memlog_flush_all();
|
||||
log_flush_shutdown();
|
||||
}
|
||||
|
||||
|
@ -24,11 +24,21 @@
|
||||
/**
|
||||
* @file housekeeper.c Provide a mechanism to run periodic tasks
|
||||
*
|
||||
* The housekeeper provides a mechanism to allow for tasks, function
|
||||
* calls basically, to be run on a tiem basis. A task may be run
|
||||
* repeatedly, with a given frequency (in seconds), or may be a one
|
||||
* shot task that will only be run once after a specified number of
|
||||
* seconds.
|
||||
*
|
||||
* The housekeeper also maintains a global variable, hkheartbeat, that
|
||||
* is incremented every 100ms.
|
||||
*
|
||||
* @verbatim
|
||||
* Revision History
|
||||
*
|
||||
* Date Who Description
|
||||
* 29/08/14 Mark Riddoch Initial implementation
|
||||
* 22/10/14 Mark Riddoch Addition of one-shot tasks
|
||||
*
|
||||
* @endverbatim
|
||||
*/
|
||||
@ -43,6 +53,7 @@ static HKTASK *tasks = NULL;
|
||||
static SPINLOCK tasklock = SPINLOCK_INIT;
|
||||
|
||||
static int do_shutdown = 0;
|
||||
unsigned long hkheartbeat = 0;
|
||||
|
||||
static void hkthread(void *);
|
||||
|
||||
@ -69,7 +80,7 @@ hkinit()
|
||||
* @param taskfn The function to call for the task
|
||||
* @param data Data to pass to the task function
|
||||
* @param frequency How often to run the task, expressed in seconds
|
||||
* @return Return the tiem in seconds when the task will be first run if the task was added, otherwise 0
|
||||
* @return Return the time in seconds when the task will be first run if the task was added, otherwise 0
|
||||
*/
|
||||
int
|
||||
hktask_add(char *name, void (*taskfn)(void *), void *data, int frequency)
|
||||
@ -88,6 +99,7 @@ HKTASK *task, *ptr;
|
||||
task->task = taskfn;
|
||||
task->data = data;
|
||||
task->frequency = frequency;
|
||||
task->type = HK_REPEATED;
|
||||
task->nextdue = time(0) + frequency;
|
||||
task->next = NULL;
|
||||
spinlock_acquire(&tasklock);
|
||||
@ -112,6 +124,61 @@ HKTASK *task, *ptr;
|
||||
return task->nextdue;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a one-shot task to the housekeeper task list
|
||||
*
|
||||
* Task names must be unique.
|
||||
*
|
||||
* @param name The unique name for this housekeeper task
|
||||
* @param taskfn The function to call for the task
|
||||
* @param data Data to pass to the task function
|
||||
* @param when How many second until the task is executed
|
||||
* @return Return the time in seconds when the task will be first run if the task was added, otherwise 0
|
||||
*
|
||||
*/
|
||||
int
|
||||
hktask_oneshot(char *name, void (*taskfn)(void *), void *data, int when)
|
||||
{
|
||||
HKTASK *task, *ptr;
|
||||
|
||||
if ((task = (HKTASK *)malloc(sizeof(HKTASK))) == NULL)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
if ((task->name = strdup(name)) == NULL)
|
||||
{
|
||||
free(task);
|
||||
return 0;
|
||||
}
|
||||
task->task = taskfn;
|
||||
task->data = data;
|
||||
task->frequency = 0;
|
||||
task->type = HK_ONESHOT;
|
||||
task->nextdue = time(0) + when;
|
||||
task->next = NULL;
|
||||
spinlock_acquire(&tasklock);
|
||||
ptr = tasks;
|
||||
while (ptr && ptr->next)
|
||||
{
|
||||
if (strcmp(ptr->name, name) == 0)
|
||||
{
|
||||
spinlock_release(&tasklock);
|
||||
free(task->name);
|
||||
free(task);
|
||||
return 0;
|
||||
}
|
||||
ptr = ptr->next;
|
||||
}
|
||||
if (ptr)
|
||||
ptr->next = task;
|
||||
else
|
||||
tasks = task;
|
||||
spinlock_release(&tasklock);
|
||||
|
||||
return task->nextdue;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Remove a named task from the housekeepers task list
|
||||
*
|
||||
@ -171,12 +238,17 @@ HKTASK *ptr;
|
||||
time_t now;
|
||||
void (*taskfn)(void *);
|
||||
void *taskdata;
|
||||
int i;
|
||||
|
||||
for (;;)
|
||||
{
|
||||
if (do_shutdown)
|
||||
return;
|
||||
thread_millisleep(1000);
|
||||
for (i = 0; i < 10; i++)
|
||||
{
|
||||
if (do_shutdown)
|
||||
return;
|
||||
thread_millisleep(100);
|
||||
hkheartbeat++;
|
||||
}
|
||||
now = time(0);
|
||||
spinlock_acquire(&tasklock);
|
||||
ptr = tasks;
|
||||
@ -189,6 +261,8 @@ void *taskdata;
|
||||
taskdata = ptr->data;
|
||||
spinlock_release(&tasklock);
|
||||
(*taskfn)(taskdata);
|
||||
if (ptr->type == HK_ONESHOT)
|
||||
hktask_remove(ptr->name);
|
||||
spinlock_acquire(&tasklock);
|
||||
ptr = tasks;
|
||||
}
|
||||
@ -208,3 +282,33 @@ hkshutdown()
|
||||
{
|
||||
do_shutdown = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Show the tasks that are scheduled for the house keeper
|
||||
*
|
||||
* @param pdcb The DCB to send to output
|
||||
*/
|
||||
void
|
||||
hkshow_tasks(DCB *pdcb)
|
||||
{
|
||||
HKTASK *ptr;
|
||||
struct tm tm;
|
||||
char buf[40];
|
||||
|
||||
dcb_printf(pdcb, "%-25s | Type | Frequency | Next Due\n", "Name");
|
||||
dcb_printf(pdcb, "--------------------------+----------+-----------+-------------------------\n");
|
||||
spinlock_acquire(&tasklock);
|
||||
ptr = tasks;
|
||||
while (ptr)
|
||||
{
|
||||
localtime_r(&ptr->nextdue, &tm);
|
||||
asctime_r(&tm, buf);
|
||||
dcb_printf(pdcb, "%-25s | %-8s | %-9d | %s",
|
||||
ptr->name,
|
||||
ptr->type == HK_REPEATED ? "Repeated" : "One-Shot",
|
||||
ptr->frequency,
|
||||
buf);
|
||||
ptr = ptr->next;
|
||||
}
|
||||
spinlock_release(&tasklock);
|
||||
}
|
||||
|
@ -330,12 +330,28 @@ MODULES *ptr;
|
||||
* The module is now not in the linked list and all
|
||||
* memory related to it can be freed
|
||||
*/
|
||||
dlclose(mod->handle);
|
||||
free(mod->module);
|
||||
free(mod->type);
|
||||
free(mod->version);
|
||||
free(mod);
|
||||
}
|
||||
|
||||
/**
|
||||
* Unload all modules
|
||||
*
|
||||
* Remove all the modules from the system, called during shutdown
|
||||
* to allow termination hooks to be called.
|
||||
*/
|
||||
void
|
||||
unload_all_modules()
|
||||
{
|
||||
while (registered)
|
||||
{
|
||||
unregister_module(registered->module);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Print Modules
|
||||
*
|
||||
|
server/core/memlog.c (new file, 254 lines)
@ -0,0 +1,254 @@
|
||||
/*
|
||||
* This file is distributed as part of the MariaDB MaxScale. It is free
|
||||
* software: you can redistribute it and/or modify it under the terms of the
|
||||
* GNU General Public License as published by the Free Software Foundation,
|
||||
* version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
|
||||
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
|
||||
* details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program; if not, write to the Free Software Foundation, Inc., 51
|
||||
* Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Copyright MariaDB Ab 2014
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file memlog.c - Implementation of memory logging mechanism for debug purposes
|
||||
*
|
||||
* @verbatim
|
||||
* Revision History
|
||||
*
|
||||
* Date Who Description
|
||||
* 26/09/14 Mark Riddoch Initial implementation
|
||||
*
|
||||
* @endverbatim
|
||||
*/
|
||||
#include <memlog.h>
|
||||
#include <stdio.h>
|
||||
|
||||
static MEMLOG *memlogs = NULL;
|
||||
static SPINLOCK memlock = SPINLOCK_INIT;
|
||||
|
||||
/**
|
||||
* Create a new instance of a memory logger.
|
||||
*
|
||||
* @param name The name of the memory log
|
||||
* @param type The type of item being logged
|
||||
* @param size The number of items to store in memory before flushign to disk
|
||||
*
|
||||
* @return MEMLOG* A memory log handle
|
||||
*/
|
||||
MEMLOG *
|
||||
memlog_create(char *name, MEMLOGTYPE type, int size)
|
||||
{
|
||||
MEMLOG *log;
|
||||
|
||||
if ((log = (MEMLOG *)malloc(sizeof(MEMLOG))) == NULL)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
log->name = strdup(name);
|
||||
spinlock_init(&log->lock);
|
||||
log->type = type;
|
||||
log->offset = 0;
|
||||
log->size = size;
|
||||
log->flags = 0;
|
||||
switch (type)
|
||||
{
|
||||
case ML_INT:
|
||||
log->values = malloc(sizeof(int) * size);
|
||||
break;
|
||||
case ML_LONG:
|
||||
log->values = malloc(sizeof(long) * size);
|
||||
break;
|
||||
case ML_LONGLONG:
|
||||
log->values = malloc(sizeof(long long) * size);
|
||||
break;
|
||||
case ML_STRING:
|
||||
log->values = malloc(sizeof(char *) * size);
|
||||
break;
|
||||
}
|
||||
if (log->values == NULL)
|
||||
{
|
||||
free(log);
|
||||
return NULL;
|
||||
}
|
||||
spinlock_acquire(&memlock);
|
||||
log->next = memlogs;
|
||||
memlogs = log;
|
||||
spinlock_release(&memlock);
|
||||
|
||||
return log;
|
||||
}
|
||||
|
||||
/**
|
||||
* Destroy a memory logger any unwritten data will be flushed to disk
|
||||
*
|
||||
* @param log The memory log to destroy
|
||||
*/
|
||||
void
|
||||
memlog_destroy(MEMLOG *log)
|
||||
{
|
||||
MEMLOG *ptr;
|
||||
|
||||
if ((log->flags & MLNOAUTOFLUSH) == 0)
|
||||
memlog_flush(log);
|
||||
free(log->values);
|
||||
|
||||
spinlock_acquire(&memlock);
|
||||
if (memlogs == log)
|
||||
memlogs = log->next;
|
||||
else
|
||||
{
|
||||
ptr = memlogs;
|
||||
while (ptr && ptr->next != log)
|
||||
ptr = ptr->next;
|
||||
if (ptr)
|
||||
ptr->next = log->next;
|
||||
}
|
||||
spinlock_release(&memlock);
|
||||
free(log->name);
|
||||
free(log);
|
||||
}
|
||||
|
||||
/**
|
||||
* Log a data item to the memory logger
|
||||
*
|
||||
* @param log The memory logger
|
||||
* @param value The value to log
|
||||
*/
|
||||
void
|
||||
memlog_log(MEMLOG *log, void *value)
|
||||
{
|
||||
if (!log)
|
||||
return;
|
||||
spinlock_acquire(&log->lock);
|
||||
switch (log->type)
|
||||
{
|
||||
case ML_INT:
|
||||
((int *)(log->values))[log->offset] = (int)value;
|
||||
break;
|
||||
case ML_LONG:
|
||||
((long *)(log->values))[log->offset] = (long)value;
|
||||
break;
|
||||
case ML_LONGLONG:
|
||||
((long long *)(log->values))[log->offset] = (long long)value;
|
||||
break;
|
||||
case ML_STRING:
|
||||
((char **)(log->values))[log->offset] = (char *)value;
|
||||
break;
|
||||
}
|
||||
log->offset++;
|
||||
if (log->offset == log->size)
|
||||
{
|
||||
if ((log->flags & MLNOAUTOFLUSH) == 0)
|
||||
memlog_flush(log);
|
||||
log->offset = 0;
|
||||
log->iflags = MLWRAPPED;
|
||||
}
|
||||
spinlock_release(&log->lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* Flush all memlogs to disk, called during shutdown
|
||||
*
|
||||
*/
|
||||
void
|
||||
memlog_flush_all()
|
||||
{
|
||||
MEMLOG *log;
|
||||
|
||||
spinlock_acquire(&memlock);
|
||||
log = memlogs;
|
||||
while (log)
|
||||
{
|
||||
spinlock_acquire(&log->lock);
|
||||
memlog_flush(log);
|
||||
spinlock_release(&log->lock);
|
||||
log = log->next;
|
||||
}
|
||||
spinlock_release(&memlock);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the flags for a memlog
|
||||
*
|
||||
* @param log The memlog to set the flags for
|
||||
* @param flags The new flags values
|
||||
*/
|
||||
void
|
||||
memlog_set(MEMLOG *log, unsigned int flags)
|
||||
{
|
||||
log->flags = flags;
|
||||
}
|
||||
|
||||
/**
|
||||
* Flush a memory log to disk
|
||||
*
|
||||
* Assumes the the log->lock has been acquired by the caller
|
||||
*
|
||||
* @param log The memory log to flush
|
||||
*/
|
||||
void
|
||||
memlog_flush(MEMLOG *log)
|
||||
{
|
||||
FILE *fp;
|
||||
int i;
|
||||
|
||||
if ((fp = fopen(log->name, "a")) == NULL)
|
||||
return;
|
||||
if ((log->flags & MLNOAUTOFLUSH) && (log->iflags & MLWRAPPED))
|
||||
{
|
||||
for (i = 0; i < log->size; i++)
|
||||
{
|
||||
int ind = (i + log->offset) % log->size;
|
||||
switch (log->type)
|
||||
{
|
||||
case ML_INT:
|
||||
fprintf(fp, "%d\n",
|
||||
((int *)(log->values))[ind]);
|
||||
break;
|
||||
case ML_LONG:
|
||||
fprintf(fp, "%ld\n",
|
||||
((long *)(log->values))[ind]);
|
||||
break;
|
||||
case ML_LONGLONG:
|
||||
fprintf(fp, "%lld\n",
|
||||
((long long *)(log->values))[ind]);
|
||||
break;
|
||||
case ML_STRING:
|
||||
fprintf(fp, "%s\n",
|
||||
((char **)(log->values))[ind]);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
for (i = 0; i < log->offset; i++)
|
||||
{
|
||||
switch (log->type)
|
||||
{
|
||||
case ML_INT:
|
||||
fprintf(fp, "%d\n", ((int *)(log->values))[i]);
|
||||
break;
|
||||
case ML_LONG:
|
||||
fprintf(fp, "%ld\n", ((long *)(log->values))[i]);
|
||||
break;
|
||||
case ML_LONGLONG:
|
||||
fprintf(fp, "%lld\n", ((long long *)(log->values))[i]);
|
||||
break;
|
||||
case ML_STRING:
|
||||
fprintf(fp, "%s\n", ((char **)(log->values))[i]);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
log->offset = 0;
|
||||
fclose(fp);
|
||||
}
|
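The test program later in this commit exercises the memlog API in detail; as a quick orientation, a minimal caller might look like the sketch below (the file name and values are illustrative only):

#include <memlog.h>

void
memlog_example()
{
	MEMLOG	*log;
	long	i;

	/* Buffer up to 1000 long values; each time the buffer fills the
	 * values are appended to the file "example.log". */
	if ((log = memlog_create("example.log", ML_LONG, 1000)) == NULL)
		return;
	for (i = 0; i < 2500; i++)
		memlog_log(log, (void *)i);	/* values are passed cast to void * */
	/* Destroying the log flushes the 500 values still held in memory */
	memlog_destroy(log);
}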
@ -33,10 +33,14 @@
|
||||
#include <housekeeper.h>
|
||||
#include <mysql.h>
|
||||
|
||||
#define PROFILE_POLL 1
|
||||
#define PROFILE_POLL 0
|
||||
|
||||
#if PROFILE_POLL
|
||||
#include <rdtsc.h>
|
||||
#include <memlog.h>
|
||||
|
||||
extern unsigned long hkheartbeat;
|
||||
MEMLOG *plog;
|
||||
#endif
|
||||
|
||||
/** Defined in log_manager.cc */
|
||||
@ -44,6 +48,9 @@ extern int lm_enabled_logfiles_bitmask;
|
||||
extern size_t log_ses_count[];
|
||||
extern __thread log_info_t tls_log_info;
|
||||
|
||||
int number_poll_spins;
|
||||
int max_poll_sleep;
|
||||
|
||||
/**
|
||||
* @file poll.c - Abstraction of the epoll functionality
|
||||
*
|
||||
@ -70,7 +77,7 @@ extern __thread log_info_t tls_log_info;
|
||||
/**
|
||||
* Control the use of mutexes for the epoll_wait call. Setting to 1 will
|
||||
* cause the epoll_wait calls to be moved under a mutex. This may be useful
|
||||
* for debuggign purposes but should be avoided in general use.
|
||||
* for debugging purposes but should be avoided in general use.
|
||||
*/
|
||||
#define MUTEX_EPOLL 0
|
||||
|
||||
@ -97,6 +104,7 @@ static int load_samples = 0;
|
||||
static int load_nfds = 0;
|
||||
static double current_avg = 0.0;
|
||||
static double *avg_samples = NULL;
|
||||
static int *evqp_samples = NULL;
|
||||
static int next_sample = 0;
|
||||
static int n_avg_samples;
|
||||
|
||||
@ -144,13 +152,29 @@ static struct {
|
||||
int n_hup; /*< Number of hangup events */
|
||||
int n_accept; /*< Number of accept events */
|
||||
int n_polls; /*< Number of poll cycles */
|
||||
int n_pollev; /*< Number of polls returnign events */
|
||||
int n_nbpollev; /*< Number of polls returnign events */
|
||||
int n_nothreads; /*< Number of times no threads are polling */
|
||||
int n_fds[MAXNFDS]; /*< Number of wakeups with particular
|
||||
n_fds value */
|
||||
int evq_length; /*< Event queue length */
|
||||
int evq_pending; /*< Number of pending descriptors in event queue */
|
||||
int evq_max; /*< Maximum event queue length */
|
||||
int wake_evqpending;/*< Woken from epoll_wait with pending events in queue */
|
||||
int blockingpolls; /*< Number of epoll_waits with a timeout specified */
|
||||
} pollStats;
|
||||
|
||||
#define N_QUEUE_TIMES 30
|
||||
/**
|
||||
* The event queue statistics
|
||||
*/
|
||||
static struct {
|
||||
unsigned int qtimes[N_QUEUE_TIMES+1];
|
||||
unsigned int exectimes[N_QUEUE_TIMES+1];
|
||||
unsigned long maxqtime;
|
||||
unsigned long maxexectime;
|
||||
} queueStats;
|
||||
|
||||
/**
|
||||
* How frequently to call the poll_loadav function used to monitor the load
|
||||
* average of the poll subsystem.
|
||||
@ -179,6 +203,7 @@ int i;
|
||||
exit(-1);
|
||||
}
|
||||
memset(&pollStats, 0, sizeof(pollStats));
|
||||
memset(&queueStats, 0, sizeof(queueStats));
|
||||
bitmask_init(&poll_mask);
|
||||
n_threads = config_threadcount();
|
||||
if ((thread_data =
|
||||
@ -195,10 +220,19 @@ int i;
|
||||
|
||||
hktask_add("Load Average", poll_loadav, NULL, POLL_LOAD_FREQ);
|
||||
n_avg_samples = 15 * 60 / POLL_LOAD_FREQ;
|
||||
avg_samples = (double *)malloc(sizeof(double *) * n_avg_samples);
|
||||
avg_samples = (double *)malloc(sizeof(double) * n_avg_samples);
|
||||
for (i = 0; i < n_avg_samples; i++)
|
||||
avg_samples[i] = 0.0;
|
||||
evqp_samples = (int *)malloc(sizeof(int) * n_avg_samples);
|
||||
for (i = 0; i < n_avg_samples; i++)
|
||||
evqp_samples[i] = 0.0;
|
||||
|
||||
number_poll_spins = config_nbpolls();
|
||||
max_poll_sleep = config_pollsleep();
|
||||
|
||||
#if PROFILE_POLL
|
||||
plog = memlog_create("EventQueueWaitTime", ML_LONG, 10000);
|
||||
#endif
|
||||
}
|
||||
|
||||
/**
|
||||
@ -362,7 +396,7 @@ return_rc:
|
||||
* deschedule a process if a timeout is included, but will not do this if a 0 timeout
|
||||
* value is given. this improves performance when the gateway is under heavy load.
|
||||
*
|
||||
* In order to provide a fairer means of sharign the threads between the different
|
||||
* In order to provide a fairer means of sharing the threads between the different
|
||||
* DCB's the poll mechanism has been decoupled from the processing of the events.
|
||||
* The events are now recieved via the epoll_wait call, a queue of DCB's that have
|
||||
* events pending is maintained and as new events arrive the DCB is added to the end
|
||||
@ -373,15 +407,33 @@ return_rc:
|
||||
* events at a high rate will not block the execution of events for other DCB's and
|
||||
* should result in a fairer polling strategy.
|
||||
*
|
||||
* The introduction of the ability to inject "fake" write events into the event queue meant
|
||||
* that there was a possibility to "starve" new events sicne the polling loop would
|
||||
* consume the event queue before looking for new events. If the DCB that inject
|
||||
* the fake event then injected another fake event as a result of the first it meant
|
||||
* that new events did not get added to the queue. The strategy has been updated to
|
||||
* not consume the entire event queue, but process one event before doing a non-blocking
|
||||
* call to add any new events before processing any more events. A blocking call to
|
||||
* collect events is only made if there are no pending events to be processed on the
|
||||
* event queue.
|
||||
*
|
||||
* Also introduced a "timeout bias" mechanism. This mechansim control the length of
|
||||
* of timeout passed to epoll_wait in blocking calls based on previous behaviour.
|
||||
* The initial call will block for 10% of the define timeout peroid, this will be
|
||||
* increased in increments of 10% until the full timeout value is used. If at any
|
||||
* point there is an event to be processed then the value will be reduced to 10% again
|
||||
* for the next blocking call.
|
||||
*
|
||||
* @param arg The thread ID passed as a void * to satisfy the threading package
|
||||
*/
|
||||
void
|
||||
poll_waitevents(void *arg)
|
||||
{
|
||||
struct epoll_event events[MAX_EVENTS];
|
||||
int i, nfds;
|
||||
int i, nfds, timeout_bias = 1;
|
||||
int thread_id = (int)arg;
|
||||
DCB *zombies = NULL;
|
||||
int poll_spins = 0;
|
||||
|
||||
/** Add this thread to the bitmask of running polling threads */
|
||||
bitmask_set(&poll_mask, thread_id);
|
||||
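The timeout bias described in the comment above can be summarised as a small helper. This is a standalone sketch of the rule, not code from the commit, and it assumes max_poll_sleep is expressed in milliseconds as configured by poll_sleep:

/* Sketch only: the bias starts at 1, grows by one step per idle cycle up
 * to 10, and collapses back to 1 as soon as there is work to do. */
static int
blocking_timeout(int max_poll_sleep, int *timeout_bias, int have_work)
{
	if (have_work)
		*timeout_bias = 1;		/* events seen or queue pending */
	else if (*timeout_bias < 10)
		(*timeout_bias)++;		/* idle: lengthen the wait in 10% steps */
	return (max_poll_sleep * *timeout_bias) / 10;
}

With a poll_sleep of 1000ms an idle thread therefore blocks for 100ms, then 200ms, and so on up to the full second, which keeps wake-ups cheap under load while avoiding busy-waiting when the gateway is quiet.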
@ -395,12 +447,9 @@ DCB *zombies = NULL;
|
||||
|
||||
while (1)
|
||||
{
|
||||
/* Process of the queue of waiting requests */
|
||||
while (do_shutdown == 0 && process_pollq(thread_id))
|
||||
if (pollStats.evq_pending == 0 && timeout_bias < 10)
|
||||
{
|
||||
if (thread_data)
|
||||
thread_data[thread_id].state = THREAD_ZPROCESSING;
|
||||
zombies = dcb_process_zombies(thread_id);
|
||||
timeout_bias++;
|
||||
}
|
||||
|
||||
atomic_add(&n_waiting, 1);
|
||||
@ -416,6 +465,7 @@ DCB *zombies = NULL;
|
||||
thread_data[thread_id].state = THREAD_POLLING;
|
||||
}
|
||||
|
||||
atomic_add(&pollStats.n_polls, 1);
|
||||
if ((nfds = epoll_wait(epoll_fd, events, MAX_EVENTS, 0)) == -1)
|
||||
{
|
||||
atomic_add(&n_waiting, -1);
|
||||
@ -428,19 +478,28 @@ DCB *zombies = NULL;
|
||||
pthread_self(),
|
||||
nfds,
|
||||
eno)));
|
||||
atomic_add(&n_waiting, -1);
|
||||
}
|
||||
/*
|
||||
* If there are no new descriptors from the non-blocking call
|
||||
* and nothing to proces on the event queue then for do a
|
||||
* and nothing to process on the event queue then for do a
|
||||
* blocking call to epoll_wait.
|
||||
*
|
||||
* We calculate a timeout bias to alter the length of the blocking
|
||||
* call based on the time since we last received an event to process
|
||||
*/
|
||||
else if (nfds == 0 && process_pollq(thread_id) == 0)
|
||||
else if (nfds == 0 && pollStats.evq_pending == 0 && poll_spins++ > number_poll_spins)
|
||||
{
|
||||
atomic_add(&n_waiting, 1);
|
||||
atomic_add(&pollStats.blockingpolls, 1);
|
||||
nfds = epoll_wait(epoll_fd,
|
||||
events,
|
||||
MAX_EVENTS,
|
||||
EPOLL_TIMEOUT);
|
||||
(max_poll_sleep * timeout_bias) / 10);
|
||||
if (nfds == 0 && pollStats.evq_pending)
|
||||
{
|
||||
atomic_add(&pollStats.wake_evqpending, 1);
|
||||
poll_spins = 0;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -455,12 +514,16 @@ DCB *zombies = NULL;
|
||||
#endif /* BLOCKINGPOLL */
|
||||
if (nfds > 0)
|
||||
{
|
||||
timeout_bias = 1;
|
||||
if (poll_spins <= number_poll_spins + 1)
|
||||
atomic_add(&pollStats.n_nbpollev, 1);
|
||||
poll_spins = 0;
|
||||
LOGIF(LD, (skygw_log_write(
|
||||
LOGFILE_DEBUG,
|
||||
"%lu [poll_waitevents] epoll_wait found %d fds",
|
||||
pthread_self(),
|
||||
nfds)));
|
||||
atomic_add(&pollStats.n_polls, 1);
|
||||
atomic_add(&pollStats.n_pollev, 1);
|
||||
if (thread_data)
|
||||
{
|
||||
thread_data[thread_id].n_fds = nfds;
|
||||
@ -479,7 +542,7 @@ DCB *zombies = NULL;
|
||||
/*
|
||||
* Process every DCB that has a new event and add
|
||||
* it to the poll queue.
|
||||
* If the DCB is currently beign processed then we
|
||||
* If the DCB is currently being processed then we
|
||||
* or in the new eent bits to the pending event bits
|
||||
* and leave it in the queue.
|
||||
* If the DCB was not already in the queue then it was
|
||||
@ -494,6 +557,11 @@ DCB *zombies = NULL;
|
||||
spinlock_acquire(&pollqlock);
|
||||
if (DCB_POLL_BUSY(dcb))
|
||||
{
|
||||
if (dcb->evq.pending_events == 0)
|
||||
{
|
||||
pollStats.evq_pending++;
|
||||
dcb->evq.inserted = hkheartbeat;
|
||||
}
|
||||
dcb->evq.pending_events |= ev;
|
||||
}
|
||||
else
|
||||
@ -513,6 +581,8 @@ DCB *zombies = NULL;
|
||||
dcb->evq.next = dcb;
|
||||
}
|
||||
pollStats.evq_length++;
|
||||
pollStats.evq_pending++;
|
||||
dcb->evq.inserted = hkheartbeat;
|
||||
if (pollStats.evq_length > pollStats.evq_max)
|
||||
{
|
||||
pollStats.evq_max = pollStats.evq_length;
|
||||
@ -523,17 +593,20 @@ DCB *zombies = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* If there was nothing to process then process the zombie queue
|
||||
* Process of the queue of waiting requests
|
||||
* This is done without checking the evq_pending count as a
|
||||
* precautionary measure to avoid issues if the house keeping
|
||||
* of the count goes wrong.
|
||||
*/
|
||||
if (process_pollq(thread_id) == 0)
|
||||
{
|
||||
if (thread_data)
|
||||
{
|
||||
thread_data[thread_id].state = THREAD_ZPROCESSING;
|
||||
}
|
||||
zombies = dcb_process_zombies(thread_id);
|
||||
}
|
||||
|
||||
if (process_pollq(thread_id))
|
||||
timeout_bias = 1;
|
||||
|
||||
if (thread_data)
|
||||
thread_data[thread_id].state = THREAD_ZPROCESSING;
|
||||
zombies = dcb_process_zombies(thread_id);
|
||||
if (thread_data)
|
||||
thread_data[thread_id].state = THREAD_IDLE;
|
||||
|
||||
if (do_shutdown)
|
||||
{
|
||||
/*<
|
||||
@ -556,6 +629,34 @@ DCB *zombies = NULL;
|
||||
} /*< while(1) */
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the number of non-blocking poll cycles that will be done before
|
||||
* a blocking poll will take place. Whenever an event arrives on a thread
|
||||
* or the thread sees a pending event to execute it will reset it's
|
||||
* poll_spin coutn to zero and will then poll with a 0 timeout until the
|
||||
* poll_spin value is greater than the value set here.
|
||||
*
|
||||
* @param nbpolls Number of non-block polls to perform before blocking
|
||||
*/
|
||||
void
|
||||
poll_set_nonblocking_polls(unsigned int nbpolls)
|
||||
{
|
||||
number_poll_spins = nbpolls;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the maximum amount of time, in milliseconds, the polling thread
|
||||
* will block before it will wake and check the event queue for work
|
||||
* that may have been added by another thread.
|
||||
*
|
||||
* @param maxwait Maximum wait time in milliseconds
|
||||
*/
|
||||
void
|
||||
poll_set_maxwait(unsigned int maxwait)
|
||||
{
|
||||
max_poll_sleep = maxwait;
|
||||
}
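These two setters expose the same knobs as the non_blocking_polls and poll_sleep configuration entries, so the behaviour can also be adjusted on a running gateway. A trivial illustrative caller (hypothetical, e.g. invoked from an administrative command handler):

void
tune_polling()
{
	poll_set_nonblocking_polls(20);	/* spin 20 times with a zero timeout ... */
	poll_set_maxwait(500);		/* ... then block for at most 500ms */
}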
|
||||
|
||||
/**
|
||||
* Process of the queue of DCB's that have outstanding events
|
||||
*
|
||||
@ -576,6 +677,7 @@ process_pollq(int thread_id)
|
||||
DCB *dcb;
|
||||
int found = 0;
|
||||
uint32_t ev;
|
||||
unsigned long qtime;
|
||||
|
||||
spinlock_acquire(&pollqlock);
|
||||
if (eventq == NULL)
|
||||
@ -612,13 +714,28 @@ uint32_t ev;
|
||||
if (found)
|
||||
{
|
||||
ev = dcb->evq.pending_events;
|
||||
dcb->evq.processing_events = ev;
|
||||
dcb->evq.pending_events = 0;
|
||||
pollStats.evq_pending--;
|
||||
}
|
||||
spinlock_release(&pollqlock);
|
||||
|
||||
if (found == 0)
|
||||
return 0;
|
||||
|
||||
#if PROFILE_POLL
|
||||
memlog_log(plog, hkheartbeat - dcb->evq.inserted);
|
||||
#endif
|
||||
qtime = hkheartbeat - dcb->evq.inserted;
|
||||
dcb->evq.started = hkheartbeat;
|
||||
|
||||
if (qtime > N_QUEUE_TIMES)
|
||||
queueStats.qtimes[N_QUEUE_TIMES]++;
|
||||
else
|
||||
queueStats.qtimes[qtime]++;
|
||||
if (qtime > queueStats.maxqtime)
|
||||
queueStats.maxqtime = qtime;
|
||||
|
||||
|
||||
CHK_DCB(dcb);
|
||||
if (thread_data)
|
||||
@ -836,7 +953,18 @@ uint32_t ev;
|
||||
spinlock_release(&dcb->dcb_initlock);
|
||||
}
|
||||
#endif
|
||||
qtime = hkheartbeat - dcb->evq.started;
|
||||
|
||||
if (qtime > N_QUEUE_TIMES)
|
||||
queueStats.exectimes[N_QUEUE_TIMES]++;
|
||||
else
|
||||
queueStats.exectimes[qtime % N_QUEUE_TIMES]++;
|
||||
if (qtime > queueStats.maxexectime)
|
||||
queueStats.maxexectime = qtime;
|
||||
|
||||
spinlock_acquire(&pollqlock);
|
||||
dcb->evq.processing_events = 0;
|
||||
|
||||
if (dcb->evq.pending_events == 0)
|
||||
{
|
||||
/* No pending events so remove from the queue */
|
||||
@ -930,24 +1058,35 @@ dprintPollStats(DCB *dcb)
|
||||
{
|
||||
int i;
|
||||
|
||||
dcb_printf(dcb, "Number of epoll cycles: %d\n",
|
||||
dcb_printf(dcb, "\nPoll Statistics.\n\n");
|
||||
dcb_printf(dcb, "No. of epoll cycles: %d\n",
|
||||
pollStats.n_polls);
|
||||
dcb_printf(dcb, "Number of read events: %d\n",
|
||||
dcb_printf(dcb, "No. of epoll cycles with wait: %d\n",
|
||||
pollStats.blockingpolls);
|
||||
dcb_printf(dcb, "No. of epoll calls returning events: %d\n",
|
||||
pollStats.n_pollev);
|
||||
dcb_printf(dcb, "No. of non-blocking calls returning events: %d\n",
|
||||
pollStats.n_nbpollev);
|
||||
dcb_printf(dcb, "No. of read events: %d\n",
|
||||
pollStats.n_read);
|
||||
dcb_printf(dcb, "Number of write events: %d\n",
|
||||
dcb_printf(dcb, "No. of write events: %d\n",
|
||||
pollStats.n_write);
|
||||
dcb_printf(dcb, "Number of error events: %d\n",
|
||||
dcb_printf(dcb, "No. of error events: %d\n",
|
||||
pollStats.n_error);
|
||||
dcb_printf(dcb, "Number of hangup events: %d\n",
|
||||
dcb_printf(dcb, "No. of hangup events: %d\n",
|
||||
pollStats.n_hup);
|
||||
dcb_printf(dcb, "Number of accept events: %d\n",
|
||||
dcb_printf(dcb, "No. of accept events: %d\n",
|
||||
pollStats.n_accept);
|
||||
dcb_printf(dcb, "Number of times no threads polling: %d\n",
|
||||
dcb_printf(dcb, "No. of times no threads polling: %d\n",
|
||||
pollStats.n_nothreads);
|
||||
dcb_printf(dcb, "Current event queue length: %d\n",
|
||||
dcb_printf(dcb, "Current event queue length: %d\n",
|
||||
pollStats.evq_length);
|
||||
dcb_printf(dcb, "Maximum event queue length: %d\n",
|
||||
dcb_printf(dcb, "Maximum event queue length: %d\n",
|
||||
pollStats.evq_max);
|
||||
dcb_printf(dcb, "No. of DCBs with pending events: %d\n",
|
||||
pollStats.evq_pending);
|
||||
dcb_printf(dcb, "No. of wakeups with pending queue: %d\n",
|
||||
pollStats.wake_evqpending);
|
||||
|
||||
dcb_printf(dcb, "No of poll completions with descriptors\n");
|
||||
dcb_printf(dcb, "\tNo. of descriptors\tNo. of poll completions.\n");
|
||||
@ -1024,6 +1163,7 @@ dShowThreads(DCB *dcb)
|
||||
int i, j, n;
|
||||
char *state;
|
||||
double avg1 = 0.0, avg5 = 0.0, avg15 = 0.0;
|
||||
double qavg1 = 0.0, qavg5 = 0.0, qavg15 = 0.0;
|
||||
|
||||
|
||||
dcb_printf(dcb, "Polling Threads.\n\n");
|
||||
@ -1032,8 +1172,12 @@ double avg1 = 0.0, avg5 = 0.0, avg15 = 0.0;
|
||||
|
||||
/* Average all the samples to get the 15 minute average */
|
||||
for (i = 0; i < n_avg_samples; i++)
|
||||
{
|
||||
avg15 += avg_samples[i];
|
||||
qavg15 += evqp_samples[i];
|
||||
}
|
||||
avg15 = avg15 / n_avg_samples;
|
||||
qavg15 = qavg15 / n_avg_samples;
|
||||
|
||||
/* Average the last third of the samples to get the 5 minute average */
|
||||
n = 5 * 60 / POLL_LOAD_FREQ;
|
||||
@ -1041,8 +1185,12 @@ double avg1 = 0.0, avg5 = 0.0, avg15 = 0.0;
|
||||
if (i < 0)
|
||||
i += n_avg_samples;
|
||||
for (j = i; j < i + n; j++)
|
||||
{
|
||||
avg5 += avg_samples[j % n_avg_samples];
|
||||
qavg5 += evqp_samples[j % n_avg_samples];
|
||||
}
|
||||
avg5 = (3 * avg5) / (n_avg_samples);
|
||||
qavg5 = (3 * qavg5) / (n_avg_samples);
|
||||
|
||||
/* Average the last 15th of the samples to get the 1 minute average */
|
||||
n = 60 / POLL_LOAD_FREQ;
|
||||
@ -1050,16 +1198,23 @@ double avg1 = 0.0, avg5 = 0.0, avg15 = 0.0;
|
||||
if (i < 0)
|
||||
i += n_avg_samples;
|
||||
for (j = i; j < i + n; j++)
|
||||
{
|
||||
avg1 += avg_samples[j % n_avg_samples];
|
||||
qavg1 += evqp_samples[j % n_avg_samples];
|
||||
}
|
||||
avg1 = (15 * avg1) / (n_avg_samples);
|
||||
qavg1 = (15 * qavg1) / (n_avg_samples);
|
||||
|
||||
dcb_printf(dcb, "15 Minute Average: %.2f, 5 Minute Average: %.2f, "
|
||||
"1 Minute Average: %.2f\n\n", avg15, avg5, avg1);
|
||||
dcb_printf(dcb, "Pending event queue length averages:\n");
|
||||
dcb_printf(dcb, "15 Minute Average: %.2f, 5 Minute Average: %.2f, "
|
||||
"1 Minute Average: %.2f\n\n", qavg15, qavg5, qavg1);
|
||||
|
||||
if (thread_data == NULL)
|
||||
return;
|
||||
dcb_printf(dcb, " ID | State | # fds | Descriptor | Event\n");
|
||||
dcb_printf(dcb, "----+------------+--------+------------------+---------------\n");
|
||||
dcb_printf(dcb, " ID | State | # fds | Descriptor | Running | Event\n");
|
||||
dcb_printf(dcb, "----+------------+--------+------------------+----------+---------------\n");
|
||||
for (i = 0; i < n_threads; i++)
|
||||
{
|
||||
switch (thread_data[i].state)
|
||||
@ -1082,11 +1237,11 @@ double avg1 = 0.0, avg5 = 0.0, avg15 = 0.0;
|
||||
}
|
||||
if (thread_data[i].state != THREAD_PROCESSING)
|
||||
dcb_printf(dcb,
|
||||
" %2d | %-10s | | |\n",
|
||||
" %2d | %-10s | | | |\n",
|
||||
i, state);
|
||||
else if (thread_data[i].cur_dcb == NULL)
|
||||
dcb_printf(dcb,
|
||||
" %2d | %-10s | %6d | |\n",
|
||||
" %2d | %-10s | %6d | | |\n",
|
||||
i, state, thread_data[i].n_fds);
|
||||
else
|
||||
{
|
||||
@ -1104,9 +1259,10 @@ double avg1 = 0.0, avg5 = 0.0, avg15 = 0.0;
|
||||
from_heap = true;
|
||||
}
|
||||
dcb_printf(dcb,
|
||||
" %2d | %-10s | %6d | %-16p | %s\n",
|
||||
" %2d | %-10s | %6d | %-16p | <%3d00ms | %s\n",
|
||||
i, state, thread_data[i].n_fds,
|
||||
thread_data[i].cur_dcb, event_string);
|
||||
thread_data[i].cur_dcb, 1 + hkheartbeat - dcb->evq.started,
|
||||
event_string);
|
||||
|
||||
if (from_heap)
|
||||
{
|
||||
@ -1139,6 +1295,7 @@ int new_samples, new_nfds;
|
||||
else
|
||||
current_avg = 0.0;
|
||||
avg_samples[next_sample] = current_avg;
|
||||
evqp_samples[next_sample] = pollStats.evq_pending;
|
||||
next_sample++;
|
||||
if (next_sample >= n_avg_samples)
|
||||
next_sample = 0;
|
||||
@ -1207,4 +1364,136 @@ static void poll_add_event_to_dcb(
|
||||
}
|
||||
}
|
||||
spinlock_release(&pollqlock);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Insert a fake write completion event for a DCB into the polling
|
||||
* queue.
|
||||
*
|
||||
* This is used to trigger transmission activity on another DCB from
|
||||
* within the event processing routine of a DCB. or to allow a DCB
|
||||
* to defer some further output processing, to allow for other DCBs
|
||||
* to receive a slice of the processing time. Fake events are added
|
||||
* to the tail of the event queue, in the same way that real events
|
||||
* are, so maintain the "fairness" of processing.
|
||||
*
|
||||
* @param dcb DCB to emulate an EPOLLOUT event for
|
||||
*/
|
||||
void
|
||||
poll_fake_write_event(DCB *dcb)
|
||||
{
|
||||
uint32_t ev = EPOLLOUT;
|
||||
|
||||
spinlock_acquire(&pollqlock);
|
||||
/*
|
||||
* If the DCB is already on the queue, there are no pending events and
|
||||
* there are other events on the queue, then
|
||||
* take it off the queue. This stops the DCB hogging the threads.
|
||||
*/
|
||||
if (DCB_POLL_BUSY(dcb) && dcb->evq.pending_events == 0 && dcb->evq.prev != dcb)
|
||||
{
|
||||
dcb->evq.prev->evq.next = dcb->evq.next;
|
||||
dcb->evq.next->evq.prev = dcb->evq.prev;
|
||||
if (eventq == dcb)
|
||||
eventq = dcb->evq.next;
|
||||
dcb->evq.next = NULL;
|
||||
dcb->evq.prev = NULL;
|
||||
pollStats.evq_length--;
|
||||
}
|
||||
|
||||
if (DCB_POLL_BUSY(dcb))
|
||||
{
|
||||
if (dcb->evq.pending_events == 0)
|
||||
pollStats.evq_pending++;
|
||||
dcb->evq.pending_events |= ev;
|
||||
}
|
||||
else
|
||||
{
|
||||
dcb->evq.pending_events = ev;
|
||||
dcb->evq.inserted = hkheartbeat;
|
||||
if (eventq)
|
||||
{
|
||||
dcb->evq.prev = eventq->evq.prev;
|
||||
eventq->evq.prev->evq.next = dcb;
|
||||
eventq->evq.prev = dcb;
|
||||
dcb->evq.next = eventq;
|
||||
}
|
||||
else
|
||||
{
|
||||
eventq = dcb;
|
||||
dcb->evq.prev = dcb;
|
||||
dcb->evq.next = dcb;
|
||||
}
|
||||
pollStats.evq_length++;
|
||||
pollStats.evq_pending++;
|
||||
dcb->evq.inserted = hkheartbeat;
|
||||
if (pollStats.evq_length > pollStats.evq_max)
|
||||
{
|
||||
pollStats.evq_max = pollStats.evq_length;
|
||||
}
|
||||
}
|
||||
spinlock_release(&pollqlock);
|
||||
}
|
||||
|
||||
/**
|
||||
* Print the event queue contents
|
||||
*
|
||||
* @param pdcb The DCB to print the event queue to
|
||||
*/
|
||||
void
|
||||
dShowEventQ(DCB *pdcb)
|
||||
{
|
||||
DCB *dcb;
|
||||
|
||||
spinlock_acquire(&pollqlock);
|
||||
if (eventq == NULL)
|
||||
{
|
||||
/* Nothing to process */
|
||||
spinlock_release(&pollqlock);
|
||||
return;
|
||||
}
|
||||
dcb = eventq;
|
||||
dcb_printf(pdcb, "\nEvent Queue.\n");
|
||||
dcb_printf(pdcb, "%-16s | %-10s | %-18s | %s\n", "DCB", "Status", "Processing Events",
|
||||
"Pending Events");
|
||||
dcb_printf(pdcb, "-----------------+------------+--------------------+-------------------\n");
|
||||
do {
|
||||
dcb_printf(pdcb, "%-16p | %-10s | %-18s | %-18s\n", dcb,
|
||||
dcb->evq.processing ? "Processing" : "Pending",
|
||||
event_to_string(dcb->evq.processing_events),
|
||||
event_to_string(dcb->evq.pending_events));
|
||||
dcb = dcb->evq.next;
|
||||
} while (dcb != eventq);
|
||||
spinlock_release(&pollqlock);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Print the event queue statistics
|
||||
*
|
||||
* @param pdcb The DCB to print the event queue to
|
||||
*/
|
||||
void
|
||||
dShowEventStats(DCB *pdcb)
|
||||
{
|
||||
int i;
|
||||
|
||||
dcb_printf(pdcb, "\nEvent statistics.\n");
|
||||
dcb_printf(pdcb, "Maximum queue time: %3d00ms\n", queueStats.maxqtime);
|
||||
dcb_printf(pdcb, "Maximum execution time: %3d00ms\n", queueStats.maxexectime);
|
||||
dcb_printf(pdcb, "Maximum event queue length: %3d\n", pollStats.evq_max);
|
||||
dcb_printf(pdcb, "Current event queue length: %3d\n", pollStats.evq_length);
|
||||
dcb_printf(pdcb, "\n");
|
||||
dcb_printf(pdcb, " | Number of events\n");
|
||||
dcb_printf(pdcb, "Duration | Queued | Executed\n");
|
||||
dcb_printf(pdcb, "---------------+------------+-----------\n");
|
||||
dcb_printf(pdcb, " < 100ms | %-10d | %-10d\n",
|
||||
queueStats.qtimes[0], queueStats.exectimes[0]);
|
||||
for (i = 1; i < N_QUEUE_TIMES; i++)
|
||||
{
|
||||
dcb_printf(pdcb, " %2d00 - %2d00ms | %-10d | %-10d\n", i, i + 1,
|
||||
queueStats.qtimes[i], queueStats.exectimes[i]);
|
||||
}
|
||||
dcb_printf(pdcb, " > %2d00ms | %-10d | %-10d\n", N_QUEUE_TIMES,
|
||||
queueStats.qtimes[N_QUEUE_TIMES], queueStats.exectimes[N_QUEUE_TIMES]);
|
||||
}
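For reading the table above: queue and execution times are measured in housekeeper heartbeats, which tick roughly every 100ms, so an event inserted at heartbeat 12345 and started at 12352 has a queue time of 7 ticks and is counted in the " 700 -  800ms" row; anything beyond N_QUEUE_TIMES (30) ticks falls into the final "> 3000ms" row, and the raw maxima are reported separately at the top. A minimal sketch of the bucketing, using the names from the code above:

/* Sketch of the bucketing used for the "Queued" column */
unsigned long	qtime = hkheartbeat - dcb->evq.inserted;	/* ticks of ~100ms */
int		row = (qtime > N_QUEUE_TIMES) ? N_QUEUE_TIMES : (int)qtime;
queueStats.qtimes[row]++;	/* row N prints as "N00 - (N+1)00ms" */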
|
||||
|
@ -11,6 +11,7 @@ add_executable(test_service testservice.c)
|
||||
add_executable(test_server testserver.c)
|
||||
add_executable(test_users testusers.c)
|
||||
add_executable(test_adminusers testadminusers.c)
|
||||
add_executable(testmemlog testmemlog.c)
|
||||
target_link_libraries(test_mysql_users MySQLClient fullcore)
|
||||
target_link_libraries(test_hash fullcore)
|
||||
target_link_libraries(test_hint fullcore)
|
||||
@ -25,6 +26,7 @@ target_link_libraries(test_server fullcore)
|
||||
target_link_libraries(test_users fullcore)
|
||||
target_link_libraries(test_adminusers fullcore)
|
||||
add_test(testMySQLUsers test_mysql_users)
|
||||
target_link_libraries(testmemlog fullcore)
|
||||
add_test(TestHash test_hash)
|
||||
add_test(TestHint test_hint)
|
||||
add_test(TestSpinlock test_spinlock)
|
||||
@ -37,3 +39,4 @@ add_test(TestService test_service)
|
||||
add_test(TestServer test_server)
|
||||
add_test(TestUsers test_users)
|
||||
add_test(TestAdminUsers test_adminusers)
|
||||
add_test(TestMemlog testmemlog)
|
||||
|
@ -22,7 +22,7 @@ LIBS= -L$(EMBEDDED_LIB) -lmysqld \
|
||||
-lz -lm -lcrypt -lcrypto -ldl -laio -lrt -pthread -llog_manager \
|
||||
-L../../inih/extra -linih -lssl -lstdc++
|
||||
|
||||
TESTS=testhash testspinlock testbuffer testmodutil testpoll testservice testdcb testfilter testadminusers
|
||||
TESTS=testhash testspinlock testbuffer testmodutil testpoll testservice testdcb testfilter testadminusers testmemlog
|
||||
|
||||
cleantests:
|
||||
- $(DEL) *.o
|
||||
@ -100,6 +100,13 @@ testadminusers: testadminusers.c libcore.a
|
||||
-I$(ROOT_PATH)/utils \
|
||||
testadminusers.c libcore.a $(UTILSPATH)/skygw_utils.o $(LIBS) -o testadminusers
|
||||
|
||||
testmemlog: testmemlog.c libcore.a
|
||||
$(CC) $(CFLAGS) $(LDFLAGS) \
|
||||
-I$(ROOT_PATH)/server/include \
|
||||
-I$(ROOT_PATH)/utils \
|
||||
testmemlog.c libcore.a $(UTILSPATH)/skygw_utils.o $(LIBS) -o testmemlog
|
||||
|
||||
|
||||
libcore.a: ../*.o
|
||||
ar rv libcore.a ../*.o
|
||||
|
||||
|
server/core/test/testmemlog.c (new file, 404 lines)
@ -0,0 +1,404 @@
|
||||
/*
|
||||
* This file is distributed as part of MaxScale from MariaDB. It is free
|
||||
* software: you can redistribute it and/or modify it under the terms of the
|
||||
* GNU General Public License as published by the Free Software Foundation,
|
||||
* version 2.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
|
||||
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
|
||||
* details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program; if not, write to the Free Software Foundation, Inc., 51
|
||||
* Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Copyright MariaDB Corporation 2014
|
||||
*/
|
||||
|
||||
/**
|
||||
*
|
||||
* @verbatim
|
||||
* Revision History
|
||||
*
|
||||
* Date Who Description
|
||||
* 30/09/2014 Mark Riddoch Initial implementation
|
||||
*
|
||||
* @endverbatim
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <unistd.h>
|
||||
#include <string.h>
|
||||
#include <memlog.h>
|
||||
|
||||
/**
|
||||
* Count the number of lines in a file
|
||||
*
|
||||
* @param file The name of the file
|
||||
* @return -1 if the file could not be opened or the numebr of lines
|
||||
*/
|
||||
int
|
||||
linecount(char *file)
|
||||
{
|
||||
FILE *fp;
|
||||
int i = 0;
|
||||
char buffer[180];
|
||||
|
||||
if ((fp = fopen(file, "r")) == NULL)
|
||||
return -1;
|
||||
while (fgets(buffer, 180, fp) != NULL)
|
||||
i++;
|
||||
fclose(fp);
|
||||
return i;
|
||||
}
|
||||
|
||||
/* Some strings to log */
|
||||
char *strings[] = {
|
||||
"First log entry",
|
||||
"Second entry",
|
||||
"Third",
|
||||
"The fourth thing to log",
|
||||
"Add a final 5th item"
|
||||
};
|
||||
|
||||
int
|
||||
main()
|
||||
{
|
||||
MEMLOG *log, *log2;
|
||||
int i;
|
||||
long j;
|
||||
long long k;
|
||||
int failures = 0;
|
||||
|
||||
unlink("memlog1");
|
||||
if ((log = memlog_create("memlog1", ML_INT, 100)) == NULL)
|
||||
{
|
||||
printf("Memlog Creation: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
{
|
||||
printf("Memlog Creation: Passed\n");
|
||||
if (access("memlog1",R_OK) == 0)
|
||||
{
|
||||
printf("File existance 1: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf("File existance 1: Passed\n");
|
||||
for (i = 0; i < 50; i++)
|
||||
memlog_log(log, (void *)i);
|
||||
if (access("memlog1",R_OK) == 0)
|
||||
{
|
||||
printf("File existance 2: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf("File existance 2: Passed\n");
|
||||
for (i = 0; i < 50; i++)
|
||||
memlog_log(log, (void *)i);
|
||||
if (access("memlog1",R_OK) != 0)
|
||||
{
|
||||
printf("File existance 3: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf("File existance 3: Passed\n");
|
||||
if (linecount("memlog1") != 100)
|
||||
{
|
||||
printf("Incorrect entry count: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf("Incorrect entry count: Passed\n");
|
||||
for (i = 0; i < 50; i++)
|
||||
memlog_log(log, (void *)i);
|
||||
if (linecount("memlog1") != 100)
|
||||
{
|
||||
printf("Premature Flushing: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf("Premature Flushing: Passed\n");
|
||||
memlog_destroy(log);
|
||||
if (linecount("memlog1") != 150)
|
||||
{
|
||||
printf("Flush on destroy: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf("Flush on destroy: Passed\n");
|
||||
}
|
||||
|
||||
unlink("memlog2");
|
||||
if ((log = memlog_create("memlog2", ML_LONG, 100)) == NULL)
|
||||
{
|
||||
printf("Memlog Creation: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
{
|
||||
printf("Memlog Creation: Passed\n");
|
||||
if (access("memlog2",R_OK) == 0)
|
||||
{
|
||||
printf("File existance 1: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf("File existance 1: Passed\n");
|
||||
for (j = 0; j < 50; j++)
|
||||
memlog_log(log, (void *)j);
|
||||
if (access("memlog2",R_OK) == 0)
|
||||
{
|
||||
printf("File existance 2: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf("File existance 2: Passed\n");
|
||||
for (j = 0; j < 50; j++)
|
||||
memlog_log(log, (void *)j);
|
||||
if (access("memlog2",R_OK) != 0)
|
||||
{
|
||||
printf("File existance 3: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf("File existance 3: Passed\n");
|
||||
if (linecount("memlog2") != 100)
|
||||
{
|
||||
printf("Incorrect entry count: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf("Incorrect entry count: Passed\n");
|
||||
for (j = 0; j < 50; j++)
|
||||
memlog_log(log, (void *)j);
|
||||
if (linecount("memlog2") != 100)
|
||||
{
|
||||
printf("Premature Flushing: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf("Premature Flushing: Passed\n");
|
||||
memlog_destroy(log);
|
||||
if (linecount("memlog2") != 150)
|
||||
{
|
||||
printf("Flush on destroy: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf("Flush on destroy: Passed\n");
|
||||
}
|
||||
|
||||
unlink("memlog3");
|
||||
if ((log = memlog_create("memlog3", ML_LONGLONG, 100)) == NULL)
|
||||
{
|
||||
printf("Memlog Creation: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
{
|
||||
printf("Memlog Creation: Passed\n");
|
||||
if (access("memlog3",R_OK) == 0)
|
||||
{
|
||||
printf("File existance 1: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf("File existance 1: Passed\n");
|
||||
for (k = 0; k < 50; k++)
|
||||
memlog_log(log, (void *)k);
|
||||
if (access("memlog3",R_OK) == 0)
|
||||
{
|
||||
printf("File existance 2: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf("File existance 2: Passed\n");
|
||||
for (k = 0; k < 50; k++)
|
||||
memlog_log(log, (void *)k);
|
||||
if (access("memlog3",R_OK) != 0)
|
||||
{
|
||||
printf("File existance 3: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf("File existance 3: Passed\n");
|
||||
if (linecount("memlog3") != 100)
|
||||
{
|
||||
printf("Incorrect entry count: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf("Incorrect entry count: Passed\n");
|
||||
for (k = 0; k < 50; k++)
|
||||
memlog_log(log, (void *)k);
|
||||
if (linecount("memlog3") != 100)
|
||||
{
|
||||
printf("Premature Flushing: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf("Premature Flushing: Passed\n");
|
||||
memlog_destroy(log);
|
||||
if (linecount("memlog3") != 150)
|
||||
{
|
||||
printf("Flush on destroy: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf("Flush on destroy: Passed\n");
|
||||
}
|
||||
|
||||
unlink("memlog4");
|
||||
if ((log = memlog_create("memlog4", ML_STRING, 100)) == NULL)
|
||||
{
|
||||
printf("Memlog Creation: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
{
|
||||
printf("Memlog Creation: Passed\n");
|
||||
if (access("memlog4",R_OK) == 0)
|
||||
{
|
||||
printf("File existance 1: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf("File existance 1: Passed\n");
|
||||
for (i = 0; i < 50; i++)
|
||||
memlog_log(log, strings[i%5]);
|
||||
if (access("memlog4",R_OK) == 0)
|
||||
{
|
||||
printf("File existance 2: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf("File existance 2: Passed\n");
|
||||
for (i = 0; i < 50; i++)
|
||||
memlog_log(log, strings[i%5]);
|
||||
if (access("memlog4",R_OK) != 0)
|
||||
{
|
||||
printf("File existance 3: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf("File existance 3: Passed\n");
|
||||
if (linecount("memlog4") != 100)
|
||||
{
|
||||
printf("Incorrect entry count: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf("Incorrect entry count: Passed\n");
|
||||
for (i = 0; i < 50; i++)
|
||||
memlog_log(log, strings[i%5]);
|
||||
if (linecount("memlog4") != 100)
|
||||
{
|
||||
printf("Premature Flushing: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf("Premature Flushing: Passed\n");
|
||||
memlog_destroy(log);
|
||||
if (linecount("memlog4") != 150)
|
||||
{
|
||||
printf("Flush on destroy: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf("Flush on destroy: Passed\n");
|
||||
}
|
||||
|
||||
unlink("memlog5");
|
||||
unlink("memlog6");
|
||||
if ((log = memlog_create("memlog5", ML_INT, 100)) == NULL)
|
||||
{
|
||||
printf("Memlog Creation: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
{
|
||||
printf("Memlog Creation: Passed\n");
|
||||
if ((log2 = memlog_create("memlog6", ML_INT, 100)) == NULL)
|
||||
{
|
||||
printf("Memlog Creation: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
{
|
||||
printf("Memlog Creation: Passed\n");
|
||||
for (i = 0; i < 40; i++)
|
||||
memlog_log(log, (void *)i);
|
||||
for (i = 0; i < 30; i++)
|
||||
memlog_log(log2, (void *)i);
|
||||
memlog_flush_all();
|
||||
if (linecount("memlog5") != 40 ||
|
||||
linecount("memlog6") != 30)
|
||||
{
|
||||
printf(
|
||||
"Memlog flush all: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf(
|
||||
"Memlog flush all: Passed\n");
|
||||
}
|
||||
}
|
||||
|
||||
unlink("memlog7");
|
||||
if ((log = memlog_create("memlog7", ML_INT, 100)) == NULL)
|
||||
{
|
||||
printf("Memlog Creation: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
{
|
||||
printf("Memlog Creation: Passed\n");
|
||||
if (access("memlog7",R_OK) == 0)
|
||||
{
|
||||
printf("File existance 1: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf("File existance 1: Passed\n");
|
||||
for (i = 0; i < 5050; i++)
|
||||
memlog_log(log, (void *)i);
|
||||
if (access("memlog7",R_OK) != 0)
|
||||
{
|
||||
printf("File existance 3: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf("File existance 3: Passed\n");
|
||||
if (linecount("memlog7") != 5000)
|
||||
{
|
||||
printf("Incorrect entry count: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf("Incorrect entry count: Passed\n");
|
||||
for (i = 0; i < 50; i++)
|
||||
memlog_log(log, (void *)i);
|
||||
if (linecount("memlog7") != 5100)
|
||||
{
|
||||
printf("Residual flushing: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf("Premature Flushing: Passed\n");
|
||||
for (i = 0; i < 10120; i++)
|
||||
memlog_log(log, (void *)i);
|
||||
memlog_destroy(log);
|
||||
if (linecount("memlog7") != 15220)
|
||||
{
|
||||
printf("Flush on destroy: Failed\n");
|
||||
failures++;
|
||||
}
|
||||
else
|
||||
printf("Flush on destroy: Passed\n");
|
||||
}
|
||||
exit(failures);
|
||||
}
|