Refactor stats reporting to make custom reports possible.

Alexey Kopytov
2017-01-29 19:32:14 +03:00
parent bc531b28ad
commit 04b4f756d9
32 changed files with 641 additions and 610 deletions
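The diffs below replace the single print_stats(sb_report_t) callback with two sb_op_report hooks fed by a shared sb_stat_t snapshot, with sb_report_intermediate()/sb_report_cumulative() as stock fallbacks. A minimal sketch of what this enables (not part of the commit; the my_* names are hypothetical):

#include <inttypes.h>

#include "sysbench.h"   /* sb_stat_t, sb_operations_t, default sb_report_*() */

/* Hypothetical handler: emit a one-line intermediate report. */
static void my_report_intermediate(sb_stat_t *stat)
{
  log_timestamp(LOG_NOTICE, stat->time_total, "events/s: %4.2f",
                stat->events / stat->time_interval);
}

/* Hypothetical handler: prepend a custom line, then chain to the
   default so the shared "General statistics" block is still printed. */
static void my_report_cumulative(sb_stat_t *stat)
{
  log_text(LOG_NOTICE, "total events: %" PRIu64, stat->events);
  sb_report_cumulative(stat);
}

static sb_operations_t my_ops = {
  .report_intermediate = my_report_intermediate,
  .report_cumulative = my_report_cumulative,
  /* ... the remaining callbacks (init, next_event, ...) as before ... */
};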

View File

@@ -933,43 +933,32 @@ int db_bulk_insert_done(db_conn_t *con)
   return 0;
 }
-static void db_print_stats_intermediate(void)
+void db_report_intermediate(sb_stat_t *stat)
 {
-  sb_counters_t cnt;
-  /* Don't print stats if no drivers are used */
+  /* Use default stats handler if no drivers are used */
   if (!check_print_stats())
+  {
+    sb_report_intermediate(stat);
     return;
+  }
-  const double percentile_val =
-    sb_histogram_get_pct_intermediate(&sb_latency_histogram,
-                                      sb_globals.percentile);
-  sb_counters_agg_intermediate(cnt);
+  const double seconds = stat->time_interval;
-  const double seconds = NS2SEC(sb_timer_checkpoint(&sb_intermediate_timer));
-  const uint64_t events = cnt[SB_CNT_EVENT];
-  const uint64_t reads = cnt[SB_CNT_READ];
-  const uint64_t writes = cnt[SB_CNT_WRITE];
-  const uint64_t others = cnt[SB_CNT_OTHER];
-  const uint64_t errors = cnt[SB_CNT_ERROR];
-  const uint64_t reconnects = cnt[SB_CNT_RECONNECT];
-  log_timestamp(LOG_NOTICE, NS2SEC(sb_timer_value(&sb_exec_timer)),
-                "threads: %d tps: %4.2f "
+  log_timestamp(LOG_NOTICE, stat->time_total,
+                "threads: %u tps: %4.2f "
                 "qps: %4.2f (r/w/o: %4.2f/%4.2f/%4.2f) "
-                "latency: %4.2fms (%u%%) errors/s: %4.2f "
+                "latency (ms,%u%%): %4.2f errors/s: %4.2f "
                 "reconnects/s: %4.2f",
-                sb_globals.num_running,
-                events / seconds,
-                (reads + writes + others) / seconds,
-                reads / seconds,
-                writes / seconds,
-                others / seconds,
-                percentile_val,
+                stat->threads_running,
+                stat->events / seconds,
+                (stat->reads + stat->writes + stat->other) / seconds,
+                stat->reads / seconds,
+                stat->writes / seconds,
+                stat->other / seconds,
                 sb_globals.percentile,
-                errors / seconds,
-                reconnects / seconds);
+                SEC2MS(stat->latency_pct),
+                stat->errors / seconds,
+                stat->reconnects / seconds);
if (sb_globals.tx_rate > 0)
{
@@ -982,46 +971,40 @@ static void db_print_stats_intermediate(void)
 }
-static void db_print_stats_cumulative(void)
+void db_report_cumulative(sb_stat_t *stat)
 {
-  sb_counters_t cnt;
   sb_timer_t exec_timer;
   sb_timer_t fetch_timer;
-  /* Don't print stats if no drivers are used */
+  /* Use default stats handler if no drivers are used */
   if (!check_print_stats())
+  {
+    sb_report_cumulative(stat);
     return;
+  }
-  sb_counters_agg_cumulative(cnt);
+  const double seconds = stat->time_interval;
+  const uint64_t queries = stat->reads + stat->writes + stat->other;
-  const double seconds = NS2SEC(sb_timer_checkpoint(&sb_checkpoint_timer1));
-  const uint64_t events = cnt[SB_CNT_EVENT];
-  const uint64_t reads = cnt[SB_CNT_READ];
-  const uint64_t writes = cnt[SB_CNT_WRITE];
-  const uint64_t others = cnt[SB_CNT_OTHER];
-  const uint64_t errors = cnt[SB_CNT_ERROR];
-  const uint64_t reconnects = cnt[SB_CNT_RECONNECT];
-  log_text(LOG_NOTICE, "OLTP test statistics:");
+  log_text(LOG_NOTICE, "SQL statistics:");
   log_text(LOG_NOTICE, " queries performed:");
   log_text(LOG_NOTICE, " read: %" PRIu64,
-           reads);
+           stat->reads);
   log_text(LOG_NOTICE, " write: %" PRIu64,
-           writes);
+           stat->writes);
   log_text(LOG_NOTICE, " other: %" PRIu64,
-           others);
+           stat->other);
   log_text(LOG_NOTICE, " total: %" PRIu64,
-           reads + writes + others);
+           queries);
   log_text(LOG_NOTICE, " transactions: %-6" PRIu64
-           " (%.2f per sec.)", events, events / seconds);
+           " (%.2f per sec.)", stat->events, stat->events / seconds);
   log_text(LOG_NOTICE, " queries: %-6" PRIu64
-           " (%.2f per sec.)", reads + writes + others,
-           (reads + writes + others) / seconds);
+           " (%.2f per sec.)", queries,
+           queries / seconds);
   log_text(LOG_NOTICE, " ignored errors: %-6" PRIu64
-           " (%.2f per sec.)", errors, errors / seconds);
+           " (%.2f per sec.)", stat->errors, stat->errors / seconds);
   log_text(LOG_NOTICE, " reconnects: %-6" PRIu64
-           " (%.2f per sec.)", reconnects, reconnects / seconds);
+           " (%.2f per sec.)", stat->reconnects, stat->reconnects / seconds);
if (db_globals.debug)
{
@@ -1055,17 +1038,9 @@ static void db_print_stats_cumulative(void)
     log_text(LOG_DEBUG, " total: %.4fs",
              NS2SEC(sb_timer_sum(&fetch_timer)));
   }
-}
-/* Print database-specific test stats */
-void db_print_stats(sb_report_t type)
-{
-  if (type == SB_REPORT_INTERMEDIATE)
-    db_print_stats_intermediate();
-  else
-    db_print_stats_cumulative();
+  /* Report sysbench general stats */
+  sb_report_cumulative(stat);
 }

View File

@@ -310,7 +310,8 @@ int db_bulk_insert_next(db_conn_t *, const char *, size_t);
 int db_bulk_insert_done(db_conn_t *);
 /* Print database-specific test stats */
-void db_print_stats(sb_report_t type);
+void db_report_intermediate(sb_stat_t *);
+void db_report_cumulative(sb_stat_t *);
/* DB drivers registrars */

View File

@@ -53,15 +53,6 @@
#define OPER_LOG_MIN_VALUE 1e-3
#define OPER_LOG_MAX_VALUE 1E5
/* per-thread timers for response time stats */
sb_timer_t *timers;
/*
Mutex protecting timers.
TODO: replace with an rwlock (and implement pthread rwlocks for Windows).
*/
pthread_mutex_t timers_mutex;
/* Array of message handlers (one chain per message type) */
static sb_list_t handlers[LOG_MSG_TYPE_MAX];
@@ -73,9 +64,6 @@ static pthread_mutex_t text_mutex;
static unsigned int text_cnt;
static char text_buf[TEXT_BUFFER_SIZE];
/* Temporary copy of timers */
static sb_timer_t *timers_copy;
static int text_handler_init(void);
static int text_handler_process(log_msg_t *msg);
@@ -488,7 +476,6 @@
int oper_handler_init(void)
{
unsigned int i;
int tmp;
tmp = sb_get_value_int("percentile");
@@ -511,195 +498,14 @@ oper_handler_init
OPER_LOG_MIN_VALUE, OPER_LOG_MAX_VALUE))
return 1;
timers = sb_memalign(sb_globals.num_threads * sizeof(sb_timer_t),
CK_MD_CACHELINE);
timers_copy = sb_memalign(sb_globals.num_threads * sizeof(sb_timer_t),
CK_MD_CACHELINE);
if (timers == NULL || timers_copy == NULL)
{
log_text(LOG_FATAL, "Memory allocation failure");
return 1;
}
for (i = 0; i < sb_globals.num_threads; i++)
sb_timer_init(&timers[i]);
if (sb_globals.n_checkpoints > 0)
pthread_mutex_init(&timers_mutex, NULL);
return 0;
}
/*
Print global stats either from the last checkpoint (if used) or
from the test start.
*/
int print_global_stats(void)
{
double diff;
unsigned int i;
unsigned int nthreads;
sb_timer_t t;
/* variables to count thread fairness */
double events_avg;
double events_stddev;
double time_avg;
double time_stddev;
unsigned long long total_time_ns;
sb_timer_init(&t);
nthreads = sb_globals.num_threads;
/* Create a temporary copy of timers and reset them */
if (sb_globals.n_checkpoints > 0)
pthread_mutex_lock(&timers_mutex);
memcpy(timers_copy, timers, sb_globals.num_threads * sizeof(sb_timer_t));
for (i = 0; i < sb_globals.num_threads; i++)
sb_timer_reset(&timers[i]);
total_time_ns = sb_timer_checkpoint(&sb_checkpoint_timer2);
if (sb_globals.n_checkpoints > 0)
pthread_mutex_unlock(&timers_mutex);
if (sb_globals.forced_shutdown_in_progress)
{
/*
In case we print statistics on forced shutdown, there may be (potentially
long running or hung) transactions which are still in progress.
We still want to reflect them in statistics, so stop running timers to
consider long transactions as done at the forced shutdown time, and print
a counter of still running transactions.
*/
unsigned int unfinished = 0;
for (i = 0; i < nthreads; i++)
{
if (sb_timer_running(&timers_copy[i]))
{
unfinished++;
sb_timer_stop(&timers_copy[i]);
};
}
if (unfinished > 0)
{
log_text(LOG_NOTICE, "");
log_text(LOG_NOTICE, "Number of unfinished transactions on "
"forced shutdown: %u", unfinished);
}
}
for(i = 0; i < nthreads; i++)
t = sb_timer_merge(&t, &timers_copy[i]);
/* Print total statistics */
log_text(LOG_NOTICE, "");
log_text(LOG_NOTICE, "General statistics:");
log_text(LOG_NOTICE, " total time: %.4fs",
NS2SEC(total_time_ns));
log_text(LOG_NOTICE, " total number of events: %llu",
(unsigned long long) t.events);
log_text(LOG_NOTICE, " total time taken by event execution: %.4fs",
NS2SEC(sb_timer_sum(&t)));
log_text(LOG_NOTICE, "");
if (sb_globals.histogram)
{
log_text(LOG_NOTICE, "Latency histogram (values are in milliseconds)");
sb_histogram_print(&sb_latency_histogram);
log_text(LOG_NOTICE, " ");
}
log_text(LOG_NOTICE, "Latency statistics:");
log_text(LOG_NOTICE, " min: %10.2fms",
NS2MS(sb_timer_min(&t)));
log_text(LOG_NOTICE, " avg: %10.2fms",
NS2MS(sb_timer_avg(&t)));
log_text(LOG_NOTICE, " max: %10.2fms",
NS2MS(sb_timer_max(&t)));
/* Print approximate percentile value for event latency */
if (sb_globals.percentile > 0)
{
log_text(LOG_NOTICE, " approx. %3dth percentile: %10.2fms",
sb_globals.percentile,
sb_histogram_get_pct_checkpoint(&sb_latency_histogram,
sb_globals.percentile));
}
else
{
log_text(LOG_NOTICE, " percentile stats: disabled");
}
log_text(LOG_NOTICE, "");
/*
Check how fair thread distribution over task is.
We check amount of events/thread as well as avg event execution time.
Fairness is calculated in %, looking at maximum and average difference
from the average time/request and req/thread
*/
events_avg = (double)t.events / nthreads;
time_avg = NS2SEC(sb_timer_sum(&t)) / nthreads;
events_stddev = 0;
time_stddev = 0;
for(i = 0; i < nthreads; i++)
{
diff = fabs(events_avg - timers_copy[i].events);
events_stddev += diff*diff;
diff = fabs(time_avg - NS2SEC(sb_timer_sum(&timers_copy[i])));
time_stddev += diff*diff;
}
events_stddev = sqrt(events_stddev / nthreads);
time_stddev = sqrt(time_stddev / nthreads);
log_text(LOG_NOTICE, "Threads fairness:");
log_text(LOG_NOTICE, " events (avg/stddev): %.4f/%3.2f",
events_avg, events_stddev);
log_text(LOG_NOTICE, " execution time (avg/stddev): %.4f/%3.2f",
time_avg, time_stddev);
log_text(LOG_NOTICE, "");
if (sb_globals.debug)
{
log_text(LOG_DEBUG, "Verbose per-thread statistics:\n");
for(i = 0; i < nthreads; i++)
{
log_text(LOG_DEBUG, " thread #%3d: min: %.4fs avg: %.4fs max: %.4fs "
"events: %llu",i,
NS2SEC(sb_timer_min(&timers_copy[i])),
NS2SEC(sb_timer_avg(&timers_copy[i])),
NS2SEC(sb_timer_max(&timers_copy[i])),
(unsigned long long) timers_copy[i].events);
log_text(LOG_DEBUG, " "
"total time taken by even execution: %.4fs",
NS2SEC(sb_timer_sum(&timers_copy[i]))
);
}
log_text(LOG_NOTICE, "");
}
return 0;
}
/* Uninitialize operations messages handler */
int oper_handler_done(void)
{
print_global_stats();
free(timers);
free(timers_copy);
if (sb_globals.n_checkpoints > 0)
pthread_mutex_destroy(&timers_mutex);
sb_histogram_done(&sb_latency_histogram);
return 0;

View File

@@ -107,10 +107,6 @@ typedef struct {
sb_list_item_t listitem; /* can be linked in a list */
} log_handler_t;
/* per-thread timers for response time stats */
extern sb_timer_t *timers;
extern pthread_mutex_t timers_mutex;
/* Register logger */
int log_register(void);
@@ -154,11 +150,4 @@ void log_errno(log_msg_priority_t priority, const char *fmt, ...)
void log_done(void);
/*
Print global stats either from the last checkpoint (if used) or
from the test start.
*/
int print_global_stats(void);
#endif /* SB_LOGGER_H */

View File

@@ -133,12 +133,13 @@ static int sb_lua_op_done(void);
 static int sb_lua_op_thread_init(int);
 static int sb_lua_op_thread_run(int);
 static int sb_lua_op_thread_done(int);
-static void sb_lua_op_print_stats(sb_report_t type);
 static sb_operations_t lua_ops = {
   .init = sb_lua_op_init,
   .thread_init = sb_lua_op_thread_init,
   .thread_done = sb_lua_op_thread_done,
+  .report_intermediate = db_report_intermediate,
+  .report_cumulative = db_report_cumulative,
.done = sb_lua_op_done
};
@@ -339,8 +340,6 @@ sb_test_t *sb_load_lua(const char *testname)
if (func_available(gstate, THREAD_RUN_FUNC))
sbtest.ops.thread_run = &sb_lua_op_thread_run;
sbtest.ops.print_stats = &sb_lua_op_print_stats;
/* Allocate per-thread interpreters array */
states = (lua_State **)calloc(sb_globals.num_threads, sizeof(lua_State *));
if (states == NULL)
@@ -449,11 +448,6 @@ int sb_lua_op_thread_done(int thread_id)
return rc;
}
void sb_lua_op_print_stats(sb_report_t type)
{
db_print_stats(type);
}
int sb_lua_op_done(void)
{
xfree(states);

View File

@@ -51,6 +51,10 @@
#define NS2MS(nsec) ((nsec)/1000000.)
#define MS2NS(sec) ((sec)*1000000ULL)
/* Convert milliseconds to seconds and vice versa */
#define MS2SEC(msec) ((msec)/1000.)
#define SEC2MS(sec) ((sec)*1000)
/* Difference between two 'timespec' values in nanoseconds */
#define TIMESPEC_DIFF(a,b) (SEC2NS(a.tv_sec - b.tv_sec) + \
(a.tv_nsec - b.tv_nsec))
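The new MS2SEC/SEC2MS pair completes the conversion chain the reporting code relies on: timers count nanoseconds, sb_stat_t stores seconds, and reports print milliseconds. A tiny self-contained illustration (the sample value is made up):

#include <stdio.h>
#include <stdint.h>

#define NS2SEC(nsec) ((nsec)/1000000000.)
#define MS2SEC(msec) ((msec)/1000.)
#define SEC2MS(sec) ((sec)*1000)

int main(void)
{
  uint64_t latency_ns = 12500000ULL;        /* 12.5 ms measured by a timer */
  double latency_s = NS2SEC(latency_ns);    /* 0.0125 s, as kept in sb_stat_t */
  printf("latency (ms): %4.2f\n", SEC2MS(latency_s));  /* prints 12.50 */
  return 0;
}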

View File

@@ -140,13 +140,21 @@ static int report_thread_created;
 static int checkpoints_thread_created;
 static int eventgen_thread_created;
+/* per-thread timers for response time stats */
+static sb_timer_t *timers;
+/* Mutex protecting timers. */
+static pthread_mutex_t timers_mutex;
+/* Temporary copy of timers for checkpoint reports */
+static sb_timer_t *timers_copy;
 /* Global execution timer */
 sb_timer_t sb_exec_timer CK_CC_CACHELINE;
 /* timers for intermediate/checkpoint reports */
 sb_timer_t sb_intermediate_timer CK_CC_CACHELINE;
-sb_timer_t sb_checkpoint_timer1 CK_CC_CACHELINE;
-sb_timer_t sb_checkpoint_timer2 CK_CC_CACHELINE;
+sb_timer_t sb_checkpoint_timer CK_CC_CACHELINE;
TLS int sb_tls_thread_id;
@@ -167,6 +175,226 @@ static void sigalrm_thread_init_timeout_handler(int sig)
exit(2);
}
/* Default intermediate reports handler */
void sb_report_intermediate(sb_stat_t *stat)
{
log_timestamp(LOG_NOTICE, stat->time_total,
"threads: %" PRIu32 " events/s: %4.2f latency (ms,%u%%): %4.2f",
stat->threads_running,
stat->events / stat->time_interval,
sb_globals.percentile,
SEC2MS(stat->latency_pct));
}
static void report_get_common_stat(sb_stat_t *stat, sb_counters_t cnt)
{
memset(stat, 0, sizeof(sb_stat_t));
stat->threads_running = sb_globals.num_running;
stat->events = cnt[SB_CNT_EVENT];
stat->reads = cnt[SB_CNT_READ];
stat->writes = cnt[SB_CNT_WRITE];
stat->other = cnt[SB_CNT_OTHER];
stat->errors = cnt[SB_CNT_ERROR];
stat->reconnects = cnt[SB_CNT_RECONNECT];
stat->time_total = NS2SEC(sb_timer_value(&sb_exec_timer));
}
static void report_intermediate(void)
{
sb_stat_t stat;
sb_counters_t cnt;
/*
sb_globals.report_interval may be set to 0 by the master thread to
silence intermediate reports at the end of the test
*/
if (ck_pr_load_uint(&sb_globals.report_interval) == 0)
{
return;
}
sb_counters_agg_intermediate(cnt);
report_get_common_stat(&stat, cnt);
stat.latency_pct =
MS2SEC(sb_histogram_get_pct_intermediate(&sb_latency_histogram,
sb_globals.percentile));
stat.time_interval = NS2SEC(sb_timer_checkpoint(&sb_intermediate_timer));
if (current_test && current_test->ops.report_intermediate)
current_test->ops.report_intermediate(&stat);
else
sb_report_intermediate(&stat);
}
/* Default cumulative reports handler */
void sb_report_cumulative(sb_stat_t *stat)
{
const unsigned int nthreads = sb_globals.num_threads;
if (sb_globals.forced_shutdown_in_progress)
{
/*
In case we print statistics on forced shutdown, there may be (potentially
long running or hung) transactions which are still in progress.
We still want to reflect them in statistics, so stop running timers to
consider long transactions as done at the forced shutdown time, and print
a counter of still running transactions.
*/
unsigned unfinished = 0;
for (unsigned i = 0; i < nthreads; i++)
{
if (sb_timer_running(&timers_copy[i]))
{
unfinished++;
sb_timer_stop(&timers_copy[i]);
};
}
if (unfinished > 0)
{
log_text(LOG_NOTICE, "");
log_text(LOG_NOTICE, "Number of unfinished transactions on "
"forced shutdown: %u", unfinished);
}
}
log_text(LOG_NOTICE, "");
log_text(LOG_NOTICE, "General statistics:");
log_text(LOG_NOTICE, " total time: %.4fs",
stat->time_total);
log_text(LOG_NOTICE, " total number of events: %llu",
stat->events);
log_text(LOG_NOTICE, "");
log_text(LOG_NOTICE, "Latency (ms):");
log_text(LOG_NOTICE, " min: %10.2f",
SEC2MS(stat->latency_min));
log_text(LOG_NOTICE, " avg: %10.2f",
SEC2MS(stat->latency_avg));
log_text(LOG_NOTICE, " max: %10.2f",
SEC2MS(stat->latency_max));
if (sb_globals.percentile > 0)
log_text(LOG_NOTICE, " %3dth percentile: %10.2f",
sb_globals.percentile, SEC2MS(stat->latency_pct));
else
log_text(LOG_NOTICE, " percentile stats: disabled");
log_text(LOG_NOTICE, " sum: %10.2f",
SEC2MS(stat->latency_sum));
log_text(LOG_NOTICE, "");
/* Aggregate temporary timers copy */
sb_timer_t t;
sb_timer_init(&t);
for(unsigned i = 0; i < nthreads; i++)
t = sb_timer_merge(&t, &timers_copy[i]);
/* Calculate and print events distribution by threads */
const double events_avg = (double) t.events / nthreads;
const double time_avg = stat->latency_sum / nthreads;
double events_stddev = 0;
double time_stddev = 0;
for(unsigned i = 0; i < nthreads; i++)
{
double diff = fabs(events_avg - timers_copy[i].events);
events_stddev += diff * diff;
diff = fabs(time_avg - NS2SEC(sb_timer_sum(&timers_copy[i])));
time_stddev += diff * diff;
}
events_stddev = sqrt(events_stddev / nthreads);
time_stddev = sqrt(time_stddev / nthreads);
log_text(LOG_NOTICE, "Threads fairness:");
log_text(LOG_NOTICE, " events (avg/stddev): %.4f/%3.2f",
events_avg, events_stddev);
log_text(LOG_NOTICE, " execution time (avg/stddev): %.4f/%3.2f",
time_avg, time_stddev);
log_text(LOG_NOTICE, "");
if (sb_globals.debug)
{
log_text(LOG_DEBUG, "Verbose per-thread statistics:\n");
for(unsigned i = 0; i < nthreads; i++)
{
log_text(LOG_DEBUG, " thread #%3d: min: %.4fs avg: %.4fs max: %.4fs "
"events: %" PRIu64,
i,
NS2SEC(sb_timer_min(&timers_copy[i])),
NS2SEC(sb_timer_avg(&timers_copy[i])),
NS2SEC(sb_timer_max(&timers_copy[i])),
timers_copy[i].events);
log_text(LOG_DEBUG, " "
"total time taken by event execution: %.4fs",
NS2SEC(sb_timer_sum(&timers_copy[i])));
}
log_text(LOG_NOTICE, "");
}
}
static void report_cumulative(void)
{
sb_stat_t stat;
unsigned i;
sb_counters_t cnt;
sb_counters_agg_cumulative(cnt);
report_get_common_stat(&stat, cnt);
stat.latency_pct =
MS2SEC(sb_histogram_get_pct_checkpoint(&sb_latency_histogram,
sb_globals.percentile));
sb_timer_t t;
sb_timer_init(&t);
const unsigned nthreads = sb_globals.num_threads;
/* Create a temporary copy of timers and reset them */
if (sb_globals.n_checkpoints > 0)
pthread_mutex_lock(&timers_mutex);
memcpy(timers_copy, timers, nthreads * sizeof(sb_timer_t));
for (i = 0; i < nthreads; i++)
sb_timer_reset(&timers[i]);
if (sb_globals.n_checkpoints > 0)
pthread_mutex_unlock(&timers_mutex);
/* Aggregate temporary timers copy */
for(i = 0; i < nthreads; i++)
t = sb_timer_merge(&t, &timers_copy[i]);
/* Calculate aggregate latency values */
stat.latency_min = NS2SEC(sb_timer_min(&t));
stat.latency_max = NS2SEC(sb_timer_max(&t));
stat.latency_avg = NS2SEC(sb_timer_avg(&t));
stat.latency_sum = NS2SEC(sb_timer_sum(&t));
stat.time_interval = NS2SEC(sb_timer_checkpoint(&sb_checkpoint_timer));
if (current_test && current_test->ops.report_cumulative)
current_test->ops.report_cumulative(&stat);
else
sb_report_cumulative(&stat);
}
static void sigalrm_forced_shutdown_handler(int sig)
{
if (sig != SIGALRM)
@@ -176,14 +404,12 @@ static void sigalrm_forced_shutdown_handler(int sig)
   sb_timer_stop(&sb_exec_timer);
   sb_timer_stop(&sb_intermediate_timer);
-  sb_timer_stop(&sb_checkpoint_timer1);
-  sb_timer_stop(&sb_checkpoint_timer2);
+  sb_timer_stop(&sb_checkpoint_timer);
   log_text(LOG_FATAL,
            "The --max-time limit has expired, forcing shutdown...");
-  if (current_test && current_test->ops.print_stats)
-    current_test->ops.print_stats(SB_REPORT_CUMULATIVE);
+  report_cumulative();
log_done();
@@ -723,13 +949,6 @@
if (sb_barrier_wait(&thread_start_barrier) < 0)
return NULL;
if (current_test->ops.print_stats == NULL)
{
log_text(LOG_DEBUG, "Reporting is not supported by the current test, "
"terminating the reporting thread");
return NULL;
}
report_thread_created = 1;
pause_ns = interval_ns;
@@ -737,12 +956,8 @@
 for (;;)
 {
   usleep(pause_ns / 1000);
-  /*
-    sb_globals.report_interval may be set to 0 by the master thread
-    to silence report at the end of the test
-  */
-  if (ck_pr_load_uint(&sb_globals.report_interval) > 0)
-    current_test->ops.print_stats(SB_REPORT_INTERMEDIATE);
+  report_intermediate();
curr_ns = sb_timer_value(&sb_exec_timer);
do
@@ -773,13 +988,6 @@
if (sb_barrier_wait(&thread_start_barrier) < 0)
return NULL;
if (current_test->ops.print_stats == NULL)
{
log_text(LOG_DEBUG, "Reporting is not supported by the current test, "
"terminating the checkpoints thread");
return NULL;
}
checkpoints_thread_created = 1;
for (i = 0; i < sb_globals.n_checkpoints; i++)
@@ -792,12 +1000,10 @@
   pause_ns = next_ns - curr_ns;
   usleep(pause_ns / 1000);
-  SB_THREAD_MUTEX_LOCK();
   log_timestamp(LOG_NOTICE, NS2SEC(sb_timer_value(&sb_exec_timer)),
                 "Checkpoint report:");
-  current_test->ops.print_stats(SB_REPORT_CUMULATIVE);
-  print_global_stats();
-  SB_THREAD_MUTEX_UNLOCK();
+  report_cumulative();
}
return NULL;
@@ -817,8 +1023,7 @@
 sb_timer_start(&sb_exec_timer);
 sb_timer_copy(&sb_intermediate_timer, &sb_exec_timer);
-sb_timer_copy(&sb_checkpoint_timer1, &sb_exec_timer);
-sb_timer_copy(&sb_checkpoint_timer2, &sb_exec_timer);
+sb_timer_copy(&sb_checkpoint_timer, &sb_exec_timer);
log_text(LOG_NOTICE, "Threads started!\n");
@@ -848,8 +1053,7 @@ static int run_test(sb_test_t *test)
 /* initialize timers */
 sb_timer_init(&sb_exec_timer);
 sb_timer_init(&sb_intermediate_timer);
-sb_timer_init(&sb_checkpoint_timer1);
-sb_timer_init(&sb_checkpoint_timer2);
+sb_timer_init(&sb_checkpoint_timer);
/* prepare test */
if (test->ops.prepare != NULL && test->ops.prepare() != 0)
@@ -946,8 +1150,7 @@
 sb_timer_stop(&sb_exec_timer);
 sb_timer_stop(&sb_intermediate_timer);
-sb_timer_stop(&sb_checkpoint_timer1);
-sb_timer_stop(&sb_checkpoint_timer2);
+sb_timer_stop(&sb_checkpoint_timer);
/* Silence periodic reports if they were on */
ck_pr_store_uint(&sb_globals.report_interval, 0);
@@ -961,10 +1164,19 @@
 /* cleanup test */
 if (test->ops.cleanup != NULL && test->ops.cleanup() != 0)
   return 1;
-/* print test-specific stats */
-if (test->ops.print_stats != NULL && !sb_globals.error)
-  test->ops.print_stats(SB_REPORT_CUMULATIVE);
+if (!sb_globals.error)
+{
+  if (sb_globals.histogram)
+  {
+    log_text(LOG_NOTICE, "Latency histogram (values are in milliseconds)");
+    sb_histogram_print(&sb_latency_histogram);
+    log_text(LOG_NOTICE, " ");
+  }
+  report_cumulative();
+}
pthread_mutex_destroy(&sb_globals.exec_mutex);
@@ -1123,6 +1335,23 @@ static int init(void)
sizeof(unsigned int), checkpoint_cmp);
}
/* Initialize timers */
timers = sb_memalign(sb_globals.num_threads * sizeof(sb_timer_t),
CK_MD_CACHELINE);
timers_copy = sb_memalign(sb_globals.num_threads * sizeof(sb_timer_t),
CK_MD_CACHELINE);
if (timers == NULL || timers_copy == NULL)
{
log_text(LOG_FATAL, "Memory allocation failure");
return 1;
}
for (unsigned i = 0; i < sb_globals.num_threads; i++)
sb_timer_init(&timers[i]);
if (sb_globals.n_checkpoints > 0)
pthread_mutex_init(&timers_mutex, NULL);
return 0;
}
@@ -1287,7 +1516,13 @@ int main(int argc, char *argv[])
   sb_thread_done();
-  exit(0);
+  free(timers);
+  free(timers_copy);
+  if (sb_globals.n_checkpoints > 0)
+    pthread_mutex_destroy(&timers_mutex);
+  return EXIT_SUCCESS;
}
/* Print a description of available command line options for the current test */
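The "Threads fairness" numbers in sb_report_cumulative() above are a population standard deviation over the per-thread timers. A standalone sketch of the events side of that arithmetic, on made-up per-thread counts (compile with -lm):

#include <math.h>
#include <stdio.h>

int main(void)
{
  const double events[] = { 98, 103, 99, 100 };   /* fabricated sample */
  const unsigned n = sizeof(events) / sizeof(events[0]);
  double avg = 0, stddev = 0;

  for (unsigned i = 0; i < n; i++)
    avg += events[i];
  avg /= n;

  for (unsigned i = 0; i < n; i++)
  {
    const double diff = events[i] - avg;
    stddev += diff * diff;
  }
  stddev = sqrt(stddev / n);

  printf("events (avg/stddev): %.4f/%3.2f\n", avg, stddev);
  return 0;
}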

View File

@@ -84,11 +84,28 @@ typedef struct
   } u;
 } sb_event_t;
-typedef enum
-{
-  SB_REPORT_INTERMEDIATE,
-  SB_REPORT_CUMULATIVE
-} sb_report_t;
+/* Statistics */
+typedef struct {
+  uint32_t threads_running; /* Number of active threads */
+  double time_interval;     /* Time elapsed since the last report */
+  double time_total;        /* Time elapsed since the benchmark start */
+  double latency_pct;       /* Latency percentile */
+  double latency_min;       /* Minimum latency (cumulative reports only) */
+  double latency_max;       /* Maximum latency (cumulative reports only) */
+  double latency_avg;       /* Average latency (cumulative reports only) */
+  double latency_sum;       /* Sum latency (cumulative reports only) */
+  uint64_t events;          /* Number of executed events */
+  uint64_t reads;           /* Number of read operations */
+  uint64_t writes;          /* Number of write operations */
+  uint64_t other;           /* Number of other operations */
+  uint64_t errors;          /* Number of ignored errors */
+  uint64_t reconnects;      /* Number of reconnects to server */
+} sb_stat_t;
/* Commands */
@@ -104,7 +121,7 @@ typedef int sb_op_thread_run(int);
 typedef void sb_op_print_mode(void);
 typedef sb_event_t sb_op_next_event(int);
 typedef int sb_op_execute_event(sb_event_t *, int);
-typedef void sb_op_print_stats(sb_report_t);
+typedef void sb_op_report(sb_stat_t *);
typedef int sb_op_thread_done(int);
typedef int sb_op_cleanup(void);
typedef int sb_op_done(void);
@@ -131,7 +148,8 @@ typedef struct
   sb_op_print_mode *print_mode;       /* print mode function */
   sb_op_next_event *next_event;       /* event generation function */
   sb_op_execute_event *execute_event; /* event execution function */
-  sb_op_print_stats *print_stats;     /* stats generation function */
+  sb_op_report *report_intermediate;  /* intermediate reports handler */
+  sb_op_report *report_cumulative;    /* cumulative reports handler */
sb_op_thread_run *thread_run; /* main thread loop */
sb_op_thread_done *thread_done; /* thread finalize function */
sb_op_cleanup *cleanup; /* called after exit from thread,
@@ -193,8 +211,7 @@ extern sb_timer_t sb_exec_timer CK_CC_CACHELINE;
 /* timers for checkpoint reports */
 extern sb_timer_t sb_intermediate_timer;
-extern sb_timer_t sb_checkpoint_timer1;
-extern sb_timer_t sb_checkpoint_timer2;
+extern sb_timer_t sb_checkpoint_timer;
extern TLS int sb_tls_thread_id;
@@ -206,4 +223,10 @@ void sb_event_stop(int thread_id);
/* Print a description of available command line options for the current test */
void sb_print_test_options(void);
/* Default intermediate reports handler */
void sb_report_intermediate(sb_stat_t *stat);
/* Default cumulative reports handler */
void sb_report_cumulative(sb_stat_t *stat);
#endif
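With the hooks declared above, a machine-readable reporter becomes a drop-in replacement. A hypothetical CSV intermediate handler (units follow the sb_stat_t comments: times in seconds, latency_pct in seconds; assumes sysbench.h pulls in sb_timer.h for SEC2MS):

#include <stdio.h>
#include <inttypes.h>

#include "sysbench.h"

static void csv_report_intermediate(sb_stat_t *stat)
{
  printf("%.0f,%" PRIu32 ",%.2f,%.2f\n",
         stat->time_total,                    /* seconds since start */
         stat->threads_running,               /* active threads      */
         stat->events / stat->time_interval,  /* events per second   */
         SEC2MS(stat->latency_pct));          /* percentile, in ms   */
}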

View File

@@ -252,7 +252,8 @@ static int file_execute_event(sb_event_t *, int);
 static int file_thread_done(int);
 #endif
 static int file_done(void);
-static void file_print_stats(sb_report_t);
+static void file_report_intermediate(sb_stat_t *);
+static void file_report_cumulative(sb_stat_t *);
static sb_test_t fileio_test =
{
@@ -264,7 +265,8 @@ static sb_test_t fileio_test =
   .print_mode = file_print_mode,
   .next_event = file_next_event,
   .execute_event = file_execute_event,
-  .print_stats = file_print_stats,
+  .report_intermediate = file_report_intermediate,
+  .report_cumulative = file_report_cumulative,
#ifdef HAVE_LIBAIO
.thread_done = file_thread_done,
#else
@@ -832,76 +834,74 @@ void file_print_mode(void)
   log_text(LOG_NOTICE, "Doing %s test", get_test_mode_str(test_mode));
 }
+/*
+  Print intermediate test statistics.
-/* Print test statistics */
+  TODO: remove the mutex, use sb_stat_t and sb_counter_t.
+*/
-void file_print_stats(sb_report_t type)
+void file_report_intermediate(sb_stat_t *stat)
 {
-  double seconds;
   unsigned long long diff_read;
   unsigned long long diff_written;
   unsigned long long diff_other_ops;
-  switch (type) {
-  case SB_REPORT_INTERMEDIATE:
-  {
-    SB_THREAD_MUTEX_LOCK();
+  SB_THREAD_MUTEX_LOCK();
-    seconds = NS2SEC(sb_timer_checkpoint(&sb_intermediate_timer));
-    diff_read = bytes_read - last_bytes_read;
-    diff_written = bytes_written - last_bytes_written;
-    diff_other_ops = other_ops - last_other_ops;
+  diff_read = bytes_read - last_bytes_read;
+  diff_written = bytes_written - last_bytes_written;
+  diff_other_ops = other_ops - last_other_ops;
-    last_bytes_read = bytes_read;
-    last_bytes_written = bytes_written;
-    last_other_ops = other_ops;
+  last_bytes_read = bytes_read;
+  last_bytes_written = bytes_written;
+  last_other_ops = other_ops;
-    SB_THREAD_MUTEX_UNLOCK();
+  SB_THREAD_MUTEX_UNLOCK();
-    const double percentile_val =
-      sb_histogram_get_pct_intermediate(&sb_latency_histogram,
-                                        sb_globals.percentile);
-    log_timestamp(LOG_NOTICE, seconds,
-                  "reads: %4.2f MiB/s writes: %4.2f MiB/s fsyncs: %4.2f/s "
-                  "latency: %4.3f ms (%uth pct.)",
-                  diff_read / megabyte / seconds,
-                  diff_written / megabyte / seconds,
-                  diff_other_ops / seconds,
-                  percentile_val,
-                  sb_globals.percentile);
-    break;
-  }
-  case SB_REPORT_CUMULATIVE:
-    seconds = NS2SEC(sb_timer_checkpoint(&sb_checkpoint_timer1));
-    log_text(LOG_NOTICE, "\n"
-             "File operations:\n"
-             " reads/s: %4.2f\n"
-             " writes/s: %4.2f\n"
-             " fsyncs/s: %4.2f\n"
-             "\n"
-             "Throughput:\n"
-             " read, MiB/s: %4.2f\n"
-             " written, MiB/s: %4.2f",
-             read_ops / seconds, write_ops / seconds, other_ops / seconds,
-             bytes_read / megabyte / seconds,
-             bytes_written / megabyte / seconds);
-    clear_stats();
-    break;
-  }
+  log_timestamp(LOG_NOTICE, stat->time_total,
+                "reads: %4.2f MiB/s writes: %4.2f MiB/s fsyncs: %4.2f/s "
+                "latency (ms,%u%%): %4.3f",
+                diff_read / megabyte / stat->time_interval,
+                diff_written / megabyte / stat->time_interval,
+                diff_other_ops / stat->time_interval,
+                sb_globals.percentile,
+                SEC2MS(stat->latency_pct));
 }
+/*
+  Print cumulative test statistics.
+  TODO: remove the mutex, use sb_stat_t and sb_counter_t.
+*/
+void file_report_cumulative(sb_stat_t *stat)
+{
+  const double seconds = stat->time_interval;
+  SB_THREAD_MUTEX_LOCK();
+  log_text(LOG_NOTICE, "\n"
+           "File operations:\n"
+           " reads/s: %4.2f\n"
+           " writes/s: %4.2f\n"
+           " fsyncs/s: %4.2f\n"
+           "\n"
+           "Throughput:\n"
+           " read, MiB/s: %4.2f\n"
+           " written, MiB/s: %4.2f",
+           read_ops / seconds, write_ops / seconds, other_ops / seconds,
+           bytes_read / megabyte / seconds,
+           bytes_written / megabyte / seconds);
+  clear_stats();
+  SB_THREAD_MUTEX_UNLOCK();
+  sb_report_cumulative(stat);
+}
/* Return name for I/O mode */
const char *get_io_mode_str(file_io_mode_t mode)
{
switch (mode) {
@@ -1169,11 +1169,6 @@ void clear_stats(void)
last_bytes_read = 0;
bytes_written = 0;
last_bytes_written = 0;
/*
So that intermediate stats are calculated from the current moment
rather than from the previous intermediate report
*/
sb_timer_checkpoint(&sb_intermediate_timer);
}
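Note the pattern both converted tests adopt for cumulative reports: print the test-specific block first, then delegate to sb_report_cumulative() so the shared "General statistics"/"Latency"/"Threads fairness" sections still appear. Schematically (my_total_ops is a hypothetical counter):

#include <inttypes.h>

#include "sysbench.h"

static uint64_t my_total_ops;   /* hypothetical test-specific counter */

static void my_report_cumulative(sb_stat_t *stat)
{
  log_text(LOG_NOTICE, "operations performed: %" PRIu64 " (%.2f per sec.)",
           my_total_ops, my_total_ops / stat->time_interval);
  sb_report_cumulative(stat);   /* shared general statistics */
}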

View File

@@ -58,7 +58,8 @@ static int memory_init(void);
 static void memory_print_mode(void);
 static sb_event_t memory_next_event(int);
 static int memory_execute_event(sb_event_t *, int);
-static void memory_print_stats(sb_report_t type);
+static void memory_report_intermediate(sb_stat_t *);
+static void memory_report_cumulative(sb_stat_t *);
static sb_test_t memory_test =
{
@@ -69,7 +70,8 @@ static sb_test_t memory_test =
   .print_mode = memory_print_mode,
   .next_event = memory_next_event,
   .execute_event = memory_execute_event,
-  .print_stats = memory_print_stats
+  .report_intermediate = memory_report_intermediate,
+  .report_cumulative = memory_report_cumulative
},
.args = memory_args
};
@@ -339,44 +341,51 @@ void memory_print_mode(void)
   log_text(LOG_INFO, "Memory scope type: %s", str);
 }
+/*
+  Print intermediate test statistics.
-void memory_print_stats(sb_report_t type)
+  TODO: remove the mutex, use sb_stat_t and sb_counter_t.
+*/
+void memory_report_intermediate(sb_stat_t *stat)
 {
-  double seconds;
   const double megabyte = 1024.0 * 1024.0;
-  switch (type) {
-  case SB_REPORT_INTERMEDIATE:
-    SB_THREAD_MUTEX_LOCK();
-    seconds = NS2SEC(sb_timer_checkpoint(&sb_intermediate_timer));
+  SB_THREAD_MUTEX_LOCK();
-    log_timestamp(LOG_NOTICE, seconds,
-                  "%4.2f MiB/sec,",
-                  (double)(total_bytes - last_bytes) / megabyte / seconds);
-    last_bytes = total_bytes;
-    SB_THREAD_MUTEX_UNLOCK();
+  log_timestamp(LOG_NOTICE, stat->time_total,
+                "%4.2f MiB/sec,", (double)(total_bytes - last_bytes) /
+                megabyte / stat->time_interval);
+  last_bytes = total_bytes;
-    break;
+  SB_THREAD_MUTEX_UNLOCK();
+}
-  case SB_REPORT_CUMULATIVE:
-    seconds = NS2SEC(sb_timer_checkpoint(&sb_checkpoint_timer1));
+/*
+  Print cumulative test statistics.
-    log_text(LOG_NOTICE, "Operations performed: %d (%8.2f ops/sec)\n",
-             total_ops, total_ops / seconds);
-    if (memory_oper != SB_MEM_OP_NONE)
-      log_text(LOG_NOTICE, "%4.2f MiB transferred (%4.2f MiB/sec)\n",
-               total_bytes / megabyte,
-               total_bytes / megabyte / seconds);
-    total_ops = 0;
-    total_bytes = 0;
-    /*
-      So that intermediate stats are calculated from the current moment
-      rather than from the previous intermediate report
-    */
-    sb_timer_checkpoint(&sb_intermediate_timer);
+  TODO: remove the mutex, use sb_stat_t and sb_counter_t.
+*/
-    break;
-  }
+void memory_report_cumulative(sb_stat_t *stat)
+{
+  const double megabyte = 1024.0 * 1024.0;
+  SB_THREAD_MUTEX_LOCK();
+  log_text(LOG_NOTICE, "Operations performed: %d (%8.2f ops/sec)\n",
+           total_ops, total_ops / stat->time_interval);
+  if (memory_oper != SB_MEM_OP_NONE)
+    log_text(LOG_NOTICE, "%4.2f MiB transferred (%4.2f MiB/sec)\n",
+             total_bytes / megabyte,
+             total_bytes / megabyte / stat->time_interval);
+  total_ops = 0;
+  total_bytes = 0;
+  SB_THREAD_MUTEX_UNLOCK();
+  sb_report_cumulative(stat);
 }
#ifdef HAVE_LARGE_PAGES

View File

@@ -185,7 +185,7 @@ Legacy SQL Lua API tests
 Threads started!
-OLTP test statistics:
+SQL statistics:
queries performed:
read: 1400
write: 400
@@ -199,13 +199,13 @@ Legacy SQL Lua API tests
 General statistics:
 total time: *s (glob)
 total number of events: 100
-total time taken by event execution: *s (glob)
-Latency statistics:
-min: *.*ms (glob)
-avg: *.*ms (glob)
-max: *.*ms (glob)
-approx. 95th percentile: *.*ms (glob)
+Latency (ms):
+min: *.* (glob)
+avg: *.* (glob)
+max: *.* (glob)
+95th percentile: *.* (glob)
+sum: *.* (glob)
Threads fairness:
events (avg/stddev): */* (glob)
@@ -366,7 +366,7 @@ Legacy SQL Lua API tests
 Threads started!
-OLTP test statistics:
+SQL statistics:
queries performed:
read: 1400
write: 400
@@ -380,13 +380,13 @@ Legacy SQL Lua API tests
 General statistics:
 total time: *s (glob)
 total number of events: 100
-total time taken by event execution: *s (glob)
-Latency statistics:
-min: *.*ms (glob)
-avg: *.*ms (glob)
-max: *.*ms (glob)
-approx. 95th percentile: *.*ms (glob)
+Latency (ms):
+min: *.* (glob)
+avg: *.* (glob)
+max: *.* (glob)
+95th percentile: *.* (glob)
+sum: *.* (glob)
Threads fairness:
events (avg/stddev): */* (glob)

View File

@@ -75,13 +75,13 @@
 General statistics:
 total time: *s (glob)
 total number of events: 1
-total time taken by event execution: *s (glob)
-Latency statistics:
-min: *ms (glob)
-avg: *ms (glob)
-max: *ms (glob)
-approx. 95th percentile: *ms (glob)
+Latency (ms):
+min: *.* (glob)
+avg: *.* (glob)
+max: *.* (glob)
+95th percentile: *.* (glob)
+sum: *.* (glob)
Threads fairness:
events (avg/stddev): 1.0000/0.00

View File

@@ -15,7 +15,7 @@ MySQL driver tests
 Threads started!
-OLTP test statistics:
+SQL statistics:
queries performed:
read: 10
write: 0
@@ -29,13 +29,13 @@ MySQL driver tests
 General statistics:
 total time: *.*s (glob)
 total number of events: 10
-total time taken by event execution: *.*s (glob)
-Latency statistics:
-min: *.*ms (glob)
-avg: *.*ms (glob)
-max: *.*ms (glob)
-approx. 95th percentile: *.*ms (glob)
+Latency (ms):
+min: *.* (glob)
+avg: *.* (glob)
+max: *.* (glob)
+95th percentile: *.* (glob)
+sum: *.* (glob)
Threads fairness:
events (avg/stddev): *.*/*.* (glob)

View File

@@ -15,7 +15,7 @@ PostgreSQL driver tests
 Threads started!
-OLTP test statistics:
+SQL statistics:
queries performed:
read: 10
write: 0
@@ -29,13 +29,13 @@ PostgreSQL driver tests
 General statistics:
 total time: *.*s (glob)
 total number of events: 10
-total time taken by event execution: *.*s (glob)
-Latency statistics:
-min: *.*ms (glob)
-avg: *.*ms (glob)
-max: *.*ms (glob)
-approx. 95th percentile: *.*ms (glob)
+Latency (ms):
+min: *.* (glob)
+avg: *.* (glob)
+max: *.* (glob)
+95th percentile: *.* (glob)
+sum: *.* (glob)
Threads fairness:
events (avg/stddev): *.*/*.* (glob)

View File

@@ -27,22 +27,22 @@
 Threads started!
-General statistics:
-total time: *s (glob)
-total number of events: 2
-total time taken by event execution: *s (glob)
 Latency histogram (values are in milliseconds)
 value ------------- distribution ------------- count
 * |\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\* 1 (glob)
 * |\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*\* 1 (glob)
-Latency statistics:
-min: *ms (glob)
-avg: *ms (glob)
-max: *ms (glob)
-approx. 95th percentile: *ms (glob)
+General statistics:
+total time: *s (glob)
+total number of events: 2
+Latency (ms):
+min: *.* (glob)
+avg: *.* (glob)
+max: *.* (glob)
+95th percentile: *.* (glob)
+sum: *.* (glob)
Threads fairness:
events (avg/stddev): 1.0000/0.00

View File

@@ -7,14 +7,14 @@
 > exit 80
 > fi
-$ sysbench ${SBTEST_SCRIPTDIR}/oltp_read_write.lua --db-driver=mysql --mysql-dry-run --max-time=3 --max-requests=0 --report-checkpoints=1,2 run | egrep '(Checkpoint report|OLTP test statistics)'
+$ sysbench ${SBTEST_SCRIPTDIR}/oltp_read_write.lua --db-driver=mysql --mysql-dry-run --max-time=3 --max-requests=0 --report-checkpoints=1,2 run | egrep '(Checkpoint report|SQL statistics)'
 [ 1s] Checkpoint report:
-OLTP test statistics:
+SQL statistics:
 [ 2s] Checkpoint report:
-OLTP test statistics:
-OLTP test statistics:
+SQL statistics:
+SQL statistics:
 # Run a test that does not support checkpoint reports
-$ sysbench cpu --report-checkpoints=1 --max-time=1 --debug run | grep 'not supported'
-DEBUG: Reporting is not supported by the current test, terminating the checkpoints thread
+$ sysbench cpu --report-checkpoints=1 --max-time=1 run | grep 'Checkpoint report'
+[ 1s] Checkpoint report:

View File

@@ -8,9 +8,9 @@
 > fi
 $ sysbench ${SBTEST_SCRIPTDIR}/oltp_read_write.lua --db-driver=mysql --mysql-dry-run --max-time=3 --max-requests=0 --report-interval=1 run | grep '\[ 2s\]'
-[ 2s] threads: 1 tps: * qps: * (r/w/o: */*/*) latency: *ms (95%) errors/s: 0.00 reconnects/s: 0.00 (glob)
+[ 2s] threads: 1 tps: * qps: * (r/w/o: */*/*) latency (ms,95%): *.* errors/s: 0.00 reconnects/s: 0.00 (glob)
 # Run a test that does not support intermediate reports
-$ sysbench cpu --report-interval=1 --max-time=1 --debug run | grep 'not supported'
-DEBUG: Reporting is not supported by the current test, terminating the reporting thread
+$ sysbench cpu --report-interval=1 --max-time=2 run | grep '\[ 1s\]'
+[ 1s] threads: 1 events/s: * latency (ms,95%): * (glob)

View File

@@ -32,7 +32,7 @@ bulk_insert.lua + MySQL tests
 Threads started!
-OLTP test statistics:
+SQL statistics:
queries performed:
read: 0
write: [12] (re)
@@ -46,13 +46,13 @@ bulk_insert.lua + MySQL tests
 General statistics:
 total time: *s (glob)
 total number of events: 100
-total time taken by event execution: *s (glob)
-Latency statistics:
-min: *.*ms (glob)
-avg: *.*ms (glob)
-max: *.*ms (glob)
-approx. 95th percentile: *.*ms (glob)
+Latency (ms):
+min: *.* (glob)
+avg: *.* (glob)
+max: *.* (glob)
+95th percentile: *.* (glob)
+sum: *.* (glob)
Threads fairness:
events (avg/stddev):* (glob)

View File

@@ -34,7 +34,7 @@ bulk_insert.lua + PostgreSQL tests
 Threads started!
-OLTP test statistics:
+SQL statistics:
queries performed:
read: 0
write: [12] (re)
@@ -48,13 +48,13 @@ bulk_insert.lua + PostgreSQL tests
 General statistics:
 total time: *s (glob)
 total number of events: 100
-total time taken by event execution: *s (glob)
-Latency statistics:
-min: *.*ms (glob)
-avg: *.*ms (glob)
-max: *.*ms (glob)
-approx. 95th percentile: *.*ms (glob)
+Latency (ms):
+min: *.* (glob)
+avg: *.* (glob)
+max: *.* (glob)
+95th percentile: *.* (glob)
+sum: *.* (glob)
Threads fairness:
events (avg/stddev):* (glob)

View File

@@ -133,7 +133,7 @@ oltp_point_select.lua + MySQL tests
 Threads started!
-OLTP test statistics:
+SQL statistics:
queries performed:
read: 0
write: 100
@@ -147,13 +147,13 @@ oltp_point_select.lua + MySQL tests
 General statistics:
 total time: *s (glob)
 total number of events: 100
-total time taken by event execution: *s (glob)
-Latency statistics:
-min: *.*ms (glob)
-avg: *.*ms (glob)
-max: *.*ms (glob)
-approx. 95th percentile: *.*ms (glob)
+Latency (ms):
+min: *.* (glob)
+avg: *.* (glob)
+max: *.* (glob)
+95th percentile: *.* (glob)
+sum: *.* (glob)
Threads fairness:
events (avg/stddev): */* (glob)

View File

@@ -134,7 +134,7 @@ oltp_point_select.lua + PostgreSQL tests
 Threads started!
-OLTP test statistics:
+SQL statistics:
queries performed:
read: 0
write: 100
@@ -148,13 +148,13 @@ oltp_point_select.lua + PostgreSQL tests
 General statistics:
 total time: *s (glob)
 total number of events: 100
-total time taken by event execution: *s (glob)
-Latency statistics:
-min: *.*ms (glob)
-avg: *.*ms (glob)
-max: *.*ms (glob)
-approx. 95th percentile: *.*ms (glob)
+Latency (ms):
+min: *.* (glob)
+avg: *.* (glob)
+max: *.* (glob)
+95th percentile: *.* (glob)
+sum: *.* (glob)
Threads fairness:
events (avg/stddev): */* (glob)

View File

@@ -133,7 +133,7 @@ oltp_point_select.lua + MySQL tests
 Threads started!
-OLTP test statistics:
+SQL statistics:
queries performed:
read: 100
write: 0
@@ -147,13 +147,13 @@ oltp_point_select.lua + MySQL tests
 General statistics:
 total time: *s (glob)
 total number of events: 100
-total time taken by event execution: *s (glob)
-Latency statistics:
-min: *.*ms (glob)
-avg: *.*ms (glob)
-max: *.*ms (glob)
-approx. 95th percentile: *.*ms (glob)
+Latency (ms):
+min: *.* (glob)
+avg: *.* (glob)
+max: *.* (glob)
+95th percentile: *.* (glob)
+sum: *.* (glob)
Threads fairness:
events (avg/stddev): */* (glob)

View File

@@ -134,7 +134,7 @@ oltp_point_select.lua + PostgreSQL tests
 Threads started!
-OLTP test statistics:
+SQL statistics:
queries performed:
read: 100
write: 0
@@ -148,13 +148,13 @@ oltp_point_select.lua + PostgreSQL tests
 General statistics:
 total time: *s (glob)
 total number of events: 100
-total time taken by event execution: *s (glob)
-Latency statistics:
-min: *.*ms (glob)
-avg: *.*ms (glob)
-max: *.*ms (glob)
-approx. 95th percentile: *.*ms (glob)
+Latency (ms):
+min: *.* (glob)
+avg: *.* (glob)
+max: *.* (glob)
+95th percentile: *.* (glob)
+sum: *.* (glob)
Threads fairness:
events (avg/stddev): */* (glob)

View File

@@ -134,7 +134,7 @@ oltp_read_write.lua + MySQL tests
 Threads started!
-OLTP test statistics:
+SQL statistics:
queries performed:
read: 1400
write: 400
@@ -148,13 +148,13 @@ oltp_read_write.lua + MySQL tests
 General statistics:
 total time: *s (glob)
 total number of events: 100
-total time taken by event execution: *s (glob)
-Latency statistics:
-min: *.*ms (glob)
-avg: *.*ms (glob)
-max: *.*ms (glob)
-approx. 95th percentile: *.*ms (glob)
+Latency (ms):
+min: *.* (glob)
+avg: *.* (glob)
+max: *.* (glob)
+95th percentile: *.* (glob)
+sum: *.* (glob)
Threads fairness:
events (avg/stddev): */* (glob)
@@ -325,7 +325,7 @@ oltp_read_write.lua + MySQL tests
 Threads started!
-OLTP test statistics:
+SQL statistics:
queries performed:
read: 1400
write: 400
@@ -339,13 +339,13 @@ oltp_read_write.lua + MySQL tests
 General statistics:
 total time: *s (glob)
 total number of events: 100
-total time taken by event execution: *s (glob)
-Latency statistics:
-min: *.*ms (glob)
-avg: *.*ms (glob)
-max: *.*ms (glob)
-approx. 95th percentile: *.*ms (glob)
+Latency (ms):
+min: *.* (glob)
+avg: *.* (glob)
+max: *.* (glob)
+95th percentile: *.* (glob)
+sum: *.* (glob)
Threads fairness:
events (avg/stddev): */* (glob)

View File

@@ -134,7 +134,7 @@ oltp_read_write.lua + PostgreSQL tests
 Threads started!
-OLTP test statistics:
+SQL statistics:
queries performed:
read: 1400
write: 400
@@ -148,13 +148,13 @@ oltp_read_write.lua + PostgreSQL tests
 General statistics:
 total time: *s (glob)
 total number of events: 100
-total time taken by event execution: *s (glob)
-Latency statistics:
-min: *.*ms (glob)
-avg: *.*ms (glob)
-max: *.*ms (glob)
-approx. 95th percentile: *.*ms (glob)
+Latency (ms):
+min: *.* (glob)
+avg: *.* (glob)
+max: *.* (glob)
+95th percentile: *.* (glob)
+sum: *.* (glob)
Threads fairness:
events (avg/stddev): */* (glob)

View File

@@ -37,7 +37,7 @@ select_random_*.lua + MySQL tests
 Threads started!
-OLTP test statistics:
+SQL statistics:
queries performed:
read: 100
write: 0
@@ -51,13 +51,13 @@ select_random_*.lua + MySQL tests
 General statistics:
 total time: *s (glob)
 total number of events: 100
-total time taken by event execution: *s (glob)
-Latency statistics:
-min: *.*ms (glob)
-avg: *.*ms (glob)
-max: *.*ms (glob)
-approx. 95th percentile: *.*ms (glob)
+Latency (ms):
+min: *.* (glob)
+avg: *.* (glob)
+max: *.* (glob)
+95th percentile: *.* (glob)
+sum: *.* (glob)
Threads fairness:
events (avg/stddev):* (glob)
@@ -107,7 +107,7 @@ select_random_*.lua + MySQL tests
 Threads started!
-OLTP test statistics:
+SQL statistics:
queries performed:
read: 100
write: 0
@@ -121,13 +121,13 @@ select_random_*.lua + MySQL tests
 General statistics:
 total time: *s (glob)
 total number of events: 100
-total time taken by event execution: *s (glob)
-Latency statistics:
-min: *.*ms (glob)
-avg: *.*ms (glob)
-max: *.*ms (glob)
-approx. 95th percentile: *.*ms (glob)
+Latency (ms):
+min: *.* (glob)
+avg: *.* (glob)
+max: *.* (glob)
+95th percentile: *.* (glob)
+sum: *.* (glob)
Threads fairness:
events (avg/stddev):* (glob)

View File

@@ -38,7 +38,7 @@ select_random_*.lua + PostgreSQL tests
 Threads started!
-OLTP test statistics:
+SQL statistics:
queries performed:
read: 100
write: 0
@@ -52,13 +52,13 @@ select_random_*.lua + PostgreSQL tests
 General statistics:
 total time: *s (glob)
 total number of events: 100
-total time taken by event execution: *s (glob)
-Latency statistics:
-min: *.*ms (glob)
-avg: *.*ms (glob)
-max: *.*ms (glob)
-approx. 95th percentile: *.*ms (glob)
+Latency (ms):
+min: *.* (glob)
+avg: *.* (glob)
+max: *.* (glob)
+95th percentile: *.* (glob)
+sum: *.* (glob)
Threads fairness:
events (avg/stddev):* (glob)
@@ -109,7 +109,7 @@ select_random_*.lua + PostgreSQL tests
 Threads started!
-OLTP test statistics:
+SQL statistics:
queries performed:
read: 100
write: 0
@@ -123,13 +123,13 @@ select_random_*.lua + PostgreSQL tests
 General statistics:
 total time: *s (glob)
 total number of events: 100
-total time taken by event execution: *s (glob)
-Latency statistics:
-min: *.*ms (glob)
-avg: *.*ms (glob)
-max: *.*ms (glob)
-approx. 95th percentile: *.*ms (glob)
+Latency (ms):
+min: *.* (glob)
+avg: *.* (glob)
+max: *.* (glob)
+95th percentile: *.* (glob)
+sum: *.* (glob)
Threads fairness:
events (avg/stddev):* (glob)

View File

@@ -31,13 +31,13 @@ cpu benchmark tests
 General statistics:
 total time: *s (glob)
 total number of events: 100
-total time taken by event execution: *s (glob)
-Latency statistics:
-min: *.*ms (glob)
-avg: *.*ms (glob)
-max: *.*ms (glob)
-approx. 95th percentile: *.*ms (glob)
+Latency (ms):
+min: *.* (glob)
+avg: *.* (glob)
+max: *.* (glob)
+95th percentile: *.* (glob)
+sum: *.* (glob)
Threads fairness:
events (avg/stddev): 50.0000/* (glob)

View File

@@ -69,13 +69,13 @@ fileio benchmark tests
 General statistics:
 total time: *.*s (glob)
 total number of events: 158
-total time taken by event execution: *.*s (glob)
-Latency statistics:
-min: *.*ms (glob)
-avg: *.*ms (glob)
-max: *.*ms (glob)
-approx. 95th percentile: *.*ms (glob)
+Latency (ms):
+min: *.* (glob)
+avg: *.* (glob)
+max: *.* (glob)
+95th percentile: *.* (glob)
+sum: *.* (glob)
Threads fairness:
events (avg/stddev): 158.0000/0.00
@@ -116,13 +116,13 @@ fileio benchmark tests
 General statistics:
 total time: *.*s (glob)
 total number of events: 150
-total time taken by event execution: *.*s (glob)
-Latency statistics:
-min: *.*ms (glob)
-avg: *.*ms (glob)
-max: *.*ms (glob)
-approx. 95th percentile: *.*ms (glob)
+Latency (ms):
+min: *.* (glob)
+avg: *.* (glob)
+max: *.* (glob)
+95th percentile: *.* (glob)
+sum: *.* (glob)
Threads fairness:
events (avg/stddev): 150.0000/0.00
@@ -162,13 +162,13 @@ fileio benchmark tests
 General statistics:
 total time: *.*s (glob)
 total number of events: 150
-total time taken by event execution: *.*s (glob)
-Latency statistics:
-min: *.*ms (glob)
-avg: *.*ms (glob)
-max: *.*ms (glob)
-approx. 95th percentile: *.*ms (glob)
+Latency (ms):
+min: *.* (glob)
+avg: *.* (glob)
+max: *.* (glob)
+95th percentile: *.* (glob)
+sum: *.* (glob)
Threads fairness:
events (avg/stddev): 150.0000/0.00
@@ -210,13 +210,13 @@ fileio benchmark tests
 General statistics:
 total time: *.*s (glob)
 total number of events: 158
-total time taken by event execution: *.*s (glob)
-Latency statistics:
-min: *.*ms (glob)
-avg: *.*ms (glob)
-max: *.*ms (glob)
-approx. 95th percentile: *.*ms (glob)
+Latency (ms):
+min: *.* (glob)
+avg: *.* (glob)
+max: *.* (glob)
+95th percentile: *.* (glob)
+sum: *.* (glob)
Threads fairness:
events (avg/stddev): 158.0000/0.00

View File

@@ -50,13 +50,13 @@ help' only on Linux.
 General statistics:
 total time: *s (glob)
 total number of events: 262144 (glob)
-total time taken by event execution: *s (glob)
-Latency statistics:
-min: *.*ms (glob)
-avg: *.*ms (glob)
-max: *.*ms (glob)
-approx. 95th percentile: *.*ms (glob)
+Latency (ms):
+min: *.* (glob)
+avg: *.* (glob)
+max: *.* (glob)
+95th percentile: *.* (glob)
+sum: *.* (glob)
Threads fairness:
events (avg/stddev): */* (glob)

View File

@@ -31,13 +31,13 @@ mutex benchmark tests
 General statistics:
 total time: *s (glob)
 total number of events: 2
-total time taken by event execution: *s (glob)
-Latency statistics:
-min: *.*ms (glob)
-avg: *.*ms (glob)
-max: *.*ms (glob)
-approx. 95th percentile: *.*ms (glob)
+Latency (ms):
+min: *.* (glob)
+avg: *.* (glob)
+max: *.* (glob)
+95th percentile: *.* (glob)
+sum: *.* (glob)
Threads fairness:
events (avg/stddev): */* (glob)

View File

@@ -31,13 +31,13 @@ threads benchmark tests
 General statistics:
 total time: *s (glob)
 total number of events: 100
-total time taken by event execution: *s (glob)
-Latency statistics:
-min: *.*ms (glob)
-avg: *.*ms (glob)
-max: *.*ms (glob)
-approx. 95th percentile: *.*ms (glob)
+Latency (ms):
+min: *.* (glob)
+avg: *.* (glob)
+max: *.* (glob)
+95th percentile: *.* (glob)
+sum: *.* (glob)
Threads fairness:
events (avg/stddev): */* (glob)