Merge branch '1.0'

This commit is contained in:
Alexey Kopytov
2017-02-14 21:46:39 +03:00
4 changed files with 50 additions and 40 deletions

View File

@ -1083,7 +1083,7 @@ static void db_reset_stats(void)
So that intermediate stats are calculated from the current moment
rather than from the previous intermediate report
*/
sb_timer_checkpoint(&sb_intermediate_timer);
sb_timer_current(&sb_intermediate_timer);
if (db_globals.debug)
{

View File

@ -42,6 +42,9 @@ void sb_timer_init(sb_timer_t *t)
memset(&t->time_start, 0, sizeof(struct timespec));
memset(&t->time_end, 0, sizeof(struct timespec));
ck_spinlock_init(&t->lock);
sb_timer_reset(t);
}
@ -61,6 +64,8 @@ void sb_timer_reset(sb_timer_t *t)
/* Copy the entire state of timer 'from' into 'to'. */
void sb_timer_copy(sb_timer_t *to, sb_timer_t *from)
{
memcpy(to, from, sizeof(sb_timer_t));
/* memcpy() also copied the source timer's spinlock state; re-initialize it
   so 'to' starts with a fresh, unlocked lock of its own */
ck_spinlock_init(&to->lock);
}
/* check whether the timer is running */
@ -71,12 +76,12 @@ bool sb_timer_running(sb_timer_t *t)
}
/*
get time elapsed since the previous call to sb_timer_checkpoint() for the
get time elapsed since the previous call to sb_timer_current() for the
specified timer without stopping it. The first call returns time elapsed
since the timer was started.
*/
uint64_t sb_timer_checkpoint(sb_timer_t *t)
uint64_t sb_timer_current(sb_timer_t *t)
{
struct timespec tmp;
uint64_t res;
@ -88,6 +93,23 @@ uint64_t sb_timer_checkpoint(sb_timer_t *t)
return res;
}
/*
Atomically reset a given timer after copying its state into the timer pointed
to by 'old'.
*/
void sb_timer_checkpoint(sb_timer_t *t, sb_timer_t *old)
{
/* Hold the lock across both the copy and the reset so readers never observe
   a half-reset timer */
ck_spinlock_lock(&t->lock);
memcpy(old, t, sizeof(*old));
/* 'old' inherited a locked spinlock from the memcpy above; give it a fresh,
   unlocked one */
ck_spinlock_init(&old->lock);
sb_timer_reset(t);
ck_spinlock_unlock(&t->lock);
}
/* get average time per event */

View File

@ -42,6 +42,7 @@
#include <stdbool.h>
#include "sb_util.h"
#include "ck_spinlock.h"
/* Convert nanoseconds to seconds and vice versa */
#define NS2SEC(nsec) ((nsec)/1000000000.)
@ -87,7 +88,10 @@ typedef struct
uint64_t max_time;
uint64_t sum_time;
char pad[SB_CACHELINE_PAD(sizeof(struct timespec)*2 + sizeof(uint64_t)*5)];
ck_spinlock_t lock;
char pad[SB_CACHELINE_PAD(sizeof(struct timespec)*2 + sizeof(uint64_t)*5 +
sizeof(ck_spinlock_t))];
} sb_timer_t;
@ -105,12 +109,18 @@ bool sb_timer_running(sb_timer_t *t);
/* start timer */
static inline void sb_timer_start(sb_timer_t *t)
{
/* The lock serializes the time_start update against concurrent
   sb_timer_checkpoint() copy+reset on the same timer */
ck_spinlock_lock(&t->lock);
SB_GETTIME(&t->time_start);
ck_spinlock_unlock(&t->lock);
}
/* stop timer */
static inline uint64_t sb_timer_stop(sb_timer_t *t)
{
ck_spinlock_lock(&t->lock);
SB_GETTIME(&t->time_end);
uint64_t elapsed = TIMESPEC_DIFF(t->time_end, t->time_start) + t->queue_time;
@ -123,6 +133,8 @@ static inline uint64_t sb_timer_stop(sb_timer_t *t)
if (SB_UNLIKELY(elapsed > t->max_time))
t->max_time = elapsed;
ck_spinlock_unlock(&t->lock);
return elapsed;
}
@ -146,7 +158,13 @@ void sb_timer_copy(sb_timer_t *to, sb_timer_t *from);
specified timer without stopping it. The first call returns time elapsed
since the timer was started.
*/
uint64_t sb_timer_checkpoint(sb_timer_t *t);
uint64_t sb_timer_current(sb_timer_t *t);
/*
Atomically reset a given timer after copying its state into the timer pointed
to by 'old'.
*/
void sb_timer_checkpoint(sb_timer_t *t, sb_timer_t *old);
/* get average time per event */
uint64_t sb_timer_avg(sb_timer_t *);

View File

@ -147,9 +147,6 @@ static int eventgen_thread_created;
/* per-thread timers for response time stats */
static sb_timer_t *timers;
/* Mutex protecting timers. */
static pthread_mutex_t timers_mutex;
/* Temporary copy of timers for checkpoint reports */
static sb_timer_t *timers_copy;
@ -230,7 +227,7 @@ static void report_intermediate(void)
MS2SEC(sb_histogram_get_pct_intermediate(&sb_latency_histogram,
sb_globals.percentile));
stat.time_interval = NS2SEC(sb_timer_checkpoint(&sb_intermediate_timer));
stat.time_interval = NS2SEC(sb_timer_current(&sb_intermediate_timer));
if (sb_globals.tx_rate > 0)
{
@ -375,16 +372,9 @@ static void report_cumulative(void)
const unsigned nthreads = sb_globals.threads;
/* Create a temporary copy of timers and reset them */
if (sb_globals.n_checkpoints > 0)
pthread_mutex_lock(&timers_mutex);
memcpy(timers_copy, timers, nthreads * sizeof(sb_timer_t));
/* Atomically reset each timer after copying into its timers_copy slot */
for (i = 0; i < nthreads; i++)
sb_timer_reset(&timers[i]);
if (sb_globals.n_checkpoints > 0)
pthread_mutex_unlock(&timers_mutex);
sb_timer_checkpoint(&timers[i], &timers_copy[i]);
/* Aggregate temporary timers copy */
for(i = 0; i < nthreads; i++)
@ -396,7 +386,7 @@ static void report_cumulative(void)
stat.latency_avg = NS2SEC(sb_timer_avg(&t));
stat.latency_sum = NS2SEC(sb_timer_sum(&t));
stat.time_interval = NS2SEC(sb_timer_checkpoint(&sb_checkpoint_timer));
stat.time_interval = NS2SEC(sb_timer_current(&sb_checkpoint_timer));
if (current_test && current_test->ops.report_cumulative)
current_test->ops.report_cumulative(&stat);
@ -773,15 +763,7 @@ sb_event_t sb_next_event(sb_test_t *test, int thread_id)
void sb_event_start(int thread_id)
{
sb_timer_t *timer = &timers[thread_id];
if (sb_globals.n_checkpoints > 0)
pthread_mutex_lock(&timers_mutex);
sb_timer_start(timer);
if (sb_globals.n_checkpoints > 0)
pthread_mutex_unlock(&timers_mutex);
sb_timer_start(&timers[thread_id]);
}
@ -790,14 +772,8 @@ void sb_event_stop(int thread_id)
sb_timer_t *timer = &timers[thread_id];
long long value;
if (sb_globals.n_checkpoints > 0)
pthread_mutex_lock(&timers_mutex);
value = sb_timer_stop(timer);
if (sb_globals.n_checkpoints > 0)
pthread_mutex_unlock(&timers_mutex);
if (sb_globals.percentile > 0)
sb_histogram_update(&sb_latency_histogram, NS2MS(value));
@ -1416,9 +1392,6 @@ static int init(void)
for (unsigned i = 0; i < sb_globals.threads; i++)
sb_timer_init(&timers[i]);
if (sb_globals.n_checkpoints > 0)
pthread_mutex_init(&timers_mutex, NULL);
return 0;
}
@ -1602,9 +1575,6 @@ end:
free(sb_globals.argv);
if (sb_globals.n_checkpoints > 0)
pthread_mutex_destroy(&timers_mutex);
return rc;
}