#include <time.h>
#include <sys/time.h>

#include "fio.h"

static struct timespec genesis;
static unsigned long ns_granularity;
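
/*
 * Add 'msec' milliseconds to *ts, carrying any overflow from tv_nsec
 * into tv_sec so tv_nsec stays below one second.
 */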
void timespec_add_msec(struct timespec *ts, unsigned int msec)
{
        uint64_t adj_nsec = 1000000ULL * msec;

        ts->tv_nsec += adj_nsec;
        if (adj_nsec >= 1000000000) {
                uint64_t adj_sec = adj_nsec / 1000000000;

                ts->tv_nsec -= adj_sec * 1000000000;
                ts->tv_sec += adj_sec;
        }
        if (ts->tv_nsec >= 1000000000) {
                ts->tv_nsec -= 1000000000;
                ts->tv_sec++;
        }
}

/*
 * busy looping version for the last few usec
 */
uint64_t usec_spin(unsigned int usec)
{
        struct timespec start;
        uint64_t t;

        fio_gettime(&start, NULL);
        while ((t = utime_since_now(&start)) < usec)
                nop;

        return t;
}

/*
 * busy loop for a fixed amount of cycles
 */
void cycles_spin(unsigned int n)
{
        unsigned long i;

        for (i = 0; i < n; i++)
                nop;
}
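
/*
 * Sleep for roughly 'usec' microseconds. Waits shorter than the measured
 * nanosleep() granularity are busy-waited via usec_spin(); longer waits
 * use nanosleep() in chunks of at most ~1 second so that td->terminate
 * is noticed promptly. Returns the time actually spent waiting.
 */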
uint64_t usec_sleep(struct thread_data *td, unsigned long usec)
{
        struct timespec req;
        struct timespec tv;
        uint64_t t = 0;

        do {
                unsigned long ts = usec;

                if (usec < ns_granularity) {
                        t += usec_spin(usec);
                        break;
                }

                ts = usec - ns_granularity;

                if (ts >= 1000000) {
                        req.tv_sec = ts / 1000000;
                        ts -= 1000000 * req.tv_sec;

                        /*
                         * Limit sleep to ~1 second at most, otherwise we
                         * don't notice when someone signals the job to
                         * exit manually.
                         */
                        if (req.tv_sec > 1)
                                req.tv_sec = 1;
                } else
                        req.tv_sec = 0;

                req.tv_nsec = ts * 1000;
                fio_gettime(&tv, NULL);

                if (nanosleep(&req, NULL) < 0)
                        break;

                ts = utime_since_now(&tv);
                t += ts;
                if (ts >= usec)
                        break;

                usec -= ts;
        } while (!td->terminate);

        return t;
}
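
/*
 * Helpers returning the time elapsed since the global genesis timestamp,
 * via time_since_now(), mtime_since_now() and utime_since_now()
 * respectively.
 */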
uint64_t time_since_genesis(void)
{
        return time_since_now(&genesis);
}

uint64_t mtime_since_genesis(void)
{
        return mtime_since_now(&genesis);
}

uint64_t utime_since_genesis(void)
{
        return utime_since_now(&genesis);
}
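
/*
 * True while the job is still inside its configured ramp_time window.
 */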
bool in_ramp_time(struct thread_data *td)
{
        return td->o.ramp_time && !td->ramp_time_over;
}
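
/*
 * Mark the parent thread's ramp period as over and reset its stats,
 * since the parent does no IO of its own. Returns true if the parent's
 * state was updated.
 */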
static bool parent_update_ramp(struct thread_data *td)
{
        struct thread_data *parent = td->parent;

        if (!parent || parent->ramp_time_over)
                return false;

        reset_all_stats(parent);
        parent->ramp_time_over = true;
        td_set_runstate(parent, TD_RAMP);
        return true;
}
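
/*
 * Check whether this thread's ramp period has expired; if so, reset the
 * accumulated stats and update the run state so that only post-ramp IO
 * is accounted.
 */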
bool ramp_time_over(struct thread_data *td)
{
        if (!td->o.ramp_time || td->ramp_time_over)
                return true;

        if (utime_since_now(&td->epoch) >= td->o.ramp_time) {
                td->ramp_time_over = true;
                reset_all_stats(td);
                reset_io_stats(td);
                td_set_runstate(td, TD_RAMP);

                /*
                 * If we have a parent, the parent isn't doing IO. Hence
                 * the parent never enters do_io(), which is what switches
                 * the state from RAMP -> RUNNING. Do this manually here.
                 */
                if (parent_update_ramp(td))
                        td_set_runstate(td, TD_RUNNING);

                return true;
        }

        return false;
}
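
/*
 * Initialize the clock source and estimate the worst-case overhead of a
 * short nanosleep(). The result is stored in ns_granularity (despite the
 * name, in microseconds, since utime_since_now() is used) and lets
 * usec_sleep() decide when to busy-wait instead.
 */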
void fio_time_init(void)
{
        int i;

        fio_clock_init();

        /*
         * Check the granularity of the nanosleep function
         */
        for (i = 0; i < 10; i++) {
                struct timespec tv, ts;
                unsigned long elapsed;

                fio_gettime(&tv, NULL);
                ts.tv_sec = 0;
                ts.tv_nsec = 1000;

                nanosleep(&ts, NULL);
                elapsed = utime_since_now(&tv);

                if (elapsed > ns_granularity)
                        ns_granularity = elapsed;
        }
}
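
/*
 * Record the global genesis timestamp that the *_since_genesis()
 * helpers and fill_start_time() are measured against.
 */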
void set_genesis_time(void)
{
        fio_gettime(&genesis, NULL);
}
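
/*
 * Record the per-thread epoch, plus millisecond-resolution timestamps
 * for the log alternate epoch and the job start, taken from the given
 * clock ids.
 */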
void set_epoch_time(struct thread_data *td, clockid_t log_alternate_epoch_clock_id, clockid_t job_start_clock_id)
{
        struct timespec ts;

        fio_gettime(&td->epoch, NULL);
        clock_gettime(log_alternate_epoch_clock_id, &ts);
        td->alternate_epoch = (unsigned long long)(ts.tv_sec) * 1000 +
                (unsigned long long)(ts.tv_nsec) / 1000000;

        if (job_start_clock_id == log_alternate_epoch_clock_id) {
                td->job_start = td->alternate_epoch;
        } else {
                clock_gettime(job_start_clock_id, &ts);
                td->job_start = (unsigned long long)(ts.tv_sec) * 1000 +
                        (unsigned long long)(ts.tv_nsec) / 1000000;
        }
}
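
/*
 * Copy the genesis timestamp into *t.
 */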
void fill_start_time(struct timespec *t)
{
        memcpy(t, &genesis, sizeof(genesis));
}