I am trying to measure short time intervals, on the order of a few milliseconds. This proves to be a nontrivial task, since the ubiquitous time() function works with whole-second precision. After some research I came up with the four methods seen in the code sample below:
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
int main(int argc, char *argv[])
{
    clock_t t0, t1;
    double dt0;
    struct timespec t2, t3;
    double dt1;
    struct timespec t4, t5;
    double dt2;
    struct timeval t6, t7;
    double dt3;
    volatile long long i;

    t2.tv_sec = 0;
    t2.tv_nsec = 0;
    clock_settime(CLOCK_MONOTONIC, &t2);
    clock_settime(CLOCK_REALTIME, &t2);

    t0 = clock();
    clock_gettime(CLOCK_REALTIME, &t2);
    clock_gettime(CLOCK_MONOTONIC, &t4);
    gettimeofday(&t6, NULL);

    getchar();
    for (i=0; i<1e9; i++) {};

    gettimeofday(&t7, NULL);
    clock_gettime(CLOCK_MONOTONIC, &t5);
    clock_gettime(CLOCK_REALTIME, &t3);
    t1 = clock();

    dt0 = (double) (t1 - t0) / CLOCKS_PER_SEC;
    dt1 = (double) (t3.tv_nsec - t2.tv_nsec) / 1e9;
    dt2 = (double) (t5.tv_nsec - t4.tv_nsec) / 1e9;
    dt3 = (double) (t7.tv_usec - t6.tv_usec) / 1e6;

    printf("1. clock(): [0]=%10.0f, [1]=%10.0f, [1-0]=%10.6f sec\n", (double) t0, (double) t1, dt0);
    printf("2. clock_gettime(R): [2]=%10ld, [3]=%10ld, [3-2]=%10f sec\n", (long) t2.tv_nsec, (long) t3.tv_nsec, dt1);
    printf("3. clock_gettime(M): [2]=%10ld, [3]=%10ld, [3-2]=%10f sec\n", (long) t4.tv_nsec, (long) t5.tv_nsec, dt2);
    printf("4. gettimeofday(): [4]=%10ld, [5]=%10ld, [5-4]=%10f sec\n", (long) t6.tv_usec, (long) t7.tv_usec, dt3);

    return 0;
}
Then I compiled and ran it with:
gcc -lrt -o timer.e timer.c; time ./timer.e
I used the getchar() call to introduce a couple of seconds of delay, to see a difference between the real and user time reports, and the result was:
1. clock(): [0]= 0, [1]= 3280000, [1-0]= 3.280000 sec
2. clock_gettime(R): [2]= 823922476, [3]= 478650549, [3-2]= -0.345272 sec
3. clock_gettime(M): [2]= 671137949, [3]= 325864897, [3-2]= -0.345273 sec
4. gettimeofday(): [4]= 823924, [5]= 478648, [5-4]= -0.345276 sec
real 0m6.659s
user 0m3.280s
sys 0m0.010s
As you can see, the only meaningful result is the correlation between method 1 and the user time reported by the time command.
This raises a couple of questions:
1. Why are results 2-4 meaningless?
2. How do I measure the real time spent running a program, like the real number that time reports?
My environment is Ubuntu 10.04 LTS 64-bit on VMware on Windows 7 on an AMD based HP laptop.
You're on the right track with clock_gettime().
However, you ought to get rid of those clock_settime() calls: simply call clock_gettime() before and after a block of code, and look at the difference.
Also, when computing the difference, you should take both tv_sec and tv_nsec into account; your current code just looks at the nanosecond component and ignores the full seconds.
On my Ubuntu system the following code gives a pretty accurate measurement of the time it takes to execute the loop:
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
int main(int argc, char *argv[])
{
    struct timespec t2, t3;
    double dt1;
    volatile long long i;

    clock_gettime(CLOCK_MONOTONIC, &t2);
    for (i=0; i<1e9; i++) {};
    clock_gettime(CLOCK_MONOTONIC, &t3);

    // time in seconds
    dt1 = (t3.tv_sec - t2.tv_sec) + (double) (t3.tv_nsec - t2.tv_nsec) * 1e-9;
    printf("%f\n", dt1);

    return 0;
}
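If you end up timing several blocks, you can factor the tv_sec/tv_nsec arithmetic into a small helper. This is just a minimal sketch along the lines of the code above; the name elapsed_seconds is one I made up for illustration, not a standard API:
#include <stdio.h>
#include <time.h>

/* Hypothetical helper: wall-clock seconds between two CLOCK_MONOTONIC
   readings, using both the tv_sec and tv_nsec fields. */
static double elapsed_seconds(const struct timespec *start, const struct timespec *end)
{
    return (double) (end->tv_sec - start->tv_sec)
         + (double) (end->tv_nsec - start->tv_nsec) * 1e-9;
}

int main(void)
{
    struct timespec t0, t1;
    volatile long long i;

    clock_gettime(CLOCK_MONOTONIC, &t0);
    for (i = 0; i < 1e9; i++) {};      /* the block being timed */
    clock_gettime(CLOCK_MONOTONIC, &t1);

    printf("%f sec\n", elapsed_seconds(&t0, &t1));
    return 0;
}
As in your original command line, older glibc versions (such as the one in Ubuntu 10.04) still require linking with -lrt for clock_gettime(). CLOCK_MONOTONIC is the better choice for interval measurement because, unlike CLOCK_REALTIME, it is not affected by changes to the system clock.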