Hello everybody,
I'm trying to benchmark the difference between a function pointer call and a virtual function call. To do this, I have written two pieces of code that do the same mathematical computation over an array. One variant uses an array of pointers to functions and calls them in a loop. The other uses an array of pointers to a base class and calls its virtual function, which is overridden in the derived classes to do exactly the same thing as the functions in the first variant. Then I print the time elapsed and use a simple shell script to run the benchmark many times and compute the average run time.
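The script itself just runs each binary repeatedly and averages the printed seconds. For illustration, here is a rough, self-contained C++ sketch of the same averaging idea; the run count of 100 and the empty timed section are placeholders of mine, not part of the real setup:

#include <iostream>
#include <ctime>

int main()
{
    const int runs = 100;  // placeholder; the real script picks its own count
    double total_seconds = 0.0;
    for ( int r = 0; r < runs; ++r ) {
        struct timespec start, end;
        clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &start);
        // ... the timed section from the tests below would go here ...
        clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &end);
        total_seconds += ((end.tv_sec - start.tv_sec) * 1000000000LL
                          + (end.tv_nsec - start.tv_nsec)) / 1000000000.0;
    }
    std::cout << total_seconds / runs << std::endl;  // average seconds per run
}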
Here is the code:
#include <iostream>
#include <cstdlib>
#include <ctime>
#include <cmath>

using namespace std;

// Difference between two timespecs, in nanoseconds. The LL suffix keeps
// the multiplication from overflowing when tv_sec is a 32-bit type.
long long timespecDiff(struct timespec *timeA_p, struct timespec *timeB_p)
{
    return ((timeA_p->tv_sec * 1000000000LL) + timeA_p->tv_nsec) -
           ((timeB_p->tv_sec * 1000000000LL) + timeB_p->tv_nsec);
}

void function_not( double *d ) {
    *d = sin(*d);
}

void function_and( double *d ) {
    *d = cos(*d);
}

void function_or( double *d ) {
    *d = tan(*d);
}

void function_xor( double *d ) {
    *d = sqrt(*d);
}

void ( * const function_table[4] )( double* ) =
    { &function_not, &function_and, &function_or, &function_xor };

int main(void)
{
    srand(time(0));

    // For each element, pick a random function and a random input value.
    void ( * index_array[100000] )( double * );
    double array[100000];
    for ( long int i = 0; i < 100000; ++i ) {
        index_array[i] = function_table[ rand() % 4 ];
        array[i] = ( double )( rand() / 1000 );
    }

    // Time only the loop of indirect calls.
    struct timespec start, end;
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &start);
    for ( long int i = 0; i < 100000; ++i ) {
        index_array[i]( &array[i] );
    }
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &end);

    unsigned long long time_elapsed = timespecDiff(&end, &start);
    cout << time_elapsed / 1000000000.0 << endl;
}
And here is the virtual function variant:
#include <iostream>
#include <cstdlib>
#include <ctime>
#include <cmath>

using namespace std;

// Difference between two timespecs, in nanoseconds (LL suffix as above).
long long timespecDiff(struct timespec *timeA_p, struct timespec *timeB_p)
{
    return ((timeA_p->tv_sec * 1000000000LL) + timeA_p->tv_nsec) -
           ((timeB_p->tv_sec * 1000000000LL) + timeB_p->tv_nsec);
}

class A {
public:
    virtual ~A() {}
    virtual void calculate( double *i ) = 0;
};

class A1 : public A {
public:
    void calculate( double *i ) {
        *i = sin(*i);
    }
};

class A2 : public A {
public:
    void calculate( double *i ) {
        *i = cos(*i);
    }
};

class A3 : public A {
public:
    void calculate( double *i ) {
        *i = tan(*i);
    }
};

class A4 : public A {
public:
    void calculate( double *i ) {
        *i = sqrt(*i);
    }
};

int main(void)
{
    srand(time(0));

    // For each element, pick a random derived class and a random input value.
    // The objects are never deleted; the process exits right after the test.
    A *base[100000];
    double array[100000];
    for ( long int i = 0; i < 100000; ++i ) {
        array[i] = ( double )( rand() / 1000 );
        switch ( rand() % 4 ) {
            case 0: base[i] = new A1(); break;
            case 1: base[i] = new A2(); break;
            case 2: base[i] = new A3(); break;
            case 3: base[i] = new A4(); break;
        }
    }

    // Time only the loop of virtual calls.
    struct timespec start, end;
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &start);
    for ( long int i = 0; i < 100000; ++i ) {
        base[i]->calculate( &array[i] );
    }
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &end);

    unsigned long long time_elapsed = timespecDiff(&end, &start);
    cout << time_elapsed / 1000000000.0 << endl;
}
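(One caveat: a guard like the following, which is my own addition and was not active for the numbers below, can be appended at the end of main in both programs to make sure -O3 doesn't discard the timed loops entirely. It writes to stderr so that stdout still carries only the time.)

// Appended after the timed loop in either program, as a sanity check:
// consuming the results keeps the optimizer from discarding the calls.
double checksum = 0.0;
for ( long int i = 0; i < 100000; ++i )
    checksum += array[i];
cerr << "checksum: " << checksum << endl;  // stderr, so stdout stays just the time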
My system is Linux, Fedora 13, with gcc 4.4.2. The code is compiled with g++ -O3. The first program is test1, the second is test2.
Now I see this in the console:
[Ignat@localhost circuit_testing]$ ./test1 && ./test2
0.0153142
0.0153166
Well, more or less the same, I think. And then, this:
[Ignat@localhost circuit_testing]$ ./test1 && ./test2
0.01531
0.0152476
Where is the 25% difference that is supposed to be visible? How can the first executable even be slower than the second one?
I'm asking because I'm working on a project that involves calling a lot of small functions in a row like this to compute the values of an array, and the code I've inherited performs some very complex manipulations to avoid the virtual function call overhead. So where is this famous call overhead?