#include <pthread.h>
#include <stdlib.h>
#include <limits.h>
#include <stdint.h> // for int64_t used by kt_pipeline()

/************
 * kt_for() *
 ************/

struct kt_for_t;

typedef struct {
	struct kt_for_t *t; // back-pointer to the shared kt_for_t
	long i;             // index of the next work item this worker will claim
} ktf_worker_t;

typedef struct kt_for_t {
	int n_threads;
	long n;                       // total number of work items
	ktf_worker_t *w;              // one slot per worker thread
	void (*func)(void*,long,int); // func(data, item_index, thread_id)
	void *data;
} kt_for_t;
// steal a work item from the worker that has made the least progress
static inline long steal_work(kt_for_t *t)
{
	int i, min_i = -1;
	long k, min = LONG_MAX;
	for (i = 0; i < t->n_threads; ++i) // find the worker with the smallest claimed index
		if (min > t->w[i].i) min = t->w[i].i, min_i = i;
	k = __sync_fetch_and_add(&t->w[min_i].i, t->n_threads);
	return k >= t->n? -1 : k; // -1 if no work is left
}
static void *ktf_worker(void *data)
{
	ktf_worker_t *w = (ktf_worker_t*)data;
	long i;
	for (;;) { // process this worker's own slice: i, i+n_threads, i+2*n_threads, ...
		i = __sync_fetch_and_add(&w->i, w->t->n_threads);
		if (i >= w->t->n) break;
		w->t->func(w->t->data, i, w - w->t->w);
	}
	while ((i = steal_work(w->t)) >= 0) // then help other workers finish their slices
		w->t->func(w->t->data, i, w - w->t->w);
	pthread_exit(0);
}
void kt_for(int n_threads, void (*func)(void*,long,int), void *data, long n)
{
	int i;
	kt_for_t t;
	pthread_t *tid;
	t.func = func, t.data = data, t.n_threads = n_threads, t.n = n;
	t.w = (ktf_worker_t*)alloca(n_threads * sizeof(ktf_worker_t));
	tid = (pthread_t*)alloca(n_threads * sizeof(pthread_t));
	for (i = 0; i < n_threads; ++i)
		t.w[i].t = &t, t.w[i].i = i;
	for (i = 0; i < n_threads; ++i) pthread_create(&tid[i], 0, ktf_worker, &t.w[i]);
	for (i = 0; i < n_threads; ++i) pthread_join(tid[i], 0);
}
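
/* Usage sketch (not part of the original file): the payload struct, the
 * worker callback and the main() below are hypothetical names added only to
 * illustrate how kt_for() might be called. Build with -DKT_FOR_EXAMPLE
 * -pthread to try it; define at most one of the *_EXAMPLE macros at a time. */
#ifdef KT_FOR_EXAMPLE
#include <stdio.h>
typedef struct { double *a; } ktf_example_t; // hypothetical shared payload
static void ktf_example_func(void *data, long i, int tid)
{
	// square the i-th element; tid is the index of the calling worker thread
	ktf_example_t *e = (ktf_example_t*)data;
	e->a[i] *= e->a[i];
}
int main(void)
{
	long i, n = 1000000;
	ktf_example_t e;
	e.a = (double*)malloc(n * sizeof(double));
	for (i = 0; i < n; ++i) e.a[i] = i * 0.5;
	kt_for(4, ktf_example_func, &e, n); // process n items with 4 threads
	printf("%f\n", e.a[n-1]);
	free(e.a);
	return 0;
}
#endif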
/*****************
 * kt_pipeline() *
 *****************/

struct ktp_t;

typedef struct {
	struct ktp_t *pl; // back-pointer to the shared pipeline state
	int64_t index;    // serial number of the data batch this worker holds
	int step;         // pipeline step this worker is about to run
	void *data;       // output of the previous step, input to the next
} ktp_worker_t;

typedef struct ktp_t {
	void *shared;
	void *(*func)(void*, int, void*); // func(shared, step, input)
	int64_t index;
	int n_workers, n_steps;
	ktp_worker_t *workers;
	pthread_mutex_t mutex;
	pthread_cond_t cv;
} ktp_t;
static void *ktp_worker(void *data)
{
	ktp_worker_t *w = (ktp_worker_t*)data;
	ktp_t *p = w->pl;
	while (w->step < p->n_steps) {
		// test whether we can kick off the job with this worker
		pthread_mutex_lock(&p->mutex);
		for (;;) {
			int i;
			// test whether another worker is doing the same step
			for (i = 0; i < p->n_workers; ++i) {
				if (w == &p->workers[i]) continue; // ignore itself
				if (p->workers[i].step <= w->step && p->workers[i].index < w->index)
					break;
			}
			if (i == p->n_workers) break; // no workers with smaller indices are doing w->step or the previous steps
			pthread_cond_wait(&p->cv, &p->mutex);
		}
		pthread_mutex_unlock(&p->mutex);
		// working on w->step
		w->data = p->func(p->shared, w->step, w->step? w->data : 0); // for the first step, input is NULL
		// update step and let other workers know
		pthread_mutex_lock(&p->mutex);
		// a NULL return before the last step signals end of input: jump to n_steps so this worker terminates
		w->step = w->step == p->n_steps - 1 || w->data? (w->step + 1) % p->n_steps : p->n_steps;
		if (w->step == 0) w->index = p->index++;
		pthread_cond_broadcast(&p->cv);
		pthread_mutex_unlock(&p->mutex);
	}
	pthread_exit(0);
}
void kt_pipeline(int n_threads, void *(*func)(void*, int, void*), void *shared_data, int n_steps)
{
	ktp_t aux;
	pthread_t *tid;
	int i;
	if (n_threads < 1) n_threads = 1;
	aux.n_workers = n_threads;
	aux.n_steps = n_steps;
	aux.func = func;
	aux.shared = shared_data;
	aux.index = 0;
	pthread_mutex_init(&aux.mutex, 0);
	pthread_cond_init(&aux.cv, 0);
	aux.workers = (ktp_worker_t*)alloca(n_threads * sizeof(ktp_worker_t));
	for (i = 0; i < n_threads; ++i) {
		ktp_worker_t *w = &aux.workers[i];
		w->step = 0; w->pl = &aux; w->data = 0;
		w->index = aux.index++;
	}
	tid = (pthread_t*)alloca(n_threads * sizeof(pthread_t));
	for (i = 0; i < n_threads; ++i) pthread_create(&tid[i], 0, ktp_worker, &aux.workers[i]);
	for (i = 0; i < n_threads; ++i) pthread_join(tid[i], 0);
	pthread_mutex_destroy(&aux.mutex);
	pthread_cond_destroy(&aux.cv);
}
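
/* Usage sketch (not part of the original file): the ktp_example_* names and
 * main() below are hypothetical, added only to illustrate the three-step
 * read/process/write pattern kt_pipeline() expects; a step function that
 * returns NULL at step 0 signals end of input. Build with
 * -DKT_PIPELINE_EXAMPLE -pthread to try it; define at most one *_EXAMPLE. */
#ifdef KT_PIPELINE_EXAMPLE
#include <stdio.h>
typedef struct { long n_batches, next; } ktp_example_shared_t; // hypothetical shared state
static void *ktp_example_step(void *shared, int step, void *in)
{
	ktp_example_shared_t *s = (ktp_example_shared_t*)shared;
	if (step == 0) { // step 0: "read" a batch; return NULL when input is exhausted
		long *batch;
		if (s->next >= s->n_batches) return 0;
		batch = (long*)malloc(sizeof(long));
		*batch = s->next++; // safe: kt_pipeline() serializes each step across workers
		return batch;
	} else if (step == 1) { // step 1: "process" the batch in place
		*(long*)in *= 2;
		return in;
	} else { // step 2: "write" the result, in batch order, and free the batch
		printf("batch result: %ld\n", *(long*)in);
		free(in);
		return 0;
	}
}
int main(void)
{
	ktp_example_shared_t s;
	s.n_batches = 10, s.next = 0;
	kt_pipeline(3, ktp_example_step, &s, 3); // 3 worker threads, 3 pipeline steps
	return 0;
}
#endif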