File: lwp_pt.c

/* BLURB lgpl

                           Coda File System
                              Release 5

            Copyright (c) 1999 Carnegie Mellon University
                  Additional copyrights listed below

This  code  is  distributed "AS IS" without warranty of any kind under
the  terms of the  GNU  Library General Public Licence  Version 2,  as
shown in the file LICENSE. The technical and financial contributors to
Coda are listed in the file CREDITS.

                        Additional copyrights
#*/

#include <pthread.h>
#include <semaphore.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <signal.h>
#include <assert.h>

#include <lwp/lwp.h>
#include <lwp/lock.h>
#include "lwp.private_pt.h"

/* BEGIN - NOT USED exported variables */
int          lwp_debug;	          /* ON = show LWP debugging trace */
int          lwp_overflowAction;  /* Action to take on stack overflow. */
int          lwp_stackUseEnabled; /* Tells if stack size counting is enabled. */
/* variables used for checking work time of an lwp */
struct timeval last_context_switch; /* how long a lwp was running */
struct timeval cont_sw_threshold;  /* how long a lwp is allowed to run */
struct timeval run_wait_threshold;
/* END - NOT USED exported variables */

FILE *lwp_logfile = NULL; /* where to log debug messages to */
int   lwp_loglevel = 0;   /* which messages to log */

static pthread_key_t    lwp_private; /* thread specific data */
static struct list_head lwp_list;    /* list of all threads */

/* information passed to a child process */
struct lwp_forkinfo {
    void  (*func)(void *);
    char   *parm; 
    char   *name;
    int     prio;
    PROCESS pid;
};

/* mutexes to block concurrent threads & various run queues */
static pthread_mutex_t run_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t run_cond = PTHREAD_COND_INITIALIZER;
PROCESS lwp_cpptr = NULL; /* the currently running LWP thread */

/* Short explanation of the scheduling
 * 
 * All non-active non-concurrent threads are waiting:
 *  - in SCHEDULE on the pid->run_cond condition variable (runnable threads)
 *  - in LWP_MwaitProcess on the pid->event condition variable, or
 *  - in ObtainLock on the lock->wakeup condition variable
 *  
 * All these condition variables have run_mutex as their protecting mutex.
 * Whenever a non-concurrent thread is about to block in cond_wait it has
 * to call SIGNAL to unblock the next runnable thread.
 * 
 * IOMGR_Select and LWP_QWait make a non-concurrent thread temporarily
 * concurrent, using lwp_LEAVE and lwp_YIELD. lwp_LEAVE unblocks a runnable
 * thread before releasing the run_mutex. lwp_LEAVE and lwp_YIELD are the
 * _only_ functions that obtain and release the run_mutex, for the rest it
 * is only implicitly released while waiting on condition variables.
 * 
 * SCHEDULE links a thread on the tail of its run-queue and attempts to
 * unblock a runnable thread. It then waits to be signalled itself. This
 * is a strict priority-based round-robin scheduler: as long as there are
 * runnable higher-priority threads, lower queues will not be run at all.
 * All threads on the same queue are scheduled in round-robin order.
 * 
 * Non-concurrent threads have to be very careful not to get cancelled while
 * waiting on condition variables because the cleanup handler needs to get
 * access to the shared list of processes, and therefore needs to lock the
 * run_mutex.
 */
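/* A minimal sketch of that temporary-concurrency pattern (illustrative
 * only; it mirrors what LWP_QWait below actually does):
 *
 *     lwp_LEAVE(pid);            // let the next runnable thread go
 *     ... block concurrently ... // e.g. sem_wait() or select()
 *     lwp_YIELD(pid);            // rejoin the non-concurrent world
 */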

/*-------------BEGIN SCHEDULER CODE------------*/

static int lwp_waiting;

int lwp_threads_waiting(void)
{
    int ret;

    lwp_mutex_lock(&run_mutex);
    ret = lwp_waiting;
    lwp_mutex_unlock(&run_mutex);

    return ret;
}

static void _SCHEDULE(PROCESS pid, int leave)
{
}
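/* NOTE: _SCHEDULE above is a no-op stub in this pthreads port; the actual
 * handoff between non-concurrent threads happens in lwp_LEAVE/lwp_YIELD
 * below via run_cond. The signature is presumably kept for symmetry with
 * the original LWP scheduler. */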

void lwp_LEAVE(PROCESS pid)
{
    if (pid == lwp_cpptr) {
	lwp_mutex_lock(&run_mutex);
	lwp_cpptr = NULL;
	pthread_cond_signal(&run_cond);
	lwp_mutex_unlock(&run_mutex);
    }
    pthread_testcancel();
}

void lwp_YIELD(PROCESS pid)
{
    lwp_LEAVE(pid);

    if (pid->concurrent)
	return;

    lwp_mutex_lock(&run_mutex);
    if (lwp_cpptr || lwp_waiting) {
	lwp_waiting++;
	pid->waiting = 1;

	/* block at least once to give others a chance to run */
	do {
	    pthread_cond_wait(&run_cond, &run_mutex);
	} while(lwp_cpptr);

	lwp_waiting--;
	pid->waiting = 0;
    }
    lwp_cpptr = pid;
    lwp_mutex_unlock(&run_mutex);
}

/*-------------END SCHEDULER CODE------------*/


/* this function is called when a thread is cancelled and the thread specific
 * data is going to be destroyed */
static void lwp_cleanup_process(void *data)
{
    PROCESS pid = (PROCESS)data;

    /* now we need the run_mutex to fiddle around with the process list */
    lwp_mutex_lock(&run_mutex);
    {
	if (pid == lwp_cpptr) {
	    lwp_cpptr = NULL;
	    pthread_cond_signal(&run_cond);
	}
	if (pid->waiting)
	    lwp_waiting--;

	list_del(&pid->list);
    }
    lwp_mutex_unlock(&run_mutex);

    /* ok, we're safe, start cleaning up */
    sem_destroy(&pid->waitq);
    pthread_cond_destroy(&pid->event);

    if (pid->name)   free(pid->name);
    if (pid->evlist) free(pid->evlist);
    free(data);
}

static int lwp_inited = 0;
int LWP_Init (int version, int priority, PROCESS *ret)
{
    PROCESS pid;

    if (version != LWP_VERSION) {
	fprintf(stderr, "**** FATAL ERROR: LWP VERSION MISMATCH ****\n");
	exit(-1);
    }

    if (lwp_inited)
	return LWP_SUCCESS;

    lwp_logfile = stderr;

    if (priority < 0 || priority > LWP_MAX_PRIORITY)
	return LWP_EBADPRI;

    assert(pthread_key_create(&lwp_private, lwp_cleanup_process) == 0);

    list_init(&lwp_list);

    lwp_inited = 1;

    /* now set up our private process structure */
    assert(LWP_CurrentProcess(&pid) == 0);

    pid->name = strdup("Main Process");
    pid->priority = priority;

    /* As we're still initializing (and therefore the first LWP thread) we
     * don't need to fiddle with the run_mutex */
    list_add(&pid->list, &lwp_list);
    lwp_cpptr = pid;

    if (ret) *ret = pid;

    return LWP_SUCCESS;
}
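/* Typical start-up, as a sketch (LWP_NORMAL_PRIORITY is assumed to be the
 * usual priority constant from lwp.h):
 *
 *     PROCESS mainpid;
 *     assert(LWP_Init(LWP_VERSION, LWP_NORMAL_PRIORITY, &mainpid) ==
 *            LWP_SUCCESS);
 */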

int LWP_CurrentProcess(PROCESS *pid)
{
    /* normally this is a short function */
    if (!pid) return LWP_EBADPID;
    *pid = (PROCESS)pthread_getspecific(lwp_private);
    if (*pid) return LWP_SUCCESS;

    /* but if there wasn't any thread specific data yet, we need to
     * initialize it now */
    *pid = (PROCESS)malloc(sizeof(struct lwp_pcb));

    if (!*pid) {
	fprintf(lwp_logfile, "Couldn't allocate thread specific data\n");
	return LWP_ENOMEM;
    }
    memset(*pid, 0, sizeof(struct lwp_pcb));

    (*pid)->thread   = pthread_self();
    (*pid)->evsize   = 5;
    (*pid)->evlist   = (char **)malloc((*pid)->evsize * sizeof(char*));
    assert((*pid)->evlist != NULL);

    list_init(&(*pid)->list);
    assert(sem_init(&(*pid)->waitq, 0, 0) == 0);
    assert(pthread_cond_init(&(*pid)->event, NULL) == 0);

    pthread_setspecific(lwp_private, *pid);

    return LWP_SUCCESS;
}

/* The entry point for new threads, this sets up the thread specific data
 * and locks */
static void *lwp_newprocess(void *arg)
{
    struct lwp_forkinfo *newproc = (struct lwp_forkinfo *)arg;
    PROCESS              pid, parent = newproc->pid;

    /* block incoming signals to this thread */
    sigset_t mask;
    sigemptyset(&mask);
    /* just adding the ones that venus tends to use */
    sigaddset(&mask, SIGHUP);
#ifdef SIGIOT
    sigaddset(&mask, SIGIOT);
#endif
    sigaddset(&mask, SIGTERM);
    sigaddset(&mask, SIGINT);
    sigaddset(&mask, SIGXCPU);
    sigaddset(&mask, SIGXFSZ);
    sigaddset(&mask, SIGVTALRM);
    sigaddset(&mask, SIGUSR1);
    pthread_sigmask(SIG_SETMASK, &mask, NULL);

    /* Initialize the thread specific data */
    LWP_CurrentProcess(&pid);

    pid->func = newproc->func;
    pid->parm = newproc->parm;
    pid->priority = newproc->prio;
    pid->name = strdup(newproc->name);

    /* Tell the parent thread that it is off the hook. (The caller of
     * LWP_CreateProcess still isn't, if any volatile parameters were
     * passed, but that was already the case.) */
    newproc->pid = pid;
    LWP_QSignal(parent);

    lwp_mutex_lock(&run_mutex);
    list_add(&pid->list, &lwp_list);
    lwp_mutex_unlock(&run_mutex);

    lwp_YIELD(pid);

    /* Fire off the newborn */
    pid->func(pid->parm);

    lwp_LEAVE(pid);
    pthread_exit(NULL);
    /* Not reached */
}

int LWP_CreateProcess(void (*ep)(void *), int stacksize, int priority,
		      void *parm, char *name, PROCESS *ret)
{
    PROCESS             pid;
    struct lwp_forkinfo newproc;
    pthread_attr_t      attr;
    pthread_t           threadid;
    int                 err;

    if (priority < 0 || priority > LWP_MAX_PRIORITY)
	return LWP_EBADPRI;

    assert(LWP_CurrentProcess(&pid) == 0);

    newproc.func = ep;
    newproc.parm = parm;
    newproc.name = name;
    newproc.prio = priority;
    newproc.pid  = pid;

    /* For some reason cygwin likes to return EBUSY here */
    (void)pthread_attr_init(&attr);
    (void)pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

    err = pthread_create(&threadid, &attr, lwp_newprocess, &newproc);
    if (err) {
	fprintf(lwp_logfile, "Thread %s creation failed, error %s\n",
		name, strerror(err));
	return LWP_EMAXPROC;
    }

    /* Wait until the new thread has finished initialization. */
    LWP_QWait();
    if (ret) *ret = newproc.pid;

    return LWP_SUCCESS;
}
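/* Illustrative use, with made-up names. Note that the stacksize argument
 * is accepted for API compatibility but never used by this pthreads port:
 *
 *     static void worker(void *arg) { ... }
 *
 *     PROCESS wpid;
 *     LWP_CreateProcess(worker, 16384, LWP_NORMAL_PRIORITY,
 *                       NULL, "worker", &wpid);
 */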

static void _LWP_DestroyProcess (PROCESS pid)
{
    pthread_cancel(pid->thread);
    if (pid->waitcnt) {
	pid->waitcnt = 0;
	pthread_cond_signal(&pid->event);
    }
}

int LWP_DestroyProcess (PROCESS pid)
{
    lwp_mutex_lock(&run_mutex);
    {
	_LWP_DestroyProcess(pid);
    }
    lwp_mutex_unlock(&run_mutex);
    return LWP_SUCCESS;
}

int LWP_TerminateProcessSupport()
{
    struct list_head *ptr;
    PROCESS           this, pid;

    assert(LWP_CurrentProcess(&this) == 0);

    lwp_mutex_lock(&run_mutex);
    {
	/* I should not kill myself. */
	list_del(&this->list);

	for (ptr = lwp_list.next; ptr != &lwp_list; ptr = ptr->next) {
	    pid = list_entry(ptr, struct lwp_pcb, list);
	    _LWP_DestroyProcess(pid);
	}
    }
    lwp_mutex_unlock(&run_mutex);

    /* Threads should be cancelled by now, we just have to wait for them to
     * terminate. */
    while(!list_empty(&lwp_list))
	lwp_YIELD(this);

    /* We can start cleaning. */
    lwp_cleanup_process(this);
    pthread_mutex_destroy(&run_mutex);
    pthread_key_delete(lwp_private);

    return LWP_SUCCESS;
}

int LWP_DispatchProcess(void)
{
    PROCESS pid;

    if (LWP_CurrentProcess(&pid))
	return LWP_EBADPID;

    lwp_YIELD(pid);

    return LWP_SUCCESS;
}

/* QSignal/QWait give _at least once_ semantics and do almost no locking,
 * while LWP_INTERNALSIGNAL/LWP_MwaitProcess give _at most once_ semantics
 * and require more elaborate locking. */
/* As QSignals don't get lost, they would also have solved the RVM thread
 * deadlock. My guess is that this is the preferred behaviour. */
int LWP_QSignal(PROCESS pid)
{
    sem_post(&pid->waitq);
    return LWP_SUCCESS;
}

int LWP_QWait()
{
    PROCESS pid;

    if (LWP_CurrentProcess(&pid))
	return LWP_EBADPID;

    lwp_LEAVE(pid);
    sem_wait(&pid->waitq); /* wait until we get signalled */
    lwp_YIELD(pid);

    return LWP_SUCCESS;
}
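/* The handshake in LWP_CreateProcess above is the canonical use of this
 * pair: the parent calls LWP_QWait() after pthread_create, and the child
 * calls LWP_QSignal(parent) once its pcb is set up. Because the semaphore
 * counts posts, the signal is not lost even if it arrives before the
 * parent starts waiting. */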

int LWP_INTERNALSIGNAL(void *event, int yield)
{
    struct list_head *ptr;
    PROCESS           this, pid;
    int               i;

    assert(LWP_CurrentProcess(&this) == 0);

    lwp_mutex_lock(&run_mutex);
    {
	list_for_each(ptr, &lwp_list)
	{
	    pid = list_entry(ptr, struct lwp_pcb, list);
	    if (pid == this) continue;
	    if (!pid->waitcnt) continue;

	    for (i = 0; i < pid->eventcnt; i++) {
		if (pid->evlist[i] == event) {
		    pid->evlist[i] = NULL;
		    pid->waitcnt--;
		    break;
		}
	    }
	    if (pid->eventcnt && !pid->waitcnt)
		pthread_cond_signal(&pid->event);
	}
    }
    lwp_mutex_unlock(&run_mutex);

    if (yield)
	lwp_YIELD(this);

    return LWP_SUCCESS;
}

/* MwaitProcess actually knows a lot about how the scheduling works.
 * We need to avoid cancellations because we would get stuck in the
 * cleanup handler if we were cancelled while waiting on the condition
 * variable. (Cleanup needs to lock the run_mutex to remove us from the
 * list of threads, but we're already sort of `joined'.) */
int LWP_MwaitProcess (int wcount, char *evlist[])
{
    PROCESS pid;
    int     entries, i;

    if (!evlist) return LWP_EBADCOUNT;

    /* count number of entries in the eventlist */
    for (entries = 0; evlist[entries] != NULL; entries++) /* loop */;
    if (wcount <= 0 || wcount > entries) return LWP_EBADCOUNT;

    if (LWP_CurrentProcess(&pid)) return LWP_EBADPID;

    /* copy the events */
    if (entries > pid->evsize) {
        pid->evlist = (char **)realloc(pid->evlist, entries * sizeof(*evlist));
	assert(pid->evlist != NULL);
        pid->evsize = entries;
    }
    memcpy(pid->evlist, evlist, entries * sizeof(*evlist));
    pid->waitcnt = wcount;
    pid->eventcnt = entries;

    lwp_LEAVE(pid);

    /* wait until we received enough events */
    lwp_mutex_lock(&run_mutex);
    while (pid->waitcnt)
	pthread_cond_wait(&pid->event, &run_mutex);
    lwp_mutex_unlock(&run_mutex);

    lwp_YIELD(pid);

    return LWP_SUCCESS;
}

int LWP_WaitProcess (void *event)
{
    void *evlist[2];

    evlist[0] = event; evlist[1] = NULL;
    return LWP_MwaitProcess(1, (char**)evlist);
}
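/* Event waiting in a nutshell (a sketch; LWP_SignalProcess is assumed to
 * be the usual lwp.h wrapper around LWP_INTERNALSIGNAL(event, 1)):
 *
 *     int done;                   // any unique address serves as an event
 *     LWP_WaitProcess(&done);     // one thread blocks here...
 *     LWP_SignalProcess(&done);   // ...until another signals the event
 *
 * With the _at most once_ semantics, a signal finding no waiter for that
 * event is simply lost. */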

int LWP_NewRock (int Tag, char *Value)
{
    PROCESS pid;
    int     i;
    
    if (LWP_CurrentProcess(&pid))
        return LWP_EBADPID;
    
    for (i = 0; i < pid->nrocks; i++)
        if (Tag == pid->rock[i].tag)
            return LWP_EBADROCK;

    /* the rock array only holds MAXROCKS entries */
    if (pid->nrocks >= MAXROCKS)
        return LWP_ENOROCKS;

    pid->rock[pid->nrocks].tag   = Tag;
    pid->rock[pid->nrocks].value = Value;
    pid->nrocks++;
    
    return LWP_SUCCESS;
}

int LWP_GetRock (int Tag,  char **Value)
{
    PROCESS pid;
    int     i;
    
    if (LWP_CurrentProcess(&pid))
        return LWP_EBADPID;
    
    for (i = 0; i < pid->nrocks; i++) {
        if (Tag == pid->rock[i].tag) {
            *Value = pid->rock[i].value;
            return LWP_SUCCESS;
        }
    }

    return LWP_EBADROCK;
}
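/* Rocks are small per-thread (tag, pointer) slots. A usage sketch, with a
 * made-up tag value:
 *
 *     #define MY_ROCK_TAG 42
 *     LWP_NewRock(MY_ROCK_TAG, (char *)mydata);
 *     ...
 *     char *value;
 *     if (LWP_GetRock(MY_ROCK_TAG, &value) == LWP_SUCCESS)
 *         mydata = (struct mydata *)value;
 */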

char *LWP_Name(void)
{
    PROCESS pid;
    if (LWP_CurrentProcess(&pid)) return NULL;
    return pid->name;
}

int LWP_GetProcessPriority (PROCESS pid, int *priority)
{
    if (priority) *priority = pid->priority;
    return LWP_SUCCESS;
}

void LWP_SetLog(FILE *file, int level)
{
    lwp_logfile  = file;
    lwp_loglevel = level;
}

/* silly function, is already covered by LWP_CurrentProcess */
PROCESS LWP_ThisProcess(void)
{
    PROCESS pid;
    int     err;
    err = LWP_CurrentProcess(&pid);
    return (err ? NULL : pid);
}

int LWP_StackUsed (PROCESS pid, int *max, int *used)
{
    /* stack accounting is not implemented in this pthreads port */
    if (max)  *max  = 0;
    if (used) *used = 0;
    return LWP_SUCCESS;
}

int LWP_Index()            { return 0; }
int LWP_HighestIndex()     { return 0; }
void LWP_UnProtectStacks() { return; } /* only available for newlwp */
void LWP_ProtectStacks()   { return; }