/* libguestfs
* Copyright (C) 2012 Red Hat Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* Regression test for RHBZ#790721.
*
* This bug involves locking issues when building the appliance in
* parallel from multiple threads in the same process. We use a read
* lock on the 'checksum' file, and it turns out this causes two
* problems: (1) locks don't have any effect on threads in the same
* process, and (2) because the PID is identical in different threads,
* the file we are trying to overwrite has the same name.
*
* To test this we want to create the appliance repeatedly from
* multiple threads, but we don't really care about launching the full
* qemu (a waste of time and memory for this test). Therefore replace
* qemu with a fake process and just look for the linking error.
*/
#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <error.h>
#include <pthread.h>
#include "guestfs.h"
#include "guestfs-utils.h"
#include "getprogname.h"
/* Number of worker threads running the test. */
#define NR_THREADS 20
static pthread_barrier_t barrier;
static void *start_thread (void *);
/* Program entry point.
 *
 * Skips the test (exit 77) unless the "direct" backend is in use,
 * then launches NR_THREADS worker threads which each try to build the
 * appliance in parallel.  Exits with failure if any worker reported
 * an error.
 */
int
main (int argc, char *argv[])
{
  pthread_t thread[NR_THREADS];
  int data[NR_THREADS];
  int i, r, errors;
  guestfs_h *g;
  char *backend;

  /* Test is only meaningful if the backend "direct" is used. */
  g = guestfs_create ();
  if (!g)
    error (EXIT_FAILURE, errno, "guestfs_create");
  backend = guestfs_get_backend (g);
  if (backend == NULL) {
    guestfs_close (g);
    exit (EXIT_FAILURE);
  }
  if (STRNEQ (backend, "direct")) {
    fprintf (stderr, "%s: test skipped because backend isn't 'direct'.\n",
             getprogname ());
    free (backend);
    guestfs_close (g);
    exit (77);                  /* automake convention for SKIP */
  }
  free (backend);
  guestfs_close (g);

  /* Ensure error messages are not translated. */
  setenv ("LC_ALL", "C", 1);

  /* Check the result: pthread_barrier_init can fail (EAGAIN/ENOMEM),
   * and waiting on an uninitialized barrier is undefined behaviour.
   */
  r = pthread_barrier_init (&barrier, NULL, NR_THREADS);
  if (r != 0)
    error (EXIT_FAILURE, r, "pthread_barrier_init");

  /* Create the other threads which will set up their own libguestfs
   * handle then wait at a barrier before launching.
   */
  for (i = 0; i < NR_THREADS; ++i) {
    data[i] = i;
    r = pthread_create (&thread[i], NULL, start_thread, &data[i]);
    if (r != 0)
      error (EXIT_FAILURE, r, "pthread_create");
  }

  /* Wait for the threads to exit, counting how many reported failure
   * (each thread returns a pointer to its data[i] slot, set to -1 on
   * error or 0 on success).
   */
  errors = 0;
  for (i = 0; i < NR_THREADS; ++i) {
    int *ret;

    r = pthread_join (thread[i], (void **) &ret);
    if (r != 0)
      error (EXIT_FAILURE, r, "pthread_join");
    if (*ret == -1)
      errors++;
  }

  /* All waiters have returned, so the barrier may now be destroyed. */
  pthread_barrier_destroy (&barrier);

  exit (errors == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
}
/* Worker thread body.
 *
 * 'vi' points at this thread's slot in main's data[] array; on entry
 * it holds the thread index, and before exiting we overwrite it with
 * the result (0 = success, -1 = failure) and return it via
 * pthread_exit so main can inspect it after pthread_join.
 *
 * Fix over the original: every error path after guestfs_create now
 * goes through 'out' so the handle is always closed, instead of
 * leaking it on failure.
 */
static void *
start_thread (void *vi)
{
  guestfs_h *g;
  int r, thread_id = *(int *)vi;
  const char *error;

  /* Assume failure until the whole sequence succeeds. */
  *(int *)vi = -1;

  g = guestfs_create ();
  if (g == NULL) {
    perror ("guestfs_create");
    pthread_exit (vi);
  }

  if (guestfs_add_drive_opts (g, "/dev/null",
                              GUESTFS_ADD_DRIVE_OPTS_FORMAT, "raw",
                              GUESTFS_ADD_DRIVE_OPTS_READONLY, 1,
                              -1) == -1)
    goto out;

  /* Fake out hypervisor. */
  if (guestfs_set_hv (g, TOOL_TRUE) == -1)
    goto out;

  /* Wait for the other threads to finish starting up.
   *
   * NOTE(review): if a sibling thread fails before reaching this
   * barrier, the remaining threads block here forever (the barrier
   * was initialized for NR_THREADS waiters).  Pre-existing hazard,
   * left unchanged — confirm whether a test timeout covers it.
   */
  r = pthread_barrier_wait (&barrier);
  if (r != 0 && r != PTHREAD_BARRIER_SERIAL_THREAD) {
    fprintf (stderr, "pthread_barrier_wait: [thread %d]: %s\n",
             thread_id, strerror (r));
    goto out;
  }

  /* Launch the handle.  Because of the faked out qemu, we expect this
   * will fail with "child process died unexpectedly".  We are
   * interested in other failures.
   */
  guestfs_push_error_handler (g, NULL, NULL);
  r = guestfs_launch (g);
  error = guestfs_last_error (g);

  if (r == 0) {                 /* This should NOT happen. */
    fprintf (stderr, "rhbz790721: [thread %d]: "
             "strangeness in test: expected launch to fail, but it didn't!\n",
             thread_id);
    goto out;
  }

  if (error == NULL) {          /* This also should NOT happen. */
    fprintf (stderr, "rhbz790721: [thread %d]: "
             "strangeness in test: no error message!\n",
             thread_id);
    goto out;
  }

  /* The error message should match the one printed by
   * guestfs_int_launch_failed_error.  If not, it indicates a bug/race
   * in the appliance building code which is what this regression test
   * is designed to spot.  (The error string belongs to the handle, so
   * test it before guestfs_close.)
   */
  if (strstr (error, "guestfs_launch failed") == NULL) {
    fprintf (stderr, "rhbz790721: [thread %d]: error: %s\n", thread_id, error);
    goto out;
  }

  guestfs_pop_error_handler (g);
  *(int *)vi = 0;               /* success */

 out:
  /* Always close the handle; previously the error paths leaked it. */
  guestfs_close (g);
  pthread_exit (vi);
}