File: scheduler.c

package info (click to toggle)
pacemaker 3.0.1-1
  • links: PTS, VCS
  • area: main
  • in suites: forky, sid
  • size: 68,576 kB
  • sloc: xml: 160,564; ansic: 143,744; python: 5,670; sh: 2,969; makefile: 2,426
file content (416 lines) | stat: -rw-r--r-- 12,605 bytes parent folder | download
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
/*
 * Copyright 2004-2025 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU Lesser General Public License
 * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
 */

#include <crm_internal.h>

#include <stdint.h>             // uint32_t
#include <errno.h>              // EINVAL
#include <glib.h>               // gboolean, FALSE, etc.
#include <libxml/tree.h>        // xmlNode

#include <crm/common/scheduler.h>

// Scheduler warning state shared across the library (not read in this file);
// presumably a bitmask of already-logged warning types — TODO confirm
uint32_t pcmk__warnings = 0;

/*!
 * \brief Create a new object to hold scheduler data
 *
 * \return New, initialized scheduler data, or NULL on memory error
 * \note Only pcmk_scheduler_t objects created with this function (as opposed
 *       to statically declared or directly allocated) should be used with the
 *       functions in this library, to allow for future extensions to the
 *       data type. The caller is responsible for freeing the memory with
 *       pcmk_free_scheduler() when the instance is no longer needed.
 */
pcmk_scheduler_t *
pcmk_new_scheduler(void)
{
    pcmk_scheduler_t *new_sched = calloc(1, sizeof(pcmk_scheduler_t));

    if (new_sched != NULL) {
        new_sched->priv = calloc(1, sizeof(pcmk__scheduler_private_t));
        if (new_sched->priv != NULL) {
            pcmk__set_scheduler_defaults(new_sched);
            return new_sched;
        }
        // Private data allocation failed; don't leak the outer object
        free(new_sched);
    }
    return NULL;
}

/*!
 * \internal
 * \brief Set non-zero default values in scheduler data
 *
 * \param[in,out] scheduler  Scheduler data to modify
 *
 * \note Values that default to NULL or 0 will remain unchanged
 */
void
pcmk__set_scheduler_defaults(pcmk_scheduler_t *scheduler)
{
    pcmk__assert(scheduler != NULL);

    // Start from a clean slate, then turn on the flags that default to true
    scheduler->flags = 0U;
    pcmk__set_scheduler_flags(scheduler,
                              pcmk__sched_symmetric_cluster
                              |pcmk__sched_stop_removed_resources
                              |pcmk__sched_cancel_removed_actions);
#if PCMK__CONCURRENT_FENCING_DEFAULT_TRUE
    // Concurrent fencing defaults on only when built that way
    pcmk__set_scheduler_flags(scheduler, pcmk__sched_concurrent_fencing);
#endif

    scheduler->no_quorum_policy = pcmk_no_quorum_stop;
    scheduler->priv->next_action_id = 1;
    scheduler->priv->next_ordering_id = 1;
}

/*!
 * \brief Reset scheduler data to defaults
 *
 * Free scheduler data except the local node name and output object, and reset
 * all other values to defaults, so the data is suitable for rerunning status
 *
 * \param[in,out] scheduler  Scheduler data to reset
 */
void
pcmk_reset_scheduler(pcmk_scheduler_t *scheduler)
{
    if (scheduler == NULL) {
        return;
    }

    /* Be careful about the order of freeing members. Many contain references to
     * other members that will become dangling if those members are freed first.
     * For example, the node name and ID of Pacemaker Remote nodes are pointers
     * into resource objects. Ensure that earlier-freed members are not needed
     * by any of the free functions for later-freed members.
     */

    // Clear the DC reference first (presumably points into nodes, freed next)
    scheduler->dc_node = NULL;

    g_list_free_full(scheduler->nodes, pcmk__free_node);
    scheduler->nodes = NULL;

    // Do not reset local_node_name or out

    crm_time_free(scheduler->priv->now);
    scheduler->priv->now = NULL;

    if (scheduler->priv->options != NULL) {
        g_hash_table_destroy(scheduler->priv->options);
        scheduler->priv->options = NULL;
    }

    /* These are only cleared, not freed here — presumably they point into
     * options or input, which own the memory (TODO confirm)
     */
    scheduler->priv->fence_action = NULL;
    scheduler->priv->fence_timeout_ms = 0U;
    scheduler->priv->priority_fencing_ms = 0U;
    scheduler->priv->shutdown_lock_ms = 0U;
    scheduler->priv->node_pending_ms = 0U;
    scheduler->priv->placement_strategy = NULL;
    scheduler->priv->rsc_defaults = NULL;
    scheduler->priv->op_defaults = NULL;

    g_list_free_full(scheduler->priv->resources, pcmk__free_resource);
    scheduler->priv->resources = NULL;

    if (scheduler->priv->templates != NULL) {
        g_hash_table_destroy(scheduler->priv->templates);
        scheduler->priv->templates = NULL;
    }
    if (scheduler->priv->tags != NULL) {
        g_hash_table_destroy(scheduler->priv->tags);
        scheduler->priv->tags = NULL;
    }

    g_list_free_full(scheduler->priv->actions, pcmk__free_action);
    scheduler->priv->actions = NULL;

    if (scheduler->priv->singletons != NULL) {
        g_hash_table_destroy(scheduler->priv->singletons);
        scheduler->priv->singletons = NULL;
    }

    pcmk__xml_free(scheduler->priv->failed);
    scheduler->priv->failed = NULL;

    pcmk__free_param_checks(scheduler);

    // Shallow free: entries are not owned by this list
    g_list_free(scheduler->priv->stop_needed);
    scheduler->priv->stop_needed = NULL;

    g_list_free_full(scheduler->priv->location_constraints,
                     pcmk__free_location);
    scheduler->priv->location_constraints = NULL;

    // Colocation entries have no nested allocations, so plain free() suffices
    g_list_free_full(scheduler->priv->colocation_constraints, free);
    scheduler->priv->colocation_constraints = NULL;

    g_list_free_full(scheduler->priv->ordering_constraints,
                     pcmk__free_action_relation);
    scheduler->priv->ordering_constraints = NULL;

    if (scheduler->priv->ticket_constraints != NULL) {
        g_hash_table_destroy(scheduler->priv->ticket_constraints);
        scheduler->priv->ticket_constraints = NULL;
    }

    // Reset per-run counters
    scheduler->priv->ninstances = 0;
    scheduler->priv->blocked_resources = 0;
    scheduler->priv->disabled_resources = 0;
    scheduler->priv->recheck_by = 0;

    pcmk__xml_free(scheduler->priv->graph);
    scheduler->priv->graph = NULL;

    scheduler->priv->synapse_count = 0;

    pcmk__xml_free(scheduler->input);
    scheduler->input = NULL;

    // Restore the non-zero defaults (flags, quorum policy, ID counters)
    pcmk__set_scheduler_defaults(scheduler);

    // Clear global config-issue state so the next run starts clean
    pcmk__config_has_error = false;
    pcmk__config_has_warning = false;
}

/*!
 * \brief Free scheduler data
 *
 * \param[in,out] scheduler  Scheduler data to free
 */
void
pcmk_free_scheduler(pcmk_scheduler_t *scheduler)
{
    if (scheduler == NULL) {
        return;
    }

    // Release all members first; the reset keeps local_node_name, freed here
    pcmk_reset_scheduler(scheduler);
    free(scheduler->priv->local_node_name);
    free(scheduler->priv);
    free(scheduler);
}

/*!
 * \internal
 * \brief Get the Designated Controller node from scheduler data
 *
 * \param[in] scheduler  Scheduler data
 *
 * \return Designated Controller node from scheduler data, or NULL if none
 */
pcmk_node_t *
pcmk_get_dc(const pcmk_scheduler_t *scheduler)
{
    if (scheduler == NULL) {
        return NULL;
    }
    return scheduler->dc_node;
}

/*!
 * \internal
 * \brief Get the no quorum policy from scheduler data
 *
 * \param[in] scheduler  Scheduler data
 *
 * \return No quorum policy from scheduler data (the default,
 *         \c pcmk_no_quorum_stop, if \p scheduler is NULL)
 */
enum pe_quorum_policy
pcmk_get_no_quorum_policy(const pcmk_scheduler_t *scheduler)
{
    return (scheduler == NULL)? pcmk_no_quorum_stop
                              : scheduler->no_quorum_policy;
}

/*!
 * \internal
 * \brief Set CIB XML as scheduler input in scheduler data
 *
 * \param[out] scheduler  Scheduler data
 * \param[in]  cib        CIB XML to set as scheduler input
 *
 * \return Standard Pacemaker return code (EINVAL if \p scheduler is NULL,
 *         otherwise pcmk_rc_ok)
 * \note This will not free any previously set scheduler CIB.
 */
int
pcmk_set_scheduler_cib(pcmk_scheduler_t *scheduler, xmlNode *cib)
{
    if (scheduler != NULL) {
        scheduler->input = cib;
        return pcmk_rc_ok;
    }
    return EINVAL;
}

/*!
 * \internal
 * \brief Check whether cluster has quorum
 *
 * \param[in] scheduler  Scheduler data
 *
 * \return true if cluster has quorum, otherwise false (including when
 *         \p scheduler is NULL)
 */
bool
pcmk_has_quorum(const pcmk_scheduler_t *scheduler)
{
    return (scheduler != NULL)
           && pcmk_is_set(scheduler->flags, pcmk__sched_quorate);
}

/*!
 * \brief Find a node by name in scheduler data
 *
 * \param[in] scheduler  Scheduler data
 * \param[in] node_name  Name of node to find
 *
 * \return Node from scheduler data that matches \p node_name if any,
 *         otherwise NULL
 */
pcmk_node_t *
pcmk_find_node(const pcmk_scheduler_t *scheduler, const char *node_name)
{
    if ((scheduler != NULL) && (node_name != NULL)) {
        return pcmk__find_node_in_list(scheduler->nodes, node_name);
    }
    return NULL;
}

/*!
 * \internal
 * \brief Get scheduler data's "now" in epoch time
 *
 * \param[in,out] scheduler  Scheduler data
 *
 * \return Scheduler data's "now" as seconds since epoch (defaulting to current
 *         time)
 */
time_t
pcmk__scheduler_epoch_time(pcmk_scheduler_t *scheduler)
{
    pcmk__scheduler_private_t *priv = NULL;

    if (scheduler == NULL) {
        return time(NULL);
    }

    priv = scheduler->priv;
    if (priv->now == NULL) {
        // Lazily initialize "now" to the current wall-clock time
        crm_trace("Scheduler 'now' set to current time");
        priv->now = crm_time_new(NULL);
    }
    return crm_time_get_seconds_since_epoch(priv->now);
}

/*!
 * \internal
 * \brief Update "recheck by" time in scheduler data
 *
 * \param[in]     recheck    Epoch time when recheck should happen
 * \param[in,out] scheduler  Scheduler data
 * \param[in]     reason     What time is being updated for (for logs)
 */
void
pcmk__update_recheck_time(time_t recheck, pcmk_scheduler_t *scheduler,
                          const char *reason)
{
    pcmk__assert(scheduler != NULL);

    if (recheck <= pcmk__scheduler_epoch_time(scheduler)) {
        return; // A recheck time that is not in the future is useless
    }
    if ((scheduler->priv->recheck_by != 0)
        && (scheduler->priv->recheck_by <= recheck)) {
        return; // An equal or earlier recheck is already scheduled
    }

    scheduler->priv->recheck_by = recheck;
    crm_debug("Updated next scheduler recheck to %s for %s",
              pcmk__trim(ctime(&recheck)),
              pcmk__s(reason, "some reason"));
}

/* Fail count clearing for parameter changes normally happens when unpacking
 * history, before resources are unpacked. However, for bundles using the
 * REMOTE_CONTAINER_HACK, we can't check the conditions until after unpacking
 * the bundle, so those parameter checks are deferred using the APIs below.
 */

/* History entry to be checked later for fail count clearing
 *
 * All pointer members are borrowed references: pcmk__free_param_checks()
 * frees only the struct itself, so the referenced XML, resource, and node
 * must outlive the deferred check.
 */
struct param_check {
    const xmlNode *rsc_history; // History entry XML (borrowed)
    pcmk_resource_t *rsc;       // Resource corresponding to history entry
    pcmk_node_t *node;          // Node corresponding to history entry
    enum pcmk__check_parameters check_type; // What needs checking
};

/*!
 * \internal
 * \brief Add a deferred parameter check
 *
 * \param[in]     rsc_history  Resource history XML to check later
 * \param[in,out] rsc          Resource that history is for
 * \param[in]     node         Node that history is for
 * \param[in]     flag         What needs to be checked later
 */
void
pcmk__add_param_check(const xmlNode *rsc_history, pcmk_resource_t *rsc,
                      pcmk_node_t *node, enum pcmk__check_parameters flag)
{
    struct param_check *entry = NULL;
    pcmk_scheduler_t *scheduler = NULL;

    CRM_CHECK((rsc_history != NULL) && (rsc != NULL) && (node != NULL), return);

    crm_trace("Deferring checks of %s until after assignment",
              pcmk__xe_id(rsc_history));

    entry = pcmk__assert_alloc(1, sizeof(struct param_check));
    entry->rsc_history = rsc_history;
    entry->rsc = rsc;
    entry->node = node;
    entry->check_type = flag;

    // Prepend to the deferred-check list in the resource's scheduler data
    scheduler = rsc->priv->scheduler;
    scheduler->priv->param_check = g_list_prepend(scheduler->priv->param_check,
                                                  entry);
}

/*!
 * \internal
 * \brief Call a function for each deferred parameter check
 *
 * \param[in,out] scheduler  Scheduler data
 * \param[in]     cb         Function to be called
 */
void
pcmk__foreach_param_check(pcmk_scheduler_t *scheduler,
                          void (*cb)(pcmk_resource_t*, pcmk_node_t*,
                                     const xmlNode*,
                                     enum pcmk__check_parameters))
{
    GList *iter = NULL;

    CRM_CHECK((scheduler != NULL) && (cb != NULL), return);

    iter = scheduler->priv->param_check;
    while (iter != NULL) {
        const struct param_check *entry = iter->data;

        cb(entry->rsc, entry->node, entry->rsc_history, entry->check_type);
        iter = iter->next;
    }
}

/*!
 * \internal
 * \brief Free all deferred parameter checks
 *
 * \param[in,out] scheduler  Scheduler data (may be NULL)
 */
void
pcmk__free_param_checks(pcmk_scheduler_t *scheduler)
{
    if (scheduler == NULL) {
        return;
    }

    /* Each entry is a struct param_check allocated in pcmk__add_param_check();
     * its members are borrowed, so freeing the struct itself is sufficient.
     * The inner NULL guard from before was redundant: g_list_free_full() is
     * documented to accept a NULL list as a no-op.
     */
    g_list_free_full(scheduler->priv->param_check, free);
    scheduler->priv->param_check = NULL;
}