File: pmix_cray_component.c

package info (click to toggle)
openmpi 4.1.0-10
  • links: PTS, VCS
  • area: main
  • in suites: bullseye
  • size: 126,560 kB
  • sloc: ansic: 685,465; makefile: 42,952; f90: 19,220; sh: 7,002; java: 6,360; perl: 3,524; cpp: 2,227; python: 1,350; lex: 989; fortran: 61; tcl: 12
file content (144 lines) | stat: -rw-r--r-- 4,627 bytes parent folder | download | duplicates (3)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
 * Copyright (c) 2014      Intel, Inc.  All rights reserved.
 * Copyright (c) 2015-2016 Los Alamos National Security, LLC. All rights
 *                         reserved.
 * Copyright (c) 2016 Cisco Systems, Inc.  All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 *
 * These symbols are in a file by themselves to provide nice linker
 * semantics.  Since linkers generally pull in symbols by object
 * files, keeping these symbols as the only symbols in this file
 * prevents utility programs such as "ompi_info" from having to import
 * entire components just to query their version and parameters.
 */

#include "opal_config.h"

#include "opal/constants.h"
#include "opal/mca/pmix/pmix.h"
#include "opal/util/show_help.h"
#include "pmix_cray.h"
#include <sys/syscall.h>
#include <pmi.h>

/*
 * Public string showing the pmix cray component version number
 */
const char *opal_pmix_cray_component_version_string =
    "OPAL cray pmix MCA component version " OPAL_VERSION;

/*
 * Local function
 */
static int pmix_cray_component_open(void);
static int pmix_cray_component_query(mca_base_module_t **module, int *priority);
static int pmix_cray_component_close(void);


/*
 * Instantiate the public struct with all of our public information
 * and pointers to our public functions in it
 */

opal_pmix_cray_component_t mca_pmix_cray_component = {
    {
    /* First, the mca_component_t struct containing meta information
       about the component itself */

        .base_version = {
            /* Indicate that we are a pmix v2.0.0 component (which also
               implies a specific MCA version) */

            OPAL_PMIX_BASE_VERSION_2_0_0,

            /* Component name and version: "cray", versioned with the
               enclosing OPAL release */

            .mca_component_name = "cray",
            MCA_BASE_MAKE_VERSION(component, OPAL_MAJOR_VERSION, OPAL_MINOR_VERSION,
                                  OPAL_RELEASE_VERSION),

            /* Component open and close functions (defined below in this
               file); query selects/deselects this component at runtime */

            .mca_open_component = pmix_cray_component_open,
            .mca_close_component = pmix_cray_component_close,
            .mca_query_component = pmix_cray_component_query,
        },
        /* Next the MCA v1.0.0 component meta data */
        .base_data = {
            /* The component is checkpoint ready */
            MCA_BASE_METADATA_PARAM_CHECKPOINT
        }
    },
    /* Key-value caches start empty; presumably populated by the cray
       pmix module at runtime — definition not visible in this file */
    .cache_local = NULL,
    .cache_global = NULL,
};

/*
 * Component open: sanity-check the runtime environment before the
 * component is used.
 *
 * Cray PMI's default behavior is to fork() from a library constructor,
 * and that fork breaks libevent, which Open MPI relies on heavily.
 * Open MPI will not launch correctly on Cray XE/XC systems when ALL of
 * the following hold:
 *
 *   1) direct launch using aprun, and
 *   2) the PMI_NO_FORK env. variable is not set, and
 *   3) --disable-dlopen was not used at configure time
 *
 * Under SLURM, PMI_NO_FORK is always set, so checking for that variable
 * covers conditions 1) and 2) at once.
 *
 * Returns OPAL_SUCCESS; aborts the process outright when the broken
 * configuration is detected (continuing would hang/crash later anyway).
 */
static int pmix_cray_component_open(void)
{
#if OPAL_ENABLE_DLOPEN_SUPPORT
    const char *no_fork = getenv("PMI_NO_FORK");

    if (NULL == no_fork) {
        /* Unsupported aprun launch: tell the user why, then bail out */
        opal_show_help("help-pmix-cray.txt", "aprun-not-supported", true);
        exit(-1);
    }
#endif
    return OPAL_SUCCESS;
}

/*
 * Component query: decide whether the cray pmix component should be
 * selected, and at what priority.
 *
 * Selection requires (a) running inside a Cray PAGG container (the
 * /proc/job file exists), (b) the user has not set OMPI_NO_USE_CRAY_PMI
 * to opt out, and (c) the current task is an application task (its
 * /proc/self/task/<tid>/task_is_app file exists) rather than, e.g., a
 * process on a MOM node.
 *
 * @param[out] module   set to the cray pmix module on success, NULL otherwise
 * @param[out] priority set to 90 on success, 0 otherwise
 * @return OPAL_SUCCESS when the component is usable, OPAL_ERROR otherwise
 *
 * Fixes vs. the original:
 *  - `rc` was read uninitialized when /proc/job existed but the
 *    task_is_app file did not (and *priority / *module were left unset
 *    on that path).
 *  - `fd` leaked when /proc/job opened successfully but
 *    OMPI_NO_USE_CRAY_PMI was set (the first branch skipped fclose).
 */
static int pmix_cray_component_query(mca_base_module_t **module, int *priority)
{
    int rc = OPAL_ERROR;
    const char proc_job_file[] = "/proc/job";
    FILE *fd = NULL, *fd_task_is_app = NULL;
    char task_is_app_fname[PATH_MAX];

    /* Default to "disqualified" so every exit path is well-defined */
    *priority = 0;
    *module = NULL;

    /* disqualify ourselves if not running in a Cray PAGG container, or we
       were launched by the orte/mpirun launcher */
    fd = fopen(proc_job_file, "r");
    if ((fd != NULL) && (getenv("OMPI_NO_USE_CRAY_PMI") == NULL)) {
        /* gettid has no glibc wrapper on older systems; go via syscall() */
        snprintf(task_is_app_fname, sizeof(task_is_app_fname),
                 "/proc/self/task/%ld/task_is_app", syscall(SYS_gettid));
        fd_task_is_app = fopen(task_is_app_fname, "r");
        if (fd_task_is_app != NULL) {   /* okay we're in a PAGG container,
                                           and we are an app task (not just a process
                                           running on a mom node, for example),
                                           so we should give cray pmi a shot. */
            *priority = 90;
            *module = (mca_base_module_t *)&opal_pmix_cray_module;
            fclose(fd_task_is_app);
            rc = OPAL_SUCCESS;
        }
    }

    if (fd != NULL) {   /* fclose(NULL) is undefined behavior; guard it */
        fclose(fd);
    }

    return rc;
}

/*
 * Component close: the component holds no resources of its own
 * (the caches in the component struct are managed elsewhere), so
 * there is nothing to release here.
 */
static int pmix_cray_component_close(void)
{
    return OPAL_SUCCESS;
}