/*
* Copyright (c) 2004-2009 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2010-2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2010 Oracle and/or its affiliates. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*
* Simple routine to expose three things to the MPI process:
*
* 1. What processor(s) Open MPI bound this process to
* 2. What processor(s) this process is bound to
* 3. What processor(s) exist on this host
*
* Note that 1 and 2 may be different!
*/
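/*
 * Illustrative usage sketch (not part of this translation unit): an MPI
 * application built against an Open MPI that has this extension enabled
 * (and that includes the extension header, e.g. <mpi-ext.h>) might call
 * the routine roughly as follows.  Variable names here are hypothetical.
 *
 *   char ompi_bound[OMPI_AFFINITY_STRING_MAX];
 *   char current_binding[OMPI_AFFINITY_STRING_MAX];
 *   char exists[OMPI_AFFINITY_STRING_MAX];
 *
 *   MPI_Init(&argc, &argv);
 *   OMPI_Affinity_str(OMPI_AFFINITY_RSRC_STRING_FMT,
 *                     ompi_bound, current_binding, exists);
 *   printf("Bound by Open MPI to: %s\n", ompi_bound);
 *   printf("Currently bound to:   %s\n", current_binding);
 *   printf("Processors on host:   %s\n", exists);
 *   MPI_Finalize();
 */
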
#include "ompi_config.h"
#include <stdio.h>
#include <string.h>
#include "opal/mca/paffinity/base/base.h"
#include "ompi/communicator/communicator.h"
#include "ompi/errhandler/errhandler.h"
#include "ompi/mpi/c/bindings.h"
#include "ompi/mpiext/affinity/mpiext_affinity_c.h"
static const char FUNC_NAME[] = "OMPI_Affinity";
static int get_rsrc_ompi_bound(char str[OMPI_AFFINITY_STRING_MAX]);
static int get_rsrc_current_binding(char str[OMPI_AFFINITY_STRING_MAX]);
static int get_rsrc_exists(char str[OMPI_AFFINITY_STRING_MAX]);
static int get_layout_ompi_bound(char str[OMPI_AFFINITY_STRING_MAX]);
static int get_layout_current_binding(char str[OMPI_AFFINITY_STRING_MAX]);
static int get_layout_exists(char str[OMPI_AFFINITY_STRING_MAX]);
int OMPI_Affinity_str(ompi_affinity_fmt_t fmt_type,
                      char ompi_bound[OMPI_AFFINITY_STRING_MAX],
                      char current_binding[OMPI_AFFINITY_STRING_MAX],
                      char exists[OMPI_AFFINITY_STRING_MAX])
{
    int ret;

    /* Clear the output buffers.  Note that array-typed parameters decay
       to pointers, so sizeof(ompi_bound) would only give the size of a
       pointer; use the real buffer length instead. */
    memset(ompi_bound, 0, OMPI_AFFINITY_STRING_MAX);
    memset(current_binding, 0, OMPI_AFFINITY_STRING_MAX);

    switch (fmt_type) {
    case OMPI_AFFINITY_RSRC_STRING_FMT:
        if (OPAL_SUCCESS != (ret = get_rsrc_ompi_bound(ompi_bound)) ||
            OPAL_SUCCESS != (ret = get_rsrc_current_binding(current_binding)) ||
            OPAL_SUCCESS != (ret = get_rsrc_exists(exists))) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, ret, FUNC_NAME);
        }
        break;
    case OMPI_AFFINITY_LAYOUT_FMT:
        if (OPAL_SUCCESS != (ret = get_layout_ompi_bound(ompi_bound)) ||
            OPAL_SUCCESS != (ret = get_layout_current_binding(current_binding)) ||
            OPAL_SUCCESS != (ret = get_layout_exists(exists))) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, ret, FUNC_NAME);
        }
        break;
    default:
        return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG, FUNC_NAME);
    }

    return MPI_SUCCESS;
}

static int get_rsrc_ompi_bound(char str[OMPI_AFFINITY_STRING_MAX])
{
    int ret;
    opal_paffinity_base_cpu_set_t cset;

    /* If OMPI did not bind, indicate that */
    if (!opal_paffinity_base_bound) {
        const char tmp[] = "Open MPI did not bind this process";
        strncpy(str, tmp, OMPI_AFFINITY_STRING_MAX - 1);
        return OPAL_SUCCESS;
    }

    /* Find out what OMPI bound us to and prettyprint it */
    ret = opal_paffinity_base_parse_binding(opal_paffinity_base_applied_binding,
                                            &cset);
    if (OPAL_SUCCESS != ret) {
        return ret;
    }

    return opal_paffinity_base_cset2str(str, OMPI_AFFINITY_STRING_MAX, &cset);
}

static int get_rsrc_current_binding(char str[OMPI_AFFINITY_STRING_MAX])
{
    int ret, flag;
    opal_paffinity_base_cpu_set_t cset;

    /* Get our binding */
    ret = opal_paffinity_base_get(&cset);
    if (OPAL_SUCCESS != ret) {
        return ret;
    }

    /* Are we bound anywhere? */
    OPAL_PAFFINITY_PROCESS_IS_BOUND(cset, &flag);
    if (!flag) {
        const char tmp[] = "Not bound (or bound to all available processors)";
        strncat(str, tmp, OMPI_AFFINITY_STRING_MAX - 1);
        return OPAL_SUCCESS;
    }

    return opal_paffinity_base_cset2str(str, OMPI_AFFINITY_STRING_MAX, &cset);
}

/* Prettyprint a list of all available processors */
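/* For example, on a hypothetical host with two quad-core sockets the
   resulting string would look like:
   "socket 0 has 4 cores, socket 1 has 4 cores" */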
static int get_rsrc_exists(char str[OMPI_AFFINITY_STRING_MAX])
{
    int ret, i, num_sockets, num_cores;
    char tmp[BUFSIZ];
    const int stmp = sizeof(tmp) - 1;

    str[0] = tmp[stmp] = '\0';

    /* Loop over the number of sockets in this machine */
    ret = opal_paffinity_base_get_socket_info(&num_sockets);
    if (OPAL_SUCCESS != ret) {
        return ret;
    }
    for (i = 0; i < num_sockets; ++i) {
        if (i > 0) {
            /* Subtract 1 from the remaining space so that strncat()
               always has room for the terminating NUL */
            strncat(str, ", ", OMPI_AFFINITY_STRING_MAX - strlen(str) - 1);
        }
        snprintf(tmp, stmp, "socket %d has ", i);
        strncat(str, tmp, OMPI_AFFINITY_STRING_MAX - strlen(str) - 1);

        /* Loop over the number of cores in this socket */
        ret = opal_paffinity_base_get_core_info(i, &num_cores);
        if (OPAL_SUCCESS != ret) {
            return ret;
        }
        if (1 == num_cores) {
            strncat(str, "1 core", OMPI_AFFINITY_STRING_MAX - strlen(str) - 1);
        } else {
            snprintf(tmp, stmp, "%d cores", num_cores);
            strncat(str, tmp, OMPI_AFFINITY_STRING_MAX - strlen(str) - 1);
        }
    }

    return OPAL_SUCCESS;
}

static int get_layout_ompi_bound(char str[OMPI_AFFINITY_STRING_MAX])
{
    int ret;
    opal_paffinity_base_cpu_set_t cset;

    /* If OMPI did not bind, indicate that */
    if (!opal_paffinity_base_bound) {
        const char tmp[] = "Open MPI did not bind this process";
        strncpy(str, tmp, OMPI_AFFINITY_STRING_MAX - 1);
        return OPAL_SUCCESS;
    }

    /* Find out what OMPI bound us to and prettyprint it */
    ret = opal_paffinity_base_parse_binding(opal_paffinity_base_applied_binding,
                                            &cset);
    if (OPAL_SUCCESS != ret) {
        return ret;
    }

    return opal_paffinity_base_cset2mapstr(str, OMPI_AFFINITY_STRING_MAX, &cset);
}

static int get_layout_current_binding(char str[OMPI_AFFINITY_STRING_MAX])
{
    int ret = OPAL_SUCCESS, flag;
    opal_paffinity_base_cpu_set_t cset;

    /* Get our binding */
    ret = opal_paffinity_base_get(&cset);
    if (OPAL_SUCCESS != ret) {
        return ret;
    }

    /* Are we bound anywhere? */
    OPAL_PAFFINITY_PROCESS_IS_BOUND(cset, &flag);
    if (!flag) {
        const char tmp[] = "Not bound (or bound to all available processors)";
        strncat(str, tmp, OMPI_AFFINITY_STRING_MAX - 1);
        return OPAL_SUCCESS;
    }

    return opal_paffinity_base_cset2mapstr(str, OMPI_AFFINITY_STRING_MAX, &cset);
}

/* Prettyprint a list of all available processors in layout format */
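/* For example, on a hypothetical host with two dual-core sockets the
   resulting string would look like "[. .][. .]" */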
static int get_layout_exists(char str[OMPI_AFFINITY_STRING_MAX])
{
    int ret, i, j, num_sockets, num_cores;
    int len = OMPI_AFFINITY_STRING_MAX;

    str[0] = '\0';

    /* Loop over the number of sockets in this machine */
    ret = opal_paffinity_base_get_socket_info(&num_sockets);
    if (OPAL_SUCCESS != ret) {
        return ret;
    }
    for (i = 0; i < num_sockets; ++i) {
        /* Subtract 1 from the remaining space so that strncat()
           always has room for the terminating NUL */
        strncat(str, "[", len - strlen(str) - 1);

        /* Loop over the number of cores in this socket */
        ret = opal_paffinity_base_get_core_info(i, &num_cores);
        if (OPAL_SUCCESS != ret) {
            return ret;
        }
        for (j = 0; j < num_cores; j++) {
            if (0 < j) {
                /* Add a space after the first core is printed */
                strncat(str, " ", len - strlen(str) - 1);
            }
            /* Mark that this core exists */
            strncat(str, ".", len - strlen(str) - 1);
        }
        strncat(str, "]", len - strlen(str) - 1);
    }

    return OPAL_SUCCESS;
}