File: common_sm_rml.c

package: openmpi 1.6.5-9.1+deb8u1
file content: 154 lines, 5,735 bytes
/*
 * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2011 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2009 High Performance Computing Center Stuttgart,
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2007      Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2010-2012 Los Alamos National Security, LLC.
 *                         All rights reserved.
 * $COPYRIGHT$
 *
 * Additional copyrights may follow
 *
 * $HEADER$
 */

#include "ompi_config.h"

#include "opal/types.h"
#include "opal/dss/dss.h"

#include "orte/mca/rml/rml.h"
#include "orte/util/proc_info.h"
#include "orte/util/name_fns.h"
#include "orte/util/show_help.h"
#include "orte/runtime/orte_globals.h"
#include "orte/mca/errmgr/errmgr.h"

#include "ompi/constants.h"
#include "ompi/mca/dpm/dpm.h"
#include "ompi/mca/common/sm/common_sm_rml.h"

/* for debug purposes only */
#include <assert.h>
#ifdef HAVE_STRING_H
#include <string.h>
#endif

/* ////////////////////////////////////////////////////////////////////////// */
/**
 * this routine assumes that the procs array is in the following state:
 *     o all the local procs at the beginning.
 *     o procs[0] is the lowest named process.
 */
int
mca_common_sm_rml_info_bcast(opal_shmem_ds_t *out_ds_buf,
                             ompi_proc_t **procs,
                             size_t num_local_procs,
                             int tag,
                             bool proc0,
                             char *msg_id_str)
{
    int rc = OMPI_SUCCESS, tmprc;
    char *msg_id_str_to_tx = NULL;
    opal_buffer_t *buffer = NULL;

    if (NULL == (buffer = OBJ_NEW(opal_buffer_t))) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }
    /* figure out if i am the root proc in the group.  if i am, bcast the
     * message to the rest of the local procs. */
    if (proc0) {
        size_t p;
        /* pack the data that we are going to send: first the message id
         * string, then the shmem_ds buf. note that msg_id_str is used only for
         * verifying "expected" common sm usage.  see "RML Messaging and Our
         * Assumptions" note in common_sm.c for more details. */
        tmprc = opal_dss.pack(buffer, &msg_id_str, 1, OPAL_STRING);
        if (OPAL_SUCCESS != tmprc) {
            ORTE_ERROR_LOG(ORTE_ERR_PACK_FAILURE);
            rc = OMPI_ERR_PACK_FAILURE;
            goto out;
        }
        tmprc = opal_dss.pack(buffer, out_ds_buf,
                              (int32_t)sizeof(opal_shmem_ds_t),
                              OPAL_BYTE);
        if (OPAL_SUCCESS != tmprc) {
            ORTE_ERROR_LOG(ORTE_ERR_PACK_FAILURE);
            rc = OMPI_ERR_PACK_FAILURE;
            goto out;
        }
        opal_progress_event_users_increment();
        /* the first num_local_procs entries are the local procs; start at 1 to
         * skip procs[0], which is this (the root) process */
        for (p = 1; p < num_local_procs; ++p) {
            /* a potential future optimization: use non-blocking routines */
            tmprc = orte_rml.send_buffer(&(procs[p]->proc_name), buffer, tag,
                                         0);
            if (0 > tmprc) {
                ORTE_ERROR_LOG(tmprc);
                opal_progress_event_users_decrement();
                rc = OMPI_ERROR;
                goto out;
            }
        }
        opal_progress_event_users_decrement();
    }
    /* i am NOT the root proc */
    else {
        int32_t num_vals;
        /* bump up the libevent polling frequency while we're in this RML recv,
         * just to ensure we're checking libevent frequently. */
        opal_progress_event_users_increment();
        tmprc = orte_rml.recv_buffer(&(procs[0]->proc_name), buffer, tag, 0);
        opal_progress_event_users_decrement();
        if (0 > tmprc) {
            ORTE_ERROR_LOG(tmprc);
            rc = OMPI_ERROR;
            goto out;
        }
        /* unpack the buffer */
        num_vals = 1;
        tmprc = opal_dss.unpack(buffer, &msg_id_str_to_tx, &num_vals,
                                OPAL_STRING);
        if (0 > tmprc) {
            ORTE_ERROR_LOG(ORTE_ERR_UNPACK_FAILURE);
            rc = OMPI_ERROR;
            goto out;
        }
        num_vals = (int32_t)sizeof(opal_shmem_ds_t);
        tmprc = opal_dss.unpack(buffer, out_ds_buf, &num_vals, OPAL_BYTE);
        if (0 > tmprc) {
            ORTE_ERROR_LOG(ORTE_ERR_UNPACK_FAILURE);
            rc = OMPI_ERROR;
            goto out;
        }
        /* the message better be for me.  if not, freak out because this
         * probably means that common sm is being used in a new way that lies
         * outside of our current scope of assumptions. see "RML Messaging and
         * Our Assumptions" note in common_sm.c */
        if (0 != strcmp(msg_id_str_to_tx, msg_id_str)) {
            orte_show_help("help-mpi-common-sm.txt", "unexpected message id",
                           true, orte_process_info.nodename,
                           msg_id_str, msg_id_str_to_tx);
            rc = OMPI_ERROR;
            /* here for extra debug info only */
            assert(0);
            goto out;
        }
    }

out:
    if (NULL != msg_id_str_to_tx) {
        free(msg_id_str_to_tx);
        msg_id_str_to_tx = NULL;
    }
    OBJ_RELEASE(buffer);
    return rc;
}
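
For context, below is a minimal caller sketch showing how this routine is typically driven: the lowest-named local process (proc0) fills in an opal_shmem_ds_t describing a freshly created segment and broadcasts it, while every other local process receives a copy. The wrapper name example_attach_segment, its parameters, and the "sm-example" message id string are hypothetical illustrations, not part of OpenMPI; the real caller lives in common_sm.c.

/* hypothetical caller sketch -- not part of common_sm_rml.c */
static int
example_attach_segment(opal_shmem_ds_t *seg_ds,    /* filled in by proc0 before the call */
                       ompi_proc_t **sorted_procs, /* local procs first, procs[0] lowest-named */
                       size_t num_local_procs,
                       int rml_tag,                /* same RML tag on every local proc */
                       bool i_am_proc0)
{
    /* every participant must pass the same message id string, since the
     * receive side strcmp()s it against what proc0 packed. */
    int rc = mca_common_sm_rml_info_bcast(seg_ds, sorted_procs, num_local_procs,
                                          rml_tag, i_am_proc0, "sm-example");
    if (OMPI_SUCCESS != rc) {
        return rc;
    }
    /* on success, every local proc holds the same shmem descriptor in
     * *seg_ds and can attach to the backing segment. */
    return OMPI_SUCCESS;
}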