File: paffinity_hwloc_module.c

/*
 * Copyright (c) 2004-2008 The Trustees of Indiana University and Indiana
 *                         University Research and Technology
 *                         Corporation.  All rights reserved.
 * Copyright (c) 2004-2005 The University of Tennessee and The University
 *                         of Tennessee Research Foundation.  All rights
 *                         reserved.
 * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, 
 *                         University of Stuttgart.  All rights reserved.
 * Copyright (c) 2004-2005 The Regents of the University of California.
 *                         All rights reserved.
 * Copyright (c) 2006-2012 Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2012      Los Alamos National Security, LLC.
 *                         All rights reserved.
 *
 * $COPYRIGHT$
 * 
 * Additional copyrights may follow
 * 
 * $HEADER$
 */

/*
 * 24 May 2012
 *
 * Paffinity term          Corresponding value in this module
 * ----------------------- ----------------------------------
 * Physical CPU            hwloc logical core ID
 * Physical processor ID   hwloc logical core ID
 * Physical socket ID      hwloc logical socket ID
 * Physical core ID        hwloc logical core ID
 *
 * Logical CPU             hwloc logical core ID
 * Logical processor ID    hwloc logical core ID
 * Logical socket ID       hwloc logical socket ID
 * Logical core ID         [0, num_cores on socket)
 *
 * This mapping is done because underlying physical/OS IDs may not be
 * unique.  So we always use hwloc logical IDs, except for the case of
 * "paffinity logical core ID", in which the range is [0, num_cores on
 * socket), because that value is relative to the socket, as opposed
 * to hwloc's logical core IDs, which are in the range [0,
 * total_num_cores) and are unique across all cores.
 *
 * Also, note that the paffinity framework has no concept of PUs.  So
 * when it asks for physical processor IDs, it only makes sense to
 * return a unique core ID.  Specifically: in this module, we define
 * that physical processor IDs are hwloc logical core IDs.
 *
 * This really only has relevance for the v1.5/v1.6 branch, as the
 * trunk/v1.7 has been revamped w.r.t. paffinity, and we use hwloc
 * objects for everything.  Meaning: this whole paffinity mess goes
 * away in v1.7.  There is hope.
 */
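
/*
 * Illustrative example (hypothetical topology, not part of the
 * original mapping table above): on a machine with 2 sockets and 2
 * cores per socket, hwloc numbers the cores 0-3 across the whole
 * machine, with socket 1 holding hwloc logical cores 2 and 3.  Under
 * the mapping above:
 *
 *   Paffinity physical/logical processor ID 3  -> hwloc logical core 3
 *   Paffinity logical core ID 1 on socket 1    -> hwloc logical core 3
 *
 * i.e., paffinity logical core IDs restart at 0 on each socket, while
 * every other paffinity ID reuses hwloc's machine-wide logical
 * numbering.
 */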

#include "opal_config.h"

/* This component will only be compiled where hwloc is available, so we
   are guaranteed to have <unistd.h> and friends */
#include <stdio.h>

#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include "opal/constants.h"
#include "opal/util/output.h"
#include "opal/mca/base/mca_base_param.h"
#include "opal/mca/paffinity/paffinity.h"
#include "opal/mca/paffinity/base/base.h"
#include "paffinity_hwloc.h"
#include "opal/mca/hwloc/hwloc.h"

/*
 * Local functions
 */
static int module_init(void);
static int module_set(opal_paffinity_base_cpu_set_t cpumask);
static int module_get(opal_paffinity_base_cpu_set_t *cpumask);
static int module_map_to_processor_id(int socket, int core, int *processor_id);
static int module_map_to_socket_core(int processor_id, int *socket, int *core);
static int module_get_processor_info(int *num_processors);
static int module_get_socket_info(int *num_sockets);
static int module_get_core_info(int socket, int *num_cores);
static int module_get_physical_processor_id(int logical_processor_id);
static int module_get_physical_socket_id(int logical_socket_id);
static int module_get_physical_core_id(int physical_socket_id, 
                                       int logical_core_id);

/*
 * Local values
 */
static int core_type = HWLOC_OBJ_CORE;


/*
 * Hwloc paffinity module
 */
static const opal_paffinity_base_module_1_1_0_t loc_module = {
    /* Initialization function */
    module_init,

    /* Module function pointers */
    module_set,
    module_get,
    module_map_to_processor_id,
    module_map_to_socket_core,
    module_get_processor_info,
    module_get_socket_info,
    module_get_core_info,
    module_get_physical_processor_id,
    module_get_physical_socket_id,
    module_get_physical_core_id,
    NULL
};

/*
 * Trivial DFS traversal recursion function
 */
static hwloc_obj_t dfs_find_nth_item(hwloc_obj_t root, 
                                     hwloc_obj_type_t type, 
                                     unsigned *current,
                                     unsigned n)
{
    unsigned i;
    hwloc_obj_t ret;

    if (root->type == type) {
        if (*current == n) {
            return root;
        }
        ++(*current);
    }
    for (i = 0; i < root->arity; ++i) {
        ret = dfs_find_nth_item(root->children[i], type, current, n);
        if (NULL != ret) {
            return ret;
        }
    }

    return NULL;
}
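
/*
 * Usage sketch (hypothetical, for illustration only): to find the 3rd
 * core (zero-based index 2) underneath a given socket object:
 *
 *     unsigned count = 0;
 *     hwloc_obj_t core = dfs_find_nth_item(socket_obj, HWLOC_OBJ_CORE,
 *                                          &count, 2);
 *     // core is NULL if the socket has fewer than 3 cores
 *
 * where socket_obj is assumed to be a valid HWLOC_OBJ_SOCKET object.
 * module_get_physical_core_id() (below) uses it in exactly this way,
 * with core_type in place of HWLOC_OBJ_CORE.
 */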

/*
 * Trivial DFS traversal recursion function
 */
static int dfs_count_type(hwloc_obj_t root, hwloc_obj_type_t type)
{
    unsigned i;
    int count = 0;
    if (root->type == type) {
        ++count;
    }
    for (i = 0; i < root->arity; ++i) {
        count += dfs_count_type(root->children[i], type);
    }

    return count;
}


int opal_paffinity_hwloc_component_query(mca_base_module_t **module, 
                                         int *priority)
{
    int param;

    param = mca_base_param_find("paffinity", "hwloc", "priority");
    mca_base_param_lookup_int(param, priority);

    *module = (mca_base_module_t *)&loc_module;


    return OPAL_SUCCESS;
}


static int module_init(void)
{
    /* Note that opal_hwloc_topology has not yet been set when this
       function is called.  Nothing to do here. */

    return OPAL_SUCCESS;
}

static void check_for_cores(void)
{
    int num_cores, num_pus;
    static bool already_been_here = false;

    if (already_been_here) {
        return;
    }

    if (NULL == opal_hwloc_topology) {
        return;
    }
    already_been_here = true;

    /* Special workaround for some POWER processors that report PUs
       but not COREs (on these machines, the PUs are architecturally
       "hardware threads", but they don't share resources with other
       PUs, so they're effectively the same as cores, from OMPI's
       perspective).  If hwloc found 0 cores but did find PUs, then
       change our query term from HWLOC_OBJ_CORE to HWLOC_OBJ_PU. */
    num_cores = (int) hwloc_get_nbobjs_by_type(opal_hwloc_topology,
                                               HWLOC_OBJ_CORE);
    num_pus = (int) hwloc_get_nbobjs_by_type(opal_hwloc_topology,
                                             HWLOC_OBJ_PU);
    if (0 == num_cores && num_pus > 0) {
        core_type = HWLOC_OBJ_PU;
    }
}


/*
 * Bind this process to a set of PHYSICAL processor IDs.
 *
 * Per comment in the beginning of this file, the input mask to this
 * function will be a set of hwloc logical core IDs.  We need to
 * convert it to a bitmap of hwloc physical PU IDs.  Specifically, for
 * any hwloc (logical) core ID that is set in the input mask, set all
 * of the hwloc physical PU IDs that are in that core in the mask that
 * we use to bind.  Then bind to that.
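 *
 * For illustration (hypothetical numbers): if hwloc logical core 2
 * contains OS (physical) PUs 4 and 5, then bit 2 being set in the
 * input mask contributes OS PUs 4 and 5 to the bitmap that is
 * ultimately passed to hwloc_set_cpubind().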
 */
static int module_set(opal_paffinity_base_cpu_set_t mask)
{
    int ret = OPAL_SUCCESS;
    hwloc_bitmap_t set = NULL, tmp = NULL, tmp2 = NULL;
    hwloc_obj_t core;

    /* bozo check */
    if (NULL == opal_hwloc_topology) {
        return OPAL_ERR_NOT_SUPPORTED;
    }

    check_for_cores();

    set = hwloc_bitmap_alloc();
    if (NULL == set) {
        return OPAL_ERR_OUT_OF_RESOURCE;
    }
    hwloc_bitmap_zero(set);

    tmp = hwloc_bitmap_alloc();
    if (NULL == tmp) {
        ret = OPAL_ERR_OUT_OF_RESOURCE;
        goto out;
    }
    tmp2 = hwloc_bitmap_alloc();
    if (NULL == tmp2) {
        ret = OPAL_ERR_OUT_OF_RESOURCE;
        goto out;
    }

    /* Iterate through the cores */
    for (core = hwloc_get_obj_by_type(opal_hwloc_topology, core_type, 0);
         core && core->logical_index < OPAL_PAFFINITY_BITMASK_CPU_MAX;
         core = core->next_cousin) {
        if (OPAL_PAFFINITY_CPU_ISSET(core->logical_index, mask)) {
            /* This is a core that's in the input mask.  Yay!  Get the
               actually-available PUs (i.e., (online & allowed)) */
            hwloc_bitmap_and(tmp, core->online_cpuset, core->allowed_cpuset);
            /* OR those PUs with the set of PUs that we already have */
            hwloc_bitmap_or(tmp2, set, tmp);
            /* Now copy that bitmap from the temp output back to the main set */
            hwloc_bitmap_copy(set, tmp2);
        }
    }

    if (0 != hwloc_set_cpubind(opal_hwloc_topology, set, 0)) {
        ret = OPAL_ERR_IN_ERRNO;
    }

 out:
    if (NULL != set) {
        hwloc_bitmap_free(set);
    }
    if (NULL != tmp) {
        hwloc_bitmap_free(tmp);
    }
    if (NULL != tmp2) {
        hwloc_bitmap_free(tmp2);
    }

    return ret;
}


/*
 * Return the set of PHYSICAL processor IDs to which this process is bound.
 *
 * Per the comment at the top of this file, we need to return a bitmap
 * of hwloc logical core IDs.  So we have to get the binding from
 * hwloc (which returns a bitmap of physical PU IDs) and then convert
 * it to a bitmap of hwloc logical core IDs.
 *
 * Also see https://svn.open-mpi.org/trac/ompi/ticket/3085.
 */
static int module_get(opal_paffinity_base_cpu_set_t *mask)
{
    int ret = OPAL_SUCCESS;
    hwloc_bitmap_t set = NULL;
    hwloc_topology_t *t;
    hwloc_obj_t pu, core;

    /* bozo check */
    if (NULL == opal_hwloc_topology) {
        return OPAL_ERR_NOT_SUPPORTED;
    }
    t = &opal_hwloc_topology;

    if (NULL == mask) {
        return OPAL_ERR_BAD_PARAM;
    }

    check_for_cores();

    set = hwloc_bitmap_alloc();
    if (NULL == set) {
        return OPAL_ERR_OUT_OF_RESOURCE;
    }

    /* Get the physical bitmap representing the binding */
    if (0 != hwloc_get_cpubind(*t, set, 0)) {
        ret = OPAL_ERR_IN_ERRNO;
        goto out;
    } 

    /* Now convert that bitmap of physical PU IDs to *logical* core
       IDs */
    OPAL_PAFFINITY_CPU_ZERO(*mask);
    for (pu = hwloc_get_obj_by_type(*t, HWLOC_OBJ_PU, 0);
         pu && pu->logical_index < OPAL_PAFFINITY_BITMASK_CPU_MAX;
         pu = pu->next_cousin) {
        if (hwloc_bitmap_isset(set, pu->os_index)) {
            /* This PU is set. */

            /* See check_for_cores(): if hwloc found cores, then search
               for the parent core.  If hwloc found no cores (and only
               found PUs), then there's no need to find the parent. */

            /* We have cores -- so find the parent */
            if (HWLOC_OBJ_CORE == core_type) {
                core = pu->parent;
                while (NULL != core && HWLOC_OBJ_CORE != core->type) {
                    core = core->parent;
                }

                if (NULL == core) {
                    /* If hwloc didn't report the parent core, then give
                       up */
                    ret = OPAL_ERR_NOT_FOUND;
                    goto out;
                } else {
                    /* Otherwise, save this core's logical index in the
                       output mask */
                    OPAL_PAFFINITY_CPU_SET(core->logical_index, *mask);
                }
            }

            /* We have no cores -- just use the PU logical_index */
            else {
                OPAL_PAFFINITY_CPU_SET(pu->logical_index, *mask);
            }
        }
    }

 out:
    if (NULL != set) {
        hwloc_bitmap_free(set);
    }

    return ret;
}

/*
 * Returns mapping of PHYSICAL socket:core -> PHYSICAL processor id.
 *
 * If the socket/core tuple is valid (both values are hwloc logical
 * IDs), simply return the core value -- this is a unity operation.
 */
static int module_map_to_processor_id(int socket, int core, int *processor_id)
{
    hwloc_topology_t *t;
    hwloc_obj_t core_obj;

    opal_output_verbose(10, opal_paffinity_base_output,
                        "hwloc_module_map_to_processor_id: IN: socket=%d, core=%d", socket, core);
    /* bozo check */
    if (NULL == opal_hwloc_topology) {
        return OPAL_ERR_NOT_SUPPORTED;
    }
    t = &opal_hwloc_topology;

    check_for_cores();

    /* Per comment at the beginning of this file, the "physical core
       IDs" that this module exposes are actually hwloc core logical
       IDs, which are unique in hwloc.  So we can just look up that
       hwloc core ID directly. */
    core_obj = hwloc_get_obj_by_type(*t, core_type, core);
    if (NULL == core_obj) {
        opal_output_verbose(10, opal_paffinity_base_output,
                            "hwloc_module_map_to_processor_id: OUT: Didn't find core %d", core);
        return OPAL_ERR_NOT_FOUND;
    }

    /* Now that we've validated the core, the operation is actually
       just a unity -- in this module, physical processor IDs are
       defined to be the same as the hwloc logical core IDs. */
    *processor_id = core;
    return OPAL_SUCCESS;
}

/*
 * Provides mapping of PHYSICAL processor id -> PHYSICAL socket:core.
 *
 * Remember that in this module, physical processor IDs are defined to
 * be the hwloc core logical IDs (which are unique across all cores).
 * So just take that hwloc logical core ID and find its parent socket
 * logical ID.
 */
static int module_map_to_socket_core(int processor_id, int *socket, int *core)
{
    hwloc_obj_t obj;
    hwloc_topology_t *t;

    opal_output_verbose(10, opal_paffinity_base_output,
                        "hwloc_module_map_to_socket_core: IN: proc_id = %d", processor_id);

    /* bozo check */
    if (NULL == opal_hwloc_topology) {
        return OPAL_ERR_NOT_SUPPORTED;
    }
    t = &opal_hwloc_topology;

    check_for_cores();

    /* Per comment at the beginning of this file, the "physical core
       IDs" that this module exposes are actually hwloc core logical
       IDs, which are unique in hwloc.  So we can just look up that
       hwloc core ID directly. */
    obj = hwloc_get_obj_by_type(*t, core_type, processor_id);
    if (NULL == obj) {
        opal_output_verbose(10, opal_paffinity_base_output,
                            "hwloc_module_map_to_socket_core: OUT: Didn't find core %d", 
                            processor_id);
        return OPAL_ERR_NOT_FOUND;
    }

    /* Now that we've validated the core, the operation is actually
       just a unity -- in this module, physical processor IDs are
       defined to be the same as the hwloc logical core IDs. */
    *core = processor_id;

    /* Now find the parent socket and get its logical ID, too */
    while (NULL != obj && HWLOC_OBJ_SOCKET != obj->type) {
        obj = obj->parent;
    }
    if (NULL == obj) {
        return OPAL_ERR_NOT_FOUND;
    } else {
        *socket = obj->logical_index;
        return OPAL_SUCCESS;
    }
}

/*
 * Provides number of LOGICAL processors in a host.  Since paffinity
 * does not currently understand hardware threads, we interpret
 * "processors" to mean "cores".
 */
static int module_get_processor_info(int *num_processors)
{
    hwloc_topology_t *t;

    opal_output_verbose(10, opal_paffinity_base_output,
                        "hwloc_get_processor_info: IN");

    /* bozo check */
    if (NULL == opal_hwloc_topology) {
        return OPAL_ERR_NOT_SUPPORTED;
    }
    t = &opal_hwloc_topology;

    check_for_cores();

    *num_processors = (int) hwloc_get_nbobjs_by_type(*t, core_type);

    opal_output_verbose(10, opal_paffinity_base_output,
                        "hwloc_get_processor_info: OUT: returning %d processors (cores)", *num_processors);
    return OPAL_SUCCESS;
}

/*
 * Provides the number of LOGICAL sockets in a host.
 */
static int module_get_socket_info(int *num_sockets)
{
    hwloc_topology_t *t;

    opal_output_verbose(10, opal_paffinity_base_output,
                        "hwloc_module_get_socket_info: IN");

    /* bozo check */
    if (NULL == opal_hwloc_topology) {
        return OPAL_ERR_NOT_SUPPORTED;
    }
    t = &opal_hwloc_topology;

    *num_sockets = (int) hwloc_get_nbobjs_by_type(*t, HWLOC_OBJ_SOCKET);

    opal_output_verbose(10, opal_paffinity_base_output,
                        "hwloc_module_get_socket_info: OUT: returning %d sockets", *num_sockets);
    return OPAL_SUCCESS;
}

/*
 * Provides the number of LOGICAL cores in a PHYSICAL socket. 
 */
static int module_get_core_info(int socket, int *num_cores)
{
    hwloc_obj_t obj;
    hwloc_topology_t *t;

    opal_output_verbose(10, opal_paffinity_base_output,
                        "hwloc_module_get_core_info: IN: socket=%d", socket);

    /* bozo check */
    if (NULL == opal_hwloc_topology) {
        return OPAL_ERR_NOT_SUPPORTED;
    }
    t = &opal_hwloc_topology;

    check_for_cores();

    /* Find the socket */
    obj = hwloc_get_obj_by_type(*t, HWLOC_OBJ_SOCKET, socket);
    if (NULL == obj) {
        return OPAL_ERR_NOT_FOUND;
    }

    /* Ok, we found the right socket.  Browse its descendants looking
       for all cores. */
    *num_cores = dfs_count_type(obj, core_type);
    opal_output_verbose(10, opal_paffinity_base_output,
                        "hwloc_module_get_core_info: OUT: socket=%d, num_cores=%d", socket, *num_cores);
    return OPAL_SUCCESS;
}

/*
 * Provide the PHYSICAL processor id that corresponds to the given
 * LOGICAL processor id.  
 *
 * Remember: paffinity does not understand hardware threads, so
 * "processor" here [usually] means "core" -- except that on some
 * platforms, hwloc won't find any cores; it'll only find PUs (!).  On
 * such platforms, we do the same calculation but with PUs instead
 * of COREs.
 */
static int module_get_physical_processor_id(int logical_processor_id)
{
    hwloc_obj_t obj;
    hwloc_topology_t *t;

    opal_output_verbose(10, opal_paffinity_base_output,
                        "hwloc_module_get_physical_processor_id: INOUT: logical proc %d (unity)", logical_processor_id);

    /* bozo check */
    if (NULL == opal_hwloc_topology) {
        return OPAL_ERR_NOT_SUPPORTED;
    }
    t = &opal_hwloc_topology;

    check_for_cores();

    /* Ensure that logical_processor_id exists */
    obj = hwloc_get_obj_by_type(*t, core_type, logical_processor_id);
    if (NULL == obj) {
        return OPAL_ERR_NOT_FOUND;
    }

    /* Ok, the processor exists.  Return it */
    return logical_processor_id;
}

/*
 * Provide the PHYSICAL socket id that corresponds to the given
 * LOGICAL socket id
 */
static int module_get_physical_socket_id(int logical_socket_id)
{
    hwloc_obj_t obj;
    hwloc_topology_t *t;

    opal_output_verbose(10, opal_paffinity_base_output,
                        "hwloc_module_get_physical_processor_id: INOUT: logical socket %d (unity)", logical_socket_id);

    /* bozo check */
    if (NULL == opal_hwloc_topology) {
        return OPAL_ERR_NOT_SUPPORTED;
    }
    t = &opal_hwloc_topology;

    /* Ensure that logical_socket_id exists */
    obj = hwloc_get_obj_by_type(*t, HWLOC_OBJ_SOCKET, logical_socket_id);
    if (NULL == obj) {
        return OPAL_ERR_NOT_FOUND;
    }

    /* Ok, the socket exists.  Return it */
    return logical_socket_id;
}

/*
 * Provide the PHYSICAL core id that corresponds to the given LOGICAL
 * core id on the given PHYSICAL socket id.
 *
 * In this case, the caller will be asking about a specific socket,
 * but a logical core *under that specific socket*.  So we need to
 * return the overall hwloc core logical ID for that core.
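 *
 * For illustration, using the hypothetical 2-socket, 2-cores-per-socket
 * topology sketched near the top of this file (socket 1 holds hwloc
 * logical cores 2 and 3): logical core 1 on physical socket 1 returns
 * hwloc logical core 3.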
 */
static int module_get_physical_core_id(int physical_socket_id, 
                                       int logical_core_id)
{
    unsigned count = 0;
    hwloc_obj_t obj;
    hwloc_topology_t *t;

    opal_output_verbose(10, opal_paffinity_base_output,
                        "hwloc_module_get_physical_core_id: IN: phys socket=%d, logical core=%d",
                        physical_socket_id, logical_core_id);
    /* bozo check */
    if (NULL == opal_hwloc_topology) {
        return OPAL_ERR_NOT_SUPPORTED;
    }
    t = &opal_hwloc_topology;

    check_for_cores();

    obj = hwloc_get_obj_by_type(*t, HWLOC_OBJ_SOCKET, physical_socket_id);
    if (NULL == obj) {
        return OPAL_ERR_NOT_FOUND;
    }

    /* Note that we can't look at hwloc's logical_index here -- hwloc
       counts logically across *all* cores.  We only want to find the
       Nth logical core under this particular socket. */
    obj = dfs_find_nth_item(obj, core_type, &count, logical_core_id);
    if (NULL == obj) {
        return OPAL_ERR_NOT_FOUND;
    }
    opal_output_verbose(10, opal_paffinity_base_output,
                        "hwloc_module_get_physical_core_id: OUT: phys socket=%d, logical core=%d: return %d",
                        physical_socket_id, logical_core_id, obj->logical_index);
    return obj->logical_index;
}