// SPDX-License-Identifier: BSD-2-Clause
/* Copyright (C) 2020 - 2021 Intel Corporation. */
#include <errno.h>
#include <math.h>
#include <memkind.h>
#include <numa.h>
#include <numaif.h>
#include <stdint.h>
#include <string>
#include <unistd.h>
#include "TestPrereq.hpp"
#include "proc_stat.h"
#include "sys/sysinfo.h"
#include "sys/types.h"
#include "common.h"
class MemkindHiCapacityFunctionalTests: public ::testing::Test
{
protected:
TestPrereq tp;
void SetUp()
{}
void TearDown()
{}
};
class MemkindHiCapacityFunctionalTestsParam: public ::Memkind_Param_Test
{
protected:
TestPrereq tp;
};
INSTANTIATE_TEST_CASE_P(KindParam, MemkindHiCapacityFunctionalTestsParam,
::testing::Values(MEMKIND_HIGHEST_CAPACITY,
MEMKIND_HIGHEST_CAPACITY_PREFERRED));
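// An allocation of SIZE_MAX cannot be satisfied: memkind_malloc() is expected
// to return NULL and set errno to ENOMEM.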
TEST_P(MemkindHiCapacityFunctionalTestsParam, test_TC_HiCapacity_alloc_size_max)
{
errno = 0;
void *test1 = memkind_malloc(memory_kind, SIZE_MAX);
ASSERT_EQ(test1, nullptr);
ASSERT_EQ(errno, ENOMEM);
}
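// MEMKIND_HIGHEST_CAPACITY_LOCAL relies on libhwloc to resolve locality, so
// the test is skipped when hwloc support is not available.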
TEST_F(MemkindHiCapacityFunctionalTests, test_TC_HiCapacityLocal_alloc_size_max)
{
if (tp.is_libhwloc_supported()) {
errno = 0;
void *test1 = memkind_malloc(MEMKIND_HIGHEST_CAPACITY_LOCAL, SIZE_MAX);
ASSERT_EQ(test1, nullptr);
ASSERT_EQ(errno, ENOMEM);
} else {
GTEST_SKIP() << "libhwloc is required." << std::endl;
}
}
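// Verify that the allocation is backed by a NUMA node whose capacity matches
// the highest-capacity node(s) in the system.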
TEST_P(MemkindHiCapacityFunctionalTestsParam, test_TC_HiCapacity_correct_numa)
{
auto high_capacity_nodes = tp.get_highest_capacity_nodes();
if (tp.is_kind_preferred(memory_kind) && high_capacity_nodes.size() != 1) {
GTEST_SKIP()
<< "This test requires exactly 1 highest capacity NUMA Node in the OS.";
}
int size = 10;
void *ptr = memkind_malloc(memory_kind, size);
ASSERT_NE(ptr, nullptr);
memset(ptr, 0, size);
// get ID and capacity of NUMA node where the allocation is made
int numa_id = -1;
int ret =
get_mempolicy(&numa_id, nullptr, 0, ptr, MPOL_F_NODE | MPOL_F_ADDR);
ASSERT_EQ(ret, 0);
long long numa_capacity = numa_node_size64(numa_id, NULL);
    // the capacity of that node must match the NUMA node(s) with the highest
    // capacity in the system
for (auto const &node : high_capacity_nodes) {
long long capacity = numa_node_size64(node, NULL);
ASSERT_EQ(numa_capacity, capacity);
}
memkind_free(memory_kind, ptr);
}
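// When two or more NUMA nodes share the highest capacity, the PREFERRED
// variant cannot choose a single target node and the allocation is expected
// to fail.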
TEST_F(MemkindHiCapacityFunctionalTests,
test_TC_HiCapacityPreferred_TwoOrMoreNodes)
{
auto high_capacity_nodes = tp.get_highest_capacity_nodes();
if (high_capacity_nodes.size() < 2) {
GTEST_SKIP()
<< "This test requires minimum 2 highest capacity NUMA Nodes in the OS.";
}
const size_t alloc_size = 512;
void *test1 =
memkind_malloc(MEMKIND_HIGHEST_CAPACITY_PREFERRED, alloc_size);
ASSERT_EQ(test1, nullptr);
}
TEST_P(MemkindHiCapacityFunctionalTestsParam,
test_TC_HiCapacity_alloc_until_full_numa)
{
auto high_capacity_nodes = tp.get_highest_capacity_nodes();
    // TODO: add an API to check this as a more general condition
if (memory_kind == MEMKIND_HIGHEST_CAPACITY_PREFERRED &&
high_capacity_nodes.size() != 1) {
GTEST_SKIP()
<< "This test requires exactly 1 highest capacity NUMA Node in the OS.";
}
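    // Fill the High Capacity Node(s) with 100 MB allocations until their free
    // space is exhausted, then check how further allocations affect swap usage.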
ProcStat stat;
void *ptr;
const size_t alloc_size = 100 * MB;
const size_t alloc_size_swap = 1 * MB;
std::vector<void *> allocations;
size_t sum_of_free_space = tp.get_free_space(high_capacity_nodes);
int numa_id = -1;
const int n_swap_alloc = 20;
size_t sum_of_alloc = 0;
while (sum_of_free_space > sum_of_alloc) {
ptr = memkind_malloc(memory_kind, alloc_size);
ASSERT_NE(nullptr, ptr);
memset(ptr, 'a', alloc_size);
allocations.emplace_back(ptr);
sum_of_alloc += alloc_size;
int ret =
get_mempolicy(&numa_id, nullptr, 0, ptr, MPOL_F_NODE | MPOL_F_ADDR);
ASSERT_EQ(ret, 0);
        // if the allocation landed outside the High Capacity Nodes, (nearly)
        // all of their free space should already be exhausted
if (high_capacity_nodes.find(numa_id) == high_capacity_nodes.end()) {
ASSERT_GE(sum_of_alloc, 0.99 * sum_of_free_space);
}
}
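    // Record the swap usage baseline before allocating beyond the free space
    // of the High Capacity Node(s).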
size_t init_swap = stat.get_used_swap_space_size_bytes();
for (int i = 0; i < n_swap_alloc; ++i) {
ptr = memkind_malloc(memory_kind, alloc_size_swap);
ASSERT_NE(nullptr, ptr);
memset(ptr, 'a', alloc_size_swap);
allocations.emplace_back(ptr);
}
    // The preferred kind can fall back to other NUMA nodes when the highest
    // capacity node is full, so swap usage should not grow; the strictly bound
    // kind must swap once its node(s) run out of free space.
    if (tp.is_kind_preferred(memory_kind)) {
        ASSERT_LE(stat.get_used_swap_space_size_bytes(), init_swap);
    } else {
        ASSERT_GE(stat.get_used_swap_space_size_bytes(), init_swap);
    }
for (auto const &ptr : allocations) {
memkind_free(memory_kind, ptr);
}
}