File: memory_test.go

/*
 * Copyright (C) 2021, NVIDIA CORPORATION & AFFILIATES. ALL RIGHTS RESERVED.
 * See file LICENSE for terms.
 */

package goucxtests

import (
	"flag"
	"fmt"
	"testing"
	"unsafe"

	. "github.com/openucx/ucx/bindings/go/src/cuda"
	. "github.com/openucx/ucx/bindings/go/src/ucx"
)

// maxSize caps the largest buffer size exercised by BenchmarkUcpMmap; it can be
// overridden with the test binary's -s flag.
var maxSize = flag.Uint64("s", 10_000_000, "Max size of memory to mmap. Default: 10M")

// memoryMap registers an existing buffer at the given address with the UCP context.
func memoryMap(context *UcpContext, address unsafe.Pointer, size uint64,
	memoryType UcsMemoryType) (*UcpMemory, error) {
	mmapParams := &UcpMmapParams{}
	mmapParams.SetAddress(address).SetLength(size).SetMemoryType(memoryType)

	return context.MemMap(mmapParams)
}

// memAlloc has UCX allocate and register new memory of the requested type and size.
func memAlloc(context *UcpContext, size uint64, memoryType UcsMemoryType) (*UcpMemory, error) {
	mmapParams := &UcpMmapParams{}
	mmapParams.Allocate().SetLength(size).SetMemoryType(memoryType)

	return context.MemMap(mmapParams)
}

// BenchmarkUcpMmap benchmarks UCX memory allocation for host memory and, when the
// UCX build supports it, for CUDA device memory, across power-of-two sizes up to the -s limit.
func BenchmarkUcpMmap(b *testing.B) {
	ucpParams := &UcpParams{}
	ucpParams.EnableTag()

	context, err := NewUcpContext(ucpParams)
	if err != nil {
		b.Fatalf("Failed to create a context %v", err)
	}
	defer context.Close()

	memTypeMask, _ := context.MemoryTypesMask()

	for size := uint64(1024); size < *maxSize; size <<= 1 {
		b.Run(fmt.Sprintf("Allocate host memory %d", size), func(b *testing.B) {
			// Each benchmark iteration performs one UCX allocate/release cycle.
			for i := 0; i < b.N; i++ {
				allocatedMemory, err := memAlloc(context, size, UCS_MEMORY_TYPE_HOST)
				if err != nil {
					b.Fatalf("Failed to allocate memory %v", err)
				}
				allocatedMemory.Close()
			}
		})
	}

	if IsMemTypeSupported(UCS_MEMORY_TYPE_CUDA, memTypeMask) {
		for size := uint64(1024); size < *maxSize; size <<= 1 {
			b.Run(fmt.Sprintf("Allocate GPU memory %d", size), func(b *testing.B) {
				// Select the CUDA device once before the allocation loop.
				if err := CudaSetDevice(); err != nil {
					b.Fatalf("%v", err)
				}
				for i := 0; i < b.N; i++ {
					gpuMemory, err := memAlloc(context, size, UCS_MEMORY_TYPE_CUDA)
					if err != nil {
						b.Fatalf("Failed to allocate GPU memory %v", err)
					}
					gpuMemory.Close()
				}
			})
		}
	}
}

// TestUcpMmap allocates host memory through UCX, verifies its reported attributes,
// maps caller-owned native memory, and allocates CUDA device memory when supported.
func TestUcpMmap(t *testing.T) {
	const testMemorySize uint64 = 1024
	ucpParams := &UcpParams{}
	ucpParams.EnableTag()

	context, err := NewUcpContext(ucpParams)
	if err != nil {
		t.Fatalf("Failed to create a context %v", err)
	}
	defer context.Close()

	allocatedMemory, err := memAlloc(context, testMemorySize, UCS_MEMORY_TYPE_HOST)

	if err != nil {
		t.Fatalf("Failed to allocate memory %v", err)
	}

	mmapAttrs, _ := allocatedMemory.Query(UCP_MEM_ATTR_FIELD_ADDRESS, UCP_MEM_ATTR_FIELD_LENGTH, UCP_MEM_ATTR_FIELD_MEM_TYPE)

	if UcsMemoryType(mmapAttrs.MemType) != UCS_MEMORY_TYPE_HOST {
		t.Fatalf("Allocated memory type %v is not host", mmapAttrs.MemType)
	}

	allocatedMemory.Close()
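	// Map host memory that was allocated outside of UCX instead of having UCX allocate it.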
	nativeMemory := AllocateNativeMemory(testMemorySize)
	mappedMemory, err := memoryMap(context, nativeMemory, testMemorySize, UCS_MEMORY_TYPE_HOST)

	if err != nil {
		t.Fatalf("Failed to map memory %v", err)
	}

	mappedMemory.Close()
	FreeNativeMemory(nativeMemory)
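
	// Repeat the allocation with CUDA device memory when the UCX context reports support for it.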
	memTypeMask, _ := context.MemoryTypesMask()

	if IsMemTypeSupported(UCS_MEMORY_TYPE_CUDA, memTypeMask) {
		if err := CudaSetDevice(); err != nil {
			t.Fatalf("%v", err)
		}

		gpuMemory, err := memAlloc(context, testMemorySize, UCS_MEMORY_TYPE_CUDA)

		if err != nil {
			t.Fatalf("Failed to allocate GPU memory %v", err)
		}

		gpuMemory.Close()
	}
}