File: state.go

Package: incus 6.0.5-1
package project

import (
	"context"
	"fmt"
	"strconv"
	"strings"

	"github.com/lxc/incus/v6/internal/server/db"
	"github.com/lxc/incus/v6/internal/server/instance/instancetype"
	"github.com/lxc/incus/v6/shared/api"
)

// GetCurrentAllocations returns the current resource utilization for a given project.
func GetCurrentAllocations(ctx context.Context, tx *db.ClusterTx, projectName string) (map[string]api.ProjectStateResource, error) {
	result := map[string]api.ProjectStateResource{}

	// Get the project.
	info, err := fetchProject(tx, projectName, false)
	if err != nil {
		return nil, err
	}

	if info == nil {
		return nil, fmt.Errorf("Project %q returned empty info struct", projectName)
	}

	info.Instances, err = expandInstancesConfigAndDevices(info.Instances, info.Profiles)
	if err != nil {
		return nil, err
	}

	// Get per-pool limits.
	poolLimits := []string{}
	for k := range info.Project.Config {
		if strings.HasPrefix(k, projectLimitDiskPool) {
			poolLimits = append(poolLimits, k)
		}
	}

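	// Query the pool-specific disk limit keys alongside the standard aggregate limits.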
	allAggregateLimits := append(allAggregateLimits, poolLimits...)

	// Get the instance aggregated values.
	raw, err := getAggregateLimits(info, allAggregateLimits)
	if err != nil {
		return nil, err
	}

	result["cpu"] = raw["limits.cpu"]
	result["disk"] = raw["limits.disk"]
	result["memory"] = raw["limits.memory"]
	result["networks"] = raw["limits.networks"]
	result["processes"] = raw["limits.processes"]

	// Add the pool-specific disk limits.
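	// Pool-specific keys carry the pool name after the third dot (e.g. limits.disk.pool.<pool>); expose them as "disk.<pool>".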
	for k, v := range raw {
		if strings.HasPrefix(k, projectLimitDiskPool) && v.Limit > 0 {
			result[fmt.Sprintf("disk.%s", strings.SplitN(k, ".", 4)[3])] = v
		}
	}

	// Get the instance count values.
	count, limit, err := getTotalInstanceCountLimit(info)
	if err != nil {
		return nil, err
	}

	result["instances"] = api.ProjectStateResource{
		Limit: int64(limit),
		Usage: int64(count),
	}

	count, limit, err = getInstanceCountLimit(info, instancetype.Container)
	if err != nil {
		return nil, err
	}

	result["containers"] = api.ProjectStateResource{
		Limit: int64(limit),
		Usage: int64(count),
	}

	count, limit, err = getInstanceCountLimit(info, instancetype.VM)
	if err != nil {
		return nil, err
	}

	result["virtual-machines"] = api.ProjectStateResource{
		Limit: int64(limit),
		Usage: int64(count),
	}

	// Get the network limit and usage.
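	// When limits.networks is unset, the limit is reported as -1 (unlimited).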
	overallValue, ok := info.Project.Config["limits.networks"]
	limit = -1
	if ok {
		limit, err = strconv.Atoi(overallValue)
		if err != nil {
			return nil, err
		}
	}

	networks, err := tx.GetCreatedNetworks(ctx)
	if err != nil {
		return nil, err
	}

	result["networks"] = api.ProjectStateResource{
		Limit: int64(limit),
		Usage: int64(len(networks[projectName])),
	}

	return result, nil
}
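
For reference, a minimal usage sketch follows. It assumes a caller that already holds a *db.Cluster handle and uses its Transaction helper to obtain the *db.ClusterTx; the printProjectAllocations wrapper is hypothetical and not part of this file.

// printProjectAllocations is a hypothetical helper illustrating how
// GetCurrentAllocations might be driven from a cluster transaction.
// The (*db.Cluster).Transaction call is an assumption about the wider
// incus db package, not something defined in this file.
func printProjectAllocations(ctx context.Context, cluster *db.Cluster, projectName string) error {
	return cluster.Transaction(ctx, func(ctx context.Context, tx *db.ClusterTx) error {
		allocations, err := GetCurrentAllocations(ctx, tx, projectName)
		if err != nil {
			return err
		}

		// Each entry pairs current usage with the configured limit (-1 when unlimited).
		for name, res := range allocations {
			fmt.Printf("%s: usage=%d limit=%d\n", name, res.Usage, res.Limit)
		}

		return nil
	})
}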