# coding: utf-8

"""
    Kubernetes

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)  # noqa: E501

    The version of the OpenAPI document: release-1.30
    Generated by: https://openapi-generator.tech
"""


import pprint
import re  # noqa: F401

import six

from kubernetes.client.configuration import Configuration

class V1TopologySpreadConstraint(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'label_selector': 'V1LabelSelector',
        'match_label_keys': 'list[str]',
        'max_skew': 'int',
        'min_domains': 'int',
        'node_affinity_policy': 'str',
        'node_taints_policy': 'str',
        'topology_key': 'str',
        'when_unsatisfiable': 'str'
    }

    attribute_map = {
        'label_selector': 'labelSelector',
        'match_label_keys': 'matchLabelKeys',
        'max_skew': 'maxSkew',
        'min_domains': 'minDomains',
        'node_affinity_policy': 'nodeAffinityPolicy',
        'node_taints_policy': 'nodeTaintsPolicy',
        'topology_key': 'topologyKey',
        'when_unsatisfiable': 'whenUnsatisfiable'
    }

    def __init__(self, label_selector=None, match_label_keys=None, max_skew=None, min_domains=None, node_affinity_policy=None, node_taints_policy=None, topology_key=None, when_unsatisfiable=None, local_vars_configuration=None):  # noqa: E501
        """V1TopologySpreadConstraint - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._label_selector = None
        self._match_label_keys = None
        self._max_skew = None
        self._min_domains = None
        self._node_affinity_policy = None
        self._node_taints_policy = None
        self._topology_key = None
        self._when_unsatisfiable = None
        self.discriminator = None

        if label_selector is not None:
            self.label_selector = label_selector
        if match_label_keys is not None:
            self.match_label_keys = match_label_keys
        self.max_skew = max_skew
        if min_domains is not None:
            self.min_domains = min_domains
        if node_affinity_policy is not None:
            self.node_affinity_policy = node_affinity_policy
        if node_taints_policy is not None:
            self.node_taints_policy = node_taints_policy
        self.topology_key = topology_key
        self.when_unsatisfiable = when_unsatisfiable

    @property
    def label_selector(self):
        """Gets the label_selector of this V1TopologySpreadConstraint.  # noqa: E501

        :return: The label_selector of this V1TopologySpreadConstraint.  # noqa: E501
        :rtype: V1LabelSelector
        """
        return self._label_selector

    @label_selector.setter
    def label_selector(self, label_selector):
        """Sets the label_selector of this V1TopologySpreadConstraint.

        :param label_selector: The label_selector of this V1TopologySpreadConstraint.  # noqa: E501
        :type: V1LabelSelector
        """

        self._label_selector = label_selector

    @property
    def match_label_keys(self):
        """Gets the match_label_keys of this V1TopologySpreadConstraint.  # noqa: E501

        MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector. This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).  # noqa: E501

        :return: The match_label_keys of this V1TopologySpreadConstraint.  # noqa: E501
        :rtype: list[str]
        """
        return self._match_label_keys

    @match_label_keys.setter
    def match_label_keys(self, match_label_keys):
        """Sets the match_label_keys of this V1TopologySpreadConstraint.

        MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector. This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).  # noqa: E501

        :param match_label_keys: The match_label_keys of this V1TopologySpreadConstraint.  # noqa: E501
        :type: list[str]
        """

        self._match_label_keys = match_label_keys

    @property
    def max_skew(self):
        """Gets the max_skew of this V1TopologySpreadConstraint.  # noqa: E501

        MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed.  # noqa: E501

        :return: The max_skew of this V1TopologySpreadConstraint.  # noqa: E501
        :rtype: int
        """
        return self._max_skew

    @max_skew.setter
    def max_skew(self, max_skew):
        """Sets the max_skew of this V1TopologySpreadConstraint.

        MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed.  # noqa: E501

        :param max_skew: The max_skew of this V1TopologySpreadConstraint.  # noqa: E501
        :type: int
        """
        if self.local_vars_configuration.client_side_validation and max_skew is None:  # noqa: E501
            raise ValueError("Invalid value for `max_skew`, must not be `None`")  # noqa: E501

        self._max_skew = max_skew

    @property
    def min_domains(self):
        """Gets the min_domains of this V1TopologySpreadConstraint.  # noqa: E501

        MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew.  # noqa: E501

        :return: The min_domains of this V1TopologySpreadConstraint.  # noqa: E501
        :rtype: int
        """
        return self._min_domains

    @min_domains.setter
    def min_domains(self, min_domains):
        """Sets the min_domains of this V1TopologySpreadConstraint.

        MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew.  # noqa: E501

        :param min_domains: The min_domains of this V1TopologySpreadConstraint.  # noqa: E501
        :type: int
        """

        self._min_domains = min_domains

    @property
    def node_affinity_policy(self):
        """Gets the node_affinity_policy of this V1TopologySpreadConstraint.  # noqa: E501

        NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.  # noqa: E501

        :return: The node_affinity_policy of this V1TopologySpreadConstraint.  # noqa: E501
        :rtype: str
        """
        return self._node_affinity_policy

    @node_affinity_policy.setter
    def node_affinity_policy(self, node_affinity_policy):
        """Sets the node_affinity_policy of this V1TopologySpreadConstraint.

        NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.  # noqa: E501

        :param node_affinity_policy: The node_affinity_policy of this V1TopologySpreadConstraint.  # noqa: E501
        :type: str
        """

        self._node_affinity_policy = node_affinity_policy

    @property
    def node_taints_policy(self):
        """Gets the node_taints_policy of this V1TopologySpreadConstraint.  # noqa: E501

        NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.  # noqa: E501

        :return: The node_taints_policy of this V1TopologySpreadConstraint.  # noqa: E501
        :rtype: str
        """
        return self._node_taints_policy

    @node_taints_policy.setter
    def node_taints_policy(self, node_taints_policy):
        """Sets the node_taints_policy of this V1TopologySpreadConstraint.

        NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.  # noqa: E501

        :param node_taints_policy: The node_taints_policy of this V1TopologySpreadConstraint.  # noqa: E501
        :type: str
        """

        self._node_taints_policy = node_taints_policy

    @property
    def topology_key(self):
        """Gets the topology_key of this V1TopologySpreadConstraint.  # noqa: E501

        TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a \"bucket\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field.  # noqa: E501

        :return: The topology_key of this V1TopologySpreadConstraint.  # noqa: E501
        :rtype: str
        """
        return self._topology_key

    @topology_key.setter
    def topology_key(self, topology_key):
        """Sets the topology_key of this V1TopologySpreadConstraint.

        TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each <key, value> as a \"bucket\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field.  # noqa: E501

        :param topology_key: The topology_key of this V1TopologySpreadConstraint.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and topology_key is None:  # noqa: E501
            raise ValueError("Invalid value for `topology_key`, must not be `None`")  # noqa: E501

        self._topology_key = topology_key

    @property
    def when_unsatisfiable(self):
        """Gets the when_unsatisfiable of this V1TopologySpreadConstraint.  # noqa: E501

        WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assignment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field.  # noqa: E501

        :return: The when_unsatisfiable of this V1TopologySpreadConstraint.  # noqa: E501
        :rtype: str
        """
        return self._when_unsatisfiable

    @when_unsatisfiable.setter
    def when_unsatisfiable(self, when_unsatisfiable):
        """Sets the when_unsatisfiable of this V1TopologySpreadConstraint.

        WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assignment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field.  # noqa: E501

        :param when_unsatisfiable: The when_unsatisfiable of this V1TopologySpreadConstraint.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and when_unsatisfiable is None:  # noqa: E501
            raise ValueError("Invalid value for `when_unsatisfiable`, must not be `None`")  # noqa: E501

        self._when_unsatisfiable = when_unsatisfiable

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1TopologySpreadConstraint):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1TopologySpreadConstraint):
            return True

        return self.to_dict() != other.to_dict()
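
# ---------------------------------------------------------------------------
# Usage sketch (illustrative only; not part of the generated model above).
# A minimal example of building a constraint that spreads pods carrying the
# assumed "app: web" label evenly across zones, plus a demonstration of the
# client-side validation performed by the setters for required fields.
if __name__ == "__main__":
    from kubernetes.client import V1LabelSelector

    constraint = V1TopologySpreadConstraint(
        max_skew=1,                                   # allow at most 1 pod of imbalance
        topology_key="topology.kubernetes.io/zone",   # one domain per zone
        when_unsatisfiable="DoNotSchedule",           # hard constraint
        label_selector=V1LabelSelector(match_labels={"app": "web"}),
    )
    # to_dict() returns the attributes keyed by their Python (snake_case) names.
    print(constraint.to_dict())

    # Required fields (max_skew, topology_key, when_unsatisfiable) may not be
    # None while client_side_validation is enabled, which is the default.
    try:
        V1TopologySpreadConstraint(topology_key="kubernetes.io/hostname",
                                   when_unsatisfiable="ScheduleAnyway")
    except ValueError as err:
        print(err)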