# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import time
import jsonschema
from jsonschema import exceptions as json_schema_exc
from proliantutils import exception
from proliantutils.hpssa import constants
from proliantutils.hpssa import disk_allocator
from proliantutils.hpssa import objects
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
RAID_CONFIG_SCHEMA = os.path.join(CURRENT_DIR, "raid_config_schema.json")
def _update_physical_disk_details(raid_config, server):
"""Adds the physical disk details to the RAID configuration passed."""
raid_config['physical_disks'] = []
physical_drives = server.get_physical_drives()
for physical_drive in physical_drives:
physical_drive_dict = physical_drive.get_physical_drive_dict()
raid_config['physical_disks'].append(physical_drive_dict)
def validate(raid_config):
"""Validates the RAID configuration provided.
This method validates the RAID configuration provided against
a JSON schema.
:param raid_config: The RAID configuration to be validated.
:raises: InvalidInputError, if validation of the input fails.
"""
    with open(RAID_CONFIG_SCHEMA, 'r') as raid_schema_fobj:
        raid_config_schema = json.load(raid_schema_fobj)
try:
jsonschema.validate(raid_config, raid_config_schema)
except json_schema_exc.ValidationError as e:
raise exception.InvalidInputError(e.message)
for logical_disk in raid_config['logical_disks']:
        # If the user has provided 'number_of_physical_disks' or
        # 'physical_disks', validate that at least the minimum number of
        # physical disks required for that RAID level has been specified.
raid_level = logical_disk['raid_level']
min_disks_reqd = constants.RAID_LEVEL_MIN_DISKS[raid_level]
no_of_disks_specified = None
if 'number_of_physical_disks' in logical_disk:
no_of_disks_specified = logical_disk['number_of_physical_disks']
elif 'physical_disks' in logical_disk:
no_of_disks_specified = len(logical_disk['physical_disks'])
if (no_of_disks_specified
and no_of_disks_specified < min_disks_reqd):
msg = ("RAID level %(raid_level)s requires at least %(number)s "
"disks." % {'raid_level': raid_level,
'number': min_disks_reqd})
raise exception.InvalidInputError(msg)
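# Illustrative example (not executed): a raid_config that should pass
# validate(). The sizes, RAID levels and disk ids below are hypothetical;
# the authoritative schema lives in raid_config_schema.json.
#
#   raid_config = {
#       'logical_disks': [
#           {'raid_level': '1', 'size_gb': 100,
#            'physical_disks': ['5I:0:1', '5I:0:2']},
#           {'raid_level': '5', 'size_gb': 'MAX',
#            'number_of_physical_disks': 3},
#       ]
#   }
#   validate(raid_config)  # raises InvalidInputError on schema or
#                          # minimum-disk-count violations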
def _select_controllers_by(server, select_condition, msg):
"""Filters out the hpssa controllers based on the condition.
This method updates the server with only the controller which satisfies
the condition. The controllers which doesn't satisfies the selection
condition will be removed from the list.
:param server: The object containing all the supported hpssa controllers
details.
:param select_condition: A lambda function to select the controllers based
on requirement.
:param msg: A String which describes the controller selection.
:raises exception.HPSSAOperationError, if all the controller are in HBA
mode.
"""
all_controllers = server.controllers
supported_controllers = [c for c in all_controllers if select_condition(c)]
if not supported_controllers:
reason = ("None of the available SSA controllers %(controllers)s "
"have %(msg)s"
% {'controllers': ', '.join([c.id for c in all_controllers]),
'msg': msg})
raise exception.HPSSAOperationError(reason=reason)
server.controllers = supported_controllers
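# Illustrative usage (not executed), mirroring how create_configuration()
# and delete_configuration() call this helper to keep only the controllers
# that are not in HBA mode:
#
#   server = objects.Server()
#   _select_controllers_by(
#       server,
#       lambda ctrl: not ctrl.properties.get('HBA Mode Enabled', False),
#       'RAID enabled')
#   # server.controllers now lists only RAID-capable controllers;
#   # HPSSAOperationError is raised if none of them qualify.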
def create_configuration(raid_config):
"""Create a RAID configuration on this server.
This method creates the given RAID configuration on the
server based on the input passed.
:param raid_config: The dictionary containing the requested
RAID configuration. This data structure should be as follows:
raid_config = {'logical_disks': [{'raid_level': 1, 'size_gb': 100},
<info-for-logical-disk-2>
]}
    :returns: the current RAID configuration. This is the same as
        raid_config with some extra properties like root_device_hint,
        volume_name, controller, physical_disks, etc. filled in for each
        logical disk after its creation.
:raises exception.InvalidInputError, if input is invalid.
:raises exception.HPSSAOperationError, if all the controllers are in HBA
mode.
"""
server = objects.Server()
select_controllers = lambda x: not x.properties.get('HBA Mode Enabled',
False)
_select_controllers_by(server, select_controllers, 'RAID enabled')
validate(raid_config)
    # Make sure we create the large disks first. This is to avoid the
    # situation where a large physical disk gets allocated to a smaller
    # logical disk request. For example, consider this:
    # - two logical disks - LD1(50), LD2(100)
    # - four physical disks - PD1(50), PD2(50), PD3(100), PD4(100)
    #
    # In this case, for a RAID 1 configuration, if we were to consider
    # LD1 first and allocate PD3 and PD4 for it, then the allocation for
    # LD2 would fail. So follow a particular order for allocation.
    #
    # Also make sure we create the MAX logical_disks last, so that we
    # allot only the remaining available space to them.
logical_disks_sorted = (
sorted((x for x in raid_config['logical_disks']
if x['size_gb'] != "MAX"),
reverse=True,
key=lambda x: x['size_gb'])
+ [x for x in raid_config['logical_disks'] if x['size_gb'] == "MAX"])
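    # Illustrative example of the ordering above, with hypothetical sizes:
    #
    #   input:  [{'size_gb': 50}, {'size_gb': 'MAX'}, {'size_gb': 100}]
    #   sorted: [{'size_gb': 100}, {'size_gb': 50}, {'size_gb': 'MAX'}]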
if any(logical_disk['share_physical_disks']
for logical_disk in logical_disks_sorted
if 'share_physical_disks' in logical_disk):
logical_disks_sorted = _sort_shared_logical_disks(logical_disks_sorted)
    # We figure out the new logical drive created by recording the WWNs
    # before and after each create, and then finding the newly appeared
    # WWN from the difference.
wwns_before_create = set([x.wwn for x in
server.get_logical_drives()])
for logical_disk in logical_disks_sorted:
if 'physical_disks' not in logical_disk:
disk_allocator.allocate_disks(logical_disk, server,
raid_config)
controller_id = logical_disk['controller']
controller = server.get_controller_by_id(controller_id)
if not controller:
msg = ("Unable to find controller named '%(controller)s'."
" The available controllers are '%(ctrl_list)s'." %
{'controller': controller_id,
'ctrl_list': ', '.join(
[c.id for c in server.controllers])})
raise exception.InvalidInputError(reason=msg)
if 'physical_disks' in logical_disk:
for physical_disk in logical_disk['physical_disks']:
disk_obj = controller.get_physical_drive_by_id(physical_disk)
if not disk_obj:
msg = ("Unable to find physical disk '%(physical_disk)s' "
"on '%(controller)s'" %
{'physical_disk': physical_disk,
'controller': controller_id})
raise exception.InvalidInputError(msg)
controller.create_logical_drive(logical_disk)
# Now find the new logical drive created.
server.refresh()
wwns_after_create = set([x.wwn for x in
server.get_logical_drives()])
new_wwn = wwns_after_create - wwns_before_create
if not new_wwn:
reason = ("Newly created logical disk with raid_level "
"'%(raid_level)s' and size %(size_gb)s GB not "
"found." % {'raid_level': logical_disk['raid_level'],
'size_gb': logical_disk['size_gb']})
raise exception.HPSSAOperationError(reason=reason)
new_logical_disk = server.get_logical_drive_by_wwn(new_wwn.pop())
new_log_drive_properties = new_logical_disk.get_logical_drive_dict()
logical_disk.update(new_log_drive_properties)
wwns_before_create = wwns_after_create.copy()
_update_physical_disk_details(raid_config, server)
return raid_config
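# Illustrative usage of create_configuration() (not executed). The RAID
# levels and sizes below are hypothetical:
#
#   target = {'logical_disks': [{'raid_level': '1', 'size_gb': 50},
#                               {'raid_level': '1', 'size_gb': 'MAX'}]}
#   current = create_configuration(target)
#   for ld in current['logical_disks']:
#       # each logical disk now carries extra properties such as
#       # 'root_device_hint', 'volume_name' and 'controller'
#       print(ld['volume_name'], ld['controller'])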
def _sort_shared_logical_disks(logical_disks):
"""Sort the logical disks based on the following conditions.
When the share_physical_disks is True make sure we create the volume
which needs more disks first. This avoids the situation of insufficient
disks for some logical volume request.
For example,
- two logical disk with number of disks - LD1(3), LD2(4)
- have 4 physical disks
In this case, if we consider LD1 first then LD2 will fail since not
enough disks available to create LD2. So follow a order for allocation
when share_physical_disks is True.
Also RAID1 can share only when there is logical volume with only 2 disks.
So make sure we create RAID 1 first when share_physical_disks is True.
And RAID 1+0 can share only when the logical volume with even number of
disks.
:param logical_disks: 'logical_disks' to be sorted for shared logical
disks.
:returns: the logical disks sorted based the above conditions.
"""
is_shared = (lambda x: True if ('share_physical_disks' in x
and x['share_physical_disks']) else False)
num_of_disks = (lambda x: x['number_of_physical_disks']
if 'number_of_physical_disks' in x else
constants.RAID_LEVEL_MIN_DISKS[x['raid_level']])
# Separate logical disks based on share_physical_disks value.
# 'logical_disks_shared' when share_physical_disks is True and
# 'logical_disks_nonshared' when share_physical_disks is False
logical_disks_shared = []
logical_disks_nonshared = []
for x in logical_disks:
target = (logical_disks_shared if is_shared(x)
else logical_disks_nonshared)
target.append(x)
    # Separate the logical disks with RAID 1 from 'logical_disks_shared'
    # into 'logical_disks_shared_raid1' and keep the remaining ones in
    # 'logical_disks_shared_excl_raid1'.
logical_disks_shared_raid1 = []
logical_disks_shared_excl_raid1 = []
for x in logical_disks_shared:
target = (logical_disks_shared_raid1 if x['raid_level'] == '1'
else logical_disks_shared_excl_raid1)
target.append(x)
    # Sort 'logical_disks_shared' in reverse order based on the
    # 'number_of_physical_disks' attribute, if provided, otherwise on the
    # minimum number of disks required to create the logical volume.
logical_disks_shared = sorted(logical_disks_shared_excl_raid1,
reverse=True,
key=num_of_disks)
    # Move a RAID 1+0 logical disk to the front of 'logical_disks_shared'
    # when a larger shared logical volume needs an odd number of physical
    # disks, since RAID 1+0 cannot share physical disks with such a volume.
check = True
for x in logical_disks_shared:
if x['raid_level'] == "1+0":
x_num = num_of_disks(x)
for y in logical_disks_shared:
if y['raid_level'] != "1+0":
y_num = num_of_disks(y)
if x_num < y_num:
check = (True if y_num % 2 == 0 else False)
if check:
break
if not check:
logical_disks_shared.remove(x)
logical_disks_shared.insert(0, x)
check = True
    # The final 'logical_disks_sorted' list should have the non-shared
    # logical disks first, followed by the shared logical disks with RAID 1,
    # and finally the remaining shared logical disks sorted by number of
    # disks and the RAID 1+0 condition.
logical_disks_sorted = (logical_disks_nonshared
+ logical_disks_shared_raid1
+ logical_disks_shared)
return logical_disks_sorted
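# Illustrative result of _sort_shared_logical_disks() on a hypothetical
# input: given LD1 (RAID 5, shared, 3 disks), LD2 (RAID 1+0, shared,
# 4 disks), LD3 (RAID 1, shared) and LD4 (not shared), the returned order
# is [LD4, LD3, LD2, LD1]: non-shared disks first, then shared RAID 1,
# then the remaining shared disks by descending disk count.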
def delete_configuration():
"""Delete a RAID configuration on this server.
:returns: the current RAID configuration after deleting all
the logical disks.
"""
server = objects.Server()
select_controllers = lambda x: not x.properties.get('HBA Mode Enabled',
False)
_select_controllers_by(server, select_controllers, 'RAID enabled')
for controller in server.controllers:
        # Trigger delete only if there is some RAID array, otherwise
        # hpssacli/ssacli will fail saying "no logical drives found".
if controller.raid_arrays:
controller.delete_all_logical_drives()
return get_configuration()
def get_configuration():
"""Get the current RAID configuration.
Get the RAID configuration from the server and return it
as a dictionary.
:returns: A dictionary of the below format.
raid_config = {
'logical_disks': [{
'size_gb': 100,
'raid_level': 1,
'physical_disks': [
'5I:0:1',
'5I:0:2'],
'controller': 'Smart array controller'
},
]
}
"""
server = objects.Server()
logical_drives = server.get_logical_drives()
raid_config = {}
raid_config['logical_disks'] = []
for logical_drive in logical_drives:
logical_drive_dict = logical_drive.get_logical_drive_dict()
raid_config['logical_disks'].append(logical_drive_dict)
_update_physical_disk_details(raid_config, server)
return raid_config
def has_erase_completed():
    """Return True if no physical drive has an erase in progress."""
    server = objects.Server()
    drives = server.get_physical_drives()
    return not any(drive.erase_status == 'Erase In Progress'
                   for drive in drives)
def erase_devices():
"""Erase all the drives on this server.
This method performs sanitize erase on all the supported physical drives
in this server. This erase cannot be performed on logical drives.
:returns: a dictionary of controllers with drives and the erase status.
:raises exception.HPSSAException, if none of the drives support
sanitize erase.
"""
server = objects.Server()
for controller in server.controllers:
drives = [x for x in controller.unassigned_physical_drives
if (x.get_physical_drive_dict().get('erase_status', '')
== 'OK')]
if drives:
controller.erase_devices(drives)
while not has_erase_completed():
time.sleep(300)
server.refresh()
status = {}
for controller in server.controllers:
drive_status = {x.id: x.erase_status
for x in controller.unassigned_physical_drives}
sanitize_supported = controller.properties.get(
'Sanitize Erase Supported', 'False')
if sanitize_supported == 'False':
msg = ("Drives overwritten with zeros because sanitize erase "
"is not supported on the controller.")
else:
msg = ("Sanitize Erase performed on the disks attached to "
"the controller.")
drive_status.update({'Summary': msg})
status[controller.id] = drive_status
return status
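# Illustrative shape of the dictionary returned by erase_devices(); the
# controller name and drive ids below are hypothetical:
#
#   {
#       'Smart Array P440 in Slot 2': {
#           '1I:2:1': 'OK',
#           '1I:2:2': 'OK',
#           'Summary': 'Sanitize Erase performed on the disks attached '
#                      'to the controller.',
#       }
#   }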