# Copyright (C) 2016 Canonical Ltd.
#
# Author: Ryan Harper <ryan.harper@canonical.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
import base64
import datetime
import json
import os
import sys
import time
from cloudinit import util
from cloudinit.distros import uses_systemd
# An event:
'''
{
"description": "executing late commands",
"event_type": "start",
"level": "INFO",
"name": "cmd-install/stage-late"
"origin": "cloudinit",
"timestamp": 1461164249.1590767,
},
{
"description": "executing late commands",
"event_type": "finish",
"level": "INFO",
"name": "cmd-install/stage-late",
"origin": "cloudinit",
"result": "SUCCESS",
"timestamp": 1461164249.1590767
}
'''
format_key = {
'%d': 'delta',
'%D': 'description',
'%E': 'elapsed',
'%e': 'event_type',
'%I': 'indent',
'%l': 'level',
'%n': 'name',
'%o': 'origin',
'%r': 'result',
'%t': 'timestamp',
'%T': 'total_time',
}
formatting_help = " ".join(["{0}: {1}".format(k.replace('%', '%%'), v)
for k, v in format_key.items()])
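# Illustrative note: formatting_help expands to a single string along the
# lines of
#     "%%d: delta %%D: description %%E: elapsed %%e: event_type ..."
# The '%' characters are doubled, presumably because the string is meant for
# an argparse help text, where a literal '%' must be written as '%%'.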
SUCCESS_CODE = 'successful'
FAIL_CODE = 'failure'
CONTAINER_CODE = 'container'
TIMESTAMP_UNKNOWN = (FAIL_CODE, -1, -1, -1)
def format_record(msg, event):
for i, j in format_key.items():
if i in msg:
# ensure consistent formatting of time values
if j in ['delta', 'elapsed', 'timestamp']:
msg = msg.replace(i, "{%s:08.5f}" % j)
else:
msg = msg.replace(i, "{%s}" % j)
return msg.format(**event)
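# Illustrative example (record values invented): given
#     record = {'name': 'init-local/check-cache', 'delta': 0.0005,
#               'description': 'attempting to read from cache [check]',
#               'indent': '|`->'}
# format_record("(%n) %d seconds in %I%D", record) returns roughly
#     "(init-local/check-cache) 00.00050 seconds in |`->attempting to read
#      from cache [check]"
# since the time-valued keys (delta, elapsed, timestamp) are rendered with
# the "{...:08.5f}" format.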
def dump_event_files(event):
content = dict((k, v) for k, v in event.items() if k not in ['content'])
files = content['files']
saved = []
for f in files:
fname = f['path']
fn_local = os.path.basename(fname)
fcontent = base64.b64decode(f['content']).decode('ascii')
util.write_file(fn_local, fcontent)
saved.append(fn_local)
return saved
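# Illustrative sketch of the event shape dump_event_files() expects (values
# invented): each entry in event['files'] carries a 'path' and base64-encoded
# 'content', e.g.
#     {'name': 'cmd-install', 'files': [
#         {'path': '/var/log/curtin/install.log', 'content': 'aGVsbG8='}]}
# would write 'install.log' (decoded to "hello") into the current working
# directory and return ['install.log'].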
def event_name(event):
if event:
return event.get('name')
return None
def event_type(event):
if event:
return event.get('event_type')
return None
def event_parent(event):
if event:
return event_name(event).split("/")[0]
return None
def event_timestamp(event):
return float(event.get('timestamp'))
def event_datetime(event):
return datetime.datetime.utcfromtimestamp(event_timestamp(event))
def delta_seconds(t1, t2):
return (t2 - t1).total_seconds()
def event_duration(start, finish):
return delta_seconds(event_datetime(start), event_datetime(finish))
def event_record(start_time, start, finish):
record = finish.copy()
record.update({
'delta': event_duration(start, finish),
'elapsed': delta_seconds(start_time, event_datetime(start)),
'indent': '|' + ' ' * (event_name(start).count('/') - 1) + '`->',
})
return record
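# Illustrative example (timestamps invented): for a start/finish pair named
# 'init-local/check-cache' with start timestamp 1461164249.10, finish
# timestamp 1461164249.25 and a boot-record start_time of 1461164249.00,
# event_record() returns a copy of the finish event extended with:
#     delta   = 0.15     # duration of the event itself
#     elapsed = 0.10     # offset of the event from the start of the record
#     indent  = '|`->'   # one '/' in the name, so no extra padding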
def total_time_record(total_time):
return 'Total Time: %3.5f seconds\n' % total_time
class SystemctlReader(object):
'''
Class for dealing with all systemctl subp calls in a consistent manner.
'''
def __init__(self, property, parameter=None):
self.epoch = None
self.args = ['/bin/systemctl', 'show']
if parameter:
self.args.append(parameter)
self.args.extend(['-p', property])
# Don't want the init of our object to break. Instead of throwing
# an exception, set an error code that gets checked when data is
# requested from the object
self.failure = self.subp()
def subp(self):
'''
Make a subp call based on set args and handle errors by setting
failure code
:return: whether the subp call failed or not
'''
try:
value, err = util.subp(self.args, capture=True)
if err:
return err
self.epoch = value
return None
except Exception as systemctl_fail:
return systemctl_fail
def parse_epoch_as_float(self):
'''
If subp call succeeded, return the timestamp from subp as a float.
:return: timestamp as a float
'''
# subp has 2 ways to fail: it either fails and throws an exception,
# or returns an error code. Raise an exception here in order to make
# sure both scenarios throw exceptions
if self.failure:
raise RuntimeError('Subprocess call to systemctl has failed, '
'returning error code ({})'
.format(self.failure))
# Output from systemctl show has the format Property=Value.
        # For example, UserspaceTimestampMonotonic=1929304
timestamp = self.epoch.split('=')[1]
# Timestamps reported by systemctl are in microseconds, converting
return float(timestamp) / 1000000
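# Illustrative usage (only meaningful on a systemd host; value invented):
#     SystemctlReader('UserspaceTimestampMonotonic').parse_epoch_as_float()
# runs '/bin/systemctl show -p UserspaceTimestampMonotonic', parses output
# shaped like 'UserspaceTimestampMonotonic=1929304' and returns 1.929304,
# i.e. the reported microseconds converted to seconds.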
def dist_check_timestamp():
'''
    Determine which init system a particular Linux distro is using.
    Each init system (systemd, upstart, etc.) has a different way of
    providing timestamps.
    :return: timestamps of kernel boot, kernel end of boot, and cloud-init
        start, or TIMESTAMP_UNKNOWN if the timestamps cannot be retrieved.
'''
if uses_systemd():
return gather_timestamps_using_systemd()
# Use dmesg to get timestamps if the distro does not have systemd
if util.is_FreeBSD() or 'gentoo' in \
util.system_info()['system'].lower():
return gather_timestamps_using_dmesg()
    # This distro does not match anything supported by cloud-init analyze;
    # just return the unknown-timestamp error codes.
return TIMESTAMP_UNKNOWN
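# Illustrative shape of the tuple returned above (values invented):
#     ('successful', 1461164240.1, 1461164242.9, 1461164249.2)
# i.e. (status, kernel_start, kernel_end, cloud-init activation) with times
# expressed as seconds since the epoch, or ('failure', -1, -1, -1) when the
# timestamps cannot be determined.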
def gather_timestamps_using_dmesg():
'''
    Gather the timestamps that correspond to the kernel beginning and
    finishing initialization, using dmesg rather than systemctl.
    :return: the two timestamps plus a dummy timestamp to keep consistency
        with gather_timestamps_using_systemd
'''
try:
data, _ = util.subp(['dmesg'], capture=True)
        # util.subp returns decoded text, so split it directly into lines
        split_entries = data.splitlines()
        for i in split_entries:
            if i.find('user') != -1:
                splitup = i.split()
                stripped = splitup[1].strip(']')
# kernel timestamp from dmesg is equal to 0,
# with the userspace timestamp relative to it.
user_space_timestamp = float(stripped)
kernel_start = float(time.time()) - float(util.uptime())
kernel_end = kernel_start + user_space_timestamp
                # systemd won't start cloud-init in this case,
                # so we cannot get that timestamp
return SUCCESS_CODE, kernel_start, kernel_end, \
kernel_end
except Exception:
pass
return TIMESTAMP_UNKNOWN
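# Illustrative sketch of the dmesg parsing above (message text varies by
# platform): the first line mentioning 'user' is split on whitespace and the
# bracketed value in its second field is taken as the userspace timestamp,
# so a line shaped roughly like
#     [    2.693843] ... user ...
# yields user_space_timestamp = 2.693843, which is added to the estimated
# wall-clock kernel start time to produce kernel_end.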
def gather_timestamps_using_systemd():
'''
    Gather the timestamps that correspond to the kernel beginning and
    finishing initialization, and to the cloud-init systemd unit activating.
:return: the three timestamps
'''
kernel_start = float(time.time()) - float(util.uptime())
try:
delta_k_end = SystemctlReader('UserspaceTimestampMonotonic')\
.parse_epoch_as_float()
delta_ci_s = SystemctlReader('InactiveExitTimestampMonotonic',
'cloud-init-local').parse_epoch_as_float()
base_time = kernel_start
status = SUCCESS_CODE
        # LXC-based containers do not set their monotonic zero point to when
        # the container starts; they keep using host boot as the zero point.
        # time.CLOCK_MONOTONIC_RAW is only available in Python 3.3+
if util.is_container():
            # time.monotonic() also uses host boot as its zero point
if sys.version_info >= (3, 3):
base_time = float(time.time()) - float(time.monotonic())
                # TODO: lxcfs automatically truncates /proc/uptime to seconds
                # in containers. When https://github.com/lxc/lxcfs/issues/292
                # is fixed, util.uptime() should be used instead of the stat
                # on /proc/1/cmdline below.
try:
file_stat = os.stat('/proc/1/cmdline')
kernel_start = file_stat.st_atime
except OSError as err:
raise RuntimeError('Could not determine container boot '
'time from /proc/1/cmdline. ({})'
.format(err))
status = CONTAINER_CODE
else:
status = FAIL_CODE
kernel_end = base_time + delta_k_end
cloudinit_sysd = base_time + delta_ci_s
except Exception as e:
        # Catch all exceptions: SystemctlReader can raise many different
        # errors, and any failure in systemctl means that the timestamps
        # cannot be obtained
print(e)
return TIMESTAMP_UNKNOWN
return status, kernel_start, kernel_end, cloudinit_sysd
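# Illustrative arithmetic for the systemd path (values invented): on a
# non-container host with
#     kernel_start = 1461164240.1   # time.time() - uptime
#     delta_k_end  = 2.8            # UserspaceTimestampMonotonic in seconds
#     delta_ci_s   = 9.1            # cloud-init-local InactiveExit in seconds
# the function returns
#     ('successful', 1461164240.1, 1461164242.9, 1461164249.2)
# because both monotonic deltas are offsets from the same base_time.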
def generate_records(events, blame_sort=False,
print_format="(%n) %d seconds in %I%D",
dump_files=False, log_datafiles=False):
'''
    Take in raw events and create parent-child relationships between them
    so that the events can be presented in chronological order.
    :param events: JSON events from the dump that represent events taken
        from logs
    :param blame_sort: whether to sort by timestamp or by time taken
    :param print_format: formatting to represent event, timestamp,
        and time taken by the event in one line
:param dump_files: whether to dump files into JSONs
:param log_datafiles: whether or not to log events generated
:return: boot records ordered chronologically
'''
sorted_events = sorted(events, key=lambda x: x['timestamp'])
records = []
start_time = None
total_time = 0.0
stage_start_time = {}
stages_seen = []
boot_records = []
unprocessed = []
    for e in range(0, len(sorted_events)):
        event = sorted_events[e]
        try:
            next_evt = sorted_events[e + 1]
except IndexError:
next_evt = None
if event_type(event) == 'start':
if event.get('name') in stages_seen:
records.append(total_time_record(total_time))
boot_records.append(records)
records = []
start_time = None
total_time = 0.0
if start_time is None:
stages_seen = []
start_time = event_datetime(event)
stage_start_time[event_parent(event)] = start_time
# see if we have a pair
if event_name(event) == event_name(next_evt):
if event_type(next_evt) == 'finish':
records.append(format_record(print_format,
event_record(start_time,
event,
next_evt)))
else:
# This is a parent event
records.append("Starting stage: %s" % event.get('name'))
unprocessed.append(event)
stages_seen.append(event.get('name'))
continue
else:
prev_evt = unprocessed.pop()
if event_name(event) == event_name(prev_evt):
record = event_record(start_time, prev_evt, event)
records.append(format_record("Finished stage: "
"(%n) %d seconds",
record) + "\n")
total_time += record.get('delta')
else:
# not a match, put it back
unprocessed.append(prev_evt)
records.append(total_time_record(total_time))
boot_records.append(records)
return boot_records
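# Illustrative sketch of the return value (record strings invented): one list
# per boot, each holding that boot's formatted records, e.g.
#     [['Starting stage: init-local',
#       '(init-local/check-cache) 00.00050 seconds in |`->attempting to '
#       'read from cache [check]',
#       'Finished stage: (init-local) 00.12000 seconds\n',
#       'Total Time: 0.12000 seconds\n']]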
def show_events(events, print_format):
'''
A passthrough method that makes it easier to call generate_records()
    :param events: JSON events from the dump that represent events taken
        from logs
    :param print_format: formatting to represent event, timestamp,
        and time taken by the event in one line
:return: boot records ordered chronologically
'''
return generate_records(events, print_format=print_format)
def load_events_infile(infile):
'''
    Take in a log file, read it, and convert it to JSON.
    :param infile: the log file to be read
    :return: JSON version of the logfile and the raw file contents
'''
data = infile.read()
try:
return json.loads(data), data
except ValueError:
return None, data
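# Illustrative usage (file name invented): callers typically pass either the
# JSON produced by 'cloud-init analyze dump' or a raw cloud-init log:
#     with open('events.json') as infile:
#         events, raw = load_events_infile(infile)
#     if events is None:
#         pass  # not JSON; 'raw' still holds the text for other parsers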