File: processor.py

# Copyright 2019 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Implements the interface of the results_processor module.

Provides functions to process intermediate results, and the entry point to
the standalone version of Results Processor.
"""

from __future__ import print_function

import datetime
import gzip
import json
import logging
import os
import posixpath
import pprint
import random
import re
import shutil
import time

from py_utils import cloud_storage
from core.results_processor import command_line
from core.results_processor import compute_metrics
from core.results_processor import formatters
from core.results_processor import util
from core.tbmv3 import trace_processor

# The import error below is mysterious: it produces no detailed error message,
# and appending the proper directory to sys.path does not help.
from core import path_util

path_util.AddAndroidDeviceInteractionToPath()
from devil.android import device_utils  # pylint: disable=import-error
from devil.android.sdk import adb_wrapper  # pylint: disable=import-error

path_util.AddTelemetryToPath()
from telemetry.core import cros_interface
from telemetry.internal.platform import device_finder

from tracing.trace_data import trace_data
from tracing.value.diagnostics import all_diagnostics
from tracing.value.diagnostics import generic_set
from tracing.value.diagnostics import reserved_infos
from tracing.value import histogram
from tracing.value import histogram_set
from tracing.value import legacy_unit_info

TEST_RESULTS = '_test_results.jsonl'
DIAGNOSTICS_NAME = 'diagnostics.json'
MEASUREMENTS_NAME = 'measurements.json'
CONVERTED_JSON_SUFFIX = '_converted.json'

FORMATS_WITH_METRICS = ['csv', 'histograms', 'html']


def ProcessResults(options, is_unittest=False):
  """Process intermediate results and produce the requested outputs.

  This function takes the intermediate results generated by Telemetry after
  running benchmarks (including artifacts such as traces, etc.), and processes
  them as requested by the result processing options.

  Args:
    options: An options object with values parsed from the command line and
      after any adjustments from ProcessOptions were applied.
    is_unittest: If True, this benchmark is run as part of a unittest, and
      should not upload to result sink (the calling unittest is responsible
      for determining the test result).
  """
  if not getattr(options, 'output_formats', None):
    return 0

  test_results = _LoadTestResults(options.intermediate_dir)
  if not test_results:
    # TODO(crbug.com/40634925): Make sure that no one is expecting Results
    # Processor to output results in the case of empty input
    # and make this an error.
    logging.warning('No test results to process.')

  test_suite_start = (test_results[0]['startTime']
                      if test_results and 'startTime' in test_results[0] else
                      datetime.datetime.utcnow().isoformat() + 'Z')
  run_identifier = RunIdentifier(options.results_label, test_suite_start)
  should_compute_metrics = any(
      fmt in FORMATS_WITH_METRICS for fmt in options.output_formats)

  if options.extra_metrics:
    _AddExtraMetrics(test_results, options.extra_metrics)

  begin_time = time.time()
  util.ApplyInParallel(
      lambda result: ProcessTestResult(
          test_result=result,
          upload_bucket=options.upload_bucket,
          results_label=options.results_label,
          run_identifier=run_identifier,
          test_suite_start=test_suite_start,
          should_compute_metrics=should_compute_metrics,
          max_num_values=options.max_values_per_test_case,
          test_path_format=options.test_path_format,
          trace_processor_path=options.trace_processor_path,
          enable_tbmv3=options.experimental_tbmv3_metrics,
          fetch_power_profile=options.fetch_power_profile),
      test_results,
      on_failure=util.SetUnexpectedFailure,
  )
  processing_duration = time.time() - begin_time
  _AmortizeProcessingDuration(processing_duration, test_results)

  if should_compute_metrics:
    histogram_dicts = ExtractHistograms(test_results)

  if not is_unittest:
    util.TryUploadingResultToResultSink(test_results)

  for output_format in options.output_formats:
    logging.info('Processing format: %s', output_format)
    formatter = formatters.FORMATTERS[output_format]
    if output_format in FORMATS_WITH_METRICS:
      output_file = formatter.ProcessHistogramDicts(histogram_dicts, options)
    else:
      output_file = formatter.ProcessIntermediateResults(test_results, options)

    print('View results at file://', output_file, sep='')

  exit_code = GenerateExitCode(test_results)

  if options.fetch_device_data:
    if options.fetch_device_data_on_success and exit_code != 0:
      logging.warning('Not fetching device data due to non-zero exit code.')
    else:
      PullDeviceArtifacts(options)

  return exit_code


def _AmortizeProcessingDuration(processing_duration, test_results):
  test_results_count = len(test_results)
  if test_results_count:
    per_story_cost = processing_duration / test_results_count
    logging.info(
        'Amortizing processing cost to story runtimes: %.2fs per story.',
        per_story_cost)
    for result in test_results:
      if 'runDuration' in result and result['runDuration']:
        current_duration = float(result['runDuration'].rstrip('s'))
        new_story_cost = current_duration + per_story_cost
        result['runDuration'] = str(new_story_cost) + 's'
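# A worked example of the amortization above (hypothetical numbers): if
# processing took 6.0s across three test results, per_story_cost is 2.0s and a
# 'runDuration' of '4.5s' becomes '6.5s'.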


def _AddExtraMetrics(test_results, extra_metrics):
  extra_metric_tags = []
  for metric in extra_metrics:
    version, name = metric.split(':')
    if version not in ('tbmv2', 'tbmv3'):
      raise ValueError('Invalid metric name: %s' % metric)
    extra_metric_tags.append({'key': version, 'value': name})

  for test_result in test_results:
    test_result.setdefault('tags', []).extend(extra_metric_tags)
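# For illustration (hypothetical metric name): an extra_metrics entry of
# 'tbmv3:dummy_metric' is appended to every test result's 'tags' list as
# {'key': 'tbmv3', 'value': 'dummy_metric'}.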


def ProcessTestResult(test_result, upload_bucket, results_label, run_identifier,
                      test_suite_start, should_compute_metrics, max_num_values,
                      test_path_format, trace_processor_path, enable_tbmv3,
                      fetch_power_profile):
  ConvertProtoTraces(test_result, trace_processor_path)
  AggregateTBMv2Traces(test_result)
  if enable_tbmv3:
    AggregateTBMv3Traces(test_result)
  if upload_bucket is not None:
    UploadArtifacts(test_result, upload_bucket, run_identifier)

  if should_compute_metrics:
    test_result['_histograms'] = histogram_set.HistogramSet()
    compute_metrics.ComputeTBMv2Metrics(test_result)
    if enable_tbmv3:
      compute_metrics.ComputeTBMv3Metrics(test_result, trace_processor_path,
                                          fetch_power_profile)
    ExtractMeasurements(test_result)
    num_values = len(test_result['_histograms'])
    if max_num_values is not None and num_values > max_num_values:
      logging.error('%s produced %d values, but only %d are allowed.',
                    test_result['testPath'], num_values, max_num_values)
      util.SetUnexpectedFailure(test_result)
      del test_result['_histograms']
    else:
      AddDiagnosticsToHistograms(test_result, test_suite_start, results_label,
                                 test_path_format)


def ExtractHistograms(test_results):
  histograms = histogram_set.HistogramSet()
  for result in test_results:
    if '_histograms' in result:
      histograms.Merge(result['_histograms'])
  histograms.DeduplicateDiagnostics()
  return histograms.AsDicts()


def GenerateExitCode(test_results):
  """Generate an exit code as expected by callers.

  Returns:
    1 if there were failed tests.
    111 if all tests were skipped. (See crbug.com/1019139#c8 for details).
    0 otherwise.
  """
  if any(r['status'] == 'FAIL' for r in test_results):
    return 1
  if test_results and all(r['status'] == 'SKIP' for r in test_results):
    return 111
  return 0


def _LoadTestResults(intermediate_dir):
  """Load intermediate results from a file into a list of test results."""
  intermediate_file = os.path.join(intermediate_dir, TEST_RESULTS)
  test_results = []
  with open(intermediate_file) as f:
    for line in f:
      record = json.loads(line)
      if 'testResult' in record:
        test_results.append(record['testResult'])
  return test_results
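# A minimal sketch of a record consumed above (all field values are
# illustrative; only the keys are the ones this module reads):
#   {"testResult": {
#       "testPath": "benchmark/story",
#       "status": "PASS",
#       "startTime": "2019-09-12T12:00:01.000Z",
#       "runDuration": "4.5s",
#       "resultId": "0",
#       "tags": [{"key": "story_tag", "value": "example"}],
#       "outputArtifacts": {
#           "trace/example.pb": {
#               "filePath": "/tmp/example.pb",
#               "contentType": "application/x-protobuf"}}}}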


def _IsProtoTrace(trace_name):
  return (trace_name.startswith('trace/') and
          (trace_name.endswith('.pb') or trace_name.endswith('.pb.gz')))


def _IsTBMv2Trace(trace_name):
  return (trace_name.startswith('trace/') and
          (trace_name.endswith('.json') or trace_name.endswith('.json.gz')
           or trace_name.endswith('.txt') or trace_name.endswith('.txt.gz')))


def _BuildOutputPath(input_files, output_name):
  """Build a path to a file in the same folder as input_files."""
  return os.path.join(os.path.dirname(os.path.commonprefix(input_files)),
                      output_name)


def ConvertProtoTraces(test_result, trace_processor_path):
  """Convert proto traces to json.

  For a test result with proto traces, converts them to json using
  trace_processor and stores the json trace as a separate artifact.
  """
  artifacts = test_result.get('outputArtifacts', {})
  proto_traces = [name for name in artifacts if _IsProtoTrace(name)]

  # TODO(crbug.com/40638725): After implementation of TBMv3-style clock sync,
  # it will be possible to convert the aggregated proto trace, not
  # individual ones.
  for proto_trace_name in proto_traces:
    proto_file_path = artifacts[proto_trace_name]['filePath']
    json_file_path = (os.path.splitext(proto_file_path)[0] +
                      CONVERTED_JSON_SUFFIX)
    json_trace_name = (posixpath.splitext(proto_trace_name)[0] +
                       CONVERTED_JSON_SUFFIX)
    trace_processor.ConvertProtoTraceToJson(
        trace_processor_path, proto_file_path, json_file_path)
    artifacts[json_trace_name] = {
        'filePath': json_file_path,
        'contentType': 'application/json',
    }
    logging.info('%s: Proto trace converted. Source: %s. Destination: %s.',
                 test_result['testPath'], proto_file_path, json_file_path)


def AggregateTBMv2Traces(test_result):
  """Replace individual non-proto traces with an aggregate HTML trace.

  For a test result with non-proto traces, generates an aggregate HTML trace.
  Removes all entries for individual traces and adds one entry for
  the aggregate one.
  """
  artifacts = test_result.get('outputArtifacts', {})
  traces = [name for name in artifacts if _IsTBMv2Trace(name)]
  if traces:
    trace_files = [artifacts[name]['filePath'] for name in traces]
    html_path = _BuildOutputPath(trace_files, compute_metrics.HTML_TRACE_NAME)
    trace_data.SerializeAsHtml(trace_files, html_path)
    artifacts[compute_metrics.HTML_TRACE_NAME] = {
        'filePath': html_path,
        'contentType': 'text/html',
    }
    logging.info('%s: TBMv2 traces aggregated. Sources: %s. Destination: %s.',
                 test_result['testPath'], trace_files, html_path)
  for name in traces:
    del artifacts[name]


def AggregateTBMv3Traces(test_result):
  """Replace individual proto traces with an aggregate one.

  For a test result with proto traces, concatenates them into one file.
  Removes all entries for individual traces and adds one entry for
  the aggregate one.
  """
  artifacts = test_result.get('outputArtifacts', {})
  traces = [name for name in artifacts if _IsProtoTrace(name)]
  if traces:
    proto_files = [artifacts[name]['filePath'] for name in traces]
    concatenated_path = _BuildOutputPath(
        proto_files, compute_metrics.CONCATENATED_PROTO_NAME)
    with open(concatenated_path, 'wb') as concatenated_trace:
      for trace_file in proto_files:
        if trace_file.endswith('.pb.gz'):
          with gzip.open(trace_file, 'rb') as f:
            shutil.copyfileobj(f, concatenated_trace)
        else:
          with open(trace_file, 'rb') as f:
            shutil.copyfileobj(f, concatenated_trace)
    artifacts[compute_metrics.CONCATENATED_PROTO_NAME] = {
        'filePath': concatenated_path,
        'contentType': 'application/x-protobuf',
    }
    logging.info('%s: Proto traces aggregated. Sources: %s. Destination: %s.',
                 test_result['testPath'], proto_files, concatenated_path)
  for name in traces:
    del artifacts[name]


def RunIdentifier(results_label, test_suite_start):
  """Construct an identifier for the current script run"""
  if results_label:
    identifier_parts = [re.sub(r'\W+', '_', results_label)]
  else:
    identifier_parts = []
  # Time is rounded to seconds and delimiters are removed.
  # The first 19 chars of the string match 'YYYY-MM-DDTHH:MM:SS'.
  identifier_parts.append(re.sub(r'\W+', '', test_suite_start[:19]))
  identifier_parts.append(str(random.randint(1, 10**5)))
  return '_'.join(identifier_parts)
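# Example (hypothetical values): RunIdentifier('my label',
# '2019-09-12T12:00:01.123Z') would return something like
# 'my_label_20190912T120001_54321', where the final component is random.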


def UploadArtifacts(test_result, upload_bucket, run_identifier):
  """Upload all artifacts to cloud.

  For a test run, uploads all its artifacts to cloud and sets fetchUrl and
  viewUrl fields in intermediate_results.
  """
  artifacts = test_result.get('outputArtifacts', {})
  for name, artifact in artifacts.items():
    # TODO(crbug.com/40634925): Think of a more general way to
    # specify which artifacts deserve uploading.
    if name in [DIAGNOSTICS_NAME, MEASUREMENTS_NAME]:
      continue
    retry_identifier = 'retry_%s' % test_result.get('resultId', '0')
    remote_name = '/'.join(
        [run_identifier, test_result['testPath'], retry_identifier, name])
    urlsafe_remote_name = re.sub(r'[^A-Za-z0-9/.-]+', '_', remote_name)
    cloud_filepath = cloud_storage.Upload(upload_bucket, urlsafe_remote_name,
                                          artifact['filePath'])
    # Per crbug.com/1033755 some services require fetchUrl.
    artifact['fetchUrl'] = cloud_filepath.fetch_url
    artifact['viewUrl'] = cloud_filepath.view_url
    logging.info('%s: Uploaded %s to %s', test_result['testPath'], name,
                 artifact['viewUrl'])
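# Sketch of the remote name built above (hypothetical values): with
# run_identifier 'my_label_20190912T120001_54321', a testPath of
# 'benchmark/story', resultId '0' and artifact name 'trace.html', the object
# is uploaded as
# 'my_label_20190912T120001_54321/benchmark/story/retry_0/trace.html'
# (characters outside [A-Za-z0-9/.-] are first replaced with '_').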


def GetTraceUrl(test_result):
  artifacts = test_result.get('outputArtifacts', {})
  trace_artifact = artifacts.get(compute_metrics.HTML_TRACE_NAME, {})
  if 'viewUrl' in trace_artifact:
    return trace_artifact['viewUrl']
  if 'filePath' in trace_artifact:
    return 'file://' + trace_artifact['filePath']
  return None


def AddDiagnosticsToHistograms(test_result, test_suite_start, results_label,
                               test_path_format):
  """Add diagnostics to all histograms of a test result.

  Reads diagnostics from the test artifact and adds them to all histograms.
  Also sets additional diagnostics based on test result metadata.
  This overwrites the corresponding diagnostics previously set by e.g.
  run_metrics.
  """
  artifacts = test_result.get('outputArtifacts', {})
  if DIAGNOSTICS_NAME in artifacts:
    with open(artifacts[DIAGNOSTICS_NAME]['filePath']) as f:
      diagnostics = json.load(f)['diagnostics']
    for name, diag in diagnostics.items():
      # For now, we only support GenericSet diagnostics that are serialized
      # as lists of values.
      assert isinstance(diag, list)
      test_result['_histograms'].AddSharedDiagnosticToAllHistograms(
          name, generic_set.GenericSet(diag))
    del artifacts[DIAGNOSTICS_NAME]

  test_suite, test_case = util.SplitTestPath(test_result, test_path_format)
  if 'startTime' in test_result:
    test_start_ms = util.IsoTimestampToEpoch(test_result['startTime']) * 1e3
  else:
    test_start_ms = None
  test_suite_start_ms = util.IsoTimestampToEpoch(test_suite_start) * 1e3
  story_tags = [
      tag['value'] for tag in test_result.get('tags', [])
      if tag['key'] == 'story_tag'
  ]
  result_id = int(test_result.get('resultId', 0))
  trace_url = GetTraceUrl(test_result)

  additional_diagnostics = [
      (reserved_infos.BENCHMARKS, test_suite),
      (reserved_infos.BENCHMARK_START, test_suite_start_ms),
      (reserved_infos.LABELS, results_label),
      (reserved_infos.STORIES, test_case),
      (reserved_infos.STORYSET_REPEATS, result_id),
      (reserved_infos.STORY_TAGS, story_tags),
      (reserved_infos.TRACE_START, test_start_ms),
      (reserved_infos.TRACE_URLS, trace_url),
  ]
  for name, value in _WrapDiagnostics(additional_diagnostics):
    test_result['_histograms'].AddSharedDiagnosticToAllHistograms(name, value)


def MeasurementToHistogram(name, measurement):
  unit = measurement['unit']
  samples = measurement['samples']
  description = measurement.get('description')
  if unit in legacy_unit_info.LEGACY_UNIT_INFO:
    info = legacy_unit_info.LEGACY_UNIT_INFO[unit]
    unit = info.name
    samples = [s * info.conversion_factor for s in samples]
  if unit not in histogram.UNIT_NAMES:
    raise ValueError(
        ('Unknown unit: "%s". Valid options include:\n%s\n'
         'Valid legacy options include:\n%s') %
        (unit, pprint.pformat(histogram.UNIT_NAMES),
         pprint.pformat(list(legacy_unit_info.LEGACY_UNIT_INFO.keys()))))
  return histogram.Histogram.Create(name,
                                    unit,
                                    samples,
                                    description=description)
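# Sketch of the measurement format accepted above (hypothetical values,
# assuming 'ms' is among histogram.UNIT_NAMES):
#   MeasurementToHistogram('page_load',
#                          {'unit': 'ms', 'samples': [12.3, 45.6],
#                           'description': 'ad-hoc timing'})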


def _WrapDiagnostics(info_value_pairs):
  """Wrap diagnostic values in corresponding Diagnostics classes.

  Args:
    info_value_pairs: any iterable of pairs (info, value), where info is one of
      reserved infos defined in tracing.value.diagnostics.reserved_infos, and
      value can be any json-serializable object.

  Returns:
    An iterator over pairs (diagnostic name, diagnostic value).
  """
  for info, value in info_value_pairs:
    if value is None or value == []:
      continue
    if info.type == 'GenericSet' and not isinstance(value, list):
      value = [value]
    diag_class = all_diagnostics.GetDiagnosticClassForName(info.type)
    yield info.name, diag_class(value)
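# For illustration (hypothetical value, and assuming reserved_infos.LABELS is
# a GenericSet-typed info, as its use with a scalar results_label above
# suggests): (reserved_infos.LABELS, 'my_label') would be yielded as
# (reserved_infos.LABELS.name, generic_set.GenericSet(['my_label'])).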


def ExtractMeasurements(test_result):
  """Add ad-hoc measurements to histogram dicts"""
  artifacts = test_result.get('outputArtifacts', {})
  if MEASUREMENTS_NAME in artifacts:
    with open(artifacts[MEASUREMENTS_NAME]['filePath']) as f:
      measurements = json.load(f)['measurements']
    for name, measurement in measurements.items():
      test_result['_histograms'].AddHistogram(
          MeasurementToHistogram(name, measurement))
    del artifacts[MEASUREMENTS_NAME]


def PullDeviceArtifacts(options):
  """Pull files from on-device path using `adb`

  Args:
    device_path: (string) absolute path to the file/folder on-device to pull.
    local_path: (string) absolute path to local destination.
    platform: (string) platform associated with device.
              one of 'android' or 'chromeos'.

  Raises:
    device_errors.AdbCommandFailedError
  """
  device_path = options.device_data_path
  local_path = options.local_data_path
  platform = options.fetch_data_platform

  if not device_path:
    logging.warning('No path to data specified to pull from device. '
                    'Skipping.')
    return

  if platform == 'android':
    logging.info('Getting devices...')
    devices = adb_wrapper.AdbWrapper.Devices()
    logging.info('Found devices: %s', ', '.join(str(d) for d in devices))
    # Each docker host in chrome-swarming has one device attached, so use the
    # first AdbWrapper instance as the attached device.
    utils = device_utils.DeviceUtils(devices[0])
    logging.info('Pulling files from %s to %s', device_path, local_path)
    utils.PullFile(device_path, local_path)
    local_profile_dir = os.path.join(local_path, os.path.basename(device_path))
    for root, _, filenames in os.walk(local_profile_dir):
      for filename in filenames:
        relative_path = os.path.relpath(os.path.join(root, filename),
                                        local_profile_dir)
        to_be_removed = f"{device_path}/{relative_path}"
        utils.RemovePath(to_be_removed)
        logging.info('Removed %s', to_be_removed)
    logging.info('Finished pulling files.')
  elif platform == 'chromeos':
    logging.warning('Searching for devices')
    # Each docker host in chrome-swarming should only have one local device.
    devices = device_finder.GetDevicesMatchingOptions(options)

    device = None
    if options.remote:
      target_identifier = 'cros:' + options.remote
      logging.info('Target identifier: %s' % target_identifier)
      for d in devices:
        if d.guid == target_identifier:
          # options.remote should be "variable_chromeos_device_hostname", so
          # select the device whose guid matches it. The guid is
          # "cros:variable_chromeos_device_hostname"; the desktop guid is
          # just "desktop". Searching for devices usually returns two:
          #   1. variable_chromeos_device_hostname
          #   2. desktop
          # variable_chromeos_device_hostname is usually first in the list,
          # but verify the guid before selecting it to be sure.
          device = d
          logging.info('Selecting %s' % device.name)
          break
    else:
      # default behavior is to select the first one, since no identifier
      # is provided.
      device = devices[0]
      logging.info('Defaulting to first selected device: %s' % device.name)

    if not device:
      logging.warning('No device found with name %s' % str(options.remote))
      return

    interface = cros_interface.CrOSInterface(device.host_name, device.ssh_port,
                                             device.ssh_identity)
    # Search for all profraw files
    logging.info('Searching for .profraw files at %s' % device_path)
    stdout, _ = interface.RunCmdOnDevice(
        ['find', device_path, '-regex', '.*.profraw'])
    files = stdout.splitlines()
    if not files:
      logging.warning('No profraw files found at %s' % device_path)
      return
    logging.info('Found %d profiles: %s' % (len(files), str(files)))

    # profraw files are written to ${ISOLATED_DIR}/profraw/
    write_path = os.path.join(local_path, 'profraw')
    if not os.path.exists(write_path):
      logging.warning('%s does not exist; creating it.' % write_path)
      os.mkdir(write_path)

    for f in files:
      logging.info('Copying file %s' % f)
      interface.GetFile(f, os.path.join(write_path, os.path.basename(f)))
  else:
    logging.warning('No supported platform specified. Doing nothing.')
    return


def main(args=None):
  """Entry point for the standalone version of the results_processor script."""
  parser = command_line.ArgumentParser(standalone=True)
  options = parser.parse_args(args)
  command_line.ProcessOptions(options)
  return ProcessResults(options, options.is_unittest)