File: safe_math_arm_impl.h

// Copyright 2017 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_
#define BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_

#include <cassert>
#include <type_traits>

#include "base/numerics/safe_conversions.h"

namespace base {
namespace internal {

template <typename T, typename U>
struct CheckedMulFastAsmOp {
  static const bool is_supported =
      kEnableAsmCode && FastIntegerArithmeticPromotion<T, U>::is_contained;

  // The following is not an assembler routine and is thus constexpr safe; it
  // just emits much more efficient code than the Clang and GCC builtins for
  // performing overflow-checked multiplication when a twice-wider type is
  // available. The code below compiles down to 2-3 instructions, depending on
  // the width of the types in use.
  // As an example, an int32_t multiply compiles to:
  //    smull   r0, r1, r0, r1
  //    cmp     r1, r1, asr #31
  // And an int16_t multiply compiles to:
  //    smulbb  r1, r1, r0
  //    asr     r2, r1, #16
  //    cmp     r2, r1, asr #15
  template <typename V>
  static constexpr bool Do(T x, U y, V* result) {
    using Promotion = typename FastIntegerArithmeticPromotion<T, U>::type;
    Promotion presult;

    presult = static_cast<Promotion>(x) * static_cast<Promotion>(y);
    if (!IsValueInRangeForNumericType<V>(presult))
      return false;
    *result = static_cast<V>(presult);
    return true;
  }
};
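
// Illustrative sketch (hypothetical direct call; in practice this op is
// typically reached through the CheckedNumeric machinery rather than invoked
// directly). It shows the contract of Do() above:
//
//   int32_t product;
//   bool ok =
//       CheckedMulFastAsmOp<int32_t, int32_t>::Do(0x10000, 0x10000, &product);
//   // ok == false: 0x10000 * 0x10000 == 2^32 does not fit in int32_t, so
//   // Do() returns false and leaves `product` untouched.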

template <typename T, typename U>
struct ClampedAddFastAsmOp {
  static const bool is_supported =
      kEnableAsmCode && BigEnoughPromotion<T, U>::is_contained &&
      IsTypeInRangeForNumericType<
          int32_t,
          typename BigEnoughPromotion<T, U>::type>::value;

  template <typename V>
  __attribute__((always_inline)) static V Do(T x, U y) {
    // This will get promoted to an int, so let the compiler do whatever is
    // clever and rely on the saturated cast to bounds check.
    if (IsIntegerArithmeticSafe<int, T, U>::value)
      return saturated_cast<V>(static_cast<int>(x) + static_cast<int>(y));

    int32_t result;
    int32_t x_i32 = checked_cast<int32_t>(x);
    int32_t y_i32 = checked_cast<int32_t>(y);

    asm("qadd %[result], %[first], %[second]"
        : [result] "=r"(result)
        : [first] "r"(x_i32), [second] "r"(y_i32));
    return saturated_cast<V>(result);
  }
};
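
// Illustrative note: QADD performs a saturating 32-bit signed addition, so a
// hypothetical direct call such as
//   ClampedAddFastAsmOp<int32_t, int32_t>::Do<int32_t>(INT32_MAX, 1)
// stays pinned at INT32_MAX instead of wrapping; the trailing
// saturated_cast<V> then clamps again when V is narrower than int32_t.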

template <typename T, typename U>
struct ClampedSubFastAsmOp {
  static const bool is_supported =
      kEnableAsmCode && BigEnoughPromotion<T, U>::is_contained &&
      IsTypeInRangeForNumericType<
          int32_t,
          typename BigEnoughPromotion<T, U>::type>::value;

  template <typename V>
  __attribute__((always_inline)) static V Do(T x, U y) {
    // This will get promoted to an int, so let the compiler do whatever is
    // clever and rely on the saturated cast to bounds check.
    if (IsIntegerArithmeticSafe<int, T, U>::value)
      return saturated_cast<V>(static_cast<int>(x) - static_cast<int>(y));

    int32_t result;
    int32_t x_i32 = checked_cast<int32_t>(x);
    int32_t y_i32 = checked_cast<int32_t>(y);

    asm("qsub %[result], %[first], %[second]"
        : [result] "=r"(result)
        : [first] "r"(x_i32), [second] "r"(y_i32));
    return saturated_cast<V>(result);
  }
};
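
// Illustrative note: this mirrors the addition above with QSUB, so e.g. a
// hypothetical ClampedSubFastAsmOp<int32_t, int32_t>::Do<int32_t>(INT32_MIN, 1)
// saturates to INT32_MIN rather than wrapping around to INT32_MAX.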

template <typename T, typename U>
struct ClampedMulFastAsmOp {
  static const bool is_supported =
      kEnableAsmCode && CheckedMulFastAsmOp<T, U>::is_supported;

  template <typename V>
  __attribute__((always_inline)) static V Do(T x, U y) {
    // Use the CheckedMulFastAsmOp for full-width 32-bit values, because
    // it's fewer instructions than promoting and then saturating.
    if (!IsIntegerArithmeticSafe<int32_t, T, U>::value &&
        !IsIntegerArithmeticSafe<uint32_t, T, U>::value) {
      V result;
      return CheckedMulFastAsmOp<T, U>::Do(x, y, &result)
                 ? result
                 : CommonMaxOrMin<V>(IsValueNegative(x) ^ IsValueNegative(y));
    }

    assert((FastIntegerArithmeticPromotion<T, U>::is_contained));
    using Promotion = typename FastIntegerArithmeticPromotion<T, U>::type;
    return saturated_cast<V>(static_cast<Promotion>(x) *
                             static_cast<Promotion>(y));
  }
};
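
// Illustrative note: for full-width 32-bit operands the struct above reuses
// CheckedMulFastAsmOp and, on overflow, saturates via CommonMaxOrMin<V> toward
// the minimum when exactly one operand is negative and toward the maximum
// otherwise; narrower operands are simply widened to the fast promotion type
// and clamped by saturated_cast<V>. For example, a hypothetical
// ClampedMulFastAsmOp<int32_t, int32_t>::Do<int32_t>(INT32_MAX, 2) returns
// INT32_MAX.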

}  // namespace internal
}  // namespace base

#endif  // BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_