File: disable-propagate-nan-in-eigen3.patch

Author: Shengqi Chen <harry-chen@outlook.com>
Description: Fix compilation with Eigen 3.4.0
 onnxruntime >= 1.19.0 calls Eigen's min()/max() with the PropagateNaN option, which is not supported in Eigen 3.4.0.
 This patch removes the PropagateNaN template argument from those calls.
 See: https://github.com/microsoft/onnxruntime/commit/7543dd040b2d32109a2718d7276d3aca1edadaae
 See: https://gitlab.com/libeigen/eigen/-/commit/5d918b82a80118ebb19572770a0c8e1f5fe06b91
Forwarded: not-needed
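
A minimal standalone sketch (not part of the patch itself) of the semantic
difference this patch trades away: with Eigen::PropagateNaN, a NaN in either
operand propagates to the result, matching the ONNX Min/Max specification,
while Eigen's default min()/max() leaves NaN handling unspecified and
typically returns the non-NaN operand. The snippet assumes an Eigen version
recent enough to provide the PropagateNaN option:

  // Hedged example; the PropagateNaN overloads only exist in Eigen
  // versions newer than the 3.4.0 release this patch targets.
  #include <Eigen/Dense>
  #include <iostream>
  #include <limits>

  int main() {
    const float nan = std::numeric_limits<float>::quiet_NaN();
    Eigen::ArrayXf a(2), b(2);
    a << 1.0f, nan;
    b << 2.0f, 3.0f;

    // Default coefficient-wise min: what the patched code falls back to.
    // NaN handling is unspecified; the non-NaN operand is typically kept.
    std::cout << a.min(b).transpose() << "\n";

    // NaN-propagating min: the call form this patch removes.
    std::cout << a.min<Eigen::PropagateNaN>(b).transpose() << "\n";
    return 0;
  }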

--- a/onnxruntime/core/providers/cpu/math/element_wise_ops.cc
+++ b/onnxruntime/core/providers/cpu/math/element_wise_ops.cc
@@ -705,7 +705,7 @@
   for (int index = 1; index < inputCount; index++) {
     auto& data_n = *ctx->Input<Tensor>(index);
     ORT_ENFORCE(data_n.Shape() == shape, "All inputs must have the same shape");
-    min = min.array().template min<Eigen::PropagateNaN>(EigenMap<float>(data_n).array());
+    min = min.array().min(EigenMap<float>(data_n).array());
   }

   return Status::OK();
@@ -721,15 +721,15 @@
     ProcessBroadcastSpanFuncs funcs{
         [](BroadcastHelper& per_iter_bh) {
           per_iter_bh.OutputEigen<T>() =
-              per_iter_bh.EigenInput1<T>().array().template min<Eigen::PropagateNaN>(per_iter_bh.ScalarInput0<T>());
+              per_iter_bh.EigenInput1<T>().array().min(per_iter_bh.ScalarInput0<T>());
         },
         [](BroadcastHelper& per_iter_bh) {
           per_iter_bh.OutputEigen<T>() =
-              per_iter_bh.EigenInput0<T>().array().template min<Eigen::PropagateNaN>(per_iter_bh.ScalarInput1<T>());
+              per_iter_bh.EigenInput0<T>().array().min(per_iter_bh.ScalarInput1<T>());
         },
         [](BroadcastHelper& per_iter_bh) {
           per_iter_bh.OutputEigen<T>() =
-              per_iter_bh.EigenInput0<T>().array().template min<Eigen::PropagateNaN>(
+              per_iter_bh.EigenInput0<T>().array().min(
                   per_iter_bh.EigenInput1<T>().array());
         }};

@@ -828,7 +828,7 @@
   for (int index = 1; index < inputCount; index++) {
     auto& data_n = *ctx->Input<Tensor>(index);
     ORT_ENFORCE(data_n.Shape() == shape, "All inputs must have the same shape");
-    max = max.array().template max<Eigen::PropagateNaN>(EigenMap<float>(data_n).array());
+    max = max.array().max(EigenMap<float>(data_n).array());
   }

   return Status::OK();
@@ -844,15 +844,15 @@
     ProcessBroadcastSpanFuncs funcs{
         [](BroadcastHelper& per_iter_bh) {
           per_iter_bh.OutputEigen<T>() =
-              per_iter_bh.EigenInput1<T>().array().template max<Eigen::PropagateNaN>(per_iter_bh.ScalarInput0<T>());
+              per_iter_bh.EigenInput1<T>().array().max(per_iter_bh.ScalarInput0<T>());
         },
         [](BroadcastHelper& per_iter_bh) {
           per_iter_bh.OutputEigen<T>() =
-              per_iter_bh.EigenInput0<T>().array().template max<Eigen::PropagateNaN>(per_iter_bh.ScalarInput1<T>());
+              per_iter_bh.EigenInput0<T>().array().max(per_iter_bh.ScalarInput1<T>());
         },
         [](BroadcastHelper& per_iter_bh) {
           per_iter_bh.OutputEigen<T>() =
-              per_iter_bh.EigenInput0<T>().array().template max<Eigen::PropagateNaN>(
+              per_iter_bh.EigenInput0<T>().array().max(
                   per_iter_bh.EigenInput1<T>().array());
         }};