File: include-system-onnxruntime-header.patch

package info (click to toggle)
toppic 1.8.0+repack1-1
  • links: PTS, VCS
  • area: main
  • in suites: forky, sid
  • size: 111,364 kB
  • sloc: cpp: 62,240; xml: 8,121; javascript: 1,356; python: 755; sh: 108; makefile: 24
file content (146 lines) | stat: -rw-r--r-- 6,354 bytes parent folder | download
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
diff --git a/src/topfd/ecscore/score/onnx_ecscore.cpp b/src/topfd/ecscore/score/onnx_ecscore.cpp
index b0772c1..7a5ece3 100644
--- a/src/topfd/ecscore/score/onnx_ecscore.cpp
+++ b/src/topfd/ecscore/score/onnx_ecscore.cpp
@@ -16,7 +16,7 @@
 #include <cmath>
 
 #include "common/util/logger.hpp"
-#include "onnx/onnxruntime_cxx_api.h"
+#include "onnxruntime/onnxruntime_cxx_api.h"
 
 #include "common/util/file_util.hpp"
 #include "topfd/ecscore/score/onnx_ecscore.hpp"
@@ -34,7 +34,7 @@ std::vector<const char*> input_node_names = {"input"};
 std::vector<const char*> output_node_names = {"output"};
 
 void initModel(const std::string &dir_name, int thread_num) {
-  std::string file_name = dir_name 
+  std::string file_name = dir_name
     + file_util::getFileSeparator() + "ecscore_models"
     + file_util::getFileSeparator() + "ecscore_seven_attr.onnx";
 
@@ -54,9 +54,9 @@ void initModel(const std::string &dir_name, int thread_num) {
 
 
 std::vector<double> predict(int feat_num, std::vector<float> &input_tensor_values) {
-  std::vector<int64_t> input_node_dims {feat_num, 7};  
-  size_t input_tensor_size; 
-  input_tensor_size = feat_num * 7;  
+  std::vector<int64_t> input_node_dims {feat_num, 7};
+  size_t input_tensor_size;
+  input_tensor_size = feat_num * 7;
 
   // create input tensor object from data values
   auto memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
@@ -79,7 +79,7 @@ std::vector<double> predict(int feat_num, std::vector<float> &input_tensor_value
     double zero = float_arr[2*i];
     double one = float_arr[2*i+1];
     //softmax
-    double score = std::exp(one)/ (std::exp(zero) + std::exp(one)); 
+    double score = std::exp(one)/ (std::exp(zero) + std::exp(one));
     pred_results.push_back(score);
   }
   return pred_results;
diff --git a/src/topfd/envcnn/onnx_env_cnn.cpp b/src/topfd/envcnn/onnx_env_cnn.cpp
index ebaac03..3001a09 100644
--- a/src/topfd/envcnn/onnx_env_cnn.cpp
+++ b/src/topfd/envcnn/onnx_env_cnn.cpp
@@ -20,7 +20,7 @@
 #include <iostream>
 #include <cmath>
 
-#include "onnx/onnxruntime_cxx_api.h"
+#include "onnxruntime/onnxruntime_cxx_api.h"
 
 #include "common/util/logger.hpp"
 #include "common/util/file_util.hpp"
@@ -39,7 +39,7 @@ std::vector<const char*> input_node_names = {"input"};
 std::vector<const char*> output_node_names = {"output"};
 
 void initModel(const std::string &dir_name, int thread_num) {
-  std::string file_name = dir_name 
+  std::string file_name = dir_name
     + file_util::getFileSeparator() + "envcnn_models"
     + file_util::getFileSeparator() + "envcnn_two_block.onnx";
 
@@ -83,7 +83,7 @@ void extractTheoPeakData(EnvPtr &theo_env,
   }
 }
 
-void getTheoEnvData(MatchEnvPtr &ori_env, std::vector<double> &theo_mass, 
+void getTheoEnvData(MatchEnvPtr &ori_env, std::vector<double> &theo_mass,
                     std::vector<double> &theo_intes,
                     double &max_inte, double &theo_mono_mz) {
   EnvPtr theo_env = ori_env->getTheoEnvPtr();
@@ -96,7 +96,7 @@ void getTheoEnvData(MatchEnvPtr &ori_env, std::vector<double> &theo_mass,
 }
 
 void getExpIntervaledPeakData(const PeakPtrVec &peak_list, double real_mono_mz,
-                              std::vector<double> &theo_mass, 
+                              std::vector<double> &theo_mass,
                               std::vector<double> &peak_mass,
                               std::vector<double> &peak_intes) {
   double max_theo_mass = *std::max_element(std::begin(theo_mass), std::end(theo_mass));
@@ -112,9 +112,9 @@ void getExpIntervaledPeakData(const PeakPtrVec &peak_list, double real_mono_mz,
   }
 }
 
-void getExpEnvData(const PeakPtrVec &peak_list, MatchEnvPtr &ori_env, 
+void getExpEnvData(const PeakPtrVec &peak_list, MatchEnvPtr &ori_env,
                    std::vector<double> &theo_mass,
-                   std::vector<double> &peak_mass, 
+                   std::vector<double> &peak_mass,
                    std::vector<double> &peak_intes) {
   ExpEnvPtr real_env = ori_env->getExpEnvPtr();
   double real_mono_mz = real_env->getMonoMz();
@@ -236,8 +236,8 @@ std::vector<float> getTensor(const PeakPtrVec &peak_list, MatchEnvPtr env) {
   getExpEnvData(peak_list, env, theo_masses, peak_masses, peak_intes);
 
   // init matrix
-  size_t row = 4; 
-  size_t col = 300; 
+  size_t row = 4;
+  size_t col = 300;
   std::vector<std::vector<float>> matrix  = initializeMatrix(row, col);
 
   double tolerance = 0.02;
@@ -265,7 +265,7 @@ std::vector<float> getTensor(const PeakPtrVec &peak_list, MatchEnvPtr env) {
 std::vector<float> getBatchTensor(const PeakPtrVec &peak_list, MatchEnvPtrVec &envs){
   std::vector<float> result;
   for (size_t i = 0; i < envs.size(); i++) {
-    std::vector<float> env_result = getTensor(peak_list, envs[i]); 
+    std::vector<float> env_result = getTensor(peak_list, envs[i]);
     result.insert(std::end(result), std::begin(env_result), std::end(env_result));
   }
   return result;
@@ -276,15 +276,15 @@ void predict(MatchEnvPtrVec &envs, std::vector<float> &input_tensor_values) {
   std::vector<double> pred_results = predict(env_num, input_tensor_values);
 
   for (int i = 0; i < env_num; i++) {
-    envs[i]->setEnvcnnScore(pred_results[i]); 
-    LOG_DEBUG(" msdeconv " << envs[i]->getMsdeconvScore() << " predict " << pred_results[i]);  
+    envs[i]->setEnvcnnScore(pred_results[i]);
+    LOG_DEBUG(" msdeconv " << envs[i]->getMsdeconvScore() << " predict " << pred_results[i]);
   }
 }
 
 std::vector<double> predict(int env_num, std::vector<float> &input_tensor_values) {
-  std::vector<int64_t> input_node_dims {env_num, 4, 300};  
-  size_t input_tensor_size; 
-  input_tensor_size = env_num * 4 * 300;  
+  std::vector<int64_t> input_node_dims {env_num, 4, 300};
+  size_t input_tensor_size;
+  input_tensor_size = env_num * 4 * 300;
 
   // create input tensor object from data values
   auto memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
@@ -307,7 +307,7 @@ std::vector<double> predict(int env_num, std::vector<float> &input_tensor_values
     double zero = float_arr[2*i];
     double one = float_arr[2*i+1];
     //softmax
-    double score = std::exp(one)/ (std::exp(zero) + std::exp(one)); 
+    double score = std::exp(one)/ (std::exp(zero) + std::exp(one));
     pred_results.push_back(score);
   }
   return pred_results;