// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "ml/graph_executor_impl.h"
#include "ml/request_metrics.h"

#include <set>
#include <utility>
#include <vector>

#include "ml/mojom/tensor.mojom.h"
#include "ml/tensor_view.h"

namespace ml {

namespace {

using ::chromeos::machine_learning::mojom::ExecuteResult;
using ::chromeos::machine_learning::mojom::GraphExecutorRequest;
using ::chromeos::machine_learning::mojom::Int64List;
using ::chromeos::machine_learning::mojom::Tensor;
using ::chromeos::machine_learning::mojom::TensorPtr;
using ::chromeos::machine_learning::mojom::ValueList;

// Base name for UMA metrics related to graph execution.
constexpr char kMetricsNameBase[] = "ExecuteResult";

// Verifies |tensor| is valid (i.e. is of type |TensorType| and of the correct
// shape for this input) and copies its data into the graph |interpreter| at
// position |index|.
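// For example, PopulateInput<double, float> (the kTfLiteFloat32 entry in
// kPopulateInputFns below) checks a mojo tensor of doubles and narrows each
// value into the interpreter's float input buffer.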
template <typename TensorType, typename MemoryType>
ExecuteResult PopulateInput(const TensorPtr& tensor,
                            const int index,
                            tflite::Interpreter* const interpreter) {
  const TensorView<TensorType> tensor_view(tensor);

  if (!tensor_view.IsValidType())
    return ExecuteResult::INPUT_TYPE_ERROR;

  if (!tensor_view.IsValidFormat())
    return ExecuteResult::INPUT_FORMAT_ERROR;

  // Check that given input shape matches that expected by TF lite.

  const TfLiteIntArray& expected_dims = *interpreter->tensor(index)->dims;
  const std::vector<int64_t>& actual_dims = tensor_view.GetShape();

  bool shape_matches = expected_dims.size == actual_dims.size();
  for (int i = 0; shape_matches && i < expected_dims.size; ++i) {
    shape_matches = expected_dims.data[i] == actual_dims[i];
  }

  if (!shape_matches)
    return ExecuteResult::INPUT_SHAPE_ERROR;

  MemoryType* const input_memory = interpreter->typed_tensor<MemoryType>(index);
  const std::vector<TensorType>& tensor_values = tensor_view.GetValues();
  for (int i = 0; i < tensor_values.size(); ++i) {
    input_memory[i] = tensor_values[i];
  }

  return ExecuteResult::OK;
}

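// Fallback entry for TF lite input types that have no tensor conversion
// defined in kPopulateInputFns below; always reports an error.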
ExecuteResult InvalidInput(const TensorPtr&, int, tflite::Interpreter*) {
  return ExecuteResult::EXECUTION_ERROR;
}

// A table of functions to validate / populate data for model nodes expecting
// input of each TF lite type.
//
// This table is indexed by TfLiteType, the possible values of which can be
// found at <tensorflow/contrib/lite/context.h>. We make the following
// assumptions about index values:
// 1) They will remain consistent across TF lite releases, and
// 2) They will always start from (close to) 0 and be (mostly) consecutive.
//
// Since TfLiteType is part of the stable C API for TF lite, these assumptions
// seem fair.
constexpr decltype(&InvalidInput) kPopulateInputFns[] = {
    &InvalidInput,                     // kTfLiteNoType
    &PopulateInput<double, float>,     // kTfLiteFloat32
    &PopulateInput<int64_t, int32_t>,  // kTfLiteInt32
    &PopulateInput<int64_t, uint8_t>,  // kTfLiteUInt8
    &PopulateInput<int64_t, int64_t>,  // kTfLiteInt64
    &InvalidInput,                     // kTfLiteString
    &PopulateInput<int64_t, bool>,     // kTfLiteBool
};
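// One possible compile-time guard for assumption (2), assuming kTfLiteBool
// remains the last TfLiteType covered by this table, would be a sketch along
// these lines:
//
//   static_assert(arraysize(kPopulateInputFns) == kTfLiteBool + 1,
//                 "kPopulateInputFns must cover every TfLiteType up to "
//                 "kTfLiteBool");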

// Copies data from position |index| in the graph |interpreter| into the given
// tensor object.
template <typename TensorType, typename MemoryType>
ExecuteResult PopulateOutput(const int index,
                             const tflite::Interpreter& interpreter,
                             const TensorPtr& tensor) {
  TensorView<TensorType> tensor_view(tensor);
  tensor_view.Allocate();

  // Empty output is not valid.
  const TfLiteIntArray& dims = *interpreter.tensor(index)->dims;
  if (dims.size == 0)
    return ExecuteResult::EXECUTION_ERROR;

  // Copy across size information and calculate the number of elements being
  // output.
  int64_t num_entries = 1;
  std::vector<int64_t>& tensor_dims = tensor_view.GetShape();
  tensor_dims.resize(dims.size);
  for (int i = 0; i < dims.size; ++i) {
    const int64_t dim_length = dims.data[i];

    if (dim_length <= 0)
      return ExecuteResult::EXECUTION_ERROR;

    tensor_dims[i] = dim_length;
    num_entries *= dim_length;
  }

  // Populate tensor values.
  const MemoryType* const output_memory =
      interpreter.typed_tensor<MemoryType>(index);
  std::vector<TensorType>& tensor_values = tensor_view.GetValues();
  tensor_values.resize(num_entries);
  for (int i = 0; i < num_entries; ++i) {
    tensor_values[i] = output_memory[i];
  }

  return ExecuteResult::OK;
}

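// Fallback entry for TF lite output types that have no tensor conversion
// defined in kPopulateOutputFns below; always reports an error.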
ExecuteResult InvalidOutput(int, const tflite::Interpreter&, const TensorPtr&) {
  return ExecuteResult::EXECUTION_ERROR;
}

// A table of functions to populate data for tensors from output of each TF
// lite type.
//
// This table is indexed by TfLiteType, the possible values of which can be
// found at <tensorflow/contrib/lite/context.h>. See the caveats discussed in
// the comment above |kPopulateInputFns|.
constexpr decltype(&InvalidOutput) kPopulateOutputFns[] = {
    &InvalidOutput,                     // kTfLiteNoType
    &PopulateOutput<double, float>,     // kTfLiteFloat32
    &PopulateOutput<int64_t, int32_t>,  // kTfLiteInt32
    &PopulateOutput<int64_t, uint8_t>,  // kTfLiteUInt8
    &PopulateOutput<int64_t, int64_t>,  // kTfLiteInt64
    &InvalidOutput,                     // kTfLiteString
    &PopulateOutput<int64_t, bool>,     // kTfLiteBool
};

}  // namespace

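// |required_inputs| and |required_outputs| are assumed to map node names to
// TF lite tensor indices. The executor takes ownership of |interpreter| and
// binds itself to the message pipe underlying |request|.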
GraphExecutorImpl::GraphExecutorImpl(
    const std::map<std::string, int>& required_inputs,
    const std::map<std::string, int>& required_outputs,
    std::unique_ptr<tflite::Interpreter> interpreter,
    GraphExecutorRequest request)
    : required_inputs_(required_inputs),
      required_outputs_(required_outputs),
      interpreter_(std::move(interpreter)),
      binding_(this, std::move(request)) {}

void GraphExecutorImpl::set_connection_error_handler(
    base::Closure connection_error_handler) {
  binding_.set_connection_error_handler(std::move(connection_error_handler));
}

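// Validates the named inputs and outputs, copies the input tensors into the
// interpreter, invokes the graph, and returns the requested output tensors via
// |callback|. Every exit path records its ExecuteResult through
// |request_metrics|; performance metrics are only finalized on success.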
void GraphExecutorImpl::Execute(
    std::unordered_map<std::string, TensorPtr> tensors,
    const std::vector<std::string>& outputs,
    const ExecuteCallback& callback) {
  RequestMetrics<ExecuteResult> request_metrics(kMetricsNameBase);
  request_metrics.StartRecordingPerformanceMetrics();

  // Validate input and output names (before executing graph, for efficiency).

  for (const auto& kv : tensors) {
    const std::string& cur_input_name = kv.first;

    const auto name_lookup = required_inputs_.find(cur_input_name);
    if (name_lookup == required_inputs_.end() ||
        name_lookup->second >= interpreter_->tensors_size()) {
      callback.Run(ExecuteResult::UNKNOWN_INPUT_ERROR, base::nullopt);
      request_metrics.RecordRequestEvent(ExecuteResult::UNKNOWN_INPUT_ERROR);
      return;
    }
  }
  if (tensors.size() != required_inputs_.size()) {
    callback.Run(ExecuteResult::INPUT_MISSING_ERROR, base::nullopt);
    request_metrics.RecordRequestEvent(ExecuteResult::INPUT_MISSING_ERROR);
    return;
  }

  std::set<std::string> seen_outputs;
  for (const auto& cur_output_name : outputs) {
    const auto name_lookup = required_outputs_.find(cur_output_name);
    if (name_lookup == required_outputs_.end() ||
        name_lookup->second >= interpreter_->tensors_size()) {
      callback.Run(ExecuteResult::UNKNOWN_OUTPUT_ERROR, base::nullopt);
      request_metrics.RecordRequestEvent(ExecuteResult::UNKNOWN_OUTPUT_ERROR);
      return;
    }

    // Specifying the same output twice is an error.
    const auto insert_result = seen_outputs.insert(cur_output_name);
    if (!insert_result.second) {
      callback.Run(ExecuteResult::DUPLICATE_OUTPUT_ERROR, base::nullopt);
      request_metrics.RecordRequestEvent(ExecuteResult::DUPLICATE_OUTPUT_ERROR);
      return;
    }
  }
  if (outputs.size() != required_outputs_.size()) {
    callback.Run(ExecuteResult::OUTPUT_MISSING_ERROR, base::nullopt);
    request_metrics.RecordRequestEvent(ExecuteResult::OUTPUT_MISSING_ERROR);
    return;
  }

  // Copy input data into the interpreter.
  for (const auto& kv : tensors) {
    const std::string& cur_input_name = kv.first;
    const TensorPtr& cur_input = kv.second;

    // Always valid, by the input name check at the start of this function.
    const int cur_input_id = required_inputs_.find(cur_input_name)->second;

    // Check that the current input node is a supported type.
    const uint32_t cur_input_type = interpreter_->tensor(cur_input_id)->type;
    if (cur_input_type >= arraysize(kPopulateInputFns)) {
      LOG(ERROR) << "TF lite graph contains invalid input node " << cur_input_id
                 << " of type " << cur_input_type << ".";
      callback.Run(ExecuteResult::EXECUTION_ERROR, base::nullopt);
      request_metrics.RecordRequestEvent(ExecuteResult::EXECUTION_ERROR);
      return;
    }

    // Attempt to copy input data into the current input node.
    const ExecuteResult populate_input_result =
        (*kPopulateInputFns[cur_input_type])(cur_input, cur_input_id,
                                             interpreter_.get());
    if (populate_input_result != ExecuteResult::OK) {
      callback.Run(populate_input_result, base::nullopt);
      request_metrics.RecordRequestEvent(populate_input_result);
      return;
    }
  }

  // Execute graph.
  if (interpreter_->Invoke() != kTfLiteOk) {
    LOG(ERROR) << "TF lite graph execution failed unexpectedly.";
    callback.Run(ExecuteResult::EXECUTION_ERROR, base::nullopt);
    request_metrics.RecordRequestEvent(ExecuteResult::EXECUTION_ERROR);
    return;
  }

  // Extract output.
  std::vector<chromeos::machine_learning::mojom::TensorPtr> output_tensors;
  for (const auto& cur_output_name : outputs) {
    output_tensors.push_back(Tensor::New());

    // Always valid, by the output name check at the start of this function.
    const int cur_output_id =
        required_outputs_.find(cur_output_name)->second;

    // Check that the current output node is a supported type.
    const uint32_t cur_output_type = interpreter_->tensor(cur_output_id)->type;
    if (cur_output_type >= arraysize(kPopulateOutputFns)) {
      LOG(ERROR) << "TF lite graph contains invalid output node "
                 << cur_output_id << " of type " << cur_output_type << ".";
      callback.Run(ExecuteResult::EXECUTION_ERROR, base::nullopt);
      request_metrics.RecordRequestEvent(ExecuteResult::EXECUTION_ERROR);
      return;
    }

    // Attempt to extract data from the current output node.
    const ExecuteResult populate_output_result =
        (*kPopulateOutputFns[cur_output_type])(cur_output_id, *interpreter_,
                                               *--output_tensors.end());
    if (populate_output_result != ExecuteResult::OK) {
      callback.Run(populate_output_result, base::nullopt);
      request_metrics.RecordRequestEvent(populate_output_result);
      return;
    }
  }

  callback.Run(ExecuteResult::OK, std::move(output_tensors));
  request_metrics.FinishRecordingPerformanceMetrics();
  request_metrics.RecordRequestEvent(ExecuteResult::OK);
}

}  // namespace ml