// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2020-2021 Intel Corporation

#ifndef OPENCV_GAPI_INFER_ONNX_HPP
#define OPENCV_GAPI_INFER_ONNX_HPP

#include <unordered_map>
#include <string>
#include <array>
#include <tuple> // tuple, tuple_size
#include <map>

#include <opencv2/gapi/opencv_includes.hpp>
#include <opencv2/gapi/util/any.hpp>

#include <opencv2/core/cvdef.h>     // GAPI_EXPORTS
#include <opencv2/gapi/gkernel.hpp> // GKernelPackage
#include <opencv2/gapi/infer.hpp>   // Generic

namespace cv {
namespace gapi {

/**
 * @brief This namespace contains G-API ONNX Runtime backend functions, structures, and symbols.
 */
namespace onnx {

/**
 * @brief This namespace contains Execution Providers structures for G-API ONNX Runtime backend.
 */
namespace ep {

/**
 * @brief This structure provides functions
 * that fill inference options for ONNX CoreML Execution Provider.
 * Please follow https://onnxruntime.ai/docs/execution-providers/CoreML-ExecutionProvider.html#coreml-execution-provider
 */
struct GAPI_EXPORTS_W_SIMPLE CoreML {
    /** @brief Class constructor.

    Constructs CoreML parameters.
    */
    GAPI_WRAP
    CoreML() = default;

    /** @brief Limit CoreML Execution Provider to run on CPU only.

    This function is used to limit CoreML to run on CPU only.
    Please follow: https://onnxruntime.ai/docs/execution-providers/CoreML-ExecutionProvider.html#coreml_flag_use_cpu_only

    @return reference to this parameter structure.
    */
    GAPI_WRAP
    CoreML& cfgUseCPUOnly() {
        use_cpu_only = true;
        return *this;
    }

    /** @brief Enable CoreML EP to run on a subgraph in the body of a control flow ONNX operator (i.e. a Loop, Scan or If operator).

    This function is used to enable CoreML EP to run on
    a subgraph in the body of a control flow ONNX operator.
    Please follow: https://onnxruntime.ai/docs/execution-providers/CoreML-ExecutionProvider.html#coreml_flag_enable_on_subgraph

    @return reference to this parameter structure.
    */
    GAPI_WRAP
    CoreML& cfgEnableOnSubgraph() {
        enable_on_subgraph = true;
        return *this;
    }

    /** @brief Enable CoreML EP to run only on Apple Neural Engine.

    This function is used to enable CoreML EP to run only on Apple Neural Engine.
    Please follow: https://onnxruntime.ai/docs/execution-providers/CoreML-ExecutionProvider.html#coreml_flag_only_enable_device_with_ane

    @return reference to this parameter structure.
    */
    GAPI_WRAP
    CoreML& cfgEnableOnlyNeuralEngine() {
        enable_only_ane = true;
        return *this;
    }

    bool use_cpu_only = false;
    bool enable_on_subgraph = false;
    bool enable_only_ane = false;
};
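
// A minimal usage sketch (illustration, not part of the original header): CoreML
// options are set with the fluent cfg*() calls above and then attached to the
// inference parameters via cfgAddExecutionProvider(), declared further below in
// this file. The network type MyNet and the model file name are placeholders.
//
//     auto coreml = cv::gapi::onnx::ep::CoreML{}
//                       .cfgUseCPUOnly()
//                       .cfgEnableOnSubgraph();
//     auto params = cv::gapi::onnx::Params<MyNet>{"model.onnx"}
//                       .cfgAddExecutionProvider(std::move(coreml));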

/**
 * @brief This structure provides functions
 * that fill inference options for CUDA Execution Provider.
 * Please follow https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html#cuda-execution-provider
 */
struct GAPI_EXPORTS_W_SIMPLE CUDA {
    // NB: Used from python.
    /// @private -- Exclude this constructor from OpenCV documentation
    GAPI_WRAP
    CUDA() = default;

    /** @brief Class constructor.

    Constructs CUDA parameters based on device type information.

    @param dev_id Target device id to use.
    */
    GAPI_WRAP
    explicit CUDA(const int dev_id)
        : device_id(dev_id) {
    }

    int device_id;
};

/**
 * @brief This structure provides functions
 * that fill inference options for TensorRT Execution Provider.
 * Please follow https://onnxruntime.ai/docs/execution-providers/TensorRT-ExecutionProvider.html#tensorrt-execution-provider
 */
struct GAPI_EXPORTS_W_SIMPLE TensorRT {
    // NB: Used from python.
    /// @private -- Exclude this constructor from OpenCV documentation
    GAPI_WRAP
    TensorRT() = default;

    /** @brief Class constructor.

    Constructs TensorRT parameters based on device type information.

    @param dev_id Target device id to use.
    */
    GAPI_WRAP
    explicit TensorRT(const int dev_id)
        : device_id(dev_id) {
    }

    int device_id;
};
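
// Illustrative sketch (not part of the original header): both the CUDA and the
// TensorRT providers are parametrized only by a device id and are attached the
// same way as any other EP via cfgAddExecutionProvider() declared below.
//
//     cv::gapi::onnx::ep::CUDA     cuda_ep(0);  // run on GPU #0
//     cv::gapi::onnx::ep::TensorRT trt_ep(0);   // TensorRT on the same device
//     // params.cfgAddExecutionProvider(std::move(cuda_ep));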

/**
 * @brief This structure provides functions
 * that fill inference options for ONNX OpenVINO Execution Provider.
 * Please follow https://onnxruntime.ai/docs/execution-providers/OpenVINO-ExecutionProvider.html#summary-of-options
 */
struct GAPI_EXPORTS_W_SIMPLE OpenVINO {
    // NB: Used from python.
    /// @private -- Exclude this constructor from OpenCV documentation
    GAPI_WRAP
    OpenVINO() = default;

    /** @brief Class constructor.

    Constructs OpenVINO parameters based on device type information.

    @param dev_type Target device type to use. ("CPU", "GPU", "GPU.0" etc)
    */
    GAPI_WRAP
    explicit OpenVINO(const std::string &dev_type)
        : device_type(dev_type) {
    }

    /** @brief Class constructor.

    Constructs OpenVINO parameters based on a map of options passed.

    @param params A map of parameter names and their corresponding string values.
    */
    GAPI_WRAP
    explicit OpenVINO(const std::map<std::string, std::string>& params)
        : params_map(params) {
    }

    /** @brief Specifies OpenVINO Execution Provider cache dir.

    This function is used to explicitly specify the path to save and load
    the blobs, enabling the model caching feature.

    @param dir Path to the directory that will be used as cache.
    @return reference to this parameter structure.
    */
    GAPI_WRAP
    OpenVINO& cfgCacheDir(const std::string &dir) {
        if (!params_map.empty()) {
            cv::util::throw_error(std::logic_error("ep::OpenVINO cannot be changed if "
                                                   "created from the parameters map."));
        }
        cache_dir = dir;
        return *this;
    }

    /** @brief Specifies OpenVINO Execution Provider number of threads.

    This function is used to override the accelerator's default
    number of threads with this value at runtime.

    @param nthreads Number of threads.
    @return reference to this parameter structure.
    */
    GAPI_WRAP
    OpenVINO& cfgNumThreads(size_t nthreads) {
        if (!params_map.empty()) {
            cv::util::throw_error(std::logic_error("ep::OpenVINO cannot be changed if "
                                                   "created from the parameters map."));
        }
        num_of_threads = nthreads;
        return *this;
    }

    /** @brief Enables OpenVINO Execution Provider OpenCL throttling.

    This function is used to enable OpenCL queue throttling for GPU devices
    (reduces CPU utilization when using GPU).

    @return reference to this parameter structure.
    */
    GAPI_WRAP
    OpenVINO& cfgEnableOpenCLThrottling() {
        if (!params_map.empty()) {
            cv::util::throw_error(std::logic_error("ep::OpenVINO cannot be changed if "
                                                   "created from the parameters map."));
        }
        enable_opencl_throttling = true;
        return *this;
    }

    /** @brief Enables OpenVINO Execution Provider dynamic shapes.

    This function is used to enable work with dynamically-shaped models,
    whose shape will be set dynamically based on the input image/data
    shape at run time on CPU.

    @return reference to this parameter structure.
    */
    GAPI_WRAP
    OpenVINO& cfgEnableDynamicShapes() {
        if (!params_map.empty()) {
            cv::util::throw_error(std::logic_error("ep::OpenVINO cannot be changed if "
                                                   "created from the parameters map."));
        }
        enable_dynamic_shapes = true;
        return *this;
    }

    std::string device_type;
    std::string cache_dir;
    size_t num_of_threads = 0;
    bool enable_opencl_throttling = false;
    bool enable_dynamic_shapes = false;
    std::map<std::string, std::string> params_map;
};
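
// Illustrative sketch (not part of the original header): the OpenVINO EP can be
// configured either with the fluent cfg*() calls or from a raw options map; the
// two styles must not be mixed, as the cfg*() methods throw when params_map is
// already set (see above). The option keys in the map are an assumption here and
// mirror the field names above; the authoritative list is in the ONNX Runtime
// OpenVINO EP documentation linked above.
//
//     auto ov_ep  = cv::gapi::onnx::ep::OpenVINO("GPU")
//                       .cfgCacheDir("/tmp/ov_cache")
//                       .cfgNumThreads(4);
//     // ...or, alternatively, via the options-map constructor:
//     auto ov_map = cv::gapi::onnx::ep::OpenVINO(
//         std::map<std::string, std::string>{{"device_type", "GPU"},
//                                            {"cache_dir",   "/tmp/ov_cache"}});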

/**
 * @brief This structure provides functions
 * that fill inference options for ONNX DirectML Execution Provider.
 * Please follow https://onnxruntime.ai/docs/execution-providers/DirectML-ExecutionProvider.html#directml-execution-provider
 */
class GAPI_EXPORTS_W_SIMPLE DirectML {
public:
    // NB: Used from python.
    /// @private -- Exclude this constructor from OpenCV documentation
    GAPI_WRAP
    DirectML() = default;

    /** @brief Class constructor.

    Constructs DirectML parameters based on device id.

    @param device_id Target device id to use. ("0", "1", etc)
    */
    GAPI_WRAP
    explicit DirectML(const int device_id) : ddesc(device_id) { }

    /** @brief Class constructor.

    Constructs DirectML parameters based on adapter name.

    @param adapter_name Target adapter_name to use.
    */
    GAPI_WRAP
    explicit DirectML(const std::string &adapter_name) : ddesc(adapter_name) { }

    using DeviceDesc = cv::util::variant<int, std::string>;
    DeviceDesc ddesc;
};
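
// Illustrative sketch (not part of the original header): DirectML accepts either
// a device id or an adapter name; both land in the same variant field ddesc.
// The adapter name string here is a placeholder.
//
//     cv::gapi::onnx::ep::DirectML dml_by_id(0);                 // ddesc holds int
//     cv::gapi::onnx::ep::DirectML dml_by_name("MyAdapterName"); // ddesc holds string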

using EP = cv::util::variant< cv::util::monostate
                            , OpenVINO
                            , DirectML
                            , CoreML
                            , CUDA
                            , TensorRT>;

} // namespace ep

GAPI_EXPORTS cv::gapi::GBackend backend();

enum class TraitAs: int {
    TENSOR, //!< G-API treats an associated cv::Mat as a raw tensor
            //   and passes dimensions as-is
    IMAGE   //!< G-API treats an associated cv::Mat as an image so
            //   it creates an "image" blob (NCHW/NHWC, etc)
};

using PostProc = std::function<void(const std::unordered_map<std::string, cv::Mat> &,
                                          std::unordered_map<std::string, cv::Mat> &)>;

namespace detail {
/**
 * @brief This structure contains description of inference parameters
 * which is specific to ONNX models.
 */
struct ParamDesc {
    std::string model_path; //!< Path to model.

    // NB: num_* may differ from the topology's real input/output port numbers
    // (e.g. topology's partial execution)
    std::size_t num_in;  //!< How many inputs are defined in the operation
    std::size_t num_out; //!< How many outputs are defined in the operation

    // NB: Here order follows the `Net` API
    std::vector<std::string> input_names;  //!< Names of input network layers.
    std::vector<std::string> output_names; //!< Names of output network layers.

    using ConstInput = std::pair<cv::Mat, TraitAs>;
    std::unordered_map<std::string, ConstInput> const_inputs; //!< Map of network layer names to the ConstInput associated with each.

    std::vector<cv::Scalar> mean;  //!< Mean values for preprocessing.
    std::vector<cv::Scalar> stdev; //!< Standard deviation values for preprocessing.

    std::vector<cv::GMatDesc> out_metas; //!< Out meta information about your output (type, dimension).
    PostProc custom_post_proc;           //!< Post processing function.

    std::vector<bool> normalize; //!< Vector of bool values that enable or disable normalization of input data.

    std::vector<std::string> names_to_remap; //!< Names of output layers that will be processed in the PostProc function.

    bool is_generic;

    // TODO: Needs to modify the rest of ParamDesc accordingly to support
    // both generic and non-generic options without duplication
    // (as it was done for the OV IE backend)
    // These values are pushed into the respective vector<> fields above
    // when the generic infer parameters are unpacked (see GONNXBackendImpl::unpackKernel)
    std::unordered_map<std::string, std::pair<cv::Scalar, cv::Scalar> > generic_mstd;
    std::unordered_map<std::string, bool> generic_norm;

    std::vector<cv::gapi::onnx::ep::EP> execution_providers;
    bool disable_mem_pattern;
};
} // namespace detail

template<typename Net>
struct PortCfg {
    using In = std::array
        < std::string
        , std::tuple_size<typename Net::InArgs>::value >;
    using Out = std::array
        < std::string
        , std::tuple_size<typename Net::OutArgs>::value >;
    using NormCoefs = std::array
        < cv::Scalar
        , std::tuple_size<typename Net::InArgs>::value >;
    using Normalize = std::array
        < bool
        , std::tuple_size<typename Net::InArgs>::value >;
};
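
// Note (illustration, not part of the original header): PortCfg only maps the
// input/output arity of a G_API_NET onto fixed-size arrays. For a hypothetical
// two-input network
//
//     G_API_NET(MyNet, <cv::GMat(cv::GMat, cv::GMat)>, "sample.two-input-net");
//
// PortCfg<MyNet>::In is std::array<std::string, 2> and PortCfg<MyNet>::NormCoefs
// is std::array<cv::Scalar, 2>.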

/**
 * Contains description of inference parameters and kit of functions that
 * fill these parameters.
 */
template<typename Net> class Params {
public:
    /** @brief Class constructor.

    Constructs Params based on model information and sets default values for other
    inference description parameters.

    @param model Path to model (.onnx file).
    */
    Params(const std::string &model) {
        desc.model_path = model;
        desc.num_in  = std::tuple_size<typename Net::InArgs>::value;
        desc.num_out = std::tuple_size<typename Net::OutArgs>::value;
        desc.is_generic = false;
        desc.disable_mem_pattern = false;
    }

    /** @brief Specifies sequence of network input layers names for inference.

    The function is used to associate data of graph inputs with input layers of
    the network topology. The number of names has to match the number of network inputs.
    If a network has only one input layer, there is no need to call it as the layer is
    associated with the input automatically, but this doesn't prevent you from
    doing it yourself.

    @param layer_names std::array<std::string, N> where N is the number of inputs
    as defined in the @ref G_API_NET. Contains names of input layers.
    @return the reference on modified object.
    */
    Params<Net>& cfgInputLayers(const typename PortCfg<Net>::In &layer_names) {
        desc.input_names.assign(layer_names.begin(), layer_names.end());
        return *this;
    }

    /** @brief Specifies sequence of output layers names for inference.

    The function is used to associate data of graph outputs with output layers of
    the network topology. If a network has only one output layer, there is no need to
    call it as the layer is associated with the output automatically, but this doesn't
    prevent you from doing it yourself. The number of names has to match the number of
    network outputs, or you can set your own outputs, but in that case you have to
    additionally use the @ref cfgPostProc function.

    @param layer_names std::array<std::string, N> where N is the number of outputs
    as defined in the @ref G_API_NET. Contains names of output layers.
    @return the reference on modified object.
    */
    Params<Net>& cfgOutputLayers(const typename PortCfg<Net>::Out &layer_names) {
        desc.output_names.assign(layer_names.begin(), layer_names.end());
        return *this;
    }

    /** @brief Sets a constant input.

    The function is used to set a constant input. This input has to be
    a prepared tensor since preprocessing is disabled for this case. You should
    provide the name of the network layer which will receive the provided data.

    @param layer_name Name of network layer.
    @param data cv::Mat that contains data which will be associated with the network layer.
    @param hint Type of input (TENSOR).
    @return the reference on modified object.
    */
    Params<Net>& constInput(const std::string &layer_name,
                            const cv::Mat &data,
                            TraitAs hint = TraitAs::TENSOR) {
        desc.const_inputs[layer_name] = {data, hint};
        return *this;
    }

    /** @brief Specifies mean value and standard deviation for preprocessing.

    The function is used to set mean value and standard deviation for preprocessing
    of input data.

    @param m std::array<cv::Scalar, N> where N is the number of inputs
    as defined in the @ref G_API_NET. Contains mean values.
    @param s std::array<cv::Scalar, N> where N is the number of inputs
    as defined in the @ref G_API_NET. Contains standard deviation values.
    @return the reference on modified object.
    */
    Params<Net>& cfgMeanStd(const typename PortCfg<Net>::NormCoefs &m,
                            const typename PortCfg<Net>::NormCoefs &s) {
        desc.mean.assign(m.begin(), m.end());
        desc.stdev.assign(s.begin(), s.end());
        return *this;
    }

    /** @brief Configures graph output and provides the post processing function from user.

    The function is used when you work with networks with dynamic outputs.
    Since the dimensions of the inference result can't be known in advance, you need to
    provide them for the construction of the graph output. These dimensions can differ
    from the inference result. So you have to provide a @ref PostProc function that gets
    information from the inference result and fills the output which is constructed by
    dimensions from out_metas.

    @param out_metas Out meta information about your output (type, dimension).
    @param remap_function Post processing function, which has two parameters. First is the ONNX
    result, second is the graph output. Both parameters are std::map that contain pairs of
    layer's name and cv::Mat.
    @return the reference on modified object.
    */
    Params<Net>& cfgPostProc(const std::vector<cv::GMatDesc> &out_metas,
                             const PostProc &remap_function) {
        desc.out_metas = out_metas;
        desc.custom_post_proc = remap_function;
        return *this;
    }

    /** @overload
    Function with rvalue parameters.

    @param out_metas rvalue out meta information about your output (type, dimension).
    @param remap_function rvalue post processing function, which has two parameters. First is the ONNX
    result, second is the graph output. Both parameters are std::map that contain pairs of
    layer's name and cv::Mat.
    @return the reference on modified object.
    */
    Params<Net>& cfgPostProc(std::vector<cv::GMatDesc> &&out_metas,
                             PostProc &&remap_function) {
        desc.out_metas = std::move(out_metas);
        desc.custom_post_proc = std::move(remap_function);
        return *this;
    }

    /** @overload
    The function has an additional parameter names_to_remap. This parameter provides
    information about output layers which will be used for inference and the post
    processing function.

    @param out_metas Out meta information.
    @param remap_function Post processing function.
    @param names_to_remap Names of output layers. The network's inference will
    be done on these layers. The inference's result will be processed in the post
    processing function using these names.
    @return the reference on modified object.
    */
    Params<Net>& cfgPostProc(const std::vector<cv::GMatDesc> &out_metas,
                             const PostProc &remap_function,
                             const std::vector<std::string> &names_to_remap) {
        desc.out_metas = out_metas;
        desc.custom_post_proc = remap_function;
        desc.names_to_remap = names_to_remap;
        return *this;
    }

    /** @overload
    Function with rvalue parameters and an additional parameter names_to_remap.

    @param out_metas rvalue out meta information.
    @param remap_function rvalue post processing function.
    @param names_to_remap rvalue names of output layers. The network's inference will
    be done on these layers. The inference's result will be processed in the post
    processing function using these names.
    @return the reference on modified object.
    */
    Params<Net>& cfgPostProc(std::vector<cv::GMatDesc> &&out_metas,
                             PostProc &&remap_function,
                             std::vector<std::string> &&names_to_remap) {
        desc.out_metas = std::move(out_metas);
        desc.custom_post_proc = std::move(remap_function);
        desc.names_to_remap = std::move(names_to_remap);
        return *this;
    }

    /** @brief Specifies normalize parameter for preprocessing.

    The function is used to set the normalize parameter for preprocessing of input data.

    @param normalizations std::array<bool, N> where N is the number of inputs
    as defined in the @ref G_API_NET. Contains bool values that enable or disable
    normalization of input data.
    @return the reference on modified object.
    */
    Params<Net>& cfgNormalize(const typename PortCfg<Net>::Normalize &normalizations) {
        desc.normalize.assign(normalizations.begin(), normalizations.end());
        return *this;
    }

    /** @brief Adds execution provider for runtime.

    The function is used to add ONNX Runtime OpenVINO Execution Provider options.

    @param ep OpenVINO Execution Provider options.
    @see cv::gapi::onnx::ep::OpenVINO.
    @return the reference on modified object.
    */
    Params<Net>& cfgAddExecutionProvider(ep::OpenVINO&& ep) {
        desc.execution_providers.emplace_back(std::move(ep));
        return *this;
    }

    /** @brief Adds execution provider for runtime.

    The function is used to add ONNX Runtime DirectML Execution Provider options.

    @param ep DirectML Execution Provider options.
    @see cv::gapi::onnx::ep::DirectML.
    @return the reference on modified object.
    */
    Params<Net>& cfgAddExecutionProvider(ep::DirectML&& ep) {
        desc.execution_providers.emplace_back(std::move(ep));
        return *this;
    }

    /** @brief Adds execution provider for runtime.

    The function is used to add ONNX Runtime CoreML Execution Provider options.

    @param ep CoreML Execution Provider options.
    @see cv::gapi::onnx::ep::CoreML.
    @return the reference on modified object.
    */
    Params<Net>& cfgAddExecutionProvider(ep::CoreML&& ep) {
        desc.execution_providers.emplace_back(std::move(ep));
        return *this;
    }

    /** @brief Adds execution provider for runtime.

    The function is used to add ONNX Runtime CUDA Execution Provider options.

    @param ep CUDA Execution Provider options.
    @see cv::gapi::onnx::ep::CUDA.
    @return the reference on modified object.
    */
    Params<Net>& cfgAddExecutionProvider(ep::CUDA&& ep) {
        desc.execution_providers.emplace_back(std::move(ep));
        return *this;
    }

    /** @brief Adds execution provider for runtime.

    The function is used to add ONNX Runtime TensorRT Execution Provider options.

    @param ep TensorRT Execution Provider options.
    @see cv::gapi::onnx::ep::TensorRT.
    @return the reference on modified object.
    */
    Params<Net>& cfgAddExecutionProvider(ep::TensorRT&& ep) {
        desc.execution_providers.emplace_back(std::move(ep));
        return *this;
    }

    /** @brief Disables the memory pattern optimization.

    @return the reference on modified object.
    */
    Params<Net>& cfgDisableMemPattern() {
        desc.disable_mem_pattern = true;
        return *this;
    }

    // BEGIN(G-API's network parametrization API)
    GBackend      backend() const { return cv::gapi::onnx::backend(); }
    std::string   tag()     const { return Net::tag(); }
    cv::util::any params()  const { return { desc }; }
    // END(G-API's network parametrization API)

protected:
    detail::ParamDesc desc;
};
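
// A hedged end-to-end sketch (illustration, not part of the original header) of
// how Params<Net> is typically combined with cv::gapi::infer<>. MyNet, the file
// names and the normalization constants are placeholders.
//
//     G_API_NET(MyNet, <cv::GMat(cv::GMat)>, "sample.mynet");
//
//     cv::GMat in;
//     cv::GMat out = cv::gapi::infer<MyNet>(in);
//     cv::GComputation graph(cv::GIn(in), cv::GOut(out));
//
//     auto net = cv::gapi::onnx::Params<MyNet>{"model.onnx"}
//                    .cfgMeanStd({ cv::Scalar(0.485, 0.456, 0.406) },
//                                { cv::Scalar(0.229, 0.224, 0.225) })
//                    .cfgAddExecutionProvider(cv::gapi::onnx::ep::CUDA(0));
//
//     cv::Mat input = cv::imread("input.jpg"), result;
//     graph.apply(cv::gin(input), cv::gout(result),
//                 cv::compile_args(cv::gapi::networks(net)));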

/*
 * @brief This structure provides functions for generic network type that
 * fill inference parameters.
 * @see struct Generic
 */
template<>
class Params<cv::gapi::Generic> {
public:
    /** @brief Class constructor.

    Constructs Params based on input information and sets default values for other
    inference description parameters.

    @param tag string tag of the network for which these parameters are intended.
    @param model_path path to model file (.onnx file).
    */
    Params(const std::string& tag, const std::string& model_path)
        : desc{ model_path, 0u, 0u, {}, {}, {}, {}, {}, {}, {}, {}, {}, true, {}, {}, {}, false }
        , m_tag(tag) {}

    /** @see onnx::Params::cfgMeanStd. */
    void cfgMeanStdDev(const std::string &layer,
                       const cv::Scalar &m,
                       const cv::Scalar &s) {
        desc.generic_mstd[layer] = std::make_pair(m, s);
    }

    /** @see onnx::Params::cfgNormalize. */
    void cfgNormalize(const std::string &layer, bool flag) {
        desc.generic_norm[layer] = flag;
    }

    /** @see onnx::Params::cfgAddExecutionProvider. */
    void cfgAddExecutionProvider(ep::OpenVINO&& ep) {
        desc.execution_providers.emplace_back(std::move(ep));
    }

    /** @see onnx::Params::cfgAddExecutionProvider. */
    void cfgAddExecutionProvider(ep::DirectML&& ep) {
        desc.execution_providers.emplace_back(std::move(ep));
    }

    /** @see onnx::Params::cfgAddExecutionProvider. */
    void cfgAddExecutionProvider(ep::CoreML&& ep) {
        desc.execution_providers.emplace_back(std::move(ep));
    }

    /** @see onnx::Params::cfgAddExecutionProvider. */
    void cfgAddExecutionProvider(ep::CUDA&& ep) {
        desc.execution_providers.emplace_back(std::move(ep));
    }

    /** @see onnx::Params::cfgAddExecutionProvider. */
    void cfgAddExecutionProvider(ep::TensorRT&& ep) {
        desc.execution_providers.emplace_back(std::move(ep));
    }

    /** @see onnx::Params::cfgDisableMemPattern. */
    void cfgDisableMemPattern() {
        desc.disable_mem_pattern = true;
    }

    // BEGIN(G-API's network parametrization API)
    GBackend      backend() const { return cv::gapi::onnx::backend(); }
    std::string   tag()     const { return m_tag; }
    cv::util::any params()  const { return { desc }; }
    // END(G-API's network parametrization API)

protected:
    detail::ParamDesc desc;
    std::string m_tag;
};
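
// A hedged sketch (illustration, not part of the original header) of the generic
// flow, where no G_API_NET type is declared and the network is addressed by its
// string tag. The tag, file names and layer names are placeholders.
//
//     cv::gapi::onnx::Params<cv::gapi::Generic> net{"sample-net", "model.onnx"};
//     net.cfgNormalize("data", false);   // per-layer toggle, keyed by layer name
//     net.cfgAddExecutionProvider(cv::gapi::onnx::ep::OpenVINO("CPU"));
//
//     cv::GMat in;
//     cv::GInferInputs inputs;
//     inputs["data"] = in;
//     cv::GInferOutputs outputs = cv::gapi::infer<cv::gapi::Generic>("sample-net", inputs);
//     cv::GMat out = outputs.at("prob");
//
//     cv::Mat input = cv::imread("input.jpg"), result;
//     cv::GComputation(cv::GIn(in), cv::GOut(out))
//         .apply(cv::gin(input), cv::gout(result),
//                cv::compile_args(cv::gapi::networks(net)));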

} // namespace onnx
} // namespace gapi
} // namespace cv

#endif // OPENCV_GAPI_INFER_ONNX_HPP