// ov.hpp — G-API OpenVINO backend public header
  1. // This file is part of OpenCV project.
  2. // It is subject to the license terms in the LICENSE file found in the top-level directory
  3. // of this distribution and at http://opencv.org/license.html.
  4. //
  5. // Copyright (C) 2023 Intel Corporation
  6. #ifndef OPENCV_GAPI_INFER_OV_HPP
  7. #define OPENCV_GAPI_INFER_OV_HPP
#include <map>
#include <string>
#include <utility> // std::move
#include <vector>

#include <opencv2/gapi/util/any.hpp>
#include <opencv2/gapi/own/exports.hpp> // GAPI_EXPORTS
#include <opencv2/gapi/gkernel.hpp>     // GKernelType[M], GBackend
#include <opencv2/gapi/infer.hpp>       // Generic
  14. namespace cv {
  15. namespace gapi {
  16. /**
  17. * @brief This namespace contains G-API OpenVINO 2.0 backend functions,
  18. * structures, and symbols.
  19. */
  20. namespace ov {
  21. GAPI_EXPORTS cv::gapi::GBackend backend();
  22. namespace detail {
  23. template <typename T>
  24. using AttrMap = std::map<std::string, T>;
  25. // NB: This type is supposed to be used to hold in/out layers
  26. // attributes such as precision, layout, shape etc.
  27. //
  28. // User can provide attributes either:
  29. // 1. cv::util::monostate - No value specified explicitly.
  30. // 2. Attr - value specified explicitly that should be broadcasted to all layers.
  31. // 3. AttrMap[str->T] - map specifies value for particular layer.
  32. template <typename Attr>
  33. using LayerVariantAttr = cv::util::variant< cv::util::monostate
  34. , AttrMap<Attr>
  35. , Attr>;
  36. struct ParamDesc {
  37. struct Model {
  38. Model(const std::string &model_path_,
  39. const std::string &bin_path_)
  40. : model_path(model_path_), bin_path(bin_path_) {
  41. }
  42. std::string model_path;
  43. std::string bin_path;
  44. LayerVariantAttr<std::string> input_tensor_layout;
  45. LayerVariantAttr<std::string> input_model_layout;
  46. LayerVariantAttr<std::string> output_tensor_layout;
  47. LayerVariantAttr<std::string> output_model_layout;
  48. LayerVariantAttr<int> output_tensor_precision;
  49. LayerVariantAttr<std::vector<size_t>> new_shapes;
  50. LayerVariantAttr<std::vector<float>> mean_values;
  51. LayerVariantAttr<std::vector<float>> scale_values;
  52. LayerVariantAttr<int> interpolation;
  53. };
  54. struct CompiledModel {
  55. std::string blob_path;
  56. };
  57. using Kind = cv::util::variant<Model, CompiledModel>;
  58. ParamDesc(Kind &&kind_,
  59. const std::string &device_,
  60. const bool is_generic_,
  61. const size_t num_in_,
  62. const size_t num_out_)
  63. : kind(std::move(kind_)), device(device_),
  64. is_generic(is_generic_),
  65. num_in(num_in_), num_out(num_out_) {
  66. }
  67. Kind kind;
  68. std::string device;
  69. bool is_generic;
  70. std::size_t num_in;
  71. std::size_t num_out;
  72. std::vector<std::string> input_names;
  73. std::vector<std::string> output_names;
  74. using PluginConfigT = std::map<std::string, std::string>;
  75. PluginConfigT config;
  76. size_t nireq = 1;
  77. };
  78. // NB: Just helper to avoid code duplication.
  79. static detail::ParamDesc::Model&
  80. getModelToSetAttrOrThrow(detail::ParamDesc::Kind &kind,
  81. const std::string &attr_name) {
  82. if (cv::util::holds_alternative<detail::ParamDesc::CompiledModel>(kind)) {
  83. cv::util::throw_error(
  84. std::logic_error("Specifying " + attr_name + " isn't"
  85. " possible for compiled model."));
  86. }
  87. GAPI_Assert(cv::util::holds_alternative<detail::ParamDesc::Model>(kind));
  88. return cv::util::get<detail::ParamDesc::Model>(kind);
  89. }
  90. } // namespace detail
  91. /**
  92. * @brief This structure provides functions
  93. * that fill inference parameters for "OpenVINO Toolkit" model.
  94. */
  95. template<typename Net> struct Params {
  96. public:
  97. /** @brief Class constructor.
  98. Constructs Params based on model information and specifies default values for other
  99. inference description parameters. Model is loaded and compiled using "OpenVINO Toolkit".
  100. @param model_path Path to a model.
  101. @param bin_path Path to a data file.
  102. For IR format (*.bin):
  103. If path is empty, will try to read a bin file with the same name as xml.
  104. If the bin file with the same name is not found, will load IR without weights.
  105. For PDPD (*.pdmodel) and ONNX (*.onnx) formats bin_path isn't used.
  106. @param device target device to use.
  107. */
  108. Params(const std::string &model_path,
  109. const std::string &bin_path,
  110. const std::string &device)
  111. : m_desc( detail::ParamDesc::Kind{detail::ParamDesc::Model{model_path, bin_path}}
  112. , device
  113. , false /* is generic */
  114. , std::tuple_size<typename Net::InArgs>::value
  115. , std::tuple_size<typename Net::OutArgs>::value) {
  116. }
  117. /** @overload
  118. Use this constructor to work with pre-compiled network.
  119. Model is imported from a pre-compiled blob.
  120. @param blob_path path to the compiled model (*.blob).
  121. @param device target device to use.
  122. */
  123. Params(const std::string &blob_path,
  124. const std::string &device)
  125. : m_desc( detail::ParamDesc::Kind{detail::ParamDesc::CompiledModel{blob_path}}
  126. , device
  127. , false /* is generic */
  128. , std::tuple_size<typename Net::InArgs>::value
  129. , std::tuple_size<typename Net::OutArgs>::value) {
  130. }
  131. /** @brief Specifies sequence of network input layers names for inference.
  132. The function is used to associate cv::gapi::infer<> inputs with the model inputs.
  133. Number of names has to match the number of network inputs as defined in G_API_NET().
  134. In case a network has only single input layer, there is no need to specify name manually.
  135. @param layer_names std::array<std::string, N> where N is the number of inputs
  136. as defined in the @ref G_API_NET. Contains names of input layers.
  137. @return reference to this parameter structure.
  138. */
  139. Params<Net>& cfgInputLayers(const std::vector<std::string> &layer_names) {
  140. m_desc.input_names = layer_names;
  141. return *this;
  142. }
  143. /** @brief Specifies sequence of network output layers names for inference.
  144. The function is used to associate cv::gapi::infer<> outputs with the model outputs.
  145. Number of names has to match the number of network outputs as defined in G_API_NET().
  146. In case a network has only single output layer, there is no need to specify name manually.
  147. @param layer_names std::array<std::string, N> where N is the number of outputs
  148. as defined in the @ref G_API_NET. Contains names of output layers.
  149. @return reference to this parameter structure.
  150. */
  151. Params<Net>& cfgOutputLayers(const std::vector<std::string> &layer_names) {
  152. m_desc.output_names = layer_names;
  153. return *this;
  154. }
  155. /** @brief Specifies OpenVINO plugin configuration.
  156. The function is used to set configuration for OpenVINO plugin. Some parameters
  157. can be different for each plugin. Please follow https://docs.openvinotoolkit.org/latest/index.html
  158. to check information about specific plugin.
  159. @param config Map of pairs: (config parameter name, config parameter value).
  160. @return reference to this parameter structure.
  161. */
  162. Params<Net>& cfgPluginConfig(const detail::ParamDesc::PluginConfigT &config) {
  163. m_desc.config = config;
  164. return *this;
  165. }
  166. /** @brief Specifies tensor layout for an input layer.
  167. The function is used to set tensor layout for an input layer.
  168. @param layout Tensor layout ("NCHW", "NWHC", etc)
  169. will be applied to all input layers.
  170. @return reference to this parameter structure.
  171. */
  172. Params<Net>& cfgInputTensorLayout(std::string layout) {
  173. detail::getModelToSetAttrOrThrow(m_desc.kind, "input tensor layout")
  174. .input_tensor_layout = std::move(layout);
  175. return *this;
  176. }
  177. /** @overload
  178. @param layout_map Map of pairs: name of corresponding input layer
  179. and its tensor layout represented in std::string ("NCHW", "NHWC", etc)
  180. @return reference to this parameter structure.
  181. */
  182. Params<Net>&
  183. cfgInputTensorLayout(detail::AttrMap<std::string> layout_map) {
  184. detail::getModelToSetAttrOrThrow(m_desc.kind, "input tensor layout")
  185. .input_tensor_layout = std::move(layout_map);
  186. return *this;
  187. }
  188. /** @brief Specifies model layout for an input layer.
  189. The function is used to set model layout for an input layer.
  190. @param layout Model layout ("NCHW", "NHWC", etc)
  191. will be applied to all input layers.
  192. @return reference to this parameter structure.
  193. */
  194. Params<Net>& cfgInputModelLayout(std::string layout) {
  195. detail::getModelToSetAttrOrThrow(m_desc.kind, "input model layout")
  196. .input_model_layout = std::move(layout);
  197. return *this;
  198. }
  199. /** @overload
  200. @param layout_map Map of pairs: name of corresponding input layer
  201. and its model layout ("NCHW", "NHWC", etc)
  202. @return reference to this parameter structure.
  203. */
  204. Params<Net>&
  205. cfgInputModelLayout(detail::AttrMap<std::string> layout_map) {
  206. detail::getModelToSetAttrOrThrow(m_desc.kind, "input model layout")
  207. .input_model_layout = std::move(layout_map);
  208. return *this;
  209. }
  210. /** @brief Specifies tensor layout for an output layer.
  211. The function is used to set tensor layout for an output layer.
  212. @param layout Tensor layout ("NCHW", "NWHC", etc)
  213. will be applied to all output layers.
  214. @return reference to this parameter structure.
  215. */
  216. Params<Net>& cfgOutputTensorLayout(std::string layout) {
  217. detail::getModelToSetAttrOrThrow(m_desc.kind, "output tensor layout")
  218. .output_tensor_layout = std::move(layout);
  219. return *this;
  220. }
  221. /** @overload
  222. @param layout_map Map of pairs: name of corresponding output layer
  223. and its tensor layout represented in std::string ("NCHW", "NHWC", etc)
  224. @return reference to this parameter structure.
  225. */
  226. Params<Net>&
  227. cfgOutputTensorLayout(detail::AttrMap<std::string> layout_map) {
  228. detail::getModelToSetAttrOrThrow(m_desc.kind, "output tensor layout")
  229. .output_tensor_layout = std::move(layout_map);
  230. return *this;
  231. }
  232. /** @brief Specifies model layout for an output layer.
  233. The function is used to set model layout for an output layer.
  234. @param layout Model layout ("NCHW", "NHWC", etc)
  235. will be applied to all output layers.
  236. @return reference to this parameter structure.
  237. */
  238. Params<Net>& cfgOutputModelLayout(std::string layout) {
  239. detail::getModelToSetAttrOrThrow(m_desc.kind, "output model layout")
  240. .output_model_layout = std::move(layout);
  241. return *this;
  242. }
  243. /** @overload
  244. @param layout_map Map of pairs: name of corresponding output layer
  245. and its model layout ("NCHW", "NHWC", etc)
  246. @return reference to this parameter structure.
  247. */
  248. Params<Net>&
  249. cfgOutputModelLayout(detail::AttrMap<std::string> layout_map) {
  250. detail::getModelToSetAttrOrThrow(m_desc.kind, "output model layout")
  251. .output_model_layout = std::move(layout_map);
  252. return *this;
  253. }
  254. /** @brief Specifies tensor precision for an output layer.
  255. The function is used to set tensor precision for an output layer..
  256. @param precision Precision in OpenCV format (CV_8U, CV_32F, ...)
  257. will be applied to all output layers.
  258. @return reference to this parameter structure.
  259. */
  260. Params<Net>& cfgOutputTensorPrecision(int precision) {
  261. detail::getModelToSetAttrOrThrow(m_desc.kind, "output tensor precision")
  262. .output_tensor_precision = precision;
  263. return *this;
  264. }
  265. /** @overload
  266. @param precision_map Map of pairs: name of corresponding output layer
  267. and its precision in OpenCV format (CV_8U, CV_32F, ...)
  268. @return reference to this parameter structure.
  269. */
  270. Params<Net>&
  271. cfgOutputTensorPrecision(detail::AttrMap<int> precision_map) {
  272. detail::getModelToSetAttrOrThrow(m_desc.kind, "output tensor precision")
  273. .output_tensor_precision = std::move(precision_map);
  274. return *this;
  275. }
  276. /** @brief Specifies the new shape for input layers.
  277. The function is used to set new shape for input layers.
  278. @param new_shape New shape will be applied to all input layers.
  279. @return reference to this parameter structure.
  280. */
  281. Params<Net>&
  282. cfgReshape(std::vector<size_t> new_shape) {
  283. detail::getModelToSetAttrOrThrow(m_desc.kind, "reshape")
  284. .new_shapes = std::move(new_shape);
  285. return *this;
  286. }
  287. /** @overload
  288. @param new_shape_map Map of pairs: name of corresponding output layer
  289. and its new shape.
  290. @return reference to this parameter structure.
  291. */
  292. Params<Net>&
  293. cfgReshape(detail::AttrMap<std::vector<size_t>> new_shape_map) {
  294. detail::getModelToSetAttrOrThrow(m_desc.kind, "reshape")
  295. .new_shapes = std::move(new_shape_map);
  296. return *this;
  297. }
  298. /** @brief Specifies number of asynchronous inference requests.
  299. @param nireq Number of inference asynchronous requests.
  300. @return reference to this parameter structure.
  301. */
  302. Params<Net>& cfgNumRequests(const size_t nireq) {
  303. if (nireq == 0) {
  304. cv::util::throw_error(
  305. std::logic_error("Number of inference requests"
  306. " must be greater than zero."));
  307. }
  308. m_desc.nireq = nireq;
  309. return *this;
  310. }
  311. /** @brief Specifies mean values for preprocessing.
  312. *
  313. The function is used to set mean values for input layer preprocessing.
  314. @param mean_values Float vector contains mean values
  315. @return reference to this parameter structure.
  316. */
  317. Params<Net>& cfgMean(std::vector<float> mean_values) {
  318. detail::getModelToSetAttrOrThrow(m_desc.kind, "mean values")
  319. .mean_values = std::move(mean_values);
  320. return *this;
  321. }
  322. /** @overload
  323. @param mean_map Map of pairs: name of corresponding input layer
  324. and its mean values.
  325. @return reference to this parameter structure.
  326. */
  327. Params<Net>& cfgMean(detail::AttrMap<std::vector<float>> mean_map) {
  328. detail::getModelToSetAttrOrThrow(m_desc.kind, "mean values")
  329. .mean_values = std::move(mean_map);
  330. return *this;
  331. }
  332. /** @brief Specifies scale values for preprocessing.
  333. *
  334. The function is used to set scale values for input layer preprocessing.
  335. @param scale_values Float vector contains scale values
  336. @return reference to this parameter structure.
  337. */
  338. Params<Net>& cfgScale(std::vector<float> scale_values) {
  339. detail::getModelToSetAttrOrThrow(m_desc.kind, "scale values")
  340. .scale_values = std::move(scale_values);
  341. return *this;
  342. }
  343. /** @overload
  344. @param scale_map Map of pairs: name of corresponding input layer
  345. and its mean values.
  346. @return reference to this parameter structure.
  347. */
  348. Params<Net>& cfgScale(detail::AttrMap<std::vector<float>> scale_map) {
  349. detail::getModelToSetAttrOrThrow(m_desc.kind, "scale values")
  350. .scale_values = std::move(scale_map);
  351. return *this;
  352. }
  353. /** @brief Specifies resize interpolation algorithm.
  354. *
  355. The function is used to configure resize preprocessing for input layer.
  356. @param interpolation Resize interpolation algorithm.
  357. Supported algorithms: #INTER_NEAREST, #INTER_LINEAR, #INTER_CUBIC.
  358. @return reference to this parameter structure.
  359. */
  360. Params<Net>& cfgResize(int interpolation) {
  361. detail::getModelToSetAttrOrThrow(m_desc.kind, "resize preprocessing")
  362. .interpolation = std::move(interpolation);
  363. return *this;
  364. }
  365. /** @overload
  366. @param interpolation Map of pairs: name of corresponding input layer
  367. and its resize algorithm.
  368. @return reference to this parameter structure.
  369. */
  370. Params<Net>& cfgResize(detail::AttrMap<int> interpolation) {
  371. detail::getModelToSetAttrOrThrow(m_desc.kind, "resize preprocessing")
  372. .interpolation = std::move(interpolation);
  373. return *this;
  374. }
  375. // BEGIN(G-API's network parametrization API)
  376. GBackend backend() const { return cv::gapi::ov::backend(); }
  377. std::string tag() const { return Net::tag(); }
  378. cv::util::any params() const { return { m_desc }; }
  379. // END(G-API's network parametrization API)
  380. protected:
  381. detail::ParamDesc m_desc;
  382. };
  383. /*
  384. * @brief This structure provides functions for generic network type that
  385. * fill inference parameters.
  386. * @see struct Generic
  387. */
  388. template<>
  389. class Params<cv::gapi::Generic> {
  390. public:
  391. /** @brief Class constructor.
  392. Constructs Params based on model information and specifies default values for other
  393. inference description parameters. Model is loaded and compiled using "OpenVINO Toolkit".
  394. @param tag string tag of the network for which these parameters are intended.
  395. @param model_path Path to a model.
  396. @param bin_path Path to a data file.
  397. For IR format (*.bin):
  398. If path is empty, will try to read a bin file with the same name as xml.
  399. If the bin file with the same name is not found, will load IR without weights.
  400. For PDPD (*.pdmodel) and ONNX (*.onnx) formats bin_path isn't used.
  401. @param device target device to use.
  402. */
  403. Params(const std::string &tag,
  404. const std::string &model_path,
  405. const std::string &bin_path,
  406. const std::string &device)
  407. : m_tag(tag),
  408. m_desc( detail::ParamDesc::Kind{detail::ParamDesc::Model{model_path, bin_path}}
  409. , device
  410. , true /* is generic */
  411. , 0u
  412. , 0u) {
  413. }
  414. /** @overload
  415. This constructor for pre-compiled networks. Model is imported from pre-compiled
  416. blob.
  417. @param tag string tag of the network for which these parameters are intended.
  418. @param blob_path path to the compiled model (*.blob).
  419. @param device target device to use.
  420. */
  421. Params(const std::string &tag,
  422. const std::string &blob_path,
  423. const std::string &device)
  424. : m_tag(tag),
  425. m_desc( detail::ParamDesc::Kind{detail::ParamDesc::CompiledModel{blob_path}}
  426. , device
  427. , true /* is generic */
  428. , 0u
  429. , 0u) {
  430. }
  431. /** @see ov::Params::cfgPluginConfig. */
  432. Params& cfgPluginConfig(const detail::ParamDesc::PluginConfigT &config) {
  433. m_desc.config = config;
  434. return *this;
  435. }
  436. /** @see ov::Params::cfgInputTensorLayout. */
  437. Params& cfgInputTensorLayout(std::string layout) {
  438. detail::getModelToSetAttrOrThrow(m_desc.kind, "input tensor layout")
  439. .input_tensor_layout = std::move(layout);
  440. return *this;
  441. }
  442. /** @overload */
  443. Params&
  444. cfgInputTensorLayout(detail::AttrMap<std::string> layout_map) {
  445. detail::getModelToSetAttrOrThrow(m_desc.kind, "input tensor layout")
  446. .input_tensor_layout = std::move(layout_map);
  447. return *this;
  448. }
  449. /** @see ov::Params::cfgInputModelLayout. */
  450. Params& cfgInputModelLayout(std::string layout) {
  451. detail::getModelToSetAttrOrThrow(m_desc.kind, "input model layout")
  452. .input_model_layout = std::move(layout);
  453. return *this;
  454. }
  455. /** @overload */
  456. Params&
  457. cfgInputModelLayout(detail::AttrMap<std::string> layout_map) {
  458. detail::getModelToSetAttrOrThrow(m_desc.kind, "input model layout")
  459. .input_model_layout = std::move(layout_map);
  460. return *this;
  461. }
  462. /** @see ov::Params::cfgOutputTensorLayout. */
  463. Params& cfgOutputTensorLayout(std::string layout) {
  464. detail::getModelToSetAttrOrThrow(m_desc.kind, "output tensor layout")
  465. .output_tensor_layout = std::move(layout);
  466. return *this;
  467. }
  468. /** @overload */
  469. Params&
  470. cfgOutputTensorLayout(detail::AttrMap<std::string> layout_map) {
  471. detail::getModelToSetAttrOrThrow(m_desc.kind, "output tensor layout")
  472. .output_tensor_layout = std::move(layout_map);
  473. return *this;
  474. }
  475. /** @see ov::Params::cfgOutputModelLayout. */
  476. Params& cfgOutputModelLayout(std::string layout) {
  477. detail::getModelToSetAttrOrThrow(m_desc.kind, "output model layout")
  478. .output_model_layout = std::move(layout);
  479. return *this;
  480. }
  481. /** @overload */
  482. Params&
  483. cfgOutputModelLayout(detail::AttrMap<std::string> layout_map) {
  484. detail::getModelToSetAttrOrThrow(m_desc.kind, "output model layout")
  485. .output_model_layout = std::move(layout_map);
  486. return *this;
  487. }
  488. /** @see ov::Params::cfgOutputTensorPrecision. */
  489. Params& cfgOutputTensorPrecision(int precision) {
  490. detail::getModelToSetAttrOrThrow(m_desc.kind, "output tensor precision")
  491. .output_tensor_precision = precision;
  492. return *this;
  493. }
  494. /** @overload */
  495. Params&
  496. cfgOutputTensorPrecision(detail::AttrMap<int> precision_map) {
  497. detail::getModelToSetAttrOrThrow(m_desc.kind, "output tensor precision")
  498. .output_tensor_precision = std::move(precision_map);
  499. return *this;
  500. }
  501. /** @see ov::Params::cfgReshape. */
  502. Params& cfgReshape(std::vector<size_t> new_shape) {
  503. detail::getModelToSetAttrOrThrow(m_desc.kind, "reshape")
  504. .new_shapes = std::move(new_shape);
  505. return *this;
  506. }
  507. /** @overload */
  508. Params&
  509. cfgReshape(detail::AttrMap<std::vector<size_t>> new_shape_map) {
  510. detail::getModelToSetAttrOrThrow(m_desc.kind, "reshape")
  511. .new_shapes = std::move(new_shape_map);
  512. return *this;
  513. }
  514. /** @see ov::Params::cfgNumRequests. */
  515. Params& cfgNumRequests(const size_t nireq) {
  516. if (nireq == 0) {
  517. cv::util::throw_error(
  518. std::logic_error("Number of inference requests"
  519. " must be greater than zero."));
  520. }
  521. m_desc.nireq = nireq;
  522. return *this;
  523. }
  524. /** @see ov::Params::cfgMean. */
  525. Params& cfgMean(std::vector<float> mean_values) {
  526. detail::getModelToSetAttrOrThrow(m_desc.kind, "mean values")
  527. .mean_values = std::move(mean_values);
  528. return *this;
  529. }
  530. /** @overload */
  531. Params& cfgMean(detail::AttrMap<std::vector<float>> mean_map) {
  532. detail::getModelToSetAttrOrThrow(m_desc.kind, "mean values")
  533. .mean_values = std::move(mean_map);
  534. return *this;
  535. }
  536. /** @see ov::Params::cfgScale. */
  537. Params& cfgScale(std::vector<float> scale_values) {
  538. detail::getModelToSetAttrOrThrow(m_desc.kind, "scale values")
  539. .scale_values = std::move(scale_values);
  540. return *this;
  541. }
  542. /** @overload */
  543. Params& cfgScale(detail::AttrMap<std::vector<float>> scale_map) {
  544. detail::getModelToSetAttrOrThrow(m_desc.kind, "scale values")
  545. .scale_values = std::move(scale_map);
  546. return *this;
  547. }
  548. /** @see ov::Params::cfgResize. */
  549. Params& cfgResize(int interpolation) {
  550. detail::getModelToSetAttrOrThrow(m_desc.kind, "resize preprocessing")
  551. .interpolation = std::move(interpolation);
  552. return *this;
  553. }
  554. /** @overload */
  555. Params& cfgResize(detail::AttrMap<int> interpolation) {
  556. detail::getModelToSetAttrOrThrow(m_desc.kind, "resize preprocessing")
  557. .interpolation = std::move(interpolation);
  558. return *this;
  559. }
  560. // BEGIN(G-API's network parametrization API)
  561. GBackend backend() const { return cv::gapi::ov::backend(); }
  562. std::string tag() const { return m_tag; }
  563. cv::util::any params() const { return { m_desc }; }
  564. // END(G-API's network parametrization API)
  565. protected:
  566. std::string m_tag;
  567. detail::ParamDesc m_desc;
  568. };
  569. } // namespace ov
  570. namespace wip { namespace ov {
  571. /**
  572. * @brief Ask G-API OpenVINO backend to run only inference of model provided.
  573. *
  574. * G-API OpenVINO backend will perform only the inference of the model provided
  575. * without populating input and copying back output data.
  576. * This mode is used to evaluate the pure inference performance of the model without
  577. * taking into account the i/o data transfer.
  578. */
  579. struct benchmark_mode { };
  580. } // namespace ov
  581. } // namespace wip
  582. } // namespace gapi
  583. namespace detail
  584. {
  585. template<> struct CompileArgTag<cv::gapi::wip::ov::benchmark_mode>
  586. {
  587. static const char* tag() { return "gapi.wip.ov.benchmark_mode"; }
  588. };
  589. }
  590. } // namespace cv
  591. #endif // OPENCV_GAPI_INFER_OV_HPP