Skip to content

Commit e5a43e0

Browse files
committed
[C-Api] async invoke feature
Add a new callback and options to get the inference result asynchronously.

Signed-off-by: Jaeyun Jung <jy1210.jung@samsung.com>
1 parent 6b627e3 commit e5a43e0

3 files changed

Lines changed: 119 additions & 1 deletion

File tree

c/include/nnstreamer-tizen-internal.h

Lines changed: 14 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,17 @@
1919
extern "C" {
2020
#endif /* __cplusplus */
2121

22+
/**
 * @brief Callback for tensor data stream of machine-learning API.
 * @details Note that the buffer may be deallocated after the return and this is synchronously called. Thus, if you need the data afterwards, copy the data to another buffer and return fast. Do not spend too much time in the callback.
 * @since_tizen 10.0
 * @remarks The @a data can be used only in the callback. To use outside, make a copy.
 * @param[in] data The handle of the tensor data (a single frame. tensor/tensors). You can get the information of given tensor data frame using ml_tensors_data_get_info().
 * @param[in,out] user_data User application's private data.
 * @return @c 0 on success. Otherwise a negative error value.
 */
typedef int (*ml_tensors_data_cb) (const ml_tensors_data_h data, void *user_data);
32+
2233
/**
2334
* @brief Constructs the pipeline (GStreamer + NNStreamer).
2435
* @details This function is to construct the pipeline without checking the permission in platform internally. See ml_pipeline_construct() for the details.
@@ -38,7 +49,9 @@ typedef struct {
3849
char *custom_option; /**< Custom option string for neural network framework. */
3950
char *fw_name; /**< The explicit framework name given by user */
4051
int invoke_dynamic; /**< True for supporting invoke with flexible output. */
41-
int invoke_async; /**< The sub-plugin must support asynchronous output to use this option. If set to TRUE, the sub-plugin can generate multiple outputs asynchronously per single input. Otherwise, only synchronous single-output is expected and async callback/handle are ignored. */
52+
int invoke_async; /**< The sub-plugin must support asynchronous output to use this option. If set to TRUE, the sub-plugin can generate multiple outputs asynchronously per single input. Otherwise, only synchronous single-output is expected and async callback is ignored. */
53+
ml_tensors_data_cb invoke_async_cb; /**< Callback function to be called when the sub-plugin generates an output asynchronously. This is only available when invoke_async is set to TRUE. */
54+
void *invoke_async_pdata; /**< Private data to be passed to async callback. */
4255
} ml_single_preset;
4356

4457
/**

c/src/ml-api-inference-single.c

Lines changed: 89 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -143,6 +143,8 @@ typedef struct
143143
GList *destroy_data_list; /**< data to be freed by filter */
144144
gboolean invoke_dynamic; /**< true to invoke flexible tensor */
145145
gboolean invoke_async; /**< true to invoke and return result asynchronously */
146+
ml_tensors_data_cb invoke_async_cb; /**< Callback function to be called when the sub-plugin generates an output asynchronously. */
147+
void *invoke_async_pdata; /**< Private data to be passed to async callback. */
146148
} ml_single;
147149

148150
/**
@@ -568,6 +570,72 @@ invoke_thread (void *arg)
568570
return NULL;
569571
}
570572

573+
/**
 * @brief Internal callback that delivers an asynchronous sub-plugin output to the application.
 * @details Registered with the tensor-filter sub-plugin when the 'invoke_async' option is
 *          enabled. The raw GstTensorMemory/GstTensorsInfo pair from the sub-plugin is copied
 *          into newly created ml_tensors_info_h / ml_tensors_data_h handles, the user's
 *          invoke_async_cb is invoked, and the temporary handles are destroyed before return.
 * @param[in] data Array of output tensor memories generated by the sub-plugin.
 * @param[in] info Tensors information describing @a data (number of tensors, dimensions, types).
 * @param[in] user_data The ml_single_h handle that was registered together with this callback.
 * @return 0 on success, -1 on any failure (handle/conversion error, or a non-zero
 *         return value from the user callback).
 */
static int
ml_single_async_cb (GstTensorMemory * data, GstTensorsInfo * info,
    void *user_data)
{
  ml_single_h single = (ml_single_h) user_data;
  ml_single *single_h;
  ml_tensors_info_h _info = NULL;
  ml_tensors_data_h _data = NULL;
  unsigned int i;
  int ret = ML_ERROR_NONE;

  /* Validates the handle and acquires the single-handle lock; held until the unlock below. */
  ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 0);

  if (!single_h->invoke_async_cb) {
    /* No callback, do nothing. Internal state changing? */
    goto done;
  }

  ret = _ml_tensors_info_create_from_gst (&_info, info);
  if (ret != ML_ERROR_NONE) {
    _ml_error_report
        ("Cannot handle tensor data stream. Failed to create ml information.");
    goto done;
  }

  ret = ml_tensors_data_create (_info, &_data);
  if (ret != ML_ERROR_NONE) {
    _ml_error_report
        ("Cannot handle tensor data stream. Failed to create ml data.");
    goto done;
  }

  /* Copy each tensor memory into the ml data handle; the sub-plugin may
   * release the raw buffers after this callback returns. */
  for (i = 0; i < info->num_tensors; ++i) {
    ret = ml_tensors_data_set_tensor_data (_data, i,
        data[i].data, data[i].size);
    if (ret != ML_ERROR_NONE) {
      _ml_error_report
          ("Cannot handle tensor data stream. Failed to update ml data of index %u, size is %zu.",
          i, data[i].size);
      goto done;
    }
  }

  /* NOTE(review): the user callback runs while the single-handle lock is held —
   * confirm the callback is not allowed to re-enter ml_single_* APIs on this handle. */
  ret = single_h->invoke_async_cb (_data, single_h->invoke_async_pdata);
  if (ret != ML_ERROR_NONE) {
    _ml_error_report
        ("Cannot handle tensor data stream. The callback function returns error '%d'.",
        ret);
  }

done:
  /* The handles are only valid inside this callback (see ml_tensors_data_cb remarks),
   * so they are destroyed unconditionally on every exit path. */
  if (_info) {
    ml_tensors_info_destroy (_info);
  }

  if (_data) {
    ml_tensors_data_destroy (_data);
  }

  ML_SINGLE_HANDLE_UNLOCK (single_h);
  return (ret == ML_ERROR_NONE) ? 0 : -1;
}
638+
571639
/**
572640
* @brief Sets the information (tensor dimension, type, name and so on) of required input data for the given model, and get updated output data information.
573641
* @details Note that a model/framework may not support setting such information.
@@ -922,6 +990,10 @@ _ml_single_open_custom_validate_arguments (ml_single_h * single,
922990
_ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
923991
"The parameter, 'info' (ml_single_preset *), is not valid. Its models entry if NULL (info->models is NULL).");
924992

993+
if (info->invoke_async && !info->invoke_async_cb)
994+
_ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
995+
"The parameter, 'info' (ml_single_preset *), is not valid. It has 'invoke_async' entry but its callback 'invoke_async_cb' is NULL");
996+
925997
return ML_ERROR_NONE;
926998
}
927999

@@ -1016,6 +1088,8 @@ ml_single_open_custom (ml_single_h * single, ml_single_preset * info)
10161088

10171089
single_h->invoke_dynamic = info->invoke_dynamic;
10181090
single_h->invoke_async = info->invoke_async;
1091+
single_h->invoke_async_cb = info->invoke_async_cb;
1092+
single_h->invoke_async_pdata = info->invoke_async_pdata;
10191093

10201094
filter_obj = G_OBJECT (single_h->filter);
10211095

@@ -1095,6 +1169,12 @@ ml_single_open_custom (ml_single_h * single, ml_single_preset * info)
10951169
g_object_set (filter_obj, "custom", info->custom_option, NULL);
10961170
}
10971171

1172+
/* Set async callback. */
1173+
if (single_h->invoke_async) {
1174+
single_h->klass->set_invoke_async_callback (single_h->filter,
1175+
ml_single_async_cb, single_h);
1176+
}
1177+
10981178
/* 4. Start the nnfw to get inout configurations if needed */
10991179
if (!single_h->klass->start (single_h->filter)) {
11001180
_ml_error_report
@@ -1232,6 +1312,12 @@ ml_single_open_with_option (ml_single_h * single, const ml_option_h option)
12321312
if (g_ascii_strcasecmp ((gchar *) value, "true") == 0)
12331313
info.invoke_async = TRUE;
12341314
}
1315+
if (ML_ERROR_NONE == ml_option_get (option, "async_callback", &value)) {
1316+
info.invoke_async_cb = (ml_tensors_data_cb) value;
1317+
}
1318+
if (ML_ERROR_NONE == ml_option_get (option, "async_data", &value)) {
1319+
info.invoke_async_pdata = value;
1320+
}
12351321

12361322
return ml_single_open_custom (single, &info);
12371323
}
@@ -1259,6 +1345,9 @@ ml_single_close (ml_single_h single)
12591345

12601346
ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 1);
12611347

1348+
/* First, clear all callbacks. */
1349+
single_h->invoke_async_cb = NULL;
1350+
12621351
single_h->state = JOIN_REQUESTED;
12631352
g_cond_broadcast (&single_h->cond);
12641353
invoking = single_h->invoking;

c/src/ml-api-service-extension.c

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -67,6 +67,17 @@ typedef struct
6767
GHashTable *node_table;
6868
} ml_extension_s;
6969

70+
/**
71+
* @brief Internal function to handle the asynchronous invoke.
72+
*/
73+
static int
74+
_ml_extension_async_cb (const ml_tensors_data_h data, void *user_data)
75+
{
76+
ml_service_s *mls = (ml_service_s *) user_data;
77+
78+
return _ml_service_invoke_event_new_data (mls, NULL, data);
79+
}
80+
7081
/**
7182
* @brief Internal function to create node info in pipeline.
7283
*/
@@ -355,6 +366,11 @@ _ml_extension_conf_parse_single (ml_service_s * mls, JsonObject * single)
355366

356367
if (STR_IS_VALID (invoke_async)) {
357368
ml_option_set (option, "invoke_async", g_strdup (invoke_async), g_free);
369+
370+
if (g_ascii_strcasecmp (invoke_async, "true") == 0) {
371+
ml_option_set (option, "async_callback", _ml_extension_async_cb, NULL);
372+
ml_option_set (option, "async_data", mls, NULL);
373+
}
358374
}
359375
}
360376

0 commit comments

Comments (0)