diff --git a/README.md b/README.md
index 33be50bc..67759dfb 100644
--- a/README.md
+++ b/README.md
@@ -6,7 +6,11 @@ This repo contains gnuradio OOT blocks, used by IQTLabs projects (including [gam
 
 ### [image_inference](grc/iqtlabs_image_inference.block.yml)
 
-Produce a spectrogram like image that may be used by machine learning pipelines (typically torchserve based) to infer the presence of signals in FFT output.
+Produce a spectrogram-like image for inference pipelines (typically Torchserve based) to infer the presence of signals in FFT output.
+
+### [iq_inference](grc/iqtlabs_iq_inference.block.yml)
+
+Correlate samples and power for inference pipelines (typically Torchserve based) to infer the presence of signals in raw I/Q samples.
 
 ### [retune_pre_fft](grc/iqtlabs_retune_pre_fft.block.yml)
 
diff --git a/grc/iqtlabs_image_inference.block.yml b/grc/iqtlabs_image_inference.block.yml
index 583c82f1..dfa30a76 100644
--- a/grc/iqtlabs_image_inference.block.yml
+++ b/grc/iqtlabs_image_inference.block.yml
@@ -5,7 +5,7 @@ category: '[iqtlabs]'
 flags: [python, cpp]
 documentation: |-
     This block accepts dB values from retune_fft, reformats them
-    as images and runs inference on them via torchserve. The inference
+    as images and runs inference on them via Torchserve. The inference
     results are used to annotate the images with bounding boxes and
     RSSI values (the bounding boxes are used to select the original
     dB power values within the boxes).
@@ -13,8 +13,7 @@ documentation: |-
     Torchserve inference is done in a background thread, to avoid
     blocking the flowgraph. Torchserve batching is currently not done,
     to trade inference latency for efficiency (generally, the inference
-    response time is much less than scanner dwell time). Also currently,
-    only one model name/server is supported.
+    response time is much less than scanner dwell time).
 
     input: vector of floats, representing FFT dB power values,
diff --git a/grc/iqtlabs_iq_inference.block.yml b/grc/iqtlabs_iq_inference.block.yml
index 57b395d1..965909ed 100644
--- a/grc/iqtlabs_iq_inference.block.yml
+++ b/grc/iqtlabs_iq_inference.block.yml
@@ -4,7 +4,31 @@ label: iq_inference
 category: '[iqtlabs]'
 flags: [python, cpp]
 documentation: |-
-    Test IQ inference
+    This block accepts a stream of complex I/Q samples and dB values,
+    correlates them in time, and runs inference on the samples via Torchserve.
+
+    Torchserve inference is done in a background thread, to avoid
+    blocking the flowgraph. Torchserve batching is currently not done,
+    to trade inference latency for efficiency (generally, the inference
+    response time is much less than scanner dwell time).
+
+    input:
+        vector of complex, representing original I/Q samples,
+        tagged with center frequency.
+        vector of floats, representing FFT dB power values,
+        tagged with center frequency.
+    output:
+        JSON inference results.
+
+    parameters:
+        tag: received frequency tag name.
+        vlen: length of complex/FFT dB vector.
+        min_peak_points: only run inference on buckets with this minimum dB power.
+        model_names: if not empty, comma-separated list of model names.
+        model_server: if not empty, do Torchserve inference to this address.
+        confidence: only output inference results where confidence is greater.
+        n_inference: if > 0, only run inference on 1/n_inference sample vectors.
+        samp_rate: sample rate.
 
 templates:
     imports: from gnuradio import iqtlabs
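Both inference blocks documented above talk to a Torchserve-style model server over plain HTTP (the implementations below build the request with boost::beast, visible in the `req.prepare_payload()` calls). For orientation, here is a minimal, self-contained sketch of that request/response cycle; the host, port, model name, content type, and payload below are placeholder assumptions, not values taken from the blocks.

```cpp
// Minimal boost::beast HTTP POST to a Torchserve-style predictions
// endpoint. All endpoint details below are hypothetical placeholders.
#include <boost/asio/io_context.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/beast/core.hpp>
#include <boost/beast/http.hpp>
#include <iostream>
#include <string>

namespace beast = boost::beast;
namespace http = beast::http;
namespace net = boost::asio;
using tcp = net::ip::tcp;

int main() {
  const std::string host = "localhost", port = "8080"; // placeholder server
  net::io_context ioc;
  tcp::resolver resolver(ioc);
  beast::tcp_stream stream(ioc);
  stream.connect(resolver.resolve(host, port));

  // POST a payload; the real blocks send image bytes or raw I/Q samples.
  http::request<http::string_body> req{http::verb::post,
                                       "/predictions/mymodel", 11};
  req.set(http::field::host, host);
  req.set(http::field::content_type, "application/octet-stream");
  req.body() = std::string(1024, '\0'); // placeholder payload
  req.prepare_payload();                // sets Content-Length
  http::write(stream, req);

  // Read back the (hopefully JSON) inference results as a string.
  beast::flat_buffer buffer;
  http::response<http::string_body> res;
  http::read(stream, buffer, res);
  std::cout << res.body() << std::endl;
  return 0;
}
```

The blocks run this exchange in a background thread so a slow server never blocks the flowgraph; the sketch above is synchronous purely for brevity.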
diff --git a/lib/image_inference_impl.cc b/lib/image_inference_impl.cc
index fd5e1371..6fced91c 100644
--- a/lib/image_inference_impl.cc
+++ b/lib/image_inference_impl.cc
@@ -456,7 +456,7 @@ void image_inference_impl::bbox_text(const output_item_type &output_item,
 size_t image_inference_impl::parse_inference_(
     const output_item_type &output_item, const std::string &results,
     const std::string &model_name, nlohmann::json &results_json,
-    std::string &error) {
+    std::string &error, bool &valid_json) {
   size_t rendered_predictions = 0;
   const float xf = float(output_item.points_buffer->cols) /
                    float(output_item.image_buffer->cols);
@@ -515,6 +515,7 @@ size_t image_inference_impl::parse_inference_(
     }
   } catch (std::exception &ex) {
     error = "invalid json: " + std::string(ex.what()) + " " + results;
+    valid_json = false;
   }
   return rendered_predictions;
 }
@@ -574,6 +575,7 @@ void image_inference_impl::run_inference_() {
     req.body() = body;
     req.prepare_payload();
     std::string results;
+    bool valid_json = true;
 
     // attempt to re-use existing connection. may fail if an http 1.1 server
     // has dropped the connection to use in the meantime.
@@ -613,16 +615,22 @@
       if (error.size() == 0 &&
           (results.size() == 0 || !nlohmann::json::accept(results))) {
         error = "invalid json: " + results;
+        valid_json = false;
       }
 
       if (error.size() == 0) {
-        rendered_predictions += parse_inference_(
-            output_item, results, model_name, results_json, error);
+        rendered_predictions +=
+            parse_inference_(output_item, results, model_name, results_json,
+                             error, valid_json);
       }
 
       if (error.size()) {
         d_logger->error(error);
-        output_json["error"] = error;
+        if (valid_json) {
+          output_json["error"] = error;
+        } else {
+          output_json["error"] = "invalid json";
+        }
         inference_connected_ = false;
       }
     }
diff --git a/lib/image_inference_impl.h b/lib/image_inference_impl.h
index 7a0ca8d7..29be284b 100644
--- a/lib/image_inference_impl.h
+++ b/lib/image_inference_impl.h
@@ -277,7 +277,8 @@ class image_inference_impl : public image_inference, base_impl {
   size_t parse_inference_(const output_item_type &output_item,
                           const std::string &results,
                           const std::string &model_names,
-                          nlohmann::json &results_json, std::string &error);
+                          nlohmann::json &results_json, std::string &error,
+                          bool &valid_json);
   void bbox_text(const output_item_type &output_item, const std::string &text,
                  int pos, int cx, int cy);
   void volk_min_max_mean(const cv::Mat &mat, float &min, float &max,
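The image_inference changes above and the iq_inference changes below share one idea: a new valid_json flag records whether the server reply was usable JSON, and when it was not, the output record carries only the fixed string "invalid json" while the full raw reply goes to the logger, presumably so that an arbitrary (possibly huge or non-JSON) server reply is never echoed into the JSON output stream. A distilled, runnable sketch of that gate, using a hypothetical bad reply:

```cpp
// Distilled sketch of the valid_json pattern introduced in this diff:
// reject non-JSON replies up front and keep the output record sanitized.
#include <iostream>
#include <nlohmann/json.hpp>
#include <string>

int main() {
  const std::string results = "<html>502 Bad Gateway</html>"; // hypothetical reply
  nlohmann::json output_json;
  std::string error;
  bool valid_json = true;

  // Same gate as run_inference_(): accept() validates without throwing.
  if (results.empty() || !nlohmann::json::accept(results)) {
    error = "invalid json: " + results;
    valid_json = false;
  }

  if (!error.empty()) {
    std::cerr << error << std::endl; // log the full raw reply
    if (valid_json) {
      output_json["error"] = error; // safe to echo into the output record
    } else {
      output_json["error"] = "invalid json"; // never echo the raw reply
    }
  }
  std::cout << output_json.dump() << std::endl; // {"error":"invalid json"}
  return 0;
}
```

The if/else at the end mirrors the branch added to run_inference_() in both implementation files.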
diff --git a/lib/iq_inference_impl.cc b/lib/iq_inference_impl.cc
index f655145a..e1c4419d 100644
--- a/lib/iq_inference_impl.cc
+++ b/lib/iq_inference_impl.cc
@@ -328,6 +328,7 @@ void iq_inference_impl::run_inference_() {
     std::string results;
     // TODO: troubleshoot test flask server hang after one request.
     inference_connected_ = false;
+    bool valid_json = true;
 
     // attempt to re-use existing connection. may fail if an http 1.1 server
     // has dropped the connection to use in the meantime.
@@ -367,6 +368,7 @@ void iq_inference_impl::run_inference_() {
       if (error.size() == 0 &&
           (results.size() == 0 || !nlohmann::json::accept(results))) {
         error = "invalid json: " + results;
+        valid_json = false;
       }
 
       if (error.size() == 0) {
@@ -387,12 +389,17 @@ void iq_inference_impl::run_inference_() {
         }
       } catch (std::exception &ex) {
         error = "invalid json: " + std::string(ex.what()) + " " + results;
+        valid_json = false;
       }
     }
 
     if (error.size()) {
       d_logger->error(error);
-      output_json["error"] = error;
+      if (valid_json) {
+        output_json["error"] = error;
+      } else {
+        output_json["error"] = "invalid json";
+      }
       inference_connected_ = false;
     }
   }
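Note that valid_json is cleared in two distinct places: when nlohmann::json::accept() rejects the raw reply outright, and again in the catch blocks, since a reply can be syntactically valid JSON yet still throw once fields are accessed with an unexpected shape. A small sketch of that second path; the reply layout used here is a hypothetical stand-in, not the actual Torchserve response schema:

```cpp
// Sketch of the catch-block path: json::accept() passes, but accessing a
// field with the wrong shape throws, so valid_json must still be cleared.
#include <iostream>
#include <nlohmann/json.hpp>
#include <string>

int main() {
  // Well-formed JSON, but "predictions" is a string, not an array.
  const std::string results = R"({"predictions": "unexpected"})";
  std::string error;
  bool valid_json = true;

  try {
    auto json = nlohmann::json::parse(results); // parses fine
    // operator[] with a numeric index on a string value throws type_error.
    float conf = json["predictions"][0]["confidence"].get<float>();
    std::cout << conf << std::endl;
  } catch (std::exception &ex) {
    // Mirrors the catch blocks in parse_inference_() / run_inference_().
    error = "invalid json: " + std::string(ex.what()) + " " + results;
    valid_json = false;
  }

  std::cout << "valid_json=" << valid_json << " error=" << error << std::endl;
  return 0;
}
```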