Skip to content

Commit

Permalink
Merge pull request #268 from anarkiwi/log2
Browse files Browse the repository at this point in the history
add background inference, make consistent.
  • Loading branch information
anarkiwi authored May 9, 2024
2 parents 4930435 + 88f5c1f commit 0f80b0a
Show file tree
Hide file tree
Showing 9 changed files with 143 additions and 82 deletions.
7 changes: 5 additions & 2 deletions grc/iqtlabs_iq_inference.block.yml
Original file line number Diff line number Diff line change
Expand Up @@ -32,14 +32,15 @@ documentation: |-
n_inference: if > 0, only run inference on 1/n_inference images.
samp_rate: sample rate.
power_inference: if True, infer on power as well as samples.
background: if True, do inference in the background.
templates:
imports: from gnuradio import iqtlabs
make: >
iqtlabs.iq_inference(
${tag}, ${vlen}, ${n_vlen}, ${sample_buffer}, ${min_peak_points},
${model_server}, ${model_names}, ${confidence}, ${n_inference},
${samp_rate}, ${power_inference})
${samp_rate}, ${power_inference}, ${background})
cpp_templates:
includes: ['#include <gnuradio/iqtlabs/iq_inference.h>']
Expand All @@ -48,7 +49,7 @@ cpp_templates:
this->${id} = gr::iqtlabs::iq_inference::make(
${tag}, ${vlen}, ${n_vlen}, ${sample_buffer}, ${min_peak_points},
${model_server}, ${model_names}, ${confidence}, ${n_inference},
${samp_rate}, ${power_inference});
${samp_rate}, ${power_inference}, ${background});
link: ['libgnuradio-iqtlabs.so']

parameters:
Expand All @@ -75,6 +76,8 @@ parameters:
dtype: int
- id: power_inference
dtype: bool
- id: background
dtype: bool

asserts:
- ${ tag != "" }
Expand Down
3 changes: 2 additions & 1 deletion include/gnuradio/iqtlabs/iq_inference.h
Original file line number Diff line number Diff line change
Expand Up @@ -232,7 +232,8 @@ class IQTLABS_API iq_inference : virtual public gr::block {
uint64_t sample_buffer, double min_peak_points,
const std::string &model_server,
const std::string &model_names, double confidence,
uint64_t n_inference, int samp_rate, bool power_inference);
uint64_t n_inference, int samp_rate, bool power_inference,
bool background);
};

} // namespace iqtlabs
Expand Down
84 changes: 50 additions & 34 deletions lib/iq_inference_impl.cc
Original file line number Diff line number Diff line change
Expand Up @@ -211,28 +211,25 @@
namespace gr {
namespace iqtlabs {

iq_inference::sptr iq_inference::make(const std::string &tag, COUNT_T vlen,
COUNT_T n_vlen, COUNT_T sample_buffer,
double min_peak_points,
const std::string &model_server,
const std::string &model_names,
double confidence, COUNT_T n_inference,
int samp_rate, bool power_inference) {
iq_inference::sptr iq_inference::make(
const std::string &tag, COUNT_T vlen, COUNT_T n_vlen, COUNT_T sample_buffer,
double min_peak_points, const std::string &model_server,
const std::string &model_names, double confidence, COUNT_T n_inference,
int samp_rate, bool power_inference, bool background) {
return gnuradio::make_block_sptr<iq_inference_impl>(
tag, vlen, n_vlen, sample_buffer, min_peak_points, model_server,
model_names, confidence, n_inference, samp_rate, power_inference);
model_names, confidence, n_inference, samp_rate, power_inference,
background);
}

/*
* The private constructor
*/
iq_inference_impl::iq_inference_impl(const std::string &tag, COUNT_T vlen,
COUNT_T n_vlen, COUNT_T sample_buffer,
double min_peak_points,
const std::string &model_server,
const std::string &model_names,
double confidence, COUNT_T n_inference,
int samp_rate, bool power_inference)
iq_inference_impl::iq_inference_impl(
const std::string &tag, COUNT_T vlen, COUNT_T n_vlen, COUNT_T sample_buffer,
double min_peak_points, const std::string &model_server,
const std::string &model_names, double confidence, COUNT_T n_inference,
int samp_rate, bool power_inference, bool background)
: gr::block("iq_inference",
gr::io_signature::makev(
2 /* min inputs */, 2 /* min inputs */,
Expand All @@ -244,9 +241,9 @@ iq_inference_impl::iq_inference_impl(const std::string &tag, COUNT_T vlen,
sample_buffer_(sample_buffer), min_peak_points_(min_peak_points),
model_server_(model_server), confidence_(confidence),
n_inference_(n_inference), samp_rate_(samp_rate),
power_inference_(power_inference), inference_count_(0), running_(true),
last_rx_freq_(0), last_rx_time_(0), samples_since_tag_(0),
sample_clock_(0), last_full_time_(0) {
power_inference_(power_inference), background_(background),
inference_count_(0), running_(true), last_rx_freq_(0), last_rx_time_(0),
samples_since_tag_(0), sample_clock_(0), last_full_time_(0) {
batch_ = vlen_ * n_vlen_;
samples_lookback_.reset(new gr_complex[batch_ * sample_buffer]);
unsigned int alignment = volk_get_alignment();
Expand All @@ -265,8 +262,10 @@ iq_inference_impl::iq_inference_impl(const std::string &tag, COUNT_T vlen,
d_logger->error("missing model name(s)");
}
}
inference_thread_.reset(
new std::thread(&iq_inference_impl::background_run_inference_, this));
if (background_) {
inference_thread_.reset(
new std::thread(&iq_inference_impl::background_run_inference_, this));
}
torchserve_client_.reset(new torchserve_client(host_, port_));
set_output_multiple(n_vlen_);
message_port_register_out(INFERENCE_KEY);
Expand All @@ -293,7 +292,9 @@ void iq_inference_impl::background_run_inference_() {
bool iq_inference_impl::stop() {
d_logger->info("stopping");
running_ = false;
inference_thread_->join();
if (inference_thread_) {
inference_thread_->join();
}
run_inference_();
return true;
}
Expand Down Expand Up @@ -385,15 +386,16 @@ void iq_inference_impl::run_inference_() {
void iq_inference_impl::forecast(int noutput_items,
gr_vector_int &ninput_items_required) {
ninput_items_required[0] = 1;
ninput_items_required[1] = 1;
}

void iq_inference_impl::process_items_(COUNT_T power_in_count,
COUNT_T &power_read,
const float *&power_in,
COUNT_T &consumed) {
for (COUNT_T i = 0; i < power_in_count; i += n_vlen_, power_in += batch_,
samples_since_tag_ += batch_, sample_clock_ += batch_,
consumed += n_vlen_) {
for (COUNT_T i = 0; i < power_in_count; i += n_vlen_, consumed += n_vlen_,
power_in += batch_, samples_since_tag_ += batch_,
sample_clock_ += batch_) {
COUNT_T j = (power_read + i) % sample_buffer_;
// Gate on average power.
volk_32f_accumulator_s32f(power_total_.get(), power_in, batch_);
Expand All @@ -410,9 +412,6 @@ void iq_inference_impl::process_items_(COUNT_T power_in_count,
if (n_inference_ > 0 && ++inference_count_ % n_inference_) {
continue;
}
if (!last_rx_freq_) {
continue;
}
// TODO: we select one slice in time (samples and power),
// where at least one sample exceeded the minimum. We could
// potentially select more samples either side for example.
Expand All @@ -427,13 +426,25 @@ void iq_inference_impl::process_items_(COUNT_T power_in_count,
memcpy(output_item.samples, (void *)&samples_lookback_[j * batch_],
batch_ * sizeof(gr_complex));
memcpy(output_item.power, (void *)power_in, batch_ * sizeof(float));
if (!inference_q_.push(output_item)) {
delete_output_item_(output_item);
if (host_now_() - last_full_time_ > 5) {
d_logger->error("inference queue full (increase inference dB threshold "
"to admit fewer signals?)");
last_full_time_ = host_now_();
if (background_) {
if (!last_rx_freq_) {

continue;
}
if (!inference_q_.push(output_item)) {
delete_output_item_(output_item);
if (host_now_() - last_full_time_ > 5) {
d_logger->error(
"inference queue full (increase inference dB threshold "
"to admit fewer signals?)");
last_full_time_ = host_now_();
}
}
} else {
d_logger->info("inference attempt at sample_clock {}",
output_item.sample_clock);
inference_q_.push(output_item);
run_inference_();
}
}
}
Expand All @@ -453,6 +464,11 @@ int iq_inference_impl::general_work(int noutput_items,
COUNT_T consumed = 0;
COUNT_T leftover = 0;

// Ensure we stay in power/samples sync.
samples_in_count =
int(std::min(samples_in_count, power_in_count) / n_vlen_) * n_vlen_;
power_in_count = samples_in_count;

while (!json_q_.empty()) {
std::string json;
json_q_.pop(json);
Expand Down Expand Up @@ -508,7 +524,7 @@ int iq_inference_impl::general_work(int noutput_items,
}

consume(0, samples_in_count);
consume(1, power_in_count);
consume(1, consumed);
return leftover;
}

Expand Down
4 changes: 3 additions & 1 deletion lib/iq_inference_impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -258,6 +258,7 @@ class iq_inference_impl : public iq_inference, base_impl {
std::deque<char> out_buf_;
std::string host_, port_;
boost::scoped_ptr<std::thread> inference_thread_;
bool background_;

void process_items_(COUNT_T power_in_count, COUNT_T &power_read,
const float *&power_in, COUNT_T &consumed);
Expand All @@ -272,7 +273,8 @@ class iq_inference_impl : public iq_inference, base_impl {
COUNT_T sample_buffer, double min_peak_points,
const std::string &model_server,
const std::string &model_names, double confidence,
COUNT_T n_inference, int samp_rate, bool power_inference);
COUNT_T n_inference, int samp_rate, bool power_inference,
bool background);
void forecast(int noutput_items, gr_vector_int &ninput_items_required);
int general_work(int noutput_items, gr_vector_int &ninput_items,
gr_vector_const_void_star &input_items,
Expand Down
11 changes: 7 additions & 4 deletions python/iqtlabs/bindings/docstrings/iq_inference_pydoc_template.h
Original file line number Diff line number Diff line change
Expand Up @@ -7,18 +7,21 @@
*
*/
#include "pydoc_macros.h"
#define D(...) DOC(gr, iqtlabs, __VA_ARGS__)
#define D(...) DOC(gr,iqtlabs, __VA_ARGS__ )
/*
This file contains placeholders for docstrings for the Python bindings.
Do not edit! These were automatically extracted during the binding process
and will be overwritten during the build process
*/


static const char* __doc_gr_iqtlabs_iq_inference = R"doc()doc";

static const char *__doc_gr_iqtlabs_iq_inference = R"doc()doc";


static const char* __doc_gr_iqtlabs_iq_inference_iq_inference = R"doc()doc";
static const char *__doc_gr_iqtlabs_iq_inference_iq_inference = R"doc()doc";


static const char* __doc_gr_iqtlabs_iq_inference_make = R"doc()doc";
static const char *__doc_gr_iqtlabs_iq_inference_make = R"doc()doc";


Original file line number Diff line number Diff line change
Expand Up @@ -7,19 +7,21 @@
*
*/
#include "pydoc_macros.h"
#define D(...) DOC(gr, iqtlabs, __VA_ARGS__)
#define D(...) DOC(gr,iqtlabs, __VA_ARGS__ )
/*
This file contains placeholders for docstrings for the Python bindings.
Do not edit! These were automatically extracted during the binding process
and will be overwritten during the build process
*/


static const char* __doc_gr_iqtlabs_iq_inference_standalone = R"doc()doc";

static const char *__doc_gr_iqtlabs_iq_inference_standalone = R"doc()doc";


static const char* __doc_gr_iqtlabs_iq_inference_standalone_iq_inference_standalone =
R"doc()doc";
static const char *__doc_gr_iqtlabs_iq_inference_standalone_iq_inference_standalone = R"doc()doc";


static const char* __doc_gr_iqtlabs_iq_inference_standalone_make = R"doc()doc";
static const char *__doc_gr_iqtlabs_iq_inference_standalone_make = R"doc()doc";


48 changes: 32 additions & 16 deletions python/iqtlabs/bindings/iq_inference_python.cc
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
/* BINDTOOL_GEN_AUTOMATIC(0) */
/* BINDTOOL_USE_PYGCCXML(0) */
/* BINDTOOL_HEADER_FILE(iq_inference.h) */
/* BINDTOOL_HEADER_FILE_HASH(bda97b811b8010b3e26c61b90e9144ce) */
/* BINDTOOL_HEADER_FILE_HASH(e9be2dc8c2985cc44c5e04e576515eea) */
/***********************************************************************************/

#include <pybind11/complex.h>
Expand All @@ -30,26 +30,42 @@ namespace py = pybind11;
void bind_iq_inference(py::module& m)
{

using iq_inference = ::gr::iqtlabs::iq_inference;
using iq_inference = ::gr::iqtlabs::iq_inference;


py::class_<iq_inference, gr::block, gr::basic_block, std::shared_ptr<iq_inference>>(
m, "iq_inference", D(iq_inference))
py::class_<iq_inference, gr::block, gr::basic_block,
std::shared_ptr<iq_inference>>(m, "iq_inference", D(iq_inference))

.def(py::init(&iq_inference::make),
py::arg("tag"),
py::arg("vlen"),
py::arg("n_vlen"),
py::arg("sample_buffer"),
py::arg("min_peak_points"),
py::arg("model_server"),
py::arg("model_names"),
py::arg("confidence"),
py::arg("n_inference"),
py::arg("samp_rate"),
py::arg("power_inference"),
D(iq_inference, make))
py::arg("tag"),
py::arg("vlen"),
py::arg("n_vlen"),
py::arg("sample_buffer"),
py::arg("min_peak_points"),
py::arg("model_server"),
py::arg("model_names"),
py::arg("confidence"),
py::arg("n_inference"),
py::arg("samp_rate"),
py::arg("power_inference"),
py::arg("background"),
D(iq_inference,make)
)




;




}








35 changes: 23 additions & 12 deletions python/iqtlabs/bindings/iq_inference_standalone_python.cc
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
/* If manual edits are made, the following tags should be modified accordingly. */
/* BINDTOOL_GEN_AUTOMATIC(0) */
/* BINDTOOL_USE_PYGCCXML(0) */
/* BINDTOOL_HEADER_FILE(iq_inference_standalone.h) */
/* BINDTOOL_HEADER_FILE(iq_inference_standalone.h) */
/* BINDTOOL_HEADER_FILE_HASH(47123aeae5b97ed6fca1be996559bbcb) */
/***********************************************************************************/

Expand All @@ -30,22 +30,33 @@ namespace py = pybind11;
void bind_iq_inference_standalone(py::module& m)
{

using iq_inference_standalone = ::gr::iqtlabs::iq_inference_standalone;
using iq_inference_standalone = ::gr::iqtlabs::iq_inference_standalone;


py::class_<iq_inference_standalone,
gr::sync_block,
gr::block,
gr::basic_block,
std::shared_ptr<iq_inference_standalone>>(
m, "iq_inference_standalone", D(iq_inference_standalone))
py::class_<iq_inference_standalone, gr::sync_block, gr::block, gr::basic_block,
std::shared_ptr<iq_inference_standalone>>(m, "iq_inference_standalone", D(iq_inference_standalone))

.def(py::init(&iq_inference_standalone::make),
py::arg("vlen"),
py::arg("model_server"),
py::arg("model_names"),
D(iq_inference_standalone, make))
py::arg("vlen"),
py::arg("model_server"),
py::arg("model_names"),
D(iq_inference_standalone,make)
)




;




}








Loading

0 comments on commit 0f80b0a

Please sign in to comment.