Commit ccde79c

first inference in prepare to play

faressc committed Oct 24, 2023
1 parent 350a9d6 commit ccde79c
Showing 5 changed files with 11 additions and 2 deletions.
2 changes: 2 additions & 0 deletions source/dsp/inference/InferenceConfig.h
@@ -22,6 +22,8 @@ enum InferenceBackend {
 
 
+#define MODEL_OUTPUT_SIZE_BACKEND 1
+#define MODEL_OUTPUT_SHAPE {BATCH_SIZE, MODEL_OUTPUT_SIZE_BACKEND}
 
 
 #define MAX_INFERENCE_TIME 2048
 #define MODEL_LATENCY 0
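The two macros added here describe the model's output: MODEL_OUTPUT_SIZE_BACKEND is the output width per batch item, and MODEL_OUTPUT_SHAPE pairs it with BATCH_SIZE, which is defined elsewhere in InferenceConfig.h. A minimal sketch of how such macros can size both a flat buffer and a shape list (the BATCH_SIZE value and the int64_t shape type are assumptions for the sketch, not taken from the diff):

// Sketch only: consuming the new macros. BATCH_SIZE is given an assumed
// value here; in the repository it is defined elsewhere in InferenceConfig.h.
#include <array>
#include <cstdint>
#include <vector>

#define BATCH_SIZE 1 // assumption for this sketch
#define MODEL_OUTPUT_SIZE_BACKEND 1
#define MODEL_OUTPUT_SHAPE {BATCH_SIZE, MODEL_OUTPUT_SIZE_BACKEND}

int main() {
    // Flat output buffer, sized exactly like the arrays the processors use below.
    std::array<float, BATCH_SIZE * MODEL_OUTPUT_SIZE_BACKEND> output{};
    // Shape as a backend API would consume it; the macro expands to {1, 1}.
    std::vector<int64_t> outputShape = MODEL_OUTPUT_SHAPE;
    return (static_cast<int64_t>(output.size()) == outputShape[0] * outputShape[1]) ? 0 : 1;
}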
2 changes: 1 addition & 1 deletion source/dsp/inference/InferenceThread.cpp
@@ -34,7 +34,7 @@ void InferenceThread::run() {
 
     processingTime.store(duration.count());
 
-    std::cout << "Inference took " << duration.count() << "ms" << std::endl;
+    // std::cout << "Inference took " << duration.count() << "ms" << std::endl;
 }
 
 void InferenceThread::inference() {
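For context, run() times each inference with std::chrono and publishes the result through the processingTime atomic; this commit only silences the per-run log line rather than removing the measurement. A hedged sketch of that timing pattern (the names mirror the diff; the atomic's exact type and the inference() body are assumptions):

#include <atomic>
#include <chrono>

std::atomic<double> processingTime{0.0}; // assumed type; the diff only shows .store()

void inference() { /* placeholder for the actual model call */ }

void runOnce() {
    auto start = std::chrono::steady_clock::now();
    inference();
    auto stop = std::chrono::steady_clock::now();
    // Milliseconds as a double, stored lock-free for readers on other threads.
    std::chrono::duration<double, std::milli> duration = stop - start;
    processingTime.store(duration.count());
    // std::cout << "Inference took " << duration.count() << "ms" << std::endl;
}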
1 change: 0 additions & 1 deletion source/dsp/inference/InferenceThread.h
@@ -42,5 +42,4 @@ class InferenceThread : public juce::Thread {
     juce::ListenerList<Listener> listeners;
 };
 
-
 #endif //NN_INFERENCE_TEMPLATE_INFERENCETHREAD_H
4 changes: 4 additions & 0 deletions source/dsp/inference/backends/LibtorchProcessor.cpp
@@ -16,6 +16,10 @@ LibtorchProcessor::~LibtorchProcessor() {
 void LibtorchProcessor::prepareToPlay() {
     inputs.clear();
     inputs.push_back(torch::zeros(MODEL_INPUT_SHAPE_LIBTORCH));
+    // first run takes longest, so we do it here
+    std::array<float, BATCH_SIZE * MODEL_INPUT_SIZE_BACKEND> input;
+    std::array<float, BATCH_SIZE * MODEL_OUTPUT_SIZE_BACKEND> output;
+    processBlock(input, output);
 }
 
 void LibtorchProcessor::processBlock(std::array<float, BATCH_SIZE * MODEL_INPUT_SIZE_BACKEND>& input, std::array<float, BATCH_SIZE * MODEL_OUTPUT_SIZE_BACKEND>& output) {
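The idea behind the added lines: LibTorch's first forward() call is typically the slowest because of one-time setup (allocator warm-up, kernel selection, JIT optimization of the scripted module), so the commit triggers it once in prepareToPlay() rather than letting it land in the first audio callback. A hedged sketch of the same pattern against the public TorchScript API; the {1, 150} shape and the free function are assumptions, not the repository's code:

#include <torch/script.h>
#include <vector>

void warmUp(torch::jit::script::Module& module) {
    // Zero-filled input with an assumed 1x150 shape; the real code builds
    // its tensor from MODEL_INPUT_SHAPE_LIBTORCH.
    std::vector<torch::jit::IValue> inputs;
    inputs.push_back(torch::zeros({1, 150}));
    torch::NoGradGuard noGrad; // inference only, skip autograd bookkeeping
    // Paying the one-time cost here keeps later processBlock() calls fast.
    module.forward(inputs);
}

Note that the diff declares std::array<float, ...> input; without an initializer, so the warm-up runs on indeterminate values; that is fine for priming the runtime, though input{} would zero the buffer.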
4 changes: 4 additions & 0 deletions source/dsp/inference/backends/OnnxRuntimeProcessor.cpp
@@ -12,6 +12,10 @@ OnnxRuntimeProcessor::~OnnxRuntimeProcessor()
 void OnnxRuntimeProcessor::prepareToPlay() {
     // Define the shape of input tensor
     inputShape = MODEL_INPUT_SHAPE_ONNX;
+    // first run takes longest, so we do it here
+    std::array<float, BATCH_SIZE * MODEL_INPUT_SIZE_BACKEND> input;
+    std::array<float, BATCH_SIZE * MODEL_OUTPUT_SIZE_BACKEND> output;
+    processBlock(input, output);
 }
 
 void OnnxRuntimeProcessor::processBlock(std::array<float, BATCH_SIZE * MODEL_INPUT_SIZE_BACKEND>& input, std::array<float, BATCH_SIZE * MODEL_OUTPUT_SIZE_BACKEND>& output) {
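The ONNX Runtime backend gets the same treatment: one throwaway inference in prepareToPlay() so the session's lazy setup does not stall the first real-time block. A hedged sketch using the public Ort C++ API; the tensor names and the 1x150 / 1x1 shapes are assumptions for illustration:

#include <onnxruntime_cxx_api.h>
#include <array>
#include <cstdint>

void warmUp(Ort::Session& session) {
    std::array<float, 150> input{};  // zero-initialized warm-up input
    std::array<float, 1> output{};
    std::array<int64_t, 2> inputShape{1, 150};
    std::array<int64_t, 2> outputShape{1, 1};

    Ort::MemoryInfo memoryInfo =
        Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
    Ort::Value inputTensor = Ort::Value::CreateTensor<float>(
        memoryInfo, input.data(), input.size(), inputShape.data(), inputShape.size());
    Ort::Value outputTensor = Ort::Value::CreateTensor<float>(
        memoryInfo, output.data(), output.size(), outputShape.data(), outputShape.size());

    const char* inputNames[] = {"input"};   // assumed graph input name
    const char* outputNames[] = {"output"}; // assumed graph output name
    // One synchronous run pays the initialization cost outside the audio thread.
    session.Run(Ort::RunOptions{nullptr}, inputNames, &inputTensor, 1,
                outputNames, &outputTensor, 1);
}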
