
Commit 2a2a210

Merge pull request #1262 from anarkiwi/diag
default to torchsig model, gr-iqtlabs '97
anarkiwi authored May 7, 2024
2 parents fc5f5f2 + 9f3f908 commit 2a2a210
Showing 3 changed files with 18 additions and 16 deletions.
2 changes: 1 addition & 1 deletion docker/Dockerfile.base
@@ -14,7 +14,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
libvulkan-dev \
python3-numpy
WORKDIR /root
-RUN git clone https://github.com/iqtlabs/gr-iqtlabs -b 1.0.96
+RUN git clone https://github.com/iqtlabs/gr-iqtlabs -b 1.0.97
COPY --from=iqtlabs/gamutrf-vkfft:latest /root /root/gr-iqtlabs
WORKDIR /root/gr-iqtlabs/build
COPY --from=iqtlabs/gamutrf-sigmf:latest /usr/local /usr/local
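The only change to docker/Dockerfile.base is bumping the gr-iqtlabs checkout from 1.0.96 to 1.0.97. A minimal sketch for checking the new ref and rebuilding locally; the iqtlabs/gamutrf-base tag is an assumption, and the COPY --from lines above mean the iqtlabs/gamutrf-vkfft and iqtlabs/gamutrf-sigmf images must already be pullable.

# Confirm the 1.0.97 branch/tag exists upstream before building
git ls-remote https://github.com/iqtlabs/gr-iqtlabs 1.0.97
# Rebuild the base image with the bumped gr-iqtlabs checkout (image tag is assumed)
docker build -f docker/Dockerfile.base -t iqtlabs/gamutrf-base:latest .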
29 changes: 16 additions & 13 deletions orchestrator.yml
@@ -86,26 +86,29 @@ services:
- --peak_fft_range=50
- --use_external_gps
- --use_external_heading
- --inference_addr=0.0.0.0
- --inference_port=10001
- --inference_min_confidence=0.25
- --inference_min_db=-1e9
- --inference_text_color=black
# can be multiple, separate with comma
- --inference_model_name=mini2_snr
- --n_inference=7
- --n_image=7
- --no-vkfft
- --rotate_secs=60
- --colormap=20
- --tune-jitter-hz=0
- --pretune
# - --external_gps_server=1.2.3.4
# - --external_gps_server_port=8888
- --inference_text_color=black
- --inference_output_dir=/logs/inference
- --inference_model_server=torchserve:8080
# - --iq_inference_model_server=torchserve:8080
# - --iq_inference_model_name=torchsig_model
- --pretune
- --inference_addr=0.0.0.0
- --inference_port=10001
- --inference_min_confidence=0.25
- --inference_min_db=-1e9
- --inference_text_color=black
- --n_inference=7
- --n_image=7
# - --inference_model_server=torchserve:8080
# - --inference_model_name=mini2_snr
- --iq_inference_squelch_db=-50
- --iq_inference_model_server=torchserve:8080
- --iq_inference_model_name=torchsig_model
# - --write_samples=1000000000
# - --sample_dir=/logs/samples
healthcheck:
test: [CMD, "/gamutrf/bin/scanhc.sh", "9000"]
interval: 10s
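The orchestrator.yml change makes IQ inference against torchsig_model (served by torchserve) the scanner's default, while the image-based inference_model_server / mini2_snr lines are now commented out. A hedged sketch for applying and spot-checking the new flags; the gamutrf service name is an assumption, while the healthcheck script and port are taken from the compose file above.

# Recreate the orchestrator stack so the scanner picks up the new arguments
docker compose -f orchestrator.yml up -d
# Run the declared healthcheck by hand (service name assumed to be gamutrf)
docker compose -f orchestrator.yml exec gamutrf /gamutrf/bin/scanhc.sh 9000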
3 changes: 1 addition & 2 deletions torchserve-cuda.yml
@@ -26,5 +26,4 @@ services:
command:
# can be multiple models
# e.g. mini2_snr=mini2_snr.mar,another_mini2_snr=another_mini2_snr.mar
-# --models torchsig_model=torchsig_model.mar
-- --models mini2_snr=mini2_snr.mar
+- --models torchsig_model=torchsig_model.mar
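With torchserve-cuda.yml now loading torchsig_model by default, TorchServe's stock REST APIs can confirm the model actually registered. The endpoint paths and default ports (inference on 8080, management on 8081) are standard TorchServe behaviour; exposing them on localhost is an assumption, not something shown in this compose file.

# Inference API liveness check (TorchServe default port 8080, as referenced by orchestrator.yml)
curl http://localhost:8080/ping
# Management API: list registered models, then describe torchsig_model
curl http://localhost:8081/models
curl http://localhost:8081/models/torchsig_model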
