Merge pull request #1104 from anarkiwi/v70
gr-iqtlabs v70
anarkiwi authored Jan 16, 2024
2 parents 29278d4 + bf02dd5 commit ee53894
Showing 3 changed files with 12 additions and 9 deletions.
docker/Dockerfile.base: 2 changes (1 addition, 1 deletion)
@@ -28,7 +28,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
     libvulkan-dev \
     python3-numpy
 WORKDIR /root
-RUN git clone https://github.com/iqtlabs/gr-iqtlabs -b 1.0.69
+RUN git clone https://github.com/iqtlabs/gr-iqtlabs -b 1.0.70
 COPY --from=iqtlabs/gamutrf-vkfft:latest /root /root/gr-iqtlabs
 WORKDIR /root/gr-iqtlabs/build
 COPY --from=sigmf-builder /usr/local /usr/local
gamutrf/grscan.py: 13 changes (8 additions, 5 deletions)
@@ -322,7 +322,7 @@ def get_pretune_block(
     ):
         # if pretuning, the pretune block will also do the batching.
         if pretune:
-            return self.iqtlabs.retune_pre_fft(
+            block = self.iqtlabs.retune_pre_fft(
                 nfft,
                 fft_batch_size,
                 "rx_freq",
@@ -334,8 +334,10 @@
                 tuning_ranges,
                 self.tag_now,
             )
-        # otherwise, the pretuning block will just do batching.
-        return blocks.stream_to_vector(gr.sizeof_gr_complex, fft_batch_size * nfft)
+        else:
+            # otherwise, the pretuning block will just do batching.
+            block = blocks.stream_to_vector(gr.sizeof_gr_complex, fft_batch_size * nfft)
+        return block
 
     def apply_window(self, nfft, fft_batch_size):
         window_constants = [val for val in self.get_window(nfft) for _ in range(2)]
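
For context on the fallback branch above: when pretune is false, get_pretune_block() now assigns a stock GNU Radio stream_to_vector block to the shared block variable instead of returning early, and that block packs fft_batch_size * nfft complex samples into each output vector. The following standalone sketch (not the project's code; the sizes and source data are illustrative assumptions) shows that batching behaviour in isolation, assuming a local GNU Radio install:

# Standalone sketch of the stream_to_vector batching fallback.
# nfft and fft_batch_size values are illustrative assumptions.
from gnuradio import gr, blocks

nfft = 8
fft_batch_size = 4
samples = [complex(i, 0) for i in range(fft_batch_size * nfft)]

tb = gr.top_block()
src = blocks.vector_source_c(samples, False)  # emit the samples once
# One output item is a vector of fft_batch_size * nfft complex samples,
# i.e. one whole FFT batch for the downstream FFT block.
batcher = blocks.stream_to_vector(gr.sizeof_gr_complex, fft_batch_size * nfft)
sink = blocks.vector_sink_c(fft_batch_size * nfft)
tb.connect(src, batcher, sink)
tb.run()
assert len(sink.data()) == fft_batch_size * nfft  # exactly one batched vector emitted
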
@@ -350,13 +352,14 @@ def get_offload_fft_blocks(
         fft_block = None
         fft_roll = False
         if self.wavelearner:
-            fft_block = self.wavelearner.fft(int(fft_batch_size * nfft), (nfft), True)
+            fft_block = self.wavelearner.fft(int(fft_batch_size * nfft), nfft, True)
             fft_roll = True
         elif vkfft:
-            fft_block = self.iqtlabs.vkfft(int(fft_batch_size * nfft), nfft, True)
+            fft_block = self.iqtlabs.vkfft(fft_batch_size, nfft, True)
         else:
             fft_batch_size = 1
             fft_block = fft.fft_vcc(nfft, True, [], True, 1)
+            fft_block.set_thread_priority(99)
 
         fft_blocks = [
             self.apply_window(nfft, fft_batch_size),
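
The get_offload_fft_blocks() hunk above picks one of three FFT back ends: the wavelearner FFT, the gr-iqtlabs VkFFT block (which, per this diff, now receives fft_batch_size directly instead of fft_batch_size * nfft), or the stock GNU Radio FFT on the CPU, which now also gets a high thread priority. A minimal sketch of the CPU fallback path only (illustrative nfft value; not the project's code), assuming GNU Radio is installed:

# Sketch of the CPU fallback: stock FFT, batching disabled, elevated thread priority.
from gnuradio import fft

nfft = 1024          # illustrative FFT length
fft_batch_size = 1   # the CPU path resets batching to one FFT per output vector

# forward FFT, no window here (grscan applies the window separately upstream),
# shift DC to the centre bin, single FFT thread
cpu_fft = fft.fft_vcc(nfft, True, [], True, 1)
cpu_fft.set_thread_priority(99)  # best-effort; may need real-time scheduling privileges
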
orchestrator.yml: 6 changes (3 additions, 3 deletions)
@@ -81,10 +81,10 @@ services:
       - --inference_text_color=black
       # can be multiple, separate with comma
       - --inference_model_name=mini2_snr
-      - --n_inference=5
-      - --n_image=5
+      - --n_inference=10
+      - --n_image=10
       - --no-vkfft
-      - --rotate_secs=300
+      - --rotate_secs=60
       - --colormap=20
       # - --external_gps_server=1.2.3.4
       # - --external_gps_server_port=8888
