Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion gen-demos-max78000.sh
Original file line number Diff line number Diff line change
Expand Up @@ -21,4 +21,6 @@ python izer/add_fake_passthrough.py --input-checkpoint-path trained/ai85-aisegme
python ai8xize.py --test-dir $TARGET --prefix aisegment_unet --checkpoint-file trained/ai85-aisegment-unet-large-fakept-q.pth.tar --config-file networks/aisegment-unet-large-fakept.yaml $COMMON_ARGS --overlap-data --mlator --no-unload --max-checklines 8192 --new-kernel-loader "$@"
python ai8xize.py --test-dir $TARGET --prefix svhn_tinierssd --checkpoint-file trained/ai85-svhn-tinierssd-qat8-q.pth.tar --config-file networks/svhn-tinierssd.yaml --overlap-data $COMMON_ARGS "$@"
python ai8xize.py --test-dir $TARGET --prefix facedet_tinierssd --checkpoint-file trained/ai85-facedet-tinierssd-qat8-q.pth.tar --config-file networks/ai85-facedet-tinierssd.yaml --sample-input tests/sample_vggface2_facedetection.npy --fifo $COMMON_ARGS "$@"
python ai8xize.py --test-dir $TARGET --prefix kinetics --checkpoint-file trained/ai85-kinetics-qat8-q.pth.tar --config-file networks/ai85-kinetics-actiontcn.yaml --overlap-data --softmax --zero-sram --no-kat $COMMON_ARGS "$@" # note: known-answer test can't be done on MAX78000 due to memory constraints (sample data is too large)
python ai8xize.py --test-dir $TARGET --prefix kinetics --checkpoint-file trained/ai85-kinetics-qat8-q.pth.tar --config-file networks/ai85-kinetics-actiontcn.yaml --overlap-data --softmax --zero-sram --no-kat $COMMON_ARGS "$@" # note: known-answer test can't be done on MAX78000 due to memory constraints (sample data is too large)
python izer/add_fake_passthrough.py --input-checkpoint-path trained/ai85-spectrumsense-unet-large-q.pth.tar --output-checkpoint-path trained/ai85-spectrumsense-unet-large-fakept-q.pth.tar --layer-name pt --layer-depth 56 --layer-name-after-pt upconv3 "$@"
python ai8xize.py --test-dir $TARGET --prefix spectrumsense --checkpoint-file trained/ai85-spectrumsense-unet-large-fakept-q.pth.tar --config-file networks/spectrumsense-unet-large-fakept.yaml --sample-input tests/sample_spectrumsense.npy --energy $COMMON_ARGS --compact-data --mexpress --overlap-data --mlator --no-unload "$@"
180 changes: 180 additions & 0 deletions networks/spectrumsense-unet-large-fakept.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,180 @@
---
# HWC (little data) configuration for Spectrum Sense (UNet)
# UNet Model
#
# Field notes (ai8x-synthesis layer description):
#   processors / output_processors: 64-bit hex bitmasks selecting which CNN
#     processors read this layer's input / write its output.
#   in_offset / out_offset: data-memory offsets for the layer's input/output.
#   in_sequences: source the input from the listed earlier layer(s) instead of
#     the immediately preceding layer (used here for the U-Net skip paths).
#   write_gap: interleave the output so a later layer can read two sources as
#     one concatenated input.
# NOTE(review): the semantics above follow the MAX78000 ai8x-synthesis docs —
# confirm against the README of the toolchain version actually in use.

arch: ai85unetlarge
dataset: SpectrumSense_s352_c2

layers:
  # Layer 0: prep0
  - out_offset: 0x0600
    in_offset: 0x0700
    processors: 0x0000ffffffffffff
    output_processors: 0xffffffffffffffff
    operation: conv2d
    kernel_size: 1x1
    pad: 0
    activate: ReLU
  # Layer 1: prep1
  - out_offset: 0x0400
    processors: 0xffffffffffffffff
    output_processors: 0xffffffffffffffff
    operation: conv2d
    kernel_size: 1x1
    pad: 0
    activate: ReLU
  # Layer 2: prep2
  - out_offset: 0x0200
    processors: 0xffffffffffffffff
    output_processors: 0x00000000ffffffff
    operation: conv2d
    kernel_size: 1x1
    pad: 0
    activate: ReLU
  # Layer 3: enc1 (skip source for dec1, see layer 13)
  - out_offset: 0x0000
    processors: 0x00000000ffffffff
    output_processors: 0xff00000000000000
    operation: conv2d
    kernel_size: 3x3
    pad: 1
    activate: ReLU
  # Layer 4: enc2 (skip source for dec2, see layer 11)
  - out_offset: 0x0000
    processors: 0xff00000000000000
    output_processors: 0x00fffffff0000000
    operation: conv2d
    kernel_size: 3x3
    pad: 1
    max_pool: 2
    pool_stride: 2
    activate: ReLU
  # Layer 5: enc3 (skip source for the pt passthrough, see layer 7)
  - out_offset: 0x5000
    processors: 0x00fffffff0000000
    output_processors: 0x00ffffffffffffff
    operation: conv2d
    kernel_size: 3x3
    pad: 1
    max_pool: 2
    pool_stride: 2
    activate: ReLU
  # Layer 6: bneck
  - out_offset: 0x6000
    processors: 0x00ffffffffffffff
    output_processors: 0x00ffffffffffffff
    operation: conv2d
    kernel_size: 3x3
    pad: 1
    max_pool: 2
    pool_stride: 2
    activate: ReLU
  # Layer 7: pt — fake 1x1 passthrough inserted by izer/add_fake_passthrough.py;
  # re-reads enc3's output (in_sequences: [5]) and writes it gapped at 0x4004 so
  # layer 9 can read it interleaved with upconv3's output.
  - in_offset: 0x5000
    out_offset: 0x4004
    processors: 0x00ffffffffffffff
    output_processors: 0x00ffffffffffffff
    operation: conv2d
    kernel_size: 1x1
    pad: 0
    write_gap: 1
    in_sequences: [5]
    activate: None
  # Layer 8: upconv3 — gapped write at 0x4000 pairs with layer 7's at 0x4004
  - in_offset: 0x6000
    out_offset: 0x4000
    processors: 0x00ffffffffffffff
    output_processors: 0x00ffffffffffffff
    operation: convtranspose2d
    kernel_size: 3x3
    pad: 1
    activate: None
    write_gap: 1
    in_sequences: [6]
  # Layer 9: dec3 — consumes the interleaved concat of upconv3 (8) and pt (7)
  - out_offset: 0x2000
    in_offset: 0x4000
    processors: 0x00ffffffffffffff
    output_processors: 0x00ffffffffffffff
    operation: conv2d
    kernel_size: 3x3
    pad: 1
    activate: ReLU
    in_sequences: [8, 7]
  # Layer 10: upconv2
  - out_offset: 0x0000
    in_offset: 0x2000
    processors: 0x00ffffffffffffff
    output_processors: 0x000000000fffffff
    operation: convtranspose2d
    kernel_size: 3x3
    pad: 1
    activate: None
  # Layer 11: dec2 — skip connection from enc2 (layer 4)
  - out_offset: 0x2000
    in_offset: 0x0000
    processors: 0x00ffffffffffffff
    output_processors: 0x000000000fffffff
    operation: conv2d
    kernel_size: 3x3
    pad: 1
    activate: ReLU
    in_sequences: [10, 4]
  # Layer 12: upconv1
  - out_offset: 0x0000
    processors: 0x000000000fffffff
    output_processors: 0x00ff000000000000
    operation: convtranspose2d
    kernel_size: 3x3
    pad: 1
    activate: None
  # Layer 13: dec1 — skip connection from enc1 (layer 3)
  - out_offset: 0x0700
    in_offset: 0x0000
    processors: 0xffff000000000000
    output_processors: 0x0000ffffffffffff
    operation: conv2d
    kernel_size: 3x3
    pad: 1
    activate: ReLU
    in_sequences: [12, 3]
  # Layer 14: dec0
  - out_offset: 0x0550
    processors: 0x0000ffffffffffff
    output_processors: 0xffffffffffffffff
    operation: conv2d
    kernel_size: 3x3
    pad: 1
    activate: ReLU
  # Layer 15: conv_p1
  - out_offset: 0x0400
    processors: 0xffffffffffffffff
    output_processors: 0xffffffffffffffff
    operation: conv2d
    kernel_size: 1x1
    pad: 0
    activate: ReLU
  # Layer 16: conv_p2
  - out_offset: 0x0250
    processors: 0xffffffffffffffff
    output_processors: 0xffffffffffffffff
    operation: conv2d
    kernel_size: 1x1
    pad: 0
    activate: ReLU
  # Layer 17: conv_p3
  - out_offset: 0x0100
    processors: 0xffffffffffffffff
    output_processors: 0xffffffffffffffff
    operation: conv2d
    kernel_size: 1x1
    pad: 0
    activate: None
  # Layer 18: conv — final classifier head, no activation
  - out_offset: 0x0000
    processors: 0xffffffffffffffff
    output_processors: 0x0000ffffffffffff
    operation: conv2d
    kernel_size: 1x1
    pad: 0
    activate: None
7 changes: 7 additions & 0 deletions scripts/gen_spectrumsense_max78000.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
#!/bin/sh
# Generate the Spectrum Sense (UNet) demo code for the MAX78000 SDK.
#
# Usage: scripts/gen_spectrumsense_max78000.sh [extra ai8xize.py args...]
# Run from the ai8x-synthesis repository root (expects ai8xize.py, trained/,
# networks/ and tests/ relative to the current directory).

# Abort immediately on any failure or unset variable instead of emitting
# partial/stale demo code.
set -eu

DEVICE="MAX78000"
TARGET="sdk/Examples/$DEVICE/CNN"
COMMON_ARGS="--device $DEVICE --timer 0 --display-checkpoint --verbose"

# $COMMON_ARGS is intentionally left unquoted: it must word-split into
# separate options (none of its elements contain whitespace).
# shellcheck disable=SC2086
python ai8xize.py --test-dir "$TARGET" --prefix spectrumsense --checkpoint-file trained/ai85-spectrumsense-unet-large-fakept-q.pth.tar --config-file networks/spectrumsense-unet-large-fakept.yaml --sample-input tests/sample_spectrumsense_352.npy --energy $COMMON_ARGS --compact-data --mexpress --overlap-data --mlator --no-unload "$@"
3 changes: 3 additions & 0 deletions scripts/quantize_spectrumsense_unet.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
#!/bin/sh
# Quantize the trained Spectrum Sense UNet checkpoint for MAX78000, then insert
# the fake 1x1 passthrough layer ("pt", depth 56, placed before "upconv3")
# required by networks/spectrumsense-unet-large-fakept.yaml.
#
# Run from the ai8x-synthesis repository root.

# The passthrough step consumes the quantized checkpoint produced by the first
# command; stop on failure so it never runs against a missing/stale file.
set -eu

python quantize.py trained/ai85-spectrumsense-unet-large.pth.tar trained/ai85-spectrumsense-unet-large-q.pth.tar --device MAX78000 -v
python izer/add_fake_passthrough.py --input-checkpoint-path trained/ai85-spectrumsense-unet-large-q.pth.tar --output-checkpoint-path trained/ai85-spectrumsense-unet-large-fakept-q.pth.tar --layer-name pt --layer-depth 56 --layer-name-after-pt upconv3
Binary file added tests/sample_spectrumsense_352.npy
Binary file not shown.
Binary file not shown.
Binary file added trained/ai85-spectrumsense-unet-large-q.pth.tar
Binary file not shown.
Loading