This repository was archived by the owner on Feb 3, 2025. It is now read-only.

Commit 7831a76

Merge pull request #298 from nvkevihu/22.06-container-cpp-linking
Update container versions and toggle CXX11_ABI flag for CPP examples
2 parents: 6f541ae + 9fcdd9c

File tree: 4 files changed (+6 lines, −6 lines)


tftrt/benchmarking-cpp/CMakeLists.txt

Lines changed: 2 additions & 2 deletions
@@ -34,7 +34,7 @@ add_executable(tf_trt_benchmark_runner main.cc)
 target_link_libraries(tf_trt_benchmark_runner tensorflow_cc)
 target_link_libraries(tf_trt_benchmark_runner tensorflow_framework)

-target_compile_options(tf_trt_benchmark_runner PRIVATE -D_GLIBCXX_USE_CXX11_ABI=0 -DGOOGLE_CUDA -DGOOGLE_TENSORRT)
+target_compile_options(tf_trt_benchmark_runner PRIVATE -D_GLIBCXX_USE_CXX11_ABI=1 -DGOOGLE_CUDA -DGOOGLE_TENSORRT)

 target_link_directories(tf_trt_benchmark_runner PRIVATE ${tf_python_dir})
 target_link_directories(tf_trt_benchmark_runner PRIVATE ${tf_dir})
@@ -44,4 +44,4 @@ target_compile_options(tf_trt_benchmark_runner PRIVATE -O2 -Wl,-rpath=${tf_pytho
 target_include_directories(tf_trt_benchmark_runner PRIVATE ${tf_python_dir}/include)
 target_include_directories(tf_trt_benchmark_runner PRIVATE ${trt_include_path})

-add_dependencies(tf_trt_benchmark_runner tf_symlinks)
+add_dependencies(tf_trt_benchmark_runner tf_symlinks)
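
The flag flip above only works if it matches the ABI that the prebuilt TensorFlow libraries inside the container were built with. As a quick check before configuring the CMake build (a sketch, not part of this commit; it assumes the NGC container's Python TensorFlow install), the expected value can be read from `tf.sysconfig`:

```
# Inside the 22.06 container: print the CXX11 ABI flag the bundled
# TensorFlow was compiled with (0 or 1); -D_GLIBCXX_USE_CXX11_ABI in
# CMakeLists.txt should be set to the same value.
python -c "import tensorflow as tf; print(tf.sysconfig.CXX11_ABI_FLAG)"

# Alternatively, print the full set of recommended compile flags, which
# includes the -D_GLIBCXX_USE_CXX11_ABI=<n> define.
python -c "import tensorflow as tf; print(' '.join(tf.sysconfig.get_compile_flags()))"
```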

tftrt/benchmarking-cpp/README.md

Lines changed: 2 additions & 2 deletions
@@ -7,13 +7,13 @@ This straightforward example uses TF's C++ API to serve a saved model and measur
 Pull the image:

 ```
-docker pull nvcr.io/nvidia/tensorflow:22.05-tf2-py3
+docker pull nvcr.io/nvidia/tensorflow:22.06-tf2-py3
 ```

 Start the container:

 ```
-docker run --rm --gpus all --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 -it --name TFTRT_CPP nvcr.io/nvidia/tensorflow:22.05-tf2-py3
+docker run --rm --gpus all --ipc=host --ulimit memlock=-1 --ulimit stack=67108864 -it --name TFTRT_CPP nvcr.io/nvidia/tensorflow:22.06-tf2-py3
 ```

 Clone the repo:
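
A quick way to confirm that the newly referenced 22.06 image is the one actually being used, and to get back into the named container from a second terminal, is sketched below (a verification aid, not part of the committed README):

```
# Throwaway container: report the TensorFlow version shipped in the 22.06 image.
docker run --rm nvcr.io/nvidia/tensorflow:22.06-tf2-py3 \
  python -c "import tensorflow as tf; print(tf.__version__)"

# Open a second shell inside the already-running container started with --name TFTRT_CPP.
docker exec -it TFTRT_CPP /bin/bash
```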

tftrt/examples-cpp/mnist_demo/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
@@ -34,7 +34,7 @@ add_executable(tf_trt_example main.cc mnist.h mnist.cc)
 target_link_libraries(tf_trt_example tensorflow_cc)
 target_link_libraries(tf_trt_example tensorflow_framework)

-target_compile_options(tf_trt_example PRIVATE -D_GLIBCXX_USE_CXX11_ABI=0 -DGOOGLE_CUDA -DGOOGLE_TENSORRT)
+target_compile_options(tf_trt_example PRIVATE -D_GLIBCXX_USE_CXX11_ABI=1 -DGOOGLE_CUDA -DGOOGLE_TENSORRT)

 target_link_directories(tf_trt_example PRIVATE ${tf_python_dir})
 target_link_directories(tf_trt_example PRIVATE ${tf_dir})
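
If there is any doubt about which ABI the container's TensorFlow shared libraries were built with, the mangled symbol names give it away. A hedged sketch (the library file name libtensorflow_framework.so.2 is an assumption based on typical TF 2.x installs):

```
# Locate the installed TensorFlow package directory.
TF_DIR=$(python -c "import tensorflow as tf, os; print(os.path.dirname(tf.__file__))")

# Symbols containing "__cxx11" use the new libstdc++ ABI; a non-zero count
# means the library was built with _GLIBCXX_USE_CXX11_ABI=1, matching the
# value now set in CMakeLists.txt.
nm -D "${TF_DIR}/libtensorflow_framework.so.2" | grep -c "__cxx11"
```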

tftrt/examples-cpp/mnist_demo/README.md

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@ The MNIST inference example is based on https://github.com/bmzhao/saved-model-ex
 git clone https://github.com/tensorflow/tensorrt.git
 git clone https://github.com/tensorflow/tensorflow.git tensorflow-source
 mkdir bazel-cache
-docker run --gpus=all --rm -it --shm-size=1g --ulimit memlock=-1 --ulimit stack=67108864 -v $PWD:/workspace -w /workspace -v $PWD/bazel-cache:/root/.cache/bazel nvcr.io/nvidia/tensorflow:21.06-tf2-py3
+docker run --gpus=all --rm -it --shm-size=1g --ulimit memlock=-1 --ulimit stack=67108864 -v $PWD:/workspace -w /workspace -v $PWD/bazel-cache:/root/.cache/bazel nvcr.io/nvidia/tensorflow:22.06-tf2-py3

 # Inside the container
 cp /opt/tensorflow/nvbuild* /opt/tensorflow/bazel_build.sh .
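
The two -v mounts in the command above make the cloned sources visible at /workspace and persist Bazel's cache on the host, so TensorFlow rebuilds stay incremental across container restarts. A small sanity check inside the container (a sketch under those same mount assumptions):

```
# Inside the container: the host checkout should be visible at /workspace ...
ls /workspace              # expect: tensorrt/ tensorflow-source/ bazel-cache/
# ... and Bazel's cache directory should map back to the host's ./bazel-cache,
# so it survives `docker run --rm` and speeds up subsequent builds.
ls /root/.cache/bazel
```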
