Skip to content

Commit e84368d

Browse files
MoFHekar authored and rhdong committed
[Feat] Compatible with Tensorflow later than version 2.11.0.
It also supports automatic detection of TF compile cxx standard versions.
1 parent c1a6eaa commit e84368d

36 files changed

+482
-175
lines changed

build_deps/tf_dependency/build_defs.bzl.tpl

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,8 @@
22

33
D_GLIBCXX_USE_CXX11_ABI = "%{tf_cx11_abi}"
44

5+
TF_CXX_STANDARD = "%{tf_cxx_standard}"
6+
57
DTF_VERSION_INTEGER = "%{tf_version_integer}"
68

79
FOR_TF_SERVING = "%{for_tf_serving}"

build_deps/tf_dependency/tf_configure.bzl

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,8 @@ _TF_SHARED_LIBRARY_NAME = "TF_SHARED_LIBRARY_NAME"
88

99
_TF_CXX11_ABI_FLAG = "TF_CXX11_ABI_FLAG"
1010

11+
_TF_CXX_STANDARD = "TF_CXX_STANDARD"
12+
1113
_FOR_TF_SERVING = "FOR_TF_SERVING"
1214

1315
_TF_VERSION_INTEGER = "TF_VERSION_INTEGER"
@@ -208,6 +210,7 @@ def _tf_pip_impl(repository_ctx):
208210
tf_shared_library_name = repository_ctx.os.environ[_TF_SHARED_LIBRARY_NAME]
209211
tf_shared_library_path = "%s/%s" % (tf_shared_library_dir, tf_shared_library_name)
210212
tf_cx11_abi = "-D_GLIBCXX_USE_CXX11_ABI=%s" % (repository_ctx.os.environ[_TF_CXX11_ABI_FLAG])
213+
tf_cxx_standard = "%s" % (repository_ctx.os.environ[_TF_CXX_STANDARD])
211214
tf_version_integer = "-DTF_VERSION_INTEGER=%s" % (repository_ctx.os.environ[_TF_VERSION_INTEGER])
212215
for_tf_serving = repository_ctx.os.environ[_FOR_TF_SERVING]
213216

@@ -231,6 +234,7 @@ def _tf_pip_impl(repository_ctx):
231234
"build_defs.bzl",
232235
{
233236
"%{tf_cx11_abi}": tf_cx11_abi,
237+
"%{tf_cxx_standard}": tf_cxx_standard,
234238
"%{tf_version_integer}": tf_version_integer,
235239
"%{for_tf_serving}": for_tf_serving,
236240
},
@@ -242,6 +246,7 @@ tf_configure = repository_rule(
242246
_TF_SHARED_LIBRARY_DIR,
243247
_TF_SHARED_LIBRARY_NAME,
244248
_TF_CXX11_ABI_FLAG,
249+
_TF_CXX_STANDARD,
245250
_FOR_TF_SERVING,
246251
],
247252
implementation = _tf_pip_impl,

build_deps/toolchains/gcc7_manylinux2010-nvcc-cuda11/clang/bin/crosstool_wrapper_driver_is_not_gcc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -178,7 +178,7 @@ def InvokeNvcc(argv, log=False):
178178
undefines = ''.join([' -U' + define for define in undefines])
179179
std_options = GetOptionValue(argv, '-std')
180180
# Supported -std flags as of CUDA 9.0. Only keep last to mimic gcc/clang.
181-
nvcc_allowed_std_options = ["c++03", "c++11", "c++14"]
181+
nvcc_allowed_std_options = ["c++03", "c++11", "c++14", "c++17"]
182182
std_options = ''.join([' -std=' + define
183183
for define in std_options if define in nvcc_allowed_std_options][-1:])
184184
fatbin_options = ''.join([' --fatbin-options=' + option

build_deps/toolchains/gpu/crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc.tpl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -167,7 +167,7 @@ def InvokeNvcc(argv, log=False):
167167
undefines = ''.join([' -U' + define for define in undefines])
168168
std_options = GetOptionValue(argv, 'std')
169169
# currently only c++14 is supported by Cuda 10.0 std argument
170-
nvcc_allowed_std_options = ["c++14"]
170+
nvcc_allowed_std_options = ["c++03", "c++11", "c++14", "c++17"]
171171
std_options = ''.join([' -std=' + define
172172
for define in std_options if define in nvcc_allowed_std_options])
173173

build_deps/toolchains/gpu/cuda_configure.bzl

Lines changed: 23 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -74,7 +74,7 @@ _DEFAULT_CUDA_COMPUTE_CAPABILITIES.update(
7474
"7.5",
7575
"8.0",
7676
"8.6",
77-
] for v in range(1, 8)},
77+
] for v in range(0, 8)},
7878
)
7979

8080
_DEFAULT_CUDA_COMPUTE_CAPABILITIES.update(
@@ -86,9 +86,23 @@ _DEFAULT_CUDA_COMPUTE_CAPABILITIES.update(
8686
"8.0",
8787
"8.6",
8888
"8.9",
89+
"9.0",
8990
] for v in range(8, 9)},
9091
)
9192

93+
_DEFAULT_CUDA_COMPUTE_CAPABILITIES.update(
94+
{"12.{}".format(v): [
95+
"6.0",
96+
"6.1",
97+
"7.0",
98+
"7.5",
99+
"8.0",
100+
"8.6",
101+
"8.9",
102+
"9.0",
103+
] for v in range(0, 8)},
104+
)
105+
92106
def _get_python_bin(repository_ctx):
93107
"""Gets the python bin path."""
94108
python_bin = repository_ctx.os.environ.get(_PYTHON_BIN_PATH)
@@ -562,10 +576,17 @@ def _find_cuda_lib(
562576
Returns the path to the library.
563577
"""
564578
file_name = lib_name(lib, cpu_value, version, static)
579+
paths = ["%s/%s" % (basedir, file_name)]
580+
if version:
581+
# In cuda 12.1, the name of libcupti.so is no longer libcupti.so.12.1 but libcupti.so.2023.1.0.
582+
# A libcupti.so.12 symlink is still provided, so we look for that rather than "*.12.1".
583+
major_version = version.split(".")[0]
584+
file_name_major = lib_name(lib, cpu_value, major_version, static)
585+
paths.append("%s/%s" % (basedir, file_name_major))
565586

566587
return find_lib(
567588
repository_ctx,
568-
["%s/%s" % (basedir, file_name)],
589+
paths,
569590
check_soname = version and not static,
570591
)
571592

build_deps/toolchains/redis/redis-plus-plus.BUILD

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ load("@rules_foreign_cc//foreign_cc:defs.bzl", "cmake")
22
load(
33
"@local_config_tf//:build_defs.bzl",
44
"D_GLIBCXX_USE_CXX11_ABI",
5+
"TF_CXX_STANDARD",
56
)
67

78
package(
@@ -30,10 +31,10 @@ cmake(
3031
generate_args = [
3132
"-DCMAKE_BUILD_TYPE=Release",
3233
"-DREDIS_PLUS_PLUS_BUILD_TEST=OFF",
33-
"-DREDIS_PLUS_PLUS_CXX_STANDARD=11",
34-
"-DCMAKE_CXX_FLAGS="+D_GLIBCXX_USE_CXX11_ABI,
34+
"-DREDIS_PLUS_PLUS_CXX_STANDARD=" + TF_CXX_STANDARD.split("c++")[-1],
35+
"-DCMAKE_CXX_FLAGS=" + D_GLIBCXX_USE_CXX11_ABI,
3536
],
3637
lib_source = "@redis-plus-plus//:all_srcs",
3738
out_static_libs = ["libredis++.a"],
3839
deps = ["@hiredis"],
39-
)
40+
)

configure.py

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -219,6 +219,18 @@ def create_build_configuration():
219219
write_action_env("TF_SHARED_LIBRARY_DIR", get_tf_shared_lib_dir())
220220
write_action_env("TF_SHARED_LIBRARY_NAME", get_shared_lib_name())
221221
write_action_env("TF_CXX11_ABI_FLAG", tf.sysconfig.CXX11_ABI_FLAG)
222+
tf_cxx_standard_compile_flags = [
223+
flag for flag in tf.sysconfig.get_compile_flags() if "-std=" in flag
224+
]
225+
if len(tf_cxx_standard_compile_flags) > 0:
226+
tf_cxx_standard_compile_flag = tf_cxx_standard_compile_flags[-1]
227+
else:
228+
tf_cxx_standard_compile_flag = None
229+
if tf_cxx_standard_compile_flag is None:
230+
tf_cxx_standard = "c++14"
231+
else:
232+
tf_cxx_standard = tf_cxx_standard_compile_flag.split("-std=")[-1]
233+
write_action_env("TF_CXX_STANDARD", tf_cxx_standard)
222234

223235
tf_version_integer = get_tf_version_integer()
224236
# This is used to trace the difference between Tensorflow versions.

tensorflow_recommenders_addons/dynamic_embedding/core/kernels/cuckoo_hashtable_op.cc

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -219,7 +219,7 @@ class CuckooHashTableOfTensors final : public LookupInterface {
219219
LaunchTensorsFind<CPUDevice, K, V> launcher(value_dim);
220220
launcher.launch(ctx, table_, key, value, default_value);
221221

222-
return Status::OK();
222+
return TFOkStatus;
223223
}
224224

225225
Status FindWithExists(OpKernelContext* ctx, const Tensor& key, Tensor* value,
@@ -229,7 +229,7 @@ class CuckooHashTableOfTensors final : public LookupInterface {
229229
LaunchTensorsFindWithExists<CPUDevice, K, V> launcher(value_dim);
230230
launcher.launch(ctx, table_, key, value, default_value, exists);
231231

232-
return Status::OK();
232+
return TFOkStatus;
233233
}
234234

235235
Status DoInsert(bool clear, OpKernelContext* ctx, const Tensor& keys,
@@ -243,7 +243,7 @@ class CuckooHashTableOfTensors final : public LookupInterface {
243243
LaunchTensorsInsert<CPUDevice, K, V> launcher(value_dim);
244244
launcher.launch(ctx, table_, keys, values);
245245

246-
return Status::OK();
246+
return TFOkStatus;
247247
}
248248

249249
Status DoAccum(bool clear, OpKernelContext* ctx, const Tensor& keys,
@@ -257,7 +257,7 @@ class CuckooHashTableOfTensors final : public LookupInterface {
257257
LaunchTensorsAccum<CPUDevice, K, V> launcher(value_dim);
258258
launcher.launch(ctx, table_, keys, values_or_deltas, exists);
259259

260-
return Status::OK();
260+
return TFOkStatus;
261261
}
262262

263263
Status Insert(OpKernelContext* ctx, const Tensor& keys,
@@ -272,12 +272,12 @@ class CuckooHashTableOfTensors final : public LookupInterface {
272272
for (int64 i = 0; i < key_flat.size(); ++i) {
273273
table_->erase(tensorflow::lookup::SubtleMustCopyIfIntegral(key_flat(i)));
274274
}
275-
return Status::OK();
275+
return TFOkStatus;
276276
}
277277

278278
Status Clear(OpKernelContext* ctx) {
279279
table_->clear();
280-
return Status::OK();
280+
return TFOkStatus;
281281
}
282282

283283
Status Accum(OpKernelContext* ctx, const Tensor& keys,
@@ -304,7 +304,7 @@ class CuckooHashTableOfTensors final : public LookupInterface {
304304
table_->dump((K*)keys->tensor_data().data(),
305305
(V*)values->tensor_data().data(), 0, table_size);
306306

307-
return Status::OK();
307+
return TFOkStatus;
308308
}
309309

310310
Status SaveToFileSystemImpl(FileSystem* fs, const size_t value_dim,
@@ -319,7 +319,7 @@ class CuckooHashTableOfTensors final : public LookupInterface {
319319
bool has_atomic_move = false;
320320
auto has_atomic_move_ret = fs->HasAtomicMove(filepath, &has_atomic_move);
321321
bool need_tmp_file =
322-
(has_atomic_move == false) || (has_atomic_move_ret != Status::OK());
322+
(has_atomic_move == false) || (has_atomic_move_ret != TFOkStatus);
323323
if (!need_tmp_file) {
324324
key_tmpfilepath = key_filepath;
325325
value_tmpfilepath = value_filepath;
@@ -387,7 +387,7 @@ class CuckooHashTableOfTensors final : public LookupInterface {
387387
TF_RETURN_IF_ERROR(fs->RenameFile(value_tmpfilepath, value_filepath));
388388
}
389389

390-
return Status::OK();
390+
return TFOkStatus;
391391
}
392392

393393
Status SaveToFileSystem(OpKernelContext* ctx, const string& dirpath,
@@ -461,7 +461,7 @@ class CuckooHashTableOfTensors final : public LookupInterface {
461461
LOG(INFO) << "Finish loading " << key_size << " keys and values from "
462462
<< key_filepath << " and " << value_filepath << " in total.";
463463

464-
return Status::OK();
464+
return TFOkStatus;
465465
}
466466

467467
Status LoadFromFileSystem(OpKernelContext* ctx, const string& dirpath,
@@ -500,7 +500,7 @@ class CuckooHashTableOfTensors final : public LookupInterface {
500500
string filepath = io::JoinPath(dirpath, file_name);
501501
return LoadFromFileSystemImpl(fs, value_dim, filepath, buffer_size);
502502
}
503-
return Status::OK();
503+
return TFOkStatus;
504504
}
505505

506506
DataType key_dtype() const override { return DataTypeToEnum<K>::v(); }
@@ -557,7 +557,7 @@ class HashTableOpKernel : public OpKernel {
557557
*container = h(0);
558558
*table_handle = h(1);
559559
}
560-
return Status::OK();
560+
return TFOkStatus;
561561
}
562562

563563
Status GetResourceHashTable(StringPiece input_name, OpKernelContext* ctx,

tensorflow_recommenders_addons/dynamic_embedding/core/kernels/cuckoo_hashtable_op.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ limitations under the License.
3030
#include "tensorflow/core/platform/thread_annotations.h"
3131
#include "tensorflow/core/util/env_var.h"
3232
#include "tensorflow_recommenders_addons/dynamic_embedding/core/utils/types.h"
33+
#include "tensorflow_recommenders_addons/dynamic_embedding/core/utils/utils.h"
3334

3435
namespace tensorflow {
3536
namespace recommenders_addons {
@@ -76,7 +77,7 @@ class HashTableOp : public OpKernel {
7677
table_.AllocatedBytes());
7778
}
7879
*ret = container;
79-
return Status::OK();
80+
return TFOkStatus;
8081
};
8182

8283
LookupInterface* table = nullptr;

0 commit comments

Comments (0)