[CMS] Changes to build TF cms externals

smuzaffar committed Sep 27, 2024
1 parent ad6d8cc commit 95cfcd1

Showing 37 changed files with 484 additions and 155 deletions.
4 changes: 0 additions & 4 deletions tensorflow/api_template.__init__.py
@@ -27,7 +27,6 @@
"""
# pylint: disable=g-bad-import-order,protected-access,g-import-not-at-top

import distutils as _distutils
import importlib
import inspect as _inspect
import os as _os
@@ -100,9 +99,6 @@
if "getsitepackages" in dir(_site):
_site_packages_dirs += _site.getsitepackages()

if "sysconfig" in dir(_distutils):
_site_packages_dirs += [_distutils.sysconfig.get_python_lib()]

_site_packages_dirs = list(set(_site_packages_dirs))

# Find the location of this exact file.
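The lines dropped above removed the distutils-based site-packages lookup (distutils.sysconfig.get_python_lib()), since distutils is no longer in the standard library as of Python 3.12. If that extra lookup is still wanted, the standard-library sysconfig module gives a close equivalent; the following is a minimal sketch of the idea, not code from this commit:

import site as _site
import sysconfig as _sysconfig

_site_packages_dirs = []
if "getsitepackages" in dir(_site):
    _site_packages_dirs += _site.getsitepackages()
# "purelib" is the pure-Python site-packages directory, roughly what
# distutils.sysconfig.get_python_lib() used to return.
_site_packages_dirs += [_sysconfig.get_paths()["purelib"]]
_site_packages_dirs = list(set(_site_packages_dirs))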
4 changes: 2 additions & 2 deletions tensorflow/core/kernels/conv_ops.h
@@ -77,10 +77,10 @@ struct LaunchConvOp<Eigen::GpuDevice, T> {
// It uses malloc and free to avoid the time cost of initializing the memory.
template <class T, size_t size>
struct Im2ColBufferResource : public ResourceBase {
Im2ColBufferResource<T, size>() {
Im2ColBufferResource() {
data = static_cast<T*>(port::Malloc(size * sizeof(T)));
}
~Im2ColBufferResource<T, size>() { port::Free(data); }
~Im2ColBufferResource() { port::Free(data); }
// This mutex ensures that only a single operation at a time is able to use
// the buffer memory held by this resource.
mutex mu;
2 changes: 1 addition & 1 deletion tensorflow/core/kernels/ctc_decoder_ops.cc
@@ -372,7 +372,7 @@ class CTCBeamSearchDecoderOp : public OpKernel {
typename ctc::CTCBeamSearchDecoder<T>::DefaultBeamScorer beam_scorer_;
bool merge_repeated_;
int beam_width_;
CTCBeamSearchDecoderOp<T>(const CTCBeamSearchDecoderOp<T>&) = delete;
CTCBeamSearchDecoderOp(const CTCBeamSearchDecoderOp&) = delete;
void operator=(const CTCBeamSearchDecoderOp<T>&) = delete;
};

2 changes: 1 addition & 1 deletion tensorflow/core/kernels/ctc_loss_op.cc
@@ -221,7 +221,7 @@ class CTCLossOp : public OpKernel {
bool ctc_merge_repeated_;
bool ignore_longer_outputs_than_inputs_;

CTCLossOp<T>(const CTCLossOp<T>&) = delete;
CTCLossOp(const CTCLossOp&) = delete;
void operator=(const CTCLossOp<T>&) = delete;
};

5 changes: 5 additions & 0 deletions tensorflow/core/kernels/matmul_op_impl.h
@@ -97,6 +97,7 @@ struct ParallelMatMulKernel {
z.device(d) = z.conjugate();
}

__attribute__((used))
static void Run(const OpKernelContext* context, const Tensor& in_x,
const Tensor& in_y, bool adj_x, bool adj_y, bool trans_x,
bool trans_y, const MatMulBCast& bcast, Tensor* out,
@@ -142,6 +143,7 @@ template <typename Scalar>
struct ParallelMatMulKernel<Scalar, false> {
static void Conjugate(const OpKernelContext* context, Tensor* out) {}

__attribute__((used))
static void Run(const OpKernelContext* context, const Tensor& in_x,
const Tensor& in_y, bool adj_x, bool adj_y, bool trans_x,
bool trans_y, const MatMulBCast& bcast, Tensor* out,
@@ -213,6 +215,7 @@ struct SequentialMatMulKernel {
t->dim_size(1), t->dim_size(2));
}

__attribute__((used))
static void Run(const Tensor& in_x, const Tensor& in_y, bool adj_x,
bool adj_y, bool trans_x, bool trans_y,
const MatMulBCast& bcast, Tensor* out, int start, int limit) {
@@ -274,6 +277,8 @@ struct SingleBatchParallelMatMulKernel {
return MatrixMap(t->flat<Scalar>().data(), t->dim_size(1), t->dim_size(2));
}


__attribute__((used))
static void Run(const CPUDevice& device, const Tensor& in_x,
const Tensor& in_y, bool adj_x, bool adj_y, bool trans_x,
bool trans_y, Tensor* out) {
2 changes: 1 addition & 1 deletion tensorflow/core/kernels/quantized_resize_bilinear_op.cc
@@ -738,7 +738,7 @@ class QuantizedResizeBilinearOp : public OpKernel {
bool align_corners_;
bool half_pixel_centers_;

QuantizedResizeBilinearOp<T>(const QuantizedResizeBilinearOp<T>&) = delete;
QuantizedResizeBilinearOp(const QuantizedResizeBilinearOp<T>&) = delete;
void operator=(const QuantizedResizeBilinearOp<T>&) = delete;
};

4 changes: 2 additions & 2 deletions tensorflow/lite/python/convert.py
@@ -14,7 +14,6 @@
# ==============================================================================
"""Converts a frozen graph into a TFLite FlatBuffer."""

import distutils.spawn
import enum
import hashlib
import os as _os
@@ -23,6 +22,7 @@
import tempfile as _tempfile
from typing import Optional
import warnings
import shutil

from tensorflow.compiler.mlir.lite.python import wrap_converter
from tensorflow.compiler.mlir.quantization.stablehlo import quantization_config_pb2
@@ -408,7 +408,7 @@ def _run_deprecated_conversion_binary(
RuntimeError: When conversion fails, an exception is raised with the error
message embedded.
"""
if distutils.spawn.find_executable(_deprecated_conversion_binary) is None:
if shutil.which(_deprecated_conversion_binary) is None:
raise ConverterError("""Could not find `toco_from_protos` binary, make sure
your virtualenv bin directory or pip local bin directory is in your path.
In particular, if you have installed TensorFlow with --user, make sure you
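shutil.which is the standard-library replacement for the removed distutils.spawn.find_executable: it returns the full path of an executable found on PATH, or None. A small usage sketch, independent of the converter code above:

import shutil

# Same contract as distutils.spawn.find_executable(): full path if the
# binary is on PATH, otherwise None.
if shutil.which("toco_from_protos") is None:
    raise RuntimeError("Could not find the `toco_from_protos` binary on PATH.")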
1 change: 1 addition & 0 deletions tensorflow/tools/pip_package/BUILD
@@ -35,6 +35,7 @@ transitive_hdrs(
"//tensorflow/core:lib",
"//tensorflow/core:protos_all_cc",
"//tensorflow/core/platform:stream_executor",
"//tensorflow/cc/saved_model:tag_constants",
"//tensorflow/cc/saved_model:loader",
"//tensorflow/cc/saved_model:reader",
"//tensorflow/cc/saved_model:bundle_v2",
6 changes: 2 additions & 4 deletions tensorflow/workspace0.bzl
@@ -5,7 +5,6 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@build_bazel_apple_support//lib:repositories.bzl", "apple_support_dependencies")
load("@build_bazel_rules_apple//apple:repositories.bzl", "apple_rules_dependencies")
load("@build_bazel_rules_swift//swift:repositories.bzl", "swift_rules_dependencies")
load("@com_github_grpc_grpc//bazel:grpc_extra_deps.bzl", "grpc_extra_deps")
load("@local_config_android//:android.bzl", "android_workspace")
load("@rules_foreign_cc//foreign_cc:repositories.bzl", "rules_foreign_cc_dependencies")
load("//third_party/googleapis:repository_rules.bzl", "config_googleapis")
@@ -22,11 +21,11 @@ def _tf_bind():
# Needed by Protobuf
native.bind(
name = "grpc_cpp_plugin",
actual = "@com_github_grpc_grpc//src/compiler:grpc_cpp_plugin",
actual = "@com_github_grpc_grpc//:grpc_cpp_plugin",
)
native.bind(
name = "grpc_python_plugin",
actual = "@com_github_grpc_grpc//src/compiler:grpc_python_plugin",
actual = "@com_github_grpc_grpc//:grpc_python_plugin",
)

native.bind(
@@ -132,7 +131,6 @@ def workspace():
# at the end of the WORKSPACE file.
_tf_bind()

grpc_extra_deps()
rules_foreign_cc_dependencies()
config_googleapis()

2 changes: 0 additions & 2 deletions tensorflow/workspace1.bzl
@@ -1,7 +1,6 @@
"""TensorFlow workspace initialization. Consult the WORKSPACE on how to use it."""

load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@com_github_grpc_grpc//bazel:grpc_deps.bzl", "grpc_deps")
load("@com_google_benchmark//:bazel/benchmark_deps.bzl", "benchmark_deps")
load("@io_bazel_rules_closure//closure:defs.bzl", "closure_repositories")
load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
@@ -31,7 +30,6 @@ def workspace(with_rules_cc = True):

android_configure(name = "local_config_android")

grpc_deps()
benchmark_deps()

# Alias so it can be loaded without assigning to a different symbol to prevent
125 changes: 4 additions & 121 deletions tensorflow/workspace2.bzl
@@ -21,9 +21,7 @@ load("//third_party/benchmark:workspace.bzl", benchmark = "repo")
load("//third_party/clang_toolchain:cc_configure_clang.bzl", "cc_download_clang_toolchain")
load("//third_party/dlpack:workspace.bzl", dlpack = "repo")
load("//third_party/ducc:workspace.bzl", ducc = "repo")
load("//third_party/eigen3:workspace.bzl", eigen3 = "repo")
load("//third_party/farmhash:workspace.bzl", farmhash = "repo")
load("//third_party/flatbuffers:workspace.bzl", flatbuffers = "repo")

# Import third party repository rules. See go/tfbr-thirdparty.
load("//third_party/FP16:workspace.bzl", FP16 = "repo")
@@ -36,7 +34,6 @@ load("//third_party/highwayhash:workspace.bzl", highwayhash = "repo")
load("//third_party/hwloc:workspace.bzl", hwloc = "repo")
load("//third_party/icu:workspace.bzl", icu = "repo")
load("//third_party/implib_so:workspace.bzl", implib_so = "repo")
load("//third_party/jpeg:workspace.bzl", jpeg = "repo")
load("//third_party/kissfft:workspace.bzl", kissfft = "repo")
load("//third_party/libprotobuf_mutator:workspace.bzl", libprotobuf_mutator = "repo")
load("//third_party/llvm:setup.bzl", "llvm_setup")
@@ -57,6 +54,9 @@ load("//third_party/tensorrt:workspace.bzl", tensorrt = "repo")
load("//third_party/triton:workspace.bzl", triton = "repo")
load("//third_party/vulkan_headers:workspace.bzl", vulkan_headers = "repo")

# Import CMS-specific repos.
load("//third_party/cms:workspace.bzl", cms = "repos")

def _initialize_third_party():
""" Load third party repositories. See above load() statements. """
FP16()
@@ -65,16 +65,12 @@ def _initialize_third_party():
benchmark()
ducc()
dlpack()
eigen3()
farmhash()
flatbuffers()
gemmlowp()
hexagon_nn()
highwayhash()
hwloc()
icu()
implib_so()
jpeg()
kissfft()
libprotobuf_mutator()
ml_dtypes()
@@ -89,6 +85,7 @@ def _initialize_third_party():
vulkan_headers()
tensorrt()
triton()
cms()

# copybara: tsl vendor

@@ -318,61 +315,6 @@ def _tf_repositories():
urls = tf_mirror_urls("https://github.com/googleapis/googleapis/archive/6b3fdcea8bc5398be4e7e9930c693f0ea09316a0.tar.gz"),
)

tf_http_archive(
name = "png",
build_file = "//third_party:png.BUILD",
patch_file = ["//third_party:png_fix_rpi.patch"],
sha256 = "a00e9d2f2f664186e4202db9299397f851aea71b36a35e74910b8820e380d441",
strip_prefix = "libpng-1.6.39",
system_build_file = "//third_party/systemlibs:png.BUILD",
urls = tf_mirror_urls("https://github.com/glennrp/libpng/archive/v1.6.39.tar.gz"),
)

tf_http_archive(
name = "org_sqlite",
build_file = "//third_party:sqlite.BUILD",
sha256 = "bb5849ae4d7129c09d20596379a0b3f7b1ac59cf9998eba5ef283ea9b6c000a5",
strip_prefix = "sqlite-amalgamation-3430000",
system_build_file = "//third_party/systemlibs:sqlite.BUILD",
urls = tf_mirror_urls("https://www.sqlite.org/2023/sqlite-amalgamation-3430000.zip"),
)

tf_http_archive(
name = "gif",
build_file = "//third_party:gif.BUILD",
patch_file = [
"//third_party:gif_fix_strtok_r.patch",
"//third_party:gif_fix_image_counter.patch",
],
sha256 = "31da5562f44c5f15d63340a09a4fd62b48c45620cd302f77a6d9acf0077879bd",
strip_prefix = "giflib-5.2.1",
system_build_file = "//third_party/systemlibs:gif.BUILD",
urls = tf_mirror_urls("https://pilotfiber.dl.sourceforge.net/project/giflib/giflib-5.2.1.tar.gz"),
)

tf_http_archive(
name = "six_archive",
build_file = "//third_party:six.BUILD",
sha256 = "1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926",
strip_prefix = "six-1.16.0",
system_build_file = "//third_party/systemlibs:six.BUILD",
urls = tf_mirror_urls("https://pypi.python.org/packages/source/s/six/six-1.16.0.tar.gz"),
)

tf_http_archive(
name = "absl_py",
sha256 = "a7c51b2a0aa6357a9cbb2d9437e8cd787200531867dc02565218930b6a32166e",
strip_prefix = "abseil-py-1.0.0",
system_build_file = "//third_party/systemlibs:absl_py.BUILD",
system_link_files = {
"//third_party/systemlibs:absl_py.absl.BUILD": "absl/BUILD",
"//third_party/systemlibs:absl_py.absl.flags.BUILD": "absl/flags/BUILD",
"//third_party/systemlibs:absl_py.absl.testing.BUILD": "absl/testing/BUILD",
"//third_party/systemlibs:absl_py.absl.logging.BUILD": "absl/logging/BUILD",
},
urls = tf_mirror_urls("https://github.com/abseil/abseil-py/archive/refs/tags/v1.0.0.tar.gz"),
)

tf_http_archive(
name = "com_google_protobuf",
patch_file = ["//third_party/protobuf:protobuf.patch"],
@@ -416,37 +358,6 @@ def _tf_repositories():
urls = tf_mirror_urls("https://github.com/gflags/gflags/archive/v2.2.2.tar.gz"),
)

tf_http_archive(
name = "curl",
build_file = "//third_party:curl.BUILD",
sha256 = "816e41809c043ff285e8c0f06a75a1fa250211bbfb2dc0a037eeef39f1a9e427",
strip_prefix = "curl-8.4.0",
system_build_file = "//third_party/systemlibs:curl.BUILD",
urls = tf_mirror_urls("https://curl.se/download/curl-8.4.0.tar.gz"),
)

# WARNING: make sure ncteisen@ and vpai@ are cc-ed on any CL to change the below rule
tf_http_archive(
name = "com_github_grpc_grpc",
sha256 = "b956598d8cbe168b5ee717b5dafa56563eb5201a947856a6688bbeac9cac4e1f",
strip_prefix = "grpc-b54a5b338637f92bfcf4b0bc05e0f57a5fd8fadd",
system_build_file = "//third_party/systemlibs:grpc.BUILD",
patch_file = [
"//third_party/grpc:generate_cc_env_fix.patch",
"//third_party/grpc:register_go_toolchain.patch",
],
system_link_files = {
"//third_party/systemlibs:BUILD": "bazel/BUILD",
"//third_party/systemlibs:grpc.BUILD": "src/compiler/BUILD",
"//third_party/systemlibs:grpc.bazel.grpc_deps.bzl": "bazel/grpc_deps.bzl",
"//third_party/systemlibs:grpc.bazel.grpc_extra_deps.bzl": "bazel/grpc_extra_deps.bzl",
"//third_party/systemlibs:grpc.bazel.cc_grpc_library.bzl": "bazel/cc_grpc_library.bzl",
"//third_party/systemlibs:grpc.bazel.generate_cc.bzl": "bazel/generate_cc.bzl",
"//third_party/systemlibs:grpc.bazel.protobuf.bzl": "bazel/protobuf.bzl",
},
urls = tf_mirror_urls("https://github.com/grpc/grpc/archive/b54a5b338637f92bfcf4b0bc05e0f57a5fd8fadd.tar.gz"),
)

tf_http_archive(
name = "linenoise",
build_file = "//third_party:linenoise.BUILD",
@@ -483,16 +394,6 @@ def _tf_repositories():
urls = tf_mirror_urls("https://github.com/google/boringssl/archive/c00d7ca810e93780bd0c8ee4eea28f4f2ea4bcdc.tar.gz"),
)

# Note: if you update this, you have to update libpng too. See cl/437813808
tf_http_archive(
name = "zlib",
build_file = "//third_party:zlib.BUILD",
sha256 = "b3a24de97a8fdbc835b9833169501030b8977031bcb54b3b3ac13740f846ab30",
strip_prefix = "zlib-1.2.13",
system_build_file = "//third_party/systemlibs:zlib.BUILD",
urls = tf_mirror_urls("https://zlib.net/fossils/zlib-1.2.13.tar.gz"),
)

# LINT.IfChange
tf_http_archive(
name = "fft2d",
@@ -617,15 +518,6 @@ def _tf_repositories():
urls = tf_mirror_urls("https://github.com/nvidia/nccl/archive/v2.19.3-1.tar.gz"),
)

tf_http_archive(
name = "cython",
build_file = "//third_party:cython.BUILD",
sha256 = "0c2eae8a4ceab7955be1e11a4ddc5dcc3aa06ce22ad594262f1555b9d10667f0",
strip_prefix = "cython-3.0.3",
system_build_file = "//third_party/systemlibs:cython.BUILD",
urls = tf_mirror_urls("https://github.com/cython/cython/archive/3.0.3.tar.gz"),
)

# LINT.IfChange
tf_http_archive(
name = "arm_neon_2_x86_sse",
@@ -775,15 +667,6 @@ def _tf_repositories():
urls = tf_mirror_urls("https://github.com/nlohmann/json/archive/v3.10.5.tar.gz"),
)

tf_http_archive(
name = "pybind11",
urls = tf_mirror_urls("https://github.com/pybind/pybind11/archive/v2.10.4.tar.gz"),
sha256 = "832e2f309c57da9c1e6d4542dedd34b24e4192ecb4d62f6f4866a737454c9970",
strip_prefix = "pybind11-2.10.4",
build_file = "//third_party:pybind11.BUILD",
system_build_file = "//third_party/systemlibs:pybind11.BUILD",
)

tf_http_archive(
name = "pybind11_protobuf",
urls = tf_mirror_urls("https://github.com/pybind/pybind11_protobuf/archive/80f3440cd8fee124e077e2e47a8a17b78b451363.zip"),
13 changes: 13 additions & 0 deletions third_party/absl/absl_neon.patch
@@ -0,0 +1,13 @@
diff --git a/absl/base/config.h b/absl/base/config.h
index 97c9a22a109..ab1e9860a91 100644
--- a/absl/base/config.h
+++ b/absl/base/config.h
@@ -926,7 +926,7 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
// https://llvm.org/docs/CompileCudaWithLLVM.html#detecting-clang-vs-nvcc-from-code
#ifdef ABSL_INTERNAL_HAVE_ARM_NEON
#error ABSL_INTERNAL_HAVE_ARM_NEON cannot be directly set
-#elif defined(__ARM_NEON) && !defined(__CUDA_ARCH__)
+#elif defined(__ARM_NEON) && !(defined(__NVCC__) && defined(__CUDACC__))
#define ABSL_INTERNAL_HAVE_ARM_NEON 1
#endif

1 change: 1 addition & 0 deletions third_party/absl/workspace.bzl
@@ -42,6 +42,7 @@ def repo():
build_file = "//third_party/absl:com_google_absl.BUILD",
system_build_file = "//third_party/absl:system.BUILD",
system_link_files = SYS_LINKS,
patch_file = ["//third_party/absl:absl_neon.patch"],
strip_prefix = "abseil-cpp-{commit}".format(commit = ABSL_COMMIT),
urls = tf_mirror_urls("https://github.com/abseil/abseil-cpp/archive/{commit}.tar.gz".format(commit = ABSL_COMMIT)),
)
Empty file added third_party/cms/BUILD
Empty file.