From 0f85baf9dc552877473667d5f5b3b95f6698fed0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eric=20M=C3=BCller?= <mueller@kip.uni-heidelberg.de> Date: Fri, 14 Mar 2025 16:15:33 +0100 Subject: [PATCH 01/11] feat(py-jax{,lib}): copy recipes from upstream@develop (2025-03-06) --- packages/py-jax/package.py | 149 +++++++++++++++++++ packages/py-jaxlib/jaxxlatsl.patch | 100 +++++++++++++ packages/py-jaxlib/package.py | 231 +++++++++++++++++++++++++++++ 3 files changed, 480 insertions(+) create mode 100644 packages/py-jax/package.py create mode 100644 packages/py-jaxlib/jaxxlatsl.patch create mode 100644 packages/py-jaxlib/package.py diff --git a/packages/py-jax/package.py b/packages/py-jax/package.py new file mode 100644 index 00000000..f99d02a8 --- /dev/null +++ b/packages/py-jax/package.py @@ -0,0 +1,149 @@ +# Copyright Spack Project Developers. See COPYRIGHT file for details. +# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) + + +from spack.package import * + + +class PyJax(PythonPackage): + """Differentiate, compile, and transform Numpy code. + + JAX is a Python library for accelerator-oriented array computation and program transformation, + designed for high-performance numerical computing and large-scale machine learning. + """ + + homepage = "https://github.com/jax-ml/jax" + pypi = "jax/jax-0.4.27.tar.gz" + + license("Apache-2.0") + maintainers("adamjstewart", "jonas-eschle") + + # version("0.5.0", sha256="49df70bf293a345a7fb519f71193506d37a024c4f850b358042eb32d502c81c8") + # version("0.4.38", sha256="43bae65881628319e0a2148e8f81a202fbc2b8d048e35c7cb1df2416672fa4a8") + # version("0.4.37", sha256="7774f3d9e23fe199c65589c680c5a5be87a183b89598421a632d8245222b637b") + # version("0.4.36", sha256="088bff0575d01fc82682a9af4eb07433d60de7e5164686bd2cea3439492e608a") + # version("0.4.35", sha256="c0c986993026b10bf6f607fecb7417377460254640766ce40f1fef3fd139c12e") + # version("0.4.34", sha256="44196854f40c5f9cea3142824b9f1051f85afc3fcf7593ec5479fc8db01c58db") + # version("0.4.33", sha256="f0d788692fc0179653066c9e1c64e57311b8c15a389837fd7baf328abefcbb92") + # version("0.4.32", sha256="eb703909968da161894fb6135a931c5f3d2aab64fff7cba5fcb803ce6d968e08") + version("0.4.31", sha256="fd2d470643a0073d822737f0788f71391656af7e62cc5b2e7995ee390ceac287") + version("0.4.30", sha256="94d74b5b2db0d80672b61d83f1f63ebf99d2ab7398ec12b2ca0c9d1e97afe577") + version("0.4.29", sha256="12904571eaefddcdc8c3b8d4936482b783d5a216e99ef5adcd3522fdfb4fc186") + version("0.4.28", sha256="dcf0a44aff2e1713f0a2b369281cd5b79d8c18fc1018905c4125897cb06b37e9") + version("0.4.27", sha256="f3d7f19bdc0a17ccdb305086099a5a90c704f904d4272a70debe06ae6552998c") + version("0.4.26", sha256="2cce025d0a279ec630d550524749bc8efe25d2ff47240d2a7d4cfbc5090c5383") + version("0.4.25", sha256="a8ee189c782de2b7b2ffb64a8916da380b882a617e2769aa429b71d79747b982") + version("0.4.24", sha256="4a6b6fd026ddd22653c7fa2fac1904c3de2dbe845b61ede08af9a5cc709662ae") + version("0.4.23", sha256="2a229a5a758d1b803891b2eaed329723f6b15b4258b14dc0ccb1498c84963685") + version("0.4.22", sha256="801434dda6e14f82a45fff753969a33281ab22fb2a50fe801b651390321057ba") + version("0.4.21", sha256="c97fd0d2751d6e1eb15aa2052ff7cfdc129f8fafc2c14cd779720658926a587b") + version("0.4.20", sha256="ea96a763a8b1a9374639d1159ab4de163461d01cd022f67c34c09581b71ed2ac") + version("0.4.19", sha256="29f87f9a50964d3ca5eeb2973de3462f0e8b4eca6d46027894a0e9a903420601") + version("0.4.18", sha256="776cf33890100803e98f45f9af10aa727271c6993d4e766c069118733c928132") + version("0.4.17", 
sha256="d7508a69e87835f534cb07a2f21d79cc1cb8c4cfdcf7fb010927267ef7355f1d") + version("0.4.16", sha256="e2ca82c9bf973c2c1c01f5340a583692b31f277aa3abd0544229c1fe5fa44b02") + version("0.4.15", sha256="2aa123ccef591e355dea94a6e714b6559f8e1d6368a576a223f97d031ece0d15") + version("0.4.14", sha256="18fed3881f26e8b13c8cb46eeeea3dba9eb4d48e3714d8e8f2304dd6e237083d") + version("0.4.13", sha256="03bfe6749dfe647f16f15f6616638adae6c4a7ca7167c75c21961ecfd3a3baaa") + version("0.4.12", sha256="d2de9a2388ffe002f16506d3ad1cc6e34d7536b98948e49c7e05bbcfe8e57998") + version("0.4.11", sha256="8b1cd443b698339df8d8807578ee141e5b67e36125b3945b146f600177d60d79") + version("0.4.10", sha256="1bf0f2720f778f2937301a16a4d5cd3497f13a4d6c970c24a88918a81816a888") + version("0.4.9", sha256="1ed135cd08f48e4baf10f6eafdb4a4cdae781f9052b5838c09c91a9f4fa75f09") + version("0.4.8", sha256="08116481f7336db16c24812bfb5e6f9786915f4c2f6ff4028331fa69e7535202") + version("0.4.7", sha256="5e7002d74db25f97c99b979d4ba1233b1ef26e1597e5fc468ad11d1c8a9dc4f8") + version("0.4.6", sha256="d06ea8fba4ed315ec55110396058cb48c8edb2ab0b412f28c8a123beee9e58ab") + version("0.4.5", sha256="1633e56d34b18ddfa7d2a216ce214fa6fa712d36552532aaa71da416aede7268") + version("0.4.4", sha256="39b07e07343ed7c74492ee5e75db77456d3afdd038a322671f09fc748f6392cb") + version("0.4.3", sha256="d43f08f940aa30eb339965cfb3d6bee2296537b0dc2f0c65ccae3009279529ae") + + depends_on("py-setuptools", type="build") + + with default_args(type=("build", "run")): + # setup.py + depends_on("python@3.10:", when="@0.4.31:") + depends_on("python@3.9:", when="@0.4.14:") + depends_on("py-ml-dtypes@0.4:", when="@0.4.29,0.4.35:") + depends_on("py-ml-dtypes@0.2:", when="@0.4.14:") + depends_on("py-ml-dtypes@0.1:", when="@0.4.9:") + depends_on("py-ml-dtypes@0.0.3:", when="@0.4.7:") + depends_on("py-numpy@1.25:", when="@0.5:") + depends_on("py-numpy@1.24:", when="@0.4.31:") + depends_on("py-numpy@1.22:", when="@0.4.14:") + depends_on("py-numpy@1.21:", when="@0.4.7:") + depends_on("py-numpy@1.20:", when="@0.3:") + # https://github.com/google/jax/issues/19246 + depends_on("py-numpy@:1", when="@:0.4.25") + depends_on("py-opt-einsum") + depends_on("py-scipy@1.11.1:", when="@0.5:") + depends_on("py-scipy@1.10:", when="@0.4.31:") + depends_on("py-scipy@1.9:", when="@0.4.19:") + depends_on("py-scipy@1.7:", when="@0.4.7:") + depends_on("py-scipy@1.5:", when="@0.3:") + + # jax/_src/lib/__init__.py + # https://github.com/google/jax/commit/8be057de1f50756fe7522f7e98b2f30fad56f7e4 + for v in [ + # "0.5.0", + # "0.4.38", + # "0.4.37", + # "0.4.36", + # "0.4.35", + # "0.4.34", + # "0.4.33", + # "0.4.32", + "0.4.31", + "0.4.30", + "0.4.29", + "0.4.28", + "0.4.27", + "0.4.26", + "0.4.25", + "0.4.24", + "0.4.23", + "0.4.22", + "0.4.21", + "0.4.20", + "0.4.19", + "0.4.18", + "0.4.17", + "0.4.16", + "0.4.15", + "0.4.14", + "0.4.13", + "0.4.12", + "0.4.11", + "0.4.10", + "0.4.9", + "0.4.8", + "0.4.7", + "0.4.6", + "0.4.5", + "0.4.4", + "0.4.3", + ]: + depends_on(f"py-jaxlib@:{v}", when=f"@{v}") + + # See _minimum_jaxlib_version in jax/version.py + # depends_on("py-jaxlib@0.5:", when="@0.5:") + # depends_on("py-jaxlib@0.4.38:", when="@0.4.38:") + # depends_on("py-jaxlib@0.4.36:", when="@0.4.36:") + # depends_on("py-jaxlib@0.4.35:", when="@0.4.35:") + # depends_on("py-jaxlib@0.4.34:", when="@0.4.34:") + # depends_on("py-jaxlib@0.4.33:", when="@0.4.33:") + # depends_on("py-jaxlib@0.4.32:", when="@0.4.32:") + depends_on("py-jaxlib@0.4.30:", when="@0.4.31:") + depends_on("py-jaxlib@0.4.27:", when="@0.4.28:") + 
depends_on("py-jaxlib@0.4.23:", when="@0.4.27:") + depends_on("py-jaxlib@0.4.20:", when="@0.4.25:") + depends_on("py-jaxlib@0.4.19:", when="@0.4.21:") + depends_on("py-jaxlib@0.4.14:", when="@0.4.15:") + depends_on("py-jaxlib@0.4.11:", when="@0.4.12:") + depends_on("py-jaxlib@0.4.7:", when="@0.4.8:") + depends_on("py-jaxlib@0.4.6:", when="@0.4.7:") + depends_on("py-jaxlib@0.4.4:", when="@0.4.5:") + depends_on("py-jaxlib@0.4.2:", when="@0.4.3:") + depends_on("py-jaxlib@0.4.1:", when="@0.4.2:") + + # Historical dependencies + depends_on("py-importlib-metadata@4.6:", when="@0.4.11:0.4.30 ^python@:3.9") diff --git a/packages/py-jaxlib/jaxxlatsl.patch b/packages/py-jaxlib/jaxxlatsl.patch new file mode 100644 index 00000000..e96cc32e --- /dev/null +++ b/packages/py-jaxlib/jaxxlatsl.patch @@ -0,0 +1,100 @@ +From 8fce7378ed8ce994107568449806cd99274ab22b Mon Sep 17 00:00:00 2001 +From: Andrew Elble <aweits@rit.edu> +Date: Mon, 21 Oct 2024 19:42:31 -0400 +Subject: [PATCH] patchit + +--- + ...ch-for-Abseil-to-fix-build-on-Jetson.patch | 68 +++++++++++++++++++ + third_party/xla/workspace.bzl | 1 + + 2 files changed, 69 insertions(+) + create mode 100644 third_party/xla/0001-Add-patch-for-Abseil-to-fix-build-on-Jetson.patch + +diff --git a/third_party/xla/0001-Add-patch-for-Abseil-to-fix-build-on-Jetson.patch b/third_party/xla/0001-Add-patch-for-Abseil-to-fix-build-on-Jetson.patch +new file mode 100644 +index 000000000000..5138a045082b +--- /dev/null ++++ b/third_party/xla/0001-Add-patch-for-Abseil-to-fix-build-on-Jetson.patch +@@ -0,0 +1,68 @@ ++From 40da87a0476436ca1da2eafe08935787a05e9a61 Mon Sep 17 00:00:00 2001 ++From: David Dunleavy <ddunleavy@google.com> ++Date: Mon, 5 Aug 2024 11:42:53 -0700 ++Subject: [PATCH] Add patch for Abseil to fix build on Jetson ++ ++Patches in https://github.com/abseil/abseil-cpp/commit/372124e6af36a540e74a2ec31d79d7297a831f98 ++ ++PiperOrigin-RevId: 659627531 ++--- ++ .../tsl/third_party/absl/nvidia_jetson.patch | 35 +++++++++++++++++++ ++ .../tsl/third_party/absl/workspace.bzl | 1 + ++ 2 files changed, 36 insertions(+) ++ create mode 100644 third_party/tsl/third_party/absl/nvidia_jetson.patch ++ ++diff --git a/third_party/tsl/third_party/absl/nvidia_jetson.patch b/third_party/tsl/third_party/absl/nvidia_jetson.patch ++new file mode 100644 ++index 000000000000..5328c3a0d605 ++--- /dev/null +++++ b/third_party/tsl/third_party/absl/nvidia_jetson.patch ++@@ -0,0 +1,35 @@ +++From 372124e6af36a540e74a2ec31d79d7297a831f98 Mon Sep 17 00:00:00 2001 +++From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Bastien?= <frederic.bastien@gmail.com> +++Date: Thu, 1 Aug 2024 12:38:52 -0700 +++Subject: [PATCH] PR #1732: Fix build on NVIDIA Jetson board. Fix #1665 +++ +++Imported from GitHub PR https://github.com/abseil/abseil-cpp/pull/1732 +++ +++Fix build on NVIDIA Jetson board. Fix #1665 +++ +++This patch is already used by the spark project. +++I'm fixing this as this break the build of Tensorflow and JAX on Jetson board. 
+++Merge 7db2d2ab9fbed1f0fabad10a6ec73533ba71bfff into 6b8ebb35c0414ef5a2b6fd4a0f59057e41beaff9 +++ +++Merging this change closes #1732 +++ +++COPYBARA_INTEGRATE_REVIEW=https://github.com/abseil/abseil-cpp/pull/1732 from nouiz:fix_neon_on_jetson 7db2d2ab9fbed1f0fabad10a6ec73533ba71bfff +++PiperOrigin-RevId: 658501520 +++Change-Id: If502ede4efc8c877fb3fed227eca6dc7622dd181 +++--- +++ absl/base/config.h | 2 +- +++ 1 file changed, 1 insertion(+), 1 deletion(-) +++ +++diff --git a/absl/base/config.h b/absl/base/config.h +++index 97c9a22a109..ab1e9860a91 100644 +++--- a/absl/base/config.h ++++++ b/absl/base/config.h +++@@ -926,7 +926,7 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || +++ // https://llvm.org/docs/CompileCudaWithLLVM.html#detecting-clang-vs-nvcc-from-code +++ #ifdef ABSL_INTERNAL_HAVE_ARM_NEON +++ #error ABSL_INTERNAL_HAVE_ARM_NEON cannot be directly set +++-#elif defined(__ARM_NEON) && !defined(__CUDA_ARCH__) ++++#elif defined(__ARM_NEON) && !(defined(__NVCC__) && defined(__CUDACC__)) +++ #define ABSL_INTERNAL_HAVE_ARM_NEON 1 +++ #endif +++ ++diff --git a/third_party/tsl/third_party/absl/workspace.bzl b/third_party/tsl/third_party/absl/workspace.bzl ++index 06f75166ce4b..9565a82c3319 100644 ++--- a/third_party/tsl/third_party/absl/workspace.bzl +++++ b/third_party/tsl/third_party/absl/workspace.bzl ++@@ -44,4 +44,5 @@ def repo(): ++ system_link_files = SYS_LINKS, ++ strip_prefix = "abseil-cpp-{commit}".format(commit = ABSL_COMMIT), ++ urls = tf_mirror_urls("https://github.com/abseil/abseil-cpp/archive/{commit}.tar.gz".format(commit = ABSL_COMMIT)), +++ patch_file = ["//third_party/absl:nvidia_jetson.patch"], ++ ) ++-- ++2.31.1 ++ +diff --git a/third_party/xla/workspace.bzl b/third_party/xla/workspace.bzl +index af52e7671507..70481bc970a5 100644 +--- a/third_party/xla/workspace.bzl ++++ b/third_party/xla/workspace.bzl +@@ -29,6 +29,7 @@ def repo(): + name = "xla", + sha256 = XLA_SHA256, + strip_prefix = "xla-{commit}".format(commit = XLA_COMMIT), ++ patch_file = ["//third_party/xla:0001-Add-patch-for-Abseil-to-fix-build-on-Jetson.patch"], + urls = tf_mirror_urls("https://github.com/openxla/xla/archive/{commit}.tar.gz".format(commit = XLA_COMMIT)), + ) + +-- +2.31.1 + diff --git a/packages/py-jaxlib/package.py b/packages/py-jaxlib/package.py new file mode 100644 index 00000000..cd679311 --- /dev/null +++ b/packages/py-jaxlib/package.py @@ -0,0 +1,231 @@ +# Copyright Spack Project Developers. See COPYRIGHT file for details. +# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) + +import glob + +from spack.build_systems.python import PythonPipBuilder +from spack.package import * + +rocm_dependencies = [ + "hsa-rocr-dev", + "hip", + "rccl", + "rocprim", + "hipcub", + "rocthrust", + "roctracer-dev", + "rocrand", + "hipsparse", + "hipfft", + "rocfft", + "rocblas", + "miopen-hip", + "rocminfo", +] + + +class PyJaxlib(PythonPackage, CudaPackage, ROCmPackage): + """XLA library for Jax. + + jaxlib is the support library for JAX. While JAX itself is a pure Python package, + jaxlib contains the binary (C/C++) parts of the library, including Python bindings, + the XLA compiler, the PJRT runtime, and a handful of handwritten kernels. 
+ """ + + homepage = "https://github.com/jax-ml/jax" + url = "https://github.com/jax-ml/jax/archive/refs/tags/jax-v0.4.34.tar.gz" + + license("Apache-2.0") + maintainers("adamjstewart", "jonas-eschle") + + # version("0.5.0", sha256="04cc2eeb2e7ce1916674cea03a7d75a59d583ddb779d5104e103a2798a283ce9") + # version("0.4.38", sha256="ca1e63c488d505b9c92e81499e8b06cc1977319c50d64a0e58adbd2dae1a625c") + # version("0.4.37", sha256="17a8444a931f26edda8ccbc921ab71c6bf46857287b1db186deebd357e526870") + # version("0.4.36", sha256="442bfdf491b509995aa160361e23a9db488d5b97c87e6648cc733501b06eda77") + # version("0.4.35", sha256="65e086708ae56670676b7b2340ad82b901d8c9993d1241a839c8990bdb8d6212") + # version("0.4.34", sha256="d3a75ad667772309ade81350fa70c4a78028a920028800282e46d8383c0ee6bb") + # version("0.4.33", sha256="122a806e80fc1cd7d8ffaf9620701f2cb8e4fe22271c2cec53a9c60b30bd4c31") + # version("0.4.32", sha256="3fe36d596e4d640443c0a5c533845c74fbc4341e024d9bb1cd75cb49f5f419c2") + version("0.4.31", sha256="022ea1347f9b21cbea31410b3d650d976ea4452a48ea7317a5f91c238031bf94") + version("0.4.30", sha256="0ef9635c734d9bbb44fcc87df4f1c3ccce1cfcfd243572c80d36fcdf826fe1e6") + version("0.4.29", sha256="3a8005f4f62d35a5aad7e3dbd596890b47c81cc6e34fcfe3dcb93b3ca7cb1246") + version("0.4.28", sha256="4dd11577d4ba5a095fbc35258ddd4e4c020829ed6e6afd498c9e38ccbcdfe20b") + version("0.4.27", sha256="c2c82cd9ad3b395d5cbc0affa26a2938e52677a69ca8f0b9ef9922a52cac4f0c") + version("0.4.26", sha256="ddc14da1eaa34f23430d40ad9b9585088575cac439a2fa1c6833a247e1b221fd") + version("0.4.25", sha256="fc1197c401924942eb14185a61688d0c476e3e81ff71f9dc95e620b57c06eec8") + version("0.4.24", sha256="c4e6963c2c36f634a9a1765e476a1ed4e6c4a7954465ebf72e29f344c28ddc28") + version("0.4.23", sha256="e4c06d62ba54becffd91abc862627b8b11b79c5a77366af8843b819665b6d568") + version("0.4.21", sha256="8d57f66d00b9c0b824b1eff84adda5b765a412b3f316ef7c773632d1edbf9477") + version("0.4.20", sha256="058410d2bc12f7562c7b01e0c8cd587cb68059c12f78bc945055e5ddc445f5fd") + version("0.4.19", sha256="51242b217a1f82474e42d24f09ed5dedff951eeb4579c6e49e706d1adfd6949d") + version("0.4.16", sha256="85c8bc050abe0a2cf62e8cfc7edb4904dd3807924b5714ec6277f291c576b5ca") + version("0.4.14", sha256="9f309476a8f6337717b059b8d10b5859b4134c30cf8f1220bb70379b5e2744a4") + version("0.4.11", sha256="bdfc45f33970beba5caf28d061668a4863f05994deea26791db50ea605fc2e36") + version("0.4.7", sha256="0578d5dd5035b5225cadb6a62ca5f93dd76b70292268502fc01a0fd9ca7001d0") + version("0.4.6", sha256="2c9bf8962815bc54ef524e33dc8eda9d165d379fe87e0df210f316adead27787") + version("0.4.4", sha256="881f402c7983b56b185e182d5315dd64c9f5320be96213d0415996ece1826806") + version("0.4.3", sha256="2104735dc22be2b105e5517bd5bc6ae97f40e8e9e54928cac1585c6112a3d910") + + variant("cuda", default=True, description="Build with CUDA enabled") + variant("nccl", default=True, description="Build with NCCL enabled", when="+cuda") + + depends_on("c", type="build") + depends_on("cxx", type="build") + + # docs/installation.md (Compatible with) + with when("+cuda"): + depends_on("cuda@12.1:", when="@0.4.26:") + depends_on("cuda@11.8:", when="@0.4.11:") + depends_on("cuda@11.4:", when="@0.4.0:0.4.7") + depends_on("cudnn@9.1:9", when="@0.4.31:") + depends_on("cudnn@9", when="@0.4.29:0.4.30") + depends_on("cudnn@8.9:8", when="@0.4.26:0.4.28") + depends_on("cudnn@8.8:8", when="@0.4.11:0.4.25") + depends_on("cudnn@8.2:8", when="@0.4:0.4.7") + + with when("+nccl"): + depends_on("nccl@2.18:", when="@0.4.26:") + depends_on("nccl@2.16:", 
when="@0.4.18:") + depends_on("nccl") + + with when("+rocm"): + for pkg_dep in rocm_dependencies: + depends_on(f"{pkg_dep}@6:", when="@0.4.28:") + depends_on(pkg_dep) + depends_on("py-nanobind") + + with default_args(type="build"): + # .bazelversion + depends_on("bazel@6.5.0", when="@0.4.28:") + depends_on("bazel@6.1.2", when="@0.4.11:0.4.27") + depends_on("bazel@5.1.1", when="@0.3.7:0.4.10") + + # jaxlib/setup.py + depends_on("py-setuptools") + + # build/build.py + depends_on("py-build", when="@0.4.14:") + + with default_args(type=("build", "run")): + # Based on PyPI wheels + depends_on("python@3.10:", when="@0.4.31:") + depends_on("python@3.9:", when="@0.4.14:") + depends_on("python@3.8:", when="@0.4.6:") + depends_on("python@:3.13") + depends_on("python@:3.12", when="@:0.4.33") + depends_on("python@:3.11", when="@:0.4.16") + + # jaxlib/setup.py + depends_on("py-scipy@1.11.1:", when="@0.5:") + depends_on("py-scipy@1.10:", when="@0.4.31:") + depends_on("py-scipy@1.9:", when="@0.4.19:") + depends_on("py-scipy@1.7:", when="@0.4.7:") + depends_on("py-scipy@1.5:") + depends_on("py-numpy@1.25:", when="@0.5:") + depends_on("py-numpy@1.24:", when="@0.4.31:") + depends_on("py-numpy@1.22:", when="@0.4.14:") + depends_on("py-numpy@1.21:", when="@0.4.7:") + depends_on("py-numpy@1.20:", when="@0.3:") + # https://github.com/google/jax/issues/19246 + depends_on("py-numpy@:1", when="@:0.4.25") + depends_on("py-ml-dtypes@0.4:", when="@0.4.29") + depends_on("py-ml-dtypes@0.2:", when="@0.4.14:") + depends_on("py-ml-dtypes@0.1:", when="@0.4.9:") + depends_on("py-ml-dtypes@0.0.3:", when="@0.4.7:") + + patch( + "https://github.com/jax-ml/jax/commit/f62af6457a6cc575a7b1ada08d541f0dd0eb5765.patch?full_index=1", + sha256="d3b7ea2cfeba927e40a11f07e4cbf80939f7fe69448c9eb55231a93bd64e5c02", + when="@0.4.36:0.4.38", + ) + patch( + "https://github.com/jax-ml/jax/pull/25473.patch?full_index=1", + sha256="9d6977bc32046600bf8b15863251283fe7546896340367a7f14e3dccf418b4fe", + when="@0.4.36:0.4.37", + ) + patch( + "https://github.com/google/jax/pull/20101.patch?full_index=1", + sha256="4dfb9f32d4eeb0a0fb3a6f4124c4170e3fe49511f1b768cd634c78d489962275", + when="@:0.4.25", + ) + + # Might be able to be applied to earlier versions + # backports https://github.com/abseil/abseil-cpp/pull/1732 + patch("jaxxlatsl.patch", when="@0.4.28:0.4.32 target=aarch64:") + + conflicts( + "cuda_arch=none", + when="+cuda", + msg="Must specify CUDA compute capabilities of your GPU, see " + "https://developer.nvidia.com/cuda-gpus", + ) + + # https://github.com/google/jax/issues/19992 + conflicts("@0.4.4:", when="target=ppc64le:") + + # Fails to build with freshly released CUDA (#48708). 
+ conflicts("^cuda@12.8:", when="@:0.4.31") + + def url_for_version(self, version): + url = "https://github.com/jax-ml/jax/archive/refs/tags/{}-v{}.tar.gz" + if version >= Version("0.4.33"): + name = "jax" + else: + name = "jaxlib" + return url.format(name, version) + + def install(self, spec, prefix): + # https://jax.readthedocs.io/en/latest/developer.html + args = ["build/build.py"] + + if spec.satisfies("@0.4.36:"): + args.append("build") + + if spec.satisfies("+cuda"): + args.append("--wheels=jaxlib,jax-cuda-plugin,jax-cuda-pjrt") + elif spec.satisfies("+rocm"): + args.append("--wheels=jaxlib,jax-rocm-plugin,jax-rocm-pjrt") + else: + args.append("--wheels=jaxlib") + + if spec.satisfies("@0.4.32:"): + if spec.satisfies("%clang"): + args.append("--use_clang=true") + else: + args.append("--use_clang=false") + + if "+cuda" in spec: + capabilities = CudaPackage.compute_capabilities(spec.variants["cuda_arch"].value) + args.append(f"--cuda_compute_capabilities={','.join(capabilities)}") + if spec.satisfies("@:0.4.35"): + args.append("--enable_cuda") + if spec.satisfies("@0.4.32:"): + args.extend( + [ + f"--bazel_options=--repo_env=LOCAL_CUDA_PATH={spec['cuda'].prefix}", + f"--bazel_options=--repo_env=LOCAL_CUDNN_PATH={spec['cudnn'].prefix}", + ] + ) + else: + args.extend( + [f"--cuda_path={spec['cuda'].prefix}", f"--cudnn_path={spec['cudnn'].prefix}"] + ) + + if "+nccl" in spec and spec.satisfies("@0.4.32:"): + args.append(f"--bazel_options=--repo_env=LOCAL_NCCL_PATH={spec['nccl'].prefix}") + + if "+rocm" in spec: + args.extend(["--enable_rocm", f"--rocm_path={self.spec['hip'].prefix}"]) + + args.extend( + [ + f"--bazel_options=--jobs={make_jobs}", + "--bazel_startup_options=--nohome_rc", + "--bazel_startup_options=--nosystem_rc", + ] + ) + + python(*args) + whl = glob.glob(join_path("dist", "*.whl"))[0] + pip(*PythonPipBuilder.std_args(self), f"--prefix={self.prefix}", whl) -- GitLab From 991e89ee5d8f90be3e870177fda122295216d4c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eric=20M=C3=BCller?= <mueller@kip.uni-heidelberg.de> Date: Fri, 14 Mar 2025 16:15:39 +0100 Subject: [PATCH 02/11] feat(py-jax{,lib}): enable new versions --- packages/py-jax/package.py | 18 ++++++++++-------- packages/py-jaxlib/package.py | 23 +++++++++++++++-------- 2 files changed, 25 insertions(+), 16 deletions(-) diff --git a/packages/py-jax/package.py b/packages/py-jax/package.py index f99d02a8..c896d469 100644 --- a/packages/py-jax/package.py +++ b/packages/py-jax/package.py @@ -19,14 +19,16 @@ class PyJax(PythonPackage): license("Apache-2.0") maintainers("adamjstewart", "jonas-eschle") - # version("0.5.0", sha256="49df70bf293a345a7fb519f71193506d37a024c4f850b358042eb32d502c81c8") - # version("0.4.38", sha256="43bae65881628319e0a2148e8f81a202fbc2b8d048e35c7cb1df2416672fa4a8") - # version("0.4.37", sha256="7774f3d9e23fe199c65589c680c5a5be87a183b89598421a632d8245222b637b") - # version("0.4.36", sha256="088bff0575d01fc82682a9af4eb07433d60de7e5164686bd2cea3439492e608a") - # version("0.4.35", sha256="c0c986993026b10bf6f607fecb7417377460254640766ce40f1fef3fd139c12e") - # version("0.4.34", sha256="44196854f40c5f9cea3142824b9f1051f85afc3fcf7593ec5479fc8db01c58db") - # version("0.4.33", sha256="f0d788692fc0179653066c9e1c64e57311b8c15a389837fd7baf328abefcbb92") - # version("0.4.32", sha256="eb703909968da161894fb6135a931c5f3d2aab64fff7cba5fcb803ce6d968e08") + version("0.5.2", sha256="2aef7d1912df329470c47ce8f2e6521c105e84aa620311494048c391235087c6") + version("0.5.1", 
sha256="c098f74846ee718165bbfa83521ae10cd52cf50b47f043f8b33a6cfd3c20ddfd") + version("0.5.0", sha256="49df70bf293a345a7fb519f71193506d37a024c4f850b358042eb32d502c81c8") + version("0.4.38", sha256="43bae65881628319e0a2148e8f81a202fbc2b8d048e35c7cb1df2416672fa4a8") + version("0.4.37", sha256="7774f3d9e23fe199c65589c680c5a5be87a183b89598421a632d8245222b637b") + version("0.4.36", sha256="088bff0575d01fc82682a9af4eb07433d60de7e5164686bd2cea3439492e608a") + version("0.4.35", sha256="c0c986993026b10bf6f607fecb7417377460254640766ce40f1fef3fd139c12e") + version("0.4.34", sha256="44196854f40c5f9cea3142824b9f1051f85afc3fcf7593ec5479fc8db01c58db") + version("0.4.33", sha256="f0d788692fc0179653066c9e1c64e57311b8c15a389837fd7baf328abefcbb92") + version("0.4.32", sha256="eb703909968da161894fb6135a931c5f3d2aab64fff7cba5fcb803ce6d968e08") version("0.4.31", sha256="fd2d470643a0073d822737f0788f71391656af7e62cc5b2e7995ee390ceac287") version("0.4.30", sha256="94d74b5b2db0d80672b61d83f1f63ebf99d2ab7398ec12b2ca0c9d1e97afe577") version("0.4.29", sha256="12904571eaefddcdc8c3b8d4936482b783d5a216e99ef5adcd3522fdfb4fc186") diff --git a/packages/py-jaxlib/package.py b/packages/py-jaxlib/package.py index cd679311..6db1dd80 100644 --- a/packages/py-jaxlib/package.py +++ b/packages/py-jaxlib/package.py @@ -39,14 +39,16 @@ class PyJaxlib(PythonPackage, CudaPackage, ROCmPackage): license("Apache-2.0") maintainers("adamjstewart", "jonas-eschle") - # version("0.5.0", sha256="04cc2eeb2e7ce1916674cea03a7d75a59d583ddb779d5104e103a2798a283ce9") - # version("0.4.38", sha256="ca1e63c488d505b9c92e81499e8b06cc1977319c50d64a0e58adbd2dae1a625c") - # version("0.4.37", sha256="17a8444a931f26edda8ccbc921ab71c6bf46857287b1db186deebd357e526870") - # version("0.4.36", sha256="442bfdf491b509995aa160361e23a9db488d5b97c87e6648cc733501b06eda77") - # version("0.4.35", sha256="65e086708ae56670676b7b2340ad82b901d8c9993d1241a839c8990bdb8d6212") - # version("0.4.34", sha256="d3a75ad667772309ade81350fa70c4a78028a920028800282e46d8383c0ee6bb") - # version("0.4.33", sha256="122a806e80fc1cd7d8ffaf9620701f2cb8e4fe22271c2cec53a9c60b30bd4c31") - # version("0.4.32", sha256="3fe36d596e4d640443c0a5c533845c74fbc4341e024d9bb1cd75cb49f5f419c2") + version("0.5.2", sha256="8e9de1e012dd65fc4a9eec8af4aa2bf6782767130a5d8e1c1e342b7d658280fe") + version("0.5.1", sha256="e74b1209517682075933f757d646b73040d09fe39ee3e9e4cd398407dd0902d2") + version("0.5.0", sha256="04cc2eeb2e7ce1916674cea03a7d75a59d583ddb779d5104e103a2798a283ce9") + version("0.4.38", sha256="ca1e63c488d505b9c92e81499e8b06cc1977319c50d64a0e58adbd2dae1a625c") + version("0.4.37", sha256="17a8444a931f26edda8ccbc921ab71c6bf46857287b1db186deebd357e526870") + version("0.4.36", sha256="442bfdf491b509995aa160361e23a9db488d5b97c87e6648cc733501b06eda77") + version("0.4.35", sha256="65e086708ae56670676b7b2340ad82b901d8c9993d1241a839c8990bdb8d6212") + version("0.4.34", sha256="d3a75ad667772309ade81350fa70c4a78028a920028800282e46d8383c0ee6bb") + version("0.4.33", sha256="122a806e80fc1cd7d8ffaf9620701f2cb8e4fe22271c2cec53a9c60b30bd4c31") + version("0.4.32", sha256="3fe36d596e4d640443c0a5c533845c74fbc4341e024d9bb1cd75cb49f5f419c2") version("0.4.31", sha256="022ea1347f9b21cbea31410b3d650d976ea4452a48ea7317a5f91c238031bf94") version("0.4.30", sha256="0ef9635c734d9bbb44fcc87df4f1c3ccce1cfcfd243572c80d36fcdf826fe1e6") version("0.4.29", sha256="3a8005f4f62d35a5aad7e3dbd596890b47c81cc6e34fcfe3dcb93b3ca7cb1246") @@ -149,6 +151,11 @@ class PyJaxlib(PythonPackage, CudaPackage, ROCmPackage): 
sha256="4dfb9f32d4eeb0a0fb3a6f4124c4170e3fe49511f1b768cd634c78d489962275", when="@:0.4.25", ) + patch( + "https://github.com/jax-ml/jax/commit/91cae595e427969251a79d1ab3d6d5392dd8e6a9.patch?full_index=1", + sha256="265c8f682df3f573f405b8e089827de81be402e999abd11bbf82e9e49e688152", + when="@0.5.0:0.5.2 ^cuda@12.3:", # PACKED_ALIGNMENT introduced maybe earlier? + ) # Might be able to be applied to earlier versions # backports https://github.com/abseil/abseil-cpp/pull/1732 -- GitLab From 711a1c97f48e4e07e16e89cfbe4e71d14f542c44 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eric=20M=C3=BCller?= <mueller@kip.uni-heidelberg.de> Date: Fri, 14 Mar 2025 16:15:42 +0100 Subject: [PATCH 03/11] feat(cudnn): copy recipes from upstream@develop (2025-03-14) --- packages/cudnn/package.py | 401 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 401 insertions(+) create mode 100644 packages/cudnn/package.py diff --git a/packages/cudnn/package.py b/packages/cudnn/package.py new file mode 100644 index 00000000..2d1a7b3b --- /dev/null +++ b/packages/cudnn/package.py @@ -0,0 +1,401 @@ +# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other +# Spack Project Developers. See the top-level COPYRIGHT file for details. +# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) + +import os +import platform + +from spack.package import * + +_versions = { + # cuDNN 9.2.0 + "9.2.0.82-12": { + "Linux-x86_64": "1362b4d437e37e92c9814c3b4065db5106c2e03268e22275a5869e968cee7aa8", + "Linux-aarch64": "24cc2a0308dfe412c02c7d41d4b07ec12dacb021ebf8c719de38eb77d22f68c1", + }, + "9.2.0.82-11": { + "Linux-x86_64": "99dcb3fa2bf7eed7f35b0f8e58e7d1f04d9a52e01e382efc1de16fed230d3b26" + }, + # cuDNN 8.9.7 + "8.9.7.29-12": { + "Linux-x86_64": "475333625c7e42a7af3ca0b2f7506a106e30c93b1aa0081cd9c13efb6e21e3bb", + "Linux-ppc64le": "8574d291b299f9cc0134304473c9933bd098cc717e8d0876f4aba9f9eebe1b76", + }, + "8.9.7.29-11": { + "Linux-x86_64": "a3e2509028cecda0117ce5a0f42106346e82e86d390f4bb9475afc976c77402e", + "Linux-ppc64le": "f23fd7d59f9d4f743fa926f317dab0d37f6ea21edb2726ceb607bea45b0f9f36", + }, + # cuDNN 8.9.5 + "8.9.5.30-12": { + "Linux-x86_64": "2a2eb89a2ab51071151c6082f1e816c702167a711a9372f9f73a7b5c4b06e01a", + "Linux-ppc64le": "38388ec3c99c6646aaf5c707985cd35e25c67f653d780c4081c2df5557ab665f", + "Linux-aarch64": "0491f7b02f55c22077eb678bf314c1f917524bd507cf5b658239bf98a47233a1", + }, + "8.9.5.30-11": { + "Linux-x86_64": "bbe10e3c08cd7e4aea1012213781e4fe270e1c908263444f567cafefb2cc6525", + "Linux-ppc64le": "d678f8b2903b95de7eeaef38890c5674705864ea049b2b63e90565f2c0ea682f", + }, + # cuDNN 8.9.0 + "8.9.0.131-12": { + "Linux-x86_64": "477631002be61022b60961cba0a501271507a93f81d6b08384bc320cb8706c98", + "Linux-ppc64le": "ff239e4cbbf21fa18104b62a887686e2197f820ad58817d62e509c735a331829", + "Linux-aarch64": "fab70f4fb3b933ff502200a1d954d2c6fc205ff9c9b1d271ea4c41e980a66596", + }, + "8.9.0.131-11": { + "Linux-x86_64": "3cb82c50723f14b41d43523f222cd52cc9d50b3ad67c380f4be51bd1133daa2d", + "Linux-ppc64le": "18778de490550c5b584e96560208e5e37678397037946e10a1c2824174c69725", + }, + # cuDNN 8.8.1 + "8.8.1.3-12": { + "Linux-x86_64": "79d77a769c7e7175abc7b5c2ed5c494148c0618a864138722c887f95c623777c", + "Linux-ppc64le": "b0e89021a846952cad8cfc674edce2883f6e344ebd47a2394f706b1136715bc7", + }, + "8.8.1.3-11": { + "Linux-x86_64": "af7584cae0cc5524b5913ef08c29ba6154113c60eb0a37a0590a91b515a8a8f9", + "Linux-ppc64le": "d086003d09d5388aa42142f07483a773aa74b602478b0933e24fc63f56f1658f", + }, + # cuDNN 8.7.0 + "8.7.0.84-11.8": { + "Linux-x86_64": 
"976c4cba7233c97ae74006afab5172976300ba40f5b250a21f8cf71f59c9f76d", + "Linux-ppc64le": "0433d6d8b6841298e049e8a542750aa330a6e046a52ad95fae0c2f75dabe5575", + "Linux-aarch64": "cf967f78dbf6c075243cc83aa18759e370db3754aa15b12a0a14e8bf67a3a9d4", + }, + # cuDNN 8.6.0 + "8.6.0.163-11.8": { + "Linux-x86_64": "bbc396df47294c657edc09c600674d608cb1bfc80b82dcf4547060c21711159e", + "Linux-ppc64le": "c8a25e7e3df1bb9c4e18a4f24dd5f25cfd4bbe8b7054e34008e53b2be4f58a80", + "Linux-aarch64": "a0202278d3cbd4f3adc3f7816bff6071621cb042b0903698b477acac8928ac06", + }, + # cuDNN 8.5.0 + "8.5.0.96-11.7": { + "Linux-x86_64": "5454a6fd94f008728caae9adad993c4e85ef36302e26bce43bea7d458a5e7b6d", + "Linux-ppc64le": "00373c3d5e0b536a5557d0d0eb50706777f213a222b4030e1b71b1bec43d205f", + "Linux-aarch64": "86780abbecd4634e7363fad1d000ae23b7905a5f8383bddbf7332c6934791dde", + }, + # cuDNN 8.4.0 + "8.4.0.27-11.6": { + "Linux-x86_64": "d19bdafd9800c79d29e6f6fffa9f9e2c10d1132d6c2ff10b1593e057e74dd050", + "Linux-ppc64le": "7ef72353331cf42b357f53cb4a4971fb07e2f0b2ae66e03d54933df52de411c8", + "Linux-aarch64": "3972ab37b6f0271274931f69c5675c3b61d16f8f5a2dedd422a5efd7b0f358e5", + }, + "8.4.0.27-10.2": { + "Linux-x86_64": "14c5e3ca4258271996d1fd959c42d17c582ce4d9aff451f84524469e784fd154" + }, + # cuDNN 8.3.3 + "8.3.3.40-11.5": { + "Linux-x86_64": "eabe96c75cf03ea4f5379894d914f1f8ae14ceab121989e84b0836d927fb7731", + "Linux-ppc64le": "eaedc8dea675767f9445c11d96e6b472110d2fed728db4179153ca7da6503083", + "Linux-aarch64": "83b1d21b0f6495dfdc2316e6d53489db8ab1b752e4e4d21caca0a08fb2136cdc", + }, + "8.3.3.40-10.2": { + "Linux-x86_64": "d8554f2b32e6295d5fc8f3ac25e68f94058b018c801dab9c143e36812f8926ab" + }, + # cuDNN 8.3.2 + "8.3.2.44-11.5": { + "Linux-x86_64": "5500953c08c5e5d1dddcfda234f9efbddcdbe43a53b26dc0a82c723fa170c457", + "Linux-ppc64le": "0581bce48023a3ee71c3a819aaefcabe693eca18b61e2521dc5f8e6e71567b1b", + "Linux-aarch64": "7eb8c96bfeec98e8aa7cea1e95633d2a9481fc99040eb0311d31bf137a7aa6ea", + }, + # cuDNN 8.3.1 + "8.3.1.22-11.5": { + "Linux-x86_64": "f5ff3c69b6a8a9454289b42eca1dd41c3527f70fcf49428eb80502bcf6b02f6e", + "Linux-ppc64le": "1d2419a20ee193dc6a3a0ba87e79f408286d3d317c9831cbc1f0b7a268c100b0", + "Linux-aarch64": "ff23a881366c0ee79b973a8921c6dd400628a321557550ad4e0a26a21caad263", + }, + # cuDNN 8.2.4 + "8.2.4.15-11.4": { + "Linux-x86_64": "0e5d2df890b9967efa6619da421310d97323565a79f05a1a8cb9b7165baad0d7", + "Linux-ppc64le": "af8749ca83fd6bba117c8bee31b787b7f204946e864294030ee0091eb7d3577e", + "Linux-aarch64": "48b11f19e9cd3414ec3c6c357ad228aebbd43282aae372d42cab2af67c32a08b", + }, + # cuDNN 8.2.0 + "8.2.0.53-11.3": { + "Linux-x86_64": "7a195dc93a7cda2bdd4d9b73958d259c784be422cd941a9a625aab75309f19dc", + "Linux-ppc64le": "cfe06735671a41a5e25fc7542d740177ac8eab1ab146bd30f19e0fa836895611", + "Linux-aarch64": "0f44af94eef7826dc7b41f92aade3d5210891cdb10858bc0a28ba7167909ab7c", + }, + "8.2.0.53-10.2": { + "Linux-x86_64": "6ecbc98b3795e940ce0831ffb7cd2c0781830fdd6b1911f950bcaf6d569f807c" + }, + # cuDNN 8.1.1 + "8.1.1.33-11.2": { + "Linux-x86_64": "98a8784e92862f20018d20c281b30d4a0cd951f93694f6433ccf4ae9c502ba6a", + "Linux-ppc64le": "c3e535a5d633ad8f4d50be0b6f8efd084c6c6ed3525c07cbd89fc508b1d76c7a", + "Linux-aarch64": "4f7e4f5698539659d51f28dff0da11e5445a5ae58439af1d8a8e9f2d93535245", + }, + "8.1.1.33-10.2": { + "Linux-x86_64": "2a4a7b99a6e9bfa690eb19bb41e49553f2a7a491a5b3abfcae900e166c5b6ebd" + }, + # cuDNN 8.1.0 + "8.1.0.77-11.2": { + "Linux-x86_64": "dbe82faf071d91ba9bcf00480146ad33f462482dfee56caf4479c1b8dabe3ecb", + "Linux-ppc64le": 
"0d3f8fa21959e9f94889841cc8445aecf41d2f3c557091b447313afb43034037", + "Linux-aarch64": "ba16ff486b68a8b50b69b32702612634954de529f39cfff68c12b8bfc1958499", + }, + "8.1.0.77-10.2": { + "Linux-x86_64": "c5bc617d89198b0fbe485156446be15a08aee37f7aff41c797b120912f2b14b4" + }, + # cuDNN 8.0.5 + "8.0.5.39-11.1": { + "Linux-x86_64": "1d046bfa79399dabcc6f6cb1507918754439442ea0ca9e0fbecdd446f9b00cce", + "Linux-aarch64": "0c3542c51b42131247cd9f839d0ebefe4e02bb46d1716be1682cb2919278085a", + }, + "8.0.5.39-11.0": { + "Linux-x86_64": "4e16ee7895deb4a8b1c194b812ba49586ef7d26902051401d3717511898a9b73", + "Linux-ppc64le": "05207a02c0b4f22464dbb0ee646693df4a70ae557640ba576ba8678c26393004", + }, + "8.0.5.39-10.2": { + "Linux-x86_64": "21f84c05c67bf1ec859e77c38ccd5bf154964fa1c308f449959be4c356e382f3", + "Linux-ppc64le": "ce128ea090b05e36d00ffe921e45982ca10e8207e40cfc2e0067d0f62d9b36f9", + }, + "8.0.5.39-10.1": { + "Linux-x86_64": "90908495298896b33aa95063a3471f93c36627d7ac01c17dc36d75c65eea4a00", + "Linux-ppc64le": "e43b10bb3932d5e7a598dcc726d16dc9938dd99dd319cd74b3420f3ed65fe5e0", + }, + # cuDNN 8.0.4 + "8.0.4.30-11.1": { + "Linux-x86_64": "8f4c662343afce5998ce963500fe3bb167e9a508c1a1a949d821a4b80fa9beab", + "Linux-ppc64le": "b4ddb51610cbae806017616698635a9914c3e1eb14259f3a39ee5c84e7106712", + }, + "8.0.4.30-11.0": { + "Linux-x86_64": "38a81a28952e314e21577432b0bab68357ef9de7f6c8858f721f78df9ee60c35", + "Linux-ppc64le": "8da8ed689b1a348182ddd3f59b6758a502e11dc6708c33f96e3b4a40e033d2e1", + }, + "8.0.4.30-10.2": { + "Linux-x86_64": "c12c69eb16698eacac40aa46b9ce399d4cd86efb6ff0c105142f8a28fcfb980e", + "Linux-ppc64le": "32a5b92f9e1ef2be90e10f220c4ab144ca59d215eb6a386e93597f447aa6507e", + }, + "8.0.4.30-10.1": { + "Linux-x86_64": "eb4b888e61715168f57a0a0a21c281ada6856b728e5112618ed15f8637487715", + "Linux-ppc64le": "690811bbf04adef635f4a6f480575fc2a558c4a2c98c85c7090a3a8c60dacea9", + }, + # cuDNN 8.0.3 + "8.0.3.33-11.0": { + "Linux-x86_64": "8924bcc4f833734bdd0009050d110ad0c8419d3796010cf7bc515df654f6065a", + "Linux-ppc64le": "c2d0519831137b43d0eebe07522edb4ef5d62320e65e5d5fa840a9856f25923d", + }, + "8.0.3.33-10.2": { + "Linux-x86_64": "b3d487c621e24b5711983b89bb8ad34f0378bdbf8a1a4b86eefaa23b19956dcc", + "Linux-ppc64le": "ff22c9c37af191c9104989d784427cde744cdde879bfebf3e4e55ca6a9634a11", + }, + "8.0.3.33-10.1": { + "Linux-x86_64": "4752ac6aea4e4d2226061610d6843da6338ef75a93518aa9ce50d0f58df5fb07", + "Linux-ppc64le": "c546175f6ec86a11ee8fb9ab5526fa8d854322545769a87d35b1a505992f89c3", + }, + # cuDNN 8.0.2 + "8.0.2.39-11.0": { + "Linux-x86_64": "672f46288b8edd98f8d156a4f1ff518201ca6de0cff67915ceaa37f6d6d86345", + "Linux-ppc64le": "b7c1ce5b1191eb007ba3455ea5f497fdce293a646545d8a6ed93e9bb06d7f057", + }, + "8.0.2.39-10.2": { + "Linux-x86_64": "c9cbe5c211360f3cfbc0fb104f0e9096b37e53f89392525679f049276b2f701f", + "Linux-ppc64le": "c32325ff84a8123491f2e58b3694885a9a672005bc21764b38874688c0e43262", + }, + "8.0.2.39-10.1": { + "Linux-x86_64": "82148a68bd6bdaab93af5e05bb1842b8ccb3ab7de7bed41f609a7616c102213d", + "Linux-ppc64le": "8196ec4f031356317baeccefbc4f61c8fccb2cf0bdef0a6431438918ddf68fb9", + }, + # cuDNN 8.0 + "8.0.0.180-11.0": { + "Linux-x86_64": "9e75ea70280a77de815e0bdc85d08b67e081bc99a708b574092142344d2ba07e", + "Linux-ppc64le": "1229e94731bbca63ee7f5a239f4e1838a51a301d896f3097fbf7377d74704060", + }, + "8.0.0.180-10.2": { + "Linux-x86_64": "0c87c12358ee2b99d57c2a8c7560e3bb93e54bb929f5f8bec4964a72a2bb261d", + "Linux-ppc64le": "59e4ad6db15fcc374976e8052fe39e3f30f34079710fb3c7751a64c853d9243f", + }, + # cuDNN 7.6.5 + 
"7.6.5.32-10.2": { + "Linux-x86_64": "600267f2caaed2fd58eb214ba669d8ea35f396a7d19b94822e6b36f9f7088c20", + "Linux-ppc64le": "7dc08b6ab9331bfd12207d4802c61db1ad7cace7395b67a6e7b16efa0335668b", + }, + "7.6.5.32-10.1": { + "Linux-x86_64": "7eaec8039a2c30ab0bc758d303588767693def6bf49b22485a2c00bf2e136cb3", + "Darwin-x86_64": "8ecce28a5ed388a2b9b2d239e08d7c550f53b79288e6d9e5eb4c152bfc711aff", + "Linux-ppc64le": "97b2faf73eedfc128f2f5762784d21467a95b2d5ba719825419c058f427cbf56", + }, + "7.6.5.32-10.0": { + "Linux-x86_64": "28355e395f0b2b93ac2c83b61360b35ba6cd0377e44e78be197b6b61b4b492ba", + "Darwin-x86_64": "6fa0b819374da49102e285ecf7fcb8879df4d0b3cc430cc8b781cdeb41009b47", + "Linux-ppc64le": "b1717f4570083bbfc6b8b59f280bae4e4197cc1cb50e9d873c05adf670084c5b", + }, + "7.6.5.32-9.2": { + "Linux-x86_64": "a2a2c7a8ba7b16d323b651766ee37dcfdbc2b50d920f73f8fde85005424960e4", + "Linux-ppc64le": "a11f44f9a827b7e69f527a9d260f1637694ff7c1674a3e46bd9ec054a08f9a76", + }, + "7.6.5.32-9.0": { + "Linux-x86_64": "bd0a4c0090d5b02feec3f195738968690cc2470b9bc6026e6fe8ff245cd261c8" + }, + # cuDNN 7.6.4 + "7.6.4.38-10.1": { + "Linux-x86_64": "32091d115c0373027418620a09ebec3658a6bc467d011de7cdd0eb07d644b099", + "Darwin-x86_64": "bfced062c3689ced2c1fb49c7d5052e6bc3da6974c1eb707e4dcf8cd209d4236", + "Linux-ppc64le": "f3615fea50986a4dfd05d7a0cf83396dfdceefa9c209e8bf9691e20a48e420ce", + }, + "7.6.4.38-10.0": { + "Linux-x86_64": "417bb5daf51377037eb2f5c87649000ca1b9cec0acb16cfe07cb1d3e9a961dbf", + "Darwin-x86_64": "af01ab841caec25087776a6b8fc7782883da12e590e24825ad1031f9ae0ed4b1", + "Linux-ppc64le": "c1725ad6bd7d7741e080a1e6da4b62eac027a94ac55c606cce261e3f829400bb", + }, + "7.6.4.38-9.2": { + "Linux-x86_64": "c79156531e641289b6a6952888b9637059ef30defd43c3cf82acf38d67f60a27", + "Linux-ppc64le": "98d8aae2dcd851558397a9a30b73242f257e1556be17c83650e63a0685969884", + }, + "7.6.4.38-9.0": { + "Linux-x86_64": "8db78c3623c192d4f03f3087b41c32cb0baac95e13408b5d9dabe626cb4aab5d" + }, + # cuDNN 7.6.3 + "7.6.3.30-10.1": { + "Linux-x86_64": "352557346d8111e2f954c494be1a90207103d316b8777c33e62b3a7f7b708961", + "Linux-ppc64le": "f274735a8fc31923d3623b1c3d2b1d0d35bb176687077c6a4d4353c6b900d8ee", + }, + # cuDNN 7.5.1 + "7.5.1.10-10.1": { + "Linux-x86_64": "2c833f43c9147d9a25a20947a4c5a5f5c33b2443240fd767f63b330c482e68e0", + "Linux-ppc64le": "a9e23bc83c970daec20874ccd1d8d80b648adf15440ecd0164818b330b1e2663", + }, + "7.5.1.10-10.0": { + "Linux-x86_64": "c0a4ec438920aa581dd567117b9c316745b4a451ac739b1e04939a3d8b229985", + "Linux-ppc64le": "d9205718da5fbab85433476f9ff61fcf4b889d216d6eea26753bbc24d115dd70", + }, + # cuDNN 7.5.0 + "7.5.0.56-10.1": { + "Linux-x86_64": "c31697d6b71afe62838ad2e57da3c3c9419c4e9f5635d14b683ebe63f904fbc8", + "Linux-ppc64le": "15415eb714ab86ab6c7531f2cac6474b5dafd989479b062776c670b190e43638", + }, + "7.5.0.56-10.0": { + "Linux-x86_64": "701097882cb745d4683bb7ff6c33b8a35c7c81be31bac78f05bad130e7e0b781", + "Linux-ppc64le": "f0c1cbd9de553c8e2a3893915bd5fff57b30e368ef4c964d783b6a877869e93a", + }, + # cuDNN 7.3.0 + "7.3.0.29-9.0": { + "Linux-x86_64": "403f9043ff2c7b2c5967454872275d07bca11fd41dfc7b21995eadcad6dbe49b" + }, + # cuDNN 7.2.1 + "7.2.1.38-9.0": { + "Linux-x86_64": "cf007437b9ac6250ec63b89c25f248d2597fdd01369c80146567f78e75ce4e37" + }, + # cuDNN 7.1.3 + "7.1.3-9.1": { + "Linux-x86_64": "dd616d3794167ceb923d706bf73e8d6acdda770751492b921ee6827cdf190228", + "Linux-ppc64le": "e3b4837f711b98a52faacc872a68b332c833917ef3cf87c0108f1d01af9b2931", + }, + # cuDNN 6.0 + "6.0-8.0": { + "Linux-x86_64": 
"9b09110af48c9a4d7b6344eb4b3e344daa84987ed6177d5c44319732f3bb7f9c" + }, + # cuDNN 5.1 + "5.1-8.0": { + "Linux-x86_64": "c10719b36f2dd6e9ddc63e3189affaa1a94d7d027e63b71c3f64d449ab0645ce" + }, +} + + +class Cudnn(Package): + """NVIDIA cuDNN is a GPU-accelerated library of primitives for deep + neural networks""" + + homepage = "https://developer.nvidia.com/cudnn" + + # Latest versions available at: + # https://developer.nvidia.com/rdp/cudnn-download + # Archived versions available at: + # https://developer.nvidia.com/rdp/cudnn-archive + # Note that download links don't work from command line, + # need to use modified URLs like in url_for_version. + maintainers("adamjstewart", "bvanessen") + + skip_version_audit = ["platform=darwin", "platform=windows"] + + license("MIT") + + for ver, packages in _versions.items(): + key = "{0}-{1}".format(platform.system(), platform.machine()) + pkg = packages.get(key) + cudnn_ver, cuda_ver = ver.split("-") + long_ver = "{0}-{1}".format(cudnn_ver, cuda_ver) + if pkg: + version(long_ver, sha256=pkg) + # Add constraints matching CUDA version to cuDNN version + # cuDNN builds for CUDA 11.x are compatible with all CUDA 11.x: + # https://docs.nvidia.com/deeplearning/cudnn/support-matrix/index.html#fntarg_2 + if Version(cuda_ver) >= Version("11"): + cuda_ver = Version(cuda_ver).up_to(1) + depends_on("cuda@{}".format(cuda_ver), when="@{}".format(long_ver)) + + def url_for_version(self, version): + # Get the system and machine arch for building the file path + sys = "{0}-{1}".format(platform.system(), platform.machine()) + # Munge it to match Nvidia's naming scheme + sys_key = sys.lower() + if version < Version("8.3.1"): + sys_key = ( + sys_key.replace("x86_64", "x64") + .replace("darwin", "osx") + .replace("aarch64", "aarch64sbsa") + ) + elif version < Version("8.8.0"): + sys_key = sys_key.replace("aarch64", "sbsa") + + if version >= Version("8.3.1"): + # NOTE: upload layout changed for 8.3.1, they include a 10.2 + # artifact for cuda@10.2 x86_64, but the runtime is only supported + # for cuda@11. See + # https://docs.nvidia.com/deeplearning/cudnn/release-notes/rel_8.html + # As such, hacking the `directory` to include the extra + # local_installers/11.5 is included as this may not happen again. + directory = version[:3] + ver = version[:4] + cuda = version[4:] + directory = "{0}/local_installers/{1}".format(directory, cuda) + elif version >= Version("7.2"): + directory = version[:3] + ver = version[:4] + cuda = version[4:] + elif version >= Version("7.1"): + directory = version[:3] + ver = version[:2] + cuda = version[3:] + elif version >= Version("7.0"): + directory = version[:3] + ver = version[0] + cuda = version[3:] + else: + directory = version[:2] + ver = version[:2] + cuda = version[2:] + + # 8.8.0 changed the base url again + if version >= Version("8.8.0"): + url = "https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/{0}/cudnn-{0}-{1}_cuda{2}-archive.tar.xz" + return url.format(sys_key, ver, cuda.up_to(1)) + # 8.5.0 removed minor from cuda version + elif version >= Version("8.5.0"): + url = "https://developer.download.nvidia.com/compute/redist/cudnn/v{0}/cudnn-{1}-{2}_cuda{3}-archive.tar.xz" + return url.format(directory, sys_key, ver, cuda.up_to(1)) + # 8.3.1 switched to xzip tarballs and reordered url parts. 
+ elif version >= Version("8.3.1"): + url = "https://developer.download.nvidia.com/compute/redist/cudnn/v{0}/cudnn-{1}-{2}_cuda{3}-archive.tar.xz" + return url.format(directory, sys_key, ver, cuda) + else: + url = "https://developer.download.nvidia.com/compute/redist/cudnn/v{0}/cudnn-{1}-{2}-v{3}.tgz" + return url.format(directory, cuda, sys_key, ver) + + def setup_run_environment(self, env): + # Package is not compiled, and does not work unless LD_LIBRARY_PATH is set + env.prepend_path("LD_LIBRARY_PATH", self.prefix.lib) + + if self.spec.satisfies("target=ppc64le: platform=linux"): + env.set("cuDNN_ROOT", os.path.join(self.prefix, "targets", "ppc64le-linux")) + + def install(self, spec, prefix): + install_tree(".", prefix) + + if spec.satisfies("target=ppc64le: platform=linux"): + target_lib = os.path.join(prefix, "targets", "ppc64le-linux", "lib") + if os.path.isdir(target_lib) and not os.path.isdir(prefix.lib): + symlink(target_lib, prefix.lib) + target_include = os.path.join(prefix, "targets", "ppc64le-linux", "include") + if os.path.isdir(target_include) and not os.path.isdir(prefix.include): + symlink(target_include, prefix.include) -- GitLab From 1b54bea24e7851523065cfd7bccddff4c46dd3d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eric=20M=C3=BCller?= <mueller@kip.uni-heidelberg.de> Date: Fri, 14 Mar 2025 16:15:44 +0100 Subject: [PATCH 04/11] feat(cudnn): add 9.8.0 --- packages/cudnn/package.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/packages/cudnn/package.py b/packages/cudnn/package.py index 2d1a7b3b..3ec59759 100644 --- a/packages/cudnn/package.py +++ b/packages/cudnn/package.py @@ -9,6 +9,14 @@ import platform from spack.package import * _versions = { + # cuDNN 9.8.0 + "9.8.0.87-12": { + "Linux-x86_64": "321b9b33bb1287404d93d5672d352f16feabc4b220ac6ae0b86e4b27f257dcf4", + "Linux-aarch64": "f03ece3ff07d1719f06218973a8797cec1be387cc317baab5bb118dc988199e7", + }, + "9.8.0.87-11": { + "Linux-x86_64": "cf4dfaef8311d987d640a322f668cd5240ac3e5302abe9617dd991b5b2532758" + }, # cuDNN 9.2.0 "9.2.0.82-12": { "Linux-x86_64": "1362b4d437e37e92c9814c3b4065db5106c2e03268e22275a5869e968cee7aa8", -- GitLab From 93a1c3d6eb9ba68cdc11edde56b6524487bd5f02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eric=20M=C3=BCller?= <mueller@kip.uni-heidelberg.de> Date: Fri, 14 Mar 2025 16:15:46 +0100 Subject: [PATCH 05/11] feat(py-tensorflow): copy recipes from upstream@develop (2025-03-14) --- ...uf-errors-when-using-system-protobuf.patch | 29 + packages/py-tensorflow/absl_neon.patch | 55 + ...w-empty-config-environment-variables.patch | 26 + packages/py-tensorflow/example_parsing.patch | 11 + .../py-tensorflow/null_linker_bin_path.patch | 15 + packages/py-tensorflow/package.py | 1008 +++++++++++++++++ packages/py-tensorflow/set_jit_true.patch | 22 + 7 files changed, 1166 insertions(+) create mode 100644 packages/py-tensorflow/0008-Fix-protobuf-errors-when-using-system-protobuf.patch create mode 100644 packages/py-tensorflow/absl_neon.patch create mode 100644 packages/py-tensorflow/allow-empty-config-environment-variables.patch create mode 100644 packages/py-tensorflow/example_parsing.patch create mode 100644 packages/py-tensorflow/null_linker_bin_path.patch create mode 100644 packages/py-tensorflow/package.py create mode 100644 packages/py-tensorflow/set_jit_true.patch diff --git a/packages/py-tensorflow/0008-Fix-protobuf-errors-when-using-system-protobuf.patch b/packages/py-tensorflow/0008-Fix-protobuf-errors-when-using-system-protobuf.patch new file mode 100644 index 00000000..201129fe --- 
/dev/null +++ b/packages/py-tensorflow/0008-Fix-protobuf-errors-when-using-system-protobuf.patch @@ -0,0 +1,29 @@ +From 2ea8d31a2a75de75f838b4650e1531c346dfa6fe Mon Sep 17 00:00:00 2001 +From: sclarkson <sc@lambdal.com> +Date: Thu, 12 Aug 2021 03:23:28 -0700 +Subject: [PATCH 8/8] Fix protobuf errors when using system protobuf + +When tensorflow and python protobuf use the same instance of +libprotobuf, pywrap_tensorflow must be imported before anything +else that would import protobuf definitions. +--- + tensorflow/python/__init__.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/tensorflow/python/__init__.py b/tensorflow/python/__init__.py +index 6efba380ca0..38d1eb55027 100644 +--- a/tensorflow/python/__init__.py ++++ b/tensorflow/python/__init__.py +@@ -37,8 +37,8 @@ import traceback + # go/tf-wildcard-import + # pylint: disable=wildcard-import,g-bad-import-order,g-import-not-at-top + +-from tensorflow.python.eager import context + from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow ++from tensorflow.python.eager import context + + # pylint: enable=wildcard-import + +-- +2.32.0 + diff --git a/packages/py-tensorflow/absl_neon.patch b/packages/py-tensorflow/absl_neon.patch new file mode 100644 index 00000000..595cce7b --- /dev/null +++ b/packages/py-tensorflow/absl_neon.patch @@ -0,0 +1,55 @@ +From 32321ebf22c32ed4bbc9f98b44f2a67fe6c86823 Mon Sep 17 00:00:00 2001 +From: Andrew Elble <aweits@rit.edu> +Date: Wed, 1 May 2024 18:46:42 -0400 +Subject: [PATCH] patch + +--- + third_party/absl/absl_neon.patch | 23 +++++++++++++++++++++++ + third_party/absl/workspace.bzl | 1 + + 2 files changed, 24 insertions(+) + create mode 100644 third_party/absl/absl_neon.patch + +diff --git a/third_party/absl/absl_neon.patch b/third_party/absl/absl_neon.patch +new file mode 100644 +index 000000000000..d4eb77bc3f86 +--- /dev/null ++++ b/third_party/absl/absl_neon.patch +@@ -0,0 +1,23 @@ ++From: Andrew Elble <aweits@rit.edu> ++Date: Thu, 25 Apr 2024 08:09:36 -0400 ++Subject: [PATCH] fix ++ ++--- ++ absl/base/config.h | 2 +- ++ 1 file changed, 1 insertion(+), 1 deletion(-) ++ ++diff --git a/absl/base/config.h b/absl/base/config.h ++index 5fa9f0efe5a4..bfedf4e1d7bd 100644 ++--- a/absl/base/config.h +++++ b/absl/base/config.h ++@@ -962,7 +962,7 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || ++ // https://llvm.org/docs/CompileCudaWithLLVM.html#detecting-clang-vs-nvcc-from-code ++ #ifdef ABSL_INTERNAL_HAVE_ARM_NEON ++ #error ABSL_INTERNAL_HAVE_ARM_NEON cannot be directly set ++-#elif defined(__ARM_NEON) && !defined(__CUDA_ARCH__) +++#elif defined(__ARM_NEON) && !(defined(__NVCC__) && defined(__CUDACC__)) ++ #define ABSL_INTERNAL_HAVE_ARM_NEON 1 ++ #endif ++ ++-- ++2.31.1 +diff --git a/third_party/absl/workspace.bzl b/third_party/absl/workspace.bzl +index 06f75166ce4b..56d146d65abe 100644 +--- a/third_party/absl/workspace.bzl ++++ b/third_party/absl/workspace.bzl +@@ -42,6 +42,7 @@ def repo(): + build_file = "//third_party/absl:com_google_absl.BUILD", + system_build_file = "//third_party/absl:system.BUILD", + system_link_files = SYS_LINKS, ++ patch_file = ["//third_party/absl:absl_neon.patch"], + strip_prefix = "abseil-cpp-{commit}".format(commit = ABSL_COMMIT), + urls = tf_mirror_urls("https://github.com/abseil/abseil-cpp/archive/{commit}.tar.gz".format(commit = ABSL_COMMIT)), + ) +-- +2.31.1 + diff --git a/packages/py-tensorflow/allow-empty-config-environment-variables.patch b/packages/py-tensorflow/allow-empty-config-environment-variables.patch new file 
mode 100644 index 00000000..92d536c0 --- /dev/null +++ b/packages/py-tensorflow/allow-empty-config-environment-variables.patch @@ -0,0 +1,26 @@ +From ac0fca8559c2384240a00599a46816bbb5afb93f Mon Sep 17 00:00:00 2001 +From: Thomas Dickerson <elfprince13@gmail.com> +Date: Tue, 11 Mar 2025 14:07:34 -0400 +Subject: [PATCH] Allow empty configuration values to be supplied from + environment + +For example it may be desirable for `CC_OPT_FLAGS` to be empty. +--- + configure.py | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/configure.py b/configure.py +index ec04fcfdd0cc67..ac19b856d7c914 100644 +--- a/configure.py ++++ b/configure.py +@@ -529,7 +529,9 @@ def get_from_env_or_user_or_default(environ_cp, var_name, ask_for_var, + string value for var_name + """ + var = environ_cp.get(var_name) +- if not var: ++ # an intentionally empty value in the ++ # environment is not the same as no value ++ if var is None: + var = get_input(ask_for_var) + print('\n') + if not var: diff --git a/packages/py-tensorflow/example_parsing.patch b/packages/py-tensorflow/example_parsing.patch new file mode 100644 index 00000000..b16fd6e3 --- /dev/null +++ b/packages/py-tensorflow/example_parsing.patch @@ -0,0 +1,11 @@ +--- a/tensorflow/core/kernels/example_parsing_ops.cc ++++ b/tensorflow/core/kernels/example_parsing_ops.cc +@@ -1218,7 +1218,7 @@ class DecodeJSONExampleOp : public OpKernel { + resolver_.get(), "type.googleapis.com/tensorflow.Example", &in, &out); + OP_REQUIRES(ctx, status.ok(), + errors::InvalidArgument("Error while parsing JSON: ", +- string(status.error_message()))); ++ string(status.message()))); + } + } + diff --git a/packages/py-tensorflow/null_linker_bin_path.patch b/packages/py-tensorflow/null_linker_bin_path.patch new file mode 100644 index 00000000..43912003 --- /dev/null +++ b/packages/py-tensorflow/null_linker_bin_path.patch @@ -0,0 +1,15 @@ +diff -ru a/third_party/gpus/cuda_configure.bzl b/third_party/gpus/cuda_configure.bzl +--- a/third_party/gpus/cuda_configure.bzl 2021-05-12 13:26:41.000000000 +0000 ++++ b/third_party/gpus/cuda_configure.bzl 2021-10-28 21:38:06.949271099 +0000 +@@ -1205,10 +1205,7 @@ + # TODO: when bazel stops adding '-B/usr/bin' by default, remove this + # flag from the CROSSTOOL completely (see + # https://github.com/bazelbuild/bazel/issues/5634) +- if should_download_clang: +- cuda_defines["%{linker_bin_path}"] = "" +- else: +- cuda_defines["%{linker_bin_path}"] = host_compiler_prefix ++ cuda_defines["%{linker_bin_path}"] = "" + + cuda_defines["%{extra_no_canonical_prefixes_flags}"] = "" + cuda_defines["%{unfiltered_compile_flags}"] = "" diff --git a/packages/py-tensorflow/package.py b/packages/py-tensorflow/package.py new file mode 100644 index 00000000..5e4207fb --- /dev/null +++ b/packages/py-tensorflow/package.py @@ -0,0 +1,1008 @@ +# Copyright Spack Project Developers. See COPYRIGHT file for details. 
+# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) + +import glob +import sys +import tempfile + +from spack.build_environment import optimization_flags +from spack.build_systems.python import PythonPipBuilder +from spack.package import * + +rocm_dependencies = [ + "hip", + "rocrand", + "rocblas", + "rocfft", + "hipfft", + "rccl", + "hipsparse", + "rocprim", + "llvm-amdgpu", + "hsa-rocr-dev", + "rocminfo", + "hipsolver", + "hiprand", + "rocsolver", + "hipsolver", + "hipblas", + "hipcub", + "rocm-core", + "roctracer-dev", + "miopen-hip", +] + + +class PyTensorflow(Package, CudaPackage, ROCmPackage, PythonExtension): + """TensorFlow is an open source machine learning framework for everyone.""" + + homepage = "https://www.tensorflow.org" + url = "https://github.com/tensorflow/tensorflow/archive/v2.3.1.tar.gz" + git = "https://github.com/tensorflow/tensorflow.git" + import_modules = ["tensorflow"] + + license("Apache-2.0") + maintainers("adamjstewart", "aweits") + + version( + "2.18.0-rocm-enhanced", + sha256="85f44bed166927b2e22db28f5c4e4538da22221fedd9c2f47c763c52a0e40814", + url="https://github.com/ROCm/tensorflow-upstream/archive/refs/tags/v2.18.0-rocm-enhanced.tar.gz", + ) + version("2.18.0", sha256="d7876f4bb0235cac60eb6316392a7c48676729860da1ab659fb440379ad5186d") + version("2.17.1", sha256="2d3cfb48510f92f3a52fb05b820481c6f066a342a9f5296fe26d72c4ea757700") + version("2.17.0", sha256="9cc4d5773b8ee910079baaecb4086d0c28939f024dd74b33fc5e64779b6533dc") + version("2.16.2", sha256="023849bf253080cb1e4f09386f5eb900492da2288274086ed6cfecd6d99da9eb") + version("2.16.1", sha256="c729e56efc945c6df08efe5c9f5b8b89329c7c91b8f40ad2bb3e13900bd4876d") + version( + "2.16.1-rocm-enhanced", + sha256="e1b63b1b5d5b014194ed33113c7fa7f26ecb8d36333282b8c550e795e0eb31c6", + url="https://github.com/ROCm/tensorflow-upstream/archive/refs/tags/v2.16.1-rocm-enhanced.tar.gz", + ) + version("2.15.1", sha256="f36416d831f06fe866e149c7cd752da410a11178b01ff5620e9f265511ed57cf") + version("2.15.0", sha256="9cec5acb0ecf2d47b16891f8bc5bc6fbfdffe1700bdadc0d9ebe27ea34f0c220") + version("2.14.1", sha256="6b31ed347ed7a03c45b906aa41628ac91c3db7c84cb816971400d470e58ba494") + version( + "2.14-rocm-enhanced", + git="https://github.com/ROCm/tensorflow-upstream.git", + branch="r2.14-rocm-enhanced-nohipblaslt-build", + ) + version("2.14.0", sha256="ce357fd0728f0d1b0831d1653f475591662ec5bca736a94ff789e6b1944df19f") + version("2.13.1", sha256="89c07aebd4f41fbe0d08cc88aef00305542134f2f16d3b62918dc3c1182f33e2") + version("2.13.0", sha256="e58c939079588623e6fa1d054aec2f90f95018266e0a970fd353a5244f5173dc") + version("2.12.1", sha256="6bc4600cc0b88e9e40f1800096f5bddbbd3b6e5527a030dea631b87f2ae46b5b") + version("2.12.0", sha256="c030cb1905bff1d2446615992aad8d8d85cbe90c4fb625cee458c63bf466bc8e") + version("2.11.1", sha256="624ed1cc170cdcc19e8a15d8cdde989a9a1c6b0534c90b38a6b2f06fb2963e5f") + version( + "2.11.0-rocm-enhanced", + sha256="0c4ee8d83bc72215cbc1a5cd3e88cde1a9cf7304237d3e3d8d105ff09827d903", + url="https://github.com/ROCmSoftwarePlatform/tensorflow-upstream/archive/refs/tags/v2.11.0-rocm-enhanced.tar.gz", + ) + version("2.11.0", sha256="99c732b92b1b37fc243a559e02f9aef5671771e272758aa4aec7f34dc92dac48") + version("2.10.1", sha256="622a92e22e6f3f4300ea43b3025a0b6122f1cc0e2d9233235e4c628c331a94a3") + version("2.10.0", sha256="b5a1bb04c84b6fe1538377e5a1f649bb5d5f0b2e3625a3c526ff3a8af88633e8") + version("2.9.3", sha256="59d09bd00eef6f07477eea2f50778582edd4b7b2850a396f1fd0c646b357a573") + version("2.9.2", 
sha256="8cd7ed82b096dc349764c3369331751e870d39c86e73bbb5374e1664a59dcdf7") + version("2.9.1", sha256="6eaf86ead73e23988fe192da1db68f4d3828bcdd0f3a9dc195935e339c95dbdc") + version("2.9.0", sha256="8087cb0c529f04a4bfe480e49925cd64a904ad16d8ec66b98e2aacdfd53c80ff") + version("2.8.4", sha256="c08a222792bdbff9da299c7885561ee27b95d414d1111c426efac4ccdce92cde") + version("2.8.3", sha256="4b7ecbe50b36887e1615bc2a582cb86df1250004d8bb540e18336d539803b5a7") + version("2.8.2", sha256="b3f860c02c22a30e9787e2548ca252ab289a76b7778af6e9fa763d4aafd904c7") + version("2.8.1", sha256="4b487a63d6f0c1ca46a2ac37ba4687eabdc3a260c222616fa414f6df73228cec") + version("2.8.0", sha256="66b953ae7fba61fd78969a2e24e350b26ec116cf2e6a7eb93d02c63939c6f9f7") + version( + "2.7.4-rocm-enhanced", + sha256="45b79c125edfdc008274f1b150d8b5a53b3ff4713fd1ad1ff4738f515aad8191", + url="https://github.com/ROCmSoftwarePlatform/tensorflow-upstream/archive/refs/tags/v2.7.4-rocm-enhanced.tar.gz", + ) + version("2.7.4", sha256="75b2e40a9623df32da16d8e97528f5e02e4a958e23b1f2ee9637be8eec5d021b") + version("2.7.3", sha256="b576c2e124cd6d4d04cbfe985430a0d955614e882172b2258217f0ec9b61f39b") + version("2.7.2", sha256="b3c8577f3b7cc82368ff7f9315821d506abd2f716ea6692977d255b7d8bc54c0") + version("2.7.1", sha256="abebe2cf5ca379e18071693ca5f45b88ade941b16258a21cc1f12d77d5387a21") + version("2.7.0", sha256="bb124905c7fdacd81e7c842b287c169bbf377d29c74c9dacc04f96c9793747bb") + version("2.6.5", sha256="305da42845ac584a42494e521c92a88ce92ee47d93022d4c0bb45180b5c19a8c") + version("2.6.4", sha256="6a9e54f46039ef0a6f0a1adf19befa510044d3203d1e124dba8318ec4b1e0210") + version("2.6.3", sha256="7a71dde0987677b9512b202eb6ae119e0e308b1ea15b66dcfce001a44873997b") + version("2.6.2", sha256="e68c1d346fc3d529653530ca346b2c62f5b31bd4fcca7ffc9c65bb39ab2f6ed3") + version("2.6.1", sha256="8e457f617bc2eb43de2a51900e7922b60a8107e2524b2576438f1acccee1d043") + version("2.6.0", sha256="41b32eeaddcbc02b0583660bcf508469550e4cd0f86b22d2abe72dfebeacde0f") + version("2.5.3", sha256="58d69b7163f7624debc243750976d27fa7dddbc6fb7c5215aec94732bcc670e1") + version("2.5.2", sha256="bcccc6ba0b8ac1d10d3302f766eed71911acecc0bc43d0bd27d97a1e7ce275a8") + version("2.5.1", sha256="8d2728e155a3aa6befd9cb3d0980fabd25e2142d124f8f6b6c78cdf17ff79da5") + version("2.5.0", sha256="233875ea27fc357f6b714b2a0de5f6ff124b50c1ee9b3b41f9e726e9e677b86c") + version( + "2.4.4", + sha256="f1abc3ed92c3ce955db2a7db5ec422a3a98f015331183194f97b99fe77a09bb4", + deprecated=True, + ) + version( + "2.4.3", + sha256="cafd520c753f8755a9eb1262932f685dc722d8658f08373f8ec88d8acd58d7d4", + deprecated=True, + ) + version( + "2.4.2", + sha256="edc88da97277906513d53eeee57997a2036fa32ac1f1937730301764fa06cdc0", + deprecated=True, + ) + version( + "2.4.1", + sha256="f681331f8fc0800883761c7709d13cda11942d4ad5ff9f44ad855e9dc78387e0", + deprecated=True, + ) + version( + "2.4.0", + sha256="26c833b7e1873936379e810a39d14700281125257ddda8cd822c89111db6f6ae", + deprecated=True, + ) + version( + "2.3.4", + sha256="195947838b0918c15d79bc6ed85ff714b24d6d564b4d07ba3de0b745a2f9b656", + deprecated=True, + ) + version( + "2.3.3", + sha256="b91e5bcd373b942c4a62c6bcb7ff6f968b1448152b82f54a95dfb0d8fb9c6093", + deprecated=True, + ) + version( + "2.3.2", + sha256="21a703d2e68cd0677f6f9ce329198c24fd8203125599d791af9f1de61aadf31f", + deprecated=True, + ) + version( + "2.3.2", + sha256="21a703d2e68cd0677f6f9ce329198c24fd8203125599d791af9f1de61aadf31f", + deprecated=True, + ) + version( + "2.3.1", + 
sha256="ee534dd31a811f7a759453567257d1e643f216d8d55a25c32d2fbfff8153a1ac", + deprecated=True, + ) + version( + "2.3.0", + sha256="2595a5c401521f20a2734c4e5d54120996f8391f00bb62a57267d930bce95350", + deprecated=True, + ) + version( + "2.2.3", + sha256="5e6c779ca8392864d436d88893461dcce783c3a8d46dcb2b2f2ee8ece3cc4538", + deprecated=True, + ) + version( + "2.2.2", + sha256="fb4b5d26c5b983350f7ce8297b71176a86a69e91faf66e6ebb1e58538ad3bb51", + deprecated=True, + ) + version( + "2.2.1", + sha256="e6a28e64236d729e598dbeaa02152219e67d0ac94d6ed22438606026a02e0f88", + deprecated=True, + ) + version( + "2.2.0", + sha256="69cd836f87b8c53506c4f706f655d423270f5a563b76dc1cfa60fbc3184185a3", + deprecated=True, + ) + + depends_on("c", type="build") + depends_on("cxx", type="build") + + variant("mkl", default=False, description="Build with MKL support") + variant("jemalloc", default=False, description="Build with jemalloc as malloc support") + variant("gcp", default=False, description="Build with Google Cloud Platform support") + variant( + "hdfs", default=False, when="@:2.17", description="Build with Hadoop File System support" + ) + variant( + "aws", default=False, when="@:2.17", description="Build with Amazon AWS Platform support" + ) + variant("xla", default=sys.platform != "darwin", description="Build with XLA JIT support") + variant("gdr", default=False, description="Build with GDR support") + variant("verbs", default=False, description="Build with libverbs support") + variant("ngraph", default=False, description="Build with Intel nGraph support") + variant("opencl", default=False, description="Build with OpenCL SYCL support") + variant("computecpp", default=False, description="Build with ComputeCPP support") + variant( + "tensorrt", default=False, description="Build with TensorRT support" + ) # TODO: enable when TensorRT in Spack + variant("cuda", default=sys.platform != "darwin", description="Build with CUDA support") + variant( + "nccl", default=sys.platform.startswith("linux"), description="Enable NVIDIA NCCL support" + ) + variant("mpi", default=False, description="Build with MPI support") + variant("android", default=False, description="Configure for Android builds") + variant("ios", default=False, description="Build with iOS support (macOS only)") + variant("monolithic", default=False, description="Static monolithic build") + variant("numa", default=False, description="Build with NUMA support") + variant( + "dynamic_kernels", + default=sys.platform.startswith("linux"), + description="Build kernels into separate shared objects", + ) + + extends("python") + + with default_args(type="build"): + # See .bazelversion + depends_on("bazel@6.5.0", when="@2.16:") + depends_on("bazel@6.1.0", when="@2.14:2.15") + depends_on("bazel@5.3.0", when="@2.11:2.13") + depends_on("bazel@5.1.1", when="@2.10") + # See _TF_MIN_BAZEL_VERSION and _TF_MAX_BAZEL_VERSION in configure.py + depends_on("bazel@4.2.2:5.99.0", when="@2.9") + depends_on("bazel@4.2.1:4.99.0", when="@2.8") + depends_on("bazel@3.7.2:4.99.0", when="@2.7") + depends_on("bazel@3.7.2:3.99.0", when="@2.5:2.6") + depends_on("bazel@3.1.0:3.99.0", when="@2.3:2.4") + depends_on("bazel@2.0.0", when="@2.2") + + # tensorflow/tools/pip_package/build_pip_package.sh + depends_on("patchelf", when="@2.13: platform=linux") + # https://github.com/tensorflow/tensorflow/issues/60179#issuecomment-1491238631 + depends_on("coreutils", when="@2.13: platform=darwin") + + depends_on("swig") + depends_on("py-pip") + depends_on("py-wheel") + + with default_args(type=("build", 
"run")): + # Python support based on wheel availability + depends_on("python@3.9:3.12", when="@2.16:") + depends_on("python@3.9:3.11", when="@2.14:2.15") + depends_on("python@3.8:3.11", when="@2.12:2.13") + depends_on("python@:3.10", when="@2.8:2.11") + depends_on("python@:3.9", when="@2.5:2.7") + depends_on("python@:3.8", when="@2.2:2.4") + + # Listed under REQUIRED_PACKAGES in tensorflow/tools/pip_package/setup.py + depends_on("py-absl-py@1:", when="@2.9:") + depends_on("py-absl-py@0.4:", when="@2.7:2.8") + depends_on("py-absl-py@0.10:0", when="@2.4:2.6") + depends_on("py-absl-py@0.7:", when="@:2.3") + depends_on("py-astunparse@1.6:", when="@2.7:") + depends_on("py-astunparse@1.6.3:1.6", when="@2.4:2.6") + depends_on("py-astunparse@1.6.3", when="@2.2:2.3") + depends_on("py-flatbuffers@24.3.25:", when="@2.17:") + depends_on("py-flatbuffers@23.5.26:", when="@2.14:") + depends_on("py-flatbuffers@23.1.21:", when="@2.13") + depends_on("py-flatbuffers@2:", when="@2.10:2.12") + depends_on("py-flatbuffers@1.12:1", when="@2.9") + depends_on("py-flatbuffers@1.12:", when="@2.8") + depends_on("py-flatbuffers@1.12:2", when="@2.7") + depends_on("py-flatbuffers@1.12", when="@2.4:2.6") + depends_on("py-gast@0.2.1:0.4,0.5.3:", when="@2.14:") + depends_on("py-gast@0.2.1:0.4.0", when="@2.9:2.13") + depends_on("py-gast@0.2.1:", when="@2.8") + depends_on("py-gast@0.2.1:0.4", when="@2.7") + depends_on("py-gast@0.4.0", when="@2.5:2.6") + depends_on("py-gast@0.3.3", when="@2.2:2.4") + depends_on("py-gast@0.2.2", when="@:2.1") + depends_on("py-google-pasta@0.1.1:", when="@2.7:") + depends_on("py-google-pasta@0.2:0", when="@2.4:2.6") + depends_on("py-google-pasta@0.1.8:", when="@2.2:2.3") + depends_on("py-google-pasta@0.1.6:", when="@:2.1") + depends_on("py-libclang@13:", when="@2.9:") + depends_on("py-libclang@9.0.1:", when="@2.7:2.8") + depends_on("py-opt-einsum@2.3.2:", when="@:2.3,2.7:") + depends_on("py-opt-einsum@3.3", when="@2.4:2.6") + depends_on("py-packaging", when="@2.9:") + depends_on("py-protobuf@3.20.3:4.20,4.21.6:5", when="@2.18:") + depends_on("py-protobuf@3.20.3:4.20,4.21.6:4", when="@2.12:2.17") + depends_on("py-protobuf@3.9.2:", when="@2.3:2.11") + depends_on("py-protobuf@3.8.0:", when="@:2.2") + # https://github.com/protocolbuffers/protobuf/issues/10051 + # https://github.com/tensorflow/tensorflow/issues/56266 + depends_on("py-protobuf@:3.19", when="@:2.11") + depends_on("py-requests@2.21:2", when="@2.16:") + depends_on("py-requests") + depends_on("py-setuptools") + depends_on("py-six@1.12:", when="@:2.3,2.7:") + depends_on("py-six@1.15", when="@2.4:2.6") + depends_on("py-termcolor@1.1:", when="@:2.3,2.7:") + depends_on("py-termcolor@1.1", when="@2.4:2.6") + depends_on("py-typing-extensions@3.6.6:", when="@2.7:2.12,2.14:") + depends_on("py-typing-extensions@3.6.6:4.5", when="@2.13") + depends_on("py-typing-extensions@3.7.4:3.7", when="@2.4:2.6") + depends_on("py-wrapt@1.11:", when="@2.7:2.11,2.13,2.16:") + depends_on("py-wrapt@1.11:1.14", when="@2.12,2.14:2.15") + depends_on("py-wrapt@1.12.1:1.12", when="@2.4:2.6") + depends_on("py-wrapt@1.11.1:", when="@:2.3") + + # TODO: add packages for these dependencies + # depends_on('py-tensorflow-io-gcs-filesystem@0.23.1:', when='@2.8:') + # depends_on('py-tensorflow-io-gcs-filesystem@0.21:', when='@2.7') + + if sys.byteorder == "little": + # Only builds correctly on little-endian machines + depends_on("py-grpcio@1.24.3:1", when="@2.7:") + depends_on("py-grpcio@1.37.0:1", when="@2.6") + depends_on("py-grpcio@1.34", when="@2.5") + 
depends_on("py-grpcio@1.32", when="@2.4") + depends_on("py-grpcio@1.8.6:", when="@:2.3") + + for minor_ver in range(2, 19): + depends_on("py-tensorboard@2.{}".format(minor_ver), when="@2.{}".format(minor_ver)) + + # TODO: support circular run-time dependencies + # depends_on('py-keras') + + depends_on("py-numpy@1.26:2.0", when="@2.18:") + depends_on("py-numpy@1.23.5:", when="@2.14:2.17") + depends_on("py-numpy@1.22:1.24.3", when="@2.13") + depends_on("py-numpy@1.22:1.23", when="@2.12") + depends_on("py-numpy@1.20:", when="@2.8:2.11") + depends_on("py-numpy@1.14.5:", when="@2.7") + depends_on("py-numpy@1.19.2:1.19", when="@2.4:2.6") + # https://github.com/tensorflow/tensorflow/issues/40688 + depends_on("py-numpy@1.16.0:1.18", when="@:2.3") + # https://github.com/tensorflow/tensorflow/issues/67291 + depends_on("py-numpy@:1", when="@:2.17") + depends_on("py-h5py@3.11:", when="@2.18:") + depends_on("py-h5py@3.10:", when="@2.16:") + depends_on("py-h5py@2.9:", when="@2.7:2.15") + depends_on("py-h5py@3.1", when="@2.5:2.6") + depends_on("py-h5py@2.10", when="@2.2:2.4") + depends_on("py-h5py@:2.10.0", when="@2.1.3:2.1") + # propagate the mpi variant setting for h5py/hdf5 to avoid unexpected crashes + depends_on("py-h5py+mpi", when="@2.1.3:+mpi") + depends_on("py-h5py~mpi", when="@2.1.3:~mpi") + depends_on("hdf5+mpi", when="@2.1.3:+mpi") + depends_on("hdf5~mpi", when="@2.1.3:~mpi") + depends_on("py-ml-dtypes@0.4", when="@2.18:") + depends_on("py-ml-dtypes@0.3.1:0.4", when="@2.17") + depends_on("py-ml-dtypes@0.3.1:0.3", when="@2.15.1:2.16") + depends_on("py-ml-dtypes@0.2", when="@2.15.0") + depends_on("py-ml-dtypes@0.2.0", when="@2.14") + + # Historical dependencies + depends_on("py-jax@0.3.15:", when="@2.12") + depends_on("py-keras-preprocessing@1.1.1:", when="@2.7:2.10") + depends_on("py-keras-preprocessing@1.1.2:1.1", when="@2.4:2.6") + depends_on("py-keras-preprocessing@1.1.1:1.1", when="@2.3") + depends_on("py-keras-preprocessing@1.1:", when="@2.2") + depends_on("py-scipy@1.4.1", when="@2.2.0,2.3.0") + depends_on("py-wheel@0.32:0", when="@2.7") + depends_on("py-wheel@0.35:0", when="@2.4:2.6") + depends_on("py-wheel@0.26:", when="@:2.3") + + # TODO: add packages for some of these dependencies + depends_on("mkl", when="+mkl") + depends_on("curl", when="+gcp") + # depends_on('computecpp', when='+opencl+computecpp') + # depends_on('trisycl', when='+opencl~computepp') + with when("+cuda"): + # https://www.tensorflow.org/install/source#gpu + depends_on("cuda@12.3:", when="@2.16:") + depends_on("cuda@12.2:", when="@2.15:") + depends_on("cuda@11.8:", when="@2.12:") + depends_on("cuda@11.2:", when="@2.5:") + depends_on("cuda@11.0:", when="@2.4:") + depends_on("cuda@10.1:", when="@2.1:") + + depends_on("cuda@:11.7.0", when="@:2.9") + depends_on("cuda@:11.4", when="@2.4:2.7") + depends_on("cuda@:10.2", when="@:2.3") + + depends_on("cudnn@8.9:8", when="@2.15:") + depends_on("cudnn@8.7:8", when="@2.14:") + depends_on("cudnn@8.6:8", when="@2.12:") + depends_on("cudnn@8.1:8", when="@2.5:") + depends_on("cudnn@8.0:8", when="@2.4:") + depends_on("cudnn@7.6:8", when="@2.1:") + + depends_on("cudnn@:7", when="@:2.2") + # depends_on('tensorrt', when='+tensorrt') + depends_on("nccl", when="+nccl+cuda") + depends_on("mpi", when="+mpi") + # depends_on('android-ndk@10:18', when='+android') + # depends_on('android-sdk', when='+android') + + with when("+rocm"): + for pkg_dep in rocm_dependencies: + depends_on(f"{pkg_dep}@6.0:", when="@2.14:") + depends_on(pkg_dep) + + # Check configure and configure.py to see when these 
variants are supported + conflicts("+mkl", when="platform=darwin", msg="Darwin is not yet supported") + conflicts( + "+jemalloc", + when="platform=darwin", + msg="Currently jemalloc is only support on Linux platform", + ) + conflicts("+opencl", when="platform=windows") + conflicts("+computecpp", when="~opencl") + conflicts( + "+cuda", + when="+rocm", + msg="CUDA / ROCm are mututally exclusive. At most 1 GPU platform can be configured", + ) + conflicts("+cuda", when="platform=darwin", msg="There is no GPU support for macOS") + conflicts( + "cuda_arch=none", + when="+cuda", + msg="Must specify CUDA compute capabilities of your GPU, see https://developer.nvidia.com/cuda-gpus", + ) + conflicts("cuda_arch=20", msg="TensorFlow only supports compute capabilities >= 3.5") + conflicts("cuda_arch=30", msg="TensorFlow only supports compute capabilities >= 3.5") + conflicts("cuda_arch=32", msg="TensorFlow only supports compute capabilities >= 3.5") + conflicts("+tensorrt", when="~cuda") + conflicts( + "+tensorrt", + when="platform=darwin", + msg="Currently TensorRT is only supported on Linux platform", + ) + conflicts("+nccl", when="~cuda~rocm") + conflicts( + "+nccl", when="platform=darwin", msg="Currently NCCL is only supported on Linux platform" + ) + conflicts("+mpi", when="platform=windows") + conflicts("+ios", when="platform=linux", msg="iOS support only available on macOS") + # https://github.com/tensorflow/tensorflow/pull/45404 + conflicts("platform=darwin target=aarch64:", when="@:2.4") + # https://github.com/tensorflow/tensorflow/pull/39225 + conflicts("target=aarch64:", when="@:2.2") + + rocm_versions = [ + "2.7.4-rocm-enhanced", + "2.11.0-rocm-enhanced", + "2.14-rocm-enhanced", + "2.16.1-rocm-enhanced", + "2.18.0-rocm-enhanced", + ] + rocm_conflicts = [ + ":2.7.4-a", + "2.7.4.0:2.11.0-a", + "2.11.0.0:2.14-a", + "2.14-z:2.16.1-a", + "2.16.1-z:2.18.0-a", + "2.18.0-z:", + ] + conflicts("~rocm", when=f"@{','.join(rocm_versions)}") + conflicts("+rocm", when=f"@{','.join(rocm_conflicts)}") + + # wheel 0.40 upgrades vendored packaging, trips over tensorflow-io-gcs-filesystem identifier + conflicts("^py-wheel@0.40:", when="@2.11:2.13") + + # https://www.tensorflow.org/install/source#tested_build_configurations + # https://github.com/tensorflow/tensorflow/issues/70199 + # (-mavx512fp16 exists in gcc@12:) + conflicts("%gcc@13:", when="@:2.14") + conflicts("%gcc@:11", when="@2.17:") + conflicts("%gcc@:9.3.0", when="@2.9:") + conflicts("%gcc@:7.3.0") + # https://github.com/tensorflow/tensorflow/issues/76908 + conflicts("%clang@:15", when="@2.18:") + # https://github.com/tensorflow/tensorflow/issues/62416 + conflicts("%clang@17:", when="@:2.14") + + # zlib is vendored and downloaded directly from zlib.org (or mirrors), but + # old downloads are removed from that site immediately after a new release. + # If the tf mirrors don't work, make sure the fallback is to something existing. 
+ patch( + "https://github.com/tensorflow/tensorflow/commit/76b9fa22857148a562f3d9b5af6843402a93c15b.patch?full_index=1", + sha256="f9e26c544da729cfd376dbd3b096030e3777d3592459add1f3c78b1b9828d493", + when="@2.9:2.10.0", + ) + + # can set an upper bound if/when + # https://github.com/tensorflow/tensorflow/pull/89032 is merged + patch( + "allow-empty-config-environment-variables.patch", + sha256="e061875c2ca9c157a7837d02afdd25205817def3460745523d5089bbeaa77d29", + when="@1.4.0:", + ) + + # Version 2.10 produces an error related to cuBLAS: + # E tensorflow/stream_executor/cuda/cuda_blas.cc:2981] Unable to register + # cuBLAS factory: Attempting to register factory for plugin cuBLAS when one + # has already been registered + # See https://github.com/tensorflow/tensorflow/issues/57663 + # This is fixed for 2.11 but 2.10 needs the following patch. + patch( + "https://github.com/tensorflow/tensorflow/pull/56691.patch?full_index=1", + sha256="d635ea6d6c1571505871d0caba3e2cd939ea0f4aff972095d552913a8109def3", + when="@2.10", + ) + + # needed for protobuf 3.16+ + patch("example_parsing.patch", when="@:2.7 ^protobuf@3.16:") + + # allow linker to be found in PATH + # https://github.com/tensorflow/tensorflow/issues/39263 + patch("null_linker_bin_path.patch", when="@2.5:") + + # Reset import order to that of 2.4. Part of + # https://bugs.gentoo.org/800824#c3 From the patch: + # When tensorflow and python protobuf use the same instance of libprotobuf, + # pywrap_tensorflow must be imported before anything else that would import + # protobuf definitions. + patch("0008-Fix-protobuf-errors-when-using-system-protobuf.patch", when="@2.5:2.6") + + # remove unnecessary symbol ignores that cause errors with new compilers + # https://github.com/tensorflow/tensorflow/issues/62416 + patch( + "https://raw.githubusercontent.com/getsolus/packages/dfc56ba57a8af8233a635e309b499ff5d27992f4/packages/t/tensorflow/files/fix-clang-18.diff", + sha256="10d730b59284843d6c9ba92668b068582e51d5cdfc7ccfe8e26791ad0f41d4ac", + when="@2.15", + ) + + # see https://github.com/tensorflow/tensorflow/issues/62490 + # and https://github.com/abseil/abseil-cpp/issues/1665 + patch("absl_neon.patch", when="@2.16.1:2.17 target=aarch64:") + + # reverting change otherwise the c467913 commit patch won't apply + patch( + "https://github.com/ROCm/tensorflow-upstream/commit/fd6b0a4356c66f5f30cedbc62b24f18d9e32806f.patch?full_index=1", + sha256="43f1519dfc618b4fb568f760d559c063234248fa12c47a35c1cf3b7114756424", + when="@2.16.1-rocm-enhanced +rocm", + reverse=True, + ) + patch( + "https://github.com/ROCm/tensorflow-upstream/commit/c467913bf4411ce2681391f37a9adf6031d23c2c.patch?full_index=1", + sha256="82554a84d19d99180a6bec274c6106dd217361e809b446e2e4bc4b6b979bdf7a", + when="@2.16.1-rocm-enhanced +rocm", + ) + patch( + "https://github.com/ROCm/tensorflow-upstream/commit/f4f4e8698b90755b0b5ea2d9da1933b0b988b111.patch?full_index=1", + sha256="a4c0fd62a0af3ba113c8933fa531dd17fa6667e507202a144715cd87fbdaf476", + when="@2.16.1-rocm-enhanced +rocm", + ) + patch( + "https://github.com/ROCm/tensorflow-upstream/commit/8b7fcccb2914078737689347540cb79ace579bbb.patch?full_index=1", + sha256="75a61a79ce3aae51fda920f677f4dc045374b20e25628626eb37ca19c3a3b4c4", + when="@2.16.1-rocm-enhanced +rocm", + ) + patch("set_jit_true.patch", when="@2.18.0-rocm-enhanced +rocm") + phases = ["configure", "build", "install"] + + def flag_handler(self, name, flags): + spec = self.spec + # ubuntu gcc has this workaround turned on by default in aarch64 + # and it causes issues with 
symbol relocation during link + # note, archspec doesn't currently ever report cortex_a53! + if ( + name == "ldflags" + and spec.target.family == "aarch64" + and "ubuntu" in spec.os + and spec.satisfies("%gcc") + and "cortex_a53" not in spec.target.name + ): + flags.append("-mno-fix-cortex-a53-843419") + + return (flags, None, None) + + # https://www.tensorflow.org/install/source + def setup_build_environment(self, env): + spec = self.spec + + # Please specify the location of python + env.set("PYTHON_BIN_PATH", python.path) + + # Please input the desired Python library path to use + env.set("PYTHON_LIB_PATH", python_platlib) + env.set("TF_PYTHON_VERSION", spec["python"].version.up_to(2)) + + # Ensure swig is in PATH or set SWIG_PATH + env.set("SWIG_PATH", spec["swig"].prefix.bin.swig) + + # Do you wish to build TensorFlow with MKL support? + if "+mkl" in spec: + env.set("TF_NEED_MKL", "1") + + # Do you wish to download MKL LIB from the web? + env.set("TF_DOWNLOAD_MKL", "0") + + # Please specify the location where MKL is installed + env.set("MKL_INSTALL_PATH", spec["mkl"].prefix) + else: + env.set("TF_NEED_MKL", "0") + + # Do you wish to build TensorFlow with jemalloc as malloc support? + if "+jemalloc" in spec: + env.set("TF_NEED_JEMALLOC", "1") + else: + env.set("TF_NEED_JEMALLOC", "0") + + # Do you wish to build TensorFlow with Google Cloud Platform support? + if "+gcp" in spec: + env.set("TF_NEED_GCP", "1") + else: + env.set("TF_NEED_GCP", "0") + + # Do you wish to build TensorFlow with Hadoop File System support? + if "+hdfs" in spec: + env.set("TF_NEED_HDFS", "1") + else: + env.set("TF_NEED_HDFS", "0") + + # Do you wish to build TensorFlow with Amazon AWS Platform support? + if "+aws" in spec: + env.set("TF_NEED_AWS", "1") + env.set("TF_NEED_S3", "1") + else: + env.set("TF_NEED_AWS", "0") + env.set("TF_NEED_S3", "0") + + # Do you wish to build TensorFlow with XLA JIT support? + if "+xla" in spec: + env.set("TF_ENABLE_XLA", "1") + else: + env.set("TF_ENABLE_XLA", "0") + + # Do you wish to build TensorFlow with GDR support? + if "+gdr" in spec: + env.set("TF_NEED_GDR", "1") + else: + env.set("TF_NEED_GDR", "0") + + # Do you wish to build TensorFlow with VERBS support? + if "+verbs" in spec: + env.set("TF_NEED_VERBS", "1") + else: + env.set("TF_NEED_VERBS", "0") + + # Do you wish to build TensorFlow with nGraph support? + if "+ngraph" in spec: + env.set("TF_NEED_NGRAPH", "1") + else: + env.set("TF_NEED_NGRAPH", "0") + + # Do you wish to build TensorFlow with OpenCL SYCL support? + if "+opencl" in spec: + env.set("TF_NEED_OPENCL_SYCL", "1") + env.set("TF_NEED_OPENCL", "1") + + # Please specify which C++ compiler should be used as the host + # C++ compiler + env.set("HOST_CXX_COMPILER", spack_cxx) + + # Please specify which C compiler should be used as the host + # C compiler + env.set("HOST_C_COMPILER", spack_cc) + + # Do you wish to build TensorFlow with ComputeCPP support? + if "+computecpp" in spec: + env.set("TF_NEED_COMPUTECPP", "1") + + # Please specify the location where ComputeCpp is installed + env.set("COMPUTECPP_TOOLKIT_PATH", spec["computecpp"].prefix) + else: + env.set("TF_NEED_COMPUTECPP", "0") + + # Please specify the location of the triSYCL include directory + env.set("TRISYCL_INCLUDE_DIR", spec["trisycl"].prefix.include) + else: + env.set("TF_NEED_OPENCL_SYCL", "0") + env.set("TF_NEED_OPENCL", "0") + + # Do you wish to build TensorFlow with ROCm support? 
+ if "+rocm" in spec: + env.set("TF_NEED_ROCM", "1") + env.set("TF_HIPBLASLT", "0") + env.set("MIOPEN_PATH", spec["miopen-hip"].prefix) + env.set("ROCTRACER_PATH", spec["roctracer-dev"].prefix) + env.set("LLVM_PATH", spec["llvm-amdgpu"].prefix) + for pkg_dep in rocm_dependencies: + pkg_dep_cap = pkg_dep.upper().replace("-", "_") + env.set(f"{pkg_dep_cap}_PATH", spec[pkg_dep].prefix) + env.set("TF_ROCM_AMDGPU_TARGETS", ",".join(self.spec.variants["amdgpu_target"].value)) + else: + env.set("TF_NEED_ROCM", "0") + + # Do you wish to build TensorFlow with CUDA support? + if "+cuda" in spec: + env.set("TF_NEED_CUDA", "1") + env.set("CUDA_NVCC", "1") + + # Do you want to use clang as CUDA compiler? + env.set("TF_CUDA_CLANG", "0") + + # Please specify which gcc nvcc should use as the host compiler + env.set("GCC_HOST_COMPILER_PATH", spack_cc) + + cuda_paths = [spec["cuda"].prefix, spec["cudnn"].prefix] + + # Do you wish to build TensorFlow with TensorRT support? + if "+tensorrt" in spec: + env.set("TF_NEED_TENSORRT", "1") + + cuda_paths.append(spec["tensorrt"].prefix) + + # Please specify the TensorRT version you want to use + env.set("TF_TENSORRT_VERSION", spec["tensorrt"].version.up_to(1)) + + # Please specify the location where TensorRT is installed + env.set("TENSORRT_INSTALL_PATH", spec["tensorrt"].prefix) + else: + env.set("TF_NEED_TENSORRT", "0") + env.unset("TF_TENSORRT_VERSION") + + # Please specify the CUDA SDK version you want to use + env.set("TF_CUDA_VERSION", spec["cuda"].version.up_to(2)) + + # Please specify the cuDNN version you want to use + env.set("TF_CUDNN_VERSION", spec["cudnn"].version.up_to(1)) + + if "+nccl" in spec: + cuda_paths.append(spec["nccl"].prefix) + + # Please specify the locally installed NCCL version to use + env.set("TF_NCCL_VERSION", spec["nccl"].version.up_to(1)) + + # Please specify the location where NCCL is installed + env.set("NCCL_INSTALL_PATH", spec["nccl"].prefix) + env.set("NCCL_HDR_PATH", spec["nccl"].prefix.include) + else: + env.unset("TF_NCCL_VERSION") + + # Please specify the comma-separated list of base paths to + # look for CUDA libraries and headers + env.set("TF_CUDA_PATHS", ",".join(cuda_paths)) + + # Please specify the location where CUDA toolkit is installed + env.set("CUDA_TOOLKIT_PATH", spec["cuda"].prefix) + + # Please specify the location where CUDNN library is installed + env.set("CUDNN_INSTALL_PATH", spec["cudnn"].prefix) + + # Please specify a list of comma-separated CUDA compute + # capabilities you want to build with. You can find the compute + # capability of your device at: + # https://developer.nvidia.com/cuda-gpus. + # Please note that each additional compute capability significantly + # increases your build time and binary size, and that TensorFlow + # only supports compute capabilities >= 3.5 + capabilities = CudaPackage.compute_capabilities(spec.variants["cuda_arch"].value) + env.set("TF_CUDA_COMPUTE_CAPABILITIES", ",".join(capabilities)) + env.set("HERMETIC_CUDA_COMPUTE_CAPABILITIES", ",".join(capabilities)) + else: + env.set("TF_NEED_CUDA", "0") + + # Do you want to use Clang to build TensorFlow? + if "%clang" in spec: + env.set("TF_NEED_CLANG", "1") + env.set("CLANG_COMPILER_PATH", spack_cc) + else: + env.set("TF_NEED_CLANG", "0") + + # Do you wish to download a fresh release of clang? (Experimental) + env.set("TF_DOWNLOAD_CLANG", "0") + + # Do you wish to build TensorFlow with MPI support? 
+ if "+mpi" in spec: + env.set("TF_NEED_MPI", "1") + + # Please specify the MPI toolkit folder + env.set("MPI_HOME", spec["mpi"].prefix) + else: + env.set("TF_NEED_MPI", "0") + env.unset("MPI_HOME") + + # Please specify optimization flags to use during compilation when + # bazel option '--config=opt' is specified + env.set("CC_OPT_FLAGS", optimization_flags(self.compiler, spec.target)) + + # Would you like to interactively configure ./WORKSPACE for + # Android builds? + if "+android" in spec: + env.set("TF_SET_ANDROID_WORKSPACE", "1") + + # Please specify the home path of the Android NDK to use + env.set("ANDROID_NDK_HOME", spec["android-ndk"].prefix) + env.set("ANDROID_NDK_API_LEVEL", spec["android-ndk"].version) + + # Please specify the home path of the Android SDK to use + env.set("ANDROID_SDK_HOME", spec["android-sdk"].prefix) + env.set("ANDROID_SDK_API_LEVEL", spec["android-sdk"].version) + + # Please specify the Android SDK API level to use + env.set("ANDROID_API_LEVEL", spec["android-sdk"].version) + + # Please specify an Android build tools version to use + env.set("ANDROID_BUILD_TOOLS_VERSION", spec["android-sdk"].version) + else: + env.set("TF_SET_ANDROID_WORKSPACE", "0") + + # Do you wish to build TensorFlow with iOS support? + if "+ios" in spec: + env.set("TF_CONFIGURE_IOS", "1") + else: + env.set("TF_CONFIGURE_IOS", "0") + + # set tmpdir to a non-NFS filesystem + # (because bazel uses ~/.cache/bazel) + # TODO: This should be checked for non-nfsy filesystem, but the current + # best idea for it is to check + # subprocess.call([ + # 'stat', '--file-system', '--format=%T', tmp_path + # ]) + # to not be nfs. This is only valid for Linux and we'd like to + # stay at least also OSX compatible + tmp_path = tempfile.mkdtemp(prefix="spack") + env.set("TEST_TMPDIR", tmp_path) + + def configure(self, spec, prefix): + # NOTE: configure script is interactive. If you set the appropriate + # environment variables, this interactivity is skipped. If you don't, + # Spack hangs during the configure phase. Use `spack build-env` to + # determine which environment variables must be set for a particular + # version. + configure() + + @run_after("configure") + def post_configure_fixes(self): + spec = self.spec + + if spec.satisfies("@2.17:") and ("patchelf" in spec): + filter_file( + "patchelf", + spec["patchelf"].prefix.bin.patchelf, + "tensorflow/tools/pip_package/build_pip_package.py", + string=True, + ) + + # make sure xla is actually turned off + if spec.satisfies("~xla"): + filter_file( + r"--define with_xla_support=true", + r"--define with_xla_support=false", + ".tf_configure.bazelrc", + ) + + if spec.satisfies("~android"): + # env variable is somehow ignored -> brute force + # TODO: find a better solution + filter_file(r"if workspace_has_any_android_rule\(\)", r"if True", "configure.py") + + if spec.satisfies("~gcp"): + # google cloud support seems to be installed on default, leading + # to boringssl error manually set the flag to false to avoid + # installing gcp support + # https://github.com/tensorflow/tensorflow/issues/20677#issuecomment-404634519 + filter_file( + r"--define with_gcp_support=true", + r"--define with_gcp_support=false", + ".tf_configure.bazelrc", + ) + + if spec.satisfies("~opencl"): + # 1.8.0 and 1.9.0 aborts with numpy import error during python_api + # generation somehow the wrong PYTHONPATH is used... 
+ # set --distinct_host_configuration=false as a workaround + # https://github.com/tensorflow/tensorflow/issues/22395#issuecomment-431229451 + with open(".tf_configure.bazelrc", mode="a") as f: + f.write("build --distinct_host_configuration=false\n") + f.write('build --action_env PYTHONPATH="{0}"\n'.format(env["PYTHONPATH"])) + + if spec.satisfies("+cuda"): + libs = spec["cuda"].libs.directories + libs.extend(spec["cudnn"].libs.directories) + if "+nccl" in spec: + libs.extend(spec["nccl"].libs.directories) + + if "+tensorrt" in spec: + libs.extend(spec["tensorrt"].libs.directories) + slibs = ":".join(libs) + + with open(".tf_configure.bazelrc", mode="a") as f: + f.write('build --action_env LD_LIBRARY_PATH="' + slibs + '"') + + if spec.satisfies("+rocm"): + before = r"/usr/lib/llvm-\d+/bin/clang" + after = spec["llvm-amdgpu"].prefix.bin.clang + filter_file(before, after, ".bazelrc") + + filter_file("build:opt --copt=-march=native", "", ".tf_configure.bazelrc") + filter_file("build:opt --host_copt=-march=native", "", ".tf_configure.bazelrc") + + def build(self, spec, prefix): + # Bazel needs the directory to exist on install + mkdirp(python_platlib) + tmp_path = env["TEST_TMPDIR"] + + # https://docs.bazel.build/versions/master/command-line-reference.html + args = [ + # Don't allow user or system .bazelrc to override build settings + "--nohome_rc", + "--nosystem_rc", + # Bazel does not work properly on NFS, switch to /tmp + "--output_user_root=" + tmp_path, + "build", + # Spack logs don't handle colored output well + "--color=no", + "--jobs={0}".format(make_jobs), + "--config=opt", + # Enable verbose output for failures + "--verbose_failures", + ] + + if spec.satisfies("^bazel@:3.5"): + # removed in bazel 3.6 + args.append("--incompatible_no_support_tools_in_action_inputs=false") + + # See .bazelrc for when each config flag is supported + if "+mkl" in spec: + args.append("--config=mkl") + + if "+monolithic" in spec: + args.append("--config=monolithic") + + if "+gdr" in spec: + args.append("--config=gdr") + + if "+verbs" in spec: + args.append("--config=verbs") + + if "+ngraph" in spec: + args.append("--config=ngraph") + + if "+dynamic_kernels" in spec: + args.append("--config=dynamic_kernels") + + if "+cuda" in spec: + args.append("--config=cuda") + if spec.satisfies("@2.18:"): + args.append("--config=cuda_wheel") + + if "+rocm" in spec: + args.append("--config=rocm") + + if "~aws" in spec: + args.append("--config=noaws") + + if "~gcp" in spec: + args.append("--config=nogcp") + + if "~hdfs" in spec: + args.append("--config=nohdfs") + + if "~nccl" in spec: + args.append("--config=nonccl") + + # https://github.com/tensorflow/tensorflow/issues/63080 + if self.spec.satisfies("@2.14:"): + args.append(f"--define=with_numa_support={'+numa' in spec}") + else: + if "+numa" in spec: + args.append("--config=numa") + + args.append("--config=v2") + + if self.spec.satisfies("@2.18.0-rocm-enhanced: +rocm"): + buildpath = join_path( + self.stage.source_path, "bazel-bin/tensorflow/tools/pip_package/wheel_house/" + ) + args.append(f"--repo_env=OUTPUT_PATH={buildpath}") + # https://github.com/tensorflow/tensorflow/issues/63298 + if self.spec.satisfies("@2.17:"): + args.append("//tensorflow/tools/pip_package:wheel") + else: + args.append("//tensorflow/tools/pip_package:build_pip_package") + + bazel(*args) + + if self.spec.satisfies("@:2.16"): + build_pip_package = Executable( + "bazel-bin/tensorflow/tools/pip_package/build_pip_package" + ) + buildpath = join_path(self.stage.source_path, "spack-build") + 
build_pip_package("--src", buildpath) + + def install(self, spec, prefix): + tmp_path = env["TEST_TMPDIR"] + if self.spec.satisfies("@2.17:"): + buildpath = join_path( + self.stage.source_path, "bazel-bin/tensorflow/tools/pip_package/wheel_house/" + ) + with working_dir(buildpath): + wheel = glob.glob("*.whl")[0] + pip(*PythonPipBuilder.std_args(self), f"--prefix={self.prefix}", wheel) + else: + buildpath = join_path(self.stage.source_path, "spack-build") + with working_dir(buildpath): + pip(*PythonPipBuilder.std_args(self), f"--prefix={self.prefix}", ".") + + remove_linked_tree(tmp_path) diff --git a/packages/py-tensorflow/set_jit_true.patch b/packages/py-tensorflow/set_jit_true.patch new file mode 100644 index 00000000..9ec77599 --- /dev/null +++ b/packages/py-tensorflow/set_jit_true.patch @@ -0,0 +1,22 @@ +diff --git a/tensorflow/core/kernels/mlir_generated/build_defs.bzl b/tensorflow/core/kernels/mlir_generated/build_defs.bzl +index f574a8da8fd..fc1fbf68bf8 100644 +--- a/tensorflow/core/kernels/mlir_generated/build_defs.bzl ++++ b/tensorflow/core/kernels/mlir_generated/build_defs.bzl +@@ -360,7 +360,7 @@ def _gen_kernel_library( + extra_args = extra_args, + host_triple = host_triple, + gpu_archs = gpu_archs, +- jit = jit, ++ jit = True, + mlir_op = "{op}_{name}_{platform}_{type}_{output_type}.mlir".format( + op = op, + name = name, +@@ -370,7 +370,7 @@ def _gen_kernel_library( + ), + tile_size = typed_tile_size, + unroll_factors = typed_unroll_factors, +- jit_i64_indexed_for_large_tensors = jit_i64_indexed_for_large_tensors, ++ jit_i64_indexed_for_large_tensors = False, + ) + + # We have to use a sh_test instead of build_test because it doesn't properly find the dependent targets. -- GitLab From added67184d45a1e9898e4380ddddfc93ddd4211 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eric=20M=C3=BCller?= <mueller@kip.uni-heidelberg.de> Date: Fri, 14 Mar 2025 16:15:48 +0100 Subject: [PATCH 06/11] fix(py-tensorflow): deps for 2.18.0 --- packages/py-tensorflow/package.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/packages/py-tensorflow/package.py b/packages/py-tensorflow/package.py index 5e4207fb..436a1ea5 100644 --- a/packages/py-tensorflow/package.py +++ b/packages/py-tensorflow/package.py @@ -375,6 +375,7 @@ class PyTensorflow(Package, CudaPackage, ROCmPackage, PythonExtension): # depends_on('trisycl', when='+opencl~computepp') with when("+cuda"): # https://www.tensorflow.org/install/source#gpu + depends_on("cuda@12.5:", when="@2.18:") depends_on("cuda@12.3:", when="@2.16:") depends_on("cuda@12.2:", when="@2.15:") depends_on("cuda@11.8:", when="@2.12:") @@ -386,12 +387,13 @@ class PyTensorflow(Package, CudaPackage, ROCmPackage, PythonExtension): depends_on("cuda@:11.4", when="@2.4:2.7") depends_on("cuda@:10.2", when="@:2.3") - depends_on("cudnn@8.9:8", when="@2.15:") - depends_on("cudnn@8.7:8", when="@2.14:") - depends_on("cudnn@8.6:8", when="@2.12:") - depends_on("cudnn@8.1:8", when="@2.5:") - depends_on("cudnn@8.0:8", when="@2.4:") - depends_on("cudnn@7.6:8", when="@2.1:") + depends_on("cudnn@9.3:9", when="@2.18:") + depends_on("cudnn@8.9:8", when="@2.15:2.17") + depends_on("cudnn@8.7:8", when="@2.14:2.16") + depends_on("cudnn@8.6:8", when="@2.12:2.13") + depends_on("cudnn@8.1:8", when="@2.5:2.11") + depends_on("cudnn@8.0:8", when="@2.4") + depends_on("cudnn@7.6:8", when="@2.1:2.3") depends_on("cudnn@:7", when="@:2.2") # depends_on('tensorrt', when='+tensorrt') -- GitLab From 11d3abe84cc5a7c752d93e1aea8c7571ea70182d Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Eric=20M=C3=BCller?= <mueller@kip.uni-heidelberg.de> Date: Mon, 17 Mar 2025 11:54:02 +0100 Subject: [PATCH 07/11] fix(py-tensorflow): missing conf env vars for @2.18: --- packages/py-tensorflow/package.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/packages/py-tensorflow/package.py b/packages/py-tensorflow/package.py index 436a1ea5..6b01431b 100644 --- a/packages/py-tensorflow/package.py +++ b/packages/py-tensorflow/package.py @@ -728,6 +728,18 @@ class PyTensorflow(Package, CudaPackage, ROCmPackage, PythonExtension): # Please specify the cuDNN version you want to use env.set("TF_CUDNN_VERSION", spec["cudnn"].version.up_to(1)) + # Please specify the hermetic CUDA version you want to use + env.set("HERMETIC_CUDA_VERSION", spec["cuda"].version.up_to(3)) + + # Please specify the hermetic cuDNN version you want to use + env.set("HERMETIC_CUDNN_VERSION", spec["cudnn"].version.up_to(3)) + + # Please specify the local CUDA path you want to use + env.set("LOCAL_CUDA_PATH", spec["cuda"].prefix) + + # Please specify the local CUDNN path you want to use + env.set("LOCAL_CUDNN_PATH", spec["cudnn"].prefix) + if "+nccl" in spec: cuda_paths.append(spec["nccl"].prefix) @@ -737,6 +749,9 @@ class PyTensorflow(Package, CudaPackage, ROCmPackage, PythonExtension): # Please specify the location where NCCL is installed env.set("NCCL_INSTALL_PATH", spec["nccl"].prefix) env.set("NCCL_HDR_PATH", spec["nccl"].prefix.include) + + # Please specify the local NCCL path you want to use + env.set("LOCAL_NCCL_PATH", spec["nccl"].prefix) else: env.unset("TF_NCCL_VERSION") -- GitLab From 71fcc9f9a0b739fc8c61a10c341253d681a124ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eric=20M=C3=BCller?= <mueller@kip.uni-heidelberg.de> Date: Mon, 17 Mar 2025 12:45:08 +0100 Subject: [PATCH 08/11] fix(py-tensorflow): more fixes (@2.18.0) * disable XNN's avxvnniint8 on gcc before 13 * some data structure padding issue --- packages/py-tensorflow/package.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/packages/py-tensorflow/package.py b/packages/py-tensorflow/package.py index 6b01431b..5b858b1e 100644 --- a/packages/py-tensorflow/package.py +++ b/packages/py-tensorflow/package.py @@ -560,6 +560,14 @@ class PyTensorflow(Package, CudaPackage, ROCmPackage, PythonExtension): patch("set_jit_true.patch", when="@2.18.0-rocm-enhanced +rocm") phases = ["configure", "build", "install"] + # see https://github.com/tensorflow/tensorflow/pull/74423 + # ("Fix ResolvePadding to be consistent in the size of the small vector") + patch( + "https://github.com/tensorflow/tensorflow/commit/64b7390208319b239d0fedb146b2969fbdbf0ee6.patch?full_index=1", + sha256="2eac2f361ee71c26d129e281990e6a84114c3898dda6178f4510e715e9dbcc95", + when="@2.18.0 %gcc", + ) + def flag_handler(self, name, flags): spec = self.spec # ubuntu gcc has this workaround turned on by default in aarch64 @@ -915,6 +923,11 @@ class PyTensorflow(Package, CudaPackage, ROCmPackage, PythonExtension): filter_file("build:opt --copt=-march=native", "", ".tf_configure.bazelrc") filter_file("build:opt --host_copt=-march=native", "", ".tf_configure.bazelrc") + # option -mavxvnniint8 (via @2.18's xnnpack) only supported from gcc@13: + if spec.satisfies("@2.18.0: %gcc@:12"): + with open(".bazelrc", mode="a") as f: + f.write("build --define=xnn_enable_avxvnniint8=false\n") + def build(self, spec, prefix): # Bazel needs the directory to exist on install mkdirp(python_platlib) -- GitLab From 35ccead65bb33468d442613b1e08653d02c7aa8a Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Eric=20M=C3=BCller?= <mueller@kip.uni-heidelberg.de> Date: Mon, 17 Mar 2025 13:27:46 +0100 Subject: [PATCH 09/11] fix(py-tensorflow): build w/ modern cuda --- .../cub_ThreadLoadVolatilePointer.patch | 34 +++++++++++++++++++ packages/py-tensorflow/package.py | 3 ++ 2 files changed, 37 insertions(+) create mode 100644 packages/py-tensorflow/cub_ThreadLoadVolatilePointer.patch diff --git a/packages/py-tensorflow/cub_ThreadLoadVolatilePointer.patch b/packages/py-tensorflow/cub_ThreadLoadVolatilePointer.patch new file mode 100644 index 00000000..8ba95826 --- /dev/null +++ b/packages/py-tensorflow/cub_ThreadLoadVolatilePointer.patch @@ -0,0 +1,34 @@ +diff --git a/tensorflow/core/kernels/gpu_prim.h b/tensorflow/core/kernels/gpu_prim.h +index bef22b5..94fd951 100644 +--- a/tensorflow/core/kernels/gpu_prim.h ++++ b/tensorflow/core/kernels/gpu_prim.h +@@ -44,10 +44,10 @@ __device__ __forceinline__ void ThreadStoreVolatilePtr<Eigen::half>( + Eigen::numext::bit_cast<uint16_t>(val); + } +- +-template <> +-__device__ __forceinline__ Eigen::half ThreadLoadVolatilePointer<Eigen::half>( +- Eigen::half *ptr, Int2Type<true> /*is_primitive*/) { +- uint16_t result = *reinterpret_cast<volatile uint16_t *>(ptr); ++ ++__device__ __forceinline__ Eigen::half ThreadLoadVolatilePointer( ++ const Eigen::half *ptr, Int2Type<true> /*is_primitive*/) { ++ uint16_t result = *reinterpret_cast<volatile const uint16_t *>(ptr); + return Eigen::numext::bit_cast<Eigen::half>(result); + } +- +@@ -59,11 +59,9 @@ __device__ __forceinline__ void ThreadStoreVolatilePtr<Eigen::bfloat16>( + Eigen::numext::bit_cast<uint16_t>(val); + } +- +-template <> +-__device__ __forceinline__ Eigen::bfloat16 +-ThreadLoadVolatilePointer<Eigen::bfloat16>(Eigen::bfloat16 *ptr, +- Int2Type<true> /*is_primitive*/) { +- uint16_t result = *reinterpret_cast<volatile uint16_t *>(ptr); ++__device__ __forceinline__ Eigen::bfloat16 ThreadLoadVolatilePointer( ++ const Eigen::bfloat16 *ptr, Int2Type<true> /*is_primitive*/) { ++ uint16_t result = *reinterpret_cast<volatile const uint16_t *>(ptr); + return Eigen::numext::bit_cast<Eigen::bfloat16>(result); + } +- diff --git a/packages/py-tensorflow/package.py b/packages/py-tensorflow/package.py index 5b858b1e..79309899 100644 --- a/packages/py-tensorflow/package.py +++ b/packages/py-tensorflow/package.py @@ -568,6 +568,9 @@ class PyTensorflow(Package, CudaPackage, ROCmPackage, PythonExtension): when="@2.18.0 %gcc", ) + # adapted from https://github.com/tensorflow/tensorflow/commit/5467ee993e1d3e4709c1e99f3a15a978325ae536 + patch("cub_ThreadLoadVolatilePointer.patch", when="@2.18.0 ^cuda@12.8") + def flag_handler(self, name, flags): spec = self.spec # ubuntu gcc has this workaround turned on by default in aarch64 -- GitLab From 2116c57c9ee7decc868bd43af863f0d510663ec9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eric=20M=C3=BCller?= <mueller@kip.uni-heidelberg.de> Date: Mon, 17 Mar 2025 16:32:25 +0100 Subject: [PATCH 10/11] fix(py-jaxlib): there might be multiple whl to install --- packages/py-jaxlib/package.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/py-jaxlib/package.py b/packages/py-jaxlib/package.py index 6db1dd80..b2240979 100644 --- a/packages/py-jaxlib/package.py +++ b/packages/py-jaxlib/package.py @@ -234,5 +234,5 @@ class PyJaxlib(PythonPackage, CudaPackage, ROCmPackage): ) python(*args) - whl = glob.glob(join_path("dist", "*.whl"))[0] - pip(*PythonPipBuilder.std_args(self), f"--prefix={self.prefix}", whl) + for whl in glob.glob(join_path("dist", "*.whl")): + 
pip(*PythonPipBuilder.std_args(self), f"--prefix={self.prefix}", whl) -- GitLab From 46516cfbb41a19a921e3cfd0b19618709cd42ab0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eric=20M=C3=BCller?= <mueller@kip.uni-heidelberg.de> Date: Mon, 31 Mar 2025 15:25:42 +0200 Subject: [PATCH 11/11] fix(jaxsnn): tests when using modern jax --- packages/jaxsnn/newjax.patch | 41 ++++++++++++++++++++++++++++++++++++ packages/jaxsnn/package.py | 1 + 2 files changed, 42 insertions(+) create mode 100644 packages/jaxsnn/newjax.patch diff --git a/packages/jaxsnn/newjax.patch b/packages/jaxsnn/newjax.patch new file mode 100644 index 00000000..96b2d170 --- /dev/null +++ b/packages/jaxsnn/newjax.patch @@ -0,0 +1,41 @@ +From d12ff24ccb39f861067661b01973862e83552baf Mon Sep 17 00:00:00 2001 +From: Elias Arnold <elias.arnold@kip.uni-heidelberg.de> +Date: Mon, 31 Mar 2025 15:13:03 +0200 +Subject: [PATCH] fix: tests for new jax + +Change-Id: I278454c7a51c0c15071a7ab8496a9655c52ff495 +--- + +diff --git a/tests/sw/event/hardware/utils_test.py b/tests/sw/event/hardware/utils_test.py +index ab73452..0753947 100644 +--- a/tests/sw/event/hardware/utils_test.py ++++ b/tests/sw/event/hardware/utils_test.py +@@ -15,12 +15,12 @@ + rng = random.PRNGKey(42) + with_noise = add_noise_batch(spikes, rng, std=1) + assert_array_equal( +- with_noise.idx, np.array([[0, 1, 2, 5, 3, 4, 6, 7, 8, 9]]) ++ with_noise.idx, np.array([[0, 1, 2, 3, 4, 6, 5, 7, 8, 9]]) + ) + + with_noise = add_noise_batch(spikes, rng, std=3) + assert_array_equal( +- with_noise.idx, np.array([[2, 1, 0, 5, 6, 7, 3, 4, 8, 9]]) ++ with_noise.idx, np.array([[0, 6, 1, 2, 3, 4, 5, 7, 8, 9]]) + ) + + def test_sort_batch(self): +diff --git a/tests/sw/event/tasks/constant_test.py b/tests/sw/event/tasks/constant_test.py +index be82deb..a7906af 100644 +--- a/tests/sw/event/tasks/constant_test.py ++++ b/tests/sw/event/tasks/constant_test.py +@@ -52,7 +52,7 @@ + ) + + # init weights +- rng = random.PRNGKey(42) ++ rng = random.PRNGKey(45) + weights = init_fn(rng, input_shape) + + loss_fn = partial( + diff --git a/packages/jaxsnn/package.py b/packages/jaxsnn/package.py index 8cca2481..25593b9d 100644 --- a/packages/jaxsnn/package.py +++ b/packages/jaxsnn/package.py @@ -46,6 +46,7 @@ class Jaxsnn(build_brainscales.BuildBrainscales): extends('python') patch("include-SparseTensorUtils.patch", when="@:8.0-a5") + patch("newjax.patch", when="@:10.0-a1 ^py-jax@0.5:") def install_test(self): with working_dir('spack-test', create=True): -- GitLab
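For reference, a minimal usage sketch for consuming the recipes added and fixed above; this is not part of the patch series itself and assumes the packages/ directory shown here belongs to a Spack package repository with a valid repo.yaml, with the versions, variants and cuda_arch value chosen purely for illustration:

    spack repo add /path/to/this/repository
    spack install py-tensorflow@2.18.0 +cuda cuda_arch=80 ^cuda@12.5 ^cudnn@9.3
    spack install jaxsnn ^py-jax@0.5:

The same specs can also be pinned in a Spack environment (spack.yaml) rather than installed ad hoc.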