diff --git a/.bazelignore b/.bazelignore index 3bf32c9d..06d0e0f9 100644 --- a/.bazelignore +++ b/.bazelignore @@ -1,3 +1,4 @@ # nested modules docs/ e2e/ +examples/ diff --git a/MODULE.bazel b/MODULE.bazel index 76a9ba4c..e89ff8e9 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -1,96 +1,21 @@ -"Bazel dependencies" - module( name = "rules_distroless", compatibility_level = 1, ) -bazel_dep(name = "platforms", version = "0.0.10") +bazel_dep(name = "aspect_bazel_lib", version = "2.14.0") bazel_dep(name = "bazel_features", version = "1.20.0") bazel_dep(name = "bazel_skylib", version = "1.5.0") -bazel_dep(name = "aspect_bazel_lib", version = "2.14.0") +bazel_dep(name = "platforms", version = "0.0.10") +bazel_dep(name = "rules_cc", version = "0.2.8") bazel_dep(name = "rules_java", version = "8.8.0") bazel_dep(name = "rules_shell", version = "0.4.1") bazel_lib_toolchains = use_extension("@aspect_bazel_lib//lib:extensions.bzl", "toolchains") use_repo(bazel_lib_toolchains, "zstd_toolchains") use_repo(bazel_lib_toolchains, "bsd_tar_toolchains") -use_repo(bazel_lib_toolchains, "yq_darwin_amd64") -use_repo(bazel_lib_toolchains, "yq_darwin_arm64") -use_repo(bazel_lib_toolchains, "yq_linux_amd64") -use_repo(bazel_lib_toolchains, "yq_linux_arm64") -use_repo(bazel_lib_toolchains, "yq_linux_ppc64le") -use_repo(bazel_lib_toolchains, "yq_linux_s390x") -use_repo(bazel_lib_toolchains, "yq_windows_amd64") # Dev dependencies bazel_dep(name = "gazelle", version = "0.34.0", dev_dependency = True, repo_name = "bazel_gazelle") bazel_dep(name = "bazel_skylib_gazelle_plugin", version = "1.5.0", dev_dependency = True) bazel_dep(name = "buildifier_prebuilt", version = "8.0.1", dev_dependency = True) -bazel_dep(name = "rules_oci", version = "2.0.0", dev_dependency = True) -bazel_dep(name = "container_structure_test", version = "1.16.0", dev_dependency = True) - -http_archive = use_repo_rule("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") - -http_archive( - name = 
"example-bullseye-ca-certificates", - build_file_content = 'exports_files(["data.tar.xz", "control.tar.xz"])', - sha256 = "b2d488ad4d8d8adb3ba319fc9cb2cf9909fc42cb82ad239a26c570a2e749c389", - urls = ["https://snapshot.debian.org/archive/debian/20231106T210201Z/pool/main/c/ca-certificates/ca-certificates_20210119_all.deb"], -) - -http_archive( - name = "example-bullseye-libc-bin", - build_file_content = 'exports_files(["data.tar.xz"])', - sha256 = "8b048ab5c7e9f5b7444655541230e689631fd9855c384e8c4a802586d9bbc65a", - urls = ["https://snapshot.debian.org/archive/debian-security/20231106T230332Z/pool/updates/main/g/glibc/libc-bin_2.31-13+deb11u7_amd64.deb"], -) - -http_archive( - name = "example-bookworm-libc-bin", - build_file_content = 'exports_files(["data.tar.xz"])', - sha256 = "38c44247c5b3e864d6db2877edd9c9a0555fc4e23ae271b73d7f527802616df5", - urls = ["https://snapshot.debian.org/archive/debian-security/20231106T230332Z/pool/updates/main/g/glibc/libc-bin_2.36-9+deb12u3_armhf.deb"], -) - -apt = use_extension( - "@rules_distroless//apt:extensions.bzl", - "apt", - dev_dependency = True, -) -apt.install( - name = "bullseye", - lock = "//examples/debian_snapshot:bullseye.lock.json", - manifest = "//examples/debian_snapshot:bullseye.yaml", -) -apt.install( - name = "bullseye_nolock", - manifest = "//examples/debian_snapshot:bullseye.yaml", - nolock = True, -) -apt.install( - name = "noble", - lock = "//examples/ubuntu_snapshot:noble.lock.json", - manifest = "//examples/ubuntu_snapshot:noble.yaml", -) -apt.install( - name = "resolution_test", - manifest = "apt/tests/resolution/security.yaml", - nolock = True, -) -apt.install( - name = "resolution_test_empty_lock", - lock = "//apt/tests/resolution:empty.lock.json", - manifest = "apt/tests/resolution/security.yaml", -) -apt.install( - name = "arch_all_test", - manifest = "apt/tests/resolution/arch_all.yaml", - nolock = True, -) -apt.install( - name = "clang", - manifest = "apt/tests/resolution/clang.yaml", - nolock = 
True, -) -use_repo(apt, "arch_all_test", "arch_all_test_resolve", "bullseye", "bullseye_nolock", "clang", "noble", "resolution_test", "resolution_test_empty_lock_resolve", "resolution_test_resolve") diff --git a/apt/BUILD.bazel b/apt/BUILD.bazel index 9e3a6094..7729ac8f 100644 --- a/apt/BUILD.bazel +++ b/apt/BUILD.bazel @@ -1,7 +1,6 @@ load("@bazel_skylib//:bzl_library.bzl", "bzl_library") exports_files([ - "apt.bzl", "extensions.bzl", ]) @@ -15,25 +14,17 @@ bzl_library( ], ) -bzl_library( - name = "apt", - srcs = ["apt.bzl"], - visibility = ["//visibility:public"], - deps = [ - "//apt/private:deb_resolve", - "//apt/private:deb_translate_lock", - ], -) - bzl_library( name = "extensions", srcs = ["extensions.bzl"], visibility = ["//visibility:public"], deps = [ + "//apt/private:apt_deb_repository", + "//apt/private:apt_dep_resolver", "//apt/private:deb_import", - "//apt/private:deb_resolve", - "//apt/private:deb_translate_lock", "//apt/private:lockfile", - "@bazel_features//:features", + "//apt/private:translate_dependency_set", + "//apt/private:util", + "//apt/private:version_constraint", ], ) diff --git a/apt/apt.bzl b/apt/apt.bzl deleted file mode 100644 index 402ecf34..00000000 --- a/apt/apt.bzl +++ /dev/null @@ -1,140 +0,0 @@ -""" -`apt.install` macro - -This documentation provides an overview of the convenience `apt.install` -repository macro to create Debian repositories with packages "installed" in -them and available to use in Bazel. -""" - -load("//apt/private:deb_resolve.bzl", _deb_resolve = "deb_resolve") -load("//apt/private:deb_translate_lock.bzl", _deb_translate_lock = "deb_translate_lock") - -def _apt_install( - name, - manifest, - lock = None, - nolock = False, - package_template = None, - resolve_transitive = True): - """Repository macro to create Debian repositories. - - > [!WARNING] - > THIS IS A LEGACY MACRO. Use it only if you are still using `WORKSPACE`. - > Otherwise please use the [`apt` module extension](apt.md). 
- - Here's an example to create a Debian repo with `apt.install`: - - ```starlark - # WORKSPACE - - load("@rules_distroless//apt:apt.bzl", "apt") - - apt.install( - name = "bullseye", - # lock = "//examples/apt:bullseye.lock.json", - manifest = "//examples/apt:bullseye.yaml", - ) - - load("@bullseye//:packages.bzl", "bullseye_packages") - bullseye_packages() - ``` - - Note that, for the initial setup (or if we want to run without a lock) the - lockfile attribute can be omitted. All you need is a YAML - [manifest](/examples/debian_snapshot/bullseye.yaml): - ```yaml - version: 1 - - sources: - - channel: bullseye main - url: https://snapshot-cloudflare.debian.org/archive/debian/20240210T223313Z - - archs: - - amd64 - - packages: - - perl - ``` - - `apt.install` will parse the manifest and will fetch and install the - packages for the given architectures in the Bazel repo `@`. - - Each `/` has two targets that match the usual structure of a - Debian package: `data` and `control`. - - You can use the package like so: `@///:`. - - E.g. for the previous example, you could use `@bullseye//perl/amd64:data`. - - ### Lockfiles - - As mentioned, the macro can be used without a lock because the lock will be - generated internally on-demand. However, this comes with the cost of - performing a new package resolution on repository cache misses. - - The lockfile can be generated by running `bazel run @bullseye//:lock`. This - will generate a `.lock.json` file of the same name and in the same path as - the YAML `manifest` file. - - If you explicitly want to run without a lock and avoid the warning messages - set the `nolock` argument to `True`. - - ### Best Practice: use snapshot archive URLs - - While we strongly encourage users to check in the generated lockfile, it's - not always possible because Debian repositories are rolling by default. - Therefore, a lockfile generated today might not work later if the upstream - repository removes or publishes a new version of a package. 
- - To avoid this problems and increase the reproducibility it's recommended to - avoid using normal Debian mirrors and use snapshot archives instead. - - Snapshot archives provide a way to access Debian package mirrors at a point - in time. Basically, it's a "wayback machine" that allows access to (almost) - all past and current packages based on dates and version numbers. - - Debian has had snapshot archives for [10+ - years](https://lists.debian.org/debian-announce/2010/msg00002.html). Ubuntu - began providing a similar service recently and has packages available since - March 1st 2023. - - To use this services simply use a snapshot URL in the manifest. Here's two - examples showing how to do this for Debian and Ubuntu: - * [/examples/debian_snapshot](/examples/debian_snapshot) - * [/examples/ubuntu_snapshot](/examples/ubuntu_snapshot) - - For more infomation, please check https://snapshot.debian.org and/or - https://snapshot.ubuntu.com. - - Args: - name: name of the repository - manifest: label to a `manifest.yaml` - lock: label to a `lock.json` - nolock: bool, set to True if you explicitly want to run without a lock - and avoid the DEBUG messages. - package_template: (EXPERIMENTAL!) a template file for generated BUILD - files. Available template replacement keys are: - `{target_name}`, `{deps}`, `{urls}`, `{name}`, - `{arch}`, `{sha256}`, `{repo_name}` - resolve_transitive: whether dependencies of dependencies should be - resolved and added to the lockfile. - """ - _deb_resolve( - name = name + "_resolve", - manifest = manifest, - resolve_transitive = resolve_transitive, - ) - - if not lock and not nolock: - # buildifier: disable=print - print("\nNo lockfile was given, please run `bazel run @%s//:lock` to create the lockfile." 
% name) - - _deb_translate_lock( - name = name, - lock = lock if lock else "@" + name + "_resolve//:lock.json", - package_template = package_template, - ) - -apt = struct( - install = _apt_install, -) diff --git a/apt/extensions.bzl b/apt/extensions.bzl index 7d91e599..8fd64d75 100644 --- a/apt/extensions.bzl +++ b/apt/extensions.bzl @@ -1,78 +1,248 @@ "apt extensions" -load("@bazel_features//:features.bzl", "bazel_features") +load("//apt/private:apt_deb_repository.bzl", "deb_repository") +load("//apt/private:apt_dep_resolver.bzl", "dependency_resolver") load("//apt/private:deb_import.bzl", "deb_import") -load("//apt/private:deb_resolve.bzl", "deb_resolve", "internal_resolve") -load("//apt/private:deb_translate_lock.bzl", "deb_translate_lock") load("//apt/private:lockfile.bzl", "lockfile") +load("//apt/private:translate_dependency_set.bzl", "translate_dependency_set") +load("//apt/private:util.bzl", "util") +load("//apt/private:version_constraint.bzl", "version_constraint") + +# https://wiki.debian.org/SupportedArchitectures +ALL_SUPPORTED_ARCHES = ["armel", "armhf", "arm64", "i386", "amd64", "mips64el", "ppc64el", "x390x"] + +ITERATION_MAX = 2147483646 + +def _parse_source(src): + parts = src.split(" ") + kind = parts.pop(0) + if parts[0].startswith("["): + # skip arch for now. 
+ arch = parts.pop(0) + url = parts.pop(0) + dist = parts.pop(0) + components = parts + return struct( + kind = kind, + url = url, + dist = dist, + components = components, + ) -def _distroless_extension(module_ctx): +def _distroless_extension(mctx): root_direct_deps = [] root_direct_dev_deps = [] reproducible = False - for mod in module_ctx.modules: - for install in mod.tags.install: - lockf = None - if not install.lock: - lockf = internal_resolve( - module_ctx, - "yq", - install.manifest, - install.resolve_transitive, + # as-in-mach 9 + glock = lockfile.merge(mctx, [ + lockfile.from_json(mctx, mctx.read(lock.into)) + for mod in mctx.modules + for lock in mod.tags.lock + ]) + + repo = deb_repository.new(mctx, glock.facts()) + resolver = dependency_resolver.new(repo) + + for mod in mctx.modules: + # TODO: also enfore that every module explicitly lists their sources_list + # otherwise they'll break if the sources_list that the module depends on + # magically disappears. + for sl in mod.tags.sources_list: + uris = [uri.removeprefix("mirror+") for uri in sl.uris] + architectures = sl.architectures + + for suite in sl.suites: + glock.add_source( + suite, + uris = uris, + types = sl.types, + components = sl.components, + architectures = architectures, ) - if not install.nolock: - # buildifier: disable=print - print("\nNo lockfile was given, please run `bazel run @%s//:lock` to create the lockfile." % install.name) - else: - lockf = lockfile.from_json(module_ctx, module_ctx.read(install.lock)) - reproducible = True - - for (package) in lockf.packages(): - package_key = lockfile.make_package_key( - package["name"], - package["version"], - package["arch"], + repo.add_source( + (uris, suite, sl.components, architectures), ) - deb_import( - name = "%s_%s" % (install.name, package_key), - urls = package["urls"], - sha256 = package["sha256"], - mergedusr = install.mergedusr, - ) + # Fetch all sources_list and parse them. 
+ # Unfortunately repository rules have no concept of threads + # so parsing has to happen sequentially + repo.fetch_and_parse() - deb_resolve( - name = install.name + "_resolve", - manifest = install.manifest, - resolve_transitive = install.resolve_transitive, - ) + sources = glock.sources() + dependency_sets = glock.dependency_sets() - deb_translate_lock( - name = install.name, - lock = install.lock, - lock_content = lockf.as_json(), - package_template = install.package_template, + resolution_queue = [] + already_resolved = {} + + for mod in mctx.modules: + for install in mod.tags.install: + for dep_constraint in install.packages: + constraint = version_constraint.parse_dep(dep_constraint) + architectures = constraint["arch"] + if not architectures: + # For cases where architecture for the package is not specified we need + # to first find out which source contains the package. in order to do + # that we first need to resolve the package for amd64 architecture. + # Once the repository is found, then resolve the package for all the + # architectures the repository supports. + (package, warning) = resolver.resolve_package( + name = constraint["name"], + version = constraint["version"], + arch = "amd64", + suites = install.suites, + ) + if warning: + util.warning(mctx, warning) + + # If the package is not found then add the package + # to the resolution_queue to let the resolver handle + # the error messages. 
+ if not package: + resolution_queue.append(( + install.dependency_set, + constraint["name"], + constraint["version"], + "amd64", + install.suites, + )) + continue + + source = sources[package["Dist"]] + architectures = source["architectures"] + + for arch in architectures: + resolution_queue.append(( + install.dependency_set, + constraint["name"], + constraint["version"], + arch, + install.suites, + )) + + for i in range(0, ITERATION_MAX + 1): + if not len(resolution_queue): + break + if i == ITERATION_MAX: + fail("apt.install exhausted, please file a bug") + + (dependency_set_name, name, version, arch, suites) = resolution_queue.pop() + + mctx.report_progress("Resolving %s:%s" % (name, arch)) + + # TODO: Flattening approach of resolving dependencies has to change. + (package, dependencies, unmet_dependencies, warnings) = resolver.resolve_all( + name = name, + version = version, + arch = arch, + include_transitive = True, + suites = suites, + ) + + if not package: + suite_msg = " in suite(s) [%s]" % ", ".join(suites) if suites else "" + fail( + "\n\nUnable to locate package `%s` for %s%s. It may only exist for specific set of architectures or suites. \n" % (name, arch, suite_msg) + + " 1 - Ensure that the package is available for the specified architecture. \n" + + " 2 - Ensure that the specified version of the package is available for the specified architecture. 
\n" + + " 3 - Ensure that an apt.sources_list is added for the specified architecture.\n" + + " 4 - If using suite constraints, ensure the package exists in the specified suite(s).", ) - if mod.is_root: - if module_ctx.is_dev_dependency(install): - root_direct_dev_deps.append(install.name) - else: - root_direct_deps.append(install.name) + for warning in warnings: + util.warning(mctx, warning) - metadata_kwargs = {} - if bazel_features.external_deps.extension_metadata_has_reproducible: - metadata_kwargs["reproducible"] = reproducible + if len(unmet_dependencies): + util.warning( + mctx, + "Following dependencies could not be resolved for %s: %s" % ( + name, + ",".join([up[0] for up in unmet_dependencies]), + ), + ) - return module_ctx.extension_metadata( - root_module_direct_deps = root_direct_deps, - root_module_direct_dev_deps = root_direct_dev_deps, - **metadata_kwargs - ) + # TODO: + # Ensure following statements are true. + # 1- Package was resolved from a source that module listed explicitly. + # 2- Package resolution was skipped because some other module asked for this package. + # 3- 1) is enforced even if 2) is the case. 
+ glock.add_package(package) + + pkg_short_key = lockfile.short_package_key(package) + + already_resolved[pkg_short_key] = True + + for dep in dependencies: + glock.add_package(dep) + dep_key = lockfile.short_package_key(dep) + if dep_key not in already_resolved: + resolution_queue.append(( + None, + dep["Package"], + ("=", dep["Version"]), + arch, + suites, + )) + glock.add_package_dependency(package, dep) + + # Add it to dependency set + if dependency_set_name: + dependency_set = dependency_sets.setdefault(dependency_set_name, { + "sets": {}, + }) + arch_set = dependency_set["sets"].setdefault(arch, {}) + arch_set[pkg_short_key] = package["Version"] + + # Generate a hub repo for every dependency set + lock_content = glock.as_json() + for depset_name in dependency_sets.keys(): + translate_dependency_set( + name = depset_name, + depset_name = depset_name, + lock_content = lock_content, + ) + + # Generate a repo per package which will be aliased by hub repo. + for (package_key, package) in glock.packages().items(): + filemap = {} + for key in package["depends_on"]: + (suite, name, arch, version) = lockfile.parse_package_key(key) + filemap[name] = repo.filemap( + name = name, + arch = arch, + ) + + deb_import( + name = util.sanitize(package_key), + target_name = util.sanitize(package_key), + urls = [ + uri + "/" + package["filename"] + for uri in sources[package["suite"]]["uris"] + ], + sha256 = package["sha256"], + mergedusr = False, + depends_on = package["depends_on"], + depends_file_map = json.encode(filemap), + package_name = package["name"], + ) + + for mod in mctx.modules: + if not mod.is_root: + continue + + if len(mod.tags.lock) > 1: + fail("There can only be one apt.lock per module.") + elif len(mod.tags.lock) == 1: + lock = mod.tags.lock[0] + lock_tmp = mctx.path("apt.lock.json") + glock.write(lock_tmp) + lockf_wksp = mctx.path(lock.into) + mctx.execute( + ["cp", "-f", lock_tmp, lockf_wksp], + ) -_install_doc = """ +_doc = """ Module extension to create 
Debian repositories. Create Debian repositories with packages "installed" in them and available @@ -83,33 +253,33 @@ Here's an example how to create a Debian repo: ```starlark apt = use_extension("@rules_distroless//apt:extensions.bzl", "apt") +apt.sources_list( + types = ["deb"], + uris = [ + "https://snapshot.ubuntu.com/ubuntu/20240301T030400Z", + "mirror+https://snapshot.ubuntu.com/ubuntu/20240301T030400Z" + ], + suites = ["noble", "noble-security", "noble-updates"], + components = ["main"], + architectures = ["all"] +) apt.install( - name = "bullseye", - lock = "//examples/apt:bullseye.lock.json", - manifest = "//examples/apt:bullseye.yaml", + # dependency set isolates these installs into their own scope. + dependency_set = "noble", + suites = ["noble", "noble-security", "noble-updates"], + packages = [ + "ncurses-base", + "libncurses6", + "tzdata", + "coreutils:arm64", + "libstdc++6:i386" + ] ) -use_repo(apt, "bullseye") ``` -Note that, for the initial setup (or if we want to run without a lock) the -lockfile attribute can be omitted. All you need is a YAML -[manifest](/examples/debian_snapshot/bullseye.yaml): -```yaml -version: 1 - -sources: - - channel: bullseye main - url: https://snapshot-cloudflare.debian.org/archive/debian/20240210T223313Z - -archs: - - amd64 - -packages: - - perl -``` -`apt.install` will parse the manifest and will fetch and install the packages -for the given architectures in the Bazel repo `@`. +`apt.install` will install generate a package repository for each package and architecture +combination in the form of `@__`. Each `/` has two targets that match the usual structure of a Debian package: `data` and `control`. @@ -159,46 +329,45 @@ For more infomation, please check https://snapshot.debian.org and/or https://snapshot.ubuntu.com. 
""" +sources_list = tag_class( + attrs = { + "sources": attr.string_list( + # mandatory = True, + ), + "types": attr.string_list(), + "uris": attr.string_list(), + "suites": attr.string_list(), + "components": attr.string_list(), + "architectures": attr.string_list(), + }, +) + install = tag_class( attrs = { - "name": attr.string( - doc = "Name of the generated repository", + "packages": attr.string_list( mandatory = True, + allow_empty = False, ), - "manifest": attr.label( - doc = "The file used to generate the lock file", + "dependency_set": attr.string(), + "suites": attr.string_list(), + "include_transitive": attr.bool(default = True), + }, +) + +lock = tag_class( + attrs = { + "into": attr.label( mandatory = True, ), - "lock": attr.label( - doc = "The lock file to use for the index.", - ), - "nolock": attr.bool( - doc = "If you explicitly want to run without a lock, set it " + - "to `True` to avoid the DEBUG messages.", - default = False, - ), - "package_template": attr.label( - doc = "(EXPERIMENTAL!) 
a template file for generated BUILD " + - "files.", - ), - "resolve_transitive": attr.bool( - doc = "Whether dependencies of dependencies should be " + - "resolved and added to the lockfile.", - default = True, - ), - "mergedusr": attr.bool( - doc = "Whether packges should be normalized following mergedusr conventions.\n" + - "Turning this on might fix the following error thrown by docker for ambigious paths: `duplicate of paths are supported.` \n" + - "For more context please see https://salsa.debian.org/md/usrmerge/-/raw/master/debian/README.Debian?ref_type=heads", - default = False, - ), }, - doc = _install_doc, ) apt = module_extension( + doc = _doc, implementation = _distroless_extension, tag_classes = { "install": install, + "sources_list": sources_list, + "lock": lock, }, ) diff --git a/apt/private/BUILD.bazel b/apt/private/BUILD.bazel index 3d745bd7..5dcd0b26 100644 --- a/apt/private/BUILD.bazel +++ b/apt/private/BUILD.bazel @@ -21,20 +21,6 @@ bzl_library( deps = ["//distroless/private:tar"], ) -bzl_library( - name = "deb_translate_lock", - srcs = ["deb_translate_lock.bzl"], - visibility = ["//apt:__subpackages__"], - deps = [ - ":lockfile", - ":starlark_codegen_utils", - "@bazel_skylib//lib:new_sets", - "@bazel_tools//tools/build_defs/repo:cache.bzl", - "@bazel_tools//tools/build_defs/repo:http.bzl", - "@bazel_tools//tools/build_defs/repo:utils.bzl", - ], -) - bzl_library( name = "lockfile", srcs = ["lockfile.bzl"], @@ -62,18 +48,6 @@ bzl_library( ], ) -bzl_library( - name = "deb_resolve", - srcs = ["deb_resolve.bzl"], - visibility = ["//apt:__subpackages__"], - deps = [ - ":apt_deb_repository", - ":apt_dep_resolver", - ":lockfile", - "@aspect_bazel_lib//lib:repo_utils", - ], -) - bzl_library( name = "version", srcs = ["version.bzl"], @@ -106,3 +80,16 @@ bzl_library( srcs = ["util.bzl"], visibility = ["//apt:__subpackages__"], ) + +bzl_library( + name = "translate_dependency_set", + srcs = ["translate_dependency_set.bzl"], + visibility = 
["//apt:__subpackages__"], + deps = [ + ":lockfile", + ":starlark_codegen_utils", + ":util", + "//apt:defs", + "//distroless:defs", + ], +) diff --git a/apt/private/apt_deb_repository.bzl b/apt/private/apt_deb_repository.bzl index 95657f59..d6b689db 100644 --- a/apt/private/apt_deb_repository.bzl +++ b/apt/private/apt_deb_repository.bzl @@ -12,7 +12,7 @@ def _get_auth(ctx, urls): netrc = read_user_netrc(ctx) return use_netrc(netrc, urls, {}) -def _fetch_package_index(rctx, urls, dist, comp, arch, integrity): +def _fetch_package_index(mctx, urls, dist, comp, arch, integrity): target_triple = "{dist}/{comp}/{arch}".format(dist = dist, comp = comp, arch = arch) # See https://linux.die.net/man/1/xz , https://linux.die.net/man/1/gzip , and https://linux.die.net/man/1/bzip2 @@ -30,7 +30,7 @@ def _fetch_package_index(rctx, urls, dist, comp, arch, integrity): failed_attempts = [] url = None - base_auth = _get_auth(rctx, urls) + base_auth = _get_auth(mctx, urls) for url in urls: download = None for (ext, cmd) in supported_extensions: @@ -39,7 +39,7 @@ def _fetch_package_index(rctx, urls, dist, comp, arch, integrity): auth = {} if url in base_auth: auth = {dist_url: base_auth[url]} - download = rctx.download( + download = mctx.download( url = dist_url, output = output, integrity = integrity, @@ -48,7 +48,7 @@ def _fetch_package_index(rctx, urls, dist, comp, arch, integrity): ) decompress_r = None if download.success: - decompress_r = rctx.execute(cmd + [output]) + decompress_r = mctx.execute(cmd + [output]) if decompress_r.return_code == 0: integrity = download.integrity break @@ -70,14 +70,79 @@ def _fetch_package_index(rctx, urls, dist, comp, arch, integrity): attempt_messages.append("""\n*) Failed '{}'\n\n{}""".format(failed_url, reason)) fail(""" -** Tried to download {} different package indices and all failed. +** Tried to download {} different package indices and all failed. 
{} """.format(len(failed_attempts), "\n".join(attempt_messages))) return ("{}/Packages".format(target_triple), url, integrity) -def _parse_repository(state, contents, roots): +def _fetch_contents(mctx, urls, dist, comp, arch, integrity): + target_triple = "{dist}/{comp}/{arch}".format(dist = dist, comp = comp, arch = arch) + + # See https://linux.die.net/man/1/xz , https://linux.die.net/man/1/gzip , and https://linux.die.net/man/1/bzip2 + # --keep -> keep the original file (Bazel might be still committing the output to the cache) + # --force -> overwrite the output if it exists + # --decompress -> decompress + # Order of these matter, we want to try the one that is most likely first. + supported_extensions = [ + (".gz", ["gzip", "--decompress", "--keep", "--force"]), + (".xz", ["xz", "--decompress", "--keep", "--force"]), + (".bz2", ["bzip2", "--decompress", "--keep", "--force"]), + ("", ["true"]), + ] + + failed_attempts = [] + + url = None + base_auth = _get_auth(mctx, urls) + for url in urls: + download = None + for (ext, cmd) in supported_extensions: + output = "{}/Contents{}".format(target_triple, ext) + dist_url = "{}/dists/{}/{}/Contents-{}{}".format(url, dist, comp, arch, ext) + auth = {} + if url in base_auth: + auth = {dist_url: base_auth[url]} + download = mctx.download( + url = dist_url, + output = output, + integrity = integrity, + allow_fail = True, + auth = auth, + ) + decompress_r = None + if download.success: + decompress_r = mctx.execute(cmd + [output]) + if decompress_r.return_code == 0: + integrity = download.integrity + break + + failed_attempts.append((dist_url, download, decompress_r)) + + if download.success: + break + + if len(failed_attempts) == len(supported_extensions) * len(urls): + attempt_messages = [] + for (failed_url, download, decompress) in failed_attempts: + reason = "unknown" + if not download.success: + reason = "Download failed. See warning above for details." 
+ elif decompress.return_code != 0: + reason = "Decompression failed with non-zero exit code.\n\n{}\n{}".format(decompress.stderr, decompress.stdout) + + attempt_messages.append("""\n*) Failed '{}'\n\n{}""".format(failed_url, reason)) + + fail(""" +** Tried to download {} different package indices and all failed. + +{} + """.format(len(failed_attempts), "\n".join(attempt_messages))) + + return ("{}/Contents".format(target_triple), url, integrity) + +def _parse_repository(state, contents, roots, dist): last_key = "" pkg = {} for group in contents.split("\n\n"): @@ -106,10 +171,22 @@ def _parse_repository(state, contents, roots): if "Package" not in pkg: fail("Invalid debian package index format. No 'Package' key found in entry: {}".format(pkg)) pkg["Roots"] = roots + pkg["Dist"] = dist _add_package(state, pkg) last_key = "" pkg = {} +def _parse_contents(state, rcontents, arch): + contents = state.filemap.setdefault(arch, {}) + for line in rcontents.splitlines(): + last_empty_char = line.rfind(" ") + first_empty_char = line.find(" ") + filepath = line[:first_empty_char] + pkgs = line[last_empty_char + 1:].split(",") + for pkg in pkgs: + contents.setdefault(pkg[pkg.find("/") + 1:], []).append(filepath) + state.filemap[arch] = contents + def _add_package(state, package): util.set_dict( state.packages, @@ -151,41 +228,106 @@ def _add_package(state, package): (package["Architecture"], virtual["name"]), ) -def _virtual_packages(state, name, arch): - return util.get_dict(state.virtual_packages, [arch, name], []) - -def _package_versions(state, name, arch): - return util.get_dict(state.packages, [arch, name], {}).keys() - -def _package(state, name, version, arch): - return util.get_dict(state.packages, keys = (arch, name, version)) +def _virtual_packages(state, name, arch, suites = None): + all_providers = util.get_dict(state.virtual_packages, [arch, name], []) + if not suites: + return all_providers + return [(pkg, v) for (pkg, v) in all_providers if pkg["Dist"] in suites] 
+ +def _package_versions(state, name, arch, suites = None): + all_packages = util.get_dict(state.packages, [arch, name], {}) + if not suites: + return all_packages.keys() + return [v for v, pkg in all_packages.items() if pkg["Dist"] in suites] + +def _package(state, name, version, arch, suites = None): + if not version: + return None + package = util.get_dict(state.packages, keys = (arch, name, version)) + if not package: + return None + if suites and package["Dist"] not in suites: + return None + return package + +def _filemap(state, name, arch): + if arch not in state.filemap: + return None + all = state.filemap[arch] + if name not in all: + return None + return state.filemap[arch][name] + +def _fetch_and_parse_sources(state): + mctx = state.mctx + facts = state.facts + + # TODO: make parallel + for source in state.sources.values(): + (urls, dist, component, architecture) = source + + # We assume that `url` does not contain a trailing forward slash when passing to + # functions below. If one is present, remove it. Some HTTP servers do not handle + # redirects properly when a path contains "//" + # (ie. https://mymirror.com/ubuntu//dists/noble/stable/... 
may return a 404 + # on misconfigured HTTP servers) + urls = [url.rstrip("/") for url in urls] + + fact_key = dist + "/" + component + "/" + architecture + "/Packages" + + mctx.report_progress("fetching Package indices: {}/{} for {}".format(dist, component, architecture)) + (output, url, integrity) = _fetch_package_index(mctx, urls, dist, component, architecture, facts.get(fact_key, "")) + + facts[fact_key] = integrity + + mctx.report_progress("parsing Package indices: {}/{} for {}".format(dist, component, architecture)) + _parse_repository(state, mctx.read(output), urls, dist) + + fact_key = dist + "/" + component + "/" + architecture + "/Contents" + + mctx.report_progress("fetching Contents: {}/{} for {}".format(dist, component, architecture)) + (output, url, integrity) = _fetch_contents(mctx, urls, dist, component, architecture, facts.get(fact_key, "")) + + facts[fact_key] = integrity + + mctx.report_progress("parsing Contents: {}/{} for {}".format(dist, component, architecture)) + _parse_contents(state, mctx.read(output), architecture) + +def _add_source_if_not_present(state, source): + (urls, dist, components, architectures) = source + + for arch in architectures: + for comp in components: + keys = [ + "%".join((url, dist, comp, arch)) + for url in urls + ] + found = any([ + key in state.sources + for key in keys + ]) + if found: + continue + for key in keys: + state.sources[key] = (urls, dist, comp, arch) -def _create(rctx, sources, archs): +def _create(mctx, facts): state = struct( + mctx = mctx, + sources = dict(), + filemap = dict(), packages = dict(), virtual_packages = dict(), + facts = facts, ) - for arch in archs: - for (urls, dist, comp) in sources: - # We assume that `url` does not contain a trailing forward slash when passing to - # functions below. If one is present, remove it. Some HTTP servers do not handle - # redirects properly when a path contains "//" - # (ie. https://mymirror.com/ubuntu//dists/noble/stable/... 
may return a 404 - # on misconfigured HTTP servers) - urls = [url.rstrip("/") for url in urls] - - rctx.report_progress("Fetching package index: {}/{} for {}".format(dist, comp, arch)) - (output, _, _) = _fetch_package_index(rctx, urls, dist, comp, arch, "") - - # TODO: this is expensive to perform. - rctx.report_progress("Parsing package index: {}/{} for {}".format(dist, comp, arch)) - _parse_repository(state, rctx.read(output), urls) - return struct( + add_source = lambda source: _add_source_if_not_present(state, source), + fetch_and_parse = lambda: _fetch_and_parse_sources(state), package_versions = lambda **kwargs: _package_versions(state, **kwargs), virtual_packages = lambda **kwargs: _virtual_packages(state, **kwargs), package = lambda **kwargs: _package(state, **kwargs), + filemap = lambda **kwargs: _filemap(state, **kwargs), ) deb_repository = struct( @@ -207,7 +349,7 @@ def _create_test_only(): package_versions = lambda **kwargs: _package_versions(state, **kwargs), virtual_packages = lambda **kwargs: _virtual_packages(state, **kwargs), package = lambda **kwargs: _package(state, **kwargs), - parse_repository = lambda contents: _parse_repository(state, contents, "http://nowhere"), + parse_repository = lambda contents, dist = "test": _parse_repository(state, contents, "http://nowhere", dist), packages = state.packages, reset = reset, ) diff --git a/apt/private/apt_dep_resolver.bzl b/apt/private/apt_dep_resolver.bzl index 802a706f..9948d4da 100644 --- a/apt/private/apt_dep_resolver.bzl +++ b/apt/private/apt_dep_resolver.bzl @@ -3,9 +3,9 @@ load(":version.bzl", version_lib = "version") load(":version_constraint.bzl", "version_constraint") -def _resolve_package(state, name, version, arch): +def _resolve_package(state, name, version, arch, suites = None): # First check if the constraint is satisfied by a virtual package - virtual_packages = state.repository.virtual_packages(name = name, arch = arch) + virtual_packages = state.repository.virtual_packages(name = 
name, arch = arch, suites = suites) candidates = [ package @@ -17,10 +17,13 @@ def _resolve_package(state, name, version, arch): ) ] + warning = None + if len(candidates) == 1: - return candidates[0] + return (candidates[0], warning) if len(candidates) > 1: + versions = {} for package in candidates: # Return 'required' packages immediately since it is implicit that # they should exist on a default debian install. @@ -32,19 +35,25 @@ def _resolve_package(state, name, version, arch): # # In the case of required packages, these defaults are not specified. if "Priority" in package and package["Priority"] == "required": - return package + return (package, warning) + versions[package["Version"]] = package + + sortedversions = version_lib.sort(versions.keys(), reverse = True) + + # First element in the versions list is the latest version. + selected_version = sortedversions[0] + return (versions[selected_version], warning) - # Otherwise, we can't disambiguate the virtual package providers so - # choose none and warn. - # buildifier: disable=print - print("\nMultiple candidates for virtual package '{}': {}".format( - name, - [package["Package"] for package in candidates], - )) + # # Otherwise, we can't disambiguate the virtual package providers so + # # choose none and warn. 
+ # warning = "Multiple candidates for virtual package '{}': {}".format( + # name, + # ", ".join([package["Package"] + "" + package["Version"] for package in candidates]), + # ) # Get available versions of the package - versions_by_arch = state.repository.package_versions(name = name, arch = arch) - versions_by_any_arch = state.repository.package_versions(name = name, arch = "all") + versions_by_arch = state.repository.package_versions(name = name, arch = arch, suites = suites) + versions_by_any_arch = state.repository.package_versions(name = name, arch = "all", suites = suites) # Order packages by highest to lowest versions = version_lib.sort(versions_by_arch + versions_by_any_arch, reverse = True) @@ -64,26 +73,30 @@ def _resolve_package(state, name, version, arch): # First element in the versions list is the latest version. selected_version = versions[0] - package = state.repository.package(name = name, version = selected_version, arch = arch) + package = state.repository.package(name = name, version = selected_version, arch = arch, suites = suites) if not package: - package = state.repository.package(name = name, version = selected_version, arch = "all") + package = state.repository.package(name = name, version = selected_version, arch = "all", suites = suites) - return package + return (package, warning) _ITERATION_MAX_ = 2147483646 # For future: unfortunately this function uses a few state variables to track # certain conditions and package dependency groups. # TODO: Try to simplify it in the future. 
-def _resolve_all(state, name, version, arch, include_transitive = True): - root_package = None +def _resolve_all(state, name, version, arch, include_transitive = True, suites = None): unmet_dependencies = [] + root_package = None dependencies = [] + direct_dependencies = {} # state variables already_recursed = {} dependency_group = [] stack = [(name, version, -1)] + warnings = [] + + path = [] for i in range(0, _ITERATION_MAX_ + 1): if not len(stack): @@ -97,8 +110,13 @@ def _resolve_all(state, name, version, arch, include_transitive = True): if dependency_group_idx > -1 and dependency_group[dependency_group_idx][0]: continue - package = _resolve_package(state, name, version, arch) + path.append(name) + + (package, warning) = _resolve_package(state, name, version, arch, suites = suites) + if warning: + warnings.append(warning) + # Unmet optional dependency encountered # If this package is not found and is part of a dependency group, then just skip it. if not package and dependency_group_idx > -1: continue @@ -120,12 +138,13 @@ def _resolve_all(state, name, version, arch, include_transitive = True): # If we encountered package before in the transitive closure, skip it if key in already_recursed: + # fail(" -> ".join(path)) continue # Do not add dependency if it's a root package to avoid circular dependency. 
def _deb_export_impl(ctx):
    """Materialize selected files out of a .deb data archive.

    Entries in `symlink_outs` become Bazel symlink actions pointing at files
    owned by other packages (per `foreign_symlinks`), while `outs` are
    extracted from this package's own data tarball with bsdtar.
    """
    bsdtar = ctx.toolchains[TAR_TOOLCHAIN_TYPE]

    # foreign_symlinks maps target label -> JSON-encoded list of indices into
    # symlink_outs (label-keyed with string values for Bazel 7.0.0 compat).
    # NOTE: a previous revision also built an unused decoded dict here; it was
    # dead code and has been removed.
    for (target, indices_json) in ctx.attr.foreign_symlinks.items():
        for i in json.decode(indices_json):
            ctx.actions.symlink(
                output = ctx.outputs.symlink_outs[i],
                # grossly inefficient: materializes the whole depset just to
                # take its first file.
                target_file = target[DefaultInfo].files.to_list()[0],
            )

    if len(ctx.outputs.outs):
        fout = ctx.outputs.outs[0]

        # Extraction root: truncate the first output's path right after the
        # repo name so every extracted file lands inside this repository's
        # output tree.
        output = fout.path[:fout.path.find(fout.owner.repo_name) + len(fout.owner.repo_name)]

        args = ctx.actions.args()
        args.add("-xf")
        args.add_all(ctx.files.srcs)
        args.add("-C")
        args.add(output)
        args.add_all(
            ctx.outputs.outs,
            # Strips the leading "../<repo_name>/" from short_path to get the
            # repo-relative member path (3 chars for "../" + name + 1 for "/").
            map_each = lambda src: src.short_path[len(src.owner.repo_name) + 4:],
            allow_closure = True,
        )
        ctx.actions.run(
            executable = bsdtar.tarinfo.binary,
            # the archive may contain symlinks that point to symlinks that
            # reference files from other packages, therefore symlink_outs must
            # be present in the sandbox for Bazel to successfully track them.
            inputs = ctx.files.srcs + ctx.outputs.symlink_outs,
            outputs = ctx.outputs.outs,
            arguments = [args],
            mnemonic = "Unpack",
            toolchain = TAR_TOOLCHAIN_TYPE,
        )

    return DefaultInfo(
        files = depset(
            ctx.outputs.outs +
            ctx.outputs.symlink_outs +
            ctx.files.foreign_symlinks,
        ),
    )

deb_export = rule(
    implementation = _deb_export_impl,
    attrs = {
        "srcs": attr.label_list(allow_files = True),
        # mapping of foreign label -> JSON list of symlink_outs indices
        # (label-keyed for Bazel 7.0 compat).
        "foreign_symlinks": attr.label_keyed_string_dict(allow_files = True),
        "symlink_outs": attr.output_list(),
        "outs": attr.output_list(),
    },
    toolchains = [
        TAR_TOOLCHAIN_TYPE,
    ],
)
def resolve_symlink(target_path, relative_symlink):
    """Resolve `relative_symlink` against the directory containing `target_path`.

    Both arguments are slash-separated paths; ".." segments pop one directory
    (stopping at the root), while "." and empty segments are ignored.
    Returns the normalized, slash-joined result.
    """
    # Start from the directory of the symlink itself (drop the file name).
    stack = target_path.split("/")[:-1]

    for piece in relative_symlink.split("/"):
        if piece == "." or piece == "":
            # No-op segments.
            continue
        if piece == "..":
            # Go up one level, but never above the (implicit) root.
            if len(stack) > 0:
                stack.pop()
        else:
            stack.append(piece)

    return "/".join(stack)
def _discover_contents(rctx, depends_on, depends_file_map, target_name):
    """Scan the package's data.tar.xz and synthesize cc_* targets for it.

    Args:
        rctx: repository_ctx; data.tar.xz must already be present in the repo.
        depends_on: list of lockfile package keys this package depends on.
        depends_file_map: dict of package name -> list of shipped files, used
            to resolve symlinks pointing into other packages.
        target_name: name for the generated cc_library target.

    Returns:
        (build_file_content, outs, symlinks): BUILD content to append, the
        files to extract locally, and a map of symlink path -> resolution
        (either a label in another repo or an in-package path).
    """
    result = rctx.execute(["tar", "--exclude='./usr/share/**'", "--exclude='./**/'", "-tvf", "data.tar.xz"])
    contents_raw = result.stdout.splitlines()

    so_files = []
    a_files = []
    h_files = []
    hpp_files = []
    hpp_files_woext = []
    pc_files = []
    o_files = []
    symlinks = {}

    for line in contents_raw:
        # Skip directories
        if line.endswith("/"):
            continue

        # Keep only the member path (drop the `tar -tv` mode/owner columns).
        line = line[line.find(" ./") + 3:]

        # Skip everything in man pages and examples
        if line.startswith("usr/share"):
            continue

        is_symlink_idx = line.find(" -> ")
        resolved_symlink = None
        if is_symlink_idx != -1:
            symlink_target = line[is_symlink_idx + 4:]
            line = line[:is_symlink_idx]
            if line.endswith(".pc"):
                continue

            # An absolute symlink
            if symlink_target.startswith("/"):
                resolved_symlink = symlink_target.removeprefix("/")
            else:
                resolved_symlink = resolve_symlink(line, symlink_target).removeprefix("./")

        # NOTE: str.find returns -1 when absent (truthy!) and 0 at position 0
        # (falsy), so every containment check below must compare `!= -1`
        # explicitly, as the .so branch already did. The .a and .pc branches
        # previously used the bare find() result and misclassified paths.
        if (line.endswith(".so") or line.find(".so.") != -1) and line.find("lib") != -1:
            if line.find("libthread_db") != -1:
                continue
            so_files.append(line)
        elif line.endswith(".a") and line.find("lib") != -1:
            a_files.append(line)
        elif line.endswith(".pc") and line.find("pkgconfig") != -1:
            pc_files.append(line)
        elif line.endswith(".h"):
            h_files.append(line)
        elif line.endswith(".hpp"):
            hpp_files.append(line)
        elif line.find("include/c++") != -1:
            hpp_files_woext.append(line)
        elif line.endswith(".o"):
            o_files.append(line)
        else:
            continue

        if resolved_symlink:
            symlinks[line] = resolved_symlink

    # Resolve symlinks; start with a copy so `symlinks` keeps the full map.
    unresolved_symlinks = {} | symlinks

    # TODO: this is highly inefficient, change the filemapping to be
    # file -> package instead of package -> files
    for dep in depends_on:
        (suite, name, arch, _) = lockfile.parse_package_key(dep)
        filemap = depends_file_map.get(name, []) or []
        for file in filemap:
            if len(unresolved_symlinks) == 0:
                break

            # Starlark dict.items() returns a fresh list, so popping inside
            # the loop is safe here.
            for (symlink, symlink_target) in unresolved_symlinks.items():
                if file == symlink_target:
                    unresolved_symlinks.pop(symlink)
                    symlinks[symlink] = "@%s//:%s" % (util.sanitize(dep), file)

    # Resolve self symlinks (targets shipped by this very package).
    self_symlinks = {}
    for file in so_files + h_files + hpp_files + a_files + hpp_files_woext:
        for (symlink, symlink_target) in unresolved_symlinks.items():
            if file == symlink_target:
                self_symlinks[symlink] = symlinks.pop(symlink)
                unresolved_symlinks.pop(symlink)
        if len(unresolved_symlinks) == 0:
            break

    if len(unresolved_symlinks):
        util.warning(
            rctx,
            "some symlinks could not be solved for {}. \nresolved: {}\nunresolved:{}".format(
                target_name,
                json.encode_indent(symlinks),
                json.encode_indent(unresolved_symlinks),
            ),
        )

    # Files to extract locally: everything that is not a resolved symlink.
    outs = []
    for out in so_files + h_files + hpp_files + a_files + hpp_files_woext + o_files:
        if out not in symlinks:
            outs.append(out)

    deps = []
    for dep in depends_on:
        (suite, name, arch, version) = lockfile.parse_package_key(dep)
        deps.append(
            "@%s//:%s_wodeps" % (util.sanitize(dep), name.removesuffix("-dev")),
        )

    pkgconfigs = []
    if len(pc_files):
        # TODO: use rctx.extract instead.
        rctx.execute(
            ["tar", "-xvf", "data.tar.xz"] + ["./" + pc for pc in pc_files],
        )
        for pc in pc_files:
            if rctx.path(pc).exists:
                pkgconfigs.append(pc)

    build_file_content = """
so_library(
    name = "_so_libs",
    dynamic_libs = {}
)
""".format(so_files)

    # Directories containing libraries, used below for -L/-rpath flags.
    rpaths = {}
    for so in so_files + a_files:
        rpath = so[:so.rfind("/")]
        rpaths[rpath] = None

    # Package has a pkgconfig, use that as the source of truth.
    if len(pkgconfigs):
        link_paths = []
        includes = []

        static_lib = None
        shared_lib = None

        import_targets = []

        for pc_file in pkgconfigs:
            pkgc = pkgconfig(rctx, pc_file)
            includes += pkgc.includes
            link_paths += pkgc.link_paths

            if len(pkgc.libnames) == 0:
                continue

            for libname in pkgc.libnames:
                if libname + "_import" in import_targets:
                    continue

                subtarget = libname + "_import"
                import_targets.append(subtarget)

                # Look for a static archive
                # for ar in a_files:
                #     if ar.endswith(pkgc.libname + ".a"):
                #         static_lib = '":%s"' % ar
                #         break

                # Look for a dynamic library
                IGNORE = ["libfl"]
                for so_lib in so_files:
                    if libname and libname not in IGNORE and so_lib.endswith(libname + ".so"):
                        shared_lib = '":%s"' % so_lib
                        break

                build_file_content += _CC_IMPORT_TMPL.format(
                    name = subtarget,
                    shared_lib = shared_lib,
                    static_lib = static_lib,
                    hdrs = [],
                    includes = {
                        "external/.." + include: True
                        for include in includes + ["/usr/include", "/usr/include/x86_64-linux-gnu"]
                    }.keys(),
                    linkopts = pkgc.linkopts,
                )

        build_file_content += _CC_LIBRARY_TMPL.format(
            name = target_name,
            hdrs = h_files + hpp_files,
            additional_compiler_inputs = hpp_files_woext,
            additional_linker_inputs = so_files + o_files,
            linkopts = {
                opt: True
                for opt in [
                    # # Needed for cc_test binaries to locate its dependencies.
                    # "-Wl,-rpath=../{}/{}".format(rctx.attr.name, rpath)
                    # for rp in rpaths
                ] + [
                    # Needed for cc_test binaries to locate its dependencies as a build tool
                    # "-Wl,-rpath=./external/{}/{}".format(rctx.attr.name, rpath)
                    # for rp in rpaths
                ] + [
                    "-L$(BINDIR)/external/{}/{}".format(rctx.attr.name, lp)
                    for lp in link_paths
                ] + [
                    "-Wl,-rpath=/" + rp
                    for rp in rpaths
                ]
            }.keys(),
            direct_deps = import_targets + [":_so_libs"],
            deps = deps,
            strip_include_prefix = None,
        )

    elif (len(hpp_files) or len(h_files)) and ((target_name.find("libc") != -1 or target_name.find("libstdc") != -1 or target_name.find("libgcc") != -1)):
        # Toolchain/libc-family packages: headers only, no extra deps.
        build_file_content += _CC_LIBRARY_LIBC_TMPL.format(
            name = target_name,
            hdrs = h_files + hpp_files,
            additional_compiler_inputs = hpp_files_woext,
            additional_linker_inputs = so_files + a_files + o_files,
            includes = [],
        )
    else:
        extra_linkopts = []
        if target_name == "libbsd0":
            extra_linkopts = [
                "-Wl,--remap-inputs=/usr/lib/x86_64-linux-gnu/libbsd.so.0.11.7=$(BINDIR)/external/{}/usr/lib/x86_64-linux-gnu/libbsd.so.0.11.7".format(rctx.attr.name),
            ]
        build_file_content += _CC_LIBRARY_TMPL.format(
            name = target_name,
            hdrs = h_files + hpp_files,
            deps = deps,
            additional_compiler_inputs = hpp_files_woext,
            additional_linker_inputs = so_files + o_files,
            linkopts = [
                # Required for linker to find .so libraries
                "-L$(BINDIR)/external/{}/{}".format(rctx.attr.name, rp)
                for rp in rpaths
            ] + [
                # # Required for bazel test binary to find its dependencies.
                # "-Wl,-rpath=../{}/{}".format(rctx.attr.name, rp)
                # for rp in rpaths
            ] + [
                # Required for ld to validate rpath entries
                "-Wl,-rpath-link=$(BINDIR)/external/{}/{}".format(rctx.attr.name, rp)
                for rp in rpaths
            ] + [
                # Required for containers to find the dependencies at runtime.
                "-Wl,-rpath=/" + rp
                for rp in rpaths
            ] + extra_linkopts,
            strip_include_prefix = '"usr/include"',
            direct_deps = [":_so_libs"],
        )

    return (build_file_content, outs, symlinks)
-load(":lockfile.bzl", "lockfile") -load(":util.bzl", "util") -load(":version_constraint.bzl", "version_constraint") - -def _parse_manifest(rctx, yq_toolchain_prefix, manifest): - is_windows = repo_utils.is_windows(rctx) - host_yq = Label("@{}_{}//:yq{}".format(yq_toolchain_prefix, repo_utils.platform(rctx), ".exe" if is_windows else "")) - - if hasattr(rctx, "watch"): - rctx.watch(manifest) - - yq_args = [ - str(rctx.path(host_yq)), - str(rctx.path(manifest)), - "-o=json", - ] - result = rctx.execute(yq_args) - if result.return_code: - fail("failed to parse manifest yq. '{}' exited with {}: \nSTDOUT:\n{}\nSTDERR:\n{}".format(" ".join(yq_args), result.return_code, result.stdout, result.stderr)) - - return json.decode(result.stdout if result.stdout != "null" else "{}") - -# This function is shared between BZLMOD and WORKSPACE implementations. -# INTERNAL: DO NOT DEPEND! -# buildifier: disable=function-docstring-args -def internal_resolve(rctx, yq_toolchain_prefix, manifest, include_transitive): - manifest = _parse_manifest(rctx, yq_toolchain_prefix, manifest) - - if manifest["version"] != 1: - fail("Unsupported manifest version, {}. 
Please use `version: 1` manifest.".format(manifest["version"])) - - if type(manifest["sources"]) != "list": - fail("`sources` should be an array") - - if type(manifest["archs"]) != "list": - fail("`archs` should be an array") - - if type(manifest["packages"]) != "list": - fail("`packages` should be an array") - - sources = [] - - for src in manifest["sources"]: - distr, components = src["channel"].split(" ", 1) - for comp in components.split(" "): - # TODO: only support urls before 1.0 - if "urls" in src: - urls = src["urls"] - elif "url" in src: - urls = [src["url"]] - else: - fail("Source missing 'url' or 'urls' field") - - sources.append(( - urls, - distr, - comp, - )) - - repository = deb_repository.new(rctx, sources = sources, archs = manifest["archs"]) - resolver = dependency_resolver.new(repository) - lockf = lockfile.empty(rctx) - - resolved_count = 0 - - for arch in manifest["archs"]: - resolved_count = 0 - dep_constraint_set = {} - for dep_constraint in manifest["packages"]: - if dep_constraint in dep_constraint_set: - fail("Duplicate package, {}. Please remove it from your manifest".format(dep_constraint)) - dep_constraint_set[dep_constraint] = True - - constraint = version_constraint.parse_depends(dep_constraint).pop() - - rctx.report_progress("Resolving %s for %s" % (dep_constraint, arch)) - (package, dependencies, unmet_dependencies) = resolver.resolve_all( - name = constraint["name"], - version = constraint["version"], - arch = arch, - include_transitive = include_transitive, - ) - - if not package: - fail("Unable to locate package `%s` for architecture: %s. It may only exist for specific set of architectures." 
% (dep_constraint, arch)) - - if len(unmet_dependencies): - # buildifier: disable=print - util.warning(rctx, "Following dependencies could not be resolved for %s: %s" % (constraint["name"], ",".join([up[0] for up in unmet_dependencies]))) - - lockf.add_package(package, arch) - - resolved_count += len(dependencies) + 1 - - for dep in dependencies: - lockf.add_package(dep, arch) - lockf.add_package_dependency(package, dep, arch) - - rctx.report_progress("Resolved %d packages for %s" % (resolved_count, arch)) - return lockf - -_BUILD_TMPL = """ -load("@rules_shell//shell:sh_binary.bzl", "sh_binary") - -filegroup( - name = "lockfile", - srcs = ["lock.json"], - tags = ["manual"], - visibility = ["//visibility:public"] -) - -sh_binary( - name = "lock", - srcs = ["copy.sh"], - data = ["lock.json"], - tags = ["manual"], - args = ["$(location :lock.json)"], - visibility = ["//visibility:public"] -) -""" - -def _deb_resolve_impl(rctx): - lockf = internal_resolve(rctx, rctx.attr.yq_toolchain_prefix, rctx.attr.manifest, rctx.attr.resolve_transitive) - lockf.write("lock.json") - - lock_filename = rctx.attr.manifest.name.replace(".yaml", ".lock.json") - lock_label = rctx.attr.manifest.relative(lock_filename) - workspace_relative_path = "{}{}".format( - ("%s/" % lock_label.package) if lock_label.package else "", - lock_label.name, - ) - - rctx.file( - "copy.sh", - rctx.read(rctx.attr._copy_sh_tmpl).format( - repo_name = util.get_repo_name(rctx.name).replace("_resolve", ""), - lock_label = lock_label, - workspace_relative_path = workspace_relative_path, - ), - executable = True, - ) - - rctx.file("BUILD.bazel", _BUILD_TMPL) - -deb_resolve = repository_rule( - implementation = _deb_resolve_impl, - attrs = { - "manifest": attr.label(), - "resolve_transitive": attr.bool(default = True), - "yq_toolchain_prefix": attr.string(default = "yq"), - "_copy_sh_tmpl": attr.label( - default = "//apt/private:copy.sh.tmpl", - doc = "INTERNAL, DO NOT USE - " + - "private attribute label to prevent 
repo restart", - ), - }, -) diff --git a/apt/private/deb_translate_lock.bzl b/apt/private/deb_translate_lock.bzl deleted file mode 100644 index 34ce22cc..00000000 --- a/apt/private/deb_translate_lock.bzl +++ /dev/null @@ -1,240 +0,0 @@ -"repository rule for generating a dependency graph from a lockfile." - -load(":lockfile.bzl", "lockfile") -load(":starlark_codegen_utils.bzl", "starlark_codegen_utils") -load(":util.bzl", "util") - -# header template for packages.bzl file -_DEB_IMPORT_HEADER_TMPL = '''\ -"""Generated by rules_distroless. DO NOT EDIT.""" -load("@rules_distroless//apt/private:deb_import.bzl", "deb_import") - -# buildifier: disable=function-docstring -def {}_packages(): -''' - -# deb_import template for packages.bzl file -_DEB_IMPORT_TMPL = '''\ - deb_import( - name = "{name}", - urls = {urls}, - sha256 = "{sha256}", - ) -''' - -_PACKAGE_TEMPLATE = '''\ -"""Generated by rules_distroless. DO NOT EDIT.""" - -alias( - name = "data", - actual = select({data_targets}), - visibility = ["//visibility:public"], -) - -alias( - name = "control", - actual = select({control_targets}), - visibility = ["//visibility:public"], -) - -filegroup( - name = "{target_name}", - srcs = select({deps}) + [":data"], - visibility = ["//visibility:public"], -) -''' - -_ROOT_BUILD_TMPL = """\ -"Generated by rules_distroless. DO NOT EDIT." - -load("@rules_distroless//apt:defs.bzl", "dpkg_status") -load("@rules_distroless//distroless:defs.bzl", "flatten") - -exports_files(['packages.bzl']) - -# Map Debian architectures to platform CPUs. 
-# -# For more info on Debian architectures, see: -# * https://wiki.debian.org/SupportedArchitectures -# * https://wiki.debian.org/ArchitectureSpecificsMemo -# * https://www.debian.org/releases/stable/amd64/ch02s01.en.html#idm186 -# -# For more info on Bazel's platforms CPUs see: -# * https://github.com/bazelbuild/platforms/blob/main/cpu/BUILD -_ARCHITECTURE_MAP = {{ - "amd64": "x86_64", - "arm64": "arm64", - "ppc64el": "ppc64le", - "mips64el": "mips64", - "s390x": "s390x", - "i386": "x86_32", - "armhf": "armv7e-mf", - "all": "all", -}} - -_ARCHITECTURES = {architectures} - -[ - config_setting( - name = os + "_" + arch, - constraint_values = [ - "@platforms//os:" + os, - "@platforms//cpu:" + _ARCHITECTURE_MAP[arch], - ], - ) - for os in ["linux"] - for arch in _ARCHITECTURES -] - - -alias( - name = "lock", - actual = "@{target_name}_resolve//:lock", - visibility = ["//visibility:public"], -) - -# List of installed packages. For now it's private. -_PACKAGES = {packages} - -# Creates /var/lib/dpkg/status with installed package information. -dpkg_status( - name = "dpkg_status", - controls = select({{ - "//:linux_%s" % arch: ["//%s:control" % package for package in packages] - for arch, packages in _PACKAGES.items() - }}) if _PACKAGES else {{}}, - visibility = ["//visibility:public"], -) - -filegroup( - name = "packages", - srcs = select({{ - "//:linux_%s" % arch: ["//%s" % package for package in packages] - for arch, packages in _PACKAGES.items() - }}) if _PACKAGES else {{}}, - visibility = ["//visibility:public"], -) - - -# A filegroup that contains all the packages and the dpkg status file. 
-filegroup( - name = "{target_name}", - srcs = [ - ":dpkg_status", - ":packages", - ], - visibility = ["//visibility:public"], -) - -flatten( - name = "flat", - tars = [ - "{target_name}", - ], - deduplicate = True, - visibility = ["//visibility:public"], -) -""" - -def _deb_translate_lock_impl(rctx): - lock_content = rctx.attr.lock_content - package_template = rctx.read(rctx.attr.package_template) - lockf = lockfile.from_json(rctx, lock_content if lock_content else rctx.read(rctx.attr.lock)) - - package_defs = [] - - if not lock_content: - package_defs = [_DEB_IMPORT_HEADER_TMPL.format(rctx.attr.name)] - - if len(lockf.packages()) < 1: - package_defs.append(" pass") - - # TODO: rework lockfile to include architecure information - architectures = {} - packages = {} - - for (package) in lockf.packages(): - package_key = lockfile.make_package_key( - package["name"], - package["version"], - package["arch"], - ) - - if package["arch"] not in architectures: - architectures[package["arch"]] = [] - - if package["name"] not in architectures[package["arch"]]: - architectures[package["arch"]].append(package["name"]) - - if package["name"] not in packages: - packages[package["name"]] = [] - if package["arch"] not in packages[package["name"]]: - packages[package["name"]].append(package["arch"]) - - if not lock_content: - package_defs.append( - _DEB_IMPORT_TMPL.format( - name = "%s_%s" % (rctx.attr.name, package_key), - package_name = package["name"], - urls = package["urls"], - sha256 = package["sha256"], - ), - ) - - repo_name = "%s%s_%s" % ("@" if lock_content else "", rctx.attr.name, package_key) - - rctx.file( - "%s/%s/BUILD.bazel" % (package["name"], package["arch"]), - package_template.format( - target_name = package["arch"], - data_targets = '"@%s//:data"' % repo_name, - control_targets = '"@%s//:control"' % repo_name, - src = '"@%s//:data"' % repo_name, - deps = starlark_codegen_utils.to_list_attr([ - "//%s/%s" % (dep["name"], package["arch"]) - for dep in 
"lock"

def _make_package_key(suite, name, version, arch):
    """Builds the canonical lockfile key: /<suite>/<name>:<arch>=<version>."""
    return "/{}/{}:{}={}".format(suite, name, arch, version)

def _parse_package_key(key):
    """Inverts _make_package_key; returns (suite, name, arch, version)."""
    (suite, tail) = key[1:].split("/", 1)
    (name, tail) = tail.split(":", 1)
    (arch, version) = tail.split("=", 1)
    return (suite, name, arch, version)

def _short_package_key(package):
    """Version-less key for a parsed Packages-index stanza: /<suite>/<name>:<arch>."""
    return "/{}/{}:{}".format(
        package["Dist"],
        package["Package"],
        package["Architecture"],
    )

def _package_key(package):
    """Full (versioned) key for a parsed Packages-index stanza."""
    return _make_package_key(
        package["Dist"],
        package["Package"],
        package["Version"],
        package["Architecture"],
    )

def _add_package(lock, package):
    """Records `package` in the lock; a no-op if the key is already present."""
    key = _package_key(package)
    if key in lock.packages:
        return

    # Normalized entry; field order is kept stable so the serialized
    # lockfile diffs cleanly.
    lock.packages[key] = dict(
        name = package["Package"],
        version = package["Version"],
        architecture = package["Architecture"],
        sha256 = package["SHA256"],
        filename = package["Filename"],
        suite = package["Dist"],
        section = package["Section"],
        size = int(package["Size"]),
        depends_on = [],
    )

def _add_package_dependency(lock, package, dependency):
    """Appends `dependency`'s key to `package`'s depends_on list, deduplicated."""
    pkg_key = _package_key(package)
    if pkg_key not in lock.packages:
        fail("illegal state: %s is not in the lockfile." % package["Package"])

    dep_key = _package_key(dependency)
    depends_on = lock.packages[pkg_key]["depends_on"]
    if dep_key not in depends_on:
        depends_on.append(dep_key)

def _has_package(lock, suite, name, version, arch):
    """True if this exact (suite, name, version, arch) tuple is locked."""
    return _make_package_key(suite, name, version, arch) in lock.packages

def _add_source(lock, suite, types, uris, components, architectures):
    """Registers (or overwrites) the apt source entry for `suite`."""
    lock.sources[suite] = dict(
        types = types,
        uris = uris,
        components = components,
        architectures = architectures,
    )
def _empty(mctx):
    """Returns an accessor facade over a fresh, empty v2 lock."""
    return _create(mctx, struct(
        version = 2,
        dependency_sets = dict(),
        packages = dict(),
        sources = dict(),
        facts = dict(),
    ))

def _encode_compact(lock):
    """Serializes the raw lock struct as indented JSON."""
    return json.encode_indent(lock)

def _from_json(mctx, content):
    """Deserializes lock `content`; empty content yields an empty lock.

    Only lockfile version 2 is accepted; any other version is a hard error.
    """
    if not content:
        return _empty(mctx)

    data = json.decode(content)
    if data["version"] != 2:
        fail("lock file version %d is not supported anymore. please upgrade your lock file" % data["version"])

    # Older/partial locks may omit sections entirely; default each to empty.
    return _create(mctx, struct(
        version = data["version"],
        dependency_sets = data.get("dependency_sets", dict()),
        packages = data.get("packages", dict()),
        sources = data.get("sources", dict()),
        facts = data.get("facts", dict()),
    ))

def _merge(mctx, locks):
    """Merges several locks into a new one; later locks win on key clashes.

    NOTE(review): only `packages` and `facts` are carried over -- the
    `sources` and `dependency_sets` of the inputs are dropped. Confirm
    this is intentional.
    """
    merged = _empty(mctx)
    merged_packages = merged.packages()
    merged_facts = merged.facts()
    for lock in locks:
        merged_packages.update(lock.packages())
        merged_facts.update(lock.facts())
    return merged

lockfile = struct(
    empty = _empty,
    from_json = _from_json,
    package_key = _package_key,
    short_package_key = _short_package_key,
    parse_package_key = _parse_package_key,
    merge = _merge,
)
# Copyright thesayyn 2025
# Taken from https://github.com/thesayyn/pkgconfig/blob/main/extensions.bzl
def _expand_value(value, variables):
    """Expands ${var} references in a .pc value using `variables`.

    Unknown variables expand to the empty string; a "{" or "}" seen
    outside a "$"-started reference aborts with an error.
    """
    # Fast path: nothing to substitute.
    if value.find("$") == -1:
        return value

    expanded = ""
    var_name = ""
    in_reference = False

    for ch in value.elems():
        if ch == "$":
            in_reference = True
        elif ch == "{":
            if not in_reference:
                fail("corrupted pc file")
        elif ch == "}":
            if not in_reference:
                fail("corrupted pc file")

            # Unknown variables silently expand to "" (pkg-config is
            # stricter, but .pc files in the wild do reference undefined
            # variables).
            expanded += variables[var_name] if var_name in variables else ""

            # Reset substitution state.
            var_name = ""
            in_reference = False
        elif in_reference:
            var_name += ch
        else:
            expanded += ch

    return expanded

def parse_pc(pc):
    """Parses pkg-config .pc text into ({directive: value}, {variable: value}).

    Directives are "Key: value" lines; variables are "key=value" lines.
    Comment lines (#) and blank lines are skipped. Values are expanded
    against the variables seen so far, so definition order matters.
    """
    variables = {}
    directives = {}
    for line in pc.splitlines():
        if line.startswith("#"):
            continue
        if not line.strip():
            continue
        if line.find(": ") != -1:
            (key, value) = _split_once(line, ":")
            directives[key] = _expand_value(value.removeprefix(" "), variables)
        elif line.find("=") != -1:
            (key, value) = _split_once(line, "=")
            variables[key] = _expand_value(value, variables)

    return (directives, variables)

def _split_once(line, sep):
    """Splits `line` at the first occurrence of `sep` into exactly two parts."""
    parts = line.split(sep, 1)
    if len(parts) < 2:
        fail("corrupted pc config")
    return (parts[0], parts[1])

def _parse_requires(requires):
    """Splits a Requires: directive into trimmed, non-empty tokens."""
    if not requires:
        return []
    return [dep.strip(" ") for dep in requires.split(",") if dep.strip(" ")]

def _trim(text):
    """Strips leading/trailing spaces (spaces only, not all whitespace)."""
    return text.rstrip(" ").lstrip(" ")

def process_pcconfig(pc):
    """Derives Bazel-ready build metadata from a parsed .pc file.

    Returns a tuple (libnames, includedir, libdir, linkopts, link_paths,
    includes, defines). Libraries the toolchain provides elsewhere (ICU,
    zlib, lzma, flex) are dropped from the link line.
    """
    (directives, variables) = pc

    includedir = _trim(variables["includedir"]) if "includedir" in variables else ""
    libdir = _trim(variables["libdir"]) if "libdir" in variables else ""

    linkopts = []
    includes = []
    link_paths = []
    defines = []
    libnames = []

    IGNORE = [
        "-licui18n",
        "-licuuc",
        "-licudata",
        "-lz",
        "-llzma",
        "-lfl",
    ]

    if "Libs" in directives:
        for arg in _trim(directives["Libs"]).split(" "):
            # BUG FIX: IGNORE must be checked before the "-l" branch.
            # Every IGNORE entry starts with "-l", so the previous order
            # made this filter unreachable here, unlike the Libs.private
            # loop below which checks IGNORE first.
            if arg in IGNORE:
                continue
            if arg.startswith("-L"):
                link_dir = arg.removeprefix("-L")

                # Skip bare -L args.
                if not link_dir:
                    continue
                link_paths.append(link_dir)
                linkopts.append("-Wl,-rpath=" + link_dir)
            elif arg.startswith("-l"):
                libnames.append("lib" + arg.removeprefix("-l"))
            else:
                linkopts.append(arg)

    if "Libs.private" in directives:
        for arg in _trim(directives["Libs.private"]).split(" "):
            if arg in IGNORE:
                continue
            elif arg.startswith("-l"):
                # The cc_imports we create based on these names are private already,
                # so we don't need to do anything special for `Libs.private`.
                libnames.append("lib" + arg.removeprefix("-l"))

    if "Cflags" in directives:
        for flag in _trim(directives["Cflags"]).split(" "):
            if flag.startswith("-I"):
                include = flag.removeprefix("-I")

                # Skip bare -I arguments.
                if not include:
                    continue
                includes.append(include)

                # If -I points at (or under) $includedir, e.g.
                # -I/usr/include/hiredis with Name "hiredis", also expose
                # the parent so `#include <hiredis/hiredis.h>` resolves.
                # NOTE(review): when includedir is "" (unset),
                # startswith("") is always True, so the parent entry is
                # always added and a missing "Name" directive would be an
                # error -- confirm this is acceptable.
                if include.startswith(includedir):
                    includes.append(include.removesuffix("/" + directives["Name"]))
            elif flag.startswith("-D"):
                defines.append(flag.removeprefix("-D"))

    if len(includes) == 0:
        includes = [
            # Standard include path if the package does not specify includes.
            "/usr/include",
        ]

    return (libnames, includedir, libdir, linkopts, link_paths, includes, defines)

def pkgconfig(rctx, path):
    """Reads the .pc file at `path` via the repository context and processes it."""
    pc = parse_pc(rctx.read(path))
    (libnames, includedir, libdir, linkopts, link_paths, includes, defines) = process_pcconfig(pc)

    return struct(
        libnames = libnames,
        includedir = includedir,
        libdir = libdir,
        linkopts = linkopts,
        link_paths = link_paths,
        includes = includes,
        defines = defines,
    )
dyn_lib.owner.package != ctx.label.package: + fail(".so libraries must reside in current package. %s != %s" % (dyn_lib.owner.package, ctx.label.package)) + short_path = dyn_lib.short_path + repo_relative_path = short_path[short_path.find(dyn_lib.owner.repo_name) + len(dyn_lib.owner.repo_name) + 1:] + ifso_name = repo_relative_path[:repo_relative_path.rfind("/")] + if ifso_name in ifsos: + ifso = ifsos[ifso_name] + else: + # TODO: this potentially wasterful, symlink all so libraries into a directory + # and create one ifso in the folder. + ifso = ctx.actions.declare_file(ifso_name + "/rpath.ifso") + ifsos[ifso_name] = ifso + ctx.actions.write(ifso, content = """ + /* GNU LD script + * Empty linker script for empty interface library */ + """) + lib = cc_common.create_library_to_link( + actions = ctx.actions, + cc_toolchain = cc_toolchain, + interface_library = ifso, + dynamic_library = dyn_lib, + feature_configuration = feature_configuration, + ) + libraries.append(lib) + + linker_input = cc_common.create_linker_input( + owner = ctx.label, + libraries = depset(libraries), + additional_inputs = depset([]), + user_link_flags = depset([]), + ) + + linking_context = cc_common.create_linking_context( + linker_inputs = depset([linker_input]), + ) + + return [ + CcInfo(linking_context = linking_context), + ] + +so_library = rule( + implementation = _so_library_impl, + attrs = { + "dynamic_libs": attr.label_list(allow_files = True), + }, + fragments = ["cpp"], + toolchains = use_cc_toolchain(), +) diff --git a/apt/private/translate_dependency_set.bzl b/apt/private/translate_dependency_set.bzl new file mode 100644 index 00000000..4ac20cff --- /dev/null +++ b/apt/private/translate_dependency_set.bzl @@ -0,0 +1,231 @@ +"repository rule for generating a dependency graph from a lockfile." + +load(":lockfile.bzl", "lockfile") +load(":starlark_codegen_utils.bzl", "starlark_codegen_utils") +load(":util.bzl", "util") + +_ROOT_BUILD_TMPL = """\ +"Generated by rules_distroless. 
DO NOT EDIT." + +load("@rules_distroless//apt:defs.bzl", "dpkg_status") +load("@rules_distroless//distroless:defs.bzl", "flatten") + +exports_files(['packages.bzl']) + +# Map Debian architectures to platform CPUs. +# +# For more info on Debian architectures, see: +# * https://wiki.debian.org/SupportedArchitectures +# * https://wiki.debian.org/ArchitectureSpecificsMemo +# * https://www.debian.org/releases/stable/amd64/ch02s01.en.html#idm186 +# +# For more info on Bazel's platforms CPUs see: +# * https://github.com/bazelbuild/platforms/blob/main/cpu/BUILD +_ARCHITECTURE_MAP = {{ + "amd64": "x86_64", + "arm64": "arm64", + "ppc64el": "ppc64le", + "mips64el": "mips64", + "s390x": "s390x", + "i386": "x86_32", + "armhf": "armv7e-mf", + "all": "all", +}} + +_ARCHITECTURES = {architectures} + +[ + config_setting( + name = os + "_" + arch, + constraint_values = [ + "@platforms//os:" + os, + "@platforms//cpu:" + _ARCHITECTURE_MAP[arch], + ], + ) + for os in ["linux"] + for arch in _ARCHITECTURES +] + + +# List of installed packages. For now it's private. +_PACKAGES = {packages} + +# Creates /var/lib/dpkg/status with installed package information. +dpkg_status( + name = "dpkg_status", + controls = select({{ + "//:linux_%s" % arch: ["//%s:control" % package for package in packages] + for arch, packages in _PACKAGES.items() + }}) if _PACKAGES else {{}}, + visibility = ["//visibility:public"], +) + +filegroup( + name = "packages", + srcs = select({{ + "//:linux_%s" % arch: ["//%s" % package for package in packages] + for arch, packages in _PACKAGES.items() + }}) if _PACKAGES else {{}}, + visibility = ["//visibility:public"], +) + + +# A filegroup that contains all the packages and the dpkg status file. 
_PACKAGE_TEMPLATE = '''\
"""Generated by rules_distroless. DO NOT EDIT."""

NO_MATCH_ERROR="""
Package "{target_name}" is not available for the current target platform.

Available Platforms: {available_platforms}

- Set `--platforms` on the command line.
- Perform a transition to one of the available platforms
"""

alias(
    name = "data",
    actual = select({data_targets}, no_match_error = NO_MATCH_ERROR),
    visibility = ["//visibility:public"],
)

alias(
    name = "control",
    actual = select({control_targets}, no_match_error = NO_MATCH_ERROR),
    visibility = ["//visibility:public"],
)

filegroup(
    name = "{target_name}",
    srcs = select({deps}, no_match_error = NO_MATCH_ERROR) + [":data"],
    visibility = ["//visibility:public"],
)

{extra}
'''

_DEB_CC_IMPORT = """
alias(
    name = "{target_name}",
    actual = select({selects}),
    visibility = ["//visibility:public"],
)
"""

def _translate_dependency_set_impl(rctx):
    """Materializes per-package and per-arch BUILD files for one dependency set.

    Reads the (internal) lock content, looks up the named dependency set,
    and writes:
      * <name>/<arch>/BUILD.bazel  -- per-architecture package targets
      * <name>/BUILD.bazel         -- arch-dispatching aliases via select()
      * BUILD.bazel                -- root config_settings and aggregates
    """
    per_arch_template = rctx.read(rctx.attr.package_template)
    lockf = lockfile.from_json(rctx, rctx.attr.lock_content)

    sources = lockf.sources()
    all_packages = lockf.packages()
    dependency_set = lockf.dependency_sets()[rctx.attr.depset_name]
    arch_sets = dependency_set["sets"]

    # "name=version" -> struct(name, architectures = {arch: package_key}).
    grouped = {}

    for (arch, pinned) in arch_sets.items():
        for (short_key, version) in pinned.items():
            package_key = short_key + "=" + version
            repo_name = util.sanitize(package_key)
            pkg = all_packages[package_key]

            entry = grouped.setdefault(
                pkg["name"] + "=" + version,
                struct(
                    name = pkg["name"],
                    architectures = {},
                ),
            )
            entry.architectures[arch] = package_key

            rctx.file(
                "%s/%s/BUILD.bazel" % (pkg["name"], arch),
                per_arch_template.format(
                    target_name = arch,
                    data_targets = '"@%s//:data"' % repo_name,
                    control_targets = '"@%s//:control"' % repo_name,
                    src = '"@%s//:data"' % repo_name,
                    deps = ["@" + util.sanitize(dep_key) for dep_key in pkg["depends_on"]],
                    urls = [
                        uri + "/" + pkg["filename"]
                        for uri in sources[pkg["suite"]]["uris"]
                    ],
                    name = pkg["name"],
                    arch = pkg["architecture"],
                    sha256 = pkg["sha256"],
                    repo_name = repo_name,
                ),
            )

    for entry in grouped.values():
        pkg_name = entry.name
        archs = entry.architectures.keys()

        extra = ""
        if pkg_name.endswith("-dev"):
            # -dev packages additionally expose a cc-import-style alias named
            # after the package without the "-dev" suffix.
            cc_target = pkg_name.removesuffix("-dev")
            extra = _DEB_CC_IMPORT.format(
                target_name = cc_target,
                selects = starlark_codegen_utils.to_dict_attr({
                    "//:linux_%s" % arch: "@%s//:%s" % (util.sanitize(key), cc_target)
                    for (arch, key) in entry.architectures.items()
                }),
            )

        rctx.file(
            "%s/BUILD.bazel" % pkg_name,
            _PACKAGE_TEMPLATE.format(
                target_name = pkg_name,
                data_targets = starlark_codegen_utils.to_dict_attr({
                    "//:linux_%s" % arch: "//%s/%s:data" % (pkg_name, arch)
                    for arch in archs
                }),
                control_targets = starlark_codegen_utils.to_dict_attr({
                    "//:linux_%s" % arch: "//%s/%s:control" % (pkg_name, arch)
                    for arch in archs
                }),
                deps = starlark_codegen_utils.to_dict_list_attr({
                    "//:linux_%s" % arch: ["//%s/%s" % (pkg_name, arch)]
                    for arch in archs
                }),
                extra = extra,
                available_platforms = " ".join([
                    "linux/" + arch
                    for arch in archs
                ]),
            ),
        )

    # NOTE(review): the root BUILD always receives an empty _PACKAGES map
    # (the template guards with `if _PACKAGES else {}`), so dpkg_status and
    # the packages filegroup select over nothing -- confirm this is the
    # intended "private for now" behavior.
    rctx.file("BUILD.bazel", _ROOT_BUILD_TMPL.format(
        target_name = util.get_repo_name(rctx.attr.name),
        packages = starlark_codegen_utils.to_dict_list_attr({}),
        architectures = starlark_codegen_utils.to_list_attr(arch_sets.keys()),
    ))

translate_dependency_set = repository_rule(
    implementation = _translate_dependency_set_impl,
    attrs = {
        "depset_name": attr.string(doc = "INTERNAL: DO NOT USE"),
        "lock_content": attr.string(doc = "INTERNAL: DO NOT USE"),
        "package_template": attr.label(default = "//apt/private:package.BUILD.tmpl"),
    },
)
| map(select(.name == "libuuid1")) | .[0].version', -) - -assert_contains( - name = "test_libuuid_version_empty_lock", - actual = ":pick_libuuid_version_empty_lock", - expected = "2.38.1-5+deb12u1", -) - -jq( - name = "pick_quake_arch", - srcs = [ - "@arch_all_test_resolve//:lockfile", - ], - args = ["-rj"], - filter = '.packages | map(select(.name == "quake")) | .[0].arch', -) - -assert_contains( - name = "test_quake_arch", - actual = ":pick_quake_arch", - expected = "all", -) - -jq( - name = "pick_quake_version", - srcs = [ - "@arch_all_test_resolve//:lockfile", - ], - args = ["-rj"], - filter = '.packages | map(select(.name == "quake")) | .[0].version', -) - -assert_contains( - name = "test_quake_version", - actual = ":pick_quake_version", - expected = "73", -) - -build_test( - name = "build_clang", - target_compatible_with = [ - "@platforms//os:linux", - ], - targets = [ - "@clang//clang", - ], -) +# Tests for the new bzlmod API are done via the bookworm dependency_set +# in the root MODULE.bazel. The old yaml-manifest based tests have been +# removed as part of the new API migration. 
diff --git a/apt/tests/resolution_test.bzl b/apt/tests/resolution_test.bzl index 7fd33021..1cb835f2 100644 --- a/apt/tests/resolution_test.bzl +++ b/apt/tests/resolution_test.bzl @@ -115,12 +115,14 @@ def _make_index(): def _add_package(idx, **kwargs): kwargs["architecture"] = kwargs.get("architecture", _test_arch) kwargs["version"] = kwargs.get("version", _test_version) + dist = kwargs.pop("dist", "test") r = "\n".join(["{}: {}".format(item[0].title(), item[1]) for item in kwargs.items()]) - idx.parse_repository(r) + idx.parse_repository(r, dist = dist) return struct( add_package = lambda **kwargs: _add_package(idx, **kwargs), resolution = resolution, + idx = idx, reset = lambda: idx.reset(), ) @@ -133,7 +135,7 @@ def _resolve_optionals_test(ctx): idx.add_package(package = "libc6-dev") idx.add_package(package = "eject", depends = "libc6-dev | libc-dev") - (root_package, dependencies, _) = idx.resolution.resolve_all( + (root_package, dependencies, _, _) = idx.resolution.resolve_all( name = "eject", version = ("=", _test_version), arch = _test_arch, @@ -157,7 +159,7 @@ def _resolve_architecture_specific_packages_test(ctx): idx.add_package(package = "glibc", architecture = "all", depends = "foo [i386], bar [amd64]") # bar for amd64 - (root_package, dependencies, _) = idx.resolution.resolve_all( + (root_package, dependencies, _, _) = idx.resolution.resolve_all( name = "glibc", version = ("=", _test_version), arch = "amd64", @@ -168,7 +170,7 @@ def _resolve_architecture_specific_packages_test(ctx): asserts.equals(env, 1, len(dependencies)) # foo for i386 - (root_package, dependencies, _) = idx.resolution.resolve_all( + (root_package, dependencies, _, _) = idx.resolution.resolve_all( name = "glibc", version = ("=", _test_version), arch = "i386", @@ -197,7 +199,7 @@ def _resolve_aliases(ctx): for package in with_packages: package(idx) - (root_package, dependencies, _) = idx.resolution.resolve_all( + (root_package, dependencies, _, _) = idx.resolution.resolve_all( name = 
def _resolve_suite_constraint_test(ctx):
    """resolve_package honors the `suites` filter for direct lookups."""
    env = unittest.begin(ctx)

    idx = _make_index()

    # The same package name exists in two suites at different versions.
    idx.add_package(package = "curl", version = "7.68.0", dist = "noble")
    idx.add_package(package = "curl", version = "7.88.0", dist = "jammy")

    # No suite filter: resolution picks the highest version overall.
    (pkg, _) = idx.resolution.resolve_package(name = "curl", version = None, arch = _test_arch)
    asserts.equals(env, "7.88.0", pkg["Version"])
    asserts.equals(env, "jammy", pkg["Dist"])

    # Restricting to noble pins the older build.
    (pkg, _) = idx.resolution.resolve_package(name = "curl", version = None, arch = _test_arch, suites = ["noble"])
    asserts.equals(env, "7.68.0", pkg["Version"])
    asserts.equals(env, "noble", pkg["Dist"])

    # Restricting to jammy yields the newer build.
    (pkg, _) = idx.resolution.resolve_package(name = "curl", version = None, arch = _test_arch, suites = ["jammy"])
    asserts.equals(env, "7.88.0", pkg["Version"])
    asserts.equals(env, "jammy", pkg["Dist"])

    return unittest.end(env)

resolve_suite_constraint_test = unittest.make(_resolve_suite_constraint_test)

def _resolve_suite_constraint_not_found_test(ctx):
    """A package outside the requested suites resolves to None."""
    env = unittest.begin(ctx)

    idx = _make_index()
    idx.add_package(package = "noble-only-pkg", version = "1.0", dist = "noble")

    # Unconstrained lookup succeeds.
    (pkg, _) = idx.resolution.resolve_package(name = "noble-only-pkg", version = None, arch = _test_arch)
    asserts.equals(env, "noble-only-pkg", pkg["Package"])

    # Constrained to a suite that does not carry it: no result.
    (pkg, _) = idx.resolution.resolve_package(name = "noble-only-pkg", version = None, arch = _test_arch, suites = ["jammy"])
    asserts.equals(env, None, pkg)

    return unittest.end(env)

resolve_suite_constraint_not_found_test = unittest.make(_resolve_suite_constraint_not_found_test)

def _resolve_suite_constraint_transitive_test(ctx):
    """The suites filter also applies to transitively resolved dependencies."""
    env = unittest.begin(ctx)

    idx = _make_index()

    # Parallel dependency chains in two suites.
    idx.add_package(package = "libssl", version = "1.0", dist = "noble")
    idx.add_package(package = "curl", version = "7.68.0", depends = "libssl", dist = "noble")
    idx.add_package(package = "libssl", version = "2.0", dist = "jammy")
    idx.add_package(package = "curl", version = "7.88.0", depends = "libssl", dist = "jammy")

    # noble: both curl and its libssl dependency come from noble.
    (root, deps, _, _) = idx.resolution.resolve_all(name = "curl", version = None, arch = _test_arch, suites = ["noble"])
    asserts.equals(env, "curl", root["Package"])
    asserts.equals(env, "7.68.0", root["Version"])
    asserts.equals(env, "noble", root["Dist"])
    asserts.equals(env, 1, len(deps))
    asserts.equals(env, "libssl", deps[0]["Package"])
    asserts.equals(env, "1.0", deps[0]["Version"])
    asserts.equals(env, "noble", deps[0]["Dist"])

    # jammy: the whole chain flips to jammy's versions.
    (root, deps, _, _) = idx.resolution.resolve_all(name = "curl", version = None, arch = _test_arch, suites = ["jammy"])
    asserts.equals(env, "curl", root["Package"])
    asserts.equals(env, "7.88.0", root["Version"])
    asserts.equals(env, "jammy", root["Dist"])
    asserts.equals(env, 1, len(deps))
    asserts.equals(env, "libssl", deps[0]["Package"])
    asserts.equals(env, "2.0", deps[0]["Version"])
    asserts.equals(env, "jammy", deps[0]["Dist"])

    return unittest.end(env)

resolve_suite_constraint_transitive_test = unittest.make(_resolve_suite_constraint_transitive_test)

def _resolve_suite_constraint_multiple_suites_test(ctx):
    """A suites filter with several entries matches packages in any of them."""
    env = unittest.begin(ctx)

    idx = _make_index()
    allowed = ["noble", "noble-security", "noble-updates"]

    # Packages spread across the allowed suites.
    idx.add_package(package = "base-pkg", version = "1.0", dist = "noble")
    idx.add_package(package = "security-pkg", version = "1.1", dist = "noble-security")
    idx.add_package(package = "updates-pkg", version = "1.2", dist = "noble-updates")

    (pkg, _) = idx.resolution.resolve_package(name = "base-pkg", version = None, arch = _test_arch, suites = allowed)
    asserts.equals(env, "base-pkg", pkg["Package"])
    asserts.equals(env, "noble", pkg["Dist"])

    (pkg, _) = idx.resolution.resolve_package(name = "security-pkg", version = None, arch = _test_arch, suites = allowed)
    asserts.equals(env, "security-pkg", pkg["Package"])
    asserts.equals(env, "noble-security", pkg["Dist"])

    # jammy is not in the allow-list, so its packages stay invisible.
    idx.add_package(package = "other-pkg", version = "1.0", dist = "jammy")
    (pkg, _) = idx.resolution.resolve_package(name = "other-pkg", version = None, arch = _test_arch, suites = allowed)
    asserts.equals(env, None, pkg)

    return unittest.end(env)

resolve_suite_constraint_multiple_suites_test = unittest.make(_resolve_suite_constraint_multiple_suites_test)

def _resolve_virtual_package_suite_constraint_test(ctx):
    """Virtual package (Provides:) resolution also honors the suites filter."""
    env = unittest.begin(ctx)

    idx = _make_index()

    # Two providers of the virtual "awk" in different suites.
    idx.add_package(package = "mawk", version = "1.0", provides = "awk", dist = "noble")
    idx.add_package(package = "gawk", version = "2.0", provides = "awk", dist = "jammy")

    # Unconstrained: the provider with the highest version wins.
    (pkg, _) = idx.resolution.resolve_package(name = "awk", version = None, arch = _test_arch)
    asserts.equals(env, "gawk", pkg["Package"])

    # noble only offers mawk.
    (pkg, _) = idx.resolution.resolve_package(name = "awk", version = None, arch = _test_arch, suites = ["noble"])
    asserts.equals(env, "mawk", pkg["Package"])
    asserts.equals(env, "noble", pkg["Dist"])

    # jammy only offers gawk.
    (pkg, _) = idx.resolution.resolve_package(name = "awk", version = None, arch = _test_arch, suites = ["jammy"])
    asserts.equals(env, "gawk", pkg["Package"])
    asserts.equals(env, "jammy", pkg["Dist"])

    return unittest.end(env)

resolve_virtual_package_suite_constraint_test = unittest.make(_resolve_virtual_package_suite_constraint_test)
"resolve_aliases") resolve_circular_deps_test(name = _TEST_SUITE_PREFIX + "parse_circular") + resolve_suite_constraint_test(name = _TEST_SUITE_PREFIX + "resolve_suite_constraint") + resolve_suite_constraint_not_found_test(name = _TEST_SUITE_PREFIX + "resolve_suite_constraint_not_found") + resolve_suite_constraint_transitive_test(name = _TEST_SUITE_PREFIX + "resolve_suite_constraint_transitive") + resolve_suite_constraint_multiple_suites_test(name = _TEST_SUITE_PREFIX + "resolve_suite_constraint_multiple_suites") + resolve_virtual_package_suite_constraint_test(name = _TEST_SUITE_PREFIX + "resolve_virtual_package_suite_constraint") diff --git a/examples/.bazelignore b/examples/.bazelignore new file mode 100644 index 00000000..9633d322 --- /dev/null +++ b/examples/.bazelignore @@ -0,0 +1,2 @@ +# cc_lib is a standalone example with external dependencies +cc_lib/ diff --git a/examples/.bazelversion b/examples/.bazelversion new file mode 100644 index 00000000..66ce77b7 --- /dev/null +++ b/examples/.bazelversion @@ -0,0 +1 @@ +7.0.0 diff --git a/examples/MODULE.bazel b/examples/MODULE.bazel new file mode 100644 index 00000000..52314db3 --- /dev/null +++ b/examples/MODULE.bazel @@ -0,0 +1,134 @@ +"Examples module for rules_distroless" + +module( + name = "rules_distroless_examples", + version = "0.0.0", +) + +bazel_dep(name = "rules_distroless", version = "0.0.0") +local_path_override( + module_name = "rules_distroless", + path = "..", +) + +bazel_dep(name = "aspect_bazel_lib", version = "2.14.0") +bazel_dep(name = "bazel_skylib", version = "1.5.0") +bazel_dep(name = "rules_oci", version = "2.0.0") +bazel_dep(name = "rules_java", version = "8.8.0") +bazel_dep(name = "container_structure_test", version = "1.16.0") +bazel_dep(name = "platforms", version = "0.0.10") + +# Toolchains from aspect_bazel_lib +bazel_lib_toolchains = use_extension("@aspect_bazel_lib//lib:extensions.bzl", "toolchains") +use_repo(bazel_lib_toolchains, "bsd_tar_toolchains") 
+use_repo(bazel_lib_toolchains, "zstd_toolchains") + +# Test fixture packages +http_archive = use_repo_rule("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +http_archive( + name = "example-bullseye-ca-certificates", + build_file_content = 'exports_files(["data.tar.xz", "control.tar.xz"])', + sha256 = "b2d488ad4d8d8adb3ba319fc9cb2cf9909fc42cb82ad239a26c570a2e749c389", + urls = ["https://snapshot.debian.org/archive/debian/20231106T210201Z/pool/main/c/ca-certificates/ca-certificates_20210119_all.deb"], +) + +http_archive( + name = "example-bullseye-libc-bin", + build_file_content = 'exports_files(["data.tar.xz"])', + sha256 = "8b048ab5c7e9f5b7444655541230e689631fd9855c384e8c4a802586d9bbc65a", + urls = ["https://snapshot.debian.org/archive/debian-security/20231106T230332Z/pool/updates/main/g/glibc/libc-bin_2.31-13+deb11u7_amd64.deb"], +) + +http_archive( + name = "example-bookworm-libc-bin", + build_file_content = 'exports_files(["data.tar.xz"])', + sha256 = "38c44247c5b3e864d6db2877edd9c9a0555fc4e23ae271b73d7f527802616df5", + urls = ["https://snapshot.debian.org/archive/debian-security/20231106T230332Z/pool/updates/main/g/glibc/libc-bin_2.36-9+deb12u3_armhf.deb"], +) + +# APT extension for Debian/Ubuntu packages +apt = use_extension("@rules_distroless//apt:extensions.bzl", "apt") + +# Debian Bullseye sources (for debian_snapshot example) +# Using September 2024 snapshot +apt.sources_list( + architectures = [ + "amd64", + "arm64", + ], + components = ["main"], + suites = [ + "bullseye", + "bullseye-updates", + ], + types = ["deb"], + uris = ["https://snapshot.debian.org/archive/debian/20240901T024950Z"], +) +apt.sources_list( + architectures = [ + "amd64", + "arm64", + ], + components = ["main"], + suites = ["bullseye-security"], + types = ["deb"], + uris = ["https://snapshot.debian.org/archive/debian-security/20240901T024950Z"], +) + +# Ubuntu Noble sources (for ubuntu_snapshot example) +# NOTE: Commented out because snapshot URLs are returning 404 
+# apt.sources_list( +# architectures = [ +# "amd64", +# "arm64", +# ], +# components = ["main"], +# suites = [ +# "noble", +# "noble-security", +# "noble-updates", +# ], +# types = ["deb"], +# uris = ["https://snapshot.ubuntu.com/ubuntu/20240301T030400Z"], +# ) + +# Install packages for Debian Bullseye example +apt.install( + dependency_set = "bullseye", + packages = [ + "bash", + "ca-certificates", + "coreutils", + "dpkg", + "libncurses6", + "ncurses-base", + "tzdata", + ], + suites = [ + "bullseye", + "bullseye-updates", + "bullseye-security", + ], +) + +# Install packages for Ubuntu Noble example +# NOTE: Commented out because snapshot URLs are returning 404 +# apt.install( +# dependency_set = "noble", +# packages = [ +# "bash", +# "coreutils", +# "dpkg", +# "libncurses6", +# "ncurses-base", +# "tzdata", +# ], +# suites = [ +# "noble", +# "noble-security", +# "noble-updates", +# ], +# ) + +use_repo(apt, "bullseye") diff --git a/examples/WORKSPACE b/examples/WORKSPACE new file mode 100644 index 00000000..f9c14bd4 --- /dev/null +++ b/examples/WORKSPACE @@ -0,0 +1 @@ +# This file is intentionally empty - using bzlmod (MODULE.bazel) for dependencies. 
diff --git a/examples/cacerts/BUILD.bazel b/examples/cacerts/BUILD.bazel index 2eeadea2..175ff793 100644 --- a/examples/cacerts/BUILD.bazel +++ b/examples/cacerts/BUILD.bazel @@ -1,5 +1,5 @@ -load("//distroless:defs.bzl", "cacerts") -load("//distroless/tests:asserts.bzl", "assert_tar_mtree") +load("@rules_distroless//distroless:defs.bzl", "cacerts") +load("@rules_distroless//distroless/tests:asserts.bzl", "assert_tar_mtree") cacerts( name = "cacerts", diff --git a/examples/cc_lib/MODULE.bazel b/examples/cc_lib/MODULE.bazel new file mode 100644 index 00000000..d1f2ca3e --- /dev/null +++ b/examples/cc_lib/MODULE.bazel @@ -0,0 +1,42 @@ +bazel_dep(name = "rules_cc", version = "0.2.8") +bazel_dep(name = "rules_distroless", version = "0.0.0") +local_path_override( + module_name = "rules_distroless", + path = "../..", +) + +bazel_dep(name = "sonic-build-infra", version = "0.0.0") +local_path_override( + module_name = "sonic-build-infra", + path = "../../../sonic-build-infra" +) + +register_toolchains("@sonic-build-infra//toolchains/gcc:host_gcc_toolchain") + +apt = use_extension("@rules_distroless//apt:extensions.bzl", "apt") +apt.sources_list( + architectures = ["amd64"], + components = ["main"], + suites = [ + "bookworm", + "bookworm-updates", + ], + uris = ["https://snapshot.debian.org/archive/debian/20251001T023456Z"], +) +apt.sources_list( + architectures = ["amd64"], + components = ["main"], + suites = ["bookworm-security"], + uris = ["https://snapshot.debian.org/archive/debian-security/20251001T023456Z"], +) +apt.install( + dependency_set = "bookworm", + packages = [ + "libnl-3-dev", + "libnl-genl-3-dev", + "libnl-nf-3-dev", + "nlohmann-json3-dev", + ], + target_release = "bookworm", +) +use_repo(apt, "bookworm") diff --git a/examples/cc_lib/subfolder/BUILD.bazel b/examples/cc_lib/subfolder/BUILD.bazel new file mode 100644 index 00000000..9d6f9ba1 --- /dev/null +++ b/examples/cc_lib/subfolder/BUILD.bazel @@ -0,0 +1,13 @@ +load("@rules_cc//cc:cc_test.bzl", 
"cc_test") + +cc_test( + name = "test", + srcs = ["test.c"], + copts = ["-Wno-int-conversion"], + deps = [ + "@bookworm//libnl-3-dev:libnl-3", + "@bookworm//libnl-nf-3-dev:libnl-nf-3", + "@bookworm//libnl-genl-3-dev:libnl-genl-3", + # "@bookworm//nlohmann-json3-dev:nlohmann-json3" + ], +) diff --git a/examples/cc_lib/subfolder/test.c b/examples/cc_lib/subfolder/test.c new file mode 100644 index 00000000..cf709a6d --- /dev/null +++ b/examples/cc_lib/subfolder/test.c @@ -0,0 +1,67 @@ +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> +// #include <netlink/netlink.h> // libnl-3 +// #include <netlink/genl/genl.h> // libnl-genl-3 +// #include <netlink/genl/ctrl.h> // libnl-genl-3 +// #include <netlink/netfilter/nfnl.h> // libnl-nf-3 +// #include <netlink/socket.h> + +int main(void) +{ + char *buffer; + size_t size = 1024; // Initial buffer size + + // Allocate memory for the buffer + buffer = (char *)malloc(size); + if (buffer == NULL) { + perror("Failed to allocate memory"); + return 1; + } + if (getcwd(buffer, size) != NULL) { + printf("Current working directory: %s\n", buffer); + } else { + perror("Failed to get current working directory"); + } + return 1; +// struct nl_sock *sk_core = NULL; // from libnl-3 +// struct nl_sock *sk_nf = NULL; // from libnl-nf-3 +// int family_id; + +// /* === libnl-3: basic Netlink socket === */ +// sk_core = nl_socket_alloc(); +// if (!sk_core) { +// perror("nl_socket_alloc"); +// return EXIT_FAILURE; +// } + +// if (nl_connect(sk_core, NETLINK_GENERIC) < 0) { +// fprintf(stderr, "Failed to connect to Generic Netlink\n"); +// goto cleanup; +// } + +// /* === libnl-genl-3: resolve a generic netlink family === */ +// family_id = genl_ctrl_resolve(sk_core, "nl80211"); +// if (family_id < 0) { +// printf("nl80211 family not found (normal on systems without WiFi): %s\n", +// nl_geterror(family_id)); +// } else { +// printf("Found nl80211 family ID = %d\n", family_id); +// } + +// /* === libnl-nf-3: create a netfilter socket (forces linking against libnl-nf-3) === */ +// sk_nf = nfnl_connect(sk_core); // This symbol is ONLY in libnl-nf-3 +// if (!sk_nf) { +// 
fprintf(stderr, "nfnl_connect() failed — this proves libnl-nf-3 is linked correctly\n"); +// } else { +// printf("Successfully created Netfilter netlink socket (libnl-nf-3 is present)\n"); +// // No need to actually use it — just having the pointer forces the linker to resolve it +// } + +// printf("All three libnl libraries (libnl-3, libnl-genl-3, libnl-nf-3) are present and linked!\n"); + +// cleanup: +// if (sk_core) nl_socket_free(sk_core); +// /* sk_nf is just a pointer alias to sk_core in libnl-nf, no need to free twice */ +// return EXIT_SUCCESS; +} diff --git a/examples/debian_snapshot/BUILD.bazel b/examples/debian_snapshot/BUILD.bazel index 8c930703..86c2dde9 100644 --- a/examples/debian_snapshot/BUILD.bazel +++ b/examples/debian_snapshot/BUILD.bazel @@ -1,11 +1,8 @@ """ -NOTE: +Debian Bullseye example using the new bzlmod API. - This is the main test used in the e2e testing. - - PLEASE KEEP e2e/smoke/BUILD and examples/debian_snapshot/BUILD - IN-SYNC WITH EACH OTHER, AS WELL AS THE REST OF THE TEST FILES - (test_linux_ files and the bullseye YAML manifest) +This example demonstrates how to create a distroless container image +using packages from Debian Bullseye snapshot. """ load("@aspect_bazel_lib//lib:tar.bzl", "tar") @@ -80,9 +77,6 @@ oci_image( "SSL_CERT_FILE": "/etc/ssl/certs/ca-certificates.crt", }, os = "linux", - # NOTE: this is needed because, otherwise, bazel test //... fails, even - # when container_structure_test already has target_compatible_with. - # See 136 target_compatible_with = COMPATIBLE_WITH, tars = [ # This target contains all the installed packages. @@ -125,9 +119,6 @@ oci_load( repo_tags = [ "distroless/test:latest", ], - # NOTE: this is needed because, otherwise, bazel test //... fails, even - # when container_structure_test already has target_compatible_with. 
- # See 136 target_compatible_with = COMPATIBLE_WITH, ) diff --git a/examples/flatten/BUILD.bazel b/examples/flatten/BUILD.bazel index 3ceff4a7..fed2a5cb 100644 --- a/examples/flatten/BUILD.bazel +++ b/examples/flatten/BUILD.bazel @@ -1,6 +1,6 @@ load("@aspect_bazel_lib//lib:tar.bzl", "tar") -load("//distroless:defs.bzl", "flatten", "home", "passwd") -load("//distroless/tests:asserts.bzl", "assert_tar_listing", "assert_tar_mtree") +load("@rules_distroless//distroless:defs.bzl", "flatten", "home", "passwd") +load("@rules_distroless//distroless/tests:asserts.bzl", "assert_tar_listing", "assert_tar_mtree") passwd( name = "passwd", @@ -54,12 +54,11 @@ assert_tar_mtree( #mtree ./etc time=0.0 mode=755 gid=0 uid=0 type=dir ./etc/passwd time=0.0 mode=644 gid=0 uid=0 type=file size=34 -./examples time=1672560000.0 mode=755 gid=0 uid=0 type=dir -./examples/flatten time=1672560000.0 mode=755 gid=0 uid=0 type=dir -./examples/flatten/dir time=1672560000.0 mode=755 gid=0 uid=0 type=dir -./examples/flatten/dir/changelog time=1672560000.0 mode=755 gid=0 uid=0 type=file size=0 -./examples/flatten/dir/sub time=1672560000.0 mode=755 gid=0 uid=0 type=dir -./examples/flatten/dir/sub/content.txt time=1672560000.0 mode=755 gid=0 uid=0 type=file size=0 +./flatten time=1672560000.0 mode=755 gid=0 uid=0 type=dir +./flatten/dir time=1672560000.0 mode=755 gid=0 uid=0 type=dir +./flatten/dir/changelog time=1672560000.0 mode=755 gid=0 uid=0 type=file size=0 +./flatten/dir/sub time=1672560000.0 mode=755 gid=0 uid=0 type=dir +./flatten/dir/sub/content.txt time=1672560000.0 mode=755 gid=0 uid=0 type=file size=0 ./home/nonroot time=0.0 mode=700 gid=666 uid=666 type=dir ./root time=0.0 mode=700 gid=0 uid=0 type=dir """, @@ -99,12 +98,11 @@ assert_tar_mtree( actual = "flatten_dedup", expected = """\ #mtree -./examples time=1672560000.0 mode=755 gid=0 uid=0 type=dir -./examples/flatten time=1672560000.0 mode=755 gid=0 uid=0 type=dir -./examples/flatten/dir time=1672560000.0 mode=755 gid=0 uid=0 
type=dir -./examples/flatten/dir/changelog time=1672560000.0 mode=755 gid=0 uid=0 type=file size=0 -./examples/flatten/dir/sub time=1672560000.0 mode=755 gid=0 uid=0 type=dir -./examples/flatten/dir/sub/content.txt time=1672560000.0 mode=755 gid=0 uid=0 type=file size=0 +./flatten time=1672560000.0 mode=755 gid=0 uid=0 type=dir +./flatten/dir time=1672560000.0 mode=755 gid=0 uid=0 type=dir +./flatten/dir/changelog time=1672560000.0 mode=755 gid=0 uid=0 type=file size=0 +./flatten/dir/sub time=1672560000.0 mode=755 gid=0 uid=0 type=dir +./flatten/dir/sub/content.txt time=1672560000.0 mode=755 gid=0 uid=0 type=file size=0 """, ) @@ -112,11 +110,10 @@ assert_tar_listing( name = "test_flatten_dedup_listing", actual = "flatten_dedup", expected = """\ -examples/ -examples/flatten/ -examples/flatten/dir/ -examples/flatten/dir/changelog -examples/flatten/dir/sub/ -examples/flatten/dir/sub/content.txt +flatten/ +flatten/dir/ +flatten/dir/changelog +flatten/dir/sub/ +flatten/dir/sub/content.txt """, ) diff --git a/examples/group/BUILD.bazel b/examples/group/BUILD.bazel index b5699265..59538bfa 100644 --- a/examples/group/BUILD.bazel +++ b/examples/group/BUILD.bazel @@ -1,6 +1,6 @@ load("@aspect_bazel_lib//lib:diff_test.bzl", "diff_test") -load("//distroless:defs.bzl", "group") -load("//distroless/tests:asserts.bzl", "assert_tar_mtree") +load("@rules_distroless//distroless:defs.bzl", "group") +load("@rules_distroless//distroless/tests:asserts.bzl", "assert_tar_mtree") group( name = "group", diff --git a/examples/home/BUILD.bazel b/examples/home/BUILD.bazel index 354d72f4..e170a07b 100644 --- a/examples/home/BUILD.bazel +++ b/examples/home/BUILD.bazel @@ -1,5 +1,5 @@ -load("//distroless:defs.bzl", "home") -load("//distroless/tests:asserts.bzl", "assert_tar_mtree") +load("@rules_distroless//distroless:defs.bzl", "home") +load("@rules_distroless//distroless/tests:asserts.bzl", "assert_tar_mtree") home( name = "home", diff --git a/examples/java_keystore/BUILD.bazel 
b/examples/java_keystore/BUILD.bazel index 97ce1b2a..82f58ad7 100644 --- a/examples/java_keystore/BUILD.bazel +++ b/examples/java_keystore/BUILD.bazel @@ -1,5 +1,5 @@ -load("//distroless:defs.bzl", "java_keystore") -load("//distroless/tests:asserts.bzl", "assert_jks_listing", "assert_tar_mtree") +load("@rules_distroless//distroless:defs.bzl", "java_keystore") +load("@rules_distroless//distroless/tests:asserts.bzl", "assert_jks_listing", "assert_tar_mtree") java_keystore( name = "java_keystore", diff --git a/examples/locale/BUILD.bazel b/examples/locale/BUILD.bazel index df316303..ce923c95 100644 --- a/examples/locale/BUILD.bazel +++ b/examples/locale/BUILD.bazel @@ -1,5 +1,5 @@ -load("//distroless:defs.bzl", "locale") -load("//distroless/tests:asserts.bzl", "assert_tar_mtree") +load("@rules_distroless//distroless:defs.bzl", "locale") +load("@rules_distroless//distroless/tests:asserts.bzl", "assert_tar_mtree") EPOCH = 123 diff --git a/examples/os_release/BUILD.bazel b/examples/os_release/BUILD.bazel index f86793d7..18e91959 100644 --- a/examples/os_release/BUILD.bazel +++ b/examples/os_release/BUILD.bazel @@ -1,6 +1,6 @@ load("@aspect_bazel_lib//lib:diff_test.bzl", "diff_test") -load("//distroless:defs.bzl", "os_release") -load("//distroless/tests:asserts.bzl", "assert_tar_mtree") +load("@rules_distroless//distroless:defs.bzl", "os_release") +load("@rules_distroless//distroless/tests:asserts.bzl", "assert_tar_mtree") os_release( name = "os_release", diff --git a/examples/passwd/BUILD.bazel b/examples/passwd/BUILD.bazel index a1bdc376..830a98c6 100644 --- a/examples/passwd/BUILD.bazel +++ b/examples/passwd/BUILD.bazel @@ -1,6 +1,6 @@ load("@aspect_bazel_lib//lib:diff_test.bzl", "diff_test") -load("//distroless:defs.bzl", "passwd") -load("//distroless/tests:asserts.bzl", "assert_tar_mtree") +load("@rules_distroless//distroless:defs.bzl", "passwd") +load("@rules_distroless//distroless/tests:asserts.bzl", "assert_tar_mtree") passwd( name = "passwd", diff --git 
a/examples/statusd/BUILD.bazel b/examples/statusd/BUILD.bazel index 146f0b01..e7bab5c2 100644 --- a/examples/statusd/BUILD.bazel +++ b/examples/statusd/BUILD.bazel @@ -1,6 +1,6 @@ # buildifier: disable=bzl-visibility -load("//apt:defs.bzl", "dpkg_statusd") -load("//distroless/tests:asserts.bzl", "assert_tar_mtree") +load("@rules_distroless//apt:defs.bzl", "dpkg_statusd") +load("@rules_distroless//distroless/tests:asserts.bzl", "assert_tar_mtree") dpkg_statusd( name = "statusd", diff --git a/examples/ubuntu_snapshot/BUILD.bazel b/examples/ubuntu_snapshot/BUILD.bazel index ebc7c870..8197104a 100644 --- a/examples/ubuntu_snapshot/BUILD.bazel +++ b/examples/ubuntu_snapshot/BUILD.bazel @@ -1,94 +1,8 @@ -load("@aspect_bazel_lib//lib:tar.bzl", "tar") -load("@container_structure_test//:defs.bzl", "container_structure_test") -load("@rules_distroless//distroless:defs.bzl", "group", "passwd") -load("@rules_oci//oci:defs.bzl", "oci_image", "oci_load") - -COMPATIBLE_WITH = select({ - "@platforms//cpu:x86_64": ["@platforms//cpu:x86_64"], - "@platforms//cpu:arm64": ["@platforms//cpu:arm64"], -}) + [ - "@platforms//os:linux", -] - -passwd( - name = "passwd", - entries = [ - { - "uid": 0, - "gid": 0, - "home": "/root", - "shell": "/bin/bash", - "username": "r00t", - }, - { - "uid": 100, - "gid": 65534, - "home": "/home/_apt", - "shell": "/usr/sbin/nologin", - "username": "_apt", - }, - ], -) - -group( - name = "group", - entries = [ - { - "name": "root", - "gid": 0, - }, - { - "name": "_apt", - "gid": 65534, - }, - ], -) - -tar( - name = "sh", - mtree = [ - # needed as dpkg assumes sh is installed in a typical debian installation. - "./bin/sh type=link link=/bin/bash", - ], -) - -oci_image( - name = "noble", - architecture = select({ - "@platforms//cpu:arm64": "arm64", - "@platforms//cpu:x86_64": "amd64", - }), - os = "linux", - # NOTE: this is needed because, otherwise, bazel test //... fails, even - # when container_structure_test already has target_compatible_with. 
- # See 136 - target_compatible_with = COMPATIBLE_WITH, - tars = [ - ":sh", - ":passwd", - ":group", - "@noble//:noble", - ], -) - -oci_load( - name = "tarball", - image = ":noble", - repo_tags = [ - "distroless/noble:latest", - ], - # NOTE: this is needed because, otherwise, bazel test //... fails, even - # when container_structure_test already has target_compatible_with. - # See 136 - target_compatible_with = COMPATIBLE_WITH, -) - -container_structure_test( - name = "test", - configs = select({ - "@platforms//cpu:arm64": ["test_linux_arm64.yaml"], - "@platforms//cpu:x86_64": ["test_linux_amd64.yaml"], - }), - image = ":noble", - target_compatible_with = COMPATIBLE_WITH, -) +# Ubuntu Noble example is temporarily disabled. +# +# The snapshot URLs at https://snapshot.ubuntu.com/ubuntu/20240301T030400Z +# are returning 404 errors. This example will need to be updated with +# working snapshot URLs when available. +# +# See MODULE.bazel for the commented out apt.sources_list and apt.install +# configuration that was used.