Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 21 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -138,6 +138,26 @@ There may be different preferences for splitting files into groups. A good way t
> [!CAUTION]
> Merging groups by merging their `depset`s is cheap. Calling `.to_list()` on a depset is expensive and should be avoided during analysis. Build group hierarchies purely through `depset(transitive = [...])`.

### Handling `deps` and `data`

Most rules have the attributes `deps` and `data`. You should implement support for them carefully.

**`deps`** typically come from your own ruleset's `*_library` targets — they will likely provide `RunfilesGroupInfo`, so you should merge the groups and metadata with the others.

**`data`** can be arbitrary targets. Some may provide `RunfilesGroupInfo` (e.g., a `*_binary` from a ruleset that supports it), while others won't. Add ungrouped files (when `RunfilesGroupInfo` is missing) to a runfiles group (the default for the current target) so they are not lost.

```starlark
dep_groups = lib.collect_groups(ctx.attr.deps)
data_groups = lib.collect_groups(ctx.attr.data)

groups = {}
groups.update(dep_groups.groups)
groups.update(data_groups.groups)
groups["app_code"] = depset(my_own_files, transitive = data_groups.ungrouped)

metadata = lib.merge_metadata(dep_groups.metadata, data_groups.metadata)
```

### Group count limits

Packaging rules may enforce a maximum group count via `lib.merge_to_limit()`. For example, container image runtimes may limit the total number of layers an image can have. The merge algorithm respects `rank` (only merges within the same rank), `do_not_merge` (never merges protected groups), and `weight` (merges lightest groups first).
Expand Down Expand Up @@ -250,6 +270,7 @@ Note that ordering may not matter for some kinds of packages. In that case, it's

| Ruleset | Ordering | Merge-to-limit | `aspect_hints` support |
|---------|----------|----------------|----------------------|
| [rules_img](https://github.com/bazel-contrib/rules_img) | ✅ | ✅ | ✅ |
| *Your ruleset here* | | | |

> To add your ruleset to these tables, open a pull request.
1 change: 1 addition & 0 deletions example/MODULE.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ module(name = "rules_runfiles_group_example")

bazel_dep(name = "rules_runfiles_group")
bazel_dep(name = "rules_go", version = "0.60.0")
bazel_dep(name = "rules_shell", version = "0.8.0")
bazel_dep(name = "hermetic_launcher", version = "0.0.5")
bazel_dep(name = "gazelle", version = "0.50.0")
bazel_dep(name = "sha256.bzl", version = "0.0.1")
Expand Down
58 changes: 31 additions & 27 deletions example/producer/rules/starlark_binary.bzl
Original file line number Diff line number Diff line change
Expand Up @@ -120,11 +120,9 @@ def _starlark_binary_impl(ctx):
if ctx.attr.runfiles_grouping != "disabled":
groups = {}

# Collect metadata from deps (carries weight from starlark_library).
dep_metadata = None
for dep in ctx.attr.deps:
if RunfilesGroupMetadataInfo in dep:
dep_metadata = lib.merge_metadata(dep_metadata, dep[RunfilesGroupMetadataInfo])
dep_groups = lib.collect_groups(ctx.attr.deps)
data_groups = lib.collect_groups(ctx.attr.data)
dep_metadata = lib.merge_metadata(dep_groups.metadata, data_groups.metadata)

metadata = {}
own_repo = ctx.attr.repository
Expand All @@ -140,36 +138,42 @@ def _starlark_binary_impl(ctx):
groups["std"] = stdlib[DefaultInfo].default_runfiles.files
metadata["std"] = lib.group_metadata(rank = -1)

entrypoint_files = depset([output, entrypoint, loadmap, properties] + ctx.files.data)
entrypoint_files = depset([output, entrypoint, loadmap, properties])

# Dep groups
if ctx.attr.runfiles_grouping == "by_target":
groups["entrypoint"] = entrypoint_files
groups.update(data_groups.groups)
groups["entrypoint"] = depset(transitive = [entrypoint_files] + data_groups.ungrouped)
metadata["entrypoint"] = lib.group_metadata(rank = 2)
for dep in ctx.attr.deps:
if RunfilesGroupInfo in dep:
for name in lib.group_names(dep[RunfilesGroupInfo]):
groups[name] = getattr(dep[RunfilesGroupInfo], name)
dep_weight = _get_dep_weight(dep_metadata, name)
if _extract_repo(name) == own_repo:
metadata[name] = lib.group_metadata(rank = 1, weight = dep_weight)
elif dep_weight != None:
metadata[name] = lib.group_metadata(weight = dep_weight)
for name in data_groups.groups:
dep_weight = _get_dep_weight(dep_metadata, name)
if _extract_repo(name) == own_repo:
metadata[name] = lib.group_metadata(rank = 1, weight = dep_weight)
elif dep_weight != None:
metadata[name] = lib.group_metadata(weight = dep_weight)
for name, files in dep_groups.groups.items():
groups[name] = files
dep_weight = _get_dep_weight(dep_metadata, name)
if _extract_repo(name) == own_repo:
metadata[name] = lib.group_metadata(rank = 1, weight = dep_weight)
elif dep_weight != None:
metadata[name] = lib.group_metadata(weight = dep_weight)

elif ctx.attr.runfiles_grouping == "by_repo":
repo_depsets = {}
repo_weights = {}
repo_depsets[own_repo] = [entrypoint_files]
for dep in ctx.attr.deps:
if RunfilesGroupInfo in dep:
for name in lib.group_names(dep[RunfilesGroupInfo]):
repo = _extract_repo(name)
if repo not in repo_depsets:
repo_depsets[repo] = []
repo_depsets[repo].append(getattr(dep[RunfilesGroupInfo], name))
w = _get_dep_weight(dep_metadata, name)
if w != None:
repo_weights[repo] = repo_weights.get(repo, 0) + w
repo_depsets[own_repo] = [entrypoint_files] + data_groups.ungrouped
all_dep_groups = {}
all_dep_groups.update(data_groups.groups)
all_dep_groups.update(dep_groups.groups)
for name, files in all_dep_groups.items():
repo = _extract_repo(name)
if repo not in repo_depsets:
repo_depsets[repo] = []
repo_depsets[repo].append(files)
w = _get_dep_weight(dep_metadata, name)
if w != None:
repo_weights[repo] = repo_weights.get(repo, 0) + w
for repo, ds in repo_depsets.items():
groups[repo or "_main"] = depset(transitive = ds)
if repo == own_repo:
Expand Down
18 changes: 7 additions & 11 deletions example/producer/rules/starlark_library.bzl
Original file line number Diff line number Diff line change
Expand Up @@ -30,19 +30,15 @@ def _starlark_library_impl(ctx):

group_name = loadpath + ":" + ctx.label.name

groups = {}
for dep in ctx.attr.deps:
if RunfilesGroupInfo in dep:
for name in lib.group_names(dep[RunfilesGroupInfo]):
groups[name] = getattr(dep[RunfilesGroupInfo], name)

groups[group_name] = depset(direct_srcs + ctx.files.data)
dep_groups = lib.collect_groups(ctx.attr.deps)
data_groups = lib.collect_groups(ctx.attr.data)

metadata = None
for dep in ctx.attr.deps:
if RunfilesGroupMetadataInfo in dep:
metadata = lib.merge_metadata(metadata, dep[RunfilesGroupMetadataInfo])
groups = {}
groups.update(dep_groups.groups)
groups.update(data_groups.groups)
groups[group_name] = depset(direct_srcs, transitive = data_groups.ungrouped)

metadata = lib.merge_metadata(dep_groups.metadata, data_groups.metadata)
own_weight = ctx.attr.runfiles_weight if ctx.attr.runfiles_weight > 0 else None
own_metadata = RunfilesGroupMetadataInfo(groups = {
group_name: lib.group_metadata(weight = own_weight),
Expand Down
7 changes: 7 additions & 0 deletions example/src/BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,12 @@ starlark_binary(
],
)

# Exercises `data` pointing at an executable target from another ruleset
# (//src/shell_app:greeter, an sh_binary) whose runfiles must not be lost
# even when the target provides no RunfilesGroupInfo.
starlark_binary(
    name = "data_has_executable",
    src = "empty.star",
    data = ["//src/shell_app:greeter"],
)

fake_package(
name = "adder_pkg",
binary = ":adder",
Expand Down Expand Up @@ -121,6 +127,7 @@ runfiles_group_analysis_test(
":hasher",
":3p_deps_demo",
":merge_demo",
":data_has_executable",
],
)

Expand Down
Empty file added example/src/empty.star
Empty file.
30 changes: 30 additions & 0 deletions example/src/shell_app/BUILD.bazel
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
load("@rules_shell//shell:sh_binary.bzl", "sh_binary")
load("@rules_shell//shell:sh_library.bzl", "sh_library")

package(default_visibility = ["//visibility:public"])

# Formatting helpers plus the banner template they print.
sh_library(
    name = "format_lib",
    srcs = ["format.sh"],
    data = ["templates/banner.txt"],
)

# Configuration helpers plus the defaults file they read at runtime.
sh_library(
    name = "config_lib",
    srcs = ["config.sh"],
    data = ["data/defaults.conf"],
)

# Example executable consumed as `data` by starlark_binary targets; carries
# its own runfiles (libraries, data files, the Bash runfiles library).
# NOTE: deps are sorted per buildifier convention, and the redundant
# `visibility` attribute is dropped — package(default_visibility) above
# already makes every target here public.
sh_binary(
    name = "greeter",
    srcs = ["greeter.sh"],
    data = ["data/names.txt"],
    deps = [
        ":config_lib",
        ":format_lib",
        "@rules_shell//shell/runfiles",
    ],
)
12 changes: 12 additions & 0 deletions example/src/shell_app/config.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
#!/usr/bin/env bash
# Configuration library for the greeter example. Meant to be sourced, not
# executed. Assumes the Bazel runfiles library (runfiles.bash) has already
# been initialized by the caller, since load_config calls `rlocation`.

# Default greeting prefix; defaults.conf may override it via load_config.
GREETING_PREFIX="Hello"

# Sources data/defaults.conf from the runfiles tree when present.
# Silently keeps the built-in defaults when the file is missing.
load_config() {
  local config_file
  config_file="$(rlocation _main/src/shell_app/data/defaults.conf)"
  if [[ -f "$config_file" ]]; then
    # shellcheck disable=SC1090
    source "$config_file"
  fi
}
1 change: 1 addition & 0 deletions example/src/shell_app/data/defaults.conf
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
GREETING_PREFIX="Welcome"
4 changes: 4 additions & 0 deletions example/src/shell_app/data/names.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# List of names to greet
Alice
Bob
Charlie
15 changes: 15 additions & 0 deletions example/src/shell_app/format.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
#!/usr/bin/env bash
# Formatting helpers for the greeter example. Meant to be sourced.

# Prints "<prefix> <name>!" for the given name ($1) and prefix ($2).
format_greeting() {
  local who="$1"
  local salutation="$2"
  printf '%s %s!\n' "$salutation" "$who"
}

# Prints an empty line followed by the contents of the banner file ($1);
# prints nothing when the file does not exist.
print_banner() {
  local banner="$1"
  [[ -f "$banner" ]] || return 0
  echo ""
  cat "$banner"
}
29 changes: 29 additions & 0 deletions example/src/shell_app/greeter.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
#!/usr/bin/env bash
# Greets every name listed in data/names.txt, then prints a banner.
# All inputs are resolved through the Bazel runfiles library.
set -euo pipefail

# --- begin runfiles.bash initialization v3 ---
set -uo pipefail; set +e; f=bazel_tools/tools/bash/runfiles/runfiles.bash
# shellcheck disable=SC1090
source "${RUNFILES_DIR:-/dev/null}/$f" 2>/dev/null || \
source "$(grep -sm1 "^$f " "${RUNFILES_MANIFEST_FILE:-/dev/null}" | cut -f2- -d' ')" 2>/dev/null || \
source "$0.runfiles/$f" 2>/dev/null || \
source "$(grep -sm1 "^$f " "$0.runfiles_manifest" | cut -f2- -d' ')" 2>/dev/null || \
source "$(grep -sm1 "^$f " "$0.exe.runfiles_manifest" | cut -f2- -d' ')" 2>/dev/null || \
{ echo>&2 "ERROR: cannot find $f"; exit 1; }; f=; set -e
# --- end runfiles.bash initialization v3 ---

# Library helpers: load_config/GREETING_PREFIX and format_greeting/print_banner.
# shellcheck disable=SC1090
source "$(rlocation _main/src/shell_app/config.sh)"
# shellcheck disable=SC1090
source "$(rlocation _main/src/shell_app/format.sh)"

load_config

names_file="$(rlocation _main/src/shell_app/data/names.txt)"
# `|| [[ -n "$name" ]]` keeps the final line even if the file has no
# trailing newline (plain `read` returns non-zero there and would drop it).
while IFS= read -r name || [[ -n "$name" ]]; do
  # Skip blank lines and '#' comment lines.
  [[ -z "$name" || "$name" == \#* ]] && continue
  format_greeting "$name" "$GREETING_PREFIX"
done < "$names_file"

banner_file="$(rlocation _main/src/shell_app/templates/banner.txt)"
print_banner "$banner_file"
3 changes: 3 additions & 0 deletions example/src/shell_app/templates/banner.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
================================
Thanks for using the greeter!
================================
63 changes: 46 additions & 17 deletions runfiles_group/private/lib.bzl
Original file line number Diff line number Diff line change
Expand Up @@ -26,9 +26,19 @@ lib.merge_to_limit(runfiles_group_info, metadata_info = None, max_groups, defaul
merged_group_name(lighter_name, lighter_weight, heavier_name, heavier_weight)
to determine the name of the merged group. If None, the heavier group's name is kept.

lib.merge_metadata(base, override)
Dict-merges two RunfilesGroupMetadataInfo instances (or None).
lib.merge_metadata(*metadatas)
Dict-merges any number of RunfilesGroupMetadataInfo instances (or None).
Returns RunfilesGroupMetadataInfo or None. Per-key last-wins.

lib.collect_groups(deps)
Extracts RunfilesGroupInfo and RunfilesGroupMetadataInfo from a list of
dependency targets. For deps providing RunfilesGroupInfo, extracts all
groups and metadata. For deps without it, collects
DefaultInfo.default_runfiles.files as ungrouped.
Returns struct(groups, metadata, ungrouped) where:
groups: dict[str, depset[File]]
metadata: RunfilesGroupMetadataInfo or None
ungrouped: list[depset[File]]
"""

load("@bazel_features//:features.bzl", "bazel_features")
Expand Down Expand Up @@ -116,7 +126,7 @@ def _find_cheapest_pair(groups, meta, default_weight):

def _merge_pair(groups, meta, lighter, heavier, default_weight, merged_group_name_fn):
"""Merges lighter into heavier, returns new (groups, meta) dicts."""
merged_depset = depset(transitive = [groups[lighter], groups[heavier]])
merged_depsets = groups[lighter] + groups[heavier]
merged_weight = _effective_weight(meta[lighter], default_weight) + \
_effective_weight(meta[heavier], default_weight)
merged_entry = struct(
Expand All @@ -133,7 +143,7 @@ def _merge_pair(groups, meta, lighter, heavier, default_weight, merged_group_nam
out_name = heavier

new_groups = {n: d for n, d in groups.items() if n != lighter and n != heavier}
new_groups[out_name] = merged_depset
new_groups[out_name] = merged_depsets
new_meta = {n: e for n, e in meta.items() if n != lighter and n != heavier}
new_meta[out_name] = merged_entry
return (new_groups, new_meta)
Expand All @@ -147,7 +157,7 @@ def _merge_to_limit(runfiles_group_info, runfiles_group_metadata_info = None, *,
group_count = len(names),
)

groups = {name: getattr(runfiles_group_info, name) for name in names}
groups = {name: [getattr(runfiles_group_info, name)] for name in names}
meta = {}
for name in names:
meta[name] = _get_metadata(runfiles_group_metadata_info, name)
Expand All @@ -160,25 +170,43 @@ def _merge_to_limit(runfiles_group_info, runfiles_group_metadata_info = None, *,
break
groups, meta = _merge_pair(groups, meta, pair[0], pair[1], default_weight, merged_group_name)

merged_rgi = RunfilesGroupInfo(**groups)
flat = {}
for name, ds in groups.items():
flat[name] = ds[0] if len(ds) == 1 else depset(transitive = ds)
merged_rgi = RunfilesGroupInfo(**flat)
merged_metadata = RunfilesGroupMetadataInfo(groups = meta) if meta else runfiles_group_metadata_info
return struct(
runfiles_group_info = merged_rgi,
runfiles_group_metadata_info = merged_metadata,
group_count = len(groups),
)

def _merge_metadata(base, override):
if base == None and override == None:
return None
if base == None:
return override
if override == None:
return base

merged = dict(base.groups)
merged.update(override.groups)
return RunfilesGroupMetadataInfo(groups = merged)
def _merge_metadata(*metadatas):
    """Dict-merges any number of RunfilesGroupMetadataInfo instances (or None).

    Per-key last-wins: later arguments override earlier ones for the same
    group name.

    Args:
        *metadatas: RunfilesGroupMetadataInfo instances or None values.

    Returns:
        RunfilesGroupMetadataInfo, or None when every input is None. A single
        non-None input is returned unchanged (no new provider is allocated).
    """
    present = [m for m in metadatas if m != None]
    if len(present) == 0:
        return None
    if len(present) == 1:
        return present[0]

    # Accumulate into one dict and construct the provider exactly once,
    # instead of allocating a new provider per merged input.
    merged = {}
    for m in present:
        merged.update(m.groups)
    return RunfilesGroupMetadataInfo(groups = merged)

def _collect_groups(deps):
    """Extracts runfiles groups and metadata from a list of dependency targets.

    Targets providing RunfilesGroupInfo contribute all of their named groups,
    and their RunfilesGroupMetadataInfo when present (merged last-wins).
    Targets without RunfilesGroupInfo contribute their default outputs and
    default runfiles as ungrouped depsets so those files are not lost.

    Returns:
        struct with fields:
            groups: dict[str, depset[File]]
            metadata: RunfilesGroupMetadataInfo or None
            ungrouped: list[depset[File]]
    """
    groups = {}
    metadata = None
    ungrouped = []
    for target in deps:
        if RunfilesGroupInfo not in target:
            default_info = target[DefaultInfo]
            ungrouped.append(depset(transitive = [
                default_info.files,
                default_info.default_runfiles.files,
            ]))
            continue
        group_info = target[RunfilesGroupInfo]
        for group_name in _group_names(group_info):
            groups[group_name] = getattr(group_info, group_name)
        if RunfilesGroupMetadataInfo in target:
            metadata = _merge_metadata(metadata, target[RunfilesGroupMetadataInfo])
    return struct(groups = groups, metadata = metadata, ungrouped = ungrouped)

lib = struct(
group_metadata = group_metadata,
Expand All @@ -187,4 +215,5 @@ lib = struct(
transform_groups = _transform_groups,
merge_to_limit = _merge_to_limit,
merge_metadata = _merge_metadata,
collect_groups = _collect_groups,
)
Loading