diff --git a/.bazelrc b/.bazelrc
new file mode 100644
index 0000000..04615fa
--- /dev/null
+++ b/.bazelrc
@@ -0,0 +1,3 @@
+# //tests:version_test asserts on the Python interpreter version, so enable
+# toolchains to ensure we get the right one (avoid bazelbuild/bazel#4815).
+build --incompatible_use_python_toolchains
diff --git a/WORKSPACE b/WORKSPACE
index 09df09f..0ddafb5 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -1,5 +1,13 @@
workspace(name = "subpar")
+# These version variables aren't consumed by the build; they are recorded here
+# for documentation / compatibility purposes.
+#
+# Needed because subpar uses the `PyInfo` provider.
+MIN_BAZEL_VERSION = "0.23.0"
+# Because tests require --incompatible_use_python_toolchains.
+MIN_BAZEL_VERSION_FOR_TESTS = "0.25.0"
+
# Used by integration tests
local_repository(
name = "test_workspace",
diff --git a/compiler/BUILD b/compiler/BUILD
index 321b692..e69b9f3 100644
--- a/compiler/BUILD
+++ b/compiler/BUILD
@@ -30,7 +30,7 @@ py_library(
py_binary(
name = "compiler",
srcs = ["compiler.py"],
- default_python_version = "PY2",
+ python_version = "PY2",
main = "compiler.py",
srcs_version = "PY2AND3",
deps = [":compiler_lib"],
diff --git a/compiler/cli.py b/compiler/cli.py
index 083f931..a721c13 100644
--- a/compiler/cli.py
+++ b/compiler/cli.py
@@ -53,20 +53,23 @@ def make_command_line_parser():
help='Root directory of all relative paths in manifest file.',
default=os.getcwd())
parser.add_argument(
- '--outputpar',
+ '--output_par',
help='Filename of generated par file.',
required=True)
parser.add_argument(
'--stub_file',
help='Read imports and interpreter path from the specified stub file',
required=True)
+ parser.add_argument(
+ '--interpreter',
+ help='Interpreter to use instead of determining it from the stub file')
# The default timestamp is "Jan 1 1980 00:00:00 utc", which is the
# earliest time that can be stored in a zip file.
#
# "Seconds since Unix epoch" was chosen to be compatible with
# the SOURCE_DATE_EPOCH standard
#
- # Numeric calue is from running this:
+ # Numeric value is from running this:
# "date --date='Jan 1 1980 00:00:00 utc' --utc +%s"
parser.add_argument(
'--timestamp',
@@ -84,50 +87,73 @@ def make_command_line_parser():
'directory at the start of execution.',
type=bool_from_string,
required=True)
+ parser.add_argument(
+ '--import_root',
+        help='Path to add to sys.path; may be repeated to provide multiple roots.',
+ action='append',
+ default=[],
+        dest='import_roots')
+ parser.add_argument(
+ '--no_remove',
+        help='Keep extracted files after the program finishes if --zip_safe ' +
+        'is False.',
+ type=bool_from_string,
+ required=True)
return parser
def parse_stub(stub_filename):
- """Parse the imports and interpreter path from a py_binary() stub.
+ """Parse interpreter path from a py_binary() stub.
We assume the stub is utf-8 encoded.
- TODO(b/29227737): Remove this once we can access imports from skylark.
+ TODO(bazelbuild/bazel#7805): Remove this once we can access the py_runtime from Starlark.
- Returns (list of relative paths, path to Python interpreter)
+    Returns the path to the Python interpreter.
"""
- # Find the list of import roots
- imports_regex = re.compile(r'''^ python_imports = '([^']*)'$''')
+ # Find the interpreter
interpreter_regex = re.compile(r'''^PYTHON_BINARY = '([^']*)'$''')
- import_roots = None
interpreter = None
with io.open(stub_filename, 'rt', encoding='utf8') as stub_file:
for line in stub_file:
- importers_match = imports_regex.match(line)
- if importers_match:
- import_roots = importers_match.group(1).split(':')
- # Filter out empty paths
- import_roots = [x for x in import_roots if x]
interpreter_match = interpreter_regex.match(line)
if interpreter_match:
interpreter = interpreter_match.group(1)
- if import_roots is None or not interpreter:
+ if not interpreter:
raise error.Error('Failed to parse stub file [%s]' % stub_filename)
- # Find the Python interpreter, matching the search logic in
- # stub_template.txt
+ # Determine the Python interpreter, checking for default toolchain.
+ #
+ # This somewhat mirrors the logic in python_stub_template.txt, but we don't support
+ # relative paths (i.e., in-workspace interpreters). This is because the interpreter
+ # will be used in the .par file's shebang, and putting a relative path in a shebang
+ # is extremely brittle and non-relocatable. (The reason the standard py_binary rule
+ # can use an in-workspace interpreter is that its stub script runs in a separate
+ # process and has a shebang referencing the system interpreter). As a special case,
+ # if the Python target is using the autodetecting Python toolchain, which is
+ # technically an in-workspace runtime, we rewrite it to "/usr/bin/env python[2|3]"
+ # rather than fail.
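+    #
+    # For example: '/usr/bin/python3' is kept as-is, 'python3' becomes
+    # '/usr/bin/env python3', 'bazel_tools/tools/python/py3wrapper.sh' becomes
+    # '/usr/bin/env python3', and a relative path like 'mydir/python' is an
+    # error.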
if interpreter.startswith('//'):
raise error.Error('Python interpreter must not be a label [%s]' %
stub_filename)
elif interpreter.startswith('/'):
pass
+ elif interpreter == 'bazel_tools/tools/python/py3wrapper.sh': # Default toolchain
+ # Replace default toolchain python3 wrapper with default python3 on path
+ interpreter = '/usr/bin/env python3'
+ elif interpreter == 'bazel_tools/tools/python/py2wrapper.sh': # Default toolchain
+ # Replace default toolchain python2 wrapper with default python2 on path
+ interpreter = '/usr/bin/env python2'
elif '/' in interpreter:
- pass
+ raise error.Error(
+ 'par files require a Python runtime that is ' +
+ 'installed on the system, not defined inside the workspace. Use ' +
+ 'a `py_runtime` with an absolute path, not a label.')
else:
interpreter = '/usr/bin/env %s' % interpreter
- return (import_roots, interpreter)
+ return interpreter
def main(argv):
@@ -135,17 +161,21 @@ def main(argv):
parser = make_command_line_parser()
args = parser.parse_args(argv[1:])
- # Parse information from stub file that's too hard to compute in Skylark
- import_roots, interpreter = parse_stub(args.stub_file)
+    # Parse the interpreter path from the stub file; it isn't exposed to Starlark
+ interpreter = parse_stub(args.stub_file)
+
+ if args.interpreter:
+ interpreter = args.interpreter
par = python_archive.PythonArchive(
main_filename=args.main_filename,
- import_roots=import_roots,
+ import_roots=args.import_roots,
interpreter=interpreter,
- output_filename=args.outputpar,
+ output_filename=args.output_par,
manifest_filename=args.manifest_file,
manifest_root=args.manifest_root,
timestamp=args.timestamp,
zip_safe=args.zip_safe,
+ no_remove=args.no_remove,
)
par.create()
diff --git a/compiler/cli_test.py b/compiler/cli_test.py
index 56a6f18..bd375e6 100644
--- a/compiler/cli_test.py
+++ b/compiler/cli_test.py
@@ -35,45 +35,58 @@ def test_make_command_line_parser(self):
args = parser.parse_args([
'--manifest_file=bar',
'--manifest_root=bazz',
- '--outputpar=baz',
+ '--output_par=baz',
'--stub_file=quux',
'--zip_safe=False',
+ '--no_remove=False',
+ '--import_root=root1',
+ '--import_root=root2',
'foo',
])
self.assertEqual(args.manifest_file, 'bar')
+ self.assertEqual(args.manifest_root, 'bazz')
+ self.assertEqual(args.output_par, 'baz')
+ self.assertEqual(args.stub_file, 'quux')
+ self.assertEqual(args.zip_safe, False)
+ self.assertEqual(args.import_roots, ['root1', 'root2'])
+ self.assertEqual(args.main_filename, 'foo')
+
+    def test_make_command_line_parser_for_interpreter(self):
+ parser = cli.make_command_line_parser()
+ args = parser.parse_args([
+ '--manifest_file=bar',
+ '--manifest_root=bazz',
+ '--output_par=baz',
+ '--stub_file=quux',
+ '--zip_safe=False',
+ '--no_remove=False',
+ '--interpreter=foobar',
+ 'foo',
+ ])
+ self.assertEqual(args.interpreter, 'foobar')
def test_stub(self):
valid_cases = [
- # Empty list
+ # Absolute path to interpreter
[b"""
- python_imports = ''
PYTHON_BINARY = '/usr/bin/python'
""",
- ([], '/usr/bin/python')],
- # Single import
- [b"""
- python_imports = 'myworkspace/spam/eggs'
-PYTHON_BINARY = '/usr/bin/python'
-""",
- (['myworkspace/spam/eggs'], '/usr/bin/python')],
- # Multiple imports
+ '/usr/bin/python'],
+ # Search for interpreter on $PATH
[b"""
- python_imports = 'myworkspace/spam/eggs:otherworkspace'
-PYTHON_BINARY = '/usr/bin/python'
+PYTHON_BINARY = 'python'
""",
- (['myworkspace/spam/eggs', 'otherworkspace'], '/usr/bin/python')],
- # Relative path to interpreter
+ '/usr/bin/env python'],
+ # Default toolchain wrapped python3 interpreter
[b"""
- python_imports = ''
-PYTHON_BINARY = 'mydir/python'
+PYTHON_BINARY = 'bazel_tools/tools/python/py3wrapper.sh'
""",
- ([], 'mydir/python')],
- # Search for interpreter on $PATH
+ '/usr/bin/env python3'],
+ # Default toolchain wrapped python2 interpreter
[b"""
- python_imports = ''
-PYTHON_BINARY = 'python'
+PYTHON_BINARY = 'bazel_tools/tools/python/py2wrapper.sh'
""",
- ([], '/usr/bin/env python')],
+ '/usr/bin/env python2'],
]
for content, expected in valid_cases:
with test_utils.temp_file(content) as stub_file:
@@ -81,15 +94,13 @@ def test_stub(self):
self.assertEqual(actual, expected)
invalid_cases = [
+ # No interpreter
b'',
b'\n\n',
- # No interpreter
- b" python_imports = 'myworkspace/spam/eggs'",
- # No imports
- b"PYTHON_BINARY = 'python'\n",
+ # Relative interpreter path
+ b"PYTHON_BINARY = 'mydir/python'",
# Interpreter is label
b"""
- python_imports = ''
PYTHON_BINARY = '//mypackage:python'
""",
]
diff --git a/compiler/python_archive.py b/compiler/python_archive.py
index 8a119fd..a4a9093 100755
--- a/compiler/python_archive.py
+++ b/compiler/python_archive.py
@@ -29,6 +29,8 @@
"""
from datetime import datetime
+import contextlib
+import errno
import io
import logging
import os
@@ -46,7 +48,8 @@
_boilerplate_template = """\
# Boilerplate added by subpar/compiler/python_archive.py
from %(runtime_package)s import support as _
-_.setup(import_roots=%(import_roots)s, zip_safe=%(zip_safe)s)
+_.setup(import_roots=%(import_roots)s, zip_safe=%(zip_safe)s,
+ no_remove=%(no_remove)s)
del _
# End boilerplate
"""
@@ -100,6 +103,7 @@ def __init__(self,
output_filename,
timestamp,
zip_safe,
+ no_remove,
):
self.main_filename = main_filename
@@ -112,6 +116,7 @@ def __init__(self,
t = datetime.utcfromtimestamp(timestamp)
self.timestamp_tuple = t.timetuple()[0:6]
self.zip_safe = zip_safe
+ self.no_remove = no_remove
self.compression = zipfile.ZIP_DEFLATED
@@ -170,6 +175,7 @@ def generate_boilerplate(self, import_roots):
'runtime_package': _runtime_package,
'import_roots': str(import_roots),
'zip_safe': self.zip_safe,
+ 'no_remove': self.no_remove,
}
return boilerplate_contents.encode('ascii').decode('ascii')
@@ -274,7 +280,7 @@ def write_zip_data(self, temp_parfile, stored_resources):
"""
logging.debug('Storing Files...')
- with zipfile.ZipFile(temp_parfile, 'w', self.compression) as z:
+ with contextlib.closing(zipfile.ZipFile(temp_parfile, 'w', self.compression)) as z:
items = sorted(stored_resources.items())
for relative_path, resource in items:
assert resource.zipinfo.filename == relative_path
@@ -289,9 +295,14 @@ def create_final_from_temp(self, temp_parfile_name):
def remove_if_present(filename):
- """Delete any existing file"""
- if os.path.exists(filename):
+ """Delete a file if it exists"""
+ try:
+            # Remove without checking for existence first, to avoid a race
+            # between the check and the removal.
os.remove(filename)
+ except OSError as exc:
+ # Ignore failure if file does not exist
+ if exc.errno != errno.ENOENT:
+ raise
def fetch_support_file(name, timestamp_tuple):
diff --git a/compiler/python_archive_test.py b/compiler/python_archive_test.py
index 3fd5ec2..05400d7 100644
--- a/compiler/python_archive_test.py
+++ b/compiler/python_archive_test.py
@@ -14,6 +14,7 @@
import os
import subprocess
+import sys
import time
import unittest
import zipfile
@@ -45,11 +46,12 @@ def setUp(self):
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
self.output_filename = os.path.join(self.output_dir, 'output.par')
- self.interpreter = '/usr/bin/python2'
+ self.interpreter = sys.executable
self.import_roots = []
self.date_time_tuple = (1980, 1, 1, 0, 0, 0)
self.timestamp = 315532800
self.zip_safe = True
+ self.no_remove = False
def _construct(self, manifest_filename=None):
return python_archive.PythonArchive(
@@ -61,6 +63,7 @@ def _construct(self, manifest_filename=None):
output_filename=self.output_filename,
timestamp=self.timestamp,
zip_safe=self.zip_safe,
+ no_remove=self.no_remove,
)
def test_create_manifest_not_found(self):
diff --git a/docs/BUILD b/docs/BUILD
index f72a6ea..555d460 100644
--- a/docs/BUILD
+++ b/docs/BUILD
@@ -1,9 +1,10 @@
# To regenerate html docs, run:
-# ./update_docs.sh
+# ../update_docs.sh
-load("@io_bazel_skydoc//skylark:skylark.bzl", "skylark_doc", "skylark_library")
+load("@bazel_skylib//:bzl_library.bzl", "bzl_library")
+load("@io_bazel_skydoc//skylark:skylark.bzl", "skylark_doc")
-skylark_library(
+bzl_library(
name = "subpar-rules",
srcs = [
"@subpar//:debug.bzl",
@@ -16,7 +17,7 @@ skylark_doc(
format = "markdown",
overview = True,
site_root = ".",
- deps = ["subpar-rules"],
+ srcs = [":subpar-rules"],
)
skylark_doc(
@@ -24,5 +25,5 @@ skylark_doc(
format = "html",
overview = True,
site_root = ".",
- deps = ["subpar-rules"],
+ srcs = [":subpar-rules"],
)
diff --git a/docs/WORKSPACE b/docs/WORKSPACE
index 77a5e97..23bddbd 100644
--- a/docs/WORKSPACE
+++ b/docs/WORKSPACE
@@ -1,28 +1,40 @@
workspace(name = "subpar_docs")
+load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
+
local_repository(
name = "subpar",
path = "..",
)
-# Dependency of skydoc
-git_repository(
- name = "io_bazel_rules_sass",
- remote = "https://github.com/bazelbuild/rules_sass.git",
- tag = "0.0.3",
-)
-
-load("@io_bazel_rules_sass//sass:sass.bzl", "sass_repositories")
+# Skydoc / Stardoc and dependencies.
-sass_repositories()
-
-# Used to create documentation
git_repository(
name = "io_bazel_skydoc",
remote = "https://github.com/bazelbuild/skydoc.git",
- commit = "b36d22cc4436a7a7933e36a111bfc00fd494b9fb",
+ commit = "13063139c7c2bca7c725f382fa1e78bdfe93c887", # 2019-02-27
)
-load("@io_bazel_skydoc//skylark:skylark.bzl", "skydoc_repositories")
+# This initialization comes from Stardoc's WORKSPACE file as of commit
+# 13063139c7c2bca7c725f382fa1e78bdfe93c887.
+load("@io_bazel_skydoc//:setup.bzl", "skydoc_repositories")
skydoc_repositories()
+
+load("@io_bazel_rules_sass//:package.bzl", "rules_sass_dependencies")
+rules_sass_dependencies()
+
+load("@build_bazel_rules_nodejs//:defs.bzl", "node_repositories")
+node_repositories()
+
+load("@io_bazel_rules_sass//:defs.bzl", "sass_repositories")
+sass_repositories()
+
+# This definition is also needed for (legacy) Skydoc.
+# TODO(#93): Migrate to Stardoc.
+
+git_repository(
+ name = "com_google_protobuf",
+ remote = "https://github.com/protocolbuffers/protobuf.git",
+ commit = "7b28271a61a3da0a37f6fda399b0c4c86464e5b3", # 2018-11-16
+)
diff --git a/docs/index.html b/docs/index.html
index f46d709..d685f9e 100644
--- a/docs/index.html
+++ b/docs/index.html
@@ -70,7 +70,7 @@
Rule sets
-
+
Macros
-
+
Rules
diff --git a/docs/index.md b/docs/index.md
index a62b873..f4f0c2b 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -10,7 +10,7 @@
-
+
Macros
-
+
Rules
diff --git a/docs/main.css b/docs/main.css
index f506e12..89f66fe 100755
--- a/docs/main.css
+++ b/docs/main.css
@@ -1,3 +1 @@
-body{background-color:#fafafa}pre,code{font-family:'Liberation Mono', Consolas, Monaco, 'Andale Mono', monospace}pre{background-color:#eee;padding:20px;overflow-x:auto;word-wrap:normal}pre code{overflow-wrap:normal;white-space:pre}code{display:inline-block;font-size:90%;white-space:pre-wrap}.mdl-layout__drawer{background-color:#fff}.mdl-layout__drawer .mdl-layout-title{border-bottom:1px solid #e0e0e0;padding-left:24px}.drawer-nav ul{list-style:none;padding-left:0}.drawer-nav ul li{display:block;padding:0}.drawer-nav ul li ul li a{padding-left:44px;font-weight:400}.drawer-nav ul li a{display:block;flex-shrink:0;padding:15px 0 15px 22px;margin:0;font-weight:600;color:#757575;line-height:1em;text-decoration:none;cursor:pointer}.drawer-nav ul li a:active,.drawer-nav ul li a:hover{background-color:#f0f0f0}.drawer-nav ul li.active a{color:#4caf50;font-weight:500}h1.page-title{font-size:34px;font-weight:400;line-height:40px;margin-bottom:30px;color:#4caf50}p.lead{font-size:20px;line-height:32px}table{border-collapse:collapse;border-spacing:0;background-color:#fff;table-layout:auto}table thead th{background-color:#fafafa;border:1px solid #eee;color:#757575;padding:12px 12px 12px 24px;vertical-align:top}table tbody td{border:1px solid #eee;padding:12px 12px 12px 24px;vertical-align:top}table.params-table{width:100%}table.params-table col.col-param{width:25%}table.params-table col.col-description{width:75%}table.overview-table{width:100%}table.overview-table col.col-name{width:25%}table.overview-table col.col-description{width:75%}table.overview-table td p{margin:0}hr{margin-top:80px;margin-bottom:80px}nav.toc{border-left:5px solid #4caf50;padding-left:20px;margin-bottom:48px}nav.toc h1,nav.toc h2{font-size:15px;line-height:16px;padding-bottom:12px;margin-bottom:0;font-weight:400;color:#757575}nav.toc ul{list-style:none;margin-top:0;padding-left:0}nav.toc ul li{font-size:20px;line-height:40px}nav.toc ul li a{color:#4caf50}.page-content{margin-left:auto;margin-right:auto;padding-top:60px;padding-bottom:60px;width:760px}.page-content a{text-decoration:none}.page-content h1{font-size:34px;font-weight:400;line-height:40px;margin-bottom:30px;color:#4caf50}.page-content h2{font-size:24px;font-weight:400;line-height:32px;margin-bottom:30px;color:#4caf50}.page-content h3{font-size:20px;font-weight:400;line-height:32px;margin-bottom:30px;color:#4caf50}@media (max-width: 768px){.page-content{width:360px}}@media (min-width: 768px){.page-content{width:760px}}@media (min-width: 1476px){.page-content{width:1160px}}.mdl-mini-footer{padding-left:40px}
-
-/*# sourceMappingURL=main.css.map */
\ No newline at end of file
+body{background-color:#fafafa}pre,code{font-family:"Liberation Mono",Consolas,Monaco,"Andale Mono",monospace}pre{background-color:#eee;padding:20px;overflow-x:auto;word-wrap:normal}pre code{overflow-wrap:normal;white-space:pre}code{display:inline-block;font-size:90%;white-space:pre-wrap}.mdl-layout__drawer{background-color:#fff}.mdl-layout__drawer .mdl-layout-title{border-bottom:1px solid #e0e0e0;padding-left:24px}.drawer-nav ul{list-style:none;padding-left:0}.drawer-nav ul li{display:block;padding:0}.drawer-nav ul li ul li a{padding-left:44px;font-weight:400}.drawer-nav ul li a{display:block;flex-shrink:0;padding:15px 0 15px 22px;margin:0;font-weight:600;color:#757575;line-height:1em;text-decoration:none;cursor:pointer}.drawer-nav ul li a:active,.drawer-nav ul li a:hover{background-color:#f0f0f0}.drawer-nav ul li.active a{color:#4caf50;font-weight:500}h1.page-title{font-size:34px;font-weight:400;line-height:40px;margin-bottom:30px;color:#4caf50}p.lead{font-size:20px;line-height:32px}table{border-collapse:collapse;border-spacing:0;background-color:#fff;table-layout:auto}table thead th{background-color:#fafafa;border:1px solid #eee;color:#757575;padding:12px 12px 12px 24px;vertical-align:top}table tbody td{border:1px solid #eee;padding:12px 12px 12px 24px;vertical-align:top}table.params-table{width:100%}table.params-table col.col-param{width:25%}table.params-table col.col-description{width:75%}table.overview-table{width:100%}table.overview-table col.col-name{width:25%}table.overview-table col.col-description{width:75%}table.overview-table td p{margin:0}hr{margin-top:40px;margin-bottom:40px}nav.toc{border-left:5px solid #4caf50;padding-left:20px;margin-bottom:48px}nav.toc h1,nav.toc h2{font-size:15px;line-height:16px;padding-bottom:12px;margin-bottom:0;font-weight:400;color:#757575}nav.toc ul{list-style:none;margin-top:0;padding-left:0}nav.toc ul li{font-size:20px;line-height:40px}nav.toc ul li a{color:#4caf50}.page-content{margin-left:auto;margin-right:auto;padding-top:60px;padding-bottom:60px;width:760px}.page-content a{text-decoration:none}.page-content h1{font-size:34px;font-weight:400;line-height:40px;margin-bottom:30px;color:#4caf50}.page-content h2{font-size:24px;font-weight:400;line-height:32px;margin-bottom:30px;color:#4caf50}.page-content h3{font-size:20px;font-weight:400;line-height:32px;margin-bottom:30px;color:#4caf50}@media(max-width: 768px){.page-content{width:360px}}@media(min-width: 768px){.page-content{width:760px}}@media(min-width: 1476px){.page-content{width:1160px}}.mdl-mini-footer{padding-left:40px}/*# sourceMappingURL=main.css.map */
diff --git a/docs/subpar.html b/docs/subpar.html
index 1009446..d510172 100644
--- a/docs/subpar.html
+++ b/docs/subpar.html
@@ -77,7 +77,7 @@ Macros
par_binary
- par_binary(name)
+ par_binary(name, **kwargs)
An executable Python program.
par_binary() is a drop-in replacement for py_binary() that also
@@ -106,13 +106,20 @@
Attributes
A unique name for this rule.
+
+ **kwargs |
+
+ Unknown; Optional
+
+ |
+
par_test
- par_test(name)
+ par_test(name, **kwargs)
An executable Python test.
Just like par_binary, but for py_test instead of py_binary. Useful if you
@@ -134,13 +141,20 @@
Attributes
A unique name for this rule.
+
+ **kwargs |
+
+ Unknown; Optional
+
+ |
+
parfile
- parfile(name, src, compiler, default_python_version, imports, main, zip_safe)
+ parfile(name, src, compiler, compiler_args, default_python_version, imports, main, no_remove, zip_safe)
A self-contained, single-file Python program, with a .par file extension.
You probably want to use par_binary() instead of this.
@@ -177,6 +191,13 @@ Attributes
Internal use only.
+
+ compiler_args |
+
+ List of strings; Optional; Default is []
+
+ |
+
default_python_version |
@@ -202,6 +223,14 @@ Attributes
See py_binary.main
|
+
+ no_remove |
+
+ Boolean; Optional; Default is False
+ Whether to keep the extracted temporary directory after the
+program finishes, if zip_safe is False.
+ |
+
zip_safe |
@@ -218,7 +247,7 @@ Attributes
parfile_test
- parfile_test(name, src, compiler, default_python_version, imports, main, zip_safe)
+ parfile_test(name, src, compiler, compiler_args, default_python_version, imports, main, no_remove, zip_safe)
Identical to par_binary, but the rule is marked as being a test.
You probably want to use par_test() instead of this.
@@ -253,6 +282,13 @@ Attributes
Internal use only.
|
+
+ compiler_args |
+
+ List of strings; Optional; Default is []
+
+ |
+
default_python_version |
@@ -278,6 +314,14 @@ Attributes
See py_binary.main
|
+
+ no_remove |
+
+ Boolean; Optional; Default is False
+ Whether to keep the extracted temporary directory after the
+program finishes, if zip_safe is False.
+ |
+
zip_safe |
diff --git a/docs/subpar.md b/docs/subpar.md
index 1d71527..ddde9df 100644
--- a/docs/subpar.md
+++ b/docs/subpar.md
@@ -21,7 +21,7 @@ Documentation generated by Skydoc
## par_binary
-par_binary(name)
+par_binary(name, **kwargs)
An executable Python program.
@@ -56,13 +56,20 @@ for arguments and usage.
A unique name for this rule.
|
+
+ **kwargs |
+
+ Unknown; Optional
+
+ |
+
## par_test
-par_test(name)
+par_test(name, **kwargs)
An executable Python test.
@@ -88,13 +95,20 @@ specifically need to test a module's behaviour when used in a .par binary.
A unique name for this rule.
+
+ **kwargs |
+
+ Unknown; Optional
+
+ |
+
## parfile
-parfile(name, src, compiler, default_python_version, imports, main, zip_safe)
+parfile(name, src, compiler, compiler_args, default_python_version, imports, main, no_remove, zip_safe)
A self-contained, single-file Python program, with a .par file extension.
@@ -136,6 +150,13 @@ is a bug, don't use or depend on it.
Internal use only.
+
+ compiler_args |
+
+ List of strings; Optional; Default is []
+
+ |
+
default_python_version |
@@ -161,6 +182,14 @@ the application.
See py_binary.main
|
+
+ no_remove |
+
+ Boolean; Optional; Default is False
+ Whether to keep the extracted temporary directory after the
+program finishes, if zip_safe is False.
+ |
+
zip_safe |
@@ -177,7 +206,7 @@ par file executes.
## parfile_test
-parfile_test(name, src, compiler, default_python_version, imports, main, zip_safe)
+parfile_test(name, src, compiler, compiler_args, default_python_version, imports, main, no_remove, zip_safe)
Identical to par_binary, but the rule is marked as being a test.
@@ -216,6 +245,13 @@ You probably want to use par_test() instead of this.
Internal use only.
|
+
+ compiler_args |
+
+ List of strings; Optional; Default is []
+
+ |
+
default_python_version |
@@ -241,6 +277,14 @@ the application.
See py_binary.main
|
+
+ no_remove |
+
+ Boolean; Optional; Default is False
+ Whether to keep the extracted temporary directory after the
+program finishes, if zip_safe is False.
+ |
+
zip_safe |
diff --git a/run_tests.sh b/run_tests.sh
index 1dc7403..acc5410 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -16,8 +16,85 @@
set -euo pipefail
+function die {
+ echo "$*" 1>&2
+ exit 1
+}
+
+# This sets up the toolchain hook to run tests for the given version of Python
+# whose interpreter is located at the given absolute path.
+#
+# $1 may be either "PY2" or "PY3". If it is PY2, then the Python 2 runtime is
+# set to the path in $2, and the Python 3 runtime is set to the default value
+# given by $PYTHON3. If $1 is PY3, then $2 is the Python 3 runtime and the
+# Python 2 runtime is given by $PYTHON2.
+#
+# The PYVER constant is also set to $1. It is consumed at loading time by
+# //tests:BUILD.
+#
+# Note that even though tests are only run for one version of Python at a time,
+# we still need to provide both runtimes in the toolchain for the sake of
+# tools. In particular, the par compiler itself requires PY2, even if the par
+# that we are compiling uses PY3.
+function set_toolchain_hook {
+ pyver=$1
+  if [ "$pyver" = "PY3" ]; then
+ py2_path="$PYTHON2"
+ py3_path="$2"
+ else
+ py2_path="$2"
+ py3_path="$PYTHON3"
+ fi
+
+ cat > toolchain_test_hook.bzl << EOF
+load("@bazel_tools//tools/python:toolchain.bzl", "py_runtime_pair")
+
+PYVER = "$pyver"
+
+def define_toolchain_for_testing():
+ native.py_runtime(
+ name = "py2_runtime",
+ interpreter_path = "$py2_path",
+ python_version = "PY2",
+ )
+
+ native.py_runtime(
+ name = "py3_runtime",
+ interpreter_path = "$py3_path",
+ python_version = "PY3",
+ )
+
+ py_runtime_pair(
+ name = "runtime_pair_for_testing",
+ py2_runtime = ":py2_runtime",
+ py3_runtime = ":py3_runtime",
+ visibility = ["//visibility:public"],
+ )
+
+ native.toolchain(
+ name = "toolchain_for_testing",
+ toolchain = ":runtime_pair_for_testing",
+ toolchain_type = "@bazel_tools//tools/python:toolchain_type",
+ visibility = ["//visibility:public"],
+ )
+EOF
+}
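+
+# For example, `set_toolchain_hook PY3 /usr/bin/python3` (path illustrative)
+# writes a hook whose PY3 runtime is /usr/bin/python3 and whose PY2 runtime is
+# the $PYTHON2 interpreter detected below.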
+
+# Clear the toolchain hook back to its original no-op contents.
+#
+# If the test exits abnormally and this function isn't run, we may be left with
+# a modified version of this file in our source tree.
+function clear_toolchain_hook {
+ cat > toolchain_test_hook.bzl << EOF
+PYVER = "PY3"
+
+def define_toolchain_for_testing():
+ pass
+EOF
+}
+
# Find various tools in environment
-PYTHON2=$(which python)
+PYTHON2=$(which python||true)
if [ -z "${PYTHON2}" ]; then
PYTHON2=$(which python2)
fi
@@ -29,22 +106,30 @@ fi
# https://www.python.org/dev/peps/pep-0394/
if [ -n "${PYTHON2}" ]; then
VERSION=$("${PYTHON2}" -V 2>&1)
- if ! expr match "${VERSION}" "Python 2." >/dev/null ; then
+ if ! expr "${VERSION}" : "Python 2." >/dev/null ; then
PYTHON2=
fi
fi
-PYTHON3=$(which python3)
+PYTHON3=$(which python3||true)
-VIRTUALENV=$(which virtualenv)
+VIRTUALENV=$(which virtualenv) || die "virtualenv not installed"
VIRTUALENVDIR=$(dirname $0)/.env
# Virtualenv `activate` needs $PS1 set
PS1='$ '
-# Must have at least one Python interpreter to test
-if [ -z "${PYTHON2}" -a -z "${PYTHON3}" ]; then
- echo "ERROR: Could not find Python 2 or 3 interpreter on $PATH" 1>&2
- exit 1
+# Must have both Python interpreters to test.
+if [ -z "${PYTHON2}" ]; then
+ die "Could not find Python 2 on $PATH"
+fi
+if [ -z "${PYTHON3}" ]; then
+ die "Could not find Python 3 on $PATH"
+fi
+
+# Must be able to locate toolchain_test_hook.bzl. This will fail if cwd is not
+# the root of the subpar workspace.
+if [ ! -f "toolchain_test_hook.bzl" ]; then
+ die "Could not locate toolchain_test_hook.bzl (are we in the workspace root?)"
fi
# Run test matrix
@@ -53,15 +138,19 @@ for PYTHON_INTERPRETER in "${PYTHON2}" "${PYTHON3}"; do
continue;
fi
+ BAZEL_TEST="bazel test --test_output=errors \
+--incompatible_use_python_toolchains \
+--extra_toolchains=//tests:toolchain_for_testing"
if [ "${PYTHON_INTERPRETER}" = "${PYTHON3}" ]; then
- BAZEL_TEST="bazel test --define subpar_test_python_version=3"
+ PYVER="PY3"
else
- BAZEL_TEST="bazel test"
+ PYVER="PY2"
fi
echo "Testing ${PYTHON_INTERPRETER}"
bazel clean
- ${BAZEL_TEST} --python_path="${PYTHON_INTERPRETER}" --test_output=errors //...
+ set_toolchain_hook "$PYVER" "$PYTHON_INTERPRETER"
+ ${BAZEL_TEST} //...
if [ -n "${VIRTUALENV}" ]; then
echo "Testing bare virtualenv"
@@ -72,7 +161,8 @@ for PYTHON_INTERPRETER in "${PYTHON2}" "${PYTHON3}"; do
"${VIRTUALENVDIR}"
source "${VIRTUALENVDIR}"/bin/activate
bazel clean
- ${BAZEL_TEST} --python_path=$(which python) --test_output=errors //...
+    set_toolchain_hook "$PYVER" "$(which python)"
+ ${BAZEL_TEST} //...
deactivate
for REQUIREMENTS in tests/requirements-test-*.txt; do
@@ -84,8 +174,11 @@ for PYTHON_INTERPRETER in "${PYTHON2}" "${PYTHON3}"; do
source "${VIRTUALENVDIR}"/bin/activate
pip install -r "${REQUIREMENTS}"
bazel clean
- ${BAZEL_TEST} --python_path=$(which python) --test_output=errors //...
+      set_toolchain_hook "$PYVER" "$(which python)"
+ ${BAZEL_TEST} //...
deactivate
done
fi
done
+
+clear_toolchain_hook
diff --git a/runtime/support.py b/runtime/support.py
index e8ec253..b5bcb84 100644
--- a/runtime/support.py
+++ b/runtime/support.py
@@ -95,7 +95,7 @@ def _find_archive():
return archive_path
-def _extract_files(archive_path):
+def _extract_files(archive_path, no_remove):
"""Extract the contents of this .par file to disk.
This creates a temporary directory, and registers an atexit
@@ -107,9 +107,10 @@ def _extract_files(archive_path):
"""
extract_dir = tempfile.mkdtemp()
- def _extract_files_cleanup():
- shutil.rmtree(extract_dir, ignore_errors=True)
- atexit.register(_extract_files_cleanup)
+ if not no_remove:
+ def _extract_files_cleanup():
+ shutil.rmtree(extract_dir, ignore_errors=True)
+ atexit.register(_extract_files_cleanup)
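+    # When no_remove is set, no cleanup handler is registered, so the
+    # extracted tree is left on disk after the program exits.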
_log('# extracting %s to %s' % (archive_path, extract_dir))
zip_file = zipfile.ZipFile(archive_path, mode='r')
@@ -282,7 +283,7 @@ def _initialize_import_path(import_roots, import_prefix):
_log('# adding %s to sys.path' % full_roots)
-def setup(import_roots, zip_safe):
+def setup(import_roots, zip_safe, no_remove):
"""Initialize subpar run-time support
Args:
@@ -309,7 +310,7 @@ def setup(import_roots, zip_safe):
# Extract files to disk if necessary
if not zip_safe:
- extract_dir = _extract_files(archive_path)
+ extract_dir = _extract_files(archive_path, no_remove)
# sys.path[0] is the name of the executing .par file. Point
# it to the extract directory instead, so that Python searches
# there for imports.
diff --git a/runtime/support_test.py b/runtime/support_test.py
index 565adb1..8dda700 100644
--- a/runtime/support_test.py
+++ b/runtime/support_test.py
@@ -101,7 +101,7 @@ def test__find_archive(self):
def test__extract_files(self):
# Extract zipfile
- extract_path = support._extract_files(self.zipfile_name)
+ extract_path = support._extract_files(self.zipfile_name, False)
# Check results
self.assertTrue(os.path.isdir(extract_path))
@@ -140,7 +140,7 @@ def test_setup(self):
mock_sys_path[0] = self.zipfile_name
sys.path = mock_sys_path
success = support.setup(import_roots=['some_root', 'another_root'],
- zip_safe=True)
+ zip_safe=True, no_remove=False)
self.assertTrue(success)
finally:
sys.path = old_sys_path
@@ -165,7 +165,8 @@ def test_setup__extract(self):
mock_sys_path = list(sys.path)
mock_sys_path[0] = self.zipfile_name
sys.path = mock_sys_path
- success = support.setup(import_roots=['some_root'], zip_safe=False)
+ success = support.setup(import_roots=['some_root'], zip_safe=False,
+ no_remove=False)
self.assertTrue(success)
finally:
sys.path = old_sys_path
diff --git a/subpar.bzl b/subpar.bzl
index f12230d..11e3b18 100644
--- a/subpar.bzl
+++ b/subpar.bzl
@@ -14,107 +14,121 @@
"""Build self-contained python executables."""
-load("//:debug.bzl", "dump")
-
-DEFAULT_COMPILER = '//compiler:compiler.par'
+DEFAULT_COMPILER = "//compiler:compiler.par"
def _parfile_impl(ctx):
"""Implementation of parfile() rule"""
+
# Find the main entry point
py_files = ctx.files.main
if len(py_files) == 0:
- fail('Expected exactly one .py file, found none', 'main')
+ fail("Expected exactly one .py file, found none", "main")
elif len(py_files) > 1:
- fail('Expected exactly one .py file, found these: [%s]' % py_files, 'main')
+ fail("Expected exactly one .py file, found these: [%s]" % py_files, "main")
main_py_file = py_files[0]
- if main_py_file not in ctx.attr.src.data_runfiles.files:
- fail('Main entry point [%s] not listed in srcs' % main_py_file, 'main')
+ if main_py_file not in ctx.attr.src.data_runfiles.files.to_list():
+ fail("Main entry point [%s] not listed in srcs" % main_py_file, "main")
# Find the list of things that must be built before this thing is built
# TODO: also handle ctx.attr.src.data_runfiles.symlinks
- inputs = list(ctx.attr.src.default_runfiles.files)
+ inputs = ctx.attr.src.default_runfiles.files.to_list()
# Make a manifest of files to store in the .par file. The
# runfiles manifest is not quite right, so we make our own.
sources_map = {}
+
# First, add the zero-length __init__.py files
- for empty in ctx.attr.src.default_runfiles.empty_filenames:
+ for empty in ctx.attr.src.default_runfiles.empty_filenames.to_list():
stored_path = _prepend_workspace(empty, ctx)
- local_path = ''
+ local_path = ""
sources_map[stored_path] = local_path
+
# Now add the regular (source and generated) files
for input_file in inputs:
stored_path = _prepend_workspace(input_file.short_path, ctx)
local_path = input_file.path
sources_map[stored_path] = local_path
+
# Now make a nice sorted list
sources_lines = []
- for k,v in sorted(sources_map.items()):
- sources_lines.append('%s %s' % (k, v))
- sources_content = '\n'.join(sources_lines) + '\n'
+ for k, v in sorted(sources_map.items()):
+ sources_lines.append("%s %s" % (k, v))
+ sources_content = "\n".join(sources_lines) + "\n"
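+    # Each manifest line is "<path stored in the par> <path in the build tree>",
+    # e.g. "subpar/compiler/cli.py compiler/cli.py"; the second field is empty
+    # for the synthesized __init__.py files.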
# Write the list to the manifest file
- sources_file = ctx.new_file(ctx.label.name + '_SOURCES')
- ctx.file_action(
- output=sources_file,
- content=sources_content,
- executable=False)
+ sources_file = ctx.actions.declare_file(ctx.label.name + "_SOURCES")
+ ctx.actions.write(
+ output = sources_file,
+ content = sources_content,
+ is_executable = False,
+ )
# Find the list of directories to add to sys.path
- # TODO(b/29227737): Use 'imports' provider from Bazel
- stub_file = ctx.attr.src.files_to_run.executable.path
+ import_roots = ctx.attr.src[PyInfo].imports.to_list()
# Inputs to the action, but don't actually get stored in the .par file
extra_inputs = [
sources_file,
- ctx.attr.src.files_to_run.executable,
ctx.attr.src.files_to_run.runfiles_manifest,
- ]
+ ]
zip_safe = ctx.attr.zip_safe
+ no_remove = ctx.attr.no_remove
# Assemble command line for .par compiler
- args = [
- '--manifest_file', sources_file.path,
- '--outputpar', ctx.outputs.executable.path,
- '--stub_file', stub_file,
- '--zip_safe', str(zip_safe),
- main_py_file.path,
+ args = ctx.attr.compiler_args + [
+ "--manifest_file",
+ sources_file.path,
+ "--output_par",
+ ctx.outputs.executable.path,
+ "--stub_file",
+ ctx.attr.src.files_to_run.executable.path,
+ "--zip_safe",
+ str(zip_safe),
+        "--no_remove",
+ str(no_remove),
]
- ctx.action(
- inputs=inputs + extra_inputs,
- outputs=[ctx.outputs.executable],
- progress_message='Building par file %s' % ctx.label,
- executable=ctx.executable.compiler,
- arguments=args,
- mnemonic='PythonCompile',
+ for import_root in import_roots:
+        args.extend(["--import_root", import_root])
+ args.append(main_py_file.path)
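+
+    # The compiler is then invoked roughly as follows (names illustrative,
+    # preceded by any compiler_args):
+    #   compiler.par --manifest_file foo_SOURCES --output_par foo.par \
+    #       --stub_file foo --zip_safe True --no_remove False \
+    #       --import_root <root> ... foo.py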
+
+ # Run compiler
+ ctx.actions.run(
+ inputs = inputs + extra_inputs,
+ tools = [ctx.attr.src.files_to_run.executable],
+ outputs = [ctx.outputs.executable],
+ progress_message = "Building par file %s" % ctx.label,
+ executable = ctx.executable.compiler,
+ arguments = args,
+ mnemonic = "PythonCompile",
+ use_default_shell_env = True,
)
# .par file itself has no runfiles and no providers
- return struct()
+ return []
def _prepend_workspace(path, ctx):
"""Given a path, prepend the workspace name as the parent directory"""
+
# It feels like there should be an easier, less fragile way.
- if path.startswith('../'):
+ if path.startswith("../"):
# External workspace, for example
# '../protobuf/python/google/protobuf/any_pb2.py'
- stored_path = path[len('../'):]
- elif path.startswith('external/'):
+ stored_path = path[len("../"):]
+ elif path.startswith("external/"):
# External workspace, for example
# 'external/protobuf/python/__init__.py'
- stored_path = path[len('external/'):]
+ stored_path = path[len("external/"):]
else:
# Main workspace, for example 'mypackage/main.py'
- stored_path = ctx.workspace_name + '/' + path
+ stored_path = ctx.workspace_name + "/" + path
return stored_path
parfile_attrs = {
"src": attr.label(mandatory = True),
"main": attr.label(
mandatory = True,
- allow_files = True,
- single_file = True,
+ allow_single_file = True,
),
"imports": attr.string_list(default = []),
"default_python_version": attr.string(mandatory = True),
@@ -123,7 +137,9 @@ parfile_attrs = {
executable = True,
cfg = "host",
),
- "zip_safe": attr.bool(default=True),
+ "compiler_args": attr.string_list(default = []),
+ "zip_safe": attr.bool(default = True),
+ "no_remove": attr.bool(default = False),
}
# Rule to create a parfile given a py_binary() as input
@@ -159,6 +175,9 @@ Args:
extracted to a temporary directory on disk each time the
par file executes.
+ no_remove: Whether to keep the extracted temporary directory after the
+      program finishes, if zip_safe is False.
+
TODO(b/27502830): A directory foo.par.runfiles is also created. This
is a bug, don't use or depend on it.
"""
@@ -189,25 +208,29 @@ def par_binary(name, **kwargs):
See [py_binary](http://www.bazel.io/docs/be/python.html#py_binary)
for arguments and usage.
"""
- compiler = kwargs.pop('compiler', None)
- zip_safe = kwargs.pop('zip_safe', True)
- native.py_binary(name=name, **kwargs)
-
- main = kwargs.get('main', name + '.py')
- imports = kwargs.get('imports')
- default_python_version = kwargs.get('default_python_version', 'PY2')
- visibility = kwargs.get('visibility')
- testonly = kwargs.get('testonly', False)
+ compiler = kwargs.pop("compiler", None)
+ compiler_args = kwargs.pop("compiler_args", [])
+ zip_safe = kwargs.pop("zip_safe", True)
+    no_remove = kwargs.pop("no_remove", False)
+ native.py_binary(name = name, **kwargs)
+
+ main = kwargs.get("main", name + ".py")
+ imports = kwargs.get("imports")
+ default_python_version = kwargs.get("default_python_version", "PY2")
+ visibility = kwargs.get("visibility")
+ testonly = kwargs.get("testonly", False)
parfile(
- compiler=compiler,
- default_python_version=default_python_version,
- imports=imports,
- main=main,
- name=name + '.par',
- src=name,
- testonly=testonly,
- visibility=visibility,
- zip_safe=zip_safe,
+ compiler = compiler,
+ compiler_args = compiler_args,
+ default_python_version = default_python_version,
+ imports = imports,
+ main = main,
+ name = name + ".par",
+ src = name,
+ testonly = testonly,
+ visibility = visibility,
+ zip_safe = zip_safe,
+        no_remove = no_remove,
)
def par_test(name, **kwargs):
@@ -216,23 +239,25 @@ def par_test(name, **kwargs):
Just like par_binary, but for py_test instead of py_binary. Useful if you
specifically need to test a module's behaviour when used in a .par binary.
"""
- compiler = kwargs.pop('compiler', None)
- zip_safe = kwargs.pop('zip_safe', True)
- native.py_test(name=name, **kwargs)
-
- main = kwargs.get('main', name + '.py')
- imports = kwargs.get('imports')
- default_python_version = kwargs.get('default_python_version', 'PY2')
- visibility = kwargs.get('visibility')
- testonly = kwargs.get('testonly', True)
+ compiler = kwargs.pop("compiler", None)
+ zip_safe = kwargs.pop("zip_safe", True)
+    no_remove = kwargs.pop("no_remove", False)
+ native.py_test(name = name, **kwargs)
+
+ main = kwargs.get("main", name + ".py")
+ imports = kwargs.get("imports")
+ default_python_version = kwargs.get("default_python_version", "PY2")
+ visibility = kwargs.get("visibility")
+ testonly = kwargs.get("testonly", True)
parfile_test(
- compiler=compiler,
- default_python_version=default_python_version,
- imports=imports,
- main=main,
- name=name + '.par',
- src=name,
- testonly=testonly,
- visibility=visibility,
- zip_safe=zip_safe,
+ compiler = compiler,
+ default_python_version = default_python_version,
+ imports = imports,
+ main = main,
+ name = name + ".par",
+ src = name,
+ testonly = testonly,
+ visibility = visibility,
+ zip_safe = zip_safe,
+        no_remove = no_remove,
)
diff --git a/tests/BUILD b/tests/BUILD
index 0e72f9d..00030fd 100644
--- a/tests/BUILD
+++ b/tests/BUILD
@@ -11,14 +11,13 @@ exports_files([
])
load("//:subpar.bzl", "par_binary")
+load("//:toolchain_test_hook.bzl", "PYVER", "define_toolchain_for_testing")
-# Configurations
-config_setting(
- name = "subpar_test_python_version_3",
- define_values = {
- "subpar_test_python_version": "3",
- },
-)
+# Creates the target //tests:toolchain_for_testing.
+#
+# This hook is used by run_tests.sh to inject a Python toolchain into the build
+# during tests. When not running tests this hook is a no-op.
+define_toolchain_for_testing()
# Utility targets
sh_library(
@@ -65,14 +64,11 @@ par_binary(
par_binary(
name = "package_f/f",
- srcs = select({
- "subpar_test_python_version_3": ["package_f/f_PY3.py"],
- "//conditions:default": ["package_f/f_PY2.py"],
- }),
- main = select({
- "subpar_test_python_version_3": "package_f/f_PY3.py",
- "//conditions:default": "package_f/f_PY2.py",
- }),
+ # We need to use the PYVER constant defined by the test hook, rather than
+ # select(), because python_version is not a configurable attribute.
+ srcs = ["package_f/f_PY3.py"] if PYVER == "PY3" else ["package_f/f_PY2.py"],
+ main = "package_f/f_PY3.py" if PYVER == "PY3" else "package_f/f_PY2.py",
+ python_version = "PY3" if PYVER == "PY3" else "PY2"
)
par_binary(
@@ -94,6 +90,7 @@ par_binary(
main = "package_extract/extract.py",
srcs_version = "PY2AND3",
zip_safe = False,
+ no_remove = False,
)
par_binary(
@@ -151,16 +148,12 @@ par_binary(
args = [
"--par",
"%s.par" % path,
- ] + select({
- "subpar_test_python_version_3": ["%s_PY3_filelist.txt" % path],
- "//conditions:default": ["%s_PY2_filelist.txt" % path],
- }),
+ ("%s_PY3_filelist.txt" % path) if PYVER == "PY3" else ("%s_PY2_filelist.txt" % path)
+ ],
data = [
"%s.par" % label,
- ] + select({
- "subpar_test_python_version_3": ["%s_PY3_filelist.txt" % label],
- "//conditions:default": ["%s_PY2_filelist.txt" % label],
- }),
+ ("%s_PY3_filelist.txt" % label) if PYVER == "PY3" else ("%s_PY2_filelist.txt" % label)
+ ],
),
) for name, label, path in [
("basic", "//tests/package_a:a", "tests/package_a/a"),
diff --git a/tests/package_f/f_PY3.py b/tests/package_f/f_PY3.py
index c5ac97d..5b42e49 100644
--- a/tests/package_f/f_PY3.py
+++ b/tests/package_f/f_PY3.py
@@ -22,7 +22,7 @@
def main():
assert sys.version_info.major == 3, sys.version
- print('In f_PY2.py main()')
+ print('In f_PY3.py main()')
if __name__ == '__main__':
diff --git a/tests/test_harness.sh b/tests/test_harness.sh
index b3a14d0..7c4b380 100755
--- a/tests/test_harness.sh
+++ b/tests/test_harness.sh
@@ -40,8 +40,12 @@ TMP_EXECUTABLE="$TEST_TMPDIR"/$(basename "$EXECUTABLE")
# Compare list of files in zipfile with expected list
if [ "$PAR" -eq 1 ]; then
+ # Exclude runfiles of the autodetecting toolchain. The test is still brittle
+ # with respect to runfiles introduced by any other toolchain. When tests are
+ # invoked by run_tests.sh, a custom toolchain with no runfiles is used.
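+  # (Concretely, the grep below filters out the autodetecting toolchain's
+  # py2wrapper.sh / py3wrapper.sh entries.)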
diff \
- <(unzip -l "$EXECUTABLE" | awk '{print $NF}' | head -n -2 | tail -n +4) \
+ <(unzip -l -q -q "$EXECUTABLE" | awk '{print $NF}' \
+ | grep -v 'bazel_tools/tools/python/py.wrapper\.sh') \
"$FILELIST" \
|| die 'FATAL: zipfile contents do not match expected'
fi
diff --git a/toolchain_test_hook.bzl b/toolchain_test_hook.bzl
new file mode 100644
index 0000000..c2e3d7d
--- /dev/null
+++ b/toolchain_test_hook.bzl
@@ -0,0 +1,4 @@
+PYVER = "PY3"
+
+def define_toolchain_for_testing():
+ pass
|