diff --git a/.github/workflows/azure-sdk-tools.yml b/.github/workflows/azure-sdk-tools.yml
index 9b74aba49588..4858ff6e9aad 100644
--- a/.github/workflows/azure-sdk-tools.yml
+++ b/.github/workflows/azure-sdk-tools.yml
@@ -53,6 +53,60 @@ jobs:
         black --check --config eng/black-pyproject.toml eng/tools/azure-sdk-tools --exclude 'templates'
         shell: bash

+  verify-azpysdk-checks:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Set up Python 3.13
+        uses: actions/setup-python@v4
+        with:
+          python-version: 3.13
+
+      - name: Install uv
+        run: |
+          curl -LsSf https://astral.sh/uv/install.sh | sh
+        shell: bash
+
+      - name: Install azure-sdk-tools into global uv env, discover azpysdk checks
+        run: |
+          uv pip install --system eng/tools/azure-sdk-tools[build,ghtools,conda]
+
+          # Discover available azpysdk commands from the {command1,command2,...} line in help output
+          CHECKS=$(azpysdk -h 2>&1 | \
+            grep -oP '\{[^}]+\}' | \
+            tail -1 | \
+            tr -d '{}' | \
+            tr ',' '\n' | \
+            grep -v '^next-' | \
+            sort | \
+            paste -sd,)
+
+          if [ -z "$CHECKS" ]; then
+            echo "No azpysdk check modules discovered from azpysdk -h" >&2
+            exit 1
+          fi
+          echo "Discovered azpysdk checks: $CHECKS"
+          echo "AZPYSDK_CHECKS=$CHECKS" >> "$GITHUB_ENV"
+        shell: bash
+
+      - name: Run all discovered checks against azure-template using uv as package manager
+        run: |
+          python eng/scripts/dispatch_checks.py --checks "$AZPYSDK_CHECKS" azure-template
+        shell: bash
+        env:
+          TOX_PIP_IMPL: "uv"
+
+      - name: Install azure-sdk-tools into global pip env
+        run: |
+          python -m pip install -e eng/tools/azure-sdk-tools[build,ghtools,conda]
+        shell: bash
+
+      - name: Run all discovered checks against azure-template using pip as package manager
+        run: |
+          python eng/scripts/dispatch_checks.py --checks "$AZPYSDK_CHECKS" azure-template
+        shell: bash
+
   dev-setup-and-import:
     runs-on: ubuntu-latest
     steps:
diff --git a/.gitignore b/.gitignore
index 1c6f8c96989a..84bf07aea48c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -60,6 +60,7 @@ conda/assembled/
 conda/downloaded/
 conda/conda-env/
 scenario_*.txt
+.wheels

 # tox environment folders
 .tox/
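The discovery step above scrapes azpysdk's subcommand list out of its argparse help text. As an illustration of what that grep/tr/paste pipeline extracts, here is the same logic in Python; the sample help text and check names are assumed, not real azpysdk output:

```python
# Approximate the shell pipeline: pull the {command1,command2,...} group out of
# argparse help text and drop the next-* variants. Sample help text is assumed.
import re

sample_help = """usage: azpysdk [-h] {whl,import_all,mypy,next-mypy,pylint,next-pylint,sphinx} ...

positional arguments:
  {whl,import_all,mypy,next-mypy,pylint,next-pylint,sphinx}
"""

def discover_checks(help_text: str) -> list:
    # Match every {...} group and keep the last one, mirroring `tail -1`.
    groups = re.findall(r"\{([^}]+)\}", help_text)
    if not groups:
        raise ValueError("no command group found in help output")
    checks = [c for c in groups[-1].split(",") if not c.startswith("next-")]
    return sorted(checks)

print(",".join(discover_checks(sample_help)))
# -> import_all,mypy,pylint,sphinx,whl
```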
diff --git a/eng/scripts/dispatch_checks.py b/eng/scripts/dispatch_checks.py
index 59f1be3e768a..83f58877790a 100644
--- a/eng/scripts/dispatch_checks.py
+++ b/eng/scripts/dispatch_checks.py
@@ -10,7 +10,7 @@
 from ci_tools.functions import discover_targeted_packages
 from ci_tools.variables import in_ci
-from ci_tools.scenario.generation import build_whl_for_req
+from ci_tools.scenario.generation import build_whl_for_req, replace_dev_reqs
 from ci_tools.logging import configure_logging, logger
 from ci_tools.environment_exclusions import is_check_enabled, CHECK_DEFAULTS

@@ -73,9 +71,7 @@ async def run_check(
         stderr = stderr_b.decode(errors="replace")
         exit_code = proc.returncode or 0
         status = "OK" if exit_code == 0 else f"FAIL({exit_code})"
-        logger.info(
-            f"[END {idx}/{total}] {check} :: {package} -> {status} in {duration:.2f}s"
-        )
+        logger.info(f"[END {idx}/{total}] {check} :: {package} -> {status} in {duration:.2f}s")
         # Print captured output after completion to avoid interleaving
         header = f"===== OUTPUT: {check} :: {package} (exit {exit_code}) ====="
         trailer = "=" * len(header)
@@ -96,9 +94,7 @@
         try:
             shutil.rmtree(isolate_dir)
         except:
-            logger.warning(
-                f"Failed to remove isolate dir {isolate_dir} for {package} / {check}"
-            )
+            logger.warning(f"Failed to remove isolate dir {isolate_dir} for {package} / {check}")

     return CheckResult(package, check, exit_code, duration, stdout, stderr)

@@ -122,18 +118,14 @@ def summarize(results: List[CheckResult]) -> int:
     print("-" * len(header))
     for r in sorted(results, key=lambda x: (x.exit_code != 0, x.package, x.check)):
         status = "OK" if r.exit_code == 0 else f"FAIL({r.exit_code})"
-        print(
-            f"{r.package.ljust(pkg_w)} {r.check.ljust(chk_w)} {status.ljust(8)} {r.duration:>10.2f}"
-        )
+        print(f"{r.package.ljust(pkg_w)} {r.check.ljust(chk_w)} {status.ljust(8)} {r.duration:>10.2f}")
     worst = max((r.exit_code for r in results), default=0)
     failed = [r for r in results if r.exit_code != 0]
-    print(
-        f"\nTotal checks: {len(results)} | Failed: {len(failed)} | Worst exit code: {worst}"
-    )
+    print(f"\nTotal checks: {len(results)} | Failed: {len(failed)} | Worst exit code: {worst}")
     return worst


-async def run_all_checks(packages, checks, max_parallel):
+async def run_all_checks(packages, checks, max_parallel, wheel_dir):
     """Run all checks for all packages concurrently and return the worst exit code.

     :param packages: Iterable of package paths to run checks against.
@@ -142,6 +134,9 @@ async def run_all_checks(packages, checks, max_parallel):
     :type checks: List[str]
     :param max_parallel: Maximum number of concurrent checks to run.
     :type max_parallel: int
+    :param wheel_dir: The directory where wheels should be located and stored when built.
+        In CI this should correspond to `$(Build.ArtifactStagingDirectory)`.
+    :type wheel_dir: str
     :returns: The worst exit code from all checks (0 if all passed).
     :rtype: int
     """
@@ -150,17 +145,33 @@
     semaphore = asyncio.Semaphore(max_parallel)
     combos = [(p, c) for p in packages for c in checks]
     total = len(combos)
+
+    test_tools_path = os.path.join(root_dir, "eng", "test_tools.txt")
+    dependency_tools_path = os.path.join(root_dir, "eng", "dependency_tools.txt")
+
+    if in_ci():
+        logger.info("Replacing relative requirements in eng/test_tools.txt with prebuilt wheels.")
+        replace_dev_reqs(test_tools_path, root_dir, wheel_dir)
+
+        logger.info("Replacing relative requirements in eng/dependency_tools.txt with prebuilt wheels.")
+        replace_dev_reqs(dependency_tools_path, root_dir, wheel_dir)
+
+        for pkg in packages:
+            destination_dev_req = os.path.join(pkg, "dev_requirements.txt")
+
+            logger.info(f"Replacing dev requirements w/ path {destination_dev_req}")
+            if not os.path.exists(destination_dev_req):
+                logger.info("No dev_requirements present.")
+                with open(destination_dev_req, "w+") as file:
+                    file.write("\n")
+
+            replace_dev_reqs(destination_dev_req, pkg, wheel_dir)
+
     for idx, (package, check) in enumerate(combos, start=1):
         if not is_check_enabled(package, check, CHECK_DEFAULTS.get(check, True)):
-            logger.warning(
-                f"Skipping disabled check {check} ({idx}/{total}) for package {package}"
-            )
+            logger.warning(f"Skipping disabled check {check} ({idx}/{total}) for package {package}")
             continue
-        tasks.append(
-            asyncio.create_task(
-                run_check(semaphore, package, check, base_args, idx, total)
-            )
-        )
+        tasks.append(asyncio.create_task(run_check(semaphore, package, check, base_args, idx, total)))

     # Handle Ctrl+C gracefully
     pending = set(tasks)
@@ -179,9 +190,7 @@
         elif isinstance(res, Exception):
             norm_results.append(CheckResult(package, check, 99, 0.0, "", str(res)))
         else:
-            norm_results.append(
-                CheckResult(package, check, 98, 0.0, "", f"Unknown result type: {res}")
-            )
+            norm_results.append(CheckResult(package, check, 98, 0.0, "", f"Unknown result type: {res}"))

     return summarize(norm_results)
@@ -257,15 +266,11 @@ def handler(signum, frame):
         ),
     )

-    parser.add_argument(
-        "--disablecov", help=("Flag. Disables code coverage."), action="store_true"
-    )
+    parser.add_argument("--disablecov", help=("Flag. Disables code coverage."), action="store_true")

     parser.add_argument(
         "--service",
-        help=(
-            "Name of service directory (under sdk/) to test. Example: --service applicationinsights"
-        ),
+        help=("Name of service directory (under sdk/) to test. Example: --service applicationinsights"),
     )

     parser.add_argument(
@@ -325,9 +330,7 @@ def handler(signum, frame):
     else:
         target_dir = root_dir

-    logger.info(
-        f"Beginning discovery for {args.service} and root dir {root_dir}. Resolving to {target_dir}."
-    )
+    logger.info(f"Beginning discovery for {args.service} and root dir {root_dir}. Resolving to {target_dir}.")

     # ensure that recursive virtual envs aren't messed with by this call
     os.environ.pop("VIRTUAL_ENV", None)
@@ -344,26 +347,21 @@ def handler(signum, frame):
     )

     if len(targeted_packages) == 0:
-        logger.info(
-            f"No packages collected for targeting string {args.glob_string} and root dir {root_dir}. Exit 0."
-        )
+        logger.info(f"No packages collected for targeting string {args.glob_string} and root dir {root_dir}. Exit 0.")
         exit(0)

     logger.info(f"Executing checks with the executable {sys.executable}.")
     logger.info(f"Packages targeted: {targeted_packages}")

+    temp_wheel_dir = args.wheel_dir or os.path.join(root_dir, ".wheels")
     if args.wheel_dir:
         os.environ["PREBUILT_WHEEL_DIR"] = args.wheel_dir
-
-    if not os.path.exists(os.path.join(root_dir, ".wheels")):
-        os.makedirs(os.path.join(root_dir, ".wheels"))
+    else:
+        if not os.path.exists(temp_wheel_dir):
+            os.makedirs(temp_wheel_dir)

     if in_ci():
-        # prepare a build of eng/tools/azure-sdk-tools
-        # todo: ensure that we honor this .wheels directory when replacing for dev reqs
-        build_whl_for_req(
-            "eng/tools/azure-sdk-tools", root_dir, os.path.join(root_dir, ".wheels")
-        )
+        build_whl_for_req("eng/tools/azure-sdk-tools", root_dir, temp_wheel_dir)

     # so if we have checks whl,import_all and selected package paths `sdk/core/azure-core`, `sdk/storage/azure-storage-blob` we should
     # shell out to `azpysdk` with cwd of the package directory, which is what is in `targeted_packages` array
@@ -382,9 +380,7 @@ def handler(signum, frame):
     configure_interrupt_handling()

     try:
-        exit_code = asyncio.run(
-            run_all_checks(targeted_packages, checks, args.max_parallel)
-        )
+        exit_code = asyncio.run(run_all_checks(targeted_packages, checks, args.max_parallel, temp_wheel_dir))
     except KeyboardInterrupt:
         logger.error("Aborted by user.")
         exit_code = 130
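The dispatch script fans every (package, check) pair out as a subprocess while `asyncio.Semaphore(max_parallel)` caps concurrency. A minimal sketch of that pattern follows; the exact azpysdk command line is assumed for illustration (the real arguments live in `run_check`'s `base_args`):

```python
# Semaphore-bounded fan-out: every (package, check) combination becomes a task,
# but at most `max_parallel` subprocesses run at any moment.
import asyncio

async def run_one(sem: asyncio.Semaphore, package: str, check: str) -> int:
    async with sem:  # at most max_parallel tasks hold the semaphore
        proc = await asyncio.create_subprocess_exec(
            "azpysdk", check, ".",  # assumed invocation; cwd is the package dir
            cwd=package,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
        await proc.communicate()  # buffer output so logs don't interleave
        return proc.returncode or 0

async def run_all(packages: list, checks: list, max_parallel: int) -> int:
    sem = asyncio.Semaphore(max_parallel)
    tasks = [run_one(sem, p, c) for p in packages for c in checks]
    results = await asyncio.gather(*tasks)
    return max(results, default=0)  # worst exit code wins, as in summarize()
```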
diff --git a/eng/tools/azure-sdk-tools/azpysdk/Check.py b/eng/tools/azure-sdk-tools/azpysdk/Check.py
index 03e64ef752c2..bb395211e103 100644
--- a/eng/tools/azure-sdk-tools/azpysdk/Check.py
+++ b/eng/tools/azure-sdk-tools/azpysdk/Check.py
@@ -11,8 +11,15 @@
 import subprocess

 from ci_tools.parsing import ParsedSetup
-from ci_tools.functions import discover_targeted_packages, get_venv_call, install_into_venv, get_venv_python
-from ci_tools.variables import discover_repo_root
+from ci_tools.functions import (
+    discover_targeted_packages,
+    get_venv_call,
+    install_into_venv,
+    get_venv_python,
+    get_pip_command,
+    find_whl,
+)
+from ci_tools.variables import discover_repo_root, in_ci
 from ci_tools.logging import logger

 # right now, we are assuming you HAVE to be in the azure-sdk-tools repo
@@ -64,9 +71,26 @@ def create_venv(self, isolate: bool, venv_location: str) -> str:
         subprocess.check_call(venv_cmd + [venv_location])

-        # TODO: we should reuse part of build_whl_for_req to integrate with PREBUILT_WHL_DIR so that we don't have to fresh build for each
-        # venv
-        install_into_venv(venv_location, [os.path.join(REPO_ROOT, "eng/tools/azure-sdk-tools[build]")], REPO_ROOT)
+        if in_ci():
+            # first attempt to retrieve azure-sdk-tools from the prebuilt wheel directory
+            # if present, install from there instead of constantly rebuilding azure-sdk-tools in a possible
+            # parallel situation
+            wheel_dir = os.getenv("PREBUILT_WHEEL_DIR", None) or os.path.join(REPO_ROOT, ".wheels")
+            prebuilt_whl = find_whl(wheel_dir, "azure-sdk-tools", "0.0.0")
+
+            if prebuilt_whl:
+                install_location = os.path.join(wheel_dir, prebuilt_whl)
+                install_into_venv(venv_location, [f"{install_location}[build]"], REPO_ROOT)
+            else:
+                logger.error(
+                    "Falling back to manual build and install of azure-sdk-tools into isolated env,"
+                    f" unable to locate prebuilt azure-sdk-tools within {wheel_dir}"
+                )
+        else:
+            install_into_venv(
+                venv_location, [os.path.join(REPO_ROOT, "eng/tools/azure-sdk-tools[build]")], REPO_ROOT
+            )
+
         venv_python_exe = get_venv_python(venv_location)
         return venv_python_exe
@@ -86,7 +110,7 @@ def get_executable(self, isolate: bool, check_name: str, executable: str, packag
         return executable, staging_directory

     def run_venv_command(
-        self, executable: str, command: Sequence[str], cwd: str, check: bool = False
+        self, executable: str, command: Sequence[str], cwd: str, check: bool = False, append_executable: bool = True
     ) -> subprocess.CompletedProcess[str]:
         """Run a command in the given virtual environment.
         - Prepends the virtual environment's bin directory to the PATH environment variable (if one exists)
@@ -112,8 +136,21 @@
         else:
             raise RuntimeError(f"Unable to find parent venv for executable {executable}")

+        # When not appending the executable, resolve the command using the modified PATH
+        if not append_executable:
+            resolved = shutil.which(command[0], path=env["PATH"])
+            if not resolved:
+                raise RuntimeError(f"Command '{command[0]}' not found in PATH: {env['PATH']}")
+            cmd_to_run = [resolved] + list(command[1:])
+        else:
+            cmd_to_run = [executable] + list(command)
+
+        logger.debug(f"Running command: {cmd_to_run}.")
+        logger.debug(f"VIRTUAL_ENV: {env['VIRTUAL_ENV']}.")
+        logger.debug(f"PATH: {env['PATH']}.")
+
         result = subprocess.run(
-            [executable, *command], cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, check=check
+            cmd_to_run, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, check=check, env=env
         )
         return result
@@ -180,3 +217,28 @@ def install_dev_reqs(self, executable: str, args: argparse.Namespace, package_di
                 os.remove(temp_req_file.name)
             except Exception as cleanup_error:
                 logger.warning(f"Failed to remove temporary requirements file: {cleanup_error}")
+
+    def pip_freeze(self, executable: str) -> None:
+        """Run pip freeze in the given virtual environment and log the output. This function handles both isolated
+        and non-isolated environments, as well as calling the proper `uv` executable with the additional --python
+        argument if needed.
+
+        :param executable: Path to the python executable that should invoke this check.
+        :returns None:
+        """
+        try:
+            # to uv pip install or freeze to a target environment, we have to add `--python`
+            # to tell uv which environment to target
+            command = get_pip_command(executable)
+
+            if command[0] == "uv":
+                command += ["freeze", "--python", executable]
+            else:
+                command += ["freeze"]
+
+            result = subprocess.run(command, cwd=os.getcwd(), check=True, capture_output=True, text=True)
+            logger.info("Installed packages:")
+            logger.info(result.stdout)
+        except subprocess.CalledProcessError as e:
+            logger.error(f"Failed to run pip freeze: {e}")
+            logger.error(e.stdout)
+            logger.error(e.stderr)
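The new `append_executable=False` branch of `run_venv_command` resolves console-script entry points such as `sphinx-build` against the venv's bin directory instead of prefixing the python executable. A self-contained sketch of that resolution under assumed paths (the venv location is a placeholder):

```python
# Prepend the venv's bin/Scripts directory to PATH, then resolve the tool name
# against that PATH so the venv's copy of the entry point wins over any global one.
import os
import shutil
import subprocess

def run_in_venv(venv_location: str, command: list) -> subprocess.CompletedProcess:
    bin_dir = os.path.join(venv_location, "Scripts" if os.name == "nt" else "bin")
    env = dict(os.environ)
    env["VIRTUAL_ENV"] = venv_location
    env["PATH"] = bin_dir + os.pathsep + env.get("PATH", "")

    resolved = shutil.which(command[0], path=env["PATH"])
    if resolved is None:
        raise RuntimeError(f"{command[0]} not found on PATH {env['PATH']}")
    return subprocess.run([resolved, *command[1:]], env=env, capture_output=True, text=True)

# e.g. run_in_venv("/tmp/.venv_sphinx", ["sphinx-build", "--version"])
```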
diff --git a/eng/tools/azure-sdk-tools/azpysdk/bandit.py b/eng/tools/azure-sdk-tools/azpysdk/bandit.py
index 132d640c29cd..17491fefc424 100644
--- a/eng/tools/azure-sdk-tools/azpysdk/bandit.py
+++ b/eng/tools/azure-sdk-tools/azpysdk/bandit.py
@@ -49,13 +49,7 @@ def run(self, args: argparse.Namespace) -> int:
                 logger.error(f"Failed to install bandit: {e}")
                 return e.returncode

-            # debug a pip freeze result
-            cmd = get_pip_command(executable) + ["freeze"]
-            freeze_result = subprocess.run(
-                cmd, cwd=package_dir, check=False, text=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
-            )
-            logger.debug(f"Running pip freeze with {cmd}")
-            logger.debug(freeze_result.stdout)
+            self.pip_freeze(executable)

             if in_ci():
                 if not is_check_enabled(package_dir, "bandit"):
diff --git a/eng/tools/azure-sdk-tools/azpysdk/import_all.py b/eng/tools/azure-sdk-tools/azpysdk/import_all.py
index fd4a2baa5ca0..a624512013c8 100644
--- a/eng/tools/azure-sdk-tools/azpysdk/import_all.py
+++ b/eng/tools/azure-sdk-tools/azpysdk/import_all.py
@@ -50,9 +50,6 @@ def run(self, args: argparse.Namespace) -> int:

         targeted = self.get_targeted_directories(args)

-        # {[tox]pip_command} freeze
-        # python {repository_root}/eng/tox/import_all.py -t {tox_root}
-
         outcomes: List[int] = []

         for parsed in targeted:
diff --git a/eng/tools/azure-sdk-tools/azpysdk/main.py b/eng/tools/azure-sdk-tools/azpysdk/main.py
index 03665a993bb0..13643a24ccd2 100644
--- a/eng/tools/azure-sdk-tools/azpysdk/main.py
+++ b/eng/tools/azure-sdk-tools/azpysdk/main.py
@@ -12,7 +12,6 @@
 import os
 from typing import Sequence, Optional

-from .whl import whl
 from .import_all import import_all
 from .mypy import mypy
 from .next_mypy import next_mypy
@@ -25,8 +24,8 @@
 from .next_pyright import next_pyright
 from .ruff import ruff
 from .verifytypes import verifytypes
-from .verify_whl import verify_whl
 from .verify_sdist import verify_sdist
+from .verify_whl import verify_whl
 from .bandit import bandit
 from .verify_keywords import verify_keywords

@@ -72,7 +71,6 @@ def build_parser() -> argparse.ArgumentParser:
     subparsers = parser.add_subparsers(title="commands", dest="command")

     # register our checks with the common params as their parent
-    whl().register(subparsers, [common])
     import_all().register(subparsers, [common])
     mypy().register(subparsers, [common])
     next_mypy().register(subparsers, [common])
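main.py's registration loop rests on the stdlib argparse subparsers/parents mechanism; `Check.register` is the repo's own wrapper around it. A generic sketch of the underlying pattern, with illustrative command names (the `--isolate` flag mirrors the `args.isolate` usage visible in the pylint.py hunk below):

```python
# Each check registers a subcommand that inherits shared flags via `parents`.
import argparse

common = argparse.ArgumentParser(add_help=False)
common.add_argument("--isolate", action="store_true", help="run in an isolated venv")

parser = argparse.ArgumentParser(prog="azpysdk")
subparsers = parser.add_subparsers(title="commands", dest="command")

for name in ("import_all", "mypy", "pylint"):  # illustrative subset of checks
    subparsers.add_parser(name, parents=[common])

args = parser.parse_args(["mypy", "--isolate"])
assert args.command == "mypy" and args.isolate
```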
diff --git a/eng/tools/azure-sdk-tools/azpysdk/pylint.py b/eng/tools/azure-sdk-tools/azpysdk/pylint.py
index 5d60f5439040..8a571008a567 100644
--- a/eng/tools/azure-sdk-tools/azpysdk/pylint.py
+++ b/eng/tools/azure-sdk-tools/azpysdk/pylint.py
@@ -51,7 +51,6 @@ def run(self, args: argparse.Namespace) -> int:
             package_name = parsed.name
             executable, staging_directory = self.get_executable(args.isolate, args.command, sys.executable, package_dir)
             logger.info(f"Processing {package_name} for pylint check")
-            pip_cmd = get_pip_command(executable)

             # install dependencies
             self.install_dev_reqs(executable, args, package_dir)
@@ -91,13 +90,7 @@ def run(self, args: argparse.Namespace) -> int:
                 logger.error(f"Failed to install pylint: {e}")
                 return e.returncode

-            # debug a pip freeze result
-            cmd = pip_cmd + ["freeze"]
-            freeze_result = subprocess.run(
-                cmd, cwd=package_dir, check=False, text=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
-            )
-            logger.debug(f"Running pip freeze with {cmd}")
-            logger.debug(freeze_result.stdout)
+            self.pip_freeze(executable)

             top_level_module = parsed.namespace.split(".")[0]
diff --git a/eng/tools/azure-sdk-tools/azpysdk/sphinx.py b/eng/tools/azure-sdk-tools/azpysdk/sphinx.py
index 70c265f916eb..e89a3460fdac 100644
--- a/eng/tools/azure-sdk-tools/azpysdk/sphinx.py
+++ b/eng/tools/azure-sdk-tools/azpysdk/sphinx.py
@@ -107,68 +107,6 @@ def copy_existing_docs(source: str, target: str) -> None:
             shutil.copy(os.path.join(source, file), target)


-def mgmt_apidoc(output_dir: str, target_folder: str, executable: str) -> int:
-    command_array = [
-        executable,
-        generate_mgmt_script,
-        "-p",
-        target_folder,
-        "-o",
-        output_dir,
-        "--verbose",
-    ]
-
-    try:
-        logger.info("Command to generate management sphinx sources: {}".format(command_array))
-
-        check_call(command_array)
-    except CalledProcessError as e:
-        logger.error("script failed for path {} exited with error {}".format(output_dir, e.returncode))
-        return 1
-    return 0
-
-
-def sphinx_apidoc(output_dir: str, target_dir: str, namespace: str) -> int:
-    working_doc_folder = os.path.join(output_dir, "doc")
-    command_array = [
-        "sphinx-apidoc",
-        "--no-toc",
-        "--module-first",
-        "-o",
-        os.path.join(output_dir, "docgen"),  # This is the output folder
-        os.path.join(target_dir, ""),  # This is the input folder
-        os.path.join(target_dir, "test*"),  # This argument and below are "exclude" directory arguments
-        os.path.join(target_dir, "example*"),
-        os.path.join(target_dir, "sample*"),
-        os.path.join(target_dir, "setup.py"),
-        os.path.join(target_dir, "conftest.py"),
-    ]
-
-    try:
-        # if a `doc` folder exists, just leverage the sphinx sources found therein
-        if os.path.exists(working_doc_folder):
-            logger.info("Copying files into sphinx source folder.")
-            copy_existing_docs(working_doc_folder, os.path.join(output_dir, "docgen"))
-
-        # otherwise, we will run sphinx-apidoc to generate the sources
-        else:
-            logger.info("Sphinx api-doc command: {}".format(command_array))
-            check_call(command_array)
-            # We need to clean "azure.rst", and other RST before the main namespaces, as they are never
-            # used and will log as a warning later by sphinx-build, which is blocking strict_sphinx
-            base_path = Path(os.path.join(output_dir, "docgen/"))
-            namespace = namespace.rpartition(".")[0]
-            while namespace:
-                rst_file_to_delete = base_path / f"{namespace}.rst"
-                logger.info(f"Removing {rst_file_to_delete}")
-                rst_file_to_delete.unlink(missing_ok=True)
-                namespace = namespace.rpartition(".")[0]
-    except CalledProcessError as e:
-        logger.error("sphinx-apidoc failed for path {} exited with error {}".format(output_dir, e.returncode))
-        return 1
-    return 0
-
-
 # build helper functions
 def move_output_and_compress(target_dir: str, package_dir: str, package_name: str) -> None:
     if not os.path.exists(ci_doc_dir):
@@ -194,35 +132,6 @@ def should_build_docs(package_name: str) -> bool:
     )


-def sphinx_build(package_dir: str, target_dir: str, output_dir: str, fail_on_warning: bool) -> int:
-    command_array = [
-        "sphinx-build",
-        "-b",
-        "html",
-        "-A",
-        "include_index_link=True",
-        "-c",
-        sphinx_conf_dir,
-        target_dir,
-        output_dir,
-    ]
-    if fail_on_warning:
-        command_array.append("-W")
-        command_array.append("--keep-going")
-
-    try:
-        logger.info("Sphinx build command: {}".format(command_array))
-        check_call(command_array, cwd=package_dir)
-    except CalledProcessError as e:
-        logger.error("sphinx-build failed for path {} exited with error {}".format(target_dir, e.returncode))
-        if in_analyze_weekly():
-            from gh_tools.vnext_issue_creator import create_vnext_issue
-
-            create_vnext_issue(package_dir, "sphinx")
-        return 1
-    return 0
-
-
 class sphinx(Check):
     def __init__(self) -> None:
         super().__init__()
@@ -324,9 +233,9 @@ def run(self, args: argparse.Namespace) -> int:
             # run apidoc
             if should_build_docs(parsed.name):
                 if is_mgmt_package(parsed.name):
-                    results.append(mgmt_apidoc(doc_folder, package_dir, executable))
+                    results.append(self.mgmt_apidoc(doc_folder, package_dir, executable))
                 else:
-                    results.append(sphinx_apidoc(staging_directory, package_dir, parsed.namespace))
+                    results.append(self.sphinx_apidoc(staging_directory, package_dir, parsed.namespace, executable))
             else:
                 logger.info("Skipping sphinx source generation for {}".format(parsed.name))
@@ -335,12 +244,9 @@ def run(self, args: argparse.Namespace) -> int:
                 # Only data-plane libraries run strict sphinx at the moment
                 fail_on_warning = not is_mgmt_package(package_name)
                 results.append(
-                    sphinx_build(
-                        package_dir,
-                        doc_folder,  # source
-                        site_folder,  # output
-                        fail_on_warning=fail_on_warning,
-                    )
+                    # doc_folder = source
+                    # site_folder = output
+                    self.sphinx_build(package_dir, doc_folder, site_folder, fail_on_warning, executable)
                 )

                 if in_ci() or args.in_ci:
@@ -354,3 +260,94 @@ def run(self, args: argparse.Namespace) -> int:
             logger.info("Skipping sphinx build for {}".format(package_name))

         return max(results) if results else 0
+
+    def sphinx_build(
+        self, package_dir: str, target_dir: str, output_dir: str, fail_on_warning: bool, executable: str
+    ) -> int:
+        command_array = [
+            "sphinx-build",
+            "-b",
+            "html",
+            "-A",
+            "include_index_link=True",
+            "-c",
+            sphinx_conf_dir,
+            target_dir,
+            output_dir,
+        ]
+        if fail_on_warning:
+            command_array.append("-W")
+            command_array.append("--keep-going")
+
+        try:
+            logger.info("Sphinx build command: {}".format(command_array))
+
+            self.run_venv_command(executable, command_array, cwd=package_dir, check=True, append_executable=False)
+        except CalledProcessError as e:
+            logger.error("sphinx-build failed for path {} exited with error {}".format(target_dir, e.returncode))
+            if in_analyze_weekly():
+                from gh_tools.vnext_issue_creator import create_vnext_issue
+
+                create_vnext_issue(package_dir, "sphinx")
+            return 1
+        return 0
+
+    def mgmt_apidoc(self, output_dir: str, target_folder: str, executable: str) -> int:
+        command_array = [
+            executable,
+            generate_mgmt_script,
+            "-p",
+            target_folder,
+            "-o",
+            output_dir,
+            "--verbose",
+        ]
+
+        try:
+            logger.info("Command to generate management sphinx sources: {}".format(command_array))
+
+            self.run_venv_command(executable, command_array, cwd=target_folder, check=True, append_executable=False)
+        except CalledProcessError as e:
+            logger.error("script failed for path {} exited with error {}".format(output_dir, e.returncode))
+            return 1
+        return 0
+
+    def sphinx_apidoc(self, output_dir: str, target_dir: str, namespace: str, executable: str) -> int:
+        working_doc_folder = os.path.join(output_dir, "doc")
+        command_array = [
+            "sphinx-apidoc",
+            "--no-toc",
+            "--module-first",
+            "-o",
+            os.path.join(output_dir, "docgen"),  # This is the output folder
+            os.path.join(target_dir, ""),  # This is the input folder
+            os.path.join(target_dir, "test*"),  # This argument and below are "exclude" directory arguments
+            os.path.join(target_dir, "example*"),
+            os.path.join(target_dir, "sample*"),
+            os.path.join(target_dir, "setup.py"),
+            os.path.join(target_dir, "conftest.py"),
+        ]
+
+        try:
+            # if a `doc` folder exists, just leverage the sphinx sources found therein
+            if os.path.exists(working_doc_folder):
+                logger.info("Copying files into sphinx source folder.")
+                copy_existing_docs(working_doc_folder, os.path.join(output_dir, "docgen"))
+
+            # otherwise, we will run sphinx-apidoc to generate the sources
+            else:
+                logger.info("Sphinx api-doc command: {}".format(command_array))
+                self.run_venv_command(executable, command_array, cwd=target_dir, check=True, append_executable=False)
+                # We need to clean "azure.rst", and other RST before the main namespaces, as they are never
+                # used and will log as a warning later by sphinx-build, which is blocking strict_sphinx
+                base_path = Path(os.path.join(output_dir, "docgen/"))
+                namespace = namespace.rpartition(".")[0]
+                while namespace:
+                    rst_file_to_delete = base_path / f"{namespace}.rst"
+                    logger.info(f"Removing {rst_file_to_delete}")
+                    rst_file_to_delete.unlink(missing_ok=True)
+                    namespace = namespace.rpartition(".")[0]
+        except CalledProcessError as e:
+            logger.error("sphinx-apidoc failed for path {} exited with error {}".format(output_dir, e.returncode))
+            return 1
+        return 0
diff --git a/eng/tools/azure-sdk-tools/ci_tools/scenario/generation.py b/eng/tools/azure-sdk-tools/ci_tools/scenario/generation.py
index d23923fe127a..518dfa3dc63b 100644
--- a/eng/tools/azure-sdk-tools/ci_tools/scenario/generation.py
+++ b/eng/tools/azure-sdk-tools/ci_tools/scenario/generation.py
@@ -341,10 +341,13 @@ def build_whl_for_req(req: str, package_path: str, wheel_dir: Optional[str]) ->
         if prebuilt_whl:
             whl_path = os.path.join(wheel_dir, prebuilt_whl)
         else:
-            # Create temp path if it doesn't exist
-            temp_dir = os.path.join(package_path, ".tmp_whl_dir")
-            if not os.path.exists(temp_dir):
-                os.mkdir(temp_dir)
+            if wheel_dir:
+                temp_dir = wheel_dir
+            else:
+                # Create temp path if it doesn't exist
+                temp_dir = os.path.join(package_path, ".tmp_whl_dir")
+                if not os.path.exists(temp_dir):
+                    os.mkdir(temp_dir)

             logging.info("Building wheel for package {}".format(parsed.name))
             create_package(req_pkg_path, temp_dir, enable_sdist=False)
diff --git a/sdk/template/azure-template/pyproject.toml b/sdk/template/azure-template/pyproject.toml
index 0fd2dd64f573..2a49cab47359 100644
--- a/sdk/template/azure-template/pyproject.toml
+++ b/sdk/template/azure-template/pyproject.toml
@@ -36,7 +36,7 @@ version = {attr = "azure.template._version.VERSION"}
 readme = {file = ["README.md"], content-type = "text/markdown"}

 [tool.setuptools.packages.find]
-exclude = ["tests*", "samples*", "azure"]
+exclude = ["tests*", "samples*", "azure", "build*"]

 [tool.setuptools.package-data]
 pytyped = ["py.typed"]
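Taken together, these changes establish one wheel-caching convention: prefer `PREBUILT_WHEEL_DIR`, fall back to `<repo>/.wheels`, and only rebuild when no matching wheel is already present. A standalone sketch of that lookup order; the filename glob is a stand-in for `find_whl`'s real matching logic:

```python
# Locate a cached azure-sdk-tools wheel before falling back to a fresh build.
import glob
import os

def locate_tools_whl(repo_root: str) -> str:
    wheel_dir = os.getenv("PREBUILT_WHEEL_DIR") or os.path.join(repo_root, ".wheels")
    candidates = sorted(glob.glob(os.path.join(wheel_dir, "azure_sdk_tools-*.whl")))
    if candidates:
        return candidates[0]  # reuse the prebuilt wheel; no rebuild needed
    os.makedirs(wheel_dir, exist_ok=True)
    # in CI, build_whl_for_req would build the wheel into wheel_dir here
    raise FileNotFoundError(f"no prebuilt azure-sdk-tools wheel in {wheel_dir}")
```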