54 changes: 54 additions & 0 deletions .github/workflows/azure-sdk-tools.yml
@@ -53,6 +53,60 @@ jobs:
black --check --config eng/black-pyproject.toml eng/tools/azure-sdk-tools --exclude 'templates'
shell: bash

verify-azpysdk-checks:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2

- name: Set up Python 3.13
uses: actions/setup-python@v4
with:
python-version: 3.13

- name: Install uv
run: |
curl -LsSf https://astral.sh/uv/install.sh | sh
shell: bash

- name: Install azure-sdk-tools into the global uv environment, discover azpysdk checks
run: |
uv pip install --system eng/tools/azure-sdk-tools[build,ghtools,conda]

# Discover available azpysdk commands from the {command1,command2,...} line in help output
CHECKS=$(azpysdk -h 2>&1 | \
grep -oP '\{[^}]+\}' | \
tail -1 | \
tr -d '{}' | \
tr ',' '\n' | \
grep -v '^next-' | \
sort | \
paste -sd,)

if [ -z "$CHECKS" ]; then
echo "No azpysdk check modules discovered from azpysdk -h" >&2
exit 1
fi
echo "Discovered azpysdk checks: $CHECKS"
echo "AZPYSDK_CHECKS=$CHECKS" >> "$GITHUB_ENV"
shell: bash

- name: Run all discovered checks against azure-template using uv as package manager
run: |
python eng/scripts/dispatch_checks.py --checks "$AZPYSDK_CHECKS" azure-template
shell: bash
env:
TOX_PIP_IMPL: "uv"

- name: Install azure-sdk-tools into the global pip environment
run: |
python -m pip install -e eng/tools/azure-sdk-tools[build,ghtools,conda]
shell: bash

- name: Run all discovered checks against azure-template using pip as package manager
run: |
python eng/scripts/dispatch_checks.py --checks "$AZPYSDK_CHECKS" azure-template
shell: bash

dev-setup-and-import:
runs-on: ubuntu-latest
steps:
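The discovery step above scrapes azpysdk's argparse help for the brace-delimited subcommand group. Below is a rough Python equivalent of that grep/tr pipeline — a minimal sketch, not part of the diff, assuming the help output prints a single "{check1,check2,...}" line as the workflow comment states:

# Illustrative sketch (not from this PR): Python equivalent of the discovery pipeline above.
# Assumes `azpysdk -h` lists its subcommands as one "{check1,check2,...}" group.
import re
import subprocess

help_text = subprocess.run(["azpysdk", "-h"], capture_output=True, text=True).stdout
groups = re.findall(r"\{([^}]+)\}", help_text)
if not groups:
    raise SystemExit("No azpysdk check modules discovered from azpysdk -h")
# take the last group, drop the next-* variants, sort, and join with commas
checks = sorted(c for c in groups[-1].split(",") if not c.startswith("next-"))
print(",".join(checks))  # e.g. "bandit,import_all,whl" (illustrative values only)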
1 change: 1 addition & 0 deletions .gitignore
@@ -60,6 +60,7 @@ conda/assembled/
conda/downloaded/
conda/conda-env/
scenario_*.txt
.wheels

# tox environment folders
.tox/
92 changes: 44 additions & 48 deletions eng/scripts/dispatch_checks.py
@@ -10,7 +10,7 @@

from ci_tools.functions import discover_targeted_packages
from ci_tools.variables import in_ci
from ci_tools.scenario.generation import build_whl_for_req
from ci_tools.scenario.generation import build_whl_for_req, replace_dev_reqs
from ci_tools.logging import configure_logging, logger
from ci_tools.environment_exclusions import is_check_enabled, CHECK_DEFAULTS

@@ -73,9 +73,7 @@ async def run_check(
stderr = stderr_b.decode(errors="replace")
exit_code = proc.returncode or 0
status = "OK" if exit_code == 0 else f"FAIL({exit_code})"
logger.info(
f"[END {idx}/{total}] {check} :: {package} -> {status} in {duration:.2f}s"
)
logger.info(f"[END {idx}/{total}] {check} :: {package} -> {status} in {duration:.2f}s")
# Print captured output after completion to avoid interleaving
header = f"===== OUTPUT: {check} :: {package} (exit {exit_code}) ====="
trailer = "=" * len(header)
@@ -96,9 +94,7 @@ async def run_check(
try:
shutil.rmtree(isolate_dir)
except:
logger.warning(
f"Failed to remove isolate dir {isolate_dir} for {package} / {check}"
)
logger.warning(f"Failed to remove isolate dir {isolate_dir} for {package} / {check}")
return CheckResult(package, check, exit_code, duration, stdout, stderr)


@@ -122,18 +118,14 @@ def summarize(results: List[CheckResult]) -> int:
print("-" * len(header))
for r in sorted(results, key=lambda x: (x.exit_code != 0, x.package, x.check)):
status = "OK" if r.exit_code == 0 else f"FAIL({r.exit_code})"
print(
f"{r.package.ljust(pkg_w)} {r.check.ljust(chk_w)} {status.ljust(8)} {r.duration:>10.2f}"
)
print(f"{r.package.ljust(pkg_w)} {r.check.ljust(chk_w)} {status.ljust(8)} {r.duration:>10.2f}")
worst = max((r.exit_code for r in results), default=0)
failed = [r for r in results if r.exit_code != 0]
print(
f"\nTotal checks: {len(results)} | Failed: {len(failed)} | Worst exit code: {worst}"
)
print(f"\nTotal checks: {len(results)} | Failed: {len(failed)} | Worst exit code: {worst}")
return worst


async def run_all_checks(packages, checks, max_parallel):
async def run_all_checks(packages, checks, max_parallel, wheel_dir):
"""Run all checks for all packages concurrently and return the worst exit code.

:param packages: Iterable of package paths to run checks against.
@@ -142,6 +134,9 @@ async def run_all_checks(packages, checks, max_parallel):
:type checks: List[str]
:param max_parallel: Maximum number of concurrent checks to run.
:type max_parallel: int
:param wheel_dir: The directory where wheels should be located and stored when built.
In CI should correspond to `$(Build.ArtifactStagingDirectory)`.
:type wheel_dir: str
:returns: The worst exit code from all checks (0 if all passed).
:rtype: int
"""
@@ -150,17 +145,33 @@
semaphore = asyncio.Semaphore(max_parallel)
combos = [(p, c) for p in packages for c in checks]
total = len(combos)

test_tools_path = os.path.join(root_dir, "eng", "test_tools.txt")
dependency_tools_path = os.path.join(root_dir, "eng", "dependency_tools.txt")

if in_ci():
logger.info("Replacing relative requirements in eng/test_tools.txt with prebuilt wheels.")
replace_dev_reqs(test_tools_path, root_dir, wheel_dir)

logger.info("Replacing relative requirements in eng/dependency_tools.txt with prebuilt wheels.")
replace_dev_reqs(dependency_tools_path, root_dir, wheel_dir)

for pkg in packages:
destination_dev_req = os.path.join(pkg, "dev_requirements.txt")

logger.info(f"Replacing dev requirements w/ path {destination_dev_req}")
if not os.path.exists(destination_dev_req):
logger.info("No dev_requirements present.")
with open(destination_dev_req, "w+") as file:
file.write("\n")

replace_dev_reqs(destination_dev_req, pkg, wheel_dir)

for idx, (package, check) in enumerate(combos, start=1):
if not is_check_enabled(package, check, CHECK_DEFAULTS.get(check, True)):
logger.warning(
f"Skipping disabled check {check} ({idx}/{total}) for package {package}"
)
logger.warning(f"Skipping disabled check {check} ({idx}/{total}) for package {package}")
continue
tasks.append(
asyncio.create_task(
run_check(semaphore, package, check, base_args, idx, total)
)
)
tasks.append(asyncio.create_task(run_check(semaphore, package, check, base_args, idx, total)))

# Handle Ctrl+C gracefully
pending = set(tasks)
@@ -179,9 +190,7 @@ async def run_all_checks(packages, checks, max_parallel):
elif isinstance(res, Exception):
norm_results.append(CheckResult(package, check, 99, 0.0, "", str(res)))
else:
norm_results.append(
CheckResult(package, check, 98, 0.0, "", f"Unknown result type: {res}")
)
norm_results.append(CheckResult(package, check, 98, 0.0, "", f"Unknown result type: {res}"))
return summarize(norm_results)


@@ -257,15 +266,11 @@ def handler(signum, frame):
),
)

parser.add_argument(
"--disablecov", help=("Flag. Disables code coverage."), action="store_true"
)
parser.add_argument("--disablecov", help=("Flag. Disables code coverage."), action="store_true")

parser.add_argument(
"--service",
help=(
"Name of service directory (under sdk/) to test. Example: --service applicationinsights"
),
help=("Name of service directory (under sdk/) to test. Example: --service applicationinsights"),
)

parser.add_argument(
@@ -325,9 +330,7 @@ def handler(signum, frame):
else:
target_dir = root_dir

logger.info(
f"Beginning discovery for {args.service} and root dir {root_dir}. Resolving to {target_dir}."
)
logger.info(f"Beginning discovery for {args.service} and root dir {root_dir}. Resolving to {target_dir}.")

# ensure that recursive virtual envs aren't messed with by this call
os.environ.pop("VIRTUAL_ENV", None)
@@ -344,26 +347,21 @@ def handler(signum, frame):
)

if len(targeted_packages) == 0:
logger.info(
f"No packages collected for targeting string {args.glob_string} and root dir {root_dir}. Exit 0."
)
logger.info(f"No packages collected for targeting string {args.glob_string} and root dir {root_dir}. Exit 0.")
exit(0)

logger.info(f"Executing checks with the executable {sys.executable}.")
logger.info(f"Packages targeted: {targeted_packages}")

temp_wheel_dir = args.wheel_dir or os.path.join(root_dir, ".wheels")
if args.wheel_dir:
os.environ["PREBUILT_WHEEL_DIR"] = args.wheel_dir

if not os.path.exists(os.path.join(root_dir, ".wheels")):
os.makedirs(os.path.join(root_dir, ".wheels"))
else:
if not os.path.exists(temp_wheel_dir):
os.makedirs(temp_wheel_dir)

if in_ci():
# prepare a build of eng/tools/azure-sdk-tools
# todo: ensure that we honor this .wheels directory when replacing for dev reqs
build_whl_for_req(
"eng/tools/azure-sdk-tools", root_dir, os.path.join(root_dir, ".wheels")
)
build_whl_for_req("eng/tools/azure-sdk-tools", root_dir, temp_wheel_dir)

# so if we have checks whl,import_all and selected package paths `sdk/core/azure-core`, `sdk/storage/azure-storage-blob` we should
# shell out to `azpysdk <checkname>` with cwd of the package directory, which is what is in `targeted_packages` array
@@ -382,9 +380,7 @@ def handler(signum, frame):

configure_interrupt_handling()
try:
exit_code = asyncio.run(
run_all_checks(targeted_packages, checks, args.max_parallel)
)
exit_code = asyncio.run(run_all_checks(targeted_packages, checks, args.max_parallel, temp_wheel_dir))
except KeyboardInterrupt:
logger.error("Aborted by user.")
exit_code = 130
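dispatch_checks.py fans the (package, check) combinations out as asyncio tasks bounded by a semaphore and reports the worst exit code. The following is a minimal standalone sketch of that pattern, not the script itself — the real code also isolates venvs, captures output, and handles Ctrl+C:

# Illustrative sketch (not from this PR): the semaphore-bounded fan-out used by run_all_checks.
import asyncio

async def run_one(sem: asyncio.Semaphore, check: str, package: str) -> int:
    async with sem:  # only max_parallel checks run at once
        proc = await asyncio.create_subprocess_exec(
            "azpysdk", check, cwd=package,
            stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE,
        )
        await proc.communicate()
        return proc.returncode or 0

async def run_all(packages: list, checks: list, max_parallel: int) -> int:
    sem = asyncio.Semaphore(max_parallel)
    tasks = [asyncio.create_task(run_one(sem, c, p)) for p in packages for c in checks]
    results = await asyncio.gather(*tasks)
    return max(results, default=0)  # worst exit code wins

# e.g. asyncio.run(run_all(["sdk/template/azure-template"], ["whl", "import_all"], 4))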
76 changes: 69 additions & 7 deletions eng/tools/azure-sdk-tools/azpysdk/Check.py
@@ -11,8 +11,15 @@
import subprocess

from ci_tools.parsing import ParsedSetup
from ci_tools.functions import discover_targeted_packages, get_venv_call, install_into_venv, get_venv_python
from ci_tools.variables import discover_repo_root
from ci_tools.functions import (
discover_targeted_packages,
get_venv_call,
install_into_venv,
get_venv_python,
get_pip_command,
find_whl,
)
from ci_tools.variables import discover_repo_root, in_ci
from ci_tools.logging import logger

# right now, we are assuming you HAVE to be in the azure-sdk-tools repo
@@ -64,9 +71,26 @@ def create_venv(self, isolate: bool, venv_location: str) -> str:

subprocess.check_call(venv_cmd + [venv_location])

# TODO: we should reuse part of build_whl_for_req to integrate with PREBUILT_WHL_DIR so that we don't have to fresh build for each
# venv
install_into_venv(venv_location, [os.path.join(REPO_ROOT, "eng/tools/azure-sdk-tools[build]")], REPO_ROOT)
if in_ci():
# first attempt to retrieve azure-sdk-tools from the prebuilt wheel directory
# if present, install from there instead of constantly rebuilding azure-sdk-tools in a possible
# parallel situation
wheel_dir = os.getenv("PREBUILT_WHEEL_DIR", None) or os.path.join(REPO_ROOT, ".wheels")
prebuilt_whl = find_whl(wheel_dir, "azure-sdk-tools", "0.0.0")

if prebuilt_whl:
install_location = os.path.join(wheel_dir, prebuilt_whl)
install_into_venv(venv_location, [f"{install_location}[build]"], REPO_ROOT)
else:
logger.error(
"Falling back to manual build and install of azure-sdk-tools into isolated env,"
f" unable to locate prebuilt azure-sdk-tools within {wheel_dir}"
)
else:
install_into_venv(
venv_location, [os.path.join(REPO_ROOT, "eng/tools/azure-sdk-tools[build]")], REPO_ROOT
)

venv_python_exe = get_venv_python(venv_location)

return venv_python_exe
@@ -86,7 +110,7 @@ def get_executable(self, isolate: bool, check_name: str, executable: str, packag
return executable, staging_directory

def run_venv_command(
self, executable: str, command: Sequence[str], cwd: str, check: bool = False
self, executable: str, command: Sequence[str], cwd: str, check: bool = False, append_executable: bool = True
) -> subprocess.CompletedProcess[str]:
"""Run a command in the given virtual environment.
- Prepends the virtual environment's bin directory to the PATH environment variable (if one exists)
@@ -112,8 +136,21 @@ def run_venv_command(
else:
raise RuntimeError(f"Unable to find parent venv for executable {executable}")

# When not appending executable, resolve the command using the modified PATH
if not append_executable:
resolved = shutil.which(command[0], path=env["PATH"])
if not resolved:
raise RuntimeError(f"Command '{command[0]}' not found in PATH: {env['PATH']}")
cmd_to_run = [resolved] + list(command[1:])
else:
cmd_to_run = [executable] + list(command)

logger.debug(f"Running command: {cmd_to_run}.")
logger.debug(f"VIRTUAL_ENV: {env['VIRTUAL_ENV']}.")
logger.debug(f"PATH : {env['PATH']}.")

result = subprocess.run(
[executable, *command], cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, check=check
cmd_to_run, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, check=check, env=env
)

return result
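run_venv_command now takes an append_executable flag: when it is False, the first token of command is resolved against the venv-prefixed PATH via shutil.which rather than being passed as an argument to the python executable. A hypothetical call site, with names assumed for illustration and not taken from this PR:

# Illustrative sketch (hypothetical usage, not from this PR):
# run the venv's own `pytest` entry point instead of `python -m pytest`.
result = self.run_venv_command(
    venv_python,                # python executable inside the target venv
    ["pytest", "-x", "tests"],  # "pytest" is resolved on the venv's PATH
    cwd=package_dir,
    append_executable=False,
)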
@@ -180,3 +217,28 @@ def install_dev_reqs(self, executable: str, args: argparse.Namespace, package_di
os.remove(temp_req_file.name)
except Exception as cleanup_error:
logger.warning(f"Failed to remove temporary requirements file: {cleanup_error}")

def pip_freeze(self, executable: str) -> None:
"""Run pip freeze in the given virtual environment and log the output. This function handles both isolated and non-isolated
environments, as well as calling the proper `uv` executable with additional --python argument if needed.

:param executable: Path to the python executable that should invoke this check.
:returns None:
"""
try:
# to uv pip install or freeze to a target environment, we have to add `--python <path to python exe>`
# to tell uv which environment to target
command = get_pip_command(executable)

if command[0] == "uv":
command += ["freeze", "--python", executable]
else:
command += ["freeze"]

result = subprocess.run(command, cwd=os.getcwd(), check=True, capture_output=True, text=True)
logger.info("Installed packages:")
logger.info(result.stdout)
except subprocess.CalledProcessError as e:
logger.error(f"Failed to run pip freeze: {e}")
logger.error(e.stdout)
logger.error(e.stderr)
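The new pip_freeze helper picks its command based on the active package manager: uv needs an explicit --python pointing at the target interpreter, while stock pip is driven through the venv's own python. A hedged sketch of that branching without the get_pip_command helper, whose exact return shape is assumed here:

# Illustrative sketch (not from this PR): the uv-vs-pip branching pip_freeze relies on.
import subprocess

def freeze_environment(executable: str, use_uv: bool) -> str:
    if use_uv:
        # uv operates on whichever interpreter --python points at
        cmd = ["uv", "pip", "freeze", "--python", executable]
    else:
        # stock pip is invoked through the venv's interpreter
        cmd = [executable, "-m", "pip", "freeze"]
    return subprocess.run(cmd, check=True, capture_output=True, text=True).stdout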
8 changes: 1 addition & 7 deletions eng/tools/azure-sdk-tools/azpysdk/bandit.py
@@ -49,13 +49,7 @@ def run(self, args: argparse.Namespace) -> int:
logger.error(f"Failed to install bandit: {e}")
return e.returncode

# debug a pip freeze result
cmd = get_pip_command(executable) + ["freeze"]
freeze_result = subprocess.run(
cmd, cwd=package_dir, check=False, text=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
logger.debug(f"Running pip freeze with {cmd}")
logger.debug(freeze_result.stdout)
self.pip_freeze(executable)

if in_ci():
if not is_check_enabled(package_dir, "bandit"):
3 changes: 0 additions & 3 deletions eng/tools/azure-sdk-tools/azpysdk/import_all.py
@@ -50,9 +50,6 @@ def run(self, args: argparse.Namespace) -> int:

targeted = self.get_targeted_directories(args)

# {[tox]pip_command} freeze
# python {repository_root}/eng/tox/import_all.py -t {tox_root}

outcomes: List[int] = []

for parsed in targeted:
Expand Down