diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 01271f9c..469a6330 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -20,7 +20,7 @@ concurrency: cancel-in-progress: true env: - GO_VERSION: 1.23 + GO_VERSION: 1.25 defaults: run: @@ -36,7 +36,7 @@ jobs: - ubuntu-latest #x64 - ubuntu-24.04-arm #arm64 - windows-latest #x64 - - macos-13 #x64 + - macos-15-intel #x64 - macos-latest #arm64 runs-on: ${{ matrix.os }} steps: diff --git a/.github/workflows/release-image.yml b/.github/workflows/release-image.yml index d11dbf0d..903951b8 100644 --- a/.github/workflows/release-image.yml +++ b/.github/workflows/release-image.yml @@ -14,6 +14,7 @@ env: jobs: publish-platform-images: name: 'Publish: linux-${{ matrix.platform.tag }}' + if: github.repository == 'containers/kubernetes-mcp-server' strategy: fail-fast: true matrix: @@ -47,6 +48,7 @@ jobs: publish-manifest: name: Publish Manifest + if: github.repository == 'containers/kubernetes-mcp-server' runs-on: ubuntu-latest needs: publish-platform-images steps: diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index c035b3a8..2c5cc010 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -11,7 +11,7 @@ concurrency: cancel-in-progress: true env: - GO_VERSION: 1.23 + GO_VERSION: 1.25 UV_PUBLISH_TOKEN: ${{ secrets.UV_PUBLISH_TOKEN }} permissions: @@ -22,6 +22,7 @@ permissions: jobs: release: name: Release + if: github.repository == 'containers/kubernetes-mcp-server' runs-on: macos-latest steps: - name: Checkout @@ -50,6 +51,7 @@ jobs: make npm-publish python: name: Release Python + if: github.repository == 'containers/kubernetes-mcp-server' # Python logic requires the tag/release version to be available from GitHub needs: release runs-on: ubuntu-latest diff --git a/Makefile b/Makefile index 04ff1ac0..b5ee78f4 100644 --- a/Makefile +++ b/Makefile @@ -16,19 +16,16 @@ LD_FLAGS = -s -w \ COMMON_BUILD_ARGS = -ldflags 
"$(LD_FLAGS)" GOLANGCI_LINT = $(shell pwd)/_output/tools/bin/golangci-lint -GOLANGCI_LINT_VERSION ?= v2.5.0 +GOLANGCI_LINT_VERSION ?= v2.6.1 # NPM version should not append the -dirty flag -NPM_VERSION ?= $(shell echo $(shell git describe --tags --always) | sed 's/^v//') +GIT_TAG_VERSION ?= $(shell echo $(shell git describe --tags --always) | sed 's/^v//') OSES = darwin linux windows ARCHS = amd64 arm64 CLEAN_TARGETS := CLEAN_TARGETS += '$(BINARY_NAME)' CLEAN_TARGETS += $(foreach os,$(OSES),$(foreach arch,$(ARCHS),$(BINARY_NAME)-$(os)-$(arch)$(if $(findstring windows,$(os)),.exe,))) -CLEAN_TARGETS += $(foreach os,$(OSES),$(foreach arch,$(ARCHS),./npm/$(BINARY_NAME)-$(os)-$(arch)/bin/)) -CLEAN_TARGETS += ./npm/kubernetes-mcp-server/.npmrc ./npm/kubernetes-mcp-server/LICENSE ./npm/kubernetes-mcp-server/README.md -CLEAN_TARGETS += $(foreach os,$(OSES),$(foreach arch,$(ARCHS),./npm/$(BINARY_NAME)-$(os)-$(arch)/.npmrc)) # The help will print out all targets with their descriptions organized bellow their categories. The categories are represented by `##@` and the target descriptions by `##`. # The awk commands is responsible to read the entire set of makefiles included in this invocation, looking for lines of the file as xyz: ## something, and then pretty-format the target and help. Then, if there's a line with ##@ something, that gets pretty-printed as a category. 
@@ -57,33 +54,10 @@ build-all-platforms: clean tidy format lint ## Build the project for all platfor GOOS=$(os) GOARCH=$(arch) go build $(COMMON_BUILD_ARGS) -o $(BINARY_NAME)-$(os)-$(arch)$(if $(findstring windows,$(os)),.exe,) ./cmd/kubernetes-mcp-server; \ )) -.PHONY: npm-copy-binaries -npm-copy-binaries: build-all-platforms ## Copy the binaries to each npm package - $(foreach os,$(OSES),$(foreach arch,$(ARCHS), \ - EXECUTABLE=./$(BINARY_NAME)-$(os)-$(arch)$(if $(findstring windows,$(os)),.exe,); \ - DIRNAME=$(BINARY_NAME)-$(os)-$(arch); \ - mkdir -p ./npm/$$DIRNAME/bin; \ - cp $$EXECUTABLE ./npm/$$DIRNAME/bin/; \ - )) - -.PHONY: npm-publish -npm-publish: npm-copy-binaries ## Publish the npm packages - $(foreach os,$(OSES),$(foreach arch,$(ARCHS), \ - DIRNAME="$(BINARY_NAME)-$(os)-$(arch)"; \ - cd npm/$$DIRNAME; \ - jq '.version = "$(NPM_VERSION)"' package.json > tmp.json && mv tmp.json package.json; \ - npm publish --tag latest; \ - cd ../..; \ - )) - cp README.md LICENSE ./npm/kubernetes-mcp-server/ - jq '.version = "$(NPM_VERSION)"' ./npm/kubernetes-mcp-server/package.json > tmp.json && mv tmp.json ./npm/kubernetes-mcp-server/package.json; \ - jq '.optionalDependencies |= with_entries(.value = "$(NPM_VERSION)")' ./npm/kubernetes-mcp-server/package.json > tmp.json && mv tmp.json ./npm/kubernetes-mcp-server/package.json; \ - cd npm/kubernetes-mcp-server && npm publish --tag latest - .PHONY: python-publish python-publish: ## Publish the python packages cd ./python && \ - sed -i "s/version = \".*\"/version = \"$(NPM_VERSION)\"/" pyproject.toml && \ + sed -i "s/version = \".*\"/version = \"$(GIT_TAG_VERSION)\"/" pyproject.toml && \ uv build && \ uv publish diff --git a/README.md b/README.md index a81daea1..ee592bd5 100644 --- a/README.md +++ b/README.md @@ -249,6 +249,13 @@ In case multi-cluster support is enabled (default) and you have access to multip - `query` (`string`) **(required)** - query specifies services(s) or files from which to return logs (required). 
Example: "kubelet" to fetch kubelet logs, "/" to fetch a specific log file from the node (e.g., "/var/log/kubelet.log" or "/var/log/kube-proxy.log") - `tailLines` (`integer`) - Number of lines to retrieve from the end of the logs (Optional, 0 means all logs) +- **nodes_stats_summary** - Get detailed resource usage statistics from a Kubernetes node via the kubelet's Summary API. Provides comprehensive metrics including CPU, memory, filesystem, and network usage at the node, pod, and container levels. On systems with cgroup v2 and kernel 4.20+, also includes PSI (Pressure Stall Information) metrics that show resource pressure for CPU, memory, and I/O. See https://kubernetes.io/docs/reference/instrumentation/understand-psi-metrics/ for details on PSI metrics + - `name` (`string`) **(required)** - Name of the node to get stats from + +- **nodes_top** - List the resource consumption (CPU and memory) as recorded by the Kubernetes Metrics Server for the specified Kubernetes Nodes or all nodes in the cluster + - `label_selector` (`string`) - Kubernetes label selector (e.g. 'node-role.kubernetes.io/worker=') to filter nodes by label (Optional, only applicable when name is not provided) + - `name` (`string`) - Name of the Node to get the resource consumption from (Optional, all Nodes if not provided) + - **pods_list** - List all the Kubernetes pods in the current cluster from all namespaces - `labelSelector` (`string`) - Optional Kubernetes label selector (e.g. 
'app=myapp,env=prod' or 'app in (myapp,yourapp)'), use this option when you want to filter the pods by label diff --git a/build/acm.mk b/build/acm.mk deleted file mode 100644 index 8539a402..00000000 --- a/build/acm.mk +++ /dev/null @@ -1,36 +0,0 @@ -# ACM (Advanced Cluster Management) installation targets for OpenShift -# This file is specific to downstream OpenShift/OCP features only - -.PHONY: acm-install acm-mce-install acm-operator-install acm-instance-install acm-status acm-import-cluster acm-uninstall acm-dump-manifests - -##@ ACM (OpenShift only) - -acm-install: acm-mce-install acm-operator-install acm-instance-install ## Install MCE, ACM operator and instance - -acm-mce-install: ## Install MultiCluster Engine (required for ACM) - @./hack/acm/install-mce.sh - -acm-operator-install: ## Install ACM operator - @./hack/acm/install-operator.sh - -acm-instance-install: ## Install ACM instance (MultiClusterHub CR) - @./hack/acm/install-instance.sh - -acm-status: ## Check ACM installation status - @./hack/acm/status.sh - -acm-import-cluster: ## Import a managed cluster (requires CLUSTER_NAME and MANAGED_KUBECONFIG) - @./hack/acm/import-cluster.sh "$(CLUSTER_NAME)" "$(MANAGED_KUBECONFIG)" - -acm-uninstall: ## Uninstall ACM (reverse order: instance first, then operator) - @./hack/acm/uninstall.sh - -acm-dump-manifests: ## Dump ACM manifests locally for inspection - @echo "Dumping ACM Operator manifests..." - @mkdir -p _output/acm-manifests - kustomize build https://github.com/redhat-cop/gitops-catalog/advanced-cluster-management/operator/overlays/release-2.14 > _output/acm-manifests/operator.yaml - @echo "Operator manifests saved to _output/acm-manifests/operator.yaml" - @echo "" - @echo "Dumping ACM Instance manifests..." 
- kustomize build https://github.com/redhat-cop/gitops-catalog/advanced-cluster-management/instance/base > _output/acm-manifests/instance.yaml - @echo "Instance manifests saved to _output/acm-manifests/instance.yaml" diff --git a/build/keycloak.mk b/build/keycloak.mk index d541c8b4..86b63907 100644 --- a/build/keycloak.mk +++ b/build/keycloak.mk @@ -50,6 +50,8 @@ keycloak-install: keycloak-uninstall: @kubectl delete -f dev/config/keycloak/deployment.yaml 2>/dev/null || true +##@ Keycloak + .PHONY: keycloak-status keycloak-status: ## Show Keycloak status and connection info @if kubectl get svc -n $(KEYCLOAK_NAMESPACE) keycloak >/dev/null 2>&1; then \ diff --git a/build/node.mk b/build/node.mk new file mode 100644 index 00000000..f9c18280 --- /dev/null +++ b/build/node.mk @@ -0,0 +1,74 @@ +##@ Node/NPM build targets + +NPM_PACKAGE = kubernetes-mcp-server + +CLEAN_TARGETS += $(foreach os,$(OSES),$(foreach arch,$(ARCHS),./npm/$(BINARY_NAME)-$(os)-$(arch))) +CLEAN_TARGETS += ./npm/$(NPM_PACKAGE)/LICENSE ./npm/$(NPM_PACKAGE)/package.json ./npm/$(NPM_PACKAGE)/README.md + +.PHONY: npm-copy-binaries +npm-copy-binaries: build-all-platforms ## Copy the binaries to each npm package + $(foreach os,$(OSES),$(foreach arch,$(ARCHS), \ + EXECUTABLE=./$(BINARY_NAME)-$(os)-$(arch)$(if $(findstring windows,$(os)),.exe,); \ + NPM_EXECUTABLE=$(NPM_PACKAGE)-$(os)-$(arch)$(if $(findstring windows,$(os)),.exe,); \ + DIRNAME=$(NPM_PACKAGE)-$(os)-$(arch); \ + mkdir -p ./npm/$$DIRNAME/bin; \ + cp $$EXECUTABLE ./npm/$$DIRNAME/bin/$$NPM_EXECUTABLE; \ + )) + + +MAIN_PACKAGE_JSON=./npm/$(NPM_PACKAGE)/package.json +.PHONY: npm-copy-project-files +npm-copy-project-files: npm-copy-binaries ## Copy the project files to the main npm package and generate all package.json files + cp README.md LICENSE ./npm/$(NPM_PACKAGE)/ + @echo '{"name": "$(NPM_PACKAGE)",' > $(MAIN_PACKAGE_JSON) + @echo '"version": "$(GIT_TAG_VERSION)",' >> $(MAIN_PACKAGE_JSON) + @echo '"description": "Model Context Protocol (MCP) 
server for Kubernetes and OpenShift",' >> $(MAIN_PACKAGE_JSON) + @echo '"main": "./bin/index.js",' >> $(MAIN_PACKAGE_JSON) + @echo '"bin": {"$(NPM_PACKAGE)": "bin/index.js"},' >> $(MAIN_PACKAGE_JSON) + @echo '"optionalDependencies": {' >> $(MAIN_PACKAGE_JSON) + @for os in $(OSES); do \ + for arch in $(ARCHS); do \ + if [ "$$os" = "$(lastword $(OSES))" ] && [ "$$arch" = "$(lastword $(ARCHS))" ]; then \ + echo " \"$(NPM_PACKAGE)-$$os-$$arch\": \"$(GIT_TAG_VERSION)\""; \ + else \ + echo " \"$(NPM_PACKAGE)-$$os-$$arch\": \"$(GIT_TAG_VERSION)\","; \ + fi \ + done; \ + done >> $(MAIN_PACKAGE_JSON) + @echo '},' >> $(MAIN_PACKAGE_JSON) + @echo '"repository": {"type": "git", "url": "git+https://github.com/containers/kubernetes-mcp-server.git"},' >> $(MAIN_PACKAGE_JSON) + @echo '"keywords": ["mcp","kubernetes","openshift","model context protocol","model","context","protocol"],' >> $(MAIN_PACKAGE_JSON) + @echo '"author": {"name": "Marc Nuri", "url": "https://www.marcnuri.com"},' >> $(MAIN_PACKAGE_JSON) + @echo '"license": "Apache-2.0",' >> $(MAIN_PACKAGE_JSON) + @echo '"bugs": {"url": "https://github.com/containers/kubernetes-mcp-server/issues"},' >> $(MAIN_PACKAGE_JSON) + @echo '"homepage": "https://github.com/containers/kubernetes-mcp-server#readme"' >> $(MAIN_PACKAGE_JSON) + @echo '}' >> $(MAIN_PACKAGE_JSON) + $(foreach os,$(OSES),$(foreach arch,$(ARCHS), \ + OS_PACKAGE_JSON=./npm/$(NPM_PACKAGE)-$(os)-$(arch)/package.json; \ + echo '{"name": "$(NPM_PACKAGE)-$(os)-$(arch)",' > $$OS_PACKAGE_JSON; \ + echo '"version": "$(GIT_TAG_VERSION)",' >> $$OS_PACKAGE_JSON; \ + echo '"description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift",' >> $$OS_PACKAGE_JSON; \ + echo '"repository": {"type": "git", "url": "git+https://github.com/containers/kubernetes-mcp-server.git"},' >> $$OS_PACKAGE_JSON; \ + OS="$(os)"; \ + if [ "$$OS" = "windows" ]; then OS="win32"; fi; \ + echo '"os": ["'$$OS'"],' >> $$OS_PACKAGE_JSON; \ + NPM_ARCH="$(arch)"; \ + if [ "$$NPM_ARCH" = 
"amd64" ]; then NPM_ARCH="x64"; fi; \ + echo '"cpu": ["'$$NPM_ARCH'"]' >> $$OS_PACKAGE_JSON; \ + echo '}' >> $$OS_PACKAGE_JSON; \ + )) + +.PHONY: npm-publish +npm-publish: npm-copy-project-files ## Publish the npm packages + $(foreach os,$(OSES),$(foreach arch,$(ARCHS), \ + DIRNAME="$(BINARY_NAME)-$(os)-$(arch)"; \ + cd npm/$$DIRNAME; \ + jq '.version = "$(GIT_TAG_VERSION)"' package.json > tmp.json && mv tmp.json package.json; \ + npm publish --tag latest; \ + cd ../..; \ + )) + cp README.md LICENSE ./npm/kubernetes-mcp-server/ + jq '.version = "$(GIT_TAG_VERSION)"' ./npm/kubernetes-mcp-server/package.json > tmp.json && mv tmp.json ./npm/kubernetes-mcp-server/package.json; \ + jq '.optionalDependencies |= with_entries(.value = "$(GIT_TAG_VERSION)")' ./npm/kubernetes-mcp-server/package.json > tmp.json && mv tmp.json ./npm/kubernetes-mcp-server/package.json; \ + cd npm/kubernetes-mcp-server && npm publish --tag latest + diff --git a/build/tools.mk b/build/tools.mk index 9c9945a8..20482bc9 100644 --- a/build/tools.mk +++ b/build/tools.mk @@ -17,4 +17,4 @@ $(KIND): GOBIN=$(PWD)/_output/bin go install sigs.k8s.io/kind@$(KIND_VERSION) .PHONY: kind -kind: $(KIND) ## Download kind locally if necessary +kind: $(KIND) diff --git a/docs/openshift/README.md b/docs/openshift/README.md deleted file mode 100644 index eb6ac74c..00000000 --- a/docs/openshift/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# OpenShift Documentation - -This directory contains OpenShift-specific documentation for the OpenShift MCP Server. - -## Available Guides - -| Guide | Description | -|-------|-------------| -| **[ACM Setup](acm.md)** | Setting up Advanced Cluster Management (ACM) and using the MCP Server with multi-cluster environments | - -## Overview - -The OpenShift MCP Server provides OpenShift-specific features and integrations that are only available in the downstream distribution. These features are designed specifically for OpenShift environments and Red Hat Advanced Cluster Management (ACM). 
diff --git a/docs/openshift/acm.md b/docs/openshift/acm.md deleted file mode 100644 index 02a86d65..00000000 --- a/docs/openshift/acm.md +++ /dev/null @@ -1,123 +0,0 @@ -# Advanced Cluster Management (ACM) Setup - -This guide shows you how to set up Red Hat Advanced Cluster Management (ACM) and use the OpenShift MCP Server with multiple OpenShift clusters. - -## Overview - -Advanced Cluster Management (ACM) allows you to manage multiple OpenShift clusters from a single hub cluster. The OpenShift MCP Server allows interaction with all your managed clusters through a single MCP server instance. - -## Step 1: Install ACM on Hub Cluster - -The hub cluster is the central OpenShift cluster that manages other clusters (managed clusters). You need to install ACM on the dedicated hub cluster first. - -### Complete Installation - -Install all ACM components with a single command: - -```bash -make acm-install -``` - -This installs: -1. MultiCluster Engine (MCE) -2. ACM Operator -3. ACM Instance (MultiClusterHub CR) - -### Verify Installation - -Check ACM installation status: - -```bash -make acm-status -``` - -Expected output: -``` -========================================== -ACM Installation Status -========================================== - -Namespaces: -multicluster-engine Active 5m -open-cluster-management Active 5m - -Operators: -NAME DISPLAY VERSION REPLACES PHASE -advanced-cluster-management.v2.14.0 Advanced Cluster Management 2.14.0 Succeeded - -MultiClusterHub: -NAME STATUS AGE -multiclusterhub Running 5m - -ManagedClusters: -NAME HUB ACCEPTED MANAGED CLUSTER URLS JOINED AVAILABLE AGE -local-cluster true https://... True True 5m -``` - -## Step 2: Import Managed Clusters - -Once ACM is installed on your hub cluster, you can import additional OpenShift clusters. 
- -### Import a Cluster - -```bash -make acm-import-cluster \ - CLUSTER_NAME=production-east \ - MANAGED_KUBECONFIG=/path/to/production-east-kubeconfig -``` - -**Parameters**: -- `CLUSTER_NAME`: Unique name for the managed cluster -- `MANAGED_KUBECONFIG`: Path to the kubeconfig file for the managed cluster - -### What Happens During Import - -1. Creates `ManagedCluster` resource on hub -2. Generates import manifests (CRDs + import YAML) -3. Applies manifests to managed cluster -4. Installs klusterlet agent on managed cluster -5. Waits for cluster to become available - -### Verify Import - -```bash -oc get managedclusters -``` - -Expected output: -``` -NAME HUB ACCEPTED MANAGED CLUSTER URLS JOINED AVAILABLE AGE -local-cluster true https://api.hub.example.com:6443 True True 10m -production-east true https://api.prod-east.example.com:6443 True True 5m -``` - -## Step 3: Run the MCP Server - -Start the MCP Server with your ACM configuration - -```toml -cluster_provider_strategy = "acm-kubeconfig" -kubeconfig = "/tmp/acm-hub-kubeconfig.yaml" - -[cluster_provider_configs.acm-kubeconfig] -context_name = "acm-hub" -cluster_proxy_addon_ca_file = "./openshift-ca.crt" -``` -Save this configuration as `acm-config.toml` and run the MCP Server: - -```bash -./kubernetes-mcp-server --config acm-config.toml --port 8080 -``` - -The MCP Server will: -1. Connect to the hub cluster using the kubeconfig -2. Discover all managed clusters via ACM -3. Provide tools to interact with all clusters - -## Step 4: Test Multi-Cluster Access - -With the MCP inspector you can test the access to the ACM-managed clusters. The MCP Server automatically discovers all ACM-managed clusters and on each tool you can select the one you want. 
Below is a screenshot of the `namespaces_list` tool: - - - MCP Inspector with ACM cluster selection - diff --git a/docs/openshift/images/mcp-inspector-acm-managed-cluster.png b/docs/openshift/images/mcp-inspector-acm-managed-cluster.png deleted file mode 100644 index e9163e5e..00000000 Binary files a/docs/openshift/images/mcp-inspector-acm-managed-cluster.png and /dev/null differ diff --git a/go.mod b/go.mod index de49820d..e417d759 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/containers/kubernetes-mcp-server -go 1.24.1 +go 1.24.10 require ( github.com/BurntSushi/toml v1.5.0 @@ -8,26 +8,25 @@ require ( github.com/fsnotify/fsnotify v1.9.0 github.com/go-jose/go-jose/v4 v4.1.3 github.com/google/jsonschema-go v0.3.0 - github.com/mark3labs/mcp-go v0.42.0 + github.com/mark3labs/mcp-go v0.43.0 github.com/pkg/errors v0.9.1 github.com/spf13/afero v1.15.0 github.com/spf13/cobra v1.10.1 github.com/spf13/pflag v1.0.10 github.com/stretchr/testify v1.11.1 - golang.org/x/net v0.46.0 - golang.org/x/oauth2 v0.32.0 - golang.org/x/sync v0.17.0 - helm.sh/helm/v3 v3.19.0 - k8s.io/api v0.34.1 - k8s.io/apiextensions-apiserver v0.34.1 - k8s.io/apimachinery v0.34.1 - k8s.io/cli-runtime v0.34.1 - k8s.io/client-go v0.34.1 + golang.org/x/oauth2 v0.33.0 + golang.org/x/sync v0.18.0 + helm.sh/helm/v3 v3.19.2 + k8s.io/api v0.34.2 + k8s.io/apiextensions-apiserver v0.34.2 + k8s.io/apimachinery v0.34.2 + k8s.io/cli-runtime v0.34.2 + k8s.io/client-go v0.34.2 k8s.io/klog/v2 v2.130.1 - k8s.io/kubectl v0.34.1 - k8s.io/metrics v0.34.1 + k8s.io/kubectl v0.34.2 + k8s.io/metrics v0.34.2 k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 - sigs.k8s.io/controller-runtime v0.22.3 + sigs.k8s.io/controller-runtime v0.22.4 sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20250211091558-894df3a7e664 sigs.k8s.io/yaml v1.6.0 ) @@ -47,11 +46,11 @@ require ( github.com/buger/jsonparser v1.1.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chai2010/gettext-go v1.0.3 
// indirect - github.com/containerd/containerd v1.7.28 // indirect + github.com/containerd/containerd v1.7.29 // indirect github.com/containerd/errdefs v0.3.0 // indirect github.com/containerd/log v0.1.0 // indirect github.com/containerd/platforms v0.2.1 // indirect - github.com/cyphar/filepath-securejoin v0.4.1 // indirect + github.com/cyphar/filepath-securejoin v0.6.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.12.2 // indirect github.com/evanphx/json-patch v5.9.11+incompatible // indirect @@ -122,10 +121,11 @@ require ( github.com/yosida95/uritemplate/v3 v3.0.2 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/crypto v0.43.0 // indirect - golang.org/x/sys v0.37.0 // indirect - golang.org/x/term v0.36.0 // indirect - golang.org/x/text v0.30.0 // indirect + golang.org/x/crypto v0.44.0 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/sys v0.38.0 // indirect + golang.org/x/term v0.37.0 // indirect + golang.org/x/text v0.31.0 // indirect golang.org/x/time v0.12.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250728155136-f173205681a0 // indirect google.golang.org/grpc v1.72.1 // indirect @@ -133,8 +133,8 @@ require ( gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiserver v0.34.1 // indirect - k8s.io/component-base v0.34.1 // indirect + k8s.io/apiserver v0.34.2 // indirect + k8s.io/component-base v0.34.2 // indirect k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect oras.land/oras-go/v2 v2.6.0 // indirect sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect diff --git a/go.sum b/go.sum index 52a72c8b..afd4f993 100644 --- a/go.sum +++ b/go.sum @@ -40,8 +40,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod 
h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v1.0.3 h1:9liNh8t+u26xl5ddmWLmsOsdNLwkdRTg5AG+JnTiM80= github.com/chai2010/gettext-go v1.0.3/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= -github.com/containerd/containerd v1.7.28 h1:Nsgm1AtcmEh4AHAJ4gGlNSaKgXiNccU270Dnf81FQ3c= -github.com/containerd/containerd v1.7.28/go.mod h1:azUkWcOvHrWvaiUjSQH0fjzuHIwSPg1WL5PshGP4Szs= +github.com/containerd/containerd v1.7.29 h1:90fWABQsaN9mJhGkoVnuzEY+o1XDPbg9BTC9QTAHnuE= +github.com/containerd/containerd v1.7.29/go.mod h1:azUkWcOvHrWvaiUjSQH0fjzuHIwSPg1WL5PshGP4Szs= github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4= github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= @@ -55,8 +55,8 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= -github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= +github.com/cyphar/filepath-securejoin v0.6.0 h1:BtGB77njd6SVO6VztOHfPxKitJvd/VPT+OFBFMOi1Is= +github.com/cyphar/filepath-securejoin v0.6.0/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -187,8 +187,8 @@ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhn 
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= -github.com/mark3labs/mcp-go v0.42.0 h1:gk/8nYJh8t3yroCAOBhNbYsM9TCKvkM13I5t5Hfu6Ls= -github.com/mark3labs/mcp-go v0.42.0/go.mod h1:YnJfOL382MIWDx1kMY+2zsRHU/q78dBg9aFb8W6Thdw= +github.com/mark3labs/mcp-go v0.43.0 h1:lgiKcWMddh4sngbU+hoWOZ9iAe/qp/m851RQpj3Y7jA= +github.com/mark3labs/mcp-go v0.43.0/go.mod h1:YnJfOL382MIWDx1kMY+2zsRHU/q78dBg9aFb8W6Thdw= github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= @@ -357,47 +357,47 @@ go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= -golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= +golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU= +golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U= -golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI= +golang.org/x/mod v0.29.0 
h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= +golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= -golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= -golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY= -golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo= +golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= -golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= -golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q= -golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= -golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE= -golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= +golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -425,36 +425,36 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -helm.sh/helm/v3 v3.19.0 h1:krVyCGa8fa/wzTZgqw0DUiXuRT5BPdeqE/sQXujQ22k= -helm.sh/helm/v3 v3.19.0/go.mod h1:Lk/SfzN0w3a3C3o+TdAKrLwJ0wcZ//t1/SDXAvfgDdc= -k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM= -k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk= -k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI= -k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc= -k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4= -k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= -k8s.io/apiserver v0.34.1 h1:U3JBGdgANK3dfFcyknWde1G6X1F4bg7PXuvlqt8lITA= -k8s.io/apiserver v0.34.1/go.mod h1:eOOc9nrVqlBI1AFCvVzsob0OxtPZUCPiUJL45JOTBG0= -k8s.io/cli-runtime v0.34.1 
h1:btlgAgTrYd4sk8vJTRG6zVtqBKt9ZMDeQZo2PIzbL7M= -k8s.io/cli-runtime v0.34.1/go.mod h1:aVA65c+f0MZiMUPbseU/M9l1Wo2byeaGwUuQEQVVveE= -k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY= -k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8= -k8s.io/component-base v0.34.1 h1:v7xFgG+ONhytZNFpIz5/kecwD+sUhVE6HU7qQUiRM4A= -k8s.io/component-base v0.34.1/go.mod h1:mknCpLlTSKHzAQJJnnHVKqjxR7gBeHRv0rPXA7gdtQ0= +helm.sh/helm/v3 v3.19.2 h1:psQjaM8aIWrSVEly6PgYtLu/y6MRSmok4ERiGhZmtUY= +helm.sh/helm/v3 v3.19.2/go.mod h1:gX10tB5ErM+8fr7bglUUS/UfTOO8UUTYWIBH1IYNnpE= +k8s.io/api v0.34.2 h1:fsSUNZhV+bnL6Aqrp6O7lMTy6o5x2C4XLjnh//8SLYY= +k8s.io/api v0.34.2/go.mod h1:MMBPaWlED2a8w4RSeanD76f7opUoypY8TFYkSM+3XHw= +k8s.io/apiextensions-apiserver v0.34.2 h1:WStKftnGeoKP4AZRz/BaAAEJvYp4mlZGN0UCv+uvsqo= +k8s.io/apiextensions-apiserver v0.34.2/go.mod h1:398CJrsgXF1wytdaanynDpJ67zG4Xq7yj91GrmYN2SE= +k8s.io/apimachinery v0.34.2 h1:zQ12Uk3eMHPxrsbUJgNF8bTauTVR2WgqJsTmwTE/NW4= +k8s.io/apimachinery v0.34.2/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/apiserver v0.34.2 h1:2/yu8suwkmES7IzwlehAovo8dDE07cFRC7KMDb1+MAE= +k8s.io/apiserver v0.34.2/go.mod h1:gqJQy2yDOB50R3JUReHSFr+cwJnL8G1dzTA0YLEqAPI= +k8s.io/cli-runtime v0.34.2 h1:cct1GEuWc3IyVT8MSCoIWzRGw9HJ/C5rgP32H60H6aE= +k8s.io/cli-runtime v0.34.2/go.mod h1:X13tsrYexYUCIq8MarCBy8lrm0k0weFPTpcaNo7lms4= +k8s.io/client-go v0.34.2 h1:Co6XiknN+uUZqiddlfAjT68184/37PS4QAzYvQvDR8M= +k8s.io/client-go v0.34.2/go.mod h1:2VYDl1XXJsdcAxw7BenFslRQX28Dxz91U9MWKjX97fE= +k8s.io/component-base v0.34.2 h1:HQRqK9x2sSAsd8+R4xxRirlTjowsg6fWCPwWYeSvogQ= +k8s.io/component-base v0.34.2/go.mod h1:9xw2FHJavUHBFpiGkZoKuYZ5pdtLKe97DEByaA+hHbM= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= k8s.io/kube-openapi 
v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= -k8s.io/kubectl v0.34.1 h1:1qP1oqT5Xc93K+H8J7ecpBjaz511gan89KO9Vbsh/OI= -k8s.io/kubectl v0.34.1/go.mod h1:JRYlhJpGPyk3dEmJ+BuBiOB9/dAvnrALJEiY/C5qa6A= -k8s.io/metrics v0.34.1 h1:374Rexmp1xxgRt64Bi0TsjAM8cA/Y8skwCoPdjtIslE= -k8s.io/metrics v0.34.1/go.mod h1:Drf5kPfk2NJrlpcNdSiAAHn/7Y9KqxpRNagByM7Ei80= +k8s.io/kubectl v0.34.2 h1:+fWGrVlDONMUmmQLDaGkQ9i91oszjjRAa94cr37hzqA= +k8s.io/kubectl v0.34.2/go.mod h1:X2KTOdtZZNrTWmUD4oHApJ836pevSl+zvC5sI6oO2YQ= +k8s.io/metrics v0.34.2 h1:zao91FNDVPRGIiHLO2vqqe21zZVPien1goyzn0hsz90= +k8s.io/metrics v0.34.2/go.mod h1:Ydulln+8uZZctUM8yrUQX4rfq/Ay6UzsuXf24QJ37Vc= k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= -sigs.k8s.io/controller-runtime v0.22.3 h1:I7mfqz/a/WdmDCEnXmSPm8/b/yRTy6JsKKENTijTq8Y= -sigs.k8s.io/controller-runtime v0.22.3/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= +sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A= +sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20250211091558-894df3a7e664 h1:xC7x7FsPURJYhZnWHsWFd7nkdD/WRtQVWPC28FWt85Y= sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20250211091558-894df3a7e664/go.mod h1:Cq9jUhwSYol5tNB0O/1vLYxNV9KqnhpvEa6HvJ1w0wY= sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= diff --git a/hack/acm/import-cluster.sh b/hack/acm/import-cluster.sh deleted file mode 100755 index da5057b3..00000000 --- a/hack/acm/import-cluster.sh +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env bash - 
-# Import a managed cluster into ACM -# Usage: ./import-cluster.sh - -set -euo pipefail - -CLUSTER_NAME="${1:-}" -MANAGED_KUBECONFIG="${2:-}" - -# Validate inputs -if [ -z "$CLUSTER_NAME" ]; then - echo "Error: CLUSTER_NAME is required" - echo "Usage: $0 " - exit 1 -fi - -if [ -z "$MANAGED_KUBECONFIG" ]; then - echo "Error: MANAGED_KUBECONFIG is required" - echo "Usage: $0 " - exit 1 -fi - -if [ ! -f "$MANAGED_KUBECONFIG" ]; then - echo "Error: Kubeconfig file not found: $MANAGED_KUBECONFIG" - exit 1 -fi - -echo "===========================================" -echo "Importing cluster: $CLUSTER_NAME" -echo "===========================================" - -# Step 1: Create ManagedCluster resource -echo "Step 1: Creating ManagedCluster resource on hub..." -cat </dev/null; then - echo "✅ Import secret created!" - break - fi - echo " Waiting for import secret ($i/60)..." - sleep 2 -done - -# Step 3: Extract import manifests -echo "Step 3: Extracting import manifests..." -mkdir -p _output/acm-import -oc get secret -n "$CLUSTER_NAME" "$CLUSTER_NAME-import" -o jsonpath='{.data.crds\.yaml}' | base64 -d > "_output/acm-import/${CLUSTER_NAME}-crds.yaml" -oc get secret -n "$CLUSTER_NAME" "$CLUSTER_NAME-import" -o jsonpath='{.data.import\.yaml}' | base64 -d > "_output/acm-import/${CLUSTER_NAME}-import.yaml" -echo "Import manifests saved to _output/acm-import/" - -# Step 4: Apply CRDs to managed cluster -echo "Step 4: Applying CRDs to managed cluster..." -KUBECONFIG="$MANAGED_KUBECONFIG" oc apply -f "_output/acm-import/${CLUSTER_NAME}-crds.yaml" -echo " Waiting for CRDs to be established..." -sleep 5 - -# Step 5: Apply import manifest -echo "Step 5: Applying import manifest to managed cluster..." -KUBECONFIG="$MANAGED_KUBECONFIG" oc apply -f "_output/acm-import/${CLUSTER_NAME}-import.yaml" - -# Step 6: Wait for klusterlet to be ready -echo "Step 6: Waiting for klusterlet to be ready..." 
-for i in {1..120}; do - if oc get managedcluster "$CLUSTER_NAME" -o jsonpath='{.status.conditions[?(@.type=="ManagedClusterConditionAvailable")].status}' 2>/dev/null | grep -q "True"; then - echo "✅ Cluster $CLUSTER_NAME is now available!" - break - fi - echo " Waiting for cluster to become available ($i/120)..." - sleep 5 -done - -echo "===========================================" -echo "✓ Cluster import complete!" -echo "===========================================" -oc get managedcluster "$CLUSTER_NAME" \ No newline at end of file diff --git a/hack/acm/install-instance.sh b/hack/acm/install-instance.sh deleted file mode 100755 index e7168b24..00000000 --- a/hack/acm/install-instance.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env bash - -# Install ACM instance (MultiClusterHub CR) - -set -euo pipefail - -echo "Installing ACM Instance (MultiClusterHub)..." -cat </dev/null | grep multicluster-engine || true) - if [ -n "$CSV_NAME" ]; then - echo "MCE CSV found: $CSV_NAME" - break - fi - echo " Waiting for MCE CSV to appear ($i/60)..." - sleep 5 -done - -if [ -z "$CSV_NAME" ]; then - echo "Error: MCE CSV not found after waiting" - exit 1 -fi - -# Wait for CSV to be ready -echo "Waiting for CSV to reach Succeeded phase..." -oc wait --for=jsonpath='{.status.phase}'=Succeeded "$CSV_NAME" -n multicluster-engine --timeout=300s - -# Create MultiClusterEngine instance -echo "Creating MultiClusterEngine instance..." -cat </dev/null 2>&1; then - echo "✅ ManagedCluster CRD is now available!" - break - fi - echo " Waiting for ManagedCluster CRD ($i/120)..." - sleep 5 -done - -echo "✓ MCE installation complete" \ No newline at end of file diff --git a/hack/acm/install-operator.sh b/hack/acm/install-operator.sh deleted file mode 100755 index 22a16d24..00000000 --- a/hack/acm/install-operator.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env bash - -# Install ACM operator (Subscription, OperatorGroup, etc.) 
- -set -euo pipefail - -echo "Installing ACM Operator (release 2.14)..." -oc apply -k https://github.com/redhat-cop/gitops-catalog/advanced-cluster-management/operator/overlays/release-2.14 - -# Wait for CSV to appear and get its name -echo "Waiting for ACM operator CSV to be ready..." -CSV_NAME="" -for i in {1..60}; do - CSV_NAME=$(oc get csv -n open-cluster-management -o name 2>/dev/null | grep advanced-cluster-management || true) - if [ -n "$CSV_NAME" ]; then - echo "ACM CSV found: $CSV_NAME" - break - fi - echo " Waiting for ACM CSV to appear ($i/60)..." - sleep 5 -done - -if [ -z "$CSV_NAME" ]; then - echo "Error: ACM CSV not found after waiting" - exit 1 -fi - -# Wait for CSV to be ready -echo "Waiting for CSV to reach Succeeded phase..." -oc wait --for=jsonpath='{.status.phase}'=Succeeded "$CSV_NAME" -n open-cluster-management --timeout=300s - -echo "✓ ACM Operator installation complete" \ No newline at end of file diff --git a/hack/acm/status.sh b/hack/acm/status.sh deleted file mode 100755 index 647eb532..00000000 --- a/hack/acm/status.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env bash - -# Check ACM installation status - -set -euo pipefail - -echo "==========================================" -echo "ACM Installation Status" -echo "==========================================" -echo "" - -echo "Namespaces:" -oc get namespaces | grep -E "(open-cluster-management|multicluster-engine)" || echo "No ACM namespaces found" -echo "" - -echo "Operators:" -oc get csv -n open-cluster-management 2>/dev/null || echo "No operators found in open-cluster-management namespace" -echo "" - -echo "MultiClusterHub:" -oc get multiclusterhub -n open-cluster-management -o wide 2>/dev/null || echo "No MultiClusterHub found" -echo "" - -echo "ACM Pods:" -oc get pods -n open-cluster-management 2>/dev/null || echo "No pods found in open-cluster-management namespace" -echo "" - -echo "ManagedClusters:" -oc get managedclusters 2>/dev/null || echo "No ManagedClusters found (this is 
normal for fresh install)" \ No newline at end of file diff --git a/hack/acm/uninstall.sh b/hack/acm/uninstall.sh deleted file mode 100755 index 8bf5a920..00000000 --- a/hack/acm/uninstall.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash - -# Uninstall ACM (reverse order: instance first, then operator) - -set -euo pipefail - -echo "Uninstalling ACM Instance..." -oc delete multiclusterhub multiclusterhub -n open-cluster-management 2>/dev/null || true - -echo "Waiting for MultiClusterHub to be deleted..." -oc wait --for=delete multiclusterhub/multiclusterhub -n open-cluster-management --timeout=300s 2>/dev/null || true - -echo "Uninstalling ACM Operator..." -oc delete -k https://github.com/redhat-cop/gitops-catalog/advanced-cluster-management/operator/overlays/release-2.14 2>/dev/null || true - -echo "Cleaning up namespaces..." -oc delete namespace open-cluster-management --timeout=300s 2>/dev/null || true - -echo "✓ ACM uninstallation complete" \ No newline at end of file diff --git a/internal/test/mcp.go b/internal/test/mcp.go index 5fa0d0a4..174fe4eb 100644 --- a/internal/test/mcp.go +++ b/internal/test/mcp.go @@ -1,6 +1,7 @@ package test import ( + "context" "net/http" "net/http/httptest" "testing" @@ -9,9 +10,15 @@ import ( "github.com/mark3labs/mcp-go/client/transport" "github.com/mark3labs/mcp-go/mcp" "github.com/stretchr/testify/require" - "golang.org/x/net/context" ) +func McpInitRequest() mcp.InitializeRequest { + initRequest := mcp.InitializeRequest{} + initRequest.Params.ProtocolVersion = mcp.LATEST_PROTOCOL_VERSION + initRequest.Params.ClientInfo = mcp.Implementation{Name: "test", Version: "1.33.7"} + return initRequest +} + type McpClient struct { ctx context.Context testServer *httptest.Server @@ -23,14 +30,12 @@ func NewMcpClient(t *testing.T, mcpHttpServer http.Handler, options ...transport var err error ret := &McpClient{ctx: t.Context()} ret.testServer = httptest.NewServer(mcpHttpServer) + options = append(options, 
transport.WithContinuousListening()) ret.Client, err = client.NewStreamableHttpClient(ret.testServer.URL+"/mcp", options...) require.NoError(t, err, "Expected no error creating MCP client") err = ret.Start(t.Context()) require.NoError(t, err, "Expected no error starting MCP client") - initRequest := mcp.InitializeRequest{} - initRequest.Params.ProtocolVersion = mcp.LATEST_PROTOCOL_VERSION - initRequest.Params.ClientInfo = mcp.Implementation{Name: "test", Version: "1.33.7"} - _, err = ret.Initialize(t.Context(), initRequest) + _, err = ret.Initialize(t.Context(), McpInitRequest()) require.NoError(t, err, "Expected no error initializing MCP client") return ret } diff --git a/internal/test/mock_server.go b/internal/test/mock_server.go index 58740ad6..e256f425 100644 --- a/internal/test/mock_server.go +++ b/internal/test/mock_server.go @@ -59,6 +59,10 @@ func (m *MockServer) Handle(handler http.Handler) { m.restHandlers = append(m.restHandlers, handler.ServeHTTP) } +func (m *MockServer) ResetHandlers() { + m.restHandlers = make([]http.HandlerFunc, 0) +} + func (m *MockServer) Config() *rest.Config { return m.config } @@ -216,3 +220,33 @@ func (h *InOpenShiftHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) return } } + +const tokenReviewSuccessful = ` + { + "kind": "TokenReview", + "apiVersion": "authentication.k8s.io/v1", + "spec": {"token": "valid-token"}, + "status": { + "authenticated": true, + "user": { + "username": "test-user", + "groups": ["system:authenticated"] + }, + "audiences": ["the-audience"] + } + }` + +type TokenReviewHandler struct { + TokenReviewed bool +} + +var _ http.Handler = (*TokenReviewHandler)(nil) + +func (h *TokenReviewHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if req.URL.EscapedPath() == "/apis/authentication.k8s.io/v1/tokenreviews" { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write([]byte(tokenReviewSuccessful)) + h.TokenReviewed = true + return + } +} diff --git 
a/internal/test/test.go b/internal/test/test.go index 03491422..c2ccec4e 100644 --- a/internal/test/test.go +++ b/internal/test/test.go @@ -1,9 +1,12 @@ package test import ( + "fmt" + "net" "os" "path/filepath" "runtime" + "time" ) func Must[T any](v T, err error) T { @@ -19,3 +22,30 @@ func ReadFile(path ...string) string { fileBytes := Must(os.ReadFile(filePath)) return string(fileBytes) } + +func RandomPortAddress() (*net.TCPAddr, error) { + ln, err := net.Listen("tcp", "0.0.0.0:0") + if err != nil { + return nil, fmt.Errorf("failed to find random port for HTTP server: %v", err) + } + defer func() { _ = ln.Close() }() + tcpAddr, ok := ln.Addr().(*net.TCPAddr) + if !ok { + return nil, fmt.Errorf("failed to cast listener address to TCPAddr") + } + return tcpAddr, nil +} + +func WaitForServer(tcpAddr *net.TCPAddr) error { + var conn *net.TCPConn + var err error + for i := 0; i < 10; i++ { + conn, err = net.DialTCP("tcp", nil, tcpAddr) + if err == nil { + _ = conn.Close() + break + } + time.Sleep(50 * time.Millisecond) + } + return err +} diff --git a/npm/kubernetes-mcp-server-darwin-amd64/package.json b/npm/kubernetes-mcp-server-darwin-amd64/package.json deleted file mode 100644 index 49e05004..00000000 --- a/npm/kubernetes-mcp-server-darwin-amd64/package.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "name": "kubernetes-mcp-server-darwin-amd64", - "version": "0.0.0", - "description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift", - "repository": { - "type": "git", - "url": "git+https://github.com/containers/kubernetes-mcp-server.git" - }, - "os": [ - "darwin" - ], - "cpu": [ - "x64" - ] -} diff --git a/npm/kubernetes-mcp-server-darwin-arm64/package.json b/npm/kubernetes-mcp-server-darwin-arm64/package.json deleted file mode 100644 index f8e313c2..00000000 --- a/npm/kubernetes-mcp-server-darwin-arm64/package.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "name": "kubernetes-mcp-server-darwin-arm64", - "version": "0.0.0", - "description": "Model Context 
Protocol (MCP) server for Kubernetes and OpenShift", - "repository": { - "type": "git", - "url": "git+https://github.com/containers/kubernetes-mcp-server.git" - }, - "os": [ - "darwin" - ], - "cpu": [ - "arm64" - ] -} diff --git a/npm/kubernetes-mcp-server-linux-amd64/package.json b/npm/kubernetes-mcp-server-linux-amd64/package.json deleted file mode 100644 index 1a519074..00000000 --- a/npm/kubernetes-mcp-server-linux-amd64/package.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "name": "kubernetes-mcp-server-linux-amd64", - "version": "0.0.0", - "description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift", - "repository": { - "type": "git", - "url": "git+https://github.com/containers/kubernetes-mcp-server.git" - }, - "os": [ - "linux" - ], - "cpu": [ - "x64" - ] -} diff --git a/npm/kubernetes-mcp-server-linux-arm64/package.json b/npm/kubernetes-mcp-server-linux-arm64/package.json deleted file mode 100644 index b861abeb..00000000 --- a/npm/kubernetes-mcp-server-linux-arm64/package.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "name": "kubernetes-mcp-server-linux-arm64", - "version": "0.0.0", - "description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift", - "repository": { - "type": "git", - "url": "git+https://github.com/containers/kubernetes-mcp-server.git" - }, - "os": [ - "linux" - ], - "cpu": [ - "arm64" - ] -} diff --git a/npm/kubernetes-mcp-server-windows-amd64/package.json b/npm/kubernetes-mcp-server-windows-amd64/package.json deleted file mode 100644 index 306e5047..00000000 --- a/npm/kubernetes-mcp-server-windows-amd64/package.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "name": "kubernetes-mcp-server-windows-amd64", - "version": "0.0.0", - "description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift", - "repository": { - "type": "git", - "url": "git+https://github.com/containers/kubernetes-mcp-server.git" - }, - "os": [ - "win32" - ], - "cpu": [ - "x64" - ] -} diff --git 
a/npm/kubernetes-mcp-server-windows-arm64/package.json b/npm/kubernetes-mcp-server-windows-arm64/package.json deleted file mode 100644 index c30c4a30..00000000 --- a/npm/kubernetes-mcp-server-windows-arm64/package.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "name": "kubernetes-mcp-server-windows-arm64", - "version": "0.0.0", - "description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift", - "repository": { - "type": "git", - "url": "git+https://github.com/containers/kubernetes-mcp-server.git" - }, - "os": [ - "win32" - ], - "cpu": [ - "arm64" - ] -} diff --git a/npm/kubernetes-mcp-server/package.json b/npm/kubernetes-mcp-server/package.json deleted file mode 100644 index 318810a9..00000000 --- a/npm/kubernetes-mcp-server/package.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "name": "kubernetes-mcp-server", - "version": "0.0.0", - "description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift", - "main": "./bin/index.js", - "bin": { - "kubernetes-mcp-server": "bin/index.js" - }, - "optionalDependencies": { - "kubernetes-mcp-server-darwin-amd64": "0.0.0", - "kubernetes-mcp-server-darwin-arm64": "0.0.0", - "kubernetes-mcp-server-linux-amd64": "0.0.0", - "kubernetes-mcp-server-linux-arm64": "0.0.0", - "kubernetes-mcp-server-windows-amd64": "0.0.0", - "kubernetes-mcp-server-windows-arm64": "0.0.0" - }, - "repository": { - "type": "git", - "url": "git+https://github.com/containers/kubernetes-mcp-server.git" - }, - "keywords": [ - "mcp", - "kubernetes", - "openshift", - "model context protocol", - "model", - "context", - "protocol" - ], - "author": { - "name": "Marc Nuri", - "url": "https://www.marcnuri.com" - }, - "license": "Apache-2.0", - "bugs": { - "url": "https://github.com/containers/kubernetes-mcp-server/issues" - }, - "homepage": "https://github.com/containers/kubernetes-mcp-server#readme" -} diff --git a/pkg/http/http_authorization_test.go b/pkg/http/http_authorization_test.go new file mode 100644 index 00000000..a8995c45 --- 
/dev/null +++ b/pkg/http/http_authorization_test.go @@ -0,0 +1,472 @@ +package http + +import ( + "bytes" + "flag" + "fmt" + "net/http" + "strconv" + "strings" + "testing" + "time" + + "github.com/containers/kubernetes-mcp-server/internal/test" + "github.com/coreos/go-oidc/v3/oidc" + "github.com/coreos/go-oidc/v3/oidc/oidctest" + "github.com/mark3labs/mcp-go/client" + "github.com/mark3labs/mcp-go/client/transport" + "github.com/stretchr/testify/suite" + "k8s.io/klog/v2" + "k8s.io/klog/v2/textlogger" +) + +type AuthorizationSuite struct { + BaseHttpSuite + mcpClient *client.Client + klogState klog.State + logBuffer bytes.Buffer +} + +func (s *AuthorizationSuite) SetupTest() { + s.BaseHttpSuite.SetupTest() + + // Capture logs + s.klogState = klog.CaptureState() + flags := flag.NewFlagSet("test", flag.ContinueOnError) + klog.InitFlags(flags) + _ = flags.Set("v", "5") + klog.SetLogger(textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(5), textlogger.Output(&s.logBuffer)))) + + // Default Auth settings (overridden in tests as needed) + s.OidcProvider = nil + s.StaticConfig.RequireOAuth = true + s.StaticConfig.ValidateToken = true + s.StaticConfig.OAuthAudience = "" + s.StaticConfig.StsClientId = "" + s.StaticConfig.StsClientSecret = "" + s.StaticConfig.StsAudience = "" + s.StaticConfig.StsScopes = []string{} +} + +func (s *AuthorizationSuite) TearDownTest() { + s.BaseHttpSuite.TearDownTest() + s.klogState.Restore() + + if s.mcpClient != nil { + _ = s.mcpClient.Close() + } +} + +func (s *AuthorizationSuite) StartClient(options ...transport.StreamableHTTPCOption) { + var err error + s.mcpClient, err = client.NewStreamableHttpClient(fmt.Sprintf("http://127.0.0.1:%d/mcp", s.TcpAddr.Port), options...) 
+ s.Require().NoError(err, "Expected no error creating Streamable HTTP MCP client") + err = s.mcpClient.Start(s.T().Context()) + s.Require().NoError(err, "Expected no error starting Streamable HTTP MCP client") +} + +func (s *AuthorizationSuite) HttpGet(authHeader string) *http.Response { + req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/mcp", s.TcpAddr.Port), nil) + s.Require().NoError(err, "Failed to create request") + if authHeader != "" { + req.Header.Set("Authorization", authHeader) + } + resp, err := http.DefaultClient.Do(req) + s.Require().NoError(err, "Failed to get protected endpoint") + return resp +} + +func (s *AuthorizationSuite) TestAuthorizationUnauthorizedMissingHeader() { + // Missing Authorization header + s.StartServer() + s.StartClient() + + s.Run("Initialize returns error for MISSING Authorization header", func() { + _, err := s.mcpClient.Initialize(s.T().Context(), test.McpInitRequest()) + s.Require().Error(err, "Expected error creating initial request") + s.ErrorContains(err, "transport error: request failed with status 401: Unauthorized: Bearer token required") + }) + + s.Run("Protected resource with MISSING Authorization header", func() { + resp := s.HttpGet("") + s.T().Cleanup(func() { _ = resp.Body.Close }) + + s.Run("returns 401 - Unauthorized status", func() { + s.Equal(401, resp.StatusCode, "Expected HTTP 401 for MISSING Authorization header") + }) + s.Run("returns WWW-Authenticate header", func() { + authHeader := resp.Header.Get("WWW-Authenticate") + expected := `Bearer realm="Kubernetes MCP Server", error="missing_token"` + s.Equal(expected, authHeader, "Expected WWW-Authenticate header to match") + }) + s.Run("logs error", func() { + s.Contains(s.logBuffer.String(), "Authentication failed - missing or invalid bearer token", "Expected log entry for missing or invalid bearer token") + }) + }) +} + +func (s *AuthorizationSuite) TestAuthorizationUnauthorizedHeaderIncompatible() { + // Authorization header 
without Bearer prefix + s.StartServer() + s.StartClient(transport.WithHTTPHeaders(map[string]string{ + "Authorization": "Basic YWxhZGRpbjpvcGVuc2VzYW1l", + })) + + s.Run("Initialize returns error for INCOMPATIBLE Authorization header", func() { + _, err := s.mcpClient.Initialize(s.T().Context(), test.McpInitRequest()) + s.Require().Error(err, "Expected error creating initial request") + s.ErrorContains(err, "transport error: request failed with status 401: Unauthorized: Bearer token required") + }) + + s.Run("Protected resource with INCOMPATIBLE Authorization header", func() { + resp := s.HttpGet("Basic YWxhZGRpbjpvcGVuc2VzYW1l") + s.T().Cleanup(func() { _ = resp.Body.Close }) + + s.Run("returns 401 - Unauthorized status", func() { + s.Equal(401, resp.StatusCode, "Expected HTTP 401 for INCOMPATIBLE Authorization header") + }) + s.Run("returns WWW-Authenticate header", func() { + authHeader := resp.Header.Get("WWW-Authenticate") + expected := `Bearer realm="Kubernetes MCP Server", error="missing_token"` + s.Equal(expected, authHeader, "Expected WWW-Authenticate header to match") + }) + s.Run("logs error", func() { + s.Contains(s.logBuffer.String(), "Authentication failed - missing or invalid bearer token", "Expected log entry for missing or invalid bearer token") + }) + }) +} + +func (s *AuthorizationSuite) TestAuthorizationUnauthorizedHeaderInvalid() { + // Invalid Authorization header + s.StartServer() + s.StartClient(transport.WithHTTPHeaders(map[string]string{ + "Authorization": "Bearer " + strings.ReplaceAll(tokenBasicNotExpired, ".", ".invalid"), + })) + + s.Run("Initialize returns error for INVALID Authorization header", func() { + _, err := s.mcpClient.Initialize(s.T().Context(), test.McpInitRequest()) + s.Require().Error(err, "Expected error creating initial request") + s.ErrorContains(err, "transport error: request failed with status 401: Unauthorized: Invalid token") + }) + + s.Run("Protected resource with INVALID Authorization header", func() { + resp := 
s.HttpGet("Bearer " + strings.ReplaceAll(tokenBasicNotExpired, ".", ".invalid")) + s.T().Cleanup(func() { _ = resp.Body.Close }) + + s.Run("returns 401 - Unauthorized status", func() { + s.Equal(401, resp.StatusCode, "Expected HTTP 401 for INVALID Authorization header") + }) + s.Run("returns WWW-Authenticate header", func() { + authHeader := resp.Header.Get("WWW-Authenticate") + expected := `Bearer realm="Kubernetes MCP Server", error="invalid_token"` + s.Equal(expected, authHeader, "Expected WWW-Authenticate header to match") + }) + s.Run("logs error", func() { + s.Contains(s.logBuffer.String(), "Authentication failed - JWT validation error", "Expected log entry for JWT validation error") + s.Contains(s.logBuffer.String(), "error: failed to parse JWT token: illegal base64 data", "Expected log entry for JWT validation error details") + }) + }) +} + +func (s *AuthorizationSuite) TestAuthorizationUnauthorizedHeaderExpired() { + // Expired Authorization Bearer token + s.StartServer() + s.StartClient(transport.WithHTTPHeaders(map[string]string{ + "Authorization": "Bearer " + tokenBasicExpired, + })) + + s.Run("Initialize returns error for EXPIRED Authorization header", func() { + _, err := s.mcpClient.Initialize(s.T().Context(), test.McpInitRequest()) + s.Require().Error(err, "Expected error creating initial request") + s.ErrorContains(err, "transport error: request failed with status 401: Unauthorized: Invalid token") + }) + + s.Run("Protected resource with EXPIRED Authorization header", func() { + resp := s.HttpGet("Bearer " + tokenBasicExpired) + s.T().Cleanup(func() { _ = resp.Body.Close }) + + s.Run("returns 401 - Unauthorized status", func() { + s.Equal(401, resp.StatusCode, "Expected HTTP 401 for EXPIRED Authorization header") + }) + s.Run("returns WWW-Authenticate header", func() { + authHeader := resp.Header.Get("WWW-Authenticate") + expected := `Bearer realm="Kubernetes MCP Server", error="invalid_token"` + s.Equal(expected, authHeader, "Expected 
WWW-Authenticate header to match") + }) + s.Run("logs error", func() { + s.Contains(s.logBuffer.String(), "Authentication failed - JWT validation error", "Expected log entry for JWT validation error") + s.Contains(s.logBuffer.String(), "validation failed, token is expired (exp)", "Expected log entry for JWT validation error details") + }) + }) +} + +func (s *AuthorizationSuite) TestAuthorizationUnauthorizedHeaderInvalidAudience() { + // Invalid audience claim Bearer token + s.StaticConfig.OAuthAudience = "expected-audience" + s.StartServer() + s.StartClient(transport.WithHTTPHeaders(map[string]string{ + "Authorization": "Bearer " + tokenBasicNotExpired, + })) + + s.Run("Initialize returns error for INVALID AUDIENCE Authorization header", func() { + _, err := s.mcpClient.Initialize(s.T().Context(), test.McpInitRequest()) + s.Require().Error(err, "Expected error creating initial request") + s.ErrorContains(err, "transport error: request failed with status 401: Unauthorized: Invalid token") + }) + + s.Run("Protected resource with INVALID AUDIENCE Authorization header", func() { + resp := s.HttpGet("Bearer " + tokenBasicNotExpired) + s.T().Cleanup(func() { _ = resp.Body.Close }) + + s.Run("returns 401 - Unauthorized status", func() { + s.Equal(401, resp.StatusCode, "Expected HTTP 401 for INVALID AUDIENCE Authorization header") + }) + s.Run("returns WWW-Authenticate header", func() { + authHeader := resp.Header.Get("WWW-Authenticate") + expected := `Bearer realm="Kubernetes MCP Server", audience="expected-audience", error="invalid_token"` + s.Equal(expected, authHeader, "Expected WWW-Authenticate header to match") + }) + s.Run("logs error", func() { + s.Contains(s.logBuffer.String(), "Authentication failed - JWT validation error", "Expected log entry for JWT validation error") + s.Contains(s.logBuffer.String(), "invalid audience claim (aud)", "Expected log entry for JWT validation error details") + }) + }) +} + +func (s *AuthorizationSuite) 
TestAuthorizationUnauthorizedOidcValidation() { + // Failed OIDC validation + s.StaticConfig.OAuthAudience = "mcp-server" + oidcTestServer := NewOidcTestServer(s.T()) + s.T().Cleanup(oidcTestServer.Close) + s.OidcProvider = oidcTestServer.Provider + s.StartServer() + s.StartClient(transport.WithHTTPHeaders(map[string]string{ + "Authorization": "Bearer " + tokenBasicNotExpired, + })) + + s.Run("Initialize returns error for INVALID OIDC Authorization header", func() { + _, err := s.mcpClient.Initialize(s.T().Context(), test.McpInitRequest()) + s.Require().Error(err, "Expected error creating initial request") + s.ErrorContains(err, "transport error: request failed with status 401: Unauthorized: Invalid token") + }) + + s.Run("Protected resource with INVALID OIDC Authorization header", func() { + resp := s.HttpGet("Bearer " + tokenBasicNotExpired) + s.T().Cleanup(func() { _ = resp.Body.Close }) + + s.Run("returns 401 - Unauthorized status", func() { + s.Equal(401, resp.StatusCode, "Expected HTTP 401 for INVALID OIDC Authorization header") + }) + s.Run("returns WWW-Authenticate header", func() { + authHeader := resp.Header.Get("WWW-Authenticate") + expected := `Bearer realm="Kubernetes MCP Server", audience="mcp-server", error="invalid_token"` + s.Equal(expected, authHeader, "Expected WWW-Authenticate header to match") + }) + s.Run("logs error", func() { + s.Contains(s.logBuffer.String(), "Authentication failed - JWT validation error", "Expected log entry for JWT validation error") + s.Contains(s.logBuffer.String(), "OIDC token validation error: failed to verify signature", "Expected log entry for OIDC validation error details") + }) + }) +} + +func (s *AuthorizationSuite) TestAuthorizationUnauthorizedKubernetesValidation() { + // Failed Kubernetes TokenReview + s.StaticConfig.OAuthAudience = "mcp-server" + oidcTestServer := NewOidcTestServer(s.T()) + s.T().Cleanup(oidcTestServer.Close) + rawClaims := `{ + "iss": "` + oidcTestServer.URL + `", + "exp": ` + 
strconv.FormatInt(time.Now().Add(time.Hour).Unix(), 10) + `, + "aud": "mcp-server" + }` + validOidcToken := oidctest.SignIDToken(oidcTestServer.PrivateKey, "test-oidc-key-id", oidc.RS256, rawClaims) + s.OidcProvider = oidcTestServer.Provider + s.StartServer() + s.StartClient(transport.WithHTTPHeaders(map[string]string{ + "Authorization": "Bearer " + validOidcToken, + })) + + s.Run("Initialize returns error for INVALID KUBERNETES Authorization header", func() { + _, err := s.mcpClient.Initialize(s.T().Context(), test.McpInitRequest()) + s.Require().Error(err, "Expected error creating initial request") + s.ErrorContains(err, "transport error: request failed with status 401: Unauthorized: Invalid token") + }) + + s.Run("Protected resource with INVALID KUBERNETES Authorization header", func() { + resp := s.HttpGet("Bearer " + validOidcToken) + s.T().Cleanup(func() { _ = resp.Body.Close }) + + s.Run("returns 401 - Unauthorized status", func() { + s.Equal(401, resp.StatusCode, "Expected HTTP 401 for INVALID KUBERNETES Authorization header") + }) + s.Run("returns WWW-Authenticate header", func() { + authHeader := resp.Header.Get("WWW-Authenticate") + expected := `Bearer realm="Kubernetes MCP Server", audience="mcp-server", error="invalid_token"` + s.Equal(expected, authHeader, "Expected WWW-Authenticate header to match") + }) + s.Run("logs error", func() { + s.Contains(s.logBuffer.String(), "Authentication failed - JWT validation error", "Expected log entry for JWT validation error") + s.Contains(s.logBuffer.String(), "kubernetes API token validation error: failed to create token review", "Expected log entry for Kubernetes TokenReview error details") + }) + }) +} + +func (s *AuthorizationSuite) TestAuthorizationRequireOAuthFalse() { + s.StaticConfig.RequireOAuth = false + s.StartServer() + s.StartClient() + + s.Run("Initialize returns OK for MISSING Authorization header", func() { + result, err := s.mcpClient.Initialize(s.T().Context(), test.McpInitRequest()) + 
s.Require().NoError(err, "Expected no error creating initial request") + s.Require().NotNil(result, "Expected initial request to not be nil") + }) +} + +func (s *AuthorizationSuite) TestAuthorizationRawToken() { + tokenReviewHandler := &test.TokenReviewHandler{} + s.MockServer.Handle(tokenReviewHandler) + + cases := []struct { + audience string + validateToken bool + }{ + {"", false}, // No audience, no validation + {"", true}, // No audience, validation enabled + {"mcp-server", false}, // Audience set, no validation + {"mcp-server", true}, // Audience set, validation enabled + } + for _, c := range cases { + s.StaticConfig.OAuthAudience = c.audience + s.StaticConfig.ValidateToken = c.validateToken + s.StartServer() + s.StartClient(transport.WithHTTPHeaders(map[string]string{ + "Authorization": "Bearer " + tokenBasicNotExpired, + })) + tokenReviewHandler.TokenReviewed = false + + s.Run(fmt.Sprintf("Protected resource with audience = '%s' and validate-token = '%t'", c.audience, c.validateToken), func() { + s.Run("Initialize returns OK for VALID Authorization header", func() { + result, err := s.mcpClient.Initialize(s.T().Context(), test.McpInitRequest()) + s.Require().NoError(err, "Expected no error creating initial request") + s.Require().NotNil(result, "Expected initial request to not be nil") + }) + + s.Run("Performs token validation accordingly", func() { + if tokenReviewHandler.TokenReviewed == true && !c.validateToken { + s.Fail("Expected token review to be skipped when validate-token is false, but it was performed") + } + if tokenReviewHandler.TokenReviewed == false && c.validateToken { + s.Fail("Expected token review to be performed when validate-token is true, but it was skipped") + } + }) + }) + _ = s.mcpClient.Close() + s.StopServer() + } +} + +func (s *AuthorizationSuite) TestAuthorizationOidcToken() { + tokenReviewHandler := &test.TokenReviewHandler{} + s.MockServer.Handle(tokenReviewHandler) + + oidcTestServer := NewOidcTestServer(s.T()) + 
s.T().Cleanup(oidcTestServer.Close) + rawClaims := `{ + "iss": "` + oidcTestServer.URL + `", + "exp": ` + strconv.FormatInt(time.Now().Add(time.Hour).Unix(), 10) + `, + "aud": "mcp-server" + }` + validOidcToken := oidctest.SignIDToken(oidcTestServer.PrivateKey, "test-oidc-key-id", oidc.RS256, rawClaims) + + cases := []bool{false, true} + for _, validateToken := range cases { + s.OidcProvider = oidcTestServer.Provider + s.StaticConfig.OAuthAudience = "mcp-server" + s.StaticConfig.ValidateToken = validateToken + s.StartServer() + s.StartClient(transport.WithHTTPHeaders(map[string]string{ + "Authorization": "Bearer " + validOidcToken, + })) + tokenReviewHandler.TokenReviewed = false + + s.Run(fmt.Sprintf("Protected resource with validate-token = '%t'", validateToken), func() { + s.Run("Initialize returns OK for VALID OIDC Authorization header", func() { + result, err := s.mcpClient.Initialize(s.T().Context(), test.McpInitRequest()) + s.Require().NoError(err, "Expected no error creating initial request") + s.Require().NotNil(result, "Expected initial request to not be nil") + }) + + s.Run("Performs token validation accordingly for VALID OIDC Authorization header", func() { + if tokenReviewHandler.TokenReviewed == true && !validateToken { + s.Fail("Expected token review to be skipped when validate-token is false, but it was performed") + } + if tokenReviewHandler.TokenReviewed == false && validateToken { + s.Fail("Expected token review to be performed when validate-token is true, but it was skipped") + } + }) + }) + _ = s.mcpClient.Close() + s.StopServer() + } +} + +func (s *AuthorizationSuite) TestAuthorizationOidcTokenExchange() { + tokenReviewHandler := &test.TokenReviewHandler{} + s.MockServer.Handle(tokenReviewHandler) + + oidcTestServer := NewOidcTestServer(s.T()) + s.T().Cleanup(oidcTestServer.Close) + rawClaims := `{ + "iss": "` + oidcTestServer.URL + `", + "exp": ` + strconv.FormatInt(time.Now().Add(time.Hour).Unix(), 10) + `, + "aud": "%s" + }` + 
validOidcClientToken := oidctest.SignIDToken(oidcTestServer.PrivateKey, "test-oidc-key-id", oidc.RS256, + fmt.Sprintf(rawClaims, "mcp-server")) + validOidcBackendToken := oidctest.SignIDToken(oidcTestServer.PrivateKey, "test-oidc-key-id", oidc.RS256, + fmt.Sprintf(rawClaims, "backend-audience")) + oidcTestServer.TokenEndpointHandler = func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = fmt.Fprintf(w, `{"access_token":"%s","token_type":"Bearer","expires_in":253402297199}`, validOidcBackendToken) + } + + cases := []bool{false, true} + for _, validateToken := range cases { + s.OidcProvider = oidcTestServer.Provider + s.StaticConfig.OAuthAudience = "mcp-server" + s.StaticConfig.ValidateToken = validateToken + s.StaticConfig.StsClientId = "test-sts-client-id" + s.StaticConfig.StsClientSecret = "test-sts-client-secret" + s.StaticConfig.StsAudience = "backend-audience" + s.StaticConfig.StsScopes = []string{"backend-scope"} + s.StartServer() + s.StartClient(transport.WithHTTPHeaders(map[string]string{ + "Authorization": "Bearer " + validOidcClientToken, + })) + tokenReviewHandler.TokenReviewed = false + + s.Run(fmt.Sprintf("Protected resource with validate-token='%t'", validateToken), func() { + s.Run("Initialize returns OK for VALID OIDC EXCHANGE Authorization header", func() { + result, err := s.mcpClient.Initialize(s.T().Context(), test.McpInitRequest()) + s.Require().NoError(err, "Expected no error creating initial request") + s.Require().NotNil(result, "Expected initial request to not be nil") + }) + + s.Run("Performs token validation accordingly for VALID OIDC EXCHANGE Authorization header", func() { + if tokenReviewHandler.TokenReviewed == true && !validateToken { + s.Fail("Expected token review to be skipped when validate-token is false, but it was performed") + } + if tokenReviewHandler.TokenReviewed == false && validateToken { + s.Fail("Expected token review to be performed when validate-token is true, but 
it was skipped") + } + }) + }) + _ = s.mcpClient.Close() + s.StopServer() + } +} + +func TestAuthorization(t *testing.T) { + suite.Run(t, new(AuthorizationSuite)) +} diff --git a/pkg/http/http_mcp_test.go b/pkg/http/http_mcp_test.go new file mode 100644 index 00000000..2a79b4be --- /dev/null +++ b/pkg/http/http_mcp_test.go @@ -0,0 +1,67 @@ +package http + +import ( + "fmt" + "testing" + + "github.com/containers/kubernetes-mcp-server/internal/test" + "github.com/mark3labs/mcp-go/client" + "github.com/mark3labs/mcp-go/client/transport" + "github.com/mark3labs/mcp-go/mcp" + "github.com/stretchr/testify/suite" +) + +type McpTransportSuite struct { + BaseHttpSuite +} + +func (s *McpTransportSuite) SetupTest() { + s.BaseHttpSuite.SetupTest() + s.StartServer() +} + +func (s *McpTransportSuite) TearDownTest() { + s.BaseHttpSuite.TearDownTest() +} + +func (s *McpTransportSuite) TestSseTransport() { + sseClient, sseClientErr := client.NewSSEMCPClient(fmt.Sprintf("http://127.0.0.1:%d/sse", s.TcpAddr.Port)) + s.Require().NoError(sseClientErr, "Expected no error creating SSE MCP client") + startErr := sseClient.Start(s.T().Context()) + s.Require().NoError(startErr, "Expected no error starting SSE MCP client") + s.Run("Can Initialize Session", func() { + _, err := sseClient.Initialize(s.T().Context(), test.McpInitRequest()) + s.Require().NoError(err, "Expected no error initializing SSE MCP client") + }) + s.Run("Can List Tools", func() { + tools, err := sseClient.ListTools(s.T().Context(), mcp.ListToolsRequest{}) + s.Require().NoError(err, "Expected no error listing tools from SSE MCP client") + s.Greater(len(tools.Tools), 0, "Expected at least one tool from SSE MCP client") + }) + s.Run("Can close SSE client", func() { + s.Require().NoError(sseClient.Close(), "Expected no error closing SSE MCP client") + }) +} + +func (s *McpTransportSuite) TestStreamableHttpTransport() { + httpClient, httpClientErr := client.NewStreamableHttpClient(fmt.Sprintf("http://127.0.0.1:%d/mcp", 
s.TcpAddr.Port), transport.WithContinuousListening()) + s.Require().NoError(httpClientErr, "Expected no error creating Streamable HTTP MCP client") + startErr := httpClient.Start(s.T().Context()) + s.Require().NoError(startErr, "Expected no error starting Streamable HTTP MCP client") + s.Run("Can Initialize Session", func() { + _, err := httpClient.Initialize(s.T().Context(), test.McpInitRequest()) + s.Require().NoError(err, "Expected no error initializing Streamable HTTP MCP client") + }) + s.Run("Can List Tools", func() { + tools, err := httpClient.ListTools(s.T().Context(), mcp.ListToolsRequest{}) + s.Require().NoError(err, "Expected no error listing tools from Streamable HTTP MCP client") + s.Greater(len(tools.Tools), 0, "Expected at least one tool from Streamable HTTP MCP client") + }) + s.Run("Can close Streamable HTTP client", func() { + s.Require().NoError(httpClient.Close(), "Expected no error closing Streamable HTTP MCP client") + }) +} + +func TestMcpTransport(t *testing.T) { + suite.Run(t, new(McpTransportSuite)) +} diff --git a/pkg/http/http_test.go b/pkg/http/http_test.go index ab531813..64c3355e 100644 --- a/pkg/http/http_test.go +++ b/pkg/http/http_test.go @@ -1,7 +1,6 @@ package http import ( - "bufio" "bytes" "context" "crypto/rand" @@ -22,6 +21,7 @@ import ( "github.com/containers/kubernetes-mcp-server/internal/test" "github.com/coreos/go-oidc/v3/oidc" "github.com/coreos/go-oidc/v3/oidc/oidctest" + "github.com/stretchr/testify/suite" "golang.org/x/sync/errgroup" "k8s.io/klog/v2" "k8s.io/klog/v2/textlogger" @@ -30,6 +30,53 @@ import ( "github.com/containers/kubernetes-mcp-server/pkg/mcp" ) +type BaseHttpSuite struct { + suite.Suite + MockServer *test.MockServer + TcpAddr *net.TCPAddr + StaticConfig *config.StaticConfig + mcpServer *mcp.Server + OidcProvider *oidc.Provider + timeoutCancel context.CancelFunc + StopServer context.CancelFunc + WaitForShutdown func() error +} + +func (s *BaseHttpSuite) SetupTest() { + var err error + 
http.DefaultClient.Timeout = 10 * time.Second + s.MockServer = test.NewMockServer() + s.TcpAddr, err = test.RandomPortAddress() + s.Require().NoError(err, "Expected no error getting random port address") + s.StaticConfig = config.Default() + s.StaticConfig.KubeConfig = s.MockServer.KubeconfigFile(s.T()) + s.StaticConfig.Port = strconv.Itoa(s.TcpAddr.Port) +} + +func (s *BaseHttpSuite) StartServer() { + var err error + s.mcpServer, err = mcp.NewServer(mcp.Configuration{StaticConfig: s.StaticConfig}) + s.Require().NoError(err, "Expected no error creating MCP server") + s.Require().NotNil(s.mcpServer, "MCP server should not be nil") + var timeoutCtx, cancelCtx context.Context + timeoutCtx, s.timeoutCancel = context.WithTimeout(s.T().Context(), 10*time.Second) + group, gc := errgroup.WithContext(timeoutCtx) + cancelCtx, s.StopServer = context.WithCancel(gc) + group.Go(func() error { return Serve(cancelCtx, s.mcpServer, s.StaticConfig, s.OidcProvider, nil) }) + s.WaitForShutdown = group.Wait + s.Require().NoError(test.WaitForServer(s.TcpAddr), "HTTP server did not start in time") +} + +func (s *BaseHttpSuite) TearDownTest() { + s.MockServer.Close() + if s.mcpServer != nil { + s.mcpServer.Close() + } + s.StopServer() + s.Require().NoError(s.WaitForShutdown(), "HTTP server did not shut down gracefully") + s.timeoutCancel() +} + type httpContext struct { klogState klog.State mockServer *test.MockServer @@ -42,20 +89,6 @@ type httpContext struct { OidcProvider *oidc.Provider } -const tokenReviewSuccessful = ` - { - "kind": "TokenReview", - "apiVersion": "authentication.k8s.io/v1", - "spec": {"token": "valid-token"}, - "status": { - "authenticated": true, - "user": { - "username": "test-user", - "groups": ["system:authenticated"] - } - } - }` - func (c *httpContext) beforeEach(t *testing.T) { t.Helper() http.DefaultClient.Timeout = 10 * time.Second @@ -192,92 +225,6 @@ func TestGracefulShutdown(t *testing.T) { }) } -func TestSseTransport(t *testing.T) { - testCase(t, 
func(ctx *httpContext) { - sseResp, sseErr := http.Get(fmt.Sprintf("http://%s/sse", ctx.HttpAddress)) - t.Cleanup(func() { _ = sseResp.Body.Close() }) - t.Run("Exposes SSE endpoint at /sse", func(t *testing.T) { - if sseErr != nil { - t.Fatalf("Failed to get SSE endpoint: %v", sseErr) - } - if sseResp.StatusCode != http.StatusOK { - t.Errorf("Expected HTTP 200 OK, got %d", sseResp.StatusCode) - } - }) - t.Run("SSE endpoint returns text/event-stream content type", func(t *testing.T) { - if sseResp.Header.Get("Content-Type") != "text/event-stream" { - t.Errorf("Expected Content-Type text/event-stream, got %s", sseResp.Header.Get("Content-Type")) - } - }) - responseReader := bufio.NewReader(sseResp.Body) - event, eventErr := responseReader.ReadString('\n') - endpoint, endpointErr := responseReader.ReadString('\n') - t.Run("SSE endpoint returns stream with messages endpoint", func(t *testing.T) { - if eventErr != nil { - t.Fatalf("Failed to read SSE response body (event): %v", eventErr) - } - if event != "event: endpoint\n" { - t.Errorf("Expected SSE event 'endpoint', got %s", event) - } - if endpointErr != nil { - t.Fatalf("Failed to read SSE response body (endpoint): %v", endpointErr) - } - if !strings.HasPrefix(endpoint, "data: /message?sessionId=") { - t.Errorf("Expected SSE data: '/message', got %s", endpoint) - } - }) - messageResp, messageErr := http.Post( - fmt.Sprintf("http://%s/message?sessionId=%s", ctx.HttpAddress, strings.TrimSpace(endpoint[25:])), - "application/json", - bytes.NewBufferString("{}"), - ) - t.Cleanup(func() { _ = messageResp.Body.Close() }) - t.Run("Exposes message endpoint at /message", func(t *testing.T) { - if messageErr != nil { - t.Fatalf("Failed to get message endpoint: %v", messageErr) - } - if messageResp.StatusCode != http.StatusAccepted { - t.Errorf("Expected HTTP 202 OK, got %d", messageResp.StatusCode) - } - }) - }) -} - -func TestStreamableHttpTransport(t *testing.T) { - testCase(t, func(ctx *httpContext) { - mcpGetResp, 
mcpGetErr := http.Get(fmt.Sprintf("http://%s/mcp", ctx.HttpAddress)) - t.Cleanup(func() { _ = mcpGetResp.Body.Close() }) - t.Run("Exposes MCP GET endpoint at /mcp", func(t *testing.T) { - if mcpGetErr != nil { - t.Fatalf("Failed to get MCP endpoint: %v", mcpGetErr) - } - if mcpGetResp.StatusCode != http.StatusOK { - t.Errorf("Expected HTTP 200 OK, got %d", mcpGetResp.StatusCode) - } - }) - t.Run("MCP GET endpoint returns text/event-stream content type", func(t *testing.T) { - if mcpGetResp.Header.Get("Content-Type") != "text/event-stream" { - t.Errorf("Expected Content-Type text/event-stream (GET), got %s", mcpGetResp.Header.Get("Content-Type")) - } - }) - mcpPostResp, mcpPostErr := http.Post(fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), "application/json", bytes.NewBufferString("{}")) - t.Cleanup(func() { _ = mcpPostResp.Body.Close() }) - t.Run("Exposes MCP POST endpoint at /mcp", func(t *testing.T) { - if mcpPostErr != nil { - t.Fatalf("Failed to post to MCP endpoint: %v", mcpPostErr) - } - if mcpPostResp.StatusCode != http.StatusOK { - t.Errorf("Expected HTTP 200 OK, got %d", mcpPostResp.StatusCode) - } - }) - t.Run("MCP POST endpoint returns application/json content type", func(t *testing.T) { - if mcpPostResp.Header.Get("Content-Type") != "application/json" { - t.Errorf("Expected Content-Type application/json (POST), got %s", mcpPostResp.Header.Get("Content-Type")) - } - }) - }) -} - func TestHealthCheck(t *testing.T) { testCase(t, func(ctx *httpContext) { t.Run("Exposes health check endpoint at /healthz", func(t *testing.T) { @@ -616,396 +563,3 @@ func TestMiddlewareLogging(t *testing.T) { }) }) } - -func TestAuthorizationUnauthorized(t *testing.T) { - // Missing Authorization header - testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) { - resp, err := http.Get(fmt.Sprintf("http://%s/mcp", ctx.HttpAddress)) - if err 
!= nil { - t.Fatalf("Failed to get protected endpoint: %v", err) - } - t.Cleanup(func() { _ = resp.Body.Close }) - t.Run("Protected resource with MISSING Authorization header returns 401 - Unauthorized", func(t *testing.T) { - if resp.StatusCode != 401 { - t.Errorf("Expected HTTP 401, got %d", resp.StatusCode) - } - }) - t.Run("Protected resource with MISSING Authorization header returns WWW-Authenticate header", func(t *testing.T) { - authHeader := resp.Header.Get("WWW-Authenticate") - expected := `Bearer realm="Kubernetes MCP Server", error="missing_token"` - if authHeader != expected { - t.Errorf("Expected WWW-Authenticate header to be %q, got %q", expected, authHeader) - } - }) - t.Run("Protected resource with MISSING Authorization header logs error", func(t *testing.T) { - if !strings.Contains(ctx.LogBuffer.String(), "Authentication failed - missing or invalid bearer token") { - t.Errorf("Expected log entry for missing or invalid bearer token, got: %s", ctx.LogBuffer.String()) - } - }) - }) - // Authorization header without Bearer prefix - testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) { - req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil) - if err != nil { - t.Fatalf("Failed to create request: %v", err) - } - req.Header.Set("Authorization", "Basic YWxhZGRpbjpvcGVuc2VzYW1l") - resp, err := http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("Failed to get protected endpoint: %v", err) - } - t.Cleanup(func() { _ = resp.Body.Close }) - t.Run("Protected resource with INCOMPATIBLE Authorization header returns WWW-Authenticate header", func(t *testing.T) { - authHeader := resp.Header.Get("WWW-Authenticate") - expected := `Bearer realm="Kubernetes MCP Server", error="missing_token"` - if authHeader != expected { - t.Errorf("Expected WWW-Authenticate header to be %q, got %q", 
expected, authHeader) - } - }) - t.Run("Protected resource with INCOMPATIBLE Authorization header logs error", func(t *testing.T) { - if !strings.Contains(ctx.LogBuffer.String(), "Authentication failed - missing or invalid bearer token") { - t.Errorf("Expected log entry for missing or invalid bearer token, got: %s", ctx.LogBuffer.String()) - } - }) - }) - // Invalid Authorization header - testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) { - req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil) - if err != nil { - t.Fatalf("Failed to create request: %v", err) - } - req.Header.Set("Authorization", "Bearer "+strings.ReplaceAll(tokenBasicNotExpired, ".", ".invalid")) - resp, err := http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("Failed to get protected endpoint: %v", err) - } - t.Cleanup(func() { _ = resp.Body.Close }) - t.Run("Protected resource with INVALID Authorization header returns 401 - Unauthorized", func(t *testing.T) { - if resp.StatusCode != 401 { - t.Errorf("Expected HTTP 401, got %d", resp.StatusCode) - } - }) - t.Run("Protected resource with INVALID Authorization header returns WWW-Authenticate header", func(t *testing.T) { - authHeader := resp.Header.Get("WWW-Authenticate") - expected := `Bearer realm="Kubernetes MCP Server", error="invalid_token"` - if authHeader != expected { - t.Errorf("Expected WWW-Authenticate header to be %q, got %q", expected, authHeader) - } - }) - t.Run("Protected resource with INVALID Authorization header logs error", func(t *testing.T) { - if !strings.Contains(ctx.LogBuffer.String(), "Authentication failed - JWT validation error") || - !strings.Contains(ctx.LogBuffer.String(), "error: failed to parse JWT token: illegal base64 data") { - t.Errorf("Expected log entry for JWT validation error, got: %s", ctx.LogBuffer.String()) - } - }) - }) - 
// Expired Authorization Bearer token - testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) { - req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil) - if err != nil { - t.Fatalf("Failed to create request: %v", err) - } - req.Header.Set("Authorization", "Bearer "+tokenBasicExpired) - resp, err := http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("Failed to get protected endpoint: %v", err) - } - t.Cleanup(func() { _ = resp.Body.Close }) - t.Run("Protected resource with EXPIRED Authorization header returns 401 - Unauthorized", func(t *testing.T) { - if resp.StatusCode != 401 { - t.Errorf("Expected HTTP 401, got %d", resp.StatusCode) - } - }) - t.Run("Protected resource with EXPIRED Authorization header returns WWW-Authenticate header", func(t *testing.T) { - authHeader := resp.Header.Get("WWW-Authenticate") - expected := `Bearer realm="Kubernetes MCP Server", error="invalid_token"` - if authHeader != expected { - t.Errorf("Expected WWW-Authenticate header to be %q, got %q", expected, authHeader) - } - }) - t.Run("Protected resource with EXPIRED Authorization header logs error", func(t *testing.T) { - if !strings.Contains(ctx.LogBuffer.String(), "Authentication failed - JWT validation error") || - !strings.Contains(ctx.LogBuffer.String(), "validation failed, token is expired (exp)") { - t.Errorf("Expected log entry for JWT validation error, got: %s", ctx.LogBuffer.String()) - } - }) - }) - // Invalid audience claim Bearer token - testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: "expected-audience", ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) { - req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil) - if err != nil { - t.Fatalf("Failed to 
create request: %v", err) - } - req.Header.Set("Authorization", "Bearer "+tokenBasicExpired) - resp, err := http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("Failed to get protected endpoint: %v", err) - } - t.Cleanup(func() { _ = resp.Body.Close }) - t.Run("Protected resource with INVALID AUDIENCE Authorization header returns 401 - Unauthorized", func(t *testing.T) { - if resp.StatusCode != 401 { - t.Errorf("Expected HTTP 401, got %d", resp.StatusCode) - } - }) - t.Run("Protected resource with INVALID AUDIENCE Authorization header returns WWW-Authenticate header", func(t *testing.T) { - authHeader := resp.Header.Get("WWW-Authenticate") - expected := `Bearer realm="Kubernetes MCP Server", audience="expected-audience", error="invalid_token"` - if authHeader != expected { - t.Errorf("Expected WWW-Authenticate header to be %q, got %q", expected, authHeader) - } - }) - t.Run("Protected resource with INVALID AUDIENCE Authorization header logs error", func(t *testing.T) { - if !strings.Contains(ctx.LogBuffer.String(), "Authentication failed - JWT validation error") || - !strings.Contains(ctx.LogBuffer.String(), "invalid audience claim (aud)") { - t.Errorf("Expected log entry for JWT validation error, got: %s", ctx.LogBuffer.String()) - } - }) - }) - // Failed OIDC validation - oidcTestServer := NewOidcTestServer(t) - t.Cleanup(oidcTestServer.Close) - testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: "mcp-server", ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}, OidcProvider: oidcTestServer.Provider}, func(ctx *httpContext) { - req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil) - if err != nil { - t.Fatalf("Failed to create request: %v", err) - } - req.Header.Set("Authorization", "Bearer "+tokenBasicNotExpired) - resp, err := http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("Failed to get protected endpoint: %v", err) - } - 
t.Cleanup(func() { _ = resp.Body.Close }) - t.Run("Protected resource with INVALID OIDC Authorization header returns 401 - Unauthorized", func(t *testing.T) { - if resp.StatusCode != 401 { - t.Errorf("Expected HTTP 401, got %d", resp.StatusCode) - } - }) - t.Run("Protected resource with INVALID OIDC Authorization header returns WWW-Authenticate header", func(t *testing.T) { - authHeader := resp.Header.Get("WWW-Authenticate") - expected := `Bearer realm="Kubernetes MCP Server", audience="mcp-server", error="invalid_token"` - if authHeader != expected { - t.Errorf("Expected WWW-Authenticate header to be %q, got %q", expected, authHeader) - } - }) - t.Run("Protected resource with INVALID OIDC Authorization header logs error", func(t *testing.T) { - if !strings.Contains(ctx.LogBuffer.String(), "Authentication failed - JWT validation error") || - !strings.Contains(ctx.LogBuffer.String(), "OIDC token validation error: failed to verify signature") { - t.Errorf("Expected log entry for OIDC validation error, got: %s", ctx.LogBuffer.String()) - } - }) - }) - // Failed Kubernetes TokenReview - rawClaims := `{ - "iss": "` + oidcTestServer.URL + `", - "exp": ` + strconv.FormatInt(time.Now().Add(time.Hour).Unix(), 10) + `, - "aud": "mcp-server" - }` - validOidcToken := oidctest.SignIDToken(oidcTestServer.PrivateKey, "test-oidc-key-id", oidc.RS256, rawClaims) - testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: "mcp-server", ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}, OidcProvider: oidcTestServer.Provider}, func(ctx *httpContext) { - req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil) - if err != nil { - t.Fatalf("Failed to create request: %v", err) - } - req.Header.Set("Authorization", "Bearer "+validOidcToken) - resp, err := http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("Failed to get protected endpoint: %v", err) - } - t.Cleanup(func() { _ = 
resp.Body.Close }) - t.Run("Protected resource with INVALID KUBERNETES Authorization header returns 401 - Unauthorized", func(t *testing.T) { - if resp.StatusCode != 401 { - t.Errorf("Expected HTTP 401, got %d", resp.StatusCode) - } - }) - t.Run("Protected resource with INVALID KUBERNETES Authorization header returns WWW-Authenticate header", func(t *testing.T) { - authHeader := resp.Header.Get("WWW-Authenticate") - expected := `Bearer realm="Kubernetes MCP Server", audience="mcp-server", error="invalid_token"` - if authHeader != expected { - t.Errorf("Expected WWW-Authenticate header to be %q, got %q", expected, authHeader) - } - }) - t.Run("Protected resource with INVALID KUBERNETES Authorization header logs error", func(t *testing.T) { - if !strings.Contains(ctx.LogBuffer.String(), "Authentication failed - JWT validation error") || - !strings.Contains(ctx.LogBuffer.String(), "kubernetes API token validation error: failed to create token review") { - t.Errorf("Expected log entry for Kubernetes TokenReview error, got: %s", ctx.LogBuffer.String()) - } - }) - }) -} - -func TestAuthorizationRequireOAuthFalse(t *testing.T) { - testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: false, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) { - resp, err := http.Get(fmt.Sprintf("http://%s/mcp", ctx.HttpAddress)) - if err != nil { - t.Fatalf("Failed to get protected endpoint: %v", err) - } - t.Cleanup(func() { _ = resp.Body.Close() }) - t.Run("Protected resource with MISSING Authorization header returns 200 - OK)", func(t *testing.T) { - if resp.StatusCode != http.StatusOK { - t.Errorf("Expected HTTP 200 OK, got %d", resp.StatusCode) - } - }) - }) -} - -func TestAuthorizationRawToken(t *testing.T) { - cases := []struct { - audience string - validateToken bool - }{ - {"", false}, // No audience, no validation - {"", true}, // No audience, validation enabled - {"mcp-server", false}, // Audience set, no 
validation - {"mcp-server", true}, // Audience set, validation enabled - } - for _, c := range cases { - testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: c.audience, ValidateToken: c.validateToken, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) { - tokenReviewed := false - ctx.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - if req.URL.EscapedPath() == "/apis/authentication.k8s.io/v1/tokenreviews" { - w.Header().Set("Content-Type", "application/json") - _, _ = w.Write([]byte(tokenReviewSuccessful)) - tokenReviewed = true - return - } - })) - req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil) - if err != nil { - t.Fatalf("Failed to create request: %v", err) - } - req.Header.Set("Authorization", "Bearer "+tokenBasicNotExpired) - resp, err := http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("Failed to get protected endpoint: %v", err) - } - t.Cleanup(func() { _ = resp.Body.Close() }) - t.Run(fmt.Sprintf("Protected resource with audience = '%s' and validate-token = '%t', with VALID Authorization header returns 200 - OK", c.audience, c.validateToken), func(t *testing.T) { - if resp.StatusCode != http.StatusOK { - t.Errorf("Expected HTTP 200 OK, got %d", resp.StatusCode) - } - }) - t.Run(fmt.Sprintf("Protected resource with audience = '%s' and validate-token = '%t', with VALID Authorization header performs token validation accordingly", c.audience, c.validateToken), func(t *testing.T) { - if tokenReviewed == true && !c.validateToken { - t.Errorf("Expected token review to be skipped when validate-token is false, but it was performed") - } - if tokenReviewed == false && c.validateToken { - t.Errorf("Expected token review to be performed when validate-token is true, but it was skipped") - } - }) - }) - } - -} - -func TestAuthorizationOidcToken(t *testing.T) { - oidcTestServer := NewOidcTestServer(t) - 
t.Cleanup(oidcTestServer.Close) - rawClaims := `{ - "iss": "` + oidcTestServer.URL + `", - "exp": ` + strconv.FormatInt(time.Now().Add(time.Hour).Unix(), 10) + `, - "aud": "mcp-server" - }` - validOidcToken := oidctest.SignIDToken(oidcTestServer.PrivateKey, "test-oidc-key-id", oidc.RS256, rawClaims) - cases := []bool{false, true} - for _, validateToken := range cases { - testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: "mcp-server", ValidateToken: validateToken, ClusterProviderStrategy: config.ClusterProviderKubeConfig}, OidcProvider: oidcTestServer.Provider}, func(ctx *httpContext) { - tokenReviewed := false - ctx.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - if req.URL.EscapedPath() == "/apis/authentication.k8s.io/v1/tokenreviews" { - w.Header().Set("Content-Type", "application/json") - _, _ = w.Write([]byte(tokenReviewSuccessful)) - tokenReviewed = true - return - } - })) - req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil) - if err != nil { - t.Fatalf("Failed to create request: %v", err) - } - req.Header.Set("Authorization", "Bearer "+validOidcToken) - resp, err := http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("Failed to get protected endpoint: %v", err) - } - t.Cleanup(func() { _ = resp.Body.Close() }) - t.Run(fmt.Sprintf("Protected resource with validate-token='%t' with VALID OIDC Authorization header returns 200 - OK", validateToken), func(t *testing.T) { - if resp.StatusCode != http.StatusOK { - t.Errorf("Expected HTTP 200 OK, got %d", resp.StatusCode) - } - }) - t.Run(fmt.Sprintf("Protected resource with validate-token='%t' with VALID OIDC Authorization header performs token validation accordingly", validateToken), func(t *testing.T) { - if tokenReviewed == true && !validateToken { - t.Errorf("Expected token review to be skipped when validate-token is false, but it was performed") - } - if tokenReviewed == false && 
validateToken { - t.Errorf("Expected token review to be performed when validate-token is true, but it was skipped") - } - }) - }) - } -} - -func TestAuthorizationOidcTokenExchange(t *testing.T) { - oidcTestServer := NewOidcTestServer(t) - t.Cleanup(oidcTestServer.Close) - rawClaims := `{ - "iss": "` + oidcTestServer.URL + `", - "exp": ` + strconv.FormatInt(time.Now().Add(time.Hour).Unix(), 10) + `, - "aud": "%s" - }` - validOidcClientToken := oidctest.SignIDToken(oidcTestServer.PrivateKey, "test-oidc-key-id", oidc.RS256, - fmt.Sprintf(rawClaims, "mcp-server")) - validOidcBackendToken := oidctest.SignIDToken(oidcTestServer.PrivateKey, "test-oidc-key-id", oidc.RS256, - fmt.Sprintf(rawClaims, "backend-audience")) - oidcTestServer.TokenEndpointHandler = func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - _, _ = fmt.Fprintf(w, `{"access_token":"%s","token_type":"Bearer","expires_in":253402297199}`, validOidcBackendToken) - } - cases := []bool{false, true} - for _, validateToken := range cases { - staticConfig := &config.StaticConfig{ - RequireOAuth: true, - OAuthAudience: "mcp-server", - ValidateToken: validateToken, - StsClientId: "test-sts-client-id", - StsClientSecret: "test-sts-client-secret", - StsAudience: "backend-audience", - StsScopes: []string{"backend-scope"}, - ClusterProviderStrategy: config.ClusterProviderKubeConfig, - } - testCaseWithContext(t, &httpContext{StaticConfig: staticConfig, OidcProvider: oidcTestServer.Provider}, func(ctx *httpContext) { - tokenReviewed := false - ctx.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - if req.URL.EscapedPath() == "/apis/authentication.k8s.io/v1/tokenreviews" { - w.Header().Set("Content-Type", "application/json") - _, _ = w.Write([]byte(tokenReviewSuccessful)) - tokenReviewed = true - return - } - })) - req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil) - if err != nil { - t.Fatalf("Failed to 
create request: %v", err) - } - req.Header.Set("Authorization", "Bearer "+validOidcClientToken) - resp, err := http.DefaultClient.Do(req) - if err != nil { - t.Fatalf("Failed to get protected endpoint: %v", err) - } - t.Cleanup(func() { _ = resp.Body.Close() }) - t.Run(fmt.Sprintf("Protected resource with validate-token='%t' with VALID OIDC EXCHANGE Authorization header returns 200 - OK", validateToken), func(t *testing.T) { - if resp.StatusCode != http.StatusOK { - t.Errorf("Expected HTTP 200 OK, got %d", resp.StatusCode) - } - }) - t.Run(fmt.Sprintf("Protected resource with validate-token='%t' with VALID OIDC EXCHANGE Authorization header performs token validation accordingly", validateToken), func(t *testing.T) { - if tokenReviewed == true && !validateToken { - t.Errorf("Expected token review to be skipped when validate-token is false, but it was performed") - } - if tokenReviewed == false && validateToken { - t.Errorf("Expected token review to be performed when validate-token is true, but it was skipped") - } - }) - }) - } -} diff --git a/pkg/kubernetes-mcp-server/cmd/root.go b/pkg/kubernetes-mcp-server/cmd/root.go index db1782ab..1c5b39e4 100644 --- a/pkg/kubernetes-mcp-server/cmd/root.go +++ b/pkg/kubernetes-mcp-server/cmd/root.go @@ -57,8 +57,6 @@ const ( flagVersion = "version" flagLogLevel = "log-level" flagConfig = "config" - flagSSEPort = "sse-port" - flagHttpPort = "http-port" flagPort = "port" flagSSEBaseUrl = "sse-base-url" flagKubeconfig = "kubeconfig" @@ -79,8 +77,6 @@ type MCPServerOptions struct { Version bool LogLevel int Port string - SSEPort int - HttpPort int SSEBaseUrl string Kubeconfig string Toolsets []string @@ -133,10 +129,6 @@ func NewMCPServer(streams genericiooptions.IOStreams) *cobra.Command { cmd.Flags().BoolVar(&o.Version, flagVersion, o.Version, "Print version information and quit") cmd.Flags().IntVar(&o.LogLevel, flagLogLevel, o.LogLevel, "Set the log level (from 0 to 9)") cmd.Flags().StringVar(&o.ConfigPath, flagConfig, 
o.ConfigPath, "Path of the config file.") - cmd.Flags().IntVar(&o.SSEPort, flagSSEPort, o.SSEPort, "Start a SSE server on the specified port") - cmd.Flag(flagSSEPort).Deprecated = "Use --port instead" - cmd.Flags().IntVar(&o.HttpPort, flagHttpPort, o.HttpPort, "Start a streamable HTTP server on the specified port") - cmd.Flag(flagHttpPort).Deprecated = "Use --port instead" cmd.Flags().StringVar(&o.Port, flagPort, o.Port, "Start a streamable HTTP and SSE HTTP server on the specified port (e.g. 8080)") cmd.Flags().StringVar(&o.SSEBaseUrl, flagSSEBaseUrl, o.SSEBaseUrl, "SSE public base URL to use when sending the endpoint message (e.g. https://example.com)") cmd.Flags().StringVar(&o.Kubeconfig, flagKubeconfig, o.Kubeconfig, "Path to the kubeconfig file to use for authentication") @@ -188,10 +180,6 @@ func (m *MCPServerOptions) loadFlags(cmd *cobra.Command) { } if cmd.Flag(flagPort).Changed { m.StaticConfig.Port = m.Port - } else if cmd.Flag(flagSSEPort).Changed { - m.StaticConfig.Port = strconv.Itoa(m.SSEPort) - } else if cmd.Flag(flagHttpPort).Changed { - m.StaticConfig.Port = strconv.Itoa(m.HttpPort) } if cmd.Flag(flagSSEBaseUrl).Changed { m.StaticConfig.SSEBaseURL = m.SSEBaseUrl @@ -253,9 +241,6 @@ func (m *MCPServerOptions) initializeLogging() { } func (m *MCPServerOptions) Validate() error { - if m.Port != "" && (m.SSEPort > 0 || m.HttpPort > 0) { - return fmt.Errorf("--port is mutually exclusive with deprecated --http-port and --sse-port flags") - } if output.FromString(m.StaticConfig.ListOutput) == nil { return fmt.Errorf("invalid output name: %s, valid names are: %s", m.StaticConfig.ListOutput, strings.Join(output.Names, ", ")) } diff --git a/pkg/kubernetes/accesscontrol_clientset.go b/pkg/kubernetes/accesscontrol_clientset.go index c36a9b7f..a6c3fccd 100644 --- a/pkg/kubernetes/accesscontrol_clientset.go +++ b/pkg/kubernetes/accesscontrol_clientset.go @@ -39,6 +39,14 @@ func (a *AccessControlClientset) DiscoveryClient() discovery.DiscoveryInterface return 
a.discoveryClient } +func (a *AccessControlClientset) Nodes() (corev1.NodeInterface, error) { + gvk := &schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Node"} + if !isAllowed(a.staticConfig, gvk) { + return nil, isNotAllowedError(gvk) + } + return a.delegate.CoreV1().Nodes(), nil +} + func (a *AccessControlClientset) NodesLogs(ctx context.Context, name string) (*rest.Request, error) { gvk := &schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Node"} if !isAllowed(a.staticConfig, gvk) { @@ -55,6 +63,29 @@ func (a *AccessControlClientset) NodesLogs(ctx context.Context, name string) (*r AbsPath(url...), nil } +func (a *AccessControlClientset) NodesMetricses(ctx context.Context, name string, listOptions metav1.ListOptions) (*metrics.NodeMetricsList, error) { + gvk := &schema.GroupVersionKind{Group: metrics.GroupName, Version: metricsv1beta1api.SchemeGroupVersion.Version, Kind: "NodeMetrics"} + if !isAllowed(a.staticConfig, gvk) { + return nil, isNotAllowedError(gvk) + } + versionedMetrics := &metricsv1beta1api.NodeMetricsList{} + var err error + if name != "" { + m, err := a.metricsV1beta1.NodeMetricses().Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to get metrics for node %s: %w", name, err) + } + versionedMetrics.Items = []metricsv1beta1api.NodeMetrics{*m} + } else { + versionedMetrics, err = a.metricsV1beta1.NodeMetricses().List(ctx, listOptions) + if err != nil { + return nil, fmt.Errorf("failed to list node metrics: %w", err) + } + } + convertedMetrics := &metrics.NodeMetricsList{} + return convertedMetrics, metricsv1beta1api.Convert_v1beta1_NodeMetricsList_To_metrics_NodeMetricsList(versionedMetrics, convertedMetrics, nil) +} + func (a *AccessControlClientset) NodesStatsSummary(ctx context.Context, name string) (*rest.Request, error) { gvk := &schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Node"} if !isAllowed(a.staticConfig, gvk) { diff --git a/pkg/kubernetes/nodes.go b/pkg/kubernetes/nodes.go 
index c53ef5b6..a4321a9f 100644 --- a/pkg/kubernetes/nodes.go +++ b/pkg/kubernetes/nodes.go @@ -2,7 +2,12 @@ package kubernetes import ( "context" + "errors" "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/metrics/pkg/apis/metrics" + metricsv1beta1api "k8s.io/metrics/pkg/apis/metrics/v1beta1" ) func (k *Kubernetes) NodesLog(ctx context.Context, name string, query string, tailLines int64) (string, error) { @@ -39,6 +44,7 @@ func (k *Kubernetes) NodesLog(ctx context.Context, name string, query string, ta func (k *Kubernetes) NodesStatsSummary(ctx context.Context, name string) (string, error) { // Use the node proxy API to access stats summary from the kubelet + // https://kubernetes.io/docs/reference/instrumentation/understand-psi-metrics/ // This endpoint provides CPU, memory, filesystem, and network statistics req, err := k.AccessControlClientset().NodesStatsSummary(ctx, name) @@ -58,3 +64,16 @@ func (k *Kubernetes) NodesStatsSummary(ctx context.Context, name string) (string return string(rawData), nil } + +type NodesTopOptions struct { + metav1.ListOptions + Name string +} + +func (k *Kubernetes) NodesTop(ctx context.Context, options NodesTopOptions) (*metrics.NodeMetricsList, error) { + // TODO, maybe move to mcp Tools setup and omit in case metrics aren't available in the target cluster + if !k.supportsGroupVersion(metrics.GroupName + "/" + metricsv1beta1api.SchemeGroupVersion.Version) { + return nil, errors.New("metrics API is not available") + } + return k.manager.accessControlClientSet.NodesMetricses(ctx, options.Name, options.ListOptions) +} diff --git a/pkg/kubernetes/provider_acm_hub.go b/pkg/kubernetes/provider_acm_hub.go deleted file mode 100644 index 6db2faa0..00000000 --- a/pkg/kubernetes/provider_acm_hub.go +++ /dev/null @@ -1,513 +0,0 @@ -package kubernetes - -import ( - "context" - "errors" - "fmt" - "path/filepath" - "time" - - authenticationv1api "k8s.io/api/authentication/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - 
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/discovery/cached/memory" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/rest" - "k8s.io/client-go/restmapper" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/klog/v2" - - "github.com/BurntSushi/toml" - "github.com/containers/kubernetes-mcp-server/pkg/config" -) - -const ( - ACMHubTargetParameterName = "cluster" - ClusterProviderACM = "acm" - ClusterProviderACMKubeConfig = "acm-kubeconfig" -) - -// ACMProviderConfig holds ACM-specific configuration that users can set in config.toml -type ACMProviderConfig struct { - // The host for the ACM cluster proxy addon - // Optional: If not provided, will auto-discover the cluster-proxy-addon-user OCP route - // If using the acm-kubeconfig strategy, this should be the route hostname for the proxy - // If using the acm strategy, this should be the service name for the proxy - ClusterProxyAddonHost string `toml:"cluster_proxy_addon_host,omitempty"` - - // Whether to skip verifying the TLS certs from the cluster proxy - ClusterProxyAddonSkipTLSVerify bool `toml:"cluster_proxy_addon_skip_tls_verify"` - - // The CA file for the cluster proxy addon - ClusterProxyAddonCAFile string `toml:"cluster_proxy_addon_ca_file,omitempty"` -} - -func (c *ACMProviderConfig) Validate() error { - var err error = nil - if !c.ClusterProxyAddonSkipTLSVerify && c.ClusterProxyAddonCAFile == "" { - err = errors.Join(err, fmt.Errorf("cluster_proxy_addon_ca_file is required if tls verification is not disabled")) - } - - return err -} - -func (c *ACMProviderConfig) ResolveClusterProxyAddonCAFilePath(ctx context.Context) { - path := config.ConfigDirPathFromContext(ctx) - c.ClusterProxyAddonCAFile = filepath.Join(path, c.ClusterProxyAddonCAFile) -} - -type ACMKubeConfigProviderConfig struct { - ACMProviderConfig - - // Name of the context in the kubeconfig file to look for acm access credentials in. 
- // Should point to the "hub" cluster. - ContextName string `toml:"context_name,omitempty"` -} - -func (c *ACMKubeConfigProviderConfig) Validate() error { - err := c.ACMProviderConfig.Validate() - - if c.ContextName == "" { - err = errors.Join(err, fmt.Errorf("context_name is required if acm-kubeconfig strategy is used")) - } - - return err -} - -func parseAcmConfig(ctx context.Context, primitive toml.Primitive, md toml.MetaData) (config.ProviderConfig, error) { - cfg := &ACMProviderConfig{} - if err := md.PrimitiveDecode(primitive, cfg); err != nil { - return nil, err - } - - cfg.ResolveClusterProxyAddonCAFilePath(ctx) - - return cfg, nil -} - -func parseAcmKubeConfigConfig(ctx context.Context, primitive toml.Primitive, md toml.MetaData) (config.ProviderConfig, error) { - cfg := &ACMKubeConfigProviderConfig{} - if err := md.PrimitiveDecode(primitive, &cfg); err != nil { - return nil, err - } - - cfg.ResolveClusterProxyAddonCAFilePath(ctx) - - return cfg, nil -} - -type acmHubClusterProvider struct { - hubManager *Manager // for the main "hub" cluster - clusterManagers map[string]*Manager - clusters []string - clusterProxyHost string - skipTLSVerify bool - clusterProxyCAFile string - watchKubeConfig bool // whether or not the kubeconfig should be watched for changes - - // Context for cancelling the watch goroutine - watchCtx context.Context - watchCancel context.CancelFunc - watchStarted bool // Track if watch is already running - - // Resource version from last list operation to use for watch - lastResourceVersion string -} - -var _ Provider = &acmHubClusterProvider{} - -func init() { - RegisterProvider(ClusterProviderACM, newACMHubClusterProvider) - RegisterProvider(ClusterProviderACMKubeConfig, newACMKubeConfigClusterProvider) - - config.RegisterProviderConfig(ClusterProviderACM, parseAcmConfig) - config.RegisterProviderConfig(ClusterProviderACMKubeConfig, parseAcmKubeConfigConfig) -} - -// IsACMHub checks if the current cluster is an ACM hub by looking for 
ACM CRDs -// This is included here instead of in other files so that it doesn't create conflicts -// with upstream changes -func (m *Manager) IsACMHub() bool { - discoveryClient, err := m.ToDiscoveryClient() - if err != nil { - klog.V(3).Infof("failed to get discovery client for ACM detection: %v", err) - return false - } - - _, apiLists, err := discoveryClient.ServerGroupsAndResources() - if err != nil { - klog.V(3).Infof("failed to discover server resources for ACM detection: %v", err) - return false - } - - for _, apiList := range apiLists { - if apiList.GroupVersion == "cluster.open-cluster-management.io/v1" { - for _, resource := range apiList.APIResources { - if resource.Kind == "ManagedCluster" { - klog.V(2).Info("Detected ACM hub cluster") - return true - } - } - } - } - - return false -} - -func newACMHubClusterProvider(cfg *config.StaticConfig) (Provider, error) { - m, err := NewInClusterManager(cfg) - if err != nil { - return nil, fmt.Errorf("failed to create in-cluster Kubernetes Manager for acm-hub cluster provider strategy: %w", err) - } - - providerCfg, ok := cfg.GetProviderConfig(ClusterProviderACM) - if !ok { - return nil, fmt.Errorf("missing required config for strategy '%s'", ClusterProviderACM) - } - - return newACMClusterProvider(m, providerCfg.(*ACMProviderConfig), false) -} - -func newACMKubeConfigClusterProvider(cfg *config.StaticConfig) (Provider, error) { - providerCfg, ok := cfg.GetProviderConfig(ClusterProviderACMKubeConfig) - if !ok { - return nil, fmt.Errorf("missing required config for strategy '%s'", ClusterProviderACMKubeConfig) - } - - acmKubeConfigProviderCfg := providerCfg.(*ACMKubeConfigProviderConfig) - baseManager, err := NewKubeconfigManager(cfg, acmKubeConfigProviderCfg.ContextName) - if err != nil { - return nil, fmt.Errorf( - "failed to create manager to hub cluster specified by acm_context_name %s: %w", - acmKubeConfigProviderCfg.ContextName, - err, - ) - } - - return newACMClusterProvider(baseManager, 
&acmKubeConfigProviderCfg.ACMProviderConfig, true) -} - -func discoverClusterProxyHost(m *Manager, isOpenShift bool) (string, error) { - ctx := context.Background() - - // Try to discover the cluster-proxy route (OpenShift) or service (vanilla Kubernetes) - if isOpenShift { - // Try OpenShift Route in multicluster-engine namespace - routeGVR := schema.GroupVersionResource{ - Group: "route.openshift.io", - Version: "v1", - Resource: "routes", - } - - route, err := m.dynamicClient.Resource(routeGVR).Namespace("multicluster-engine").Get(ctx, "cluster-proxy-addon-user", metav1.GetOptions{}) - if err == nil { - host, found, err := unstructured.NestedString(route.Object, "spec", "host") - if err == nil && found && host != "" { - klog.V(2).Infof("Auto-discovered cluster-proxy route: %s", host) - return host, nil - } - } - } - - // Fallback: Try to find the service - svcClient, err := m.accessControlClientSet.Services("multicluster-engine") - if err != nil { - return "", fmt.Errorf("failed to get services client: %w", err) - } - - svc, err := svcClient.Get(ctx, "cluster-proxy-addon-user", metav1.GetOptions{}) - if err == nil { - host := fmt.Sprintf("%s.%s.svc.cluster.local", svc.Name, svc.Namespace) - klog.V(2).Infof("Auto-discovered cluster-proxy service: %s", host) - return host, nil - } - - return "", fmt.Errorf("failed to auto-discover cluster-proxy host: route and service not found") -} - -func newACMClusterProvider(m *Manager, cfg *ACMProviderConfig, watchKubeConfig bool) (Provider, error) { - if !m.IsACMHub() { - return nil, fmt.Errorf("not deployed in an ACM hub cluster") - } - - // Auto-discover cluster-proxy host if not provided - clusterProxyHost := cfg.ClusterProxyAddonHost - if clusterProxyHost == "" { - ctx := context.Background() - isOpenShift := m.IsOpenShift(ctx) - discoveredHost, err := discoverClusterProxyHost(m, isOpenShift) - if err != nil { - return nil, fmt.Errorf("cluster_proxy_addon_host not provided and auto-discovery failed: %w", err) - } - 
clusterProxyHost = discoveredHost - klog.V(1).Infof("Using auto-discovered cluster-proxy host: %s", clusterProxyHost) - } - - // Create cancellable context for the watch goroutine - watchCtx, watchCancel := context.WithCancel(context.Background()) - - provider := &acmHubClusterProvider{ - hubManager: m, - clusterManagers: make(map[string]*Manager), - watchKubeConfig: watchKubeConfig, - watchCtx: watchCtx, - watchCancel: watchCancel, - clusterProxyHost: clusterProxyHost, - clusterProxyCAFile: cfg.ClusterProxyAddonCAFile, - skipTLSVerify: cfg.ClusterProxyAddonSkipTLSVerify, - } - - ctx := context.Background() - if err := provider.refreshClusters(ctx); err != nil { - klog.Warningf("Failed to discover managed clusters: %v", err) - } - - klog.V(2).Infof("ACM hub provider initialized with %d managed clusters", len(provider.clusters)) - return provider, nil -} - -func (p *acmHubClusterProvider) IsOpenShift(ctx context.Context) bool { - return p.hubManager.IsOpenShift(ctx) -} - -func (p *acmHubClusterProvider) VerifyToken(ctx context.Context, target, token, audience string) (*authenticationv1api.UserInfo, []string, error) { - manager, err := p.managerForCluster(target) - if err != nil { - return nil, nil, fmt.Errorf("failed to get manager for cluster '%s', unable to verify token", target) - } - return manager.VerifyToken(ctx, token, audience) -} - -func (p *acmHubClusterProvider) GetDerivedKubernetes(ctx context.Context, target string) (*Kubernetes, error) { - if target == "" || target == "hub" { - return p.hubManager.Derived(ctx) - } - - manager, err := p.managerForCluster(target) - if err != nil { - return nil, err - } - - return manager.Derived(ctx) -} - -func (p *acmHubClusterProvider) GetDefaultTarget() string { - return "hub" -} - -func (p *acmHubClusterProvider) GetTargets(_ context.Context) ([]string, error) { - return p.clusters, nil -} - -func (p *acmHubClusterProvider) GetTargetParameterName() string { - return ACMHubTargetParameterName -} - -func (p 
*acmHubClusterProvider) WatchTargets(onTargetsChanged func() error) { - if p.watchKubeConfig { - p.hubManager.WatchKubeConfig(onTargetsChanged) - } - - // Only start watch if not already running - if !p.watchStarted { - p.watchStarted = true - go p.watchManagedClusters(onTargetsChanged) - } -} - -func (p *acmHubClusterProvider) Close() { - // Cancel the watch goroutine first - if p.watchCancel != nil { - p.watchCancel() - } - - // Reset watch state - p.watchStarted = false - - p.hubManager.Close() - - for _, manager := range p.clusterManagers { - if manager != nil { - manager.Close() - } - } -} - -func (p *acmHubClusterProvider) watchManagedClusters(onTargetsChanged func() error) { - gvr := schema.GroupVersionResource{ - Group: "cluster.open-cluster-management.io", - Version: "v1", - Resource: "managedclusters", - } - - // Exponential backoff configuration - const ( - initialDelay = 1 * time.Second - maxDelay = 5 * time.Minute - backoffRate = 2.0 - ) - - delay := initialDelay - - for { - // Check if the context has been cancelled before starting a new watch - select { - case <-p.watchCtx.Done(): - klog.V(2).Info("Watch goroutine cancelled, exiting") - return - default: - } - - watchInterface, err := p.hubManager.dynamicClient.Resource(gvr).Watch(p.watchCtx, metav1.ListOptions{ - ResourceVersion: p.lastResourceVersion, - }) - if err != nil { - klog.Errorf("Failed to start watch on managed clusters: %v", err) - - // Apply exponential backoff - klog.V(2).Infof("Waiting %v before retrying watch", delay) - time.Sleep(delay) - - // Increase delay for next retry, but cap at maxDelay - delay = time.Duration(float64(delay) * backoffRate) - delay = min(delay, maxDelay) - continue - } - - // Reset delay on successful watch start - delay = initialDelay - klog.V(2).Info("Started watching managed clusters for changes") - - for event := range watchInterface.ResultChan() { - switch event.Type { - case watch.Added, watch.Deleted, watch.Modified: - clusterName := "unknown" - if obj, 
ok := event.Object.(*unstructured.Unstructured); ok { - clusterName = obj.GetName() - } - klog.V(3).Infof("Managed cluster %s: %s", event.Type, clusterName) - - // Notify about target changes - if err := onTargetsChanged(); err != nil { - klog.Warningf("Error in onTargetsChanged callback: %v", err) - } - } - } - - // Clean up the watch interface before restarting - watchInterface.Stop() - klog.Warning("Managed clusters watch closed, restarting...") - // Don't reset delay here since this could be due to connectivity issues - } -} - -func (p *acmHubClusterProvider) refreshClusters(ctx context.Context) error { - dynamicClient := p.hubManager.dynamicClient - - gvr := schema.GroupVersionResource{ - Group: "cluster.open-cluster-management.io", - Version: "v1", - Resource: "managedclusters", - } - - result, err := dynamicClient.Resource(gvr).List(ctx, metav1.ListOptions{}) - if err != nil { - return fmt.Errorf("failed to list cluster managers: %w", err) - } - - clusters := make([]string, 0, len(result.Items)) - for _, item := range result.Items { - name := item.GetName() - if name != "" { - clusters = append(clusters, name) - } - } - - p.clusters = clusters - p.lastResourceVersion = result.GetResourceVersion() - klog.V(3).Infof("discovered %d managed clusters: %v (resourceVersion: %s)", len(clusters), clusters, p.lastResourceVersion) - - return nil -} - -func (p *acmHubClusterProvider) managerForCluster(cluster string) (*Manager, error) { - if manager, exists := p.clusterManagers[cluster]; exists && manager != nil { - return manager, nil - } - - proxyConfig := rest.CopyConfig(p.hubManager.cfg) - proxyHost := fmt.Sprintf("https://%s/%s", p.clusterProxyHost, cluster) - proxyConfig.Host = proxyHost - - if p.skipTLSVerify { - proxyConfig.TLSClientConfig = rest.TLSClientConfig{ - Insecure: true, - } - } else { - proxyConfig.TLSClientConfig = rest.TLSClientConfig{ - CAFile: p.clusterProxyCAFile, - } - } - - // Create modified clientCmdConfig to match the proxy configuration - 
hubRawConfig, err := p.hubManager.clientCmdConfig.RawConfig() - if err != nil { - return nil, fmt.Errorf("failed to get hub kubeconfig: %w", err) - } - - // Create a copy and modify the server URL to match the proxy - proxyRawConfig := hubRawConfig.DeepCopy() - - // Update all clusters in the config to use the proxy host - for _, clusterConfig := range proxyRawConfig.Clusters { - clusterConfig.Server = proxyHost - if p.skipTLSVerify { - clusterConfig.InsecureSkipTLSVerify = true - clusterConfig.CertificateAuthority = "" - clusterConfig.CertificateAuthorityData = nil - } else { - clusterConfig.CertificateAuthority = p.clusterProxyCAFile - clusterConfig.CertificateAuthorityData = nil - clusterConfig.InsecureSkipTLSVerify = false - } - } - - manager := &Manager{ - cfg: proxyConfig, - staticConfig: p.hubManager.staticConfig, - clientCmdConfig: clientcmd.NewDefaultClientConfig(*proxyRawConfig, nil), - } - - if err := p.initializeManager(manager); err != nil { - return nil, fmt.Errorf("failed to initialize manager for cluster %s: %w", cluster, err) - } - - // Cache the manager before returning - p.clusterManagers[cluster] = manager - return manager, nil -} - -func (p *acmHubClusterProvider) initializeManager(m *Manager) error { - var err error - - m.accessControlClientSet, err = NewAccessControlClientset(m.cfg, m.staticConfig) - if err != nil { - return err - } - - m.discoveryClient = memory.NewMemCacheClient(m.accessControlClientSet.DiscoveryClient()) - - m.accessControlRESTMapper = NewAccessControlRESTMapper( - restmapper.NewDeferredDiscoveryRESTMapper(m.discoveryClient), - m.staticConfig, - ) - - m.dynamicClient, err = dynamic.NewForConfig(m.cfg) - if err != nil { - return err - } - - return nil -} diff --git a/pkg/kubernetes/provider_kubeconfig_test.go b/pkg/kubernetes/provider_kubeconfig_test.go index 17984990..33ba60d6 100644 --- a/pkg/kubernetes/provider_kubeconfig_test.go +++ b/pkg/kubernetes/provider_kubeconfig_test.go @@ -2,7 +2,6 @@ package kubernetes import ( 
"fmt" - "net/http" "testing" "github.com/containers/kubernetes-mcp-server/internal/test" @@ -57,25 +56,8 @@ func (s *ProviderKubeconfigTestSuite) TestWithOpenShiftCluster() { } func (s *ProviderKubeconfigTestSuite) TestVerifyToken() { - s.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - if req.URL.EscapedPath() == "/apis/authentication.k8s.io/v1/tokenreviews" { - w.Header().Set("Content-Type", "application/json") - _, _ = w.Write([]byte(` - { - "kind": "TokenReview", - "apiVersion": "authentication.k8s.io/v1", - "spec": {"token": "the-token"}, - "status": { - "authenticated": true, - "user": { - "username": "test-user", - "groups": ["system:authenticated"] - }, - "audiences": ["the-audience"] - } - }`)) - } - })) + s.mockServer.Handle(&test.TokenReviewHandler{}) + s.Run("VerifyToken returns UserInfo for non-empty context", func() { userInfo, audiences, err := s.provider.VerifyToken(s.T().Context(), "fake-context", "some-token", "the-audience") s.Require().NoError(err, "Expected no error from VerifyToken with empty target") diff --git a/pkg/kubernetes/provider_single_test.go b/pkg/kubernetes/provider_single_test.go index ff03e26c..150926b4 100644 --- a/pkg/kubernetes/provider_single_test.go +++ b/pkg/kubernetes/provider_single_test.go @@ -1,7 +1,6 @@ package kubernetes import ( - "net/http" "testing" "github.com/containers/kubernetes-mcp-server/internal/test" @@ -50,6 +49,7 @@ func (s *ProviderSingleTestSuite) TestWithNonOpenShiftCluster() { func (s *ProviderSingleTestSuite) TestWithOpenShiftCluster() { s.mockServer.Handle(&test.InOpenShiftHandler{}) + s.Run("IsOpenShift returns true", func() { inOpenShift := s.provider.IsOpenShift(s.T().Context()) s.True(inOpenShift, "Expected InOpenShift to return true") @@ -57,25 +57,8 @@ func (s *ProviderSingleTestSuite) TestWithOpenShiftCluster() { } func (s *ProviderSingleTestSuite) TestVerifyToken() { - s.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - 
if req.URL.EscapedPath() == "/apis/authentication.k8s.io/v1/tokenreviews" { - w.Header().Set("Content-Type", "application/json") - _, _ = w.Write([]byte(` - { - "kind": "TokenReview", - "apiVersion": "authentication.k8s.io/v1", - "spec": {"token": "the-token"}, - "status": { - "authenticated": true, - "user": { - "username": "test-user", - "groups": ["system:authenticated"] - }, - "audiences": ["the-audience"] - } - }`)) - } - })) + s.mockServer.Handle(&test.TokenReviewHandler{}) + s.Run("VerifyToken returns UserInfo for empty target (default target)", func() { userInfo, audiences, err := s.provider.VerifyToken(s.T().Context(), "", "the-token", "the-audience") s.Require().NoError(err, "Expected no error from VerifyToken with empty target") diff --git a/pkg/mcp/common_test.go b/pkg/mcp/common_test.go index 86f2e8d6..b91df691 100644 --- a/pkg/mcp/common_test.go +++ b/pkg/mcp/common_test.go @@ -1,23 +1,16 @@ package mcp import ( - "bytes" "context" "encoding/json" - "flag" "fmt" - "net/http/httptest" "os" "path/filepath" "runtime" - "strconv" "testing" "time" - "github.com/mark3labs/mcp-go/client" "github.com/mark3labs/mcp-go/client/transport" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" "github.com/pkg/errors" "github.com/spf13/afero" "github.com/stretchr/testify/suite" @@ -30,11 +23,7 @@ import ( "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" toolswatch "k8s.io/client-go/tools/watch" - "k8s.io/klog/v2" - "k8s.io/klog/v2/textlogger" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/controller-runtime/tools/setup-envtest/env" @@ -45,7 +34,6 @@ import ( "github.com/containers/kubernetes-mcp-server/internal/test" "github.com/containers/kubernetes-mcp-server/pkg/config" - "github.com/containers/kubernetes-mcp-server/pkg/output" ) // envTest has an expensive setup, so we only want to do 
it once per entire test run. @@ -103,251 +91,6 @@ func TestMain(m *testing.M) { os.Exit(code) } -type mcpContext struct { - toolsets []string - listOutput output.Output - logLevel int - - staticConfig *config.StaticConfig - clientOptions []transport.ClientOption - before func(*mcpContext) - after func(*mcpContext) - ctx context.Context - tempDir string - cancel context.CancelFunc - mcpServer *Server - mcpHttpServer *httptest.Server - mcpClient *client.Client - klogState klog.State - logBuffer bytes.Buffer -} - -func (c *mcpContext) beforeEach(t *testing.T) { - var err error - c.ctx, c.cancel = context.WithCancel(t.Context()) - c.tempDir = t.TempDir() - c.withKubeConfig(nil) - if c.staticConfig == nil { - c.staticConfig = config.Default() - // Default to use YAML output for lists (previously the default) - c.staticConfig.ListOutput = "yaml" - } - if c.toolsets != nil { - c.staticConfig.Toolsets = c.toolsets - - } - if c.listOutput != nil { - c.staticConfig.ListOutput = c.listOutput.GetName() - } - if c.before != nil { - c.before(c) - } - // Set up logging - c.klogState = klog.CaptureState() - flags := flag.NewFlagSet("test", flag.ContinueOnError) - klog.InitFlags(flags) - _ = flags.Set("v", strconv.Itoa(c.logLevel)) - klog.SetLogger(textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(c.logLevel), textlogger.Output(&c.logBuffer)))) - // MCP Server - if c.mcpServer, err = NewServer(Configuration{StaticConfig: c.staticConfig}); err != nil { - t.Fatal(err) - return - } - c.mcpHttpServer = server.NewTestServer(c.mcpServer.server, server.WithSSEContextFunc(contextFunc)) - if c.mcpClient, err = client.NewSSEMCPClient(c.mcpHttpServer.URL+"/sse", c.clientOptions...); err != nil { - t.Fatal(err) - return - } - // MCP Client - if err = c.mcpClient.Start(c.ctx); err != nil { - t.Fatal(err) - return - } - initRequest := mcp.InitializeRequest{} - initRequest.Params.ProtocolVersion = mcp.LATEST_PROTOCOL_VERSION - initRequest.Params.ClientInfo = mcp.Implementation{Name: 
"test", Version: "1.33.7"} - _, err = c.mcpClient.Initialize(c.ctx, initRequest) - if err != nil { - t.Fatal(err) - return - } -} - -func (c *mcpContext) afterEach() { - if c.after != nil { - c.after(c) - } - c.cancel() - c.mcpServer.Close() - _ = c.mcpClient.Close() - c.mcpHttpServer.Close() - c.klogState.Restore() -} - -func testCase(t *testing.T, test func(c *mcpContext)) { - testCaseWithContext(t, &mcpContext{}, test) -} - -func testCaseWithContext(t *testing.T, mcpCtx *mcpContext, test func(c *mcpContext)) { - mcpCtx.beforeEach(t) - defer mcpCtx.afterEach() - test(mcpCtx) -} - -// withKubeConfig sets up a fake kubeconfig in the temp directory based on the provided rest.Config -func (c *mcpContext) withKubeConfig(rc *rest.Config) *clientcmdapi.Config { - fakeConfig := clientcmdapi.NewConfig() - fakeConfig.Clusters["fake"] = clientcmdapi.NewCluster() - fakeConfig.Clusters["fake"].Server = "https://127.0.0.1:6443" - fakeConfig.Clusters["additional-cluster"] = clientcmdapi.NewCluster() - fakeConfig.AuthInfos["fake"] = clientcmdapi.NewAuthInfo() - fakeConfig.AuthInfos["additional-auth"] = clientcmdapi.NewAuthInfo() - if rc != nil { - fakeConfig.Clusters["fake"].Server = rc.Host - fakeConfig.Clusters["fake"].CertificateAuthorityData = rc.CAData - fakeConfig.AuthInfos["fake"].ClientKeyData = rc.KeyData - fakeConfig.AuthInfos["fake"].ClientCertificateData = rc.CertData - } - fakeConfig.Contexts["fake-context"] = clientcmdapi.NewContext() - fakeConfig.Contexts["fake-context"].Cluster = "fake" - fakeConfig.Contexts["fake-context"].AuthInfo = "fake" - fakeConfig.Contexts["additional-context"] = clientcmdapi.NewContext() - fakeConfig.Contexts["additional-context"].Cluster = "additional-cluster" - fakeConfig.Contexts["additional-context"].AuthInfo = "additional-auth" - fakeConfig.CurrentContext = "fake-context" - kubeConfig := filepath.Join(c.tempDir, "config") - _ = clientcmd.WriteToFile(*fakeConfig, kubeConfig) - _ = os.Setenv("KUBECONFIG", kubeConfig) - if c.mcpServer 
!= nil { - if err := c.mcpServer.reloadKubernetesClusterProvider(); err != nil { - panic(err) - } - } - return fakeConfig -} - -// withEnvTest sets up the environment for kubeconfig to be used with envTest -func (c *mcpContext) withEnvTest() { - c.withKubeConfig(envTestRestConfig) -} - -// inOpenShift sets up the kubernetes environment to seem to be running OpenShift -func inOpenShift(c *mcpContext) { - c.withEnvTest() - crdTemplate := ` - { - "apiVersion": "apiextensions.k8s.io/v1", - "kind": "CustomResourceDefinition", - "metadata": {"name": "%s"}, - "spec": { - "group": "%s", - "versions": [{ - "name": "v1","served": true,"storage": true, - "schema": {"openAPIV3Schema": {"type": "object","x-kubernetes-preserve-unknown-fields": true}} - }], - "scope": "%s", - "names": {"plural": "%s","singular": "%s","kind": "%s"} - } - }` - tasks, _ := errgroup.WithContext(c.ctx) - tasks.Go(func() error { - return c.crdApply(fmt.Sprintf(crdTemplate, "projects.project.openshift.io", "project.openshift.io", - "Cluster", "projects", "project", "Project")) - }) - tasks.Go(func() error { - return c.crdApply(fmt.Sprintf(crdTemplate, "routes.route.openshift.io", "route.openshift.io", - "Namespaced", "routes", "route", "Route")) - }) - if err := tasks.Wait(); err != nil { - panic(err) - } -} - -// inOpenShiftClear clears the kubernetes environment so it no longer seems to be running OpenShift -func inOpenShiftClear(c *mcpContext) { - tasks, _ := errgroup.WithContext(c.ctx) - tasks.Go(func() error { return c.crdDelete("projects.project.openshift.io") }) - tasks.Go(func() error { return c.crdDelete("routes.route.openshift.io") }) - if err := tasks.Wait(); err != nil { - panic(err) - } -} - -// newKubernetesClient creates a new Kubernetes client with the envTest kubeconfig -func (c *mcpContext) newKubernetesClient() *kubernetes.Clientset { - return kubernetes.NewForConfigOrDie(envTestRestConfig) -} - -// newApiExtensionsClient creates a new ApiExtensions client with the envTest kubeconfig 
-func (c *mcpContext) newApiExtensionsClient() *apiextensionsv1.ApiextensionsV1Client { - return apiextensionsv1.NewForConfigOrDie(envTestRestConfig) -} - -// crdApply creates a CRD from the provided resource string and waits for it to be established -func (c *mcpContext) crdApply(resource string) error { - apiExtensionsV1Client := c.newApiExtensionsClient() - var crd = &apiextensionsv1spec.CustomResourceDefinition{} - err := json.Unmarshal([]byte(resource), crd) - if err != nil { - return fmt.Errorf("failed to create CRD %v", err) - } - _, err = apiExtensionsV1Client.CustomResourceDefinitions().Create(c.ctx, crd, metav1.CreateOptions{}) - if err != nil { - return fmt.Errorf("failed to create CRD %v", err) - } - c.crdWaitUntilReady(crd.Name) - return nil -} - -// crdDelete deletes a CRD by name and waits for it to be removed -func (c *mcpContext) crdDelete(name string) error { - apiExtensionsV1Client := c.newApiExtensionsClient() - err := apiExtensionsV1Client.CustomResourceDefinitions().Delete(c.ctx, name, metav1.DeleteOptions{ - GracePeriodSeconds: ptr.To(int64(0)), - }) - iteration := 0 - for iteration < 100 { - if _, derr := apiExtensionsV1Client.CustomResourceDefinitions().Get(c.ctx, name, metav1.GetOptions{}); derr != nil { - break - } - time.Sleep(5 * time.Millisecond) - iteration++ - } - if err != nil { - return errors.Wrap(err, "failed to delete CRD") - } - return nil -} - -// crdWaitUntilReady waits for a CRD to be established -func (c *mcpContext) crdWaitUntilReady(name string) { - watcher, err := c.newApiExtensionsClient().CustomResourceDefinitions().Watch(c.ctx, metav1.ListOptions{ - FieldSelector: "metadata.name=" + name, - }) - if err != nil { - panic(fmt.Errorf("failed to watch CRD %v", err)) - } - _, err = toolswatch.UntilWithoutRetry(c.ctx, watcher, func(event watch.Event) (bool, error) { - for _, c := range event.Object.(*apiextensionsv1spec.CustomResourceDefinition).Status.Conditions { - if c.Type == apiextensionsv1spec.Established && c.Status 
== apiextensionsv1spec.ConditionTrue { - return true, nil - } - } - return false, nil - }) - if err != nil { - panic(fmt.Errorf("failed to wait for CRD %v", err)) - } -} - -// callTool helper function to call a tool by name with arguments -func (c *mcpContext) callTool(name string, args map[string]interface{}) (*mcp.CallToolResult, error) { - callToolRequest := mcp.CallToolRequest{} - callToolRequest.Params.Name = name - callToolRequest.Params.Arguments = args - return c.mcpClient.CallTool(c.ctx, callToolRequest) -} - func restoreAuth(ctx context.Context) { kubernetesAdmin := kubernetes.NewForConfigOrDie(envTest.Config) // Authorization @@ -449,3 +192,98 @@ func (s *BaseMcpSuite) InitMcpClient(options ...transport.StreamableHTTPCOption) s.Require().NoError(err, "Expected no error creating MCP server") s.McpClient = test.NewMcpClient(s.T(), s.mcpServer.ServeHTTP(nil), options...) } + +// EnvTestInOpenShift sets up the kubernetes environment to seem to be running OpenShift +func EnvTestInOpenShift(ctx context.Context) error { + crdTemplate := ` + { + "apiVersion": "apiextensions.k8s.io/v1", + "kind": "CustomResourceDefinition", + "metadata": {"name": "%s"}, + "spec": { + "group": "%s", + "versions": [{ + "name": "v1","served": true,"storage": true, + "schema": {"openAPIV3Schema": {"type": "object","x-kubernetes-preserve-unknown-fields": true}} + }], + "scope": "%s", + "names": {"plural": "%s","singular": "%s","kind": "%s"} + } + }` + tasks, _ := errgroup.WithContext(ctx) + tasks.Go(func() error { + return EnvTestCrdApply(ctx, fmt.Sprintf(crdTemplate, "projects.project.openshift.io", "project.openshift.io", + "Cluster", "projects", "project", "Project")) + }) + tasks.Go(func() error { + return EnvTestCrdApply(ctx, fmt.Sprintf(crdTemplate, "routes.route.openshift.io", "route.openshift.io", + "Namespaced", "routes", "route", "Route")) + }) + return tasks.Wait() +} + +// EnvTestInOpenShiftClear clears the kubernetes environment so it no longer seems to be running 
 OpenShift +func EnvTestInOpenShiftClear(ctx context.Context) error { + tasks, _ := errgroup.WithContext(ctx) + tasks.Go(func() error { return EnvTestCrdDelete(ctx, "projects.project.openshift.io") }) + tasks.Go(func() error { return EnvTestCrdDelete(ctx, "routes.route.openshift.io") }) + return tasks.Wait() +} + +// EnvTestCrdWaitUntilReady waits for a CRD to be established +func EnvTestCrdWaitUntilReady(ctx context.Context, name string) error { + apiExtensionClient := apiextensionsv1.NewForConfigOrDie(envTestRestConfig) + watcher, err := apiExtensionClient.CustomResourceDefinitions().Watch(ctx, metav1.ListOptions{ + FieldSelector: "metadata.name=" + name, + }) + if err != nil { + return fmt.Errorf("unable to watch CRDs: %w", err) + } + _, err = toolswatch.UntilWithoutRetry(ctx, watcher, func(event watch.Event) (bool, error) { + for _, c := range event.Object.(*apiextensionsv1spec.CustomResourceDefinition).Status.Conditions { + if c.Type == apiextensionsv1spec.Established && c.Status == apiextensionsv1spec.ConditionTrue { + return true, nil + } + } + return false, nil + }) + if err != nil { + return fmt.Errorf("failed to wait for CRD: %w", err) + } + return nil +} + +// EnvTestCrdApply creates a CRD from the provided resource string and waits for it to be established +func EnvTestCrdApply(ctx context.Context, resource string) error { + apiExtensionsV1Client := apiextensionsv1.NewForConfigOrDie(envTestRestConfig) + var crd = &apiextensionsv1spec.CustomResourceDefinition{} + err := json.Unmarshal([]byte(resource), crd) + if err != nil { + return fmt.Errorf("failed to create CRD %v", err) + } + _, err = apiExtensionsV1Client.CustomResourceDefinitions().Create(ctx, crd, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create CRD %v", err) + } + return EnvTestCrdWaitUntilReady(ctx, crd.Name) +} + +// EnvTestCrdDelete deletes a CRD by name and waits for it to be removed +func EnvTestCrdDelete(ctx context.Context, name string) error { + 
apiExtensionsV1Client := apiextensionsv1.NewForConfigOrDie(envTestRestConfig) + err := apiExtensionsV1Client.CustomResourceDefinitions().Delete(ctx, name, metav1.DeleteOptions{ + GracePeriodSeconds: ptr.To(int64(0)), + }) + iteration := 0 + for iteration < 100 { + if _, derr := apiExtensionsV1Client.CustomResourceDefinitions().Get(ctx, name, metav1.GetOptions{}); derr != nil { + break + } + time.Sleep(5 * time.Millisecond) + iteration++ + } + if err != nil { + return errors.Wrap(err, "failed to delete CRD") + } + return nil +} diff --git a/pkg/mcp/mcp.go b/pkg/mcp/mcp.go index f64d4104..5f7511cc 100644 --- a/pkg/mcp/mcp.go +++ b/pkg/mcp/mcp.go @@ -73,8 +73,6 @@ type Server struct { func NewServer(configuration Configuration) (*Server, error) { var serverOptions []server.ServerOption serverOptions = append(serverOptions, - server.WithResourceCapabilities(true, true), - server.WithPromptCapabilities(true), server.WithToolCapabilities(true), server.WithLogging(), server.WithToolHandlerMiddleware(toolCallLoggingMiddleware), diff --git a/pkg/mcp/mcp_middleware_test.go b/pkg/mcp/mcp_middleware_test.go index 987bfe4f..ce88e7b4 100644 --- a/pkg/mcp/mcp_middleware_test.go +++ b/pkg/mcp/mcp_middleware_test.go @@ -1,68 +1,87 @@ package mcp import ( + "bytes" + "flag" "regexp" - "strings" + "strconv" "testing" "github.com/mark3labs/mcp-go/client/transport" + "github.com/stretchr/testify/suite" + "k8s.io/klog/v2" + "k8s.io/klog/v2/textlogger" ) -func TestToolCallLogging(t *testing.T) { - testCaseWithContext(t, &mcpContext{logLevel: 5}, func(c *mcpContext) { - _, _ = c.callTool("configuration_view", map[string]interface{}{ - "minified": false, - }) - t.Run("Logs tool name", func(t *testing.T) { - expectedLog := "mcp tool call: configuration_view(" - if !strings.Contains(c.logBuffer.String(), expectedLog) { - t.Errorf("Expected log to contain '%s', got: %s", expectedLog, c.logBuffer.String()) - } - }) - t.Run("Logs tool call arguments", func(t *testing.T) { - expected := `"mcp 
tool call: configuration_view\((.+)\)"` - m := regexp.MustCompile(expected).FindStringSubmatch(c.logBuffer.String()) - if len(m) != 2 { - t.Fatalf("Expected log entry to contain arguments, got %s", c.logBuffer.String()) - } - if m[1] != "map[minified:false]" { - t.Errorf("Expected log arguments to be 'map[minified:false]', got %s", m[1]) - } - }) +type McpLoggingSuite struct { + BaseMcpSuite + klogState klog.State + logBuffer bytes.Buffer +} + +func (s *McpLoggingSuite) SetupTest() { + s.BaseMcpSuite.SetupTest() + s.klogState = klog.CaptureState() +} + +func (s *McpLoggingSuite) TearDownTest() { + s.BaseMcpSuite.TearDownTest() + s.klogState.Restore() +} + +func (s *McpLoggingSuite) SetLogLevel(level int) { + flags := flag.NewFlagSet("test", flag.ContinueOnError) + klog.InitFlags(flags) + _ = flags.Set("v", strconv.Itoa(level)) + klog.SetLogger(textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(level), textlogger.Output(&s.logBuffer)))) +} + +func (s *McpLoggingSuite) TestLogsToolCall() { + s.SetLogLevel(5) + s.InitMcpClient() + _, err := s.CallTool("configuration_view", map[string]interface{}{"minified": false}) + s.Require().NoError(err, "call to tool configuration_view failed") + + s.Run("Logs tool name", func() { + s.Contains(s.logBuffer.String(), "mcp tool call: configuration_view(") }) - before := func(c *mcpContext) { - c.clientOptions = append(c.clientOptions, transport.WithHeaders(map[string]string{ - "Accept-Encoding": "gzip", - "Authorization": "Bearer should-not-be-logged", - "authorization": "Bearer should-not-be-logged", - "a-loggable-header": "should-be-logged", - })) + s.Run("Logs tool call arguments", func() { + expected := `"mcp tool call: configuration_view\((.+)\)"` + m := regexp.MustCompile(expected).FindStringSubmatch(s.logBuffer.String()) + s.Len(m, 2, "Expected log entry to contain arguments") + s.Equal("map[minified:false]", m[1], "Expected log arguments to be 'map[minified:false]'") + }) +} + +func (s *McpLoggingSuite) 
TestLogsToolCallHeaders() { + s.SetLogLevel(7) + s.InitMcpClient(transport.WithHTTPHeaders(map[string]string{ + "Accept-Encoding": "gzip", + "Authorization": "Bearer should-not-be-logged", + "authorization": "Bearer should-not-be-logged", + "a-loggable-header": "should-be-logged", + })) + _, err := s.CallTool("configuration_view", map[string]interface{}{"minified": false}) + s.Require().NoError(err, "call to tool configuration_view failed") + + s.Run("Logs tool call headers", func() { + expectedLog := "mcp tool call headers: A-Loggable-Header: should-be-logged" + s.Contains(s.logBuffer.String(), expectedLog, "Expected log to contain loggable header") + }) + sensitiveHeaders := []string{ + "Authorization:", + // TODO: Add more sensitive headers as needed } - testCaseWithContext(t, &mcpContext{logLevel: 7, before: before}, func(c *mcpContext) { - _, _ = c.callTool("configuration_view", map[string]interface{}{ - "minified": false, - }) - t.Run("Logs tool call headers", func(t *testing.T) { - expectedLog := "mcp tool call headers: A-Loggable-Header: should-be-logged" - if !strings.Contains(c.logBuffer.String(), expectedLog) { - t.Errorf("Expected log to contain '%s', got: %s", expectedLog, c.logBuffer.String()) - } - }) - sensitiveHeaders := []string{ - "Authorization:", - // TODO: Add more sensitive headers as needed + s.Run("Does not log sensitive headers", func() { + for _, header := range sensitiveHeaders { + s.NotContains(s.logBuffer.String(), header, "Log should not contain sensitive header") } - t.Run("Does not log sensitive headers", func(t *testing.T) { - for _, header := range sensitiveHeaders { - if strings.Contains(c.logBuffer.String(), header) { - t.Errorf("Log should not contain sensitive header '%s', got: %s", header, c.logBuffer.String()) - } - } - }) - t.Run("Does not log sensitive header values", func(t *testing.T) { - if strings.Contains(c.logBuffer.String(), "should-not-be-logged") { - t.Errorf("Log should not contain sensitive header value 
'should-not-be-logged', got: %s", c.logBuffer.String()) - } - }) }) + s.Run("Does not log sensitive header values", func() { + s.NotContains(s.logBuffer.String(), "should-not-be-logged", "Log should not contain sensitive header value") + }) +} + +func TestMcpLogging(t *testing.T) { + suite.Run(t, new(McpLoggingSuite)) } diff --git a/pkg/mcp/mcp_test.go b/pkg/mcp/mcp_test.go index 484d8b59..25e1c651 100644 --- a/pkg/mcp/mcp_test.go +++ b/pkg/mcp/mcp_test.go @@ -1,54 +1,14 @@ package mcp import ( - "context" "net/http" - "os" - "path/filepath" - "runtime" "testing" - "time" "github.com/containers/kubernetes-mcp-server/internal/test" "github.com/mark3labs/mcp-go/client/transport" - "github.com/mark3labs/mcp-go/mcp" "github.com/stretchr/testify/suite" ) -func TestWatchKubeConfig(t *testing.T) { - if runtime.GOOS != "linux" && runtime.GOOS != "darwin" { - t.Skip("Skipping test on non-Unix-like platforms") - } - testCase(t, func(c *mcpContext) { - // Given - withTimeout, cancel := context.WithTimeout(c.ctx, 5*time.Second) - defer cancel() - var notification *mcp.JSONRPCNotification - c.mcpClient.OnNotification(func(n mcp.JSONRPCNotification) { - notification = &n - }) - // When - f, _ := os.OpenFile(filepath.Join(c.tempDir, "config"), os.O_APPEND|os.O_WRONLY, 0644) - _, _ = f.WriteString("\n") - for notification == nil { - select { - case <-withTimeout.Done(): - default: - time.Sleep(100 * time.Millisecond) - } - } - // Then - t.Run("WatchKubeConfig notifies tools change", func(t *testing.T) { - if notification == nil { - t.Fatalf("WatchKubeConfig did not notify") - } - if notification.Method != "notifications/tools/list_changed" { - t.Fatalf("WatchKubeConfig did not notify tools change, got %s", notification.Method) - } - }) - }) -} - type McpHeadersSuite struct { BaseMcpSuite mockServer *test.MockServer diff --git a/pkg/mcp/mcp_watch_test.go b/pkg/mcp/mcp_watch_test.go new file mode 100644 index 00000000..68287279 --- /dev/null +++ b/pkg/mcp/mcp_watch_test.go @@ -0,0 
+1,103 @@ +package mcp + +import ( + "context" + "os" + "testing" + "time" + + "github.com/containers/kubernetes-mcp-server/internal/test" + "github.com/mark3labs/mcp-go/mcp" + "github.com/stretchr/testify/suite" +) + +type WatchKubeConfigSuite struct { + BaseMcpSuite + mockServer *test.MockServer +} + +func (s *WatchKubeConfigSuite) SetupTest() { + s.BaseMcpSuite.SetupTest() + s.mockServer = test.NewMockServer() + s.Cfg.KubeConfig = s.mockServer.KubeconfigFile(s.T()) +} + +func (s *WatchKubeConfigSuite) TearDownTest() { + s.BaseMcpSuite.TearDownTest() + if s.mockServer != nil { + s.mockServer.Close() + } +} + +func (s *WatchKubeConfigSuite) WriteKubeconfig() { + f, _ := os.OpenFile(s.Cfg.KubeConfig, os.O_APPEND|os.O_WRONLY, 0644) + _, _ = f.WriteString("\n") + _ = f.Close() +} + +// WaitForNotification waits for an MCP server notification or fails the test after a timeout +func (s *WatchKubeConfigSuite) WaitForNotification() *mcp.JSONRPCNotification { + withTimeout, cancel := context.WithTimeout(s.T().Context(), 5*time.Second) + defer cancel() + var notification *mcp.JSONRPCNotification + s.OnNotification(func(n mcp.JSONRPCNotification) { + notification = &n + }) + for notification == nil { + select { + case <-withTimeout.Done(): + s.FailNow("timeout waiting for WatchKubeConfig notification") + default: + time.Sleep(100 * time.Millisecond) + } + } + return notification +} + +func (s *WatchKubeConfigSuite) TestNotifiesToolsChange() { + // Given + s.InitMcpClient() + // When + s.WriteKubeconfig() + notification := s.WaitForNotification() + // Then + s.NotNil(notification, "WatchKubeConfig did not notify") + s.Equal("notifications/tools/list_changed", notification.Method, "WatchKubeConfig did not notify tools change") +} + +func (s *WatchKubeConfigSuite) TestClearsNoLongerAvailableTools() { + s.mockServer.Handle(&test.InOpenShiftHandler{}) + s.InitMcpClient() + + s.Run("OpenShift tool is available", func() { + tools, err := s.ListTools(s.T().Context(), 
mcp.ListToolsRequest{}) + s.Require().NoError(err, "call ListTools failed") + s.Require().NotNil(tools, "list tools failed") + var found bool + for _, tool := range tools.Tools { + if tool.Name == "projects_list" { + found = true + break + } + } + s.Truef(found, "expected OpenShift tool to be available") + }) + + s.Run("OpenShift tool is removed after kubeconfig change", func() { + // Reload Config without OpenShift + s.mockServer.ResetHandlers() + s.WriteKubeconfig() + s.WaitForNotification() + + tools, err := s.ListTools(s.T().Context(), mcp.ListToolsRequest{}) + s.Require().NoError(err, "call ListTools failed") + s.Require().NotNil(tools, "list tools failed") + for _, tool := range tools.Tools { + s.Require().Falsef(tool.Name == "projects_list", "expected OpenShift tool to be removed") + } + }) +} + +func TestWatchKubeConfig(t *testing.T) { + suite.Run(t, new(WatchKubeConfigSuite)) +} diff --git a/pkg/mcp/namespaces_test.go b/pkg/mcp/namespaces_test.go index a0a6ff23..25565512 100644 --- a/pkg/mcp/namespaces_test.go +++ b/pkg/mcp/namespaces_test.go @@ -13,9 +13,6 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" "sigs.k8s.io/yaml" - - "github.com/containers/kubernetes-mcp-server/internal/test" - "github.com/containers/kubernetes-mcp-server/pkg/config" ) type NamespacesSuite struct { @@ -108,68 +105,67 @@ func (s *NamespacesSuite) TestNamespacesListAsTable() { }) } -func TestNamespaces(t *testing.T) { - suite.Run(t, new(NamespacesSuite)) -} +func (s *NamespacesSuite) TestProjectsListInOpenShift() { + s.Require().NoError(EnvTestInOpenShift(s.T().Context()), "Expected to configure test for OpenShift") + s.T().Cleanup(func() { + s.Require().NoError(EnvTestInOpenShiftClear(s.T().Context()), "Expected to clear OpenShift test configuration") + }) + s.InitMcpClient() -func TestProjectsListInOpenShift(t *testing.T) { - testCaseWithContext(t, &mcpContext{before: inOpenShift, after: inOpenShiftClear}, func(c *mcpContext) { + 
s.Run("projects_list returns project list in OpenShift", func() { dynamicClient := dynamic.NewForConfigOrDie(envTestRestConfig) _, _ = dynamicClient.Resource(schema.GroupVersionResource{Group: "project.openshift.io", Version: "v1", Resource: "projects"}). - Create(c.ctx, &unstructured.Unstructured{Object: map[string]interface{}{ + Create(s.T().Context(), &unstructured.Unstructured{Object: map[string]interface{}{ "apiVersion": "project.openshift.io/v1", "kind": "Project", "metadata": map[string]interface{}{ "name": "an-openshift-project", }, }}, metav1.CreateOptions{}) - toolResult, err := c.callTool("projects_list", map[string]interface{}{}) - t.Run("projects_list returns project list", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - } - if toolResult.IsError { - t.Fatalf("call tool failed") - } + toolResult, err := s.CallTool("projects_list", map[string]interface{}{}) + s.Run("no error", func() { + s.Nilf(err, "call tool failed %v", err) + s.Falsef(toolResult.IsError, "call tool failed") }) var decoded []unstructured.Unstructured err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded) - t.Run("projects_list has yaml content", func(t *testing.T) { - if err != nil { - t.Fatalf("invalid tool result content %v", err) - } + s.Run("has yaml content", func() { + s.Nilf(err, "invalid tool result content %v", err) }) - t.Run("projects_list returns at least 1 items", func(t *testing.T) { - if len(decoded) < 1 { - t.Errorf("invalid project count, expected at least 1, got %v", len(decoded)) - } + s.Run("returns at least 1 item", func() { + s.GreaterOrEqualf(len(decoded), 1, "invalid project count, expected at least 1, got %v", len(decoded)) idx := slices.IndexFunc(decoded, func(ns unstructured.Unstructured) bool { return ns.GetName() == "an-openshift-project" }) - if idx == -1 { - t.Errorf("namespace %s not found in the list", "an-openshift-project") - } + s.NotEqualf(-1, idx, "namespace %s not found in the list", 
"an-openshift-project") }) }) } -func TestProjectsListInOpenShiftDenied(t *testing.T) { - deniedResourcesServer := test.Must(config.ReadToml([]byte(` +func (s *NamespacesSuite) TestProjectsListInOpenShiftDenied() { + s.Require().NoError(toml.Unmarshal([]byte(` denied_resources = [ { group = "project.openshift.io", version = "v1" } ] - `))) - testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer, before: inOpenShift, after: inOpenShiftClear}, func(c *mcpContext) { - c.withEnvTest() - projectsList, _ := c.callTool("projects_list", map[string]interface{}{}) - t.Run("projects_list has error", func(t *testing.T) { - if !projectsList.IsError { - t.Fatalf("call tool should fail") - } + `), s.Cfg), "Expected to parse denied resources config") + s.Require().NoError(EnvTestInOpenShift(s.T().Context()), "Expected to configure test for OpenShift") + s.T().Cleanup(func() { + s.Require().NoError(EnvTestInOpenShiftClear(s.T().Context()), "Expected to clear OpenShift test configuration") + }) + s.InitMcpClient() + + s.Run("projects_list (denied)", func() { + projectsList, err := s.CallTool("projects_list", map[string]interface{}{}) + s.Run("has error", func() { + s.Truef(projectsList.IsError, "call tool should fail") + s.Nilf(err, "call tool should not return error object") }) - t.Run("projects_list describes denial", func(t *testing.T) { + s.Run("describes denial", func() { expectedMessage := "failed to list projects: resource not allowed: project.openshift.io/v1, Kind=Project" - if projectsList.Content[0].(mcp.TextContent).Text != expectedMessage { - t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, projectsList.Content[0].(mcp.TextContent).Text) - } + s.Equalf(expectedMessage, projectsList.Content[0].(mcp.TextContent).Text, + "expected descriptive error '%s', got %v", expectedMessage, projectsList.Content[0].(mcp.TextContent).Text) }) }) } + +func TestNamespaces(t *testing.T) { + suite.Run(t, new(NamespacesSuite)) +} diff --git 
a/pkg/mcp/nodes_top_test.go b/pkg/mcp/nodes_top_test.go new file mode 100644 index 00000000..23ae9945 --- /dev/null +++ b/pkg/mcp/nodes_top_test.go @@ -0,0 +1,248 @@ +package mcp + +import ( + "net/http" + "testing" + + "github.com/BurntSushi/toml" + "github.com/containers/kubernetes-mcp-server/internal/test" + "github.com/mark3labs/mcp-go/mcp" + "github.com/stretchr/testify/suite" +) + +type NodesTopSuite struct { + BaseMcpSuite + mockServer *test.MockServer +} + +func (s *NodesTopSuite) SetupTest() { + s.BaseMcpSuite.SetupTest() + s.mockServer = test.NewMockServer() + s.Cfg.KubeConfig = s.mockServer.KubeconfigFile(s.T()) + s.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.Header().Set("Content-Type", "application/json") + // Request Performed by DiscoveryClient to Kube API (Get API Groups legacy -core-) + if req.URL.Path == "/api" { + _, _ = w.Write([]byte(`{"kind":"APIVersions","versions":[],"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0"}]}`)) + return + } + })) +} + +func (s *NodesTopSuite) TearDownTest() { + s.BaseMcpSuite.TearDownTest() + if s.mockServer != nil { + s.mockServer.Close() + } +} + +func (s *NodesTopSuite) WithMetricsServer() { + s.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + // Request Performed by DiscoveryClient to Kube API (Get API Groups) + if req.URL.Path == "/apis" { + _, _ = w.Write([]byte(`{"kind":"APIGroupList","apiVersion":"v1","groups":[{"name":"metrics.k8s.io","versions":[{"groupVersion":"metrics.k8s.io/v1beta1","version":"v1beta1"}],"preferredVersion":{"groupVersion":"metrics.k8s.io/v1beta1","version":"v1beta1"}}]}`)) + return + } + // Request Performed by DiscoveryClient to Kube API (Get API Resources) + if req.URL.Path == "/apis/metrics.k8s.io/v1beta1" { + _, _ = 
w.Write([]byte(`{"kind":"APIResourceList","apiVersion":"v1","groupVersion":"metrics.k8s.io/v1beta1","resources":[{"name":"nodes","singularName":"","namespaced":false,"kind":"NodeMetrics","verbs":["get","list"]}]}`)) + return + } + })) +} + +func (s *NodesTopSuite) TestNodesTop() { + s.WithMetricsServer() + s.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + // List Nodes + if req.URL.Path == "/api/v1/nodes" { + _, _ = w.Write([]byte(`{ + "apiVersion": "v1", + "kind": "NodeList", + "items": [ + { + "metadata": { + "name": "node-1", + "labels": { + "node-role.kubernetes.io/worker": "" + } + }, + "status": { + "allocatable": { + "cpu": "4", + "memory": "16Gi" + }, + "nodeInfo": { + "swap": { + "capacity": 0 + } + } + } + }, + { + "metadata": { + "name": "node-2", + "labels": { + "node-role.kubernetes.io/worker": "" + } + }, + "status": { + "allocatable": { + "cpu": "4", + "memory": "16Gi" + }, + "nodeInfo": { + "swap": { + "capacity": 0 + } + } + } + } + ] + }`)) + return + } + // Get NodeMetrics + if req.URL.Path == "/apis/metrics.k8s.io/v1beta1/nodes" { + _, _ = w.Write([]byte(`{ + "apiVersion": "metrics.k8s.io/v1beta1", + "kind": "NodeMetricsList", + "items": [ + { + "metadata": { + "name": "node-1" + }, + "timestamp": "2025-10-29T09:00:00Z", + "window": "30s", + "usage": { + "cpu": "500m", + "memory": "2Gi" + } + }, + { + "metadata": { + "name": "node-2" + }, + "timestamp": "2025-10-29T09:00:00Z", + "window": "30s", + "usage": { + "cpu": "1000m", + "memory": "4Gi" + } + } + ] + }`)) + return + } + // Get specific NodeMetrics + if req.URL.Path == "/apis/metrics.k8s.io/v1beta1/nodes/node-1" { + _, _ = w.Write([]byte(`{ + "apiVersion": "metrics.k8s.io/v1beta1", + "kind": "NodeMetrics", + "metadata": { + "name": "node-1" + }, + "timestamp": "2025-10-29T09:00:00Z", + "window": "30s", + "usage": { + "cpu": "500m", + "memory": "2Gi" + } + }`)) + return + } + w.WriteHeader(http.StatusNotFound) + })) + s.InitMcpClient() + + 
s.Run("nodes_top() - all nodes", func() { + toolResult, err := s.CallTool("nodes_top", map[string]interface{}{}) + s.Require().NotNil(toolResult, "toolResult should not be nil") + s.Run("no error", func() { + s.Falsef(toolResult.IsError, "call tool should succeed") + s.Nilf(err, "call tool should not return error object") + }) + s.Run("returns metrics for all nodes", func() { + content := toolResult.Content[0].(mcp.TextContent).Text + s.Contains(content, "node-1", "expected metrics to contain node-1") + s.Contains(content, "node-2", "expected metrics to contain node-2") + s.Contains(content, "CPU(cores)", "expected header with CPU column") + s.Contains(content, "MEMORY(bytes)", "expected header with MEMORY column") + }) + }) + + s.Run("nodes_top(name=node-1) - specific node", func() { + toolResult, err := s.CallTool("nodes_top", map[string]interface{}{ + "name": "node-1", + }) + s.Require().NotNil(toolResult, "toolResult should not be nil") + s.Run("no error", func() { + s.Falsef(toolResult.IsError, "call tool should succeed") + s.Nilf(err, "call tool should not return error object") + }) + s.Run("returns metrics for specific node", func() { + content := toolResult.Content[0].(mcp.TextContent).Text + s.Contains(content, "node-1", "expected metrics to contain node-1") + s.Contains(content, "500m", "expected CPU usage of 500m") + s.Contains(content, "2048Mi", "expected memory usage of 2048Mi") + }) + }) + + s.Run("nodes_top(label_selector=node-role.kubernetes.io/worker=)", func() { + toolResult, err := s.CallTool("nodes_top", map[string]interface{}{ + "label_selector": "node-role.kubernetes.io/worker=", + }) + s.Require().NotNil(toolResult, "toolResult should not be nil") + s.Run("no error", func() { + s.Falsef(toolResult.IsError, "call tool should succeed") + s.Nilf(err, "call tool should not return error object") + }) + s.Run("returns metrics for filtered nodes", func() { + content := toolResult.Content[0].(mcp.TextContent).Text + s.Contains(content, "node-1", 
"expected metrics to contain node-1") + s.Contains(content, "node-2", "expected metrics to contain node-2") + }) + }) +} + +func (s *NodesTopSuite) TestNodesTopMetricsUnavailable() { + s.InitMcpClient() + + s.Run("nodes_top() - metrics unavailable", func() { + toolResult, err := s.CallTool("nodes_top", map[string]interface{}{}) + s.Require().NotNil(toolResult, "toolResult should not be nil") + s.Run("has error", func() { + s.Truef(toolResult.IsError, "call tool should fail when metrics unavailable") + s.Nilf(err, "call tool should not return error object") + }) + s.Run("describes metrics unavailable", func() { + content := toolResult.Content[0].(mcp.TextContent).Text + s.Contains(content, "failed to get nodes top", "expected error message about failing to get nodes top") + }) + }) +} + +func (s *NodesTopSuite) TestNodesTopDenied() { + s.Require().NoError(toml.Unmarshal([]byte(` + denied_resources = [ { group = "metrics.k8s.io", version = "v1beta1" } ] + `), s.Cfg), "Expected to parse denied resources config") + s.WithMetricsServer() + s.InitMcpClient() + s.Run("nodes_top (denied)", func() { + toolResult, err := s.CallTool("nodes_top", map[string]interface{}{}) + s.Require().NotNil(toolResult, "toolResult should not be nil") + s.Run("has error", func() { + s.Truef(toolResult.IsError, "call tool should fail") + s.Nilf(err, "call tool should not return error object") + }) + s.Run("describes denial", func() { + expectedMessage := "failed to get nodes top: resource not allowed: metrics.k8s.io/v1beta1, Kind=NodeMetrics" + s.Equalf(expectedMessage, toolResult.Content[0].(mcp.TextContent).Text, + "expected descriptive error '%s', got %v", expectedMessage, toolResult.Content[0].(mcp.TextContent).Text) + }) + }) +} + +func TestNodesTop(t *testing.T) { + suite.Run(t, new(NodesTopSuite)) +} diff --git a/pkg/mcp/pods_run_test.go b/pkg/mcp/pods_run_test.go new file mode 100644 index 00000000..4c329f3e --- /dev/null +++ b/pkg/mcp/pods_run_test.go @@ -0,0 +1,145 @@ +package mcp + 
+import ( + "strings" + "testing" + + "github.com/BurntSushi/toml" + "github.com/mark3labs/mcp-go/mcp" + "github.com/stretchr/testify/suite" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/yaml" +) + +type PodsRunSuite struct { + BaseMcpSuite +} + +func (s *PodsRunSuite) TestPodsRun() { + s.InitMcpClient() + s.Run("pods_run with nil image returns error", func() { + toolResult, _ := s.CallTool("pods_run", map[string]interface{}{}) + s.Truef(toolResult.IsError, "call tool should fail") + s.Equalf("failed to run pod, missing argument image", toolResult.Content[0].(mcp.TextContent).Text, + "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + }) + s.Run("pods_run(image=nginx, namespace=nil), uses configured namespace", func() { + podsRunNilNamespace, err := s.CallTool("pods_run", map[string]interface{}{"image": "nginx"}) + s.Run("no error", func() { + s.Nilf(err, "call tool failed %v", err) + s.Falsef(podsRunNilNamespace.IsError, "call tool failed") + }) + var decodedNilNamespace []unstructured.Unstructured + err = yaml.Unmarshal([]byte(podsRunNilNamespace.Content[0].(mcp.TextContent).Text), &decodedNilNamespace) + s.Run("has yaml content", func() { + s.Nilf(err, "invalid tool result content %v", err) + }) + s.Run("returns 1 item (Pod)", func() { + s.Lenf(decodedNilNamespace, 1, "invalid pods count, expected 1, got %v", len(decodedNilNamespace)) + s.Equalf("Pod", decodedNilNamespace[0].GetKind(), "invalid pod kind, expected Pod, got %v", decodedNilNamespace[0].GetKind()) + }) + s.Run("returns pod in default", func() { + s.Equalf("default", decodedNilNamespace[0].GetNamespace(), "invalid pod namespace, expected default, got %v", decodedNilNamespace[0].GetNamespace()) + }) + s.Run("returns pod with random name", func() { + s.Truef(strings.HasPrefix(decodedNilNamespace[0].GetName(), "kubernetes-mcp-server-run-"), + "invalid pod name, expected random, got %v", decodedNilNamespace[0].GetName()) + }) + s.Run("returns pod with 
labels", func() { + labels := decodedNilNamespace[0].Object["metadata"].(map[string]interface{})["labels"].(map[string]interface{}) + s.NotEqualf("", labels["app.kubernetes.io/name"], "invalid labels, expected app.kubernetes.io/name, got %v", labels) + s.NotEqualf("", labels["app.kubernetes.io/component"], "invalid labels, expected app.kubernetes.io/component, got %v", labels) + s.Equalf("kubernetes-mcp-server", labels["app.kubernetes.io/managed-by"], "invalid labels, expected app.kubernetes.io/managed-by, got %v", labels) + s.Equalf("kubernetes-mcp-server-run-sandbox", labels["app.kubernetes.io/part-of"], "invalid labels, expected app.kubernetes.io/part-of, got %v", labels) + }) + s.Run("returns pod with nginx container", func() { + containers := decodedNilNamespace[0].Object["spec"].(map[string]interface{})["containers"].([]interface{}) + s.Equalf("nginx", containers[0].(map[string]interface{})["image"], "invalid container name, expected nginx, got %v", containers[0].(map[string]interface{})["image"]) + }) + }) + s.Run("pods_run(image=nginx, namespace=nil, port=80)", func() { + podsRunNamespaceAndPort, err := s.CallTool("pods_run", map[string]interface{}{"image": "nginx", "port": 80}) + s.Run("no error", func() { + s.Nilf(err, "call tool failed %v", err) + s.Falsef(podsRunNamespaceAndPort.IsError, "call tool failed") + }) + var decodedNamespaceAndPort []unstructured.Unstructured + err = yaml.Unmarshal([]byte(podsRunNamespaceAndPort.Content[0].(mcp.TextContent).Text), &decodedNamespaceAndPort) + s.Run("has yaml content", func() { + s.Nilf(err, "invalid tool result content %v", err) + }) + s.Run("returns 2 items (Pod + Service)", func() { + s.Lenf(decodedNamespaceAndPort, 2, "invalid pods count, expected 2, got %v", len(decodedNamespaceAndPort)) + s.Equalf("Pod", decodedNamespaceAndPort[0].GetKind(), "invalid pod kind, expected Pod, got %v", decodedNamespaceAndPort[0].GetKind()) + s.Equalf("Service", decodedNamespaceAndPort[1].GetKind(), "invalid service kind, 
expected Service, got %v", decodedNamespaceAndPort[1].GetKind()) + }) + s.Run("returns pod with port", func() { + containers := decodedNamespaceAndPort[0].Object["spec"].(map[string]interface{})["containers"].([]interface{}) + ports := containers[0].(map[string]interface{})["ports"].([]interface{}) + s.Equalf(int64(80), ports[0].(map[string]interface{})["containerPort"], "invalid container port, expected 80, got %v", ports[0].(map[string]interface{})["containerPort"]) + }) + s.Run("returns service with port and selector", func() { + ports := decodedNamespaceAndPort[1].Object["spec"].(map[string]interface{})["ports"].([]interface{}) + s.Equalf(int64(80), ports[0].(map[string]interface{})["port"], "invalid service port, expected 80, got %v", ports[0].(map[string]interface{})["port"]) + s.Equalf(int64(80), ports[0].(map[string]interface{})["targetPort"], "invalid service target port, expected 80, got %v", ports[0].(map[string]interface{})["targetPort"]) + selector := decodedNamespaceAndPort[1].Object["spec"].(map[string]interface{})["selector"].(map[string]interface{}) + s.NotEqualf("", selector["app.kubernetes.io/name"], "invalid service selector, expected app.kubernetes.io/name, got %v", selector) + s.Equalf("kubernetes-mcp-server", selector["app.kubernetes.io/managed-by"], "invalid service selector, expected app.kubernetes.io/managed-by, got %v", selector) + s.Equalf("kubernetes-mcp-server-run-sandbox", selector["app.kubernetes.io/part-of"], "invalid service selector, expected app.kubernetes.io/part-of, got %v", selector) + }) + }) +} + +func (s *PodsRunSuite) TestPodsRunDenied() { + s.Require().NoError(toml.Unmarshal([]byte(` + denied_resources = [ { version = "v1", kind = "Pod" } ] + `), s.Cfg), "Expected to parse denied resources config") + s.InitMcpClient() + s.Run("pods_run (denied)", func() { + podsRun, err := s.CallTool("pods_run", map[string]interface{}{"image": "nginx"}) + s.Run("has error", func() { + s.Truef(podsRun.IsError, "call tool should fail") + 
s.Nilf(err, "call tool should not return error object") + }) + s.Run("describes denial", func() { + expectedMessage := "failed to run pod in namespace : resource not allowed: /v1, Kind=Pod" + s.Equalf(expectedMessage, podsRun.Content[0].(mcp.TextContent).Text, + "expected descriptive error '%s', got %v", expectedMessage, podsRun.Content[0].(mcp.TextContent).Text) + }) + }) +} + +func (s *PodsRunSuite) TestPodsRunInOpenShift() { + s.Require().NoError(EnvTestInOpenShift(s.T().Context()), "Expected to configure test for OpenShift") + s.T().Cleanup(func() { + s.Require().NoError(EnvTestInOpenShiftClear(s.T().Context()), "Expected to clear OpenShift test configuration") + }) + s.InitMcpClient() + + s.Run("pods_run(image=nginx, namespace=nil, port=80) returns route with port", func() { + podsRunInOpenShift, err := s.CallTool("pods_run", map[string]interface{}{"image": "nginx", "port": 80}) + s.Run("no error", func() { + s.Nilf(err, "call tool failed %v", err) + s.Falsef(podsRunInOpenShift.IsError, "call tool failed") + }) + var decodedPodServiceRoute []unstructured.Unstructured + err = yaml.Unmarshal([]byte(podsRunInOpenShift.Content[0].(mcp.TextContent).Text), &decodedPodServiceRoute) + s.Run("has yaml content", func() { + s.Nilf(err, "invalid tool result content %v", err) + }) + s.Run("returns 3 items (Pod + Service + Route)", func() { + s.Lenf(decodedPodServiceRoute, 3, "invalid pods count, expected 3, got %v", len(decodedPodServiceRoute)) + s.Equalf("Pod", decodedPodServiceRoute[0].GetKind(), "invalid pod kind, expected Pod, got %v", decodedPodServiceRoute[0].GetKind()) + s.Equalf("Service", decodedPodServiceRoute[1].GetKind(), "invalid service kind, expected Service, got %v", decodedPodServiceRoute[1].GetKind()) + s.Equalf("Route", decodedPodServiceRoute[2].GetKind(), "invalid route kind, expected Route, got %v", decodedPodServiceRoute[2].GetKind()) + }) + s.Run("returns route with port", func() { + targetPort := 
decodedPodServiceRoute[2].Object["spec"].(map[string]interface{})["port"].(map[string]interface{})["targetPort"].(int64) + s.Equalf(int64(80), targetPort, "invalid route target port, expected 80, got %v", targetPort) + }) + }) +} + +func TestPodsRun(t *testing.T) { + suite.Run(t, new(PodsRunSuite)) +} diff --git a/pkg/mcp/pods_test.go b/pkg/mcp/pods_test.go index cfa20dcb..ddeec3ea 100644 --- a/pkg/mcp/pods_test.go +++ b/pkg/mcp/pods_test.go @@ -5,9 +5,8 @@ import ( "strings" "testing" - "github.com/containers/kubernetes-mcp-server/internal/test" - "github.com/containers/kubernetes-mcp-server/pkg/config" - "github.com/containers/kubernetes-mcp-server/pkg/output" + "github.com/BurntSushi/toml" + "github.com/stretchr/testify/suite" "github.com/mark3labs/mcp-go/mcp" corev1 "k8s.io/api/core/v1" @@ -16,228 +15,194 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" "sigs.k8s.io/yaml" ) -func TestPodsListInAllNamespaces(t *testing.T) { - testCase(t, func(c *mcpContext) { - c.withEnvTest() - toolResult, err := c.callTool("pods_list", map[string]interface{}{}) - t.Run("pods_list returns pods list", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - } - if toolResult.IsError { - t.Fatalf("call tool failed") - } +type PodsSuite struct { + BaseMcpSuite +} + +func (s *PodsSuite) TestPodsListInAllNamespaces() { + s.InitMcpClient() + s.Run("pods_list returns pods list in all namespaces", func() { + toolResult, err := s.CallTool("pods_list", map[string]interface{}{}) + s.Run("no error", func() { + s.Nilf(err, "call tool failed %v", err) + s.Falsef(toolResult.IsError, "call tool failed") }) var decoded []unstructured.Unstructured err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded) - t.Run("pods_list has yaml content", func(t *testing.T) { - if err != nil { - t.Fatalf("invalid tool result content %v", err) - } 
+ s.Run("has yaml content", func() { + s.Nilf(err, "invalid tool result content %v", err) }) - t.Run("pods_list returns 3 items", func(t *testing.T) { - if len(decoded) != 3 { - t.Fatalf("invalid pods count, expected 3, got %v", len(decoded)) - } + s.Run("returns at least 3 items", func() { + s.GreaterOrEqualf(len(decoded), 3, "invalid pods count, expected at least 3, got %v", len(decoded)) }) - t.Run("pods_list returns pod in ns-1", func(t *testing.T) { - if decoded[1].GetName() != "a-pod-in-ns-1" { - t.Fatalf("invalid pod name, expected a-pod-in-ns-1, got %v", decoded[1].GetName()) - } - if decoded[1].GetNamespace() != "ns-1" { - t.Fatalf("invalid pod namespace, expected ns-1, got %v", decoded[1].GetNamespace()) + var aPodInNs1, aPodInNs2 *unstructured.Unstructured + for _, pod := range decoded { + switch pod.GetName() { + case "a-pod-in-ns-1": + aPodInNs1 = &pod + case "a-pod-in-ns-2": + aPodInNs2 = &pod } + } + s.Run("returns pod in ns-1", func() { + s.Require().NotNil(aPodInNs1, "aPodInNs1 is nil") + s.Equalf("a-pod-in-ns-1", aPodInNs1.GetName(), "invalid pod name, expected a-pod-in-ns-1, got %v", aPodInNs1.GetName()) + s.Equalf("ns-1", aPodInNs1.GetNamespace(), "invalid pod namespace, expected ns-1, got %v", aPodInNs1.GetNamespace()) }) - t.Run("pods_list returns pod in ns-2", func(t *testing.T) { - if decoded[2].GetName() != "a-pod-in-ns-2" { - t.Fatalf("invalid pod name, expected a-pod-in-ns-2, got %v", decoded[2].GetName()) - } - if decoded[2].GetNamespace() != "ns-2" { - t.Fatalf("invalid pod namespace, expected ns-2, got %v", decoded[2].GetNamespace()) - } + s.Run("returns pod in ns-2", func() { + s.Require().NotNil(aPodInNs2, "aPodInNs2 is nil") + s.Equalf("a-pod-in-ns-2", aPodInNs2.GetName(), "invalid pod name, expected a-pod-in-ns-2, got %v", aPodInNs2.GetName()) + s.Equalf("ns-2", aPodInNs2.GetNamespace(), "invalid pod namespace, expected ns-2, got %v", aPodInNs2.GetNamespace()) }) - t.Run("pods_list omits managed fields", func(t *testing.T) { - if 
decoded[1].GetManagedFields() != nil { - t.Fatalf("managed fields should be omitted, got %v", decoded[0].GetManagedFields()) - } + s.Run("omits managed fields", func() { + s.Nilf(decoded[1].GetManagedFields(), "managed fields should be omitted, got %v", decoded[1].GetManagedFields()) }) }) } -func TestPodsListInAllNamespacesUnauthorized(t *testing.T) { - testCase(t, func(c *mcpContext) { - c.withEnvTest() - defer restoreAuth(c.ctx) - client := c.newKubernetesClient() - // Authorize user only for default/configured namespace - r, _ := client.RbacV1().Roles("default").Create(c.ctx, &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{Name: "allow-pods-list"}, - Rules: []rbacv1.PolicyRule{{ - Verbs: []string{"get", "list"}, - APIGroups: []string{""}, - Resources: []string{"pods"}, - }}, - }, metav1.CreateOptions{}) - _, _ = client.RbacV1().RoleBindings("default").Create(c.ctx, &rbacv1.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{Name: "allow-pods-list"}, - Subjects: []rbacv1.Subject{{Kind: "User", Name: envTestUser.Name}}, - RoleRef: rbacv1.RoleRef{Kind: "Role", Name: r.Name}, - }, metav1.CreateOptions{}) - // Deny cluster by removing cluster rule - _ = client.RbacV1().ClusterRoles().Delete(c.ctx, "allow-all", metav1.DeleteOptions{}) - toolResult, err := c.callTool("pods_list", map[string]interface{}{}) - t.Run("pods_list returns pods list for default namespace only", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - return - } - if toolResult.IsError { - t.Fatalf("call tool failed %v", toolResult.Content) - return - } +func (s *PodsSuite) TestPodsListInAllNamespacesUnauthorized() { + s.InitMcpClient() + defer restoreAuth(s.T().Context()) + client := kubernetes.NewForConfigOrDie(envTestRestConfig) + // Authorize user only for default/configured namespace + r, _ := client.RbacV1().Roles("default").Create(s.T().Context(), &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{Name: "allow-pods-list"}, + Rules: []rbacv1.PolicyRule{{ + Verbs: []string{"get", 
"list"}, + APIGroups: []string{""}, + Resources: []string{"pods"}, + }}, + }, metav1.CreateOptions{}) + _, _ = client.RbacV1().RoleBindings("default").Create(s.T().Context(), &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{Name: "allow-pods-list"}, + Subjects: []rbacv1.Subject{{Kind: "User", Name: envTestUser.Name}}, + RoleRef: rbacv1.RoleRef{Kind: "Role", Name: r.Name}, + }, metav1.CreateOptions{}) + // Deny cluster by removing cluster rule + _ = client.RbacV1().ClusterRoles().Delete(s.T().Context(), "allow-all", metav1.DeleteOptions{}) + s.Run("pods_list returns pods list for default namespace only", func() { + toolResult, err := s.CallTool("pods_list", map[string]interface{}{}) + s.Run("no error", func() { + s.Nilf(err, "call tool failed %v", err) + s.Falsef(toolResult.IsError, "call tool failed %v", toolResult.Content) }) var decoded []unstructured.Unstructured err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded) - t.Run("pods_list has yaml content", func(t *testing.T) { - if err != nil { - t.Fatalf("invalid tool result content %v", err) - return - } + s.Run("has yaml content", func() { + s.Nilf(err, "invalid tool result content %v", err) }) - t.Run("pods_list returns 1 items", func(t *testing.T) { - if len(decoded) != 1 { - t.Fatalf("invalid pods count, expected 1, got %v", len(decoded)) - return - } + s.Run("returns at least 1 item", func() { + s.GreaterOrEqualf(len(decoded), 1, "invalid pods count, expected at least 1, got %v", len(decoded)) }) - t.Run("pods_list returns pod in default", func(t *testing.T) { - if decoded[0].GetName() != "a-pod-in-default" { - t.Fatalf("invalid pod name, expected a-pod-in-default, got %v", decoded[0].GetName()) - return + s.Run("all pods are in default namespace", func() { + for _, pod := range decoded { + s.Equalf("default", pod.GetNamespace(), "all pods should be in default namespace, got pod %s in namespace %s", pod.GetName(), pod.GetNamespace()) } - if decoded[0].GetNamespace() != 
"default" { - t.Fatalf("invalid pod namespace, expected default, got %v", decoded[0].GetNamespace()) - return + }) + s.Run("includes a-pod-in-default", func() { + found := false + for _, pod := range decoded { + if pod.GetName() == "a-pod-in-default" { + found = true + break + } } + s.Truef(found, "expected to find pod a-pod-in-default") }) }) } -func TestPodsListInNamespace(t *testing.T) { - testCase(t, func(c *mcpContext) { - c.withEnvTest() - t.Run("pods_list_in_namespace with nil namespace returns error", func(t *testing.T) { - toolResult, _ := c.callTool("pods_list_in_namespace", map[string]interface{}{}) - if toolResult.IsError != true { - t.Fatalf("call tool should fail") - return - } - if toolResult.Content[0].(mcp.TextContent).Text != "failed to list pods in namespace, missing argument namespace" { - t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) - return - } - }) - toolResult, err := c.callTool("pods_list_in_namespace", map[string]interface{}{ +func (s *PodsSuite) TestPodsListInNamespace() { + s.InitMcpClient() + s.Run("pods_list_in_namespace with nil namespace returns error", func() { + toolResult, _ := s.CallTool("pods_list_in_namespace", map[string]interface{}{}) + s.Truef(toolResult.IsError, "call tool should fail") + s.Equalf("failed to list pods in namespace, missing argument namespace", toolResult.Content[0].(mcp.TextContent).Text, + "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + }) + s.Run("pods_list_in_namespace(namespace=ns-1) returns pods list", func() { + toolResult, err := s.CallTool("pods_list_in_namespace", map[string]interface{}{ "namespace": "ns-1", }) - t.Run("pods_list_in_namespace returns pods list", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - } - if toolResult.IsError { - t.Fatalf("call tool failed") - } + s.Run("no error", func() { + s.Nilf(err, "call tool failed %v", err) + s.Falsef(toolResult.IsError, "call tool failed") 
}) var decoded []unstructured.Unstructured err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded) - t.Run("pods_list_in_namespace has yaml content", func(t *testing.T) { - if err != nil { - t.Fatalf("invalid tool result content %v", err) - } + s.Run("has yaml content", func() { + s.Nilf(err, "invalid tool result content %v", err) }) - t.Run("pods_list_in_namespace returns 1 items", func(t *testing.T) { - if len(decoded) != 1 { - t.Fatalf("invalid pods count, expected 1, got %v", len(decoded)) - } + s.Run("returns 1 item", func() { + s.Lenf(decoded, 1, "invalid pods count, expected 1, got %v", len(decoded)) }) - t.Run("pods_list_in_namespace returns pod in ns-1", func(t *testing.T) { - if decoded[0].GetName() != "a-pod-in-ns-1" { - t.Errorf("invalid pod name, expected a-pod-in-ns-1, got %v", decoded[0].GetName()) - } - if decoded[0].GetNamespace() != "ns-1" { - t.Errorf("invalid pod namespace, expected ns-1, got %v", decoded[0].GetNamespace()) - } + s.Run("returns pod in ns-1", func() { + s.Equalf("a-pod-in-ns-1", decoded[0].GetName(), "invalid pod name, expected a-pod-in-ns-1, got %v", decoded[0].GetName()) + s.Equalf("ns-1", decoded[0].GetNamespace(), "invalid pod namespace, expected ns-1, got %v", decoded[0].GetNamespace()) }) - t.Run("pods_list_in_namespace omits managed fields", func(t *testing.T) { - if decoded[0].GetManagedFields() != nil { - t.Fatalf("managed fields should be omitted, got %v", decoded[0].GetManagedFields()) - } + s.Run("omits managed fields", func() { + s.Nilf(decoded[0].GetManagedFields(), "managed fields should be omitted, got %v", decoded[0].GetManagedFields()) }) }) } -func TestPodsListDenied(t *testing.T) { - deniedResourcesServer := test.Must(config.ReadToml([]byte(` +func (s *PodsSuite) TestPodsListDenied() { + s.Require().NoError(toml.Unmarshal([]byte(` denied_resources = [ { version = "v1", kind = "Pod" } ] - `))) - testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c 
*mcpContext) { - c.withEnvTest() - podsList, _ := c.callTool("pods_list", map[string]interface{}{}) - t.Run("pods_list has error", func(t *testing.T) { - if !podsList.IsError { - t.Fatalf("call tool should fail") - } - }) - t.Run("pods_list describes denial", func(t *testing.T) { + `), s.Cfg), "Expected to parse denied resources config") + s.InitMcpClient() + s.Run("pods_list (denied)", func() { + podsList, err := s.CallTool("pods_list", map[string]interface{}{}) + s.Run("has error", func() { + s.Truef(podsList.IsError, "call tool should fail") + s.Nilf(err, "call tool should not return error object") + }) + s.Run("describes denial", func() { expectedMessage := "failed to list pods in all namespaces: resource not allowed: /v1, Kind=Pod" - if podsList.Content[0].(mcp.TextContent).Text != expectedMessage { - t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, podsList.Content[0].(mcp.TextContent).Text) - } + s.Equalf(expectedMessage, podsList.Content[0].(mcp.TextContent).Text, + "expected descriptive error '%s', got %v", expectedMessage, podsList.Content[0].(mcp.TextContent).Text) }) - podsListInNamespace, _ := c.callTool("pods_list_in_namespace", map[string]interface{}{"namespace": "ns-1"}) - t.Run("pods_list_in_namespace has error", func(t *testing.T) { - if !podsListInNamespace.IsError { - t.Fatalf("call tool should fail") - } + }) + s.Run("pods_list_in_namespace (denied)", func() { + podsListInNamespace, err := s.CallTool("pods_list_in_namespace", map[string]interface{}{"namespace": "ns-1"}) + s.Run("has error", func() { + s.Truef(podsListInNamespace.IsError, "call tool should fail") + s.Nilf(err, "call tool should not return error object") }) - t.Run("pods_list_in_namespace describes denial", func(t *testing.T) { + s.Run("describes denial", func() { expectedMessage := "failed to list pods in namespace ns-1: resource not allowed: /v1, Kind=Pod" - if podsListInNamespace.Content[0].(mcp.TextContent).Text != expectedMessage { - t.Fatalf("expected 
descriptive error '%s', got %v", expectedMessage, podsListInNamespace.Content[0].(mcp.TextContent).Text) - } + s.Equalf(expectedMessage, podsListInNamespace.Content[0].(mcp.TextContent).Text, + "expected descriptive error '%s', got %v", expectedMessage, podsListInNamespace.Content[0].(mcp.TextContent).Text) }) }) } -func TestPodsListAsTable(t *testing.T) { - testCaseWithContext(t, &mcpContext{listOutput: output.Table}, func(c *mcpContext) { - c.withEnvTest() - podsList, err := c.callTool("pods_list", map[string]interface{}{}) - t.Run("pods_list returns pods list", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - } - if podsList.IsError { - t.Fatalf("call tool failed") - } - }) +func (s *PodsSuite) TestPodsListAsTable() { + s.Cfg.ListOutput = "table" + s.InitMcpClient() + s.Run("pods_list (list_output=table)", func() { + podsList, err := s.CallTool("pods_list", map[string]interface{}{}) + s.Run("no error", func() { + s.Nilf(err, "call tool failed %v", err) + s.Falsef(podsList.IsError, "call tool failed") + }) + s.Require().NotNil(podsList, "Expected tool result from call") outPodsList := podsList.Content[0].(mcp.TextContent).Text - t.Run("pods_list returns table with 1 header and 3 rows", func(t *testing.T) { + s.Run("returns table with header and rows", func() { lines := strings.Count(outPodsList, "\n") - if lines != 4 { - t.Fatalf("invalid line count, expected 4 (1 header, 3 row), got %v", lines) - } + s.GreaterOrEqualf(lines, 3, "invalid line count, expected at least 3 (1 header, 2+ rows), got %v", lines) }) - t.Run("pods_list_in_namespace returns column headers", func(t *testing.T) { + s.Run("returns column headers", func() { expectedHeaders := "NAMESPACE\\s+APIVERSION\\s+KIND\\s+NAME\\s+READY\\s+STATUS\\s+RESTARTS\\s+AGE\\s+IP\\s+NODE\\s+NOMINATED NODE\\s+READINESS GATES\\s+LABELS" - if m, e := regexp.MatchString(expectedHeaders, outPodsList); !m || e != nil { - t.Fatalf("Expected headers '%s' not found in output:\n%s", 
expectedHeaders, outPodsList) - } + m, e := regexp.MatchString(expectedHeaders, outPodsList) + s.Truef(m, "Expected headers '%s' not found in output:\n%s", expectedHeaders, outPodsList) + s.NoErrorf(e, "Error matching headers regex: %v", e) }) - t.Run("pods_list_in_namespace returns formatted row for a-pod-in-ns-1", func(t *testing.T) { + s.Run("returns formatted row for a-pod-in-ns-1", func() { expectedRow := "(?ns-1)\\s+" + "(?v1)\\s+" + "(?Pod)\\s+" + @@ -251,11 +216,11 @@ func TestPodsListAsTable(t *testing.T) { "(?)\\s+" + "(?)\\s+" + "(?)" - if m, e := regexp.MatchString(expectedRow, outPodsList); !m || e != nil { - t.Fatalf("Expected row '%s' not found in output:\n%s", expectedRow, outPodsList) - } + m, e := regexp.MatchString(expectedRow, outPodsList) + s.Truef(m, "Expected row '%s' not found in output:\n%s", expectedRow, outPodsList) + s.NoErrorf(e, "Error matching a-pod-in-ns-1 regex: %v", e) }) - t.Run("pods_list_in_namespace returns formatted row for a-pod-in-default", func(t *testing.T) { + s.Run("returns formatted row for a-pod-in-default", func() { expectedRow := "(?default)\\s+" + "(?v1)\\s+" + "(?Pod)\\s+" + @@ -269,36 +234,32 @@ func TestPodsListAsTable(t *testing.T) { "(?)\\s+" + "(?)\\s+" + "(?app=nginx)" - if m, e := regexp.MatchString(expectedRow, outPodsList); !m || e != nil { - t.Fatalf("Expected row '%s' not found in output:\n%s", expectedRow, outPodsList) - } + m, e := regexp.MatchString(expectedRow, outPodsList) + s.Truef(m, "Expected row '%s' not found in output:\n%s", expectedRow, outPodsList) + s.NoErrorf(e, "Error matching a-pod-in-default regex: %v", e) }) - podsListInNamespace, err := c.callTool("pods_list_in_namespace", map[string]interface{}{ + }) + s.Run("pods_list_in_namespace (list_output=table)", func() { + podsListInNamespace, err := s.CallTool("pods_list_in_namespace", map[string]interface{}{ "namespace": "ns-1", }) - t.Run("pods_list_in_namespace returns pods list", func(t *testing.T) { - if err != nil { - t.Fatalf("call 
tool failed %v", err) - return - } - if podsListInNamespace.IsError { - t.Fatalf("call tool failed") - } + s.Run("no error", func() { + s.Nilf(err, "call tool failed %v", err) + s.Falsef(podsListInNamespace.IsError, "call tool failed") }) + s.Require().NotNil(podsListInNamespace, "Expected tool result from call") outPodsListInNamespace := podsListInNamespace.Content[0].(mcp.TextContent).Text - t.Run("pods_list_in_namespace returns table with 1 header and 1 row", func(t *testing.T) { + s.Run("returns table with header and row", func() { lines := strings.Count(outPodsListInNamespace, "\n") - if lines != 2 { - t.Fatalf("invalid line count, expected 2 (1 header, 1 row), got %v", lines) - } + s.GreaterOrEqualf(lines, 1, "invalid line count, expected at least 1 (1 header, 1+ rows), got %v", lines) }) - t.Run("pods_list_in_namespace returns column headers", func(t *testing.T) { + s.Run("returns column headers", func() { expectedHeaders := "NAMESPACE\\s+APIVERSION\\s+KIND\\s+NAME\\s+READY\\s+STATUS\\s+RESTARTS\\s+AGE\\s+IP\\s+NODE\\s+NOMINATED NODE\\s+READINESS GATES\\s+LABELS" - if m, e := regexp.MatchString(expectedHeaders, outPodsListInNamespace); !m || e != nil { - t.Fatalf("Expected headers '%s' not found in output:\n%s", expectedHeaders, outPodsListInNamespace) - } + m, e := regexp.MatchString(expectedHeaders, outPodsListInNamespace) + s.Truef(m, "Expected headers '%s' not found in output:\n%s", expectedHeaders, outPodsListInNamespace) + s.NoErrorf(e, "Error matching headers regex: %v", e) }) - t.Run("pods_list_in_namespace returns formatted row", func(t *testing.T) { + s.Run("returns formatted row", func() { expectedRow := "(?ns-1)\\s+" + "(?v1)\\s+" + "(?Pod)\\s+" + @@ -312,297 +273,207 @@ func TestPodsListAsTable(t *testing.T) { "(?)\\s+" + "(?)\\s+" + "(?)" - if m, e := regexp.MatchString(expectedRow, outPodsListInNamespace); !m || e != nil { - t.Fatalf("Expected row '%s' not found in output:\n%s", expectedRow, outPodsListInNamespace) - } + m, e := 
regexp.MatchString(expectedRow, outPodsListInNamespace) + s.Truef(m, "Expected row '%s' not found in output:\n%s", expectedRow, outPodsListInNamespace) + s.NoErrorf(e, "Error matching formatted row regex: %v", e) }) }) } -func TestPodsGet(t *testing.T) { - testCase(t, func(c *mcpContext) { - c.withEnvTest() - t.Run("pods_get with nil name returns error", func(t *testing.T) { - toolResult, _ := c.callTool("pods_get", map[string]interface{}{}) - if toolResult.IsError != true { - t.Fatalf("call tool should fail") - return - } - if toolResult.Content[0].(mcp.TextContent).Text != "failed to get pod, missing argument name" { - t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) - return - } - }) - t.Run("pods_get with not found name returns error", func(t *testing.T) { - toolResult, _ := c.callTool("pods_get", map[string]interface{}{"name": "not-found"}) - if toolResult.IsError != true { - t.Fatalf("call tool should fail") - return - } - if toolResult.Content[0].(mcp.TextContent).Text != "failed to get pod not-found in namespace : pods \"not-found\" not found" { - t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) - return - } - }) - podsGetNilNamespace, err := c.callTool("pods_get", map[string]interface{}{ +func (s *PodsSuite) TestPodsGet() { + s.InitMcpClient() + s.Run("pods_get with nil name returns error", func() { + toolResult, _ := s.CallTool("pods_get", map[string]interface{}{}) + s.Truef(toolResult.IsError, "call tool should fail") + s.Equalf("failed to get pod, missing argument name", toolResult.Content[0].(mcp.TextContent).Text, "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + }) + s.Run("pods_get(name=not-found) with not found name returns error", func() { + toolResult, _ := s.CallTool("pods_get", map[string]interface{}{"name": "not-found"}) + s.Truef(toolResult.IsError, "call tool should fail") + s.Equalf("failed to get pod not-found in namespace : pods 
\"not-found\" not found", toolResult.Content[0].(mcp.TextContent).Text, "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + }) + s.Run("pods_get(name=a-pod-in-default, namespace=nil), uses configured namespace", func() { + podsGetNilNamespace, err := s.CallTool("pods_get", map[string]interface{}{ "name": "a-pod-in-default", }) - t.Run("pods_get with name and nil namespace returns pod", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - return - } - if podsGetNilNamespace.IsError { - t.Fatalf("call tool failed") - return - } + s.Run("returns pod", func() { + s.Nilf(err, "call tool failed %v", err) + s.Falsef(podsGetNilNamespace.IsError, "call tool failed") }) var decodedNilNamespace unstructured.Unstructured err = yaml.Unmarshal([]byte(podsGetNilNamespace.Content[0].(mcp.TextContent).Text), &decodedNilNamespace) - t.Run("pods_get with name and nil namespace has yaml content", func(t *testing.T) { - if err != nil { - t.Fatalf("invalid tool result content %v", err) - return - } + s.Run("has yaml content", func() { + s.Nilf(err, "invalid tool result content %v", err) }) - t.Run("pods_get with name and nil namespace returns pod in default", func(t *testing.T) { - if decodedNilNamespace.GetName() != "a-pod-in-default" { - t.Fatalf("invalid pod name, expected a-pod-in-default, got %v", decodedNilNamespace.GetName()) - return - } - if decodedNilNamespace.GetNamespace() != "default" { - t.Fatalf("invalid pod namespace, expected default, got %v", decodedNilNamespace.GetNamespace()) - return - } + s.Run("returns pod in default", func() { + s.Equalf("a-pod-in-default", decodedNilNamespace.GetName(), "invalid pod name, expected a-pod-in-default, got %v", decodedNilNamespace.GetName()) + s.Equalf("default", decodedNilNamespace.GetNamespace(), "invalid pod namespace, expected default, got %v", decodedNilNamespace.GetNamespace()) }) - t.Run("pods_get with name and nil namespace omits managed fields", func(t *testing.T) { - 
if decodedNilNamespace.GetManagedFields() != nil { - t.Fatalf("managed fields should be omitted, got %v", decodedNilNamespace.GetManagedFields()) - return - } + s.Run("omits managed fields", func() { + s.Nilf(decodedNilNamespace.GetManagedFields(), "managed fields should be omitted, got %v", decodedNilNamespace.GetManagedFields()) }) - podsGetInNamespace, err := c.callTool("pods_get", map[string]interface{}{ + }) + s.Run("pods_get(name=a-pod-in-default, namespace=ns-1)", func() { + podsGetInNamespace, err := s.CallTool("pods_get", map[string]interface{}{ "namespace": "ns-1", "name": "a-pod-in-ns-1", }) - t.Run("pods_get with name and namespace returns pod", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - return - } - if podsGetInNamespace.IsError { - t.Fatalf("call tool failed") - return - } + s.Run("returns pod", func() { + s.Nilf(err, "call tool failed %v", err) + s.Falsef(podsGetInNamespace.IsError, "call tool failed") }) var decodedInNamespace unstructured.Unstructured err = yaml.Unmarshal([]byte(podsGetInNamespace.Content[0].(mcp.TextContent).Text), &decodedInNamespace) - t.Run("pods_get with name and namespace has yaml content", func(t *testing.T) { - if err != nil { - t.Fatalf("invalid tool result content %v", err) - return - } + s.Run("has yaml content", func() { + s.Nilf(err, "invalid tool result content %v", err) }) - t.Run("pods_get with name and namespace returns pod in ns-1", func(t *testing.T) { - if decodedInNamespace.GetName() != "a-pod-in-ns-1" { - t.Fatalf("invalid pod name, expected a-pod-in-ns-1, got %v", decodedInNamespace.GetName()) - return - } - if decodedInNamespace.GetNamespace() != "ns-1" { - t.Fatalf("invalid pod namespace, ns-1 ns-1, got %v", decodedInNamespace.GetNamespace()) - return - } + s.Run("returns pod in ns-1", func() { + s.Equalf("a-pod-in-ns-1", decodedInNamespace.GetName(), "invalid pod name, expected a-pod-in-ns-1, got %v", decodedInNamespace.GetName()) + s.Equalf("ns-1", 
decodedInNamespace.GetNamespace(), "invalid pod namespace, expected ns-1, got %v", decodedInNamespace.GetNamespace()) }) }) } -func TestPodsGetDenied(t *testing.T) { - deniedResourcesServer := test.Must(config.ReadToml([]byte(` +func (s *PodsSuite) TestPodsGetDenied() { + s.Require().NoError(toml.Unmarshal([]byte(` denied_resources = [ { version = "v1", kind = "Pod" } ] - `))) - testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { - c.withEnvTest() - podsGet, _ := c.callTool("pods_get", map[string]interface{}{"name": "a-pod-in-default"}) - t.Run("pods_get has error", func(t *testing.T) { - if !podsGet.IsError { - t.Fatalf("call tool should fail") - } - }) - t.Run("pods_get describes denial", func(t *testing.T) { + `), s.Cfg), "Expected to parse denied resources config") + s.InitMcpClient() + s.Run("pods_get (denied)", func() { + podsGet, err := s.CallTool("pods_get", map[string]interface{}{"name": "a-pod-in-default"}) + s.Run("has error", func() { + s.Truef(podsGet.IsError, "call tool should fail") + s.Nilf(err, "call tool should not return error object") + }) + s.Run("describes denial", func() { expectedMessage := "failed to get pod a-pod-in-default in namespace : resource not allowed: /v1, Kind=Pod" - if podsGet.Content[0].(mcp.TextContent).Text != expectedMessage { - t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, podsGet.Content[0].(mcp.TextContent).Text) - } + s.Equalf(expectedMessage, podsGet.Content[0].(mcp.TextContent).Text, + "expected descriptive error '%s', got %v", expectedMessage, podsGet.Content[0].(mcp.TextContent).Text) }) }) } -func TestPodsDelete(t *testing.T) { - testCase(t, func(c *mcpContext) { - c.withEnvTest() - // Errors - t.Run("pods_delete with nil name returns error", func(t *testing.T) { - toolResult, _ := c.callTool("pods_delete", map[string]interface{}{}) - if toolResult.IsError != true { - t.Errorf("call tool should fail") - return - } - if 
toolResult.Content[0].(mcp.TextContent).Text != "failed to delete pod, missing argument name" { - t.Errorf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) - return - } - }) - t.Run("pods_delete with not found name returns error", func(t *testing.T) { - toolResult, _ := c.callTool("pods_delete", map[string]interface{}{"name": "not-found"}) - if toolResult.IsError != true { - t.Errorf("call tool should fail") - return - } - if toolResult.Content[0].(mcp.TextContent).Text != "failed to delete pod not-found in namespace : pods \"not-found\" not found" { - t.Errorf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) - return - } - }) - // Default/nil Namespace - kc := c.newKubernetesClient() - _, _ = kc.CoreV1().Pods("default").Create(c.ctx, &corev1.Pod{ +func (s *PodsSuite) TestPodsDelete() { + s.InitMcpClient() + s.Run("pods_delete with nil name returns error", func() { + toolResult, _ := s.CallTool("pods_delete", map[string]interface{}{}) + s.Truef(toolResult.IsError, "call tool should fail") + s.Equalf("failed to delete pod, missing argument name", toolResult.Content[0].(mcp.TextContent).Text, "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + }) + s.Run("pods_delete(name=not-found) with not found name returns error", func() { + toolResult, _ := s.CallTool("pods_delete", map[string]interface{}{"name": "not-found"}) + s.Truef(toolResult.IsError, "call tool should fail") + s.Equalf("failed to delete pod not-found in namespace : pods \"not-found\" not found", toolResult.Content[0].(mcp.TextContent).Text, "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + }) + s.Run("pods_delete(name=a-pod-to-delete, namespace=nil), uses configured namespace", func() { + kc := kubernetes.NewForConfigOrDie(envTestRestConfig) + _, _ = kc.CoreV1().Pods("default").Create(s.T().Context(), &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "a-pod-to-delete"}, Spec: 
corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, }, metav1.CreateOptions{}) - podsDeleteNilNamespace, err := c.callTool("pods_delete", map[string]interface{}{ + podsDeleteNilNamespace, err := s.CallTool("pods_delete", map[string]interface{}{ "name": "a-pod-to-delete", }) - t.Run("pods_delete with name and nil namespace returns success", func(t *testing.T) { - if err != nil { - t.Errorf("call tool failed %v", err) - return - } - if podsDeleteNilNamespace.IsError { - t.Errorf("call tool failed") - return - } - if podsDeleteNilNamespace.Content[0].(mcp.TextContent).Text != "Pod deleted successfully" { - t.Errorf("invalid tool result content, got %v", podsDeleteNilNamespace.Content[0].(mcp.TextContent).Text) - return - } + s.Run("returns success", func() { + s.Nilf(err, "call tool failed %v", err) + s.Falsef(podsDeleteNilNamespace.IsError, "call tool failed") + s.Equalf("Pod deleted successfully", podsDeleteNilNamespace.Content[0].(mcp.TextContent).Text, "invalid tool result content, got %v", podsDeleteNilNamespace.Content[0].(mcp.TextContent).Text) }) - t.Run("pods_delete with name and nil namespace deletes Pod", func(t *testing.T) { - p, pErr := kc.CoreV1().Pods("default").Get(c.ctx, "a-pod-to-delete", metav1.GetOptions{}) - if pErr == nil && p != nil && p.DeletionTimestamp == nil { - t.Errorf("Pod not deleted") - return - } + s.Run("deletes Pod", func() { + p, pErr := kc.CoreV1().Pods("default").Get(s.T().Context(), "a-pod-to-delete", metav1.GetOptions{}) + s.Truef(pErr != nil || p == nil || p.DeletionTimestamp != nil, "Pod not deleted") }) - // Provided Namespace - _, _ = kc.CoreV1().Pods("ns-1").Create(c.ctx, &corev1.Pod{ + }) + s.Run("pods_delete(name=a-pod-to-delete-in-ns-1, namespace=ns-1)", func() { + kc := kubernetes.NewForConfigOrDie(envTestRestConfig) + _, _ = kc.CoreV1().Pods("ns-1").Create(s.T().Context(), &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "a-pod-to-delete-in-ns-1"}, Spec: corev1.PodSpec{Containers: 
[]corev1.Container{{Name: "nginx", Image: "nginx"}}}, }, metav1.CreateOptions{}) - podsDeleteInNamespace, err := c.callTool("pods_delete", map[string]interface{}{ + podsDeleteInNamespace, err := s.CallTool("pods_delete", map[string]interface{}{ "namespace": "ns-1", "name": "a-pod-to-delete-in-ns-1", }) - t.Run("pods_delete with name and namespace returns success", func(t *testing.T) { - if err != nil { - t.Errorf("call tool failed %v", err) - return - } - if podsDeleteInNamespace.IsError { - t.Errorf("call tool failed") - return - } - if podsDeleteInNamespace.Content[0].(mcp.TextContent).Text != "Pod deleted successfully" { - t.Errorf("invalid tool result content, got %v", podsDeleteInNamespace.Content[0].(mcp.TextContent).Text) - return - } + s.Run("returns success", func() { + s.Nilf(err, "call tool failed %v", err) + s.Falsef(podsDeleteInNamespace.IsError, "call tool failed") + s.Equalf("Pod deleted successfully", podsDeleteInNamespace.Content[0].(mcp.TextContent).Text, "invalid tool result content, got %v", podsDeleteInNamespace.Content[0].(mcp.TextContent).Text) }) - t.Run("pods_delete with name and namespace deletes Pod", func(t *testing.T) { - p, pErr := kc.CoreV1().Pods("ns-1").Get(c.ctx, "a-pod-to-delete-in-ns-1", metav1.GetOptions{}) - if pErr == nil && p != nil && p.DeletionTimestamp == nil { - t.Errorf("Pod not deleted") - return - } + s.Run("deletes Pod", func() { + p, pErr := kc.CoreV1().Pods("ns-1").Get(s.T().Context(), "a-pod-to-delete-in-ns-1", metav1.GetOptions{}) + s.Truef(pErr != nil || p == nil || p.DeletionTimestamp != nil, "Pod not deleted") }) - // Managed Pod + }) + s.Run("pods_delete(name=a-managed-pod-to-delete, namespace=nil) with managed pod", func() { + kc := kubernetes.NewForConfigOrDie(envTestRestConfig) managedLabels := map[string]string{ "app.kubernetes.io/managed-by": "kubernetes-mcp-server", "app.kubernetes.io/name": "a-manged-pod-to-delete", } - _, _ = kc.CoreV1().Pods("default").Create(c.ctx, &corev1.Pod{ + _, _ = 
kc.CoreV1().Pods("default").Create(s.T().Context(), &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "a-managed-pod-to-delete", Labels: managedLabels}, Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, }, metav1.CreateOptions{}) - _, _ = kc.CoreV1().Services("default").Create(c.ctx, &corev1.Service{ + _, _ = kc.CoreV1().Services("default").Create(s.T().Context(), &corev1.Service{ ObjectMeta: metav1.ObjectMeta{Name: "a-managed-service-to-delete", Labels: managedLabels}, Spec: corev1.ServiceSpec{Selector: managedLabels, Ports: []corev1.ServicePort{{Port: 80}}}, }, metav1.CreateOptions{}) - podsDeleteManaged, err := c.callTool("pods_delete", map[string]interface{}{ + podsDeleteManaged, err := s.CallTool("pods_delete", map[string]interface{}{ "name": "a-managed-pod-to-delete", }) - t.Run("pods_delete with managed pod returns success", func(t *testing.T) { - if err != nil { - t.Errorf("call tool failed %v", err) - return - } - if podsDeleteManaged.IsError { - t.Errorf("call tool failed") - return - } - if podsDeleteManaged.Content[0].(mcp.TextContent).Text != "Pod deleted successfully" { - t.Errorf("invalid tool result content, got %v", podsDeleteManaged.Content[0].(mcp.TextContent).Text) - return - } + s.Run("returns success", func() { + s.Nilf(err, "call tool failed %v", err) + s.Falsef(podsDeleteManaged.IsError, "call tool failed") + s.Equalf("Pod deleted successfully", podsDeleteManaged.Content[0].(mcp.TextContent).Text, "invalid tool result content, got %v", podsDeleteManaged.Content[0].(mcp.TextContent).Text) }) - t.Run("pods_delete with managed pod deletes Pod and Service", func(t *testing.T) { - p, pErr := kc.CoreV1().Pods("default").Get(c.ctx, "a-managed-pod-to-delete", metav1.GetOptions{}) - if pErr == nil && p != nil && p.DeletionTimestamp == nil { - t.Errorf("Pod not deleted") - return - } - s, sErr := kc.CoreV1().Services("default").Get(c.ctx, "a-managed-service-to-delete", metav1.GetOptions{}) - if sErr == nil && s != 
nil && s.DeletionTimestamp == nil { - t.Errorf("Service not deleted") - return - } + s.Run("deletes Pod and Service", func() { + p, pErr := kc.CoreV1().Pods("default").Get(s.T().Context(), "a-managed-pod-to-delete", metav1.GetOptions{}) + s.Truef(pErr != nil || p == nil || p.DeletionTimestamp != nil, "Pod not deleted") + svc, sErr := kc.CoreV1().Services("default").Get(s.T().Context(), "a-managed-service-to-delete", metav1.GetOptions{}) + s.Truef(sErr != nil || svc == nil || svc.DeletionTimestamp != nil, "Service not deleted") }) }) } -func TestPodsDeleteDenied(t *testing.T) { - deniedResourcesServer := test.Must(config.ReadToml([]byte(` +func (s *PodsSuite) TestPodsDeleteDenied() { + s.Require().NoError(toml.Unmarshal([]byte(` denied_resources = [ { version = "v1", kind = "Pod" } ] - `))) - testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { - c.withEnvTest() - podsDelete, _ := c.callTool("pods_delete", map[string]interface{}{"name": "a-pod-in-default"}) - t.Run("pods_delete has error", func(t *testing.T) { - if !podsDelete.IsError { - t.Fatalf("call tool should fail") - } - }) - t.Run("pods_delete describes denial", func(t *testing.T) { + `), s.Cfg), "Expected to parse denied resources config") + s.InitMcpClient() + s.Run("pods_delete (denied)", func() { + podsDelete, err := s.CallTool("pods_delete", map[string]interface{}{"name": "a-pod-in-default"}) + s.Run("has error", func() { + s.Truef(podsDelete.IsError, "call tool should fail") + s.Nilf(err, "call tool should not return error object") + }) + s.Run("describes denial", func() { expectedMessage := "failed to delete pod a-pod-in-default in namespace : resource not allowed: /v1, Kind=Pod" - if podsDelete.Content[0].(mcp.TextContent).Text != expectedMessage { - t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, podsDelete.Content[0].(mcp.TextContent).Text) - } + s.Equalf(expectedMessage, podsDelete.Content[0].(mcp.TextContent).Text, + "expected 
descriptive error '%s', got %v", expectedMessage, podsDelete.Content[0].(mcp.TextContent).Text) }) }) } -func TestPodsDeleteInOpenShift(t *testing.T) { - testCaseWithContext(t, &mcpContext{before: inOpenShift, after: inOpenShiftClear}, func(c *mcpContext) { +func (s *PodsSuite) TestPodsDeleteInOpenShift() { + s.Require().NoError(EnvTestInOpenShift(s.T().Context()), "Expected to configure test for OpenShift") + s.T().Cleanup(func() { + s.Require().NoError(EnvTestInOpenShiftClear(s.T().Context()), "Expected to clear OpenShift test configuration") + }) + s.InitMcpClient() + + s.Run("pods_delete with managed pod in OpenShift", func() { managedLabels := map[string]string{ "app.kubernetes.io/managed-by": "kubernetes-mcp-server", "app.kubernetes.io/name": "a-manged-pod-to-delete", } - kc := c.newKubernetesClient() - _, _ = kc.CoreV1().Pods("default").Create(c.ctx, &corev1.Pod{ + kc := kubernetes.NewForConfigOrDie(envTestRestConfig) + _, _ = kc.CoreV1().Pods("default").Create(s.T().Context(), &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "a-managed-pod-to-delete-in-openshift", Labels: managedLabels}, Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, }, metav1.CreateOptions{}) dynamicClient := dynamic.NewForConfigOrDie(envTestRestConfig) _, _ = dynamicClient.Resource(schema.GroupVersionResource{Group: "route.openshift.io", Version: "v1", Resource: "routes"}). 
- Namespace("default").Create(c.ctx, &unstructured.Unstructured{Object: map[string]interface{}{ + Namespace("default").Create(s.T().Context(), &unstructured.Unstructured{Object: map[string]interface{}{ "apiVersion": "route.openshift.io/v1", "kind": "Route", "metadata": map[string]interface{}{ @@ -610,519 +481,212 @@ func TestPodsDeleteInOpenShift(t *testing.T) { "labels": managedLabels, }, }}, metav1.CreateOptions{}) - podsDeleteManagedOpenShift, err := c.callTool("pods_delete", map[string]interface{}{ + podsDeleteManagedOpenShift, err := s.CallTool("pods_delete", map[string]interface{}{ "name": "a-managed-pod-to-delete-in-openshift", }) - t.Run("pods_delete with managed pod in OpenShift returns success", func(t *testing.T) { - if err != nil { - t.Errorf("call tool failed %v", err) - return - } - if podsDeleteManagedOpenShift.IsError { - t.Errorf("call tool failed") - return - } - if podsDeleteManagedOpenShift.Content[0].(mcp.TextContent).Text != "Pod deleted successfully" { - t.Errorf("invalid tool result content, got %v", podsDeleteManagedOpenShift.Content[0].(mcp.TextContent).Text) - return - } + s.Run("returns success", func() { + s.Nilf(err, "call tool failed %v", err) + s.Falsef(podsDeleteManagedOpenShift.IsError, "call tool failed") + s.Equalf("Pod deleted successfully", podsDeleteManagedOpenShift.Content[0].(mcp.TextContent).Text, + "invalid tool result content, got %v", podsDeleteManagedOpenShift.Content[0].(mcp.TextContent).Text) }) - t.Run("pods_delete with managed pod in OpenShift deletes Pod and Route", func(t *testing.T) { - p, pErr := kc.CoreV1().Pods("default").Get(c.ctx, "a-managed-pod-to-delete-in-openshift", metav1.GetOptions{}) - if pErr == nil && p != nil && p.DeletionTimestamp == nil { - t.Errorf("Pod not deleted") - return - } + s.Run("deletes Pod and Route", func() { + p, pErr := kc.CoreV1().Pods("default").Get(s.T().Context(), "a-managed-pod-to-delete-in-openshift", metav1.GetOptions{}) + s.False(pErr == nil && p != nil && 
p.DeletionTimestamp == nil, "Pod not deleted") r, rErr := dynamicClient. Resource(schema.GroupVersionResource{Group: "route.openshift.io", Version: "v1", Resource: "routes"}). - Namespace("default").Get(c.ctx, "a-managed-route-to-delete", metav1.GetOptions{}) - if rErr == nil && r != nil && r.GetDeletionTimestamp() == nil { - t.Errorf("Route not deleted") - return - } + Namespace("default").Get(s.T().Context(), "a-managed-route-to-delete", metav1.GetOptions{}) + s.False(rErr == nil && r != nil && r.GetDeletionTimestamp() == nil, "Route not deleted") }) }) } -func TestPodsLog(t *testing.T) { - testCase(t, func(c *mcpContext) { - c.withEnvTest() - t.Run("pods_log with nil name returns error", func(t *testing.T) { - toolResult, _ := c.callTool("pods_log", map[string]interface{}{}) - if toolResult.IsError != true { - t.Fatalf("call tool should fail") - return - } - if toolResult.Content[0].(mcp.TextContent).Text != "failed to get pod log, missing argument name" { - t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) - return - } - }) - t.Run("pods_log with not found name returns error", func(t *testing.T) { - toolResult, _ := c.callTool("pods_log", map[string]interface{}{"name": "not-found"}) - if toolResult.IsError != true { - t.Fatalf("call tool should fail") - return - } - if toolResult.Content[0].(mcp.TextContent).Text != "failed to get pod not-found log in namespace : pods \"not-found\" not found" { - t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) - return - } - }) - podsLogNilNamespace, err := c.callTool("pods_log", map[string]interface{}{ +func (s *PodsSuite) TestPodsLog() { + s.InitMcpClient() + s.Run("pods_log with nil name returns error", func() { + toolResult, _ := s.CallTool("pods_log", map[string]interface{}{}) + s.Truef(toolResult.IsError, "call tool should fail") + s.Equalf("failed to get pod log, missing argument name", toolResult.Content[0].(mcp.TextContent).Text, "invalid 
error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + }) + s.Run("pods_log with not found name returns error", func() { + toolResult, _ := s.CallTool("pods_log", map[string]interface{}{"name": "not-found"}) + s.Truef(toolResult.IsError, "call tool should fail") + s.Equalf("failed to get pod not-found log in namespace : pods \"not-found\" not found", toolResult.Content[0].(mcp.TextContent).Text, "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + }) + s.Run("pods_log(name=a-pod-in-default, namespace=nil), uses configured namespace", func() { + podsLogNilNamespace, err := s.CallTool("pods_log", map[string]interface{}{ "name": "a-pod-in-default", }) - t.Run("pods_log with name and nil namespace returns pod log", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - return - } - if podsLogNilNamespace.IsError { - t.Fatalf("call tool failed") - return - } - }) - podsLogInNamespace, err := c.callTool("pods_log", map[string]interface{}{ + s.Nilf(err, "call tool failed %v", err) + s.Falsef(podsLogNilNamespace.IsError, "call tool failed") + }) + s.Run("pods_log(name=a-pod-in-ns-1, namespace=ns-1)", func() { + podsLogInNamespace, err := s.CallTool("pods_log", map[string]interface{}{ "namespace": "ns-1", "name": "a-pod-in-ns-1", }) - t.Run("pods_log with name and namespace returns pod log", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - return - } - if podsLogInNamespace.IsError { - t.Fatalf("call tool failed") - return - } - }) - podsContainerLogInNamespace, err := c.callTool("pods_log", map[string]interface{}{ + s.Nilf(err, "call tool failed %v", err) + s.Falsef(podsLogInNamespace.IsError, "call tool failed") + }) + s.Run("pods_log(name=a-pod-in-ns-1, namespace=ns-1, container=nginx)", func() { + podsContainerLogInNamespace, err := s.CallTool("pods_log", map[string]interface{}{ "namespace": "ns-1", "name": "a-pod-in-ns-1", "container": "nginx", }) - t.Run("pods_log with 
name, container and namespace returns pod log", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - return - } - if podsContainerLogInNamespace.IsError { - t.Fatalf("call tool failed") - return - } - }) - toolResult, err := c.callTool("pods_log", map[string]interface{}{ + s.Nilf(err, "call tool failed %v", err) + s.Falsef(podsContainerLogInNamespace.IsError, "call tool failed") + }) + s.Run("with non existing container returns error", func() { + toolResult, err := s.CallTool("pods_log", map[string]interface{}{ "namespace": "ns-1", "name": "a-pod-in-ns-1", "container": "a-not-existing-container", }) - t.Run("pods_log with non existing container returns error", func(t *testing.T) { - if toolResult.IsError != true { - t.Fatalf("call tool should fail") - return - } - if toolResult.Content[0].(mcp.TextContent).Text != "failed to get pod a-pod-in-ns-1 log in namespace ns-1: container a-not-existing-container is not valid for pod a-pod-in-ns-1" { - t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) - return - } - }) - podsPreviousLogInNamespace, err := c.callTool("pods_log", map[string]interface{}{ + s.Nilf(err, "call tool should not return error object") + s.Truef(toolResult.IsError, "call tool should fail") + s.Equalf("failed to get pod a-pod-in-ns-1 log in namespace ns-1: container a-not-existing-container is not valid for pod a-pod-in-ns-1", toolResult.Content[0].(mcp.TextContent).Text, "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + }) + s.Run("pods_log(previous=true) returns previous pod log", func() { + podsPreviousLogInNamespace, err := s.CallTool("pods_log", map[string]interface{}{ "namespace": "ns-1", "name": "a-pod-in-ns-1", "previous": true, }) - t.Run("pods_log with previous=true returns previous pod log", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - return - } - if podsPreviousLogInNamespace.IsError { - t.Fatalf("call tool 
failed") - return - } - }) - podsPreviousLogFalse, err := c.callTool("pods_log", map[string]interface{}{ + s.Nilf(err, "call tool failed %v", err) + s.Falsef(podsPreviousLogInNamespace.IsError, "call tool failed") + }) + s.Run("pods_log(previous=false) returns current pod log", func() { + podsPreviousLogFalse, err := s.CallTool("pods_log", map[string]interface{}{ "namespace": "ns-1", "name": "a-pod-in-ns-1", "previous": false, }) - t.Run("pods_log with previous=false returns current pod log", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - return - } - if podsPreviousLogFalse.IsError { - t.Fatalf("call tool failed") - return - } - }) - - // Test with tail parameter - podsTailLines, err := c.callTool("pods_log", map[string]interface{}{ + s.Nilf(err, "call tool failed %v", err) + s.Falsef(podsPreviousLogFalse.IsError, "call tool failed") + }) + s.Run("pods_log(tail=50) returns pod log", func() { + podsTailLines, err := s.CallTool("pods_log", map[string]interface{}{ "namespace": "ns-1", "name": "a-pod-in-ns-1", "tail": 50, }) - t.Run("pods_log with tail=50 returns pod log", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - return - } - if podsTailLines.IsError { - t.Fatalf("call tool failed") - return - } - }) - - // Test with invalid tail parameter - podsInvalidTailLines, _ := c.callTool("pods_log", map[string]interface{}{ + s.Nilf(err, "call tool failed %v", err) + s.Falsef(podsTailLines.IsError, "call tool failed") + }) + s.Run("with invalid tail returns error", func() { + podsInvalidTailLines, _ := s.CallTool("pods_log", map[string]interface{}{ "namespace": "ns-1", "name": "a-pod-in-ns-1", "tail": "invalid", }) - t.Run("pods_log with invalid tail returns error", func(t *testing.T) { - if !podsInvalidTailLines.IsError { - t.Fatalf("call tool should fail") - return - } - expectedErrorMsg := "failed to parse tail parameter: expected integer" - if errMsg := 
podsInvalidTailLines.Content[0].(mcp.TextContent).Text; !strings.Contains(errMsg, expectedErrorMsg) { - t.Fatalf("unexpected error message, expected to contain '%s', got '%s'", expectedErrorMsg, errMsg) - return - } - }) + s.Truef(podsInvalidTailLines.IsError, "call tool should fail") + expectedErrorMsg := "failed to parse tail parameter: expected integer" + errMsg := podsInvalidTailLines.Content[0].(mcp.TextContent).Text + s.Containsf(errMsg, expectedErrorMsg, "unexpected error message, expected to contain '%s', got '%s'", expectedErrorMsg, errMsg) }) } -func TestPodsLogDenied(t *testing.T) { - deniedResourcesServer := test.Must(config.ReadToml([]byte(` +func (s *PodsSuite) TestPodsLogDenied() { + s.Require().NoError(toml.Unmarshal([]byte(` denied_resources = [ { version = "v1", kind = "Pod" } ] - `))) - testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { - c.withEnvTest() - podsLog, _ := c.callTool("pods_log", map[string]interface{}{"name": "a-pod-in-default"}) - t.Run("pods_log has error", func(t *testing.T) { - if !podsLog.IsError { - t.Fatalf("call tool should fail") - } - }) - t.Run("pods_log describes denial", func(t *testing.T) { + `), s.Cfg), "Expected to parse denied resources config") + s.InitMcpClient() + s.Run("pods_log (denied)", func() { + podsLog, err := s.CallTool("pods_log", map[string]interface{}{"name": "a-pod-in-default"}) + s.Run("has error", func() { + s.Truef(podsLog.IsError, "call tool should fail") + s.Nilf(err, "call tool should not return error object") + }) + s.Run("describes denial", func() { expectedMessage := "failed to get pod a-pod-in-default log in namespace : resource not allowed: /v1, Kind=Pod" - if podsLog.Content[0].(mcp.TextContent).Text != expectedMessage { - t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, podsLog.Content[0].(mcp.TextContent).Text) - } + s.Equalf(expectedMessage, podsLog.Content[0].(mcp.TextContent).Text, + "expected descriptive error '%s', got 
%v", expectedMessage, podsLog.Content[0].(mcp.TextContent).Text) }) }) } -func TestPodsRun(t *testing.T) { - testCase(t, func(c *mcpContext) { - c.withEnvTest() - t.Run("pods_run with nil image returns error", func(t *testing.T) { - toolResult, _ := c.callTool("pods_run", map[string]interface{}{}) - if toolResult.IsError != true { - t.Errorf("call tool should fail") - return - } - if toolResult.Content[0].(mcp.TextContent).Text != "failed to run pod, missing argument image" { - t.Errorf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) - return - } - }) - podsRunNilNamespace, err := c.callTool("pods_run", map[string]interface{}{"image": "nginx"}) - t.Run("pods_run with image and nil namespace runs pod", func(t *testing.T) { - if err != nil { - t.Errorf("call tool failed %v", err) - return - } - if podsRunNilNamespace.IsError { - t.Errorf("call tool failed") - return - } - }) - var decodedNilNamespace []unstructured.Unstructured - err = yaml.Unmarshal([]byte(podsRunNilNamespace.Content[0].(mcp.TextContent).Text), &decodedNilNamespace) - t.Run("pods_run with image and nil namespace has yaml content", func(t *testing.T) { - if err != nil { - t.Errorf("invalid tool result content %v", err) - return - } - }) - t.Run("pods_run with image and nil namespace returns 1 item (Pod)", func(t *testing.T) { - if len(decodedNilNamespace) != 1 { - t.Errorf("invalid pods count, expected 1, got %v", len(decodedNilNamespace)) - return - } - if decodedNilNamespace[0].GetKind() != "Pod" { - t.Errorf("invalid pod kind, expected Pod, got %v", decodedNilNamespace[0].GetKind()) - return - } - }) - t.Run("pods_run with image and nil namespace returns pod in default", func(t *testing.T) { - if decodedNilNamespace[0].GetNamespace() != "default" { - t.Errorf("invalid pod namespace, expected default, got %v", decodedNilNamespace[0].GetNamespace()) - return - } +func (s *PodsSuite) TestPodsListWithLabelSelector() { + s.InitMcpClient() + kc := 
kubernetes.NewForConfigOrDie(envTestRestConfig) + // Create pods with labels + _, _ = kc.CoreV1().Pods("default").Create(s.T().Context(), &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-with-labels", + Labels: map[string]string{"app": "test", "env": "dev"}, + }, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, metav1.CreateOptions{}) + _, _ = kc.CoreV1().Pods("ns-1").Create(s.T().Context(), &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "another-pod-with-labels", + Labels: map[string]string{"app": "test", "env": "prod"}, + }, + Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, + }, metav1.CreateOptions{}) + + s.Run("pods_list(labelSelector=app=test) returns filtered pods from configured namespace", func() { + toolResult, err := s.CallTool("pods_list", map[string]interface{}{ + "labelSelector": "app=test", }) - t.Run("pods_run with image and nil namespace returns pod with random name", func(t *testing.T) { - if !strings.HasPrefix(decodedNilNamespace[0].GetName(), "kubernetes-mcp-server-run-") { - t.Errorf("invalid pod name, expected random, got %v", decodedNilNamespace[0].GetName()) - return - } + s.Run("no error", func() { + s.Nilf(err, "call tool failed %v", err) + s.Falsef(toolResult.IsError, "call tool failed") }) - t.Run("pods_run with image and nil namespace returns pod with labels", func(t *testing.T) { - labels := decodedNilNamespace[0].Object["metadata"].(map[string]interface{})["labels"].(map[string]interface{}) - if labels["app.kubernetes.io/name"] == "" { - t.Errorf("invalid labels, expected app.kubernetes.io/name, got %v", labels) - return - } - if labels["app.kubernetes.io/component"] == "" { - t.Errorf("invalid labels, expected app.kubernetes.io/component, got %v", labels) - return - } - if labels["app.kubernetes.io/managed-by"] != "kubernetes-mcp-server" { - t.Errorf("invalid labels, expected app.kubernetes.io/managed-by, got %v", labels) - return - } 
- if labels["app.kubernetes.io/part-of"] != "kubernetes-mcp-server-run-sandbox" { - t.Errorf("invalid labels, expected app.kubernetes.io/part-of, got %v", labels) - return - } + var decoded []unstructured.Unstructured + err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded) + s.Run("has yaml content", func() { + s.Nilf(err, "invalid tool result content %v", err) }) - t.Run("pods_run with image and nil namespace returns pod with nginx container", func(t *testing.T) { - containers := decodedNilNamespace[0].Object["spec"].(map[string]interface{})["containers"].([]interface{}) - if containers[0].(map[string]interface{})["image"] != "nginx" { - t.Errorf("invalid container name, expected nginx, got %v", containers[0].(map[string]interface{})["image"]) - return - } + s.Run("returns 2 pods", func() { + s.Lenf(decoded, 2, "invalid pods count, expected 2, got %v", len(decoded)) }) + }) - podsRunNamespaceAndPort, err := c.callTool("pods_run", map[string]interface{}{"image": "nginx", "port": 80}) - t.Run("pods_run with image, namespace, and port runs pod", func(t *testing.T) { - if err != nil { - t.Errorf("call tool failed %v", err) - return - } - if podsRunNamespaceAndPort.IsError { - t.Errorf("call tool failed") - return - } + s.Run("pods_list_in_namespace(labelSelector=env=prod, namespace=ns-1) returns filtered pods", func() { + toolResult, err := s.CallTool("pods_list_in_namespace", map[string]interface{}{ + "namespace": "ns-1", + "labelSelector": "env=prod", }) - var decodedNamespaceAndPort []unstructured.Unstructured - err = yaml.Unmarshal([]byte(podsRunNamespaceAndPort.Content[0].(mcp.TextContent).Text), &decodedNamespaceAndPort) - t.Run("pods_run with image, namespace, and port has yaml content", func(t *testing.T) { - if err != nil { - t.Errorf("invalid tool result content %v", err) - return - } + s.Run("no error", func() { + s.Nilf(err, "call tool failed %v", err) + s.Falsef(toolResult.IsError, "call tool failed") }) - t.Run("pods_run 
with image, namespace, and port returns 2 items (Pod + Service)", func(t *testing.T) { - if len(decodedNamespaceAndPort) != 2 { - t.Errorf("invalid pods count, expected 2, got %v", len(decodedNamespaceAndPort)) - return - } - if decodedNamespaceAndPort[0].GetKind() != "Pod" { - t.Errorf("invalid pod kind, expected Pod, got %v", decodedNamespaceAndPort[0].GetKind()) - return - } - if decodedNamespaceAndPort[1].GetKind() != "Service" { - t.Errorf("invalid service kind, expected Service, got %v", decodedNamespaceAndPort[1].GetKind()) - return - } + var decoded []unstructured.Unstructured + err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded) + s.Run("has yaml content", func() { + s.Nilf(err, "invalid tool result content %v", err) }) - t.Run("pods_run with image, namespace, and port returns pod with port", func(t *testing.T) { - containers := decodedNamespaceAndPort[0].Object["spec"].(map[string]interface{})["containers"].([]interface{}) - ports := containers[0].(map[string]interface{})["ports"].([]interface{}) - if ports[0].(map[string]interface{})["containerPort"] != int64(80) { - t.Errorf("invalid container port, expected 80, got %v", ports[0].(map[string]interface{})["containerPort"]) - return - } + s.Run("returns 1 pod", func() { + s.Lenf(decoded, 1, "invalid pods count, expected 1, got %v", len(decoded)) }) - t.Run("pods_run with image, namespace, and port returns service with port and selector", func(t *testing.T) { - ports := decodedNamespaceAndPort[1].Object["spec"].(map[string]interface{})["ports"].([]interface{}) - if ports[0].(map[string]interface{})["port"] != int64(80) { - t.Errorf("invalid service port, expected 80, got %v", ports[0].(map[string]interface{})["port"]) - return - } - if ports[0].(map[string]interface{})["targetPort"] != int64(80) { - t.Errorf("invalid service target port, expected 80, got %v", ports[0].(map[string]interface{})["targetPort"]) - return - } - selector := 
decodedNamespaceAndPort[1].Object["spec"].(map[string]interface{})["selector"].(map[string]interface{}) - if selector["app.kubernetes.io/name"] == "" { - t.Errorf("invalid service selector, expected app.kubernetes.io/name, got %v", selector) - return - } - if selector["app.kubernetes.io/managed-by"] != "kubernetes-mcp-server" { - t.Errorf("invalid service selector, expected app.kubernetes.io/managed-by, got %v", selector) - return - } - if selector["app.kubernetes.io/part-of"] != "kubernetes-mcp-server-run-sandbox" { - t.Errorf("invalid service selector, expected app.kubernetes.io/part-of, got %v", selector) - return - } + s.Run("returns another-pod-with-labels", func() { + s.Equalf("another-pod-with-labels", decoded[0].GetName(), "invalid pod name, expected another-pod-with-labels, got %v", decoded[0].GetName()) }) }) -} -func TestPodsRunDenied(t *testing.T) { - deniedResourcesServer := test.Must(config.ReadToml([]byte(` - denied_resources = [ { version = "v1", kind = "Pod" } ] - `))) - testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { - c.withEnvTest() - podsRun, _ := c.callTool("pods_run", map[string]interface{}{"image": "nginx"}) - t.Run("pods_run has error", func(t *testing.T) { - if !podsRun.IsError { - t.Fatalf("call tool should fail") - } + s.Run("pods_list(labelSelector=app=test,env=prod) with multiple label selectors returns filtered pods", func() { + toolResult, err := s.CallTool("pods_list", map[string]interface{}{ + "labelSelector": "app=test,env=prod", }) - t.Run("pods_run describes denial", func(t *testing.T) { - expectedMessage := "failed to run pod in namespace : resource not allowed: /v1, Kind=Pod" - if podsRun.Content[0].(mcp.TextContent).Text != expectedMessage { - t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, podsRun.Content[0].(mcp.TextContent).Text) - } - }) - }) -} - -func TestPodsRunInOpenShift(t *testing.T) { - testCaseWithContext(t, &mcpContext{before: inOpenShift, after: 
inOpenShiftClear}, func(c *mcpContext) { - t.Run("pods_run with image, namespace, and port returns route with port", func(t *testing.T) { - podsRunInOpenShift, err := c.callTool("pods_run", map[string]interface{}{"image": "nginx", "port": 80}) - if err != nil { - t.Errorf("call tool failed %v", err) - return - } - if podsRunInOpenShift.IsError { - t.Errorf("call tool failed") - return - } - var decodedPodServiceRoute []unstructured.Unstructured - err = yaml.Unmarshal([]byte(podsRunInOpenShift.Content[0].(mcp.TextContent).Text), &decodedPodServiceRoute) - if err != nil { - t.Errorf("invalid tool result content %v", err) - return - } - if len(decodedPodServiceRoute) != 3 { - t.Errorf("invalid pods count, expected 3, got %v", len(decodedPodServiceRoute)) - return - } - if decodedPodServiceRoute[2].GetKind() != "Route" { - t.Errorf("invalid route kind, expected Route, got %v", decodedPodServiceRoute[2].GetKind()) - return - } - targetPort := decodedPodServiceRoute[2].Object["spec"].(map[string]interface{})["port"].(map[string]interface{})["targetPort"].(int64) - if targetPort != 80 { - t.Errorf("invalid route target port, expected 80, got %v", targetPort) - return - } + s.Run("no error", func() { + s.Nilf(err, "call tool failed %v", err) + s.Falsef(toolResult.IsError, "call tool failed") }) - }) -} - -func TestPodsListWithLabelSelector(t *testing.T) { - testCase(t, func(c *mcpContext) { - c.withEnvTest() - kc := c.newKubernetesClient() - // Create pods with labels - _, _ = kc.CoreV1().Pods("default").Create(c.ctx, &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pod-with-labels", - Labels: map[string]string{"app": "test", "env": "dev"}, - }, - Spec: corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, - }, metav1.CreateOptions{}) - _, _ = kc.CoreV1().Pods("ns-1").Create(c.ctx, &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "another-pod-with-labels", - Labels: map[string]string{"app": "test", "env": "prod"}, - }, - Spec: 
corev1.PodSpec{Containers: []corev1.Container{{Name: "nginx", Image: "nginx"}}}, - }, metav1.CreateOptions{}) - - // Test pods_list with label selector - t.Run("pods_list with label selector returns filtered pods", func(t *testing.T) { - toolResult, err := c.callTool("pods_list", map[string]interface{}{ - "labelSelector": "app=test", - }) - if err != nil { - t.Fatalf("call tool failed %v", err) - return - } - if toolResult.IsError { - t.Fatalf("call tool failed") - return - } - var decoded []unstructured.Unstructured - err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded) - if err != nil { - t.Fatalf("invalid tool result content %v", err) - return - } - if len(decoded) != 2 { - t.Fatalf("invalid pods count, expected 2, got %v", len(decoded)) - return - } + var decoded []unstructured.Unstructured + err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded) + s.Run("has yaml content", func() { + s.Nilf(err, "invalid tool result content %v", err) }) - - // Test pods_list_in_namespace with label selector - t.Run("pods_list_in_namespace with label selector returns filtered pods", func(t *testing.T) { - toolResult, err := c.callTool("pods_list_in_namespace", map[string]interface{}{ - "namespace": "ns-1", - "labelSelector": "env=prod", - }) - if err != nil { - t.Fatalf("call tool failed %v", err) - return - } - if toolResult.IsError { - t.Fatalf("call tool failed") - return - } - var decoded []unstructured.Unstructured - err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded) - if err != nil { - t.Fatalf("invalid tool result content %v", err) - return - } - if len(decoded) != 1 { - t.Fatalf("invalid pods count, expected 1, got %v", len(decoded)) - return - } - if decoded[0].GetName() != "another-pod-with-labels" { - t.Fatalf("invalid pod name, expected another-pod-with-labels, got %v", decoded[0].GetName()) - return - } + s.Run("returns 1 pod", func() { + s.Lenf(decoded, 1, "invalid pods 
count, expected 1, got %v", len(decoded)) }) - - // Test multiple label selectors - t.Run("pods_list with multiple label selectors returns filtered pods", func(t *testing.T) { - toolResult, err := c.callTool("pods_list", map[string]interface{}{ - "labelSelector": "app=test,env=prod", - }) - if err != nil { - t.Fatalf("call tool failed %v", err) - return - } - if toolResult.IsError { - t.Fatalf("call tool failed") - return - } - var decoded []unstructured.Unstructured - err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded) - if err != nil { - t.Fatalf("invalid tool result content %v", err) - return - } - if len(decoded) != 1 { - t.Fatalf("invalid pods count, expected 1, got %v", len(decoded)) - return - } - if decoded[0].GetName() != "another-pod-with-labels" { - t.Fatalf("invalid pod name, expected another-pod-with-labels, got %v", decoded[0].GetName()) - return - } + s.Run("returns another-pod-with-labels", func() { + s.Equalf("another-pod-with-labels", decoded[0].GetName(), "invalid pod name, expected another-pod-with-labels, got %v", decoded[0].GetName()) }) }) } + +func TestPods(t *testing.T) { + suite.Run(t, new(PodsSuite)) +} diff --git a/pkg/mcp/resources_test.go b/pkg/mcp/resources_test.go index 3aa7b875..21329d20 100644 --- a/pkg/mcp/resources_test.go +++ b/pkg/mcp/resources_test.go @@ -5,220 +5,168 @@ import ( "strings" "testing" + "github.com/BurntSushi/toml" "github.com/mark3labs/mcp-go/mcp" + "github.com/stretchr/testify/suite" corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/rbac/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" "sigs.k8s.io/yaml" - - "github.com/containers/kubernetes-mcp-server/internal/test" - "github.com/containers/kubernetes-mcp-server/pkg/config" - 
"github.com/containers/kubernetes-mcp-server/pkg/output" ) -func TestResourcesList(t *testing.T) { - testCase(t, func(c *mcpContext) { - c.withEnvTest() - t.Run("resources_list with missing apiVersion returns error", func(t *testing.T) { - toolResult, _ := c.callTool("resources_list", map[string]interface{}{}) - if !toolResult.IsError { - t.Fatalf("call tool should fail") - } - if toolResult.Content[0].(mcp.TextContent).Text != "failed to list resources, missing argument apiVersion" { - t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) - } - }) - t.Run("resources_list with missing kind returns error", func(t *testing.T) { - toolResult, _ := c.callTool("resources_list", map[string]interface{}{"apiVersion": "v1"}) - if !toolResult.IsError { - t.Fatalf("call tool should fail") - } - if toolResult.Content[0].(mcp.TextContent).Text != "failed to list resources, missing argument kind" { - t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) - } - }) - t.Run("resources_list with invalid apiVersion returns error", func(t *testing.T) { - toolResult, _ := c.callTool("resources_list", map[string]interface{}{"apiVersion": "invalid/api/version", "kind": "Pod"}) - if !toolResult.IsError { - t.Fatalf("call tool should fail") - } - if toolResult.Content[0].(mcp.TextContent).Text != "failed to list resources, invalid argument apiVersion" { - t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) - } - }) - t.Run("resources_list with nonexistent apiVersion returns error", func(t *testing.T) { - toolResult, _ := c.callTool("resources_list", map[string]interface{}{"apiVersion": "custom.non.existent.example.com/v1", "kind": "Custom"}) - if !toolResult.IsError { - t.Fatalf("call tool should fail") - } - if toolResult.Content[0].(mcp.TextContent).Text != `failed to list resources: no matches for kind "Custom" in version "custom.non.existent.example.com/v1"` { - t.Fatalf("invalid 
error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) - } - }) - namespaces, err := c.callTool("resources_list", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace"}) - t.Run("resources_list returns namespaces", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - return - } - if namespaces.IsError { - t.Fatalf("call tool failed") - return - } +type ResourcesSuite struct { + BaseMcpSuite +} + +func (s *ResourcesSuite) TestResourcesList() { + s.InitMcpClient() + s.Run("resources_list with missing apiVersion returns error", func() { + toolResult, _ := s.CallTool("resources_list", map[string]interface{}{}) + s.Truef(toolResult.IsError, "call tool should fail") + s.Equalf("failed to list resources, missing argument apiVersion", toolResult.Content[0].(mcp.TextContent).Text, + "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + }) + s.Run("resources_list with missing kind returns error", func() { + toolResult, _ := s.CallTool("resources_list", map[string]interface{}{"apiVersion": "v1"}) + s.Truef(toolResult.IsError, "call tool should fail") + s.Equalf("failed to list resources, missing argument kind", toolResult.Content[0].(mcp.TextContent).Text, + "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + }) + s.Run("resources_list with invalid apiVersion returns error", func() { + toolResult, _ := s.CallTool("resources_list", map[string]interface{}{"apiVersion": "invalid/api/version", "kind": "Pod"}) + s.Truef(toolResult.IsError, "call tool should fail") + s.Equalf("failed to list resources, invalid argument apiVersion", toolResult.Content[0].(mcp.TextContent).Text, + "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + }) + s.Run("resources_list with nonexistent apiVersion returns error", func() { + toolResult, _ := s.CallTool("resources_list", map[string]interface{}{"apiVersion": "custom.non.existent.example.com/v1", "kind": "Custom"}) 
+ s.Truef(toolResult.IsError, "call tool should fail") + s.Equalf(`failed to list resources: no matches for kind "Custom" in version "custom.non.existent.example.com/v1"`, + toolResult.Content[0].(mcp.TextContent).Text, "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + }) + s.Run("resources_list(apiVersion=v1, kind=Namespace) returns namespaces", func() { + namespaces, err := s.CallTool("resources_list", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace"}) + s.Run("no error", func() { + s.Nilf(err, "call tool failed %v", err) + s.Falsef(namespaces.IsError, "call tool failed") }) var decodedNamespaces []unstructured.Unstructured err = yaml.Unmarshal([]byte(namespaces.Content[0].(mcp.TextContent).Text), &decodedNamespaces) - t.Run("resources_list has yaml content", func(t *testing.T) { - if err != nil { - t.Fatalf("invalid tool result content %v", err) - } + s.Run("has yaml content", func() { + s.Nilf(err, "invalid tool result content %v", err) }) - t.Run("resources_list returns more than 2 items", func(t *testing.T) { - if len(decodedNamespaces) < 3 { - t.Fatalf("invalid namespace count, expected >2, got %v", len(decodedNamespaces)) - } + s.Run("returns more than 2 items", func() { + s.Truef(len(decodedNamespaces) >= 3, "invalid namespace count, expected >2, got %v", len(decodedNamespaces)) }) - - // Test label selector functionality - t.Run("resources_list with label selector returns filtered pods", func(t *testing.T) { - - // List pods with label selector - result, err := c.callTool("resources_list", map[string]interface{}{ + }) + s.Run("resources_list with label selector returns filtered pods", func() { + s.Run("list pods with app=nginx label", func() { + result, err := s.CallTool("resources_list", map[string]interface{}{ "apiVersion": "v1", "kind": "Pod", "namespace": "default", "labelSelector": "app=nginx", }) - - if err != nil { - t.Fatalf("call tool failed %v", err) - return - } - if result.IsError { - t.Fatalf("call 
tool failed") - return - } + s.Nilf(err, "call tool failed %v", err) + s.Falsef(result.IsError, "call tool failed") var decodedPods []unstructured.Unstructured err = yaml.Unmarshal([]byte(result.Content[0].(mcp.TextContent).Text), &decodedPods) - if err != nil { - t.Fatalf("invalid tool result content %v", err) - return - } - - // Verify only the pod with matching label is returned - if len(decodedPods) != 1 { - t.Fatalf("expected 1 pod, got %d", len(decodedPods)) - return - } + s.Nilf(err, "invalid tool result content %v", err) - if decodedPods[0].GetName() != "a-pod-in-default" { - t.Fatalf("expected pod-with-label, got %s", decodedPods[0].GetName()) - return - } - - // Test that multiple label selectors work - result, err = c.callTool("resources_list", map[string]interface{}{ + s.Lenf(decodedPods, 1, "expected 1 pod, got %d", len(decodedPods)) + s.Equalf("a-pod-in-default", decodedPods[0].GetName(), "expected a-pod-in-default, got %s", decodedPods[0].GetName()) + }) + s.Run("list pods with multiple label selectors", func() { + result, err := s.CallTool("resources_list", map[string]interface{}{ "apiVersion": "v1", "kind": "Pod", "namespace": "default", "labelSelector": "test-label=test-value,another=value", }) + s.Nilf(err, "call tool failed %v", err) + s.Falsef(result.IsError, "call tool failed") - if err != nil { - t.Fatalf("call tool failed %v", err) - return - } - if result.IsError { - t.Fatalf("call tool failed") - return - } - + var decodedPods []unstructured.Unstructured err = yaml.Unmarshal([]byte(result.Content[0].(mcp.TextContent).Text), &decodedPods) - if err != nil { - t.Fatalf("invalid tool result content %v", err) - return - } + s.Nilf(err, "invalid tool result content %v", err) - // Verify no pods match multiple label selector - if len(decodedPods) != 0 { - t.Fatalf("expected 0 pods, got %d", len(decodedPods)) - return - } + s.Lenf(decodedPods, 0, "expected 0 pods, got %d", len(decodedPods)) }) }) } -func TestResourcesListDenied(t *testing.T) { - 
deniedResourcesServer := test.Must(config.ReadToml([]byte(` +func (s *ResourcesSuite) TestResourcesListDenied() { + s.Require().NoError(toml.Unmarshal([]byte(` denied_resources = [ { version = "v1", kind = "Secret" }, { group = "rbac.authorization.k8s.io", version = "v1" } ] - `))) - testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { - c.withEnvTest() - deniedByKind, _ := c.callTool("resources_list", map[string]interface{}{"apiVersion": "v1", "kind": "Secret"}) - t.Run("resources_list (denied by kind) has error", func(t *testing.T) { - if !deniedByKind.IsError { - t.Fatalf("call tool should fail") - } - }) - t.Run("resources_list (denied by kind) describes denial", func(t *testing.T) { + `), s.Cfg), "Expected to parse denied resources config") + s.InitMcpClient() + s.Run("resources_list (denied by kind)", func() { + deniedByKind, err := s.CallTool("resources_list", map[string]interface{}{"apiVersion": "v1", "kind": "Secret"}) + s.Run("has error", func() { + s.Truef(deniedByKind.IsError, "call tool should fail") + s.Nilf(err, "call tool should not return error object") + }) + s.Run("describes denial", func() { expectedMessage := "failed to list resources: resource not allowed: /v1, Kind=Secret" - if deniedByKind.Content[0].(mcp.TextContent).Text != expectedMessage { - t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text) - } + s.Equalf(expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text, + "expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text) }) - deniedByGroup, _ := c.callTool("resources_list", map[string]interface{}{"apiVersion": "rbac.authorization.k8s.io/v1", "kind": "Role"}) - t.Run("resources_list (denied by group) has error", func(t *testing.T) { - if !deniedByGroup.IsError { - t.Fatalf("call tool should fail") - } + }) + s.Run("resources_list (denied by group)", func() { + deniedByGroup, 
err := s.CallTool("resources_list", map[string]interface{}{"apiVersion": "rbac.authorization.k8s.io/v1", "kind": "Role"}) + s.Run("has error", func() { + s.Truef(deniedByGroup.IsError, "call tool should fail") + s.Nilf(err, "call tool should not return error object") }) - t.Run("resources_list (denied by group) describes denial", func(t *testing.T) { + s.Run("describes denial", func() { expectedMessage := "failed to list resources: resource not allowed: rbac.authorization.k8s.io/v1, Kind=Role" - if deniedByGroup.Content[0].(mcp.TextContent).Text != expectedMessage { - t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text) - } - }) - allowedResource, _ := c.callTool("resources_list", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace"}) - t.Run("resources_list (not denied) returns list", func(t *testing.T) { - if allowedResource.IsError { - t.Fatalf("call tool should not fail") - } + s.Equalf(expectedMessage, deniedByGroup.Content[0].(mcp.TextContent).Text, + "expected descriptive error '%s', got %v", expectedMessage, deniedByGroup.Content[0].(mcp.TextContent).Text) }) }) + s.Run("resources_list (not denied) returns list", func() { + allowedResource, _ := s.CallTool("resources_list", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace"}) + s.Falsef(allowedResource.IsError, "call tool should not fail") + }) } -func TestResourcesListAsTable(t *testing.T) { - testCaseWithContext(t, &mcpContext{listOutput: output.Table, before: inOpenShift, after: inOpenShiftClear}, func(c *mcpContext) { - c.withEnvTest() - kc := c.newKubernetesClient() - _, _ = kc.CoreV1().ConfigMaps("default").Create(t.Context(), &corev1.ConfigMap{ +func (s *ResourcesSuite) TestResourcesListAsTable() { + s.Cfg.ListOutput = "table" + s.Require().NoError(EnvTestInOpenShift(s.T().Context()), "Expected to configure test for OpenShift") + s.T().Cleanup(func() { + s.Require().NoError(EnvTestInOpenShiftClear(s.T().Context()), 
"Expected to clear OpenShift test configuration") + }) + s.InitMcpClient() + + s.Run("resources_list(apiVersion=v1, kind=ConfigMap) (list_output=table)", func() { + kc := kubernetes.NewForConfigOrDie(envTestRestConfig) + _, _ = kc.CoreV1().ConfigMaps("default").Create(s.T().Context(), &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{Name: "a-configmap-to-list-as-table", Labels: map[string]string{"resource": "config-map"}}, Data: map[string]string{"key": "value"}, }, metav1.CreateOptions{}) - configMapList, err := c.callTool("resources_list", map[string]interface{}{"apiVersion": "v1", "kind": "ConfigMap"}) - t.Run("resources_list returns ConfigMap list", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - } - if configMapList.IsError { - t.Fatalf("call tool failed") - } + configMapList, err := s.CallTool("resources_list", map[string]interface{}{"apiVersion": "v1", "kind": "ConfigMap"}) + s.Run("no error", func() { + s.Nilf(err, "call tool failed %v", err) + s.Falsef(configMapList.IsError, "call tool failed") }) + s.Require().NotNil(configMapList, "Expected tool result from call") outConfigMapList := configMapList.Content[0].(mcp.TextContent).Text - t.Run("resources_list returns column headers for ConfigMap list", func(t *testing.T) { + s.Run("returns column headers for ConfigMap list", func() { expectedHeaders := "NAMESPACE\\s+APIVERSION\\s+KIND\\s+NAME\\s+DATA\\s+AGE\\s+LABELS" - if m, e := regexp.MatchString(expectedHeaders, outConfigMapList); !m || e != nil { - t.Fatalf("Expected headers '%s' not found in output:\n%s", expectedHeaders, outConfigMapList) - } + m, e := regexp.MatchString(expectedHeaders, outConfigMapList) + s.Truef(m, "Expected headers '%s' not found in output:\n%s", expectedHeaders, outConfigMapList) + s.NoErrorf(e, "Error matching headers regex: %v", e) }) - t.Run("resources_list returns formatted row for a-configmap-to-list-as-table", func(t *testing.T) { + s.Run("returns formatted row for 
a-configmap-to-list-as-table", func() { expectedRow := "(?default)\\s+" + "(?v1)\\s+" + "(?ConfigMap)\\s+" + @@ -226,274 +174,200 @@ func TestResourcesListAsTable(t *testing.T) { "(?1)\\s+" + "(?(\\d+m)?(\\d+s)?)\\s+" + "(?resource=config-map)" - if m, e := regexp.MatchString(expectedRow, outConfigMapList); !m || e != nil { - t.Fatalf("Expected row '%s' not found in output:\n%s", expectedRow, outConfigMapList) - } + m, e := regexp.MatchString(expectedRow, outConfigMapList) + s.Truef(m, "Expected row '%s' not found in output:\n%s", expectedRow, outConfigMapList) + s.NoErrorf(e, "Error matching row regex: %v", e) }) - // Custom Resource List + }) + + s.Run("resources_list(apiVersion=route.openshift.io/v1, kind=Route) (list_output=table)", func() { _, _ = dynamic.NewForConfigOrDie(envTestRestConfig). Resource(schema.GroupVersionResource{Group: "route.openshift.io", Version: "v1", Resource: "routes"}). Namespace("default"). - Create(c.ctx, &unstructured.Unstructured{Object: map[string]interface{}{ + Create(s.T().Context(), &unstructured.Unstructured{Object: map[string]interface{}{ "apiVersion": "route.openshift.io/v1", "kind": "Route", "metadata": map[string]interface{}{ "name": "an-openshift-route-to-list-as-table", }, }}, metav1.CreateOptions{}) - routeList, err := c.callTool("resources_list", map[string]interface{}{"apiVersion": "route.openshift.io/v1", "kind": "Route"}) - t.Run("resources_list returns Route list", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - } - if routeList.IsError { - t.Fatalf("call tool failed") - } + routeList, err := s.CallTool("resources_list", map[string]interface{}{"apiVersion": "route.openshift.io/v1", "kind": "Route"}) + s.Run("no error", func() { + s.Nilf(err, "call tool failed %v", err) + s.Falsef(routeList.IsError, "call tool failed") }) + s.Require().NotNil(routeList, "Expected tool result from call") outRouteList := routeList.Content[0].(mcp.TextContent).Text - t.Run("resources_list returns column 
headers for Route list", func(t *testing.T) { + s.Run("returns column headers for Route list", func() { expectedHeaders := "NAMESPACE\\s+APIVERSION\\s+KIND\\s+NAME\\s+AGE\\s+LABELS" - if m, e := regexp.MatchString(expectedHeaders, outRouteList); !m || e != nil { - t.Fatalf("Expected headers '%s' not found in output:\n%s", expectedHeaders, outRouteList) - } + m, e := regexp.MatchString(expectedHeaders, outRouteList) + s.Truef(m, "Expected headers '%s' not found in output:\n%s", expectedHeaders, outRouteList) + s.NoErrorf(e, "Error matching headers regex: %v", e) }) - t.Run("resources_list returns formatted row for an-openshift-route-to-list-as-table", func(t *testing.T) { + s.Run("returns formatted row for an-openshift-route-to-list-as-table", func() { expectedRow := "(?default)\\s+" + "(?route.openshift.io/v1)\\s+" + "(?Route)\\s+" + "(?an-openshift-route-to-list-as-table)\\s+" + "(?(\\d+m)?(\\d+s)?)\\s+" + "(?)" - if m, e := regexp.MatchString(expectedRow, outRouteList); !m || e != nil { - t.Fatalf("Expected row '%s' not found in output:\n%s", expectedRow, outRouteList) - } + m, e := regexp.MatchString(expectedRow, outRouteList) + s.Truef(m, "Expected row '%s' not found in output:\n%s", expectedRow, outRouteList) + s.NoErrorf(e, "Error matching row regex: %v", e) }) }) } -func TestResourcesGet(t *testing.T) { - testCase(t, func(c *mcpContext) { - c.withEnvTest() - t.Run("resources_get with missing apiVersion returns error", func(t *testing.T) { - toolResult, _ := c.callTool("resources_get", map[string]interface{}{}) - if !toolResult.IsError { - t.Fatalf("call tool should fail") - return - } - if toolResult.Content[0].(mcp.TextContent).Text != "failed to get resource, missing argument apiVersion" { - t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) - return - } - }) - t.Run("resources_get with missing kind returns error", func(t *testing.T) { - toolResult, _ := c.callTool("resources_get", map[string]interface{}{"apiVersion": 
"v1"}) - if !toolResult.IsError { - t.Fatalf("call tool should fail") - return - } - if toolResult.Content[0].(mcp.TextContent).Text != "failed to get resource, missing argument kind" { - t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) - return - } - }) - t.Run("resources_get with invalid apiVersion returns error", func(t *testing.T) { - toolResult, _ := c.callTool("resources_get", map[string]interface{}{"apiVersion": "invalid/api/version", "kind": "Pod", "name": "a-pod"}) - if !toolResult.IsError { - t.Fatalf("call tool should fail") - return - } - if toolResult.Content[0].(mcp.TextContent).Text != "failed to get resource, invalid argument apiVersion" { - t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) - return - } - }) - t.Run("resources_get with nonexistent apiVersion returns error", func(t *testing.T) { - toolResult, _ := c.callTool("resources_get", map[string]interface{}{"apiVersion": "custom.non.existent.example.com/v1", "kind": "Custom", "name": "a-custom"}) - if !toolResult.IsError { - t.Fatalf("call tool should fail") - return - } - if toolResult.Content[0].(mcp.TextContent).Text != `failed to get resource: no matches for kind "Custom" in version "custom.non.existent.example.com/v1"` { - t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) - return - } - }) - t.Run("resources_get with missing name returns error", func(t *testing.T) { - toolResult, _ := c.callTool("resources_get", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace"}) - if !toolResult.IsError { - t.Fatalf("call tool should fail") - return - } - if toolResult.Content[0].(mcp.TextContent).Text != "failed to get resource, missing argument name" { - t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) - return - } - }) - namespace, err := c.callTool("resources_get", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace", "name": 
"default"}) - t.Run("resources_get returns namespace", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - return - } - if namespace.IsError { - t.Fatalf("call tool failed") - return - } +func (s *ResourcesSuite) TestResourcesGet() { + s.InitMcpClient() + s.Run("resources_get with missing apiVersion returns error", func() { + toolResult, _ := s.CallTool("resources_get", map[string]interface{}{}) + s.Truef(toolResult.IsError, "call tool should fail") + s.Equalf("failed to get resource, missing argument apiVersion", toolResult.Content[0].(mcp.TextContent).Text, + "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + }) + s.Run("resources_get with missing kind returns error", func() { + toolResult, _ := s.CallTool("resources_get", map[string]interface{}{"apiVersion": "v1"}) + s.Truef(toolResult.IsError, "call tool should fail") + s.Equalf("failed to get resource, missing argument kind", toolResult.Content[0].(mcp.TextContent).Text, + "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + }) + s.Run("resources_get with invalid apiVersion returns error", func() { + toolResult, _ := s.CallTool("resources_get", map[string]interface{}{"apiVersion": "invalid/api/version", "kind": "Pod", "name": "a-pod"}) + s.Truef(toolResult.IsError, "call tool should fail") + s.Equalf("failed to get resource, invalid argument apiVersion", toolResult.Content[0].(mcp.TextContent).Text, + "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + }) + s.Run("resources_get with nonexistent apiVersion returns error", func() { + toolResult, _ := s.CallTool("resources_get", map[string]interface{}{"apiVersion": "custom.non.existent.example.com/v1", "kind": "Custom", "name": "a-custom"}) + s.Truef(toolResult.IsError, "call tool should fail") + s.Equalf(`failed to get resource: no matches for kind "Custom" in version "custom.non.existent.example.com/v1"`, + 
toolResult.Content[0].(mcp.TextContent).Text, "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + }) + s.Run("resources_get with missing name returns error", func() { + toolResult, _ := s.CallTool("resources_get", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace"}) + s.Truef(toolResult.IsError, "call tool should fail") + s.Equalf("failed to get resource, missing argument name", toolResult.Content[0].(mcp.TextContent).Text, + "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + }) + s.Run("resources_get returns namespace", func() { + namespace, err := s.CallTool("resources_get", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace", "name": "default"}) + s.Run("no error", func() { + s.Nilf(err, "call tool failed %v", err) + s.Falsef(namespace.IsError, "call tool failed") }) var decodedNamespace unstructured.Unstructured err = yaml.Unmarshal([]byte(namespace.Content[0].(mcp.TextContent).Text), &decodedNamespace) - t.Run("resources_get has yaml content", func(t *testing.T) { - if err != nil { - t.Fatalf("invalid tool result content %v", err) - return - } + s.Run("has yaml content", func() { + s.Nilf(err, "invalid tool result content %v", err) }) - t.Run("resources_get returns default namespace", func(t *testing.T) { - if decodedNamespace.GetName() != "default" { - t.Fatalf("invalid namespace name, expected default, got %v", decodedNamespace.GetName()) - return - } + s.Run("returns default namespace", func() { + s.Equalf("default", decodedNamespace.GetName(), "invalid namespace name, expected default, got %v", decodedNamespace.GetName()) }) }) } -func TestResourcesGetDenied(t *testing.T) { - deniedResourcesServer := test.Must(config.ReadToml([]byte(` +func (s *ResourcesSuite) TestResourcesGetDenied() { + s.Require().NoError(toml.Unmarshal([]byte(` denied_resources = [ { version = "v1", kind = "Secret" }, { group = "rbac.authorization.k8s.io", version = "v1" } ] - `))) - 
testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { - c.withEnvTest() - kc := c.newKubernetesClient() - _, _ = kc.CoreV1().Secrets("default").Create(c.ctx, &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{Name: "denied-secret"}, - }, metav1.CreateOptions{}) - _, _ = kc.RbacV1().Roles("default").Create(c.ctx, &v1.Role{ - ObjectMeta: metav1.ObjectMeta{Name: "denied-role"}, - }, metav1.CreateOptions{}) - deniedByKind, _ := c.callTool("resources_get", map[string]interface{}{"apiVersion": "v1", "kind": "Secret", "namespace": "default", "name": "denied-secret"}) - t.Run("resources_get (denied by kind) has error", func(t *testing.T) { - if !deniedByKind.IsError { - t.Fatalf("call tool should fail") - } - }) - t.Run("resources_get (denied by kind) describes denial", func(t *testing.T) { + `), s.Cfg), "Expected to parse denied resources config") + s.InitMcpClient() + kc := kubernetes.NewForConfigOrDie(envTestRestConfig) + _, _ = kc.CoreV1().Secrets("default").Create(s.T().Context(), &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: "denied-secret"}, + }, metav1.CreateOptions{}) + _, _ = kc.RbacV1().Roles("default").Create(s.T().Context(), &v1.Role{ + ObjectMeta: metav1.ObjectMeta{Name: "denied-role"}, + }, metav1.CreateOptions{}) + s.Run("resources_get (denied by kind)", func() { + deniedByKind, err := s.CallTool("resources_get", map[string]interface{}{"apiVersion": "v1", "kind": "Secret", "namespace": "default", "name": "denied-secret"}) + s.Run("has error", func() { + s.Truef(deniedByKind.IsError, "call tool should fail") + s.Nilf(err, "call tool should not return error object") + }) + s.Run("describes denial", func() { expectedMessage := "failed to get resource: resource not allowed: /v1, Kind=Secret" - if deniedByKind.Content[0].(mcp.TextContent).Text != expectedMessage { - t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text) - } + s.Equalf(expectedMessage, 
deniedByKind.Content[0].(mcp.TextContent).Text, + "expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text) }) - deniedByGroup, _ := c.callTool("resources_get", map[string]interface{}{"apiVersion": "rbac.authorization.k8s.io/v1", "kind": "Role", "namespace": "default", "name": "denied-role"}) - t.Run("resources_get (denied by group) has error", func(t *testing.T) { - if !deniedByGroup.IsError { - t.Fatalf("call tool should fail") - } + }) + s.Run("resources_get (denied by group)", func() { + deniedByGroup, err := s.CallTool("resources_get", map[string]interface{}{"apiVersion": "rbac.authorization.k8s.io/v1", "kind": "Role", "namespace": "default", "name": "denied-role"}) + s.Run("has error", func() { + s.Truef(deniedByGroup.IsError, "call tool should fail") + s.Nilf(err, "call tool should not return error object") }) - t.Run("resources_get (denied by group) describes denial", func(t *testing.T) { + s.Run("describes denial", func() { expectedMessage := "failed to get resource: resource not allowed: rbac.authorization.k8s.io/v1, Kind=Role" - if deniedByGroup.Content[0].(mcp.TextContent).Text != expectedMessage { - t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text) - } - }) - allowedResource, _ := c.callTool("resources_get", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace", "name": "default"}) - t.Run("resources_get (not denied) returns resource", func(t *testing.T) { - if allowedResource.IsError { - t.Fatalf("call tool should not fail") - } + s.Equalf(expectedMessage, deniedByGroup.Content[0].(mcp.TextContent).Text, + "expected descriptive error '%s', got %v", expectedMessage, deniedByGroup.Content[0].(mcp.TextContent).Text) }) }) + s.Run("resources_get (not denied) returns resource", func() { + allowedResource, err := s.CallTool("resources_get", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace", "name": "default"}) + 
s.Falsef(allowedResource.IsError, "call tool should not fail") + s.Nilf(err, "call tool should not return error object") + }) } -func TestResourcesCreateOrUpdate(t *testing.T) { - testCase(t, func(c *mcpContext) { - c.withEnvTest() - t.Run("resources_create_or_update with nil resource returns error", func(t *testing.T) { - toolResult, _ := c.callTool("resources_create_or_update", map[string]interface{}{}) - if toolResult.IsError != true { - t.Fatalf("call tool should fail") - return - } - if toolResult.Content[0].(mcp.TextContent).Text != "failed to create or update resources, missing argument resource" { - t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) - return - } - }) - t.Run("resources_create_or_update with empty resource returns error", func(t *testing.T) { - toolResult, _ := c.callTool("resources_create_or_update", map[string]interface{}{"resource": ""}) - if toolResult.IsError != true { - t.Fatalf("call tool should fail") - return - } - if toolResult.Content[0].(mcp.TextContent).Text != "failed to create or update resources, missing argument resource" { - t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) - return - } - }) - client := c.newKubernetesClient() +func (s *ResourcesSuite) TestResourcesCreateOrUpdate() { + s.InitMcpClient() + client := kubernetes.NewForConfigOrDie(envTestRestConfig) + + s.Run("resources_create_or_update with nil resource returns error", func() { + toolResult, _ := s.CallTool("resources_create_or_update", map[string]interface{}{}) + s.Truef(toolResult.IsError, "call tool should fail") + s.Equalf("failed to create or update resources, missing argument resource", toolResult.Content[0].(mcp.TextContent).Text, + "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + }) + s.Run("resources_create_or_update with empty resource returns error", func() { + toolResult, _ := s.CallTool("resources_create_or_update", 
map[string]interface{}{"resource": ""}) + s.Truef(toolResult.IsError, "call tool should fail") + s.Equalf("failed to create or update resources, missing argument resource", toolResult.Content[0].(mcp.TextContent).Text, + "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + }) + + s.Run("resources_create_or_update with valid namespaced yaml resource", func() { configMapYaml := "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: a-cm-created-or-updated\n namespace: default\n" - resourcesCreateOrUpdateCm1, err := c.callTool("resources_create_or_update", map[string]interface{}{"resource": configMapYaml}) - t.Run("resources_create_or_update with valid namespaced yaml resource returns success", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - return - } - if resourcesCreateOrUpdateCm1.IsError { - t.Errorf("call tool failed") - return - } + resourcesCreateOrUpdateCm1, err := s.CallTool("resources_create_or_update", map[string]interface{}{"resource": configMapYaml}) + s.Run("returns success", func() { + s.Nilf(err, "call tool failed %v", err) + s.Falsef(resourcesCreateOrUpdateCm1.IsError, "call tool failed") }) var decodedCreateOrUpdateCm1 []unstructured.Unstructured err = yaml.Unmarshal([]byte(resourcesCreateOrUpdateCm1.Content[0].(mcp.TextContent).Text), &decodedCreateOrUpdateCm1) - t.Run("resources_create_or_update with valid namespaced yaml resource returns yaml content", func(t *testing.T) { - if err != nil { - t.Errorf("invalid tool result content %v", err) - return - } - if !strings.HasPrefix(resourcesCreateOrUpdateCm1.Content[0].(mcp.TextContent).Text, "# The following resources (YAML) have been created or updated successfully") { - t.Errorf("Excpected success message, got %v", resourcesCreateOrUpdateCm1.Content[0].(mcp.TextContent).Text) - return - } - if len(decodedCreateOrUpdateCm1) != 1 { - t.Errorf("invalid resource count, expected 1, got %v", len(decodedCreateOrUpdateCm1)) - return - } - if 
decodedCreateOrUpdateCm1[0].GetName() != "a-cm-created-or-updated" { - t.Errorf("invalid resource name, expected a-cm-created-or-updated, got %v", decodedCreateOrUpdateCm1[0].GetName()) - return - } - if decodedCreateOrUpdateCm1[0].GetUID() == "" { - t.Errorf("invalid uid, got %v", decodedCreateOrUpdateCm1[0].GetUID()) - return - } - }) - t.Run("resources_create_or_update with valid namespaced yaml resource creates ConfigMap", func(t *testing.T) { - cm, _ := client.CoreV1().ConfigMaps("default").Get(c.ctx, "a-cm-created-or-updated", metav1.GetOptions{}) - if cm == nil { - t.Fatalf("ConfigMap not found") - return - } + s.Run("returns yaml content", func() { + s.Nilf(err, "invalid tool result content %v", err) + s.Truef(strings.HasPrefix(resourcesCreateOrUpdateCm1.Content[0].(mcp.TextContent).Text, "# The following resources (YAML) have been created or updated successfully"), + "Expected success message, got %v", resourcesCreateOrUpdateCm1.Content[0].(mcp.TextContent).Text) + s.Lenf(decodedCreateOrUpdateCm1, 1, "invalid resource count, expected 1, got %v", len(decodedCreateOrUpdateCm1)) + s.Equalf("a-cm-created-or-updated", decodedCreateOrUpdateCm1[0].GetName(), + "invalid resource name, expected a-cm-created-or-updated, got %v", decodedCreateOrUpdateCm1[0].GetName()) + s.NotEmptyf(decodedCreateOrUpdateCm1[0].GetUID(), "invalid uid, got %v", decodedCreateOrUpdateCm1[0].GetUID()) + }) + s.Run("creates ConfigMap", func() { + cm, _ := client.CoreV1().ConfigMaps("default").Get(s.T().Context(), "a-cm-created-or-updated", metav1.GetOptions{}) + s.NotNil(cm, "ConfigMap not found") }) + }) + + s.Run("resources_create_or_update with valid namespaced json resource", func() { configMapJson := "{\"apiVersion\": \"v1\", \"kind\": \"ConfigMap\", \"metadata\": {\"name\": \"a-cm-created-or-updated-2\", \"namespace\": \"default\"}}" - resourcesCreateOrUpdateCm2, err := c.callTool("resources_create_or_update", map[string]interface{}{"resource": configMapJson}) - 
t.Run("resources_create_or_update with valid namespaced json resource returns success", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - return - } - if resourcesCreateOrUpdateCm2.IsError { - t.Fatalf("call tool failed") - return - } - }) - t.Run("resources_create_or_update with valid namespaced json resource creates config map", func(t *testing.T) { - cm, _ := client.CoreV1().ConfigMaps("default").Get(c.ctx, "a-cm-created-or-updated-2", metav1.GetOptions{}) - if cm == nil { - t.Fatalf("ConfigMap not found") - return - } + resourcesCreateOrUpdateCm2, err := s.CallTool("resources_create_or_update", map[string]interface{}{"resource": configMapJson}) + s.Run("returns success", func() { + s.Nilf(err, "call tool failed %v", err) + s.Falsef(resourcesCreateOrUpdateCm2.IsError, "call tool failed") + }) + s.Run("creates config map", func() { + cm, _ := client.CoreV1().ConfigMaps("default").Get(s.T().Context(), "a-cm-created-or-updated-2", metav1.GetOptions{}) + s.NotNil(cm, "ConfigMap not found") }) + }) + + s.Run("resources_create_or_update with valid cluster-scoped json resource", func() { customResourceDefinitionJson := ` { "apiVersion": "apiextensions.k8s.io/v1", @@ -509,284 +383,212 @@ func TestResourcesCreateOrUpdate(t *testing.T) { "names": {"plural": "customs","singular": "custom","kind": "Custom"} } }` - resourcesCreateOrUpdateCrd, err := c.callTool("resources_create_or_update", map[string]interface{}{"resource": customResourceDefinitionJson}) - t.Run("resources_create_or_update with valid cluster-scoped json resource returns success", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - return - } - if resourcesCreateOrUpdateCrd.IsError { - t.Fatalf("call tool failed") - return - } - }) - t.Run("resources_create_or_update with valid cluster-scoped json resource creates custom resource definition", func(t *testing.T) { - apiExtensionsV1Client := c.newApiExtensionsClient() - _, err = 
apiExtensionsV1Client.CustomResourceDefinitions().Get(c.ctx, "customs.example.com", metav1.GetOptions{}) - if err != nil { - t.Fatalf("custom resource definition not found") - return - } - }) - c.crdWaitUntilReady("customs.example.com") + resourcesCreateOrUpdateCrd, err := s.CallTool("resources_create_or_update", map[string]interface{}{"resource": customResourceDefinitionJson}) + s.Run("returns success", func() { + s.Nilf(err, "call tool failed %v", err) + s.Falsef(resourcesCreateOrUpdateCrd.IsError, "call tool failed") + }) + s.Run("creates custom resource definition", func() { + apiExtensionsV1Client := apiextensionsv1.NewForConfigOrDie(envTestRestConfig) + _, err = apiExtensionsV1Client.CustomResourceDefinitions().Get(s.T().Context(), "customs.example.com", metav1.GetOptions{}) + s.Nilf(err, "custom resource definition not found") + }) + s.Require().NoError(EnvTestCrdWaitUntilReady(s.T().Context(), "customs.example.com")) + }) + + s.Run("resources_create_or_update creates custom resource", func() { customJson := "{\"apiVersion\": \"example.com/v1\", \"kind\": \"Custom\", \"metadata\": {\"name\": \"a-custom-resource\"}}" - resourcesCreateOrUpdateCustom, err := c.callTool("resources_create_or_update", map[string]interface{}{"resource": customJson}) - t.Run("resources_create_or_update with valid namespaced json resource returns success", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - return - } - if resourcesCreateOrUpdateCustom.IsError { - t.Fatalf("call tool failed, got: %v", resourcesCreateOrUpdateCustom.Content) - return - } - }) - t.Run("resources_create_or_update with valid namespaced json resource creates custom resource", func(t *testing.T) { + resourcesCreateOrUpdateCustom, err := s.CallTool("resources_create_or_update", map[string]interface{}{"resource": customJson}) + s.Run("returns success", func() { + s.Nilf(err, "call tool failed %v", err) + s.Falsef(resourcesCreateOrUpdateCustom.IsError, "call tool failed, got: %v", 
resourcesCreateOrUpdateCustom.Content) + }) + s.Run("creates custom resource", func() { dynamicClient := dynamic.NewForConfigOrDie(envTestRestConfig) _, err = dynamicClient. Resource(schema.GroupVersionResource{Group: "example.com", Version: "v1", Resource: "customs"}). Namespace("default"). - Get(c.ctx, "a-custom-resource", metav1.GetOptions{}) - if err != nil { - t.Fatalf("custom resource not found") - return - } + Get(s.T().Context(), "a-custom-resource", metav1.GetOptions{}) + s.Nilf(err, "custom resource not found") }) + }) + + s.Run("resources_create_or_update with valid namespaced json resource", func() { customJsonUpdated := "{\"apiVersion\": \"example.com/v1\", \"kind\": \"Custom\", \"metadata\": {\"name\": \"a-custom-resource\",\"annotations\": {\"updated\": \"true\"}}}" - resourcesCreateOrUpdateCustomUpdated, err := c.callTool("resources_create_or_update", map[string]interface{}{"resource": customJsonUpdated}) - t.Run("resources_create_or_update with valid namespaced json resource updates custom resource", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - return - } - if resourcesCreateOrUpdateCustomUpdated.IsError { - t.Fatalf("call tool failed") - return - } - }) - t.Run("resources_create_or_update with valid namespaced json resource updates custom resource", func(t *testing.T) { + resourcesCreateOrUpdateCustomUpdated, err := s.CallTool("resources_create_or_update", map[string]interface{}{"resource": customJsonUpdated}) + s.Run("returns success", func() { + s.Nilf(err, "call tool failed %v", err) + s.Falsef(resourcesCreateOrUpdateCustomUpdated.IsError, "call tool failed") + }) + s.Run("updates custom resource", func() { dynamicClient := dynamic.NewForConfigOrDie(envTestRestConfig) customResource, _ := dynamicClient. Resource(schema.GroupVersionResource{Group: "example.com", Version: "v1", Resource: "customs"}). Namespace("default"). 
- Get(c.ctx, "a-custom-resource", metav1.GetOptions{}) - if customResource == nil { - t.Fatalf("custom resource not found") - return - } + Get(s.T().Context(), "a-custom-resource", metav1.GetOptions{}) + s.NotNil(customResource, "custom resource not found") annotations := customResource.GetAnnotations() - if annotations == nil || annotations["updated"] != "true" { - t.Fatalf("custom resource not updated") - return - } + s.Require().NotNil(annotations, "annotations should not be nil") + s.Equalf("true", annotations["updated"], "custom resource not updated") }) }) } -func TestResourcesCreateOrUpdateDenied(t *testing.T) { - deniedResourcesServer := test.Must(config.ReadToml([]byte(` +func (s *ResourcesSuite) TestResourcesCreateOrUpdateDenied() { + s.Require().NoError(toml.Unmarshal([]byte(` denied_resources = [ { version = "v1", kind = "Secret" }, { group = "rbac.authorization.k8s.io", version = "v1" } ] - `))) - testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { - c.withEnvTest() + `), s.Cfg), "Expected to parse denied resources config") + s.InitMcpClient() + s.Run("resources_create_or_update (denied by kind)", func() { secretYaml := "apiVersion: v1\nkind: Secret\nmetadata:\n name: a-denied-secret\n namespace: default\n" - deniedByKind, _ := c.callTool("resources_create_or_update", map[string]interface{}{"resource": secretYaml}) - t.Run("resources_create_or_update (denied by kind) has error", func(t *testing.T) { - if !deniedByKind.IsError { - t.Fatalf("call tool should fail") - } + deniedByKind, err := s.CallTool("resources_create_or_update", map[string]interface{}{"resource": secretYaml}) + s.Run("has error", func() { + s.Truef(deniedByKind.IsError, "call tool should fail") + s.Nilf(err, "call tool should not return error object") }) - t.Run("resources_create_or_update (denied by kind) describes denial", func(t *testing.T) { + s.Run("describes denial", func() { expectedMessage := "failed to create or update resources: 
resource not allowed: /v1, Kind=Secret" - if deniedByKind.Content[0].(mcp.TextContent).Text != expectedMessage { - t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text) - } + s.Equalf(expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text, + "expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text) }) + }) + s.Run("resources_create_or_update (denied by group)", func() { roleYaml := "apiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n name: a-denied-role\n namespace: default\n" - deniedByGroup, _ := c.callTool("resources_create_or_update", map[string]interface{}{"resource": roleYaml}) - t.Run("resources_create_or_update (denied by group) has error", func(t *testing.T) { - if !deniedByGroup.IsError { - t.Fatalf("call tool should fail") - } + deniedByGroup, err := s.CallTool("resources_create_or_update", map[string]interface{}{"resource": roleYaml}) + s.Run("has error", func() { + s.Truef(deniedByGroup.IsError, "call tool should fail") + s.Nilf(err, "call tool should not return error object") }) - t.Run("resources_create_or_update (denied by group) describes denial", func(t *testing.T) { + s.Run("describes denial", func() { expectedMessage := "failed to create or update resources: resource not allowed: rbac.authorization.k8s.io/v1, Kind=Role" - if deniedByGroup.Content[0].(mcp.TextContent).Text != expectedMessage { - t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text) - } + s.Equalf(expectedMessage, deniedByGroup.Content[0].(mcp.TextContent).Text, + "expected descriptive error '%s', got %v", expectedMessage, deniedByGroup.Content[0].(mcp.TextContent).Text) }) + }) + s.Run("resources_create_or_update (not denied) creates or updates resource", func() { configMapYaml := "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: a-cm-created-or-updated\n namespace: default\n" - 
allowedResource, _ := c.callTool("resources_create_or_update", map[string]interface{}{"resource": configMapYaml}) - t.Run("resources_create_or_update (not denied) creates or updates resource", func(t *testing.T) { - if allowedResource.IsError { - t.Fatalf("call tool should not fail") - } - }) + allowedResource, err := s.CallTool("resources_create_or_update", map[string]interface{}{"resource": configMapYaml}) + s.Falsef(allowedResource.IsError, "call tool should not fail") + s.Nilf(err, "call tool should not return error object") }) } -func TestResourcesDelete(t *testing.T) { - testCase(t, func(c *mcpContext) { - c.withEnvTest() - t.Run("resources_delete with missing apiVersion returns error", func(t *testing.T) { - toolResult, _ := c.callTool("resources_delete", map[string]interface{}{}) - if !toolResult.IsError { - t.Fatalf("call tool should fail") - return - } - if toolResult.Content[0].(mcp.TextContent).Text != "failed to delete resource, missing argument apiVersion" { - t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) - return - } - }) - t.Run("resources_delete with missing kind returns error", func(t *testing.T) { - toolResult, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "v1"}) - if !toolResult.IsError { - t.Fatalf("call tool should fail") - return - } - if toolResult.Content[0].(mcp.TextContent).Text != "failed to delete resource, missing argument kind" { - t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) - return - } - }) - t.Run("resources_delete with invalid apiVersion returns error", func(t *testing.T) { - toolResult, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "invalid/api/version", "kind": "Pod", "name": "a-pod"}) - if !toolResult.IsError { - t.Fatalf("call tool should fail") - return - } - if toolResult.Content[0].(mcp.TextContent).Text != "failed to delete resource, invalid argument apiVersion" { - t.Fatalf("invalid 
error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) - return - } - }) - t.Run("resources_delete with nonexistent apiVersion returns error", func(t *testing.T) { - toolResult, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "custom.non.existent.example.com/v1", "kind": "Custom", "name": "a-custom"}) - if !toolResult.IsError { - t.Fatalf("call tool should fail") - return - } - if toolResult.Content[0].(mcp.TextContent).Text != `failed to delete resource: no matches for kind "Custom" in version "custom.non.existent.example.com/v1"` { - t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) - return - } - }) - t.Run("resources_delete with missing name returns error", func(t *testing.T) { - toolResult, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace"}) - if !toolResult.IsError { - t.Fatalf("call tool should fail") - return - } - if toolResult.Content[0].(mcp.TextContent).Text != "failed to delete resource, missing argument name" { - t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) - return - } - }) - t.Run("resources_delete with nonexistent resource returns error", func(t *testing.T) { - toolResult, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "ConfigMap", "name": "nonexistent-configmap"}) - if !toolResult.IsError { - t.Fatalf("call tool should fail") - return - } - if toolResult.Content[0].(mcp.TextContent).Text != `failed to delete resource: configmaps "nonexistent-configmap" not found` { - t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) - return - } - }) - resourcesDeleteCm, err := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "ConfigMap", "name": "a-configmap-to-delete"}) - t.Run("resources_delete with valid namespaced resource returns success", func(t *testing.T) { - if err != nil { - 
t.Fatalf("call tool failed %v", err) - return - } - if resourcesDeleteCm.IsError { - t.Fatalf("call tool failed") - return - } - if resourcesDeleteCm.Content[0].(mcp.TextContent).Text != "Resource deleted successfully" { - t.Fatalf("invalid tool result content got: %v", resourcesDeleteCm.Content[0].(mcp.TextContent).Text) - return - } - }) - client := c.newKubernetesClient() - t.Run("resources_delete with valid namespaced resource deletes ConfigMap", func(t *testing.T) { - _, err := client.CoreV1().ConfigMaps("default").Get(c.ctx, "a-configmap-to-delete", metav1.GetOptions{}) - if err == nil { - t.Fatalf("ConfigMap not deleted") - return - } - }) - resourcesDeleteNamespace, err := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace", "name": "ns-to-delete"}) - t.Run("resources_delete with valid namespaced resource returns success", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - return - } - if resourcesDeleteNamespace.IsError { - t.Fatalf("call tool failed") - return - } - if resourcesDeleteNamespace.Content[0].(mcp.TextContent).Text != "Resource deleted successfully" { - t.Fatalf("invalid tool result content got: %v", resourcesDeleteNamespace.Content[0].(mcp.TextContent).Text) - return - } - }) - t.Run("resources_delete with valid namespaced resource deletes Namespace", func(t *testing.T) { - ns, err := client.CoreV1().Namespaces().Get(c.ctx, "ns-to-delete", metav1.GetOptions{}) - if err == nil && ns != nil && ns.DeletionTimestamp == nil { - t.Fatalf("Namespace not deleted") - return - } +func (s *ResourcesSuite) TestResourcesDelete() { + s.InitMcpClient() + client := kubernetes.NewForConfigOrDie(envTestRestConfig) + + s.Run("resources_delete with missing apiVersion returns error", func() { + toolResult, _ := s.CallTool("resources_delete", map[string]interface{}{}) + s.Truef(toolResult.IsError, "call tool should fail") + s.Equalf("failed to delete resource, missing argument apiVersion", 
toolResult.Content[0].(mcp.TextContent).Text, + "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + }) + s.Run("resources_delete with missing kind returns error", func() { + toolResult, _ := s.CallTool("resources_delete", map[string]interface{}{"apiVersion": "v1"}) + s.Truef(toolResult.IsError, "call tool should fail") + s.Equalf("failed to delete resource, missing argument kind", toolResult.Content[0].(mcp.TextContent).Text, + "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + }) + s.Run("resources_delete with invalid apiVersion returns error", func() { + toolResult, _ := s.CallTool("resources_delete", map[string]interface{}{"apiVersion": "invalid/api/version", "kind": "Pod", "name": "a-pod"}) + s.Truef(toolResult.IsError, "call tool should fail") + s.Equalf("failed to delete resource, invalid argument apiVersion", toolResult.Content[0].(mcp.TextContent).Text, + "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + }) + s.Run("resources_delete with nonexistent apiVersion returns error", func() { + toolResult, _ := s.CallTool("resources_delete", map[string]interface{}{"apiVersion": "custom.non.existent.example.com/v1", "kind": "Custom", "name": "a-custom"}) + s.Truef(toolResult.IsError, "call tool should fail") + s.Equalf(`failed to delete resource: no matches for kind "Custom" in version "custom.non.existent.example.com/v1"`, + toolResult.Content[0].(mcp.TextContent).Text, "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + }) + s.Run("resources_delete with missing name returns error", func() { + toolResult, _ := s.CallTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace"}) + s.Truef(toolResult.IsError, "call tool should fail") + s.Equalf("failed to delete resource, missing argument name", toolResult.Content[0].(mcp.TextContent).Text, + "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + 
}) + s.Run("resources_delete with nonexistent resource returns error", func() { + toolResult, _ := s.CallTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "ConfigMap", "name": "nonexistent-configmap"}) + s.Truef(toolResult.IsError, "call tool should fail") + s.Equalf(`failed to delete resource: configmaps "nonexistent-configmap" not found`, + toolResult.Content[0].(mcp.TextContent).Text, "invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) + }) + + s.Run("resources_delete with valid namespaced resource", func() { + resourcesDeleteCm, err := s.CallTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "ConfigMap", "name": "a-configmap-to-delete"}) + s.Run("returns success", func() { + s.Nilf(err, "call tool failed %v", err) + s.Falsef(resourcesDeleteCm.IsError, "call tool failed") + s.Equalf("Resource deleted successfully", resourcesDeleteCm.Content[0].(mcp.TextContent).Text, + "invalid tool result content got: %v", resourcesDeleteCm.Content[0].(mcp.TextContent).Text) + }) + s.Run("deletes ConfigMap", func() { + _, err := client.CoreV1().ConfigMaps("default").Get(s.T().Context(), "a-configmap-to-delete", metav1.GetOptions{}) + s.Error(err, "ConfigMap not deleted") + }) + }) + + s.Run("resources_delete with valid cluster scoped resource", func() { + resourcesDeleteNamespace, err := s.CallTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace", "name": "ns-to-delete"}) + s.Run("returns success", func() { + s.Nilf(err, "call tool failed %v", err) + s.Falsef(resourcesDeleteNamespace.IsError, "call tool failed") + s.Equalf("Resource deleted successfully", resourcesDeleteNamespace.Content[0].(mcp.TextContent).Text, + "invalid tool result content got: %v", resourcesDeleteNamespace.Content[0].(mcp.TextContent).Text) + }) + s.Run("deletes Namespace", func() { + ns, err := client.CoreV1().Namespaces().Get(s.T().Context(), "ns-to-delete", metav1.GetOptions{}) + s.Truef(err 
!= nil || (ns != nil && ns.DeletionTimestamp != nil), "Namespace not deleted") }) }) } -func TestResourcesDeleteDenied(t *testing.T) { - deniedResourcesServer := test.Must(config.ReadToml([]byte(` +func (s *ResourcesSuite) TestResourcesDeleteDenied() { + s.Require().NoError(toml.Unmarshal([]byte(` denied_resources = [ { version = "v1", kind = "Secret" }, { group = "rbac.authorization.k8s.io", version = "v1" } ] - `))) - testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { - c.withEnvTest() - kc := c.newKubernetesClient() - _, _ = kc.CoreV1().ConfigMaps("default").Create(c.ctx, &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{Name: "allowed-configmap-to-delete"}, - }, metav1.CreateOptions{}) - deniedByKind, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "Secret", "namespace": "default", "name": "denied-secret"}) - t.Run("resources_delete (denied by kind) has error", func(t *testing.T) { - if !deniedByKind.IsError { - t.Fatalf("call tool should fail") - } - }) - t.Run("resources_delete (denied by kind) describes denial", func(t *testing.T) { + `), s.Cfg), "Expected to parse denied resources config") + s.InitMcpClient() + kc := kubernetes.NewForConfigOrDie(envTestRestConfig) + _, _ = kc.CoreV1().ConfigMaps("default").Create(s.T().Context(), &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "allowed-configmap-to-delete"}, + }, metav1.CreateOptions{}) + s.Run("resources_delete (denied by kind)", func() { + deniedByKind, err := s.CallTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "Secret", "namespace": "default", "name": "denied-secret"}) + s.Run("has error", func() { + s.Truef(deniedByKind.IsError, "call tool should fail") + s.Nilf(err, "call tool should not return error object") + }) + s.Run("describes denial", func() { expectedMessage := "failed to delete resource: resource not allowed: /v1, Kind=Secret" - if 
deniedByKind.Content[0].(mcp.TextContent).Text != expectedMessage { - t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text) - } + s.Equalf(expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text, + "expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text) }) - deniedByGroup, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "rbac.authorization.k8s.io/v1", "kind": "Role", "namespace": "default", "name": "denied-role"}) - t.Run("resources_delete (denied by group) has error", func(t *testing.T) { - if !deniedByGroup.IsError { - t.Fatalf("call tool should fail") - } + }) + s.Run("resources_delete (denied by group)", func() { + deniedByGroup, err := s.CallTool("resources_delete", map[string]interface{}{"apiVersion": "rbac.authorization.k8s.io/v1", "kind": "Role", "namespace": "default", "name": "denied-role"}) + s.Run("has error", func() { + s.Truef(deniedByGroup.IsError, "call tool should fail") + s.Nilf(err, "call tool should not return error object") }) - t.Run("resources_delete (denied by group) describes denial", func(t *testing.T) { + s.Run("describes denial", func() { expectedMessage := "failed to delete resource: resource not allowed: rbac.authorization.k8s.io/v1, Kind=Role" - if deniedByGroup.Content[0].(mcp.TextContent).Text != expectedMessage { - t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text) - } - }) - allowedResource, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "ConfigMap", "name": "allowed-configmap-to-delete"}) - t.Run("resources_delete (not denied) deletes resource", func(t *testing.T) { - if allowedResource.IsError { - t.Fatalf("call tool should not fail") - } + s.Equalf(expectedMessage, deniedByGroup.Content[0].(mcp.TextContent).Text, + "expected descriptive error '%s', got %v", expectedMessage, 
deniedByGroup.Content[0].(mcp.TextContent).Text) }) }) + s.Run("resources_delete (not denied) deletes resource", func() { + allowedResource, err := s.CallTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "ConfigMap", "name": "allowed-configmap-to-delete"}) + s.Falsef(allowedResource.IsError, "call tool should not fail") + s.Nilf(err, "call tool should not return error object") + }) +} + +func TestResources(t *testing.T) { + suite.Run(t, new(ResourcesSuite)) } diff --git a/pkg/mcp/testdata/toolsets-config-tools.json b/pkg/mcp/testdata/toolsets-config-tools.json index c1767491..2c5b7ae8 100644 --- a/pkg/mcp/testdata/toolsets-config-tools.json +++ b/pkg/mcp/testdata/toolsets-config-tools.json @@ -4,7 +4,6 @@ "title": "Configuration: View", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get the current Kubernetes configuration content as a kubeconfig YAML", diff --git a/pkg/mcp/testdata/toolsets-core-tools.json b/pkg/mcp/testdata/toolsets-core-tools.json index 56b998da..b4c5667f 100644 --- a/pkg/mcp/testdata/toolsets-core-tools.json +++ b/pkg/mcp/testdata/toolsets-core-tools.json @@ -4,7 +4,6 @@ "title": "Events: List", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List all the Kubernetes events in the current cluster from all namespaces", @@ -24,7 +23,6 @@ "title": "Namespaces: List", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List all the Kubernetes namespaces in the current cluster", @@ -38,7 +36,6 @@ "title": "Node: Log", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get logs from a Kubernetes node (kubelet, kube-proxy, or other system logs). 
This accesses node logs through the Kubernetes API proxy to the kubelet", @@ -72,7 +69,6 @@ "title": "Node: Stats Summary", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get detailed resource usage statistics from a Kubernetes node via the kubelet's Summary API. Provides comprehensive metrics including CPU, memory, filesystem, and network usage at the node, pod, and container levels. On systems with cgroup v2 and kernel 4.20+, also includes PSI (Pressure Stall Information) metrics that show resource pressure for CPU, memory, and I/O. See https://kubernetes.io/docs/reference/instrumentation/understand-psi-metrics/ for details on PSI metrics", @@ -90,10 +86,34 @@ }, "name": "nodes_stats_summary" }, + { + "annotations": { + "title": "Nodes: Top", + "readOnlyHint": true, + "destructiveHint": false, + "idempotentHint": true, + "openWorldHint": true + }, + "description": "List the resource consumption (CPU and memory) as recorded by the Kubernetes Metrics Server for the specified Kubernetes Nodes or all nodes in the cluster", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "description": "Name of the Node to get the resource consumption from (Optional, all Nodes if not provided)", + "type": "string" + }, + "label_selector": { + "description": "Kubernetes label selector (e.g. 
'node-role.kubernetes.io/worker=') to filter nodes by label (Optional, only applicable when name is not provided)", + "pattern": "([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]", + "type": "string" + } + } + }, + "name": "nodes_top" + }, { "annotations": { "title": "Pods: Delete", - "readOnlyHint": false, "destructiveHint": true, "idempotentHint": true, "openWorldHint": true @@ -120,9 +140,7 @@ { "annotations": { "title": "Pods: Exec", - "readOnlyHint": false, "destructiveHint": true, - "idempotentHint": false, "openWorldHint": true }, "description": "Execute a command in a Kubernetes Pod in the current or provided namespace with the provided name and command", @@ -161,7 +179,6 @@ "title": "Pods: Get", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get a Kubernetes Pod in the current or provided namespace with the provided name", @@ -188,7 +205,6 @@ "title": "Pods: List", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List all the Kubernetes pods in the current cluster from all namespaces", @@ -209,7 +225,6 @@ "title": "Pods: List in Namespace", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List all the Kubernetes pods in the specified namespace in the current cluster", @@ -237,7 +252,6 @@ "title": "Pods: Log", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get the logs of a Kubernetes Pod in the current or provided namespace with the provided name", @@ -276,9 +290,7 @@ { "annotations": { "title": "Pods: Run", - "readOnlyHint": false, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Run a Kubernetes Pod in the current or provided namespace with the provided container image and optional name", @@ -345,7 +357,6 @@ { "annotations": { "title": "Resources: 
Create or Update", - "readOnlyHint": false, "destructiveHint": true, "idempotentHint": true, "openWorldHint": true @@ -368,7 +379,6 @@ { "annotations": { "title": "Resources: Delete", - "readOnlyHint": false, "destructiveHint": true, "idempotentHint": true, "openWorldHint": true @@ -407,7 +417,6 @@ "title": "Resources: Get", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get a Kubernetes resource in the current cluster by providing its apiVersion, kind, optionally the namespace, and its name\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)", @@ -444,7 +453,6 @@ "title": "Resources: List", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List Kubernetes resources and objects in the current cluster by providing their apiVersion and kind and optionally the namespace and label selector\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)", diff --git a/pkg/mcp/testdata/toolsets-full-tools-multicluster-enum.json b/pkg/mcp/testdata/toolsets-full-tools-multicluster-enum.json index 1551b4c2..7831c054 100644 --- a/pkg/mcp/testdata/toolsets-full-tools-multicluster-enum.json +++ b/pkg/mcp/testdata/toolsets-full-tools-multicluster-enum.json @@ -18,7 +18,6 @@ "title": "Configuration: View", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get the current Kubernetes configuration content as a kubeconfig YAML", @@ -38,7 +37,6 @@ "title": "Events: List", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List all the Kubernetes events in the current cluster from all namespaces", @@ -64,9 +62,7 @@ { "annotations": { "title": "Helm: Install", - "readOnlyHint": false, "destructiveHint": 
false, - "idempotentHint": false, "openWorldHint": true }, "description": "Install a Helm chart in the current or provided namespace", @@ -109,7 +105,6 @@ "title": "Helm: List", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List all the Helm releases in the current or provided namespace (or in all namespaces if specified)", @@ -139,7 +134,6 @@ { "annotations": { "title": "Helm: Uninstall", - "readOnlyHint": false, "destructiveHint": true, "idempotentHint": true, "openWorldHint": true @@ -176,7 +170,6 @@ "title": "Namespaces: List", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List all the Kubernetes namespaces in the current cluster", @@ -200,7 +193,6 @@ "title": "Node: Log", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get logs from a Kubernetes node (kubelet, kube-proxy, or other system logs). This accesses node logs through the Kubernetes API proxy to the kubelet", @@ -242,7 +234,6 @@ "title": "Node: Stats Summary", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get detailed resource usage statistics from a Kubernetes node via the kubelet's Summary API. Provides comprehensive metrics including CPU, memory, filesystem, and network usage at the node, pod, and container levels. On systems with cgroup v2 and kernel 4.20+, also includes PSI (Pressure Stall Information) metrics that show resource pressure for CPU, memory, and I/O. 
See https://kubernetes.io/docs/reference/instrumentation/understand-psi-metrics/ for details on PSI metrics", @@ -268,10 +259,42 @@ }, "name": "nodes_stats_summary" }, + { + "annotations": { + "title": "Nodes: Top", + "readOnlyHint": true, + "destructiveHint": false, + "idempotentHint": true, + "openWorldHint": true + }, + "description": "List the resource consumption (CPU and memory) as recorded by the Kubernetes Metrics Server for the specified Kubernetes Nodes or all nodes in the cluster", + "inputSchema": { + "type": "object", + "properties": { + "context": { + "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set", + "enum": [ + "extra-cluster", + "fake-context" + ], + "type": "string" + }, + "name": { + "description": "Name of the Node to get the resource consumption from (Optional, all Nodes if not provided)", + "type": "string" + }, + "label_selector": { + "description": "Kubernetes label selector (e.g. 'node-role.kubernetes.io/worker=') to filter nodes by label (Optional, only applicable when name is not provided)", + "pattern": "([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]", + "type": "string" + } + } + }, + "name": "nodes_top" + }, { "annotations": { "title": "Pods: Delete", - "readOnlyHint": false, "destructiveHint": true, "idempotentHint": true, "openWorldHint": true @@ -306,9 +329,7 @@ { "annotations": { "title": "Pods: Exec", - "readOnlyHint": false, "destructiveHint": true, - "idempotentHint": false, "openWorldHint": true }, "description": "Execute a command in a Kubernetes Pod in the current or provided namespace with the provided name and command", @@ -355,7 +376,6 @@ "title": "Pods: Get", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get a Kubernetes Pod in the current or provided namespace with the provided name", @@ -390,7 +410,6 @@ "title": "Pods: List", "readOnlyHint": true, "destructiveHint": false, - 
"idempotentHint": false, "openWorldHint": true }, "description": "List all the Kubernetes pods in the current cluster from all namespaces", @@ -419,7 +438,6 @@ "title": "Pods: List in Namespace", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List all the Kubernetes pods in the specified namespace in the current cluster", @@ -455,7 +473,6 @@ "title": "Pods: Log", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get the logs of a Kubernetes Pod in the current or provided namespace with the provided name", @@ -502,9 +519,7 @@ { "annotations": { "title": "Pods: Run", - "readOnlyHint": false, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Run a Kubernetes Pod in the current or provided namespace with the provided container image and optional name", @@ -587,7 +602,6 @@ { "annotations": { "title": "Resources: Create or Update", - "readOnlyHint": false, "destructiveHint": true, "idempotentHint": true, "openWorldHint": true @@ -618,7 +632,6 @@ { "annotations": { "title": "Resources: Delete", - "readOnlyHint": false, "destructiveHint": true, "idempotentHint": true, "openWorldHint": true @@ -665,7 +678,6 @@ "title": "Resources: Get", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get a Kubernetes resource in the current cluster by providing its apiVersion, kind, optionally the namespace, and its name\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)", @@ -710,7 +722,6 @@ "title": "Resources: List", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List Kubernetes resources and objects in the current cluster by providing their apiVersion and kind and optionally the namespace and label 
selector\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)", diff --git a/pkg/mcp/testdata/toolsets-full-tools-multicluster.json b/pkg/mcp/testdata/toolsets-full-tools-multicluster.json index 6e85e401..b95f179c 100644 --- a/pkg/mcp/testdata/toolsets-full-tools-multicluster.json +++ b/pkg/mcp/testdata/toolsets-full-tools-multicluster.json @@ -18,7 +18,6 @@ "title": "Configuration: View", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get the current Kubernetes configuration content as a kubeconfig YAML", @@ -38,7 +37,6 @@ "title": "Events: List", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List all the Kubernetes events in the current cluster from all namespaces", @@ -60,9 +58,7 @@ { "annotations": { "title": "Helm: Install", - "readOnlyHint": false, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Install a Helm chart in the current or provided namespace", @@ -101,7 +97,6 @@ "title": "Helm: List", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List all the Helm releases in the current or provided namespace (or in all namespaces if specified)", @@ -127,7 +122,6 @@ { "annotations": { "title": "Helm: Uninstall", - "readOnlyHint": false, "destructiveHint": true, "idempotentHint": true, "openWorldHint": true @@ -160,7 +154,6 @@ "title": "Namespaces: List", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List all the Kubernetes namespaces in the current cluster", @@ -180,7 +173,6 @@ "title": "Node: Log", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get logs from a Kubernetes node (kubelet, kube-proxy, or other system 
logs). This accesses node logs through the Kubernetes API proxy to the kubelet", @@ -218,7 +210,6 @@ "title": "Node: Stats Summary", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get detailed resource usage statistics from a Kubernetes node via the kubelet's Summary API. Provides comprehensive metrics including CPU, memory, filesystem, and network usage at the node, pod, and container levels. On systems with cgroup v2 and kernel 4.20+, also includes PSI (Pressure Stall Information) metrics that show resource pressure for CPU, memory, and I/O. See https://kubernetes.io/docs/reference/instrumentation/understand-psi-metrics/ for details on PSI metrics", @@ -240,10 +231,38 @@ }, "name": "nodes_stats_summary" }, + { + "annotations": { + "title": "Nodes: Top", + "readOnlyHint": true, + "destructiveHint": false, + "idempotentHint": true, + "openWorldHint": true + }, + "description": "List the resource consumption (CPU and memory) as recorded by the Kubernetes Metrics Server for the specified Kubernetes Nodes or all nodes in the cluster", + "inputSchema": { + "type": "object", + "properties": { + "context": { + "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set", + "type": "string" + }, + "name": { + "description": "Name of the Node to get the resource consumption from (Optional, all Nodes if not provided)", + "type": "string" + }, + "label_selector": { + "description": "Kubernetes label selector (e.g. 
'node-role.kubernetes.io/worker=') to filter nodes by label (Optional, only applicable when name is not provided)", + "pattern": "([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]", + "type": "string" + } + } + }, + "name": "nodes_top" + }, { "annotations": { "title": "Pods: Delete", - "readOnlyHint": false, "destructiveHint": true, "idempotentHint": true, "openWorldHint": true @@ -274,9 +293,7 @@ { "annotations": { "title": "Pods: Exec", - "readOnlyHint": false, "destructiveHint": true, - "idempotentHint": false, "openWorldHint": true }, "description": "Execute a command in a Kubernetes Pod in the current or provided namespace with the provided name and command", @@ -319,7 +336,6 @@ "title": "Pods: Get", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get a Kubernetes Pod in the current or provided namespace with the provided name", @@ -350,7 +366,6 @@ "title": "Pods: List", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List all the Kubernetes pods in the current cluster from all namespaces", @@ -375,7 +390,6 @@ "title": "Pods: List in Namespace", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List all the Kubernetes pods in the specified namespace in the current cluster", @@ -407,7 +421,6 @@ "title": "Pods: Log", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get the logs of a Kubernetes Pod in the current or provided namespace with the provided name", @@ -450,9 +463,7 @@ { "annotations": { "title": "Pods: Run", - "readOnlyHint": false, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Run a Kubernetes Pod in the current or provided namespace with the provided container image and optional name", @@ -527,7 +538,6 @@ { "annotations": { "title": "Resources: 
Create or Update", - "readOnlyHint": false, "destructiveHint": true, "idempotentHint": true, "openWorldHint": true @@ -554,7 +564,6 @@ { "annotations": { "title": "Resources: Delete", - "readOnlyHint": false, "destructiveHint": true, "idempotentHint": true, "openWorldHint": true @@ -597,7 +606,6 @@ "title": "Resources: Get", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get a Kubernetes resource in the current cluster by providing its apiVersion, kind, optionally the namespace, and its name\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)", @@ -638,7 +646,6 @@ "title": "Resources: List", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List Kubernetes resources and objects in the current cluster by providing their apiVersion and kind and optionally the namespace and label selector\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)", diff --git a/pkg/mcp/testdata/toolsets-full-tools-openshift.json b/pkg/mcp/testdata/toolsets-full-tools-openshift.json index fb24138e..e4488b0a 100644 --- a/pkg/mcp/testdata/toolsets-full-tools-openshift.json +++ b/pkg/mcp/testdata/toolsets-full-tools-openshift.json @@ -4,7 +4,6 @@ "title": "Configuration: View", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get the current Kubernetes configuration content as a kubeconfig YAML", @@ -24,7 +23,6 @@ "title": "Events: List", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List all the Kubernetes events in the current cluster from all namespaces", @@ -42,9 +40,7 @@ { "annotations": { "title": "Helm: Install", - "readOnlyHint": false, "destructiveHint": false, - "idempotentHint": false, 
"openWorldHint": true }, "description": "Install a Helm chart in the current or provided namespace", @@ -79,7 +75,6 @@ "title": "Helm: List", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List all the Helm releases in the current or provided namespace (or in all namespaces if specified)", @@ -101,7 +96,6 @@ { "annotations": { "title": "Helm: Uninstall", - "readOnlyHint": false, "destructiveHint": true, "idempotentHint": true, "openWorldHint": true @@ -130,7 +124,6 @@ "title": "Namespaces: List", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List all the Kubernetes namespaces in the current cluster", @@ -144,7 +137,6 @@ "title": "Node: Log", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get logs from a Kubernetes node (kubelet, kube-proxy, or other system logs). This accesses node logs through the Kubernetes API proxy to the kubelet", @@ -178,7 +170,6 @@ "title": "Node: Stats Summary", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get detailed resource usage statistics from a Kubernetes node via the kubelet's Summary API. Provides comprehensive metrics including CPU, memory, filesystem, and network usage at the node, pod, and container levels. On systems with cgroup v2 and kernel 4.20+, also includes PSI (Pressure Stall Information) metrics that show resource pressure for CPU, memory, and I/O. 
See https://kubernetes.io/docs/reference/instrumentation/understand-psi-metrics/ for details on PSI metrics", @@ -196,10 +187,34 @@ }, "name": "nodes_stats_summary" }, + { + "annotations": { + "title": "Nodes: Top", + "readOnlyHint": true, + "destructiveHint": false, + "idempotentHint": true, + "openWorldHint": true + }, + "description": "List the resource consumption (CPU and memory) as recorded by the Kubernetes Metrics Server for the specified Kubernetes Nodes or all nodes in the cluster", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "description": "Name of the Node to get the resource consumption from (Optional, all Nodes if not provided)", + "type": "string" + }, + "label_selector": { + "description": "Kubernetes label selector (e.g. 'node-role.kubernetes.io/worker=') to filter nodes by label (Optional, only applicable when name is not provided)", + "pattern": "([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]", + "type": "string" + } + } + }, + "name": "nodes_top" + }, { "annotations": { "title": "Pods: Delete", - "readOnlyHint": false, "destructiveHint": true, "idempotentHint": true, "openWorldHint": true @@ -226,9 +241,7 @@ { "annotations": { "title": "Pods: Exec", - "readOnlyHint": false, "destructiveHint": true, - "idempotentHint": false, "openWorldHint": true }, "description": "Execute a command in a Kubernetes Pod in the current or provided namespace with the provided name and command", @@ -267,7 +280,6 @@ "title": "Pods: Get", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get a Kubernetes Pod in the current or provided namespace with the provided name", @@ -294,7 +306,6 @@ "title": "Pods: List", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List all the Kubernetes pods in the current cluster from all namespaces", @@ -315,7 +326,6 @@ "title": "Pods: List in Namespace", "readOnlyHint": true, 
"destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List all the Kubernetes pods in the specified namespace in the current cluster", @@ -343,7 +353,6 @@ "title": "Pods: Log", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get the logs of a Kubernetes Pod in the current or provided namespace with the provided name", @@ -382,9 +391,7 @@ { "annotations": { "title": "Pods: Run", - "readOnlyHint": false, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Run a Kubernetes Pod in the current or provided namespace with the provided container image and optional name", @@ -453,7 +460,6 @@ "title": "Projects: List", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List all the OpenShift projects in the current cluster", @@ -465,7 +471,6 @@ { "annotations": { "title": "Resources: Create or Update", - "readOnlyHint": false, "destructiveHint": true, "idempotentHint": true, "openWorldHint": true @@ -488,7 +493,6 @@ { "annotations": { "title": "Resources: Delete", - "readOnlyHint": false, "destructiveHint": true, "idempotentHint": true, "openWorldHint": true @@ -527,7 +531,6 @@ "title": "Resources: Get", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get a Kubernetes resource in the current cluster by providing its apiVersion, kind, optionally the namespace, and its name\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress, route.openshift.io/v1 Route)", @@ -564,7 +567,6 @@ "title": "Resources: List", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List Kubernetes resources and objects in the current cluster by providing their apiVersion and kind and optionally the 
namespace and label selector\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress, route.openshift.io/v1 Route)", diff --git a/pkg/mcp/testdata/toolsets-full-tools.json b/pkg/mcp/testdata/toolsets-full-tools.json index 5a4b5112..ca270027 100644 --- a/pkg/mcp/testdata/toolsets-full-tools.json +++ b/pkg/mcp/testdata/toolsets-full-tools.json @@ -4,7 +4,6 @@ "title": "Configuration: View", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get the current Kubernetes configuration content as a kubeconfig YAML", @@ -24,7 +23,6 @@ "title": "Events: List", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List all the Kubernetes events in the current cluster from all namespaces", @@ -42,9 +40,7 @@ { "annotations": { "title": "Helm: Install", - "readOnlyHint": false, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Install a Helm chart in the current or provided namespace", @@ -79,7 +75,6 @@ "title": "Helm: List", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List all the Helm releases in the current or provided namespace (or in all namespaces if specified)", @@ -101,7 +96,6 @@ { "annotations": { "title": "Helm: Uninstall", - "readOnlyHint": false, "destructiveHint": true, "idempotentHint": true, "openWorldHint": true @@ -130,7 +124,6 @@ "title": "Namespaces: List", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List all the Kubernetes namespaces in the current cluster", @@ -144,7 +137,6 @@ "title": "Node: Log", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get logs from a Kubernetes node (kubelet, kube-proxy, or other system logs). 
This accesses node logs through the Kubernetes API proxy to the kubelet", @@ -178,7 +170,6 @@ "title": "Node: Stats Summary", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get detailed resource usage statistics from a Kubernetes node via the kubelet's Summary API. Provides comprehensive metrics including CPU, memory, filesystem, and network usage at the node, pod, and container levels. On systems with cgroup v2 and kernel 4.20+, also includes PSI (Pressure Stall Information) metrics that show resource pressure for CPU, memory, and I/O. See https://kubernetes.io/docs/reference/instrumentation/understand-psi-metrics/ for details on PSI metrics", @@ -196,10 +187,34 @@ }, "name": "nodes_stats_summary" }, + { + "annotations": { + "title": "Nodes: Top", + "readOnlyHint": true, + "destructiveHint": false, + "idempotentHint": true, + "openWorldHint": true + }, + "description": "List the resource consumption (CPU and memory) as recorded by the Kubernetes Metrics Server for the specified Kubernetes Nodes or all nodes in the cluster", + "inputSchema": { + "type": "object", + "properties": { + "name": { + "description": "Name of the Node to get the resource consumption from (Optional, all Nodes if not provided)", + "type": "string" + }, + "label_selector": { + "description": "Kubernetes label selector (e.g. 
'node-role.kubernetes.io/worker=') to filter nodes by label (Optional, only applicable when name is not provided)", + "pattern": "([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]", + "type": "string" + } + } + }, + "name": "nodes_top" + }, { "annotations": { "title": "Pods: Delete", - "readOnlyHint": false, "destructiveHint": true, "idempotentHint": true, "openWorldHint": true @@ -226,9 +241,7 @@ { "annotations": { "title": "Pods: Exec", - "readOnlyHint": false, "destructiveHint": true, - "idempotentHint": false, "openWorldHint": true }, "description": "Execute a command in a Kubernetes Pod in the current or provided namespace with the provided name and command", @@ -267,7 +280,6 @@ "title": "Pods: Get", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get a Kubernetes Pod in the current or provided namespace with the provided name", @@ -294,7 +306,6 @@ "title": "Pods: List", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List all the Kubernetes pods in the current cluster from all namespaces", @@ -315,7 +326,6 @@ "title": "Pods: List in Namespace", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List all the Kubernetes pods in the specified namespace in the current cluster", @@ -343,7 +353,6 @@ "title": "Pods: Log", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get the logs of a Kubernetes Pod in the current or provided namespace with the provided name", @@ -382,9 +391,7 @@ { "annotations": { "title": "Pods: Run", - "readOnlyHint": false, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Run a Kubernetes Pod in the current or provided namespace with the provided container image and optional name", @@ -451,7 +458,6 @@ { "annotations": { "title": "Resources: 
Create or Update", - "readOnlyHint": false, "destructiveHint": true, "idempotentHint": true, "openWorldHint": true @@ -474,7 +480,6 @@ { "annotations": { "title": "Resources: Delete", - "readOnlyHint": false, "destructiveHint": true, "idempotentHint": true, "openWorldHint": true @@ -513,7 +518,6 @@ "title": "Resources: Get", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Get a Kubernetes resource in the current cluster by providing its apiVersion, kind, optionally the namespace, and its name\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)", @@ -550,7 +554,6 @@ "title": "Resources: List", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List Kubernetes resources and objects in the current cluster by providing their apiVersion and kind and optionally the namespace and label selector\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)", diff --git a/pkg/mcp/testdata/toolsets-helm-tools.json b/pkg/mcp/testdata/toolsets-helm-tools.json index c57dfc27..6afd3f33 100644 --- a/pkg/mcp/testdata/toolsets-helm-tools.json +++ b/pkg/mcp/testdata/toolsets-helm-tools.json @@ -2,9 +2,7 @@ { "annotations": { "title": "Helm: Install", - "readOnlyHint": false, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "Install a Helm chart in the current or provided namespace", @@ -39,7 +37,6 @@ "title": "Helm: List", "readOnlyHint": true, "destructiveHint": false, - "idempotentHint": false, "openWorldHint": true }, "description": "List all the Helm releases in the current or provided namespace (or in all namespaces if specified)", @@ -61,7 +58,6 @@ { "annotations": { "title": "Helm: Uninstall", - "readOnlyHint": false, "destructiveHint": true, "idempotentHint": true, "openWorldHint": 
true diff --git a/pkg/toolsets/config/configuration.go b/pkg/toolsets/config/configuration.go index 6b6b45d3..ab973da1 100644 --- a/pkg/toolsets/config/configuration.go +++ b/pkg/toolsets/config/configuration.go @@ -51,7 +51,6 @@ func initConfiguration() []api.ServerTool { Title: "Configuration: View", ReadOnlyHint: ptr.To(true), DestructiveHint: ptr.To(false), - IdempotentHint: ptr.To(false), OpenWorldHint: ptr.To(true), }, }, diff --git a/pkg/toolsets/core/events.go b/pkg/toolsets/core/events.go index f10ff576..43ae1cc1 100644 --- a/pkg/toolsets/core/events.go +++ b/pkg/toolsets/core/events.go @@ -28,7 +28,6 @@ func initEvents() []api.ServerTool { Title: "Events: List", ReadOnlyHint: ptr.To(true), DestructiveHint: ptr.To(false), - IdempotentHint: ptr.To(false), OpenWorldHint: ptr.To(true), }, }, Handler: eventsList}, diff --git a/pkg/toolsets/core/namespaces.go b/pkg/toolsets/core/namespaces.go index 71995d8c..2f2ee8fc 100644 --- a/pkg/toolsets/core/namespaces.go +++ b/pkg/toolsets/core/namespaces.go @@ -24,7 +24,6 @@ func initNamespaces(o internalk8s.Openshift) []api.ServerTool { Title: "Namespaces: List", ReadOnlyHint: ptr.To(true), DestructiveHint: ptr.To(false), - IdempotentHint: ptr.To(false), OpenWorldHint: ptr.To(true), }, }, Handler: namespacesList, @@ -41,7 +40,6 @@ func initNamespaces(o internalk8s.Openshift) []api.ServerTool { Title: "Projects: List", ReadOnlyHint: ptr.To(true), DestructiveHint: ptr.To(false), - IdempotentHint: ptr.To(false), OpenWorldHint: ptr.To(true), }, }, Handler: projectsList, diff --git a/pkg/toolsets/core/nodes.go b/pkg/toolsets/core/nodes.go index fc06a2d9..e42a8a98 100644 --- a/pkg/toolsets/core/nodes.go +++ b/pkg/toolsets/core/nodes.go @@ -1,13 +1,19 @@ package core import ( + "bytes" "errors" "fmt" "github.com/google/jsonschema-go/jsonschema" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubectl/pkg/metricsutil" "k8s.io/utils/ptr" 
"github.com/containers/kubernetes-mcp-server/pkg/api" + "github.com/containers/kubernetes-mcp-server/pkg/kubernetes" ) func initNodes() []api.ServerTool { @@ -39,7 +45,6 @@ func initNodes() []api.ServerTool { Title: "Node: Log", ReadOnlyHint: ptr.To(true), DestructiveHint: ptr.To(false), - IdempotentHint: ptr.To(false), OpenWorldHint: ptr.To(true), }, }, Handler: nodesLog}, @@ -60,10 +65,34 @@ func initNodes() []api.ServerTool { Title: "Node: Stats Summary", ReadOnlyHint: ptr.To(true), DestructiveHint: ptr.To(false), - IdempotentHint: ptr.To(false), OpenWorldHint: ptr.To(true), }, }, Handler: nodesStatsSummary}, + {Tool: api.Tool{ + Name: "nodes_top", + Description: "List the resource consumption (CPU and memory) as recorded by the Kubernetes Metrics Server for the specified Kubernetes Nodes or all nodes in the cluster", + InputSchema: &jsonschema.Schema{ + Type: "object", + Properties: map[string]*jsonschema.Schema{ + "name": { + Type: "string", + Description: "Name of the Node to get the resource consumption from (Optional, all Nodes if not provided)", + }, + "label_selector": { + Type: "string", + Description: "Kubernetes label selector (e.g. 
'node-role.kubernetes.io/worker=') to filter nodes by label (Optional, only applicable when name is not provided)", + Pattern: "([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]", + }, + }, + }, + Annotations: api.ToolAnnotations{ + Title: "Nodes: Top", + ReadOnlyHint: ptr.To(true), + DestructiveHint: ptr.To(false), + IdempotentHint: ptr.To(true), + OpenWorldHint: ptr.To(true), + }, + }, Handler: nodesTop}, } } @@ -110,3 +139,53 @@ func nodesStatsSummary(params api.ToolHandlerParams) (*api.ToolCallResult, error } return api.NewToolCallResult(ret, nil), nil } + +func nodesTop(params api.ToolHandlerParams) (*api.ToolCallResult, error) { + nodesTopOptions := kubernetes.NodesTopOptions{} + if v, ok := params.GetArguments()["name"].(string); ok { + nodesTopOptions.Name = v + } + if v, ok := params.GetArguments()["label_selector"].(string); ok { + nodesTopOptions.LabelSelector = v + } + + nodeMetrics, err := params.NodesTop(params, nodesTopOptions) + if err != nil { + return api.NewToolCallResult("", fmt.Errorf("failed to get nodes top: %v", err)), nil + } + + // Get the list of nodes to extract their allocatable resources + nodes, err := params.AccessControlClientset().Nodes() + if err != nil { + return api.NewToolCallResult("", fmt.Errorf("failed to get nodes client: %v", err)), nil + } + + nodeList, err := nodes.List(params, metav1.ListOptions{ + LabelSelector: nodesTopOptions.LabelSelector, + }) + if err != nil { + return api.NewToolCallResult("", fmt.Errorf("failed to list nodes: %v", err)), nil + } + + // Build availableResources map + availableResources := make(map[string]v1.ResourceList) + for _, n := range nodeList.Items { + availableResources[n.Name] = n.Status.Allocatable + + // Handle swap if available + if n.Status.NodeInfo.Swap != nil && n.Status.NodeInfo.Swap.Capacity != nil { + swapCapacity := *n.Status.NodeInfo.Swap.Capacity + availableResources[n.Name]["swap"] = *resource.NewQuantity(swapCapacity, resource.BinarySI) + } + } + + // Print the metrics + buf := 
new(bytes.Buffer) + printer := metricsutil.NewTopCmdPrinter(buf, true) + err = printer.PrintNodeMetrics(nodeMetrics.Items, availableResources, false, "") + if err != nil { + return api.NewToolCallResult("", fmt.Errorf("failed to print node metrics: %v", err)), nil + } + + return api.NewToolCallResult(buf.String(), nil), nil +} diff --git a/pkg/toolsets/core/pods.go b/pkg/toolsets/core/pods.go index 8744a974..78781332 100644 --- a/pkg/toolsets/core/pods.go +++ b/pkg/toolsets/core/pods.go @@ -33,7 +33,6 @@ func initPods() []api.ServerTool { Title: "Pods: List", ReadOnlyHint: ptr.To(true), DestructiveHint: ptr.To(false), - IdempotentHint: ptr.To(false), OpenWorldHint: ptr.To(true), }, }, Handler: podsListInAllNamespaces}, @@ -59,7 +58,6 @@ func initPods() []api.ServerTool { Title: "Pods: List in Namespace", ReadOnlyHint: ptr.To(true), DestructiveHint: ptr.To(false), - IdempotentHint: ptr.To(false), OpenWorldHint: ptr.To(true), }, }, Handler: podsListInNamespace}, @@ -84,7 +82,6 @@ func initPods() []api.ServerTool { Title: "Pods: Get", ReadOnlyHint: ptr.To(true), DestructiveHint: ptr.To(false), - IdempotentHint: ptr.To(false), OpenWorldHint: ptr.To(true), }, }, Handler: podsGet}, @@ -107,7 +104,6 @@ func initPods() []api.ServerTool { }, Annotations: api.ToolAnnotations{ Title: "Pods: Delete", - ReadOnlyHint: ptr.To(false), DestructiveHint: ptr.To(true), IdempotentHint: ptr.To(true), OpenWorldHint: ptr.To(true), @@ -177,9 +173,7 @@ func initPods() []api.ServerTool { }, Annotations: api.ToolAnnotations{ Title: "Pods: Exec", - ReadOnlyHint: ptr.To(false), DestructiveHint: ptr.To(true), // Depending on the Pod's entrypoint, executing certain commands may kill the Pod - IdempotentHint: ptr.To(false), OpenWorldHint: ptr.To(true), }, }, Handler: podsExec}, @@ -218,7 +212,6 @@ func initPods() []api.ServerTool { Title: "Pods: Log", ReadOnlyHint: ptr.To(true), DestructiveHint: ptr.To(false), - IdempotentHint: ptr.To(false), OpenWorldHint: ptr.To(true), }, }, Handler: podsLog}, 
@@ -249,9 +242,7 @@ func initPods() []api.ServerTool { }, Annotations: api.ToolAnnotations{ Title: "Pods: Run", - ReadOnlyHint: ptr.To(false), DestructiveHint: ptr.To(false), - IdempotentHint: ptr.To(false), OpenWorldHint: ptr.To(true), }, }, Handler: podsRun}, diff --git a/pkg/toolsets/core/resources.go b/pkg/toolsets/core/resources.go index a3536f56..52a613b3 100644 --- a/pkg/toolsets/core/resources.go +++ b/pkg/toolsets/core/resources.go @@ -51,7 +51,6 @@ func initResources(o internalk8s.Openshift) []api.ServerTool { Title: "Resources: List", ReadOnlyHint: ptr.To(true), DestructiveHint: ptr.To(false), - IdempotentHint: ptr.To(false), OpenWorldHint: ptr.To(true), }, }, Handler: resourcesList}, @@ -84,7 +83,6 @@ func initResources(o internalk8s.Openshift) []api.ServerTool { Title: "Resources: Get", ReadOnlyHint: ptr.To(true), DestructiveHint: ptr.To(false), - IdempotentHint: ptr.To(false), OpenWorldHint: ptr.To(true), }, }, Handler: resourcesGet}, @@ -103,7 +101,6 @@ func initResources(o internalk8s.Openshift) []api.ServerTool { }, Annotations: api.ToolAnnotations{ Title: "Resources: Create or Update", - ReadOnlyHint: ptr.To(false), DestructiveHint: ptr.To(true), IdempotentHint: ptr.To(true), OpenWorldHint: ptr.To(true), @@ -136,7 +133,6 @@ func initResources(o internalk8s.Openshift) []api.ServerTool { }, Annotations: api.ToolAnnotations{ Title: "Resources: Delete", - ReadOnlyHint: ptr.To(false), DestructiveHint: ptr.To(true), IdempotentHint: ptr.To(true), OpenWorldHint: ptr.To(true), diff --git a/pkg/toolsets/helm/helm.go b/pkg/toolsets/helm/helm.go index 0352cf60..646941f1 100644 --- a/pkg/toolsets/helm/helm.go +++ b/pkg/toolsets/helm/helm.go @@ -39,9 +39,8 @@ func initHelm() []api.ServerTool { }, Annotations: api.ToolAnnotations{ Title: "Helm: Install", - ReadOnlyHint: ptr.To(false), DestructiveHint: ptr.To(false), - IdempotentHint: ptr.To(false), // TODO: consider replacing implementation with equivalent to: helm upgrade --install + IdempotentHint: nil, // 
TODO: consider replacing implementation with equivalent to: helm upgrade --install OpenWorldHint: ptr.To(true), }, }, Handler: helmInstall}, @@ -65,7 +64,6 @@ func initHelm() []api.ServerTool { Title: "Helm: List", ReadOnlyHint: ptr.To(true), DestructiveHint: ptr.To(false), - IdempotentHint: ptr.To(false), OpenWorldHint: ptr.To(true), }, }, Handler: helmList}, @@ -88,7 +86,6 @@ func initHelm() []api.ServerTool { }, Annotations: api.ToolAnnotations{ Title: "Helm: Uninstall", - ReadOnlyHint: ptr.To(false), DestructiveHint: ptr.To(true), IdempotentHint: ptr.To(true), OpenWorldHint: ptr.To(true), diff --git a/vendor/github.com/containerd/containerd/archive/compression/compression.go b/vendor/github.com/containerd/containerd/archive/compression/compression.go index 23ddfab1..3c152f28 100644 --- a/vendor/github.com/containerd/containerd/archive/compression/compression.go +++ b/vendor/github.com/containerd/containerd/archive/compression/compression.go @@ -45,6 +45,8 @@ const ( Gzip // Zstd is zstd compression algorithm. Zstd + // Unknown is used when a plugin handles the algorithm. + Unknown ) const disablePigzEnv = "CONTAINERD_DISABLE_PIGZ" @@ -254,6 +256,8 @@ func (compression *Compression) Extension() string { return "gz" case Zstd: return "zst" + case Unknown: + return "unknown" } return "" } diff --git a/vendor/github.com/cyphar/filepath-securejoin/.golangci.yml b/vendor/github.com/cyphar/filepath-securejoin/.golangci.yml new file mode 100644 index 00000000..3e8dd99b --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/.golangci.yml @@ -0,0 +1,60 @@ +# SPDX-License-Identifier: MPL-2.0 + +# Copyright (C) 2025 Aleksa Sarai +# Copyright (C) 2025 SUSE LLC +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +version: "2" + +run: + build-tags: + - libpathrs + +linters: + enable: + - asasalint + - asciicheck + - containedctx + - contextcheck + - errcheck + - errorlint + - exhaustive + - forcetypeassert + - godot + - goprintffuncname + - govet + - importas + - ineffassign + - makezero + - misspell + - musttag + - nilerr + - nilnesserr + - nilnil + - noctx + - prealloc + - revive + - staticcheck + - testifylint + - unconvert + - unparam + - unused + - usetesting + settings: + govet: + enable: + - nilness + testifylint: + enable-all: true + +formatters: + enable: + - gofumpt + - goimports + settings: + goimports: + local-prefixes: + - github.com/cyphar/filepath-securejoin diff --git a/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md b/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md index ca0e3c62..734cf61e 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md +++ b/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md @@ -6,6 +6,208 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ## [Unreleased] ## +## [0.6.0] - 2025-11-03 ## + +> By the Power of Greyskull! + +While quite small code-wise, this release marks a very key point in the +development of filepath-securejoin. + +filepath-securejoin was originally intended (back in 2017) to simply be a +single-purpose library that would take some common code used in container +runtimes (specifically, Docker's `FollowSymlinksInScope`) and make it more +general-purpose (with the eventual goals of it ending up in the Go stdlib). + +Of course, I quickly discovered that this problem was actually far more +complicated to solve when dealing with racing attackers, which lead to me +developing `openat2(2)` and [libpathrs][]. I had originally planned for +libpathrs to completely replace filepath-securejoin "once it was ready" but in +the interim we needed to fix several race attacks in runc as part of security +advisories. 
Obviously we couldn't require the usage of a pre-0.1 Rust library +in runc so it was necessary to port bits of libpathrs into filepath-securejoin. +(Ironically the first prototypes of libpathrs were originally written in Go and +then rewritten to Rust, so the code in filepath-securejoin is actually Go code +that was rewritten to Rust then re-rewritten to Go.) + +It then became clear that pure-Go libraries will likely not be willing to +require CGo for all of their builds, so it was necessary to accept that +filepath-securejoin will need to stay. As such, in v0.5.0 we provided more +pure-Go implementations of features from libpathrs but moved them into +`pathrs-lite` subpackage to clarify what purpose these helpers serve. + +This release finally closes the loop and makes it so that pathrs-lite can +transparently use libpathrs (via a `libpathrs` build-tag). This means that +upstream libraries can use the pure Go version if they prefer, but downstreams +(either downstream library users or even downstream distributions) are able to +migrate to libpathrs for all usages of pathrs-lite in an entire Go binary. + +I should make it clear that I do not plan to port the rest of libpathrs to Go, +as I do not wish to maintain two copies of the same codebase. pathrs-lite +already provides the core essentials necessary to operate on paths safely for +most modern systems. Users who want additional hardening or more ergonomic APIs +are free to use [`cyphar.com/go-pathrs`][go-pathrs] (libpathrs's Go bindings). + +[libpathrs]: https://github.com/cyphar/libpathrs +[go-pathrs]: https://cyphar.com/go-pathrs + +### Breaking ### +- The deprecated `MkdirAll`, `MkdirAllHandle`, `OpenInRoot`, `OpenatInRoot` and + `Reopen` wrappers have been removed. Please switch to using `pathrs-lite` + directly. + +### Added ### +- `pathrs-lite` now has support for using [libpathrs][libpathrs] as a backend. + This is opt-in and can be enabled at build time with the `libpathrs` build + tag. 
The intention is to allow for downstream libraries and other projects to + make use of the pure-Go `github.com/cyphar/filepath-securejoin/pathrs-lite` + package and distributors can then opt-in to using `libpathrs` for the entire + binary if they wish. + +## [0.5.1] - 2025-10-31 ## + +> Spooky scary skeletons send shivers down your spine! + +### Changed ### +- `openat2` can return `-EAGAIN` if it detects a possible attack in certain + scenarios (namely if there was a rename or mount while walking a path with a + `..` component). While this is necessary to avoid a denial-of-service in the + kernel, it does require retry loops in userspace. + + In previous versions, `pathrs-lite` would retry `openat2` 32 times before + returning an error, but we've received user reports that this limit can be + hit on systems with very heavy load. In some synthetic benchmarks (testing + the worst-case of an attacker doing renames in a tight loop on every core of + a 16-core machine) we managed to get a ~3% failure rate in runc. We have + improved this situation in two ways: + + * We have now increased this limit to 128, which should be good enough for + most use-cases without becoming a denial-of-service vector (the number of + syscalls called by the `O_PATH` resolver in a typical case is within the + same ballpark). The same benchmarks show a failure rate of ~0.12% which + (while not zero) is probably sufficient for most users. + + * In addition, we now return a `unix.EAGAIN` error that is bubbled up and can + be detected by callers. This means that callers with stricter requirements + to avoid spurious errors can choose to do their own infinite `EAGAIN` retry + loop (though we would strongly recommend users use time-based deadlines in + such retry loops to avoid potentially unbounded denials-of-service). + +## [0.5.0] - 2025-09-26 ## + +> Let the past die. Kill it if you have to. 
+ +> **NOTE**: With this release, some parts of +> `github.com/cyphar/filepath-securejoin` are now licensed under the Mozilla +> Public License (version 2). Please see [COPYING.md][] as well as the +> license header in each file for more details. + +[COPYING.md]: ./COPYING.md + +### Breaking ### +- The new API introduced in the [0.3.0][] release has been moved to a new + subpackage called `pathrs-lite`. This was primarily done to better indicate + the split between the new and old APIs, as well as indicate to users the + purpose of this subpackage (it is a less complete version of [libpathrs][]). + + We have added some wrappers to the top-level package to ease the transition, + but those are deprecated and will be removed in the next minor release of + filepath-securejoin. Users should update their import paths. + + This new subpackage has also been relicensed under the Mozilla Public License + (version 2), please see [COPYING.md][] for more details. + +### Added ### +- Most of the key bits of the safe `procfs` API have now been exported and are + available in `github.com/cyphar/filepath-securejoin/pathrs-lite/procfs`. At + the moment this primarily consists of a new `procfs.Handle` API: + + * `OpenProcRoot` returns a new handle to `/proc`, endeavouring to make it + safe if possible (`subset=pid` to protect against mistaken write attacks + and leaks, as well as using `fsopen(2)` to avoid racing mount attacks). + + `OpenUnsafeProcRoot` returns a handle without attempting to create one + with `subset=pid`, which makes it more dangerous to leak. Most users + should use `OpenProcRoot` (even if you need to use `ProcRoot` as the base + of an operation, as filepath-securejoin will internally open a handle when + necessary). + + * The `(*procfs.Handle).Open*` family of methods lets you get a safe + `O_PATH` handle to subpaths within `/proc` for certain subpaths. 
+ + For `OpenThreadSelf`, the returned `ProcThreadSelfCloser` needs to be + called after you completely finish using the handle (this is necessary + because Go is multi-threaded and `ProcThreadSelf` references + `/proc/thread-self` which may disappear if we do not + `runtime.LockOSThread` -- `ProcThreadSelfCloser` is currently equivalent + to `runtime.UnlockOSThread`). + + Note that you cannot open any `procfs` symlinks (most notably magic-links) + using this API. At the moment, filepath-securejoin does not support this + feature (but [libpathrs][] does). + + * `ProcSelfFdReadlink` lets you get the in-kernel path representation of a + file descriptor (think `readlink("/proc/self/fd/...")`), except that we + verify that there aren't any tricky overmounts that could fool the + process. + + Please be aware that the returned string is simply a snapshot at that + particular moment, and an attacker could move the file being pointed to. + In addition, complex namespace configurations could result in non-sensical + or confusing paths to be returned. The value received from this function + should only be used as secondary verification of some security property, + not as proof that a particular handle has a particular path. + + The procfs handle used internally by the API is the same as the rest of + `filepath-securejoin` (for privileged programs this is usually a private + in-process `procfs` instance created with `fsopen(2)`). + + As before, this is intended as a stop-gap before users migrate to + [libpathrs][], which provides a far more extensive safe `procfs` API and is + generally more robust. + +- Previously, the hardened procfs implementation (used internally within + `Reopen` and `Open(at)InRoot`) only protected against overmount attacks on + systems with `openat2(2)` (Linux 5.6) or systems with `fsopen(2)` or + `open_tree(2)` (Linux 5.2) and programs with privileges to use them (with + some caveats about locked mounts that probably affect very few users). 
For + other users, an attacker with the ability to create malicious mounts (on most + systems, a sysadmin) could trick you into operating on files you didn't + expect. This attack only really makes sense in the context of container + runtime implementations. + + This was considered a reasonable trade-off, as the long-term intention was to + get all users to just switch to [libpathrs][] if they wanted to use the safe + `procfs` API (which had more extensive protections, and is what these new + protections in `filepath-securejoin` are based on). However, as the API + is now being exported it seems unwise to advertise the API as "safe" if we do + not protect against known attacks. + + The procfs API is now more protected against attackers on systems lacking the + aforementioned protections. However, the most comprehensive of these + protections effectively rely on [`statx(STATX_MNT_ID)`][statx.2] (Linux 5.8). + On older kernel versions, there is no effective protection (there is some + minimal protection against non-`procfs` filesystem components but a + sufficiently clever attacker can work around those). In addition, + `STATX_MNT_ID` is vulnerable to mount ID reuse attacks by sufficiently + motivated and privileged attackers -- this problem is mitigated with + `STATX_MNT_ID_UNIQUE` (Linux 6.8) but that raises the minimum kernel version + for more protection. + + The fact that these protections are quite limited despite needing a fair bit + of extra code to handle was one of the primary reasons we did not initially + implement this in `filepath-securejoin` ([libpathrs][] supports all of this, + of course). + +### Fixed ### +- RHEL 8 kernels have backports of `fsopen(2)` but in some testing we've found + that it has very bad (and very difficult to debug) performance issues, and so + we will explicitly refuse to use `fsopen(2)` if the running kernel version is + pre-5.2 and will instead fallback to `open("/proc")`. 
+ +[CVE-2024-21626]: https://github.com/opencontainers/runc/security/advisories/GHSA-xr7r-f8xq-vfvv +[libpathrs]: https://github.com/cyphar/libpathrs +[statx.2]: https://www.man7.org/linux/man-pages/man2/statx.2.html + ## [0.4.1] - 2025-01-28 ## ### Fixed ### @@ -173,7 +375,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). safe to start migrating to as we have extensive tests ensuring they behave correctly and are safe against various races and other attacks. -[libpathrs]: https://github.com/openSUSE/libpathrs +[libpathrs]: https://github.com/cyphar/libpathrs [open.2]: https://www.man7.org/linux/man-pages/man2/open.2.html ## [0.2.5] - 2024-05-03 ## @@ -238,7 +440,10 @@ This is our first release of `github.com/cyphar/filepath-securejoin`, containing a full implementation with a coverage of 93.5% (the only missing cases are the error cases, which are hard to mocktest at the moment). -[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.1...HEAD +[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.6.0...HEAD +[0.6.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.5.1...v0.6.0 +[0.5.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.5.0...v0.5.1 +[0.5.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.1...v0.5.0 [0.4.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.0...v0.4.1 [0.4.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.6...v0.4.0 [0.3.6]: https://github.com/cyphar/filepath-securejoin/compare/v0.3.5...v0.3.6 diff --git a/vendor/github.com/cyphar/filepath-securejoin/COPYING.md b/vendor/github.com/cyphar/filepath-securejoin/COPYING.md new file mode 100644 index 00000000..520e822b --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/COPYING.md @@ -0,0 +1,447 @@ +## COPYING ## + +`SPDX-License-Identifier: BSD-3-Clause AND MPL-2.0` + +This project is made up of code licensed under different licenses. 
Which code +you use will have an impact on whether only one or both licenses apply to your +usage of this library. + +Note that **each file** in this project individually has a code comment at the +start describing the license of that particular file -- this is the most +accurate license information of this project; in case there is any conflict +between this document and the comment at the start of a file, the comment shall +take precedence. The only purpose of this document is to work around [a known +technical limitation of pkg.go.dev's license checking tool when dealing with +non-trivial project licenses][go75067]. + +[go75067]: https://go.dev/issue/75067 + +### `BSD-3-Clause` ### + +At time of writing, the following files and directories are licensed under the +BSD-3-Clause license: + + * `doc.go` + * `join*.go` + * `vfs.go` + * `internal/consts/*.go` + * `pathrs-lite/internal/gocompat/*.go` + * `pathrs-lite/internal/kernelversion/*.go` + +The text of the BSD-3-Clause license used by this project is the following (the +text is also available from the [`LICENSE.BSD`](./LICENSE.BSD) file): + +``` +Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +Copyright (C) 2017-2024 SUSE LLC. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +``` + +### `MPL-2.0` ### + +All other files (unless otherwise marked) are licensed under the Mozilla Public +License (version 2.0). + +The text of the Mozilla Public License (version 2.0) is the following (the text +is also available from the [`LICENSE.MPL-2.0`](./LICENSE.MPL-2.0) file): + +``` +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. 
"Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. 
For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at https://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. +``` diff --git a/vendor/github.com/cyphar/filepath-securejoin/LICENSE b/vendor/github.com/cyphar/filepath-securejoin/LICENSE.BSD similarity index 100% rename from vendor/github.com/cyphar/filepath-securejoin/LICENSE rename to vendor/github.com/cyphar/filepath-securejoin/LICENSE.BSD diff --git a/vendor/github.com/cyphar/filepath-securejoin/LICENSE.MPL-2.0 b/vendor/github.com/cyphar/filepath-securejoin/LICENSE.MPL-2.0 new file mode 100644 index 00000000..d0a1fa14 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/LICENSE.MPL-2.0 @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. 
"Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. 
+ +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at https://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/cyphar/filepath-securejoin/README.md b/vendor/github.com/cyphar/filepath-securejoin/README.md index eaeb53fc..6673abfc 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/README.md +++ b/vendor/github.com/cyphar/filepath-securejoin/README.md @@ -67,7 +67,8 @@ func SecureJoin(root, unsafePath string) (string, error) { [libpathrs]: https://github.com/openSUSE/libpathrs [go#20126]: https://github.com/golang/go/issues/20126 -### New API ### +### New API ### +[#new-api]: #new-api While we recommend users switch to [libpathrs][libpathrs] as soon as it has a stable release, some methods implemented by libpathrs have been ported to this @@ -165,5 +166,19 @@ after `MkdirAll`). ### License ### -The license of this project is the same as Go, which is a BSD 3-clause license -available in the `LICENSE` file. +`SPDX-License-Identifier: BSD-3-Clause AND MPL-2.0` + +Some of the code in this project is derived from Go, and is licensed under a +BSD 3-clause license (available in `LICENSE.BSD`). Other files (many of which +are derived from [libpathrs][libpathrs]) are licensed under the Mozilla Public +License version 2.0 (available in `LICENSE.MPL-2.0`). If you are using the +["New API" described above][#new-api], you are probably using code from files +released under this license. + +Every source file in this project has a copyright header describing its +license. Please check the license headers of each file to see what license +applies to it. + +See [COPYING.md](./COPYING.md) for some more details. 
+ +[umoci]: https://github.com/opencontainers/umoci diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION index 267577d4..a918a2aa 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/VERSION +++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION @@ -1 +1 @@ -0.4.1 +0.6.0 diff --git a/vendor/github.com/cyphar/filepath-securejoin/codecov.yml b/vendor/github.com/cyphar/filepath-securejoin/codecov.yml new file mode 100644 index 00000000..ff284dbf --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/codecov.yml @@ -0,0 +1,29 @@ +# SPDX-License-Identifier: MPL-2.0 + +# Copyright (C) 2025 Aleksa Sarai +# Copyright (C) 2025 SUSE LLC +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. + +comment: + layout: "condensed_header, reach, diff, components, condensed_files, condensed_footer" + require_changes: true + branches: + - main + +coverage: + range: 60..100 + status: + project: + default: + target: 85% + threshold: 0% + patch: + default: + target: auto + informational: true + +github_checks: + annotations: false diff --git a/vendor/github.com/cyphar/filepath-securejoin/doc.go b/vendor/github.com/cyphar/filepath-securejoin/doc.go index 1ec7d065..1438fc9c 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/doc.go +++ b/vendor/github.com/cyphar/filepath-securejoin/doc.go @@ -1,3 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause + // Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. // Copyright (C) 2017-2024 SUSE LLC. All rights reserved. // Use of this source code is governed by a BSD-style @@ -14,14 +16,13 @@ // **not** safe against race conditions where an attacker changes the // filesystem after (or during) the [SecureJoin] operation. 
// -// The new API is made up of [OpenInRoot] and [MkdirAll] (and derived -// functions). These are safe against racing attackers and have several other -// protections that are not provided by the legacy API. There are many more -// operations that most programs expect to be able to do safely, but we do not -// provide explicit support for them because we want to encourage users to -// switch to [libpathrs](https://github.com/openSUSE/libpathrs) which is a -// cross-language next-generation library that is entirely designed around -// operating on paths safely. +// The new API is available in the [pathrs-lite] subpackage, and provide +// protections against racing attackers as well as several other key +// protections against attacks often seen by container runtimes. As the name +// suggests, [pathrs-lite] is a stripped down (pure Go) reimplementation of +// [libpathrs]. The main APIs provided are [OpenInRoot], [MkdirAll], and +// [procfs.Handle] -- other APIs are not planned to be ported. The long-term +// goal is for users to migrate to [libpathrs] which is more fully-featured. // // securejoin has been used by several container runtimes (Docker, runc, // Kubernetes, etc) for quite a few years as a de-facto standard for operating @@ -31,9 +32,16 @@ // API as soon as possible (or even better, switch to libpathrs). // // This project was initially intended to be included in the Go standard -// library, but [it was rejected](https://go.dev/issue/20126). There is now a -// [new Go proposal](https://go.dev/issue/67002) for a safe path resolution API -// that shares some of the goals of filepath-securejoin. However, that design -// is intended to work like `openat2(RESOLVE_BENEATH)` which does not fit the -// usecase of container runtimes and most system tools. +// library, but it was rejected (see https://go.dev/issue/20126). Much later, +// [os.Root] was added to the Go stdlib that shares some of the goals of +// filepath-securejoin. 
However, its design is intended to work like +// openat2(RESOLVE_BENEATH) which does not fit the usecase of container +// runtimes and most system tools. +// +// [pathrs-lite]: https://pkg.go.dev/github.com/cyphar/filepath-securejoin/pathrs-lite +// [libpathrs]: https://github.com/openSUSE/libpathrs +// [OpenInRoot]: https://pkg.go.dev/github.com/cyphar/filepath-securejoin/pathrs-lite#OpenInRoot +// [MkdirAll]: https://pkg.go.dev/github.com/cyphar/filepath-securejoin/pathrs-lite#MkdirAll +// [procfs.Handle]: https://pkg.go.dev/github.com/cyphar/filepath-securejoin/pathrs-lite/procfs#Handle +// [os.Root]: https:///pkg.go.dev/os#Root package securejoin diff --git a/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_go120.go b/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_go120.go deleted file mode 100644 index 42452bbf..00000000 --- a/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_go120.go +++ /dev/null @@ -1,18 +0,0 @@ -//go:build linux && go1.20 - -// Copyright (C) 2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package securejoin - -import ( - "fmt" -) - -// wrapBaseError is a helper that is equivalent to fmt.Errorf("%w: %w"), except -// that on pre-1.20 Go versions only errors.Is() works properly (errors.Unwrap) -// is only guaranteed to give you baseErr. -func wrapBaseError(baseErr, extraErr error) error { - return fmt.Errorf("%w: %w", extraErr, baseErr) -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_unsupported.go b/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_unsupported.go deleted file mode 100644 index e7adca3f..00000000 --- a/vendor/github.com/cyphar/filepath-securejoin/gocompat_errors_unsupported.go +++ /dev/null @@ -1,38 +0,0 @@ -//go:build linux && !go1.20 - -// Copyright (C) 2024 SUSE LLC. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package securejoin - -import ( - "fmt" -) - -type wrappedError struct { - inner error - isError error -} - -func (err wrappedError) Is(target error) bool { - return err.isError == target -} - -func (err wrappedError) Unwrap() error { - return err.inner -} - -func (err wrappedError) Error() string { - return fmt.Sprintf("%v: %v", err.isError, err.inner) -} - -// wrapBaseError is a helper that is equivalent to fmt.Errorf("%w: %w"), except -// that on pre-1.20 Go versions only errors.Is() works properly (errors.Unwrap) -// is only guaranteed to give you baseErr. -func wrapBaseError(baseErr, extraErr error) error { - return wrappedError{ - inner: baseErr, - isError: extraErr, - } -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_go121.go b/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_go121.go deleted file mode 100644 index ddd6fa9a..00000000 --- a/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_go121.go +++ /dev/null @@ -1,32 +0,0 @@ -//go:build linux && go1.21 - -// Copyright (C) 2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package securejoin - -import ( - "slices" - "sync" -) - -func slices_DeleteFunc[S ~[]E, E any](slice S, delFn func(E) bool) S { - return slices.DeleteFunc(slice, delFn) -} - -func slices_Contains[S ~[]E, E comparable](slice S, val E) bool { - return slices.Contains(slice, val) -} - -func slices_Clone[S ~[]E, E any](slice S) S { - return slices.Clone(slice) -} - -func sync_OnceValue[T any](f func() T) func() T { - return sync.OnceValue(f) -} - -func sync_OnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) { - return sync.OnceValues(f) -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_unsupported.go b/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_unsupported.go deleted file mode 100644 index f1e6fe7e..00000000 --- a/vendor/github.com/cyphar/filepath-securejoin/gocompat_generics_unsupported.go +++ /dev/null @@ -1,124 +0,0 @@ -//go:build linux && !go1.21 - -// Copyright (C) 2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package securejoin - -import ( - "sync" -) - -// These are very minimal implementations of functions that appear in Go 1.21's -// stdlib, included so that we can build on older Go versions. Most are -// borrowed directly from the stdlib, and a few are modified to be "obviously -// correct" without needing to copy too many other helpers. - -// clearSlice is equivalent to the builtin clear from Go 1.21. -// Copied from the Go 1.24 stdlib implementation. -func clearSlice[S ~[]E, E any](slice S) { - var zero E - for i := range slice { - slice[i] = zero - } -} - -// Copied from the Go 1.24 stdlib implementation. -func slices_IndexFunc[S ~[]E, E any](s S, f func(E) bool) int { - for i := range s { - if f(s[i]) { - return i - } - } - return -1 -} - -// Copied from the Go 1.24 stdlib implementation. 
-func slices_DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { - i := slices_IndexFunc(s, del) - if i == -1 { - return s - } - // Don't start copying elements until we find one to delete. - for j := i + 1; j < len(s); j++ { - if v := s[j]; !del(v) { - s[i] = v - i++ - } - } - clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC - return s[:i] -} - -// Similar to the stdlib slices.Contains, except that we don't have -// slices.Index so we need to use slices.IndexFunc for this non-Func helper. -func slices_Contains[S ~[]E, E comparable](s S, v E) bool { - return slices_IndexFunc(s, func(e E) bool { return e == v }) >= 0 -} - -// Copied from the Go 1.24 stdlib implementation. -func slices_Clone[S ~[]E, E any](s S) S { - // Preserve nil in case it matters. - if s == nil { - return nil - } - return append(S([]E{}), s...) -} - -// Copied from the Go 1.24 stdlib implementation. -func sync_OnceValue[T any](f func() T) func() T { - var ( - once sync.Once - valid bool - p any - result T - ) - g := func() { - defer func() { - p = recover() - if !valid { - panic(p) - } - }() - result = f() - f = nil - valid = true - } - return func() T { - once.Do(g) - if !valid { - panic(p) - } - return result - } -} - -// Copied from the Go 1.24 stdlib implementation. 
-func sync_OnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) { - var ( - once sync.Once - valid bool - p any - r1 T1 - r2 T2 - ) - g := func() { - defer func() { - p = recover() - if !valid { - panic(p) - } - }() - r1, r2 = f() - f = nil - valid = true - } - return func() (T1, T2) { - once.Do(g) - if !valid { - panic(p) - } - return r1, r2 - } -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/internal/consts/consts.go b/vendor/github.com/cyphar/filepath-securejoin/internal/consts/consts.go new file mode 100644 index 00000000..c69c4da9 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/internal/consts/consts.go @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: BSD-3-Clause + +// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +// Copyright (C) 2017-2025 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package consts contains the definitions of internal constants used +// throughout filepath-securejoin. +package consts + +// MaxSymlinkLimit is the maximum number of symlinks that can be encountered +// during a single lookup before returning -ELOOP. At time of writing, Linux +// has an internal limit of 40. +const MaxSymlinkLimit = 255 diff --git a/vendor/github.com/cyphar/filepath-securejoin/join.go b/vendor/github.com/cyphar/filepath-securejoin/join.go index e6634d47..199c1d83 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/join.go +++ b/vendor/github.com/cyphar/filepath-securejoin/join.go @@ -1,3 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause + // Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. // Copyright (C) 2017-2025 SUSE LLC. All rights reserved. 
// Use of this source code is governed by a BSD-style @@ -11,9 +13,9 @@ import ( "path/filepath" "strings" "syscall" -) -const maxSymlinkLimit = 255 + "github.com/cyphar/filepath-securejoin/internal/consts" +) // IsNotExist tells you if err is an error that implies that either the path // accessed does not exist (or path components don't exist). This is @@ -49,12 +51,13 @@ func hasDotDot(path string) bool { return strings.Contains("/"+path+"/", "/../") } -// SecureJoinVFS joins the two given path components (similar to [filepath.Join]) except -// that the returned path is guaranteed to be scoped inside the provided root -// path (when evaluated). Any symbolic links in the path are evaluated with the -// given root treated as the root of the filesystem, similar to a chroot. The -// filesystem state is evaluated through the given [VFS] interface (if nil, the -// standard [os].* family of functions are used). +// SecureJoinVFS joins the two given path components (similar to +// [filepath.Join]) except that the returned path is guaranteed to be scoped +// inside the provided root path (when evaluated). Any symbolic links in the +// path are evaluated with the given root treated as the root of the +// filesystem, similar to a chroot. The filesystem state is evaluated through +// the given [VFS] interface (if nil, the standard [os].* family of functions +// are used). // // Note that the guarantees provided by this function only apply if the path // components in the returned string are not modified (in other words are not @@ -78,7 +81,7 @@ func hasDotDot(path string) bool { // fully resolved using [filepath.EvalSymlinks] or otherwise constructed to // avoid containing symlink components. Of course, the root also *must not* be // attacker-controlled. 
-func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { +func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { //nolint:revive // name is part of public API // The root path must not contain ".." components, otherwise when we join // the subpath we will end up with a weird path. We could work around this // in other ways but users shouldn't be giving us non-lexical root paths in @@ -138,7 +141,7 @@ func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { // It's a symlink, so get its contents and expand it by prepending it // to the yet-unparsed path. linksWalked++ - if linksWalked > maxSymlinkLimit { + if linksWalked > consts.MaxSymlinkLimit { return "", &os.PathError{Op: "SecureJoin", Path: root + string(filepath.Separator) + unsafePath, Err: syscall.ELOOP} } diff --git a/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go b/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go deleted file mode 100644 index be81e498..00000000 --- a/vendor/github.com/cyphar/filepath-securejoin/lookup_linux.go +++ /dev/null @@ -1,388 +0,0 @@ -//go:build linux - -// Copyright (C) 2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package securejoin - -import ( - "errors" - "fmt" - "os" - "path" - "path/filepath" - "strings" - - "golang.org/x/sys/unix" -) - -type symlinkStackEntry struct { - // (dir, remainingPath) is what we would've returned if the link didn't - // exist. This matches what openat2(RESOLVE_IN_ROOT) would return in - // this case. - dir *os.File - remainingPath string - // linkUnwalked is the remaining path components from the original - // Readlink which we have yet to walk. When this slice is empty, we - // drop the link from the stack. 
- linkUnwalked []string -} - -func (se symlinkStackEntry) String() string { - return fmt.Sprintf("<%s>/%s [->%s]", se.dir.Name(), se.remainingPath, strings.Join(se.linkUnwalked, "/")) -} - -func (se symlinkStackEntry) Close() { - _ = se.dir.Close() -} - -type symlinkStack []*symlinkStackEntry - -func (s *symlinkStack) IsEmpty() bool { - return s == nil || len(*s) == 0 -} - -func (s *symlinkStack) Close() { - if s != nil { - for _, link := range *s { - link.Close() - } - // TODO: Switch to clear once we switch to Go 1.21. - *s = nil - } -} - -var ( - errEmptyStack = errors.New("[internal] stack is empty") - errBrokenSymlinkStack = errors.New("[internal error] broken symlink stack") -) - -func (s *symlinkStack) popPart(part string) error { - if s == nil || s.IsEmpty() { - // If there is nothing in the symlink stack, then the part was from the - // real path provided by the user, and this is a no-op. - return errEmptyStack - } - if part == "." { - // "." components are no-ops -- we drop them when doing SwapLink. - return nil - } - - tailEntry := (*s)[len(*s)-1] - - // Double-check that we are popping the component we expect. - if len(tailEntry.linkUnwalked) == 0 { - return fmt.Errorf("%w: trying to pop component %q of empty stack entry %s", errBrokenSymlinkStack, part, tailEntry) - } - headPart := tailEntry.linkUnwalked[0] - if headPart != part { - return fmt.Errorf("%w: trying to pop component %q but the last stack entry is %s (%q)", errBrokenSymlinkStack, part, tailEntry, headPart) - } - - // Drop the component, but keep the entry around in case we are dealing - // with a "tail-chained" symlink. - tailEntry.linkUnwalked = tailEntry.linkUnwalked[1:] - return nil -} - -func (s *symlinkStack) PopPart(part string) error { - if err := s.popPart(part); err != nil { - if errors.Is(err, errEmptyStack) { - // Skip empty stacks. - err = nil - } - return err - } - - // Clean up any of the trailing stack entries that are empty. 
- for lastGood := len(*s) - 1; lastGood >= 0; lastGood-- { - entry := (*s)[lastGood] - if len(entry.linkUnwalked) > 0 { - break - } - entry.Close() - (*s) = (*s)[:lastGood] - } - return nil -} - -func (s *symlinkStack) push(dir *os.File, remainingPath, linkTarget string) error { - if s == nil { - return nil - } - // Split the link target and clean up any "" parts. - linkTargetParts := slices_DeleteFunc( - strings.Split(linkTarget, "/"), - func(part string) bool { return part == "" || part == "." }) - - // Copy the directory so the caller doesn't close our copy. - dirCopy, err := dupFile(dir) - if err != nil { - return err - } - - // Add to the stack. - *s = append(*s, &symlinkStackEntry{ - dir: dirCopy, - remainingPath: remainingPath, - linkUnwalked: linkTargetParts, - }) - return nil -} - -func (s *symlinkStack) SwapLink(linkPart string, dir *os.File, remainingPath, linkTarget string) error { - // If we are currently inside a symlink resolution, remove the symlink - // component from the last symlink entry, but don't remove the entry even - // if it's empty. If we are a "tail-chained" symlink (a trailing symlink we - // hit during a symlink resolution) we need to keep the old symlink until - // we finish the resolution. - if err := s.popPart(linkPart); err != nil { - if !errors.Is(err, errEmptyStack) { - return err - } - // Push the component regardless of whether the stack was empty. 
- } - return s.push(dir, remainingPath, linkTarget) -} - -func (s *symlinkStack) PopTopSymlink() (*os.File, string, bool) { - if s == nil || s.IsEmpty() { - return nil, "", false - } - tailEntry := (*s)[0] - *s = (*s)[1:] - return tailEntry.dir, tailEntry.remainingPath, true -} - -// partialLookupInRoot tries to lookup as much of the request path as possible -// within the provided root (a-la RESOLVE_IN_ROOT) and opens the final existing -// component of the requested path, returning a file handle to the final -// existing component and a string containing the remaining path components. -func partialLookupInRoot(root *os.File, unsafePath string) (*os.File, string, error) { - return lookupInRoot(root, unsafePath, true) -} - -func completeLookupInRoot(root *os.File, unsafePath string) (*os.File, error) { - handle, remainingPath, err := lookupInRoot(root, unsafePath, false) - if remainingPath != "" && err == nil { - // should never happen - err = fmt.Errorf("[bug] non-empty remaining path when doing a non-partial lookup: %q", remainingPath) - } - // lookupInRoot(partial=false) will always close the handle if an error is - // returned, so no need to double-check here. - return handle, err -} - -func lookupInRoot(root *os.File, unsafePath string, partial bool) (Handle *os.File, _ string, _ error) { - unsafePath = filepath.ToSlash(unsafePath) // noop - - // This is very similar to SecureJoin, except that we operate on the - // components using file descriptors. We then return the last component we - // managed open, along with the remaining path components not opened. - - // Try to use openat2 if possible. - if hasOpenat2() { - return lookupOpenat2(root, unsafePath, partial) - } - - // Get the "actual" root path from /proc/self/fd. This is necessary if the - // root is some magic-link like /proc/$pid/root, in which case we want to - // make sure when we do checkProcSelfFdPath that we are using the correct - // root path. 
- logicalRootPath, err := procSelfFdReadlink(root) - if err != nil { - return nil, "", fmt.Errorf("get real root path: %w", err) - } - - currentDir, err := dupFile(root) - if err != nil { - return nil, "", fmt.Errorf("clone root fd: %w", err) - } - defer func() { - // If a handle is not returned, close the internal handle. - if Handle == nil { - _ = currentDir.Close() - } - }() - - // symlinkStack is used to emulate how openat2(RESOLVE_IN_ROOT) treats - // dangling symlinks. If we hit a non-existent path while resolving a - // symlink, we need to return the (dir, remainingPath) that we had when we - // hit the symlink (treating the symlink as though it were a regular file). - // The set of (dir, remainingPath) sets is stored within the symlinkStack - // and we add and remove parts when we hit symlink and non-symlink - // components respectively. We need a stack because of recursive symlinks - // (symlinks that contain symlink components in their target). - // - // Note that the stack is ONLY used for book-keeping. All of the actual - // path walking logic is still based on currentPath/remainingPath and - // currentDir (as in SecureJoin). - var symStack *symlinkStack - if partial { - symStack = new(symlinkStack) - defer symStack.Close() - } - - var ( - linksWalked int - currentPath string - remainingPath = unsafePath - ) - for remainingPath != "" { - // Save the current remaining path so if the part is not real we can - // return the path including the component. - oldRemainingPath := remainingPath - - // Get the next path component. - var part string - if i := strings.IndexByte(remainingPath, '/'); i == -1 { - part, remainingPath = remainingPath, "" - } else { - part, remainingPath = remainingPath[:i], remainingPath[i+1:] - } - // If we hit an empty component, we need to treat it as though it is - // "." so that trailing "/" and "//" components on a non-directory - // correctly return the right error code. - if part == "" { - part = "." 
- } - - // Apply the component lexically to the path we are building. - // currentPath does not contain any symlinks, and we are lexically - // dealing with a single component, so it's okay to do a filepath.Clean - // here. - nextPath := path.Join("/", currentPath, part) - // If we logically hit the root, just clone the root rather than - // opening the part and doing all of the other checks. - if nextPath == "/" { - if err := symStack.PopPart(part); err != nil { - return nil, "", fmt.Errorf("walking into root with part %q failed: %w", part, err) - } - // Jump to root. - rootClone, err := dupFile(root) - if err != nil { - return nil, "", fmt.Errorf("clone root fd: %w", err) - } - _ = currentDir.Close() - currentDir = rootClone - currentPath = nextPath - continue - } - - // Try to open the next component. - nextDir, err := openatFile(currentDir, part, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0) - switch { - case err == nil: - st, err := nextDir.Stat() - if err != nil { - _ = nextDir.Close() - return nil, "", fmt.Errorf("stat component %q: %w", part, err) - } - - switch st.Mode() & os.ModeType { - case os.ModeSymlink: - // readlinkat implies AT_EMPTY_PATH since Linux 2.6.39. See - // Linux commit 65cfc6722361 ("readlinkat(), fchownat() and - // fstatat() with empty relative pathnames"). - linkDest, err := readlinkatFile(nextDir, "") - // We don't need the handle anymore. - _ = nextDir.Close() - if err != nil { - return nil, "", err - } - - linksWalked++ - if linksWalked > maxSymlinkLimit { - return nil, "", &os.PathError{Op: "securejoin.lookupInRoot", Path: logicalRootPath + "/" + unsafePath, Err: unix.ELOOP} - } - - // Swap out the symlink's component for the link entry itself. - if err := symStack.SwapLink(part, currentDir, oldRemainingPath, linkDest); err != nil { - return nil, "", fmt.Errorf("walking into symlink %q failed: push symlink: %w", part, err) - } - - // Update our logical remaining path. 
- remainingPath = linkDest + "/" + remainingPath - // Absolute symlinks reset any work we've already done. - if path.IsAbs(linkDest) { - // Jump to root. - rootClone, err := dupFile(root) - if err != nil { - return nil, "", fmt.Errorf("clone root fd: %w", err) - } - _ = currentDir.Close() - currentDir = rootClone - currentPath = "/" - } - - default: - // If we are dealing with a directory, simply walk into it. - _ = currentDir.Close() - currentDir = nextDir - currentPath = nextPath - - // The part was real, so drop it from the symlink stack. - if err := symStack.PopPart(part); err != nil { - return nil, "", fmt.Errorf("walking into directory %q failed: %w", part, err) - } - - // If we are operating on a .., make sure we haven't escaped. - // We only have to check for ".." here because walking down - // into a regular component component cannot cause you to - // escape. This mirrors the logic in RESOLVE_IN_ROOT, except we - // have to check every ".." rather than only checking after a - // rename or mount on the system. - if part == ".." { - // Make sure the root hasn't moved. - if err := checkProcSelfFdPath(logicalRootPath, root); err != nil { - return nil, "", fmt.Errorf("root path moved during lookup: %w", err) - } - // Make sure the path is what we expect. - fullPath := logicalRootPath + nextPath - if err := checkProcSelfFdPath(fullPath, currentDir); err != nil { - return nil, "", fmt.Errorf("walking into %q had unexpected result: %w", part, err) - } - } - } - - default: - if !partial { - return nil, "", err - } - // If there are any remaining components in the symlink stack, we - // are still within a symlink resolution and thus we hit a dangling - // symlink. So pretend that the first symlink in the stack we hit - // was an ENOENT (to match openat2). 
- if oldDir, remainingPath, ok := symStack.PopTopSymlink(); ok { - _ = currentDir.Close() - return oldDir, remainingPath, err - } - // We have hit a final component that doesn't exist, so we have our - // partial open result. Note that we have to use the OLD remaining - // path, since the lookup failed. - return currentDir, oldRemainingPath, err - } - } - - // If the unsafePath had a trailing slash, we need to make sure we try to - // do a relative "." open so that we will correctly return an error when - // the final component is a non-directory (to match openat2). In the - // context of openat2, a trailing slash and a trailing "/." are completely - // equivalent. - if strings.HasSuffix(unsafePath, "/") { - nextDir, err := openatFile(currentDir, ".", unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0) - if err != nil { - if !partial { - _ = currentDir.Close() - currentDir = nil - } - return currentDir, "", err - } - _ = currentDir.Close() - currentDir = nextDir - } - - // All of the components existed! - return currentDir, "", nil -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go b/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go deleted file mode 100644 index a17ae3b0..00000000 --- a/vendor/github.com/cyphar/filepath-securejoin/mkdir_linux.go +++ /dev/null @@ -1,236 +0,0 @@ -//go:build linux - -// Copyright (C) 2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package securejoin - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "strings" - - "golang.org/x/sys/unix" -) - -var ( - errInvalidMode = errors.New("invalid permission mode") - errPossibleAttack = errors.New("possible attack detected") -) - -// modePermExt is like os.ModePerm except that it also includes the set[ug]id -// and sticky bits. 
-const modePermExt = os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky - -//nolint:cyclop // this function needs to handle a lot of cases -func toUnixMode(mode os.FileMode) (uint32, error) { - sysMode := uint32(mode.Perm()) - if mode&os.ModeSetuid != 0 { - sysMode |= unix.S_ISUID - } - if mode&os.ModeSetgid != 0 { - sysMode |= unix.S_ISGID - } - if mode&os.ModeSticky != 0 { - sysMode |= unix.S_ISVTX - } - // We don't allow file type bits. - if mode&os.ModeType != 0 { - return 0, fmt.Errorf("%w %+.3o (%s): type bits not permitted", errInvalidMode, mode, mode) - } - // We don't allow other unknown modes. - if mode&^modePermExt != 0 || sysMode&unix.S_IFMT != 0 { - return 0, fmt.Errorf("%w %+.3o (%s): unknown mode bits", errInvalidMode, mode, mode) - } - return sysMode, nil -} - -// MkdirAllHandle is equivalent to [MkdirAll], except that it is safer to use -// in two respects: -// -// - The caller provides the root directory as an *[os.File] (preferably O_PATH) -// handle. This means that the caller can be sure which root directory is -// being used. Note that this can be emulated by using /proc/self/fd/... as -// the root path with [os.MkdirAll]. -// -// - Once all of the directories have been created, an *[os.File] O_PATH handle -// to the directory at unsafePath is returned to the caller. This is done in -// an effectively-race-free way (an attacker would only be able to swap the -// final directory component), which is not possible to emulate with -// [MkdirAll]. -// -// In addition, the returned handle is obtained far more efficiently than doing -// a brand new lookup of unsafePath (such as with [SecureJoin] or openat2) after -// doing [MkdirAll]. If you intend to open the directory after creating it, you -// should use MkdirAllHandle. 
-func MkdirAllHandle(root *os.File, unsafePath string, mode os.FileMode) (_ *os.File, Err error) { - unixMode, err := toUnixMode(mode) - if err != nil { - return nil, err - } - // On Linux, mkdirat(2) (and os.Mkdir) silently ignore the suid and sgid - // bits. We could also silently ignore them but since we have very few - // users it seems more prudent to return an error so users notice that - // these bits will not be set. - if unixMode&^0o1777 != 0 { - return nil, fmt.Errorf("%w for mkdir %+.3o: suid and sgid are ignored by mkdir", errInvalidMode, mode) - } - - // Try to open as much of the path as possible. - currentDir, remainingPath, err := partialLookupInRoot(root, unsafePath) - defer func() { - if Err != nil { - _ = currentDir.Close() - } - }() - if err != nil && !errors.Is(err, unix.ENOENT) { - return nil, fmt.Errorf("find existing subpath of %q: %w", unsafePath, err) - } - - // If there is an attacker deleting directories as we walk into them, - // detect this proactively. Note this is guaranteed to detect if the - // attacker deleted any part of the tree up to currentDir. - // - // Once we walk into a dead directory, partialLookupInRoot would not be - // able to walk further down the tree (directories must be empty before - // they are deleted), and if the attacker has removed the entire tree we - // can be sure that anything that was originally inside a dead directory - // must also be deleted and thus is a dead directory in its own right. - // - // This is mostly a quality-of-life check, because mkdir will simply fail - // later if the attacker deletes the tree after this check. - if err := isDeadInode(currentDir); err != nil { - return nil, fmt.Errorf("finding existing subpath of %q: %w", unsafePath, err) - } - - // Re-open the path to match the O_DIRECTORY reopen loop later (so that we - // always return a non-O_PATH handle). We also check that we actually got a - // directory. 
- if reopenDir, err := Reopen(currentDir, unix.O_DIRECTORY|unix.O_CLOEXEC); errors.Is(err, unix.ENOTDIR) { - return nil, fmt.Errorf("cannot create subdirectories in %q: %w", currentDir.Name(), unix.ENOTDIR) - } else if err != nil { - return nil, fmt.Errorf("re-opening handle to %q: %w", currentDir.Name(), err) - } else { - _ = currentDir.Close() - currentDir = reopenDir - } - - remainingParts := strings.Split(remainingPath, string(filepath.Separator)) - if slices_Contains(remainingParts, "..") { - // The path contained ".." components after the end of the "real" - // components. We could try to safely resolve ".." here but that would - // add a bunch of extra logic for something that it's not clear even - // needs to be supported. So just return an error. - // - // If we do filepath.Clean(remainingPath) then we end up with the - // problem that ".." can erase a trailing dangling symlink and produce - // a path that doesn't quite match what the user asked for. - return nil, fmt.Errorf("%w: yet-to-be-created path %q contains '..' components", unix.ENOENT, remainingPath) - } - - // Create the remaining components. - for _, part := range remainingParts { - switch part { - case "", ".": - // Skip over no-op paths. - continue - } - - // NOTE: mkdir(2) will not follow trailing symlinks, so we can safely - // create the final component without worrying about symlink-exchange - // attacks. - // - // If we get -EEXIST, it's possible that another program created the - // directory at the same time as us. In that case, just continue on as - // if we created it (if the created inode is not a directory, the - // following open call will fail). - if err := unix.Mkdirat(int(currentDir.Fd()), part, unixMode); err != nil && !errors.Is(err, unix.EEXIST) { - err = &os.PathError{Op: "mkdirat", Path: currentDir.Name() + "/" + part, Err: err} - // Make the error a bit nicer if the directory is dead. 
- if deadErr := isDeadInode(currentDir); deadErr != nil { - // TODO: Once we bump the minimum Go version to 1.20, we can use - // multiple %w verbs for this wrapping. For now we need to use a - // compatibility shim for older Go versions. - //err = fmt.Errorf("%w (%w)", err, deadErr) - err = wrapBaseError(err, deadErr) - } - return nil, err - } - - // Get a handle to the next component. O_DIRECTORY means we don't need - // to use O_PATH. - var nextDir *os.File - if hasOpenat2() { - nextDir, err = openat2File(currentDir, part, &unix.OpenHow{ - Flags: unix.O_NOFOLLOW | unix.O_DIRECTORY | unix.O_CLOEXEC, - Resolve: unix.RESOLVE_BENEATH | unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_NO_XDEV, - }) - } else { - nextDir, err = openatFile(currentDir, part, unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) - } - if err != nil { - return nil, err - } - _ = currentDir.Close() - currentDir = nextDir - - // It's possible that the directory we just opened was swapped by an - // attacker. Unfortunately there isn't much we can do to protect - // against this, and MkdirAll's behaviour is that we will reuse - // existing directories anyway so the need to protect against this is - // incredibly limited (and arguably doesn't even deserve mention here). - // - // Ideally we might want to check that the owner and mode match what we - // would've created -- unfortunately, it is non-trivial to verify that - // the owner and mode of the created directory match. While plain Unix - // DAC rules seem simple enough to emulate, there are a bunch of other - // factors that can change the mode or owner of created directories - // (default POSIX ACLs, mount options like uid=1,gid=2,umask=0 on - // filesystems like vfat, etc etc). We used to try to verify this but - // it just lead to a series of spurious errors. 
- // - // We could also check that the directory is non-empty, but - // unfortunately some pseduofilesystems (like cgroupfs) create - // non-empty directories, which would result in different spurious - // errors. - } - return currentDir, nil -} - -// MkdirAll is a race-safe alternative to the [os.MkdirAll] function, -// where the new directory is guaranteed to be within the root directory (if an -// attacker can move directories from inside the root to outside the root, the -// created directory tree might be outside of the root but the key constraint -// is that at no point will we walk outside of the directory tree we are -// creating). -// -// Effectively, MkdirAll(root, unsafePath, mode) is equivalent to -// -// path, _ := securejoin.SecureJoin(root, unsafePath) -// err := os.MkdirAll(path, mode) -// -// But is much safer. The above implementation is unsafe because if an attacker -// can modify the filesystem tree between [SecureJoin] and [os.MkdirAll], it is -// possible for MkdirAll to resolve unsafe symlink components and create -// directories outside of the root. -// -// If you plan to open the directory after you have created it or want to use -// an open directory handle as the root, you should use [MkdirAllHandle] instead. -// This function is a wrapper around [MkdirAllHandle]. -func MkdirAll(root, unsafePath string, mode os.FileMode) error { - rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) - if err != nil { - return err - } - defer rootDir.Close() - - f, err := MkdirAllHandle(rootDir, unsafePath, mode) - if err != nil { - return err - } - _ = f.Close() - return nil -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/open_linux.go b/vendor/github.com/cyphar/filepath-securejoin/open_linux.go deleted file mode 100644 index 230be73f..00000000 --- a/vendor/github.com/cyphar/filepath-securejoin/open_linux.go +++ /dev/null @@ -1,103 +0,0 @@ -//go:build linux - -// Copyright (C) 2024 SUSE LLC. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package securejoin - -import ( - "fmt" - "os" - "strconv" - - "golang.org/x/sys/unix" -) - -// OpenatInRoot is equivalent to [OpenInRoot], except that the root is provided -// using an *[os.File] handle, to ensure that the correct root directory is used. -func OpenatInRoot(root *os.File, unsafePath string) (*os.File, error) { - handle, err := completeLookupInRoot(root, unsafePath) - if err != nil { - return nil, &os.PathError{Op: "securejoin.OpenInRoot", Path: unsafePath, Err: err} - } - return handle, nil -} - -// OpenInRoot safely opens the provided unsafePath within the root. -// Effectively, OpenInRoot(root, unsafePath) is equivalent to -// -// path, _ := securejoin.SecureJoin(root, unsafePath) -// handle, err := os.OpenFile(path, unix.O_PATH|unix.O_CLOEXEC) -// -// But is much safer. The above implementation is unsafe because if an attacker -// can modify the filesystem tree between [SecureJoin] and [os.OpenFile], it is -// possible for the returned file to be outside of the root. -// -// Note that the returned handle is an O_PATH handle, meaning that only a very -// limited set of operations will work on the handle. This is done to avoid -// accidentally opening an untrusted file that could cause issues (such as a -// disconnected TTY that could cause a DoS, or some other issue). In order to -// use the returned handle, you can "upgrade" it to a proper handle using -// [Reopen]. -func OpenInRoot(root, unsafePath string) (*os.File, error) { - rootDir, err := os.OpenFile(root, unix.O_PATH|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) - if err != nil { - return nil, err - } - defer rootDir.Close() - return OpenatInRoot(rootDir, unsafePath) -} - -// Reopen takes an *[os.File] handle and re-opens it through /proc/self/fd. 
-// Reopen(file, flags) is effectively equivalent to -// -// fdPath := fmt.Sprintf("/proc/self/fd/%d", file.Fd()) -// os.OpenFile(fdPath, flags|unix.O_CLOEXEC) -// -// But with some extra hardenings to ensure that we are not tricked by a -// maliciously-configured /proc mount. While this attack scenario is not -// common, in container runtimes it is possible for higher-level runtimes to be -// tricked into configuring an unsafe /proc that can be used to attack file -// operations. See [CVE-2019-19921] for more details. -// -// [CVE-2019-19921]: https://github.com/advisories/GHSA-fh74-hm69-rqjw -func Reopen(handle *os.File, flags int) (*os.File, error) { - procRoot, err := getProcRoot() - if err != nil { - return nil, err - } - - // We can't operate on /proc/thread-self/fd/$n directly when doing a - // re-open, so we need to open /proc/thread-self/fd and then open a single - // final component. - procFdDir, closer, err := procThreadSelf(procRoot, "fd/") - if err != nil { - return nil, fmt.Errorf("get safe /proc/thread-self/fd handle: %w", err) - } - defer procFdDir.Close() - defer closer() - - // Try to detect if there is a mount on top of the magic-link we are about - // to open. If we are using unsafeHostProcRoot(), this could change after - // we check it (and there's nothing we can do about that) but for - // privateProcRoot() this should be guaranteed to be safe (at least since - // Linux 5.12[1], when anonymous mount namespaces were completely isolated - // from external mounts including mount propagation events). - // - // [1]: Linux commit ee2e3f50629f ("mount: fix mounting of detached mounts - // onto targets that reside on shared mounts"). 
- fdStr := strconv.Itoa(int(handle.Fd())) - if err := checkSymlinkOvermount(procRoot, procFdDir, fdStr); err != nil { - return nil, fmt.Errorf("check safety of /proc/thread-self/fd/%s magiclink: %w", fdStr, err) - } - - flags |= unix.O_CLOEXEC - // Rather than just wrapping openatFile, open-code it so we can copy - // handle.Name(). - reopenFd, err := unix.Openat(int(procFdDir.Fd()), fdStr, flags, 0) - if err != nil { - return nil, fmt.Errorf("reopen fd %d: %w", handle.Fd(), err) - } - return os.NewFile(uintptr(reopenFd), handle.Name()), nil -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go b/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go deleted file mode 100644 index f7a13e69..00000000 --- a/vendor/github.com/cyphar/filepath-securejoin/openat2_linux.go +++ /dev/null @@ -1,127 +0,0 @@ -//go:build linux - -// Copyright (C) 2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package securejoin - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "strings" - - "golang.org/x/sys/unix" -) - -var hasOpenat2 = sync_OnceValue(func() bool { - fd, err := unix.Openat2(unix.AT_FDCWD, ".", &unix.OpenHow{ - Flags: unix.O_PATH | unix.O_CLOEXEC, - Resolve: unix.RESOLVE_NO_SYMLINKS | unix.RESOLVE_IN_ROOT, - }) - if err != nil { - return false - } - _ = unix.Close(fd) - return true -}) - -func scopedLookupShouldRetry(how *unix.OpenHow, err error) bool { - // RESOLVE_IN_ROOT (and RESOLVE_BENEATH) can return -EAGAIN if we resolve - // ".." while a mount or rename occurs anywhere on the system. This could - // happen spuriously, or as the result of an attacker trying to mess with - // us during lookup. - // - // In addition, scoped lookups have a "safety check" at the end of - // complete_walk which will return -EXDEV if the final path is not in the - // root. 
- return how.Resolve&(unix.RESOLVE_IN_ROOT|unix.RESOLVE_BENEATH) != 0 && - (errors.Is(err, unix.EAGAIN) || errors.Is(err, unix.EXDEV)) -} - -const scopedLookupMaxRetries = 10 - -func openat2File(dir *os.File, path string, how *unix.OpenHow) (*os.File, error) { - fullPath := dir.Name() + "/" + path - // Make sure we always set O_CLOEXEC. - how.Flags |= unix.O_CLOEXEC - var tries int - for tries < scopedLookupMaxRetries { - fd, err := unix.Openat2(int(dir.Fd()), path, how) - if err != nil { - if scopedLookupShouldRetry(how, err) { - // We retry a couple of times to avoid the spurious errors, and - // if we are being attacked then returning -EAGAIN is the best - // we can do. - tries++ - continue - } - return nil, &os.PathError{Op: "openat2", Path: fullPath, Err: err} - } - // If we are using RESOLVE_IN_ROOT, the name we generated may be wrong. - // NOTE: The procRoot code MUST NOT use RESOLVE_IN_ROOT, otherwise - // you'll get infinite recursion here. - if how.Resolve&unix.RESOLVE_IN_ROOT == unix.RESOLVE_IN_ROOT { - if actualPath, err := rawProcSelfFdReadlink(fd); err == nil { - fullPath = actualPath - } - } - return os.NewFile(uintptr(fd), fullPath), nil - } - return nil, &os.PathError{Op: "openat2", Path: fullPath, Err: errPossibleAttack} -} - -func lookupOpenat2(root *os.File, unsafePath string, partial bool) (*os.File, string, error) { - if !partial { - file, err := openat2File(root, unsafePath, &unix.OpenHow{ - Flags: unix.O_PATH | unix.O_CLOEXEC, - Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS, - }) - return file, "", err - } - return partialLookupOpenat2(root, unsafePath) -} - -// partialLookupOpenat2 is an alternative implementation of -// partialLookupInRoot, using openat2(RESOLVE_IN_ROOT) to more safely get a -// handle to the deepest existing child of the requested path within the root. -func partialLookupOpenat2(root *os.File, unsafePath string) (*os.File, string, error) { - // TODO: Implement this as a git-bisect-like binary search. 
- - unsafePath = filepath.ToSlash(unsafePath) // noop - endIdx := len(unsafePath) - var lastError error - for endIdx > 0 { - subpath := unsafePath[:endIdx] - - handle, err := openat2File(root, subpath, &unix.OpenHow{ - Flags: unix.O_PATH | unix.O_CLOEXEC, - Resolve: unix.RESOLVE_IN_ROOT | unix.RESOLVE_NO_MAGICLINKS, - }) - if err == nil { - // Jump over the slash if we have a non-"" remainingPath. - if endIdx < len(unsafePath) { - endIdx += 1 - } - // We found a subpath! - return handle, unsafePath[endIdx:], lastError - } - if errors.Is(err, unix.ENOENT) || errors.Is(err, unix.ENOTDIR) { - // That path doesn't exist, let's try the next directory up. - endIdx = strings.LastIndexByte(subpath, '/') - lastError = err - continue - } - return nil, "", fmt.Errorf("open subpath: %w", err) - } - // If we couldn't open anything, the whole subpath is missing. Return a - // copy of the root fd so that the caller doesn't close this one by - // accident. - rootClone, err := dupFile(root) - if err != nil { - return nil, "", err - } - return rootClone, unsafePath, lastError -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/openat_linux.go b/vendor/github.com/cyphar/filepath-securejoin/openat_linux.go deleted file mode 100644 index 949fb5f2..00000000 --- a/vendor/github.com/cyphar/filepath-securejoin/openat_linux.go +++ /dev/null @@ -1,59 +0,0 @@ -//go:build linux - -// Copyright (C) 2024 SUSE LLC. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package securejoin - -import ( - "os" - "path/filepath" - - "golang.org/x/sys/unix" -) - -func dupFile(f *os.File) (*os.File, error) { - fd, err := unix.FcntlInt(f.Fd(), unix.F_DUPFD_CLOEXEC, 0) - if err != nil { - return nil, os.NewSyscallError("fcntl(F_DUPFD_CLOEXEC)", err) - } - return os.NewFile(uintptr(fd), f.Name()), nil -} - -func openatFile(dir *os.File, path string, flags int, mode int) (*os.File, error) { - // Make sure we always set O_CLOEXEC. - flags |= unix.O_CLOEXEC - fd, err := unix.Openat(int(dir.Fd()), path, flags, uint32(mode)) - if err != nil { - return nil, &os.PathError{Op: "openat", Path: dir.Name() + "/" + path, Err: err} - } - // All of the paths we use with openatFile(2) are guaranteed to be - // lexically safe, so we can use path.Join here. - fullPath := filepath.Join(dir.Name(), path) - return os.NewFile(uintptr(fd), fullPath), nil -} - -func fstatatFile(dir *os.File, path string, flags int) (unix.Stat_t, error) { - var stat unix.Stat_t - if err := unix.Fstatat(int(dir.Fd()), path, &stat, flags); err != nil { - return stat, &os.PathError{Op: "fstatat", Path: dir.Name() + "/" + path, Err: err} - } - return stat, nil -} - -func readlinkatFile(dir *os.File, path string) (string, error) { - size := 4096 - for { - linkBuf := make([]byte, size) - n, err := unix.Readlinkat(int(dir.Fd()), path, linkBuf) - if err != nil { - return "", &os.PathError{Op: "readlinkat", Path: dir.Name() + "/" + path, Err: err} - } - if n != size { - return string(linkBuf[:n]), nil - } - // Possible truncation, resize the buffer. - size *= 2 - } -} diff --git a/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go b/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go deleted file mode 100644 index 809a579c..00000000 --- a/vendor/github.com/cyphar/filepath-securejoin/procfs_linux.go +++ /dev/null @@ -1,452 +0,0 @@ -//go:build linux - -// Copyright (C) 2024 SUSE LLC. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package securejoin - -import ( - "errors" - "fmt" - "os" - "runtime" - "strconv" - - "golang.org/x/sys/unix" -) - -func fstat(f *os.File) (unix.Stat_t, error) { - var stat unix.Stat_t - if err := unix.Fstat(int(f.Fd()), &stat); err != nil { - return stat, &os.PathError{Op: "fstat", Path: f.Name(), Err: err} - } - return stat, nil -} - -func fstatfs(f *os.File) (unix.Statfs_t, error) { - var statfs unix.Statfs_t - if err := unix.Fstatfs(int(f.Fd()), &statfs); err != nil { - return statfs, &os.PathError{Op: "fstatfs", Path: f.Name(), Err: err} - } - return statfs, nil -} - -// The kernel guarantees that the root inode of a procfs mount has an -// f_type of PROC_SUPER_MAGIC and st_ino of PROC_ROOT_INO. -const ( - procSuperMagic = 0x9fa0 // PROC_SUPER_MAGIC - procRootIno = 1 // PROC_ROOT_INO -) - -func verifyProcRoot(procRoot *os.File) error { - if statfs, err := fstatfs(procRoot); err != nil { - return err - } else if statfs.Type != procSuperMagic { - return fmt.Errorf("%w: incorrect procfs root filesystem type 0x%x", errUnsafeProcfs, statfs.Type) - } - if stat, err := fstat(procRoot); err != nil { - return err - } else if stat.Ino != procRootIno { - return fmt.Errorf("%w: incorrect procfs root inode number %d", errUnsafeProcfs, stat.Ino) - } - return nil -} - -var hasNewMountApi = sync_OnceValue(func() bool { - // All of the pieces of the new mount API we use (fsopen, fsconfig, - // fsmount, open_tree) were added together in Linux 5.1[1,2], so we can - // just check for one of the syscalls and the others should also be - // available. - // - // Just try to use open_tree(2) to open a file without OPEN_TREE_CLONE. - // This is equivalent to openat(2), but tells us if open_tree is - // available (and thus all of the other basic new mount API syscalls). - // open_tree(2) is most light-weight syscall to test here. 
- // - // [1]: merge commit 400913252d09 - // [2]: - fd, err := unix.OpenTree(-int(unix.EBADF), "/", unix.OPEN_TREE_CLOEXEC) - if err != nil { - return false - } - _ = unix.Close(fd) - return true -}) - -func fsopen(fsName string, flags int) (*os.File, error) { - // Make sure we always set O_CLOEXEC. - flags |= unix.FSOPEN_CLOEXEC - fd, err := unix.Fsopen(fsName, flags) - if err != nil { - return nil, os.NewSyscallError("fsopen "+fsName, err) - } - return os.NewFile(uintptr(fd), "fscontext:"+fsName), nil -} - -func fsmount(ctx *os.File, flags, mountAttrs int) (*os.File, error) { - // Make sure we always set O_CLOEXEC. - flags |= unix.FSMOUNT_CLOEXEC - fd, err := unix.Fsmount(int(ctx.Fd()), flags, mountAttrs) - if err != nil { - return nil, os.NewSyscallError("fsmount "+ctx.Name(), err) - } - return os.NewFile(uintptr(fd), "fsmount:"+ctx.Name()), nil -} - -func newPrivateProcMount() (*os.File, error) { - procfsCtx, err := fsopen("proc", unix.FSOPEN_CLOEXEC) - if err != nil { - return nil, err - } - defer procfsCtx.Close() - - // Try to configure hidepid=ptraceable,subset=pid if possible, but ignore errors. - _ = unix.FsconfigSetString(int(procfsCtx.Fd()), "hidepid", "ptraceable") - _ = unix.FsconfigSetString(int(procfsCtx.Fd()), "subset", "pid") - - // Get an actual handle. - if err := unix.FsconfigCreate(int(procfsCtx.Fd())); err != nil { - return nil, os.NewSyscallError("fsconfig create procfs", err) - } - return fsmount(procfsCtx, unix.FSMOUNT_CLOEXEC, unix.MS_RDONLY|unix.MS_NODEV|unix.MS_NOEXEC|unix.MS_NOSUID) -} - -func openTree(dir *os.File, path string, flags uint) (*os.File, error) { - dirFd := -int(unix.EBADF) - dirName := "." - if dir != nil { - dirFd = int(dir.Fd()) - dirName = dir.Name() - } - // Make sure we always set O_CLOEXEC. 
- flags |= unix.OPEN_TREE_CLOEXEC - fd, err := unix.OpenTree(dirFd, path, flags) - if err != nil { - return nil, &os.PathError{Op: "open_tree", Path: path, Err: err} - } - return os.NewFile(uintptr(fd), dirName+"/"+path), nil -} - -func clonePrivateProcMount() (_ *os.File, Err error) { - // Try to make a clone without using AT_RECURSIVE if we can. If this works, - // we can be sure there are no over-mounts and so if the root is valid then - // we're golden. Otherwise, we have to deal with over-mounts. - procfsHandle, err := openTree(nil, "/proc", unix.OPEN_TREE_CLONE) - if err != nil || hookForcePrivateProcRootOpenTreeAtRecursive(procfsHandle) { - procfsHandle, err = openTree(nil, "/proc", unix.OPEN_TREE_CLONE|unix.AT_RECURSIVE) - } - if err != nil { - return nil, fmt.Errorf("creating a detached procfs clone: %w", err) - } - defer func() { - if Err != nil { - _ = procfsHandle.Close() - } - }() - if err := verifyProcRoot(procfsHandle); err != nil { - return nil, err - } - return procfsHandle, nil -} - -func privateProcRoot() (*os.File, error) { - if !hasNewMountApi() || hookForceGetProcRootUnsafe() { - return nil, fmt.Errorf("new mount api: %w", unix.ENOTSUP) - } - // Try to create a new procfs mount from scratch if we can. This ensures we - // can get a procfs mount even if /proc is fake (for whatever reason). - procRoot, err := newPrivateProcMount() - if err != nil || hookForcePrivateProcRootOpenTree(procRoot) { - // Try to clone /proc then... 
- procRoot, err = clonePrivateProcMount() - } - return procRoot, err -} - -func unsafeHostProcRoot() (_ *os.File, Err error) { - procRoot, err := os.OpenFile("/proc", unix.O_PATH|unix.O_NOFOLLOW|unix.O_DIRECTORY|unix.O_CLOEXEC, 0) - if err != nil { - return nil, err - } - defer func() { - if Err != nil { - _ = procRoot.Close() - } - }() - if err := verifyProcRoot(procRoot); err != nil { - return nil, err - } - return procRoot, nil -} - -func doGetProcRoot() (*os.File, error) { - procRoot, err := privateProcRoot() - if err != nil { - // Fall back to using a /proc handle if making a private mount failed. - // If we have openat2, at least we can avoid some kinds of over-mount - // attacks, but without openat2 there's not much we can do. - procRoot, err = unsafeHostProcRoot() - } - return procRoot, err -} - -var getProcRoot = sync_OnceValues(func() (*os.File, error) { - return doGetProcRoot() -}) - -var hasProcThreadSelf = sync_OnceValue(func() bool { - return unix.Access("/proc/thread-self/", unix.F_OK) == nil -}) - -var errUnsafeProcfs = errors.New("unsafe procfs detected") - -type procThreadSelfCloser func() - -// procThreadSelf returns a handle to /proc/thread-self/ (or an -// equivalent handle on older kernels where /proc/thread-self doesn't exist). -// Once finished with the handle, you must call the returned closer function -// (runtime.UnlockOSThread). You must not pass the returned *os.File to other -// Go threads or use the handle after calling the closer. -// -// This is similar to ProcThreadSelf from runc, but with extra hardening -// applied and using *os.File. 
-func procThreadSelf(procRoot *os.File, subpath string) (_ *os.File, _ procThreadSelfCloser, Err error) { - // We need to lock our thread until the caller is done with the handle - // because between getting the handle and using it we could get interrupted - // by the Go runtime and hit the case where the underlying thread is - // swapped out and the original thread is killed, resulting in - // pull-your-hair-out-hard-to-debug issues in the caller. - runtime.LockOSThread() - defer func() { - if Err != nil { - runtime.UnlockOSThread() - } - }() - - // Figure out what prefix we want to use. - threadSelf := "thread-self/" - if !hasProcThreadSelf() || hookForceProcSelfTask() { - /// Pre-3.17 kernels don't have /proc/thread-self, so do it manually. - threadSelf = "self/task/" + strconv.Itoa(unix.Gettid()) + "/" - if _, err := fstatatFile(procRoot, threadSelf, unix.AT_SYMLINK_NOFOLLOW); err != nil || hookForceProcSelf() { - // In this case, we running in a pid namespace that doesn't match - // the /proc mount we have. This can happen inside runc. - // - // Unfortunately, there is no nice way to get the correct TID to - // use here because of the age of the kernel, so we have to just - // use /proc/self and hope that it works. - threadSelf = "self/" - } - } - - // Grab the handle. - var ( - handle *os.File - err error - ) - if hasOpenat2() { - // We prefer being able to use RESOLVE_NO_XDEV if we can, to be - // absolutely sure we are operating on a clean /proc handle that - // doesn't have any cheeky overmounts that could trick us (including - // symlink mounts on top of /proc/thread-self). RESOLVE_BENEATH isn't - // strictly needed, but just use it since we have it. - // - // NOTE: /proc/self is technically a magic-link (the contents of the - // symlink are generated dynamically), but it doesn't use - // nd_jump_link() so RESOLVE_NO_MAGICLINKS allows it. 
- // - // NOTE: We MUST NOT use RESOLVE_IN_ROOT here, as openat2File uses - // procSelfFdReadlink to clean up the returned f.Name() if we use - // RESOLVE_IN_ROOT (which would lead to an infinite recursion). - handle, err = openat2File(procRoot, threadSelf+subpath, &unix.OpenHow{ - Flags: unix.O_PATH | unix.O_NOFOLLOW | unix.O_CLOEXEC, - Resolve: unix.RESOLVE_BENEATH | unix.RESOLVE_NO_XDEV | unix.RESOLVE_NO_MAGICLINKS, - }) - if err != nil { - // TODO: Once we bump the minimum Go version to 1.20, we can use - // multiple %w verbs for this wrapping. For now we need to use a - // compatibility shim for older Go versions. - //err = fmt.Errorf("%w: %w", errUnsafeProcfs, err) - return nil, nil, wrapBaseError(err, errUnsafeProcfs) - } - } else { - handle, err = openatFile(procRoot, threadSelf+subpath, unix.O_PATH|unix.O_NOFOLLOW|unix.O_CLOEXEC, 0) - if err != nil { - // TODO: Once we bump the minimum Go version to 1.20, we can use - // multiple %w verbs for this wrapping. For now we need to use a - // compatibility shim for older Go versions. - //err = fmt.Errorf("%w: %w", errUnsafeProcfs, err) - return nil, nil, wrapBaseError(err, errUnsafeProcfs) - } - defer func() { - if Err != nil { - _ = handle.Close() - } - }() - // We can't detect bind-mounts of different parts of procfs on top of - // /proc (a-la RESOLVE_NO_XDEV), but we can at least be sure that we - // aren't on the wrong filesystem here. - if statfs, err := fstatfs(handle); err != nil { - return nil, nil, err - } else if statfs.Type != procSuperMagic { - return nil, nil, fmt.Errorf("%w: incorrect /proc/self/fd filesystem type 0x%x", errUnsafeProcfs, statfs.Type) - } - } - return handle, runtime.UnlockOSThread, nil -} - -// STATX_MNT_ID_UNIQUE is provided in golang.org/x/sys@v0.20.0, but in order to -// avoid bumping the requirement for a single constant we can just define it -// ourselves. 
-const STATX_MNT_ID_UNIQUE = 0x4000 - -var hasStatxMountId = sync_OnceValue(func() bool { - var ( - stx unix.Statx_t - // We don't care which mount ID we get. The kernel will give us the - // unique one if it is supported. - wantStxMask uint32 = STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID - ) - err := unix.Statx(-int(unix.EBADF), "/", 0, int(wantStxMask), &stx) - return err == nil && stx.Mask&wantStxMask != 0 -}) - -func getMountId(dir *os.File, path string) (uint64, error) { - // If we don't have statx(STATX_MNT_ID*) support, we can't do anything. - if !hasStatxMountId() { - return 0, nil - } - - var ( - stx unix.Statx_t - // We don't care which mount ID we get. The kernel will give us the - // unique one if it is supported. - wantStxMask uint32 = STATX_MNT_ID_UNIQUE | unix.STATX_MNT_ID - ) - - err := unix.Statx(int(dir.Fd()), path, unix.AT_EMPTY_PATH|unix.AT_SYMLINK_NOFOLLOW, int(wantStxMask), &stx) - if stx.Mask&wantStxMask == 0 { - // It's not a kernel limitation, for some reason we couldn't get a - // mount ID. Assume it's some kind of attack. - err = fmt.Errorf("%w: could not get mount id", errUnsafeProcfs) - } - if err != nil { - return 0, &os.PathError{Op: "statx(STATX_MNT_ID_...)", Path: dir.Name() + "/" + path, Err: err} - } - return stx.Mnt_id, nil -} - -func checkSymlinkOvermount(procRoot *os.File, dir *os.File, path string) error { - // Get the mntId of our procfs handle. - expectedMountId, err := getMountId(procRoot, "") - if err != nil { - return err - } - // Get the mntId of the target magic-link. - gotMountId, err := getMountId(dir, path) - if err != nil { - return err - } - // As long as the directory mount is alive, even with wrapping mount IDs, - // we would expect to see a different mount ID here. (Of course, if we're - // using unsafeHostProcRoot() then an attaker could change this after we - // did this check.) 
- if expectedMountId != gotMountId { - return fmt.Errorf("%w: symlink %s/%s has an overmount obscuring the real link (mount ids do not match %d != %d)", errUnsafeProcfs, dir.Name(), path, expectedMountId, gotMountId) - } - return nil -} - -func doRawProcSelfFdReadlink(procRoot *os.File, fd int) (string, error) { - fdPath := fmt.Sprintf("fd/%d", fd) - procFdLink, closer, err := procThreadSelf(procRoot, fdPath) - if err != nil { - return "", fmt.Errorf("get safe /proc/thread-self/%s handle: %w", fdPath, err) - } - defer procFdLink.Close() - defer closer() - - // Try to detect if there is a mount on top of the magic-link. Since we use the handle directly - // provide to the closure. If the closure uses the handle directly, this - // should be safe in general (a mount on top of the path afterwards would - // not affect the handle itself) and will definitely be safe if we are - // using privateProcRoot() (at least since Linux 5.12[1], when anonymous - // mount namespaces were completely isolated from external mounts including - // mount propagation events). - // - // [1]: Linux commit ee2e3f50629f ("mount: fix mounting of detached mounts - // onto targets that reside on shared mounts"). - if err := checkSymlinkOvermount(procRoot, procFdLink, ""); err != nil { - return "", fmt.Errorf("check safety of /proc/thread-self/fd/%d magiclink: %w", fd, err) - } - - // readlinkat implies AT_EMPTY_PATH since Linux 2.6.39. See Linux commit - // 65cfc6722361 ("readlinkat(), fchownat() and fstatat() with empty - // relative pathnames"). 
- return readlinkatFile(procFdLink, "") -} - -func rawProcSelfFdReadlink(fd int) (string, error) { - procRoot, err := getProcRoot() - if err != nil { - return "", err - } - return doRawProcSelfFdReadlink(procRoot, fd) -} - -func procSelfFdReadlink(f *os.File) (string, error) { - return rawProcSelfFdReadlink(int(f.Fd())) -} - -var ( - errPossibleBreakout = errors.New("possible breakout detected") - errInvalidDirectory = errors.New("wandered into deleted directory") - errDeletedInode = errors.New("cannot verify path of deleted inode") -) - -func isDeadInode(file *os.File) error { - // If the nlink of a file drops to 0, there is an attacker deleting - // directories during our walk, which could result in weird /proc values. - // It's better to error out in this case. - stat, err := fstat(file) - if err != nil { - return fmt.Errorf("check for dead inode: %w", err) - } - if stat.Nlink == 0 { - err := errDeletedInode - if stat.Mode&unix.S_IFMT == unix.S_IFDIR { - err = errInvalidDirectory - } - return fmt.Errorf("%w %q", err, file.Name()) - } - return nil -} - -func checkProcSelfFdPath(path string, file *os.File) error { - if err := isDeadInode(file); err != nil { - return err - } - actualPath, err := procSelfFdReadlink(file) - if err != nil { - return fmt.Errorf("get path of handle: %w", err) - } - if actualPath != path { - return fmt.Errorf("%w: handle path %q doesn't match expected path %q", errPossibleBreakout, actualPath, path) - } - return nil -} - -// Test hooks used in the procfs tests to verify that the fallback logic works. -// See testing_mocks_linux_test.go and procfs_linux_test.go for more details. 
-var ( - hookForcePrivateProcRootOpenTree = hookDummyFile - hookForcePrivateProcRootOpenTreeAtRecursive = hookDummyFile - hookForceGetProcRootUnsafe = hookDummy - - hookForceProcSelfTask = hookDummy - hookForceProcSelf = hookDummy -) - -func hookDummy() bool { return false } -func hookDummyFile(_ *os.File) bool { return false } diff --git a/vendor/github.com/cyphar/filepath-securejoin/vfs.go b/vendor/github.com/cyphar/filepath-securejoin/vfs.go index 36373f8c..4d89a481 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/vfs.go +++ b/vendor/github.com/cyphar/filepath-securejoin/vfs.go @@ -1,3 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause + // Copyright (C) 2017-2024 SUSE LLC. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/mark3labs/mcp-go/client/client.go b/vendor/github.com/mark3labs/mcp-go/client/client.go index 929785cd..1d75218f 100644 --- a/vendor/github.com/mark3labs/mcp-go/client/client.go +++ b/vendor/github.com/mark3labs/mcp-go/client/client.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "net/http" "slices" "sync" "sync/atomic" @@ -24,6 +25,7 @@ type Client struct { serverCapabilities mcp.ServerCapabilities protocolVersion string samplingHandler SamplingHandler + rootsHandler RootsHandler elicitationHandler ElicitationHandler } @@ -44,6 +46,15 @@ func WithSamplingHandler(handler SamplingHandler) ClientOption { } } +// WithRootsHandler sets the roots handler for the client. +// WithRootsHandler returns a ClientOption that sets the client's RootsHandler. +// When provided, the client will declare the roots capability (ListChanged) during initialization. +func WithRootsHandler(handler RootsHandler) ClientOption { + return func(c *Client) { + c.rootsHandler = handler + } +} + // WithElicitationHandler sets the elicitation handler for the client. // When set, the client will declare elicitation capability during initialization. 
func WithElicitationHandler(handler ElicitationHandler) ClientOption { @@ -140,6 +151,7 @@ func (c *Client) sendRequest( ctx context.Context, method string, params any, + header http.Header, ) (*json.RawMessage, error) { if !c.initialized && method != "initialize" { return nil, fmt.Errorf("client not initialized") @@ -152,6 +164,7 @@ func (c *Client) sendRequest( ID: mcp.NewRequestId(id), Method: method, Params: params, + Header: header, } response, err := c.transport.SendRequest(ctx, request) @@ -177,6 +190,13 @@ func (c *Client) Initialize( if c.samplingHandler != nil { capabilities.Sampling = &struct{}{} } + if c.rootsHandler != nil { + capabilities.Roots = &struct { + ListChanged bool `json:"listChanged,omitempty"` + }{ + ListChanged: true, + } + } // Add elicitation capability if handler is configured if c.elicitationHandler != nil { capabilities.Elicitation = &struct{}{} @@ -193,7 +213,7 @@ func (c *Client) Initialize( Capabilities: capabilities, } - response, err := c.sendRequest(ctx, "initialize", params) + response, err := c.sendRequest(ctx, "initialize", params, request.Header) if err != nil { return nil, err } @@ -238,7 +258,7 @@ func (c *Client) Initialize( } func (c *Client) Ping(ctx context.Context) error { - _, err := c.sendRequest(ctx, "ping", nil) + _, err := c.sendRequest(ctx, "ping", nil, nil) return err } @@ -319,7 +339,7 @@ func (c *Client) ReadResource( ctx context.Context, request mcp.ReadResourceRequest, ) (*mcp.ReadResourceResult, error) { - response, err := c.sendRequest(ctx, "resources/read", request.Params) + response, err := c.sendRequest(ctx, "resources/read", request.Params, request.Header) if err != nil { return nil, err } @@ -331,7 +351,7 @@ func (c *Client) Subscribe( ctx context.Context, request mcp.SubscribeRequest, ) error { - _, err := c.sendRequest(ctx, "resources/subscribe", request.Params) + _, err := c.sendRequest(ctx, "resources/subscribe", request.Params, request.Header) return err } @@ -339,7 +359,7 @@ func (c *Client) 
Unsubscribe( ctx context.Context, request mcp.UnsubscribeRequest, ) error { - _, err := c.sendRequest(ctx, "resources/unsubscribe", request.Params) + _, err := c.sendRequest(ctx, "resources/unsubscribe", request.Params, request.Header) return err } @@ -383,7 +403,7 @@ func (c *Client) GetPrompt( ctx context.Context, request mcp.GetPromptRequest, ) (*mcp.GetPromptResult, error) { - response, err := c.sendRequest(ctx, "prompts/get", request.Params) + response, err := c.sendRequest(ctx, "prompts/get", request.Params, request.Header) if err != nil { return nil, err } @@ -431,7 +451,7 @@ func (c *Client) CallTool( ctx context.Context, request mcp.CallToolRequest, ) (*mcp.CallToolResult, error) { - response, err := c.sendRequest(ctx, "tools/call", request.Params) + response, err := c.sendRequest(ctx, "tools/call", request.Params, request.Header) if err != nil { return nil, err } @@ -443,7 +463,7 @@ func (c *Client) SetLevel( ctx context.Context, request mcp.SetLevelRequest, ) error { - _, err := c.sendRequest(ctx, "logging/setLevel", request.Params) + _, err := c.sendRequest(ctx, "logging/setLevel", request.Params, request.Header) return err } @@ -451,7 +471,7 @@ func (c *Client) Complete( ctx context.Context, request mcp.CompleteRequest, ) (*mcp.CompleteResult, error) { - response, err := c.sendRequest(ctx, "completion/complete", request.Params) + response, err := c.sendRequest(ctx, "completion/complete", request.Params, request.Header) if err != nil { return nil, err } @@ -464,6 +484,28 @@ func (c *Client) Complete( return &result, nil } +// RootListChanges sends a roots list-changed notification to the server. 
+func (c *Client) RootListChanges( + ctx context.Context, +) error { + // Send root list changes notification + notification := mcp.JSONRPCNotification{ + JSONRPC: mcp.JSONRPC_VERSION, + Notification: mcp.Notification{ + Method: mcp.MethodNotificationRootsListChanged, + }, + } + + err := c.transport.SendNotification(ctx, notification) + if err != nil { + return fmt.Errorf( + "failed to send root list change notification: %w", + err, + ) + } + return nil +} + // handleIncomingRequest processes incoming requests from the server. // This is the main entry point for server-to-client requests like sampling and elicitation. func (c *Client) handleIncomingRequest(ctx context.Context, request transport.JSONRPCRequest) (*transport.JSONRPCResponse, error) { @@ -474,6 +516,8 @@ func (c *Client) handleIncomingRequest(ctx context.Context, request transport.JS return c.handleElicitationRequestTransport(ctx, request) case string(mcp.MethodPing): return c.handlePingRequestTransport(ctx, request) + case string(mcp.MethodListRoots): + return c.handleListRootsRequestTransport(ctx, request) default: return nil, fmt.Errorf("unsupported request method: %s", request.Method) } @@ -536,6 +580,37 @@ func (c *Client) handleSamplingRequestTransport(ctx context.Context, request tra return response, nil } +// handleListRootsRequestTransport handles list roots requests at the transport level. 
+func (c *Client) handleListRootsRequestTransport(ctx context.Context, request transport.JSONRPCRequest) (*transport.JSONRPCResponse, error) { + if c.rootsHandler == nil { + return nil, fmt.Errorf("no roots handler configured") + } + + // Create the MCP request + mcpRequest := mcp.ListRootsRequest{ + Request: mcp.Request{ + Method: string(mcp.MethodListRoots), + }, + } + + // Call the list roots handler + result, err := c.rootsHandler.ListRoots(ctx, mcpRequest) + if err != nil { + return nil, err + } + + // Marshal the result + resultBytes, err := json.Marshal(result) + if err != nil { + return nil, fmt.Errorf("failed to marshal result: %w", err) + } + + // Create the transport response + response := transport.NewJSONRPCResultResponse(request.ID, json.RawMessage(resultBytes)) + + return response, nil +} + // handleElicitationRequestTransport handles elicitation requests at the transport level. func (c *Client) handleElicitationRequestTransport(ctx context.Context, request transport.JSONRPCRequest) (*transport.JSONRPCResponse, error) { if c.elicitationHandler == nil { @@ -591,7 +666,7 @@ func listByPage[T any]( request mcp.PaginatedRequest, method string, ) (*T, error) { - response, err := client.sendRequest(ctx, method, request.Params) + response, err := client.sendRequest(ctx, method, request.Params, nil) if err != nil { return nil, err } diff --git a/vendor/github.com/mark3labs/mcp-go/client/roots.go b/vendor/github.com/mark3labs/mcp-go/client/roots.go new file mode 100644 index 00000000..0a17aaf7 --- /dev/null +++ b/vendor/github.com/mark3labs/mcp-go/client/roots.go @@ -0,0 +1,17 @@ +package client + +import ( + "context" + + "github.com/mark3labs/mcp-go/mcp" +) + +// RootsHandler defines the interface for handling roots requests from servers. +// Clients can implement this interface to provide roots list to servers. +type RootsHandler interface { + // ListRoots handles a list root request from the server and returns the roots list. 
+ // The implementation should: + // 1. Validate input against the requested schema + // 2. Return the appropriate response + ListRoots(ctx context.Context, request mcp.ListRootsRequest) (*mcp.ListRootsResult, error) +} diff --git a/vendor/github.com/mark3labs/mcp-go/client/transport/inprocess.go b/vendor/github.com/mark3labs/mcp-go/client/transport/inprocess.go index 46765426..fe17d97f 100644 --- a/vendor/github.com/mark3labs/mcp-go/client/transport/inprocess.go +++ b/vendor/github.com/mark3labs/mcp-go/client/transport/inprocess.go @@ -14,6 +14,7 @@ type InProcessTransport struct { server *server.MCPServer samplingHandler server.SamplingHandler elicitationHandler server.ElicitationHandler + rootsHandler server.RootsHandler session *server.InProcessSession sessionID string @@ -37,6 +38,12 @@ func WithElicitationHandler(handler server.ElicitationHandler) InProcessOption { } } +func WithRootsHandler(handler server.RootsHandler) InProcessOption { + return func(t *InProcessTransport) { + t.rootsHandler = handler + } +} + func NewInProcessTransport(server *server.MCPServer) *InProcessTransport { return &InProcessTransport{ server: server, @@ -66,8 +73,8 @@ func (c *InProcessTransport) Start(ctx context.Context) error { c.startedMu.Unlock() // Create and register session if we have handlers - if c.samplingHandler != nil || c.elicitationHandler != nil { - c.session = server.NewInProcessSessionWithHandlers(c.sessionID, c.samplingHandler, c.elicitationHandler) + if c.samplingHandler != nil || c.elicitationHandler != nil || c.rootsHandler != nil { + c.session = server.NewInProcessSessionWithHandlers(c.sessionID, c.samplingHandler, c.elicitationHandler, c.rootsHandler) if err := c.server.RegisterSession(ctx, c.session); err != nil { c.startedMu.Lock() c.started = false diff --git a/vendor/github.com/mark3labs/mcp-go/client/transport/interface.go b/vendor/github.com/mark3labs/mcp-go/client/transport/interface.go index b00210e5..e35a5f31 100644 --- 
a/vendor/github.com/mark3labs/mcp-go/client/transport/interface.go +++ b/vendor/github.com/mark3labs/mcp-go/client/transport/interface.go @@ -3,6 +3,7 @@ package transport import ( "context" "encoding/json" + "net/http" "github.com/mark3labs/mcp-go/mcp" ) @@ -59,6 +60,7 @@ type JSONRPCRequest struct { ID mcp.RequestId `json:"id"` Method string `json:"method"` Params any `json:"params,omitempty"` + Header http.Header `json:"-"` } // JSONRPCResponse represents a JSON-RPC 2.0 response message. diff --git a/vendor/github.com/mark3labs/mcp-go/client/transport/sse.go b/vendor/github.com/mark3labs/mcp-go/client/transport/sse.go index 3cc7e98e..b85fb6ca 100644 --- a/vendor/github.com/mark3labs/mcp-go/client/transport/sse.go +++ b/vendor/github.com/mark3labs/mcp-go/client/transport/sse.go @@ -363,6 +363,12 @@ func (c *SSE) SendRequest( req.Header.Set(k, v) } + for k, v := range request.Header { + if _, ok := req.Header[k]; !ok { + req.Header[k] = v + } + } + // Add OAuth authorization if configured if c.oauthHandler != nil { authHeader, err := c.oauthHandler.GetAuthorizationHeader(ctx) diff --git a/vendor/github.com/mark3labs/mcp-go/client/transport/streamable_http.go b/vendor/github.com/mark3labs/mcp-go/client/transport/streamable_http.go index 000237ce..043c1e45 100644 --- a/vendor/github.com/mark3labs/mcp-go/client/transport/streamable_http.go +++ b/vendor/github.com/mark3labs/mcp-go/client/transport/streamable_http.go @@ -265,7 +265,7 @@ func (c *StreamableHTTP) SendRequest( ctx, cancel := c.contextAwareOfClientClose(ctx) defer cancel() - resp, err := c.sendHTTP(ctx, http.MethodPost, bytes.NewReader(requestBody), "application/json, text/event-stream") + resp, err := c.sendHTTP(ctx, http.MethodPost, bytes.NewReader(requestBody), "application/json, text/event-stream", request.Header) if err != nil { if errors.Is(err, ErrSessionTerminated) && request.Method == string(mcp.MethodInitialize) { // If the request is initialize, should not return a SessionTerminated error @@ 
-346,6 +346,7 @@ func (c *StreamableHTTP) sendHTTP( method string, body io.Reader, acceptType string, + header http.Header, ) (resp *http.Response, err error) { // Create HTTP request req, err := http.NewRequestWithContext(ctx, method, c.serverURL.String(), body) @@ -353,6 +354,11 @@ func (c *StreamableHTTP) sendHTTP( return nil, fmt.Errorf("failed to create request: %w", err) } + // request headers + if header != nil { + req.Header = header + } + // Set headers req.Header.Set("Content-Type", "application/json") req.Header.Set("Accept", acceptType) @@ -375,7 +381,7 @@ func (c *StreamableHTTP) sendHTTP( authHeader, err := c.oauthHandler.GetAuthorizationHeader(ctx) if err != nil { // If we get an authorization error, return a specific error that can be handled by the client - if err.Error() == "no valid token available, authorization required" { + if errors.Is(err, ErrOAuthAuthorizationRequired) { return nil, &OAuthAuthorizationRequiredError{ Handler: c.oauthHandler, } @@ -546,7 +552,7 @@ func (c *StreamableHTTP) SendNotification(ctx context.Context, notification mcp. 
ctx, cancel := c.contextAwareOfClientClose(ctx) defer cancel() - resp, err := c.sendHTTP(ctx, http.MethodPost, bytes.NewReader(requestBody), "application/json, text/event-stream") + resp, err := c.sendHTTP(ctx, http.MethodPost, bytes.NewReader(requestBody), "application/json, text/event-stream", nil) if err != nil { return fmt.Errorf("failed to send request: %w", err) } @@ -642,7 +648,7 @@ var ( ) func (c *StreamableHTTP) createGETConnectionToServer(ctx context.Context) error { - resp, err := c.sendHTTP(ctx, http.MethodGet, nil, "text/event-stream") + resp, err := c.sendHTTP(ctx, http.MethodGet, nil, "text/event-stream", nil) if err != nil { return fmt.Errorf("failed to send request: %w", err) } @@ -757,7 +763,7 @@ func (c *StreamableHTTP) sendResponseToServer(ctx context.Context, response *JSO ctx, cancel := c.contextAwareOfClientClose(ctx) defer cancel() - resp, err := c.sendHTTP(ctx, http.MethodPost, bytes.NewReader(responseBody), "application/json, text/event-stream") + resp, err := c.sendHTTP(ctx, http.MethodPost, bytes.NewReader(responseBody), "application/json, text/event-stream", nil) if err != nil { c.logger.Errorf("failed to send response to server: %v", err) return diff --git a/vendor/github.com/mark3labs/mcp-go/mcp/types.go b/vendor/github.com/mark3labs/mcp-go/mcp/types.go index 0f97821b..6e447c61 100644 --- a/vendor/github.com/mark3labs/mcp-go/mcp/types.go +++ b/vendor/github.com/mark3labs/mcp-go/mcp/types.go @@ -59,6 +59,10 @@ const ( // https://modelcontextprotocol.io/docs/concepts/elicitation MethodElicitationCreate MCPMethod = "elicitation/create" + // MethodListRoots requests roots list from the client during interactions. + // https://modelcontextprotocol.io/specification/2025-06-18/client/roots + MethodListRoots MCPMethod = "roots/list" + // MethodNotificationResourcesListChanged notifies when the list of available resources changes. 
// https://modelcontextprotocol.io/specification/2025-03-26/server/resources#list-changed-notification MethodNotificationResourcesListChanged = "notifications/resources/list_changed" @@ -70,8 +74,12 @@ const ( MethodNotificationPromptsListChanged = "notifications/prompts/list_changed" // MethodNotificationToolsListChanged notifies when the list of available tools changes. - // https://spec.modelcontextprotocol.io/specification/2024-11-05/server/tools/list_changed/ + // https://modelcontextprotocol.io/specification/2025-06-18/server/tools#list-changed-notification MethodNotificationToolsListChanged = "notifications/tools/list_changed" + + // MethodNotificationRootsListChanged notifies when the list of available roots changes. + // https://modelcontextprotocol.io/specification/2025-06-18/client/roots#root-list-changes + MethodNotificationRootsListChanged = "notifications/roots/list_changed" ) type URITemplate struct { @@ -515,12 +523,15 @@ type ServerCapabilities struct { } `json:"tools,omitempty"` // Present if the server supports elicitation requests to the client. Elicitation *struct{} `json:"elicitation,omitempty"` + // Present if the server supports roots requests to the client. + Roots *struct{} `json:"roots,omitempty"` } // Implementation describes the name and version of an MCP implementation. type Implementation struct { Name string `json:"name"` Version string `json:"version"` + Title string `json:"title,omitempty"` } /* Ping */ @@ -1143,7 +1154,6 @@ type PromptReference struct { // structure or access specific locations that the client has permission to read from. type ListRootsRequest struct { Request - Header http.Header `json:"-"` } // ListRootsResult is the client's response to a roots/list request from the server. 
diff --git a/vendor/github.com/mark3labs/mcp-go/server/errors.go b/vendor/github.com/mark3labs/mcp-go/server/errors.go index 4668e459..5e65f076 100644 --- a/vendor/github.com/mark3labs/mcp-go/server/errors.go +++ b/vendor/github.com/mark3labs/mcp-go/server/errors.go @@ -13,12 +13,13 @@ var ( ErrToolNotFound = errors.New("tool not found") // Session-related errors - ErrSessionNotFound = errors.New("session not found") - ErrSessionExists = errors.New("session already exists") - ErrSessionNotInitialized = errors.New("session not properly initialized") - ErrSessionDoesNotSupportTools = errors.New("session does not support per-session tools") - ErrSessionDoesNotSupportResources = errors.New("session does not support per-session resources") - ErrSessionDoesNotSupportLogging = errors.New("session does not support setting logging level") + ErrSessionNotFound = errors.New("session not found") + ErrSessionExists = errors.New("session already exists") + ErrSessionNotInitialized = errors.New("session not properly initialized") + ErrSessionDoesNotSupportTools = errors.New("session does not support per-session tools") + ErrSessionDoesNotSupportResources = errors.New("session does not support per-session resources") + ErrSessionDoesNotSupportResourceTemplates = errors.New("session does not support resource templates") + ErrSessionDoesNotSupportLogging = errors.New("session does not support setting logging level") // Notification-related errors ErrNotificationNotInitialized = errors.New("notification channel not initialized") diff --git a/vendor/github.com/mark3labs/mcp-go/server/inprocess_session.go b/vendor/github.com/mark3labs/mcp-go/server/inprocess_session.go index c6fddc60..59ab0f36 100644 --- a/vendor/github.com/mark3labs/mcp-go/server/inprocess_session.go +++ b/vendor/github.com/mark3labs/mcp-go/server/inprocess_session.go @@ -20,6 +20,11 @@ type ElicitationHandler interface { Elicit(ctx context.Context, request mcp.ElicitationRequest) (*mcp.ElicitationResult, error) } +// 
RootsHandler defines the interface for handling roots list requests from servers. +type RootsHandler interface { + ListRoots(ctx context.Context, request mcp.ListRootsRequest) (*mcp.ListRootsResult, error) +} + type InProcessSession struct { sessionID string notifications chan mcp.JSONRPCNotification @@ -29,6 +34,7 @@ type InProcessSession struct { clientCapabilities atomic.Value samplingHandler SamplingHandler elicitationHandler ElicitationHandler + rootsHandler RootsHandler mu sync.RWMutex } @@ -40,12 +46,13 @@ func NewInProcessSession(sessionID string, samplingHandler SamplingHandler) *InP } } -func NewInProcessSessionWithHandlers(sessionID string, samplingHandler SamplingHandler, elicitationHandler ElicitationHandler) *InProcessSession { +func NewInProcessSessionWithHandlers(sessionID string, samplingHandler SamplingHandler, elicitationHandler ElicitationHandler, rootsHandler RootsHandler) *InProcessSession { return &InProcessSession{ sessionID: sessionID, notifications: make(chan mcp.JSONRPCNotification, 100), samplingHandler: samplingHandler, elicitationHandler: elicitationHandler, + rootsHandler: rootsHandler, } } @@ -128,6 +135,20 @@ func (s *InProcessSession) RequestElicitation(ctx context.Context, request mcp.E return handler.Elicit(ctx, request) } +// ListRoots sends a list roots request to the client and waits for the response. +// Returns an error if no roots handler is available. 
+func (s *InProcessSession) ListRoots(ctx context.Context, request mcp.ListRootsRequest) (*mcp.ListRootsResult, error) { + s.mu.RLock() + handler := s.rootsHandler + s.mu.RUnlock() + + if handler == nil { + return nil, fmt.Errorf("no roots handler available") + } + + return handler.ListRoots(ctx, request) +} + // GenerateInProcessSessionID generates a unique session ID for inprocess clients func GenerateInProcessSessionID() string { return fmt.Sprintf("inprocess-%d", time.Now().UnixNano()) @@ -140,4 +161,5 @@ var ( _ SessionWithClientInfo = (*InProcessSession)(nil) _ SessionWithSampling = (*InProcessSession)(nil) _ SessionWithElicitation = (*InProcessSession)(nil) + _ SessionWithRoots = (*InProcessSession)(nil) ) diff --git a/vendor/github.com/mark3labs/mcp-go/server/roots.go b/vendor/github.com/mark3labs/mcp-go/server/roots.go new file mode 100644 index 00000000..29e0b94d --- /dev/null +++ b/vendor/github.com/mark3labs/mcp-go/server/roots.go @@ -0,0 +1,32 @@ +package server + +import ( + "context" + "errors" + + "github.com/mark3labs/mcp-go/mcp" +) + +var ( + // ErrNoClientSession is returned when there is no active client session in the context + ErrNoClientSession = errors.New("no active client session") + // ErrRootsNotSupported is returned when the session does not support roots + ErrRootsNotSupported = errors.New("session does not support roots") +) + +// RequestRoots sends an list roots request to the client. +// The client must have declared roots capability during initialization. +// The session must implement SessionWithRoots to support this operation. 
+func (s *MCPServer) RequestRoots(ctx context.Context, request mcp.ListRootsRequest) (*mcp.ListRootsResult, error) { + session := ClientSessionFromContext(ctx) + if session == nil { + return nil, ErrNoClientSession + } + + // Check if the session supports roots requests + if rootsSession, ok := session.(SessionWithRoots); ok { + return rootsSession.ListRoots(ctx, request) + } + + return nil, ErrRootsNotSupported +} diff --git a/vendor/github.com/mark3labs/mcp-go/server/server.go b/vendor/github.com/mark3labs/mcp-go/server/server.go index f45c0353..d46fc868 100644 --- a/vendor/github.com/mark3labs/mcp-go/server/server.go +++ b/vendor/github.com/mark3labs/mcp-go/server/server.go @@ -183,6 +183,7 @@ type serverCapabilities struct { logging *bool sampling *bool elicitation *bool + roots *bool } // resourceCapabilities defines the supported resource-related features @@ -326,6 +327,13 @@ func WithElicitation() ServerOption { } } +// WithRoots returns a ServerOption that enables the roots capability on the MCPServer +func WithRoots() ServerOption { + return func(s *MCPServer) { + s.capabilities.roots = mcp.ToBoolPtr(true) + } +} + // WithInstructions sets the server instructions for the client returned in the initialize response func WithInstructions(instructions string) ServerOption { return func(s *MCPServer) { @@ -696,6 +704,10 @@ func (s *MCPServer) handleInitialize( capabilities.Elicitation = &struct{}{} } + if s.capabilities.roots != nil && *s.capabilities.roots { + capabilities.Roots = &struct{}{} + } + result := mcp.InitializeResult{ ProtocolVersion: s.protocolVersion(request.Params.ProtocolVersion), ServerInfo: mcp.Implementation{ @@ -880,12 +892,34 @@ func (s *MCPServer) handleListResourceTemplates( id any, request mcp.ListResourceTemplatesRequest, ) (*mcp.ListResourceTemplatesResult, *requestError) { + // Get global templates s.resourcesMu.RLock() - templates := make([]mcp.ResourceTemplate, 0, len(s.resourceTemplates)) - for _, entry := range 
s.resourceTemplates { - templates = append(templates, entry.template) + templateMap := make(map[string]mcp.ResourceTemplate, len(s.resourceTemplates)) + for uri, entry := range s.resourceTemplates { + templateMap[uri] = entry.template } s.resourcesMu.RUnlock() + + // Check if there are session-specific resource templates + session := ClientSessionFromContext(ctx) + if session != nil { + if sessionWithTemplates, ok := session.(SessionWithResourceTemplates); ok { + if sessionTemplates := sessionWithTemplates.GetSessionResourceTemplates(); sessionTemplates != nil { + // Merge session-specific templates with global templates + // Session templates override global ones + for uriTemplate, serverTemplate := range sessionTemplates { + templateMap[uriTemplate] = serverTemplate.Template + } + } + } + } + + // Convert map to slice for sorting and pagination + templates := make([]mcp.ResourceTemplate, 0, len(templateMap)) + for _, template := range templateMap { + templates = append(templates, template) + } + sort.Slice(templates, func(i, j int) bool { return templates[i].Name < templates[j].Name }) @@ -971,18 +1005,48 @@ func (s *MCPServer) handleReadResource( // If no direct handler found, try matching against templates var matchedHandler ResourceTemplateHandlerFunc var matched bool - for _, entry := range s.resourceTemplates { - template := entry.template - if matchesTemplate(request.Params.URI, template.URITemplate) { - matchedHandler = entry.handler - matched = true - matchedVars := template.URITemplate.Match(request.Params.URI) - // Convert matched variables to a map - request.Params.Arguments = make(map[string]any, len(matchedVars)) - for name, value := range matchedVars { - request.Params.Arguments[name] = value.V + + // First check session templates if available + if session != nil { + if sessionWithTemplates, ok := session.(SessionWithResourceTemplates); ok { + sessionTemplates := sessionWithTemplates.GetSessionResourceTemplates() + for _, serverTemplate := range 
sessionTemplates { + if serverTemplate.Template.URITemplate == nil { + continue + } + if matchesTemplate(request.Params.URI, serverTemplate.Template.URITemplate) { + matchedHandler = serverTemplate.Handler + matched = true + matchedVars := serverTemplate.Template.URITemplate.Match(request.Params.URI) + // Convert matched variables to a map + request.Params.Arguments = make(map[string]any, len(matchedVars)) + for name, value := range matchedVars { + request.Params.Arguments[name] = value.V + } + break + } + } + } + } + + // If not found in session templates, check global templates + if !matched { + for _, entry := range s.resourceTemplates { + template := entry.template + if template.URITemplate == nil { + continue + } + if matchesTemplate(request.Params.URI, template.URITemplate) { + matchedHandler = entry.handler + matched = true + matchedVars := template.URITemplate.Match(request.Params.URI) + // Convert matched variables to a map + request.Params.Arguments = make(map[string]any, len(matchedVars)) + for name, value := range matchedVars { + request.Params.Arguments[name] = value.V + } + break } - break } } s.resourcesMu.RUnlock() diff --git a/vendor/github.com/mark3labs/mcp-go/server/session.go b/vendor/github.com/mark3labs/mcp-go/server/session.go index 99d6db8d..0ded99fb 100644 --- a/vendor/github.com/mark3labs/mcp-go/server/session.go +++ b/vendor/github.com/mark3labs/mcp-go/server/session.go @@ -51,6 +51,17 @@ type SessionWithResources interface { SetSessionResources(resources map[string]ServerResource) } +// SessionWithResourceTemplates is an extension of ClientSession that can store session-specific resource template data +type SessionWithResourceTemplates interface { + ClientSession + // GetSessionResourceTemplates returns the resource templates specific to this session, if any + // This method must be thread-safe for concurrent access + GetSessionResourceTemplates() map[string]ServerResourceTemplate + // SetSessionResourceTemplates sets resource templates 
specific to this session + // This method must be thread-safe for concurrent access + SetSessionResourceTemplates(templates map[string]ServerResourceTemplate) +} + // SessionWithClientInfo is an extension of ClientSession that can store client info type SessionWithClientInfo interface { ClientSession @@ -71,6 +82,13 @@ type SessionWithElicitation interface { RequestElicitation(ctx context.Context, request mcp.ElicitationRequest) (*mcp.ElicitationResult, error) } +// SessionWithRoots is an extension of ClientSession that can send list roots requests +type SessionWithRoots interface { + ClientSession + // ListRoots sends an list roots request to the client and waits for response + ListRoots(ctx context.Context, request mcp.ListRootsRequest) (*mcp.ListRootsResult, error) +} + // SessionWithStreamableHTTPConfig extends ClientSession to support streamable HTTP transport configurations type SessionWithStreamableHTTPConfig interface { ClientSession @@ -613,3 +631,137 @@ func (s *MCPServer) DeleteSessionResources(sessionID string, uris ...string) err return nil } + +// AddSessionResourceTemplate adds a resource template for a specific session +func (s *MCPServer) AddSessionResourceTemplate(sessionID string, template mcp.ResourceTemplate, handler ResourceTemplateHandlerFunc) error { + return s.AddSessionResourceTemplates(sessionID, ServerResourceTemplate{ + Template: template, + Handler: handler, + }) +} + +// AddSessionResourceTemplates adds resource templates for a specific session +func (s *MCPServer) AddSessionResourceTemplates(sessionID string, templates ...ServerResourceTemplate) error { + sessionValue, ok := s.sessions.Load(sessionID) + if !ok { + return ErrSessionNotFound + } + + session, ok := sessionValue.(SessionWithResourceTemplates) + if !ok { + return ErrSessionDoesNotSupportResourceTemplates + } + + // For session resource templates, enable listChanged by default + // This is the same behavior as session resources + s.implicitlyRegisterCapabilities( + func() 
bool { return s.capabilities.resources != nil }, + func() { s.capabilities.resources = &resourceCapabilities{listChanged: true} }, + ) + + // Get existing templates (this returns a thread-safe copy) + sessionTemplates := session.GetSessionResourceTemplates() + + // Create a new map to avoid modifying the returned copy + newTemplates := make(map[string]ServerResourceTemplate, len(sessionTemplates)+len(templates)) + + // Copy existing templates + for k, v := range sessionTemplates { + newTemplates[k] = v + } + + // Validate and add new templates + for _, t := range templates { + if t.Template.URITemplate == nil { + return fmt.Errorf("resource template URITemplate cannot be nil") + } + raw := t.Template.URITemplate.Raw() + if raw == "" { + return fmt.Errorf("resource template URITemplate cannot be empty") + } + if t.Template.Name == "" { + return fmt.Errorf("resource template name cannot be empty") + } + newTemplates[raw] = t + } + + // Set the new templates (this method must handle thread-safety) + session.SetSessionResourceTemplates(newTemplates) + + // Send notification if the session is initialized and listChanged is enabled + if session.Initialized() && s.capabilities.resources != nil && s.capabilities.resources.listChanged { + if err := s.SendNotificationToSpecificClient(sessionID, "notifications/resources/list_changed", nil); err != nil { + // Log the error but don't fail the operation + if s.hooks != nil && len(s.hooks.OnError) > 0 { + hooks := s.hooks + go func(sID string, hooks *Hooks) { + ctx := context.Background() + hooks.onError(ctx, nil, "notification", map[string]any{ + "method": "notifications/resources/list_changed", + "sessionID": sID, + }, fmt.Errorf("failed to send notification after adding resource templates: %w", err)) + }(sessionID, hooks) + } + } + } + + return nil +} + +// DeleteSessionResourceTemplates removes resource templates from a specific session +func (s *MCPServer) DeleteSessionResourceTemplates(sessionID string, uriTemplates 
...string) error { + sessionValue, ok := s.sessions.Load(sessionID) + if !ok { + return ErrSessionNotFound + } + + session, ok := sessionValue.(SessionWithResourceTemplates) + if !ok { + return ErrSessionDoesNotSupportResourceTemplates + } + + // Get existing templates (this returns a thread-safe copy) + sessionTemplates := session.GetSessionResourceTemplates() + + // Track if any were actually deleted + deletedAny := false + + // Create a new map without the deleted templates + newTemplates := make(map[string]ServerResourceTemplate, len(sessionTemplates)) + for k, v := range sessionTemplates { + newTemplates[k] = v + } + + // Delete specified templates + for _, uriTemplate := range uriTemplates { + if _, exists := newTemplates[uriTemplate]; exists { + delete(newTemplates, uriTemplate) + deletedAny = true + } + } + + // Only update if something was actually deleted + if deletedAny { + // Set the new templates (this method must handle thread-safety) + session.SetSessionResourceTemplates(newTemplates) + + // Send notification if the session is initialized and listChanged is enabled + if session.Initialized() && s.capabilities.resources != nil && s.capabilities.resources.listChanged { + if err := s.SendNotificationToSpecificClient(sessionID, "notifications/resources/list_changed", nil); err != nil { + // Log the error but don't fail the operation + if s.hooks != nil && len(s.hooks.OnError) > 0 { + hooks := s.hooks + go func(sID string, hooks *Hooks) { + ctx := context.Background() + hooks.onError(ctx, nil, "notification", map[string]any{ + "method": "notifications/resources/list_changed", + "sessionID": sID, + }, fmt.Errorf("failed to send notification after deleting resource templates: %w", err)) + }(sessionID, hooks) + } + } + } + } + + return nil +} diff --git a/vendor/github.com/mark3labs/mcp-go/server/sse.go b/vendor/github.com/mark3labs/mcp-go/server/sse.go index 250141ce..97c765cc 100644 --- a/vendor/github.com/mark3labs/mcp-go/server/sse.go +++ 
b/vendor/github.com/mark3labs/mcp-go/server/sse.go @@ -30,6 +30,7 @@ type sseSession struct { loggingLevel atomic.Value tools sync.Map // stores session-specific tools resources sync.Map // stores session-specific resources + resourceTemplates sync.Map // stores session-specific resource templates clientInfo atomic.Value // stores session-specific client info clientCapabilities atomic.Value // stores session-specific client capabilities } @@ -97,6 +98,27 @@ func (s *sseSession) SetSessionResources(resources map[string]ServerResource) { } } +func (s *sseSession) GetSessionResourceTemplates() map[string]ServerResourceTemplate { + templates := make(map[string]ServerResourceTemplate) + s.resourceTemplates.Range(func(key, value any) bool { + if template, ok := value.(ServerResourceTemplate); ok { + templates[key.(string)] = template + } + return true + }) + return templates +} + +func (s *sseSession) SetSessionResourceTemplates(templates map[string]ServerResourceTemplate) { + // Clear existing templates + s.resourceTemplates.Clear() + + // Set new templates + for uriTemplate, template := range templates { + s.resourceTemplates.Store(uriTemplate, template) + } +} + func (s *sseSession) GetSessionTools() map[string]ServerTool { tools := make(map[string]ServerTool) s.tools.Range(func(key, value any) bool { @@ -145,11 +167,12 @@ func (s *sseSession) GetClientCapabilities() mcp.ClientCapabilities { } var ( - _ ClientSession = (*sseSession)(nil) - _ SessionWithTools = (*sseSession)(nil) - _ SessionWithResources = (*sseSession)(nil) - _ SessionWithLogging = (*sseSession)(nil) - _ SessionWithClientInfo = (*sseSession)(nil) + _ ClientSession = (*sseSession)(nil) + _ SessionWithTools = (*sseSession)(nil) + _ SessionWithResources = (*sseSession)(nil) + _ SessionWithResourceTemplates = (*sseSession)(nil) + _ SessionWithLogging = (*sseSession)(nil) + _ SessionWithClientInfo = (*sseSession)(nil) ) // SSEServer implements a Server-Sent Events (SSE) based MCP server. 
diff --git a/vendor/github.com/mark3labs/mcp-go/server/stdio.go b/vendor/github.com/mark3labs/mcp-go/server/stdio.go index 80131f06..f5c8ddfd 100644 --- a/vendor/github.com/mark3labs/mcp-go/server/stdio.go +++ b/vendor/github.com/mark3labs/mcp-go/server/stdio.go @@ -102,6 +102,7 @@ type stdioSession struct { mu sync.RWMutex // protects writer pendingRequests map[int64]chan *samplingResponse // for tracking pending sampling requests pendingElicitations map[int64]chan *elicitationResponse // for tracking pending elicitation requests + pendingRoots map[int64]chan *rootsResponse // for tracking pending list roots requests pendingMu sync.RWMutex // protects pendingRequests and pendingElicitations } @@ -117,6 +118,12 @@ type elicitationResponse struct { err error } +// rootsResponse represents a response to an list root request +type rootsResponse struct { + result *mcp.ListRootsResult + err error +} + func (s *stdioSession) SessionID() string { return "stdio" } @@ -236,6 +243,67 @@ func (s *stdioSession) RequestSampling(ctx context.Context, request mcp.CreateMe } } +// ListRoots sends an list roots request to the client and waits for the response. 
+func (s *stdioSession) ListRoots(ctx context.Context, request mcp.ListRootsRequest) (*mcp.ListRootsResult, error) { + s.mu.RLock() + writer := s.writer + s.mu.RUnlock() + + if writer == nil { + return nil, fmt.Errorf("no writer available for sending requests") + } + + // Generate a unique request ID + id := s.requestID.Add(1) + + // Create a response channel for this request + responseChan := make(chan *rootsResponse, 1) + s.pendingMu.Lock() + s.pendingRoots[id] = responseChan + s.pendingMu.Unlock() + + // Cleanup function to remove the pending request + cleanup := func() { + s.pendingMu.Lock() + delete(s.pendingRoots, id) + s.pendingMu.Unlock() + } + defer cleanup() + + // Create the JSON-RPC request + jsonRPCRequest := struct { + JSONRPC string `json:"jsonrpc"` + ID int64 `json:"id"` + Method string `json:"method"` + }{ + JSONRPC: mcp.JSONRPC_VERSION, + ID: id, + Method: string(mcp.MethodListRoots), + } + + // Marshal and send the request + requestBytes, err := json.Marshal(jsonRPCRequest) + if err != nil { + return nil, fmt.Errorf("failed to marshal list roots request: %w", err) + } + requestBytes = append(requestBytes, '\n') + + if _, err := writer.Write(requestBytes); err != nil { + return nil, fmt.Errorf("failed to write list roots request: %w", err) + } + + // Wait for the response or context cancellation + select { + case <-ctx.Done(): + return nil, ctx.Err() + case response := <-responseChan: + if response.err != nil { + return nil, response.err + } + return response.result, nil + } +} + // RequestElicitation sends an elicitation request to the client and waits for the response. 
func (s *stdioSession) RequestElicitation(ctx context.Context, request mcp.ElicitationRequest) (*mcp.ElicitationResult, error) { s.mu.RLock() @@ -312,12 +380,14 @@ var ( _ SessionWithClientInfo = (*stdioSession)(nil) _ SessionWithSampling = (*stdioSession)(nil) _ SessionWithElicitation = (*stdioSession)(nil) + _ SessionWithRoots = (*stdioSession)(nil) ) var stdioSessionInstance = stdioSession{ notifications: make(chan mcp.JSONRPCNotification, 100), pendingRequests: make(map[int64]chan *samplingResponse), pendingElicitations: make(map[int64]chan *elicitationResponse), + pendingRoots: make(map[int64]chan *rootsResponse), } // NewStdioServer creates a new stdio server wrapper around an MCPServer. @@ -522,6 +592,11 @@ func (s *StdioServer) processMessage( return nil } + // Check if this is a response to an list roots request + if s.handleListRootsResponse(rawMessage) { + return nil + } + // Check if this is a tool call that might need sampling (and thus should be processed concurrently) var baseMessage struct { Method string `json:"method"` @@ -692,6 +767,67 @@ func (s *stdioSession) handleElicitationResponse(rawMessage json.RawMessage) boo return true } +// handleListRootsResponse checks if the message is a response to an list roots request +// and routes it to the appropriate pending request channel. 
+func (s *StdioServer) handleListRootsResponse(rawMessage json.RawMessage) bool { + return stdioSessionInstance.handleListRootsResponse(rawMessage) +} + +// handleListRootsResponse handles incoming list root responses for this session +func (s *stdioSession) handleListRootsResponse(rawMessage json.RawMessage) bool { + // Try to parse as a JSON-RPC response + var response struct { + JSONRPC string `json:"jsonrpc"` + ID json.Number `json:"id"` + Result json.RawMessage `json:"result,omitempty"` + Error *struct { + Code int `json:"code"` + Message string `json:"message"` + } `json:"error,omitempty"` + } + + if err := json.Unmarshal(rawMessage, &response); err != nil { + return false + } + // Parse the ID as int64 + id, err := response.ID.Int64() + if err != nil || (response.Result == nil && response.Error == nil) { + return false + } + + // Check if we have a pending list root request with this ID + s.pendingMu.RLock() + responseChan, exists := s.pendingRoots[id] + s.pendingMu.RUnlock() + + if !exists { + return false + } + + // Parse and send the response + rootsResp := &rootsResponse{} + + if response.Error != nil { + rootsResp.err = fmt.Errorf("list root request failed: %s", response.Error.Message) + } else { + var result mcp.ListRootsResult + if err := json.Unmarshal(response.Result, &result); err != nil { + rootsResp.err = fmt.Errorf("failed to unmarshal list root response: %w", err) + } else { + rootsResp.result = &result + } + } + + // Send the response (non-blocking) + select { + case responseChan <- rootsResp: + default: + // Channel is full or closed, ignore + } + + return true +} + // writeResponse marshals and writes a JSON-RPC response message followed by a newline. // Returns an error if marshaling or writing fails. 
func (s *StdioServer) writeResponse( diff --git a/vendor/github.com/mark3labs/mcp-go/server/streamable_http.go b/vendor/github.com/mark3labs/mcp-go/server/streamable_http.go index 8af6f147..5a596467 100644 --- a/vendor/github.com/mark3labs/mcp-go/server/streamable_http.go +++ b/vendor/github.com/mark3labs/mcp-go/server/streamable_http.go @@ -41,23 +41,41 @@ func WithEndpointPath(endpointPath string) StreamableHTTPOption { // as a new session. No session id returned to the client. // The default is false. // -// Notice: This is a convenience method. It's identical to set WithSessionIdManager option +// Note: This is a convenience method. It's identical to set WithSessionIdManager option // to StatelessSessionIdManager. func WithStateLess(stateLess bool) StreamableHTTPOption { return func(s *StreamableHTTPServer) { if stateLess { - s.sessionIdManager = &StatelessSessionIdManager{} + s.sessionIdManagerResolver = NewDefaultSessionIdManagerResolver(&StatelessSessionIdManager{}) } } } // WithSessionIdManager sets a custom session id generator for the server. -// By default, the server will use SimpleStatefulSessionIdGenerator, which generates -// session ids with uuid, and it's insecure. -// Notice: it will override the WithStateLess option. +// By default, the server uses InsecureStatefulSessionIdManager (UUID-based; insecure). +// Note: Options are applied in order; the last one wins. If combined with +// WithStateLess or WithSessionIdManagerResolver, whichever is applied last takes effect. func WithSessionIdManager(manager SessionIdManager) StreamableHTTPOption { return func(s *StreamableHTTPServer) { - s.sessionIdManager = manager + if manager == nil { + s.sessionIdManagerResolver = NewDefaultSessionIdManagerResolver(&InsecureStatefulSessionIdManager{}) + return + } + s.sessionIdManagerResolver = NewDefaultSessionIdManagerResolver(manager) + } +} + +// WithSessionIdManagerResolver sets a custom session id manager resolver for the server. 
+// This allows for request-based session id management strategies. +// Note: Options are applied in order; the last one wins. If combined with +// WithStateLess or WithSessionIdManager, whichever is applied last takes effect. +func WithSessionIdManagerResolver(resolver SessionIdManagerResolver) StreamableHTTPOption { + return func(s *StreamableHTTPServer) { + if resolver == nil { + s.sessionIdManagerResolver = NewDefaultSessionIdManagerResolver(&InsecureStatefulSessionIdManager{}) + return + } + s.sessionIdManagerResolver = resolver } } @@ -140,22 +158,23 @@ func WithTLSCert(certFile, keyFile string) StreamableHTTPOption { // The current implementation does not support the following features from the specification: // - Stream Resumability type StreamableHTTPServer struct { - server *MCPServer - sessionTools *sessionToolsStore - sessionResources *sessionResourcesStore - sessionRequestIDs sync.Map // sessionId --> last requestID(*atomic.Int64) - activeSessions sync.Map // sessionId --> *streamableHttpSession (for sampling responses) + server *MCPServer + sessionTools *sessionToolsStore + sessionResources *sessionResourcesStore + sessionResourceTemplates *sessionResourceTemplatesStore + sessionRequestIDs sync.Map // sessionId --> last requestID(*atomic.Int64) + activeSessions sync.Map // sessionId --> *streamableHttpSession (for sampling responses) httpServer *http.Server mu sync.RWMutex - endpointPath string - contextFunc HTTPContextFunc - sessionIdManager SessionIdManager - listenHeartbeatInterval time.Duration - logger util.Logger - sessionLogLevels *sessionLogLevelsStore - disableStreaming bool + endpointPath string + contextFunc HTTPContextFunc + sessionIdManagerResolver SessionIdManagerResolver + listenHeartbeatInterval time.Duration + logger util.Logger + sessionLogLevels *sessionLogLevelsStore + disableStreaming bool tlsCertFile string tlsKeyFile string @@ -164,13 +183,14 @@ type StreamableHTTPServer struct { // NewStreamableHTTPServer creates a new 
streamable-http server instance func NewStreamableHTTPServer(server *MCPServer, opts ...StreamableHTTPOption) *StreamableHTTPServer { s := &StreamableHTTPServer{ - server: server, - sessionTools: newSessionToolsStore(), - sessionLogLevels: newSessionLogLevelsStore(), - endpointPath: "/mcp", - sessionIdManager: &InsecureStatefulSessionIdManager{}, - logger: util.DefaultLogger(), - sessionResources: newSessionResourcesStore(), + server: server, + sessionTools: newSessionToolsStore(), + sessionLogLevels: newSessionLogLevelsStore(), + endpointPath: "/mcp", + sessionIdManagerResolver: NewDefaultSessionIdManagerResolver(&InsecureStatefulSessionIdManager{}), + logger: util.DefaultLogger(), + sessionResources: newSessionResourcesStore(), + sessionResourceTemplates: newSessionResourceTemplatesStore(), } // Apply all options @@ -305,14 +325,15 @@ func (s *StreamableHTTPServer) handlePost(w http.ResponseWriter, r *http.Request // The session is ephemeral. Its life is the same as the request. It's only created // for interaction with the mcp server. var sessionID string + sessionIdManager := s.sessionIdManagerResolver.ResolveSessionIdManager(r) if isInitializeRequest { // generate a new one for initialize request - sessionID = s.sessionIdManager.Generate() + sessionID = sessionIdManager.Generate() } else { // Get session ID from header. // Stateful servers need the client to carry the session ID. 
sessionID = r.Header.Get(HeaderKeySessionID) - isTerminated, err := s.sessionIdManager.Validate(sessionID) + isTerminated, err := sessionIdManager.Validate(sessionID) if err != nil { http.Error(w, "Invalid session ID", http.StatusBadRequest) return @@ -345,7 +366,7 @@ func (s *StreamableHTTPServer) handlePost(w http.ResponseWriter, r *http.Request // Create ephemeral session if no persistent session exists if session == nil { - session = newStreamableHttpSession(sessionID, s.sessionTools, s.sessionResources, s.sessionLogLevels) + session = newStreamableHttpSession(sessionID, s.sessionTools, s.sessionResources, s.sessionResourceTemplates, s.sessionLogLevels) } // Set the client context before handling the message @@ -480,7 +501,7 @@ func (s *StreamableHTTPServer) handleGet(w http.ResponseWriter, r *http.Request) // Get or create session atomically to prevent TOCTOU races // where concurrent GETs could both create and register duplicate sessions var session *streamableHttpSession - newSession := newStreamableHttpSession(sessionID, s.sessionTools, s.sessionResources, s.sessionLogLevels) + newSession := newStreamableHttpSession(sessionID, s.sessionTools, s.sessionResources, s.sessionResourceTemplates, s.sessionLogLevels) actual, loaded := s.activeSessions.LoadOrStore(sessionID, newSession) session = actual.(*streamableHttpSession) @@ -552,6 +573,20 @@ func (s *StreamableHTTPServer) handleGet(w http.ResponseWriter, r *http.Request) case <-done: return } + case rootsReq := <-session.rootsRequestChan: + // Send list roots request to client via SSE + jsonrpcRequest := mcp.JSONRPCRequest{ + JSONRPC: "2.0", + ID: mcp.NewRequestId(rootsReq.requestID), + Request: mcp.Request{ + Method: string(mcp.MethodListRoots), + }, + } + select { + case writeChan <- jsonrpcRequest: + case <-done: + return + } case <-done: return } @@ -609,7 +644,8 @@ func (s *StreamableHTTPServer) handleGet(w http.ResponseWriter, r *http.Request) func (s *StreamableHTTPServer) handleDelete(w 
http.ResponseWriter, r *http.Request) { // delete request terminate the session sessionID := r.Header.Get(HeaderKeySessionID) - notAllowed, err := s.sessionIdManager.Terminate(sessionID) + sessionIdManager := s.sessionIdManagerResolver.ResolveSessionIdManager(r) + notAllowed, err := sessionIdManager.Terminate(sessionID) if err != nil { http.Error(w, fmt.Sprintf("Session termination failed: %v", err), http.StatusInternalServerError) return @@ -622,6 +658,7 @@ func (s *StreamableHTTPServer) handleDelete(w http.ResponseWriter, r *http.Reque // remove the session relateddata from the sessionToolsStore s.sessionTools.delete(sessionID) s.sessionResources.delete(sessionID) + s.sessionResourceTemplates.delete(sessionID) s.sessionLogLevels.delete(sessionID) // remove current session's requstID information s.sessionRequestIDs.Delete(sessionID) @@ -656,7 +693,8 @@ func (s *StreamableHTTPServer) handleSamplingResponse(w http.ResponseWriter, r * } // Validate session - isTerminated, err := s.sessionIdManager.Validate(sessionID) + sessionIdManager := s.sessionIdManagerResolver.ResolveSessionIdManager(r) + isTerminated, err := sessionIdManager.Validate(sessionID) if err != nil { http.Error(w, "Invalid session ID", http.StatusBadRequest) return err @@ -834,6 +872,39 @@ func (s *sessionResourcesStore) delete(sessionID string) { delete(s.resources, sessionID) } +type sessionResourceTemplatesStore struct { + mu sync.RWMutex + templates map[string]map[string]ServerResourceTemplate // sessionID -> uriTemplate -> template +} + +func newSessionResourceTemplatesStore() *sessionResourceTemplatesStore { + return &sessionResourceTemplatesStore{ + templates: make(map[string]map[string]ServerResourceTemplate), + } +} + +func (s *sessionResourceTemplatesStore) get(sessionID string) map[string]ServerResourceTemplate { + s.mu.RLock() + defer s.mu.RUnlock() + cloned := make(map[string]ServerResourceTemplate, len(s.templates[sessionID])) + maps.Copy(cloned, s.templates[sessionID]) + return cloned 
+} + +func (s *sessionResourceTemplatesStore) set(sessionID string, templates map[string]ServerResourceTemplate) { + s.mu.Lock() + defer s.mu.Unlock() + cloned := make(map[string]ServerResourceTemplate, len(templates)) + maps.Copy(cloned, templates) + s.templates[sessionID] = cloned +} + +func (s *sessionResourceTemplatesStore) delete(sessionID string) { + s.mu.Lock() + defer s.mu.Unlock() + delete(s.templates, sessionID) +} + type sessionToolsStore struct { mu sync.RWMutex tools map[string]map[string]ServerTool // sessionID -> toolName -> tool @@ -887,6 +958,13 @@ type elicitationRequestItem struct { response chan samplingResponseItem } +// Roots support types for HTTP transport +type rootsRequestItem struct { + requestID int64 + request mcp.ListRootsRequest + response chan samplingResponseItem +} + // streamableHttpSession is a session for streamable-http transport // When in POST handlers(request/notification), it's ephemeral, and only exists in the life of the request handler. // When in GET handlers(listening), it's a real session, and will be registered in the MCP server. 
@@ -895,26 +973,30 @@ type streamableHttpSession struct { notificationChannel chan mcp.JSONRPCNotification // server -> client notifications tools *sessionToolsStore resources *sessionResourcesStore + resourceTemplates *sessionResourceTemplatesStore upgradeToSSE atomic.Bool logLevels *sessionLogLevelsStore // Sampling support for bidirectional communication samplingRequestChan chan samplingRequestItem // server -> client sampling requests elicitationRequestChan chan elicitationRequestItem // server -> client elicitation requests + rootsRequestChan chan rootsRequestItem // server -> client list roots requests samplingRequests sync.Map // requestID -> pending sampling request context requestIDCounter atomic.Int64 // for generating unique request IDs } -func newStreamableHttpSession(sessionID string, toolStore *sessionToolsStore, resourcesStore *sessionResourcesStore, levels *sessionLogLevelsStore) *streamableHttpSession { +func newStreamableHttpSession(sessionID string, toolStore *sessionToolsStore, resourcesStore *sessionResourcesStore, templatesStore *sessionResourceTemplatesStore, levels *sessionLogLevelsStore) *streamableHttpSession { s := &streamableHttpSession{ sessionID: sessionID, notificationChannel: make(chan mcp.JSONRPCNotification, 100), tools: toolStore, resources: resourcesStore, + resourceTemplates: templatesStore, logLevels: levels, samplingRequestChan: make(chan samplingRequestItem, 10), elicitationRequestChan: make(chan elicitationRequestItem, 10), + rootsRequestChan: make(chan rootsRequestItem, 10), } return s } @@ -963,10 +1045,19 @@ func (s *streamableHttpSession) SetSessionResources(resources map[string]ServerR s.resources.set(s.sessionID, resources) } +func (s *streamableHttpSession) GetSessionResourceTemplates() map[string]ServerResourceTemplate { + return s.resourceTemplates.get(s.sessionID) +} + +func (s *streamableHttpSession) SetSessionResourceTemplates(templates map[string]ServerResourceTemplate) { + s.resourceTemplates.set(s.sessionID, 
templates) +} + var ( - _ SessionWithTools = (*streamableHttpSession)(nil) - _ SessionWithResources = (*streamableHttpSession)(nil) - _ SessionWithLogging = (*streamableHttpSession)(nil) + _ SessionWithTools = (*streamableHttpSession)(nil) + _ SessionWithResources = (*streamableHttpSession)(nil) + _ SessionWithResourceTemplates = (*streamableHttpSession)(nil) + _ SessionWithLogging = (*streamableHttpSession)(nil) ) func (s *streamableHttpSession) UpgradeToSSEWhenReceiveNotification() { @@ -1031,6 +1122,52 @@ func (s *streamableHttpSession) RequestSampling(ctx context.Context, request mcp } } +// ListRoots implements SessionWithRoots interface for HTTP transport. +// It sends a list roots request to the client via SSE and waits for the response. +func (s *streamableHttpSession) ListRoots(ctx context.Context, request mcp.ListRootsRequest) (*mcp.ListRootsResult, error) { + // Generate unique request ID + requestID := s.requestIDCounter.Add(1) + + // Create response channel for this specific request + responseChan := make(chan samplingResponseItem, 1) + + // Create the roots request item + rootsRequest := rootsRequestItem{ + requestID: requestID, + request: request, + response: responseChan, + } + + // Store the pending request + s.samplingRequests.Store(requestID, responseChan) + defer s.samplingRequests.Delete(requestID) + + // Send the list roots request via the channel (non-blocking) + select { + case s.rootsRequestChan <- rootsRequest: + // Request queued successfully + case <-ctx.Done(): + return nil, ctx.Err() + default: + return nil, fmt.Errorf("list roots request queue is full - server overloaded") + } + + // Wait for response or context cancellation + select { + case response := <-responseChan: + if response.err != nil { + return nil, response.err + } + var result mcp.ListRootsResult + if err := json.Unmarshal(response.result, &result); err != nil { + return nil, fmt.Errorf("failed to unmarshal list roots response: %v", err) + } + return &result, nil + case 
<-ctx.Done(): + return nil, ctx.Err() + } +} + // RequestElicitation implements SessionWithElicitation interface for HTTP transport func (s *streamableHttpSession) RequestElicitation(ctx context.Context, request mcp.ElicitationRequest) (*mcp.ElicitationResult, error) { // Generate unique request ID @@ -1078,9 +1215,15 @@ func (s *streamableHttpSession) RequestElicitation(ctx context.Context, request var _ SessionWithSampling = (*streamableHttpSession)(nil) var _ SessionWithElicitation = (*streamableHttpSession)(nil) +var _ SessionWithRoots = (*streamableHttpSession)(nil) // --- session id manager --- +// SessionIdManagerResolver resolves a SessionIdManager based on the HTTP request +type SessionIdManagerResolver interface { + ResolveSessionIdManager(r *http.Request) SessionIdManager +} + type SessionIdManager interface { Generate() string // Validate checks if a session ID is valid and not terminated. @@ -1093,6 +1236,24 @@ type SessionIdManager interface { Terminate(sessionID string) (isNotAllowed bool, err error) } +// DefaultSessionIdManagerResolver is a simple resolver that returns the same SessionIdManager for all requests +type DefaultSessionIdManagerResolver struct { + manager SessionIdManager +} + +// NewDefaultSessionIdManagerResolver creates a new DefaultSessionIdManagerResolver with the given SessionIdManager +func NewDefaultSessionIdManagerResolver(manager SessionIdManager) *DefaultSessionIdManagerResolver { + if manager == nil { + manager = &InsecureStatefulSessionIdManager{} + } + return &DefaultSessionIdManagerResolver{manager: manager} +} + +// ResolveSessionIdManager returns the configured SessionIdManager for all requests +func (r *DefaultSessionIdManagerResolver) ResolveSessionIdManager(_ *http.Request) SessionIdManager { + return r.manager +} + // StatelessSessionIdManager does nothing, which means it has no session management, which is stateless. 
type StatelessSessionIdManager struct{} diff --git a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go index fa1a9190..490cb633 100644 --- a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go +++ b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go @@ -53,7 +53,7 @@ func (c *Config) hash() crypto.Hash { func (c *Config) encodedCount() uint8 { if c == nil || c.S2KCount == 0 { - return 96 // The common case. Correspoding to 65536 + return 96 // The common case. Corresponding to 65536 } i := c.S2KCount diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go deleted file mode 100644 index d3cb9517..00000000 --- a/vendor/golang.org/x/net/context/context.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package context defines the Context type, which carries deadlines, -// cancellation signals, and other request-scoped values across API boundaries -// and between processes. -// As of Go 1.7 this package is available in the standard library under the -// name [context]. -// -// Incoming requests to a server should create a [Context], and outgoing -// calls to servers should accept a Context. The chain of function -// calls between them must propagate the Context, optionally replacing -// it with a derived Context created using [WithCancel], [WithDeadline], -// [WithTimeout], or [WithValue]. -// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. This is discussed further in -// https://go.dev/blog/context-and-structs. 
The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } -// -// Do not pass a nil [Context], even if a function permits it. Pass [context.TODO] -// if you are unsure about which Context to use. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. -// -// See https://go.dev/blog/context for example code for a server that uses -// Contexts. -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -// A Context carries a deadline, a cancellation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -// -//go:fix inline -type Context = context.Context - -// Canceled is the error returned by [Context.Err] when the context is canceled -// for some reason other than its deadline passing. -// -//go:fix inline -var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by [Context.Err] when the context is canceled -// due to its deadline passing. -// -//go:fix inline -var DeadlineExceeded = context.DeadlineExceeded - -// Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, -// initialization, and tests, and as the top-level Context for incoming -// requests. -// -//go:fix inline -func Background() Context { return context.Background() } - -// TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it is not yet available (because the -// surrounding function has not yet been extended to accept a Context -// parameter). 
-// -//go:fix inline -func TODO() Context { return context.TODO() } - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// A CancelFunc may be called by multiple goroutines simultaneously. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc = context.CancelFunc - -// WithCancel returns a derived context that points to the parent context -// but has a new Done channel. The returned context's Done channel is closed -// when the returned cancel function is called or when the parent context's -// Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this [Context] complete. -// -//go:fix inline -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - return context.WithCancel(parent) -} - -// WithDeadline returns a derived context that points to the parent context -// but has the deadline adjusted to be no later than d. If the parent's -// deadline is already earlier than d, WithDeadline(parent, d) is semantically -// equivalent to parent. The returned [Context.Done] channel is closed when -// the deadline expires, when the returned cancel function is called, -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this [Context] complete. -// -//go:fix inline -func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) { - return context.WithDeadline(parent, d) -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
-// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this [Context] complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -// -//go:fix inline -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return context.WithTimeout(parent, timeout) -} - -// WithValue returns a derived context that points to the parent Context. -// In the derived context, the value associated with key is val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The provided key must be comparable and should not be of type -// string or any other built-in type to avoid collisions between -// packages using context. Users of WithValue should define their own -// types for keys. To avoid allocating when assigning to an -// interface{}, context keys often have concrete type -// struct{}. Alternatively, exported context key variables' static -// type should be a pointer or interface. -// -//go:fix inline -func WithValue(parent Context, key, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 93bcaab0..9a4bd123 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -280,6 +280,8 @@ type Framer struct { // lastHeaderStream is non-zero if the last frame was an // unfinished HEADERS/CONTINUATION. lastHeaderStream uint32 + // lastFrameType holds the type of the last frame for verifying frame order. 
+ lastFrameType FrameType maxReadSize uint32 headerBuf [frameHeaderLen]byte @@ -488,30 +490,41 @@ func terminalReadFrameError(err error) bool { return err != nil } -// ReadFrame reads a single frame. The returned Frame is only valid -// until the next call to ReadFrame. +// ReadFrameHeader reads the header of the next frame. +// It reads the 9-byte fixed frame header, and does not read any portion of the +// frame payload. The caller is responsible for consuming the payload, either +// with ReadFrameForHeader or directly from the Framer's io.Reader. // -// If the frame is larger than previously set with SetMaxReadFrameSize, the -// returned error is ErrFrameTooLarge. Other errors may be of type -// ConnectionError, StreamError, or anything else from the underlying -// reader. +// If the frame is larger than previously set with SetMaxReadFrameSize, it +// returns the frame header and ErrFrameTooLarge. // -// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID -// indicates the stream responsible for the error. -func (fr *Framer) ReadFrame() (Frame, error) { +// If the returned FrameHeader.StreamID is non-zero, it indicates the stream +// responsible for the error. 
+func (fr *Framer) ReadFrameHeader() (FrameHeader, error) { fr.errDetail = nil - if fr.lastFrame != nil { - fr.lastFrame.invalidate() - } fh, err := readFrameHeader(fr.headerBuf[:], fr.r) if err != nil { - return nil, err + return fh, err } if fh.Length > fr.maxReadSize { if fh == invalidHTTP1LookingFrameHeader() { - return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge) + return fh, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge) } - return nil, ErrFrameTooLarge + return fh, ErrFrameTooLarge + } + if err := fr.checkFrameOrder(fh); err != nil { + return fh, err + } + return fh, nil +} + +// ReadFrameForHeader reads the payload for the frame with the given FrameHeader. +// +// It behaves identically to ReadFrame, other than not checking the maximum +// frame size. +func (fr *Framer) ReadFrameForHeader(fh FrameHeader) (Frame, error) { + if fr.lastFrame != nil { + fr.lastFrame.invalidate() } payload := fr.getReadBuf(fh.Length) if _, err := io.ReadFull(fr.r, payload); err != nil { @@ -527,9 +540,7 @@ func (fr *Framer) ReadFrame() (Frame, error) { } return nil, err } - if err := fr.checkFrameOrder(f); err != nil { - return nil, err - } + fr.lastFrame = f if fr.logReads { fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f)) } @@ -539,6 +550,24 @@ func (fr *Framer) ReadFrame() (Frame, error) { return f, nil } +// ReadFrame reads a single frame. The returned Frame is only valid +// until the next call to ReadFrame or ReadFrameBodyForHeader. +// +// If the frame is larger than previously set with SetMaxReadFrameSize, the +// returned error is ErrFrameTooLarge. Other errors may be of type +// ConnectionError, StreamError, or anything else from the underlying +// reader. 
+// +// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID +// indicates the stream responsible for the error. +func (fr *Framer) ReadFrame() (Frame, error) { + fh, err := fr.ReadFrameHeader() + if err != nil { + return nil, err + } + return fr.ReadFrameForHeader(fh) +} + // connError returns ConnectionError(code) but first // stashes away a public reason to the caller can optionally relay it // to the peer before hanging up on them. This might help others debug @@ -551,20 +580,19 @@ func (fr *Framer) connError(code ErrCode, reason string) error { // checkFrameOrder reports an error if f is an invalid frame to return // next from ReadFrame. Mostly it checks whether HEADERS and // CONTINUATION frames are contiguous. -func (fr *Framer) checkFrameOrder(f Frame) error { - last := fr.lastFrame - fr.lastFrame = f +func (fr *Framer) checkFrameOrder(fh FrameHeader) error { + lastType := fr.lastFrameType + fr.lastFrameType = fh.Type if fr.AllowIllegalReads { return nil } - fh := f.Header() if fr.lastHeaderStream != 0 { if fh.Type != FrameContinuation { return fr.connError(ErrCodeProtocol, fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d", fh.Type, fh.StreamID, - last.Header().Type, fr.lastHeaderStream)) + lastType, fr.lastHeaderStream)) } if fh.StreamID != fr.lastHeaderStream { return fr.connError(ErrCodeProtocol, @@ -1161,7 +1189,7 @@ var defaultRFC9218Priority = PriorityParam{ // PriorityParam struct below is a superset of both schemes. The exported // symbols are from RFC 7540 and the non-exported ones are from RFC 9218. -// PriorityParam are the stream prioritzation parameters. +// PriorityParam are the stream prioritization parameters. type PriorityParam struct { // StreamDep is a 31-bit stream identifier for the // stream that this stream depends on. 
Zero means no diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index be759b60..1965913e 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -9,6 +9,7 @@ package http2 import ( "bufio" "bytes" + "compress/flate" "compress/gzip" "context" "crypto/rand" @@ -3076,35 +3077,102 @@ type erringRoundTripper struct{ err error } func (rt erringRoundTripper) RoundTripErr() error { return rt.err } func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } +var errConcurrentReadOnResBody = errors.New("http2: concurrent read on response body") + // gzipReader wraps a response body so it can lazily -// call gzip.NewReader on the first call to Read +// get gzip.Reader from the pool on the first call to Read. +// After Close is called it puts gzip.Reader to the pool immediately +// if there is no Read in progress or later when Read completes. type gzipReader struct { _ incomparable body io.ReadCloser // underlying Response.Body - zr *gzip.Reader // lazily-initialized gzip reader - zerr error // sticky error + mu sync.Mutex // guards zr and zerr + zr *gzip.Reader // stores gzip reader from the pool between reads + zerr error // sticky gzip reader init error or sentinel value to detect concurrent read and read after close } -func (gz *gzipReader) Read(p []byte) (n int, err error) { +type eofReader struct{} + +func (eofReader) Read([]byte) (int, error) { return 0, io.EOF } +func (eofReader) ReadByte() (byte, error) { return 0, io.EOF } + +var gzipPool = sync.Pool{New: func() any { return new(gzip.Reader) }} + +// gzipPoolGet gets a gzip.Reader from the pool and resets it to read from r. +func gzipPoolGet(r io.Reader) (*gzip.Reader, error) { + zr := gzipPool.Get().(*gzip.Reader) + if err := zr.Reset(r); err != nil { + gzipPoolPut(zr) + return nil, err + } + return zr, nil +} + +// gzipPoolPut puts a gzip.Reader back into the pool. 
+func gzipPoolPut(zr *gzip.Reader) { + // Reset will allocate bufio.Reader if we pass it anything + // other than a flate.Reader, so ensure that it's getting one. + var r flate.Reader = eofReader{} + zr.Reset(r) + gzipPool.Put(zr) +} + +// acquire returns a gzip.Reader for reading response body. +// The reader must be released after use. +func (gz *gzipReader) acquire() (*gzip.Reader, error) { + gz.mu.Lock() + defer gz.mu.Unlock() if gz.zerr != nil { - return 0, gz.zerr + return nil, gz.zerr } if gz.zr == nil { - gz.zr, err = gzip.NewReader(gz.body) - if err != nil { - gz.zerr = err - return 0, err + gz.zr, gz.zerr = gzipPoolGet(gz.body) + if gz.zerr != nil { + return nil, gz.zerr } } - return gz.zr.Read(p) + ret := gz.zr + gz.zr, gz.zerr = nil, errConcurrentReadOnResBody + return ret, nil } -func (gz *gzipReader) Close() error { - if err := gz.body.Close(); err != nil { - return err +// release returns the gzip.Reader to the pool if Close was called during Read. +func (gz *gzipReader) release(zr *gzip.Reader) { + gz.mu.Lock() + defer gz.mu.Unlock() + if gz.zerr == errConcurrentReadOnResBody { + gz.zr, gz.zerr = zr, nil + } else { // fs.ErrClosed + gzipPoolPut(zr) + } +} + +// close returns the gzip.Reader to the pool immediately or +// signals release to do so after Read completes. 
+func (gz *gzipReader) close() { + gz.mu.Lock() + defer gz.mu.Unlock() + if gz.zerr == nil && gz.zr != nil { + gzipPoolPut(gz.zr) + gz.zr = nil } gz.zerr = fs.ErrClosed - return nil +} + +func (gz *gzipReader) Read(p []byte) (n int, err error) { + zr, err := gz.acquire() + if err != nil { + return 0, err + } + defer gz.release(zr) + + return zr.Read(p) +} + +func (gz *gzipReader) Close() error { + gz.close() + + return gz.body.Close() } type errorReader struct{ err error } diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go index 4d3890f9..7de27be5 100644 --- a/vendor/golang.org/x/net/http2/writesched.go +++ b/vendor/golang.org/x/net/http2/writesched.go @@ -185,45 +185,75 @@ func (wr *FrameWriteRequest) replyToWriter(err error) { } // writeQueue is used by implementations of WriteScheduler. +// +// Each writeQueue contains a queue of FrameWriteRequests, meant to store all +// FrameWriteRequests associated with a given stream. This is implemented as a +// two-stage queue: currQueue[currPos:] and nextQueue. Removing an item is done +// by incrementing currPos of currQueue. Adding an item is done by appending it +// to the nextQueue. If currQueue is empty when trying to remove an item, we +// can swap currQueue and nextQueue to remedy the situation. +// This two-stage queue is analogous to the use of two lists in Okasaki's +// purely functional queue but without the overhead of reversing the list when +// swapping stages. +// +// writeQueue also contains prev and next, this can be used by implementations +// of WriteScheduler to construct data structures that represent the order of +// writing between different streams (e.g. circular linked list). 
type writeQueue struct { - s []FrameWriteRequest + currQueue []FrameWriteRequest + nextQueue []FrameWriteRequest + currPos int + prev, next *writeQueue } -func (q *writeQueue) empty() bool { return len(q.s) == 0 } +func (q *writeQueue) empty() bool { + return (len(q.currQueue) - q.currPos + len(q.nextQueue)) == 0 +} func (q *writeQueue) push(wr FrameWriteRequest) { - q.s = append(q.s, wr) + q.nextQueue = append(q.nextQueue, wr) } func (q *writeQueue) shift() FrameWriteRequest { - if len(q.s) == 0 { + if q.empty() { panic("invalid use of queue") } - wr := q.s[0] - // TODO: less copy-happy queue. - copy(q.s, q.s[1:]) - q.s[len(q.s)-1] = FrameWriteRequest{} - q.s = q.s[:len(q.s)-1] + if q.currPos >= len(q.currQueue) { + q.currQueue, q.currPos, q.nextQueue = q.nextQueue, 0, q.currQueue[:0] + } + wr := q.currQueue[q.currPos] + q.currQueue[q.currPos] = FrameWriteRequest{} + q.currPos++ return wr } +func (q *writeQueue) peek() *FrameWriteRequest { + if q.currPos < len(q.currQueue) { + return &q.currQueue[q.currPos] + } + if len(q.nextQueue) > 0 { + return &q.nextQueue[0] + } + return nil +} + // consume consumes up to n bytes from q.s[0]. If the frame is // entirely consumed, it is removed from the queue. If the frame // is partially consumed, the frame is kept with the consumed // bytes removed. Returns true iff any bytes were consumed. func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { - if len(q.s) == 0 { + if q.empty() { return FrameWriteRequest{}, false } - consumed, rest, numresult := q.s[0].Consume(n) + consumed, rest, numresult := q.peek().Consume(n) switch numresult { case 0: return FrameWriteRequest{}, false case 1: q.shift() case 2: - q.s[0] = rest + *q.peek() = rest } return consumed, true } @@ -232,10 +262,15 @@ type writeQueuePool []*writeQueue // put inserts an unused writeQueue into the pool. 
func (p *writeQueuePool) put(q *writeQueue) { - for i := range q.s { - q.s[i] = FrameWriteRequest{} + for i := range q.currQueue { + q.currQueue[i] = FrameWriteRequest{} + } + for i := range q.nextQueue { + q.nextQueue[i] = FrameWriteRequest{} } - q.s = q.s[:0] + q.currQueue = q.currQueue[:0] + q.nextQueue = q.nextQueue[:0] + q.currPos = 0 *p = append(*p, q) } diff --git a/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go index 6d24d6a1..4e33c29a 100644 --- a/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go +++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go @@ -214,8 +214,8 @@ func (z sortPriorityNodeSiblingsRFC7540) Swap(i, k int) { z[i], z[k] = z[k], z[i func (z sortPriorityNodeSiblingsRFC7540) Less(i, k int) bool { // Prefer the subtree that has sent fewer bytes relative to its weight. // See sections 5.3.2 and 5.3.4. - wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) - wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) + wi, bi := float64(z[i].weight)+1, float64(z[i].subtreeBytes) + wk, bk := float64(z[k].weight)+1, float64(z[k].subtreeBytes) if bi == 0 && bk == 0 { return wi >= wk } @@ -302,7 +302,6 @@ func (ws *priorityWriteSchedulerRFC7540) CloseStream(streamID uint32) { q := n.q ws.queuePool.put(&q) - n.q.s = nil if ws.maxClosedNodesInTree > 0 { ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) } else { diff --git a/vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go similarity index 99% rename from vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go rename to vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go index 9b5b8808..cb4cadc3 100644 --- a/vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go +++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go @@ -39,7 +39,7 @@ type priorityWriteSchedulerRFC9218 
struct { prioritizeIncremental bool } -func newPriorityWriteSchedulerRFC9128() WriteScheduler { +func newPriorityWriteSchedulerRFC9218() WriteScheduler { ws := &priorityWriteSchedulerRFC9218{ streams: make(map[uint32]streamMetadata), } diff --git a/vendor/golang.org/x/oauth2/deviceauth.go b/vendor/golang.org/x/oauth2/deviceauth.go index e99c92f3..e783a943 100644 --- a/vendor/golang.org/x/oauth2/deviceauth.go +++ b/vendor/golang.org/x/oauth2/deviceauth.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "io" + "mime" "net/http" "net/url" "strings" @@ -116,10 +117,38 @@ func retrieveDeviceAuth(ctx context.Context, c *Config, v url.Values) (*DeviceAu return nil, fmt.Errorf("oauth2: cannot auth device: %v", err) } if code := r.StatusCode; code < 200 || code > 299 { - return nil, &RetrieveError{ + retrieveError := &RetrieveError{ Response: r, Body: body, } + + content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": + // some endpoints return a query string + vals, err := url.ParseQuery(string(body)) + if err != nil { + return nil, retrieveError + } + retrieveError.ErrorCode = vals.Get("error") + retrieveError.ErrorDescription = vals.Get("error_description") + retrieveError.ErrorURI = vals.Get("error_uri") + default: + var tj struct { + // https://datatracker.ietf.org/doc/html/rfc6749#section-5.2 + ErrorCode string `json:"error"` + ErrorDescription string `json:"error_description"` + ErrorURI string `json:"error_uri"` + } + if json.Unmarshal(body, &tj) != nil { + return nil, retrieveError + } + retrieveError.ErrorCode = tj.ErrorCode + retrieveError.ErrorDescription = tj.ErrorDescription + retrieveError.ErrorURI = tj.ErrorURI + } + + return nil, retrieveError } da := &DeviceAuthResponse{} diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 3e3b6306..5c527d31 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ 
-98,7 +98,7 @@ const ( // in the POST body as application/x-www-form-urlencoded parameters. AuthStyleInParams AuthStyle = 1 - // AuthStyleInHeader sends the client_id and client_password + // AuthStyleInHeader sends the client_id and client_secret // using HTTP Basic Authorization. This is an optional style // described in the OAuth2 RFC 6749 section 2.3.1. AuthStyleInHeader AuthStyle = 2 diff --git a/vendor/golang.org/x/oauth2/pkce.go b/vendor/golang.org/x/oauth2/pkce.go index cea8374d..f99384f0 100644 --- a/vendor/golang.org/x/oauth2/pkce.go +++ b/vendor/golang.org/x/oauth2/pkce.go @@ -51,7 +51,7 @@ func S256ChallengeFromVerifier(verifier string) string { return base64.RawURLEncoding.EncodeToString(sha[:]) } -// S256ChallengeOption derives a PKCE code challenge derived from verifier with +// S256ChallengeOption derives a PKCE code challenge from the verifier with // method S256. It should be passed to [Config.AuthCodeURL] or [Config.DeviceAuth] // only. func S256ChallengeOption(verifier string) AuthCodeOption { diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 239ec329..e995eebb 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -103,7 +103,7 @@ func (t *Token) WithExtra(extra any) *Token { } // Extra returns an extra field. -// Extra fields are key-value pairs returned by the server as a +// Extra fields are key-value pairs returned by the server as // part of the token retrieval response. func (t *Token) Extra(key string) any { if raw, ok := t.raw.(map[string]any); ok { diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go index 8bbebbac..9922ec33 100644 --- a/vendor/golang.org/x/oauth2/transport.go +++ b/vendor/golang.org/x/oauth2/transport.go @@ -58,7 +58,7 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { var cancelOnce sync.Once // CancelRequest does nothing. 
It used to be a legacy cancellation mechanism -// but now only it only logs on first use to warn that it's deprecated. +// but now only logs on first use to warn that it's deprecated. // // Deprecated: use contexts for cancellation instead. func (t *Transport) CancelRequest(req *http.Request) { diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index 1d8cffae..2f45dbc8 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package errgroup provides synchronization, error propagation, and Context -// cancelation for groups of goroutines working on subtasks of a common task. +// cancellation for groups of goroutines working on subtasks of a common task. // // [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks // returning errors. diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index d1c8b264..42517077 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -226,6 +226,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -529,6 +530,7 @@ ccflags="$@" $2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ || $2 ~ /^(NL|CR|TAB|BS|VT|FF)DLY$/ || $2 ~ /^(NL|CR|TAB|BS|VT|FF)[0-9]$/ || + $2 ~ /^(DT|EI|ELF|EV|NN|NT|PF|SHF|SHN|SHT|STB|STT|VER)_/ || $2 ~ /^O?XTABS$/ || $2 ~ /^TC[IO](ON|OFF)$/ || $2 ~ /^IN_/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 9439af96..06c0eea6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -2643,3 +2643,9 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { //sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) //sys Mseal(b []byte, flags uint) (err 
error) + +//sys setMemPolicy(mode int, mask *CPUSet, size int) (err error) = SYS_SET_MEMPOLICY + +func SetMemPolicy(mode int, mask *CPUSet) error { + return setMemPolicy(mode, mask, _CPU_SETSIZE) +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index b6db27d9..d0a75da5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -853,20 +853,86 @@ const ( DM_VERSION_MAJOR = 0x4 DM_VERSION_MINOR = 0x32 DM_VERSION_PATCHLEVEL = 0x0 + DT_ADDRRNGHI = 0x6ffffeff + DT_ADDRRNGLO = 0x6ffffe00 DT_BLK = 0x6 DT_CHR = 0x2 + DT_DEBUG = 0x15 DT_DIR = 0x4 + DT_ENCODING = 0x20 DT_FIFO = 0x1 + DT_FINI = 0xd + DT_FLAGS_1 = 0x6ffffffb + DT_GNU_HASH = 0x6ffffef5 + DT_HASH = 0x4 + DT_HIOS = 0x6ffff000 + DT_HIPROC = 0x7fffffff + DT_INIT = 0xc + DT_JMPREL = 0x17 DT_LNK = 0xa + DT_LOOS = 0x6000000d + DT_LOPROC = 0x70000000 + DT_NEEDED = 0x1 + DT_NULL = 0x0 + DT_PLTGOT = 0x3 + DT_PLTREL = 0x14 + DT_PLTRELSZ = 0x2 DT_REG = 0x8 + DT_REL = 0x11 + DT_RELA = 0x7 + DT_RELACOUNT = 0x6ffffff9 + DT_RELAENT = 0x9 + DT_RELASZ = 0x8 + DT_RELCOUNT = 0x6ffffffa + DT_RELENT = 0x13 + DT_RELSZ = 0x12 + DT_RPATH = 0xf DT_SOCK = 0xc + DT_SONAME = 0xe + DT_STRSZ = 0xa + DT_STRTAB = 0x5 + DT_SYMBOLIC = 0x10 + DT_SYMENT = 0xb + DT_SYMTAB = 0x6 + DT_TEXTREL = 0x16 DT_UNKNOWN = 0x0 + DT_VALRNGHI = 0x6ffffdff + DT_VALRNGLO = 0x6ffffd00 + DT_VERDEF = 0x6ffffffc + DT_VERDEFNUM = 0x6ffffffd + DT_VERNEED = 0x6ffffffe + DT_VERNEEDNUM = 0x6fffffff + DT_VERSYM = 0x6ffffff0 DT_WHT = 0xe ECHO = 0x8 ECRYPTFS_SUPER_MAGIC = 0xf15f EFD_SEMAPHORE = 0x1 EFIVARFS_MAGIC = 0xde5e81e4 EFS_SUPER_MAGIC = 0x414a53 + EI_CLASS = 0x4 + EI_DATA = 0x5 + EI_MAG0 = 0x0 + EI_MAG1 = 0x1 + EI_MAG2 = 0x2 + EI_MAG3 = 0x3 + EI_NIDENT = 0x10 + EI_OSABI = 0x7 + EI_PAD = 0x8 + EI_VERSION = 0x6 + ELFCLASS32 = 0x1 + ELFCLASS64 = 0x2 + ELFCLASSNONE = 0x0 + ELFCLASSNUM = 0x3 + ELFDATA2LSB = 0x1 + ELFDATA2MSB = 0x2 + ELFDATANONE = 0x0 + ELFMAG = 
"\177ELF" + ELFMAG0 = 0x7f + ELFMAG1 = 'E' + ELFMAG2 = 'L' + ELFMAG3 = 'F' + ELFOSABI_LINUX = 0x3 + ELFOSABI_NONE = 0x0 EM_386 = 0x3 EM_486 = 0x6 EM_68K = 0x4 @@ -1152,14 +1218,24 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + ET_CORE = 0x4 + ET_DYN = 0x3 + ET_EXEC = 0x2 + ET_HIPROC = 0xffff + ET_LOPROC = 0xff00 + ET_NONE = 0x0 + ET_REL = 0x1 EV_ABS = 0x3 EV_CNT = 0x20 + EV_CURRENT = 0x1 EV_FF = 0x15 EV_FF_STATUS = 0x17 EV_KEY = 0x1 EV_LED = 0x11 EV_MAX = 0x1f EV_MSC = 0x4 + EV_NONE = 0x0 + EV_NUM = 0x2 EV_PWR = 0x16 EV_REL = 0x2 EV_REP = 0x14 @@ -2276,7 +2352,167 @@ const ( NLM_F_REPLACE = 0x100 NLM_F_REQUEST = 0x1 NLM_F_ROOT = 0x100 + NN_386_IOPERM = "LINUX" + NN_386_TLS = "LINUX" + NN_ARC_V2 = "LINUX" + NN_ARM_FPMR = "LINUX" + NN_ARM_GCS = "LINUX" + NN_ARM_HW_BREAK = "LINUX" + NN_ARM_HW_WATCH = "LINUX" + NN_ARM_PACA_KEYS = "LINUX" + NN_ARM_PACG_KEYS = "LINUX" + NN_ARM_PAC_ENABLED_KEYS = "LINUX" + NN_ARM_PAC_MASK = "LINUX" + NN_ARM_POE = "LINUX" + NN_ARM_SSVE = "LINUX" + NN_ARM_SVE = "LINUX" + NN_ARM_SYSTEM_CALL = "LINUX" + NN_ARM_TAGGED_ADDR_CTRL = "LINUX" + NN_ARM_TLS = "LINUX" + NN_ARM_VFP = "LINUX" + NN_ARM_ZA = "LINUX" + NN_ARM_ZT = "LINUX" + NN_AUXV = "CORE" + NN_FILE = "CORE" + NN_GNU_PROPERTY_TYPE_0 = "GNU" + NN_LOONGARCH_CPUCFG = "LINUX" + NN_LOONGARCH_CSR = "LINUX" + NN_LOONGARCH_HW_BREAK = "LINUX" + NN_LOONGARCH_HW_WATCH = "LINUX" + NN_LOONGARCH_LASX = "LINUX" + NN_LOONGARCH_LBT = "LINUX" + NN_LOONGARCH_LSX = "LINUX" + NN_MIPS_DSP = "LINUX" + NN_MIPS_FP_MODE = "LINUX" + NN_MIPS_MSA = "LINUX" + NN_PPC_DEXCR = "LINUX" + NN_PPC_DSCR = "LINUX" + NN_PPC_EBB = "LINUX" + NN_PPC_HASHKEYR = "LINUX" + NN_PPC_PKEY = "LINUX" + NN_PPC_PMU = "LINUX" + NN_PPC_PPR = "LINUX" + NN_PPC_SPE = "LINUX" + NN_PPC_TAR = "LINUX" + NN_PPC_TM_CDSCR = "LINUX" + NN_PPC_TM_CFPR = "LINUX" + NN_PPC_TM_CGPR = "LINUX" + NN_PPC_TM_CPPR = "LINUX" + NN_PPC_TM_CTAR = "LINUX" + NN_PPC_TM_CVMX = "LINUX" + NN_PPC_TM_CVSX = "LINUX" + NN_PPC_TM_SPR = "LINUX" + NN_PPC_VMX = 
"LINUX" + NN_PPC_VSX = "LINUX" + NN_PRFPREG = "CORE" + NN_PRPSINFO = "CORE" + NN_PRSTATUS = "CORE" + NN_PRXFPREG = "LINUX" + NN_RISCV_CSR = "LINUX" + NN_RISCV_TAGGED_ADDR_CTRL = "LINUX" + NN_RISCV_VECTOR = "LINUX" + NN_S390_CTRS = "LINUX" + NN_S390_GS_BC = "LINUX" + NN_S390_GS_CB = "LINUX" + NN_S390_HIGH_GPRS = "LINUX" + NN_S390_LAST_BREAK = "LINUX" + NN_S390_PREFIX = "LINUX" + NN_S390_PV_CPU_DATA = "LINUX" + NN_S390_RI_CB = "LINUX" + NN_S390_SYSTEM_CALL = "LINUX" + NN_S390_TDB = "LINUX" + NN_S390_TIMER = "LINUX" + NN_S390_TODCMP = "LINUX" + NN_S390_TODPREG = "LINUX" + NN_S390_VXRS_HIGH = "LINUX" + NN_S390_VXRS_LOW = "LINUX" + NN_SIGINFO = "CORE" + NN_TASKSTRUCT = "CORE" + NN_VMCOREDD = "LINUX" + NN_X86_SHSTK = "LINUX" + NN_X86_XSAVE_LAYOUT = "LINUX" + NN_X86_XSTATE = "LINUX" NSFS_MAGIC = 0x6e736673 + NT_386_IOPERM = 0x201 + NT_386_TLS = 0x200 + NT_ARC_V2 = 0x600 + NT_ARM_FPMR = 0x40e + NT_ARM_GCS = 0x410 + NT_ARM_HW_BREAK = 0x402 + NT_ARM_HW_WATCH = 0x403 + NT_ARM_PACA_KEYS = 0x407 + NT_ARM_PACG_KEYS = 0x408 + NT_ARM_PAC_ENABLED_KEYS = 0x40a + NT_ARM_PAC_MASK = 0x406 + NT_ARM_POE = 0x40f + NT_ARM_SSVE = 0x40b + NT_ARM_SVE = 0x405 + NT_ARM_SYSTEM_CALL = 0x404 + NT_ARM_TAGGED_ADDR_CTRL = 0x409 + NT_ARM_TLS = 0x401 + NT_ARM_VFP = 0x400 + NT_ARM_ZA = 0x40c + NT_ARM_ZT = 0x40d + NT_AUXV = 0x6 + NT_FILE = 0x46494c45 + NT_GNU_PROPERTY_TYPE_0 = 0x5 + NT_LOONGARCH_CPUCFG = 0xa00 + NT_LOONGARCH_CSR = 0xa01 + NT_LOONGARCH_HW_BREAK = 0xa05 + NT_LOONGARCH_HW_WATCH = 0xa06 + NT_LOONGARCH_LASX = 0xa03 + NT_LOONGARCH_LBT = 0xa04 + NT_LOONGARCH_LSX = 0xa02 + NT_MIPS_DSP = 0x800 + NT_MIPS_FP_MODE = 0x801 + NT_MIPS_MSA = 0x802 + NT_PPC_DEXCR = 0x111 + NT_PPC_DSCR = 0x105 + NT_PPC_EBB = 0x106 + NT_PPC_HASHKEYR = 0x112 + NT_PPC_PKEY = 0x110 + NT_PPC_PMU = 0x107 + NT_PPC_PPR = 0x104 + NT_PPC_SPE = 0x101 + NT_PPC_TAR = 0x103 + NT_PPC_TM_CDSCR = 0x10f + NT_PPC_TM_CFPR = 0x109 + NT_PPC_TM_CGPR = 0x108 + NT_PPC_TM_CPPR = 0x10e + NT_PPC_TM_CTAR = 0x10d + NT_PPC_TM_CVMX = 0x10a + 
NT_PPC_TM_CVSX = 0x10b + NT_PPC_TM_SPR = 0x10c + NT_PPC_VMX = 0x100 + NT_PPC_VSX = 0x102 + NT_PRFPREG = 0x2 + NT_PRPSINFO = 0x3 + NT_PRSTATUS = 0x1 + NT_PRXFPREG = 0x46e62b7f + NT_RISCV_CSR = 0x900 + NT_RISCV_TAGGED_ADDR_CTRL = 0x902 + NT_RISCV_VECTOR = 0x901 + NT_S390_CTRS = 0x304 + NT_S390_GS_BC = 0x30c + NT_S390_GS_CB = 0x30b + NT_S390_HIGH_GPRS = 0x300 + NT_S390_LAST_BREAK = 0x306 + NT_S390_PREFIX = 0x305 + NT_S390_PV_CPU_DATA = 0x30e + NT_S390_RI_CB = 0x30d + NT_S390_SYSTEM_CALL = 0x307 + NT_S390_TDB = 0x308 + NT_S390_TIMER = 0x301 + NT_S390_TODCMP = 0x302 + NT_S390_TODPREG = 0x303 + NT_S390_VXRS_HIGH = 0x30a + NT_S390_VXRS_LOW = 0x309 + NT_SIGINFO = 0x53494749 + NT_TASKSTRUCT = 0x4 + NT_VMCOREDD = 0x700 + NT_X86_SHSTK = 0x204 + NT_X86_XSAVE_LAYOUT = 0x205 + NT_X86_XSTATE = 0x202 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -2463,6 +2699,59 @@ const ( PERF_RECORD_MISC_USER = 0x2 PERF_SAMPLE_BRANCH_PLM_ALL = 0x7 PERF_SAMPLE_WEIGHT_TYPE = 0x1004000 + PF_ALG = 0x26 + PF_APPLETALK = 0x5 + PF_ASH = 0x12 + PF_ATMPVC = 0x8 + PF_ATMSVC = 0x14 + PF_AX25 = 0x3 + PF_BLUETOOTH = 0x1f + PF_BRIDGE = 0x7 + PF_CAIF = 0x25 + PF_CAN = 0x1d + PF_DECnet = 0xc + PF_ECONET = 0x13 + PF_FILE = 0x1 + PF_IB = 0x1b + PF_IEEE802154 = 0x24 + PF_INET = 0x2 + PF_INET6 = 0xa + PF_IPX = 0x4 + PF_IRDA = 0x17 + PF_ISDN = 0x22 + PF_IUCV = 0x20 + PF_KCM = 0x29 + PF_KEY = 0xf + PF_LLC = 0x1a + PF_LOCAL = 0x1 + PF_MAX = 0x2e + PF_MCTP = 0x2d + PF_MPLS = 0x1c + PF_NETBEUI = 0xd + PF_NETLINK = 0x10 + PF_NETROM = 0x6 + PF_NFC = 0x27 + PF_PACKET = 0x11 + PF_PHONET = 0x23 + PF_PPPOX = 0x18 + PF_QIPCRTR = 0x2a + PF_R = 0x4 + PF_RDS = 0x15 + PF_ROSE = 0xb + PF_ROUTE = 0x10 + PF_RXRPC = 0x21 + PF_SECURITY = 0xe + PF_SMC = 0x2b + PF_SNA = 0x16 + PF_TIPC = 0x1e + PF_UNIX = 0x1 + PF_UNSPEC = 0x0 + PF_VSOCK = 0x28 + PF_W = 0x2 + PF_WANPIPE = 0x19 + PF_X = 0x1 + PF_X25 = 0x9 + PF_XDP = 0x2c PID_FS_MAGIC = 0x50494446 PIPEFS_MAGIC = 0x50495045 PPPIOCGNPMODE = 0xc008744c @@ -2758,6 +3047,23 @@ const 
( PTRACE_SYSCALL_INFO_NONE = 0x0 PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 + PT_AARCH64_MEMTAG_MTE = 0x70000002 + PT_DYNAMIC = 0x2 + PT_GNU_EH_FRAME = 0x6474e550 + PT_GNU_PROPERTY = 0x6474e553 + PT_GNU_RELRO = 0x6474e552 + PT_GNU_STACK = 0x6474e551 + PT_HIOS = 0x6fffffff + PT_HIPROC = 0x7fffffff + PT_INTERP = 0x3 + PT_LOAD = 0x1 + PT_LOOS = 0x60000000 + PT_LOPROC = 0x70000000 + PT_NOTE = 0x4 + PT_NULL = 0x0 + PT_PHDR = 0x6 + PT_SHLIB = 0x5 + PT_TLS = 0x7 P_ALL = 0x0 P_PGID = 0x2 P_PID = 0x1 @@ -3091,6 +3397,47 @@ const ( SEEK_MAX = 0x4 SEEK_SET = 0x0 SELINUX_MAGIC = 0xf97cff8c + SHF_ALLOC = 0x2 + SHF_EXCLUDE = 0x8000000 + SHF_EXECINSTR = 0x4 + SHF_GROUP = 0x200 + SHF_INFO_LINK = 0x40 + SHF_LINK_ORDER = 0x80 + SHF_MASKOS = 0xff00000 + SHF_MASKPROC = 0xf0000000 + SHF_MERGE = 0x10 + SHF_ORDERED = 0x4000000 + SHF_OS_NONCONFORMING = 0x100 + SHF_RELA_LIVEPATCH = 0x100000 + SHF_RO_AFTER_INIT = 0x200000 + SHF_STRINGS = 0x20 + SHF_TLS = 0x400 + SHF_WRITE = 0x1 + SHN_ABS = 0xfff1 + SHN_COMMON = 0xfff2 + SHN_HIPROC = 0xff1f + SHN_HIRESERVE = 0xffff + SHN_LIVEPATCH = 0xff20 + SHN_LOPROC = 0xff00 + SHN_LORESERVE = 0xff00 + SHN_UNDEF = 0x0 + SHT_DYNAMIC = 0x6 + SHT_DYNSYM = 0xb + SHT_HASH = 0x5 + SHT_HIPROC = 0x7fffffff + SHT_HIUSER = 0xffffffff + SHT_LOPROC = 0x70000000 + SHT_LOUSER = 0x80000000 + SHT_NOBITS = 0x8 + SHT_NOTE = 0x7 + SHT_NULL = 0x0 + SHT_NUM = 0xc + SHT_PROGBITS = 0x1 + SHT_REL = 0x9 + SHT_RELA = 0x4 + SHT_SHLIB = 0xa + SHT_STRTAB = 0x3 + SHT_SYMTAB = 0x2 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -3317,6 +3664,16 @@ const ( STATX_UID = 0x8 STATX_WRITE_ATOMIC = 0x10000 STATX__RESERVED = 0x80000000 + STB_GLOBAL = 0x1 + STB_LOCAL = 0x0 + STB_WEAK = 0x2 + STT_COMMON = 0x5 + STT_FILE = 0x4 + STT_FUNC = 0x2 + STT_NOTYPE = 0x0 + STT_OBJECT = 0x1 + STT_SECTION = 0x3 + STT_TLS = 0x6 SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 SYNC_FILE_RANGE_WRITE = 0x2 @@ -3553,6 +3910,8 @@ const ( UTIME_OMIT = 0x3ffffffe V9FS_MAGIC = 0x1021997 
VERASE = 0x2 + VER_FLG_BASE = 0x1 + VER_FLG_WEAK = 0x2 VINTR = 0x0 VKILL = 0x3 VLNEXT = 0xf diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 5cc1e8eb..8935d10a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -2238,3 +2238,13 @@ func Mseal(b []byte, flags uint) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setMemPolicy(mode int, mask *CPUSet, size int) (err error) { + _, _, e1 := Syscall(SYS_SET_MEMPOLICY, uintptr(mode), uintptr(unsafe.Pointer(mask)), uintptr(size)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 944e75a1..c1a46701 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -3590,6 +3590,8 @@ type Nhmsg struct { Flags uint32 } +const SizeofNhmsg = 0x8 + type NexthopGrp struct { Id uint32 Weight uint8 @@ -3597,6 +3599,8 @@ type NexthopGrp struct { Resvd2 uint16 } +const SizeofNexthopGrp = 0x8 + const ( NHA_UNSPEC = 0x0 NHA_ID = 0x1 @@ -6332,3 +6336,30 @@ type SockDiagReq struct { } const RTM_NEWNVLAN = 0x70 + +const ( + MPOL_BIND = 0x2 + MPOL_DEFAULT = 0x0 + MPOL_F_ADDR = 0x2 + MPOL_F_MEMS_ALLOWED = 0x4 + MPOL_F_MOF = 0x8 + MPOL_F_MORON = 0x10 + MPOL_F_NODE = 0x1 + MPOL_F_NUMA_BALANCING = 0x2000 + MPOL_F_RELATIVE_NODES = 0x4000 + MPOL_F_SHARED = 0x1 + MPOL_F_STATIC_NODES = 0x8000 + MPOL_INTERLEAVE = 0x3 + MPOL_LOCAL = 0x4 + MPOL_MAX = 0x7 + MPOL_MF_INTERNAL = 0x10 + MPOL_MF_LAZY = 0x8 + MPOL_MF_MOVE_ALL = 0x4 + MPOL_MF_MOVE = 0x2 + MPOL_MF_STRICT = 0x1 + MPOL_MF_VALID = 0x7 + MPOL_MODE_FLAGS = 0xe000 + MPOL_PREFERRED = 0x1 + MPOL_PREFERRED_MANY = 0x5 + MPOL_WEIGHTED_INTERLEAVE = 0x6 +) diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go 
index bd513373..69439df2 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -892,8 +892,12 @@ const socket_error = uintptr(^uint32(0)) //sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar //sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx //sys GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex +//sys GetIpForwardEntry2(row *MibIpForwardRow2) (errcode error) = iphlpapi.GetIpForwardEntry2 +//sys GetIpForwardTable2(family uint16, table **MibIpForwardTable2) (errcode error) = iphlpapi.GetIpForwardTable2 //sys GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry +//sys FreeMibTable(memory unsafe.Pointer) = iphlpapi.FreeMibTable //sys NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange +//sys NotifyRouteChange2(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyRouteChange2 //sys NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange //sys CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2 @@ -916,6 +920,17 @@ type RawSockaddrInet6 struct { Scope_id uint32 } +// RawSockaddrInet is a union that contains an IPv4, an IPv6 address, or an address family. See +// https://learn.microsoft.com/en-us/windows/win32/api/ws2ipdef/ns-ws2ipdef-sockaddr_inet. 
+// +// A [*RawSockaddrInet] may be converted to a [*RawSockaddrInet4] or [*RawSockaddrInet6] using +// unsafe, depending on the address family. +type RawSockaddrInet struct { + Family uint16 + Port uint16 + Data [6]uint32 +} + type RawSockaddr struct { Family uint16 Data [14]int8 diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 358be3c7..6e4f50eb 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -2320,6 +2320,82 @@ type MibIfRow2 struct { OutQLen uint64 } +// IP_ADDRESS_PREFIX stores an IP address prefix. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-ip_address_prefix. +type IpAddressPrefix struct { + Prefix RawSockaddrInet + PrefixLength uint8 +} + +// NL_ROUTE_ORIGIN enumeration from nldef.h or +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_route_origin. +const ( + NlroManual = 0 + NlroWellKnown = 1 + NlroDHCP = 2 + NlroRouterAdvertisement = 3 + Nlro6to4 = 4 +) + +// NL_ROUTE_ORIGIN enumeration from nldef.h or +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_route_protocol. +const ( + MIB_IPPROTO_OTHER = 1 + MIB_IPPROTO_LOCAL = 2 + MIB_IPPROTO_NETMGMT = 3 + MIB_IPPROTO_ICMP = 4 + MIB_IPPROTO_EGP = 5 + MIB_IPPROTO_GGP = 6 + MIB_IPPROTO_HELLO = 7 + MIB_IPPROTO_RIP = 8 + MIB_IPPROTO_IS_IS = 9 + MIB_IPPROTO_ES_IS = 10 + MIB_IPPROTO_CISCO = 11 + MIB_IPPROTO_BBN = 12 + MIB_IPPROTO_OSPF = 13 + MIB_IPPROTO_BGP = 14 + MIB_IPPROTO_IDPR = 15 + MIB_IPPROTO_EIGRP = 16 + MIB_IPPROTO_DVMRP = 17 + MIB_IPPROTO_RPL = 18 + MIB_IPPROTO_DHCP = 19 + MIB_IPPROTO_NT_AUTOSTATIC = 10002 + MIB_IPPROTO_NT_STATIC = 10006 + MIB_IPPROTO_NT_STATIC_NON_DOD = 10007 +) + +// MIB_IPFORWARD_ROW2 stores information about an IP route entry. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipforward_row2. 
+type MibIpForwardRow2 struct { + InterfaceLuid uint64 + InterfaceIndex uint32 + DestinationPrefix IpAddressPrefix + NextHop RawSockaddrInet + SitePrefixLength uint8 + ValidLifetime uint32 + PreferredLifetime uint32 + Metric uint32 + Protocol uint32 + Loopback uint8 + AutoconfigureAddress uint8 + Publish uint8 + Immortal uint8 + Age uint32 + Origin uint32 +} + +// MIB_IPFORWARD_TABLE2 contains a table of IP route entries. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipforward_table2. +type MibIpForwardTable2 struct { + NumEntries uint32 + Table [1]MibIpForwardRow2 +} + +// Rows returns the IP route entries in the table. +func (t *MibIpForwardTable2) Rows() []MibIpForwardRow2 { + return unsafe.Slice(&t.Table[0], t.NumEntries) +} + // MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See // https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row. type MibUnicastIpAddressRow struct { diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 426151a0..f25b7308 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -182,13 +182,17 @@ var ( procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute") procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute") procCancelMibChangeNotify2 = modiphlpapi.NewProc("CancelMibChangeNotify2") + procFreeMibTable = modiphlpapi.NewProc("FreeMibTable") procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") procGetIfEntry2Ex = modiphlpapi.NewProc("GetIfEntry2Ex") + procGetIpForwardEntry2 = modiphlpapi.NewProc("GetIpForwardEntry2") + procGetIpForwardTable2 = 
modiphlpapi.NewProc("GetIpForwardTable2") procGetUnicastIpAddressEntry = modiphlpapi.NewProc("GetUnicastIpAddressEntry") procNotifyIpInterfaceChange = modiphlpapi.NewProc("NotifyIpInterfaceChange") + procNotifyRouteChange2 = modiphlpapi.NewProc("NotifyRouteChange2") procNotifyUnicastIpAddressChange = modiphlpapi.NewProc("NotifyUnicastIpAddressChange") procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") @@ -1624,6 +1628,11 @@ func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { return } +func FreeMibTable(memory unsafe.Pointer) { + syscall.SyscallN(procFreeMibTable.Addr(), uintptr(memory)) + return +} + func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { r0, _, _ := syscall.SyscallN(procGetAdaptersAddresses.Addr(), uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer))) if r0 != 0 { @@ -1664,6 +1673,22 @@ func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { return } +func GetIpForwardEntry2(row *MibIpForwardRow2) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetIpForwardEntry2.Addr(), uintptr(unsafe.Pointer(row))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetIpForwardTable2(family uint16, table **MibIpForwardTable2) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetIpForwardTable2.Addr(), uintptr(family), uintptr(unsafe.Pointer(table))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) { r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row))) if r0 != 0 { @@ -1684,6 +1709,18 @@ func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsa return } +func NotifyRouteChange2(family uint16, 
callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.SyscallN(procNotifyRouteChange2.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { var _p0 uint32 if initialNotification { diff --git a/vendor/golang.org/x/term/terminal.go b/vendor/golang.org/x/term/terminal.go index bddb2e2a..9255449b 100644 --- a/vendor/golang.org/x/term/terminal.go +++ b/vendor/golang.org/x/term/terminal.go @@ -413,7 +413,7 @@ func (t *Terminal) eraseNPreviousChars(n int) { } } -// countToLeftWord returns then number of characters from the cursor to the +// countToLeftWord returns the number of characters from the cursor to the // start of the previous word. func (t *Terminal) countToLeftWord() int { if t.pos == 0 { @@ -438,7 +438,7 @@ func (t *Terminal) countToLeftWord() int { return t.pos - pos } -// countToRightWord returns then number of characters from the cursor to the +// countToRightWord returns the number of characters from the cursor to the // start of the next word. func (t *Terminal) countToRightWord() int { pos := t.pos @@ -478,7 +478,7 @@ func visualLength(runes []rune) int { return length } -// histroryAt unlocks the terminal and relocks it while calling History.At. +// historyAt unlocks the terminal and relocks it while calling History.At. func (t *Terminal) historyAt(idx int) (string, bool) { t.lock.Unlock() // Unlock to avoid deadlock if History methods use the output writer. defer t.lock.Lock() // panic in At (or Len) protection. 
diff --git a/vendor/helm.sh/helm/v3/pkg/action/pull.go b/vendor/helm.sh/helm/v3/pkg/action/pull.go index 78755312..a16c9bde 100644 --- a/vendor/helm.sh/helm/v3/pkg/action/pull.go +++ b/vendor/helm.sh/helm/v3/pkg/action/pull.go @@ -121,15 +121,16 @@ func (p *Pull) Run(chartRef string) (string, error) { defer os.RemoveAll(dest) } + downloadSourceRef := chartRef if p.RepoURL != "" { chartURL, err := repo.FindChartInAuthAndTLSAndPassRepoURL(p.RepoURL, p.Username, p.Password, chartRef, p.Version, p.CertFile, p.KeyFile, p.CaFile, p.InsecureSkipTLSverify, p.PassCredentialsAll, getter.All(p.Settings)) if err != nil { return out.String(), err } - chartRef = chartURL + downloadSourceRef = chartURL } - saved, v, err := c.DownloadTo(chartRef, p.Version, dest) + saved, v, err := c.DownloadTo(downloadSourceRef, p.Version, dest) if err != nil { return out.String(), err } diff --git a/vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go b/vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go index d712316c..632e6dfd 100644 --- a/vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go +++ b/vendor/helm.sh/helm/v3/pkg/chartutil/jsonschema.go @@ -21,7 +21,9 @@ import ( "crypto/tls" "errors" "fmt" + "log" "strings" + "sync" "time" "github.com/santhosh-tekuri/jsonschema/v6" @@ -82,7 +84,20 @@ func ValidateAgainstSchema(chrt *chart.Chart, values map[string]interface{}) err } for _, subchart := range chrt.Dependencies() { - subchartValues := values[subchart.Name()].(map[string]interface{}) + raw, exists := values[subchart.Name()] + if !exists || raw == nil { + // No values provided for this subchart; nothing to validate + continue + } + + subchartValues, ok := raw.(map[string]any) + if !ok { + sb.WriteString(fmt.Sprintf( + "%s:\ninvalid type for values: expected object (map), got %T\n", + subchart.Name(), raw, + )) + continue + } if err := ValidateAgainstSchema(subchart, subchartValues); err != nil { sb.WriteString(err.Error()) } @@ -115,6 +130,7 @@ func ValidateAgainstSingleSchema(values Values, 
schemaJSON []byte) (reterr error "file": jsonschema.FileLoader{}, "http": newHTTPURLLoader(), "https": newHTTPURLLoader(), + "urn": urnLoader{}, } compiler := jsonschema.NewCompiler() @@ -148,3 +164,32 @@ func (e JSONSchemaValidationError) Error() string { return errStr + "\n" } + +// URNResolverFunc allows SDK to plug a URN resolver. It must return a +// schema document compatible with the validator (e.g., result of +// jsonschema.UnmarshalJSON). +type URNResolverFunc func(urn string) (any, error) + +// URNResolver is the default resolver used by the URN loader. By default it +// returns a clear error. +var URNResolver URNResolverFunc = func(urn string) (any, error) { + return nil, fmt.Errorf("URN not resolved: %s", urn) +} + +// urnLoader implements resolution for the urn: scheme by delegating to +// URNResolver. If unresolved, it logs a warning and returns a permissive +// boolean-true schema to avoid hard failures (back-compat behavior). +type urnLoader struct{} + +// warnedURNs ensures we log the unresolved-URN warning only once per URN. +var warnedURNs sync.Map + +func (l urnLoader) Load(urlStr string) (any, error) { + if doc, err := URNResolver(urlStr); err == nil && doc != nil { + return doc, nil + } + if _, loaded := warnedURNs.LoadOrStore(urlStr, struct{}{}); !loaded { + log.Printf("WARNING: unresolved URN reference ignored; using permissive schema: %s", urlStr) + } + return jsonschema.UnmarshalJSON(strings.NewReader("true")) +} diff --git a/vendor/helm.sh/helm/v3/pkg/getter/getter.go b/vendor/helm.sh/helm/v3/pkg/getter/getter.go index 1acb2093..7c73a46a 100644 --- a/vendor/helm.sh/helm/v3/pkg/getter/getter.go +++ b/vendor/helm.sh/helm/v3/pkg/getter/getter.go @@ -196,24 +196,32 @@ const ( var defaultOptions = []Option{WithTimeout(time.Second * DefaultHTTPTimeout)} -var httpProvider = Provider{ - Schemes: []string{"http", "https"}, - New: func(options ...Option) (Getter, error) { - options = append(options, defaultOptions...) 
- return NewHTTPGetter(options...) - }, -} - -var ociProvider = Provider{ - Schemes: []string{registry.OCIScheme}, - New: NewOCIGetter, +func Getters(extraOpts ...Option) Providers { + return Providers{ + Provider{ + Schemes: []string{"http", "https"}, + New: func(options ...Option) (Getter, error) { + options = append(options, defaultOptions...) + options = append(options, extraOpts...) + return NewHTTPGetter(options...) + }, + }, + Provider{ + Schemes: []string{registry.OCIScheme}, + New: func(options ...Option) (Getter, error) { + options = append(options, defaultOptions...) + options = append(options, extraOpts...) + return NewOCIGetter(options...) + }, + }, + } } // All finds all of the registered getters as a list of Provider instances. // Currently, the built-in getters and the discovered plugins with downloader // notations are collected. -func All(settings *cli.EnvSettings) Providers { - result := Providers{httpProvider, ociProvider} +func All(settings *cli.EnvSettings, opts ...Option) Providers { + result := Getters(opts...) pluginDownloaders, _ := collectPlugins(settings) result = append(result, pluginDownloaders...) 
return result diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go index 5d205415..79a748b7 100644 --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go +++ b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/leaselock.go @@ -77,6 +77,9 @@ func (ll *LeaseLock) Update(ctx context.Context, ler LeaderElectionRecord) error ll.lease.Spec = LeaderElectionRecordToLeaseSpec(&ler) if ll.Labels != nil { + if ll.lease.Labels == nil { + ll.lease.Labels = map[string]string{} + } // Only overwrite the labels that are specifically set for k, v := range ll.Labels { ll.lease.Labels[k] = v diff --git a/vendor/k8s.io/client-go/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go index 12204612..48c78b59 100644 --- a/vendor/k8s.io/client-go/util/cert/cert.go +++ b/vendor/k8s.io/client-go/util/cert/cert.go @@ -75,13 +75,15 @@ func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, erro CommonName: cfg.CommonName, Organization: cfg.Organization, }, - DNSNames: []string{cfg.CommonName}, NotBefore: notBefore, NotAfter: now.Add(duration365d * 10).UTC(), KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, BasicConstraintsValid: true, IsCA: true, } + if len(cfg.CommonName) > 0 { + tmpl.DNSNames = []string{cfg.CommonName} + } certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key) if err != nil { diff --git a/vendor/modules.txt b/vendor/modules.txt index a8d920a9..0038f875 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -48,7 +48,7 @@ github.com/chai2010/gettext-go github.com/chai2010/gettext-go/mo github.com/chai2010/gettext-go/plural github.com/chai2010/gettext-go/po -# github.com/containerd/containerd v1.7.28 +# github.com/containerd/containerd v1.7.29 ## explicit; go 1.23.0 github.com/containerd/containerd/archive/compression 
github.com/containerd/containerd/content @@ -71,9 +71,10 @@ github.com/containerd/platforms ## explicit; go 1.24.0 github.com/coreos/go-oidc/v3/oidc github.com/coreos/go-oidc/v3/oidc/oidctest -# github.com/cyphar/filepath-securejoin v0.4.1 +# github.com/cyphar/filepath-securejoin v0.6.0 ## explicit; go 1.18 github.com/cyphar/filepath-securejoin +github.com/cyphar/filepath-securejoin/internal/consts # github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ## explicit github.com/davecgh/go-spew/spew @@ -228,7 +229,7 @@ github.com/liggitt/tabwriter github.com/mailru/easyjson/buffer github.com/mailru/easyjson/jlexer github.com/mailru/easyjson/jwriter -# github.com/mark3labs/mcp-go v0.42.0 +# github.com/mark3labs/mcp-go v0.43.0 ## explicit; go 1.23.0 github.com/mark3labs/mcp-go/client github.com/mark3labs/mcp-go/client/transport @@ -366,7 +367,7 @@ go.yaml.in/yaml/v2 # go.yaml.in/yaml/v3 v3.0.4 ## explicit; go 1.16 go.yaml.in/yaml/v3 -# golang.org/x/crypto v0.43.0 +# golang.org/x/crypto v0.44.0 ## explicit; go 1.24.0 golang.org/x/crypto/bcrypt golang.org/x/crypto/blowfish @@ -380,9 +381,8 @@ golang.org/x/crypto/openpgp/packet golang.org/x/crypto/openpgp/s2k golang.org/x/crypto/pbkdf2 golang.org/x/crypto/scrypt -# golang.org/x/net v0.46.0 +# golang.org/x/net v0.47.0 ## explicit; go 1.24.0 -golang.org/x/net/context golang.org/x/net/html golang.org/x/net/html/atom golang.org/x/net/http/httpguts @@ -393,26 +393,26 @@ golang.org/x/net/internal/httpcommon golang.org/x/net/internal/socks golang.org/x/net/proxy golang.org/x/net/websocket -# golang.org/x/oauth2 v0.32.0 +# golang.org/x/oauth2 v0.33.0 ## explicit; go 1.24.0 golang.org/x/oauth2 golang.org/x/oauth2/google/externalaccount golang.org/x/oauth2/google/internal/impersonate golang.org/x/oauth2/google/internal/stsexchange golang.org/x/oauth2/internal -# golang.org/x/sync v0.17.0 +# golang.org/x/sync v0.18.0 ## explicit; go 1.24.0 golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.37.0 +# 
golang.org/x/sys v0.38.0 ## explicit; go 1.24.0 golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.36.0 +# golang.org/x/term v0.37.0 ## explicit; go 1.24.0 golang.org/x/term -# golang.org/x/text v0.30.0 +# golang.org/x/text v0.31.0 ## explicit; go 1.24.0 golang.org/x/text/encoding golang.org/x/text/encoding/internal @@ -495,7 +495,7 @@ gopkg.in/inf.v0 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# helm.sh/helm/v3 v3.19.0 +# helm.sh/helm/v3 v3.19.2 ## explicit; go 1.24.0 helm.sh/helm/v3/internal/fileutil helm.sh/helm/v3/internal/resolver @@ -534,7 +534,7 @@ helm.sh/helm/v3/pkg/storage/driver helm.sh/helm/v3/pkg/time helm.sh/helm/v3/pkg/time/ctime helm.sh/helm/v3/pkg/uploader -# k8s.io/api v0.34.1 +# k8s.io/api v0.34.2 ## explicit; go 1.24.0 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -596,7 +596,7 @@ k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 k8s.io/api/storagemigration/v1alpha1 -# k8s.io/apiextensions-apiserver v0.34.1 +# k8s.io/apiextensions-apiserver v0.34.2 ## explicit; go 1.24.0 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 @@ -607,7 +607,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 -# k8s.io/apimachinery v0.34.1 +# k8s.io/apimachinery v0.34.2 ## explicit; go 1.24.0 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -679,16 +679,16 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.34.1 +# k8s.io/apiserver v0.34.2 ## explicit; go 1.24.0 
k8s.io/apiserver/pkg/endpoints/deprecation -# k8s.io/cli-runtime v0.34.1 +# k8s.io/cli-runtime v0.34.2 ## explicit; go 1.24.0 k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/genericiooptions k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v0.34.1 +# k8s.io/client-go v0.34.2 ## explicit; go 1.24.0 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 @@ -980,7 +980,7 @@ k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/component-base v0.34.1 +# k8s.io/component-base v0.34.2 ## explicit; go 1.24.0 k8s.io/component-base/version # k8s.io/klog/v2 v2.130.1 @@ -1006,7 +1006,7 @@ k8s.io/kube-openapi/pkg/spec3 k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/util/proto/validation k8s.io/kube-openapi/pkg/validation/spec -# k8s.io/kubectl v0.34.1 +# k8s.io/kubectl v0.34.2 ## explicit; go 1.24.0 k8s.io/kubectl/pkg/cmd/util k8s.io/kubectl/pkg/metricsutil @@ -1017,7 +1017,7 @@ k8s.io/kubectl/pkg/util/openapi k8s.io/kubectl/pkg/util/templates k8s.io/kubectl/pkg/util/term k8s.io/kubectl/pkg/validation -# k8s.io/metrics v0.34.1 +# k8s.io/metrics v0.34.2 ## explicit; go 1.24.0 k8s.io/metrics/pkg/apis/metrics k8s.io/metrics/pkg/apis/metrics/v1alpha1 @@ -1067,7 +1067,7 @@ oras.land/oras-go/v2/registry/remote/credentials/trace oras.land/oras-go/v2/registry/remote/errcode oras.land/oras-go/v2/registry/remote/internal/errutil oras.land/oras-go/v2/registry/remote/retry -# sigs.k8s.io/controller-runtime v0.22.3 +# sigs.k8s.io/controller-runtime v0.22.4 ## explicit; go 1.24.0 sigs.k8s.io/controller-runtime/pkg/client sigs.k8s.io/controller-runtime/pkg/client/apiutil diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go index cacba4a9..d4223eda 100644 --- 
a/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go @@ -213,7 +213,12 @@ func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object, o // List implements client.Client. func (n *namespacedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { - if n.namespace != "" { + isNamespaceScoped, err := n.IsObjectNamespaced(obj) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + if isNamespaceScoped && n.namespace != "" { opts = append(opts, InNamespace(n.namespace)) } return n.client.List(ctx, obj, opts...) diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go index 9bb81ed2..c9f19da9 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/server.go @@ -109,7 +109,11 @@ var ( // Environment creates a Kubernetes test environment that will start / stop the Kubernetes control plane and // install extension APIs. type Environment struct { - // ControlPlane is the ControlPlane including the apiserver and etcd + // ControlPlane is the ControlPlane including the apiserver and etcd. + // Binary paths (APIServer.Path, Etcd.Path, KubectlPath) can be pre-configured in ControlPlane. + // If DownloadBinaryAssets is true, the downloaded paths will always be used. + // If DownloadBinaryAssets is false and paths are not pre-configured (default is empty), they will be + // automatically resolved using BinaryAssetsDirectory. ControlPlane controlplane.ControlPlane // Scheme is used to determine if conversion webhooks should be enabled @@ -211,6 +215,40 @@ func (te *Environment) Stop() error { return te.ControlPlane.Stop() } +// configureBinaryPaths configures the binary paths for the API server, etcd, and kubectl. 
+// If DownloadBinaryAssets is true, it downloads and uses those paths. +// If DownloadBinaryAssets is false, it only sets paths that are not already configured (empty). +func (te *Environment) configureBinaryPaths() error { + apiServer := te.ControlPlane.GetAPIServer() + + if te.ControlPlane.Etcd == nil { + te.ControlPlane.Etcd = &controlplane.Etcd{} + } + + if te.DownloadBinaryAssets { + apiServerPath, etcdPath, kubectlPath, err := downloadBinaryAssets(context.TODO(), + te.BinaryAssetsDirectory, te.DownloadBinaryAssetsVersion, te.DownloadBinaryAssetsIndexURL) + if err != nil { + return err + } + + apiServer.Path = apiServerPath + te.ControlPlane.Etcd.Path = etcdPath + te.ControlPlane.KubectlPath = kubectlPath + } else { + if apiServer.Path == "" { + apiServer.Path = process.BinPathFinder("kube-apiserver", te.BinaryAssetsDirectory) + } + if te.ControlPlane.Etcd.Path == "" { + te.ControlPlane.Etcd.Path = process.BinPathFinder("etcd", te.BinaryAssetsDirectory) + } + if te.ControlPlane.KubectlPath == "" { + te.ControlPlane.KubectlPath = process.BinPathFinder("kubectl", te.BinaryAssetsDirectory) + } + } + return nil +} + // Start starts a local Kubernetes server and updates te.ApiserverPort with the port it is listening on. 
func (te *Environment) Start() (*rest.Config, error) { if te.useExistingCluster() { @@ -229,10 +267,6 @@ func (te *Environment) Start() (*rest.Config, error) { } else { apiServer := te.ControlPlane.GetAPIServer() - if te.ControlPlane.Etcd == nil { - te.ControlPlane.Etcd = &controlplane.Etcd{} - } - if os.Getenv(envAttachOutput) == "true" { te.AttachControlPlaneOutput = true } @@ -243,6 +277,9 @@ func (te *Environment) Start() (*rest.Config, error) { if apiServer.Err == nil { apiServer.Err = os.Stderr } + if te.ControlPlane.Etcd == nil { + te.ControlPlane.Etcd = &controlplane.Etcd{} + } if te.ControlPlane.Etcd.Out == nil { te.ControlPlane.Etcd.Out = os.Stdout } @@ -251,20 +288,8 @@ func (te *Environment) Start() (*rest.Config, error) { } } - if te.DownloadBinaryAssets { - apiServerPath, etcdPath, kubectlPath, err := downloadBinaryAssets(context.TODO(), - te.BinaryAssetsDirectory, te.DownloadBinaryAssetsVersion, te.DownloadBinaryAssetsIndexURL) - if err != nil { - return nil, err - } - - apiServer.Path = apiServerPath - te.ControlPlane.Etcd.Path = etcdPath - te.ControlPlane.KubectlPath = kubectlPath - } else { - apiServer.Path = process.BinPathFinder("kube-apiserver", te.BinaryAssetsDirectory) - te.ControlPlane.Etcd.Path = process.BinPathFinder("etcd", te.BinaryAssetsDirectory) - te.ControlPlane.KubectlPath = process.BinPathFinder("kubectl", te.BinaryAssetsDirectory) + if err := te.configureBinaryPaths(); err != nil { + return nil, fmt.Errorf("failed to configure binary paths: %w", err) } if err := te.defaultTimeouts(); err != nil {