diff --git a/.buildkite/pipeline.trigger.integration.tests.sh b/.buildkite/pipeline.trigger.integration.tests.sh index 5dffc5d0e3..1676aaeed3 100755 --- a/.buildkite/pipeline.trigger.integration.tests.sh +++ b/.buildkite/pipeline.trigger.integration.tests.sh @@ -44,6 +44,7 @@ CHECK_PACKAGES_TESTS=( test-check-packages-with-kind test-check-packages-with-custom-agent test-check-packages-benchmarks + test-check-packages-independent-script ) for test in "${CHECK_PACKAGES_TESTS[@]}"; do test_name=${test#"test-check-packages-"} diff --git a/Makefile b/Makefile index 94dd36f1bf..7a79e0a2f1 100644 --- a/Makefile +++ b/Makefile @@ -97,6 +97,9 @@ test-check-packages-with-kind: test-check-packages-other: PACKAGE_TEST_TYPE=other ./scripts/test-check-packages.sh +test-check-packages-independent-script: + elastic-package test script -C test/packages/other/with_script --external-stack=false --defer-cleanup 1s + test-check-packages-false-positives: PACKAGE_TEST_TYPE=false_positives ./scripts/test-check-false-positives.sh @@ -133,7 +136,7 @@ test-profiles-command: test-check-update-version: ./scripts/test-check-update-version.sh -test: test-go test-stack-command test-check-packages test-profiles-command test-build-install-zip test-build-zip test-build-install-zip-file test-build-install-zip-file-shellinit test-check-update-version test-profiles-command test-system-test-flags +test: test-go test-stack-command test-check-packages test-check-packages-independent-script test-profiles-command test-build-install-zip test-build-zip test-build-install-zip-file test-build-install-zip-file-shellinit test-check-update-version test-profiles-command test-system-test-flags check-git-clean: git update-index --really-refresh diff --git a/README.md b/README.md index a705a23033..56f062e35f 100644 --- a/README.md +++ b/README.md @@ -624,6 +624,12 @@ _Context: package_ Run policy tests for the package. +### `elastic-package test script` + +_Context: package_ + +Run script tests for the package. 
+ ### `elastic-package test static` _Context: package_ diff --git a/cmd/testrunner.go b/cmd/testrunner.go index f06a33045c..c63c246aeb 100644 --- a/cmd/testrunner.go +++ b/cmd/testrunner.go @@ -31,6 +31,7 @@ import ( "github.com/elastic/elastic-package/internal/testrunner/runners/policy" "github.com/elastic/elastic-package/internal/testrunner/runners/static" "github.com/elastic/elastic-package/internal/testrunner/runners/system" + "github.com/elastic/elastic-package/internal/testrunner/script" ) const testLongDescription = `Use this command to run tests on a package. Currently, the following types of tests are available: @@ -95,6 +96,9 @@ func setupTestCommand() *cobraext.Command { systemCmd := getTestRunnerSystemCommand() cmd.AddCommand(systemCmd) + scriptCmd := getTestRunnerScriptCommand() + cmd.AddCommand(scriptCmd) + policyCmd := getTestRunnerPolicyCommand() cmd.AddCommand(policyCmd) @@ -600,6 +604,46 @@ func testRunnerSystemCommandAction(cmd *cobra.Command, args []string) error { return nil } +func getTestRunnerScriptCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "script", + Short: "Run script tests", + Long: "Run script tests for the package.", + Args: cobra.NoArgs, + RunE: testRunnerScriptCommandAction, + } + + cmd.Flags().String(cobraext.ScriptsFlagName, "", cobraext.ScriptsFlagDescription) + cmd.Flags().Bool(cobraext.ExternalStackFlagName, true, cobraext.ExternalStackFlagDescription) + cmd.Flags().StringSliceP(cobraext.DataStreamsFlagName, "d", nil, cobraext.DataStreamsFlagDescription) + cmd.Flags().String(cobraext.RunPatternFlagName, "", cobraext.RunPatternFlagDescription) + cmd.Flags().BoolP(cobraext.UpdateScriptTestArchiveFlagName, "u", false, cobraext.UpdateScriptTestArchiveFlagDescription) + cmd.Flags().BoolP(cobraext.WorkScriptTestFlagName, "w", false, cobraext.WorkScriptTestFlagDescription) + cmd.Flags().Bool(cobraext.ContinueOnErrorFlagName, false, cobraext.ContinueOnErrorFlagDescription) + 
cmd.Flags().Bool(cobraext.VerboseScriptFlagName, false, cobraext.VerboseScriptFlagDescription) + + cmd.MarkFlagsMutuallyExclusive(cobraext.ScriptsFlagName, cobraext.DataStreamsFlagName) + + return cmd +} + +func testRunnerScriptCommandAction(cmd *cobra.Command, args []string) error { + cmd.Println("Run script tests for the package") + pkgRoot, err := packages.FindPackageRoot() + if err != nil { + if err == packages.ErrPackageRootNotFound { + return errors.New("package root not found") + } + return fmt.Errorf("locating package root failed: %w", err) + } + pkg := filepath.Base(pkgRoot) + cmd.Printf("--- Test results for package: %s - START ---\n", pkg) + err = script.Run(cmd.OutOrStderr(), cmd, args) + cmd.Printf("--- Test results for package: %s - END ---\n", pkg) + cmd.Println("Done") + return err +} + +func getTestRunnerPolicyCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "policy", diff --git a/docs/howto/script_testing.md b/docs/howto/script_testing.md new file mode 100644 index 0000000000..ed1b25f208 --- /dev/null +++ b/docs/howto/script_testing.md @@ -0,0 +1,210 @@ +# HOWTO: Writing script tests for a package + +Script testing is an advanced topic that assumes knowledge of [pipeline](./pipeline_testing.md) +and [system](./system_testing.md) testing. + +Testing packages with script testing is only intended for testing cases that +cannot be adequately covered by the pipeline and system testing tools such as +testing failure paths and package upgrades. It can also be used for debugging +integrations stack issues. + +## Introduction + +The script testing system is built on the Go testscript package with extensions +provided to allow scripting of stack and integration operations such as +bringing up a stack, installing packages and running agents. For example, using +these commands it is possible to express a system test as described in the +system testing [Conceptual Process](./system_testing.md#conceptual-process) section. 
+ + +## Expressing tests + +Tests are written as [txtar format](https://pkg.go.dev/golang.org/x/tools/txtar#hdr-Txtar_format) +files in a data stream's \_dev/test/scripts directory. The logic for the test is +written in the txtar file's initial comment section and any additional resource +files are included in the txtar file's files sections. + +The standard commands and behaviors for testscript scripts are documented in +the [testscript package documentation](https://pkg.go.dev/github.com/rogpeppe/go-internal/testscript). + + +## Extension commands + +The test script command provides additional commands to aid in interacting with +a stack, starting agents and services and validating results. + +- `sleep `: sleep for a duration (Go `time.Duration` parse syntax) +- `date []`: print the current time in RFC3339, optionally setting a variable with the value +- `GET [-json] `: perform an HTTP GET request, emitting the response body to stdout and optionally formatting indented JSON +- `POST [-json] [-content ] `: perform an HTTP POST request, emitting the response body to stdout and optionally formatting indented JSON +- `match_file `: perform a grep pattern match between a pattern file and a data file + +- stack commands: + - `stack_up [-profile ] [-provider ] [-timeout ] `: bring up a version of the Elastic stack + - `use_stack [-profile ] [-timeout ]`: use a running Elastic stack + - `stack_down [-profile ] [-provider ] [-timeout ]`: take down a started Elastic stack + - `dump_logs [-profile ] [-provider ] [-timeout ] [-since ] []`: dump the logs from the stack into a directory + - `get_policy [-profile ] [-timeout ] `: print the details for a policy + +- agent commands: + - `install_agent [-profile ] [-timeout ] []`: install an Elastic Agent policy, setting the environment variable named in the positional argument + - `uninstall_agent [-profile ] [-timeout ]`: remove an installed Elastic Agent policy + +- package commands: + - `add_package [-profile ] [-timeout ]`: 
add the current package's assets + - `remove_package [-profile ] [-timeout ]`: remove assets for the current package + - `add_package_zip [-profile ] [-timeout ] `: add assets from a Zip-packaged integration package + - `remove_package_zip [-profile ] [-timeout ] `: remove assets for Zip-packaged integration package + - `upgrade_package_latest [-profile ] [-timeout ] []`: upgrade the current package or another named package to the latest version + +- data stream commands: + - `add_data_stream [-profile ] [-timeout ] [-policy ] `: add a data stream policy, setting the environment variable named in the positional argument + - `remove_data_stream [-profile ] [-timeout ] `: remove a data stream policy + - `get_docs [-profile ] [-timeout ] []`: get documents from a data stream + +- docker commands: + - `docker_up [-profile ] [-timeout ] `: start a docker service defined in the provided directory + - `docker_down [-timeout ] `: stop a started docker service and print the docker logs to stdout + - `docker_signal [-timeout ] `: send a signal to a running docker service + - `docker_wait_exit [-timeout ] `: wait for a docker service to exit + +- pipeline commands: + - `install_pipelines [-profile ] [-timeout ] `: install ingest pipelines from a path + - `simulate [-profile ] [-timeout ] `: run a pipeline test, printing the result as pretty-printed JSON to standard output + - `uninstall_pipelines [-profile ] [-timeout ] `: remove installed ingest pipelines + + +## Environment variables + +- `PROFILE`: the `elastic-package` profile being used +- `CONFIG_ROOT`: the `elastic-package` configuration root path +- `CONFIG_PROFILES`: the `elastic-package` profiles configuration root path +- `HOME`: the user's home directory path +- `PKG`: the name of the running package +- `PKG_ROOT`: the path to the root of the running package +- `CURRENT_VERSION`: the current version of the package +- `PREVIOUS_VERSION`: the previous version of the package +- `DATA_STREAM`: the name of the data 
stream +- `DATA_STREAM_ROOT`: the path to the root of the data stream + + +## Conditions + +The testscript package allows conditions to be set that allow conditional +execution of commands. The test script command adds a condition that reflects +the state of the `--external-stack` flag. This allows tests to be written that +conditionally use either an externally managed stack, or a stack that has been +started by the test script. + + +## Example + +As an example, a basic system test could be expressed as follows. +``` +# Only run the test if --external-stack=true. +[!external_stack] skip 'Skipping external stack test.' +# Only run the test if the jq executable is in $PATH. This is needed for a test below. +[!exec:jq] skip 'Skipping test requiring absent jq command' + +# Register running stack. +use_stack -profile ${CONFIG_PROFILES}/default + +# Install an agent. +install_agent -profile ${CONFIG_PROFILES}/default NETWORK_NAME + +# Bring up a docker container. +# +# The service is described in the test-hits/docker-compose.yml below with +# its logs in test-hits/logs/generated.log. +docker_up -profile ${CONFIG_PROFILES}/default -network ${NETWORK_NAME} test-hits + +# Add the package resources. +add_package -profile ${CONFIG_PROFILES}/default + +# Add the data stream. +# +# The configuration for the test is described in test_config.yaml below. +add_data_stream -profile ${CONFIG_PROFILES}/default test_config.yaml DATA_STREAM_NAME + +# Start the service. +docker_signal test-hits SIGHUP + +# Wait for the service to exit. +docker_wait_exit -timeout 5m test-hits + +# Check that we can see our policy. +get_policy -profile ${CONFIG_PROFILES}/default -timeout 1m ${DATA_STREAM_NAME} +cp stdout got_policy.json +exec jq '.name=="'${DATA_STREAM_NAME}'"' got_policy.json +stdout true + +# Take down the service and check logs for our message. +docker_down test-hits +! stderr . +stdout '"total_lines":10' + +# Get documents from the data stream. 
+get_docs -profile ${CONFIG_PROFILES}/default -want 10 -timeout 5m ${DATA_STREAM_NAME} +cp stdout got_docs.json + +# Remove the data stream. +remove_data_stream -profile ${CONFIG_PROFILES}/default ${DATA_STREAM_NAME} + +# Uninstall the agent. +uninstall_agent -profile ${CONFIG_PROFILES}/default -timeout 1m + +# Remove the package resources. +remove_package -profile ${CONFIG_PROFILES}/default + +-- test-hits/docker-compose.yml -- +version: '2.3' +services: + test-hits: + image: docker.elastic.co/observability/stream:v0.20.0 + volumes: + - ./logs:/logs:ro + command: log --start-signal=SIGHUP --delay=5s --addr elastic-agent:9999 -p=tcp /logs/generated.log +-- test-hits/logs/generated.log -- +ntpd[1001]: kernel time sync enabled utl +restorecond: : Reset file context quasiarc: liqua +auditd[5699]: Audit daemon rotating log files +anacron[5066]: Normal exit ehend +restorecond: : Reset file context vol: luptat +heartbeat: : < Processing command: accept +restorecond: : Reset file context nci: ofdeFin +auditd[6668]: Audit daemon rotating log files +anacron[1613]: Normal exit mvolu +ntpd[2959]: ntpd gelit-r tatno +-- test_config.yaml -- +input: tcp +vars: ~ +data_stream: + vars: + tcp_host: 0.0.0.0 + tcp_port: 9999 +``` + +Other complete examples can be found in the [with_script test package](https://github.com/elastic/elastic-package/blob/main/test/packages/other/with_script/data_stream/first/_dev/test/scripts). 
+ + +## Running script tests + +The `elastic-package test script` command has the following sub-command-specific +flags: + +- `--continue`: continue running the script if an error occurs +- `--data-streams`: comma-separated data streams to test +- `--external-stack`: use external stack for script tests (default true) +- `--run`: run only tests matching the regular expression +- `--scripts`: path to directory containing test scripts (advanced use only) +- `--update`: update archive file if a cmp fails +- `--verbose-scripts`: verbose script test output (show all script logging) +- `--work`: print temporary work directory and do not remove when done + + +## Limitations + +While the testscript package allows reference to paths outside the configuration +root and the package's root, the backing `elastic-package` infrastructure does +not, so it is advised that tests only refer to paths within the `$WORK` and +`$PKG_ROOT` directories. \ No newline at end of file diff --git a/go.mod b/go.mod index 293f297ec1..638946decb 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/elastic/go-licenser v0.4.2 github.com/elastic/go-resource v0.2.0 github.com/elastic/go-ucfg v0.8.8 - github.com/elastic/package-spec/v3 v3.5.0 + github.com/elastic/package-spec/v3 v3.5.1 github.com/fatih/color v1.18.0 github.com/go-viper/mapstructure/v2 v2.4.0 github.com/google/go-cmp v0.7.0 @@ -32,6 +32,7 @@ require ( github.com/mholt/archives v0.1.5 github.com/olekukonko/tablewriter v1.1.0 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 + github.com/rogpeppe/go-internal v1.13.1 github.com/shirou/gopsutil/v3 v3.24.5 github.com/spf13/cobra v1.10.1 github.com/stretchr/testify v1.11.1 @@ -183,7 +184,6 @@ require ( golang.org/x/term v0.36.0 // indirect golang.org/x/text v0.30.0 // indirect golang.org/x/time v0.12.0 // indirect - golang.org/x/tools/go/expect v0.1.1-deprecated // indirect google.golang.org/protobuf v1.36.5 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 
// indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index b34f5a5d5c..e71447d4a1 100644 --- a/go.sum +++ b/go.sum @@ -132,8 +132,8 @@ github.com/elastic/gojsonschema v1.2.1 h1:cUMbgsz0wyEB4x7xf3zUEvUVDl6WCz2RKcQPul github.com/elastic/gojsonschema v1.2.1/go.mod h1:biw5eBS2Z4T02wjATMRSfecfjCmwaDPvuaqf844gLrg= github.com/elastic/kbncontent v0.1.4 h1:GoUkJkqkn2H6iJTnOHcxEqYVVYyjvcebLQVaSR1aSvU= github.com/elastic/kbncontent v0.1.4/go.mod h1:kOPREITK9gSJsiw/WKe7QWSO+PRiZMyEFQCw+CMLAHI= -github.com/elastic/package-spec/v3 v3.5.0 h1:rvB+lWXXoUkSVx4TaHerV/eO6uN0NH1E5sPW1kW74Lk= -github.com/elastic/package-spec/v3 v3.5.0/go.mod h1:dH//Q1geKx3fxC0lwPrVmnjN6RMqyDf5tnsw7trwqWE= +github.com/elastic/package-spec/v3 v3.5.1 h1:TSGG2ZO3P7gF0MrmMkZDYVlTVifJAF2hHaOKPwJMlKg= +github.com/elastic/package-spec/v3 v3.5.1/go.mod h1:Wj829iTa2lFVCz0qrXJcx9bVLPYMrYb8guQeYwZPNnA= github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= diff --git a/internal/cobraext/flags.go b/internal/cobraext/flags.go index 1838404fd6..906d1d585f 100644 --- a/internal/cobraext/flags.go +++ b/internal/cobraext/flags.go @@ -112,6 +112,9 @@ const ( CheckConditionFlagName = "check-condition" CheckConditionFlagDescription = "check if the condition is met for the package, but don't install the package (e.g. 
kibana.version=7.10.0)" + ContinueOnErrorFlagName = "continue" + ContinueOnErrorFlagDescription = "continue running the script if an error occurs" + DaemonModeFlagName = "daemon" DaemonModeFlagDescription = "daemon mode" @@ -130,6 +133,9 @@ const ( DumpOutputFlagName = "output" DumpOutputFlagDescription = "path to directory where exported assets will be stored" + ExternalStackFlagName = "external-stack" + ExternalStackFlagDescription = "use external stack for script tests" + FailOnMissingFlagName = "fail-on-missing" FailOnMissingFlagDescription = "fail if tests are missing" @@ -165,6 +171,12 @@ const ( ReportOutputPathFlagName = "report-output-path" ReportOutputPathFlagDescription = "output path for test report (defaults to %q in build directory)" + RunPatternFlagName = "run" + RunPatternFlagDescription = "run only tests matching the regular expression" + + ScriptsFlagName = "scripts" + ScriptsFlagDescription = "path to directory containing test scripts" + ShowAllFlagName = "all" ShowAllFlagDescription = "show all deployed package revisions" @@ -224,6 +236,15 @@ const ( NoProvisionFlagName = "no-provision" NoProvisionFlagDescription = "trigger just system tests wihout setup nor teardown" + UpdateScriptTestArchiveFlagName = "update" + UpdateScriptTestArchiveFlagDescription = "update archive file if a cmp fails" + + VerboseScriptFlagName = "verbose-scripts" + VerboseScriptFlagDescription = "verbose script test output" + + WorkScriptTestFlagName = "work" + WorkScriptTestFlagDescription = "print temporary work directory and do not remove when done" + ZipPackageFilePathFlagName = "zip" ZipPackageFilePathFlagShorthand = "z" ZipPackageFilePathFlagDescription = "path to the zip package file (*.zip)" diff --git a/internal/testrunner/script/agents.go b/internal/testrunner/script/agents.go new file mode 100644 index 0000000000..81c180ebd7 --- /dev/null +++ b/internal/testrunner/script/agents.go @@ -0,0 +1,234 @@ +// Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package script + +import ( + "context" + "errors" + "flag" + "fmt" + "time" + + "github.com/rogpeppe/go-internal/testscript" + + "github.com/elastic/elastic-package/internal/agentdeployer" + "github.com/elastic/elastic-package/internal/common" + "github.com/elastic/elastic-package/internal/kibana" + "github.com/elastic/elastic-package/internal/testrunner/runners/system" +) + +func installAgent(ts *testscript.TestScript, neg bool, args []string) { + clearStdStreams(ts) + + pkgRoot := ts.Getenv("PKG_ROOT") + if pkgRoot == "" { + ts.Fatalf("PKG_ROOT is not set") + } + pkg := ts.Getenv("PKG") + if pkg == "" { + ts.Fatalf("PKG is not set") + } + ds := ts.Getenv("DATA_STREAM") + if ds == "" { + ts.Fatalf("DATA_STREAM is not set") + } + + stacks, ok := ts.Value(runningStackTag{}).(map[string]*runningStack) + if !ok { + ts.Fatalf("no active stacks registry") + } + agents, ok := ts.Value(installedAgentsTag{}).(map[string]*installedAgent) + if !ok { + ts.Fatalf("no installed agent registry") + } + + flg := flag.NewFlagSet("install", flag.ContinueOnError) + profName := flg.String("profile", "default", "profile name") + timeout := flg.Duration("timeout", 0, "timeout (zero or lower indicates no timeout)") + ts.Check(flg.Parse(args)) + if flg.NArg() != 0 && flg.NArg() != 1 { + ts.Fatalf("usage: install_agent [-profile ] [-timeout ] []") + } + + var networkNameLabel string + if flg.NArg() == 1 { + networkNameLabel = flg.Arg(0) + } + + stk, ok := stacks[*profName] + if !ok { + ts.Fatalf("no active client for %s", *profName) + } + + ctx := context.Background() + if *timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, *timeout) + defer cancel() + } + + var installed installedAgent + defer func() { + r := recover() + switch r := 
r.(type) { + case nil: + return + case error: + if errors.Is(r, failedRun) { + ts.Check(decoratedWith("deleting failed policies", deletePolicies(ctx, stk.kibana, &installed))) + } + } + panic(r) + }() + + installed.started = time.Now() + var err error + installed.enrolledPolicy, err = stk.kibana.CreatePolicy(ctx, kibana.Policy{ + Name: fmt.Sprintf("ep-test-system-enroll-%s-%s-%s-%s-%s", pkg, ds, "", ts.Name(), installed.started.Format("20060102T15:04:05Z")), + Description: fmt.Sprintf("test policy created by elastic-package to enroll agent for data stream %s/%s", pkg, ds), + Namespace: common.CreateTestRunID(), + }) + ts.Check(decoratedWith("creating kibana enrolled policy", err)) + installed.testingPolicy, err = stk.kibana.CreatePolicy(ctx, kibana.Policy{ + Name: fmt.Sprintf("ep-test-system-%s-%s-%s-%s-%s", pkg, ds, "", ts.Name(), installed.started.Format("20060102T15:04:05Z")), + Description: fmt.Sprintf("test policy created by elastic-package to enroll agent for data stream %s/%s", pkg, ds), + Namespace: common.CreateTestRunID(), + }) + ts.Check(decoratedWith("creating kibana testing policy", err)) + + dep, err := agentdeployer.NewCustomAgentDeployer(agentdeployer.DockerComposeAgentDeployerOptions{ + Profile: stk.profile, + StackVersion: stk.version, + PackageName: pkg, + DataStream: ds, + PolicyName: installed.enrolledPolicy.Name, + }) + ts.Check(decoratedWith("making agent deployer", err)) + + info := agentdeployer.AgentInfo{Name: pkg} + info.Policy.Name = installed.enrolledPolicy.Name + info.Policy.ID = installed.enrolledPolicy.ID + info.Agent.AgentSettings.Runtime = "docker" + info.Logs.Folder.Agent = system.ServiceLogsAgentDir + info.Test.RunID = common.CreateTestRunID() + info.Logs.Folder.Local, err = agentdeployer.CreateServiceLogsDir(stk.profile, pkgRoot, ds, info.Test.RunID) + ts.Check(decoratedWith("creating service logs directory", err)) + + // This will break for internal stacks if + // ELASTIC_PACKAGE_CA_CERT is set. 
¯\_(ツ)_/¯ + installed.deployed, err = dep.SetUp(ctx, info) + ts.Check(decoratedWith("setting up agent", err)) + if networkNameLabel != "" { + ts.Setenv(networkNameLabel, installed.deployed.Info().NetworkName) + } + polID := installed.deployed.Info().Policy.ID + ts.Check(decoratedWith("getting kibana agent", doKibanaAgent(ctx, stk.kibana, func(a kibana.Agent) (bool, error) { + if a.PolicyID != polID { + return false, nil + } + installed.enrolled = a + return true, nil + }))) + ts.Check(decoratedWith("setting log level to debug", stk.kibana.SetAgentLogLevel(ctx, installed.enrolled.ID, "debug"))) + + agents[*profName] = &installed + fmt.Fprintf(ts.Stdout(), "installed agent policies for %s/%s\n", pkg, ds) +} + +func doKibanaAgent(ctx context.Context, cli *kibana.Client, fn func(a kibana.Agent) (done bool, _ error)) error { + for { + enrolled, err := cli.QueryAgents(ctx, "") + if err != nil { + return decoratedWith("getting enrolled agents", err) + } + for _, a := range enrolled { + if a.PolicyRevision == 0 || a.Status != "online" { + continue + } + if done, err := fn(a); done || err != nil { + return err + } + } + time.Sleep(5 * time.Second) + } +} + +func uninstallAgent(ts *testscript.TestScript, neg bool, args []string) { + clearStdStreams(ts) + + pkg := ts.Getenv("PKG") + if pkg == "" { + ts.Fatalf("PKG is not set") + } + ds := ts.Getenv("DATA_STREAM") + if ds == "" { + ts.Fatalf("DATA_STREAM is not set") + } + + stacks, ok := ts.Value(runningStackTag{}).(map[string]*runningStack) + if !ok { + ts.Fatalf("no active stacks registry") + } + agents, ok := ts.Value(installedAgentsTag{}).(map[string]*installedAgent) + if !ok { + ts.Fatalf("no installed agent registry") + } + + flg := flag.NewFlagSet("uninstall", flag.ContinueOnError) + profName := flg.String("profile", "default", "profile name") + timeout := flg.Duration("timeout", 0, "timeout (zero or lower indicates no timeout)") + ts.Check(flg.Parse(args)) + if flg.NArg() != 0 { + ts.Fatalf("usage: 
uninstall_agent [-profile ] [-timeout ]") + } + + stk, ok := stacks[*profName] + if !ok { + ts.Fatalf("no active client for %s", *profName) + } + installed, ok := agents[*profName] + if !ok { + ts.Fatalf("agent policy in %s is not installed", *profName) + } + + ctx := context.Background() + if *timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, *timeout) + defer cancel() + } + + delete(agents, *profName) + + ts.Check(decoratedWith("removing agent", stk.kibana.RemoveAgent(ctx, installed.enrolled))) + ts.Check(decoratedWith("tearing down agent", installed.deployed.TearDown(ctx))) + ts.Check(decoratedWith("deleting policies", deletePolicies(ctx, stk.kibana, installed))) + + fmt.Fprintf(ts.Stdout(), "deleted agent policies for %s/%s (testing:%s enrolled:%s)\n", pkg, ds, installed.testingPolicy.ID, installed.enrolledPolicy.ID) +} + +type installedAgentsTag struct{} + +type installedAgent struct { + // agent details + deployed agentdeployer.DeployedAgent + enrolled kibana.Agent // ᕙ(⇀‸↼‶)ᕗ + + // policy details + enrolledPolicy, testingPolicy *kibana.Policy + + started time.Time +} + +func deletePolicies(ctx context.Context, cli *kibana.Client, a *installedAgent) error { + var errs []error + if a.testingPolicy != nil { + errs = append(errs, cli.DeletePolicy(ctx, a.testingPolicy.ID)) + } + if a.enrolledPolicy != nil { + errs = append(errs, cli.DeletePolicy(ctx, a.enrolledPolicy.ID)) + } + return errors.Join(errs...) +} diff --git a/internal/testrunner/script/data_stream.go b/internal/testrunner/script/data_stream.go new file mode 100644 index 0000000000..076cb70ac8 --- /dev/null +++ b/internal/testrunner/script/data_stream.go @@ -0,0 +1,191 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package script + +import ( + "bytes" + "context" + "flag" + "fmt" + "io" + "net/http" + "os" + + "github.com/rogpeppe/go-internal/testscript" + + "github.com/elastic/go-ucfg" + "github.com/elastic/go-ucfg/yaml" + + "github.com/elastic/elastic-package/internal/common" + "github.com/elastic/elastic-package/internal/packages" + "github.com/elastic/elastic-package/internal/testrunner/runners/system" +) + +func addDataStream(ts *testscript.TestScript, neg bool, args []string) { + clearStdStreams(ts) + + pkgRoot := ts.Getenv("PKG_ROOT") + if pkgRoot == "" { + ts.Fatalf("PKG_ROOT is not set") + } + pkg := ts.Getenv("PKG") + if pkg == "" { + ts.Fatalf("PKG is not set") + } + ds := ts.Getenv("DATA_STREAM") + if ds == "" { + ts.Fatalf("DATA_STREAM is not set") + } + + stacks, ok := ts.Value(runningStackTag{}).(map[string]*runningStack) + if !ok { + ts.Fatalf("no active stacks registry") + } + agents, ok := ts.Value(installedAgentsTag{}).(map[string]*installedAgent) + if !ok { + ts.Fatalf("no installed agent registry") + } + dataStreams, ok := ts.Value(installedDataStreamsTag{}).(map[string]struct{}) + if !ok { + ts.Fatalf("no installed data streams registry") + } + + flg := flag.NewFlagSet("add", flag.ContinueOnError) + profName := flg.String("profile", "default", "profile name") + polName := flg.String("policy", "", "policy name") + timeout := flg.Duration("timeout", 0, "timeout (zero or lower indicates no timeout)") + ts.Check(flg.Parse(args)) + if flg.NArg() != 2 { + ts.Fatalf("usage: add_data_stream [-profile ] [-timeout ] [-policy ] ") + } + + cfgPath := ts.MkAbs(flg.Arg(0)) + dsNameLabel := flg.Arg(1) + + cfgData, err := os.ReadFile(cfgPath) + ts.Check(decoratedWith("reading data stream configuration", err)) + cfg, err := yaml.NewConfig(cfgData, ucfg.PathSep(".")) + ts.Check(decoratedWith("deserializing data stream configuration", err)) + var config struct { + Input string `config:"input"` + Vars common.MapStr `config:"vars"` + DataStream struct 
{ + Vars common.MapStr `config:"vars"` + } `config:"data_stream"` + } + ts.Check(decoratedWith("unpacking configuration", cfg.Unpack(&config))) + + stk, ok := stacks[*profName] + if !ok { + ts.Fatalf("no active client for %s", *profName) + } + installed, ok := agents[*profName] + if !ok { + ts.Fatalf("agent policy in %s is not installed", *profName) + } + + ctx := context.Background() + if *timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, *timeout) + defer cancel() + } + + pkgMan, err := packages.ReadPackageManifestFromPackageRoot(pkgRoot) + ts.Check(decoratedWith("reading package manifest", err)) + dsMan, err := packages.ReadDataStreamManifestFromPackageRoot(pkgRoot, ds) + ts.Check(decoratedWith("reading data stream manifest", err)) + + if *polName == "" { + *polName, err = system.FindPolicyTemplateForInput(pkgMan, dsMan, config.Input) + ts.Check(decoratedWith("finding policy template name", err)) + } + templ, err := system.SelectPolicyTemplateByName(pkgMan.PolicyTemplates, *polName) + ts.Check(decoratedWith("finding policy template", err)) + + pds, err := system.CreatePackageDatastream(installed.testingPolicy, pkgMan, templ, dsMan, config.Input, config.Vars, config.DataStream.Vars, installed.testingPolicy.Namespace) + ts.Check(decoratedWith("creating package data stream", err)) + ts.Check(decoratedWith("adding data stream to policy", stk.kibana.AddPackageDataStreamToPolicy(ctx, pds))) + + pol, err := stk.kibana.GetPolicy(ctx, installed.testingPolicy.ID) + ts.Check(decoratedWith("reading policy", err)) + ts.Check(decoratedWith("assigning policy", stk.kibana.AssignPolicyToAgent(ctx, installed.enrolled, *pol))) + + dsName := system.BuildDataStreamName(templ.Input, pds, pkgMan.Type, config.Vars) + ts.Setenv(dsNameLabel, dsName) + dataStreams[dsName] = struct{}{} + + fmt.Fprintf(ts.Stdout(), "added %s data stream policy templates for %s/%s\n", dsName, pkg, ds) +} + +func removeDataStream(ts *testscript.TestScript, neg bool, args 
[]string) { + clearStdStreams(ts) + + pkg := ts.Getenv("PKG") + if pkg == "" { + ts.Fatalf("PKG is not set") + } + ds := ts.Getenv("DATA_STREAM") + if ds == "" { + ts.Fatalf("DATA_STREAM is not set") + } + + stacks, ok := ts.Value(runningStackTag{}).(map[string]*runningStack) + if !ok { + ts.Fatalf("no active stacks registry") + } + dataStreams, ok := ts.Value(installedDataStreamsTag{}).(map[string]struct{}) + if !ok { + ts.Fatalf("no installed data streams registry") + } + + flg := flag.NewFlagSet("remove", flag.ContinueOnError) + profName := flg.String("profile", "default", "profile name") + timeout := flg.Duration("timeout", 0, "timeout (zero or lower indicates no timeout)") + ts.Check(flg.Parse(args)) + if flg.NArg() != 1 { + ts.Fatalf("usage: remove_data_stream [-profile ] [-timeout ] ") + } + + dsName := flg.Arg(0) + + stk, ok := stacks[*profName] + if !ok { + ts.Fatalf("no active client for %s", *profName) + } + _, ok = dataStreams[dsName] + if !ok { + ts.Fatalf("no data stream for %s", dsName) + } + + ctx := context.Background() + if *timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, *timeout) + defer cancel() + } + + resp, err := stk.es.Indices.DeleteDataStream([]string{dsName}, + stk.es.Indices.DeleteDataStream.WithContext(ctx), + ) + ts.Check(decoratedWith("requesting data stream removal for "+dsName, err)) + defer resp.Body.Close() + var body bytes.Buffer + io.Copy(&body, resp.Body) + if resp.StatusCode == http.StatusNotFound { + // Data stream doesn't exist, there was nothing to do. 
+ fmt.Fprintf(ts.Stderr(), "%s data stream policy templates do not exist for %s/%s\n", dsName, pkg, ds) + return + } + if resp.StatusCode >= 300 { + ts.Fatalf("delete request failed for data stream %s: %s", dsName, body.Bytes()) + } + + delete(dataStreams, dsName) + + fmt.Fprintf(ts.Stdout(), "removed %s data stream policy templates for %s/%s\n", dsName, pkg, ds) +} + +type installedDataStreamsTag struct{} diff --git a/internal/testrunner/script/debugging.go b/internal/testrunner/script/debugging.go new file mode 100644 index 0000000000..33e1a174f0 --- /dev/null +++ b/internal/testrunner/script/debugging.go @@ -0,0 +1,131 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package script + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "flag" + "fmt" + "io" + "strings" + "time" + + "github.com/rogpeppe/go-internal/testscript" + + "github.com/elastic/elastic-package/internal/elasticsearch" +) + +var errPolicyNotFound = errors.New("not found") + +func getPolicyCommand(ts *testscript.TestScript, neg bool, args []string) { + clearStdStreams(ts) + + pkg := ts.Getenv("PKG") + if pkg == "" { + ts.Fatalf("PKG is not set") + } + ds := ts.Getenv("DATA_STREAM") + if ds == "" { + ts.Fatalf("DATA_STREAM is not set") + } + + stacks, ok := ts.Value(runningStackTag{}).(map[string]*runningStack) + if !ok { + ts.Fatalf("no active stacks registry") + } + + flg := flag.NewFlagSet("policies", flag.ContinueOnError) + profName := flg.String("profile", "default", "profile name") + timeout := flg.Duration("timeout", -1, "timeout (negative indicates single probe only, zero indicates no timeout)") + ts.Check(flg.Parse(args)) + if flg.NArg() != 1 { + ts.Fatalf("usage: get_policy [-profile ] [-timeout ] ") + } + + policyName := flg.Arg(0) + + stk, ok := stacks[*profName] + if !ok { + 
ts.Fatalf("no active client for %s", *profName) + } + + ctx := context.Background() + if *timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, *timeout) + defer cancel() + } + + if *timeout < 0 { + // Single check. + pol, err := getPolicy(ctx, stk.es.API, policyName) + switch err { + case nil: + fmt.Fprint(ts.Stdout(), pol) + case errPolicyNotFound: + fmt.Fprint(ts.Stdout(), "not found") + default: + fmt.Fprint(ts.Stdout(), err) + } + return + } + + for { + // Check until found or timeout. + pol, err := getPolicy(ctx, stk.es.API, policyName) + switch err { + case nil: + fmt.Fprint(ts.Stdout(), pol) + return + case errPolicyNotFound: + time.Sleep(time.Second) + continue + default: + fmt.Fprint(ts.Stdout(), err) + return + } + } +} + +func getPolicy(ctx context.Context, cli *elasticsearch.API, name string) (string, error) { + resp, err := cli.Indices.GetDataStream(cli.Indices.GetDataStream.WithContext(ctx)) + if err != nil { + return "", err + } + defer resp.Body.Close() + var buf bytes.Buffer + io.Copy(&buf, resp.Body) + var body struct { + DataStreams []json.RawMessage `json:"data_streams"` + } + err = json.Unmarshal(buf.Bytes(), &body) + if err != nil { + return "", err + } + var names []string + for _, ds := range body.DataStreams { + var probe struct { + Name string `json:"name"` + } + err = json.Unmarshal(ds, &probe) + if err != nil { + return "", err + } + if name == "" { + names = append(names, probe.Name) + continue + } + if probe.Name == name { + return string(ds), nil + } + } + if names != nil { + return strings.Join(names, "\n"), nil + } + return "", errPolicyNotFound +} diff --git a/internal/testrunner/script/docker.go b/internal/testrunner/script/docker.go new file mode 100644 index 0000000000..5bdbe0b9ed --- /dev/null +++ b/internal/testrunner/script/docker.go @@ -0,0 +1,245 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package script + +import ( + "context" + "flag" + "fmt" + "io" + "os" + "path/filepath" + + "github.com/rogpeppe/go-internal/testscript" + + "github.com/elastic/elastic-package/internal/common" + "github.com/elastic/elastic-package/internal/compose" + "github.com/elastic/elastic-package/internal/configuration/locations" + "github.com/elastic/elastic-package/internal/servicedeployer" + "github.com/elastic/elastic-package/internal/testrunner/runners/system" +) + +// dockerUp brings up a service using docker-compose. +func dockerUp(ts *testscript.TestScript, neg bool, args []string) { + clearStdStreams(ts) + + if neg { + ts.Fatalf("unsupported: ! docker_up") + } + + stacks, ok := ts.Value(runningStackTag{}).(map[string]*runningStack) + if !ok { + ts.Fatalf("no active stacks registry") + } + srvs, ok := ts.Value(deployedServiceTag{}).(map[string]servicedeployer.DeployedService) + if !ok { + ts.Fatalf("no deployed services registry") + } + + flg := flag.NewFlagSet("up", flag.ContinueOnError) + profName := flg.String("profile", "default", "profile name") + netName := flg.String("network", "", "network name") + timeout := flg.Duration("timeout", 0, "timeout (zero or lower indicates no timeout)") + ts.Check(flg.Parse(args)) + if flg.NArg() != 1 { + ts.Fatalf("usage: docker_up [-profile ] [-timeout ] ") + } + name := flg.Arg(0) + dir := ts.MkAbs(name) + compose := filepath.Join(dir, "docker-compose.yml") + _, err := os.Stat(compose) + ts.Check(err) + + ctx := context.Background() + if *timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, *timeout) + defer cancel() + } + + stk, ok := stacks[*profName] + if !ok { + ts.Fatalf("no active client for %s", *profName) + } + + dep, err := servicedeployer.NewDockerComposeServiceDeployer(servicedeployer.DockerComposeServiceDeployerOptions{ + Profile: stk.profile, + YmlPaths: 
[]string{compose}, + DeployIndependentAgent: *netName != "", + }) + ts.Check(decoratedWith("making service deployer", err)) + + loc, err := locations.NewLocationManager() + ts.Check(err) + + info := servicedeployer.ServiceInfo{ + Name: name, + AgentNetworkName: *netName, + } + info.Logs.Folder.Agent = system.ServiceLogsAgentDir + info.Logs.Folder.Local = ts.MkAbs(loc.ServiceLogDir()) + info.Test.RunID = common.CreateTestRunID() + + up, err := dep.SetUp(ctx, info) + ts.Check(decoratedWith("setting up service", err)) + srvs[name] = up + fmt.Fprintf(ts.Stdout(), "deployed %s:%s-1", info.ProjectName(), name) +} + +// dockerSignal sends a signal to the named service. +func dockerSignal(ts *testscript.TestScript, neg bool, args []string) { + clearStdStreams(ts) + + if neg { + ts.Fatalf("unsupported: ! docker_signal") + } + + srvs, ok := ts.Value(deployedServiceTag{}).(map[string]servicedeployer.DeployedService) + if !ok { + ts.Fatalf("no deployed services registry") + } + + flg := flag.NewFlagSet("signal", flag.ContinueOnError) + timeout := flg.Duration("timeout", 0, "timeout (zero or lower indicates no timeout)") + ts.Check(flg.Parse(args)) + if flg.NArg() != 2 { + ts.Fatalf("usage: docker_signal [-timeout ] ") + } + name := flg.Arg(0) + signal := flg.Arg(1) + + up, ok := srvs[name] + if !ok { + ts.Fatalf("service %s is not deployed", name) + } + + ctx := context.Background() + if *timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, *timeout) + defer cancel() + } + ts.Check(decoratedWith("sending signal", up.Signal(ctx, signal))) +} + +// dockerWaitExit waits for the service to exit +func dockerWaitExit(ts *testscript.TestScript, neg bool, args []string) { + clearStdStreams(ts) + + if neg { + ts.Fatalf("unsupported: ! 
docker_wait_exit") + } + + srvs, ok := ts.Value(deployedServiceTag{}).(map[string]servicedeployer.DeployedService) + if !ok { + ts.Fatalf("no deployed services registry") + } + + flg := flag.NewFlagSet("exit", flag.ContinueOnError) + timeout := flg.Duration("timeout", 0, "timeout (zero or lower indicates no timeout)") + ts.Check(flg.Parse(args)) + if flg.NArg() != 1 { + ts.Fatalf("usage: docker_wait_exit [-timeout ] ") + } + name := flg.Arg(0) + + up, ok := srvs[name] + if !ok { + ts.Fatalf("service %s is not deployed", name) + } + + ctx := context.Background() + if *timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, *timeout) + defer cancel() + } + for { + done, code, err := up.ExitCode(ctx, name) + ts.Check(decoratedWith("checking exit", err)) + if done { + fmt.Fprintf(ts.Stdout(), "%s exited with %d", name, code) + return + } + } +} + +// dockerDown takes down a deployed service and emits the service's logs to stdout. +func dockerDown(ts *testscript.TestScript, neg bool, args []string) { + clearStdStreams(ts) + + if neg { + ts.Fatalf("unsupported: ! 
docker_down") + } + + srvs, ok := ts.Value(deployedServiceTag{}).(map[string]servicedeployer.DeployedService) + if !ok { + ts.Fatalf("no deployed services registry") + } + + flg := flag.NewFlagSet("down", flag.ContinueOnError) + timeout := flg.Duration("timeout", 0, "timeout (zero or lower indicates no timeout)") + ts.Check(flg.Parse(args)) + if flg.NArg() != 1 { + ts.Fatalf("usage: docker_down [-timeout ] ") + } + name := flg.Arg(0) + + up, ok := srvs[name] + if !ok { + ts.Fatalf("service %s is not deployed", name) + } + + ctx := context.Background() + if *timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, *timeout) + defer cancel() + } + ts.Check(decoratedWith("writing logs", writeLogsTo(ctx, ts.Stdout(), up))) + ts.Check(decoratedWith("stopping service", up.TearDown(ctx))) + delete(srvs, name) +} + +type deployedServiceTag struct{} + +func writeLogsTo(ctx context.Context, w io.Writer, s servicedeployer.DeployedService) error { + p, err := projectFor(s) + if err != nil { + return err + } + env, err := envOf(s) + if err != nil { + return err + } + b, err := p.Logs(ctx, compose.CommandOptions{Env: env}) + if err != nil { + return err + } + _, err = w.Write(b) + return err +} + +func projectFor(s servicedeployer.DeployedService) (*compose.Project, error) { + type projector interface { + Project() (*compose.Project, error) + } + p, ok := s.(projector) + if !ok { + return nil, fmt.Errorf("cannot get project from %T", s) + } + return p.Project() +} + +func envOf(s servicedeployer.DeployedService) ([]string, error) { + type enver interface { + Env() []string + } + e, ok := s.(enver) + if !ok { + return nil, fmt.Errorf("cannot get environment from %T", s) + } + return e.Env(), nil +} diff --git a/internal/testrunner/script/package.go b/internal/testrunner/script/package.go new file mode 100644 index 0000000000..05f4f6a338 --- /dev/null +++ b/internal/testrunner/script/package.go @@ -0,0 +1,270 @@ +// Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package script + +import ( + "context" + "encoding/json" + "flag" + "fmt" + "os" + + "github.com/rogpeppe/go-internal/testscript" + + "github.com/elastic/elastic-package/internal/packages" + "github.com/elastic/elastic-package/internal/resources" +) + +func addPackage(ts *testscript.TestScript, neg bool, args []string) { + clearStdStreams(ts) + + pkgRoot := ts.Getenv("PKG_ROOT") + if pkgRoot == "" { + ts.Fatalf("PKG_ROOT is not set") + } + root, err := os.OpenRoot(pkgRoot) + ts.Check(err) + pkg := ts.Getenv("PKG") + if pkg == "" { + ts.Fatalf("PKG is not set") + } + + stacks, ok := ts.Value(runningStackTag{}).(map[string]*runningStack) + if !ok { + ts.Fatalf("no active stacks registry") + } + + flg := flag.NewFlagSet("add", flag.ContinueOnError) + profName := flg.String("profile", "default", "profile name") + timeout := flg.Duration("timeout", 0, "timeout (zero or lower indicates no timeout)") + ts.Check(flg.Parse(args)) + if flg.NArg() != 0 { + ts.Fatalf("usage: add_package [-profile ] [-timeout ]") + } + + stk, ok := stacks[*profName] + if !ok { + ts.Fatalf("no active client for %s", *profName) + } + + ctx := context.Background() + if *timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, *timeout) + defer cancel() + } + + m := resources.NewManager() + m.RegisterProvider(resources.DefaultKibanaProviderName, &resources.KibanaProvider{Client: stk.kibana}) + _, err = m.ApplyCtx(ctx, resources.Resources{&resources.FleetPackage{ + PackageRootPath: pkgRoot, + Absent: false, + Force: true, + RepositoryRoot: root, + }}) + ts.Check(decoratedWith("installing package resources", err)) + + fmt.Fprintf(ts.Stdout(), "added package resources for %s\n", pkg) +} + +func removePackage(ts *testscript.TestScript, neg bool, args []string) { + 
clearStdStreams(ts) + + pkgRoot := ts.Getenv("PKG_ROOT") + if pkgRoot == "" { + ts.Fatalf("PKG_ROOT is not set") + } + root, err := os.OpenRoot(pkgRoot) + ts.Check(err) + pkg := ts.Getenv("PKG") + if pkg == "" { + ts.Fatalf("PKG is not set") + } + + stacks, ok := ts.Value(runningStackTag{}).(map[string]*runningStack) + if !ok { + ts.Fatalf("no active stacks registry") + } + + flg := flag.NewFlagSet("remove", flag.ContinueOnError) + profName := flg.String("profile", "default", "profile name") + timeout := flg.Duration("timeout", 0, "timeout (zero or lower indicates no timeout)") + ts.Check(flg.Parse(args)) + if flg.NArg() != 0 { + ts.Fatalf("usage: remove_package [-profile ] [-timeout ]") + } + + stk, ok := stacks[*profName] + if !ok { + ts.Fatalf("no active client for %s", *profName) + } + + ctx := context.Background() + if *timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, *timeout) + defer cancel() + } + + m := resources.NewManager() + m.RegisterProvider(resources.DefaultKibanaProviderName, &resources.KibanaProvider{Client: stk.kibana}) + _, err = m.ApplyCtx(ctx, resources.Resources{&resources.FleetPackage{ + PackageRootPath: pkgRoot, + Absent: true, + Force: true, + RepositoryRoot: root, // Apparently not required, but adding for safety. 
+ }}) + ts.Check(decoratedWith("removing package resources", err)) + + fmt.Fprintf(ts.Stdout(), "removed package resources for %s\n", pkg) +} + +func upgradePackageLatest(ts *testscript.TestScript, neg bool, args []string) { + clearStdStreams(ts) + + pkg := ts.Getenv("PKG") + if pkg == "" { + ts.Fatalf("PKG is not set") + } + + stacks, ok := ts.Value(runningStackTag{}).(map[string]*runningStack) + if !ok { + ts.Fatalf("no active stacks registry") + } + + flg := flag.NewFlagSet("upgrade", flag.ContinueOnError) + profName := flg.String("profile", "default", "profile name") + timeout := flg.Duration("timeout", 0, "timeout (zero or lower indicates no timeout)") + ts.Check(flg.Parse(args)) + if flg.NArg() != 0 && flg.NArg() != 1 { + ts.Fatalf("usage: upgrade_package_latest [-profile ] [-timeout ] []") + } + + if flg.NArg() == 1 { + pkg = flg.Arg(0) + } + + stk, ok := stacks[*profName] + if !ok { + ts.Fatalf("no active client for %s", *profName) + } + + ctx := context.Background() + if *timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, *timeout) + defer cancel() + } + + msgs, err := stk.kibana.ListRawPackagePolicies(ctx) + ts.Check(decoratedWith("upgrade package", err)) + var n int + for _, m := range msgs { + var pol struct { + ID string `json:"id"` + Package struct { + Name string `json:"name"` + Version string `json:"version"` + } `json:"package"` + } + ts.Check(decoratedWith("getting package policy id", json.Unmarshal(m, &pol))) + if pol.Package.Name == pkg { + n++ + ts.Check(decoratedWith("upgrading package policy", stk.kibana.UpgradePackagePolicyToLatest(ctx, pol.ID))) + fmt.Fprintf(ts.Stdout(), "upgraded package %s from version %s\n", pkg, pol.Package.Version) + } + } + if n == 0 { + ts.Fatalf("could not find policy for %s", pkg) + } +} + +func addPackageZip(ts *testscript.TestScript, neg bool, args []string) { + clearStdStreams(ts) + + pkg := ts.Getenv("PKG") + if pkg == "" { + ts.Fatalf("PKG is not set") + } + + stacks, ok := 
ts.Value(runningStackTag{}).(map[string]*runningStack) + if !ok { + ts.Fatalf("no active stacks registry") + } + + flg := flag.NewFlagSet("add", flag.ContinueOnError) + profName := flg.String("profile", "default", "profile name") + timeout := flg.Duration("timeout", 0, "timeout (zero or lower indicates no timeout)") + ts.Check(flg.Parse(args)) + if flg.NArg() != 1 { + ts.Fatalf("usage: add_package_zip [-profile ] [-timeout ] ") + } + + path := ts.MkAbs(flg.Arg(0)) + + stk, ok := stacks[*profName] + if !ok { + ts.Fatalf("no active client for %s", *profName) + } + + ctx := context.Background() + if *timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, *timeout) + defer cancel() + } + + m, err := packages.ReadPackageManifestFromZipPackage(path) + ts.Check(decoratedWith("reading zip manifest", err)) + + _, err = stk.kibana.InstallZipPackage(ctx, path) + ts.Check(decoratedWith("installing package zip", err)) + + fmt.Fprintf(ts.Stdout(), "added zipped package resources in %s for %s in test for %s\n", path, m.Name, pkg) +} + +func removePackageZip(ts *testscript.TestScript, neg bool, args []string) { + clearStdStreams(ts) + + pkg := ts.Getenv("PKG") + if pkg == "" { + ts.Fatalf("PKG is not set") + } + + stacks, ok := ts.Value(runningStackTag{}).(map[string]*runningStack) + if !ok { + ts.Fatalf("no active stacks registry") + } + + flg := flag.NewFlagSet("remove_zip", flag.ContinueOnError) + profName := flg.String("profile", "default", "profile name") + timeout := flg.Duration("timeout", 0, "timeout (zero or lower indicates no timeout)") + ts.Check(flg.Parse(args)) + if flg.NArg() != 1 { + ts.Fatalf("usage: remove_package_zip [-profile ] [-timeout ] ") + } + + path := ts.MkAbs(flg.Arg(0)) + + stk, ok := stacks[*profName] + if !ok { + ts.Fatalf("no active client for %s", *profName) + } + + ctx := context.Background() + if *timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, *timeout) + defer cancel() + } + 
+ m, err := packages.ReadPackageManifestFromZipPackage(path) + ts.Check(decoratedWith("reading zip manifest", err)) + + _, err = stk.kibana.RemovePackage(ctx, m.Name, m.Version) + ts.Check(decoratedWith("removing package zip", err)) + + fmt.Fprintf(ts.Stdout(), "removed zipped package resources in %s for %s in test for %s\n", path, m.Name, pkg) +} diff --git a/internal/testrunner/script/pipelines.go b/internal/testrunner/script/pipelines.go new file mode 100644 index 0000000000..f3eec3213a --- /dev/null +++ b/internal/testrunner/script/pipelines.go @@ -0,0 +1,210 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package script + +import ( + "context" + "encoding/json" + "flag" + "fmt" + "io" + "os" + "path/filepath" + "time" + + "github.com/rogpeppe/go-internal/testscript" + + "github.com/elastic/elastic-package/internal/elasticsearch/ingest" +) + +// installPipelines installs a data stream's pipelines. +func installPipelines(ts *testscript.TestScript, neg bool, args []string) { + clearStdStreams(ts) + + if neg { + ts.Fatalf("unsupported: ! 
install_pipelines") + } + + stacks, ok := ts.Value(runningStackTag{}).(map[string]*runningStack) + if !ok { + ts.Fatalf("no active stacks registry") + } + + flg := flag.NewFlagSet("install", flag.ContinueOnError) + profName := flg.String("profile", "default", "profile name") + timeout := flg.Duration("timeout", 0, "timeout (zero or lower indicates no timeout)") + ts.Check(flg.Parse(args)) + if flg.NArg() != 1 { + ts.Fatalf("usage: install_pipelines [-profile ] [-timeout ] ") + } + + name := flg.Arg(0) + path := ts.MkAbs(name) + root, err := os.OpenRoot("/") + ts.Check(err) + + _, err = os.Stat(filepath.Join(path, filepath.FromSlash("elasticsearch/ingest_pipeline"))) + ts.Check(err) + + stk, ok := stacks[*profName] + if !ok { + ts.Fatalf("no active client for %s", *profName) + } + + ctx := context.Background() + if *timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, *timeout) + defer cancel() + } + + nonce := time.Now().UnixNano() + pipes, err := ingest.LoadIngestPipelineFiles(path, nonce, root) + ts.Check(decoratedWith("loading pipelines", err)) + + ts.Check(decoratedWith("installing pipelines", ingest.InstallPipelinesInElasticsearch(ctx, stk.es.API, pipes))) + + pipelines, ok := ts.Value(installedPipelinesTag{}).(map[string]installedPipelines) + if !ok { + ts.Fatalf("no installed pipelines registry") + } + pipelines[name] = installedPipelines{ + path: path, + nonce: nonce, + pipes: pipes, + } + fmt.Fprintf(ts.Stdout(), "installed pipelines in %s with nonce %d", filepath.Base(path), nonce) +} + +// simulate runs the simulate API endpoint using a data stream's pipelines. +func simulate(ts *testscript.TestScript, neg bool, args []string) { + clearStdStreams(ts) + + if neg { + ts.Fatalf("unsupported: ! 
simulate") + } + + stacks, ok := ts.Value(runningStackTag{}).(map[string]*runningStack) + if !ok { + ts.Fatalf("no active stacks registry") + } + pipelines, ok := ts.Value(installedPipelinesTag{}).(map[string]installedPipelines) + if !ok { + ts.Fatalf("no installed pipelines registry") + } + + flg := flag.NewFlagSet("uninstall", flag.ContinueOnError) + profName := flg.String("profile", "default", "profile name") + index := flg.String("index", "index-default", "simulate index name") + timeout := flg.Duration("timeout", 0, "timeout (zero or lower indicates no timeout)") + ts.Check(flg.Parse(args)) + if flg.NArg() != 3 { + ts.Fatalf("usage: simulate [-profile ] [-timeout ] ") + } + + name := flg.Arg(0) + pipeline := flg.Arg(1) + path := ts.MkAbs(flg.Arg(2)) + + f, err := os.Open(path) + ts.Check(err) + defer f.Close() + dec := json.NewDecoder(f) + var events []json.RawMessage + for { + var e json.RawMessage + err := dec.Decode(&e) + if err == io.EOF { + break + } + ts.Check(err) + events = append(events, e) + } + + installed, ok := pipelines[name] + if !ok { + ts.Fatalf("pipelines in %s are not installed", name) + } + pipeline = ingest.GetPipelineNameWithNonce(pipeline, installed.nonce) + + stk, ok := stacks[*profName] + if !ok { + ts.Fatalf("no active client for %s", *profName) + } + + ctx := context.Background() + if *timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, *timeout) + defer cancel() + } + + msg, err := ingest.SimulatePipeline(ctx, stk.es.API, pipeline, events, *index) + ts.Check(decoratedWith("running simulate", err)) + + for _, m := range msg { + m, err := json.MarshalIndent(m, "", "\t") + ts.Check(err) + fmt.Fprintf(ts.Stdout(), "%s\n", m) + } +} + +// uninstallPipelines uninstalls a data stream's pipelines. +func uninstallPipelines(ts *testscript.TestScript, neg bool, args []string) { + clearStdStreams(ts) + + if neg { + ts.Fatalf("unsupported: ! 
uninstall_pipelines") + } + + stacks, ok := ts.Value(runningStackTag{}).(map[string]*runningStack) + if !ok { + ts.Fatalf("no active stacks registry") + } + pipelines, ok := ts.Value(installedPipelinesTag{}).(map[string]installedPipelines) + if !ok { + ts.Fatalf("no installed pipelines registry") + } + + flg := flag.NewFlagSet("uninstall", flag.ContinueOnError) + profName := flg.String("profile", "default", "profile name") + timeout := flg.Duration("timeout", 0, "timeout (zero or lower indicates no timeout)") + ts.Check(flg.Parse(args)) + if flg.NArg() != 1 { + ts.Fatalf("usage: uninstall_pipelines [-profile ] [-timeout ] ") + } + + name := flg.Arg(0) + + installed, ok := pipelines[name] + if !ok { + ts.Fatalf("pipelines in %s are not installed", name) + } + + stk, ok := stacks[*profName] + if !ok { + ts.Fatalf("no active client for %s", *profName) + } + + ctx := context.Background() + if *timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, *timeout) + defer cancel() + } + + ts.Check(decoratedWith("uninstall pipelines", ingest.UninstallPipelines(ctx, stk.es.API, installed.pipes))) + + delete(pipelines, name) + fmt.Fprintf(ts.Stdout(), "uninstalled pipelines in %s", filepath.Base(installed.path)) +} + +type installedPipelinesTag struct{} + +type installedPipelines struct { + path string + nonce int64 + pipes []ingest.Pipeline +} diff --git a/internal/testrunner/script/script.go b/internal/testrunner/script/script.go new file mode 100644 index 0000000000..fb1cef4686 --- /dev/null +++ b/internal/testrunner/script/script.go @@ -0,0 +1,694 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package script + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "errors" + "flag" + "fmt" + "io" + "io/fs" + "net/http" + "os" + "os/signal" + "path/filepath" + "regexp" + "strings" + "sync/atomic" + "time" + + "github.com/rogpeppe/go-internal/testscript" + "github.com/spf13/cobra" + + "github.com/elastic/elastic-package/internal/cobraext" + "github.com/elastic/elastic-package/internal/configuration/locations" + "github.com/elastic/elastic-package/internal/elasticsearch/ingest" + "github.com/elastic/elastic-package/internal/install" + "github.com/elastic/elastic-package/internal/packages" + "github.com/elastic/elastic-package/internal/packages/changelog" + "github.com/elastic/elastic-package/internal/resources" + "github.com/elastic/elastic-package/internal/servicedeployer" + "github.com/elastic/elastic-package/internal/stack" +) + +func Run(dst io.Writer, cmd *cobra.Command, args []string) error { + home, err := os.UserHomeDir() + if err != nil { + return fmt.Errorf("could not find home: %w", err) + } + config, err := install.Configuration() + if err != nil { + return fmt.Errorf("could read configuration: %w", err) + } + loc, err := locations.NewLocationManager() + if err != nil { + return err + } + work, err := cmd.Flags().GetBool(cobraext.WorkScriptTestFlagName) + if err != nil { + return err + } + workRoot := filepath.Join(home, loc.TempDir(), "script_tests") + err = os.MkdirAll(workRoot, 0o700) + if err != nil { + return fmt.Errorf("could not make work space root: %w", err) + } + var workdirRoot string + if work { + // Only create a work root and pass it in if --work has been requested. + // The behaviour of testscript is to set TestWork to true if the work + // root is non-zero, so just let testscript put it where it wants in the + // case that we have not requested work to be retained. This will be in + // os.MkdirTemp(os.Getenv("GOTMPDIR"), "go-test-script") which on most + // systems will be /tmp/go-test-script. 
However, due to… decisions, we + // cannot operate in that directory… + workdirRoot, err = os.MkdirTemp(workRoot, "*") + if err != nil { + return fmt.Errorf("could not make work space: %w", err) + } + } else { + // … so set $GOTMPDIR to a location that we can work in. + // + // This is all obviously awful. + err = os.Setenv("GOTMPDIR", workRoot) + if err != nil { + return fmt.Errorf("could not set temp dir var: %w", err) + } + } + + externalStack, err := cmd.Flags().GetBool(cobraext.ExternalStackFlagName) + if err != nil { + return err + } + run, err := cmd.Flags().GetString(cobraext.RunPatternFlagName) + if err != nil { + return err + } + verbose, err := cmd.Flags().GetCount(cobraext.VerboseFlagName) + if err != nil { + return err + } + verboseScript, err := cmd.Flags().GetBool(cobraext.VerboseScriptFlagName) + if err != nil { + return err + } + update, err := cmd.Flags().GetBool(cobraext.UpdateScriptTestArchiveFlagName) + if err != nil { + return err + } + cont, err := cmd.Flags().GetBool(cobraext.ContinueOnErrorFlagName) + if err != nil { + return err + } + + dirs, err := scripts(cmd) + if err != nil { + return err + } + var pkgRoot, currVersion, prevVersion string + if len(dirs) == 0 { + pkgRoot, err = packages.FindPackageRoot() + if err != nil { + if err == packages.ErrPackageRootNotFound { + return errors.New("package root not found") + } + return fmt.Errorf("locating package root failed: %w", err) + } + dirs, err = datastreams(cmd, pkgRoot) + if err != nil { + return err + } + if len(dirs) == 0 { + return nil + } + revs, err := changelog.ReadChangelogFromPackageRoot(pkgRoot) + if err != nil { + return err + } + if len(revs) > 0 { + currVersion = revs[0].Version + } + if len(revs) > 1 { + prevVersion = revs[1].Version + } + } + + var stdinTempFile string + t := &T{ + verbose: verbose != 0 || verboseScript, + stdinTempFile: stdinTempFile, + + out: dst, + + deployedService: make(map[string]servicedeployer.DeployedService), + runningStack: 
make(map[string]*runningStack), + installedAgents: make(map[string]*installedAgent), + installedDataStreams: make(map[string]struct{}), + installedPipelines: make(map[string]installedPipelines), + } + if run != "" { + t.run, err = regexp.Compile(run) + if err != nil { + return nil + } + } + var errs []error + if pkgRoot != "" { + t.Log("PKG ", filepath.Base(pkgRoot)) + } + ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt) + defer cancel() + var n int + for _, d := range dirs { + scripts := d + var dsRoot string + if pkgRoot != "" { + dsRoot = filepath.Join(pkgRoot, "data_stream", d) + scripts = filepath.Join(dsRoot, filepath.FromSlash("_dev/test/scripts")) + } + _, err := os.Stat(scripts) + if errors.Is(err, fs.ErrNotExist) { + continue + } + n++ + p := testscript.Params{ + Dir: scripts, + WorkdirRoot: workdirRoot, + UpdateScripts: update, + ContinueOnError: cont, + TestWork: work, + Cmds: map[string]func(ts *testscript.TestScript, neg bool, args []string){ + "sleep": sleep, + "date": date, + "GET": get, + "POST": post, + "stack_up": stackUp, + "use_stack": useStack, + "stack_down": stackDown, + "docker_up": dockerUp, + "docker_down": dockerDown, + "docker_signal": dockerSignal, + "docker_wait_exit": dockerWaitExit, + "install_pipelines": installPipelines, + "simulate": simulate, + "uninstall_pipelines": uninstallPipelines, + "install_agent": installAgent, + "add_package": addPackage, + "remove_package": removePackage, + "upgrade_package_latest": upgradePackageLatest, + "add_package_zip": addPackageZip, + "remove_package_zip": removePackageZip, + "add_data_stream": addDataStream, + "remove_data_stream": removeDataStream, + "uninstall_agent": uninstallAgent, + "get_docs": getDocs, + "dump_logs": dumpLogs, + "match_file": match, + "get_policy": getPolicyCommand, + }, + Setup: func(e *testscript.Env) error { + e.Setenv("PROFILE", config.CurrentProfile()) + e.Setenv("CONFIG_ROOT", loc.RootDir()) + e.Setenv("CONFIG_PROFILES", loc.ProfileDir()) + 
e.Setenv("HOME", home) + if pkgRoot != "" { + e.Setenv("PKG", filepath.Base(pkgRoot)) + e.Setenv("PKG_ROOT", pkgRoot) + } + if currVersion != "" { + e.Setenv("CURRENT_VERSION", currVersion) + } + if prevVersion != "" { + e.Setenv("PREVIOUS_VERSION", prevVersion) + } + if dsRoot != "" { + e.Setenv("DATA_STREAM", d) + e.Setenv("DATA_STREAM_ROOT", dsRoot) + } + e.Values[deployedServiceTag{}] = t.deployedService + e.Values[runningStackTag{}] = t.runningStack + e.Values[installedAgentsTag{}] = t.installedAgents + e.Values[installedDataStreamsTag{}] = t.installedDataStreams + e.Values[installedPipelinesTag{}] = t.installedPipelines + return nil + }, + Condition: func(cond string) (bool, error) { + switch cond { + case "external_stack": + return externalStack, nil + default: + return false, fmt.Errorf("unknown condition: %s", cond) + } + }, + } + // This is not the ideal approach. What I would like would + // be to pass this into the testscript, but that is a bunch + // of wiring and likely should either be added later when + // needed, or have the option of passing it in to the library + // upstream. + if ctx.Err() != nil { + t.Fatal("interrupted") + } + t.Log("DATA_STREAM ", d) + err = runTests(t, p) + if err != nil { + errs = append(errs, err) + } + if work { + continue + } + cleanUp( + context.Background(), // Not the interrupt context. + pkgRoot, + t.deployedService, + t.installedDataStreams, + t.installedAgents, + t.installedPipelines, + t.runningStack, + ) + } + if n == 0 { + t.Log("[no test files]") + } + return errors.Join(errs...) +} + +func cleanUp(ctx context.Context, pkgRoot string, srvs map[string]servicedeployer.DeployedService, streams map[string]struct{}, agents map[string]*installedAgent, pipes map[string]installedPipelines, stacks map[string]*runningStack) { + // We most likely have only one stack, but just iterate over + // all if there is more than one. What could possibly go wrong? 
+ // If this _is_ problematic, we'll need to record the stack that + // was used for each item when it's created. + for _, stk := range stacks { + for _, pipe := range pipes { + ingest.UninstallPipelines(ctx, stk.es.API, pipe.pipes) + } + + for _, srv := range srvs { + srv.TearDown(ctx) + } + + for ds := range streams { + stk.es.Indices.DeleteDataStream([]string{ds}, + stk.es.Indices.DeleteDataStream.WithContext(ctx), + ) + } + + for _, installed := range agents { + stk.kibana.RemoveAgent(ctx, installed.enrolled) + installed.deployed.TearDown(ctx) + deletePolicies(ctx, stk.kibana, installed) + } + + m := resources.NewManager() + m.RegisterProvider(resources.DefaultKibanaProviderName, &resources.KibanaProvider{Client: stk.kibana}) + m.ApplyCtx(ctx, resources.Resources{&resources.FleetPackage{ + PackageRootPath: pkgRoot, + Absent: true, + Force: true, + }}) + + if stk.external { + continue + } + stk.provider.TearDown(ctx, stack.Options{Profile: stk.profile}) + } +} + +func scripts(cmd *cobra.Command) ([]string, error) { + dir, err := cmd.Flags().GetString(cobraext.ScriptsFlagName) + if err != nil { + return nil, err + } + if dir == "" { + return nil, nil + } + fi, err := os.Stat(dir) + if errors.Is(err, fs.ErrNotExist) { + return nil, nil + } + if err != nil { + return nil, fmt.Errorf("stat directory failed (path: %s): %w", dir, err) + } + if !fi.IsDir() { + return nil, fmt.Errorf("data stream must be a directory (path: %s)", dir) + } + ent, err := os.ReadDir(dir) + if err != nil { + return nil, err + } + if len(ent) == 0 { + return nil, nil + } + return []string{dir}, nil +} + +func clearStdStreams(ts *testscript.TestScript) { + fmt.Fprint(ts.Stdout(), "") + fmt.Fprint(ts.Stderr(), "") +} + +func datastreams(cmd *cobra.Command, root string) ([]string, error) { + streams, err := cmd.Flags().GetStringSlice(cobraext.DataStreamsFlagName) + if err != nil { + return nil, cobraext.FlagParsingError(err, cobraext.DataStreamsFlagName) + } + if len(streams) == 0 { + p := 
filepath.Join(root, "data_stream") + fi, err := os.Stat(p) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + err = nil + } + return nil, err + } + if !fi.IsDir() { + return nil, fmt.Errorf("data_stream must be a directory (path: %s)", p) + } + d, err := os.Open(p) + if err != nil { + return nil, err + } + defer d.Close() + streams, err = d.Readdirnames(-1) + if err != nil { + return nil, err + } + } + for i, ds := range streams { + ds = strings.TrimSpace(ds) + p := filepath.Join(root, "data_stream", ds) + fi, err := os.Stat(p) + if err != nil { + return nil, fmt.Errorf("stat directory failed (path: %s): %w", p, err) + } + if !fi.IsDir() { + return nil, fmt.Errorf("data stream must be a directory (path: %s)", p) + } + ent, err := os.ReadDir(p) + if err != nil { + return nil, err + } + if len(ent) == 0 { + continue + } + streams[i] = ds + } + return streams, nil +} + +func runTests(t *T, p testscript.Params) (err error) { + defer func() { + switch r := recover().(type) { + case nil: + case error: + switch { + case errors.Is(r, skipRun): + default: + err = r + } + default: + panic(r) + } + }() + + testscript.RunT(t, p) + if t.failed.Load() { + return failedRun + } + return nil +} + +var ( + //lint:ignore ST1012 This naming is conventional for testscript. + failedRun = errors.New("failed run") + //lint:ignore ST1012 This naming is conventional for testscript. + skipRun = errors.New("skip") +) + +// T implements testscript.T and is used in the call to testscript.Run +type T struct { + run *regexp.Regexp + verbose bool + stdinTempFile string + failed atomic.Bool + + out io.Writer + + // stack registries + deployedService map[string]servicedeployer.DeployedService + runningStack map[string]*runningStack + installedAgents map[string]*installedAgent + installedDataStreams map[string]struct{} + installedPipelines map[string]installedPipelines +} + +// clearRegistries prevents tests within a directory from communicating +// with each other. 
This is required because we need a way to share the +// registries with the environment in order to do the clean-up. +func (t *T) clearRegistries() { + clear(t.installedPipelines) + clear(t.deployedService) + clear(t.installedDataStreams) + clear(t.installedAgents) + clear(t.runningStack) +} + +func (t *T) Skip(is ...any) { + panic(skipRun) +} + +func (t *T) Fatal(is ...any) { + t.Log(is...) + t.FailNow() +} + +func (t *T) Parallel() { + // Not supported. +} + +func (t *T) Log(is ...any) { + msg := fmt.Sprint(is...) + if t.stdinTempFile != "" { + msg = strings.ReplaceAll(msg, t.stdinTempFile, "") + } + if !strings.HasSuffix(msg, "\n") { + msg += "\n" + } + if t.out == nil { + t.out = os.Stdout + } + fmt.Fprint(t.out, msg) +} + +func (t *T) FailNow() { + panic(failedRun) +} + +func (t *T) Run(name string, f func(t testscript.T)) { + if t.run != nil && !t.run.MatchString(name) { + return + } + defer func() { + switch err := recover(); err { + case nil: + case skipRun: + t.Log("SKIPPED ", name) + case failedRun: + t.Log("FAILED ", name) + t.failed.Store(true) + default: + panic(fmt.Errorf("unexpected panic: %v [%T]", err, err)) + } + }() + t.Log("RUN ", name) + t.clearRegistries() + f(t) +} + +func (t *T) Verbose() bool { + return t.verbose +} + +func decoratedWith(msg string, err error) error { + if err == nil { + return nil + } + return fmt.Errorf("%s: %w", msg, err) +} + +func match(ts *testscript.TestScript, neg bool, args []string) { + if len(args) != 2 { + ts.Fatalf("usage: match_file pattern_file data") + } + pattern, err := os.ReadFile(ts.MkAbs(args[0])) + ts.Check(decoratedWith("read pattern file", err)) + data, err := os.ReadFile(ts.MkAbs(args[1])) + ts.Check(decoratedWith("read data file", err)) + // txtar files always end with a \n, so remove it. 
+ pattern = bytes.TrimRight(pattern, "\n") + re, err := regexp.Compile("(?m)" + string(pattern)) + ts.Check(err) + + if neg { + if re.Match(data) { + ts.Logf("[match_file]\n%s\n", data) + ts.Fatalf("unexpected match for %#q found in match_file: %s\n", pattern, re.Find(data)) + } + } else { + if !re.Match(data) { + ts.Logf("[match_file]\n%s\n", data) + ts.Fatalf("no match for %#q found in match_file", pattern) + } + } +} + +// sleep waits for a specified duration. +func sleep(ts *testscript.TestScript, neg bool, args []string) { + if neg { + ts.Fatalf("unsupported: ! sleep") + } + if len(args) != 1 { + ts.Fatalf("usage: sleep duration") + } + fmt.Println("sleep", args[0]) + d, err := time.ParseDuration(args[0]) + ts.Check(err) + time.Sleep(d) +} + +// date is the unix date command rendering the time in RFC3339, optionally +// storing the time into a named environment variable. +func date(ts *testscript.TestScript, neg bool, args []string) { + if neg { + ts.Fatalf("unsupported: ! date") + } + if len(args) != 0 && len(args) != 1 { + ts.Fatalf("usage: date []") + } + t := time.Now().Format(time.RFC3339Nano) + if len(args) == 1 { + ts.Setenv(args[0], t) + } + _, err := fmt.Fprintln(ts.Stdout(), t) + ts.Check(err) +} + +func get(ts *testscript.TestScript, neg bool, args []string) { + flg := flag.NewFlagSet("get", flag.ContinueOnError) + jsonData := flg.Bool("json", false, "data from GET is JSON") + ts.Check(flg.Parse(args)) + if flg.NArg() != 1 { + ts.Fatalf("usage: GET [-json] ") + } + cli := http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + }, + } + resp, err := cli.Get(flg.Arg(0)) + if neg { + ts.Check(err) + } + var buf bytes.Buffer + _, err = io.Copy(&buf, resp.Body) + if neg { + ts.Check(err) + } + err = resp.Body.Close() + if neg { + ts.Check(err) + } + if *jsonData { + var dst bytes.Buffer + err = json.Indent(&dst, buf.Bytes(), "", "\t") + if neg { + ts.Check(err) + } + buf = dst + } + 
ts.Stdout().Write(buf.Bytes())
+	if !bytes.HasSuffix(buf.Bytes(), []byte{'\n'}) {
+		fmt.Fprintln(ts.Stdout())
+	}
+	if neg {
+		ts.Fatalf("get: unexpected success")
+	}
+}
+
+func post(ts *testscript.TestScript, neg bool, args []string) {
+	flg := flag.NewFlagSet("post", flag.ContinueOnError)
+	jsonData := flg.Bool("json", false, "response data from POST is JSON")
+	content := flg.String("content", "", "data content-type")
+	ts.Check(flg.Parse(args))
+	if flg.NArg() != 2 {
+		ts.Fatalf("usage: POST [-json] [-content ] ")
+	}
+	f, err := os.Open(flg.Arg(0))
+	if neg {
+		ts.Check(err)
+	}
+	defer f.Close()
+	cli := http.Client{
+		Transport: &http.Transport{
+			TLSClientConfig: &tls.Config{
+				InsecureSkipVerify: true,
+			},
+		},
+	}
+	resp, err := cli.Post(flg.Arg(1), *content, f)
+	if neg {
+		ts.Check(err)
+	}
+	var buf bytes.Buffer
+	_, err = io.Copy(&buf, resp.Body)
+	if neg {
+		ts.Check(err)
+	}
+	err = resp.Body.Close()
+	if neg {
+		ts.Check(err)
+	}
+	if *jsonData {
+		var dst bytes.Buffer
+		err = json.Indent(&dst, buf.Bytes(), "", "\t")
+		if neg {
+			ts.Check(err)
+		}
+		buf = dst
+	}
+	ts.Stdout().Write(buf.Bytes())
+	if !bytes.HasSuffix(buf.Bytes(), []byte{'\n'}) {
+		fmt.Fprintln(ts.Stdout())
+	}
+	if neg {
+		// Was "get: unexpected success" — copy-paste from the GET command;
+		// this failure belongs to POST.
+		ts.Fatalf("post: unexpected success")
+	}
+}
+
+func expandTilde(path string) (string, error) {
+	path, ok := strings.CutPrefix(path, "~/")
+	if !ok {
+		return path, nil
+	}
+	home, err := os.UserHomeDir()
+	if err != nil {
+		return "", err
+	}
+	return filepath.Join(home, path), nil
+}
+
+type printer struct {
+	stdout, stderr io.Writer
+}
+
+func (p printer) Print(i ...interface{})   { fmt.Fprint(p.stdout, i...) }
+func (p printer) Println(i ...interface{}) { fmt.Fprintln(p.stdout, i...) }
+func (p printer) Printf(format string, i ...interface{}) { fmt.Fprintf(p.stdout, format, i...)
} diff --git a/internal/testrunner/script/stack.go b/internal/testrunner/script/stack.go new file mode 100644 index 0000000000..133e546cad --- /dev/null +++ b/internal/testrunner/script/stack.go @@ -0,0 +1,405 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package script + +import ( + "bytes" + "context" + "encoding/json" + "flag" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "strings" + "time" + + "github.com/rogpeppe/go-internal/testscript" + + "github.com/elastic/elastic-package/internal/elasticsearch" + "github.com/elastic/elastic-package/internal/kibana" + "github.com/elastic/elastic-package/internal/profile" + "github.com/elastic/elastic-package/internal/stack" + "github.com/elastic/elastic-package/internal/testrunner/runners/system" +) + +// stackUp brings up a stack in the same way that `elastic-package stack up -d` does. +func stackUp(ts *testscript.TestScript, neg bool, args []string) { + clearStdStreams(ts) + + if neg { + ts.Fatalf("unsupported: ! 
stack_up") + } + + stacks, ok := ts.Value(runningStackTag{}).(map[string]*runningStack) + if !ok { + ts.Fatalf("no active stacks registry") + } + + flg := flag.NewFlagSet("up", flag.ContinueOnError) + profName := flg.String("profile", "default", "profile name") + provName := flg.String("provider", "compose", "provider name") + timeout := flg.Duration("timeout", 0, "timeout (zero or lower indicates no timeout)") + ts.Check(flg.Parse(args)) + if flg.NArg() != 1 { + ts.Fatalf("usage: stack_up [-profile ] [-provider ] [-timeout ] ") + } + version := flg.Arg(0) + + ctx := context.Background() + if *timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, *timeout) + defer cancel() + } + + prof, err := profile.LoadProfileFrom(ts.MkAbs("profiles"), *profName) + ts.Check(decoratedWith("loading profile", err)) + prov, err := stack.BuildProvider(*provName, prof) + ts.Check(decoratedWith("getting build provider", err)) + ts.Check(decoratedWith("booting stack", prov.BootUp(ctx, stack.Options{ + DaemonMode: true, + StackVersion: version, + Services: nil, // TODO + Profile: prof, + Printer: printer{stdout: ts.Stdout(), stderr: ts.Stderr()}, + }))) + + cfg, err := stack.LoadConfig(prof) + ts.Check(decoratedWith("loading config", err)) + + es, err := stack.NewElasticsearchClientFromProfile(prof, elasticsearch.OptionWithCertificateAuthority(cfg.CACertFile)) + ts.Check(decoratedWith("making elasticsearch client", err)) + ts.Check(decoratedWith("checking cluster health", es.CheckHealth(ctx))) + + kibana, err := stack.NewKibanaClientFromProfile(prof, kibana.CertificateAuthority(cfg.CACertFile)) + ts.Check(decoratedWith("making kibana client", err)) + + stacks[*profName] = &runningStack{ + version: version, + profile: prof, + provider: prov, + config: cfg, + es: es, + kibana: kibana, + } +} + +// stackDown takes down a stack in the same way that `elastic-package stack down` does. 
+func stackDown(ts *testscript.TestScript, neg bool, args []string) { + clearStdStreams(ts) + + stacks, ok := ts.Value(runningStackTag{}).(map[string]*runningStack) + if !ok { + if neg { + fmt.Fprintf(ts.Stderr(), "no active stacks registry") + return + } + ts.Fatalf("no active stacks registry") + } + + flg := flag.NewFlagSet("down", flag.ContinueOnError) + profName := flg.String("profile", "default", "profile name") + timeout := flg.Duration("timeout", 0, "timeout (zero or lower indicates no timeout)") + ts.Check(flg.Parse(args)) + if flg.NArg() != 0 { + ts.Fatalf("usage: stack_down [-profile ] [-provider ] [-timeout ]") + } + stk, ok := stacks[*profName] + if !ok { + if neg { + fmt.Fprintf(ts.Stderr(), "no running stack for %s", *profName) + return + } + ts.Fatalf("no running stack for %s", *profName) + } + if stk.external { + if neg { + fmt.Fprintf(ts.Stderr(), "cannot take down externally run stack %s", *profName) + return + } + ts.Fatalf("cannot take down externally run stack %s", *profName) + } + + ctx := context.Background() + if *timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, *timeout) + defer cancel() + } + + delete(stacks, *profName) + + ts.Check(decoratedWith("tearing down stack", stk.provider.TearDown(ctx, stack.Options{ + Profile: stk.profile, + Printer: printer{stdout: ts.Stdout(), stderr: ts.Stderr()}, + }))) +} + +// useStack registers a running stack for use in the script. +func useStack(ts *testscript.TestScript, neg bool, args []string) { + clearStdStreams(ts) + + if neg { + ts.Fatalf("unsupported: ! 
use_stack") + } + + stacks, ok := ts.Value(runningStackTag{}).(map[string]*runningStack) + if !ok { + ts.Fatalf("no active stacks registry") + } + + flg := flag.NewFlagSet("use", flag.ContinueOnError) + profPath := flg.String("profile", "~/.elastic-package/profiles/default", "profile path") + timeout := flg.Duration("timeout", 0, "timeout (zero or lower indicates no timeout)") + ts.Check(flg.Parse(args)) + if flg.NArg() != 0 { + ts.Fatalf("usage: use_stack [-profile ] [-timeout ]") + } + if _, ok = stacks[*profPath]; ok { + // Already registered, so we are done. + return + } + + path, err := expandTilde(*profPath) + ts.Check(decoratedWith("getting home directory", err)) + + ctx := context.Background() + if *timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, *timeout) + defer cancel() + } + + dir := filepath.Dir(path) + base := filepath.Base(path) + prof, err := profile.LoadProfileFrom(ts.MkAbs(dir), base) + ts.Check(decoratedWith("loading profile", err)) + cfg, err := stack.LoadConfig(prof) + ts.Check(decoratedWith("loading config", err)) + provName := stack.DefaultProvider + if cfg.Provider != "" { + provName = cfg.Provider + } + prov, err := stack.BuildProvider(provName, prof) + ts.Check(decoratedWith("getting build provider", err)) + + es, err := stack.NewElasticsearchClientFromProfile(prof, elasticsearch.OptionWithCertificateAuthority(cfg.CACertFile)) + ts.Check(decoratedWith("making elasticsearch client", err)) + ts.Check(decoratedWith("checking cluster health", es.CheckHealth(ctx))) + + kibana, err := stack.NewKibanaClientFromProfile(prof, kibana.CertificateAuthority(cfg.CACertFile)) + ts.Check(decoratedWith("making kibana client", err)) + vi, err := kibana.Version() + ts.Check(decoratedWith("getting kibana version", err)) + + stacks[*profPath] = &runningStack{ + version: vi.Version(), + profile: prof, + provider: prov, + config: cfg, + external: true, + es: es, + kibana: kibana, + } + msg, err := json.MarshalIndent(cfg, 
"", "\t") + ts.Check(decoratedWith("marshaling config", err)) + fmt.Fprintf(ts.Stdout(), "%s\n", msg) +} + +// getDocs performs a search on the current data stream or a named data stream +// and prints the results. +func getDocs(ts *testscript.TestScript, neg bool, args []string) { + clearStdStreams(ts) + + if neg { + ts.Fatalf("unsupported: ! get_docs") + } + + stacks, ok := ts.Value(runningStackTag{}).(map[string]*runningStack) + if !ok { + ts.Fatalf("no active stacks registry") + } + + flg := flag.NewFlagSet("get_docs", flag.ContinueOnError) + profName := flg.String("profile", "default", "profile name") + want := flg.Int("want", -1, "number of events expected (negative indicates any positive number)") + querySize := flg.Int("size", 500, "profile name") + confirmDuration := flg.Duration("confirm", 4*time.Second, "time to ensure hits do not exceed want count (zero or lower indicates no wait)") + timeout := flg.Duration("timeout", time.Minute, "timeout (zero or lower indicates no timeout)") + ts.Check(flg.Parse(args)) + if flg.NArg() != 0 && flg.NArg() != 1 { + ts.Fatalf("usage: get_docs [-profile ] [-timeout ] []") + } + + ds := ts.Getenv("DATA_STREAM") + if flg.NArg() == 1 { + ds = flg.Arg(0) + } + if ds == "" { + ts.Fatalf("no data stream specified") + } + + stk, ok := stacks[*profName] + if !ok { + ts.Fatalf("no active client for %s", *profName) + } + + ctx := context.Background() + if *timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, *timeout) + defer cancel() + } + + confirmed := false + var body bytes.Buffer + for { + ts.Check(decoratedWith("performing search", ctx.Err())) + + resp, err := stk.es.Search( + stk.es.Search.WithContext(ctx), + stk.es.Search.WithIndex(ds), + stk.es.Search.WithSort("@timestamp:asc"), + stk.es.Search.WithSize(*querySize), + stk.es.Search.WithSource("true"), + stk.es.Search.WithBody(strings.NewReader(system.FieldsQuery)), + stk.es.Search.WithIgnoreUnavailable(true), + ) + resp.String() + 
ts.Check(decoratedWith("performing search", err)) + body.Reset() + _, err = io.Copy(&body, resp.Body) + ts.Check(decoratedWith("reading search result", err)) + resp.Body.Close() + + if resp.StatusCode == http.StatusServiceUnavailable && bytes.Contains(body.Bytes(), []byte("no_shard_available_action_exception")) { + // Index is being created, but no shards are available yet. + // See https://github.com/elastic/elasticsearch/issues/65846 + time.Sleep(time.Second) + continue + } + if resp.StatusCode >= 300 { + ts.Fatalf("failed to get docs from data stream %s: %s", ds, body.Bytes()) + } + + var res system.FieldsQueryResult + ts.Check(decoratedWith("unmarshaling result", json.Unmarshal(body.Bytes(), &res))) + + n := res.Hits.Total.Value + if n < *want { + time.Sleep(time.Second) + continue + } + if n != 0 && *want < 0 { + break + } + if n > *want && *want >= 0 { + break + } + if n == *want { + if confirmed || *confirmDuration == 0 { + break + } + time.Sleep(*confirmDuration) + confirmed = true + } + time.Sleep(time.Second) + } + fmt.Fprintf(ts.Stdout(), "%s\n", body.Bytes()) +} + +// dumpLogs copies logs to a directory within the work directory. +func dumpLogs(ts *testscript.TestScript, neg bool, args []string) { + clearStdStreams(ts) + + if neg { + ts.Fatalf("unsupported: ! dump_logs") + } + + stacks, ok := ts.Value(runningStackTag{}).(map[string]*runningStack) + if !ok { + ts.Fatalf("no active stacks registry") + } + + flg := flag.NewFlagSet("down", flag.ContinueOnError) + profName := flg.String("profile", "default", "profile name") + snce := flg.String("since", "", "get logs since this time (RFC3339)") + timeout := flg.Duration("timeout", 0, "timeout (zero or lower indicates no timeout)") + ts.Check(flg.Parse(args)) + if flg.NArg() != 0 && flg.NArg() != 1 { + ts.Fatalf("usage: dump_logs [-profile ] [-provider ] [-timeout ] [-since ] []") + } + stk, ok := stacks[*profName] + if !ok { + ts.Fatalf("no running stack for %s", *profName) + } + + dir := "." 
+ if flg.NArg() == 1 { + dir = filepath.Clean(flg.Arg(0)) + } + if dir == "." { + dir = "logs" + } + _, err := os.Stat(ts.MkAbs(dir)) + if err == nil { + ts.Fatalf("%q exists", dir) + } + + // Make the target directory safe, and ensure that + // it its parent is present and within $WORK, and + // the actual target is absent. + r, err := os.OpenRoot(ts.MkAbs(".")) + ts.Check(decoratedWith("making root jail", err)) + ts.Check(decoratedWith("making logs destination", r.MkdirAll(dir, 0o700))) + ts.Check(decoratedWith("cleaning logs destination", r.Remove(dir))) + + // This is necessary to allow writing a log directory to $WORK, + // something that is otherwise impossible because stack.Dump + // doesn't just write to stack.DumpOptions.Output, but to + // filepath.Join(options.Output, "logs"), _and_ deletes all + // of options.Output. So a path of "." deletes $WORK despite + // only needing to, at most, delete $WORK/logs. ¯\_(ツ)_/¯ + tmp, err := os.MkdirTemp(ts.MkAbs("."), "*") + ts.Check(decoratedWith("making temporary logs directory", err)) + + var since time.Time + if *snce != "" { + var err error + since, err = time.Parse(time.RFC3339Nano, *snce) + ts.Check(decoratedWith("parsing since flag", err)) + } + + ctx := context.Background() + if *timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, *timeout) + defer cancel() + } + + _, err = stack.Dump(ctx, stack.DumpOptions{ + Output: tmp, + Profile: stk.profile, + Since: since, + }) + ts.Check(decoratedWith("dumping agent logs", err)) + ts.Check(decoratedWith("moving logs", os.Rename(filepath.Join(tmp, "logs"), filepath.Join(ts.MkAbs(dir))))) + ts.Check(decoratedWith("removing temporary logs target", os.Remove(tmp))) +} + +type runningStackTag struct{} + +type runningStack struct { + version string + profile *profile.Profile + provider stack.Provider + config stack.Config + external bool + + es *elasticsearch.Client + kibana *kibana.Client +} diff --git 
a/test/packages/other/with_script/_dev/deploy/docker/docker-compose.yml b/test/packages/other/with_script/_dev/deploy/docker/docker-compose.yml new file mode 100644 index 0000000000..98e9a0c4b5 --- /dev/null +++ b/test/packages/other/with_script/_dev/deploy/docker/docker-compose.yml @@ -0,0 +1,7 @@ +version: '2.3' +services: + with_dataset: + image: "alpine:3.16" + command: ["sh", "-c", "while true; do echo '{\"message\": \"hello\"}' >> ./logs/with_dataset.log; sleep 1; done"] + volumes: + - ${SERVICE_LOGS_DIR}:/logs diff --git a/test/packages/other/with_script/changelog.yml b/test/packages/other/with_script/changelog.yml new file mode 100644 index 0000000000..99e7ab0582 --- /dev/null +++ b/test/packages/other/with_script/changelog.yml @@ -0,0 +1,10 @@ +- version: "0.0.2" + changes: + - description: next release + type: enhancement + link: https://github.com/elastic/elastic-package/pull/9001 +- version: "0.0.1" + changes: + - description: initial release + type: enhancement + link: https://github.com/elastic/elastic-package/pull/9001 diff --git a/test/packages/other/with_script/data_stream/first/_dev/test/scripts/agent_up_down.txt b/test/packages/other/with_script/data_stream/first/_dev/test/scripts/agent_up_down.txt new file mode 100644 index 0000000000..72eb7d3893 --- /dev/null +++ b/test/packages/other/with_script/data_stream/first/_dev/test/scripts/agent_up_down.txt @@ -0,0 +1,61 @@ +[external_stack] skip 'Skipping non-external stack test.' + +# Bring up stack. +stack_up 8.19.3 +! stderr . +stdout 'Local package-registry will serve packages from these sources' + +# Install an agent. +install_agent +! stderr . +cmp stdout want_agent_up.text + +# Bring up a docker container and check that we get the expected output. +docker_up hello_service +! stderr . +cp stdout got_up.text +match_file want_up.pattern got_up.text + +# Take down the service and check logs for our message. +docker_down hello_service +! stderr . +cmp stdout want_down.text + +# Uninstall the agent. 
+uninstall_agent +! stderr . +stdout '^deleted agent policies for '${PKG}/${DATA_STREAM} + +# Take down stack. +stack_down +! stdout . +! stderr . + +-- config.yml -- +stack: + image_ref_overrides: {} +profile: + current: default +-- profiles/default/profile.json -- +{ + "name": "default", + "date_created": "2025-09-15T03:46:37.979361939Z", + "version": "1" +} +-- stack/development/pin -- +This file is required to make the stack/development directory exist to prevent compose from failing. +-- tmp/service_logs/pin -- +This file is required to make the tmp/service_logs directory exist to prevent compose from failing in CI. +-- hello_service/docker-compose.yml -- +version: '3.8' +services: + # Service name matches directory… for reasons. + hello_service: + image: alpine:latest + command: ["echo", "Hello, World!"] +-- want_up.pattern -- +deployed elastic-package-service-[0-9]+:hello_service-1 +-- want_down.text -- +hello_service-1 | Hello, World! +-- want_agent_up.text -- +installed agent policies for with_script/first diff --git a/test/packages/other/with_script/data_stream/first/_dev/test/scripts/agent_up_down_external.txt b/test/packages/other/with_script/data_stream/first/_dev/test/scripts/agent_up_down_external.txt new file mode 100644 index 0000000000..f016be52e6 --- /dev/null +++ b/test/packages/other/with_script/data_stream/first/_dev/test/scripts/agent_up_down_external.txt @@ -0,0 +1,55 @@ +[!external_stack] skip 'Skipping external stack test.' + +# Register stack and check that we get the expected output. +use_stack -profile ${CONFIG_PROFILES}/${PROFILE} +! stderr . +cmpenv stdout want_use.json + +# Install an agent. +install_agent -profile ${CONFIG_PROFILES}/${PROFILE} +! stderr . +cmp stdout want_agent_up.text + +# Bring up a docker container and check that we get the expected output. +docker_up -profile ${CONFIG_PROFILES}/${PROFILE} hello_service +! stderr . 
+cp stdout got_up.text +match_file want_up.pattern got_up.text + +# Take down the service and check logs for our message. +docker_down hello_service +! stderr . +cmp stdout want_down.text + +# Uninstall the agent. +uninstall_agent -profile ${CONFIG_PROFILES}/${PROFILE} +! stderr . +stdout '^deleted agent policies for '${PKG}/${DATA_STREAM} + +# Then attempt to take it down again. +! stack_down -profile ${CONFIG_PROFILES}/${PROFILE} +! stdout . +stderr '^cannot take down externally run stack '${CONFIG_PROFILES}'/'${PROFILE}'$' + +-- hello_service/docker-compose.yml -- +version: '3.8' +services: + # Service name matches directory… for reasons. + hello_service: + image: alpine:latest + command: ["echo", "Hello, World!"] +-- want_use.json -- +{ + "provider": "compose", + "elasticsearch_host": "https://127.0.0.1:9200", + "elasticsearch_username": "elastic", + "elasticsearch_password": "changeme", + "kibana_host": "https://127.0.0.1:5601", + "ca_cert_file": "${CONFIG_PROFILES}/${PROFILE}/certs/ca-cert.pem" +} +-- want_up.pattern -- +deployed elastic-package-service-[0-9]+:hello_service-1 +-- want_down.text -- +hello_service-1 | Hello, World! +-- want_agent_up.text -- +installed agent policies for with_script/first diff --git a/test/packages/other/with_script/data_stream/first/_dev/test/scripts/docker_up_down.txt b/test/packages/other/with_script/data_stream/first/_dev/test/scripts/docker_up_down.txt new file mode 100644 index 0000000000..7d5a474d0b --- /dev/null +++ b/test/packages/other/with_script/data_stream/first/_dev/test/scripts/docker_up_down.txt @@ -0,0 +1,49 @@ +[external_stack] skip 'Skipping non-external stack test.' + +# Bring up stack. +stack_up 8.19.3 +! stderr . +stdout 'Local package-registry will serve packages from these sources' + +# Bring up a docker container and check that we get the expected output. +docker_up hello_service +! stderr . 
+cp stdout got_up.text +match_file want_up.pattern got_up.text + +# Take down the service and check logs for our message. +docker_down hello_service +! stderr . +cmp stdout want_down.text + +# Take down stack. +stack_down +! stdout . +! stderr . + +-- config.yml -- +stack: + image_ref_overrides: {} +profile: + current: default +-- profiles/default/profile.json -- +{ + "name": "default", + "date_created": "2025-09-15T03:46:37.979361939Z", + "version": "1" +} +-- stack/development/pin -- +This file is required to make the stack/development directory exist to prevent compose from failing. +-- tmp/service_logs/pin -- +This file is required to make the tmp/service_logs directory exist to prevent compose from failing in CI. +-- hello_service/docker-compose.yml -- +version: '3.8' +services: + # Service name matches directory… for reasons. + hello_service: + image: alpine:latest + command: ["echo", "Hello, World!"] +-- want_up.pattern -- +deployed elastic-package-service-[0-9]+:hello_service-1 +-- want_down.text -- +hello_service-1 | Hello, World! 
diff --git a/test/packages/other/with_script/data_stream/first/_dev/test/scripts/env.txt b/test/packages/other/with_script/data_stream/first/_dev/test/scripts/env.txt new file mode 100644 index 0000000000..1260f27571 --- /dev/null +++ b/test/packages/other/with_script/data_stream/first/_dev/test/scripts/env.txt @@ -0,0 +1,25 @@ +[!exec:echo] skip 'Skipping test requiring absent echo command' + +exec echo ${CONFIG_ROOT} +stdout '/\.elastic-package$' + +exec echo ${CONFIG_PROFILES} +stdout '/\.elastic-package/profiles$' + +exec echo ${PKG} +stdout '^with_script$' + +exec echo ${PKG_ROOT} +stdout '/elastic/elastic-package/test/packages/other/with_script$' + +exec echo ${DATA_STREAM} +stdout '^first$' + +exec echo ${DATA_STREAM_ROOT} +stdout '/elastic/elastic-package/test/packages/other/with_script/data_stream/first$' + +exec echo ${CURRENT_VERSION} +stdout '^0\.0\.2$' + +exec echo ${PREVIOUS_VERSION} +stdout '^0\.0\.1$' diff --git a/test/packages/other/with_script/data_stream/first/_dev/test/scripts/get_docs_external.txt b/test/packages/other/with_script/data_stream/first/_dev/test/scripts/get_docs_external.txt new file mode 100644 index 0000000000..674ec5d8be --- /dev/null +++ b/test/packages/other/with_script/data_stream/first/_dev/test/scripts/get_docs_external.txt @@ -0,0 +1,133 @@ +[!external_stack] skip 'Skipping external stack test.' +[!exec:echo] skip 'Skipping test requiring absent echo command' +[!exec:jq] skip 'Skipping test requiring absent jq command' + +# Register stack and check that we get the expected output. +use_stack -profile ${CONFIG_PROFILES}/${PROFILE} +! stderr . +cmpenv stdout want_use.json + +# Install an agent. +install_agent -profile ${CONFIG_PROFILES}/${PROFILE} NETWORK_NAME +! stderr . +cmp stdout want_agent_up.text +# Check the network name is valid. +exec echo ${NETWORK_NAME} +stdout 'elastic-package-agent-with_script-first-[0-9]+_default' + +# Bring up a docker container and check that we get the expected output. 
+docker_up -profile ${CONFIG_PROFILES}/${PROFILE} -network ${NETWORK_NAME} test-hits +! stderr . +cp stdout got_up.text +match_file want_up.pattern got_up.text + +# Add the package resources. +add_package -profile ${CONFIG_PROFILES}/${PROFILE} +! stderr . + +# Add the data stream. +add_data_stream -profile ${CONFIG_PROFILES}/${PROFILE} test_config.yaml DATA_STREAM_NAME +! stderr . +cmpenv stdout want_add_data_stream.text +exec echo ${DATA_STREAM_NAME} +cp stdout got_data_stream_name.text +match_file want_data_stream_name.pattern got_data_stream_name.text + +# Check that no policy exists yet. +# No timeout so we do only a single poll. +get_policy -profile ${CONFIG_PROFILES}/${PROFILE} ${DATA_STREAM_NAME} +stdout 'not found' + +# Start the service. +docker_signal test-hits SIGHUP + +# Wait for the service to exit. +docker_wait_exit -timeout 5m test-hits + +# Check that we can see our policy. +get_policy -profile ${CONFIG_PROFILES}/${PROFILE} -timeout 1m ${DATA_STREAM_NAME} +cp stdout got_policy.json +exec jq '.name=="'${DATA_STREAM_NAME}'"' got_policy.json +stdout true + +# Take down the service and check logs for our message. +docker_down test-hits +! stderr . +cp stdout got_down.text +match_file want_down.pattern got_down.text + +# Get documents from the data stream. +get_docs -profile ${CONFIG_PROFILES}/${PROFILE} -want 10 -timeout 5m ${DATA_STREAM_NAME} +cp stdout got_docs.json + +# Let's upgrade! +upgrade_package_latest -profile ${CONFIG_PROFILES}/${PROFILE} +! stderr . +stdout 'upgraded package '${PKG}' from version '${CURRENT_VERSION@R} + +# Remove the data stream. +remove_data_stream -profile ${CONFIG_PROFILES}/${PROFILE} ${DATA_STREAM_NAME} +! stderr . +cmpenv stdout want_remove_data_stream.text + +# Uninstall the agent. +uninstall_agent -profile ${CONFIG_PROFILES}/${PROFILE} -timeout 1m +stdout '^deleted agent policies for '${PKG}/${DATA_STREAM} +! stderr . + +# Remove the package resources. +remove_package -profile ${CONFIG_PROFILES}/${PROFILE} +! stderr . 
+ +# Then attempt to take the stack down even though we don't own it. +! stack_down -profile ${CONFIG_PROFILES}/${PROFILE} +! stdout . +stderr '^cannot take down externally run stack '${CONFIG_PROFILES}'/'${PROFILE}'$' + +-- test-hits/docker-compose.yml -- +version: '2.3' +services: + test-hits: + image: docker.elastic.co/observability/stream:v0.20.0 + volumes: + - ./logs:/logs:ro + command: log --start-signal=SIGHUP --delay=5s --addr elastic-agent:9999 -p=tcp /logs/generated.log +-- test-hits/logs/generated.log -- +ntpd[1001]: kernel time sync enabled utl +restorecond: : Reset file context quasiarc: liqua +auditd[5699]: Audit daemon rotating log files +anacron[5066]: Normal exit ehend +restorecond: : Reset file context vol: luptat +heartbeat: : < Processing command: accept +restorecond: : Reset file context nci: ofdeFin +auditd[6668]: Audit daemon rotating log files +anacron[1613]: Normal exit mvolu +ntpd[2959]: ntpd gelit-r tatno +-- want_use.json -- +{ + "provider": "compose", + "elasticsearch_host": "https://127.0.0.1:9200", + "elasticsearch_username": "elastic", + "elasticsearch_password": "changeme", + "kibana_host": "https://127.0.0.1:5601", + "ca_cert_file": "${CONFIG_PROFILES}/${PROFILE}/certs/ca-cert.pem" +} +-- want_up.pattern -- +deployed elastic-package-service-[0-9]+:test-hits-1 +-- want_down.pattern -- +test-hits-1 | \{"level":"info","ts":"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-2][0-9]:[0-9]{2}:[0-9]{2}\.[0-9]{3}Z","caller":"command/log\.go:[0-9]+","msg":"Log data sent\.","address":"elastic-agent:9999","log":"/logs/generated\.log","total_bytes":[0-9]+,"total_lines":10} +-- want_agent_up.text -- +installed agent policies for with_script/first +-- test_config.yaml -- +input: tcp +vars: ~ +data_stream: + vars: + tcp_host: 0.0.0.0 + tcp_port: 9999 +-- want_data_stream_name.pattern -- +logs-with_script\.first-[0-9]+ +-- want_add_data_stream.text -- +added ${DATA_STREAM_NAME} data stream policy templates for ${PKG}/${DATA_STREAM} +-- want_remove_data_stream.text -- 
+removed ${DATA_STREAM_NAME} data stream policy templates for ${PKG}/${DATA_STREAM} diff --git a/test/packages/other/with_script/data_stream/first/_dev/test/scripts/pipeline.txt b/test/packages/other/with_script/data_stream/first/_dev/test/scripts/pipeline.txt new file mode 100644 index 0000000000..a6d74253ca --- /dev/null +++ b/test/packages/other/with_script/data_stream/first/_dev/test/scripts/pipeline.txt @@ -0,0 +1,72 @@ +[external_stack] skip 'Skipping non-external stack test.' + +date START + +# Bring up a stack and check that we get the expected output. +stack_up 8.19.3 +! stderr . +stdout 'Local package-registry will serve packages from these sources' + +# Install the data stream's pipeline. +install_pipelines ${DATA_STREAM_ROOT} +! stderr . +stdout '^installed pipelines in '${DATA_STREAM}' with nonce' + +# Run a simulation with the data. +simulate ${DATA_STREAM_ROOT} default data.json +cmp stdout want.json +! stderr . + +# Uninstall pipelines. +uninstall_pipelines ${DATA_STREAM_ROOT} +! stderr . +stdout '^uninstalled pipelines in '${DATA_STREAM} + +# Put logs in the work directory and check for existence. +dump_logs -since ${START} . +exists ${WORK}/logs/elastic-agent.log +exists ${WORK}/logs/elasticsearch.log +exists ${WORK}/logs/fleet-server.log +exists ${WORK}/logs/kibana.log +exists ${WORK}/logs/package-registry.log + +# Then take it down again. +stack_down +! stdout . +! stderr . + +-- config.yml -- +stack: + image_ref_overrides: {} +profile: + current: default +-- profiles/default/profile.json -- +{ + "name": "default", + "date_created": "2025-09-15T03:46:37.979361939Z", + "version": "1" +} +-- stack/development/pin -- +This file is required to make the stack/development directory exist to prevent compose from failing. +-- tmp/service_logs/pin -- +This file is required to make the tmp/service_logs directory exist to prevent compose from failing in CI. +-- data.json -- +{ + "message": "World!" 
+} +{ + "message": "from testscript" +} +-- want.json -- +{ + "event": { + "original": "World!" + }, + "hello": "World!" +} +{ + "event": { + "original": "from testscript" + }, + "hello": "from testscript" +} diff --git a/test/packages/other/with_script/data_stream/first/_dev/test/scripts/pipeline_external.txt b/test/packages/other/with_script/data_stream/first/_dev/test/scripts/pipeline_external.txt new file mode 100644 index 0000000000..8cfb31a3f1 --- /dev/null +++ b/test/packages/other/with_script/data_stream/first/_dev/test/scripts/pipeline_external.txt @@ -0,0 +1,66 @@ +[!external_stack] skip 'Skipping external stack test.' + +date START + +# Register stack and check that we get the expected output. +use_stack -profile ${CONFIG_PROFILES}/${PROFILE} +! stderr . +cmpenv stdout want_use.json + +# Install the data stream's pipeline. +install_pipelines -profile ${CONFIG_PROFILES}/${PROFILE} ${DATA_STREAM_ROOT} +! stderr . +stdout '^installed pipelines in '${DATA_STREAM}' with nonce' + +# Run a simulation with the data. +simulate -profile ${CONFIG_PROFILES}/${PROFILE} ${DATA_STREAM_ROOT} default data.json +! stderr . +cmp stdout want.json + +# Uninstall pipelines. +uninstall_pipelines -profile ${CONFIG_PROFILES}/${PROFILE} ${DATA_STREAM_ROOT} +! stderr . +stdout '^uninstalled pipelines in '${DATA_STREAM} + +# Put logs in the work directory and check for existence. +dump_logs -profile ${CONFIG_PROFILES}/${PROFILE} -since ${START} +exists ${WORK}/logs/elastic-agent.log +exists ${WORK}/logs/elasticsearch.log +exists ${WORK}/logs/fleet-server.log +exists ${WORK}/logs/kibana.log +exists ${WORK}/logs/package-registry.log + +# Then attempt to take it down again. +! stack_down -profile ${CONFIG_PROFILES}/${PROFILE} +! stdout . 
+stderr '^cannot take down externally run stack '${CONFIG_PROFILES}'/'${PROFILE}'$' + +-- want_use.json -- +{ + "provider": "compose", + "elasticsearch_host": "https://127.0.0.1:9200", + "elasticsearch_username": "elastic", + "elasticsearch_password": "changeme", + "kibana_host": "https://127.0.0.1:5601", + "ca_cert_file": "${CONFIG_PROFILES}/${PROFILE}/certs/ca-cert.pem" +} +-- data.json -- +{ + "message": "World!" +} +{ + "message": "from testscript" +} +-- want.json -- +{ + "event": { + "original": "World!" + }, + "hello": "World!" +} +{ + "event": { + "original": "from testscript" + }, + "hello": "from testscript" +} diff --git a/test/packages/other/with_script/data_stream/first/_dev/test/scripts/pipeline_txtar_external.txt b/test/packages/other/with_script/data_stream/first/_dev/test/scripts/pipeline_txtar_external.txt new file mode 100644 index 0000000000..73e5c32b6f --- /dev/null +++ b/test/packages/other/with_script/data_stream/first/_dev/test/scripts/pipeline_txtar_external.txt @@ -0,0 +1,82 @@ +[!external_stack] skip 'Skipping external stack test.' + +date START + +# Register stack and check that we get the expected output. +use_stack -profile ${CONFIG_PROFILES}/${PROFILE} +! stderr . +cmpenv stdout want_use.json + +# Install the data stream's pipeline. +install_pipelines -profile ${CONFIG_PROFILES}/${PROFILE} ${WORK} +! stderr . +stdout '^installed pipelines in .* with nonce' + +# Run a simulation with the data. +simulate -profile ${CONFIG_PROFILES}/${PROFILE} ${WORK} default data.json +! stderr . +cmp stdout want.json + +# Uninstall pipelines. +uninstall_pipelines -profile ${CONFIG_PROFILES}/${PROFILE} ${WORK} +! stderr . +stdout '^uninstalled pipelines in' + +# Put logs in the work directory and check for existence. 
+dump_logs -profile ${CONFIG_PROFILES}/${PROFILE} -since ${START} +exists ${WORK}/logs/elastic-agent.log +exists ${WORK}/logs/elasticsearch.log +exists ${WORK}/logs/fleet-server.log +exists ${WORK}/logs/kibana.log +exists ${WORK}/logs/package-registry.log + +# Then attempt to take it down again. +! stack_down -profile ${CONFIG_PROFILES}/${PROFILE} +! stdout . +stderr '^cannot take down externally run stack '${CONFIG_PROFILES}'/'${PROFILE}'$' + +-- want_use.json -- +{ + "provider": "compose", + "elasticsearch_host": "https://127.0.0.1:9200", + "elasticsearch_username": "elastic", + "elasticsearch_password": "changeme", + "kibana_host": "https://127.0.0.1:5601", + "ca_cert_file": "${CONFIG_PROFILES}/${PROFILE}/certs/ca-cert.pem" +} +-- data.json -- +{ + "message": "World!" +} +{ + "message": "from testscript" +} +-- elasticsearch/ingest_pipeline/default.yml -- +--- +description: Test pipeline. +processors: + - set: + field: hello + value: '{{{message}}}' + ignore_empty_value: true + - rename: + field: message + target_field: event.original + ignore_missing: true +on_failure: + - set: + field: error.message + value: '{{{ _ingest.on_failure_message }}}' +-- want.json -- +{ + "event": { + "original": "World!" + }, + "hello": "World!" +} +{ + "event": { + "original": "from testscript" + }, + "hello": "from testscript" +} diff --git a/test/packages/other/with_script/data_stream/first/_dev/test/scripts/stack_up_down.txt b/test/packages/other/with_script/data_stream/first/_dev/test/scripts/stack_up_down.txt new file mode 100644 index 0000000000..2a451e43f2 --- /dev/null +++ b/test/packages/other/with_script/data_stream/first/_dev/test/scripts/stack_up_down.txt @@ -0,0 +1,36 @@ +[external_stack] skip 'Skipping non-external stack test.' + +# Bring up a stack and check that we get the expected output. +stack_up 8.19.3 +! stderr . +cp stdout got_up.text +match_file want_up.pattern got_up.text + +# Then take it down again. +stack_down +! stdout . +! stderr . 
+ +-- config.yml -- +stack: + image_ref_overrides: {} +profile: + current: default +-- profiles/default/profile.json -- +{ + "name": "default", + "date_created": "2025-09-15T03:46:37.979361939Z", + "version": "1" +} +-- stack/development/pin -- +This file is required to make the stack/development directory exist to prevent compose from failing. +-- tmp/service_logs/pin -- +This file is required to make the tmp/service_logs directory exist to prevent compose from failing in CI. +-- want_up.pattern -- +Elasticsearch host: https://127\.0\.0\.1:9200 +Kibana host: https://127\.0\.0\.1:5601 +Username: elastic +Password: changeme +(?:Custom build packages directory found: .*/elastic/elastic-package/build/packages +)?Local package-registry will serve packages from these sources: +- Proxy to https://epr\.elastic\.co diff --git a/test/packages/other/with_script/data_stream/first/_dev/test/scripts/zip_package.txt b/test/packages/other/with_script/data_stream/first/_dev/test/scripts/zip_package.txt new file mode 100644 index 0000000000..49a7d2a12e --- /dev/null +++ b/test/packages/other/with_script/data_stream/first/_dev/test/scripts/zip_package.txt @@ -0,0 +1,32 @@ +[!external_stack] skip 'Skipping external stack test.' +[!exec:curl] skip 'Skipping test requiring absent curl command' + +# Register stack and check that we get the expected output. +use_stack -profile ${CONFIG_PROFILES}/${PROFILE} +! stderr . +cmpenv stdout want_use.json + +exec curl -s -O 'https://epr.elastic.co/epr/cel/cel-0.1.0.zip' + +add_package_zip -profile ${CONFIG_PROFILES}/${PROFILE} cel-0.1.0.zip +! stderr . +stdout 'added zipped package resources in '${WORK@R}'/cel-0\.1\.0\.zip for cel in test for with_script' + +remove_package_zip -profile ${CONFIG_PROFILES}/${PROFILE} cel-0.1.0.zip +! stderr . +stdout 'removed zipped package resources in '${WORK@R}'/cel-0\.1\.0\.zip for cel in test for with_script' + +# Then attempt to take it down again. +! stack_down -profile ${CONFIG_PROFILES}/${PROFILE} +! 
stdout . +stderr '^cannot take down externally run stack '${CONFIG_PROFILES}'/'${PROFILE}'$' + +-- want_use.json -- +{ + "provider": "compose", + "elasticsearch_host": "https://127.0.0.1:9200", + "elasticsearch_username": "elastic", + "elasticsearch_password": "changeme", + "kibana_host": "https://127.0.0.1:5601", + "ca_cert_file": "${CONFIG_PROFILES}/${PROFILE}/certs/ca-cert.pem" +} diff --git a/test/packages/other/with_script/data_stream/first/agent/stream/hits.yml.hbs b/test/packages/other/with_script/data_stream/first/agent/stream/hits.yml.hbs new file mode 100644 index 0000000000..2c57f3fa01 --- /dev/null +++ b/test/packages/other/with_script/data_stream/first/agent/stream/hits.yml.hbs @@ -0,0 +1,21 @@ +host: "{{tcp_host}}:{{tcp_port}}" +tags: +{{#each tags as |tag i|}} + - {{tag}} +{{/each}} +fields_under_root: true +fields: + observer: + vendor: Test + product: test + type: test +{{#contains tags "forwarded"}} +publisher_pipeline.disable_host: true +{{/contains}} + +processors: +- add_locale: ~ +- add_fields: + target: '' + fields: + ecs.version: 1.7.0 diff --git a/test/packages/other/with_script/data_stream/first/elasticsearch/ingest_pipeline/default.yml b/test/packages/other/with_script/data_stream/first/elasticsearch/ingest_pipeline/default.yml new file mode 100644 index 0000000000..7909a3a3f2 --- /dev/null +++ b/test/packages/other/with_script/data_stream/first/elasticsearch/ingest_pipeline/default.yml @@ -0,0 +1,15 @@ +--- +description: Test pipeline. 
+processors: + - set: + field: hello + value: '{{{message}}}' + ignore_empty_value: true + - rename: + field: message + target_field: event.original + ignore_missing: true +on_failure: + - set: + field: error.message + value: '{{{ _ingest.on_failure_message }}}' \ No newline at end of file diff --git a/test/packages/other/with_script/data_stream/first/fields/base-fields.yml b/test/packages/other/with_script/data_stream/first/fields/base-fields.yml new file mode 100644 index 0000000000..e8d301a72d --- /dev/null +++ b/test/packages/other/with_script/data_stream/first/fields/base-fields.yml @@ -0,0 +1,22 @@ +- name: data_stream.type + type: constant_keyword + description: Data stream type. +- name: data_stream.dataset + type: constant_keyword + description: Data stream dataset. +- name: data_stream.namespace + type: constant_keyword + description: Data stream namespace. +- name: '@timestamp' + type: date + description: Event timestamp. +- name: input.type + type: keyword +- name: log.file.path + type: keyword +- name: log.offset + type: long +- name: ecs.version + type: keyword +- name: message + type: match_only_text diff --git a/test/packages/other/with_script/data_stream/first/manifest.yml b/test/packages/other/with_script/data_stream/first/manifest.yml new file mode 100644 index 0000000000..cdc9c79344 --- /dev/null +++ b/test/packages/other/with_script/data_stream/first/manifest.yml @@ -0,0 +1,31 @@ +title: Test +release: experimental +type: logs +streams: + - input: tcp + title: logs + description: Collect logs + template_path: hits.yml.hbs + vars: + - name: tags + type: text + title: Tags + multi: true + required: true + show_user: false + default: + - forwarded + - name: tcp_host + type: text + title: TCP host to listen on + multi: false + required: true + show_user: true + default: localhost + - name: tcp_port + type: integer + title: TCP port to listen on + multi: false + required: true + show_user: true + default: 9511 diff --git 
a/test/packages/other/with_script/docs/README.md b/test/packages/other/with_script/docs/README.md new file mode 100644 index 0000000000..a89b3cd5d3 --- /dev/null +++ b/test/packages/other/with_script/docs/README.md @@ -0,0 +1,2 @@ +# Test integration +This package demonstrates and tests scripted tests. diff --git a/test/packages/other/with_script/manifest.yml b/test/packages/other/with_script/manifest.yml new file mode 100644 index 0000000000..0330dda704 --- /dev/null +++ b/test/packages/other/with_script/manifest.yml @@ -0,0 +1,22 @@ +format_version: 3.5.0 +name: with_script +title: With script test +version: 0.0.2 +description: Package that demonstrates and tests scripted tests. +categories: + - custom +type: integration +conditions: + kibana: + version: '^8.0.0' +policy_templates: + - name: test + title: Test + description: Description + inputs: + - type: tcp + title: Collect hits via tcp. + description: Collect hits via tcp. +owner: + github: elastic/integrations + type: elastic diff --git a/test/packages/other/with_script/validation.yml b/test/packages/other/with_script/validation.yml new file mode 100644 index 0000000000..07c6efea4c --- /dev/null +++ b/test/packages/other/with_script/validation.yml @@ -0,0 +1,3 @@ +errors: + exclude_checks: + - JSE00001